From 5919b8dde292e107431a621b1de5396710a2100a Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 7 Jun 2022 23:05:58 +0800 Subject: [PATCH 01/99] Delete kernel.py --- kernel.py | 1344 ----------------------------------------------------- 1 file changed, 1344 deletions(-) delete mode 100644 kernel.py diff --git a/kernel.py b/kernel.py deleted file mode 100644 index 24f442467..000000000 --- a/kernel.py +++ /dev/null @@ -1,1344 +0,0 @@ -import torch -import numpy as np -import matplotlib.pyplot as plt -import pickle -import os -import time - - -class kernel: - def __init__(self,nn=None): - if nn!=None: - self.nn=nn - try: - if self.nn.km==0: - self.nn.km=1 - except AttributeError: - pass - self.PO=None - self.thread_lock=None - self.thread=None - self.ol=None - self.stop=None - self.batch=None - self.epoch=0 - self.end_loss=None - self.end_acc=None - self.end_test_loss=None - self.end_test_acc=None - self.acc_flag1=None - self.acc_flag2=None - self.flag=None - self.train_loss=None - self.train_acc=None - self.train_loss_list=[] - self.train_acc_list=[] - self.test_loss=None - self.test_acc=None - self.test_loss_list=[] - self.test_acc_list=[] - self.test=False - self.total_epoch=0 - self.time=0 - self.total_time=0 - - - def data(self,train_data,train_labels,test_data=None,test_labels=None): - self.train_data=train_data - self.train_labels=train_labels - if type(train_data)==list: - self.data_batch=[x for x in range(len(train_data))] - if type(train_labels)==list: - self.labels_batch=[x for x in range(len(train_labels))] - self.test_data=test_data - self.test_labels=test_labels - if test_data!=None: - self.test=True - if type(self.train_data)==list: - self.shape0=train_data[0].shape[0] - else: - self.shape0=train_data.shape[0] - if self.thread!=None: - self.t=-np.arange(-self.thread,1) - if self.PO==None: - self.train_loss=np.zeros(self.thread) - self.train_acc=np.zeros(self.thread) - self.train_loss_list=[[] for _ in range(self.thread)] - self.train_acc_list=[[] for _ in range(self.thread)] - if test_data!=None: - if self.PO==None: - self.test_loss=np.zeros(self.thread) - self.test_acc=np.zeros(self.thread) - self.test_loss_list=[[] for _ in range(self.thread)] - self.test_acc_list=[[] for _ in range(self.thread)] - self.stop=np.zeros(self.thread) - self.epoch=np.zeros(self.thread) - self.total_epoch=np.zeros(self.thread) - self.time=np.zeros(self.thread) - self.total_time=np.zeros(self.thread) - return - - - def init(self,param=None): - if param!=None: - self.nn.param=param - self.train_loss_list.clear() - self.train_acc_list.clear() - self.test_loss_list.clear() - self.test_acc_list.clear() - self.test=False - self.epoch=0 - self.total_epoch=0 - self.time=0 - self.total_time=0 - return - - - def add_threads(self,thread): - t=-np.arange(-thread,1)+self.thread+1 - self.t=t.extend(self.t) - self.thread+=thread - if self.PO==None: - self.train_loss=np.concatenate((self.train_loss,np.zeros(self.t))) - self.train_acc=np.concatenate((self.train_acc,np.zeros(self.t))) - self.train_loss_list.extend([[] for _ in range(len(self.t))]) - self.train_acc_list.extend([[] for _ in range(len(self.t))]) - if self.test==True: - if self.PO==None: - self.test_loss=np.concatenate((self.test_loss,np.zeros(self.t))) - self.test_acc=np.concatenate((self.test_acc,np.zeros(self.t))) - self.test_loss_list.extend([[] for _ in range(len(self.t))]) - self.test_acc_list.extend([[] for _ in range(len(self.t))]) - 
self.stop=np.concatenate((self.stop,np.zeros(self.t))) - self.epoch=np.concatenate((self.epoch,np.zeros(self.t))) - self.total_epoch=np.concatenate((self.total_epoch,np.zeros(self.t))) - self.time=np.concatenate((self.time,np.zeros(self.t))) - self.total_time=np.concatenate((self.total_time,np.zeros(self.t))) - return - - - def set_end(self,end_loss=None,end_acc=None,end_test_loss=None,end_test_acc=None): - if end_loss!=None: - self.end_loss=end_loss - if end_acc!=None: - self.end_acc=end_acc - if end_test_loss!=None: - self.end_test_loss=end_test_loss - if end_test_acc!=None: - self.end_test_acc=end_test_acc - return - - - def apply_gradient(self,tape,opt,loss,parameter): - gradient=tape.gradient(loss,parameter) - opt.apply_gradients(zip(gradient,parameter)) - return - - - def end(self): - if self.end_loss!=None and self.train_loss<=self.end_loss: - return True - elif self.end_acc!=None and self.train_acc>=self.end_acc: - return True - elif self.end_loss!=None and self.end_acc!=None and self.train_loss<=self.end_loss and self.train_acc>=self.end_acc: - return True - elif self.end_test_loss!=None and self.test_loss<=self.end_test_loss: - return True - elif self.end_test_acc!=None and self.test_acc>=self.end_test_acc: - return True - elif self.end_test_loss!=None and self.end_test_acc!=None and self.test_loss<=self.end_test_loss and self.test_acc>=self.end_test_acc: - return True - - - def loss_acc(self,output=None,labels_batch=None,batch_loss=None,batch=None,test_batch=None,train_loss=None,total_loss=None,total_acc=None,t=None): - if batch!=None: - if self.total_epoch>=1: - batch_loss=batch_loss - total_loss+=batch_loss - if self.acc_flag1==1: - batch_acc=self.nn.accuracy(output,labels_batch) - batch_acc=batch_acc - total_acc+=batch_acc - if self.shape0%batch!=0: - batch_loss=batch_loss - total_loss+=batch_loss - if self.acc_flag1==1: - batch_acc=self.nn.accuracy(output,labels_batch) - batch_acc=batch_acc - total_acc+=batch_acc - return total_loss,total_acc - elif self.ol==None: - if self.total_epoch>=1: - loss=train_loss.numpy() - if self.thread==None: - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) - else: - self.train_loss_list[t].append(loss.astype(np.float32)) - self.train_loss[t]=loss - self.train_loss[t]=self.train_loss[t].astype(np.float32) - if self.acc_flag1==1: - if self.thread==None: - acc=self.nn.accuracy(output,self.train_labels) - acc=acc.numpy() - self.train_acc_list.append(acc.astype(np.float32)) - self.train_acc=acc - self.train_acc=self.train_acc.astype(np.float32) - else: - acc=self.nn.accuracy(output,self.train_labels[t]) - acc=acc.numpy() - self.train_acc_list[t].append(acc.astype(np.float32)) - self.train_acc[t]=acc - self.train_acc[t]=self.train_acc[t].astype(np.float32) - if self.test==True: - if self.thread==None: - self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) - self.test_loss_list.append(self.test_loss) - if self.acc_flag1==1: - self.test_acc_list.append(self.test_acc) - else: - self.test_loss[t],self.test_acc[t]=self.test(self.test_data,self.test_labels,test_batch,t) - self.test_loss_list[t].append(self.test_loss[t]) - if self.acc_flag1==1: - self.test_acc_list[t].append(self.test_acc[t]) - return - - - def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_batch=None,t=None,i=None): - if self.end_loss!=None or self.end_acc!=None or self.end_test_loss!=None or self.end_test_acc!=None: - self._param=self.nn.param - if 
batch!=None: - total_loss=0 - total_acc=0 - batches=int((self.shape0-self.shape0%batch)/batch) - for j in range(batches): - index1=j*batch - index2=(j+1)*batch - if type(self.train_data)==list: - for i in range(len(self.train_data)): - if batch!=1: - data_batch[i]=self.train_data[i][index1:index2] - else: - data_batch[i]=self.train_data[i][j] - else: - if batch!=1: - data_batch=self.train_data[index1:index2] - else: - data_batch=self.train_data[j] - if type(self.train_labels)==list: - for i in range(len(self.train_data)): - if batch!=1: - labels_batch[i]=self.train_labels[i][index1:index2] - else: - labels_batch[i]=self.train_labels[i][j] - else: - if batch!=1: - labels_batch=self.train_labels[index1:index2] - else: - labels_batch=self.train_labels[j] - output=self.nn(data_batch) - batch_loss=self.nn.loss(output,labels_batch) - self.nn.opt.zero_grad() - batch_loss.backward() - self.nn.opt.step() -# try: -# if self.thread==None: -# if self.nn.opt!=None: -# pass -# self.apply_gradient(tape,self.nn.opt,batch_loss,self.nn.param) -# else: -# if self.nn.opt: -# pass -# self.apply_gradient(tape,self.nn.opt[t],batch_loss,self.nn.param[t]) -# except AttributeError: -# if self.thread==None: -# gradient=tape.gradient(batch_loss,self.nn.param) -# self.nn.oopt(gradient,self.nn.param) -# else: -# gradient=tape.gradient(batch_loss,self.nn.param[t]) -# self.nn.oopt(gradient,self.nn.param,t) - if i==epoch-1: -# if self.thread==None: -# output=self.nn.fp(data_batch) -# else: -# output=self.nn.fp(data_batch,t) - output=self.nn.fp(data_batch) - _batch_loss=self.nn.loss(output,labels_batch) - _total_loss,_total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=_batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) - else: - total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) -# if self.thread==None: -# try: -# self.nn.bc=j -# except AttributeError: -# pass -# else: -# try: -# self.nn.bc[t]=j -# except AttributeError: -# pass - try: - self.nn.bc=j - except AttributeError: - pass - if self.shape0%batch!=0: - batches+=1 - index1=batches*batch - index2=batch-(self.shape0-batches*batch) - if type(self.train_data)==list: - for i in range(len(self.train_data)): - data_batch[i]=torch.concat([self.train_data[i][index1:],self.train_data[i][:index2]]) - else: - data_batch=torch.concat([self.train_data[index1:],self.train_data[:index2]]) - if type(self.train_labels)==list: - for i in range(len(self.train_data)): - labels_batch[i]=torch.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]]) - else: - labels_batch=torch.concat([self.train_labels[index1:],self.train_labels[:index2]]) - output=self.nn(data_batch) - batch_loss=self.nn.loss(output,labels_batch) - self.nn.opt.zero_grad() - batch_loss.backward() - self.nn.opt.step() -# try: -# if self.thread==None: -# if self.nn.opt!=None: -# pass -# self.apply_gradient(tape,self.nn.opt,batch_loss,self.nn.param) -# else: -# if self.nn.opt: -# pass -# self.apply_gradient(tape,self.nn.opt[t],batch_loss,self.nn.param[t]) -# except AttributeError: -# if self.thread==None: -# gradient=tape.gradient(batch_loss,self.nn.param) -# self.nn.oopt(gradient,self.param) -# else: -# gradient=tape.gradient(batch_loss,self.nn.param[t]) -# self.nn.oopt(gradient,self.nn.param,t) - if i==epoch-1: - if self.thread==None: - output=self.nn.fp(data_batch) - else: - output=self.nn.fp(data_batch,t) - _batch_loss=self.nn.loss(output,labels_batch) - 
_total_loss,_total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=_batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) - else: - total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) -# if self.thread==None: -# try: -# self.nn.bc+=1 -# except AttributeError: -# pass -# else: -# try: -# self.nn.bc[t]+=1 -# except AttributeError: -# pass - try: - self.nn.bc+=1 - except AttributeError: - pass - if self.total_epoch>=1: - loss=total_loss.numpy()/batches - if self.acc_flag1==1: - train_acc=total_acc/batches - if self.thread==None: - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) - if i==epoch-1: - loss=_total_loss.numpy()/batches - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) - else: - self.train_loss_list[t].append(loss.astype(np.float32)) - self.train_loss[t]=loss - self.train_loss[t]=self.train_loss[t].astype(np.float32) - if i==epoch-1: - loss=_total_loss.numpy()/batches - self.train_loss_list[t].append(loss.astype(np.float32)) - self.train_loss[t]=loss - self.train_loss[t]=self.train_loss[t].astype(np.float32) - if self.acc_flag1==1: - if self.thread==None: - self.train_acc_list.append(train_acc.astype(np.float32)) - self.train_acc=train_acc - self.train_acc=self.train_acc.astype(np.float32) - if i==epoch-1: - train_acc=_total_acc.numpy()/batches - self.train_acc_list.append(train_acc.astype(np.float32)) - self.train_acc=train_acc - self.train_acc=self.train_acc.astype(np.float32) - else: - self.train_acc_list[t].append(train_acc.astype(np.float32)) - self.train_acc[t]=train_acc - self.train_acc[t]=self.train_acc[t].astype(np.float32) - if i==epoch-1: - train_acc=_total_acc.numpy()/batches - self.train_acc_list[t].append(train_acc.astype(np.float32)) - self.train_acc[t]=train_acc - self.train_acc[t]=self.train_acc[t].astype(np.float32) - if self.test==True: - if self.thread==None: - self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) - self.test_loss_list.append(self.test_loss) - if self.acc_flag1==1: - self.test_acc_list.append(self.test_acc) - else: - self.test_loss[t],self.test_acc[t]=self.test(self.test_data,self.test_labels,test_batch,t) - self.test_loss_list[t].append(self.test_loss[t]) - if self.acc_flag1==1: - self.test_acc_list[t].append(self.test_acc[t]) - elif self.ol==None: - output=self.nn(self.train_data) - train_loss=self.nn.loss(output,self.train_labels) - self.nn.opt.zero_grad() - batch_loss.backward() - self.nn.opt.step() -# try: -# if self.thread==None: -# if self.nn.opt!=None: -# pass -# self.apply_gradient(tape,self.nn.opt,train_loss,self.nn.param) -# else: -# if self.nn.opt: -# pass -# self.apply_gradient(tape,self.nn.opt[t],batch_loss,self.nn.param[t]) -# except AttributeError: -# if self.thread==None: -# gradient=tape.gradient(train_loss,self.nn.param) -# self.nn.oopt(gradient,self.nn.param) -# else: -# gradient=tape.gradient(batch_loss,self.nn.param[t]) -# self.nn.oopt(gradient,self.nn.param,t) -# self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=batch_loss,batch=batch,test_batch=test_batch,total_loss=total_loss,total_acc=total_acc,t=t) - if i==epoch-1: -# if self.thread==None: -# output=self.nn.fp(self.train_data) -# else: -# output=self.nn.fp(data_batch,t) - output=self.nn.fp(self.train_data) - 
train_loss=self.nn.loss(output,self.train_labels) - self.loss_acc(output=output,labels_batch=labels_batch,train_loss=train_loss,batch=batch,test_batch=test_batch,total_loss=total_loss,total_acc=total_acc,t=t) - else: - data=self.ol() - if self.stop==True: - return - output=self.nn(data[0]) - train_loss=self.nn.loss(output,data[1]) - self.nn.opt.zero_grad() - batch_loss.backward() - self.nn.opt.step() -# if self.thread_lock!=None: -# try: -# if self.nn.opt!=None: -# pass -# if self.PO==1: -# self.apply_gradient(tape,self.nn.opt,train_loss,self.nn.param) -# else: -# self.thread_lock.acquire() -# self.param=self.nn.param -# self.gradient=tape.gradient(train_loss,self.param) -# self.thread_lock.release() -# self.thread_lock.acquire() -# self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) -# self.thread_lock.release() -# except AttributeError: -# if self.PO==1: -# self.gradient=tape.gradient(train_loss,self.nn.param) -# self.nn.oopt(self.gradient,self.nn.param) -# else: -# self.thread_lock.acquire() -# self.gradient=tape.gradient(train_loss,self.nn.param) -# self.thread_lock.release() -# self.thread_lock.acquire() -# self.nn.oopt(self.gradient,self.nn.param) -# self.thread_lock.release() -# else: -# try: -# if self.nn.opt!=None: -# pass -# self.apply_gradient(tape,self.nn.opt,train_loss,self.nn.param) -# except AttributeError: -# gradient=tape.gradient(train_loss,self.nn.param) -# self.nn.oopt(gradient,self.nn.param) - train_loss=self.nn.loss(output,data[1]) - loss=train_loss.numpy() -# if self.thread_lock!=None: -# self.thread_lock.acquire() -# self.nn.train_loss=loss.astype(np.float32) -# try: -# self.nn.ec+=1 -# except AttributeError: -# pass -# self.total_epoch+=1 -# self.thread_lock.release() -# else: -# self.nn.train_loss=loss.astype(np.float32) -# try: -# self.nn.ec+=1 -# except AttributeError: -# pass -# self.total_epoch+=1 - self.nn.train_loss=loss.astype(np.float32) - try: - self.nn.ec+=1 - except AttributeError: - pass - self.total_epoch+=1 - return - - -# def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch=None,test_batch=None,index1=None,index2=None,j=None,t=None,i=None): -# if self.end_loss!=None or self.end_acc!=None or self.end_test_loss!=None or self.end_test_acc!=None: -# self._param=self.nn.param -# if batch!=None: -# if type(self.train_data)==list: -# for i in range(len(self.train_data)): -# if batch!=1: -# data_batch[i]=self.train_data[i][index1:index2] -# else: -# data_batch[i]=self.train_data[i][j] -# else: -# if batch!=1: -# data_batch=self.train_data[index1:index2] -# else: -# data_batch=self.train_data[j] -# if type(self.train_labels)==list: -# for i in range(len(self.train_data)): -# if batch!=1: -# labels_batch[i]=self.train_labels[i][index1:index2] -# else: -# labels_batch[i]=self.train_labels[i][j] -# else: -# if batch!=1: -# labels_batch=self.train_labels[index1:index2] -# else: -# labels_batch=self.train_labels[j] -# if self.PO==1: -# with tf.GradientTape() as tape: -# self.output=self.nn.fp(data_batch) -# self.batch_loss=self.nn.loss(self.output,labels_batch) -# self.gradient=tape.gradient(self.batch_loss,self.nn.param) -# try: -# if self.nn.opt!=None: -# pass -# self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) -# except AttributeError: -# self.nn.oopt(self.gradient,self.nn.param,t) -# if self.total_epoch[t]>=1: -# if self.acc_flag1==1: -# self.batch_acc=self.nn.accuracy(self.output,labels_batch) -# if i==epoch-1: -# self.output=self.nn.fp(data_batch) -# self._batch_loss=self.nn.loss(self.output,labels_batch) -# 
self._batch_acc=self.nn.accuracy(self.output,labels_batch) -# try: -# self.nn.bc=j -# except AttributeError: -# pass -# else: -# self.thread_lock.acquire() -# self.param=self.nn.param -# with tf.GradientTape() as tape: -# self.output=self.nn.fp(data_batch) -# self.batch_loss=self.nn.loss(self.output,labels_batch) -# self.gradient=tape.gradient(self.batch_loss,self.param) -# self.thread_lock.release() -# self.thread_lock.acquire() -# try: -# if self.nn.opt!=None: -# pass -# self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) -# except AttributeError: -# self.nn.oopt(self.gradient,self.nn.param,t) -# if self.total_epoch[t]>=1: -# if self.acc_flag1==1: -# self.batch_acc=self.nn.accuracy(self.output,labels_batch) -# if i==epoch-1: -# self.output=self.nn.fp(data_batch) -# self._batch_loss=self.nn.loss(self.output,labels_batch) -# try: -# self.nn.bc=j -# except AttributeError: -# pass -# if self.acc_flag1==1 and batch!=None: -# return self.batch_loss,self.batch_acc -# elif batch!=None: -# return self.batch_loss -# self.thread_lock.release() -# if index1==batches*batch: -# if type(self.train_data)==list: -# for i in range(len(self.train_data)): -# data_batch[i]=tf.concat([self.train_data[i][index1:],self.train_data[i][:index2]]) -# else: -# data_batch=tf.concat([self.train_data[index1:],self.train_data[:index2]]) -# if type(self.train_labels)==list: -# for i in range(len(self.train_data)): -# labels_batch[i]=tf.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]]) -# else: -# labels_batch=tf.concat([self.train_labels[index1:],self.train_labels[:index2]]) -# if self.PO==1: -# with tf.GradientTape() as tape: -# self.output=self.nn.fp(data_batch) -# self.batch_loss=self.nn.loss(self.output,labels_batch) -# self.gradient=tape.gradient(self.batch_loss,self.nn.param) -# try: -# if self.nn.opt!=None: -# pass -# self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) -# except AttributeError: -# self.nn.oopt(self.gradient,self.param,t) -# if self.total_epoch[t]>=1: -# if self.acc_flag1==1: -# self.batch_acc=self.nn.accuracy(self.output,labels_batch) -# if i==epoch-1: -# self.output=self.nn.fp(data_batch) -# self._batch_loss=self.nn.loss(self.output,labels_batch) -# try: -# self.nn.bc=j -# except AttributeError: -# pass -# else: -# self.thread_lock.acquire() -# self.param=self.nn.param -# with tf.GradientTape() as tape: -# self.output=self.nn.fp(data_batch) -# self.batch_loss=self.nn.loss(self.output,labels_batch) -# self.gradient=tape.gradient(self.batch_loss,self.param) -# self.thread_lock.release() -# self.thread_lock.acquire() -# try: -# if self.nn.opt!=None: -# pass -# self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) -# except AttributeError: -# self.nn.oopt(self.gradient,self.nn.param,t) -# if self.total_epoch[t]>=1: -# if self.acc_flag1==1: -# self.batch_acc=self.nn.accuracy(self.output,labels_batch) -# if i==epoch-1: -# self.output=self.nn.fp(data_batch) -# self._batch_loss=self.nn.loss(self.output,labels_batch) -# try: -# self.nn.bc+=1 -# except AttributeError: -# pass -# if self.acc_flag1==1 and batch!=None: -# return self.batch_loss,self.batch_acc -# elif batch!=None: -# return self.batch_loss -# self.thread_lock.release() -# else: -# if self.PO==1: -# with tf.GradientTape() as tape: -# self.output=self.nn.fp(self.train_data) -# self._train_loss=self.nn.loss(self.output,self.train_labels) -# self.gradient=tape.gradient(self._train_loss,self.nn.param) -# try: -# if self.nn.opt!=None: -# pass -# 
self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) -# except AttributeError: -# self.nn.oopt(self.gradient,self.nn.param) -# if self.total_epoch[t]>=1: -# self.loss=self._train_loss.numpy() -# self.train_loss_list.append(self.loss.astype(np.float32)) -# self.train_loss=self.loss -# self.train_loss=self.train_loss.astype(np.float32) -# if i==epoch-1: -# self.output=self.nn.fp(self.train_data) -# self._train_loss=self.nn.loss(self.output,self.train_labels) -# self.loss=self._train_loss_.numpy() -# self.train_loss_list.append(self.loss.astype(np.float32)) -# self.train_loss=self.loss -# self.train_loss=self.train_loss.astype(np.float32) -# if self.acc_flag1==1: -# self.acc=self.nn.accuracy(self.output,self.train_labels) -# self.acc=self.acc.numpy() -# self.train_acc_list.append(self.acc.astype(np.float32)) -# self.train_acc=self.acc -# self.train_acc=self.train_acc.astype(np.float32) -# if i==epoch-1: -# self.acc=self.nn.accuracy(self.output,self.train_labels) -# self.acc=self.acc.numpy() -# self.train_acc_list.append(self.acc.astype(np.float32)) -# self.train_acc=self.acc -# self.train_acc=self.train_acc.astype(np.float32) -# if self.test==True: -# self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) -# self.test_loss_list.append(self.test_loss) -# if self.acc_flag1==1: -# self.test_acc_list.append(self.test_acc) -# else: -# self.thread_lock.acquire() -# self.param=self.nn.param -# with tf.GradientTape() as tape: -# self.output=self.nn.fp(self.train_data) -# self._train_loss=self.nn.loss(self.output,self.train_labels) -# self.gradient=tape.gradient(self._train_loss,self.param) -# self.thread_lock.release() -# self.thread_lock.acquire() -# try: -# if self.nn.opt!=None: -# pass -# self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) -# except AttributeError: -# self.nn.oopt(self.gradient,self.nn.param,t) -# if self.total_epoch[t]>=1: -# self.loss=self._train_loss.numpy() -# self.train_loss_list.append(self.loss.astype(np.float32)) -# self.train_loss=self.loss -# self.train_loss=self.train_loss.astype(np.float32) -# if i==epoch-1: -# self.output=self.nn.fp(self.train_data) -# self._train_loss=self.nn.loss(self.output,self.train_labels) -# self.loss=self._train_loss.numpy() -# self.train_loss_list.append(self.loss.astype(np.float32)) -# self.train_loss=self.loss -# self.train_loss=self.train_loss.astype(np.float32) -# if self.acc_flag1==1: -# self.acc=self.nn.accuracy(self.output,self.train_labels) -# self.acc=self.acc.numpy() -# self.train_acc_list.append(self.acc.astype(np.float32)) -# self.train_acc=self.acc -# self.train_acc=self.train_acc.astype(np.float32) -# if i==epoch-1: -# self.acc=self.nn.accuracy(self.output,self.train_labels) -# self.acc=self.acc.numpy() -# self.train_acc_list.append(self.acc.astype(np.float32)) -# self.train_acc=self.acc -# self.train_acc=self.train_acc.astype(np.float32) -# if self.test==True: -# self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) -# self.test_loss_list.append(self.test_loss) -# if self.acc_flag1==1: -# self.test_acc_list.append(self.test_acc) -# self.thread_lock.release() -# return -# -# -# def _train_(self,batch=None,epoch=None,data_batch=None,labels_batch=None,test_batch=None,t=None,i=None): -# total_loss=0 -# _total_loss=0 -# total_acc=0 -# _total_acc=0 -# batches=int((self.shape0-self.shape0%batch)/batch) -# for j in range(batches): -# index1=j*batch -# index2=(j+1)*batch -# if self.acc_flag1==1: -# 
self.train_(data_batch,labels_batch,batch,epoch,batches,test_batch,index1,index2,j,t,i) -# if self.total_epoch[t]>=1: -# total_loss+=self.batch_loss -# total_acc+=self.batch_acc -# if i==epoch-1: -# _total_loss+=self._batch_loss -# _total_acc+=self._batch_acc -# else: -# self.train_(data_batch,labels_batch,batch,epoch,batches,test_batch,index1,index2,j,t,i) -# if self.total_epoch[t]>=1: -# total_loss+=self.batch_loss -# if i==epoch-1: -# _total_loss+=self._batch_loss -# if self.shape0%batch!=0: -# batches+=1 -# index1=batches*batch -# index2=batch-(self.shape0-batches*batch) -# self.train_(data_batch,labels_batch,batch,epoch,batches,test_batch,index1,index2,j,t,i) -# if self.acc_flag1==1: -# if self.total_epoch[t]>=1: -# total_loss+=self.batch_loss -# total_acc+=self.batch_acc -# if i==epoch-1: -# _total_loss+=self._batch_loss -# _total_acc+=self._batch_acc -# else: -# if self.total_epoch[t]>=1: -# total_loss+=self.batch_loss -# if i==epoch-1: -# _total_loss+=self._batch_loss -# if self.total_epoch[t]>=1: -# loss=total_loss.numpy()/batches -# if self.acc_flag1==1: -# train_acc=total_acc.numpy()/batches -# self.train_loss_list.append(loss.astype(np.float32)) -# self.train_loss=loss -# self.train_loss=self.train_loss.astype(np.float32) -# if i==epoch-1: -# loss=_total_loss.numpy()/batches -# self.train_loss_list.append(loss.astype(np.float32)) -# self.train_loss=loss -# self.train_loss=self.train_loss.astype(np.float32) -# if self.acc_flag1==1: -# self.train_acc_list.append(train_acc.astype(np.float32)) -# self.train_acc=train_acc -# self.train_acc=self.train_acc.astype(np.float32) -# if i==epoch-1: -# train_acc=_total_acc.numpy()/batches -# self.train_acc_list.append(train_acc.astype(np.float32)) -# self.train_acc=train_acc -# self.train_acc=self.train_acc.astype(np.float32) -# if self.test==True: -# self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) -# self.test_loss_list.append(self.test_loss) -# if self.acc_flag1==1: -# self.test_acc_list.append(self.test_acc) -# return - - - def train(self,batch=None,epoch=None,test_batch=None,nn_path=None,one=True,p=None,s=None): - self.batch=batch - self.epoch=0 - t1=None - t2=None - t=None - if self.flag==None: - self.flag=True - if self.p==None: - self.p=9 - else: - self.p=p-1 - if self.s==None: - self.s=2 - else: - if type(s)!=list: - self.s=s - else: - self.s=s[0] - self.mf=s[1] - self.file_list=[] - if type(self.train_data)==list: - data_batch=[x for x in range(len(self.train_data))] - if type(self.train_labels)==list: - labels_batch=[x for x in range(len(self.train_labels))] - self.nn.train() - if epoch!=None: - for i in range(epoch): - t1=time.time() -# if self.thread==None: -# try: -# self.nn.ec+=1 -# except AttributeError: -# pass -# else: -# try: -# self.nn.ec[self.t[-1]]+=1 -# except AttributeError: -# pass - try: - self.nn.ec+=1 - except AttributeError: - pass -# if self.thread==None: -# self._train(batch,epoch,test_batch,data_batch,labels_batch,i=i) -# else: -# t=self.t.pop() -# if self.PO==1: -# self.thread_lock.acquire() -# self._train_(batch,epoch,data_batch,labels_batch,test_batch,t,i) -# self.thread_lock.release() -# elif self.PO!=None: -# self._train_(batch,epoch,data_batch,labels_batch,test_batch,t,i) -# else: -# self._train(batch,epoch,test_batch,data_batch,labels_batch,t,i) - self._train(batch,epoch,test_batch,data_batch,labels_batch,i=i) -# if self.thread==None: -# self.epoch+=1 -# self.total_epoch+=1 -# else: -# self.epoch[t]+=1 -# self.total_epoch[t]+=1 - self.epoch+=1 - self.total_epoch+=1 -# 
if self.thread==None: - if epoch%10!=0: - d=epoch-epoch%self.p - d=int(d/self.p) - else: - d=epoch/(self.p+1) - d=int(d) - if d==0: - d=1 - e=d*self.s - if i%d==0: - if self.flag==None: - if self.test==False: - print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss)) - else: - print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(i+1,self.train_loss,self.test_loss)) - else: - if self.test==False: - print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) - else: - print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch+i+1,self.train_loss,self.test_loss)) - if nn_path!=None and i%e==0: - self.save(nn_path,i,one) - t2=time.time() -# if self.thread==None: -# self.time+=(t2-t1) -# else: -# self.time[t]+=(t2-t1) - self.time+=(t2-t1) -# if self.thread==None: -# if self.stop==True: -# break -# else: -# if self.stop[t]==True: -# break - if self.end_flag==True and self.end()==True: - self.nn.param=self._param - self._param=None - break - elif self.ol==None: - i=0 - while True: - t1=time.time() -# if self.thread==None: -# self._train(epoch=epoch,test_batch=test_batch,i=i) -# else: -# t=self.t.pop() -# if self.PO==1: -# self.thread_lock.acquire() -# self._train_(epoch=epoch,test_batch=test_batch,t=t,i=i) -# self.thread_lock.release() -# elif self.PO!=None: -# self._train_(epoch=epoch,test_batch=test_batch,t=t,i=i) -# else: -# self._train(epoch=epoch,test_batch=test_batch,t=t,i=i) - self._train(epoch=epoch,test_batch=test_batch,i=i) - i+=1 -# if self.thread==None: -# self.epoch+=1 -# self.total_epoch+=1 -# else: -# self.epoch[t]+=1 -# self.total_epoch[t]+=1 - self.epoch+=1 - self.total_epoch+=1 -# if self.thread==None: - if epoch%10!=0: - d=epoch-epoch%self.p - d=int(d/self.p) - else: - d=epoch/(self.p+1) - d=int(d) - if d==0: - d=1 - e=d*self.s - if i%d==0: - if self.flag==None: - if self.test==False: - print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss)) - else: - print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(i+1,self.train_loss,self.test_loss)) - else: - if self.test==False: - print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) - else: - print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch+i+1,self.train_loss,self.test_loss)) - if nn_path!=None and i%e==0: - self.save(nn_path,i,one) -# if self.thread==None: -# try: -# self.nn.ec+=1 -# except AttributeError: -# pass -# else: -# try: -# self.nn.ec[t]+=1 -# except AttributeError: -# pass - try: - self.nn.ec+=1 - except AttributeError: - pass - t2=time.time() -# if self.thread==None: -# self.time+=(t2-t1) -# else: -# self.time[t]+=(t2-t1) - self.time+=(t2-t1) -# if self.thread==None: -# if self.stop==True: -# break -# else: -# if self.stop[t]==True: -# break - if self.end_flag==True and self.end()==True: - self.nn.param=self._param - self._param=None - break - else: - while True: - self._train() - data=self.ol() - output=self.nn.fp(data[0]) - train_loss=self.nn.loss(output,data[1]) - loss=train_loss.numpy() - self.nn.train_loss=loss.astype(np.float32) - if nn_path!=None: - self.save(nn_path) - try: - self.nn.ec+=1 - except AttributeError: - pass - if nn_path!=None: - self.save(nn_path) - if self.thread==None: - self.time=self.time-int(self.time) - if self.time<0.5: - self.time=int(self.time) - else: - self.time=int(self.time)+1 - self.total_time+=self.time - else: - self.time[t]=self.time[t]-int(self.time[t]) - if self.time[t]<0.5: - self.time[t]=int(self.time[t]) - else: - self.time[t]=int(self.time[t])+1 - self.total_time[t]+=self.time[t] -# if 
self.thread==None: - print() - if self.test==False: - print('last loss:{0:.6f}'.format(self.train_loss)) - else: - print('last loss:{0:.6f},last test loss:{1:.6f}'.format(self.train_loss,self.test_loss)) - if self.acc_flag1==1: - if self.acc_flag2=='%': - if self.test==False: - print('accuracy:{0:.1f}'.format(self.train_acc*100)) - else: - print('accuracy:{0:.1f},test accuracy:{1:.1f}'.format(self.train_acc*100,self.test_acc*100)) - else: - if self.test==False: - print('accuracy:{0:.6f}'.format(self.train_acc)) - else: - print('accuracy:{0:.6f},test accuracy:{1:.6f}'.format(self.train_acc,self.test_acc)) - print('time:{0}s'.format(self.time)) -# if self.thread==None: - try: - if self.nn.km==1: - self.nn.km=0 - except AttributeError: - pass - return - - - def test(self,test_data,test_labels,batch=None,t=None): - if type(test_data)==list: - data_batch=[x for x in range(len(test_data))] - if type(test_labels)==list: - labels_batch=[x for x in range(len(test_labels))] - if batch!=None: - total_loss=0 - total_acc=0 - if type(test_data)==list: - batches=int((test_data[0].shape[0]-test_data[0].shape[0]%batch)/batch) - shape0=test_data[0].shape[0] - else: - batches=int((test_data.shape[0]-test_data.shape[0]%batch)/batch) - shape0=test_data.shape[0] - for j in range(batches): - index1=j*batch - index2=(j+1)*batch - if type(test_data)==list: - for i in range(len(test_data)): - data_batch[i]=test_data[i][index1:index2] - else: - data_batch=test_data[index1:index2] - if type(test_labels)==list: - for i in range(len(test_labels)): - labels_batch[i]=test_labels[i][index1:index2] - else: - labels_batch=test_labels[index1:index2] - if self.thread==None: - output=self.nn.fp(data_batch) - else: - output=self.nn.fp(data_batch,t) - batch_loss=self.nn.loss(output,labels_batch) - total_loss+=batch_loss - if self.acc_flag1==1: - batch_acc=self.nn.accuracy(output,labels_batch) - total_acc+=batch_acc - if shape0%batch!=0: - batches+=1 - index1=batches*batch - index2=batch-(shape0-batches*batch) - if type(test_data)==list: - for i in range(len(test_data)): - if type(test_data)==np.ndarray: - data_batch[i]=np.concatenate(test_data[i][index1:],test_data[i][:index2]) - else: - data_batch[i]=torch.concat(test_data[i][index1:],test_data[i][:index2]) - else: - if type(test_data)==np.ndarray: - data_batch=np.concatenate(test_data[index1:],test_data[:index2]) - else: - data_batch=torch.concat(test_data[index1:],test_data[:index2]) - if type(self.test_labels)==list: - for i in range(len(test_labels)): - if type(test_labels)==np.ndarray: - labels_batch[i]=np.concatenate(test_labels[i][index1:],test_labels[i][:index2]) - else: - labels_batch[i]=torch.concat(test_labels[i][index1:],test_labels[i][:index2]) - else: - if type(test_labels)==np.ndarray: - labels_batch=np.concatenate(test_labels[index1:],test_labels[:index2]) - else: - labels_batch=torch.concat(test_labels[index1:],test_labels[:index2]) - if self.thread==None: - output=self.nn.fp(data_batch) - else: - output=self.nn.fp(data_batch,t) - batch_loss=self.nn.loss(output,labels_batch) - total_loss+=batch_loss - if self.acc_flag1==1: - batch_acc=self.nn.accuracy(output,labels_batch) - total_acc+=batch_acc - test_loss=total_loss.numpy()/batches - test_loss=test_loss - test_loss=test_loss.astype(np.float32) - if self.acc_flag1==1: - test_acc=total_acc.numpy()/batches - test_acc=test_acc - test_acc=test_acc.astype(np.float32) - else: - if self.thread==None: - output=self.nn.fp(test_data) - else: - output=self.nn.fp(test_data,t) - test_loss=self.nn.loss(output,test_labels) - 
if self.acc_flag1==1: - test_acc=self.nn.accuracy(output,test_labels) - test_loss=test_loss.numpy().astype(np.float32) - test_acc=test_acc.numpy().astype(np.float32) - if self.thread==None: - print('test loss:{0:.6f}'.format(test_loss)) - if self.acc_flag1==1: - if self.acc_flag2=='%': - print('accuracy:{0:.1f}'.format(test_acc*100)) - else: - print('accuracy:{0:.6f}'.format(test_acc)) - if self.acc_flag2=='%': - return test_loss,test_acc*100 - else: - return test_loss,test_acc - else: - return test_loss - - - def train_info(self): - print() - print('batch:{0}'.format(self.batch)) - print() - print('epoch:{0}'.format(self.total_epoch)) - print() - print('learning rate:{0}'.format(self.nn.lr)) - print() - print('time:{0:.3f}s'.format(self.total_time)) - print() - print('-------------------------------------') - print() - print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc_flag2=='%': - print('train acc:{0:.1f}'.format(self.train_acc*100)) - else: - print('train acc:{0:.6f}'.format(self.train_acc)) - return - - - def test_info(self): - print() - print('test loss:{0:.6f}'.format(self.test_loss)) - if self.acc_flag2=='%': - print('test acc:{0:.1f}'.format(self.test_acc*100)) - else: - print('test acc:{0:.6f}'.format(self.test_acc)) - return - - - def info(self): - self.train_info() - if self.test==True: - print() - print('-------------------------------------') - self.test_info() - return - - - def train_visual(self): - print() - plt.figure(1) - plt.plot(np.arange(self.total_epoch),self.train_loss_list) - plt.title('train loss') - plt.xlabel('epoch') - plt.ylabel('loss') - plt.figure(2) - plt.plot(np.arange(self.total_epoch),self.train_acc_list) - plt.title('train acc') - plt.xlabel('epoch') - plt.ylabel('acc') - print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc_flag2=='%': - print('train acc:{0:.1f}'.format(self.train_acc*100)) - else: - print('train acc:{0:.6f}'.format(self.train_acc)) - return - - - def test_visual(self): - print() - plt.figure(1) - plt.plot(np.arange(self.total_epoch),self.test_loss_list) - plt.title('test loss') - plt.xlabel('epoch') - plt.ylabel('loss') - plt.figure(2) - plt.plot(np.arange(self.total_epoch),self.test_acc_list) - plt.title('test acc') - plt.xlabel('epoch') - plt.ylabel('acc') - print('test loss:{0:.6f}'.format(self.test_loss)) - if self.acc_flag2=='%': - print('test acc:{0:.1f}'.format(self.test_acc*100)) - else: - print('test acc:{0:.6f}'.format(self.test_acc)) - return - - - def comparison(self): - print() - plt.figure(1) - plt.plot(np.arange(self.total_epoch),self.train_loss_list,'b-',label='train loss') - if self.test==True: - plt.plot(np.arange(self.total_epoch),self.test_loss_list,'r-',label='test loss') - plt.title('loss') - plt.xlabel('epoch') - plt.ylabel('loss') - plt.legend() - plt.figure(2) - plt.plot(np.arange(self.total_epoch),self.train_acc_list,'b-',label='train acc') - if self.test==True: - plt.plot(np.arange(self.total_epoch),self.test_acc_list,'r-',label='test acc') - plt.title('accuracy') - plt.xlabel('epoch') - plt.ylabel('acc') - plt.legend() - print('train loss:{0}'.format(self.train_loss)) - if self.acc_flag2=='%': - print('train acc:{0:.1f}'.format(self.train_acc*100)) - else: - print('train acc:{0:.6f}'.format(self.train_acc)) - if self.test==True: - print() - print('-------------------------------------') - print() - print('test loss:{0:.6f}'.format(self.test_loss)) - if self.acc_flag2=='%': - print('test acc:{0:.1f}'.format(self.test_acc*100)) - else: - print('test 
acc:{0:.6f}'.format(self.test_acc)) - return - - - def save_p(self,path): - parameter_file=open(path+'.dat','wb') - pickle.dump(self.nn.param,parameter_file) - parameter_file.close() - return - - - def save(self,path,i=None,one=True): - if one==True: - output_file=open(path+'\save.dat','wb') - path=path+'\save.dat' - index=path.rfind('\\') - parameter_file=open(path.replace(path[index+1:],'parameter.dat'),'wb') - else: - output_file=open(path+'\save-{0}.dat'.format(i+1),'wb') - path=path+'\save-{0}.dat'.format(i+1) - index=path.rfind('\\') - parameter_file=open(path.replace(path[index+1:],'parameter-{0}.dat'.format(i+1)),'wb') - self.file_list.append(['save-{0}.dat','parameter-{0}.dat']) - if len(self.file_list)>self.mf: - os.remove(self.file_list[0][0]) - os.remove(self.file_list[0][1]) - pickle.dump(self.nn.param,parameter_file) - self.nn.param=None - pickle.dump(self.nn,output_file) - pickle.dump(self.ol,output_file) - pickle.dump(self.batch,output_file) - pickle.dump(self.end_loss,output_file) - pickle.dump(self.end_acc,output_file) - pickle.dump(self.end_test_loss,output_file) - pickle.dump(self.end_test_acc,output_file) - pickle.dump(self.acc_flag1,output_file) - pickle.dump(self.acc_flag2,output_file) - pickle.dump(self.p,output_file) - pickle.dump(self.s,output_file) - pickle.dump(self.mf,output_file) - pickle.dump(self.file_list,output_file) - pickle.dump(self.flag,output_file) - pickle.dump(self.train_loss,output_file) - pickle.dump(self.train_acc,output_file) - pickle.dump(self.train_loss_list,output_file) - pickle.dump(self.train_acc_list,output_file) - pickle.dump(self.test,output_file) - if self.test==True: - pickle.dump(self.test_loss,output_file) - pickle.dump(self.test_acc,output_file) - pickle.dump(self.test_loss_list,output_file) - pickle.dump(self.test_acc_list,output_file) - pickle.dump(self.total_epoch,output_file) - pickle.dump(self.total_epoch,output_file) - pickle.dump(self.total_time,output_file) - output_file.close() - parameter_file.close() - return - - - def restore(self,s_path,p_path): - input_file=open(s_path,'rb') - parameter_file=open(p_path,'rb') - param=pickle.load(parameter_file) - self.nn=pickle.load(input_file) - self.nn.param=param - param=None - try: - if self.nn.km==0: - self.nn.km=1 - except AttributeError: - pass - self.ol=pickle.load(input_file) - self.batch=pickle.load(input_file) - self.end_loss=pickle.load(input_file) - self.end_acc=pickle.load(input_file) - self.end_test_loss=pickle.load(input_file) - self.end_test_acc=pickle.load(input_file) - self.acc_flag1=pickle.load(input_file) - self.acc_flag2=pickle.load(input_file) - self.p=pickle.load(input_file) - self.s=pickle.load(input_file) - self.mf=pickle.load(input_file) - self.file_list=pickle.load(input_file) - self.flag=pickle.load(input_file) - self.train_loss=pickle.load(input_file) - self.train_acc=pickle.load(input_file) - self.train_loss_list=pickle.load(input_file) - self.train_acc_list=pickle.load(input_file) - self.test=pickle.load(input_file) - if self.test==True: - self.test_loss=pickle.load(input_file) - self.test_acc=pickle.load(input_file) - self.test_loss_list=pickle.load(input_file) - self.test_acc_list=pickle.load(input_file) - self.total_epoch=pickle.load(input_file) - self.total_time=pickle.load(input_file) - input_file.close() - parameter_file.close() - return \ No newline at end of file From 6a0808a5944287b794b53999154807c07559dd1b Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 10 Jun 2022 13:01:16 
+0800 Subject: [PATCH 02/99] Update kernel.py --- Note/create/kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index b0182641e..d6acc20ec 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -27,7 +27,7 @@ def __init__(self,nn=None): self.end_test_loss=None self.end_test_acc=None self.acc_flag1=None - self.acc_flag2=None + self.acc_flag2='%' self.flag=None self.train_loss=None self.train_acc=None From 1e9d118a92b0ea93c269ec72883d833dba9dfb6b Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jun 2022 13:18:44 +0800 Subject: [PATCH 03/99] Update kernel.py --- Note/create/kernel.py | 76 +++++++++++++++++++------------------------ 1 file changed, 34 insertions(+), 42 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index d6acc20ec..8c86a7a68 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -788,14 +788,10 @@ def train(self,batch=None,epoch=None,test_batch=None,nn_path=None,one=True,p=Non else: self.p=p-1 if self.s==None: - self.s=2 + self.s=0 else: - if type(s)!=list: - self.s=s - else: - self.s=s[0] - self.mf=s[1] - self.file_list=[] + self.s=s-1 + self.file_list=[] if type(self.train_data)==list: data_batch=[x for x in range(len(self.train_data))] else: @@ -829,23 +825,22 @@ def train(self,batch=None,epoch=None,test_batch=None,nn_path=None,one=True,p=Non self._train_(batch,epoch,data_batch,labels_batch,test_batch,t,i) else: self._train(batch,epoch,test_batch,data_batch,labels_batch,t,i) - if self.thread==None: - self.epoch+=1 - self.total_epoch+=1 - else: - self.epoch[t]+=1 - self.total_epoch[t]+=1 if self.thread==None: if epoch%10!=0: - d=epoch-epoch%self.p - d=int(d/self.p) + p=epoch-epoch%self.p + p=int(p/self.p) + s=epoch-epoch%self.s + s=int(s/self.s) else: - d=epoch/(self.p+1) - d=int(d) - if d==0: - d=1 - e=d*self.s - if i%d==0: + p=epoch/(self.p+1) + p=int(p) + s=epoch/(self.s+1) + s=int(s) + if p==0: + p=1 + if s==0: + s=1 + if i%p==0: if self.flag==None: if self.test==False: print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss)) @@ -856,8 +851,8 @@ def train(self,batch=None,epoch=None,test_batch=None,nn_path=None,one=True,p=Non print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch+i+1,self.train_loss,self.test_loss)) - if nn_path!=None and i%e==0: - self.save(nn_path,i,one) + if nn_path!=None and i%s==0: + self.save(nn_path,self.total_epoch,one) t2=time.time() if self.thread==None: self.time+=(t2-t1) @@ -890,23 +885,22 @@ def train(self,batch=None,epoch=None,test_batch=None,nn_path=None,one=True,p=Non else: self._train(epoch=epoch,test_batch=test_batch,t=t,i=i) i+=1 - if self.thread==None: - self.epoch+=1 - self.total_epoch+=1 - else: - self.epoch[t]+=1 - self.total_epoch[t]+=1 if self.thread==None: if epoch%10!=0: - d=epoch-epoch%self.p - d=int(d/self.p) + p=epoch-epoch%self.p + p=int(p/self.p) + s=epoch-epoch%self.s + s=int(s/self.s) else: - d=epoch/(self.p+1) - d=int(d) - if d==0: - d=1 - e=d*self.s - if i%d==0: + p=epoch/(self.p+1) + p=int(p) + s=epoch/(self.s+1) + s=int(s) + if p==0: + p=1 + if s==0: + s=1 + if i%p==0: if self.flag==None: if self.test==False: print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss)) @@ -917,8 +911,8 @@ def train(self,batch=None,epoch=None,test_batch=None,nn_path=None,one=True,p=Non print('epoch:{0} 
loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch+i+1,self.train_loss,self.test_loss)) - if nn_path!=None and i%e==0: - self.save(nn_path,i,one) + if nn_path!=None and i%s==0: + self.save(nn_path,self.total_epoch,one) if self.thread==None: try: self.nn.ec+=1 @@ -1237,7 +1231,7 @@ def save(self,path,i=None,one=True): index=path.rfind('\\') parameter_file=open(path.replace(path[index+1:],'parameter-{0}.dat'.format(i+1)),'wb') self.file_list.append(['save-{0}.dat','parameter-{0}.dat']) - if len(self.file_list)>self.mf: + if len(self.file_list)>self.s+1: os.remove(self.file_list[0][0]) os.remove(self.file_list[0][1]) pickle.dump(self.nn.param,parameter_file) @@ -1253,7 +1247,6 @@ def save(self,path,i=None,one=True): pickle.dump(self.acc_flag2,output_file) pickle.dump(self.p,output_file) pickle.dump(self.s,output_file) - pickle.dump(self.mf,output_file) pickle.dump(self.file_list,output_file) pickle.dump(self.flag,output_file) pickle.dump(self.train_loss,output_file) @@ -1296,7 +1289,6 @@ def restore(self,s_path,p_path): self.acc_flag2=pickle.load(input_file) self.p=pickle.load(input_file) self.s=pickle.load(input_file) - self.mf=pickle.load(input_file) self.file_list=pickle.load(input_file) self.flag=pickle.load(input_file) self.train_loss=pickle.load(input_file) From 1cb6ddf9d5297c6955bb670d06a271790e5b1365 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 16 Jun 2022 13:22:05 +0800 Subject: [PATCH 04/99] Update kernel.py --- Note/create/kernel.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 8c86a7a68..99b839b98 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -775,7 +775,7 @@ def _train_(self,batch=None,epoch=None,data_batch=None,labels_batch=None,test_ba return - def train(self,batch=None,epoch=None,test_batch=None,nn_path=None,one=True,p=None,s=None): + def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=None,s=None): self.batch=batch self.epoch=0 t1=None @@ -851,8 +851,8 @@ def train(self,batch=None,epoch=None,test_batch=None,nn_path=None,one=True,p=Non print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch+i+1,self.train_loss,self.test_loss)) - if nn_path!=None and i%s==0: - self.save(nn_path,self.total_epoch,one) + if file_path!=None and i%s==0: + self.save(file_path,self.total_epoch,one) t2=time.time() if self.thread==None: self.time+=(t2-t1) @@ -911,8 +911,8 @@ def train(self,batch=None,epoch=None,test_batch=None,nn_path=None,one=True,p=Non print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch+i+1,self.train_loss,self.test_loss)) - if nn_path!=None and i%s==0: - self.save(nn_path,self.total_epoch,one) + if file_path!=None and i%s==0: + self.save(file_path,self.total_epoch,one) if self.thread==None: try: self.nn.ec+=1 @@ -946,14 +946,14 @@ def train(self,batch=None,epoch=None,test_batch=None,nn_path=None,one=True,p=Non train_loss=self.nn.loss(output,data[1]) loss=train_loss.numpy() self.nn.train_loss=loss.astype(np.float32) - if nn_path!=None: - self.save(nn_path) + if file_path!=None: + self.save(file_path) try: self.nn.ec+=1 except AttributeError: pass - if nn_path!=None: - self.save(nn_path) + if 
file_path!=None: + self.save(file_path) if self.thread==None: self.time=self.time-int(self.time) if self.time<0.5: From 91b0908cc924a7626e25c16f3f146dc85a756aec Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jun 2022 13:40:56 +0800 Subject: [PATCH 05/99] Update kernel.py --- Note/create/kernel.py | 52 +++++++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 99b839b98..ac9ef5912 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -37,7 +37,7 @@ def __init__(self,nn=None): self.test_acc=None self.test_loss_list=[] self.test_acc_list=[] - self.test=False + self.test_flag=False self.total_epoch=0 self.time=0 self.total_time=0 @@ -53,7 +53,7 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None): self.test_data=test_data self.test_labels=test_labels if test_data!=None: - self.test=True + self.test_flag=True if type(self.train_data)==list: self.shape0=train_data[0].shape[0] else: @@ -86,7 +86,7 @@ def init(self,param=None): self.train_acc_list.clear() self.test_loss_list.clear() self.test_acc_list.clear() - self.test=False + self.test_flag=False self.epoch=0 self.total_epoch=0 self.time=0 @@ -103,7 +103,7 @@ def add_threads(self,thread): self.train_acc=np.concatenate((self.train_acc,np.zeros(self.t))) self.train_loss_list.extend([[] for _ in range(len(self.t))]) self.train_acc_list.extend([[] for _ in range(len(self.t))]) - if self.test==True: + if self.test_flag==True: if self.PO==None: self.test_loss=np.concatenate((self.test_loss,np.zeros(self.t))) self.test_acc=np.concatenate((self.test_acc,np.zeros(self.t))) @@ -191,7 +191,7 @@ def loss_acc(self,output=None,labels_batch=None,batch_loss=None,batch=None,test_ self.train_acc_list[t].append(acc.astype(np.float32)) self.train_acc[t]=acc self.train_acc[t]=self.train_acc[t].astype(np.float32) - if self.test==True: + if self.test_flag==True: if self.thread==None: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) @@ -374,7 +374,7 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat self.train_acc_list[t].append(train_acc.astype(np.float32)) self.train_acc[t]=train_acc self.train_acc[t]=self.train_acc[t].astype(np.float32) - if self.test==True: + if self.test_flag==True: if self.thread==None: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) @@ -653,7 +653,7 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= self.train_acc_list.append(self.acc.astype(np.float32)) self.train_acc=self.acc self.train_acc=self.train_acc.astype(np.float32) - if self.test==True: + if self.test_flag==True: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) if self.acc_flag1==1: @@ -697,7 +697,7 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= self.train_acc_list.append(self.acc.astype(np.float32)) self.train_acc=self.acc self.train_acc=self.train_acc.astype(np.float32) - if self.test==True: + if self.test_flag==True: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) if self.acc_flag1==1: @@ -767,7 +767,7 @@ def 
_train_(self,batch=None,epoch=None,data_batch=None,labels_batch=None,test_ba self.train_acc_list.append(train_acc.astype(np.float32)) self.train_acc=train_acc self.train_acc=self.train_acc.astype(np.float32) - if self.test==True: + if self.test_flag==True: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) if self.acc_flag1==1: @@ -842,12 +842,12 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N s=1 if i%p==0: if self.flag==None: - if self.test==False: + if self.test_flag==False: print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(i+1,self.train_loss,self.test_loss)) else: - if self.test==False: + if self.test_flag==False: print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch+i+1,self.train_loss,self.test_loss)) @@ -902,12 +902,12 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N s=1 if i%p==0: if self.flag==None: - if self.test==False: + if self.test_flag==False: print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(i+1,self.train_loss,self.test_loss)) else: - if self.test==False: + if self.test_flag==False: print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch+i+1,self.train_loss,self.test_loss)) @@ -970,21 +970,21 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N self.total_time[t]+=self.time[t] if self.thread==None: print() - if self.test==False: + if self.test_flag==False: print('last loss:{0:.6f}'.format(self.train_loss)) else: print('last loss:{0:.6f},last test loss:{1:.6f}'.format(self.train_loss,self.test_loss)) if self.acc_flag1==1: if self.acc_flag2=='%': - if self.test==False: + if self.test_flag==False: print('accuracy:{0:.1f}'.format(self.train_acc*100)) else: - print('accuracy:{0:.1f},test accuracy:{1:.1f}'.format(self.train_acc*100,self.test_acc*100)) + print('accuracy:{0:.1f},test_flag accuracy:{1:.1f}'.format(self.train_acc*100,self.test_acc*100)) else: - if self.test==False: + if self.test_flag==False: print('accuracy:{0:.6f}'.format(self.train_acc)) else: - print('accuracy:{0:.6f},test accuracy:{1:.6f}'.format(self.train_acc,self.test_acc)) + print('accuracy:{0:.6f},test_flag accuracy:{1:.6f}'.format(self.train_acc,self.test_acc)) print('time:{0}s'.format(self.time)) if self.thread==None: try: @@ -1130,7 +1130,7 @@ def test_info(self): def info(self): self.train_info() - if self.test==True: + if self.test_flag==True: print() print('-------------------------------------') self.test_info() @@ -1181,7 +1181,7 @@ def comparison(self): print() plt.figure(1) plt.plot(np.arange(self.total_epoch),self.train_loss_list,'b-',label='train loss') - if self.test==True: + if self.test_flag==True: plt.plot(np.arange(self.total_epoch),self.test_loss_list,'r-',label='test loss') plt.title('loss') plt.xlabel('epoch') @@ -1189,7 +1189,7 @@ def comparison(self): plt.legend() plt.figure(2) plt.plot(np.arange(self.total_epoch),self.train_acc_list,'b-',label='train acc') - if self.test==True: + if self.test_flag==True: plt.plot(np.arange(self.total_epoch),self.test_acc_list,'r-',label='test acc') plt.title('accuracy') plt.xlabel('epoch') @@ -1200,7 +1200,7 @@ def comparison(self): 
print('train acc:{0:.1f}'.format(self.train_acc*100)) else: print('train acc:{0:.6f}'.format(self.train_acc)) - if self.test==True: + if self.test_flag==True: print() print('-------------------------------------') print() @@ -1253,8 +1253,8 @@ def save(self,path,i=None,one=True): pickle.dump(self.train_acc,output_file) pickle.dump(self.train_loss_list,output_file) pickle.dump(self.train_acc_list,output_file) - pickle.dump(self.test,output_file) - if self.test==True: + pickle.dump(self.test_flag,output_file) + if self.test_flag==True: pickle.dump(self.test_loss,output_file) pickle.dump(self.test_acc,output_file) pickle.dump(self.test_loss_list,output_file) @@ -1295,8 +1295,8 @@ def restore(self,s_path,p_path): self.train_acc=pickle.load(input_file) self.train_loss_list=pickle.load(input_file) self.train_acc_list=pickle.load(input_file) - self.test=pickle.load(input_file) - if self.test==True: + self.test_flag=pickle.load(input_file) + if self.test_flag==True: self.test_loss=pickle.load(input_file) self.test_acc=pickle.load(input_file) self.test_loss_list=pickle.load(input_file) From 87f33e70baffa963f7a600aa5e1bcf7a985cb1cd Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jun 2022 14:44:55 +0800 Subject: [PATCH 06/99] Update kernel.py --- Note/create/kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index ac9ef5912..a1e5492ee 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -788,7 +788,7 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N else: self.p=p-1 if self.s==None: - self.s=0 + self.s=1 else: self.s=s-1 self.file_list=[] From 7515208b108e927c9ca78bb733fa8daa5075f99a Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jun 2022 21:00:08 +0800 Subject: [PATCH 07/99] Update kernel.py --- Note/create/kernel.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index a1e5492ee..bbdfba158 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -153,18 +153,14 @@ def end(self): def loss_acc(self,output=None,labels_batch=None,batch_loss=None,batch=None,test_batch=None,train_loss=None,total_loss=None,total_acc=None,t=None): if batch!=None: if self.total_epoch>=1: - batch_loss=batch_loss total_loss+=batch_loss if self.acc_flag1==1: batch_acc=self.nn.accuracy(output,labels_batch) - batch_acc=batch_acc total_acc+=batch_acc if self.shape0%batch!=0: - batch_loss=batch_loss total_loss+=batch_loss if self.acc_flag1==1: batch_acc=self.nn.accuracy(output,labels_batch) - batch_acc=batch_acc total_acc+=batch_acc return total_loss,total_acc elif self.ol==None: From 4eb548a6361d9fc6c5d4a548b7815ec8da4ae71e Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 17 Jun 2022 21:29:28 +0800 Subject: [PATCH 08/99] Update kernel.py --- Note/create/kernel.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index bbdfba158..8d14d8781 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -255,6 +255,7 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat else: gradient=tape.gradient(batch_loss,self.nn.param[t]) self.nn.oopt(gradient,self.nn.param,t) + 
total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) if i==epoch-1: if self.thread==None: output=self.nn.fp(data_batch) @@ -262,8 +263,6 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat output=self.nn.fp(data_batch,t) _batch_loss=self.nn.loss(output,labels_batch) _total_loss,_total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=_batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) - else: - total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) if self.thread==None: try: self.nn.bc=j @@ -310,6 +309,7 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat else: gradient=tape.gradient(batch_loss,self.nn.param[t]) self.nn.oopt(gradient,self.nn.param,t) + total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) if i==epoch-1: if self.thread==None: output=self.nn.fp(data_batch) @@ -317,8 +317,6 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat output=self.nn.fp(data_batch,t) _batch_loss=self.nn.loss(output,labels_batch) _total_loss,_total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=_batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) - else: - total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) if self.thread==None: try: self.nn.bc+=1 From 1b5aa0084df65fad98dc8549264acd047798a7f8 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sat, 18 Jun 2022 13:32:34 +0800 Subject: [PATCH 09/99] Update kernel.py --- Note/create/kernel.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 8d14d8781..0bff4c679 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -150,22 +150,22 @@ def end(self): return True - def loss_acc(self,output=None,labels_batch=None,batch_loss=None,batch=None,test_batch=None,train_loss=None,total_loss=None,total_acc=None,t=None): - if batch!=None: + def loss_acc(self,output=None,labels_batch=None,loss=None,test_batch=None,total_loss=None,total_acc=None,t=None,flag=None): + if self.batch!=None: if self.total_epoch>=1: - total_loss+=batch_loss + total_loss+=loss if self.acc_flag1==1: batch_acc=self.nn.accuracy(output,labels_batch) total_acc+=batch_acc - if self.shape0%batch!=0: - total_loss+=batch_loss + if flag!=None: + total_loss+=loss if self.acc_flag1==1: batch_acc=self.nn.accuracy(output,labels_batch) total_acc+=batch_acc return total_loss,total_acc elif self.ol==None: if self.total_epoch>=1: - loss=train_loss.numpy() + loss=loss.numpy() if self.thread==None: self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss @@ -205,6 +205,8 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat if self.end_loss!=None or self.end_acc!=None or self.end_test_loss!=None or self.end_test_acc!=None: self._param=self.nn.param if batch!=None: + _total_loss=0 + _total_acc=0 total_loss=0 total_acc=0 batches=int((self.shape0-self.shape0%batch)/batch) @@ -255,14 +257,14 @@ def 
_train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat else: gradient=tape.gradient(batch_loss,self.nn.param[t]) self.nn.oopt(gradient,self.nn.param,t) - total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) + total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=batch_loss,total_loss=total_loss,total_acc=total_acc,t=t) if i==epoch-1: if self.thread==None: output=self.nn.fp(data_batch) else: output=self.nn.fp(data_batch,t) _batch_loss=self.nn.loss(output,labels_batch) - _total_loss,_total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=_batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) + _total_loss,_total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=_batch_loss,total_loss=_total_loss,total_acc=_total_acc,t=t) if self.thread==None: try: self.nn.bc=j @@ -309,14 +311,14 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat else: gradient=tape.gradient(batch_loss,self.nn.param[t]) self.nn.oopt(gradient,self.nn.param,t) - total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) + total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=batch_loss,total_loss=total_loss,total_acc=total_acc,t=t,flag=True) if i==epoch-1: if self.thread==None: output=self.nn.fp(data_batch) else: output=self.nn.fp(data_batch,t) _batch_loss=self.nn.loss(output,labels_batch) - _total_loss,_total_acc=self.loss_acc(output=output,labels_batch=labels_batch,batch_loss=_batch_loss,batch=batch,total_loss=total_loss,total_acc=total_acc,t=t) + _total_loss,_total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=_batch_loss,total_loss=_total_loss,total_acc=_total_acc,t=t,flag=True) if self.thread==None: try: self.nn.bc+=1 @@ -402,14 +404,14 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat else: gradient=tape.gradient(batch_loss,self.nn.param[t]) self.nn.oopt(gradient,self.nn.param,t) - self.loss_acc(output=output,labels_batch=labels_batch,train_loss=train_loss,batch=batch,test_batch=test_batch,total_loss=total_loss,total_acc=total_acc,t=t) + self.loss_acc(output=output,labels_batch=labels_batch,loss=train_loss,test_batch=test_batch,total_loss=total_loss,total_acc=total_acc,t=t) if i==epoch-1: if self.thread==None: output=self.nn.fp(self.train_data) else: output=self.nn.fp(data_batch,t) train_loss=self.nn.loss(output,self.train_labels) - self.loss_acc(output=output,labels_batch=labels_batch,train_loss=train_loss,batch=batch,test_batch=test_batch,total_loss=total_loss,total_acc=total_acc,t=t) + self.loss_acc(output=output,labels_batch=labels_batch,loss=train_loss,test_batch=test_batch,total_loss=_total_loss,total_acc=_total_acc,t=t) else: data=self.ol() if self.stop==True: From 2834cc64c692b42ffea44d3d30516615dd38fa8a Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sat, 18 Jun 2022 13:38:19 +0800 Subject: [PATCH 10/99] Update kernel.py --- Note/create/kernel.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 0bff4c679..632001b6f 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -150,18 +150,13 @@ def end(self): return True - def 
loss_acc(self,output=None,labels_batch=None,loss=None,test_batch=None,total_loss=None,total_acc=None,t=None,flag=None): + def loss_acc(self,output=None,labels_batch=None,loss=None,test_batch=None,total_loss=None,total_acc=None,t=None): if self.batch!=None: if self.total_epoch>=1: total_loss+=loss if self.acc_flag1==1: batch_acc=self.nn.accuracy(output,labels_batch) total_acc+=batch_acc - if flag!=None: - total_loss+=loss - if self.acc_flag1==1: - batch_acc=self.nn.accuracy(output,labels_batch) - total_acc+=batch_acc return total_loss,total_acc elif self.ol==None: if self.total_epoch>=1: @@ -311,14 +306,14 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat else: gradient=tape.gradient(batch_loss,self.nn.param[t]) self.nn.oopt(gradient,self.nn.param,t) - total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=batch_loss,total_loss=total_loss,total_acc=total_acc,t=t,flag=True) + total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=batch_loss,total_loss=total_loss,total_acc=total_acc,t=t) if i==epoch-1: if self.thread==None: output=self.nn.fp(data_batch) else: output=self.nn.fp(data_batch,t) _batch_loss=self.nn.loss(output,labels_batch) - _total_loss,_total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=_batch_loss,total_loss=_total_loss,total_acc=_total_acc,t=t,flag=True) + _total_loss,_total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=_batch_loss,total_loss=_total_loss,total_acc=_total_acc,t=t) if self.thread==None: try: self.nn.bc+=1 From cf94d5ee3cd3fff4de130070ce093bc4ae31b57d Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 21 Jun 2022 17:11:24 +0800 Subject: [PATCH 11/99] Update kernel.py --- Note/create/kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 632001b6f..1191303d2 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -242,7 +242,7 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat pass self.apply_gradient(tape,self.nn.opt,batch_loss,self.nn.param) else: - if self.nn.opt: + if self.nn.opt!=None: pass self.apply_gradient(tape,self.nn.opt[t],batch_loss,self.nn.param[t]) except AttributeError: From 058a0bb733ee67b4c587d19a4e0fc4f177b44e1a Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 21 Jun 2022 17:28:52 +0800 Subject: [PATCH 12/99] Update kernel.py --- Note/create/kernel.py | 60 ++++++++++++++++++++++++++++++++----------- 1 file changed, 45 insertions(+), 15 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 1191303d2..b09bec538 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -247,10 +247,10 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat self.apply_gradient(tape,self.nn.opt[t],batch_loss,self.nn.param[t]) except AttributeError: if self.thread==None: - gradient=tape.gradient(batch_loss,self.nn.param) + gradient=self.nn.gradient(tape,batch_loss,self.nn.param) self.nn.oopt(gradient,self.nn.param) else: - gradient=tape.gradient(batch_loss,self.nn.param[t]) + gradient=self.nn.gradient(tape,batch_loss,self.nn.param[t]) self.nn.oopt(gradient,self.nn.param,t) total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=batch_loss,total_loss=total_loss,total_acc=total_acc,t=t) if i==epoch-1: @@ -301,10 +301,10 @@ 
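# A sketch of the duck-typing the opt/gradient hunks in patches 11-12 rely
# on: if the model exposes a standard optimizer as nn.opt, the kernel takes
# the tape.gradient path; touching nn.opt on a model without one raises
# AttributeError, and the kernel falls back to the model's own
# nn.gradient/nn.oopt pair. The function below is an illustration of that
# protocol, not the kernel's exact code:
import tensorflow as tf

def train_step(nn, data, labels):
    with tf.GradientTape() as tape:
        loss = nn.loss(nn.fp(data), labels)
    try:
        if nn.opt != None:             # AttributeError here selects the fallback
            pass
        grads = tape.gradient(loss, nn.param)
        nn.opt.apply_gradients(zip(grads, nn.param))
    except AttributeError:
        grads = nn.gradient(tape, loss, nn.param)  # model-defined gradient
        nn.oopt(grads, nn.param)                   # model-defined update
    return loss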
def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat self.apply_gradient(tape,self.nn.opt[t],batch_loss,self.nn.param[t]) except AttributeError: if self.thread==None: - gradient=tape.gradient(batch_loss,self.nn.param) + gradient=self.nn.gradient(tape,batch_loss,self.nn.param) self.nn.oopt(gradient,self.param) else: - gradient=tape.gradient(batch_loss,self.nn.param[t]) + gradient=self.nn.gradient(tape,batch_loss,self.nn.param[t]) self.nn.oopt(gradient,self.nn.param,t) total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=batch_loss,total_loss=total_loss,total_acc=total_acc,t=t) if i==epoch-1: @@ -394,10 +394,10 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat self.apply_gradient(tape,self.nn.opt[t],batch_loss,self.nn.param[t]) except AttributeError: if self.thread==None: - gradient=tape.gradient(train_loss,self.nn.param) + gradient=self.nn.gradient(tape,train_loss,self.nn.param) self.nn.oopt(gradient,self.nn.param) else: - gradient=tape.gradient(batch_loss,self.nn.param[t]) + gradient=self.nn.gradient(tape,batch_loss,self.nn.param[t]) self.nn.oopt(gradient,self.nn.param,t) self.loss_acc(output=output,labels_batch=labels_batch,loss=train_loss,test_batch=test_batch,total_loss=total_loss,total_acc=total_acc,t=t) if i==epoch-1: @@ -430,11 +430,11 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat self.thread_lock.release() except AttributeError: if self.PO==1: - self.gradient=tape.gradient(train_loss,self.nn.param) + self.gradient=self.nn.gradient(tape,train_loss,self.nn.param) self.nn.oopt(self.gradient,self.nn.param) else: self.thread_lock.acquire() - self.gradient=tape.gradient(train_loss,self.nn.param) + self.gradient=self.nn.gradient(tape,train_loss,self.nn.param) self.thread_lock.release() self.thread_lock.acquire() self.nn.oopt(self.gradient,self.nn.param) @@ -445,7 +445,7 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat pass self.apply_gradient(tape,self.nn.opt,train_loss,self.nn.param) except AttributeError: - gradient=tape.gradient(train_loss,self.nn.param) + gradient=self.nn.gradient(tape,train_loss,self.nn.param) self.nn.oopt(gradient,self.nn.param) train_loss=self.nn.loss(output,data[1]) loss=train_loss.numpy() @@ -487,7 +487,12 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= with tf.GradientTape() as tape: self.output=self.nn.fp(data_batch) self.batch_loss=self.nn.loss(self.output,labels_batch) - self.gradient=tape.gradient(self.batch_loss,self.nn.param) + try: + if self.nn.opt!=None: + pass + self.gradient=tape.gradient(self.batch_loss,self.nn.param) + except AttributeError: + self.gradient=self.nn.gradient(tape,self.batch_loss,self.nn.param) try: if self.nn.opt!=None: pass @@ -510,7 +515,12 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= with tf.GradientTape() as tape: self.output=self.nn.fp(data_batch) self.batch_loss=self.nn.loss(self.output,labels_batch) - self.gradient=tape.gradient(self.batch_loss,self.param) + try: + if self.nn.opt!=None: + pass + self.gradient=tape.gradient(self.batch_loss,self.param) + except AttributeError: + self.gradient=self.nn.gradient(tape,self.batch_loss,self.param) self.thread_lock.release() self.thread_lock.acquire() try: @@ -560,7 +570,12 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= with tf.GradientTape() as tape: self.output=self.nn.fp(data_batch) 
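# The PO branches above differ only in how much work sits inside the lock:
# PO=1 runs the whole step serially, while PO=2 releases the lock between
# producing the shared gradient and applying it, so other threads may
# interleave in the gap. A schematic version of the PO=2 protocol, with the
# kernel's shared self.gradient attribute stood in by a dict (assumed nn
# protocol as elsewhere in the kernel):
import threading
import tensorflow as tf

lock = threading.Lock()
state = {'gradient': None}             # stands in for the kernel's shared attribute

def po2_step(nn, data, labels):
    with tf.GradientTape() as tape:
        loss = nn.loss(nn.fp(data), labels)
    lock.acquire()
    state['gradient'] = tape.gradient(loss, nn.param)   # publish under the lock
    lock.release()                     # other threads may interleave here
    lock.acquire()
    nn.opt.apply_gradients(zip(state['gradient'], nn.param))
    lock.release()
    return loss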
self.batch_loss=self.nn.loss(self.output,labels_batch) - self.gradient=tape.gradient(self.batch_loss,self.nn.param) + try: + if self.nn.opt!=None: + pass + self.gradient=tape.gradient(self.batch_loss,self.nn.param) + except AttributeError: + self.gradient=self.nn.gradient(tape,self.batch_loss,self.nn.param) try: if self.nn.opt!=None: pass @@ -584,7 +599,12 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= with tf.GradientTape() as tape: self.output=self.nn.fp(data_batch) self.batch_loss=self.nn.loss(self.output,labels_batch) - self.gradient=tape.gradient(self.batch_loss,self.param) + try: + if self.nn.opt!=None: + pass + self.gradient=tape.gradient(self.batch_loss,self.param) + except AttributeError: + self.gradient=self.nn.gradient(tape,self.batch_loss,self.param) self.thread_lock.release() self.thread_lock.acquire() try: @@ -613,7 +633,12 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= with tf.GradientTape() as tape: self.output=self.nn.fp(self.train_data) self._train_loss=self.nn.loss(self.output,self.train_labels) - self.gradient=tape.gradient(self._train_loss,self.nn.param) + try: + if self.nn.opt!=None: + pass + self.gradient=tape.gradient(self._train_loss,self.nn.param) + except AttributeError: + self.gradient=self.nn.gradient(tape,self._train_loss,self.nn.param) try: if self.nn.opt!=None: pass @@ -655,7 +680,12 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= with tf.GradientTape() as tape: self.output=self.nn.fp(self.train_data) self._train_loss=self.nn.loss(self.output,self.train_labels) - self.gradient=tape.gradient(self._train_loss,self.param) + try: + if self.nn.opt!=None: + pass + self.gradient=tape.gradient(self._train_loss,self.param) + except AttributeError: + self.gradient=self.nn.gradient(tape,self._train_loss,self.param) self.thread_lock.release() self.thread_lock.acquire() try: From a224a48d7c8e97b09bc44b313b85684d5176d35f Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 22 Jun 2022 08:07:12 +0800 Subject: [PATCH 13/99] Update kernel.py --- Note/create/kernel.py | 44 +++++++++++++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index b09bec538..74575e59e 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -59,7 +59,15 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None): else: self.shape0=train_data.shape[0] if self.thread!=None: - self.t=-np.arange(-self.thread,1) + self.t=list(self.t) + try: + self.nn.ec=np.zeros(self.thread) + except AttributeError: + pass + try: + self.nn.bc=np.zeros(self.thread) + except AttributeError: + pass if self.PO==None: self.train_loss=np.zeros(self.thread) self.train_acc=np.zeros(self.thread) @@ -98,22 +106,30 @@ def add_threads(self,thread): t=-np.arange(-thread,1)+self.thread+1 self.t=t.extend(self.t) self.thread+=thread + try: + self.nn.ec=np.concatenate((self.nn.ec,np.zeros(thread))) + except AttributeError: + pass + try: + self.nn.bc=np.concatenate((self.nn.bc,np.zeros(thread))) + except AttributeError: + pass if self.PO==None: - self.train_loss=np.concatenate((self.train_loss,np.zeros(self.t))) - self.train_acc=np.concatenate((self.train_acc,np.zeros(self.t))) - self.train_loss_list.extend([[] for _ in range(len(self.t))]) - self.train_acc_list.extend([[] for _ in range(len(self.t))]) + 
self.train_loss=np.concatenate((self.train_loss,np.zeros(thread))) + self.train_acc=np.concatenate((self.train_acc,np.zeros(thread))) + self.train_loss_list.extend([[] for _ in range(thread)]) + self.train_acc_list.extend([[] for _ in range(thread)]) if self.test_flag==True: if self.PO==None: - self.test_loss=np.concatenate((self.test_loss,np.zeros(self.t))) - self.test_acc=np.concatenate((self.test_acc,np.zeros(self.t))) - self.test_loss_list.extend([[] for _ in range(len(self.t))]) - self.test_acc_list.extend([[] for _ in range(len(self.t))]) - self.stop=np.concatenate((self.stop,np.zeros(self.t))) - self.epoch=np.concatenate((self.epoch,np.zeros(self.t))) - self.total_epoch=np.concatenate((self.total_epoch,np.zeros(self.t))) - self.time=np.concatenate((self.time,np.zeros(self.t))) - self.total_time=np.concatenate((self.total_time,np.zeros(self.t))) + self.test_loss=np.concatenate((self.test_loss,np.zeros(thread))) + self.test_acc=np.concatenate((self.test_acc,np.zeros(thread))) + self.test_loss_list.extend([[] for _ in range(thread)]) + self.test_acc_list.extend([[] for _ in range(thread)]) + self.stop=np.concatenate((self.stop,np.zeros(thread))) + self.epoch=np.concatenate((self.epoch,np.zeros(thread))) + self.total_epoch=np.concatenate((self.total_epoch,np.zeros(thread))) + self.time=np.concatenate((self.time,np.zeros(thread))) + self.total_time=np.concatenate((self.total_time,np.zeros(thread))) return From a49d935dc3ce40d7b42133512f8dd9165e4e3f87 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 22 Jun 2022 13:22:41 +0800 Subject: [PATCH 14/99] Update kernel.py --- Note/create/kernel.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 74575e59e..042d646d5 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -19,7 +19,6 @@ def __init__(self,nn=None): self.thread_lock=None self.thread=None self.ol=None - self.stop=None self.batch=None self.epoch=0 self.end_loss=None @@ -79,7 +78,6 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None): self.test_acc=np.zeros(self.thread) self.test_loss_list=[[] for _ in range(self.thread)] self.test_acc_list=[[] for _ in range(self.thread)] - self.stop=np.zeros(self.thread) self.epoch=np.zeros(self.thread) self.total_epoch=np.zeros(self.thread) self.time=np.zeros(self.thread) @@ -125,7 +123,6 @@ def add_threads(self,thread): self.test_acc=np.concatenate((self.test_acc,np.zeros(thread))) self.test_loss_list.extend([[] for _ in range(thread)]) self.test_acc_list.extend([[] for _ in range(thread)]) - self.stop=np.concatenate((self.stop,np.zeros(thread))) self.epoch=np.concatenate((self.epoch,np.zeros(thread))) self.total_epoch=np.concatenate((self.total_epoch,np.zeros(thread))) self.time=np.concatenate((self.time,np.zeros(thread))) @@ -425,8 +422,6 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat self.loss_acc(output=output,labels_batch=labels_batch,loss=train_loss,test_batch=test_batch,total_loss=_total_loss,total_acc=_total_acc,t=t) else: data=self.ol() - if self.stop==True: - return with tf.GradientTape() as tape: output=self.nn.fp(data[0]) train_loss=self.nn.loss(output,data[1]) @@ -895,12 +890,6 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N self.time+=(t2-t1) else: self.time[t]+=(t2-t1) - if self.thread==None: - if self.stop==True: - break - else: - if self.stop[t]==True: - break if self.end_flag==True and 
self.end()==True: self.nn.param=self._param self._param=None @@ -965,12 +954,6 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N self.time+=(t2-t1) else: self.time[t]+=(t2-t1) - if self.thread==None: - if self.stop==True: - break - else: - if self.stop[t]==True: - break if self.end_flag==True and self.end()==True: self.nn.param=self._param self._param=None From ad61d5071564d0274b132827f7dbbcd8f992fe76 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 22 Jun 2022 13:22:54 +0800 Subject: [PATCH 15/99] Update kernel.py --- Note/create/RL/kernel.py | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py index c14b9f734..af43fe2e6 100644 --- a/Note/create/RL/kernel.py +++ b/Note/create/RL/kernel.py @@ -30,9 +30,8 @@ def __init__(self,nn=None,state=None,state_name=None,action_name=None,thread=Non self.update_step=None self.end_loss=None self.thread=thread - self.t=-np.arange(-self.thread,1) + self.t=list(-np.arange(-self.thread,1)) self.thread_lock=thread_lock - self.stop=np.zeros(self.thread) self.state_list=None self._state_list=[] self.p=[] @@ -84,10 +83,9 @@ def add_threads(self,thread): self.t=t.extend(self.t) self.thread+=thread if self.PO!=True: - self.loss=np.concatenate((self.train_loss,np.zeros(self.t))) - self.loss_list.extend([[] for _ in range(len(self.t))]) - self.stop=np.concatenate((self.stop,np.zeros(self.t))) - self.episode_num=np.concatenate((self.epoch,np.zeros(self.t))) + self.loss=np.concatenate((self.train_loss,np.zeros(thread))) + self.loss_list.extend([[] for _ in range(thread)]) + self.episode_num=np.concatenate((self.epoch,np.zeros(thread))) return @@ -681,8 +679,6 @@ def learn2(self,i,episode_num=None,k=None): length=min(len(self.state_pool[i]),len(self.action_pool[i]),len(self.next_state_pool[i]),len(self.reward_pool[i])) train_ds=tf.data.Dataset.from_tensor_slices((self.state_pool[i][:length],self.action_pool[i][:length],self.next_state_pool[i][:length],self.reward_pool[i][:length])).shuffle(length).batch(self.batch) for state_batch,action_batch,next_state_batch,reward_batch in train_ds: - if self.stop[i]==True: - break if self.PO==1: with tf.GradientTape() as tape: if type(self.nn.nn)!=list: @@ -780,8 +776,6 @@ def learn3(self,i,episode_num,k): if length%self.batch!=0: batches+=1 for j in range(batches): - if self.stop[i]==True: - break self.learn1(i,j,batches,length,episode_num,k) else: try: @@ -850,8 +844,6 @@ def learn(self,epsilon,episode_num): elif i not in self.finish_lis and self.state_list!=None: self.state_list[i+1]=1 for k in range(episode_num): - if self.stop[i]==True: - break if self.episode_num[i]==self.epi_num[i]: break self.episode_num[i]+=1 @@ -862,8 +854,6 @@ def learn(self,epsilon,episode_num): s=int(np.random.uniform(0,len(self.state_name))) if self.episode_step==None: while True: - if self.stop[i]==True: - break next_s,end,_episode,index=self._explore(s,self.epsilon[i],i) s=next_s if self.state_pool[i]!=None and self.action_pool[i]!=None and self.next_state_pool[i]!=None and self.reward_pool[i]!=None: @@ -886,8 +876,6 @@ def learn(self,epsilon,episode_num): self.thread_lock.release() else: for _ in range(self.episode_step): - if self.stop[i]==True: - break next_s,end,episode,index=self._explore(s,self.epsilon[i],i) s=next_s if self.state_pool[i]!=None and self.action_pool[i]!=None and self.next_state_pool[i]!=None and self.reward_pool[i]!=None: From 
ba29336b467682123ff0d3c9b7c8f8f3e0abb40e Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 22 Jun 2022 13:23:08 +0800 Subject: [PATCH 16/99] Update kernel.py --- Note/create/RL/st/kernel.py | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/Note/create/RL/st/kernel.py b/Note/create/RL/st/kernel.py index 8cbfa2e5e..9742e7410 100644 --- a/Note/create/RL/st/kernel.py +++ b/Note/create/RL/st/kernel.py @@ -18,7 +18,6 @@ def __init__(self,nn=None,state=None,state_name=None,action_name=None,save_episo self.ol=None self.PO=None self.thread_lock=None - self.stop=None self.state_pool=None self.action_pool=None self.next_state_pool=None @@ -648,24 +647,6 @@ def learn(self,episode_num,path=None,one=True,p=None,s=None): else: while True: data=self.ol() - if self.stop==True: - if type(self.nn.nn)!=list: - loss=self.nn.loss(self.nn.nn,data[0],data[1],data[2],data[3]) - elif len(self.nn.param)==4: - value=self.nn.nn[0](data[0],p=0) - TD=tf.reduce_mean((data[3]+self.discount*self.nn.nn[0](data[2],p=2)-value)**2) - loss=TD - else: - value=self.nn.nn[0](data[0]) - TD=tf.reduce_mean((data[3]+self.discount*self.nn.nn[0](data[2])-value)**2) - loss=TD - if len(self.loss_list)==0: - self.loss_list.append(loss.numpy()) - else: - self.loss_list[0]=loss.numpy() - if path!=None: - self.save(path) - return with tf.GradientTape() as tape: if type(self.nn.nn)!=list: loss=self.nn.loss(self.nn.nn,data[0],data[1],data[2],data[3]) From 5c5c7658f0b676d5013059e0a976bfa7d8d5aee5 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 22 Jun 2022 23:58:26 +0800 Subject: [PATCH 17/99] Update kernel.py --- Note/create/kernel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 042d646d5..96f43e36c 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -975,8 +975,8 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N if file_path!=None: self.save(file_path) if self.thread==None: - self.time=self.time-int(self.time) - if self.time<0.5: + self._time=self.time-int(self.time) + if self._time<0.5: self.time=int(self.time) else: self.time=int(self.time)+1 From 0f0cd85ef162a5800bf586d8bc8b1e09ce2a43d3 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 22 Jun 2022 23:58:43 +0800 Subject: [PATCH 18/99] Update kernel.py --- Note/create/RL/st/kernel.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Note/create/RL/st/kernel.py b/Note/create/RL/st/kernel.py index 9742e7410..8f59e6508 100644 --- a/Note/create/RL/st/kernel.py +++ b/Note/create/RL/st/kernel.py @@ -738,7 +738,8 @@ def learn(self,episode_num,path=None,one=True,p=None,s=None): self.total_e+=1 if path!=None: self.save(path) - if self.time<0.5: + self._time=self.time-int(self.time) + if self._time<0.5: self.time=int(self.time) else: self.time=int(self.time)+1 From 7c990da0cd822c314e4f39168f3969f07eced963 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 23 Jun 2022 00:14:37 +0800 Subject: [PATCH 19/99] Update kernel.py --- Note/create/kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 96f43e36c..41d1dc537 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -981,7 +981,7 @@ def 
train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N else: self.time=int(self.time)+1 self.total_time+=self.time - else: + elif type(self.total_time)==list: self.time[t]=self.time[t]-int(self.time[t]) if self.time[t]<0.5: self.time[t]=int(self.time[t]) From 37bd9db91d5e2017c9cd1d4b280620371efb4515 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Mon, 27 Jun 2022 08:08:32 +0800 Subject: [PATCH 20/99] Update kernel.py --- Note/create/kernel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 41d1dc537..442ea657e 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -1246,10 +1246,10 @@ def save(self,path,i=None,one=True): index=path.rfind('\\') parameter_file=open(path.replace(path[index+1:],'parameter.dat'),'wb') else: - output_file=open(path+'\save-{0}.dat'.format(i+1),'wb') + output_file=open(path+'\save-{0}.dat'.format(i),'wb') path=path+'\save-{0}.dat'.format(i+1) index=path.rfind('\\') - parameter_file=open(path.replace(path[index+1:],'parameter-{0}.dat'.format(i+1)),'wb') + parameter_file=open(path.replace(path[index+1:],'parameter-{0}.dat'.format(i)),'wb') self.file_list.append(['save-{0}.dat','parameter-{0}.dat']) if len(self.file_list)>self.s+1: os.remove(self.file_list[0][0]) From 22a80163475cab2eaf0805551b75b6a33a0b7479 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Mon, 27 Jun 2022 08:08:49 +0800 Subject: [PATCH 21/99] Update kernel.py --- Note/create/RL/st/kernel.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Note/create/RL/st/kernel.py b/Note/create/RL/st/kernel.py index 8f59e6508..122e1e5eb 100644 --- a/Note/create/RL/st/kernel.py +++ b/Note/create/RL/st/kernel.py @@ -791,12 +791,12 @@ def save(self,path,i=None,one=True): pickle.dump(self.episode,episode_file) episode_file.close() else: - output_file=open(path+'\save-{0}.dat'.format(i+1),'wb') - path=path+'\save-{0}.dat'.format(i+1) + output_file=open(path+'\save-{0}.dat'.format(i),'wb') + path=path+'\save-{0}.dat'.format(i) index=path.rfind('\\') - parameter_file=open(path.replace(path[index+1:],'parameter-{0}.dat'.format(i+1)),'wb') + parameter_file=open(path.replace(path[index+1:],'parameter-{0}.dat'.format(i)),'wb') if self.save_episode==True: - episode_file=open(path.replace(path[index+1:],'episode-{0}.dat'.format(i+1)),'wb') + episode_file=open(path.replace(path[index+1:],'episode-{0}.dat'.format(i)),'wb') pickle.dump(self.episode,episode_file) episode_file.close() self.episode_num=self.epi_num From 9b7ee6d0fffd3316fc2a1b7bd45b74bab66b5db4 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Mon, 27 Jun 2022 08:11:20 +0800 Subject: [PATCH 22/99] Update kernel.py --- Note/create/kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 442ea657e..409d7581f 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -1247,7 +1247,7 @@ def save(self,path,i=None,one=True): parameter_file=open(path.replace(path[index+1:],'parameter.dat'),'wb') else: output_file=open(path+'\save-{0}.dat'.format(i),'wb') - path=path+'\save-{0}.dat'.format(i+1) + path=path+'\save-{0}.dat'.format(i) index=path.rfind('\\') parameter_file=open(path.replace(path[index+1:],'parameter-{0}.dat'.format(i)),'wb') 
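# Patches 17-19 fix the round-to-nearest-second step: the fractional part
# now goes into a scratch _time instead of overwriting the accumulated time
# before it is compared, and the list case is gated on type(self.total_time).
# The intended rounding behaviour, as a standalone helper (round half up):
def round_seconds(elapsed):
    frac = elapsed - int(elapsed)      # keep the fraction separately
    return int(elapsed) if frac < 0.5 else int(elapsed) + 1

assert round_seconds(3.4) == 3
assert round_seconds(3.5) == 4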
self.file_list.append(['save-{0}.dat','parameter-{0}.dat']) From 71dfc13892ac5603afec149b67e4c6e5e7d0f43a Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 30 Jun 2022 13:41:03 +0800 Subject: [PATCH 23/99] Update kernel.py --- Note/create/RL/kernel.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py index af43fe2e6..9e764ecac 100644 --- a/Note/create/RL/kernel.py +++ b/Note/create/RL/kernel.py @@ -10,8 +10,7 @@ def __init__(self,nn=None,state=None,state_name=None,action_name=None,thread=Non self.nn=nn self.opt=nn.opt try: - if self.nn.km==0: - self.nn.km=1 + self.nn.km=1 except AttributeError: pass self.state_pool=[] @@ -1001,8 +1000,7 @@ def restore(self,s_path,p_path,e_path=None): self.nn.param=param param=None try: - if self.nn.km==0: - self.nn.km=1 + self.nn.km=1 except AttributeError: pass self.opt=self.nn.opt From c99091941048a3f644352721a1cb3bedc602e09f Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 30 Jun 2022 13:41:18 +0800 Subject: [PATCH 24/99] Update kernel.py --- Note/create/RL/st/kernel.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/Note/create/RL/st/kernel.py b/Note/create/RL/st/kernel.py index 122e1e5eb..8c3dd7680 100644 --- a/Note/create/RL/st/kernel.py +++ b/Note/create/RL/st/kernel.py @@ -11,8 +11,7 @@ def __init__(self,nn=None,state=None,state_name=None,action_name=None,save_episo self.nn=nn self.opt=nn.opt try: - if self.nn.km==0: - self.nn.km=1 + self.nn.km=1 except AttributeError: pass self.ol=None @@ -747,11 +746,6 @@ def learn(self,episode_num,path=None,one=True,p=None,s=None): print() print('last loss:{0:.6f}'.format(loss)) print('time:{0}s'.format(self.time)) - try: - if self.nn.km==1: - self.nn.km=0 - except AttributeError: - pass return @@ -843,8 +837,7 @@ def restore(self,s_path,p_path,e_path=None): self.nn.param=param param=None try: - if self.nn.km==0: - self.nn.km=1 + self.nn.km=1 except AttributeError: pass self.opt=self.nn.opt From 3783bc3bedf776de18b6025f19140a043f7ba066 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 30 Jun 2022 13:41:31 +0800 Subject: [PATCH 25/99] Update kernel.py --- Note/create/kernel.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 409d7581f..fad3a8cfb 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -11,8 +11,7 @@ def __init__(self,nn=None): if nn!=None: self.nn=nn try: - if self.nn.km==0: - self.nn.km=1 + self.nn.km=1 except AttributeError: pass self.PO=None @@ -1006,12 +1005,6 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N else: print('accuracy:{0:.6f},test_flag accuracy:{1:.6f}'.format(self.train_acc,self.test_acc)) print('time:{0}s'.format(self.time)) - if self.thread==None: - try: - if self.nn.km==1: - self.nn.km=0 - except AttributeError: - pass return @@ -1295,8 +1288,7 @@ def restore(self,s_path,p_path): self.nn.param=param param=None try: - if self.nn.km==0: - self.nn.km=1 + self.nn.km=1 except AttributeError: pass self.ol=pickle.load(input_file) From 78f62b474c1bac69425317f9a01353edfd38b5d6 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sat, 2 Jul 2022 13:22:23 +0800 Subject: [PATCH 26/99] Update kernel.py 
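# Patches 20-22 make the on-disk name and the recorded path agree: both now
# use i, where before the file was opened as save-{i} but the path remembered
# as save-{i+1}. A sketch of the rotation scheme save() implements -- keep
# the newest s+1 numbered checkpoints and delete the oldest on overflow --
# simplified here to a single file per save (names illustrative):
import os

def rotate_save(dirpath, i, file_list, s, payload_writer):
    name = os.path.join(dirpath, 'save-{0}.dat'.format(i))
    with open(name, 'wb') as f:
        payload_writer(f)              # whatever pickles the state
    file_list.append(name)
    if len(file_list) > s + 1:         # retention window, as in the kernel
        os.remove(file_list.pop(0))    # drop the oldest checkpoint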
--- Note/create/kernel.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index fad3a8cfb..4544dbd35 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -26,7 +26,6 @@ def __init__(self,nn=None): self.end_test_acc=None self.acc_flag1=None self.acc_flag2='%' - self.flag=None self.train_loss=None self.train_acc=None self.train_loss_list=[] @@ -812,8 +811,6 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N t1=None t2=None t=None - if self.flag==None: - self.flag=True if self.p==None: self.p=9 else: @@ -872,7 +869,7 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N if s==0: s=1 if i%p==0: - if self.flag==None: + if self.total_epoch==None: if self.test_flag==False: print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss)) else: @@ -926,7 +923,7 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N if s==0: s=1 if i%p==0: - if self.flag==None: + if self.total_epoch==None: if self.test_flag==False: print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss)) else: @@ -1261,7 +1258,6 @@ def save(self,path,i=None,one=True): pickle.dump(self.p,output_file) pickle.dump(self.s,output_file) pickle.dump(self.file_list,output_file) - pickle.dump(self.flag,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_acc,output_file) pickle.dump(self.train_loss_list,output_file) @@ -1302,7 +1298,6 @@ def restore(self,s_path,p_path): self.p=pickle.load(input_file) self.s=pickle.load(input_file) self.file_list=pickle.load(input_file) - self.flag=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_acc=pickle.load(input_file) self.train_loss_list=pickle.load(input_file) From 419bfe5cafcd137e9f9cb7480bc923608894744e Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sat, 2 Jul 2022 13:29:46 +0800 Subject: [PATCH 27/99] Update kernel.py --- Note/create/kernel.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 4544dbd35..56b36afae 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -26,6 +26,7 @@ def __init__(self,nn=None): self.end_test_acc=None self.acc_flag1=None self.acc_flag2='%' + self.train_counter=0 self.train_loss=None self.train_acc=None self.train_loss_list=[] @@ -811,6 +812,7 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N t1=None t2=None t=None + self.train_counter+=1 if self.p==None: self.p=9 else: @@ -869,7 +871,7 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N if s==0: s=1 if i%p==0: - if self.total_epoch==None: + if self.train_counter==1: if self.test_flag==False: print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss)) else: @@ -923,7 +925,7 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N if s==0: s=1 if i%p==0: - if self.total_epoch==None: + if self.train_counter==1: if self.test_flag==False: print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss)) else: @@ -1258,6 +1260,7 @@ def save(self,path,i=None,one=True): pickle.dump(self.p,output_file) pickle.dump(self.s,output_file) pickle.dump(self.file_list,output_file) + pickle.dump(self.train_counter,output_file) pickle.dump(self.train_loss,output_file) pickle.dump(self.train_acc,output_file) pickle.dump(self.train_loss_list,output_file) @@ -1298,6 +1301,7 @@ def 
restore(self,s_path,p_path): self.p=pickle.load(input_file) self.s=pickle.load(input_file) self.file_list=pickle.load(input_file) + self.train_counter=pickle.load(input_file) self.train_loss=pickle.load(input_file) self.train_acc=pickle.load(input_file) self.train_loss_list=pickle.load(input_file) From bc0366983caeb19c4a54437e5e89f49256cefa66 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sat, 2 Jul 2022 23:06:07 +0800 Subject: [PATCH 28/99] Update kernel.py --- Note/create/kernel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 56b36afae..03541f873 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -813,11 +813,11 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N t2=None t=None self.train_counter+=1 - if self.p==None: + if p==None: self.p=9 else: self.p=p-1 - if self.s==None: + if s==None: self.s=1 else: self.s=s-1 From 510fac564863b5f5b687851fb5cd48da7a7a2154 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 3 Jul 2022 09:33:43 +0800 Subject: [PATCH 29/99] Update kernel.py --- Note/create/kernel.py | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 03541f873..35bfa25a1 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -288,14 +288,14 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat index2=batch-(self.shape0-batches*batch) if type(self.train_data)==list: for i in range(len(self.train_data)): - data_batch[i]=tf.concat([self.train_data[i][index1:],self.train_data[i][:index2]]) + data_batch[i]=tf.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) else: - data_batch=tf.concat([self.train_data[index1:],self.train_data[:index2]]) + data_batch=tf.concat([self.train_data[index1:],self.train_data[:index2]],0) if type(self.train_labels)==list: for i in range(len(self.train_data)): - labels_batch[i]=tf.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]]) + labels_batch[i]=tf.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) else: - labels_batch=tf.concat([self.train_labels[index1:],self.train_labels[:index2]]) + labels_batch=tf.concat([self.train_labels[index1:],self.train_labels[:index2]],0) with tf.GradientTape() as tape: if self.thread==None: output=self.nn.fp(data_batch) @@ -485,14 +485,14 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= if index1==batches*batch: if type(self.train_data)==list: for i in range(len(self.train_data)): - data_batch[i]=tf.concat([self.train_data[i][index1:],self.train_data[i][:index2]]) + data_batch[i]=tf.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) else: - data_batch=tf.concat([self.train_data[index1:],self.train_data[:index2]]) + data_batch=tf.concat([self.train_data[index1:],self.train_data[:index2]],0) if type(self.train_labels)==list: for i in range(len(self.train_data)): - labels_batch[i]=tf.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]]) + labels_batch[i]=tf.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) else: - labels_batch=tf.concat([self.train_labels[index1:],self.train_labels[:index2]]) + labels_batch=tf.concat([self.train_labels[index1:],self.train_labels[:index2]],0) if self.PO==1: with 
tf.GradientTape() as tape: self.output=self.nn.fp(data_batch) @@ -1049,26 +1049,14 @@ def test(self,test_data,test_labels,batch=None,t=None): index2=batch-(shape0-batches*batch) if type(test_data)==list: for i in range(len(test_data)): - if type(test_data)==np.ndarray: - data_batch[i]=np.concatenate(test_data[i][index1:],test_data[i][:index2]) - else: - data_batch[i]=tf.concat(test_data[i][index1:],test_data[i][:index2]) + data_batch[i]=tf.concat(test_data[i][index1:],test_data[i][:index2],0) else: - if type(test_data)==np.ndarray: - data_batch=np.concatenate(test_data[index1:],test_data[:index2]) - else: - data_batch=tf.concat(test_data[index1:],test_data[:index2]) + data_batch=tf.concat(test_data[index1:],test_data[:index2],0) if type(self.test_labels)==list: for i in range(len(test_labels)): - if type(test_labels)==np.ndarray: - labels_batch[i]=np.concatenate(test_labels[i][index1:],test_labels[i][:index2]) - else: - labels_batch[i]=tf.concat(test_labels[i][index1:],test_labels[i][:index2]) + labels_batch[i]=tf.concat(test_labels[i][index1:],test_labels[i][:index2],0) else: - if type(test_labels)==np.ndarray: - labels_batch=np.concatenate(test_labels[index1:],test_labels[:index2]) - else: - labels_batch=tf.concat(test_labels[index1:],test_labels[:index2]) + labels_batch=tf.concat(test_labels[index1:],test_labels[:index2],0) if self.thread==None: output=self.nn.fp(data_batch) else: From e19ca2bb35b6c14346d348bf1628db2f0c46ac25 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 3 Jul 2022 09:35:18 +0800 Subject: [PATCH 30/99] Update kernel.py --- Note/create/RL/kernel.py | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py index 9e764ecac..6592afbf3 100644 --- a/Note/create/RL/kernel.py +++ b/Note/create/RL/kernel.py @@ -270,29 +270,29 @@ def _explore(self,s,epsilon,i): else: try: if self.state==None: - self.state_pool[index]=tf.concat([self.state_pool[index],tf.expand_dims(s,axis=0)]) - self.action_pool[index]=tf.concat([self.action_pool[index],tf.expand_dims(a,axis=0)]) - self.next_state_pool[index]=tf.concat([self.next_state_pool[index],tf.expand_dims(next_s,axis=0)]) - self.reward_pool[index]=tf.concat([self.reward_pool[index],tf.expand_dims(r,axis=0)]) + self.state_pool[index]=tf.concat([self.state_pool[index],tf.expand_dims(s,axis=0)],0) + self.action_pool[index]=tf.concat([self.action_pool[index],tf.expand_dims(a,axis=0)],0) + self.next_state_pool[index]=tf.concat([self.next_state_pool[index],tf.expand_dims(next_s,axis=0)],0) + self.reward_pool[index]=tf.concat([self.reward_pool[index],tf.expand_dims(r,axis=0)],0) else: - self.state_pool[index]=tf.concat([self.state_pool[index],tf.expand_dims(self.state[self.state_name[s]],axis=0)]) - self.action_pool[index]=tf.concat([self.action_pool[index],tf.expand_dims(a,axis=0)]) - self.next_state_pool[index]=tf.concat([self.next_state_pool[index],tf.expand_dims(self.state[self.state_name[next_s]],axis=0)]) - self.reward_pool[index]=tf.concat([self.reward_pool[index],tf.expand_dims(r,axis=0)]) + self.state_pool[index]=tf.concat([self.state_pool[index],tf.expand_dims(self.state[self.state_name[s]],axis=0)],0) + self.action_pool[index]=tf.concat([self.action_pool[index],tf.expand_dims(a,axis=0)],0) + self.next_state_pool[index]=tf.concat([self.next_state_pool[index],tf.expand_dims(self.state[self.state_name[next_s]],axis=0)],0) + 
self.reward_pool[index]=tf.concat([self.reward_pool[index],tf.expand_dims(r,axis=0)],0) except: pass self.thread_lock.release() else: if self.state==None: - self.state_pool[i]=tf.concat([self.state_pool[i],tf.expand_dims(s,axis=0)]) - self.action_pool[i]=tf.concat([self.action_pool[i],tf.expand_dims(a,axis=0)]) - self.next_state_pool[i]=tf.concat([self.next_state_pool[i],tf.expand_dims(next_s,axis=0)]) - self.reward_pool[i]=tf.concat([self.reward_pool[i],tf.expand_dims(r,axis=0)]) + self.state_pool[i]=tf.concat([self.state_pool[i],tf.expand_dims(s,axis=0)],0) + self.action_pool[i]=tf.concat([self.action_pool[i],tf.expand_dims(a,axis=0)],0) + self.next_state_pool[i]=tf.concat([self.next_state_pool[i],tf.expand_dims(next_s,axis=0)],0) + self.reward_pool[i]=tf.concat([self.reward_pool[i],tf.expand_dims(r,axis=0)],0) else: - self.state_pool[i]=tf.concat([self.state_pool[i],tf.expand_dims(self.state[self.state_name[s]],axis=0)]) - self.action_pool[i]=tf.concat([self.action_pool[i],tf.expand_dims(a,axis=0)]) - self.next_state_pool[i]=tf.concat([self.next_state_pool[i],tf.expand_dims(self.state[self.state_name[next_s]],axis=0)]) - self.reward_pool[i]=tf.concat([self.reward_pool[i],tf.expand_dims(r,axis=0)]) + self.state_pool[i]=tf.concat([self.state_pool[i],tf.expand_dims(self.state[self.state_name[s]],axis=0)],0) + self.action_pool[i]=tf.concat([self.action_pool[i],tf.expand_dims(a,axis=0)],0) + self.next_state_pool[i]=tf.concat([self.next_state_pool[i],tf.expand_dims(self.state[self.state_name[next_s]],axis=0)],0) + self.reward_pool[i]=tf.concat([self.reward_pool[i],tf.expand_dims(r,axis=0)],0) if len(self.state_pool[i])>self.pool_size: self.state_pool[i]=self.state_pool[i][1:] self.action_pool[i]=self.action_pool[i][1:] @@ -585,10 +585,10 @@ def learn1(self,i,j=None,batches=None,length=None,episode_num=None,k=None): batches+=1 index1=batches*self.batch index2=self.batch-(self.shape0-batches*self.batch) - state_batch=tf.concat([self.state_pool[i][index1:length],self.state_pool[i][:index2]]) - action_batch=tf.concat([self.action_pool[i][index1:length],self.action_pool[i][:index2]]) - next_state_batch=tf.concat([self.next_state_pool[i][index1:length],self.next_state_pool[i][:index2]]) - reward_batch=tf.concat([self.reward_pool[i][index1:length],self.reward_pool[i][:index2]]) + state_batch=tf.concat([self.state_pool[i][index1:length],self.state_pool[i][:index2]],0) + action_batch=tf.concat([self.action_pool[i][index1:length],self.action_pool[i][:index2]],0) + next_state_batch=tf.concat([self.next_state_pool[i][index1:length],self.next_state_pool[i][:index2]],0) + reward_batch=tf.concat([self.reward_pool[i][index1:length],self.reward_pool[i][:index2]],0) if self.PO==1: with tf.GradientTape() as tape: if type(self.nn.nn)!=list: From 4f881be4dc98a11dc1ab836ba3644f99a6aee436 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 3 Jul 2022 09:36:27 +0800 Subject: [PATCH 31/99] Update kernel.py --- Note/create/RL/st/kernel.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/Note/create/RL/st/kernel.py b/Note/create/RL/st/kernel.py index 8c3dd7680..e71c8a4aa 100644 --- a/Note/create/RL/st/kernel.py +++ b/Note/create/RL/st/kernel.py @@ -434,15 +434,15 @@ def learn2(self,episode_num,i): self.reward_pool=tf.expand_dims(r,axis=0) else: if self.state==None: - self.state_pool=tf.concat([self.state_pool,tf.expand_dims(s,axis=0)]) - self.action_pool=tf.concat([self.action_pool,tf.expand_dims(a,axis=0)]) - 
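# Patches 29-31 add the missing axis argument: tf.concat(values, axis) has no
# default axis, so every call that stacks a new transition onto a pool must
# say axis 0 explicitly. A minimal sketch of the append-and-trim pattern the
# replay pools use (pool_size assumed, as in the kernel):
import tensorflow as tf

def push(pool, item, pool_size):
    item = tf.expand_dims(item, axis=0)              # shape (1, ...) row
    pool = item if pool is None else tf.concat([pool, item], 0)
    if len(pool) > pool_size:                        # FIFO: drop the oldest row
        pool = pool[1:]
    return pool

pool = None
for step in range(5):
    pool = push(pool, tf.constant([float(step)]), pool_size=3)
print(pool.shape)   # (3, 1)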
self.next_state_pool=tf.concat([self.next_state_pool,tf.expand_dims(next_s,axis=0)]) - self.reward_pool=tf.concat([self.reward_pool,tf.expand_dims(r,axis=0)]) + self.state_pool=tf.concat([self.state_pool,tf.expand_dims(s,axis=0)],0) + self.action_pool=tf.concat([self.action_pool,tf.expand_dims(a,axis=0)],0) + self.next_state_pool=tf.concat([self.next_state_pool,tf.expand_dims(next_s,axis=0)],0) + self.reward_pool=tf.concat([self.reward_pool,tf.expand_dims(r,axis=0)],0) else: - self.state_pool=tf.concat([self.state_pool,tf.expand_dims(self.state[self.state_name[s]],axis=0)]) - self.action_pool=tf.concat([self.action_pool,tf.expand_dims(a,axis=0)]) - self.next_state_pool=tf.concat([self.next_state_pool,tf.expand_dims(self.state[self.state_name[next_s]],axis=0)]) - self.reward_pool=tf.concat([self.reward_pool,tf.expand_dims(r,axis=0)]) + self.state_pool=tf.concat([self.state_pool,tf.expand_dims(self.state[self.state_name[s]],axis=0)],0) + self.action_pool=tf.concat([self.action_pool,tf.expand_dims(a,axis=0)],0) + self.next_state_pool=tf.concat([self.next_state_pool,tf.expand_dims(self.state[self.state_name[next_s]],axis=0)],0) + self.reward_pool=tf.concat([self.reward_pool,tf.expand_dims(r,axis=0)],0) if len(self.state_pool)>self.pool_size: self.state_pool=self.state_pool[1:] self.action_pool=self.action_pool[1:] @@ -525,10 +525,10 @@ def learn2(self,episode_num,i): self.next_state_pool=tf.expand_dims(self.state[self.state_name[next_s]],axis=0) self.reward_pool=tf.expand_dims(r,axis=0) else: - self.state_pool=tf.concat([self.state_pool,tf.expand_dims(self.state[self.state_name[s]],axis=0)]) - self.action_pool=tf.concat([self.action_pool,tf.expand_dims(a,axis=0)]) - self.next_state_pool=tf.concat([self.next_state_pool,tf.expand_dims(self.state[self.state_name[next_s]],axis=0)]) - self.reward_pool=tf.concat([self.reward_pool,tf.expand_dims(r,axis=0)]) + self.state_pool=tf.concat([self.state_pool,tf.expand_dims(self.state[self.state_name[s]],axis=0)],0) + self.action_pool=tf.concat([self.action_pool,tf.expand_dims(a,axis=0)],0) + self.next_state_pool=tf.concat([self.next_state_pool,tf.expand_dims(self.state[self.state_name[next_s]],axis=0)],0) + self.reward_pool=tf.concat([self.reward_pool,tf.expand_dims(r,axis=0)],0) if len(self.state_pool)>self.pool_size: self.state_pool=self.state_pool[1:] self.action_pool=self.action_pool[1:] From 6551717e1a76ca4d29002f61bd9ee7e1a01afb81 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 3 Jul 2022 12:00:26 +0800 Subject: [PATCH 32/99] Update kernel.py --- Note/create/kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 35bfa25a1..ba0cc8dcb 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -921,7 +921,7 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N s=epoch/(self.s+1) s=int(s) if p==0: - p=1 + p=epoch if s==0: s=1 if i%p==0: From e828cfa11ba76cb181829c6640a746eb1f4ea3c3 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 3 Jul 2022 23:12:05 +0800 Subject: [PATCH 33/99] Update kernel.py --- Note/create/kernel.py | 388 +++++++++++++++--------------------------- 1 file changed, 135 insertions(+), 253 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index ba0cc8dcb..663567f58 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -164,56 +164,52 @@ def end(self): def 
loss_acc(self,output=None,labels_batch=None,loss=None,test_batch=None,total_loss=None,total_acc=None,t=None): if self.batch!=None: - if self.total_epoch>=1: - total_loss+=loss - if self.acc_flag1==1: - batch_acc=self.nn.accuracy(output,labels_batch) - total_acc+=batch_acc + total_loss+=loss + if self.acc_flag1==1: + batch_acc=self.nn.accuracy(output,labels_batch) + total_acc+=batch_acc return total_loss,total_acc elif self.ol==None: - if self.total_epoch>=1: - loss=loss.numpy() + loss=loss.numpy() + if self.thread==None: + self.train_loss_list.append(loss.astype(np.float32)) + self.train_loss=loss + self.train_loss=self.train_loss.astype(np.float32) + else: + self.train_loss_list[t].append(loss.astype(np.float32)) + self.train_loss[t]=loss + self.train_loss[t]=self.train_loss[t].astype(np.float32) + if self.acc_flag1==1: if self.thread==None: - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) + acc=self.nn.accuracy(output,self.train_labels) + acc=acc.numpy() + self.train_acc_list.append(acc.astype(np.float32)) + self.train_acc=acc + self.train_acc=self.train_acc.astype(np.float32) else: - self.train_loss_list[t].append(loss.astype(np.float32)) - self.train_loss[t]=loss - self.train_loss[t]=self.train_loss[t].astype(np.float32) - if self.acc_flag1==1: - if self.thread==None: - acc=self.nn.accuracy(output,self.train_labels) - acc=acc.numpy() - self.train_acc_list.append(acc.astype(np.float32)) - self.train_acc=acc - self.train_acc=self.train_acc.astype(np.float32) - else: - acc=self.nn.accuracy(output,self.train_labels[t]) - acc=acc.numpy() - self.train_acc_list[t].append(acc.astype(np.float32)) - self.train_acc[t]=acc - self.train_acc[t]=self.train_acc[t].astype(np.float32) - if self.test_flag==True: - if self.thread==None: - self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) - self.test_loss_list.append(self.test_loss) - if self.acc_flag1==1: - self.test_acc_list.append(self.test_acc) - else: - self.test_loss[t],self.test_acc[t]=self.test(self.test_data,self.test_labels,test_batch,t) - self.test_loss_list[t].append(self.test_loss[t]) - if self.acc_flag1==1: - self.test_acc_list[t].append(self.test_acc[t]) + acc=self.nn.accuracy(output,self.train_labels[t]) + acc=acc.numpy() + self.train_acc_list[t].append(acc.astype(np.float32)) + self.train_acc[t]=acc + self.train_acc[t]=self.train_acc[t].astype(np.float32) + if self.test_flag==True: + if self.thread==None: + self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) + self.test_loss_list.append(self.test_loss) + if self.acc_flag1==1: + self.test_acc_list.append(self.test_acc) + else: + self.test_loss[t],self.test_acc[t]=self.test(self.test_data,self.test_labels,test_batch,t) + self.test_loss_list[t].append(self.test_loss[t]) + if self.acc_flag1==1: + self.test_acc_list[t].append(self.test_acc[t]) return - def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_batch=None,t=None,i=None): + def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_batch=None,t=None): if self.end_loss!=None or self.end_acc!=None or self.end_test_loss!=None or self.end_test_acc!=None: self._param=self.nn.param if batch!=None: - _total_loss=0 - _total_acc=0 total_loss=0 total_acc=0 batches=int((self.shape0-self.shape0%batch)/batch) @@ -265,13 +261,6 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat 
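# Patch 33 collapses loss_acc to an unconditional running total: every batch
# contributes once, the per-epoch figure is total/batches, and the separate
# end-of-epoch re-evaluation pass (_total_loss/_total_acc and the i==epoch-1
# branches) disappears. The resulting bookkeeping in isolation, assuming the
# kernel's fp/loss/accuracy protocol:
def epoch_metrics(nn, batches_iter, n_batches):
    total_loss, total_acc = 0.0, 0.0
    for data_batch, labels_batch in batches_iter:
        output = nn.fp(data_batch)
        total_loss += float(nn.loss(output, labels_batch))
        total_acc += float(nn.accuracy(output, labels_batch))
    return total_loss / n_batches, total_acc / n_batches   # epoch averages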
gradient=self.nn.gradient(tape,batch_loss,self.nn.param[t]) self.nn.oopt(gradient,self.nn.param,t) total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=batch_loss,total_loss=total_loss,total_acc=total_acc,t=t) - if i==epoch-1: - if self.thread==None: - output=self.nn.fp(data_batch) - else: - output=self.nn.fp(data_batch,t) - _batch_loss=self.nn.loss(output,labels_batch) - _total_loss,_total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=_batch_loss,total_loss=_total_loss,total_acc=_total_acc,t=t) if self.thread==None: try: self.nn.bc=j @@ -319,13 +308,6 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat gradient=self.nn.gradient(tape,batch_loss,self.nn.param[t]) self.nn.oopt(gradient,self.nn.param,t) total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=batch_loss,total_loss=total_loss,total_acc=total_acc,t=t) - if i==epoch-1: - if self.thread==None: - output=self.nn.fp(data_batch) - else: - output=self.nn.fp(data_batch,t) - _batch_loss=self.nn.loss(output,labels_batch) - _total_loss,_total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=_batch_loss,total_loss=_total_loss,total_acc=_total_acc,t=t) if self.thread==None: try: self.nn.bc+=1 @@ -336,58 +318,37 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat self.nn.bc[t]+=1 except AttributeError: pass - if self.total_epoch>=1: - loss=total_loss.numpy()/batches - if self.acc_flag1==1: - train_acc=total_acc/batches + loss=total_loss.numpy()/batches + if self.acc_flag1==1: + train_acc=total_acc/batches + if self.thread==None: + self.train_loss_list.append(loss.astype(np.float32)) + self.train_loss=loss + self.train_loss=self.train_loss.astype(np.float32) + else: + self.train_loss_list[t].append(loss.astype(np.float32)) + self.train_loss[t]=loss + self.train_loss[t]=self.train_loss[t].astype(np.float32) + if self.acc_flag1==1: if self.thread==None: - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) - if i==epoch-1: - loss=_total_loss.numpy()/batches - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) + self.train_acc_list.append(train_acc.astype(np.float32)) + self.train_acc=train_acc + self.train_acc=self.train_acc.astype(np.float32) else: - self.train_loss_list[t].append(loss.astype(np.float32)) - self.train_loss[t]=loss - self.train_loss[t]=self.train_loss[t].astype(np.float32) - if i==epoch-1: - loss=_total_loss.numpy()/batches - self.train_loss_list[t].append(loss.astype(np.float32)) - self.train_loss[t]=loss - self.train_loss[t]=self.train_loss[t].astype(np.float32) - if self.acc_flag1==1: - if self.thread==None: - self.train_acc_list.append(train_acc.astype(np.float32)) - self.train_acc=train_acc - self.train_acc=self.train_acc.astype(np.float32) - if i==epoch-1: - train_acc=_total_acc.numpy()/batches - self.train_acc_list.append(train_acc.astype(np.float32)) - self.train_acc=train_acc - self.train_acc=self.train_acc.astype(np.float32) - else: - self.train_acc_list[t].append(train_acc.astype(np.float32)) - self.train_acc[t]=train_acc - self.train_acc[t]=self.train_acc[t].astype(np.float32) - if i==epoch-1: - train_acc=_total_acc.numpy()/batches - self.train_acc_list[t].append(train_acc.astype(np.float32)) - self.train_acc[t]=train_acc - self.train_acc[t]=self.train_acc[t].astype(np.float32) - if self.test_flag==True: - if 
self.thread==None: - self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) - self.test_loss_list.append(self.test_loss) - if self.acc_flag1==1: - self.test_acc_list.append(self.test_acc) - else: - self.test_loss[t],self.test_acc[t]=self.test(self.test_data,self.test_labels,test_batch,t) - self.test_loss_list[t].append(self.test_loss[t]) - if self.acc_flag1==1: - self.test_acc_list[t].append(self.test_acc[t]) + self.train_acc_list[t].append(train_acc.astype(np.float32)) + self.train_acc[t]=train_acc + self.train_acc[t]=self.train_acc[t].astype(np.float32) + if self.test_flag==True: + if self.thread==None: + self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) + self.test_loss_list.append(self.test_loss) + if self.acc_flag1==1: + self.test_acc_list.append(self.test_acc) + else: + self.test_loss[t],self.test_acc[t]=self.test(self.test_data,self.test_labels,test_batch,t) + self.test_loss_list[t].append(self.test_loss[t]) + if self.acc_flag1==1: + self.test_acc_list[t].append(self.test_acc[t]) elif self.ol==None: with tf.GradientTape() as tape: if self.thread==None: @@ -412,13 +373,6 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat gradient=self.nn.gradient(tape,batch_loss,self.nn.param[t]) self.nn.oopt(gradient,self.nn.param,t) self.loss_acc(output=output,labels_batch=labels_batch,loss=train_loss,test_batch=test_batch,total_loss=total_loss,total_acc=total_acc,t=t) - if i==epoch-1: - if self.thread==None: - output=self.nn.fp(self.train_data) - else: - output=self.nn.fp(data_batch,t) - train_loss=self.nn.loss(output,self.train_labels) - self.loss_acc(output=output,labels_batch=labels_batch,loss=train_loss,test_batch=test_batch,total_loss=_total_loss,total_acc=_total_acc,t=t) else: data=self.ol() with tf.GradientTape() as tape: @@ -478,7 +432,7 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat return - def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch=None,test_batch=None,index1=None,index2=None,j=None,t=None,i=None): + def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch=None,test_batch=None,index1=None,index2=None,j=None,t=None): if self.end_loss!=None or self.end_acc!=None or self.end_test_loss!=None or self.end_test_acc!=None: self._param=self.nn.param if batch!=None: @@ -509,12 +463,8 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) except AttributeError: self.nn.oopt(self.gradient,self.param,t) - if self.total_epoch[t]>=1: - if self.acc_flag1==1: - self.batch_acc=self.nn.accuracy(self.output,labels_batch) - if i==epoch-1: - self.output=self.nn.fp(data_batch) - self._batch_loss=self.nn.loss(self.output,labels_batch) + if self.acc_flag1==1: + self.batch_acc=self.nn.accuracy(self.output,labels_batch) try: self.nn.bc=j except AttributeError: @@ -539,12 +489,8 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) except AttributeError: self.nn.oopt(self.gradient,self.nn.param,t) - if self.total_epoch[t]>=1: - if self.acc_flag1==1: - self.batch_acc=self.nn.accuracy(self.output,labels_batch) - if i==epoch-1: - self.output=self.nn.fp(data_batch) - self._batch_loss=self.nn.loss(self.output,labels_batch) + if self.acc_flag1==1: + self.batch_acc=self.nn.accuracy(self.output,labels_batch) try: self.nn.bc+=1 except AttributeError: @@ 
-592,13 +538,8 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) except AttributeError: self.nn.oopt(self.gradient,self.nn.param,t) - if self.total_epoch[t]>=1: - if self.acc_flag1==1: - self.batch_acc=self.nn.accuracy(self.output,labels_batch) - if i==epoch-1: - self.output=self.nn.fp(data_batch) - self._batch_loss=self.nn.loss(self.output,labels_batch) - self._batch_acc=self.nn.accuracy(self.output,labels_batch) + if self.acc_flag1==1: + self.batch_acc=self.nn.accuracy(self.output,labels_batch) try: self.nn.bc=j except AttributeError: @@ -623,12 +564,8 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) except AttributeError: self.nn.oopt(self.gradient,self.nn.param,t) - if self.total_epoch[t]>=1: - if self.acc_flag1==1: - self.batch_acc=self.nn.accuracy(self.output,labels_batch) - if i==epoch-1: - self.output=self.nn.fp(data_batch) - self._batch_loss=self.nn.loss(self.output,labels_batch) + if self.acc_flag1==1: + self.batch_acc=self.nn.accuracy(self.output,labels_batch) try: self.nn.bc=j except AttributeError: @@ -655,35 +592,21 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) except AttributeError: self.nn.oopt(self.gradient,self.nn.param) - if self.total_epoch[t]>=1: - self.loss=self._train_loss.numpy() - self.train_loss_list.append(self.loss.astype(np.float32)) - self.train_loss=self.loss - self.train_loss=self.train_loss.astype(np.float32) - if i==epoch-1: - self.output=self.nn.fp(self.train_data) - self._train_loss=self.nn.loss(self.output,self.train_labels) - self.loss=self._train_loss_.numpy() - self.train_loss_list.append(self.loss.astype(np.float32)) - self.train_loss=self.loss - self.train_loss=self.train_loss.astype(np.float32) + self.loss=self._train_loss.numpy() + self.train_loss_list.append(self.loss.astype(np.float32)) + self.train_loss=self.loss + self.train_loss=self.train_loss.astype(np.float32) + if self.acc_flag1==1: + self.acc=self.nn.accuracy(self.output,self.train_labels) + self.acc=self.acc.numpy() + self.train_acc_list.append(self.acc.astype(np.float32)) + self.train_acc=self.acc + self.train_acc=self.train_acc.astype(np.float32) + if self.test_flag==True: + self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) + self.test_loss_list.append(self.test_loss) if self.acc_flag1==1: - self.acc=self.nn.accuracy(self.output,self.train_labels) - self.acc=self.acc.numpy() - self.train_acc_list.append(self.acc.astype(np.float32)) - self.train_acc=self.acc - self.train_acc=self.train_acc.astype(np.float32) - if i==epoch-1: - self.acc=self.nn.accuracy(self.output,self.train_labels) - self.acc=self.acc.numpy() - self.train_acc_list.append(self.acc.astype(np.float32)) - self.train_acc=self.acc - self.train_acc=self.train_acc.astype(np.float32) - if self.test_flag==True: - self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) - self.test_loss_list.append(self.test_loss) - if self.acc_flag1==1: - self.test_acc_list.append(self.test_acc) + self.test_acc_list.append(self.test_acc) else: self.thread_lock.acquire() self.param=self.nn.param @@ -704,105 +627,64 @@ def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch= self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) except AttributeError: 
self.nn.oopt(self.gradient,self.nn.param,t) - if self.total_epoch[t]>=1: - self.loss=self._train_loss.numpy() - self.train_loss_list.append(self.loss.astype(np.float32)) - self.train_loss=self.loss - self.train_loss=self.train_loss.astype(np.float32) - if i==epoch-1: - self.output=self.nn.fp(self.train_data) - self._train_loss=self.nn.loss(self.output,self.train_labels) - self.loss=self._train_loss.numpy() - self.train_loss_list.append(self.loss.astype(np.float32)) - self.train_loss=self.loss - self.train_loss=self.train_loss.astype(np.float32) + self.loss=self._train_loss.numpy() + self.train_loss_list.append(self.loss.astype(np.float32)) + self.train_loss=self.loss + self.train_loss=self.train_loss.astype(np.float32) + if self.acc_flag1==1: + self.acc=self.nn.accuracy(self.output,self.train_labels) + self.acc=self.acc.numpy() + self.train_acc_list.append(self.acc.astype(np.float32)) + self.train_acc=self.acc + self.train_acc=self.train_acc.astype(np.float32) + if self.test_flag==True: + self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) + self.test_loss_list.append(self.test_loss) if self.acc_flag1==1: - self.acc=self.nn.accuracy(self.output,self.train_labels) - self.acc=self.acc.numpy() - self.train_acc_list.append(self.acc.astype(np.float32)) - self.train_acc=self.acc - self.train_acc=self.train_acc.astype(np.float32) - if i==epoch-1: - self.acc=self.nn.accuracy(self.output,self.train_labels) - self.acc=self.acc.numpy() - self.train_acc_list.append(self.acc.astype(np.float32)) - self.train_acc=self.acc - self.train_acc=self.train_acc.astype(np.float32) - if self.test_flag==True: - self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) - self.test_loss_list.append(self.test_loss) - if self.acc_flag1==1: - self.test_acc_list.append(self.test_acc) + self.test_acc_list.append(self.test_acc) self.thread_lock.release() return - def _train_(self,batch=None,epoch=None,data_batch=None,labels_batch=None,test_batch=None,t=None,i=None): + def _train_(self,batch=None,epoch=None,data_batch=None,labels_batch=None,test_batch=None,t=None): total_loss=0 - _total_loss=0 total_acc=0 - _total_acc=0 batches=int((self.shape0-self.shape0%batch)/batch) for j in range(batches): index1=j*batch index2=(j+1)*batch if self.acc_flag1==1: - self.train_(data_batch,labels_batch,batch,epoch,batches,test_batch,index1,index2,j,t,i) - if self.total_epoch[t]>=1: - total_loss+=self.batch_loss - total_acc+=self.batch_acc - if i==epoch-1: - _total_loss+=self._batch_loss - _total_acc+=self._batch_acc + self.train_(data_batch,labels_batch,batch,epoch,batches,test_batch,index1,index2,j,t) + total_loss+=self.batch_loss + total_acc+=self.batch_acc else: - self.train_(data_batch,labels_batch,batch,epoch,batches,test_batch,index1,index2,j,t,i) - if self.total_epoch[t]>=1: - total_loss+=self.batch_loss - if i==epoch-1: - _total_loss+=self._batch_loss + self.train_(data_batch,labels_batch,batch,epoch,batches,test_batch,index1,index2,j,t) + total_loss+=self.batch_loss if self.shape0%batch!=0: batches+=1 index1=batches*batch index2=batch-(self.shape0-batches*batch) - self.train_(data_batch,labels_batch,batch,epoch,batches,test_batch,index1,index2,j,t,i) + self.train_(data_batch,labels_batch,batch,epoch,batches,test_batch,index1,index2,j,t) if self.acc_flag1==1: - if self.total_epoch[t]>=1: - total_loss+=self.batch_loss - total_acc+=self.batch_acc - if i==epoch-1: - _total_loss+=self._batch_loss - _total_acc+=self._batch_acc + total_loss+=self.batch_loss + 
total_acc+=self.batch_acc else: - if self.total_epoch[t]>=1: - total_loss+=self.batch_loss - if i==epoch-1: - _total_loss+=self._batch_loss - if self.total_epoch[t]>=1: - loss=total_loss.numpy()/batches - if self.acc_flag1==1: - train_acc=total_acc.numpy()/batches - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) - if i==epoch-1: - loss=_total_loss.numpy()/batches - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) - if self.acc_flag1==1: - self.train_acc_list.append(train_acc.astype(np.float32)) - self.train_acc=train_acc - self.train_acc=self.train_acc.astype(np.float32) - if i==epoch-1: - train_acc=_total_acc.numpy()/batches - self.train_acc_list.append(train_acc.astype(np.float32)) - self.train_acc=train_acc - self.train_acc=self.train_acc.astype(np.float32) - if self.test_flag==True: - self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) - self.test_loss_list.append(self.test_loss) - if self.acc_flag1==1: - self.test_acc_list.append(self.test_acc) + total_loss+=self.batch_loss + loss=total_loss.numpy()/batches + if self.acc_flag1==1: + train_acc=total_acc.numpy()/batches + self.train_loss_list.append(loss.astype(np.float32)) + self.train_loss=loss + self.train_loss=self.train_loss.astype(np.float32) + if self.acc_flag1==1: + self.train_acc_list.append(train_acc.astype(np.float32)) + self.train_acc=train_acc + self.train_acc=self.train_acc.astype(np.float32) + if self.test_flag==True: + self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) + self.test_loss_list.append(self.test_loss) + if self.acc_flag1==1: + self.test_acc_list.append(self.test_acc) return @@ -844,17 +726,17 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N except AttributeError: pass if self.thread==None: - self._train(batch,epoch,test_batch,data_batch,labels_batch,i=i) + self._train(batch,epoch,test_batch,data_batch,labels_batch) else: t=self.t.pop() if self.PO==1: self.thread_lock.acquire() - self._train_(batch,epoch,data_batch,labels_batch,test_batch,t,i) + self._train_(batch,epoch,data_batch,labels_batch,test_batch,t) self.thread_lock.release() elif self.PO!=None: - self._train_(batch,epoch,data_batch,labels_batch,test_batch,t,i) + self._train_(batch,epoch,data_batch,labels_batch,test_batch,t) else: - self._train(batch,epoch,test_batch,data_batch,labels_batch,t,i) + self._train(batch,epoch,test_batch,data_batch,labels_batch,t) if self.thread==None: if epoch%10!=0: p=epoch-epoch%self.p @@ -897,17 +779,17 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N while True: t1=time.time() if self.thread==None: - self._train(epoch=epoch,test_batch=test_batch,i=i) + self._train(epoch=epoch,test_batch=test_batch) else: t=self.t.pop() if self.PO==1: self.thread_lock.acquire() - self._train_(epoch=epoch,test_batch=test_batch,t=t,i=i) + self._train_(epoch=epoch,test_batch=test_batch,t=t) self.thread_lock.release() elif self.PO!=None: - self._train_(epoch=epoch,test_batch=test_batch,t=t,i=i) + self._train_(epoch=epoch,test_batch=test_batch,t=t) else: - self._train(epoch=epoch,test_batch=test_batch,t=t,i=i) + self._train(epoch=epoch,test_batch=test_batch,t=t) i+=1 if self.thread==None: if epoch%10!=0: From 9271df49e15a0547da9524899bc2ed80c938f6ca Mon Sep 17 00:00:00 2001 From: 7NoteDancing 
<63648431+7NoteDancing@users.noreply.github.com> Date: Mon, 4 Jul 2022 11:23:28 +0800 Subject: [PATCH 34/99] Update kernel.py --- Note/create/kernel.py | 81 +++++++++++++++++++++++++------------------ 1 file changed, 48 insertions(+), 33 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 663567f58..e5fd3fd4b 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -1026,16 +1026,21 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - plt.figure(2) - plt.plot(np.arange(self.total_epoch),self.train_acc_list) - plt.title('train acc') - plt.xlabel('epoch') - plt.ylabel('acc') - print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc_flag2=='%': - print('train acc:{0:.1f}'.format(self.train_acc*100)) - else: - print('train acc:{0:.6f}'.format(self.train_acc)) + try: + if self.nn.accuracy!=None: + pass + plt.figure(2) + plt.plot(np.arange(self.total_epoch),self.train_acc_list) + plt.title('train acc') + plt.xlabel('epoch') + plt.ylabel('acc') + print('train loss:{0:.6f}'.format(self.train_loss)) + if self.acc_flag2=='%': + print('train acc:{0:.1f}'.format(self.train_acc*100)) + else: + print('train acc:{0:.6f}'.format(self.train_acc)) + except AttributeError: + pass return @@ -1046,16 +1051,21 @@ def test_visual(self): plt.title('test loss') plt.xlabel('epoch') plt.ylabel('loss') - plt.figure(2) - plt.plot(np.arange(self.total_epoch),self.test_acc_list) - plt.title('test acc') - plt.xlabel('epoch') - plt.ylabel('acc') - print('test loss:{0:.6f}'.format(self.test_loss)) - if self.acc_flag2=='%': - print('test acc:{0:.1f}'.format(self.test_acc*100)) - else: - print('test acc:{0:.6f}'.format(self.test_acc)) + try: + if self.nn.accuracy!=None: + pass + plt.figure(2) + plt.plot(np.arange(self.total_epoch),self.test_acc_list) + plt.title('test acc') + plt.xlabel('epoch') + plt.ylabel('acc') + print('test loss:{0:.6f}'.format(self.test_loss)) + if self.acc_flag2=='%': + print('test acc:{0:.1f}'.format(self.test_acc*100)) + else: + print('test acc:{0:.6f}'.format(self.test_acc)) + except AttributeError: + pass return @@ -1069,19 +1079,24 @@ def comparison(self): plt.xlabel('epoch') plt.ylabel('loss') plt.legend() - plt.figure(2) - plt.plot(np.arange(self.total_epoch),self.train_acc_list,'b-',label='train acc') - if self.test_flag==True: - plt.plot(np.arange(self.total_epoch),self.test_acc_list,'r-',label='test acc') - plt.title('accuracy') - plt.xlabel('epoch') - plt.ylabel('acc') - plt.legend() - print('train loss:{0}'.format(self.train_loss)) - if self.acc_flag2=='%': - print('train acc:{0:.1f}'.format(self.train_acc*100)) - else: - print('train acc:{0:.6f}'.format(self.train_acc)) + try: + if self.nn.accuracy!=None: + pass + plt.figure(2) + plt.plot(np.arange(self.total_epoch),self.train_acc_list,'b-',label='train acc') + if self.test_flag==True: + plt.plot(np.arange(self.total_epoch),self.test_acc_list,'r-',label='test acc') + plt.title('accuracy') + plt.xlabel('epoch') + plt.ylabel('acc') + plt.legend() + print('train loss:{0}'.format(self.train_loss)) + if self.acc_flag2=='%': + print('train acc:{0:.1f}'.format(self.train_acc*100)) + else: + print('train acc:{0:.6f}'.format(self.train_acc)) + except AttributeError: + pass if self.test_flag==True: print() print('-------------------------------------') From 90d73095381dae2ec825dc972476e86a0d52aedc Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Mon, 4 Jul 2022 12:03:35 +0800 Subject: 
[PATCH 35/99] Update kernel.py --- Note/create/RL/kernel.py | 113 ++++++--------------------------------- 1 file changed, 15 insertions(+), 98 deletions(-) diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py index 6592afbf3..7d02e1a7d 100644 --- a/Note/create/RL/kernel.py +++ b/Note/create/RL/kernel.py @@ -413,7 +413,7 @@ def index_matrix(self,i): return - def learn1(self,i,j=None,batches=None,length=None,episode_num=None,k=None): + def learn1(self,i,j=None,batches=None,length=None): if len(self.state_pool[i])=1: - self.loss[i]+=batch_loss - if k==episode_num-1: - batch_loss=self.nn.loss(self.nn.nn,state_batch,action_batch,next_state_batch,reward_batch) - self._loss[i]+=batch_loss + self.loss[i]+=batch_loss elif len(self.nn.param)==4: self.value_gradient=tape.gradient(TD,self.nn.param[0]) self.actor_gradient=tape.gradient(value,action_batch)*tape.gradient(action_batch,self.nn.param[1]) self.opt(self.value_gradient,self.actor_gradient,self.nn.param) - if j>=1: - self.loss[i]+=TD - if k==episode_num-1: - value=self.nn.nn[0](state_batch,p=0) - TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch,p=2)-value)**2) - self._loss[i]+=TD + self.loss[i]+=TD else: self.value_gradient=tape.gradient(TD,self.nn.param[0]) self.actor_gradient=TD*tape.gradient(tf.math.log(action_batch),self.nn.param[1]) self.opt(self.value_gradient,self.actor_gradient,self.nn.param) - if j>=1: - self.loss[i]+=TD - if k==episode_num-1: - value=self.nn.nn[0](state_batch) - TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch)-value)**2) - self._loss[i]+=TD + self.loss[i]+=TD else: self.thread_lock.acquire() self.param=self.nn.param @@ -550,27 +536,10 @@ def learn1(self,i,j=None,batches=None,length=None,episode_num=None,k=None): self.thread_lock.acquire() if type(self.nn.nn)!=list: self.opt(self.gradient,self.nn.param[0],self.lr) - if j>=1: - self.loss[i]+=self.batch_loss - if k==episode_num-1: - self.batch_loss=self.nn.loss(self.nn.nn,state_batch,action_batch,next_state_batch,reward_batch) - self._loss[i]+=self.batch_loss - elif len(self.nn.param)==4: - self.opt(self.value_gradient,self.actor_gradient,self.nn.param) - if j>=1: - self.loss[i]+=self.TD - if k==episode_num-1: - self.value=self.nn.nn[0](state_batch,p=0) - self.TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch,p=2)-self.value)**2) - self._loss[i]+=self.TD + self.loss[i]+=self.batch_loss else: self.opt(self.value_gradient,self.actor_gradient,self.nn.param) - if j>=1: - self.loss[i]+=self.TD - if k==episode_num-1: - self.value=self.nn.nn[0](state_batch) - self.TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch)-self.value)**2) - self._loss[i]+=self.TD + self.loss[i]+=self.TD self.thread_lock.release() try: self.nn.bc[i]=j @@ -603,27 +572,16 @@ def learn1(self,i,j=None,batches=None,length=None,episode_num=None,k=None): self.gradient=tape.gradient(batch_loss,self.nn.param[0]) self.opt(self.gradient,self.nn.param[0],self.lr) self.loss[i]+=batch_loss - if k==episode_num-1: - batch_loss=self.nn.loss(self.nn.nn,state_batch,action_batch,next_state_batch,reward_batch) - self._loss[i]+=batch_loss elif len(self.nn.param)==4: self.value_gradient=tape.gradient(TD,self.nn.param[0]) self.actor_gradient=tape.gradient(value,action_batch)*tape.gradient(action_batch,self.nn.param[1]) self.opt(self.value_gradient,self.actor_gradient,self.nn.param) self.loss[i]+=TD - if k==episode_num-1: - value=self.nn.nn[0](state_batch,p=0) - 
TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch,p=2)-value)**2) - self._loss[i]+=TD else: self.value_gradient=tape.gradient(TD,self.nn.param[0]) self.actor_gradient=TD*tape.gradient(tf.math.log(action_batch),self.nn.param[1]) self.opt(self.value_gradient,self.actor_gradient,self.nn.param) self.loss[i]+=TD - if k==episode_num-1: - value=self.nn.nn[0](state_batch) - TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch,)-value)**2) - self._loss[i]+=TD else: self.thread_lock.acquire() self.param=self.nn.param @@ -649,23 +607,9 @@ def learn1(self,i,j=None,batches=None,length=None,episode_num=None,k=None): if type(self.nn.nn)!=list: self.opt(self.gradient,self.nn.param[0],self.lr) self.loss[i]+=self.batch_loss - if k==episode_num-1: - self.batch_loss=self.nn.loss(self.nn.nn,state_batch,action_batch,next_state_batch,reward_batch) - self._loss[i]+=self.batch_loss - elif len(self.nn.param)==4: - self.opt(self.value_gradient,self.actor_gradient,self.nn.param) - self.loss[i]+=self.TD - if k==episode_num-1: - self.value=self.nn.nn[0](state_batch,p=0) - self.TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch,p=2)-self.value)**2) - self._loss[i]+=self.TD else: self.opt(self.value_gradient,self.actor_gradient,self.nn.param) self.loss[i]+=self.TD - if k==episode_num-1: - self.value=self.nn.nn[0](state_batch) - self.TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch)-self.value)**2) - self._loss[i]+=self.TD self.thread_lock.release() try: self.nn.bc[i]+=1 @@ -674,7 +618,7 @@ def learn1(self,i,j=None,batches=None,length=None,episode_num=None,k=None): return - def learn2(self,i,episode_num=None,k=None): + def learn2(self,i): length=min(len(self.state_pool[i]),len(self.action_pool[i]),len(self.next_state_pool[i]),len(self.reward_pool[i])) train_ds=tf.data.Dataset.from_tensor_slices((self.state_pool[i][:length],self.action_pool[i][:length],self.next_state_pool[i][:length],self.reward_pool[i][:length])).shuffle(length).batch(self.batch) for state_batch,action_batch,next_state_batch,reward_batch in train_ds: @@ -692,27 +636,16 @@ def learn2(self,i,episode_num=None,k=None): self.gradient=tape.gradient(batch_loss,self.nn.param[0]) self.opt(self.gradient,self.nn.param[0],self.lr) self.loss[i]+=batch_loss - if k==episode_num-1: - batch_loss=self.nn.loss(self.nn.nn,state_batch,action_batch,next_state_batch,reward_batch) - self._loss[i]+=batch_loss elif len(self.nn.param)==4: self.value_gradient=tape.gradient(TD,self.nn.param[0]) self.actor_gradient=tape.gradient(value,action_batch)*tape.gradient(action_batch,self.nn.param[1]) self.opt(self.value_gradient,self.actor_gradient,self.nn.param) self.loss[i]+=TD - if k==episode_num-1: - value=self.nn.nn[0](state_batch,p=0) - TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch,p=2)-value)**2) - self._loss[i]+=TD else: self.value_gradient=tape.gradient(TD,self.nn.param[0]) self.actor_gradient=TD*tape.gradient(tf.math.log(action_batch),self.nn.param[1]) self.opt(self.value_gradient,self.actor_gradient,self.nn.param) self.loss[i]+=TD - if k==episode_num-1: - value=self.nn.nn[0](state_batch,param=0) - TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch,param=1)-value)**2) - self._loss[i]+=TD else: self.thread_lock.acquire() self.param=self.nn.param @@ -738,23 +671,9 @@ def learn2(self,i,episode_num=None,k=None): if type(self.nn.nn)!=list: self.opt(self.gradient,self.nn.param[0],self.lr) self.loss[i]+=self.batch_loss - if 
k==episode_num-1: - self.batch_loss=self.nn.loss(self.nn.nn,state_batch,action_batch,next_state_batch,reward_batch) - self._loss[i]+=self.batch_loss - elif len(self.nn.param)==4: - self.opt(self.value_gradient,self.actor_gradient,self.nn.param) - self.loss[i]+=self.TD - if k==episode_num-1: - self.value=self.nn.nn[0](state_batch,p=0) - self.TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch,p=2)-self.value)**2) - self._loss[i]+=self.TD else: self.opt(self.value_gradient,self.actor_gradient,self.nn.param) self.loss[i]+=self.TD - if k==episode_num-1: - self.value=self.nn.nn[0](state_batch) - self.TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch)-self.value)**2) - self._loss[i]+=self.TD self.thread_lock.release() try: self.nn.bc[i]+=1 @@ -763,10 +682,10 @@ def learn2(self,i,episode_num=None,k=None): return - def learn3(self,i,episode_num,k): + def learn3(self,i): self.a+=1 if len(self.state_pool[i]) Date: Mon, 4 Jul 2022 12:03:51 +0800 Subject: [PATCH 36/99] Update kernel.py --- Note/create/RL/st/kernel.py | 68 +++++++------------------------------ 1 file changed, 13 insertions(+), 55 deletions(-) diff --git a/Note/create/RL/st/kernel.py b/Note/create/RL/st/kernel.py index e71c8a4aa..33208d0b1 100644 --- a/Note/create/RL/st/kernel.py +++ b/Note/create/RL/st/kernel.py @@ -181,7 +181,7 @@ def get_episode(self,s): return episode - def learn1(self,episode_num,i): + def learn1(self): if self.end_loss!=None: self.param=self.nn.param if len(self.state_pool)=1: - loss+=batch_loss - if i==episode_num-1: - batch_loss=self.nn.loss(self.nn.nn,state_batch,action_batch,next_state_batch,reward_batch) - self.loss+=batch_loss + loss+=batch_loss elif len(self.nn.param)==4: value_gradient=tape.gradient(TD,self.nn.param[0]) actor_gradient=tape.gradient(value,self.action_pool)*tape.gradient(self.action_pool,self.nn.param[1]) self.opt(value_gradient,actor_gradient,self.nn.param) - if j>=1: - loss+=TD - if i==episode_num-1: - value=self.nn.nn[0](state_batch,p=0) - TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch,p=2)-value)**2) - self.loss+=TD + loss+=TD else: value_gradient=tape.gradient(TD,self.nn.param[0]) actor_gradient=TD*tape.gradient(tf.math.log(action_batch),self.nn.param[1]) self.opt(value_gradient,actor_gradient,self.nn.param) - if j>=1: - loss+=TD - if i==episode_num-1: - value=self.nn.nn[0](state_batch) - TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch)-value)**2) - self.loss+=TD + loss+=TD try: self.nn.bc=j except AttributeError: @@ -278,28 +264,16 @@ def learn1(self,episode_num,i): gradient=tape.gradient(batch_loss,self.nn.param[0]) self.opt(gradient,self.nn.param[0]) loss+=batch_loss - if i==episode_num-1: - batch_loss=self.nn.loss(self.nn.nn,state_batch,action_batch,next_state_batch,reward_batch) - self.loss+=batch_loss elif len(self.nn.param)==4: value_gradient=tape.gradient(TD,self.nn.param[0]) actor_gradient=tape.gradient(value,self.action_pool)*tape.gradient(self.action_pool,self.nn.param[1]) self.opt(value_gradient,actor_gradient,self.nn.param) - if j>=1: - loss+=TD - if i==episode_num-1: - value=self.nn.nn[0](state_batch,p=0) - TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch,p=2)-value)**2) - self.loss+=TD + loss+=TD else: value_gradient=tape.gradient(TD,self.nn.param[0]) actor_gradient=TD*tape.gradient(tf.math.log(action_batch),self.nn.param[1]) self.opt(value_gradient,actor_gradient,self.nn.param) loss+=TD - if i==episode_num-1: - 
value=self.nn.nn[0](state_batch) - TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch)-value)**2) - self.loss+=TD try: self.nn.bc+=1 except AttributeError: @@ -324,31 +298,17 @@ def learn1(self,episode_num,i): if type(self.nn.nn)!=list: gradient=tape.gradient(batch_loss,self.nn.param[0]) self.opt(gradient,self.nn.param[0]) - if j>=1: - loss+=batch_loss - if i==episode_num-1: - batch_loss=self.nn.loss(self.nn.nn,state_batch,action_batch,next_state_batch,reward_batch) - self.loss+=batch_loss + loss+=batch_loss elif len(self.nn.param)==4: value_gradient=tape.gradient(TD,self.nn.param[0]) actor_gradient=tape.gradient(value,self.action_pool)*tape.gradient(self.action_pool,self.nn.param[1]) self.opt(value_gradient,actor_gradient,self.nn.param) - if j>=1: - loss+=TD - if i==episode_num-1: - value=self.nn.nn[0](state_batch,p=0) - TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch,p=2)-value)**2) - self.loss+=TD + loss+=TD else: value_gradient=tape.gradient(TD,self.nn.param[0]) actor_gradient=TD*tape.gradient(tf.math.log(action_batch),self.nn.param[1]) self.opt(value_gradient,actor_gradient,self.nn.param) - if j>=1: - loss+=TD - if i==episode_num-1: - value=self.nn.nn[0](state_batch) - TD=tf.reduce_mean((reward_batch+self.discount*self.nn.nn[0](next_state_batch)-value)**2) - self.loss+=TD + loss+=TD j+=1 try: self.nn.bc+=1 @@ -363,12 +323,10 @@ def learn1(self,episode_num,i): loss=loss.numpy() else: loss=loss.numpy()/batches - if i==episode_num-1: - self.loss=self.loss.numpy()/batches return loss - def learn2(self,episode_num,i): + def learn2(self): episode=[] if self.state_name==None: s=self.nn.explore(init=True) @@ -469,7 +427,7 @@ def learn2(self,episode_num,i): else: episode=[self.state_name[s],self.action_name[a],self.state_name[next_s],r] s=next_s - loss=self.learn1(episode_num,i) + loss=self.learn1() t2=time.time() self.time+=(t2-t1) else: @@ -555,7 +513,7 @@ def learn2(self,episode_num,i): else: episode=[self.state_name[s],self.action_name[a],self.state_name[next_s],r] s=next_s - loss=self.learn1(episode_num,i) + loss=self.learn1() t2=time.time() self.time+=(t2-t1) return loss,episode,end @@ -577,7 +535,7 @@ def learn(self,episode_num,path=None,one=True,p=None,s=None): loss=0 if episode_num!=None: for i in range(episode_num): - loss,episode,end=self.learn2(episode_num,i) + loss,episode,end=self.learn2() self.loss_list.append(loss) if i==episode_num-1: self.loss_list.append(self.loss) @@ -611,7 +569,7 @@ def learn(self,episode_num,path=None,one=True,p=None,s=None): elif self.ol==None: i=0 while True: - loss,episode,end=self.learn2(episode_num,i) + loss,episode,end=self.learn2() self.loss_list.append(loss) if i==episode_num-1: self.loss_list.append(self.loss) From 1a3de97904ef867fafac436968a2569f3bc44228 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Mon, 4 Jul 2022 12:35:40 +0800 Subject: [PATCH 37/99] Update kernel.py --- Note/create/RL/kernel.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py index 7d02e1a7d..87e9af7b0 100644 --- a/Note/create/RL/kernel.py +++ b/Note/create/RL/kernel.py @@ -842,27 +842,25 @@ def train_visual(self): return - def save_p(self,path): - parameter_file=open(path+'.dat','wb') + def save_p(self): + parameter_file=open('parameter.dat','wb') pickle.dump(self.value_p,parameter_file) parameter_file.close() return - def save_e(self,path): - 
episode_file=open(path+'.dat','wb')
+        episode_file=open('episode.dat','wb')
         pickle.dump(self.episode,episode_file)
         episode_file.close()
         return
 
 
-    def save(self,path):
-        output_file=open(path+'\save.dat','wb')
-        path=path+'\save.dat'
-        index=path.rfind('\\')
-        parameter_file=open(path.replace(path[index+1:],'parameter.dat'),'wb')
+    def save(self):
+        output_file=open('save.dat','wb')
+        parameter_file=open('parameter.dat','wb')
         if self.save_episode==True:
-            episode_file=open(path.replace(path[index+1:],'episode.dat'),'wb')
+            episode_file=open('episode.dat','wb')
             pickle.dump(self.episode,episode_file)
             episode_file.close()
         self.one_list=self.one_list*0

From 190a43eafc54240fe17ba8717a078583328efedc Mon Sep 17 00:00:00 2001
From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com>
Date: Mon, 4 Jul 2022 12:35:53 +0800
Subject: [PATCH 38/99] Update kernel.py

---
 Note/create/RL/st/kernel.py | 38 +++++++++++++++++--------------------
 1 file changed, 17 insertions(+), 21 deletions(-)

diff --git a/Note/create/RL/st/kernel.py b/Note/create/RL/st/kernel.py
index 33208d0b1..b58d90238 100644
--- a/Note/create/RL/st/kernel.py
+++ b/Note/create/RL/st/kernel.py
@@ -519,7 +519,7 @@ def learn2(self):
         return loss,episode,end
 
 
-    def learn(self,episode_num,path=None,one=True,p=None,s=None):
+    def learn(self,episode_num,save=None,one=True,p=None,s=None):
         if p==None and s==None:
             self.p=9
             self.s=2
@@ -551,7 +551,7 @@
                 if i%d==0:
                     print('episode num:{0} loss:{1:.6f}'.format(i+1,loss))
                 if save!=None and i%e==0:
-                    self.save(path,i,one)
+                    self.save(i,one)
                 self.epi_num+=1
                 self.total_episode+=1
                 if self.save_episode==True:
@@ -585,8 +585,8 @@
                     e=d*self.s
                 if i%d==0:
                     print('episode num:{0} loss:{1:.6f}'.format(i+1,loss))
-                if path!=None and i%e==0:
-                    self.save(path,i,one)
+                if save!=None and i%e==0:
+                    self.save(i,one)
                 self.epi_num+=1
                 self.total_e+=1
                 if self.save_episode==True:
@@ -693,8 +693,8 @@
            except AttributeError:
                pass
            self.total_e+=1
-        if path!=None:
-            self.save(path)
+        if save!=None:
+            self.save()
         self._time=self.time-int(self.time)
         if self._time<0.5:
             self.time=int(self.time)
@@ -718,37 +718,33 @@ def train_visual(self):
         return
 
 
-    def save_p(self,path):
-        parameter_file=open(path+'.dat','wb')
+    def save_p(self):
+        parameter_file=open('parameter.dat','wb')
         pickle.dump(self.nn.param,parameter_file)
         parameter_file.close()
         return
 
 
-    def save_e(self,path):
-        episode_file=open(path+'.dat','wb')
+    def save_e(self):
+        episode_file=open('episode.dat','wb')
         pickle.dump(self.episode,episode_file)
         episode_file.close()
         return
 
 
-    def save(self,path,i=None,one=True):
+    def save(self,i=None,one=True):
         if one==True:
-            output_file=open(path+'\save.dat','wb')
-            path=path+'\save.dat'
-            index=path.rfind('\\')
-            parameter_file=open(path.replace(path[index+1:],'parameter.dat'),'wb')
+            output_file=open('save.dat','wb')
+            parameter_file=open('parameter.dat','wb')
             if self.save_episode==True:
-                episode_file=open(path.replace(path[index+1:],'episode.dat'),'wb')
+                episode_file=open('episode.dat','wb')
                 pickle.dump(self.episode,episode_file)
                 episode_file.close()
         else:
-            output_file=open(path+'\save-{0}.dat'.format(i),'wb')
-            path=path+'\save-{0}.dat'.format(i)
-            index=path.rfind('\\')
-            parameter_file=open(path.replace(path[index+1:],'parameter-{0}.dat'.format(i)),'wb')
+            output_file=open('save-{0}.dat'.format(i),'wb')
+
parameter_file=open('parameter-{0}.dat'.format(i),'wb') if self.save_episode==True: - episode_file=open(path.replace(path[index+1:],'episode-{0}.dat'.format(i)),'wb') + episode_file=open('episode-{0}.dat'.format(i),'wb') pickle.dump(self.episode,episode_file) episode_file.close() self.episode_num=self.epi_num From ec809c5bcb287dbd3cae4a6746a5cf63a870720a Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Mon, 4 Jul 2022 12:36:09 +0800 Subject: [PATCH 39/99] Update kernel.py --- Note/create/kernel.py | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index e5fd3fd4b..44776a03b 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -688,7 +688,7 @@ def _train_(self,batch=None,epoch=None,data_batch=None,labels_batch=None,test_ba return - def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=None,s=None): + def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s=None): self.batch=batch self.epoch=0 t1=None @@ -763,8 +763,8 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch+i+1,self.train_loss,self.test_loss)) - if file_path!=None and i%s==0: - self.save(file_path,self.total_epoch,one) + if save!=None and i%s==0: + self.save(self.total_epoch,one) t2=time.time() if self.thread==None: self.time+=(t2-t1) @@ -817,8 +817,8 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss)) else: print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch+i+1,self.train_loss,self.test_loss)) - if file_path!=None and i%s==0: - self.save(file_path,self.total_epoch,one) + if save!=None and i%s==0: + self.save(self.total_epoch,one) if self.thread==None: try: self.nn.ec+=1 @@ -846,14 +846,14 @@ def train(self,batch=None,epoch=None,test_batch=None,file_path=None,one=True,p=N train_loss=self.nn.loss(output,data[1]) loss=train_loss.numpy() self.nn.train_loss=loss.astype(np.float32) - if file_path!=None: - self.save(file_path) + if save!=None: + self.save() try: self.nn.ec+=1 except AttributeError: pass - if file_path!=None: - self.save(file_path) + if save!=None: + self.save() if self.thread==None: self._time=self.time-int(self.time) if self._time<0.5: @@ -1109,8 +1109,8 @@ def comparison(self): return - def save_p(self,path): - parameter_file=open(path+'.dat','wb') + def save_p(self): + parameter_file=open('parameter.dat','wb') pickle.dump(self.nn.param,parameter_file) parameter_file.close() return @@ -1118,15 +1118,11 @@ def save_p(self,path): def save(self,path,i=None,one=True): if one==True: - output_file=open(path+'\save.dat','wb') - path=path+'\save.dat' - index=path.rfind('\\') - parameter_file=open(path.replace(path[index+1:],'parameter.dat'),'wb') + output_file=open('save.dat','wb') + parameter_file=open('parameter.dat','wb') else: - output_file=open(path+'\save-{0}.dat'.format(i),'wb') - path=path+'\save-{0}.dat'.format(i) - index=path.rfind('\\') - parameter_file=open(path.replace(path[index+1:],'parameter-{0}.dat'.format(i)),'wb') + output_file=open('save-{0}.dat'.format(i),'wb') + parameter_file=open('parameter-{0}.dat'.format(i),'wb') 
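#    (Illustrative usage sketch, not part of the patch; argument values are
#    assumptions. With the path parameter gone, checkpoints are written to
#    the current working directory.)
#        k=kernel(nn)
#        k.data(train_data,train_labels)
#        k.train(batch=32,epoch=100,save=True,one=False,s=2)
#        # expected artifacts in cwd: save-<epoch>.dat and parameter-<epoch>.dat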
            self.file_list.append(['save-{0}.dat'.format(i),'parameter-{0}.dat'.format(i)])
            if len(self.file_list)>self.s+1:
                os.remove(self.file_list[0][0])

From e491b902234b8dd34b2f5105c62c1facbe7ad869 Mon Sep 17 00:00:00 2001
From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com>
Date: Mon, 4 Jul 2022 19:42:21 +0800
Subject: [PATCH 40/99] Update kernel.py

---
 Note/create/kernel.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/Note/create/kernel.py b/Note/create/kernel.py
index 44776a03b..c0b4c1551 100644
--- a/Note/create/kernel.py
+++ b/Note/create/kernel.py
@@ -760,9 +760,9 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
                         print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(i+1,self.train_loss,self.test_loss))
                 else:
                     if self.test_flag==False:
-                        print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss))
+                        print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss))
                     else:
-                        print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch+i+1,self.train_loss,self.test_loss))
+                        print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch,self.train_loss,self.test_loss))
                 if save!=None and i%s==0:
                     self.save(self.total_epoch,one)
                 t2=time.time()
@@ -814,9 +814,9 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
                         print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(i+1,self.train_loss,self.test_loss))
                 else:
                     if self.test_flag==False:
-                        print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch+i+1,self.train_loss))
+                        print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss))
                     else:
-                        print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch+i+1,self.train_loss,self.test_loss))
+                        print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch,self.train_loss,self.test_loss))
                 if save!=None and i%s==0:
                     self.save(self.total_epoch,one)
             if self.thread==None:

From f0fd70846222bbdc6fd3bda581066c96be49e7a9 Mon Sep 17 00:00:00 2001
From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com>
Date: Wed, 27 Jul 2022 13:47:31 +0800
Subject: [PATCH 41/99] Update kernel.py

---
 Note/create/RL/st/kernel.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/Note/create/RL/st/kernel.py b/Note/create/RL/st/kernel.py
index b58d90238..81325061f 100644
--- a/Note/create/RL/st/kernel.py
+++ b/Note/create/RL/st/kernel.py
@@ -750,7 +750,9 @@ def save(self,i=None,one=True):
         self.episode_num=self.epi_num
         pickle.dump(self.nn.param,parameter_file)
         self.nn.param=None
+        self.nn.opt=None
         pickle.dump(self.nn,output_file)
+        pickle.dump(self.opt.get_config(),output_file)
         pickle.dump(self.ol,output_file)
         pickle.dump(self.state_pool,output_file)
         pickle.dump(self.action_pool,output_file)
@@ -794,7 +796,7 @@ def restore(self,s_path,p_path,e_path=None):
                 self.nn.km=1
         except AttributeError:
             pass
-        self.opt=self.nn.opt
+        self.config=pickle.load(input_file)
         self.ol=pickle.load(input_file)
         self.state_pool=pickle.load(input_file)
         self.action_pool=pickle.load(input_file)

From 6dd037340054260a70cce651bc069431e1f4f2d5 Mon Sep 17 00:00:00 2001
From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com>
Date: Wed, 27 Jul 2022 13:47:47 +0800
Subject: [PATCH 42/99] Update kernel.py

---
 Note/create/RL/kernel.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py
index 87e9af7b0..36ed1e438 100644
--- a/Note/create/RL/kernel.py
+++
b/Note/create/RL/kernel.py @@ -866,7 +866,9 @@ def save(self): self.one_list=self.one_list*0 pickle.dump(self.nn.param,parameter_file) self.nn.param=None + self.nn.opt=None pickle.dump(self.nn,output_file) + pickle.dump(self.opt.get_config(),output_file) pickle.dump(self.state_pool,output_file) pickle.dump(self.action_pool,output_file) pickle.dump(self.next_state_pool,output_file) @@ -918,7 +920,7 @@ def restore(self,s_path,p_path,e_path=None): self.nn.km=1 except AttributeError: pass - self.opt=self.nn.opt + self.config=pickle.load(input_file) self.state_pool=pickle.load(input_file) self.action_pool=pickle.load(input_file) self.next_state_pool=pickle.load(input_file) From 6840baeed5b149aa41dd45239ecaec5b0518ba9a Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Wed, 27 Jul 2022 13:48:02 +0800 Subject: [PATCH 43/99] Update kernel.py --- Note/create/kernel.py | 1204 +++++++++++++++++++++++++++-------------- 1 file changed, 795 insertions(+), 409 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index c0b4c1551..d1474d093 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -1,4 +1,4 @@ -import tensorflow as tf +from tensorflow import function import numpy as np import matplotlib.pyplot as plt import pickle @@ -14,18 +14,24 @@ def __init__(self,nn=None): self.nn.km=1 except AttributeError: pass + self.core=None self.PO=None self.thread_lock=None self.thread=None self.ol=None + self.suspend=False + self.stop=None + self.stop_flag=None + self.end_flag=None + self.save_flag=None + self.save_epoch=None self.batch=None self.epoch=0 self.end_loss=None self.end_acc=None self.end_test_loss=None self.end_test_acc=None - self.acc_flag1=None - self.acc_flag2='%' + self.acc_flag='%' self.train_counter=0 self.train_loss=None self.train_acc=None @@ -57,6 +63,7 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None): else: self.shape0=train_data.shape[0] if self.thread!=None: + self.t=-np.arange(-(self.thread-1),1) self.t=list(self.t) try: self.nn.ec=np.zeros(self.thread) @@ -71,16 +78,20 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None): self.train_acc=np.zeros(self.thread) self.train_loss_list=[[] for _ in range(self.thread)] self.train_acc_list=[[] for _ in range(self.thread)] + self.epoch=np.zeros(self.thread) + self.total_epoch=np.zeros(self.thread) + self.time=np.zeros(self.thread) + self.total_time=np.zeros(self.thread) if test_data!=None: if self.PO==None: self.test_loss=np.zeros(self.thread) self.test_acc=np.zeros(self.thread) self.test_loss_list=[[] for _ in range(self.thread)] self.test_acc_list=[[] for _ in range(self.thread)] - self.epoch=np.zeros(self.thread) - self.total_epoch=np.zeros(self.thread) - self.time=np.zeros(self.thread) - self.total_time=np.zeros(self.thread) + if self.PO==3: + self.batch_shape0=int(self.shape0/self.thread) + self.shape0=self.batch_shape0 + return @@ -92,6 +103,7 @@ def init(self,param=None): self.test_loss_list.clear() self.test_acc_list.clear() self.test_flag=False + self.train_counter=0 self.epoch=0 self.total_epoch=0 self.time=0 @@ -141,12 +153,6 @@ def set_end(self,end_loss=None,end_acc=None,end_test_loss=None,end_test_acc=None return - def apply_gradient(self,tape,opt,loss,parameter): - gradient=tape.gradient(loss,parameter) - opt.apply_gradients(zip(gradient,parameter)) - return - - def end(self): if self.end_loss!=None and self.train_loss<=self.end_loss: return True @@ -165,9 +171,13 @@ def end(self): def 
loss_acc(self,output=None,labels_batch=None,loss=None,test_batch=None,total_loss=None,total_acc=None,t=None): if self.batch!=None: total_loss+=loss - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass batch_acc=self.nn.accuracy(output,labels_batch) total_acc+=batch_acc + except AttributeError: + pass return total_loss,total_acc elif self.ol==None: loss=loss.numpy() @@ -179,7 +189,9 @@ def loss_acc(self,output=None,labels_batch=None,loss=None,test_batch=None,total_ self.train_loss_list[t].append(loss.astype(np.float32)) self.train_loss[t]=loss self.train_loss[t]=self.train_loss[t].astype(np.float32) - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass if self.thread==None: acc=self.nn.accuracy(output,self.train_labels) acc=acc.numpy() @@ -192,74 +204,386 @@ def loss_acc(self,output=None,labels_batch=None,loss=None,test_batch=None,total_ self.train_acc_list[t].append(acc.astype(np.float32)) self.train_acc[t]=acc self.train_acc[t]=self.train_acc[t].astype(np.float32) + except AttributeError: + pass if self.test_flag==True: if self.thread==None: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass self.test_acc_list.append(self.test_acc) + except AttributeError: + pass else: self.test_loss[t],self.test_acc[t]=self.test(self.test_data,self.test_labels,test_batch,t) self.test_loss_list[t].append(self.test_loss[t]) - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass self.test_acc_list[t].append(self.test_acc[t]) + except AttributeError: + pass return - def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_batch=None,t=None): - if self.end_loss!=None or self.end_acc!=None or self.end_test_loss!=None or self.end_test_acc!=None: - self._param=self.nn.param - if batch!=None: - total_loss=0 - total_acc=0 - batches=int((self.shape0-self.shape0%batch)/batch) - for j in range(batches): - index1=j*batch - index2=(j+1)*batch - if type(self.train_data)==list: + def data_func(self,data_batch=None,labels_batch=None,batch=None,index1=None,index2=None,j=None,flag=None,t=None): + if self.PO==3 and t+1!=self.thread and self.shape0%self.thread!=0: + _index1=self.thread*self.batch_shape0 + _index2=self.batch_shape0-(self.shape0-self.thread*self.batch_shape0) + elif self.PO==3: + _index1=t*self.batch_shape0 + _index2=(t+1)*self.batch_shape0 + if flag==None: + if type(self.train_data)==list: + if self.PO!=3: for i in range(len(self.train_data)): if batch!=1: data_batch[i]=self.train_data[i][index1:index2] else: data_batch[i]=self.train_data[i][j] else: + for i in range(len(self.train_data)): + if batch!=1: + if t+1!=self.thread and self.shape0%self.thread!=0: + data_batch[i]=self.train_data[i][index1:index2] + else: + data_batch[i]=self.train_data[i][_index1:_index2][index1:index2] + else: + if t+1!=self.thread and self.shape0%self.thread!=0: + data_batch[i]=self.train_data[i][j] + else: + data_batch[i]=self.train_data[i][_index1:_index2][j] + else: + if self.PO!=3: if batch!=1: data_batch=self.train_data[index1:index2] else: data_batch=self.train_data[j] - if type(self.train_labels)==list: + else: + if batch!=1: + if t+1!=self.thread and self.shape0%self.thread!=0: + data_batch=self.train_data[index1:index2] + else: + data_batch=self.train_data[_index1:_index2][index1:index2] + else: + if t+1!=self.thread and self.shape0%self.thread!=0: + data_batch=self.train_data[j] + else: + 
data_batch=self.train_data[_index1:_index2][j] + if type(self.train_labels)==list: + if self.PO!=3: for i in range(len(self.train_data)): if batch!=1: labels_batch[i]=self.train_labels[i][index1:index2] else: labels_batch[i]=self.train_labels[i][j] else: + for i in range(len(self.train_data)): + if batch!=1: + if t+1!=self.thread and self.shape0%self.thread!=0: + labels_batch[i]=self.train_labels[i][index1:index2] + else: + labels_batch[i]=self.train_labels[i][_index1:_index2][index1:index2] + else: + if t+1!=self.thread and self.shape0%self.thread!=0: + labels_batch[i]=self.train_labels[i][j] + else: + labels_batch[i]=self.train_labels[i][_index1:_index2][j] + else: + if self.PO!=3: if batch!=1: labels_batch=self.train_labels[index1:index2] else: labels_batch=self.train_labels[j] - with tf.GradientTape() as tape: - if self.thread==None: - output=self.nn.fp(data_batch) + else: + if batch!=1: + if t+1!=self.thread and self.shape0%self.thread!=0: + labels_batch=self.train_labels[index1:index2] + else: + labels_batch=self.train_labels[_index1:_index2][index1:index2] + else: + if t+1!=self.thread and self.shape0%self.thread!=0: + labels_batch=self.train_labels[j] + else: + labels_batch=self.train_labels[_index1:_index2][j] + else: + try: + if type(self.train_data)==list: + if self.PO!=3: + for i in range(len(self.train_data)): + data_batch[i]=self.core.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) + else: + for i in range(len(self.train_data)): + if t+1!=self.thread and self.shape0%self.thread!=0: + data_batch[i]=self.core.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) + else: + data_batch[i]=self.core.concat([self.train_data[i][_index1:_index2][index1:],self.train_data[i][_index1:_index2][:index2]],0) + else: + if self.PO!=3: + data_batch=self.core.concat([self.train_data[index1:],self.train_data[:index2]],0) else: - output=self.nn.fp(data_batch,t) - batch_loss=self.nn.loss(output,labels_batch) + if t+1!=self.thread and self.shape0%self.thread!=0: + data_batch=self.core.concat([self.train_data[index1:],self.train_data[:index2]],0) + else: + data_batch=self.core.concat([self.train_data[_index1:_index2][index1:],self.train_data[_index1:_index2][:index2]],0) + if type(self.train_labels)==list: + if self.PO!=3: + for i in range(len(self.train_data)): + labels_batch[i]=self.core.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) + else: + if t+1!=self.thread and self.shape0%self.thread!=0: + for i in range(len(self.train_data)): + labels_batch[i]=self.core.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) + else: + for i in range(len(self.train_data)): + labels_batch[i]=self.core.concat([self.train_labels[i][_index1:_index2][index1:],self.train_labels[i][_index1:_index2][:index2]],0) + else: + if self.PO!=3: + labels_batch=self.core.concat([self.train_labels[index1:],self.train_labels[:index2]],0) + else: + if t+1!=self.thread and self.shape0%self.thread!=0: + labels_batch=self.core.concat([self.train_labels[index1:],self.train_labels[:index2]],0) + else: + labels_batch=self.core.concat([self.train_labels[_index1:_index2][index1:],self.train_labels[_index1:_index2][:index2]],0) + except: + if type(self.train_data)==list: + if self.PO!=3: + for i in range(len(self.train_data)): + data_batch[i]=self.core.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) + else: + if t+1!=self.thread and self.shape0%self.thread!=0: + for i in range(len(self.train_data)): + 
data_batch[i]=self.core.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) + else: + for i in range(len(self.train_data)): + data_batch[i]=self.core.concat([self.train_data[i][_index1:_index2][index1:],self.train_data[i][_index1:_index2][:index2]],0) + else: + if self.PO!=3: + data_batch=self.core.concat([self.train_data[index1:],self.train_data[:index2]],0) + else: + if t+1!=self.thread and self.shape0%self.thread!=0: + data_batch=self.core.concat([self.train_data[index1:],self.train_data[:index2]],0) + else: + data_batch=self.core.concat([self.train_data[_index1:_index2][index1:],self.train_data[_index1:_index2][:index2]],0) + if type(self.train_labels)==list: + if self.PO!=3: + for i in range(len(self.train_data)): + labels_batch[i]=self.core.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) + else: + if t+1!=self.thread and self.shape0%self.thread!=0: + for i in range(len(self.train_data)): + labels_batch[i]=self.core.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) + else: + for i in range(len(self.train_data)): + labels_batch[i]=self.core.concat([self.train_labels[i][_index1:_index2][index1:],self.train_labels[i][_index1:_index2][:index2]],0) + else: + if self.PO!=3: + labels_batch=self.core.concat([self.train_labels[index1:],self.train_labels[:index2]],0) + else: + if t+1!=self.thread and self.shape0%self.thread!=0: + labels_batch=self.core.concat([self.train_labels[index1:],self.train_labels[:index2]],0) + else: + labels_batch=self.core.concat([self.train_labels[_index1:_index2][index1:],self.train_labels[_index1:_index2][:index2]],0) + return data_batch,labels_batch + + + @function + def tf_opt(self,data,labels,t=None): + try: + if self.nn.gradient!=None: + pass + try: + if self.thread==None: + output=self.nn.fp(data) + else: + output=self.nn.fp(data,t) + loss=self.nn.loss(output,labels) + except TypeError: + if self.thread==None: + output,loss=self.nn.fp(data,labels) + else: + output,loss=self.nn.fp(data,labels,t) + except AttributeError: + with self.core.GradientTape() as tape: try: if self.thread==None: - if self.nn.opt!=None: - pass - self.apply_gradient(tape,self.nn.opt,batch_loss,self.nn.param) + output=self.nn.fp(data) else: - if self.nn.opt!=None: - pass - self.apply_gradient(tape,self.nn.opt[t],batch_loss,self.nn.param[t]) - except AttributeError: + output=self.nn.fp(data,t) + loss=self.nn.loss(output,labels) + except TypeError: if self.thread==None: - gradient=self.nn.gradient(tape,batch_loss,self.nn.param) + output,loss=self.nn.fp(data,labels) + else: + output,loss=self.nn.fp(data,labels,t) + if self.ol==None: + try: + if self.thread==None: + if self.nn.opt!=None: + pass + gradient=tape.gradient(loss,self.nn.param) + self.nn.opt.apply_gradients(zip(gradient,self.nn.param)) + else: + if self.nn.opt!=None: + pass + gradient=tape.gradient(loss,self.nn.param[t]) + self.nn.opt[t].apply_gradients(zip(gradient,self.nn.param[t])) + except AttributeError: + if self.thread==None: + gradient=self.nn.gradient(output,loss,self.nn.param) + self.nn.oopt(gradient,self.nn.param) + else: + gradient=self.nn.gradient(output,loss,self.nn.param[t]) + self.nn.oopt(gradient,self.nn.param,t) + else: + if self.thread_lock!=None: + try: + if self.nn.opt!=None: + pass + if self.PO==1: + gradient=tape.gradient(loss,self.nn.param) + self.nn.opt.apply_gradients(zip(gradient,self.nn.param)) + else: + self.thread_lock[0].acquire() + self.param=self.nn.param + self.gradient=tape.gradient(loss,self.param) + self.thread_lock[0].release() + 
self.thread_lock[1].acquire()
+                        self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param))
+                        self.thread_lock[1].release()
+                except AttributeError:
+                    if self.PO==1:
+                        gradient=self.nn.gradient(output,loss,self.nn.param)
                         self.nn.oopt(gradient,self.nn.param)
                     else:
+                        self.thread_lock[0].acquire()
+                        self.param=self.nn.param
+                        self.gradient=self.nn.gradient(output,loss,self.param)
+                        self.thread_lock[0].release()
+                        self.thread_lock[1].acquire()
+                        self.nn.oopt(self.gradient,self.nn.param)
+                        self.thread_lock[1].release()
+            else:
+                try:
+                    if self.nn.opt!=None:
+                        pass
+                    gradient=tape.gradient(loss,self.nn.param)
+                    self.nn.opt.apply_gradients(zip(gradient,self.nn.param))
+                except AttributeError:
+                    gradient=self.nn.gradient(output,loss,self.nn.param)
+                    self.nn.oopt(gradient,self.nn.param)
+        return output,loss
+
+
+    @function
+    def tf_opt_t(self,data,labels,t):
+        try:
+            if self.nn.gradient!=None:
+                pass
+            try:
+                output=self.nn.fp(data)
+                loss=self.nn.loss(output,labels)
+            except TypeError:
+                output,loss=self.nn.fp(data,labels)
+        except AttributeError:
+            with self.core.GradientTape() as tape:
+                try:
+                    output=self.nn.fp(data)
+                    loss=self.nn.loss(output,labels)
+                except TypeError:
+                    output,loss=self.nn.fp(data,labels)
+        if self.PO==1:
+            try:
+                if self.nn.opt!=None:
+                    pass
+                gradient=tape.gradient(loss,self.nn.param)
+                self.nn.opt.apply_gradients(zip(gradient,self.nn.param))
+            except AttributeError:
+                gradient=self.nn.gradient(output,loss,self.nn.param)
+                try:
+                    self.nn.oopt(gradient,self.nn.param,t)
+                except TypeError:
+                    self.nn.oopt(gradient,self.nn.param)
+        else:
+            self.thread_lock[0].acquire()
+            self.param=self.nn.param
+            try:
+                if self.nn.opt!=None:
+                    pass
+                self.gradient=tape.gradient(loss,self.param)
+            except AttributeError:
+                self.gradient=self.nn.gradient(output,loss,self.param)
+            self.thread_lock[0].release()
+            self.thread_lock[1].acquire()
+            try:
+                if self.nn.opt!=None:
+                    pass
+                self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param))
+            except AttributeError:
+                try:
+                    self.nn.oopt(self.gradient,self.nn.param,t)
+                except TypeError:
+                    self.nn.oopt(self.gradient,self.nn.param)
+            self.thread_lock[1].release()
+        return output,loss
+
+
+    def opt(self,data,labels,t=None):
+        try:
+            if self.core.DType!=None:
+                pass
+            output,loss=self.tf_opt(data,labels,t)
+        except AttributeError:
+            if self.thread==None:
+                output=self.nn.fp(data)
+            else:
+                output=self.nn.fp(data,t)
+            loss=self.nn.loss(output,labels)
+            self.nn.opt.zero_grad()
+            loss.backward()
+            self.nn.opt.step()
+        return output,loss
+
+
+    def opt_t(self,data,labels,t):
+        try:
+            if self.core.DType!=None:
+                pass
+            output,loss=self.tf_opt_t(data,labels,t)
+            return output,loss
+        except AttributeError:
+            pass
+        return
+
+
+    def _train(self,batch=None,_data_batch=None,_labels_batch=None,test_batch=None,t=None):
+        if self.stop==True:
+            self.stop_func()
         if batch!=None:
             total_loss=0
             total_acc=0
             batches=int((self.shape0-self.shape0%batch)/batch)
             for j in range(batches):
+                self.suspend_func()
                 index1=j*batch
                 index2=(j+1)*batch
+                data_batch,labels_batch=self.data_func(_data_batch,_labels_batch,batch,index1,index2,j)
+                try:
+                    output,batch_loss=self.opt(data_batch,labels_batch,t)
+                except:
+                    while True:
+                        try:
+                            output,batch_loss=self.opt(data_batch,labels_batch,t)
+                            break
+                        except:
+                            if self.thread==None:
+                                _try=input('\nCore replacement failed. Try again? (yes/no):')
+                                if _try=='yes':
+                                    continue
+                                else:
+                                    return
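#    (Illustrative aside, assuming self.core is either the tensorflow or the
#    torch module: opt() above selects the backend by duck-typing, since
#    TensorFlow exposes a module-level DType class and torch does not.)
#        def backend(core):
#            return 'tensorflow' if hasattr(core,'DType') else 'pytorch'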
total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=batch_loss,total_loss=total_loss,total_acc=total_acc,t=t) if self.thread==None: try: @@ -275,38 +599,21 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat batches+=1 index1=batches*batch index2=batch-(self.shape0-batches*batch) - if type(self.train_data)==list: - for i in range(len(self.train_data)): - data_batch[i]=tf.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) - else: - data_batch=tf.concat([self.train_data[index1:],self.train_data[:index2]],0) - if type(self.train_labels)==list: - for i in range(len(self.train_data)): - labels_batch[i]=tf.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) - else: - labels_batch=tf.concat([self.train_labels[index1:],self.train_labels[:index2]],0) - with tf.GradientTape() as tape: - if self.thread==None: - output=self.nn.fp(data_batch) - else: - output=self.nn.fp(data_batch,t) - batch_loss=self.nn.loss(output,labels_batch) + data_batch,labels_batch=self.data_func(_data_batch,_labels_batch,batch,index1,index2,flag=True) try: - if self.thread==None: - if self.nn.opt!=None: - pass - self.apply_gradient(tape,self.nn.opt,batch_loss,self.nn.param) - else: - if self.nn.opt: - pass - self.apply_gradient(tape,self.nn.opt[t],batch_loss,self.nn.param[t]) - except AttributeError: - if self.thread==None: - gradient=self.nn.gradient(tape,batch_loss,self.nn.param) - self.nn.oopt(gradient,self.param) - else: - gradient=self.nn.gradient(tape,batch_loss,self.nn.param[t]) - self.nn.oopt(gradient,self.nn.param,t) + output,batch_loss=self.opt(data_batch,labels_batch,t) + except: + while True: + try: + output,batch_loss=self.opt(data_batch,labels_batch,t) + break + except: + if self.thread==None: + _try=input('\nCore unsuccessfully be replaced,try again?:') + if _try==True: + continue + else: + return total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=batch_loss,total_loss=total_loss,total_acc=total_acc,t=t) if self.thread==None: try: @@ -319,8 +626,12 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat except AttributeError: pass loss=total_loss.numpy()/batches - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass train_acc=total_acc/batches + except AttributeError: + pass if self.thread==None: self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss @@ -329,7 +640,9 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat self.train_loss_list[t].append(loss.astype(np.float32)) self.train_loss[t]=loss self.train_loss[t]=self.train_loss[t].astype(np.float32) - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass if self.thread==None: self.train_acc_list.append(train_acc.astype(np.float32)) self.train_acc=train_acc @@ -338,79 +651,61 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat self.train_acc_list[t].append(train_acc.astype(np.float32)) self.train_acc[t]=train_acc self.train_acc[t]=self.train_acc[t].astype(np.float32) + except AttributeError: + pass if self.test_flag==True: if self.thread==None: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass self.test_acc_list.append(self.test_acc) + except AttributeError: + pass else: self.test_loss[t],self.test_acc[t]=self.test(self.test_data,self.test_labels,test_batch,t) 
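Worked numbers for the remainder bookkeeping above; this derivation takes index1 at the end of the last full batch, whereas the hunk computes the same quantities after first bumping the batches counter:

    shape0, batch = 10, 3
    batches = (shape0 - shape0 % batch) // batch   # 3 full batches
    if shape0 % batch != 0:
        index1 = batches * batch              # 9: first leftover row
        index2 = batch - (shape0 - index1)    # 2: rows borrowed from the head
        # rows [9:] (1 sample) + rows [:2] (2 samples) -> a final batch of 3
        batches += 1                          # 4 batches divide the loss sum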
self.test_loss_list[t].append(self.test_loss[t]) - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass self.test_acc_list[t].append(self.test_acc[t]) + except AttributeError: + pass elif self.ol==None: - with tf.GradientTape() as tape: - if self.thread==None: - output=self.nn.fp(self.train_data) - else: - output=self.nn.fp(data_batch,t) - train_loss=self.nn.loss(output,self.train_labels) + self.suspend_func() try: - if self.thread==None: - if self.nn.opt!=None: - pass - self.apply_gradient(tape,self.nn.opt,train_loss,self.nn.param) - else: - if self.nn.opt: - pass - self.apply_gradient(tape,self.nn.opt[t],batch_loss,self.nn.param[t]) - except AttributeError: - if self.thread==None: - gradient=self.nn.gradient(tape,train_loss,self.nn.param) - self.nn.oopt(gradient,self.nn.param) - else: - gradient=self.nn.gradient(tape,batch_loss,self.nn.param[t]) - self.nn.oopt(gradient,self.nn.param,t) + output,train_loss=self.opt(self.train_data,self.train_labels,t) + except: + while True: + try: + output,train_loss=self.opt(self.train_data,self.train_labels,t) + break + except: + if self.thread==None: + _try=input('\nCore unsuccessfully be replaced,try again?:') + if _try==True: + continue + else: + return self.loss_acc(output=output,labels_batch=labels_batch,loss=train_loss,test_batch=test_batch,total_loss=total_loss,total_acc=total_acc,t=t) else: + self.suspend_func() data=self.ol() - with tf.GradientTape() as tape: - output=self.nn.fp(data[0]) - train_loss=self.nn.loss(output,data[1]) - if self.thread_lock!=None: - try: - if self.nn.opt!=None: - pass - if self.PO==1: - self.apply_gradient(tape,self.nn.opt,train_loss,self.nn.param) - else: - self.thread_lock.acquire() - self.param=self.nn.param - self.gradient=tape.gradient(train_loss,self.param) - self.thread_lock.release() - self.thread_lock.acquire() - self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) - self.thread_lock.release() - except AttributeError: - if self.PO==1: - self.gradient=self.nn.gradient(tape,train_loss,self.nn.param) - self.nn.oopt(self.gradient,self.nn.param) - else: - self.thread_lock.acquire() - self.gradient=self.nn.gradient(tape,train_loss,self.nn.param) - self.thread_lock.release() - self.thread_lock.acquire() - self.nn.oopt(self.gradient,self.nn.param) - self.thread_lock.release() - else: - try: - if self.nn.opt!=None: - pass - self.apply_gradient(tape,self.nn.opt,train_loss,self.nn.param) - except AttributeError: - gradient=self.nn.gradient(tape,train_loss,self.nn.param) - self.nn.oopt(gradient,self.nn.param) + try: + output,_=self.opt(data[0],output) + except: + while True: + try: + output,_=self.opt(data[0],output) + break + except: + if self.thread==None: + _try=input('\nCore unsuccessfully be replaced,try again?:') + if _try==True: + continue + else: + return train_loss=self.nn.loss(output,data[1]) loss=train_loss.numpy() if self.thread_lock!=None: @@ -429,263 +724,222 @@ def _train(self,batch=None,epoch=None,test_batch=None,data_batch=None,labels_bat except AttributeError: pass self.total_epoch+=1 + if self.stop==True: + self.stop_flag=1 return - def train_(self,data_batch=None,labels_batch=None,batches=None,batch=None,epoch=None,test_batch=None,index1=None,index2=None,j=None,t=None): - if self.end_loss!=None or self.end_acc!=None or self.end_test_loss!=None or self.end_test_acc!=None: - self._param=self.nn.param + def train_(self,_data_batch=None,_labels_batch=None,batch=None,batches=None,test_batch=None,index1=None,index2=None,j=None,t=None): + if self.stop==True: + self.stop_func() if 
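The online branch above pulls one fresh (data, labels) pair per step from the user-supplied self.ol callable. One possible shape for a compatible source; the sampling scheme and names here are illustrative, not part of the patch:

    import numpy as np

    def make_online_source(data, labels, batch=32, seed=0):
        rng = np.random.default_rng(seed)
        def ol():
            # One call yields one step's worth of data, in the
            # [data, labels] layout the branch above indexes.
            idx = rng.integers(0, len(data), size=batch)
            return [data[idx], labels[idx]]
        return ol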
batch!=None: if index1==batches*batch: - if type(self.train_data)==list: - for i in range(len(self.train_data)): - data_batch[i]=tf.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) - else: - data_batch=tf.concat([self.train_data[index1:],self.train_data[:index2]],0) - if type(self.train_labels)==list: - for i in range(len(self.train_data)): - labels_batch[i]=tf.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) - else: - labels_batch=tf.concat([self.train_labels[index1:],self.train_labels[:index2]],0) + data_batch,labels_batch=self.data_func(_data_batch,_labels_batch,batch,index1,index2,j,True,t) + try: + output,batch_loss=self.opt_t(data_batch,labels_batch,t) + except: + while True: + try: + output,batch_loss=self.opt_t(data_batch,labels_batch,t) + break + except: + continue + try: + self.nn.bc[t]+=1 + except AttributeError: + pass if self.PO==1: - with tf.GradientTape() as tape: - self.output=self.nn.fp(data_batch) - self.batch_loss=self.nn.loss(self.output,labels_batch) - try: - if self.nn.opt!=None: - pass - self.gradient=tape.gradient(self.batch_loss,self.nn.param) - except AttributeError: - self.gradient=self.nn.gradient(tape,self.batch_loss,self.nn.param) try: - if self.nn.opt!=None: + if self.nn.accuracy!=None: pass - self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) - except AttributeError: - self.nn.oopt(self.gradient,self.param,t) - if self.acc_flag1==1: - self.batch_acc=self.nn.accuracy(self.output,labels_batch) - try: - self.nn.bc=j + batch_acc=self.nn.accuracy(output,labels_batch) except AttributeError: pass else: - self.thread_lock.acquire() - self.param=self.nn.param - with tf.GradientTape() as tape: - self.output=self.nn.fp(data_batch) - self.batch_loss=self.nn.loss(self.output,labels_batch) try: - if self.nn.opt!=None: + if self.nn.accuracy!=None: pass - self.gradient=tape.gradient(self.batch_loss,self.param) - except AttributeError: - self.gradient=self.nn.gradient(tape,self.batch_loss,self.param) - self.thread_lock.release() - self.thread_lock.acquire() - try: - if self.nn.opt!=None: - pass - self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) - except AttributeError: - self.nn.oopt(self.gradient,self.nn.param,t) - if self.acc_flag1==1: - self.batch_acc=self.nn.accuracy(self.output,labels_batch) - try: - self.nn.bc+=1 + self.thread_lock[0].acquire() + batch_acc=self.nn.accuracy(output,labels_batch) + self.thread_lock[0].release() except AttributeError: pass - if self.acc_flag1==1: - return self.batch_loss,self.batch_acc - else: - return self.batch_loss - self.thread_lock.release() - if type(self.train_data)==list: - for i in range(len(self.train_data)): - if batch!=1: - data_batch[i]=self.train_data[i][index1:index2] - else: - data_batch[i]=self.train_data[i][j] - else: - if batch!=1: - data_batch=self.train_data[index1:index2] - else: - data_batch=self.train_data[j] - if type(self.train_labels)==list: - for i in range(len(self.train_data)): - if batch!=1: - labels_batch[i]=self.train_labels[i][index1:index2] - else: - labels_batch[i]=self.train_labels[i][j] - else: - if batch!=1: - labels_batch=self.train_labels[index1:index2] - else: - labels_batch=self.train_labels[j] - if self.PO==1: - with tf.GradientTape() as tape: - self.output=self.nn.fp(data_batch) - self.batch_loss=self.nn.loss(self.output,labels_batch) try: - if self.nn.opt!=None: + if self.nn.accuracy!=None: pass - self.gradient=tape.gradient(self.batch_loss,self.nn.param) + return batch_loss,batch_acc except AttributeError: - 
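The block being removed here is the two-phase locking idiom this file uses everywhere: lock 0 guards snapshotting the parameters and computing gradients, lock 1 guards applying them. The idiom in isolation, with grad_fn as a stand-in for the tape:

    import threading

    locks = [threading.Lock(), threading.Lock()]

    def locked_step(nn, grad_fn):
        with locks[0]:               # phase 1: snapshot params, compute grads
            param = nn.param
            gradient = grad_fn(param)
        with locks[1]:               # phase 2: apply to the live params
            nn.opt.apply_gradients(zip(gradient, nn.param))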
self.gradient=self.nn.gradient(tape,self.batch_loss,self.nn.param) + return batch_loss,None + data_batch,labels_batch=self.data_func(_data_batch,_labels_batch,batch,index1,index2,j,t=t) + try: + output,batch_loss=self.opt_t(data_batch,labels_batch,t) + except: + while True: + try: + output,batch_loss=self.opt_t(data_batch,labels_batch,t) + break + except: + continue + if self.PO==1: try: - if self.nn.opt!=None: + if self.nn.accuracy!=None: pass - self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) + batch_acc=self.nn.accuracy(output,labels_batch) except AttributeError: - self.nn.oopt(self.gradient,self.nn.param,t) - if self.acc_flag1==1: - self.batch_acc=self.nn.accuracy(self.output,labels_batch) + pass try: - self.nn.bc=j + self.nn.bc[t]=j except AttributeError: pass else: - self.thread_lock.acquire() - self.param=self.nn.param - with tf.GradientTape() as tape: - self.output=self.nn.fp(data_batch) - self.batch_loss=self.nn.loss(self.output,labels_batch) try: - if self.nn.opt!=None: - pass - self.gradient=tape.gradient(self.batch_loss,self.param) - except AttributeError: - self.gradient=self.nn.gradient(tape,self.batch_loss,self.param) - self.thread_lock.release() - self.thread_lock.acquire() - try: - if self.nn.opt!=None: + if self.nn.accuracy!=None: pass - self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) + self.thread_lock[0].acquire() + batch_acc=self.nn.accuracy(output,labels_batch) + self.thread_lock[0].release() except AttributeError: - self.nn.oopt(self.gradient,self.nn.param,t) - if self.acc_flag1==1: - self.batch_acc=self.nn.accuracy(self.output,labels_batch) + pass try: - self.nn.bc=j + self.nn.bc[t]=j except AttributeError: pass - if self.acc_flag1==1: - return self.batch_loss,self.batch_acc - else: - return self.batch_loss - self.thread_lock.release() + try: + if self.nn.accuracy!=None: + pass + return batch_loss,batch_acc + except AttributeError: + return batch_loss,None else: + output,train_loss=self.opt_t(self.train_data,self.train_labels,t) if self.PO==1: - with tf.GradientTape() as tape: - self.output=self.nn.fp(self.train_data) - self._train_loss=self.nn.loss(self.output,self.train_labels) - try: - if self.nn.opt!=None: - pass - self.gradient=tape.gradient(self._train_loss,self.nn.param) - except AttributeError: - self.gradient=self.nn.gradient(tape,self._train_loss,self.nn.param) + self.loss=train_loss.numpy() + self.train_loss_list.append(self.loss.astype(np.float32)) + self.train_loss=self.loss.astype(np.float32) try: - if self.nn.opt!=None: + if self.nn.accuracy!=None: pass - self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) - except AttributeError: - self.nn.oopt(self.gradient,self.nn.param) - self.loss=self._train_loss.numpy() - self.train_loss_list.append(self.loss.astype(np.float32)) - self.train_loss=self.loss - self.train_loss=self.train_loss.astype(np.float32) - if self.acc_flag1==1: - self.acc=self.nn.accuracy(self.output,self.train_labels) + self.acc=self.nn.accuracy(output,self.train_labels) self.acc=self.acc.numpy() self.train_acc_list.append(self.acc.astype(np.float32)) - self.train_acc=self.acc - self.train_acc=self.train_acc.astype(np.float32) + self.train_acc=self.acc.astype(np.float32) + except AttributeError: + pass if self.test_flag==True: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass self.test_acc_list.append(self.test_acc) - else: - self.thread_lock.acquire() - 
self.param=self.nn.param - with tf.GradientTape() as tape: - self.output=self.nn.fp(self.train_data) - self._train_loss=self.nn.loss(self.output,self.train_labels) - try: - if self.nn.opt!=None: + except AttributeError: pass - self.gradient=tape.gradient(self._train_loss,self.param) - except AttributeError: - self.gradient=self.nn.gradient(tape,self._train_loss,self.param) - self.thread_lock.release() - self.thread_lock.acquire() + else: + self.thread_lock[1].acquire() + self._train_loss=train_loss.numpy() + self.train_loss_list.append(self._train_loss.astype(np.float32)) + self.train_loss=self._train_loss.astype(np.float32) try: - if self.nn.opt!=None: + if self.nn.accuracy!=None: pass - self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) - except AttributeError: - self.nn.oopt(self.gradient,self.nn.param,t) - self.loss=self._train_loss.numpy() - self.train_loss_list.append(self.loss.astype(np.float32)) - self.train_loss=self.loss - self.train_loss=self.train_loss.astype(np.float32) - if self.acc_flag1==1: - self.acc=self.nn.accuracy(self.output,self.train_labels) + self.acc=self.nn.accuracy(output,self.train_labels) self.acc=self.acc.numpy() self.train_acc_list.append(self.acc.astype(np.float32)) - self.train_acc=self.acc - self.train_acc=self.train_acc.astype(np.float32) + self.train_acc=self.acc.astype(np.float32) + except AttributeError: + pass if self.test_flag==True: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass self.test_acc_list.append(self.test_acc) - self.thread_lock.release() + except AttributeError: + pass + self.thread_lock[1].release() + if self.stop==True: + self.stop_flag=1 return - def _train_(self,batch=None,epoch=None,data_batch=None,labels_batch=None,test_batch=None,t=None): + def _train_(self,batch=None,data_batch=None,labels_batch=None,test_batch=None,t=None): + if self.stop==True: + self.stop_func() total_loss=0 total_acc=0 batches=int((self.shape0-self.shape0%batch)/batch) - for j in range(batches): - index1=j*batch - index2=(j+1)*batch - if self.acc_flag1==1: - self.train_(data_batch,labels_batch,batch,epoch,batches,test_batch,index1,index2,j,t) - total_loss+=self.batch_loss - total_acc+=self.batch_acc - else: - self.train_(data_batch,labels_batch,batch,epoch,batches,test_batch,index1,index2,j,t) - total_loss+=self.batch_loss - if self.shape0%batch!=0: - batches+=1 - index1=batches*batch - index2=batch-(self.shape0-batches*batch) - self.train_(data_batch,labels_batch,batch,epoch,batches,test_batch,index1,index2,j,t) - if self.acc_flag1==1: - total_loss+=self.batch_loss - total_acc+=self.batch_acc + if batch!=None: + for j in range(batches): + self.suspend_func() + index1=j*batch + index2=(j+1)*batch + if self.PO==1: + self.thread_lock.acquire() + batch_loss,batch_acc=self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,j,t) + self.thread_lock.release() + else: + batch_loss,batch_acc=self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,j,t) + try: + if self.nn.accuracy!=None: + pass + total_loss+=batch_loss + total_acc+=batch_acc + except AttributeError: + total_loss+=batch_loss + if self.shape0%batch!=0: + batches+=1 + index1=batches*batch + index2=batch-(self.shape0-batches*batch) + if self.PO==1: + self.thread_lock.acquire() + batch_loss,batch_acc=self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,None,t) + self.thread_lock.release() + else: 
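In the rewritten _train_ above, PO==1 serializes every optimization step behind the single shared lock, while the other PO modes let train_ take its own finer-grained locks internally. Just that control flow, sketched against the kernel attributes the patch assumes:

    def run_batches(kernel, data_batch, labels_batch, batch, batches, test_batch, t):
        for j in range(batches):
            index1, index2 = j * batch, (j + 1) * batch
            if kernel.PO == 1:
                kernel.thread_lock.acquire()   # one step at a time, globally
                loss, acc = kernel.train_(data_batch, labels_batch, batch,
                                          batches, test_batch, index1, index2, j, t)
                kernel.thread_lock.release()
            else:                              # train_ locks internally
                loss, acc = kernel.train_(data_batch, labels_batch, batch,
                                          batches, test_batch, index1, index2, j, t)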
+ batch_loss,batch_acc=self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,None,t) + try: + if self.nn.accuracy!=None: + pass + total_loss+=batch_loss + total_acc+=batch_acc + except AttributeError: + total_loss+=batch_loss + loss=total_loss.numpy()/batches + try: + if self.nn.accuracy!=None: + pass + train_acc=total_acc.numpy()/batches + except AttributeError: + pass + self.thread_lock[1].acquire() + self.train_loss_list.append(loss.astype(np.float32)) + self.train_loss=loss.astype(np.float32) + try: + if self.nn.accuracy!=None: + pass + self.train_acc_list.append(train_acc.astype(np.float32)) + self.train_acc=train_acc.astype(np.float32) + except AttributeError: + pass + if self.test_flag==True: + self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) + self.test_loss_list.append(self.test_loss) + try: + if self.nn.accuracy!=None: + pass + self.test_acc_list.append(self.test_acc) + except AttributeError: + pass + self.thread_lock[1].release() + return + else: + if self.PO==1: + self.thread_lock.acquire() + batch_loss,batch_acc=self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,j,t) + self.thread_lock.release() else: - total_loss+=self.batch_loss - loss=total_loss.numpy()/batches - if self.acc_flag1==1: - train_acc=total_acc.numpy()/batches - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss - self.train_loss=self.train_loss.astype(np.float32) - if self.acc_flag1==1: - self.train_acc_list.append(train_acc.astype(np.float32)) - self.train_acc=train_acc - self.train_acc=self.train_acc.astype(np.float32) - if self.test_flag==True: - self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) - self.test_loss_list.append(self.test_loss) - if self.acc_flag1==1: - self.test_acc_list.append(self.test_acc) - return + batch_loss,batch_acc=self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,j,t) + return def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s=None): @@ -693,7 +947,6 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s self.epoch=0 t1=None t2=None - t=None self.train_counter+=1 if p==None: self.p=9 @@ -712,8 +965,16 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s labels_batch=[x for x in range(len(self.train_labels))] else: labels_batch=None + if self.thread!=None: + try: + t=self.t.pop() + except IndexError: + print('\nError,please add thread.') + return if epoch!=None: for i in range(epoch): + if self.stop==True: + self.stop_func() t1=time.time() if self.thread==None: try: @@ -723,20 +984,36 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s else: try: self.nn.ec[self.t[-1]]+=1 - except AttributeError: + except: pass if self.thread==None: - self._train(batch,epoch,test_batch,data_batch,labels_batch) + self._train(batch,data_batch,labels_batch,test_batch) else: - t=self.t.pop() - if self.PO==1: - self.thread_lock.acquire() - self._train_(batch,epoch,data_batch,labels_batch,test_batch,t) - self.thread_lock.release() - elif self.PO!=None: - self._train_(batch,epoch,data_batch,labels_batch,test_batch,t) + if self.PO!=None: + self._train_(batch,data_batch,labels_batch,test_batch,t) + else: + self._train(batch,data_batch,labels_batch,test_batch,t) + if self.stop==True: + if self.stop_flag==1: + self.stop_func() else: - self._train(batch,epoch,test_batch,data_batch,labels_batch,t) + return + if 
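Each train() call above claims a worker id by popping self.t, the pool data() builds with the -np.arange(-thread, 1) trick. What that expression actually yields:

    import numpy as np

    thread = 3
    t_pool = list(-np.arange(-thread, 1))   # [3, 2, 1, 0]
    print(t_pool.pop())                     # 0: ids come off the tail, 0 first
    # An exhausted pool raises IndexError, which train() reports as
    # 'please add thread'; add_threads() is what refills it.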
type(self.total_epoch)!=list: + if self.thread_lock!=None: + if type(self.thread_lock)!=list: + self.thread_lock.acquire() + self.total_epoch+=1 + self.thread_lock.release() + else: + self.thread_lock[1].acquire() + self.total_epoch+=1 + self.thread_lock[1].release() + else: + self.epoch+=1 + self.total_epoch+=1 + else: + self.epoch[t]+=1 + self.total_epoch[t]+=1 if self.thread==None: if epoch%10!=0: p=epoch-epoch%self.p @@ -766,31 +1043,40 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s if save!=None and i%s==0: self.save(self.total_epoch,one) t2=time.time() - if self.thread==None: + if type(self.time)!=list: self.time+=(t2-t1) else: self.time[t]+=(t2-t1) - if self.end_flag==True and self.end()==True: - self.nn.param=self._param - self._param=None - break elif self.ol==None: + self.suspend_func() i=0 while True: t1=time.time() if self.thread==None: - self._train(epoch=epoch,test_batch=test_batch) + self._train(test_batch=test_batch) else: t=self.t.pop() - if self.PO==1: - self.thread_lock.acquire() - self._train_(epoch=epoch,test_batch=test_batch,t=t) - self.thread_lock.release() - elif self.PO!=None: - self._train_(epoch=epoch,test_batch=test_batch,t=t) + if self.PO!=None: + self._train_(test_batch=test_batch,t=t) else: - self._train(epoch=epoch,test_batch=test_batch,t=t) + self._train(test_batch=test_batch,t=t) i+=1 + if type(self.total_epoch)!=list: + if self.thread_lock!=None: + if type(self.thread_lock)!=list: + self.thread_lock.acquire() + self.total_epoch+=1 + self.thread_lock.release() + else: + self.thread_lock[1].acquire() + self.total_epoch+=1 + self.thread_lock[1].release() + else: + self.epoch+=1 + self.total_epoch+=1 + else: + self.epoch[t]+=1 + self.total_epoch[t]+=1 if self.thread==None: if epoch%10!=0: p=epoch-epoch%self.p @@ -803,7 +1089,7 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s s=epoch/(self.s+1) s=int(s) if p==0: - p=epoch + p=1 if s==0: s=1 if i%p==0: @@ -830,14 +1116,10 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s except AttributeError: pass t2=time.time() - if self.thread==None: + if type(self.time)!=list: self.time+=(t2-t1) else: self.time[t]+=(t2-t1) - if self.end_flag==True and self.end()==True: - self.nn.param=self._param - self._param=None - break else: while True: self._train() @@ -845,7 +1127,7 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s output=self.nn.fp(data[0]) train_loss=self.nn.loss(output,data[1]) loss=train_loss.numpy() - self.nn.train_loss=loss.astype(np.float32) + self.nn.train_loss.append(loss.astype(np.float32)) if save!=None: self.save() try: @@ -874,8 +1156,10 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s print('last loss:{0:.6f}'.format(self.train_loss)) else: print('last loss:{0:.6f},last test loss:{1:.6f}'.format(self.train_loss,self.test_loss)) - if self.acc_flag1==1: - if self.acc_flag2=='%': + try: + if self.nn.accuracy!=None: + pass + if self.acc_flag=='%': if self.test_flag==False: print('accuracy:{0:.1f}'.format(self.train_acc*100)) else: @@ -885,6 +1169,8 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s print('accuracy:{0:.6f}'.format(self.train_acc)) else: print('accuracy:{0:.6f},test_flag accuracy:{1:.6f}'.format(self.train_acc,self.test_acc)) + except AttributeError: + pass print('time:{0}s'.format(self.time)) return @@ -916,70 +1202,152 @@ def test(self,test_data,test_labels,batch=None,t=None): 
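The p computed above sets the progress-print cadence; flooring it at 1 (the change from p=epoch) keeps i%p defined and makes short runs report every epoch. With the default self.p of 9, the round-number branch works out roughly as follows (a sketch; the save cadence s is derived the same way from self.s):

    epoch, P = 100, 9
    p = int(epoch / (P + 1)) or 1   # 10: a progress line every 10th epoch
    for i in range(epoch):
        if i % p == 0:
            pass                    # print loss/acc for epoch i here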
labels_batch[i]=test_labels[i][index1:index2] else: labels_batch=test_labels[index1:index2] - if self.thread==None: + if self.thread==None or t==None: output=self.nn.fp(data_batch) else: output=self.nn.fp(data_batch,t) batch_loss=self.nn.loss(output,labels_batch) total_loss+=batch_loss - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass batch_acc=self.nn.accuracy(output,labels_batch) total_acc+=batch_acc + except AttributeError: + pass if shape0%batch!=0: batches+=1 index1=batches*batch index2=batch-(shape0-batches*batch) - if type(test_data)==list: - for i in range(len(test_data)): - data_batch[i]=tf.concat(test_data[i][index1:],test_data[i][:index2],0) - else: - data_batch=tf.concat(test_data[index1:],test_data[:index2],0) - if type(self.test_labels)==list: - for i in range(len(test_labels)): - labels_batch[i]=tf.concat(test_labels[i][index1:],test_labels[i][:index2],0) - else: - labels_batch=tf.concat(test_labels[index1:],test_labels[:index2],0) - if self.thread==None: + try: + if type(test_data)==list: + for i in range(len(test_data)): + data_batch[i]=self.core.concat([test_data[i][index1:],test_data[i][:index2]],0) + else: + data_batch=self.core.concat([test_data[index1:],test_data[:index2]],0) + if type(self.test_labels)==list: + for i in range(len(test_labels)): + labels_batch[i]=self.core.concat([test_labels[i][index1:],test_labels[i][:index2]],0) + else: + labels_batch=self.core.concat([test_labels[index1:],test_labels[:index2]],0) + except: + if type(test_data)==list: + for i in range(len(test_data)): + data_batch[i]=self.core.concat([test_data[i][index1:],test_data[i][:index2]],0) + else: + data_batch=self.core.concat([test_data[index1:],test_data[:index2]],0) + if type(self.test_labels)==list: + for i in range(len(test_labels)): + labels_batch[i]=self.core.concat([test_labels[i][index1:],test_labels[i][:index2]],0) + else: + labels_batch=self.core.concat([test_labels[index1:],test_labels[:index2]],0) + if self.thread==None or t==None: output=self.nn.fp(data_batch) else: output=self.nn.fp(data_batch,t) batch_loss=self.nn.loss(output,labels_batch) total_loss+=batch_loss - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass batch_acc=self.nn.accuracy(output,labels_batch) total_acc+=batch_acc + except AttributeError: + pass test_loss=total_loss.numpy()/batches test_loss=test_loss test_loss=test_loss.astype(np.float32) - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass test_acc=total_acc.numpy()/batches test_acc=test_acc test_acc=test_acc.astype(np.float32) + except AttributeError: + pass else: - if self.thread==None: + if self.thread==None or t==None: output=self.nn.fp(test_data) else: output=self.nn.fp(test_data,t) test_loss=self.nn.loss(output,test_labels) - if self.acc_flag1==1: + try: + if self.nn.accuracy!=None: + pass test_acc=self.nn.accuracy(output,test_labels) test_loss=test_loss.numpy().astype(np.float32) test_acc=test_acc.numpy().astype(np.float32) - if self.thread==None: + except AttributeError: + pass + if self.thread==None or t==None: print('test loss:{0:.6f}'.format(test_loss)) - if self.acc_flag1==1: - if self.acc_flag2=='%': + try: + if self.nn.accuracy!=None: + pass + if self.acc_flag=='%': print('accuracy:{0:.1f}'.format(test_acc*100)) else: print('accuracy:{0:.6f}'.format(test_acc)) - if self.acc_flag2=='%': + if self.acc_flag=='%': return test_loss,test_acc*100 else: return test_loss,test_acc - else: + except AttributeError: return test_loss + def suspend_func(self): + if self.suspend==True: + if self.thread==None: + 
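test() above mirrors the training batcher: sum per-batch losses over the full batches, fold in a wrap-around remainder batch, then average. A trimmed sketch of the same evaluation, with the remainder batch omitted and nn being the usual assumed duck type:

    import numpy as np

    def evaluate(nn, test_data, test_labels, batch):
        batches = (len(test_data) - len(test_data) % batch) // batch
        total = 0.0
        for j in range(batches):
            x = test_data[j * batch:(j + 1) * batch]
            y = test_labels[j * batch:(j + 1) * batch]
            total += float(nn.loss(nn.fp(x), y))   # scalar loss per batch
        return np.float32(total / batches)         # stored as float32, as above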
if self.save_epoch==None: + print('Training have suspended.') + else: + self._save() + while True: + if self.suspend==False: + if self.thread==None: + print('Training have continued.') + break + return + + + def stop_func(self): + if self.thread_lock==None: + if self.save_flag==True: + self.save(self.total_epoch,True) + print('\nSystem have stopped training,Neural network have been saved.') + return + else: + print('\nSystem have stopped training.') + return + elif self.end() and self.end_flag==True: + self.thread_lock.acquire() + self.save(self.total_epoch,True) + self.stop_flag=2 + self.thread_lock.release() + return + else: + if self.save_flag==True: + self.thread_lock.acquire() + self.save(self.total_epoch,True) + self.stop_flag=2 + self.thread_lock.release() + return + else: + return + + + def _save(self): + if self.save_epoch==self.total_epoch: + self.save(self.total_epoch,False) + self.save_epoch=None + print('\nNeural network have saved and training have suspended.') + return + elif self.save_epoch!=None and self.save_epoch>self.total_epoch: + print('\nsave_epoch>total_epoch') + return + + def train_info(self): print() print('batch:{0}'.format(self.batch)) @@ -993,7 +1361,7 @@ def train_info(self): print('-------------------------------------') print() print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc_flag2=='%': + if self.acc_flag=='%': print('train acc:{0:.1f}'.format(self.train_acc*100)) else: print('train acc:{0:.6f}'.format(self.train_acc)) @@ -1003,7 +1371,7 @@ def train_info(self): def test_info(self): print() print('test loss:{0:.6f}'.format(self.test_loss)) - if self.acc_flag2=='%': + if self.acc_flag=='%': print('test acc:{0:.1f}'.format(self.test_acc*100)) else: print('test acc:{0:.6f}'.format(self.test_acc)) @@ -1026,6 +1394,7 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') + print('train loss:{0:.6f}'.format(self.train_loss)) try: if self.nn.accuracy!=None: pass @@ -1034,11 +1403,10 @@ def train_visual(self): plt.title('train acc') plt.xlabel('epoch') plt.ylabel('acc') - print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc_flag2=='%': + if self.acc_flag=='%': print('train acc:{0:.1f}'.format(self.train_acc*100)) else: - print('train acc:{0:.6f}'.format(self.train_acc)) + print('train acc:{0:.6f}'.format(self.train_acc)) except AttributeError: pass return @@ -1051,6 +1419,7 @@ def test_visual(self): plt.title('test loss') plt.xlabel('epoch') plt.ylabel('loss') + print('test loss:{0:.6f}'.format(self.test_loss)) try: if self.nn.accuracy!=None: pass @@ -1059,11 +1428,10 @@ def test_visual(self): plt.title('test acc') plt.xlabel('epoch') plt.ylabel('acc') - print('test loss:{0:.6f}'.format(self.test_loss)) - if self.acc_flag2=='%': + if self.acc_flag=='%': print('test acc:{0:.1f}'.format(self.test_acc*100)) else: - print('test acc:{0:.6f}'.format(self.test_acc)) + print('test acc:{0:.6f}'.format(self.test_acc)) except AttributeError: pass return @@ -1078,6 +1446,7 @@ def comparison(self): plt.title('loss') plt.xlabel('epoch') plt.ylabel('loss') + print('train loss:{0}'.format(self.train_loss)) plt.legend() try: if self.nn.accuracy!=None: @@ -1090,11 +1459,10 @@ def comparison(self): plt.xlabel('epoch') plt.ylabel('acc') plt.legend() - print('train loss:{0}'.format(self.train_loss)) - if self.acc_flag2=='%': + if self.acc_flag=='%': print('train acc:{0:.1f}'.format(self.train_acc*100)) else: - print('train acc:{0:.6f}'.format(self.train_acc)) + print('train acc:{0:.6f}'.format(self.train_acc)) except 
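suspend_func above spins until another thread clears self.suspend. The same checkpoint with a small sleep added; the bare loop in the patch burns a core, and threading.Event.wait would be the natural alternative if the API could change:

    import time

    def suspend_point(kernel):
        if kernel.suspend:
            print('Training suspended.')
            while kernel.suspend:
                time.sleep(0.1)    # illustrative back-off; the patch loops bare
            print('Training resumed.')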
AttributeError: pass if self.test_flag==True: @@ -1102,7 +1470,7 @@ def comparison(self): print('-------------------------------------') print() print('test loss:{0:.6f}'.format(self.test_loss)) - if self.acc_flag2=='%': + if self.acc_flag=='%': print('test acc:{0:.1f}'.format(self.test_acc*100)) else: print('test acc:{0:.6f}'.format(self.test_acc)) @@ -1116,7 +1484,9 @@ def save_p(self): return - def save(self,path,i=None,one=True): + def save(self,i=None,one=True): + if self.stop_flag==2: + return if one==True: output_file=open('save.dat','wb') parameter_file=open('parameter.dat','wb') @@ -1129,15 +1499,29 @@ def save(self,path,i=None,one=True): os.remove(self.file_list[0][1]) pickle.dump(self.nn.param,parameter_file) self.nn.param=None - pickle.dump(self.nn,output_file) + try: + if self.nn.opt: + pass + opt=self.nn.opt + self.nn.opt=None + pickle.dump(self.nn,output_file) + self.nn.opt=opt + except AttributeError: + try: + pickle.dump(self.nn,output_file) + except: + opt=self.nn.oopt + self.nn.oopt=None + pickle.dump(self.nn,output_file) + self.nn.oopt=opt + pickle.dump(opt.get_config(),output_file) pickle.dump(self.ol,output_file) pickle.dump(self.batch,output_file) pickle.dump(self.end_loss,output_file) pickle.dump(self.end_acc,output_file) pickle.dump(self.end_test_loss,output_file) pickle.dump(self.end_test_acc,output_file) - pickle.dump(self.acc_flag1,output_file) - pickle.dump(self.acc_flag2,output_file) + pickle.dump(self.acc_flag,output_file) pickle.dump(self.p,output_file) pickle.dump(self.s,output_file) pickle.dump(self.file_list,output_file) @@ -1157,6 +1541,8 @@ def save(self,path,i=None,one=True): pickle.dump(self.total_time,output_file) output_file.close() parameter_file.close() + if self.stop==True and self.stop_flag!=None: + print('\nSystem have stopped,Neural network have saved.') return @@ -1171,14 +1557,14 @@ def restore(self,s_path,p_path): self.nn.km=1 except AttributeError: pass + self.config=pickle.load(input_file) self.ol=pickle.load(input_file) self.batch=pickle.load(input_file) self.end_loss=pickle.load(input_file) self.end_acc=pickle.load(input_file) self.end_test_loss=pickle.load(input_file) self.end_test_acc=pickle.load(input_file) - self.acc_flag1=pickle.load(input_file) - self.acc_flag2=pickle.load(input_file) + self.acc_flag=pickle.load(input_file) self.p=pickle.load(input_file) self.s=pickle.load(input_file) self.file_list=pickle.load(input_file) From 78c354c020ae9e66286cc9c83488eef45f7dab45 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 29 Jul 2022 13:19:20 +0800 Subject: [PATCH 44/99] Update kernel.py --- Note/create/kernel.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index d1474d093..5169ce5f9 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -1354,8 +1354,11 @@ def train_info(self): print() print('epoch:{0}'.format(self.total_epoch)) print() - print('learning rate:{0}'.format(self.nn.lr)) - print() + try: + print('learning rate:{0}'.format(self.nn.lr)) + print() + except AttributeError: + pass print('time:{0:.3f}s'.format(self.total_time)) print() print('-------------------------------------') From dffcb5d3ec354422b09f974f3c1bb7c9303bc051 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 29 Jul 2022 14:33:24 +0800 Subject: [PATCH 45/99] Update kernel.py --- Note/create/kernel.py | 1196 
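The save() changes above work around optimizers that refuse to pickle: detach self.nn.opt (or fall back to oopt), pickle the network, restore the handle, and persist opt.get_config() so the optimizer can be rebuilt after restore. The core of that dance, sketched with the real Keras get_config() API:

    import pickle

    def save_network(nn, path='save.dat'):
        opt, nn.opt = nn.opt, None     # many optimizers do not pickle cleanly
        try:
            with open(path, 'wb') as f:
                pickle.dump(nn, f)
                pickle.dump(opt.get_config(), f)   # enough to rebuild it later
        finally:
            nn.opt = opt               # reattach even if pickling failed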
+++++++++++++++-------------------------- 1 file changed, 441 insertions(+), 755 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 5169ce5f9..48ebb3193 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -1,11 +1,9 @@ -from tensorflow import function +import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import pickle import os import time - - class kernel: def __init__(self,nn=None): if nn!=None: @@ -14,24 +12,19 @@ def __init__(self,nn=None): self.nn.km=1 except AttributeError: pass - self.core=None self.PO=None self.thread_lock=None self.thread=None self.ol=None - self.suspend=False - self.stop=None - self.stop_flag=None - self.end_flag=None - self.save_flag=None - self.save_epoch=None self.batch=None self.epoch=0 self.end_loss=None self.end_acc=None self.end_test_loss=None self.end_test_acc=None - self.acc_flag='%' + self.acc_flag1=None + self.acc_flag2='%' + self.train_flag=None self.train_counter=0 self.train_loss=None self.train_acc=None @@ -78,20 +71,16 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None): self.train_acc=np.zeros(self.thread) self.train_loss_list=[[] for _ in range(self.thread)] self.train_acc_list=[[] for _ in range(self.thread)] - self.epoch=np.zeros(self.thread) - self.total_epoch=np.zeros(self.thread) - self.time=np.zeros(self.thread) - self.total_time=np.zeros(self.thread) if test_data!=None: if self.PO==None: self.test_loss=np.zeros(self.thread) self.test_acc=np.zeros(self.thread) self.test_loss_list=[[] for _ in range(self.thread)] self.test_acc_list=[[] for _ in range(self.thread)] - if self.PO==3: - self.batch_shape0=int(self.shape0/self.thread) - self.shape0=self.batch_shape0 - + self.epoch=np.zeros(self.thread) + self.total_epoch=np.zeros(self.thread) + self.time=np.zeros(self.thread) + self.total_time=np.zeros(self.thread) return @@ -171,13 +160,9 @@ def end(self): def loss_acc(self,output=None,labels_batch=None,loss=None,test_batch=None,total_loss=None,total_acc=None,t=None): if self.batch!=None: total_loss+=loss - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: batch_acc=self.nn.accuracy(output,labels_batch) total_acc+=batch_acc - except AttributeError: - pass return total_loss,total_acc elif self.ol==None: loss=loss.numpy() @@ -189,9 +174,7 @@ def loss_acc(self,output=None,labels_batch=None,loss=None,test_batch=None,total_ self.train_loss_list[t].append(loss.astype(np.float32)) self.train_loss[t]=loss self.train_loss[t]=self.train_loss[t].astype(np.float32) - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: if self.thread==None: acc=self.nn.accuracy(output,self.train_labels) acc=acc.numpy() @@ -204,386 +187,85 @@ def loss_acc(self,output=None,labels_batch=None,loss=None,test_batch=None,total_ self.train_acc_list[t].append(acc.astype(np.float32)) self.train_acc[t]=acc self.train_acc[t]=self.train_acc[t].astype(np.float32) - except AttributeError: - pass if self.test_flag==True: if self.thread==None: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: self.test_acc_list.append(self.test_acc) - except AttributeError: - pass else: self.test_loss[t],self.test_acc[t]=self.test(self.test_data,self.test_labels,test_batch,t) self.test_loss_list[t].append(self.test_loss[t]) - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: self.test_acc_list[t].append(self.test_acc[t]) - except 
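This revert (patch 45) drops the pluggable self.core backend and goes back to importing tensorflow as tf directly. For orientation, driving the class looks roughly like this; my_net and the arrays are hypothetical placeholders, and the nn object must expose the fp/loss/param/opt surface the kernel probes:

    k = kernel(nn=my_net)                       # my_net: user-defined network
    k.data(train_x, train_y, test_x, test_y)    # hypothetical arrays
    k.train(batch=32, epoch=10, test_batch=256)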
AttributeError: - pass return - def data_func(self,data_batch=None,labels_batch=None,batch=None,index1=None,index2=None,j=None,flag=None,t=None): - if self.PO==3 and t+1!=self.thread and self.shape0%self.thread!=0: - _index1=self.thread*self.batch_shape0 - _index2=self.batch_shape0-(self.shape0-self.thread*self.batch_shape0) - elif self.PO==3: - _index1=t*self.batch_shape0 - _index2=(t+1)*self.batch_shape0 - if flag==None: - if type(self.train_data)==list: - if self.PO!=3: + def _train(self,batch=None,data_batch=None,labels_batch=None,test_batch=None,t=None): + if self.end_loss!=None or self.end_acc!=None or self.end_test_loss!=None or self.end_test_acc!=None: + self._param=self.nn.param + if batch!=None: + total_loss=0 + total_acc=0 + batches=int((self.shape0-self.shape0%batch)/batch) + for j in range(batches): + index1=j*batch + index2=(j+1)*batch + if type(self.train_data)==list: for i in range(len(self.train_data)): if batch!=1: data_batch[i]=self.train_data[i][index1:index2] else: data_batch[i]=self.train_data[i][j] else: - for i in range(len(self.train_data)): - if batch!=1: - if t+1!=self.thread and self.shape0%self.thread!=0: - data_batch[i]=self.train_data[i][index1:index2] - else: - data_batch[i]=self.train_data[i][_index1:_index2][index1:index2] - else: - if t+1!=self.thread and self.shape0%self.thread!=0: - data_batch[i]=self.train_data[i][j] - else: - data_batch[i]=self.train_data[i][_index1:_index2][j] - else: - if self.PO!=3: if batch!=1: data_batch=self.train_data[index1:index2] else: data_batch=self.train_data[j] - else: - if batch!=1: - if t+1!=self.thread and self.shape0%self.thread!=0: - data_batch=self.train_data[index1:index2] - else: - data_batch=self.train_data[_index1:_index2][index1:index2] - else: - if t+1!=self.thread and self.shape0%self.thread!=0: - data_batch=self.train_data[j] - else: - data_batch=self.train_data[_index1:_index2][j] - if type(self.train_labels)==list: - if self.PO!=3: + if type(self.train_labels)==list: for i in range(len(self.train_data)): if batch!=1: labels_batch[i]=self.train_labels[i][index1:index2] else: labels_batch[i]=self.train_labels[i][j] else: - for i in range(len(self.train_data)): - if batch!=1: - if t+1!=self.thread and self.shape0%self.thread!=0: - labels_batch[i]=self.train_labels[i][index1:index2] - else: - labels_batch[i]=self.train_labels[i][_index1:_index2][index1:index2] - else: - if t+1!=self.thread and self.shape0%self.thread!=0: - labels_batch[i]=self.train_labels[i][j] - else: - labels_batch[i]=self.train_labels[i][_index1:_index2][j] - else: - if self.PO!=3: if batch!=1: labels_batch=self.train_labels[index1:index2] else: labels_batch=self.train_labels[j] - else: - if batch!=1: - if t+1!=self.thread and self.shape0%self.thread!=0: - labels_batch=self.train_labels[index1:index2] - else: - labels_batch=self.train_labels[_index1:_index2][index1:index2] - else: - if t+1!=self.thread and self.shape0%self.thread!=0: - labels_batch=self.train_labels[j] - else: - labels_batch=self.train_labels[_index1:_index2][j] - else: - try: - if type(self.train_data)==list: - if self.PO!=3: - for i in range(len(self.train_data)): - data_batch[i]=self.core.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) - else: - for i in range(len(self.train_data)): - if t+1!=self.thread and self.shape0%self.thread!=0: - data_batch[i]=self.core.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) - else: - 
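The data_func being deleted here handed each PO==3 worker a contiguous shard through _index1/_index2. The shard arithmetic it used, worked through on small numbers; the shape0 % threads leftover rows were special-cased by the branch above:

    shape0, threads = 10, 3
    chunk = shape0 // threads       # batch_shape0: 3 rows per worker
    t = 1
    _index1 = t * chunk             # 3
    _index2 = (t + 1) * chunk       # 6: worker 1 owns rows [3, 6)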
data_batch[i]=self.core.concat([self.train_data[i][_index1:_index2][index1:],self.train_data[i][_index1:_index2][:index2]],0) - else: - if self.PO!=3: - data_batch=self.core.concat([self.train_data[index1:],self.train_data[:index2]],0) - else: - if t+1!=self.thread and self.shape0%self.thread!=0: - data_batch=self.core.concat([self.train_data[index1:],self.train_data[:index2]],0) - else: - data_batch=self.core.concat([self.train_data[_index1:_index2][index1:],self.train_data[_index1:_index2][:index2]],0) - if type(self.train_labels)==list: - if self.PO!=3: - for i in range(len(self.train_data)): - labels_batch[i]=self.core.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) - else: - if t+1!=self.thread and self.shape0%self.thread!=0: - for i in range(len(self.train_data)): - labels_batch[i]=self.core.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) - else: - for i in range(len(self.train_data)): - labels_batch[i]=self.core.concat([self.train_labels[i][_index1:_index2][index1:],self.train_labels[i][_index1:_index2][:index2]],0) - else: - if self.PO!=3: - labels_batch=self.core.concat([self.train_labels[index1:],self.train_labels[:index2]],0) - else: - if t+1!=self.thread and self.shape0%self.thread!=0: - labels_batch=self.core.concat([self.train_labels[index1:],self.train_labels[:index2]],0) - else: - labels_batch=self.core.concat([self.train_labels[_index1:_index2][index1:],self.train_labels[_index1:_index2][:index2]],0) - except: - if type(self.train_data)==list: - if self.PO!=3: - for i in range(len(self.train_data)): - data_batch[i]=self.core.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) - else: - if t+1!=self.thread and self.shape0%self.thread!=0: - for i in range(len(self.train_data)): - data_batch[i]=self.core.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) - else: - for i in range(len(self.train_data)): - data_batch[i]=self.core.concat([self.train_data[i][_index1:_index2][index1:],self.train_data[i][_index1:_index2][:index2]],0) - else: - if self.PO!=3: - data_batch=self.core.concat([self.train_data[index1:],self.train_data[:index2]],0) - else: - if t+1!=self.thread and self.shape0%self.thread!=0: - data_batch=self.core.concat([self.train_data[index1:],self.train_data[:index2]],0) - else: - data_batch=self.core.concat([self.train_data[_index1:_index2][index1:],self.train_data[_index1:_index2][:index2]],0) - if type(self.train_labels)==list: - if self.PO!=3: - for i in range(len(self.train_data)): - labels_batch[i]=self.core.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) - else: - if t+1!=self.thread and self.shape0%self.thread!=0: - for i in range(len(self.train_data)): - labels_batch[i]=self.core.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) - else: - for i in range(len(self.train_data)): - labels_batch[i]=self.core.concat([self.train_labels[i][_index1:_index2][index1:],self.train_labels[i][_index1:_index2][:index2]],0) - else: - if self.PO!=3: - labels_batch=self.core.concat([self.train_labels[index1:],self.train_labels[:index2]],0) - else: - if t+1!=self.thread and self.shape0%self.thread!=0: - labels_batch=self.core.concat([self.train_labels[index1:],self.train_labels[:index2]],0) - else: - labels_batch=self.core.concat([self.train_labels[_index1:_index2][index1:],self.train_labels[_index1:_index2][:index2]],0) - return data_batch,labels_batch - - - @function - def tf_opt(self,data,labels,t=None): - try: - if self.nn.gradient!=None: - pass - 
try: - if self.thread==None: - output=self.nn.fp(data) - else: - output=self.nn.fp(data,t) - loss=self.nn.loss(output,labels) - except TypeError: - if self.thread==None: - output,loss=self.nn.fp(data,labels) - else: - output,loss=self.nn.fp(data,labels,t) - except AttributeError: - with self.core.GradientTape() as tape: try: + if self.nn.gradient!=None: + pass if self.thread==None: - output=self.nn.fp(data) - else: - output=self.nn.fp(data,t) - loss=self.nn.loss(output,labels) - except TypeError: - if self.thread==None: - output,loss=self.nn.fp(data,labels) + output=self.nn.fp(data_batch) else: - output,loss=self.nn.fp(data,labels,t) - if self.ol==None: - try: - if self.thread==None: - if self.nn.opt!=None: - pass - gradient=tape.gradient(loss,self.nn.param) - self.nn.opt.apply_gradients(zip(gradient,self.nn.param)) - else: - if self.nn.opt!=None: - pass - gradient=tape.gradient(loss,self.nn.param[t]) - self.nn.opt[t].apply_gradients(zip(gradient,self.nn.param[t])) - except AttributeError: - if self.thread==None: - gradient=self.nn.gradient(output,loss,self.nn.param) - self.nn.oopt(gradient,self.nn.param) - else: - gradient=self.nn.gradient(output,loss,self.nn.param[t]) - self.nn.oopt(gradient,self.nn.param,t) - else: - if self.thread_lock!=None: + output=self.nn.fp(data_batch,t) + batch_loss=self.nn.loss(output,labels_batch) + except AttributeError: + with tf.GradientTape() as tape: + if self.thread==None: + output=self.nn.fp(data_batch) + else: + output=self.nn.fp(data_batch,t) + batch_loss=self.nn.loss(output,labels_batch) try: - if self.nn.opt!=None: - pass - if self.PO==1: - gradient=tape.gradient(loss,self.nn.param) + if self.thread==None: + if self.nn.opt!=None: + pass + gradient=tape.gradient(batch_loss,self.nn.param) self.nn.opt.apply_gradients(zip(gradient,self.nn.param)) else: - self.thread_lock[0].acquire() - self.param=self.nn.param - self.gradient=tape.gradient(loss,self.param) - self.thread_lock[0].release() - self.thread_lock[1].acquire() - self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) - self.thread_lock[1].release() + if self.nn.opt!=None: + pass + gradient=tape.gradient(batch_loss,self.nn.param[t]) + self.nn.opt[t].apply_gradients(zip(gradient,self.nn.param[t])) except AttributeError: - if self.PO==1: - gradient=self.nn.gradient(output,loss,self.nn.param) + if self.thread==None: + gradient=self.nn.gradient(batch_loss,self.nn.param) self.nn.oopt(gradient,self.nn.param) else: - self.thread_lock[0].acquire() - self.param=self.nn.param - self.gradient=self.nn.gradient(output,loss,self.param) - self.thread_lock[0].release() - self.thread_lock[1].acquire() - self.nn.oopt(self.gradient,self.nn.param) - self.thread_lock[1].release() - else: - try: - if self.nn.opt!=None: - pass - gradient=tape.gradient(loss,self.nn.param) - self.nn.opt.apply_gradients(zip(gradient,self.nn.param)) - except AttributeError: - gradient=self.nn.gradient(output,loss,self.nn.param) - self.nn.oopt(gradient,self.nn.param) - return output,loss - - - @function - def tf_opt_t(self,data,labels,t): - try: - if self.nn.gradient!=None: - pass - try: - output=self.nn.fp(data) - loss=self.nn.loss(output,labels) - except TypeError: - output,loss=self.nn.fp(data,labels) - except AttributeError: - with self.core.GradientTape() as tape: - try: - output=self.nn.fp(data) - loss=self.nn.loss(output,labels) - except TypeError: - output,loss=self.nn.fp(data,labels) - if self.PO==1: - try: - if self.nn.opt!=None: - pass - gradient=tape.gradient(loss,self.nn.param) - 
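With tf_opt gone, the revert re-inlines the raw tf.GradientTape pattern at every call site. The pattern itself, reduced to a runnable one-weight example:

    import tensorflow as tf

    w = tf.Variable(1.0)
    opt = tf.keras.optimizers.SGD(learning_rate=0.1)
    with tf.GradientTape() as tape:        # record the forward pass
        loss = (3.0 * w - 6.0) ** 2        # fp and loss collapsed together
    grads = tape.gradient(loss, [w])       # d(loss)/dw = -18 at w = 1
    opt.apply_gradients(zip(grads, [w]))   # w steps from 1.0 to 2.8, toward w = 2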
self.nn.opt.apply_gradients(zip(gradient,self.nn.param)) - except AttributeError: - gradient=self.nn.gradient(output,loss,self.nn.param) - try: - self.nn.oopt(self.gradient,self.nn.param,t) - except TypeError: - self.nn.oopt(self.gradient,self.nn.param) - else: - self.thread_lock[0].acquire() - self.param=self.nn.param - try: - if self.nn.opt!=None: - pass - self.gradient=tape.gradient(loss,self.param) - except AttributeError: - self.gradient=self.nn.gradient(output,loss,self.param) - self.thread_lock[0].release() - self.thread_lock[1].acquire() - try: - if self.nn.opt!=None: - pass - self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) - except AttributeError: - try: - self.nn.oopt(self.gradient,self.nn.param,t) - except TypeError: - self.nn.oopt(self.gradient,self.nn.param) - self.thread_lock[1].release() - return output,loss - - - def opt(self,data,labels,t=None): - try: - if self.core.DType!=None: - pass - output,loss=self.tf_opt(data,labels,t) - except AttributeError: - if self.thread==None: - output=self.nn.fp(data) - else: - output=self.nn.fp(data,t) - loss=self.nn.loss(output,labels) - self.nn.opt.zero_grad() - loss.backward() - self.nn.opt.step() - return output,loss - - - def opt_t(self,data,labels,t): - try: - if self.core.DType!=None: - pass - output,loss=self.tf_opt_t(data,labels,t) - return output,loss - except AttributeError: - pass - return - - - def _train(self,batch=None,_data_batch=None,_labels_batch=None,test_batch=None,t=None): - if self.stop==True: - self.stop_func() - if batch!=None: - total_loss=0 - total_acc=0 - batches=int((self.shape0-self.shape0%batch)/batch) - for j in range(batches): - self.suspend_func() - index1=j*batch - index2=(j+1)*batch - data_batch,labels_batch=self.data_func(_data_batch,_labels_batch,batch,index1,index2,j) - try: - output,batch_loss=self.opt(data_batch,labels_batch,t) - except: - while True: - try: - output,batch_loss=self.opt(data_batch,labels_batch,t) - break - except: - if self.thread==None: - _try=input('\nCore unsuccessfully be replaced,try again?:') - if _try==True: - continue - else: - return + gradient=self.nn.gradient(batch_loss,self.nn.param[t]) + self.nn.oopt(gradient,self.nn.param,t) total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=batch_loss,total_loss=total_loss,total_acc=total_acc,t=t) if self.thread==None: try: @@ -599,21 +281,49 @@ def _train(self,batch=None,_data_batch=None,_labels_batch=None,test_batch=None,t batches+=1 index1=batches*batch index2=batch-(self.shape0-batches*batch) - data_batch,labels_batch=self.data_func(_data_batch,_labels_batch,batch,index1,index2,flag=True) + if type(self.train_data)==list: + for i in range(len(self.train_data)): + data_batch[i]=tf.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) + else: + data_batch=tf.concat([self.train_data[index1:],self.train_data[:index2]],0) + if type(self.train_labels)==list: + for i in range(len(self.train_data)): + labels_batch[i]=tf.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) + else: + labels_batch=tf.concat([self.train_labels[index1:],self.train_labels[:index2]],0) try: - output,batch_loss=self.opt(data_batch,labels_batch,t) - except: - while True: - try: - output,batch_loss=self.opt(data_batch,labels_batch,t) - break - except: - if self.thread==None: - _try=input('\nCore unsuccessfully be replaced,try again?:') - if _try==True: - continue - else: - return + if self.nn.gradient!=None: + pass + if self.thread==None: + output=self.nn.fp(data_batch) + else: + 
output=self.nn.fp(data_batch,t) + batch_loss=self.nn.loss(output,labels_batch) + except AttributeError: + with tf.GradientTape() as tape: + if self.thread==None: + output=self.nn.fp(data_batch) + else: + output=self.nn.fp(data_batch,t) + batch_loss=self.nn.loss(output,labels_batch) + try: + if self.thread==None: + if self.nn.opt!=None: + pass + gradient=tape.gradient(batch_loss,self.nn.param) + self.nn.opt.apply_gradients(zip(gradient,self.nn.param)) + else: + if self.nn.opt: + pass + gradient=tape.gradient(batch_loss,self.nn.param[t]) + self.nn.opt[t].apply_gradients(zip(gradient,self.nn.param[t])) + except AttributeError: + if self.thread==None: + gradient=self.nn.gradient(batch_loss,self.nn.param) + self.nn.oopt(gradient,self.param) + else: + gradient=self.nn.gradient(batch_loss,self.nn.param[t]) + self.nn.oopt(gradient,self.nn.param,t) total_loss,total_acc=self.loss_acc(output=output,labels_batch=labels_batch,loss=batch_loss,total_loss=total_loss,total_acc=total_acc,t=t) if self.thread==None: try: @@ -626,12 +336,8 @@ def _train(self,batch=None,_data_batch=None,_labels_batch=None,test_batch=None,t except AttributeError: pass loss=total_loss.numpy()/batches - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: train_acc=total_acc/batches - except AttributeError: - pass if self.thread==None: self.train_loss_list.append(loss.astype(np.float32)) self.train_loss=loss @@ -640,9 +346,7 @@ def _train(self,batch=None,_data_batch=None,_labels_batch=None,test_batch=None,t self.train_loss_list[t].append(loss.astype(np.float32)) self.train_loss[t]=loss self.train_loss[t]=self.train_loss[t].astype(np.float32) - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: if self.thread==None: self.train_acc_list.append(train_acc.astype(np.float32)) self.train_acc=train_acc @@ -651,72 +355,105 @@ def _train(self,batch=None,_data_batch=None,_labels_batch=None,test_batch=None,t self.train_acc_list[t].append(train_acc.astype(np.float32)) self.train_acc[t]=train_acc self.train_acc[t]=self.train_acc[t].astype(np.float32) - except AttributeError: - pass if self.test_flag==True: if self.thread==None: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: self.test_acc_list.append(self.test_acc) - except AttributeError: - pass else: self.test_loss[t],self.test_acc[t]=self.test(self.test_data,self.test_labels,test_batch,t) self.test_loss_list[t].append(self.test_loss[t]) - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: self.test_acc_list[t].append(self.test_acc[t]) - except AttributeError: - pass elif self.ol==None: - self.suspend_func() try: - output,train_loss=self.opt(self.train_data,self.train_labels,t) - except: - while True: - try: - output,train_loss=self.opt(self.train_data,self.train_labels,t) - break - except: - if self.thread==None: - _try=input('\nCore unsuccessfully be replaced,try again?:') - if _try==True: - continue - else: - return + if self.nn.gradient!=None: + if self.thread==None: + output=self.nn.fp(self.train_data) + else: + output=self.nn.fp(data_batch,t) + train_loss=self.nn.loss(output,self.train_labels) + except AttributeError: + with tf.GradientTape() as tape: + if self.thread==None: + output=self.nn.fp(self.train_data) + else: + output=self.nn.fp(data_batch,t) + train_loss=self.nn.loss(output,self.train_labels) + try: + if self.thread==None: + if self.nn.opt!=None: + pass + 
gradient=tape.gradient(train_loss,self.nn.param) + self.nn.opt.apply_gradients(zip(gradient,self.nn.param)) + else: + if self.nn.opt: + pass + gradient=tape.gradient(train_loss,self.nn.param[t]) + self.nn.opt.apply_gradients(zip(gradient,self.nn.param[t])) + except AttributeError: + if self.thread==None: + gradient=self.nn.gradient(train_loss,self.nn.param) + self.nn.oopt(gradient,self.nn.param) + else: + gradient=self.nn.gradient(batch_loss,self.nn.param[t]) + self.nn.oopt(gradient,self.nn.param,t) self.loss_acc(output=output,labels_batch=labels_batch,loss=train_loss,test_batch=test_batch,total_loss=total_loss,total_acc=total_acc,t=t) else: - self.suspend_func() data=self.ol() try: - output,_=self.opt(data[0],output) - except: - while True: - try: - output,_=self.opt(data[0],output) - break - except: - if self.thread==None: - _try=input('\nCore unsuccessfully be replaced,try again?:') - if _try==True: - continue - else: - return - train_loss=self.nn.loss(output,data[1]) + if self.nn.gradient!=None: + output=self.nn.fp(data[0]) + train_loss=self.nn.loss(output,data[1]) + except AttributeError: + with tf.GradientTape() as tape: + output=self.nn.fp(data[0]) + train_loss=self.nn.loss(output,data[1]) + if self.thread_lock!=None: + try: + if self.nn.opt!=None: + pass + if self.PO==1: + gradient=tape.gradient(train_loss,self.nn.param) + self.nn.opt.apply_gradients(zip(gradient,self.nn.param)) + else: + self.thread_lock[0].acquire() + self.param=self.nn.param + self.gradient=tape.gradient(train_loss,self.param) + self.thread_lock[0].release() + self.thread_lock[1].acquire() + self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) + self.thread_lock[1].release() + except AttributeError: + if self.PO==1: + self.gradient=self.nn.gradient(train_loss,self.nn.param) + self.nn.oopt(self.gradient,self.nn.param) + else: + self.thread_lock[0].acquire() + self.gradient=self.nn.gradient(train_loss,self.nn.param) + self.thread_lock[0].release() + self.thread_lock[1].acquire() + self.nn.oopt(self.gradient,self.nn.param) + self.thread_lock[1].release() + else: + try: + if self.nn.opt!=None: + pass + self.apply_gradient(tape,self.nn.opt,train_loss,self.nn.param) + except AttributeError: + gradient=self.nn.gradient(train_loss,self.nn.param) + self.nn.oopt(gradient,self.nn.param) loss=train_loss.numpy() if self.thread_lock!=None: - self.thread_lock.acquire() + self.thread_lock[2].acquire() self.nn.train_loss=loss.astype(np.float32) try: self.nn.ec+=1 except AttributeError: pass self.total_epoch+=1 - self.thread_lock.release() + self.thread_lock[2].release() else: self.nn.train_loss=loss.astype(np.float32) try: @@ -724,229 +461,272 @@ def _train(self,batch=None,_data_batch=None,_labels_batch=None,test_batch=None,t except AttributeError: pass self.total_epoch+=1 - if self.stop==True: - self.stop_flag=1 return - def train_(self,_data_batch=None,_labels_batch=None,batch=None,batches=None,test_batch=None,index1=None,index2=None,j=None,t=None): - if self.stop==True: - self.stop_func() + def train_(self,data_batch=None,labels_batch=None,batch=None,batches=None,test_batch=None,index1=None,index2=None,j=None,t=None): + if self.end_loss!=None or self.end_acc!=None or self.end_test_loss!=None or self.end_test_acc!=None: + self._param=self.nn.param if batch!=None: if index1==batches*batch: - data_batch,labels_batch=self.data_func(_data_batch,_labels_batch,batch,index1,index2,j,True,t) - try: - output,batch_loss=self.opt_t(data_batch,labels_batch,t) - except: - while True: - try: - 
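The thread_lock-guarded section above is the standard guard for shared state (nn.train_loss, nn.ec, total_epoch) that several workers update concurrently. The idiom in isolation, runnable:

    import threading

    lock = threading.Lock()
    state = {'total_epoch': 0}

    def bump():
        with lock:               # guard the read-modify-write
            state['total_epoch'] += 1

    workers = [threading.Thread(target=bump) for _ in range(8)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(state['total_epoch'])  # 8 every time; an unguarded += can lose updates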
output,batch_loss=self.opt_t(data_batch,labels_batch,t) - break - except: - continue - try: - self.nn.bc[t]+=1 - except AttributeError: - pass + if type(self.train_data)==list: + for i in range(len(self.train_data)): + data_batch[i]=tf.concat([self.train_data[i][index1:],self.train_data[i][:index2]],0) + else: + data_batch=tf.concat([self.train_data[index1:],self.train_data[:index2]],0) + if type(self.train_labels)==list: + for i in range(len(self.train_data)): + labels_batch[i]=tf.concat([self.train_labels[i][index1:],self.train_labels[i][:index2]],0) + else: + labels_batch=tf.concat([self.train_labels[index1:],self.train_labels[:index2]],0) if self.PO==1: + with tf.GradientTape() as tape: + self.output=self.nn.fp(data_batch) + self.batch_loss=self.nn.loss(self.output,labels_batch) + try: + if self.nn.opt!=None: + pass + self.gradient=tape.gradient(self.batch_loss,self.nn.param) + except AttributeError: + self.gradient=self.nn.gradient(tape,self.batch_loss,self.nn.param) try: - if self.nn.accuracy!=None: + if self.nn.opt!=None: pass - batch_acc=self.nn.accuracy(output,labels_batch) + self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) + except AttributeError: + self.nn.oopt(self.gradient,self.param,t) + if self.acc_flag1==1: + self.batch_acc=self.nn.accuracy(self.output,labels_batch) + try: + self.nn.bc=j except AttributeError: pass else: + self.thread_lock[0].acquire() + self.param=self.nn.param + with tf.GradientTape() as tape: + self.output=self.nn.fp(data_batch) + self.batch_loss=self.nn.loss(self.output,labels_batch) try: - if self.nn.accuracy!=None: + if self.nn.opt!=None: pass - self.thread_lock[0].acquire() - batch_acc=self.nn.accuracy(output,labels_batch) - self.thread_lock[0].release() + self.gradient=tape.gradient(self.batch_loss,self.param) + except AttributeError: + self.gradient=self.nn.gradient(tape,self.batch_loss,self.param) + self.thread_lock[0].release() + self.thread_lock[1].acquire() + try: + if self.nn.opt!=None: + pass + self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) + except AttributeError: + self.nn.oopt(self.gradient,self.nn.param,t) + if self.acc_flag1==1: + self.batch_acc=self.nn.accuracy(self.output,labels_batch) + try: + self.nn.bc+=1 except AttributeError: pass + if self.acc_flag1==1: + return self.batch_loss,self.batch_acc + else: + return self.batch_loss + self.thread_lock[1].release() + if type(self.train_data)==list: + for i in range(len(self.train_data)): + if batch!=1: + data_batch[i]=self.train_data[i][index1:index2] + else: + data_batch[i]=self.train_data[i][j] + else: + if batch!=1: + data_batch=self.train_data[index1:index2] + else: + data_batch=self.train_data[j] + if type(self.train_labels)==list: + for i in range(len(self.train_data)): + if batch!=1: + labels_batch[i]=self.train_labels[i][index1:index2] + else: + labels_batch[i]=self.train_labels[i][j] + else: + if batch!=1: + labels_batch=self.train_labels[index1:index2] + else: + labels_batch=self.train_labels[j] + if self.PO==1: + with tf.GradientTape() as tape: + self.output=self.nn.fp(data_batch) + self.batch_loss=self.nn.loss(self.output,labels_batch) try: - if self.nn.accuracy!=None: + if self.nn.opt!=None: pass - return batch_loss,batch_acc + self.gradient=tape.gradient(self.batch_loss,self.nn.param) except AttributeError: - return batch_loss,None - data_batch,labels_batch=self.data_func(_data_batch,_labels_batch,batch,index1,index2,j,t=t) - try: - output,batch_loss=self.opt_t(data_batch,labels_batch,t) - except: - while True: - try: - 
output,batch_loss=self.opt_t(data_batch,labels_batch,t) - break - except: - continue - if self.PO==1: + self.gradient=self.nn.gradient(tape,self.batch_loss,self.nn.param) try: - if self.nn.accuracy!=None: + if self.nn.opt!=None: pass - batch_acc=self.nn.accuracy(output,labels_batch) + self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) except AttributeError: - pass + self.nn.oopt(self.gradient,self.nn.param,t) + if self.acc_flag1==1: + self.batch_acc=self.nn.accuracy(self.output,labels_batch) try: - self.nn.bc[t]=j + self.nn.bc=j except AttributeError: pass else: + self.thread_lock[0].acquire() + self.param=self.nn.param + with tf.GradientTape() as tape: + self.output=self.nn.fp(data_batch) + self.batch_loss=self.nn.loss(self.output,labels_batch) try: - if self.nn.accuracy!=None: + if self.nn.opt!=None: pass - self.thread_lock[0].acquire() - batch_acc=self.nn.accuracy(output,labels_batch) - self.thread_lock[0].release() + self.gradient=tape.gradient(self.batch_loss,self.param) except AttributeError: - pass + self.gradient=self.nn.gradient(tape,self.batch_loss,self.param) + self.thread_lock[0].release() + self.thread_lock[1].acquire() try: - self.nn.bc[t]=j + if self.nn.opt!=None: + pass + self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) + except AttributeError: + self.nn.oopt(self.gradient,self.nn.param,t) + if self.acc_flag1==1: + self.batch_acc=self.nn.accuracy(self.output,labels_batch) + try: + self.nn.bc=j except AttributeError: pass - try: - if self.nn.accuracy!=None: - pass - return batch_loss,batch_acc - except AttributeError: - return batch_loss,None + if self.acc_flag1==1: + return self.batch_loss,self.batch_acc + else: + return self.batch_loss + self.thread_lock[1].release() else: - output,train_loss=self.opt_t(self.train_data,self.train_labels,t) if self.PO==1: - self.loss=train_loss.numpy() - self.train_loss_list.append(self.loss.astype(np.float32)) - self.train_loss=self.loss.astype(np.float32) + with tf.GradientTape() as tape: + self.output=self.nn.fp(self.train_data) + self._train_loss=self.nn.loss(self.output,self.train_labels) + try: + if self.nn.opt!=None: + pass + self.gradient=tape.gradient(self._train_loss,self.nn.param) + except AttributeError: + self.gradient=self.nn.gradient(tape,self._train_loss,self.nn.param) try: - if self.nn.accuracy!=None: + if self.nn.opt!=None: pass - self.acc=self.nn.accuracy(output,self.train_labels) + self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) + except AttributeError: + self.nn.oopt(self.gradient,self.nn.param) + self.loss=self._train_loss.numpy() + self.train_loss_list.append(self.loss.astype(np.float32)) + self.train_loss=self.loss + self.train_loss=self.train_loss.astype(np.float32) + if self.acc_flag1==1: + self.acc=self.nn.accuracy(self.output,self.train_labels) self.acc=self.acc.numpy() self.train_acc_list.append(self.acc.astype(np.float32)) - self.train_acc=self.acc.astype(np.float32) - except AttributeError: - pass + self.train_acc=self.acc + self.train_acc=self.train_acc.astype(np.float32) if self.test_flag==True: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: self.test_acc_list.append(self.test_acc) - except AttributeError: - pass else: + self.thread_lock[0].acquire() + self.param=self.nn.param + with tf.GradientTape() as tape: + self.output=self.nn.fp(self.train_data) + self._train_loss=self.nn.loss(self.output,self.train_labels) + try: + if 
self.nn.opt!=None: + pass + self.gradient=tape.gradient(self._train_loss,self.param) + except AttributeError: + self.gradient=self.nn.gradient(tape,self._train_loss,self.param) + self.thread_lock[0].release() self.thread_lock[1].acquire() - self._train_loss=train_loss.numpy() - self.train_loss_list.append(self._train_loss.astype(np.float32)) - self.train_loss=self._train_loss.astype(np.float32) try: - if self.nn.accuracy!=None: + if self.nn.opt!=None: pass - self.acc=self.nn.accuracy(output,self.train_labels) + self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) + except AttributeError: + self.nn.oopt(self.gradient,self.nn.param,t) + self.loss=self._train_loss.numpy() + self.train_loss_list.append(self.loss.astype(np.float32)) + self.train_loss=self.loss + self.train_loss=self.train_loss.astype(np.float32) + if self.acc_flag1==1: + self.acc=self.nn.accuracy(self.output,self.train_labels) self.acc=self.acc.numpy() self.train_acc_list.append(self.acc.astype(np.float32)) - self.train_acc=self.acc.astype(np.float32) - except AttributeError: - pass + self.train_acc=self.acc + self.train_acc=self.train_acc.astype(np.float32) if self.test_flag==True: self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) self.test_loss_list.append(self.test_loss) - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: self.test_acc_list.append(self.test_acc) - except AttributeError: - pass self.thread_lock[1].release() - if self.stop==True: - self.stop_flag=1 return def _train_(self,batch=None,data_batch=None,labels_batch=None,test_batch=None,t=None): - if self.stop==True: - self.stop_func() total_loss=0 total_acc=0 batches=int((self.shape0-self.shape0%batch)/batch) - if batch!=None: - for j in range(batches): - self.suspend_func() - index1=j*batch - index2=(j+1)*batch - if self.PO==1: - self.thread_lock.acquire() - batch_loss,batch_acc=self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,j,t) - self.thread_lock.release() - else: - batch_loss,batch_acc=self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,j,t) - try: - if self.nn.accuracy!=None: - pass - total_loss+=batch_loss - total_acc+=batch_acc - except AttributeError: - total_loss+=batch_loss - if self.shape0%batch!=0: - batches+=1 - index1=batches*batch - index2=batch-(self.shape0-batches*batch) - if self.PO==1: - self.thread_lock.acquire() - batch_loss,batch_acc=self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,None,t) - self.thread_lock.release() - else: - batch_loss,batch_acc=self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,None,t) - try: - if self.nn.accuracy!=None: - pass - total_loss+=batch_loss - total_acc+=batch_acc - except AttributeError: - total_loss+=batch_loss - loss=total_loss.numpy()/batches - try: - if self.nn.accuracy!=None: - pass - train_acc=total_acc.numpy()/batches - except AttributeError: - pass - self.thread_lock[1].acquire() - self.train_loss_list.append(loss.astype(np.float32)) - self.train_loss=loss.astype(np.float32) - try: - if self.nn.accuracy!=None: - pass - self.train_acc_list.append(train_acc.astype(np.float32)) - self.train_acc=train_acc.astype(np.float32) - except AttributeError: - pass - if self.test_flag==True: - self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) - self.test_loss_list.append(self.test_loss) - try: - if self.nn.accuracy!=None: - pass - self.test_acc_list.append(self.test_acc) - except AttributeError: - pass - 
self.thread_lock[1].release() - return - else: - if self.PO==1: - self.thread_lock.acquire() - batch_loss,batch_acc=self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,j,t) - self.thread_lock.release() + for j in range(batches): + index1=j*batch + index2=(j+1)*batch + if self.acc_flag1==1: + self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,j,t) + total_loss+=self.batch_loss + total_acc+=self.batch_acc else: - batch_loss,batch_acc=self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,j,t) - return + self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,j,t) + total_loss+=self.batch_loss + if self.shape0%batch!=0: + batches+=1 + index1=batches*batch + index2=batch-(self.shape0-batches*batch) + self.train_(data_batch,labels_batch,batch,batches,test_batch,index1,index2,j,t) + if self.acc_flag1==1: + total_loss+=self.batch_loss + total_acc+=self.batch_acc + else: + total_loss+=self.batch_loss + loss=total_loss.numpy()/batches + if self.acc_flag1==1: + train_acc=total_acc.numpy()/batches + self.train_loss_list.append(loss.astype(np.float32)) + self.train_loss=loss + self.train_loss=self.train_loss.astype(np.float32) + if self.acc_flag1==1: + self.train_acc_list.append(train_acc.astype(np.float32)) + self.train_acc=train_acc + self.train_acc=self.train_acc.astype(np.float32) + if self.test_flag==True: + self.test_loss,self.test_acc=self.test(self.test_data,self.test_labels,test_batch) + self.test_loss_list.append(self.test_loss) + if self.acc_flag1==1: + self.test_acc_list.append(self.test_acc) + return def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s=None): + self.train_flag=True self.batch=batch self.epoch=0 t1=None t2=None + t=None self.train_counter+=1 if p==None: self.p=9 @@ -965,16 +745,8 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s labels_batch=[x for x in range(len(self.train_labels))] else: labels_batch=None - if self.thread!=None: - try: - t=self.t.pop() - except IndexError: - print('\nError,please add thread.') - return if epoch!=None: for i in range(epoch): - if self.stop==True: - self.stop_func() t1=time.time() if self.thread==None: try: @@ -983,21 +755,21 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s pass else: try: - self.nn.ec[self.t[-1]]+=1 - except: + self.nn.ec[t]+=1 + except AttributeError: pass if self.thread==None: self._train(batch,data_batch,labels_batch,test_batch) else: - if self.PO!=None: + t=self.t.pop() + if self.PO==1: + self.thread_lock.acquire() + self._train_(batch,data_batch,labels_batch,test_batch,t) + self.thread_lock.release() + elif self.PO!=None: self._train_(batch,data_batch,labels_batch,test_batch,t) else: self._train(batch,data_batch,labels_batch,test_batch,t) - if self.stop==True: - if self.stop_flag==1: - self.stop_func() - else: - return if type(self.total_epoch)!=list: if self.thread_lock!=None: if type(self.thread_lock)!=list: @@ -1005,9 +777,9 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s self.total_epoch+=1 self.thread_lock.release() else: - self.thread_lock[1].acquire() + self.thread_lock[2].acquire() self.total_epoch+=1 - self.thread_lock[1].release() + self.thread_lock[2].release() else: self.epoch+=1 self.total_epoch+=1 @@ -1043,12 +815,15 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s if save!=None and i%s==0: self.save(self.total_epoch,one) t2=time.time() - if 
type(self.time)!=list: + if self.thread==None: self.time+=(t2-t1) else: self.time[t]+=(t2-t1) + if self.end_flag==True and self.end()==True: + self.nn.param=self._param + self._param=None + break elif self.ol==None: - self.suspend_func() i=0 while True: t1=time.time() @@ -1056,7 +831,11 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s self._train(test_batch=test_batch) else: t=self.t.pop() - if self.PO!=None: + if self.PO==1: + self.thread_lock.acquire() + self._train_(test_batch=test_batch,t=t) + self.thread_lock.release() + elif self.PO!=None: self._train_(test_batch=test_batch,t=t) else: self._train(test_batch=test_batch,t=t) @@ -1068,9 +847,9 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s self.total_epoch+=1 self.thread_lock.release() else: - self.thread_lock[1].acquire() + self.thread_lock[2].acquire() self.total_epoch+=1 - self.thread_lock[1].release() + self.thread_lock[2].release() else: self.epoch+=1 self.total_epoch+=1 @@ -1089,7 +868,7 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s s=epoch/(self.s+1) s=int(s) if p==0: - p=1 + p=epoch if s==0: s=1 if i%p==0: @@ -1116,24 +895,17 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s except AttributeError: pass t2=time.time() - if type(self.time)!=list: + if self.thread==None: self.time+=(t2-t1) else: self.time[t]+=(t2-t1) + if self.end_flag==True and self.end()==True: + self.nn.param=self._param + self._param=None + break else: while True: self._train() - data=self.ol() - output=self.nn.fp(data[0]) - train_loss=self.nn.loss(output,data[1]) - loss=train_loss.numpy() - self.nn.train_loss.append(loss.astype(np.float32)) - if save!=None: - self.save() - try: - self.nn.ec+=1 - except AttributeError: - pass if save!=None: self.save() if self.thread==None: @@ -1156,10 +928,8 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s print('last loss:{0:.6f}'.format(self.train_loss)) else: print('last loss:{0:.6f},last test loss:{1:.6f}'.format(self.train_loss,self.test_loss)) - try: - if self.nn.accuracy!=None: - pass - if self.acc_flag=='%': + if self.acc_flag1==1: + if self.acc_flag2=='%': if self.test_flag==False: print('accuracy:{0:.1f}'.format(self.train_acc*100)) else: @@ -1169,9 +939,8 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s print('accuracy:{0:.6f}'.format(self.train_acc)) else: print('accuracy:{0:.6f},test_flag accuracy:{1:.6f}'.format(self.train_acc,self.test_acc)) - except AttributeError: - pass print('time:{0}s'.format(self.time)) + self.train_flag=False return @@ -1202,150 +971,68 @@ def test(self,test_data,test_labels,batch=None,t=None): labels_batch[i]=test_labels[i][index1:index2] else: labels_batch=test_labels[index1:index2] - if self.thread==None or t==None: + if self.thread==None: output=self.nn.fp(data_batch) else: output=self.nn.fp(data_batch,t) batch_loss=self.nn.loss(output,labels_batch) total_loss+=batch_loss - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: batch_acc=self.nn.accuracy(output,labels_batch) total_acc+=batch_acc - except AttributeError: - pass if shape0%batch!=0: batches+=1 index1=batches*batch index2=batch-(shape0-batches*batch) - try: - if type(test_data)==list: - for i in range(len(test_data)): - data_batch[i]=self.core.concat([test_data[i][index1:],test_data[i][:index2]],0) - else: - data_batch=self.core.concat([test_data[index1:],test_data[:index2]],0) - if 
type(self.test_labels)==list: - for i in range(len(test_labels)): - labels_batch[i]=self.core.concat([test_labels[i][index1:],test_labels[i][:index2]],0) - else: - labels_batch=self.core.concat([test_labels[index1:],test_labels[:index2]],0) - except: - if type(test_data)==list: - for i in range(len(test_data)): - data_batch[i]=self.core.concat([test_data[i][index1:],test_data[i][:index2]],0) - else: - data_batch=self.core.concat([test_data[index1:],test_data[:index2]],0) - if type(self.test_labels)==list: - for i in range(len(test_labels)): - labels_batch[i]=self.core.concat([test_labels[i][index1:],test_labels[i][:index2]],0) - else: - labels_batch=self.core.concat([test_labels[index1:],test_labels[:index2]],0) - if self.thread==None or t==None: + if type(test_data)==list: + for i in range(len(test_data)): + data_batch[i]=tf.concat([test_data[i][index1:],test_data[i][:index2]],0) + else: + data_batch=tf.concat([test_data[index1:],test_data[:index2]],0) + if type(self.test_labels)==list: + for i in range(len(test_labels)): + labels_batch[i]=tf.concat([test_labels[i][index1:],test_labels[i][:index2]],0) + else: + labels_batch=tf.concat([test_labels[index1:],test_labels[:index2]],0) + if self.thread==None: output=self.nn.fp(data_batch) else: output=self.nn.fp(data_batch,t) batch_loss=self.nn.loss(output,labels_batch) total_loss+=batch_loss - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: batch_acc=self.nn.accuracy(output,labels_batch) total_acc+=batch_acc - except AttributeError: - pass test_loss=total_loss.numpy()/batches test_loss=test_loss test_loss=test_loss.astype(np.float32) - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: test_acc=total_acc.numpy()/batches test_acc=test_acc test_acc=test_acc.astype(np.float32) - except AttributeError: - pass else: - if self.thread==None or t==None: + if self.thread==None: output=self.nn.fp(test_data) else: output=self.nn.fp(test_data,t) test_loss=self.nn.loss(output,test_labels) - try: - if self.nn.accuracy!=None: - pass + if self.acc_flag1==1: test_acc=self.nn.accuracy(output,test_labels) test_loss=test_loss.numpy().astype(np.float32) test_acc=test_acc.numpy().astype(np.float32) - except AttributeError: - pass - if self.thread==None or t==None: + if self.thread==None: print('test loss:{0:.6f}'.format(test_loss)) - try: - if self.nn.accuracy!=None: - pass - if self.acc_flag=='%': + if self.acc_flag1==1: + if self.acc_flag2=='%': print('accuracy:{0:.1f}'.format(test_acc*100)) else: print('accuracy:{0:.6f}'.format(test_acc)) - if self.acc_flag=='%': + if self.acc_flag2=='%': return test_loss,test_acc*100 else: return test_loss,test_acc - except AttributeError: - return test_loss - - - def suspend_func(self): - if self.suspend==True: - if self.thread==None: - if self.save_epoch==None: - print('Training have suspended.') - else: - self._save() - while True: - if self.suspend==False: - if self.thread==None: - print('Training have continued.') - break - return - - - def stop_func(self): - if self.thread_lock==None: - if self.save_flag==True: - self.save(self.total_epoch,True) - print('\nSystem have stopped training,Neural network have been saved.') - return else: - print('\nSystem have stopped training.') - return - elif self.end() and self.end_flag==True: - self.thread_lock.acquire() - self.save(self.total_epoch,True) - self.stop_flag=2 - self.thread_lock.release() - return - else: - if self.save_flag==True: - self.thread_lock.acquire() - self.save(self.total_epoch,True) - self.stop_flag=2 - self.thread_lock.release() - 
return - else: - return - - - def _save(self): - if self.save_epoch==self.total_epoch: - self.save(self.total_epoch,False) - self.save_epoch=None - print('\nNeural network have saved and training have suspended.') - return - elif self.save_epoch!=None and self.save_epoch>self.total_epoch: - print('\nsave_epoch>total_epoch') - return + return test_loss def train_info(self): @@ -1364,7 +1051,7 @@ def train_info(self): print('-------------------------------------') print() print('train loss:{0:.6f}'.format(self.train_loss)) - if self.acc_flag=='%': + if self.acc_flag2=='%': print('train acc:{0:.1f}'.format(self.train_acc*100)) else: print('train acc:{0:.6f}'.format(self.train_acc)) @@ -1374,12 +1061,12 @@ def train_info(self): def test_info(self): print() print('test loss:{0:.6f}'.format(self.test_loss)) - if self.acc_flag=='%': + if self.acc_flag2=='%': print('test acc:{0:.1f}'.format(self.test_acc*100)) else: print('test acc:{0:.6f}'.format(self.test_acc)) return - + def info(self): self.train_info() @@ -1388,8 +1075,8 @@ def info(self): print('-------------------------------------') self.test_info() return - - + + def train_visual(self): print() plt.figure(1) @@ -1397,7 +1084,6 @@ def train_visual(self): plt.title('train loss') plt.xlabel('epoch') plt.ylabel('loss') - print('train loss:{0:.6f}'.format(self.train_loss)) try: if self.nn.accuracy!=None: pass @@ -1406,10 +1092,11 @@ def train_visual(self): plt.title('train acc') plt.xlabel('epoch') plt.ylabel('acc') - if self.acc_flag=='%': + print('train loss:{0:.6f}'.format(self.train_loss)) + if self.acc_flag2=='%': print('train acc:{0:.1f}'.format(self.train_acc*100)) else: - print('train acc:{0:.6f}'.format(self.train_acc)) + print('train acc:{0:.6f}'.format(self.train_acc)) except AttributeError: pass return @@ -1422,7 +1109,6 @@ def test_visual(self): plt.title('test loss') plt.xlabel('epoch') plt.ylabel('loss') - print('test loss:{0:.6f}'.format(self.test_loss)) try: if self.nn.accuracy!=None: pass @@ -1431,10 +1117,11 @@ def test_visual(self): plt.title('test acc') plt.xlabel('epoch') plt.ylabel('acc') - if self.acc_flag=='%': + print('test loss:{0:.6f}'.format(self.test_loss)) + if self.acc_flag2=='%': print('test acc:{0:.1f}'.format(self.test_acc*100)) else: - print('test acc:{0:.6f}'.format(self.test_acc)) + print('test acc:{0:.6f}'.format(self.test_acc)) except AttributeError: pass return @@ -1449,7 +1136,6 @@ def comparison(self): plt.title('loss') plt.xlabel('epoch') plt.ylabel('loss') - print('train loss:{0}'.format(self.train_loss)) plt.legend() try: if self.nn.accuracy!=None: @@ -1462,10 +1148,11 @@ def comparison(self): plt.xlabel('epoch') plt.ylabel('acc') plt.legend() - if self.acc_flag=='%': + print('train loss:{0}'.format(self.train_loss)) + if self.acc_flag2=='%': print('train acc:{0:.1f}'.format(self.train_acc*100)) else: - print('train acc:{0:.6f}'.format(self.train_acc)) + print('train acc:{0:.6f}'.format(self.train_acc)) except AttributeError: pass if self.test_flag==True: @@ -1473,7 +1160,7 @@ def comparison(self): print('-------------------------------------') print() print('test loss:{0:.6f}'.format(self.test_loss)) - if self.acc_flag=='%': + if self.acc_flag2=='%': print('test acc:{0:.1f}'.format(self.test_acc*100)) else: print('test acc:{0:.6f}'.format(self.test_acc)) @@ -1488,8 +1175,6 @@ def save_p(self): def save(self,i=None,one=True): - if self.stop_flag==2: - return if one==True: output_file=open('save.dat','wb') parameter_file=open('parameter.dat','wb') @@ -1501,7 +1186,8 @@ def 
save(self,i=None,one=True): os.remove(self.file_list[0][0]) os.remove(self.file_list[0][1]) pickle.dump(self.nn.param,parameter_file) - self.nn.param=None + if self.train_flag==False: + self.nn.param=None try: if self.nn.opt: pass @@ -1524,7 +1210,8 @@ def save(self,i=None,one=True): pickle.dump(self.end_acc,output_file) pickle.dump(self.end_test_loss,output_file) pickle.dump(self.end_test_acc,output_file) - pickle.dump(self.acc_flag,output_file) + pickle.dump(self.acc_flag1,output_file) + pickle.dump(self.acc_flag2,output_file) pickle.dump(self.p,output_file) pickle.dump(self.s,output_file) pickle.dump(self.file_list,output_file) @@ -1544,8 +1231,6 @@ def save(self,i=None,one=True): pickle.dump(self.total_time,output_file) output_file.close() parameter_file.close() - if self.stop==True and self.stop_flag!=None: - print('\nSystem have stopped,Neural network have saved.') return @@ -1567,7 +1252,8 @@ def restore(self,s_path,p_path): self.end_acc=pickle.load(input_file) self.end_test_loss=pickle.load(input_file) self.end_test_acc=pickle.load(input_file) - self.acc_flag=pickle.load(input_file) + self.acc_flag1=pickle.load(input_file) + self.acc_flag2=pickle.load(input_file) self.p=pickle.load(input_file) self.s=pickle.load(input_file) self.file_list=pickle.load(input_file) From 4ba70ae94957939e7ed2dc3752b1ba76b4079a00 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 29 Jul 2022 20:19:54 +0800 Subject: [PATCH 46/99] Update nc.py --- Note/create/nc.py | 112 +++++++++++++++++++++++----------------------- 1 file changed, 57 insertions(+), 55 deletions(-) diff --git a/Note/create/nc.py b/Note/create/nc.py index 7afca891f..4b147f982 100644 --- a/Note/create/nc.py +++ b/Note/create/nc.py @@ -3,6 +3,7 @@ def __init__(self,filename): self.filename=filename self.init={'z':'tf.zeros(','n':'tf.random.normal(','u':'tf.random.uniform(','o':'tf.ones('} self.operator=['.*','.^','.|','.||','./','.='] + self._operator=['*','^','|','||','/','='] self.define=dict() self.define_list=None self.index_list=None @@ -13,6 +14,7 @@ def __init__(self,filename): self.oj5='' self.oj6=['',''] self.line='' + self.test=[] def tf_function(self,oj1=None,oj2=None,oj3=None,oj4=None,oj5=None,oj6=None,init=None): @@ -33,23 +35,9 @@ def tf_function(self,oj1=None,oj2=None,oj3=None,oj4=None,oj5=None,oj6=None,init= return 'state_ops.assign('+oj6[0]+','+oj6[1]+')' - def concat(self,string_list): - line='' - for i in range(len(string_list)): - if i!=len(string_list)-1: - if i==0: - line+=self.line[string_list[i][0]-1:]+self.line.replace(self.line[string_list[i][0]:],string_list[i][2])+self.line[string_list[i][1]+1:string_list[i+1][0]] - else: - line+=string_list[i][2]+self.line[string_list[i][1]+1:string_list[i+1][0]] - else: - line+=string_list[i][2]+self.line[string_list[i][1]+1:] - self.line=line - return - - def getchar(self,line): self.line=line - string_list=[] + flag=None if len(self.define)!=0: index=[define in line for define in self.define] for i in range(len(index)): @@ -61,102 +49,116 @@ def getchar(self,line): break for i in range(len(line)): if self.line[i]=='.' 
and self.line[i+1]==' ': - if '=' in self.line: + if 'tf.Variable' not in self.line and '=' in self.line: indexf=self.line.find('=')+1 init=self.line[indexf] + elif 'tf.Variable' in self.line: + indexf=self.line.find('(')+1 + init=self.line[indexf] else: indexf=self.line.find('r')+7 init=self.line[indexf] self.oj1[1]='tf.'+self.line[indexf+1:i] - elif self.line[i]=='[': - index1=i - elif self.line[i]==']': - self.oj1[0]=self.line[index1:i+1]+',' - indexl=i - elif self.line[i]=='(': - self.oj1[2]=self.line[i+1]+',' - elif self.line[i]==')': - self.oj1[3]=self.line[i-1] - indexl=i - else: - line=self.tf_function(oj1=self.oj1,init=init)+self.index[indexl+1:] - self.line=self.line.replace(self.line[indexf:],line) - self.oj1=['','','',''] + flag=True + continue + if flag==True: + if self.line[i]=='[': + index1=i + elif self.line[i]==']': + self.oj1[0]=self.line[index1:i+1]+',' + indexl=i + elif self.line[i]=='(': + if self.line[i+1]!=init: + self.oj1[2]=self.line[i+1]+',' + elif self.line[i]==')': + if self.line[i+1]!='\n': + self.oj1[3]=self.line[i-1] + indexl=i + line=self.tf_function(oj1=self.oj1,init=init)+self.line[indexl+1:] + self.line=self.line.replace(self.line[indexf:],line) + self.oj1=['','','',''] + flag=False + return + continue if self.line[i]=='(': index1=i continue - elif self.line[i]=='.': + elif self.line[i]=='.' and (self.line[i+1] in self._operator or self.line[i+1:i+3] in self._operator): index2=i continue - elif self.line[i]==')' and index1!=None: - index3=i + elif self.line[i]==')': + try: + if self.line[index2+2:i]==self.line[index2+2:i] or self.self.line[index2+3:i]==self.line[index2+2:i]: + index3=i + except IndexError: + pass if '.*' in self.line[index1:index3+1]: - self.oj1[0]=self.line[index1+1:index2] + self.oj2[0]=self.line[index1+1:index2] self.oj2[1]=self.line[index2+2:index3] string=self.tf_function(oj2=self.oj2) - string_list.append([index1,index3,string]) + self.line=self.line.replace(self.line[index1:index3+1],string) if '.^' in self.line[index1:index3+1]: self.oj2=self.line[index1+1:index2] string=self.tf_function(oj3=self.oj3) - string_list.append([index1,index3,string]) + self.line=self.line.replace(self.line[index1:index3+1],string) if '.|' in self.line[index1:index3+1]: self.oj4[0]=self.line[index1+1:index2] self.oj4[1]=self.line[index2+2:index3] self.oj4[2]='.|' string=self.tf_function(oj4=self.oj4) - string_list.append([index1,index3,string]) + self.line=self.line.replace(self.line[index1:index3+1],string) if '.||' in self.line[index1:index3+1]: self.oj4[0]=self.line[index1+1:index2] self.oj4[1]=self.line[index2+3:index3] self.oj4[2]='.||' string=self.tf_function(oj4=self.oj4) - string_list.append([index1,index3,string]) + self.line=self.line.replace(self.line[index1:index3+1],string) if './' in self.line[index1:index3+1]: self.oj5=self.line[index1+1:index2] string=self.tf_function(oj5=self.oj5) - string_list.append([index1,index3,string]) + self.line=self.line.replace(self.line[index1:index3+1],string) if '.=' in self.line[index1:index3+1]: self.oj6[0]=self.line[index1+1:index2] self.oj6[1]=self.line[index2+2:index3] string=self.tf_function(oj6=self.oj6) - string_list.append([index1,index3,string]) - index1=None - self.concat(string_list) + self.line=self.line.replace(self.line[index1:index3+1],string) return def readlines(self,line): - return self.getchar(line) + self.getchar(line) + return def writelines(self): + flag=None outfile=self.filename - outfile=outfile.replace(outfile[outfile.rfind('n')],'py') + outfile=outfile[:outfile.rfind('.')]+'.py' 
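A quick check of the filename rewrite just above, outside the patch: slicing at the last '.' swaps any extension for '.py', whereas the replace()-based line being removed rewrote every occurrence of the matched character. The sample name is arbitrary.

filename='neural.network.n'
old=filename.replace(filename[filename.rfind('n')],'py')   # replaces every 'n'
new=filename[:filename.rfind('.')]+'.py'
print(old)   # pyeural.pyetwork.py -- mangled
print(new)   # neural.network.py -- intended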
outfile=open(outfile,'w') with open(self.filename) as infile: while 1: line=infile.readline() + if line=='': + break + if line=='"""' or line=="'''": + flag=False + outfile.write(line) + elif flag==False: + outfile.write(line) + if line=='"""' or line=="'''": + flag=True + continue if 'define. ' in line: self.define[line[line.find(' ')+1,line.rfine(' ')]]=line[line.rfind(' ')+1:] continue if len(self.define)!=0 and self.define_list==None: self.define_list=[define for define in self.define] - if ('. ' in line and "'" not in line or '"' not in line) or ("'" not in line and True in [operator in line for operator in self.operator]): + if ('. ' in line and ("'" not in line or '"' not in line)) or ("'" not in line and True in [operator in line for operator in self.operator]): self.readlines(line) outfile.write(self.line) self.line='' elif '#' in line: outfile.write(line) - elif line=='"""' or line=="'''": - flag=False - outfile.write(line) - elif flag==False: - outfile.write(line) - elif flag==False and line=='"""' or line=="'''": - flag=True - outfile.write(line) - elif line=='': - break else: outfile.write(line) outfile.close() From 35a7d604b85d27eb8f63657e95903ea632537eff Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sat, 30 Jul 2022 13:25:32 +0800 Subject: [PATCH 47/99] Update kernel.py --- Note/create/RL/st/kernel.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Note/create/RL/st/kernel.py b/Note/create/RL/st/kernel.py index 81325061f..c7a377b09 100644 --- a/Note/create/RL/st/kernel.py +++ b/Note/create/RL/st/kernel.py @@ -31,6 +31,7 @@ def __init__(self,nn=None,state=None,state_name=None,action_name=None,save_episo self.pool_size=None self.batch=None self.update_step=None + self.train_flag=None self.end_loss=None self.save_episode=save_episode self.loss_list=[] @@ -520,6 +521,7 @@ def learn2(self): def learn(self,episode_num,save=None,one=True,p=None,s=None): + self.train_flag=True if p==None and s==None: self.p=9 self.s=2 @@ -550,7 +552,7 @@ def learn(self,episode_num,save=None,one=True,p=None,s=None): e=d*self.s if i%d==0: print('episode num:{0} loss:{1:.6f}'.format(i+1,loss)) - if path!=None and i%e==0: + if save!=None and i%e==0: self.save(i,one) self.epi_num+=1 self.total_episode+=1 @@ -704,6 +706,7 @@ def learn(self,episode_num,save=None,one=True,p=None,s=None): print() print('last loss:{0:.6f}'.format(loss)) print('time:{0}s'.format(self.time)) + self.train_flag=False return @@ -749,7 +752,8 @@ def save(self,i=None,one=True): episode_file.close() self.episode_num=self.epi_num pickle.dump(self.nn.param,parameter_file) - self.nn.param=None + if self.train_flag==False: + self.nn.param=None self.nn.opt=None pickle.dump(self.nn,output_file) pickle.dump(self.opt.get_config(),output_file) From dc08d418aa501113060f780d3896bea919006f1e Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sat, 30 Jul 2022 13:41:44 +0800 Subject: [PATCH 48/99] Update nc.py --- Note/create/nc.py | 104 +++++++++++++++++++++++----------------------- 1 file changed, 53 insertions(+), 51 deletions(-) diff --git a/Note/create/nc.py b/Note/create/nc.py index 4b147f982..5e6614344 100644 --- a/Note/create/nc.py +++ b/Note/create/nc.py @@ -48,80 +48,82 @@ def getchar(self,line): else: break for i in range(len(line)): - if self.line[i]=='.' 
and self.line[i+1]==' ': - if 'tf.Variable' not in self.line and '=' in self.line: - indexf=self.line.find('=')+1 - init=self.line[indexf] - elif 'tf.Variable' in self.line: - indexf=self.line.find('(')+1 - init=self.line[indexf] - else: - indexf=self.line.find('r')+7 - init=self.line[indexf] - self.oj1[1]='tf.'+self.line[indexf+1:i] - flag=True - continue + try: + if line[i]=='.' and line[i+1]==' ': + if 'tf.Variable' not in line and '=' in line: + indexf=line.find('=')+1 + init=line[indexf] + elif 'tf.Variable' in line: + indexf=line.find('(')+1 + init=line[indexf] + else: + indexf=line.find('r')+7 + init=line[indexf] + self.oj1[1]='tf.'+line[indexf+1:i] + flag=True + continue + except IndexError: + pass if flag==True: - if self.line[i]=='[': + if line[i]=='[': index1=i - elif self.line[i]==']': - self.oj1[0]=self.line[index1:i+1]+',' + elif line[i]==']': + self.oj1[0]=line[index1:i+1]+',' indexl=i - elif self.line[i]=='(': - if self.line[i+1]!=init: - self.oj1[2]=self.line[i+1]+',' - elif self.line[i]==')': - if self.line[i+1]!='\n': - self.oj1[3]=self.line[i-1] + elif line[i]=='(': + if line[i+1]!=init: + self.oj1[2]=line[i+1]+',' + elif line[i]==')': + if line[i+1]!='\n': + self.oj1[3]=line[i-1] indexl=i - line=self.tf_function(oj1=self.oj1,init=init)+self.line[indexl+1:] - self.line=self.line.replace(self.line[indexf:],line) + string=self.tf_function(oj1=self.oj1,init=init) + self.line=self.line.replace(line[indexf:indexl+1],string) self.oj1=['','','',''] flag=False - return continue - if self.line[i]=='(': + if line[i]=='(': index1=i continue - elif self.line[i]=='.' and (self.line[i+1] in self._operator or self.line[i+1:i+3] in self._operator): + elif line[i]=='.' and (line[i+1] in self._operator or line[i+1:i+3] in self._operator): index2=i continue - elif self.line[i]==')': + elif line[i]==')': try: - if self.line[index2+2:i]==self.line[index2+2:i] or self.self.line[index2+3:i]==self.line[index2+2:i]: + if line[index2+2:i]==line[index2+2:i] or line[index2+3:i]==line[index2+2:i]: index3=i except IndexError: pass - if '.*' in self.line[index1:index3+1]: - self.oj2[0]=self.line[index1+1:index2] - self.oj2[1]=self.line[index2+2:index3] + if '.*' in line[index1:index3+1]: + self.oj2[0]=line[index1+1:index2] + self.oj2[1]=line[index2+2:index3] string=self.tf_function(oj2=self.oj2) - self.line=self.line.replace(self.line[index1:index3+1],string) - if '.^' in self.line[index1:index3+1]: - self.oj2=self.line[index1+1:index2] + self.line=self.line.replace(line[index1:index3+1],string) + if '.^' in line[index1:index3+1]: + self.oj2=line[index1+1:index2] string=self.tf_function(oj3=self.oj3) - self.line=self.line.replace(self.line[index1:index3+1],string) - if '.|' in self.line[index1:index3+1]: - self.oj4[0]=self.line[index1+1:index2] - self.oj4[1]=self.line[index2+2:index3] + self.line=self.line.replace(line[index1:index3+1],string) + if '.|' in line[index1:index3+1]: + self.oj4[0]=line[index1+1:index2] + self.oj4[1]=line[index2+2:index3] self.oj4[2]='.|' string=self.tf_function(oj4=self.oj4) - self.line=self.line.replace(self.line[index1:index3+1],string) - if '.||' in self.line[index1:index3+1]: - self.oj4[0]=self.line[index1+1:index2] - self.oj4[1]=self.line[index2+3:index3] + self.line=self.line.replace(line[index1:index3+1],string) + if '.||' in line[index1:index3+1]: + self.oj4[0]=line[index1+1:index2] + self.oj4[1]=line[index2+3:index3] self.oj4[2]='.||' string=self.tf_function(oj4=self.oj4) - self.line=self.line.replace(self.line[index1:index3+1],string) - if './' in 
self.line[index1:index3+1]: - self.oj5=self.line[index1+1:index2] + self.line=self.line.replace(line[index1:index3+1],string) + if './' in line[index1:index3+1]: + self.oj5=line[index1+1:index2] string=self.tf_function(oj5=self.oj5) - self.line=self.line.replace(self.line[index1:index3+1],string) - if '.=' in self.line[index1:index3+1]: - self.oj6[0]=self.line[index1+1:index2] - self.oj6[1]=self.line[index2+2:index3] + self.line=self.line.replace(line[index1:index3+1],string) + if '.=' in line[index1:index3+1]: + self.oj6[0]=line[index1+1:index2] + self.oj6[1]=line[index2+2:index3] string=self.tf_function(oj6=self.oj6) - self.line=self.line.replace(self.line[index1:index3+1],string) + self.line=self.line.replace(line[index1:index3+1],string) return From 8f53aa68386c2f040dd20b93bdf0999909baf28e Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sat, 30 Jul 2022 18:27:55 +0800 Subject: [PATCH 49/99] Update nc.py --- Note/create/nc.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/Note/create/nc.py b/Note/create/nc.py index 5e6614344..5b02e1c27 100644 --- a/Note/create/nc.py +++ b/Note/create/nc.py @@ -88,19 +88,15 @@ def getchar(self,line): elif line[i]=='.' and (line[i+1] in self._operator or line[i+1:i+3] in self._operator): index2=i continue - elif line[i]==')': - try: - if line[index2+2:i]==line[index2+2:i] or line[index2+3:i]==line[index2+2:i]: - index3=i - except IndexError: - pass + elif line[i]==')' and index1!=None and index2!=None: + index3=i if '.*' in line[index1:index3+1]: self.oj2[0]=line[index1+1:index2] self.oj2[1]=line[index2+2:index3] string=self.tf_function(oj2=self.oj2) self.line=self.line.replace(line[index1:index3+1],string) if '.^' in line[index1:index3+1]: - self.oj2=line[index1+1:index2] + self.oj3=line[index1+1:index2] string=self.tf_function(oj3=self.oj3) self.line=self.line.replace(line[index1:index3+1],string) if '.|' in line[index1:index3+1]: @@ -124,6 +120,7 @@ def getchar(self,line): self.oj6[1]=line[index2+2:index3] string=self.tf_function(oj6=self.oj6) self.line=self.line.replace(line[index1:index3+1],string) + index1,index2,index3=None,None,None return From f2b6960161401526ce84a58c65e36c8c5e1f4de0 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sat, 30 Jul 2022 22:57:48 +0800 Subject: [PATCH 50/99] Update nc.py --- Note/create/nc.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Note/create/nc.py b/Note/create/nc.py index 5b02e1c27..4af271c36 100644 --- a/Note/create/nc.py +++ b/Note/create/nc.py @@ -35,16 +35,15 @@ def tf_function(self,oj1=None,oj2=None,oj3=None,oj4=None,oj5=None,oj6=None,init= return 'state_ops.assign('+oj6[0]+','+oj6[1]+')' - def getchar(self,line): + def getchar(self,line,index): self.line=line flag=None if len(self.define)!=0: - index=[define in line for define in self.define] for i in range(len(index)): if index[i]==True: while 1: - if self.define_list[i] in line: - line=line.replace(self.define_list[i],self.define[self.define_list[i]]) + if self.define_list[i] in self.line: + self.line=self.line.replace(self.define_list[i],self.define[self.define_list[i]]) else: break for i in range(len(line)): @@ -124,8 +123,8 @@ def getchar(self,line): return - def readlines(self,line): - self.getchar(line) + def readlines(self,line,index): + self.getchar(line,index) return @@ -148,12 +147,13 @@ def writelines(self): flag=True continue if 'define. 
' in line: - self.define[line[line.find(' ')+1,line.rfine(' ')]]=line[line.rfind(' ')+1:] + self.define[line[line.find(' ')+1:line.rfind(' ')]]=line[line.rfind(' ')+1:-1] continue if len(self.define)!=0 and self.define_list==None: self.define_list=[define for define in self.define] - if ('. ' in line and ("'" not in line or '"' not in line)) or ("'" not in line and True in [operator in line for operator in self.operator]): - self.readlines(line) + index=[define in line for define in self.define] + if ('. ' in line and ("'" not in line or '"' not in line)) or ("'" not in line and True in [operator in line for operator in self.operator]) or True in index: + self.readlines(line,index) outfile.write(self.line) self.line='' elif '#' in line: From 070bc58ed1d672c85fe89d526aa0da891be1a025 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Mon, 1 Aug 2022 19:01:19 +0800 Subject: [PATCH 51/99] Update kernel.py --- Note/create/kernel.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 48ebb3193..91322990d 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -4,6 +4,8 @@ import pickle import os import time + + class kernel: def __init__(self,nn=None): if nn!=None: @@ -734,6 +736,7 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s self.p=p-1 if s==None: self.s=1 + self.file_list=None else: self.s=s-1 self.file_list=[] From 841de883c65f5ac43774bfc4003189a1ad92193d Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 2 Aug 2022 13:47:17 +0800 Subject: [PATCH 52/99] Update kernel.py --- Note/create/kernel.py | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 91322990d..f131b1f7f 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -1,4 +1,5 @@ import tensorflow as tf +from tensorflow.python.ops import state_ops import numpy as np import matplotlib.pyplot as plt import pickle @@ -1038,6 +1039,12 @@ def test(self,test_data,test_labels,batch=None,t=None): return test_loss + def assign(self): + for i in range(len(self.nn.model.weights)): + state_ops.assign(self.nn.model.weights[i],self.nn.param[i]) + return + + def train_info(self): print() print('batch:{0}'.format(self.batch)) @@ -1196,15 +1203,36 @@ def save(self,i=None,one=True): pass opt=self.nn.opt self.nn.opt=None - pickle.dump(self.nn,output_file) + try: + if self.nn.model!=None: + pass + model=self.nn.model + pickle.dump(self.nn.param,parameter_file) + self.nn.model=model + except: + pickle.dump(self.nn,output_file) self.nn.opt=opt except AttributeError: try: - pickle.dump(self.nn,output_file) + try: + if self.nn.model!=None: + pass + model=self.nn.model + pickle.dump(self.nn.param,parameter_file) + self.nn.model=model + except: + pickle.dump(self.nn,output_file) except: opt=self.nn.oopt self.nn.oopt=None - pickle.dump(self.nn,output_file) + try: + if self.nn.model!=None: + pass + model=self.nn.model + pickle.dump(self.nn.param,parameter_file) + self.nn.model=model + except: + pickle.dump(self.nn,output_file) self.nn.oopt=opt pickle.dump(opt.get_config(),output_file) pickle.dump(self.ol,output_file) From d96a1b5f7de8749447126d79d25849579dff58bc Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 4 Aug 2022 20:58:43 +0800 Subject: [PATCH 53/99] 
Update kernel.py --- Note/create/kernel.py | 96 +++++++++++++++++++++++-------------------- 1 file changed, 51 insertions(+), 45 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index f131b1f7f..0dc074d2e 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -1,5 +1,4 @@ import tensorflow as tf -from tensorflow.python.ops import state_ops import numpy as np import matplotlib.pyplot as plt import pickle @@ -1039,12 +1038,6 @@ def test(self,test_data,test_labels,batch=None,t=None): return test_loss - def assign(self): - for i in range(len(self.nn.model.weights)): - state_ops.assign(self.nn.model.weights[i],self.nn.param[i]) - return - - def train_info(self): print() print('batch:{0}'.format(self.batch)) @@ -1187,15 +1180,37 @@ def save_p(self): def save(self,i=None,one=True): if one==True: output_file=open('save.dat','wb') - parameter_file=open('parameter.dat','wb') + try: + if len(self.nn.model.weights)==self.nn.param: + pass + else: + parameter_file=open('param.dat','wb') + except AttributeError: + parameter_file=open('param.dat','wb') else: output_file=open('save-{0}.dat'.format(i),'wb') - parameter_file=open('parameter-{0}.dat'.format(i),'wb') - self.file_list.append(['save-{0}.dat','parameter-{0}.dat']) - if len(self.file_list)>self.s+1: - os.remove(self.file_list[0][0]) - os.remove(self.file_list[0][1]) - pickle.dump(self.nn.param,parameter_file) + try: + if len(self.nn.model.weights)==self.nn.param: + self.file_list.append(['save-{0}.dat']) + if len(self.file_list)>self.s+1: + os.remove(self.file_list[0][0]) + else: + parameter_file=open('param-{0}.dat'.format(i),'wb') + self.file_list.append(['save-{0}.dat','param-{0}.dat']) + if len(self.file_list)>self.s+1: + os.remove(self.file_list[0][0]) + os.remove(self.file_list[0][1]) + except AttributeError: + parameter_file=open('param-{0}.dat'.format(i),'wb') + self.file_list.append(['save-{0}.dat','param-{0}.dat']) + if len(self.file_list)>self.s+1: + os.remove(self.file_list[0][0]) + os.remove(self.file_list[0][1]) + try: + if len(self.nn.model.weights)!=self.nn.param: + pickle.dump(self.nn.param[:-len(self.nn.model)],parameter_file) + except AttributeError: + pickle.dump(self.nn.param,parameter_file) if self.train_flag==False: self.nn.param=None try: @@ -1203,36 +1218,15 @@ def save(self,i=None,one=True): pass opt=self.nn.opt self.nn.opt=None - try: - if self.nn.model!=None: - pass - model=self.nn.model - pickle.dump(self.nn.param,parameter_file) - self.nn.model=model - except: - pickle.dump(self.nn,output_file) + pickle.dump(self.nn,output_file) self.nn.opt=opt except AttributeError: try: - try: - if self.nn.model!=None: - pass - model=self.nn.model - pickle.dump(self.nn.param,parameter_file) - self.nn.model=model - except: - pickle.dump(self.nn,output_file) + pickle.dump(self.nn,output_file) except: opt=self.nn.oopt self.nn.oopt=None - try: - if self.nn.model!=None: - pass - model=self.nn.model - pickle.dump(self.nn.param,parameter_file) - self.nn.model=model - except: - pickle.dump(self.nn,output_file) + pickle.dump(self.nn,output_file) self.nn.oopt=opt pickle.dump(opt.get_config(),output_file) pickle.dump(self.ol,output_file) @@ -1261,17 +1255,28 @@ def save(self,i=None,one=True): pickle.dump(self.total_epoch,output_file) pickle.dump(self.total_time,output_file) output_file.close() - parameter_file.close() + try: + if len(self.nn.model.weights)==self.nn.param: + pass + else: + parameter_file.close() + except AttributeError: + parameter_file.close() return - def restore(self,s_path,p_path): + def 
restore(self,s_path,p_path=None): input_file=open(s_path,'rb') - parameter_file=open(p_path,'rb') - param=pickle.load(parameter_file) + if p_path!=None: + parameter_file=open(p_path,'rb') + param=pickle.load(parameter_file) self.nn=pickle.load(input_file) - self.nn.param=param - param=None + try: + if self.nn.model!=None: + pass + self.nn.param=param.extend(self.nn.model.weights) + except AttributeError: + self.nn.param=param try: self.nn.km=1 except AttributeError: @@ -1302,5 +1307,6 @@ def restore(self,s_path,p_path): self.total_epoch=pickle.load(input_file) self.total_time=pickle.load(input_file) input_file.close() - parameter_file.close() + if p_path!=None: + parameter_file.close() return From 97268ed84354c37c65dd7186d7da5708030c1785 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Thu, 4 Aug 2022 21:00:20 +0800 Subject: [PATCH 54/99] Update kernel.py --- Note/create/kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 0dc074d2e..15506fb62 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -1208,7 +1208,7 @@ def save(self,i=None,one=True): os.remove(self.file_list[0][1]) try: if len(self.nn.model.weights)!=self.nn.param: - pickle.dump(self.nn.param[:-len(self.nn.model)],parameter_file) + pickle.dump(self.nn.param[:-len(self.nn.model.weights)],parameter_file) except AttributeError: pickle.dump(self.nn.param,parameter_file) if self.train_flag==False: From d637c31d055b88e3067e4b75151ee08b88f2d828 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 5 Aug 2022 08:32:52 +0800 Subject: [PATCH 55/99] Update kernel.py --- Note/create/RL/st/kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Note/create/RL/st/kernel.py b/Note/create/RL/st/kernel.py index c7a377b09..191244a0c 100644 --- a/Note/create/RL/st/kernel.py +++ b/Note/create/RL/st/kernel.py @@ -590,7 +590,7 @@ def learn(self,episode_num,save=None,one=True,p=None,s=None): if save!=None and i%e==0: self.save(i,one) self.epi_num+=1 - self.total_e+=1 + self.total_eisode+=1 if self.save_episode==True: if end: episode.append('end') From 79892b30281b0b38e175418ae22a97327be5a972 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 5 Aug 2022 08:33:04 +0800 Subject: [PATCH 56/99] Update kernel.py --- Note/create/kernel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 15506fb62..934e5dce6 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -936,12 +936,12 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s if self.test_flag==False: print('accuracy:{0:.1f}'.format(self.train_acc*100)) else: - print('accuracy:{0:.1f},test_flag accuracy:{1:.1f}'.format(self.train_acc*100,self.test_acc*100)) + print('accuracy:{0:.1f},test accuracy:{1:.1f}'.format(self.train_acc*100,self.test_acc*100)) else: if self.test_flag==False: print('accuracy:{0:.6f}'.format(self.train_acc)) else: - print('accuracy:{0:.6f},test_flag accuracy:{1:.6f}'.format(self.train_acc,self.test_acc)) + print('accuracy:{0:.6f},test accuracy:{1:.6f}'.format(self.train_acc,self.test_acc)) print('time:{0}s'.format(self.time)) self.train_flag=False return From 74cf70fd5bd36b8e015822f033a3c7a0ffbd2781 Mon Sep 17 00:00:00 2001 From: 7NoteDancing 
<63648431+7NoteDancing@users.noreply.github.com> Date: Fri, 5 Aug 2022 13:43:41 +0800 Subject: [PATCH 57/99] Update kernel.py --- Note/create/RL/st/kernel.py | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/Note/create/RL/st/kernel.py b/Note/create/RL/st/kernel.py index 191244a0c..1206751dd 100644 --- a/Note/create/RL/st/kernel.py +++ b/Note/create/RL/st/kernel.py @@ -41,7 +41,6 @@ def __init__(self,nn=None,state=None,state_name=None,action_name=None,save_episo self.epi_num=0 self.episode_num=0 self.total_episode=0 - self.total_e=0 self.time=0 self.total_time=0 @@ -86,7 +85,6 @@ def set_up(self,param=None,epsilon=None,discount=None,episode_step=None,pool_siz self.epi_num=0 self.episode_num=0 self.total_episode=0 - self.total_e=0 self.time=0 self.total_time=0 return @@ -539,6 +537,8 @@ def learn(self,episode_num,save=None,one=True,p=None,s=None): for i in range(episode_num): loss,episode,end=self.learn2() self.loss_list.append(loss) + self.epi_num+=1 + self.total_episode+=1 if i==episode_num-1: self.loss_list.append(self.loss) if episode_num%10!=0: @@ -553,9 +553,7 @@ def learn(self,episode_num,save=None,one=True,p=None,s=None): if i%d==0: print('episode num:{0} loss:{1:.6f}'.format(i+1,loss)) if save!=None and i%e==0: - self.save(i,one) - self.epi_num+=1 - self.total_episode+=1 + self.save(self.total_episode,one) if self.save_episode==True: if end: episode.append('end') @@ -573,6 +571,8 @@ def learn(self,episode_num,save=None,one=True,p=None,s=None): while True: loss,episode,end=self.learn2() self.loss_list.append(loss) + self.epi_num+=1 + self.total_episode+=1 if i==episode_num-1: self.loss_list.append(self.loss) i+=1 @@ -588,9 +588,7 @@ def learn(self,episode_num,save=None,one=True,p=None,s=None): if i%d==0: print('episode num:{0} loss:{1:.6f}'.format(i+1,loss)) if save!=None and i%e==0: - self.save(i,one) - self.epi_num+=1 - self.total_eisode+=1 + self.save(self.total_episode,one) if self.save_episode==True: if end: episode.append('end') @@ -683,7 +681,7 @@ def learn(self,episode_num,save=None,one=True,p=None,s=None): self.nn.ec+=1 except AttributeError: pass - self.total_e+=1 + self.total_episode+=1 self.thread_lock.release() else: if len(self.loss_list)==0: @@ -694,7 +692,7 @@ def learn(self,episode_num,save=None,one=True,p=None,s=None): self.nn.ec+=1 except AttributeError: pass - self.total_e+=1 + self.total_episode+=1 if save!=None: self.save() self._time=self.time-int(self.time) @@ -779,7 +777,6 @@ def save(self,i=None,one=True): pickle.dump(self.s,output_file) pickle.dump(self.episode_num,output_file) pickle.dump(self.total_episode,output_file) - pickle.dump(self.total_e,output_file) pickle.dump(self.total_time,output_file) output_file.close() return @@ -823,7 +820,6 @@ def restore(self,s_path,p_path,e_path=None): self.s=pickle.load(input_file) self.episode_num=pickle.load(input_file) self.total_episode=pickle.load(input_file) - self.total_e=pickle.load(input_file) self.total_time=pickle.load(input_file) input_file.close() return From a8894deac4307e52a42c4059bd1cd014830e870d Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sat, 6 Aug 2022 14:41:13 +0800 Subject: [PATCH 58/99] Update kernel.py --- Note/create/RL/kernel.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py index 36ed1e438..97dbb6c5d 100644 --- a/Note/create/RL/kernel.py +++ b/Note/create/RL/kernel.py @@ -81,9 +81,7 @@ def 
add_threads(self,thread): t=-np.arange(-thread,1)+self.thread+1 self.t=t.extend(self.t) self.thread+=thread - if self.PO!=True: - self.loss=np.concatenate((self.train_loss,np.zeros(thread))) - self.loss_list.extend([[] for _ in range(thread)]) + self.loss=np.concatenate((self.train_loss,np.zeros(thread))) self.episode_num=np.concatenate((self.epoch,np.zeros(thread))) return @@ -251,7 +249,6 @@ def _explore(self,s,epsilon,i): if self.pool_net==True: self.thread_lock.acquire() if len(self.state_pool[index])>self.pool_size: - self.thread_lock.acquire() self.state_pool[index]=self.state_pool[index][1:] self.action_pool[index]=self.action_pool[index][1:] self.next_state_pool[index]=self.next_state_pool[index][1:] From 9e1229fe45268735fda487595169827eafc86424 Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Sun, 7 Aug 2022 09:12:01 +0800 Subject: [PATCH 59/99] Update kernel.py --- Note/create/kernel.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Note/create/kernel.py b/Note/create/kernel.py index 934e5dce6..634896d01 100644 --- a/Note/create/kernel.py +++ b/Note/create/kernel.py @@ -613,20 +613,20 @@ def train_(self,data_batch=None,labels_batch=None,batch=None,batches=None,test_b if self.PO==1: with tf.GradientTape() as tape: self.output=self.nn.fp(self.train_data) - self._train_loss=self.nn.loss(self.output,self.train_labels) + self.train_loss=self.nn.loss(self.output,self.train_labels) try: if self.nn.opt!=None: pass - self.gradient=tape.gradient(self._train_loss,self.nn.param) + self.gradient=tape.gradient(self.train_loss,self.nn.param) except AttributeError: - self.gradient=self.nn.gradient(tape,self._train_loss,self.nn.param) + self.gradient=self.nn.gradient(tape,self.train_loss,self.nn.param) try: if self.nn.opt!=None: pass self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) except AttributeError: self.nn.oopt(self.gradient,self.nn.param) - self.loss=self._train_loss.numpy() + self.loss=self.train_loss.numpy() self.train_loss_list.append(self.loss.astype(np.float32)) self.train_loss=self.loss self.train_loss=self.train_loss.astype(np.float32) @@ -646,13 +646,13 @@ def train_(self,data_batch=None,labels_batch=None,batch=None,batches=None,test_b self.param=self.nn.param with tf.GradientTape() as tape: self.output=self.nn.fp(self.train_data) - self._train_loss=self.nn.loss(self.output,self.train_labels) + self.train_loss=self.nn.loss(self.output,self.train_labels) try: if self.nn.opt!=None: pass - self.gradient=tape.gradient(self._train_loss,self.param) + self.gradient=tape.gradient(self.train_loss,self.param) except AttributeError: - self.gradient=self.nn.gradient(tape,self._train_loss,self.param) + self.gradient=self.nn.gradient(tape,self.train_loss,self.param) self.thread_lock[0].release() self.thread_lock[1].acquire() try: @@ -661,7 +661,7 @@ def train_(self,data_batch=None,labels_batch=None,batch=None,batches=None,test_b self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param)) except AttributeError: self.nn.oopt(self.gradient,self.nn.param,t) - self.loss=self._train_loss.numpy() + self.loss=self.train_loss.numpy() self.train_loss_list.append(self.loss.astype(np.float32)) self.train_loss=self.loss self.train_loss=self.train_loss.astype(np.float32) From 52b210b8d091bbca1714d36ff835c7ced5c2cc6c Mon Sep 17 00:00:00 2001 From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com> Date: Tue, 9 Aug 2022 18:50:08 +0800 Subject: [PATCH 60/99] Update kernel.py 
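The one-line change below, mirrored for the RL kernel in the patch after it, corrects the thread-id arithmetic in add_threads: the old expression produced thread+1 ids starting one past the intended offset, while the new one produces exactly thread ids continuing from self.thread. A quick check with arbitrary sample values:

import numpy as np
thread=3                                # threads being added
existing=4                              # stands in for self.thread
old=-np.arange(-thread,1)+existing+1    # array([8, 7, 6, 5]): 4 ids for 3 threads
new=-np.arange(-thread+1,1)+existing    # array([6, 5, 4]): ids 4..6, as intended
print(old,new)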
---
 Note/create/kernel.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Note/create/kernel.py b/Note/create/kernel.py
index 634896d01..5bc196334 100644
--- a/Note/create/kernel.py
+++ b/Note/create/kernel.py
@@ -103,7 +103,7 @@ def init(self,param=None):
     def add_threads(self,thread):
-        t=-np.arange(-thread,1)+self.thread+1
+        t=-np.arange(-thread+1,1)+self.thread
         self.t=t.extend(self.t)
         self.thread+=thread
         try:

From fb9d5fed9202b256778104f9d1d2c3082b87e431 Mon Sep 17 00:00:00 2001
From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com>
Date: Tue, 9 Aug 2022 18:50:22 +0800
Subject: [PATCH 61/99] Update kernel.py

---
 Note/create/RL/kernel.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py
index 97dbb6c5d..ab6c84f3c 100644
--- a/Note/create/RL/kernel.py
+++ b/Note/create/RL/kernel.py
@@ -78,7 +78,7 @@ def init(self,dtype=np.int32):
     def add_threads(self,thread):
-        t=-np.arange(-thread,1)+self.thread+1
+        t=-np.arange(-thread+1,1)+self.thread
        self.t=t.extend(self.t)
         self.thread+=thread
         self.loss=np.concatenate((self.train_loss,np.zeros(thread)))

From 1fc27133b1ba9273bf1d4b84020ce5b96dedcb1d Mon Sep 17 00:00:00 2001
From: 7NoteDancing <63648431+7NoteDancing@users.noreply.github.com>
Date: Wed, 10 Aug 2022 14:05:13 +0800
Subject: [PATCH 62/99] Update kernel.py

---
 Note/create/RL/kernel.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py
index ab6c84f3c..95e70a7ac 100644
--- a/Note/create/RL/kernel.py
+++ b/Note/create/RL/kernel.py
@@ -726,7 +726,6 @@ def learn3(self,i):
     def learn(self,epsilon,episode_num):
         i=self.t.pop()
         self.thread_lock.acquire()
-        self.thread+=1
         self._loss.append(0)
         self.thread_lock.release()
         while len(self.state_pool)

Date: Wed, 21 Sep 2022 18:19:29 +0800
Subject: [PATCH 63/99] Rename Note/create/RL/st/kernel.py to Note/create/RL/nspn/kernel.py

---
 Note/create/RL/{st => nspn}/kernel.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename Note/create/RL/{st => nspn}/kernel.py (100%)

diff --git a/Note/create/RL/st/kernel.py b/Note/create/RL/nspn/kernel.py
similarity index 100%
rename from Note/create/RL/st/kernel.py
rename to Note/create/RL/nspn/kernel.py

From 37feb01349549662b53dd27881b355a9b5357d1d Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Wed, 21 Sep 2022 18:19:47 +0800
Subject: [PATCH 64/99] Rename Note/create/kernel.py to Note/create/DL/kernel.py

---
 Note/create/{ => DL}/kernel.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename Note/create/{ => DL}/kernel.py (100%)

diff --git a/Note/create/kernel.py b/Note/create/DL/kernel.py
similarity index 100%
rename from Note/create/kernel.py
rename to Note/create/DL/kernel.py

From 2ac4f682259bb3b4b1de9803fd575bbb9f8b5a76 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Wed, 21 Sep 2022 18:20:01 +0800
Subject: [PATCH 65/99] Rename Note/create/rl.py to Note/create/RL/rl.py

---
 Note/create/{ => RL}/rl.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename Note/create/{ => RL}/rl.py (100%)

diff --git a/Note/create/rl.py b/Note/create/RL/rl.py
similarity index 100%
rename from Note/create/rl.py
rename to Note/create/RL/rl.py

From 70124eb0e482502a7b9b2509eaa977a8e0086071 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sun, 23 Oct 2022 19:29:14 +0800
Subject: [PATCH 66/99] Update version.py

---
 Note/version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Note/version.py b/Note/version.py
index 7b4273b4e..00cfc25b8 100644
--- a/Note/version.py
+++ b/Note/version.py
@@ -1,2 +1,2 @@
-version='3.0'
+version='4.0'
 date='2022.4.2'

From 1d4b490470f25251aae3acea828ae6190004d9b5 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sun, 23 Oct 2022 19:41:43 +0800
Subject: [PATCH 67/99] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 9f6cc8045..ca8942c12 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,2 @@
 # Note
-documentation:https://github.com/7NoteDancing/Note-documentation
+documentation:https://github.com/NoteDancing/Note-documentation/tree/main/Note%204.0%20documentation

From d5764764dfacaee4598405272675b18ee27c8a1a Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Tue, 1 Nov 2022 16:55:11 +0800
Subject: [PATCH 68/99] Update kernel.py

---
 Note/create/DL/kernel.py | 39 ++++++++++++++++++---------------------
 1 file changed, 18 insertions(+), 21 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 5bc196334..2cdee26fc 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -488,15 +488,15 @@ def train_(self,data_batch=None,labels_batch=None,batch=None,batches=None,test_b
             try:
                 if self.nn.opt!=None:
                     pass
-                self.gradient=tape.gradient(self.batch_loss,self.nn.param)
+                gradient=tape.gradient(self.batch_loss,self.nn.param)
             except AttributeError:
-                self.gradient=self.nn.gradient(tape,self.batch_loss,self.nn.param)
+                gradient=self.nn.gradient(tape,self.batch_loss,self.nn.param)
             try:
                 if self.nn.opt!=None:
                     pass
-                self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param))
+                self.nn.opt.apply_gradients(zip(gradient,self.nn.param))
             except AttributeError:
-                self.nn.oopt(self.gradient,self.param,t)
+                self.nn.oopt(gradient,self.param,t)
             if self.acc_flag1==1:
                 self.batch_acc=self.nn.accuracy(self.output,labels_batch)
             try:
@@ -505,16 +505,15 @@ def train_(self,data_batch=None,labels_batch=None,batch=None,batches=None,test_b
                 pass
         else:
             self.thread_lock[0].acquire()
-            self.param=self.nn.param
             with tf.GradientTape() as tape:
                 self.output=self.nn.fp(data_batch)
                 self.batch_loss=self.nn.loss(self.output,labels_batch)
             try:
                 if self.nn.opt!=None:
                     pass
-                self.gradient=tape.gradient(self.batch_loss,self.param)
+                self.gradient=tape.gradient(self.batch_loss,self.nn.param)
             except AttributeError:
-                self.gradient=self.nn.gradient(tape,self.batch_loss,self.param)
+                self.gradient=self.nn.gradient(tape,self.batch_loss,self.nn.param)
             self.thread_lock[0].release()
             self.thread_lock[1].acquire()
             try:
@@ -563,15 +562,15 @@ def train_(self,data_batch=None,labels_batch=None,batch=None,batches=None,test_b
             try:
                 if self.nn.opt!=None:
                     pass
-                self.gradient=tape.gradient(self.batch_loss,self.nn.param)
+                gradient=tape.gradient(self.batch_loss,self.nn.param)
             except AttributeError:
-                self.gradient=self.nn.gradient(tape,self.batch_loss,self.nn.param)
+                gradient=self.nn.gradient(tape,self.batch_loss,self.nn.param)
             try:
                 if self.nn.opt!=None:
                     pass
-                self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param))
+                self.nn.opt.apply_gradients(zip(gradient,self.nn.param))
             except AttributeError:
-                self.nn.oopt(self.gradient,self.nn.param,t)
+                self.nn.oopt(gradient,self.nn.param,t)
             if self.acc_flag1==1:
                self.batch_acc=self.nn.accuracy(self.output,labels_batch)
             try:
@@ -580,16 +579,15 @@ def train_(self,data_batch=None,labels_batch=None,batch=None,batches=None,test_b
                 pass
         else:
             self.thread_lock[0].acquire()
-            self.param=self.nn.param
             with tf.GradientTape() as tape:
                 self.output=self.nn.fp(data_batch)
                 self.batch_loss=self.nn.loss(self.output,labels_batch)
             try:
                 if self.nn.opt!=None:
                     pass
-                self.gradient=tape.gradient(self.batch_loss,self.param)
+                self.gradient=tape.gradient(self.batch_loss,self.nn.param)
             except AttributeError:
-                self.gradient=self.nn.gradient(tape,self.batch_loss,self.param)
+                self.gradient=self.nn.gradient(tape,self.batch_loss,self.nn.param)
             self.thread_lock[0].release()
             self.thread_lock[1].acquire()
             try:
@@ -617,15 +615,15 @@ def train_(self,data_batch=None,labels_batch=None,batch=None,batches=None,test_b
             try:
                 if self.nn.opt!=None:
                     pass
-                self.gradient=tape.gradient(self.train_loss,self.nn.param)
+                gradient=tape.gradient(self.train_loss,self.nn.param)
             except AttributeError:
-                self.gradient=self.nn.gradient(tape,self.train_loss,self.nn.param)
+                gradient=self.nn.gradient(tape,self.train_loss,self.nn.param)
             try:
                 if self.nn.opt!=None:
                     pass
-                self.nn.opt.apply_gradients(zip(self.gradient,self.nn.param))
+                self.nn.opt.apply_gradients(zip(gradient,self.nn.param))
             except AttributeError:
-                self.nn.oopt(self.gradient,self.nn.param)
+                self.nn.oopt(gradient,self.nn.param)
             self.loss=self.train_loss.numpy()
             self.train_loss_list.append(self.loss.astype(np.float32))
             self.train_loss=self.loss
@@ -643,16 +641,15 @@ def train_(self,data_batch=None,labels_batch=None,batch=None,batches=None,test_b
                 self.test_acc_list.append(self.test_acc)
         else:
             self.thread_lock[0].acquire()
-            self.param=self.nn.param
             with tf.GradientTape() as tape:
                 self.output=self.nn.fp(self.train_data)
                 self.train_loss=self.nn.loss(self.output,self.train_labels)
             try:
                 if self.nn.opt!=None:
                     pass
-                self.gradient=tape.gradient(self.train_loss,self.param)
+                self.gradient=tape.gradient(self.train_loss,self.nn.param)
             except AttributeError:
-                self.gradient=self.nn.gradient(tape,self.train_loss,self.param)
+                self.gradient=self.nn.gradient(tape,self.train_loss,self.nn.param)
             self.thread_lock[0].release()
             self.thread_lock[1].acquire()
             try:

From f09da95ebbc36bb84cf1841857efae4b7dcdaa41 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Mon, 7 Nov 2022 16:18:37 +0800
Subject: [PATCH 69/99] Update kernel.py

---
 Note/create/RL/nspn/kernel.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/Note/create/RL/nspn/kernel.py b/Note/create/RL/nspn/kernel.py
index 1206751dd..e3d4f1937 100644
--- a/Note/create/RL/nspn/kernel.py
+++ b/Note/create/RL/nspn/kernel.py
@@ -773,8 +773,6 @@ def save(self,i=None,one=True):
         pickle.dump(self.save_episode,output_file)
         pickle.dump(self.loss_list,output_file)
         pickle.dump(self.a,output_file)
-        pickle.dump(self.p,output_file)
-        pickle.dump(self.s,output_file)
         pickle.dump(self.episode_num,output_file)
         pickle.dump(self.total_episode,output_file)
         pickle.dump(self.total_time,output_file)
@@ -816,8 +814,6 @@ def restore(self,s_path,p_path,e_path=None):
         self.save_episode=pickle.load(input_file)
         self.loss_list=pickle.load(input_file)
         self.a=pickle.load(input_file)
-        self.p=pickle.load(input_file)
-        self.s=pickle.load(input_file)
         self.episode_num=pickle.load(input_file)
         self.total_episode=pickle.load(input_file)
         self.total_time=pickle.load(input_file)

From 2a208c624a21d789186975f50803adf680772cee Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Mon, 7 Nov 2022 16:18:51 +0800
Subject: [PATCH 70/99] Update kernel.py

---
 Note/create/DL/kernel.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 2cdee26fc..47c6e987b 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -1234,8 +1234,6 @@ def save(self,i=None,one=True):
         pickle.dump(self.end_test_acc,output_file)
         pickle.dump(self.acc_flag1,output_file)
         pickle.dump(self.acc_flag2,output_file)
-        pickle.dump(self.p,output_file)
-        pickle.dump(self.s,output_file)
         pickle.dump(self.file_list,output_file)
         pickle.dump(self.train_counter,output_file)
         pickle.dump(self.train_loss,output_file)
@@ -1287,8 +1285,6 @@ def restore(self,s_path,p_path=None):
         self.end_test_acc=pickle.load(input_file)
         self.acc_flag1=pickle.load(input_file)
         self.acc_flag2=pickle.load(input_file)
-        self.p=pickle.load(input_file)
-        self.s=pickle.load(input_file)
         self.file_list=pickle.load(input_file)
         self.train_counter=pickle.load(input_file)
         self.train_loss=pickle.load(input_file)

From 06fa15ea8899ea2870eb22f098a27d4207edcf66 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Fri, 18 Nov 2022 18:47:40 +0800
Subject: [PATCH 71/99] Update kernel.py

---
 Note/create/DL/kernel.py | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 47c6e987b..7c6d3b537 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -748,16 +748,6 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
         if epoch!=None:
             for i in range(epoch):
                 t1=time.time()
-                if self.thread==None:
-                    try:
-                        self.nn.ec+=1
-                    except AttributeError:
-                        pass
-                else:
-                    try:
-                        self.nn.ec[t]+=1
-                    except AttributeError:
-                        pass
                 if self.thread==None:
                     self._train(batch,data_batch,labels_batch,test_batch)
                 else:
@@ -770,6 +760,16 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
                         self._train_(batch,data_batch,labels_batch,test_batch,t)
                     else:
                         self._train(batch,data_batch,labels_batch,test_batch,t)
+                if self.thread==None:
+                    try:
+                        self.nn.ec+=1
+                    except AttributeError:
+                        pass
+                else:
+                    try:
+                        self.nn.ec[t]+=1
+                    except AttributeError:
+                        pass
                 if type(self.total_epoch)!=list:
                     if self.thread_lock!=None:
                         if type(self.thread_lock)!=list:
@@ -840,6 +840,16 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
                 else:
                     self._train(test_batch=test_batch,t=t)
                 i+=1
+                if self.thread==None:
+                    try:
+                        self.nn.ec+=1
+                    except AttributeError:
+                        pass
+                else:
+                    try:
+                        self.nn.ec[t]+=1
+                    except AttributeError:
+                        pass
                 if type(self.total_epoch)!=list:
                     if self.thread_lock!=None:
                         if type(self.thread_lock)!=list:
@@ -884,16 +894,6 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
                         print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch,self.train_loss,self.test_loss))
                 if save!=None and i%s==0:
                     self.save(self.total_epoch,one)
-                if self.thread==None:
-                    try:
-                        self.nn.ec+=1
-                    except AttributeError:
-                        pass
-                else:
-                    try:
-                        self.nn.ec[t]+=1
-                    except AttributeError:
-                        pass
                 t2=time.time()
                 if self.thread==None:
                     self.time+=(t2-t1)

From 9faab87264f8dc24a0fd0eaa2680c4fe8af4969f Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sat, 19 Nov 2022 14:32:54 +0800
Subject: [PATCH 72/99] Update kernel.py

---
 Note/create/DL/kernel.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 7c6d3b537..6eb51a8cc 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -86,9 +86,7 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None):
         return


-    def init(self,param=None):
-        if param!=None:
-            self.nn.param=param
+    def init(self):
         self.train_loss_list.clear()
         self.train_acc_list.clear()
         self.test_loss_list.clear()

From af4b3f070dce2e3defde32df7f636ee6c78c83f4 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sun, 20 Nov 2022 13:50:50 +0800
Subject: [PATCH 73/99] Update kernel.py

---
 Note/create/RL/kernel.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py
index 95e70a7ac..682c7be4f 100644
--- a/Note/create/RL/kernel.py
+++ b/Note/create/RL/kernel.py
@@ -788,7 +788,7 @@ def learn(self,epsilon,episode_num):
                 self.thread_lock.release()
             else:
                 for _ in range(self.episode_step):
-                    next_s,end,episode,index=self._explore(s,self.epsilon[i],i)
+                    next_s,end,_episode,index=self._explore(s,self.epsilon[i],i)
                     s=next_s
                     if self.state_pool[i]!=None and self.action_pool[i]!=None and self.next_state_pool[i]!=None and self.reward_pool[i]!=None:
                         if self.PO==1:

From 183e76e246fbdbf560de29ad19cda34ab41ca339 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sun, 20 Nov 2022 14:16:23 +0800
Subject: [PATCH 74/99] Update kernel.py

From db9320855dd45a8954e295f1d9c63af9f26870cc Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sun, 20 Nov 2022 14:16:36 +0800
Subject: [PATCH 75/99] Update kernel.py

From 7b4621c176ff7439e33e7ee8d739bd178467e9f7 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Tue, 22 Nov 2022 19:27:21 +0800
Subject: [PATCH 76/99] Update kernel.py

---
 Note/create/DL/kernel.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 6eb51a8cc..27ef3002b 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -1223,7 +1223,10 @@ def save(self,i=None,one=True):
             self.nn.oopt=None
             pickle.dump(self.nn,output_file)
             self.nn.oopt=opt
-        pickle.dump(opt.get_config(),output_file)
+        try:
+            pickle.dump(opt.get_config(),output_file)
+        except:
+            pickle.dump(None,output_file)
         pickle.dump(self.ol,output_file)
         pickle.dump(self.batch,output_file)
         pickle.dump(self.end_loss,output_file)

From f806cc539503486d24593b0973c84c4956a0b20f Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Tue, 22 Nov 2022 19:34:11 +0800
Subject: [PATCH 77/99] Update kernel.py

---
 Note/create/DL/kernel.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 27ef3002b..ca4265fdc 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -1267,16 +1267,16 @@ def restore(self,s_path,p_path=None):
             parameter_file=open(p_path,'rb')
             param=pickle.load(parameter_file)
         self.nn=pickle.load(input_file)
+        try:
+            self.nn.km=1
+        except AttributeError:
+            pass
         try:
             if self.nn.model!=None:
                 pass
             self.nn.param=param.extend(self.nn.model.weights)
         except AttributeError:
             self.nn.param=param
-        try:
-            self.nn.km=1
-        except AttributeError:
-            pass
         self.config=pickle.load(input_file)
         self.ol=pickle.load(input_file)
         self.batch=pickle.load(input_file)

From 45bd13877f3d8df09759726293087f88f9a4b26c Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sat, 26 Nov 2022 14:01:15 +0800
Subject: [PATCH 78/99] Update kernel.py

---
 Note/create/DL/kernel.py | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index ca4265fdc..7923d684f 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -112,21 +112,6 @@ def add_threads(self,thread):
             self.nn.bc=np.concatenate((self.nn.bc,np.zeros(thread)))
         except AttributeError:
             pass
-        if self.PO==None:
-            self.train_loss=np.concatenate((self.train_loss,np.zeros(thread)))
-            self.train_acc=np.concatenate((self.train_acc,np.zeros(thread)))
-            self.train_loss_list.extend([[] for _ in range(thread)])
-            self.train_acc_list.extend([[] for _ in range(thread)])
-        if self.test_flag==True:
-            if self.PO==None:
-                self.test_loss=np.concatenate((self.test_loss,np.zeros(thread)))
-                self.test_acc=np.concatenate((self.test_acc,np.zeros(thread)))
-                self.test_loss_list.extend([[] for _ in range(thread)])
-                self.test_acc_list.extend([[] for _ in range(thread)])
-        self.epoch=np.concatenate((self.epoch,np.zeros(thread)))
-        self.total_epoch=np.concatenate((self.total_epoch,np.zeros(thread)))
-        self.time=np.concatenate((self.time,np.zeros(thread)))
-        self.total_time=np.concatenate((self.total_time,np.zeros(thread)))
         return

From 132d73ccd35677c9d380409abacfa6c43c11214f Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sat, 24 Dec 2022 21:21:37 +0800
Subject: [PATCH 79/99] Update kernel.py

---
 Note/create/DL/kernel.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 7923d684f..79f394c52 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -1233,7 +1233,6 @@ def save(self,i=None,one=True):
         pickle.dump(self.test_loss_list,output_file)
         pickle.dump(self.test_acc_list,output_file)
         pickle.dump(self.total_epoch,output_file)
-        pickle.dump(self.total_epoch,output_file)
         pickle.dump(self.total_time,output_file)
         output_file.close()
         try:

From 03c7c5f959de9a81e813ebf4e5dba70cae15bbd0 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Mon, 26 Dec 2022 16:28:20 +0800
Subject: [PATCH 80/99] Update kernel.py

---
 Note/create/DL/kernel.py | 55 ++++------------------------------------
 1 file changed, 5 insertions(+), 50 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 79f394c52..f694d5ef7 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -1160,39 +1160,12 @@ def save_p(self):
     def save(self,i=None,one=True):
         if one==True:
             output_file=open('save.dat','wb')
-            try:
-                if len(self.nn.model.weights)==self.nn.param:
-                    pass
-                else:
-                    parameter_file=open('param.dat','wb')
-            except AttributeError:
-                parameter_file=open('param.dat','wb')
         else:
             output_file=open('save-{0}.dat'.format(i),'wb')
-            try:
-                if len(self.nn.model.weights)==self.nn.param:
-                    self.file_list.append(['save-{0}.dat'])
-                    if len(self.file_list)>self.s+1:
-                        os.remove(self.file_list[0][0])
-                else:
-                    parameter_file=open('param-{0}.dat'.format(i),'wb')
-                    self.file_list.append(['save-{0}.dat','param-{0}.dat'])
-                    if len(self.file_list)>self.s+1:
-                        os.remove(self.file_list[0][0])
-                        os.remove(self.file_list[0][1])
-            except AttributeError:
-                parameter_file=open('param-{0}.dat'.format(i),'wb')
-                self.file_list.append(['save-{0}.dat','param-{0}.dat'])
-                if len(self.file_list)>self.s+1:
-                    os.remove(self.file_list[0][0])
-                    os.remove(self.file_list[0][1])
-        try:
-            if len(self.nn.model.weights)!=self.nn.param:
-                pickle.dump(self.nn.param[:-len(self.nn.model.weights)],parameter_file)
-        except AttributeError:
-            pickle.dump(self.nn.param,parameter_file)
-        if self.train_flag==False:
-            self.nn.param=None
+            self.file_list.append(['save-{0}.dat'])
+            if len(self.file_list)>self.s+1:
+                os.remove(self.file_list[0][0])
+                del self.file_list[0]
         try:
             if self.nn.opt:
                 pass
@@ -1235,32 +1208,16 @@ def save(self,i=None,one=True):
         pickle.dump(self.total_epoch,output_file)
         pickle.dump(self.total_time,output_file)
         output_file.close()
-        try:
-            if len(self.nn.model.weights)==self.nn.param:
-                pass
-            else:
-                parameter_file.close()
-        except AttributeError:
-            parameter_file.close()
         return


-    def restore(self,s_path,p_path=None):
+    def restore(self,s_path):
         input_file=open(s_path,'rb')
-        if p_path!=None:
-            parameter_file=open(p_path,'rb')
-            param=pickle.load(parameter_file)
         self.nn=pickle.load(input_file)
         try:
             self.nn.km=1
         except AttributeError:
             pass
-        try:
-            if self.nn.model!=None:
-                pass
-            self.nn.param=param.extend(self.nn.model.weights)
-        except AttributeError:
-            self.nn.param=param
         self.config=pickle.load(input_file)
         self.ol=pickle.load(input_file)
         self.batch=pickle.load(input_file)
@@ -1285,6 +1242,4 @@ def restore(self,s_path):
         self.total_epoch=pickle.load(input_file)
         self.total_time=pickle.load(input_file)
         input_file.close()
-        if p_path!=None:
-            parameter_file.close()
         return

From e433849ec74288463ae37d8bfdbbb26a7860c5eb Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Wed, 11 Jan 2023 23:09:47 +0800
Subject: [PATCH 81/99] Update kernel.py

---
 Note/create/DL/kernel.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index f694d5ef7..802e4964c 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -322,7 +322,7 @@ def _train(self,batch=None,data_batch=None,labels_batch=None,test_batch=None,t=N
                 pass
             loss=total_loss.numpy()/batches
             if self.acc_flag1==1:
-                train_acc=total_acc/batches
+                train_acc=total_acc.numpy()/batches
             if self.thread==None:
                 self.train_loss_list.append(loss.astype(np.float32))
                 self.train_loss=loss

From ede758684493951728cf0ab0945d387d61fce007 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Fri, 13 Jan 2023 18:00:56 +0800
Subject: [PATCH 82/99] Update kernel.py

---
 Note/create/DL/kernel.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 802e4964c..9805b23de 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -706,9 +706,6 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
         self.train_flag=True
         self.batch=batch
         self.epoch=0
-        t1=None
-        t2=None
-        t=None
         self.train_counter+=1
         if p==None:
             self.p=9

From 0f67fb7dbdad373291d12d677709a3f93514cfb9 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sun, 15 Jan 2023 22:35:15 +0800
Subject: [PATCH 83/99] Update kernel.py

---
 Note/create/DL/kernel.py | 36 ++++++++----------------------------
 1 file changed, 8 insertions(+), 28 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 9805b23de..cbaf1e33e 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -60,10 +60,6 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None):
         if self.thread!=None:
             self.t=-np.arange(-(self.thread-1),1)
             self.t=list(self.t)
-            try:
-                self.nn.ec=np.zeros(self.thread)
-            except AttributeError:
-                pass
             try:
                 self.nn.bc=np.zeros(self.thread)
             except AttributeError:
@@ -104,10 +100,6 @@ def add_threads(self,thread):
         t=-np.arange(-thread+1,1)+self.thread
         self.t=t.extend(self.t)
         self.thread+=thread
-        try:
-            self.nn.ec=np.concatenate((self.nn.ec,np.zeros(thread)))
-        except AttributeError:
-            pass
         try:
             self.nn.bc=np.concatenate((self.nn.bc,np.zeros(thread)))
         except AttributeError:
@@ -740,16 +732,10 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
                         self._train_(batch,data_batch,labels_batch,test_batch,t)
                     else:
                         self._train(batch,data_batch,labels_batch,test_batch,t)
-                if self.thread==None:
-                    try:
-                        self.nn.ec+=1
-                    except AttributeError:
-                        pass
-                else:
-                    try:
-                        self.nn.ec[t]+=1
-                    except AttributeError:
-                        pass
+                try:
+                    self.nn.ec+=1
+                except AttributeError:
+                    pass
                 if type(self.total_epoch)!=list:
                     if self.thread_lock!=None:
                         if type(self.thread_lock)!=list:
@@ -820,16 +806,10 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
                 else:
                     self._train(test_batch=test_batch,t=t)
                 i+=1
-                if self.thread==None:
-                    try:
-                        self.nn.ec+=1
-                    except AttributeError:
-                        pass
-                else:
-                    try:
-                        self.nn.ec[t]+=1
-                    except AttributeError:
-                        pass
+                try:
+                    self.nn.ec+=1
+                except AttributeError:
+                    pass
                 if type(self.total_epoch)!=list:
                     if self.thread_lock!=None:
                         if type(self.thread_lock)!=list:

From 3959d0cfb18f72d64871b6599bf7849bb30736a8 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sun, 15 Jan 2023 22:35:47 +0800
Subject: [PATCH 84/99] Update kernel.py

---
 Note/create/RL/kernel.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py
index 682c7be4f..e38422bda 100644
--- a/Note/create/RL/kernel.py
+++ b/Note/create/RL/kernel.py
@@ -717,7 +717,7 @@ def learn3(self,i):
         else:
             self.loss[i]=self.loss[i].numpy()/batches
         try:
-            self.nn.ec[i]+=1
+            self.nn.ec+=1
         except AttributeError:
             pass
         return
@@ -741,10 +741,6 @@ def learn(self,epsilon,episode_num):
         self.reward_pool.append(None)
         self.epsilon.append(epsilon)
         self.epi_num.append(episode_num)
-        try:
-            self.nn.ec.append(0)
-        except AttributeError:
-            pass
         try:
             self.nn.bc.append(0)
         except AttributeError:

From 12220d937042b1102eae94c8124e73e5323a8e53 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sun, 15 Jan 2023 23:08:45 +0800
Subject: [PATCH 85/99] Update kernel.py

---
 Note/create/DL/kernel.py | 36 ++++++++++++++++++++++++++++--------
 1 file changed, 28 insertions(+), 8 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index cbaf1e33e..9805b23de 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -60,6 +60,10 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None):
         if self.thread!=None:
             self.t=-np.arange(-(self.thread-1),1)
             self.t=list(self.t)
+            try:
+                self.nn.ec=np.zeros(self.thread)
+            except AttributeError:
+                pass
             try:
                 self.nn.bc=np.zeros(self.thread)
             except AttributeError:
@@ -100,6 +104,10 @@ def add_threads(self,thread):
         t=-np.arange(-thread+1,1)+self.thread
         self.t=t.extend(self.t)
         self.thread+=thread
+        try:
+            self.nn.ec=np.concatenate((self.nn.ec,np.zeros(thread)))
+        except AttributeError:
+            pass
         try:
             self.nn.bc=np.concatenate((self.nn.bc,np.zeros(thread)))
         except AttributeError:
@@ -732,10 +740,16 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
                         self._train_(batch,data_batch,labels_batch,test_batch,t)
                     else:
                         self._train(batch,data_batch,labels_batch,test_batch,t)
-                try:
-                    self.nn.ec+=1
-                except AttributeError:
-                    pass
+                if self.thread==None:
+                    try:
+                        self.nn.ec+=1
+                    except AttributeError:
+                        pass
+                else:
+                    try:
+                        self.nn.ec[t]+=1
+                    except AttributeError:
+                        pass
                 if type(self.total_epoch)!=list:
                     if self.thread_lock!=None:
                         if type(self.thread_lock)!=list:
@@ -806,10 +820,16 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
                 else:
                     self._train(test_batch=test_batch,t=t)
                 i+=1
-                try:
-                    self.nn.ec+=1
-                except AttributeError:
-                    pass
+                if self.thread==None:
+                    try:
+                        self.nn.ec+=1
+                    except AttributeError:
+                        pass
+                else:
+                    try:
+                        self.nn.ec[t]+=1
+                    except AttributeError:
+                        pass
                 if type(self.total_epoch)!=list:
                     if self.thread_lock!=None:
                         if type(self.thread_lock)!=list:

From ebcfd0550fc0d91e16a61705f1ce9006d70ba1a2 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sun, 15 Jan 2023 23:09:36 +0800
Subject: [PATCH 86/99] Update kernel.py

---
 Note/create/RL/kernel.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py
index e38422bda..682c7be4f 100644
--- a/Note/create/RL/kernel.py
+++ b/Note/create/RL/kernel.py
@@ -717,7 +717,7 @@ def learn3(self,i):
         else:
             self.loss[i]=self.loss[i].numpy()/batches
         try:
-            self.nn.ec+=1
+            self.nn.ec[i]+=1
         except AttributeError:
             pass
         return
@@ -741,6 +741,10 @@ def learn(self,epsilon,episode_num):
         self.reward_pool.append(None)
         self.epsilon.append(epsilon)
         self.epi_num.append(episode_num)
+        try:
+            self.nn.ec.append(0)
+        except AttributeError:
+            pass
         try:
             self.nn.bc.append(0)
         except AttributeError:

From 4f1fb8c7fd34713e2a998921b363f4f00460461f Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Thu, 26 Jan 2023 12:55:06 +0800
Subject: [PATCH 87/99] Update kernel.py

---
 Note/create/DL/kernel.py | 29 ++++++-----------------------
 1 file changed, 6 insertions(+), 23 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 9805b23de..3bac18122 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -27,7 +27,6 @@ def __init__(self,nn=None):
         self.acc_flag1=None
         self.acc_flag2='%'
         self.train_flag=None
-        self.train_counter=0
         self.train_loss=None
         self.train_acc=None
         self.train_loss_list=[]
@@ -92,7 +91,6 @@ def init(self):
         self.test_loss_list.clear()
         self.test_acc_list.clear()
         self.test_flag=False
-        self.train_counter=0
        self.epoch=0
         self.total_epoch=0
         self.time=0
@@ -706,7 +704,6 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
         self.train_flag=True
         self.batch=batch
         self.epoch=0
-        self.train_counter+=1
         if p==None:
             self.p=9
         else:
@@ -782,16 +779,10 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
                 if s==0:
                     s=1
                 if i%p==0:
-                    if self.train_counter==1:
-                        if self.test_flag==False:
-                            print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss))
-                        else:
-                            print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(i+1,self.train_loss,self.test_loss))
+                    if self.test_flag==False:
+                        print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss))
                     else:
-                        if self.test_flag==False:
-                            print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss))
-                        else:
-                            print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch,self.train_loss,self.test_loss))
+                        print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(i+1,self.train_loss,self.test_loss))
                 if save!=None and i%s==0:
                     self.save(self.total_epoch,one)
                 t2=time.time()
@@ -862,16 +853,10 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
                 if s==0:
                     s=1
                 if i%p==0:
-                    if self.train_counter==1:
-                        if self.test_flag==False:
-                            print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss))
-                        else:
-                            print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(i+1,self.train_loss,self.test_loss))
+                    if self.test_flag==False:
+                        print('epoch:{0} loss:{1:.6f}'.format(i+1,self.train_loss))
                     else:
-                        if self.test_flag==False:
-                            print('epoch:{0} loss:{1:.6f}'.format(self.total_epoch,self.train_loss))
-                        else:
-                            print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(self.total_epoch,self.train_loss,self.test_loss))
+                        print('epoch:{0} loss:{1:.6f},test loss:{2:.6f}'.format(i+1,self.train_loss,self.test_loss))
                 if save!=None and i%s==0:
                     self.save(self.total_epoch,one)
                 t2=time.time()
@@ -1191,7 +1176,6 @@ def save(self,i=None,one=True):
         pickle.dump(self.acc_flag1,output_file)
         pickle.dump(self.acc_flag2,output_file)
         pickle.dump(self.file_list,output_file)
-        pickle.dump(self.train_counter,output_file)
         pickle.dump(self.train_loss,output_file)
         pickle.dump(self.train_acc,output_file)
         pickle.dump(self.train_loss_list,output_file)
@@ -1225,7 +1209,6 @@ def restore(self,s_path):
         self.acc_flag1=pickle.load(input_file)
         self.acc_flag2=pickle.load(input_file)
         self.file_list=pickle.load(input_file)
-        self.train_counter=pickle.load(input_file)
         self.train_loss=pickle.load(input_file)
         self.train_acc=pickle.load(input_file)
         self.train_loss_list=pickle.load(input_file)

From eb7351beaeee48b774db5232a2cc89a6d08c8cc1 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Mon, 6 Feb 2023 18:35:37 +0800
Subject: [PATCH 88/99] Update kernel.py

---
 Note/create/RL/kernel.py | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/Note/create/RL/kernel.py b/Note/create/RL/kernel.py
index 682c7be4f..e64fbc25f 100644
--- a/Note/create/RL/kernel.py
+++ b/Note/create/RL/kernel.py
@@ -58,8 +58,8 @@ def __init__(self,nn=None,state=None,state_name=None,action_name=None,thread=Non
         self._loss=[]
         self.loss_list=[]
         self.a=0
-        self.epi_num=[]
-        self.episode_num=np.zeros(self.thread)
+        self.epi_count=[]
+        self.episode_count=np.zeros(self.thread)
         self.total_episode=0
         self.total_time=0
@@ -82,17 +82,17 @@ def add_threads(self,thread):
         self.t=t.extend(self.t)
         self.thread+=thread
         self.loss=np.concatenate((self.train_loss,np.zeros(thread)))
-        self.episode_num=np.concatenate((self.epoch,np.zeros(thread)))
+        self.episode_count=np.concatenate((self.epoch,np.zeros(thread)))
         return


-    def set_up(self,param=None,discount=None,episode_num=None,episode_step=None,pool_size=None,batch=None,update_step=None,end_loss=None):
+    def set_up(self,param=None,discount=None,episode_count=None,episode_step=None,pool_size=None,batch=None,update_step=None,end_loss=None):
         if param!=None:
             self.nn.param=param
         if discount!=None:
             self.discount=discount
-        if episode_num!=None:
-            self.epi_num=episode_num
+        if episode_count!=None:
+            self.epi_count=episode_count
         if episode_step!=None:
             self.episode_step=episode_step
         if pool_size!=None:
@@ -132,8 +132,8 @@ def set_up(self,param=None,discount=None,episode_num=None,episode_step=None,pool
         self._loss=[]
         self.loss_list=[]
         self.a=0
-        self.epi_num=[]
-        self.episode_num=np.zeros(self.thread)
+        self.epi_count=[]
+        self.episode_count=np.zeros(self.thread)
         self.total_episode=0
         self.total_time=0
         return
@@ -723,7 +723,7 @@ def learn3(self,i):
         return


-    def learn(self,epsilon,episode_num):
+    def learn(self,epsilon,episode_count):
         i=self.t.pop()
         self.thread_lock.acquire()
         self._loss.append(0)
@@ -740,7 +740,7 @@ def learn(self,epsilon,episode_num):
         self.next_state_pool.append(None)
         self.reward_pool.append(None)
         self.epsilon.append(epsilon)
-        self.epi_num.append(episode_num)
+        self.epi_count.append(episode_count)
         try:
             self.nn.ec.append(0)
         except AttributeError:
@@ -755,10 +755,10 @@ def learn(self,epsilon,episode_num):
             self.thread_lock.release()
         elif i not in self.finish_lis and self.state_list!=None:
             self.state_list[i+1]=1
-        for k in range(episode_num):
-            if self.episode_num[i]==self.epi_num[i]:
+        for k in range(episode_count):
+            if self.episode_count[i]==self.epi_count[i]:
                 break
-            self.episode_num[i]+=1
+            self.episode_count[i]+=1
             episode=[]
             if self.state_name==None:
                 s=self.nn.explore(init=True)
@@ -893,8 +893,8 @@ def save(self):
         pickle.dump(self.save_episode,output_file)
         pickle.dump(self.loss_list,output_file)
         pickle.dump(self.a,output_file)
-        pickle.dump(self.epi_num,output_file)
-        pickle.dump(self.episode_num,output_file)
+        pickle.dump(self.epi_count,output_file)
+        pickle.dump(self.episode_count,output_file)
         pickle.dump(self.total_episode,output_file)
         pickle.dump(self.total_time,output_file)
         output_file.close()
@@ -945,8 +945,8 @@ def restore(self,s_path,p_path,e_path=None):
         self.save_episode=pickle.load(input_file)
         self.loss_list=pickle.load(input_file)
         self.a=pickle.load(input_file)
-        self.epi_num=pickle.load(input_file)
-        self.episode_num=pickle.load(input_file)
+        self.epi_count=pickle.load(input_file)
+        self.episode_count=pickle.load(input_file)
         self.total_episode=pickle.load(input_file)
         self.total_time=pickle.load(input_file)
         input_file.close()

From 9e6be67d3fd0a612221f2de490bdfab4689081f0 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Mon, 6 Feb 2023 18:36:14 +0800
Subject: [PATCH 89/99] Update kernel.py

---
 Note/create/RL/nspn/kernel.py | 40 +++++++++++++++++------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/Note/create/RL/nspn/kernel.py b/Note/create/RL/nspn/kernel.py
index e3d4f1937..23b786ae8 100644
--- a/Note/create/RL/nspn/kernel.py
+++ b/Note/create/RL/nspn/kernel.py
@@ -38,8 +38,8 @@ def __init__(self,nn=None,state=None,state_name=None,action_name=None,save_episo
         self.a=0
         self.d=None
         self.e=None
-        self.epi_num=0
-        self.episode_num=0
+        self.epi_count=0
+        self.episode_count=0
         self.total_episode=0
         self.time=0
         self.total_time=0
@@ -82,8 +82,8 @@ def set_up(self,param=None,epsilon=None,discount=None,episode_step=None,pool_siz
         self.reward_pool=None
         self.loss_list=[]
         self.a=0
-        self.epi_num=0
-        self.episode_num=0
+        self.epi_count=0
+        self.episode_count=0
         self.total_episode=0
         self.time=0
         self.total_time=0
@@ -518,7 +518,7 @@ def learn2(self):
         return loss,episode,end


-    def learn(self,episode_num,save=None,one=True,p=None,s=None):
+    def learn(self,episode_count,save=None,one=True,p=None,s=None):
         self.train_flag=True
         if p==None and s==None:
             self.p=9
@@ -533,19 +533,19 @@
             self.p=p-1
             self.s=s
         loss=0
-        if episode_num!=None:
-            for i in range(episode_num):
+        if episode_count!=None:
+            for i in range(episode_count):
                 loss,episode,end=self.learn2()
                 self.loss_list.append(loss)
-                self.epi_num+=1
+                self.epi_count+=1
                 self.total_episode+=1
-                if i==episode_num-1:
+                if i==episode_count-1:
                     self.loss_list.append(self.loss)
-            if episode_num%10!=0:
-                d=episode_num-episode_num%self.p
+            if episode_count%10!=0:
+                d=episode_count-episode_count%self.p
                 d=int(d/self.p)
             else:
-                d=episode_num/(self.p+1)
+                d=episode_count/(self.p+1)
                 d=int(d)
             if d==0:
                 d=1
@@ -571,16 +571,16 @@
             while True:
                 loss,episode,end=self.learn2()
                 self.loss_list.append(loss)
-                self.epi_num+=1
+                self.epi_count+=1
                 self.total_episode+=1
-                if i==episode_num-1:
+                if i==episode_count-1:
                     self.loss_list.append(self.loss)
                 i+=1
-            if episode_num%10!=0:
-                d=episode_num-episode_num%self.p
+            if episode_count%10!=0:
+                d=episode_count-episode_count%self.p
                 d=int(d/self.p)
             else:
-                d=episode_num/(self.p+1)
+                d=episode_count/(self.p+1)
                 d=int(d)
             if d==0:
                 d=1
@@ -748,7 +748,7 @@ def save(self,i=None,one=True):
             episode_file=open('episode-{0}.dat'.format(i),'wb')
             pickle.dump(self.episode,episode_file)
             episode_file.close()
-        self.episode_num=self.epi_num
+        self.episode_count=self.epi_count
         pickle.dump(self.nn.param,parameter_file)
         if self.train_flag==False:
             self.nn.param=None
@@ -773,7 +773,7 @@ def save(self,i=None,one=True):
         pickle.dump(self.save_episode,output_file)
         pickle.dump(self.loss_list,output_file)
         pickle.dump(self.a,output_file)
-        pickle.dump(self.episode_num,output_file)
+        pickle.dump(self.episode_count,output_file)
         pickle.dump(self.total_episode,output_file)
         pickle.dump(self.total_time,output_file)
         output_file.close()
@@ -814,7 +814,7 @@ def restore(self,s_path,p_path,e_path=None):
         self.save_episode=pickle.load(input_file)
         self.loss_list=pickle.load(input_file)
         self.a=pickle.load(input_file)
-        self.episode_num=pickle.load(input_file)
+        self.episode_count=pickle.load(input_file)
         self.total_episode=pickle.load(input_file)
         self.total_time=pickle.load(input_file)
         input_file.close()

From f3d64d453ff6f49385ab1426193699739b74bb26 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sun, 12 Feb 2023 20:23:02 +0800
Subject: [PATCH 90/99] Update kernel.py

---
 Note/create/DL/kernel.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 3bac18122..cf951a8e3 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -26,7 +26,6 @@ def __init__(self,nn=None):
         self.end_test_acc=None
         self.acc_flag1=None
         self.acc_flag2='%'
-        self.train_flag=None
         self.train_loss=None
         self.train_acc=None
         self.train_loss_list=[]
@@ -701,7 +700,6 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
-        self.train_flag=True
         self.batch=batch
         self.epoch=0
         if p==None:
@@ -905,7 +903,6 @@ def train(self,batch=None,epoch=None,test_batch=None,save=None,one=True,p=None,s
             else:
                 print('accuracy:{0:.6f},test accuracy:{1:.6f}'.format(self.train_acc,self.test_acc))
             print('time:{0}s'.format(self.time))
-        self.train_flag=False
         return

From 48df4ddce475575f0d2d828859ff9726d9d1b79c Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Tue, 14 Feb 2023 20:44:28 +0800
Subject: [PATCH 91/99] Update kernel.py

---
 Note/create/DL/kernel.py | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index cf951a8e3..72e9d5e9e 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -112,18 +112,6 @@ def add_threads(self,thread):
         return


-    def set_end(self,end_loss=None,end_acc=None,end_test_loss=None,end_test_acc=None):
-        if end_loss!=None:
-            self.end_loss=end_loss
-        if end_acc!=None:
-            self.end_acc=end_acc
-        if end_test_loss!=None:
-            self.end_test_loss=end_test_loss
-        if end_test_acc!=None:
-            self.end_test_acc=end_test_acc
-        return
-
-
     def end(self):
         if self.end_loss!=None and self.train_loss<=self.end_loss:
             return True

From a852f54d44e3e4c959e27c501a82ea31218bd7ee Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Fri, 17 Feb 2023 23:26:07 +0800
Subject: [PATCH 92/99] Update kernel.py

---
 Note/create/DL/kernel.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 72e9d5e9e..441ca02d9 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -40,7 +40,7 @@ def __init__(self,nn=None):
         self.total_time=0


-    def data(self,train_data,train_labels,test_data=None,test_labels=None):
+    def data(self,train_data,train_labels,test_data=None,test_labels=None,test_flag=False):
         self.train_data=train_data
         self.train_labels=train_labels
         if type(train_data)==list:
@@ -49,8 +49,7 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None):
             self.labels_batch=[x for x in range(len(train_labels))]
         self.test_data=test_data
         self.test_labels=test_labels
-        if test_data!=None:
-            self.test_flag=True
+        self.test_flag=test_flag
         if type(self.train_data)==list:
             self.shape0=train_data[0].shape[0]
         else:
@@ -982,7 +981,7 @@ def test(self,test_data,test_labels,batch=None,t=None):
             else:
                 return test_loss,test_acc
         else:
-            return test_loss
+            return test_loss,None


     def train_info(self):

From 3cbcd7c5b5ceec5b5100731ac736050c401b9097 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Sat, 18 Feb 2023 00:02:16 +0800
Subject: [PATCH 93/99] Update kernel.py

---
 Note/create/DL/kernel.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 441ca02d9..71e9a6ed9 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -40,7 +40,7 @@ def __init__(self,nn=None):
         self.total_time=0


-    def data(self,train_data,train_labels,test_data=None,test_labels=None,test_flag=False):
+    def data(self,train_data,train_labels,test_data=None,test_labels=None):
         self.train_data=train_data
         self.train_labels=train_labels
         if type(train_data)==list:
@@ -49,7 +49,6 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None,test_flag=
             self.labels_batch=[x for x in range(len(train_labels))]
         self.test_data=test_data
         self.test_labels=test_labels
-        self.test_flag=test_flag
         if type(self.train_data)==list:
             self.shape0=train_data[0].shape[0]
         else:

From 8ebfac31fbb94dc5c0f72757c91620e6fadd1822 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Tue, 21 Feb 2023 13:49:07 +0800
Subject: [PATCH 94/99] Update kernel.py

---
 Note/create/DL/kernel.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 71e9a6ed9..7b1d79e18 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -49,6 +49,11 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None):
             self.labels_batch=[x for x in range(len(train_labels))]
         self.test_data=test_data
         self.test_labels=test_labels
+        try:
+            if test_data!=None:
+                self.test_flag=True
+        except ValueError:
+            self.test_flag=True
         if type(self.train_data)==list:
             self.shape0=train_data[0].shape[0]
         else:

From 6d27ac42783d933850a5a57d95d599cf92efeb86 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Mon, 20 Mar 2023 15:58:45 +0800
Subject: [PATCH 95/99] Update kernel.py

---
 Note/create/DL/kernel.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 7b1d79e18..2fdd449d5 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -50,8 +50,8 @@ def data(self,train_data,train_labels,test_data=None,test_labels=None):
         self.test_data=test_data
         self.test_labels=test_labels
         try:
-            if test_data!=None:
-                self.test_flag=True
+            if test_data==None:
+                self.test_flag=False
         except ValueError:
             self.test_flag=True
         if type(self.train_data)==list:

From af831ccd00617548f8d68be05a93071ed9c771c6 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Mon, 3 Apr 2023 14:16:07 +0800
Subject: [PATCH 96/99] Update kernel.py

---
 Note/create/DL/kernel.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 2fdd449d5..62ab8bb7e 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -985,7 +985,7 @@ def test(self,test_data,test_labels,batch=None,t=None):
             else:
                 return test_loss,test_acc
         else:
-            return test_loss,None
+            return test_loss


     def train_info(self):

From 663ad3eef3973a45b0bdd0c15a49b74553cde589 Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Mon, 3 Apr 2023 18:12:00 +0800
Subject: [PATCH 97/99] Update kernel.py

---
 Note/create/DL/kernel.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Note/create/DL/kernel.py b/Note/create/DL/kernel.py
index 62ab8bb7e..2fdd449d5 100644
--- a/Note/create/DL/kernel.py
+++ b/Note/create/DL/kernel.py
@@ -985,7 +985,7 @@ def test(self,test_data,test_labels,batch=None,t=None):
             else:
                 return test_loss,test_acc
         else:
-            return test_loss
+            return test_loss,None


     def train_info(self):

From 6fd2d22764ca785a6a340baefbbe637a5afcdaee Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Wed, 24 May 2023 14:12:13 +0800
Subject: [PATCH 98/99] Update README.md

---
 README.md | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/README.md b/README.md
index ca8942c12..8b1378917 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1 @@
-# Note
-documentation:https://github.com/NoteDancing/Note-documentation/tree/main/Note%204.0%20documentation
+

From 7042a1e06bbff8ac0aa5b4beeaf9726da2388eed Mon Sep 17 00:00:00 2001
From: NoteDancing <63648431+NoteDancing@users.noreply.github.com>
Date: Wed, 24 May 2023 14:12:54 +0800
Subject: [PATCH 99/99] Delete README.md

---
 README.md | 1 -
 1 file changed, 1 deletion(-)
 delete mode 100644 README.md

diff --git a/README.md b/README.md
deleted file mode 100644
index 8b1378917..000000000
--- a/README.md
+++ /dev/null
@@ -1 +0,0 @@
-
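
Patches 60 and 61 above converge on t=-np.arange(-thread+1,1)+self.thread when add_threads allocates ids for newly added threads. Below is a minimal standalone sketch of that arithmetic, assuming 0-based thread ids; the helper function is illustrative only and not code from the repository:

import numpy as np

def new_thread_ids(existing,added):
    # Threads 0..existing-1 already exist, so the added threads should
    # receive ids existing..existing+added-1.
    # -np.arange(-added+1,1) enumerates added-1,...,1,0 in descending
    # order, so list.pop() hands out the smallest new id first,
    # mirroring how the kernel's learn() pops ids from self.t.
    return list(-np.arange(-added+1,1)+existing)

print(new_thread_ids(3,2))  # [4, 3]: two new slots after threads 0, 1, 2
# The pre-patch formula, -np.arange(-added,1)+existing+1, instead yields
# added+1 ids ([6, 5, 4] here): one slot too many, all shifted up by one.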