Skip to content
Closed
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
started to add reuse of memory for maximizing algorithm
  • Loading branch information
re-cursion committed Sep 22, 2016
commit b782b0bdaf0a239b494cfc24ac1788e3e0185306
26 changes: 26 additions & 0 deletions tmva/tmva/inc/TMVA/NeuralNet.h
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@
#include <thread>
#include <future>
#include <type_traits>
#include <mutex>

#include "Pattern.h"
#include "Monitoring.h"
Expand Down Expand Up @@ -372,6 +373,31 @@ namespace TMVA
double m_alpha; ///< internal parameter (learningRate)
double m_beta; ///< internal parameter (momentum)
std::vector<double> m_prevGradients; ///< vector remembers the gradients of the previous step





/* NOTE(review): the standard library has no std::smart_ptr — std::shared_ptr is meant here. */
/* typedef std::shared_ptr<std::vector<double> > VecPtr; */
/* typedef std::vector<VecPtr> VecPool; */
/* VecPool m_local; */
/* VecPool m_localInUse; */
/* std::mutex m_vec_mutex; */


/* VecPtr getVec () */
/* { */
/*     std::lock_guard<std::mutex> lock (m_vec_mutex); */
/*     if (m_local.empty ()) */
/*         m_local.push_back (std::make_shared<std::vector<double> > ()); */
/*     VecPtr vecPtr = m_local.back (); */
/*     m_local.pop_back (); */
/*     m_localInUse.push_back (vecPtr); */
/*     return vecPtr; */
/* } */

/* void releaseVec (VecPtr vecPtr) */
/* { */
/*     std::lock_guard<std::mutex> lock (m_vec_mutex); */
/*     // NOTE(review): std::vector has no member find(); use std::find from <algorithm> */
/*     VecPool::iterator it = std::find (m_localInUse.begin (), m_localInUse.end (), vecPtr); */
/*     if (it != m_localInUse.end ()) */
/*     { */
/*         m_local.push_back (*it); */
/*         m_localInUse.erase (it); */
/*     } */
/* } */
};


Expand Down