Skip to content
Closed
Changes from 1 commit
Commits
Show all changes
42 commits
Select commit Hold shift + click to select a range
818afb9
included xxhash.h
adi-sharma Jun 23, 2016
db6990f
added hashing initializations
adi-sharma Jun 24, 2016
efa2efb
added weightBucket
adi-sharma Jun 24, 2016
c821a57
edited applyWeights() without drop-out as an example
adi-sharma Jun 28, 2016
b6328a7
Updated all applyWeights() with HashedNets
adi-sharma Jul 1, 2016
a58ac1d
Update NeuralNet.h
adi-sharma Jul 1, 2016
c81f248
updated update() and Steepest Gradient Descent
adi-sharma Jul 1, 2016
96c8f42
Updated SGD operator() with HashedNets
adi-sharma Jul 1, 2016
0250775
updated weightDecay()
adi-sharma Jul 1, 2016
7c62f3b
updated weightDecay() with HashedNets
adi-sharma Jul 1, 2016
fe93bb2
Edited dropOutWeightFactor()
adi-sharma Jul 13, 2016
13d151a
Edited dropOutWeightFactor()
adi-sharma Jul 13, 2016
725bba3
Updated with HashedNets
adi-sharma Jul 18, 2016
4ec8917
Updated train() with HashedNets
adi-sharma Jul 18, 2016
b1caf73
Updated with complete HashedNets
adi-sharma Aug 23, 2016
c696958
Updated with complete HashedNets
adi-sharma Aug 23, 2016
8d6b2d4
Updated with Complete HashedNets
adi-sharma Aug 23, 2016
e277c73
Update MethodDNN.h
adi-sharma Aug 23, 2016
a820db6
Update NeuralNet.h
adi-sharma Aug 23, 2016
919a319
Updated with HashedNets
adi-sharma Aug 23, 2016
4d9e46c
Error corrections
adi-sharma Aug 24, 2016
3d7eec3
Error corrections NeuralNet.icc
adi-sharma Aug 24, 2016
d202ae1
Error corrections MethodDNN.cxx
adi-sharma Aug 24, 2016
52dc851
Successful compile
adi-sharma Aug 25, 2016
70484aa
Successful compile
adi-sharma Aug 25, 2016
6a0932f
Successful compile
adi-sharma Aug 25, 2016
4f5b919
Successful compile
adi-sharma Aug 25, 2016
96b29df
Successful build NeuralNet.h
adi-sharma Aug 25, 2016
f301784
Successful build NeuralNet.icc
adi-sharma Aug 25, 2016
9f1dce5
Successful build NeuralNet.cxx
adi-sharma Aug 25, 2016
f5e6943
Update NeuralNet.icc
adi-sharma Aug 26, 2016
c4f8749
Update NeuralNet.h
adi-sharma Aug 26, 2016
9637a69
Update MethodDNN.cxx
adi-sharma Aug 26, 2016
f62fcf2
Update NeuralNet.cxx
adi-sharma Aug 26, 2016
e48d24f
Made some logical changes in HashedNets
adi-sharma Aug 27, 2016
2bf3295
Made some logical changes in HashedNets
adi-sharma Aug 27, 2016
e048347
Update NeuralNet.icc
adi-sharma Aug 28, 2016
24c19c2
Production version v1.0
adi-sharma Aug 28, 2016
3c7f7ac
Production Version v1.0
adi-sharma Aug 28, 2016
e6332d5
Production Version v1.0
adi-sharma Aug 28, 2016
5c33612
Production Version v1.0
adi-sharma Aug 28, 2016
6eed664
Production Version v1.0
adi-sharma Aug 28, 2016
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Error corrections
  • Loading branch information
adi-sharma authored Aug 24, 2016
commit 4d9e46cead78e85686fcedcc481c3bda94011e12
54 changes: 27 additions & 27 deletions tmva/tmva/inc/TMVA/NeuralNet.h
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ namespace TMVA

// double gaussDouble (double mean, double sigma);

int BUCKET_SIZE = 8; // ------------------------------- Declare Bucket Size --------------------------------------------
const int BUCKET_SIZE = 8; // ------------------------------- Declare Bucket Size --------------------------------------------

double gaussDouble (double mean, double sigma);
double uniformDouble (double minValue, double maxValue);
Expand Down Expand Up @@ -267,12 +267,12 @@ namespace TMVA



template <typename ItValue, typename ItFunction>
void applyFunctions (ItValue itValue, ItValue itValueEnd, ItFunction itFunction);
template <typename ItValue, typename Fnc>
void applyFunctions (ItValue itValue, ItValue itValueEnd, Fnc fnc);


template <typename ItValue, typename ItFunction, typename ItInverseFunction, typename ItGradient>
void applyFunctions (ItValue itValue, ItValue itValueEnd, ItFunction itFunction, ItInverseFunction itInverseFunction, int itGradient, std::vector<double>& gradientBucket);
template <typename ItValue, typename Fnc, typename ItInverseFunction>
void applyFunctions (ItValue itValue, ItValue itValueEnd, Fnc fnc, ItInverseFunction itInverseFunction, int itGradient, std::vector<double>& gradientBucket);



Expand Down Expand Up @@ -390,7 +390,7 @@ namespace TMVA


template <typename ItOutput, typename ItTruth, typename ItDelta, typename ItInvActFnc>
double sumOfSquares (ItOutput itOutputBegin, ItOutput itOutputEnd, ItTruth itTruthBegin, ItTruth /*itTruthEnd*/, int itDelta, int itDeltaEnd, std::vector<double>& deltaBucket, InvFnc invFnc, double patternWeight);
double sumOfSquares (ItOutput itOutputBegin, ItOutput itOutputEnd, ItTruth itTruthBegin, ItTruth /*itTruthEnd*/, int itDelta, int itDeltaEnd, std::vector<double>& deltaBucket, ItInvActFnc itInvActFnc, double patternWeight);



Expand Down Expand Up @@ -644,8 +644,6 @@ namespace TMVA
std::vector<double> m_deltaBucket; ///< stores the deltas for the DNN training
std::vector<double> m_valueGradientBucket; ///< stores the gradients of the values (nodes)

Net::initializeGradientsDeltas(std::back_inserter (m_valueGradientBucket), std::back_inserter (m_deltaBucket)); // initialize delta and gradient buckets.

std::vector<double> m_values; ///< stores the values of the nodes in this layer
const_dropout_iterator m_itDropOut; ///< iterator to a container indicating if the corresponding node is to be dropped
bool m_hasDropOut; ///< dropOut is turned on?
Expand Down Expand Up @@ -1110,7 +1108,7 @@ namespace TMVA
*
*
*/
template <typename WeightsType, typename DropProbabilities>
template <typename DropProbabilities>
void dropOutWeightFactor (std::vector<double>& weightBucket,
const DropProbabilities& drops,
bool inverse = false);
Expand Down Expand Up @@ -1151,28 +1149,32 @@ namespace TMVA
* \param dropContainer the configuration for DNN drop-out
*/
template <typename Iterator, typename Minimizer>
inline double trainCycle (Minimizer& minimizer, std::vector<double>& weightBucket,
inline double trainCycle (Minimizer& minimizer, std::vector<double>& weightBucket, std::vector<double>& gradientBucket,
Iterator itPatternBegin, Iterator itPatternEnd,
Settings& settings,
DropContainer& dropContainer);

template <typename LayerContainer>
void forwardPattern (const LayerContainer& _layers,
std::vector<LayerData>& layerData, std::vector<double>& weightBucket) const;

size_t numWeights (size_t trainingStartLayer = 0) const; ///< returns the number of weights in this net
size_t numNodes (size_t trainingStartLayer = 0) const; ///< returns the number of nodes in this net

template <typename Weights>
std::vector<double> compute (const std::vector<double>& input, const std::vector<double>& weightBucket) const; ///< compute the net with the given input and the given weights
std::vector<double> compute (const std::vector<double>& input, std::vector<double>& weightBucket) const; ///< compute the net with the given input and the given weights

template <typename Weights, typename PassThrough>
double operator() (PassThrough& settingsAndBatch, const Weights& weights) const; ///< execute computation of the DNN for one mini-batch (used by the minimizer); no computation of gradients
template <typename PassThrough>
double operator() (PassThrough& settingsAndBatch, std::vector<double>& weightBucket) const; ///< execute computation of the DNN for one mini-batch (used by the minimizer); no computation of gradients

template <typename Weights, typename PassThrough, typename OutContainer>
double operator() (PassThrough& settingsAndBatch, const Weights& weights, ModeOutput eFetch, OutContainer& outputContainer) const; ///< execute computation of the DNN for one mini-batch; helper function
template <typename PassThrough, typename OutContainer>
double operator() (PassThrough& settingsAndBatch, std::vector<double>& weightBucket, ModeOutput /*eFetch*/, OutContainer& outputContainer) const; ///< execute computation of the DNN for one mini-batch; helper function

template <typename Weights, typename Gradients, typename PassThrough>
double operator() (PassThrough& settingsAndBatch, Weights& weights, Gradients& gradients) const; ///< execute computation of the DNN for one mini-batch (used by the minimizer); returns gradients as well
template <typename Gradients, typename PassThrough>
double operator() (PassThrough& settingsAndBatch, std::vector<double>& weightBucket, std::vector<double>& gradientBucket) const; ///< execute computation of the DNN for one mini-batch (used by the minimizer); returns gradients as well

template <typename Weights, typename Gradients, typename PassThrough, typename OutContainer>
double operator() (PassThrough& settingsAndBatch, Weights& weights, Gradients& gradients, ModeOutput eFetch, OutContainer& outputContainer) const;
template <typename Gradients, typename PassThrough, typename OutContainer>
double operator() (PassThrough& settingsAndBatch, std::vector<double>& weightBucket, std::vector<double>& gradientBucket, ModeOutput eFetch, OutContainer& outputContainer) const;


template <typename LayerContainer, typename DropContainer, typename ItWeight, typename ItGradient>
Expand All @@ -1185,17 +1187,15 @@ namespace TMVA
int itGradientEnd,
size_t& totalNumWeights) const;

template <typename LayerContainer>
void forwardPattern (const LayerContainer& _layers,
std::vector<LayerData>& layerData, std::vector<double>& weightBucket) const;



template <typename LayerContainer, typename LayerPatternContainer>
void forwardBatch (const LayerContainer& _layers,
LayerPatternContainer& layerPatternData,
std::vector<double>& valuesMean,
std::vector<double>& valuesStdDev,
size_t trainFromLayer) const;
size_t trainFromLayer, std::vector<double>& weightBucket, std::vector<double>& gradientBucket) const;

template <typename OutputContainer>
void fetchOutput (const LayerData& lastLayerData, OutputContainer& outputContainer) const;
Expand Down Expand Up @@ -1228,7 +1228,7 @@ namespace TMVA
int itWeightBegin, int itWeightEnd,
int itGradientBegin, int itGradientEnd,
size_t trainFromLayer,
OutContainer& outputContainer, bool fetchOutput, std::vector<double>& weightBucket) const;
OutContainer& outputContainer, bool fetchOutput, std::vector<double>& weightBucket, std::vector<double>& gradientBucket) const;



Expand All @@ -1240,12 +1240,12 @@ namespace TMVA
*
*
*/
template <typename Container, typename ItWeight>
template <typename Container>
double errorFunction (LayerData& layerData,
LayerData& nextLayerData,
Container truth,
ItWeight itWeight,
ItWeight itWeightEnd,
int itWeight,
int itWeightEnd,
double patternWeight,
double factorWeightDecay,
EnumRegularization eRegularization) const;
Expand Down