Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions tmva/tmva/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ set(headers1 Configurable.h Factory.h MethodBase.h MethodCompositeBase.h
MethodKNN.h MethodCFMlpANN.h MethodCFMlpANN_Utils.h MethodLikelihood.h
MethodHMatrix.h MethodPDERS.h MethodBDT.h MethodDT.h MethodSVM.h MethodBayesClassifier.h
MethodFDA.h MethodMLP.h MethodBoost.h
MethodPDEFoam.h MethodLD.h MethodCategory.h MethodDNN.h)
MethodPDEFoam.h MethodLD.h MethodCategory.h MethodDNN.h MethodCNN.h)
set(headers2 TSpline2.h TSpline1.h PDF.h BinaryTree.h BinarySearchTreeNode.h BinarySearchTree.h
Timer.h RootFinder.h CrossEntropy.h DecisionTree.h DecisionTreeNode.h MisClassificationError.h
Node.h SdivSqrtSplusB.h SeparationBase.h RegressionVariance.h Tools.h Reader.h
Expand Down Expand Up @@ -94,7 +94,7 @@ if(NOT gnuinstall)
endif()

ROOT_ADD_TEST_SUBDIRECTORY(test/DNN)

ROOT_ADD_TEST_SUBDIRECTORY(test/DNN/CNN)



2 changes: 1 addition & 1 deletion tmva/tmva/Module.mk
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ TMVAH1 := Configurable.h Event.h Factory.h MethodBase.h MethodCompositeBas
MethodKNN.h MethodCFMlpANN.h MethodCFMlpANN_Utils.h MethodLikelihood.h \
MethodHMatrix.h MethodPDERS.h MethodBDT.h MethodDT.h MethodSVM.h MethodBayesClassifier.h \
MethodFDA.h MethodMLP.h MethodCommittee.h MethodBoost.h \
MethodPDEFoam.h MethodLD.h MethodCategory.h MethodNN.h MethodDNN.h
MethodPDEFoam.h MethodLD.h MethodCategory.h MethodNN.h MethodDNN.h MethodCNN.h
TMVAH2 := TSpline2.h TSpline1.h PDF.h BinaryTree.h BinarySearchTreeNode.h BinarySearchTree.h \
Timer.h RootFinder.h CrossEntropy.h DecisionTree.h DecisionTreeNode.h MisClassificationError.h \
Node.h SdivSqrtSplusB.h SeparationBase.h RegressionVariance.h Tools.h Reader.h \
Expand Down
2 changes: 2 additions & 0 deletions tmva/tmva/inc/LinkDef1.h
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,8 @@
#pragma link C++ class TMVA::MethodPDEFoam+;
#pragma link C++ class TMVA::MethodLD+;
#pragma link C++ class TMVA::MethodCategory+;
#pragma link C++ class TMVA::MethodDL+;
#pragma link C++ class TMVA::MethodDNN+;
#pragma link C++ class TMVA::MethodCNN+;

#endif
163 changes: 155 additions & 8 deletions tmva/tmva/inc/TMVA/DNN/Architectures/Cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,11 @@
* For the list of contributors see $ROOTSYS/README/CREDITS. *
*************************************************************************/

//////////////////////////////////////////////////////////////////
// Definition of the TCpu architecture, which provides a //
// multi-threaded CPU implementation of the low-level interface //
// networks for Cpus using BLAS and Roots TThreadExecutor //
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
// Definition of the TCpu architecture, which provides a //
// multi-threaded CPU implementation of the low-level interface //
// networks for Cpus using BLAS and Roots TThreadExecutor //
//////////////////////////////////////////////////////////////////

#ifndef TMVA_DNN_ARCHITECTURES_CPU
#define TMVA_DNN_ARCHITECTURES_CPU
Expand Down Expand Up @@ -61,15 +61,17 @@ class TCpu
/** Add the vectors biases row-wise to the matrix output */
static void AddRowWise(TCpuMatrix<Scalar_t> &output,
const TCpuMatrix<Scalar_t> &biases);

///@}

/** @name Backward Propagation
* Low-level functions required for the forward propagation of activations
* through the network.
*/
///@{
/** Perform the complete backward propagation step. If the provided
* \p activationGradientsBackward matrix is not empty, compute the

/** Perform the complete backward propagation step in a Fully Connected Layer.
* If the provided \p activationGradientsBackward matrix is not empty, compute the
* gradients of the objective function with respect to the activations
* of the previous layer (backward direction).
* Also compute the weight and the bias gradients. Modifies the values
Expand All @@ -83,6 +85,7 @@ class TCpu
const TCpuMatrix<Scalar_t> & activationGradients,
const TCpuMatrix<Scalar_t> & weights,
const TCpuMatrix<Scalar_t> & activationBackward);

/** Adds the elements in matrix \p B scaled by \p c to the elements in
 * the matrix \p A. This is required for the weight update in the gradient
 * descent step.*/
Expand Down Expand Up @@ -249,7 +252,151 @@ class TCpu
static void Dropout(TCpuMatrix<Scalar_t> & A, Scalar_t p);

///@}


//______________________________________________________________________________
//
// Convolutional Nets Propagation
//______________________________________________________________________________

/** @name Propagation in Convolutional Nets
*/
///@{


/** Transform the matrix \p B into local-view format, suitable for
 * convolution, and store it in the matrix \p A */
static void Im2col(TCpuMatrix<AReal> &A,
TCpuMatrix<AReal> &B,
size_t imgHeight,
size_t imgWidth,
size_t fltHeight,
size_t fltWidth,
size_t strideRows,
size_t strideCols,
size_t zeroPaddingHeight,
size_t zeroPaddingWidth);

/** Rotates the matrix \p B, which represents the weights,
 * and stores the result in the matrix \p A. */
static void RotateWeights(TCpuMatrix<AReal> &A,
const TCpuMatrix<AReal> &B,
size_t filterDepth,
size_t filterHeight,
size_t filterWidth,
size_t numFilters);

/** Flattens the tensor \p B, such that each matrix is stretched into
 * one row, resulting in the matrix \p A. */
static void Flatten(TCpuMatrix<AReal> &A,
const std::vector<TCpuMatrix<AReal>> B,
size_t size,
size_t nRows,
size_t nCols);

/** Transforms each row of \p B to a matrix and stores it in the
 * tensor \p A. */
static void Deflatten(std::vector<TCpuMatrix<AReal>> A,
const TCpuMatrix<AReal> &B,
size_t index,
size_t nRows,
size_t nCols);

/** Perform the complete backward propagation step in a Convolutional Layer.
* If the provided \p activationGradientsBackward matrix is not empty, compute the
* gradients of the objective function with respect to the activations
* of the previous layer (backward direction).
* Also compute the weight and the bias gradients. Modifies the values
* in \p df and thus produces only a valid result, if it is applied the
* first time after the corresponding forward propagation has been per-
* formed. */
static void ConvLayerBackward(std::vector<TCpuMatrix<Scalar_t>> & activationGradientsBackward,
TCpuMatrix<Scalar_t> & weightGradients,
TCpuMatrix<Scalar_t> & biasGradients,
std::vector<TCpuMatrix<Scalar_t>> & df,
const std::vector<TCpuMatrix<Scalar_t>> & activationGradients,
const TCpuMatrix<Scalar_t> & weights,
const std::vector<TCpuMatrix<Scalar_t>> & activationBackward,
size_t batchSize,
size_t inputHeight,
size_t inputWidth,
size_t depth,
size_t height,
size_t width,
size_t filterDepth,
size_t filterHeight,
size_t filterWidth,
size_t nLocalViews);

/** Utility function for calculating the activation gradients of the layer
* before the convolutional layer. */
static void CalculateConvActivationGradients(std::vector<TCpuMatrix<Scalar_t>> & activationGradientsBackward,
std::vector<TCpuMatrix<Scalar_t>> & df,
const TCpuMatrix<Scalar_t> & weights,
size_t batchSize,
size_t inputHeight,
size_t inputWidth,
size_t depth,
size_t height,
size_t width,
size_t filterDepth,
size_t filterHeight,
size_t filterWidth);

/** Utility function for calculating the weight gradients of the convolutional
* layer. */
static void CalculateConvWeightGradients(TCpuMatrix<Scalar_t> & weightGradients,
std::vector<TCpuMatrix<Scalar_t>> & df,
const std::vector<TCpuMatrix<Scalar_t>> & activations_backward,
size_t batchSize,
size_t inputHeight,
size_t inputWidth,
size_t depth,
size_t height,
size_t width,
size_t filterDepth,
size_t filterHeight,
size_t filterWidth,
size_t nLocalViews);

/** Utility function for calculating the bias gradients of the convolutional
* layer */
static void CalculateConvBiasGradients(TCpuMatrix<Scalar_t> & biasGradients,
std::vector<TCpuMatrix<Scalar_t>> & df,
size_t batchSize,
size_t depth,
size_t nLocalViews);

/** Add the biases in the Convolutional Layer. */
static void AddConvBiases(TCpuMatrix<Scalar_t> &output,
const TCpuMatrix<Scalar_t> &biases);


/** Downsample the matrix \p C to the matrix \p A, using the max
 * operation, such that the winning indices are stored in the matrix
 * \p B. */
static void Downsample(TCpuMatrix<AReal> &A,
TCpuMatrix<AReal> &B,
const TCpuMatrix<AReal> &C,
size_t imgHeight,
size_t imgWidth,
size_t fltHeight,
size_t fltWidth,
size_t strideRows,
size_t strideCols);

/** Perform the complete backward propagation step in a Pooling Layer. Based on the
 * winning indices stored in the index matrix, it just forwards the activation
 * gradients to the previous layer. */
static void PoolLayerBackward(std::vector<TCpuMatrix<AReal>> & activationGradientsBackward,
const std::vector<TCpuMatrix<AReal>> & activationGradients,
const std::vector<TCpuMatrix<AReal>> & indexMatrix,
size_t batchSize,
size_t depth,
size_t nLocalViews);

///@}


//____________________________________________________________________________
//
// Additional Arithmetic Functions
Expand Down
Loading