diff --git a/cmake/modules/RootConfiguration.cmake b/cmake/modules/RootConfiguration.cmake
index f046ef29825c9..177bc77c4fb16 100644
--- a/cmake/modules/RootConfiguration.cmake
+++ b/cmake/modules/RootConfiguration.cmake
@@ -486,6 +486,16 @@ if (tmva-cudnn)
 else()
   set(hastmvacudnn undef)
 endif()
+if (tmva-pymva)
+  set(haspymva define)
+else()
+  set(haspymva undef)
+endif()
+if (tmva-rmva)
+  set(hasrmva define)
+else()
+  set(hasrmva undef)
+endif()
 
 # clear cache to allow reconfiguring
 # with a different CMAKE_CXX_STANDARD
diff --git a/cmake/modules/SearchInstalledSoftware.cmake b/cmake/modules/SearchInstalledSoftware.cmake
index b4122fbe2618d..4b12f702b0a78 100644
--- a/cmake/modules/SearchInstalledSoftware.cmake
+++ b/cmake/modules/SearchInstalledSoftware.cmake
@@ -1519,6 +1519,10 @@ if(tmva)
       set(tmva-pymva OFF CACHE BOOL "Disabled because Numpy or Python development package were not found (${tmva-pymva_description})" FORCE)
     endif()
   endif()
+  if (R_FOUND)
+    # RMVA is enabled when R is found and tmva is on
+    set(tmva-rmva ON)
+  endif()
   if(tmva-rmva AND NOT R_FOUND)
     set(tmva-rmva OFF CACHE BOOL "Disabled because R was not found (${tmva-rmva_description})" FORCE)
   endif()
diff --git a/config/RConfigure.in b/config/RConfigure.in
index 281b5e429c811..14921f244b03a 100644
--- a/config/RConfigure.in
+++ b/config/RConfigure.in
@@ -64,6 +64,8 @@
 #@hastmvacpu@ R__HAS_TMVACPU /**/
 #@hastmvagpu@ R__HAS_TMVAGPU /**/
 #@hastmvacudnn@ R__HAS_CUDNN /**/
+#@haspymva@ R__HAS_PYMVA /**/
+#@hasrmva@ R__HAS_RMVA /**/
 
 #endif
diff --git a/tutorials/tmva/TMVA_CNN_Classification.C b/tutorials/tmva/TMVA_CNN_Classification.C
index 1653eb33af228..ac1853a33072e 100644
--- a/tutorials/tmva/TMVA_CNN_Classification.C
+++ b/tutorials/tmva/TMVA_CNN_Classification.C
@@ -125,16 +125,27 @@ void TMVA_CNN_Classification(std::vector<bool> opt = {1, 1, 1, 1})
 
    bool writeOutputFile = true;
 
+   int num_threads = 0; // use default threads
+
    TMVA::Tools::Instance();
 
    // do enable MT running
-   ROOT::EnableImplicitMT();
+   if (num_threads >= 0) {
+      ROOT::EnableImplicitMT(num_threads);
+      if (num_threads > 0) gSystem->Setenv("OMP_NUM_THREADS", TString::Format("%d",num_threads));
+   }
+   else
+      gSystem->Setenv("OMP_NUM_THREADS", "1");
 
-   // for using Keras
+   std::cout << "Running with nthreads = " << ROOT::GetThreadPoolSize() << std::endl;
+
+#ifdef R__HAS_PYMVA
    gSystem->Setenv("KERAS_BACKEND", "tensorflow");
-   // for setting openblas in single thread on SWAN
-   gSystem->Setenv("OMP_NUM_THREADS", "1");
+   // for using Keras
    TMVA::PyMethodBase::PyInitialize();
+#else
+   useKerasCNN = false;
+#endif
 
    TFile *outputFile = nullptr;
    if (writeOutputFile)
diff --git a/tutorials/tmva/TMVA_RNN_Classification.C b/tutorials/tmva/TMVA_RNN_Classification.C
index 916f518eff337..82c16b0cb5fa7 100644
--- a/tutorials/tmva/TMVA_RNN_Classification.C
+++ b/tutorials/tmva/TMVA_RNN_Classification.C
@@ -184,15 +184,26 @@ void TMVA_RNN_Classification(int use_type = 1)
 
    const char *rnn_type = "RNN";
 
+#ifdef R__HAS_PYMVA
    TMVA::PyMethodBase::PyInitialize();
+#else
+   useKeras = false;
+#endif
+
+   int num_threads = 0; // use all threads by default
+   // do enable MT running
+   if (num_threads >= 0) {
+      ROOT::EnableImplicitMT(num_threads);
+      if (num_threads > 0) gSystem->Setenv("OMP_NUM_THREADS", TString::Format("%d",num_threads));
+   }
+   else
+      gSystem->Setenv("OMP_NUM_THREADS", "1");
 
-   ROOT::EnableImplicitMT();
 
    TMVA::Config::Instance();
 
-   std::cout << "nthreads = " << ROOT::GetThreadPoolSize() << std::endl;
+   std::cout << "Running with nthreads = " << ROOT::GetThreadPoolSize() << std::endl;
 
    TString inputFileName = "time_data_t10_d30.root";
-   // TString inputFileName = "/home/moneta/data/sample_images_32x32.gsoc.root";
 
    bool fileExist = !gSystem->AccessPathName(inputFileName);