diff --git a/core/base/inc/LinkDef3.h b/core/base/inc/LinkDef3.h index 17f51135c5ac2..b2ae35584ad2b 100644 --- a/core/base/inc/LinkDef3.h +++ b/core/base/inc/LinkDef3.h @@ -165,6 +165,7 @@ #pragma link C++ class TFileCollection+; #pragma link C++ class TVirtualAuth; #pragma link C++ class TVirtualMutex; +#pragma link C++ class TVirtualRWMutex; #pragma link C++ class TLockGuard; #pragma link C++ class TRedirectOutputGuard; #pragma link C++ class TVirtualPerfStats; diff --git a/core/base/inc/TVirtualMutex.h b/core/base/inc/TVirtualMutex.h index ff331209b725a..676ee9b4a3091 100644 --- a/core/base/inc/TVirtualMutex.h +++ b/core/base/inc/TVirtualMutex.h @@ -29,7 +29,7 @@ class TVirtualMutex; // Global mutex set in TThread::Init R__EXTERN TVirtualMutex *gGlobalMutex; -class TVirtualMutex : public TObject { +class TVirtualMutex { public: TVirtualMutex(Bool_t /* recursive */ = kFALSE) { } diff --git a/core/base/inc/TVirtualRWMutex.h b/core/base/inc/TVirtualRWMutex.h new file mode 100644 index 0000000000000..4affeaee90a71 --- /dev/null +++ b/core/base/inc/TVirtualRWMutex.h @@ -0,0 +1,49 @@ +// @(#)root/base:$Id$ +// Author: Philippe Canal, 2017 + +/************************************************************************* + * Copyright (C) 1995-2017, Rene Brun and Fons Rademakers. * + * All rights reserved. * + * * + * For the licensing terms see $ROOTSYS/LICENSE. * + * For the list of contributors see $ROOTSYS/README/CREDITS. * + *************************************************************************/ + +#ifndef ROOT_TVirtualRWMutex +#define ROOT_TVirtualRWMutex + + +////////////////////////////////////////////////////////////////////////// +// // +// TVirtualRWMutex // +// // +// This class implements a read-write mutex interface. The actual work // +// is done via TRWSpinLock which is available as soon as the thread // +// library is loaded. 
// +// // +////////////////////////////////////////////////////////////////////////// + +#include "TVirtualMutex.h" + + +class TVirtualRWMutex : public TVirtualMutex { + +public: + virtual void ReadLock() = 0; + virtual void ReadUnLock() = 0; + virtual void WriteLock() = 0; + virtual void WriteUnLock() = 0; + + Int_t Lock() override { WriteLock(); return 1; } + Int_t TryLock() override { WriteLock(); return 1; } + Int_t UnLock() override { WriteUnLock(); return 1; } + Int_t CleanUp() override { WriteUnLock(); return 1; } + + TVirtualRWMutex *Factory(Bool_t /*recursive*/ = kFALSE) override = 0; + + ClassDefOverride(TVirtualRWMutex,0) // Virtual mutex lock class +}; + + + +#endif diff --git a/core/base/src/TROOT.cxx b/core/base/src/TROOT.cxx index ec6493f624141..f357dad02bdf0 100644 --- a/core/base/src/TROOT.cxx +++ b/core/base/src/TROOT.cxx @@ -497,9 +497,6 @@ namespace Internal { /// Enables the global mutex to make ROOT thread safe/aware. void EnableThreadSafety() { - // 'Insure' gROOT is created before initializing the Thread safe behavior - // (to make sure we do not have two attempting to create it). 
- GetROOT(); static void (*sym)() = (void(*)())Internal::GetSymInLibImt("ROOT_TThread_Initialize"); if (sym) sym(); diff --git a/core/cont/inc/TCollection.h b/core/cont/inc/TCollection.h index be8c8c524bcc9..62095100dbc16 100644 --- a/core/cont/inc/TCollection.h +++ b/core/cont/inc/TCollection.h @@ -60,7 +60,7 @@ R__EXTERN TVirtualMutex *gCollectionMutex; class TCollection : public TObject { #ifdef R__CHECK_COLLECTION_MULTI_ACCESS -protected: +public: class TErrorLock { // Warn when multiple thread try to acquire the same 'lock' std::atomic fWriteCurrent; @@ -81,7 +81,7 @@ class TCollection : public TObject { const char *function); public: - TErrorLock() : fWriteCurrentRecurse(0), fReadCurrentRecurse(0) { std::atomic_flag_clear(&fSpinLockFlag); } + TErrorLock() : fWriteCurrent(), fWriteCurrentRecurse(0), fReadCurrentRecurse(0) { std::atomic_flag_clear(&fSpinLockFlag); } class WriteGuard { TErrorLock *fLock; diff --git a/core/metacling/src/TCling.cxx b/core/metacling/src/TCling.cxx index 50b98f230f28f..d136f2a55f585 100644 --- a/core/metacling/src/TCling.cxx +++ b/core/metacling/src/TCling.cxx @@ -2981,6 +2981,12 @@ Bool_t TCling::HandleNewTransaction(const cling::Transaction &T) void TCling::RecursiveRemove(TObject* obj) { + // NOTE: When replacing the mutex by a ReadWrite mutex, we **must** + // put in place the Read/Write part here. Keeping the write lock + // here is 'catastrophic' for scaling as it means that ALL calls + // to RecursiveRemove will take the write lock and performance + // of many threads trying to access the write lock at the same + // time is relatively bad. R__LOCKGUARD(gInterpreterMutex); // Note that fgSetOfSpecials is supposed to be updated by TClingCallbacks::tryFindROOTSpecialInternal // (but isn't at the moment). 
diff --git a/core/thread/CMakeLists.txt b/core/thread/CMakeLists.txt index 687a7d266e939..1e4e0a48b3880 100644 --- a/core/thread/CMakeLists.txt +++ b/core/thread/CMakeLists.txt @@ -5,7 +5,7 @@ set(headers TAtomicCount.h TCondition.h TConditionImp.h TMutex.h TMutexImp.h TRWLock.h ROOT/TRWSpinLock.hxx TSemaphore.h TThread.h TThreadFactory.h TThreadImp.h ROOT/TThreadedObject.hxx TThreadPool.h - ThreadLocalStorage.h ROOT/TSpinMutex.hxx) + ThreadLocalStorage.h ROOT/TSpinMutex.hxx ROOT/TReentrantRWLock.hxx) if(NOT WIN32) set(headers ${headers} TPosixCondition.h TPosixMutex.h TPosixThread.h TPosixThreadFactory.h PosixThreadInc.h) @@ -18,7 +18,7 @@ endif() set(sources TCondition.cxx TConditionImp.cxx TMutex.cxx TMutexImp.cxx TRWLock.cxx TRWSpinLock.cxx TSemaphore.cxx TThread.cxx TThreadFactory.cxx - TThreadImp.cxx) + TThreadImp.cxx TRWMutexImp.cxx TReentrantRWLock.cxx) if(NOT WIN32) set(sources ${sources} TPosixCondition.cxx TPosixMutex.cxx TPosixThread.cxx TPosixThreadFactory.cxx) @@ -32,3 +32,7 @@ ROOT_GENERATE_DICTIONARY(G__Thread ${headers} STAGE1 MODULE Thread LINKDEF LinkD ROOT_OBJECT_LIBRARY(ThreadObjs ${sources} G__Thread.cxx) ROOT_LINKER_LIBRARY(Thread $ LIBRARIES ${CMAKE_THREAD_LIBS_INIT} DEPENDENCIES Core BUILTINS) ROOT_INSTALL_HEADERS(${installoptions}) + +if(testing) + add_subdirectory(test) +endif() diff --git a/core/thread/Module.mk b/core/thread/Module.mk index a9fb6135c808e..192e1c37ba158 100644 --- a/core/thread/Module.mk +++ b/core/thread/Module.mk @@ -25,7 +25,8 @@ THREADH := $(MODDIRI)/TCondition.h $(MODDIRI)/TConditionImp.h \ $(MODDIRI)/TThreadImp.h $(MODDIRI)/TAtomicCount.h \ $(MODDIRI)/TThreadPool.h $(MODDIRI)/ThreadLocalStorage.h \ $(MODDIRI)/ROOT/TThreadedObject.hxx \ - $(MODDIRI)/ROOT/TSpinMutex.hxx + $(MODDIRI)/ROOT/TSpinMutex.hxx \ + $(MODDIRI)/ROOT/TReentrantRWLock.hxx ifeq ($(IMT),yes) THREADH += $(MODDIRI)/ROOT/TThreadExecutor.hxx @@ -48,7 +49,7 @@ THREADS := $(MODDIRS)/TCondition.cxx $(MODDIRS)/TConditionImp.cxx \ $(MODDIRS)/TMutex.cxx 
$(MODDIRS)/TMutexImp.cxx \ $(MODDIRS)/TRWLock.cxx $(MODDIRS)/TSemaphore.cxx \ $(MODDIRS)/TThread.cxx $(MODDIRS)/TThreadFactory.cxx \ - $(MODDIRS)/TThreadImp.cxx + $(MODDIRS)/TThreadImp.cxx $(MODDIRS)/TReentrantRWLock.cxx ifneq ($(ARCH),win32) THREADS += $(MODDIRS)/TPosixCondition.cxx $(MODDIRS)/TPosixMutex.cxx \ $(MODDIRS)/TPosixThread.cxx $(MODDIRS)/TPosixThreadFactory.cxx diff --git a/core/thread/inc/ROOT/TReentrantRWLock.hxx b/core/thread/inc/ROOT/TReentrantRWLock.hxx new file mode 100644 index 0000000000000..53209029a7780 --- /dev/null +++ b/core/thread/inc/ROOT/TReentrantRWLock.hxx @@ -0,0 +1,150 @@ +// @(#)root/thread:$Id$ +// Authors: Enric Tejedor CERN 12/09/2016 +// Philippe Canal FNAL 12/09/2016 + +/************************************************************************* + * Copyright (C) 1995-2016, Rene Brun and Fons Rademakers. * + * All rights reserved. * + * * + * For the licensing terms see $ROOTSYS/LICENSE. * + * For the list of contributors see $ROOTSYS/README/CREDITS. 
* + *************************************************************************/ + +#ifndef ROOT_TRWSpinLock +#define ROOT_TRWSpinLock + +#include "TSpinMutex.hxx" + +#include +#include +#include +#include + +namespace ROOT { +namespace Internal { +struct UniqueLockRecurseCount { + struct LocalCounts { + int fReadersCount = 0; + bool fIsWriter = false; + }; + size_t fWriteRecurse = 0; ///fReadersCount); } + + template + void IncrementReadCount(local_t &local, MutexT &) { IncrementReadCount(local); } + + void DecrementReadCount(local_t &local) { --(local->fReadersCount); } + + template + void DecrementReadCount(local_t &local, MutexT &) { DecrementReadCount(local); } + + bool IsNotCurrentWriter(local_t &local) { return !local->fIsWriter; } + + void SetIsWriter(local_t &local) + { + // if (fWriteRecurse == std::numeric_limits::max()) { + // ::Fatal("TRWSpinLock::WriteLock", "Too many recursions in TRWSpinLock!"); + // } + ++fWriteRecurse; + local->fIsWriter = true; + } + + void DecrementWriteCount() { --fWriteRecurse; } + + void ResetIsWriter(local_t &local) { local->fIsWriter = false; } + + size_t GetLocalReadersCount(local_t &local) { return local->fReadersCount; } +}; + +struct RecurseCounts { + using ReaderColl_t = std::unordered_map; + size_t fWriteRecurse; /// + void IncrementReadCount(local_t &local, MutexT &mutex) + { + std::unique_lock lock(mutex); + IncrementReadCount(local); + } + + void DecrementReadCount(local_t &local) { --(fReadersCount[local]); } + + template + void DecrementReadCount(local_t &local, MutexT &mutex) + { + std::unique_lock lock(mutex); + DecrementReadCount(local); + } + + bool IsNotCurrentWriter(local_t &local) { return fWriterThread != local; } + + void SetIsWriter(local_t &local) + { + // if (fWriteRecurse == std::numeric_limits::max()) { + // ::Fatal("TRWSpinLock::WriteLock", "Too many recursions in TRWSpinLock!"); + // } + ++fWriteRecurse; + fWriterThread = local; + } + + void DecrementWriteCount() { --fWriteRecurse; } + + void 
ResetIsWriter(local_t & /* local */) { fWriterThread = std::thread::id(); } + + size_t GetLocalReadersCount(local_t &local) { return fReadersCount[local]; } + + +}; +} // Internal + +template +class TReentrantRWLock { +private: + + std::atomic fReaders; /// fReaderReservation; /// fWriterReservation; /// fWriter; /// - +#include "TError.h" ClassImp(TMutex); diff --git a/core/thread/src/TRWMutexImp.cxx b/core/thread/src/TRWMutexImp.cxx new file mode 100644 index 0000000000000..ab65845c6e623 --- /dev/null +++ b/core/thread/src/TRWMutexImp.cxx @@ -0,0 +1,73 @@ +// @(#)root/thread:$Id$ +// Author: Fons Rademakers 26/06/97 + +/************************************************************************* + * Copyright (C) 1995-2017, Rene Brun and Fons Rademakers. * + * All rights reserved. * + * * + * For the licensing terms see $ROOTSYS/LICENSE. * + * For the list of contributors see $ROOTSYS/README/CREDITS. * + *************************************************************************/ + +////////////////////////////////////////////////////////////////////////// +// // +// TRWMutexImp // +// // +// This class implements the TVirtualRWMutex interface, // +// based on TRWSpinLock. // +// // +////////////////////////////////////////////////////////////////////////// + +#include "TRWMutexImp.h" +#include "ROOT/TSpinMutex.hxx" +#include "TMutex.h" + +//////////////////////////////////////////////////////////////////////////////// +/// Take the Read Lock of the mutex. + +template +void TRWMutexImp::ReadLock() +{ + fMutexImp.ReadLock(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// Take the Write Lock of the mutex. 
+ +template +void TRWMutexImp::WriteLock() +{ + fMutexImp.WriteLock(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// Release the read lock of the mutex + +template +void TRWMutexImp::ReadUnLock() +{ + fMutexImp.ReadUnLock(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// Release the write lock of the mutex + +template +void TRWMutexImp::WriteUnLock() +{ + fMutexImp.WriteUnLock(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// Create mutex and return pointer to it. + +template +TVirtualRWMutex *TRWMutexImp::Factory(Bool_t /*recursive = kFALSE*/) +{ + return new TRWMutexImp(); +} + +template class TRWMutexImp; +template class TRWMutexImp; +template class TRWMutexImp; +template class TRWMutexImp; \ No newline at end of file diff --git a/core/thread/src/TRWMutexImp.h b/core/thread/src/TRWMutexImp.h new file mode 100644 index 0000000000000..a2166a652ff41 --- /dev/null +++ b/core/thread/src/TRWMutexImp.h @@ -0,0 +1,36 @@ +// Author: Philippe Canal, 2017 + +/************************************************************************* + * Copyright (C) 1995-2017, Rene Brun and Fons Rademakers. * + * All rights reserved. * + * * + * For the licensing terms see $ROOTSYS/LICENSE. * + * For the list of contributors see $ROOTSYS/README/CREDITS. 
* + *************************************************************************/ + +#ifndef ROOT_TRWMutexImp +#define ROOT_TRWMutexImp + +#include "TVirtualRWMutex.h" +#include "ROOT/TSpinMutex.hxx" +#include "ROOT/TReentrantRWLock.hxx" + +#include "TBuffer.h" // Needed by ClassDEfInlineOverride + +template +class TRWMutexImp : public TVirtualRWMutex { + ROOT::TReentrantRWLock fMutexImp; + +public: + + void ReadLock() override; + void ReadUnLock() override; + void WriteLock() override; + void WriteUnLock() override; + + TVirtualRWMutex *Factory(Bool_t /*recursive*/ = kFALSE) override; + + ClassDefInlineOverride(TRWMutexImp,0) // Concrete RW mutex lock class +}; + +#endif \ No newline at end of file diff --git a/core/thread/src/TReentrantRWLock.cxx b/core/thread/src/TReentrantRWLock.cxx new file mode 100644 index 0000000000000..cd07c27fbe19c --- /dev/null +++ b/core/thread/src/TReentrantRWLock.cxx @@ -0,0 +1,190 @@ +// @(#)root/thread:$Id$ +// Authors: Enric Tejedor CERN 12/09/2016 +// Philippe Canal FNAL 12/09/2016 + +/************************************************************************* + * Copyright (C) 1995-2016, Rene Brun and Fons Rademakers. * + * All rights reserved. * + * * + * For the licensing terms see $ROOTSYS/LICENSE. * + * For the list of contributors see $ROOTSYS/README/CREDITS. * + *************************************************************************/ + +/** \class TReentrantRWLock + \brief An implementation of a reentrant read-write lock with a + configurable internal mutex/lock (default Spin Lock). + +This class provides an implementation of a rreentrant ead-write lock +that uses an internal lock and a condition variable to synchronize +readers and writers when necessary. + +The implementation allows a single reader to take the write lock without +releasing the reader lock. It also allows the writer to take a read lock. +In other word, the lock is re-entrant for both reading and writing. 
+ +The implementation tries to make faster the scenario when readers come +and go but there is no writer. In that case, readers will not pay the +price of taking the internal spin lock. + +Moreover, this RW lock tries to be fair with writers, giving them the +possibility to claim the lock and wait for only the remaining readers, +thus preventing starvation. +*/ + +#include "ROOT/TReentrantRWLock.hxx" +#include "ROOT/TSpinMutex.hxx" +#include "TMutex.h" +#include "TError.h" + +using namespace ROOT; + +Internal::UniqueLockRecurseCount::UniqueLockRecurseCount() +{ + static bool singleton = false; + if (singleton) { + ::Fatal("UniqueLockRecurseCount Ctor","Only one TReentrantRWLock using a UniqueLockRecurseCount is allowed."); + } + singleton = true; +} + + +//////////////////////////////////////////////////////////////////////////// +/// Acquire the lock in read mode. +template +void TReentrantRWLock::ReadLock() +{ + ++fReaderReservation; + + // if (fReaders == std::numeric_limits::max()) { + // ::Fatal("TRWSpinLock::WriteLock", "Too many recursions in TRWSpinLock!"); + // } + + auto local = fRecurseCounts.GetLocal(); + + if (!fWriter) { + // There is no writer, go freely to the critical section + ++fReaders; + --fReaderReservation; + + fRecurseCounts.IncrementReadCount(local, fMutex); + } else { + // A writer claimed the RW lock, we will need to wait on the + // internal lock + --fReaderReservation; + + std::unique_lock lock(fMutex); + + // Wait for writers, if any + if (fWriter && fRecurseCounts.IsNotCurrentWriter(local)) fCond.wait(lock, [this] { return !fWriter; }); + + fRecurseCounts.IncrementReadCount(local); + + // This RW lock now belongs to the readers + ++fReaders; + + lock.unlock(); + } +} + +////////////////////////////////////////////////////////////////////////// +/// Release the lock in read mode. 
+template +void TReentrantRWLock::ReadUnLock() +{ + auto local = fRecurseCounts.GetLocal(); + + --fReaders; + if (fWriterReservation && fReaders == 0) { + // We still need to lock here to prevent interleaving with a writer + std::lock_guard lock(fMutex); + + fRecurseCounts.DecrementReadCount(local); + + // Make sure you wake up a writer, if any + // Note: spurious wakeups are okay, fReaders + // will be checked again in WriteLock + fCond.notify_all(); + } else { + + fRecurseCounts.DecrementReadCount(local, fMutex); + } +} + +////////////////////////////////////////////////////////////////////////// +/// Acquire the lock in write mode. +template +void TReentrantRWLock::WriteLock() +{ + ++fWriterReservation; + + std::unique_lock lock(fMutex); + + auto local = fRecurseCounts.GetLocal(); + + // Release this thread's reader lock(s) + auto readerCount = fRecurseCounts.GetLocalReadersCount(local); + + fReaders -= readerCount; + + // Wait for other writers, if any + if (fWriter && fRecurseCounts.IsNotCurrentWriter(local)) { + if (readerCount && fReaders == 0) { + // we decrease fReaders to zero, let's wake up the + // other writer. + fCond.notify_all(); + } + fCond.wait(lock, [this] { return !fWriter; }); + } + + // Claim the lock for this writer + fWriter = true; + fRecurseCounts.SetIsWriter(local); + + // Wait until all reader reservations finish + while (fReaderReservation) { + }; + + // Wait for remaining readers + fCond.wait(lock, [this] { return fReaders == 0; }); + + // Restore this thread's reader lock(s) + fReaders += readerCount; + + --fWriterReservation; + + lock.unlock(); +} + +////////////////////////////////////////////////////////////////////////// +/// Release the lock in write mode. 
+template +void TReentrantRWLock::WriteUnLock() +{ + // We need to lock here to prevent interleaving with a reader + std::lock_guard lock(fMutex); + + if (!fWriter || fRecurseCounts.fWriteRecurse == 0) { + Error("TRWSpinLock::WriteUnLock", "Write lock already released for %p", this); + return; + } + + fRecurseCounts.DecrementWriteCount(); + + if (!fRecurseCounts.fWriteRecurse) { + fWriter = false; + + auto local = fRecurseCounts.GetLocal(); + fRecurseCounts.ResetIsWriter(local); + + // Notify all potential readers/writers that are waiting + fCond.notify_all(); + } +} + +namespace ROOT { +template class TReentrantRWLock; +template class TReentrantRWLock; + +template class TReentrantRWLock; +template class TReentrantRWLock; +} diff --git a/core/thread/src/TThread.cxx b/core/thread/src/TThread.cxx index fd134cad14009..aeb3b62d28f10 100644 --- a/core/thread/src/TThread.cxx +++ b/core/thread/src/TThread.cxx @@ -321,6 +321,10 @@ void TThread::Init() ::Fatal("Init","_REENTRANT must be #define-d for TThread to work properly."); #endif + // 'Insure' gROOT is created before initializing the Thread safe behavior + // (to make sure we do not have two attempting to create it). 
+ ROOT::GetROOT(); + fgThreadImp = gThreadFactory->CreateThreadImp(); gMainInternalMutex = new TMutex(kTRUE); diff --git a/core/thread/test/CMakeLists.txt b/core/thread/test/CMakeLists.txt new file mode 100644 index 0000000000000..63f272bc34728 --- /dev/null +++ b/core/thread/test/CMakeLists.txt @@ -0,0 +1 @@ +ROOT_ADD_UNITTEST_DIR(Core Thread) diff --git a/core/thread/test/testRWLock.cxx b/core/thread/test/testRWLock.cxx new file mode 100644 index 0000000000000..5d5be8ef29aae --- /dev/null +++ b/core/thread/test/testRWLock.cxx @@ -0,0 +1,407 @@ +// #include "TReentrantRWLock.h" +#include "TVirtualMutex.h" +#include "TMutex.h" +#include "TVirtualRWMutex.h" +#include "ROOT/TReentrantRWLock.hxx" +#include "ROOT/TRWSpinLock.hxx" + +#include "../src/TRWMutexImp.h" + +#include "TSystem.h" +#include "TROOT.h" +#include "TError.h" + +#include "gtest/gtest.h" +#include "gmock/gmock.h" + +void testWriteLockV(TVirtualMutex *m, size_t repetition) { + for(size_t i = 0; i < repetition; ++i) { + m->Lock(); + } +} + +void testWriteUnLockV(TVirtualMutex *m, size_t repetition) { + for(size_t i = 0; i < repetition; ++i) { + m->UnLock(); + } +} + +template +void testWriteTLock(M *m, size_t repetition) { + for(size_t i = 0; i < repetition; ++i) { + m->Lock(); + } +} + +template +void testWriteLock(M *m, size_t repetition) { + for(size_t i = 0; i < repetition; ++i) { + m->WriteLock(); + } +} + +template +void testReadLock(M *m, size_t repetition) { + for(size_t i = 0; i < repetition; ++i) { + m->ReadLock(); + } +} + +template +void testNonReentrantLock(M *m, size_t repetition) { + for(size_t i = 0; i < repetition; ++i) { + m->lock(); + m->unlock(); + } +} + +template +void testWriteTUnLock(M *m, size_t repetition) { + for(size_t i = 0; i < repetition; ++i) { + m->UnLock(); + } +} + +template +void testWriteUnLock(M *m, size_t repetition) { + for(size_t i = 0; i < repetition; ++i) { + m->WriteUnLock(); + } +} + +template +void testReadUnLock(M *m, size_t repetition) { + for(size_t i = 
0; i < repetition; ++i) { + m->ReadUnLock(); + } +} + +void testWriteGuard(TVirtualMutex *m, size_t repetition) { + for(size_t i = 0; i < repetition; ++i) { + TLockGuard guard(m); + } +} + +void testReadGuard(TVirtualRWMutex *m, size_t repetition) { + for(size_t i = 0; i < repetition; ++i) { + m->ReadLock(); + m->ReadUnLock(); + } +} + +struct Globals { + size_t fFirst = 0; + size_t fSecond = 0; + size_t fThird = 0; +}; + +void writer(TVirtualRWMutex *m, Globals *global, size_t repetition) { + for(size_t i = 0; i < repetition; ++i) { + { + TLockGuard guard(m); + global->fFirst++; + // Waste some time + for(size_t k = 0; k < 100; ++k) { + global->fSecond += global->fThird * global->fFirst + k; + } + global->fThird++; + } + gSystem->Sleep(3 /* milliseconds */); // give sometimes to the readers + } +} + +void reader(TVirtualRWMutex *m, Globals *global, size_t repetition) { + for(size_t i = 0; i < repetition; ++i) { + m->ReadLock(); + ASSERT_EQ(global->fFirst,global->fThird); + m->ReadUnLock(); + gSystem->Sleep(1 /* milliseconds */); // give sometimes to the writers + } +} + +void concurrent(TVirtualRWMutex *m, size_t nwriters, size_t nreaders, size_t repetition) { + // ROOT::EnableThreadSafety(); + + std::vector threads; + + Globals global; + + for(size_t i=0 ; i +void Reentrant(T &m) { + + m.ReadLock(); + m.ReadLock(); + m.ReadLock(); + + m.WriteLock(); + + m.ReadLock(); + m.ReadLock(); + + m.WriteLock(); + + m.ReadLock(); + + + m.ReadUnLock(); + m.WriteUnLock(); + m.ReadUnLock(); + m.ReadUnLock(); + m.WriteUnLock(); + m.ReadUnLock(); + m.ReadUnLock(); + m.ReadUnLock(); + +} + +constexpr size_t gRepetition = 10000000; + +auto gMutex = new TMutex(kTRUE); +auto gRWMutex = new TRWMutexImp(); +auto gRWMutexSpin = new TRWMutexImp(); +auto gReentrantRWMutex = new ROOT::TReentrantRWLock(); +auto gReentrantRWMutexSM = new ROOT::TReentrantRWLock(); +auto gSpinMutex = new ROOT::TSpinMutex(); + +// Intentionally ignore the Fatal error due to the shread thread-local storage. 
+// In this test we need to be 'careful' to not use all those mutex at the same time. +int trigger1 = gErrorIgnoreLevel = kFatal+1; +auto gReentrantRWMutexTL = new ROOT::TReentrantRWLock(); +auto gReentrantRWMutexSMTL = new ROOT::TReentrantRWLock(); +auto gRWMutexTL = new TRWMutexImp(); +auto gRWMutexTLSpin = new TRWMutexImp(); +int trigger2 = gErrorIgnoreLevel = 0; + +TEST(RWLock, MutexLockVirtual) +{ + testWriteLockV(gMutex, gRepetition); +} + +TEST(RWLock, MutexUnLockVirtual) +{ + testWriteTUnLock(gMutex, gRepetition); +} + + +TEST(RWLock, WriteLockVirtual) +{ + testWriteLockV(gRWMutex, gRepetition); +} + +TEST(RWLock, WriteUnLockVirtual) +{ + testWriteTUnLock(gRWMutex, gRepetition); +} + + +TEST(RWLock, WriteSpinLockVirtual) +{ + testWriteLock(gRWMutexSpin, gRepetition); +} + +TEST(RWLock, WriteSpinUnLockVirtual) +{ + testWriteTUnLock(gRWMutexSpin, gRepetition); +} + + +TEST(RWLock, WriteLock) +{ + testWriteLock(gRWMutex, gRepetition); +} + +TEST(RWLock, WriteUnLock) +{ + testWriteTUnLock(gRWMutex, gRepetition); +} + + +TEST(RWLock, WriteSpinLock) +{ + testWriteLock(gRWMutexSpin, gRepetition); +} + +TEST(RWLock, WriteSpinUnLock) +{ + testWriteTUnLock(gRWMutexSpin, gRepetition); +} + + +TEST(RWLock, WriteSpinDirectLock) +{ + testWriteLock(gReentrantRWMutexSM, gRepetition); +} + +TEST(RWLock, WriteSpinDirectUnLock) +{ + testWriteUnLock(gReentrantRWMutexSM, gRepetition); +} + + +TEST(RWLock, WriteDirectLock) +{ + testWriteLock(gReentrantRWMutex, gRepetition); +} + +TEST(RWLock, WriteDirectUnLock) +{ + testWriteUnLock(gReentrantRWMutex, gRepetition); +} + + +TEST(RWLock, ReadLockSpinDirect) +{ + testReadLock(gReentrantRWMutexSM, gRepetition); +} + +TEST(RWLock, ReadUnLockSpinDirect) +{ + testReadUnLock(gReentrantRWMutexSM, gRepetition); +} + + +TEST(RWLock, ReadLockDirect) +{ + testReadLock(gReentrantRWMutex, gRepetition); +} + +TEST(RWLock, ReadUnLockDirect) +{ + testReadUnLock(gReentrantRWMutex, gRepetition); +} + + + + +TEST(RWLock, WriteSpinTLDirectLock) +{ + 
testWriteLock(gReentrantRWMutexSMTL, gRepetition); +} + +TEST(RWLock, WriteSpinTLsDirectUnLock) +{ + testWriteUnLock(gReentrantRWMutexSMTL, gRepetition); +} + + +TEST(RWLock, WriteTLDirectLock) +{ + testWriteLock(gReentrantRWMutexTL, gRepetition); +} + +TEST(RWLock, WriteTLDirectUnLock) +{ + testWriteUnLock(gReentrantRWMutexTL, gRepetition); +} + + +TEST(RWLock, ReadLockSpinTLDirect) +{ + testReadLock(gReentrantRWMutexSMTL, gRepetition); +} + +TEST(RWLock, ReadUnLockSpinTLDirect) +{ + testReadUnLock(gReentrantRWMutexSMTL, gRepetition); +} + + +TEST(RWLock, ReadLockTLDirect) +{ + testReadLock(gReentrantRWMutexTL, gRepetition); +} + +TEST(RWLock, ReadUnLockTLDirect) +{ + testReadUnLock(gReentrantRWMutexTL, gRepetition); +} + + + + + +TEST(RWLock, SpinMutexLockUnlock) +{ + testNonReentrantLock(gSpinMutex, gRepetition); +} + + + + +TEST(RWLock, MutexGuard) +{ + testWriteGuard(gMutex, gRepetition); +} + +TEST(RWLock, WriteGuard) +{ + testWriteGuard(gRWMutex, gRepetition); +} + +TEST(RWLock, WriteSpinGuard) +{ + testWriteGuard(gRWMutexSpin, gRepetition); +} + +TEST(RWLock, ReentrantSpin) +{ + Reentrant(*gReentrantRWMutexSM); +} + +TEST(RWLock, Reentrant) +{ + Reentrant(*gReentrantRWMutex); +} + +TEST(RWLock, ReentrantTLSpin) +{ + Reentrant(*gReentrantRWMutexSMTL); +} + +TEST(RWLock, ReentrantTL) +{ + Reentrant(*gReentrantRWMutexTL); +} + + +TEST(RWLock, Concurrent) +{ + concurrent(gRWMutex,1,2,gRepetition / 10000); +} + +TEST(RWLock, ConcurrentSpin) +{ + concurrent(gRWMutexSpin,1,2,gRepetition / 10000); +} + +TEST(RWLock, LargeConcurrent) +{ + concurrent(gRWMutex,10,20,gRepetition / 1000); +} + +// TEST(RWLock, LargeConcurrentSpin) +// { +// concurrent(gRWMutexSpin,10,20,gRepetition / 1000); +// } + +TEST(RWLock, ConcurrentTL) +{ + concurrent(gRWMutexTL,1,2,gRepetition / 10000); +} + +TEST(RWLock, LargeConcurrentTL) +{ + concurrent(gRWMutexTL,10,20,gRepetition / 1000); +}