diff --git a/src/coreclr/gc/env/gcenv.interlocked.inl b/src/coreclr/gc/env/gcenv.interlocked.inl
index 03d487a5320170..23b05d622fac45 100644
--- a/src/coreclr/gc/env/gcenv.interlocked.inl
+++ b/src/coreclr/gc/env/gcenv.interlocked.inl
@@ -13,13 +13,12 @@
 #ifndef _MSC_VER
 __forceinline void Interlocked::ArmInterlockedOperationBarrier()
 {
-#ifdef HOST_ARM64
+#if defined(HOST_ARM64) || defined(HOST_LOONGARCH64)
+    #if !defined(HOST_OSX)
     // See PAL_ArmInterlockedOperationBarrier() in the PAL
     __sync_synchronize();
-#endif // HOST_ARM64
-#ifdef HOST_LOONGARCH64
-    __sync_synchronize();
-#endif //HOST_LOONGARCH64
+    #endif // !HOST_OSX
+#endif // HOST_ARM64 || HOST_LOONGARCH64
 }
 #endif // !_MSC_VER
 
diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h
index dd7e99c665b100..60375d30cee755 100644
--- a/src/coreclr/pal/inc/pal.h
+++ b/src/coreclr/pal/inc/pal.h
@@ -3447,7 +3447,8 @@ BitScanReverse64(
 
 FORCEINLINE void PAL_ArmInterlockedOperationBarrier()
 {
-#ifdef HOST_ARM64
+#if defined(HOST_ARM64) || defined(HOST_LOONGARCH64)
+    #if !defined(HOST_OSX)
     // On arm64, most of the __sync* functions generate a code sequence like:
     //   loop:
     //     ldaxr (load acquire exclusive)
@@ -3460,10 +3461,10 @@ FORCEINLINE void PAL_ArmInterlockedOperationBarrier()
     // require the load to occur after the store. This memory barrier should be used following a call to a __sync* function to
     // prevent that reordering. Code generated for arm32 includes a 'dmb' after 'cbnz', so no issue there at the moment.
     __sync_synchronize();
-#endif // HOST_ARM64
-#ifdef HOST_LOONGARCH64
-    __sync_synchronize();
-#endif
+    #else
+    // For OSX Arm64, the default Arm architecture is v8.1 which uses atomic instructions that don't need a full barrier.
+    #endif // !HOST_OSX
+#endif // HOST_ARM64 || HOST_LOONGARCH64
 }
 
 /*++
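
For reviewers who want to see the pattern these hunks guard: below is a minimal, hypothetical sketch (not part of this change) of how a __sync*-based interlocked operation pairs with the trailing barrier helper. The names ArmInterlockedOperationBarrierSketch and InterlockedIncrementSketch are made up for illustration; HOST_ARM64, HOST_LOONGARCH64, and HOST_OSX are the build-time macros the diff already uses.

// Hypothetical sketch only -- GCC/Clang builtins, mirroring the pattern in the diff.
#include <cstdint>

static inline void ArmInterlockedOperationBarrierSketch()
{
#if defined(HOST_ARM64) || defined(HOST_LOONGARCH64)
    #if !defined(HOST_OSX)
    // Full barrier so a later load cannot be reordered above the store-release
    // (stlxr) emitted inside the preceding __sync* loop.
    __sync_synchronize();
    #endif
#endif
}

// Hypothetical caller: an increment that must be visible before any subsequent load.
static inline int32_t InterlockedIncrementSketch(int32_t volatile* addend)
{
    int32_t result = __sync_add_and_fetch(addend, 1); // ldaxr / ... / stlxr / cbnz on arm64
    ArmInterlockedOperationBarrierSketch();
    return result;
}

On OSX Arm64 the #else branch applies: the compiler targets ARMv8.1 atomics by default, so the __sync* operation itself is a single atomic instruction and the extra dmb is unnecessary.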