diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp
index bfa71ddc491a54..fdbb55ae934fb7 100644
--- a/src/coreclr/jit/hwintrinsicxarch.cpp
+++ b/src/coreclr/jit/hwintrinsicxarch.cpp
@@ -934,7 +934,7 @@ GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdT
             GenTree* op2 = impPopStack().val;
             GenTree* op1 = impSIMDPopStack();
 
-            GenTree* tmpOp = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD16, op2, CORINFO_TYPE_INT, 16);
+            GenTree* tmpOp = gtNewSimdCreateScalarNode(TYP_SIMD16, op2, CORINFO_TYPE_INT, 16);
             return gtNewSimdHWIntrinsicNode(simdType, op1, tmpOp, intrinsic, simdBaseJitType, genTypeSize(simdType));
         }
 
diff --git a/src/coreclr/jit/simd.h b/src/coreclr/jit/simd.h
index 478b0a1af4f4a3..9e8781714dbc41 100644
--- a/src/coreclr/jit/simd.h
+++ b/src/coreclr/jit/simd.h
@@ -453,7 +453,23 @@ void EvaluateUnarySimd(genTreeOps oper, bool scalar, var_types baseType, TSimd*
 template <typename TBase>
 TBase EvaluateBinaryScalarRSZ(TBase arg0, TBase arg1)
 {
-    return arg0 >> (arg1 & ((sizeof(TBase) * 8) - 1));
+#if defined(TARGET_XARCH)
+    if ((arg1 < 0) || (arg1 >= (sizeof(TBase) * 8)))
+    {
+        // For SIMD, xarch allows overshifting and treats
+        // it as zeroing. So ensure we do the same here.
+        //
+        // The xplat APIs ensure the shiftAmount is masked
+        // to be within range, so we can't hit this for them.
+
+        return static_cast<TBase>(0);
+    }
+#else
+    // Other platforms enforce masking in their encoding
+    assert((arg1 >= 0) && (arg1 < (sizeof(TBase) * 8)));
+#endif
+
+    return arg0 >> arg1;
 }
 
 template <>
@@ -513,7 +529,22 @@ TBase EvaluateBinaryScalarSpecialized(genTreeOps oper, TBase arg0, TBase arg1)
 
         case GT_LSH:
         {
-            return arg0 << (arg1 & ((sizeof(TBase) * 8) - 1));
+#if defined(TARGET_XARCH)
+            if ((arg1 < 0) || (arg1 >= (sizeof(TBase) * 8)))
+            {
+                // For SIMD, xarch allows overshifting and treats
+                // it as zeroing. So ensure we do the same here.
+                //
+                // The xplat APIs ensure the shiftAmount is masked
+                // to be within range, so we can't hit this for them.
+
+                return static_cast<TBase>(0);
+            }
+#else
+            // Other platforms enforce masking in their encoding
+            assert((arg1 >= 0) && (arg1 < (sizeof(TBase) * 8)));
+#endif
+            return arg0 << arg1;
         }
 
         case GT_OR:
@@ -535,7 +566,24 @@ TBase EvaluateBinaryScalarSpecialized(genTreeOps oper, TBase arg0, TBase arg1)
 
         case GT_RSH:
        {
-            return arg0 >> (arg1 & ((sizeof(TBase) * 8) - 1));
+#if defined(TARGET_XARCH)
+            if ((arg1 < 0) || (arg1 >= (sizeof(TBase) * 8)))
+            {
+                // For SIMD, xarch allows overshifting and treats
+                // it as propagating the sign bit (returning Zero
+                // or AllBitsSet). So ensure we do the same here.
+                //
+                // The xplat APIs ensure the shiftAmount is masked
+                // to be within range, so we can't hit this for them.
+
+                arg0 >>= ((sizeof(TBase) * 8) - 1);
+                arg1 = static_cast<TBase>(1);
+            }
+#else
+            // Other platforms enforce masking in their encoding
+            assert((arg1 >= 0) && (arg1 < (sizeof(TBase) * 8)));
+#endif
+            return arg0 >> arg1;
         }
 
         case GT_RSZ:
diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp
index fbf649052ac64b..e4fb8cb99ed127 100644
--- a/src/coreclr/jit/valuenum.cpp
+++ b/src/coreclr/jit/valuenum.cpp
@@ -7561,6 +7561,11 @@ ValueNum ValueNumStore::EvalHWIntrinsicFunBinary(var_types type,
 
                     if (genTypeSize(baseType) != 8)
                     {
+                        if (shiftAmount > INT_MAX)
+                        {
+                            // Ensure we don't lose track that the amount is an overshift
+                            shiftAmount = -1;
+                        }
                         arg1VN = VNForIntCon(static_cast<int32_t>(shiftAmount));
                     }
                     else
@@ -7594,6 +7599,11 @@ ValueNum ValueNumStore::EvalHWIntrinsicFunBinary(var_types type,
 
                     if (genTypeSize(baseType) != 8)
                     {
+                        if (shiftAmount > INT_MAX)
+                        {
+                            // Ensure we don't lose track that the amount is an overshift
+                            shiftAmount = -1;
+                        }
                         arg1VN = VNForIntCon(static_cast<int32_t>(shiftAmount));
                     }
                     else
@@ -7626,6 +7636,11 @@ ValueNum ValueNumStore::EvalHWIntrinsicFunBinary(var_types type,
 
                     if (genTypeSize(baseType) != 8)
                     {
+                        if (shiftAmount > INT_MAX)
+                        {
+                            // Ensure we don't lose track that the amount is an overshift
+                            shiftAmount = -1;
+                        }
                         arg1VN = VNForIntCon(static_cast<int32_t>(shiftAmount));
                     }
                     else
diff --git a/src/tests/JIT/Regression/JitBlue/Runtime_93698/Runtime_93698.cs b/src/tests/JIT/Regression/JitBlue/Runtime_93698/Runtime_93698.cs
new file mode 100644
index 00000000000000..012387e9c656bc
--- /dev/null
+++ b/src/tests/JIT/Regression/JitBlue/Runtime_93698/Runtime_93698.cs
@@ -0,0 +1,49 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Runtime.Intrinsics;
+using System.Runtime.Intrinsics.X86;
+using Xunit;
+
+public static class Runtime_93698
+{
+    [Fact]
+    public static void TestShiftLeftLogicalOvershift()
+    {
+        if (Sse2.IsSupported)
+        {
+            var result1 = Sse2.ShiftLeftLogical(Vector128.Create(-1, +2, -3, +4), 32);
+            Assert.Equal(Vector128<int>.Zero, result1);
+
+            var result2 = Sse2.ShiftLeftLogical(Vector128.Create(-5, +6, -7, +8), Vector128.Create(0, 32, 0, 0));
+            Assert.Equal(Vector128<int>.Zero, result2);
+        }
+    }
+
+    [Fact]
+    public static void TestShiftRightLogicalOvershift()
+    {
+        if (Sse2.IsSupported)
+        {
+            var result1 = Sse2.ShiftRightLogical(Vector128.Create(-1, +2, -3, +4), 32);
+            Assert.Equal(Vector128<int>.Zero, result1);
+
+            var result2 = Sse2.ShiftRightLogical(Vector128.Create(-5, +6, -7, +8), Vector128.Create(0, 32, 0, 0));
+            Assert.Equal(Vector128<int>.Zero, result2);
+        }
+    }
+
+    [Fact]
+    public static void TestShiftRightArithmeticOvershift()
+    {
+        if (Sse2.IsSupported)
+        {
+            var result1 = Sse2.ShiftRightArithmetic(Vector128.Create(-1, +2, -3, +4), 32);
+            Assert.Equal(Vector128.Create(-1, 0, -1, 0), result1);
+
+            var result2 = Sse2.ShiftRightArithmetic(Vector128.Create(-5, +6, -7, +8), Vector128.Create(0, 32, 0, 0));
+            Assert.Equal(Vector128.Create(-1, 0, -1, 0), result2);
+        }
+    }
+}
diff --git a/src/tests/JIT/Regression/JitBlue/Runtime_93698/Runtime_93698.csproj b/src/tests/JIT/Regression/JitBlue/Runtime_93698/Runtime_93698.csproj
new file mode 100644
index 00000000000000..15edd99711a1a4
--- /dev/null
+++ b/src/tests/JIT/Regression/JitBlue/Runtime_93698/Runtime_93698.csproj
@@ -0,0 +1,8 @@
+<Project Sdk="Microsoft.NET.Sdk">
+  <PropertyGroup>
+    <Optimize>True</Optimize>
+  </PropertyGroup>
+  <ItemGroup>
+    <Compile Include="$(MSBuildProjectName).cs" />
+  </ItemGroup>
+</Project>
\ No newline at end of file
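
The following standalone C++ sketch (not part of the patch, and using hypothetical helper names `FoldShiftLeftLogical` / `FoldShiftRightArithmetic` rather than real JIT functions) mirrors the xarch overshift semantics that the simd.h changes above emulate during constant folding: logical shifts by an amount at or beyond the element bit width fold to zero, while arithmetic right shifts propagate the sign bit and fold to zero or AllBitsSet.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical helper: logical shift where an out-of-range amount zeroes the element.
template <typename TBase>
TBase FoldShiftLeftLogical(TBase arg0, int64_t arg1)
{
    if ((arg1 < 0) || (arg1 >= static_cast<int64_t>(sizeof(TBase) * 8)))
    {
        return static_cast<TBase>(0);
    }
    return static_cast<TBase>(arg0 << arg1);
}

// Hypothetical helper: arithmetic right shift where an out-of-range amount shifts in
// copies of the sign bit, yielding 0 for non-negative inputs and -1 (AllBitsSet) for
// negative inputs. Relies on '>>' being an arithmetic shift for signed types, which is
// what mainstream compilers provide.
template <typename TBase>
TBase FoldShiftRightArithmetic(TBase arg0, int64_t arg1)
{
    if ((arg1 < 0) || (arg1 >= static_cast<int64_t>(sizeof(TBase) * 8)))
    {
        arg0 >>= ((sizeof(TBase) * 8) - 1); // arg0 is now 0 or AllBitsSet
        arg1 = 1;
    }
    return static_cast<TBase>(arg0 >> arg1);
}

int main()
{
    // Overshift cases matching the behavior exercised by Runtime_93698.cs above.
    assert(FoldShiftLeftLogical<int32_t>(0x12345678, 32) == 0);
    assert(FoldShiftRightArithmetic<int32_t>(-3, 32) == -1);
    assert(FoldShiftRightArithmetic<int32_t>(+4, 32) == 0);

    // In-range shifts behave as usual.
    assert(FoldShiftLeftLogical<int32_t>(1, 4) == 16);

    std::printf("overshift folding matches xarch semantics\n");
    return 0;
}
```

Folding this way keeps the constant produced at compile time identical to what PSLLD/PSRLD/PSRAD would produce at run time for the same out-of-range count.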
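A second sketch, also illustrative only (the variable names are assumptions for the example, not actual JIT locals), shows why the valuenum.cpp hunks pin shift amounts above INT_MAX to -1 before narrowing to a 32-bit constant. Assuming the evaluator reads the low 64-bit lane of the count vector, as the hardware does, the test's count vector Vector128.Create(0, 32, 0, 0) produces a shift amount of 32 << 32; a plain cast to int32_t would turn that overshift into an in-range shift of 0 and fold to the wrong result.

```cpp
#include <cassert>
#include <climits>
#include <cstdint>

int main()
{
    // Low 64-bit lane of Vector128.Create(0, 32, 0, 0): the 32 sits in the upper half.
    int64_t shiftAmount = static_cast<int64_t>(32) << 32; // 0x2000000000, an overshift

    // Two's-complement narrowing keeps only the low 32 bits, producing 0, i.e. an
    // in-range "shift by zero" that would fold to the unshifted value.
    assert(static_cast<int32_t>(shiftAmount) == 0);

    // Pinning such amounts to -1 first, as the valuenum.cpp change does, keeps the
    // narrowed constant recognizable as out of range downstream.
    if (shiftAmount > INT_MAX)
    {
        shiftAmount = -1;
    }
    assert(static_cast<int32_t>(shiftAmount) == -1);
    return 0;
}
```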