diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 1d3de8a07a5716..a973436fa1d408 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -2546,29 +2546,25 @@ class Compiler GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic = false); + unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic = false); + unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic = false); + unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic = false); + unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, @@ -2576,36 +2572,31 @@ class Compiler GenTree* op4, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic = false); + unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic = false); + unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic = false); + unsigned simdSize); GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { - bool isSimdAsHWIntrinsic = true; - return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode( var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { - bool isSimdAsHWIntrinsic = true; - return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, @@ -2615,8 +2606,7 @@ class Compiler CorInfoType simdBaseJitType, unsigned simdSize) { - bool isSimdAsHWIntrinsic = true; - return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, @@ -2627,181 +2617,162 @@ class Compiler CorInfoType simdBaseJitType, unsigned simdSize) { - bool isSimdAsHWIntrinsic = true; - return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize); } GenTree* gtNewSimdAbsNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool 
isSimdAsHWIntrinsic); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdBinOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdCeilNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdCmpOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdCmpOpAllNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdCndSelNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdCreateBroadcastNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdCreateScalarNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdCreateScalarUnsafeNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdDotProdNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdFloorNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdGetElementNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdGetLowerNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdGetUpperNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdLoadNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdLoadAlignedNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdLoadNonTemporalNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* 
gtNewSimdMaxNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdMinNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdNarrowNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdShuffleNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdSqrtNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdStoreNode( - GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); + GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdStoreAlignedNode( - GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); + GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdStoreNonTemporalNode( - GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); + GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdSumNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdUnOpNode(genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdWidenLowerNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdWidenUpperNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); + var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize); GenTree* gtNewSimdWithElementNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdWithLowerNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTree* gtNewSimdWithUpperNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic); + unsigned simdSize); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID); @@ -3049,10 +3020,6 @@ class Compiler GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp); //------------------------------------------------------------------------- - // Get the handle, if any. - CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree); - // Get the handle, and assert if not found. - CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree); // Get the handle for a ref type. 
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull); // Get the class handle for an helper call @@ -5783,9 +5750,9 @@ class Compiler // Create a new temporary variable to hold the result of *ppTree, // and transform the graph accordingly. - GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr); - TempInfo fgMakeTemp(GenTree* rhs, CORINFO_CLASS_HANDLE structType = nullptr); - GenTree* fgMakeMultiUse(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr); + GenTree* fgInsertCommaFormTemp(GenTree** ppTree); + TempInfo fgMakeTemp(GenTree* rhs); + GenTree* fgMakeMultiUse(GenTree** ppTree); // Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node. GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree); @@ -8463,13 +8430,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX struct SIMDHandlesCache { - // BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG - // NATIVEINT, NATIVEUINT, FLOAT, and DOUBLE - static const uint32_t SupportedTypeCount = 12; - - // SIMD Types - CORINFO_CLASS_HANDLE VectorTHandles[SupportedTypeCount]; - CORINFO_CLASS_HANDLE PlaneHandle; CORINFO_CLASS_HANDLE QuaternionHandle; CORINFO_CLASS_HANDLE Vector2Handle; @@ -8477,239 +8437,14 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX CORINFO_CLASS_HANDLE Vector4Handle; CORINFO_CLASS_HANDLE VectorHandle; -#ifdef FEATURE_HW_INTRINSICS -#if defined(TARGET_ARM64) - CORINFO_CLASS_HANDLE Vector64THandles[SupportedTypeCount]; -#endif // defined(TARGET_ARM64) - CORINFO_CLASS_HANDLE Vector128THandles[SupportedTypeCount]; -#if defined(TARGET_XARCH) - CORINFO_CLASS_HANDLE Vector256THandles[SupportedTypeCount]; - CORINFO_CLASS_HANDLE Vector512THandles[SupportedTypeCount]; -#endif // defined(TARGET_XARCH) -#endif // FEATURE_HW_INTRINSICS - - CORINFO_CLASS_HANDLE CanonicalSimd8Handle; - CORINFO_CLASS_HANDLE CanonicalSimd16Handle; - CORINFO_CLASS_HANDLE CanonicalSimd32Handle; - CORINFO_CLASS_HANDLE CanonicalSimd64Handle; - SIMDHandlesCache() { - assert(SupportedTypeCount == static_cast(CORINFO_TYPE_DOUBLE - CORINFO_TYPE_BYTE + 1)); memset(this, 0, sizeof(*this)); } }; SIMDHandlesCache* m_simdHandleCache; -#if defined(FEATURE_HW_INTRINSICS) - CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType) - { - assert(varTypeIsSIMD(simdType)); - assert((simdBaseJitType >= CORINFO_TYPE_BYTE) && (simdBaseJitType <= CORINFO_TYPE_DOUBLE)); - - // We should only be called from gtGetStructHandleForSimdOrHW and this should've been checked already - assert(m_simdHandleCache != nullptr); - - if (simdBaseJitType == CORINFO_TYPE_FLOAT) - { - switch (simdType) - { - case TYP_SIMD8: - { - return m_simdHandleCache->Vector2Handle; - } - - case TYP_SIMD12: - { - return m_simdHandleCache->Vector3Handle; - } - - case TYP_SIMD16: - { - // We order the checks roughly by expected hit count so early exits are possible - - if (m_simdHandleCache->Vector4Handle != NO_CLASS_HANDLE) - { - return m_simdHandleCache->Vector4Handle; - } - - if (m_simdHandleCache->QuaternionHandle != NO_CLASS_HANDLE) - { - return m_simdHandleCache->QuaternionHandle; - } - - if (m_simdHandleCache->PlaneHandle != NO_CLASS_HANDLE) - { - return m_simdHandleCache->PlaneHandle; - } - - break; - } - -#if defined(TARGET_XARCH) - case TYP_SIMD32: - case TYP_SIMD64: - { - // This should be handled by the Vector path below - break; - } -#endif // TARGET_XARCH - - default: - { - unreached(); - } - } - } 
- - if (emitTypeSize(simdType) != getSIMDVectorRegisterByteLength()) - { - // We have scenarios, such as shifting Vector by a non-constant - // which may introduce different sized vectors that are marked as - // isSimdAsHWIntrinsic. - - return NO_CLASS_HANDLE; - } - - uint32_t handleIndex = static_cast(simdBaseJitType - CORINFO_TYPE_BYTE); - assert(handleIndex < SIMDHandlesCache::SupportedTypeCount); - - return m_simdHandleCache->VectorTHandles[handleIndex]; - } - - CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType) - { - assert(varTypeIsSIMD(simdType)); - assert((simdBaseJitType >= CORINFO_TYPE_BYTE) && (simdBaseJitType <= CORINFO_TYPE_DOUBLE)); - - // We should only be called from gtGetStructHandleForSimdOrHW and this should've been checked already - assert(m_simdHandleCache != nullptr); - - uint32_t handleIndex = static_cast(simdBaseJitType - CORINFO_TYPE_BYTE); - assert(handleIndex < SIMDHandlesCache::SupportedTypeCount); - - switch (simdType) - { - case TYP_SIMD8: - { -#if defined(TARGET_ARM64) - return m_simdHandleCache->Vector64THandles[handleIndex]; -#else - // This can only be Vector2 and should've been handled by gtGetStructHandleForSIMD - return NO_CLASS_HANDLE; -#endif - } - - case TYP_SIMD12: - { - // This can only be Vector3 and should've been handled by gtGetStructHandleForSIMD - return NO_CLASS_HANDLE; - } - - case TYP_SIMD16: - { - return m_simdHandleCache->Vector128THandles[handleIndex]; - } - -#if defined(TARGET_XARCH) - case TYP_SIMD32: - { - return m_simdHandleCache->Vector256THandles[handleIndex]; - } - - case TYP_SIMD64: - { - return m_simdHandleCache->Vector512THandles[handleIndex]; - } -#endif // TARGET_XARCH - - default: - { - unreached(); - } - } - } - - CORINFO_CLASS_HANDLE gtGetStructHandleForSimdOrHW(var_types simdType, - CorInfoType simdBaseJitType, - bool isSimdAsHWIntrinsic = false) - { - assert(varTypeIsSIMD(simdType)); - assert((simdBaseJitType >= CORINFO_TYPE_BYTE) && (simdBaseJitType <= CORINFO_TYPE_DOUBLE)); - - if (m_simdHandleCache == nullptr) - { - // This may happen if the JIT generates SIMD node on its own, without importing them. - // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache. - return NO_CLASS_HANDLE; - } - - CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE; - - if (isSimdAsHWIntrinsic) - { - clsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType); - } - - if (clsHnd == NO_CLASS_HANDLE) - { - clsHnd = gtGetStructHandleForHWSIMD(simdType, simdBaseJitType); - } - - if (clsHnd == NO_CLASS_HANDLE) - { - // TODO-cleanup: We can probably just always use the canonical handle. - clsHnd = gtGetCanonicalStructHandleForSIMD(simdType); - } - - return clsHnd; - } -#endif // FEATURE_HW_INTRINSICS - - //------------------------------------------------------------------------ - // gtGetCanonicalStructHandleForSIMD: Get the "canonical" SIMD type handle. - // - // Some SIMD-typed trees do not carry struct handles with them (and in - // some cases, they cannot, due to being created by the compiler itself). - // To enable CSEing of these trees, we use "canonical" handles. These are - // captured during importation, and can represent any type normalized to - // be TYP_SIMD. - // - // Arguments: - // simdType - The SIMD type - // - // Return Value: - // The "canonical" type handle for "simdType", if one was available. - // "NO_CLASS_HANDLE" otherwise. 
- // - CORINFO_CLASS_HANDLE gtGetCanonicalStructHandleForSIMD(var_types simdType) - { - if (m_simdHandleCache == nullptr) - { - return NO_CLASS_HANDLE; - } - - switch (simdType) - { - case TYP_SIMD8: - return m_simdHandleCache->CanonicalSimd8Handle; - case TYP_SIMD12: - return m_simdHandleCache->Vector3Handle; - case TYP_SIMD16: - return m_simdHandleCache->CanonicalSimd16Handle; -#if defined(TARGET_XARCH) - case TYP_SIMD32: - return m_simdHandleCache->CanonicalSimd32Handle; - case TYP_SIMD64: - return m_simdHandleCache->CanonicalSimd64Handle; -#endif // TARGET_XARCH - - default: - unreached(); - } - } - // Returns true if this is a SIMD type that should be considered an opaque // vector type (i.e. do not analyze or promote its fields). // Note that all but the fixed vector types are opaque, even though they may diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index cda3b1725b6725..cb03e0b604f8d6 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -1658,14 +1658,12 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) if (varTypeIsStruct(argType)) { - structHnd = gtGetStructHandleIfPresent(argNode); - noway_assert((structHnd != NO_CLASS_HANDLE) || (argType != TYP_STRUCT)); + structHnd = lclVarInfo[argNum].lclVerTypeInfo.GetClassHandleForValueClass(); + assert(structHnd != NO_CLASS_HANDLE); } - // Unsafe value cls check is not needed for - // argTmpNum here since in-linee compiler instance - // would have iterated over these and marked them - // accordingly. + // Unsafe value cls check is not needed for argTmpNum here since in-linee compiler instance + // would have iterated over these and marked them accordingly. impAssignTempGen(tmpNum, argNode, structHnd, CHECK_SPILL_NONE, &afterStmt, callDI, block); // We used to refine the temp type here based on diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index f14688a114b5e8..cf9802e89801b6 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -2146,16 +2146,7 @@ GenTree* Compiler::fgCreateMonitorTree(unsigned lvaMonAcquired, unsigned lvaThis // ret(...) -> // ret(comma(comma(tmp=...,call mon_exit), tmp)) // - // - // Before morph stage, it is possible to have a case of GT_RETURN(TYP_LONG, op1) where op1's type is - // TYP_STRUCT (of 8-bytes) and op1 is call node. See the big comment block in impReturnInstruction() - // for details for the case where info.compRetType is not the same as info.compRetNativeType. For - // this reason pass compMethodInfo->args.retTypeClass which is guaranteed to be a valid class handle - // if the return type is a value class. Note that fgInsertCommFormTemp() in turn uses this class handle - // if the type of op1 is TYP_STRUCT to perform lvaSetStruct() on the new temp that is created, which - // in turn passes it to VM to know the size of value type. 
- GenTree* temp = fgInsertCommaFormTemp(&retNode->AsOp()->gtOp1, info.compMethodInfo->args.retTypeClass); - + GenTree* temp = fgInsertCommaFormTemp(&retNode->AsOp()->gtOp1); GenTree* lclVar = retNode->AsOp()->gtOp1->AsOp()->gtOp2; // The return can't handle all of the trees that could be on the right-hand-side of an assignment, diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index bdefd38fb42798..aeea5d349313ca 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -1381,11 +1381,13 @@ void NewCallArg::ValidateTypes() if (varTypeIsStruct(SignatureType)) { assert(SignatureClsHnd != NO_CLASS_HANDLE); + assert(SignatureType == Node->TypeGet()); - Compiler* comp = JitTls::GetCompiler(); - CORINFO_CLASS_HANDLE clsHnd = comp->gtGetStructHandleIfPresent(Node); - assert((clsHnd == nullptr) || (SignatureClsHnd == clsHnd) || - (comp->info.compCompHnd->getClassSize(SignatureClsHnd) == comp->info.compCompHnd->getClassSize(clsHnd))); + if (SignatureType == TYP_STRUCT) + { + Compiler* comp = JitTls::GetCompiler(); + assert(ClassLayout::AreCompatible(comp->typGetObjLayout(SignatureClsHnd), Node->GetLayout(comp))); + } } } #endif @@ -8967,8 +8969,7 @@ GenTree* Compiler::gtCloneExpr( copy = new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsMultiOp()), tree->AsHWIntrinsic()->GetHWIntrinsicId(), - tree->AsHWIntrinsic()->GetSimdBaseJitType(), tree->AsHWIntrinsic()->GetSimdSize(), - tree->AsHWIntrinsic()->IsSimdAsHWIntrinsic()); + tree->AsHWIntrinsic()->GetSimdBaseJitType(), tree->AsHWIntrinsic()->GetSimdSize()); copy->AsHWIntrinsic()->SetAuxiliaryJitType(tree->AsHWIntrinsic()->GetAuxiliaryJitType()); goto CLONE_MULTIOP_OPERANDS; #endif @@ -17842,95 +17843,6 @@ bool Compiler::gtStoreDefinesField( return false; } -CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree) -{ - CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE; - tree = tree->gtEffectiveVal(); - if (varTypeIsStruct(tree->gtType)) - { - switch (tree->gtOper) - { - case GT_MKREFANY: - structHnd = impGetRefAnyClass(); - break; - case GT_OBJ: - structHnd = tree->AsObj()->GetLayout()->GetClassHandle(); - break; - case GT_BLK: - structHnd = tree->AsBlk()->GetLayout()->GetClassHandle(); - break; - case GT_CALL: - structHnd = tree->AsCall()->gtRetClsHnd; - break; - case GT_RET_EXPR: - structHnd = tree->AsRetExpr()->gtInlineCandidate->gtRetClsHnd; - break; - case GT_FIELD: - info.compCompHnd->getFieldType(tree->AsField()->gtFldHnd, &structHnd); - break; - case GT_ASG: - structHnd = gtGetStructHandleIfPresent(tree->gtGetOp1()); - break; - case GT_LCL_FLD: -#ifdef FEATURE_SIMD - if (varTypeIsSIMD(tree)) - { - structHnd = gtGetCanonicalStructHandleForSIMD(tree->TypeGet()); - } - else -#endif // FEATURE_SIMD - { - structHnd = tree->AsLclFld()->GetLayout()->GetClassHandle(); - } - break; - case GT_LCL_VAR: - { - LclVarDsc* dsc = lvaGetDesc(tree->AsLclVar()); - if ((dsc->GetLayout() != nullptr) && !dsc->GetLayout()->IsBlockLayout()) - { - structHnd = dsc->GetLayout()->GetClassHandle(); - } - break; - } - case GT_RETURN: - structHnd = gtGetStructHandleIfPresent(tree->AsOp()->gtOp1); - break; -#ifdef FEATURE_SIMD - case GT_IND: - if (varTypeIsSIMD(tree)) - { - structHnd = gtGetCanonicalStructHandleForSIMD(tree->TypeGet()); - } - break; - case GT_CNS_VEC: - structHnd = gtGetCanonicalStructHandleForSIMD(tree->TypeGet()); - break; -#endif // FEATURE_SIMD -#ifdef FEATURE_HW_INTRINSICS - case GT_HWINTRINSIC: - if (varTypeIsSIMD(tree)) - { - 
structHnd = - gtGetStructHandleForSimdOrHW(tree->TypeGet(), tree->AsHWIntrinsic()->GetSimdBaseJitType(), - tree->AsHWIntrinsic()->IsSimdAsHWIntrinsic()); - } - break; -#endif - default: - break; - } - } - - return structHnd; -} - -CORINFO_CLASS_HANDLE Compiler::gtGetStructHandle(GenTree* tree) -{ - CORINFO_CLASS_HANDLE structHnd = gtGetStructHandleIfPresent(tree); - assert(structHnd != NO_CLASS_HANDLE); - return structHnd; -} - //------------------------------------------------------------------------ // gtGetClassHandle: find class handle for a ref type // @@ -19240,24 +19152,19 @@ bool GenTree::isEvexCompatibleHWIntrinsic() GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) + unsigned simdSize) { - return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, - simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return new (this, GT_HWINTRINSIC) + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize); } -GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, - GenTree* op1, - NamedIntrinsic hwIntrinsicID, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode( + var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { SetOpLclRelatedToSIMDIntrinsic(op1); - return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, - simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1); + return new (this, GT_HWINTRINSIC) + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize, op1); } GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, @@ -19265,14 +19172,13 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) + unsigned simdSize) { SetOpLclRelatedToSIMDIntrinsic(op1); SetOpLclRelatedToSIMDIntrinsic(op2); - return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, - simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2); + return new (this, GT_HWINTRINSIC) + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize, op1, op2); } GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, @@ -19281,15 +19187,14 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) + unsigned simdSize) { SetOpLclRelatedToSIMDIntrinsic(op1); SetOpLclRelatedToSIMDIntrinsic(op2); SetOpLclRelatedToSIMDIntrinsic(op3); - return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, - simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2, op3); + return new (this, GT_HWINTRINSIC) + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize, op1, op2, op3); } GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, @@ -19299,17 +19204,15 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, GenTree* op4, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool 
isSimdAsHWIntrinsic) + unsigned simdSize) { SetOpLclRelatedToSIMDIntrinsic(op1); SetOpLclRelatedToSIMDIntrinsic(op2); SetOpLclRelatedToSIMDIntrinsic(op3); SetOpLclRelatedToSIMDIntrinsic(op4); - return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic, op1, op2, op3, op4); + return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, + simdBaseJitType, simdSize, op1, op2, op3, op4); } GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, @@ -19317,8 +19220,7 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, size_t operandCount, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) + unsigned simdSize) { IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), operandCount); for (size_t i = 0; i < operandCount; i++) @@ -19328,15 +19230,14 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, } return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize); } GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) + unsigned simdSize) { for (size_t i = 0; i < nodeBuilder.GetOperandCount(); i++) { @@ -19344,11 +19245,10 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types ty } return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdAbsNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdAbsNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -19397,7 +19297,7 @@ GenTree* Compiler::gtNewSimdAbsNode( } } - return gtNewSimdBinOpNode(GT_AND_NOT, type, op1, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdBinOpNode(GT_AND_NOT, type, op1, bitMask, simdBaseJitType, simdSize); } assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX2)); @@ -19405,26 +19305,24 @@ GenTree* Compiler::gtNewSimdAbsNode( if ((simdBaseType != TYP_LONG) && ((simdSize == 32) || compOpportunisticallyDependsOn(InstructionSet_SSSE3))) { NamedIntrinsic intrinsic = (simdSize == 32) ? 
NI_AVX2_Abs : NI_SSSE3_Abs; - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); } else { - GenTree* tmp; - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); - - GenTree* op1Dup1 = fgMakeMultiUse(&op1, clsHnd); - GenTree* op1Dup2 = fgMakeMultiUse(&op1Dup1, clsHnd); + GenTree* tmp; + GenTree* op1Dup1 = fgMakeMultiUse(&op1); + GenTree* op1Dup2 = fgMakeMultiUse(&op1Dup1); // op1 = op1 < Zero tmp = gtNewZeroConNode(type); - op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, tmp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, tmp, simdBaseJitType, simdSize); // tmp = Zero - op1Dup1 tmp = gtNewZeroConNode(type); - tmp = gtNewSimdBinOpNode(GT_SUB, type, tmp, op1Dup1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp = gtNewSimdBinOpNode(GT_SUB, type, tmp, op1Dup1, simdBaseJitType, simdSize); // result = ConditionalSelect(op1, tmp, op1Dup2) - return gtNewSimdCndSelNode(type, op1, tmp, op1Dup2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdCndSelNode(type, op1, tmp, op1Dup2, simdBaseJitType, simdSize); } #elif defined(TARGET_ARM64) NamedIntrinsic intrinsic = NI_AdvSimd_Abs; @@ -19439,19 +19337,14 @@ GenTree* Compiler::gtNewSimdAbsNode( } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); #else #error Unsupported platform #endif } -GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, - var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdBinOpNode( + genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -19477,8 +19370,7 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, (op2->TypeIs(TYP_SIMD12) && type == TYP_SIMD16)); } - NamedIntrinsic intrinsic = NI_Illegal; - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); + NamedIntrinsic intrinsic = NI_Illegal; switch (op) { @@ -19611,7 +19503,7 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, if (varTypeIsArithmetic(op2)) { - op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize); } if (simdSize == 32) @@ -19667,7 +19559,7 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, { op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask)); op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_SSE2_ConvertScalarToVector128Int32, CORINFO_TYPE_INT, - 16, isSimdAsHWIntrinsic); + 16); } if (simdSize == 32) @@ -19719,8 +19611,7 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, if (broadcastOp != nullptr) { - *broadcastOp = - gtNewSimdCreateBroadcastNode(type, *broadcastOp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + *broadcastOp = gtNewSimdCreateBroadcastNode(type, *broadcastOp, simdBaseJitType, simdSize); } switch (simdBaseType) @@ -19755,37 +19646,35 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, else { // op1Dup = op1 - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); + GenTree* op1Dup = fgMakeMultiUse(&op1); // op2Dup = op2 - 
GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd); + GenTree* op2Dup = fgMakeMultiUse(&op2); // op1Dup = Sse2.ShiftRightLogical128BitLane(op1Dup, 4) - op1Dup = gtNewSimdHWIntrinsicNode(type, op1Dup, gtNewIconNode(4, TYP_INT), - NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, - simdSize, isSimdAsHWIntrinsic); + op1Dup = + gtNewSimdHWIntrinsicNode(type, op1Dup, gtNewIconNode(4, TYP_INT), + NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize); // op2Dup = Sse2.ShiftRightLogical128BitLane(op2Dup, 4) - op2Dup = gtNewSimdHWIntrinsicNode(type, op2Dup, gtNewIconNode(4, TYP_INT), - NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, - simdSize, isSimdAsHWIntrinsic); + op2Dup = + gtNewSimdHWIntrinsicNode(type, op2Dup, gtNewIconNode(4, TYP_INT), + NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize); // op2Dup = Sse2.Multiply(op1Dup.AsUInt32(), op2Dup.AsUInt32()).AsInt32() op2Dup = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_Multiply, CORINFO_TYPE_ULONG, - simdSize, isSimdAsHWIntrinsic); + simdSize); // op2Dup = Sse2.Shuffle(op2Dup, (0, 0, 2, 0)) - op2Dup = - gtNewSimdHWIntrinsicNode(type, op2Dup, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), - NI_SSE2_Shuffle, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op2Dup = gtNewSimdHWIntrinsicNode(type, op2Dup, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), + NI_SSE2_Shuffle, simdBaseJitType, simdSize); // op1 = Sse2.Multiply(op1.AsUInt32(), op2.AsUInt32()).AsInt32() - op1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_Multiply, CORINFO_TYPE_ULONG, simdSize, - isSimdAsHWIntrinsic); + op1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_Multiply, CORINFO_TYPE_ULONG, simdSize); // op1 = Sse2.Shuffle(op1, (0, 0, 2, 0)) op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle, - simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + simdBaseJitType, simdSize); // op2 = op2Dup; op2 = op2Dup; @@ -19986,7 +19875,7 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, if (varTypeIsArithmetic(op2)) { - op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize); } if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE)) @@ -20071,7 +19960,7 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, op2 = gtNewOperNode(GT_NEG, TYP_INT, op2); } - op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize); if ((simdSize == 8) && varTypeIsLong(simdBaseType)) { @@ -20128,8 +20017,7 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, { if (scalarOp != nullptr) { - *scalarOp = gtNewSimdCreateBroadcastNode(type, *scalarOp, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + *scalarOp = gtNewSimdCreateBroadcastNode(type, *scalarOp, simdBaseJitType, simdSize); } intrinsic = NI_AdvSimd_Multiply; break; @@ -20144,8 +20032,7 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, if (scalarOp != nullptr) { intrinsic = NI_AdvSimd_MultiplyByScalar; - *scalarOp = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, *scalarOp, simdBaseJitType, 8, - isSimdAsHWIntrinsic); + *scalarOp = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, *scalarOp, simdBaseJitType, 8); } else { @@ -20159,8 +20046,7 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, if (scalarOp != nullptr) { intrinsic = NI_AdvSimd_Arm64_MultiplyByScalar; - *scalarOp = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, *scalarOp, simdBaseJitType, 8, - 
isSimdAsHWIntrinsic); + *scalarOp = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, *scalarOp, simdBaseJitType, 8); } else { @@ -20221,11 +20107,10 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdCeilNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdCeilNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -20265,16 +20150,11 @@ GenTree* Compiler::gtNewSimdCeilNode( #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, - var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdCmpOpNode( + genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -20290,8 +20170,7 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); - NamedIntrinsic intrinsic = NI_Illegal; - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); + NamedIntrinsic intrinsic = NI_Illegal; switch (op) { @@ -20335,14 +20214,13 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // Shuffle is meant to swap the comparison results of low-32-bits and high 32-bits of // respective long elements. 
- GenTree* tmp = - gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); + GenTree* tmp = gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize); - op1 = fgMakeMultiUse(&tmp, clsHnd); + op1 = fgMakeMultiUse(&tmp); op2 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_ZWXY), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); + CORINFO_TYPE_INT, simdSize); - return gtNewSimdBinOpNode(GT_AND, type, tmp, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdBinOpNode(GT_AND, type, tmp, op2, simdBaseJitType, simdSize); } } else @@ -20384,13 +20262,11 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, if (!varTypeIsLong(simdBaseType)) { assert(!varTypeIsFloating(simdBaseType)); - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); + GenTree* op1Dup = fgMakeMultiUse(&op1); // EQ(Max(op1, op2), op1) - GenTree* maxNode = - gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); - return gtNewSimdCmpOpNode(GT_EQ, type, maxNode, op1Dup, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + GenTree* maxNode = gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize); + return gtNewSimdCmpOpNode(GT_EQ, type, maxNode, op1Dup, simdBaseJitType, simdSize); } } @@ -20405,13 +20281,13 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // // result = BitwiseOr(op1, op2) - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); - GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd); + GenTree* op1Dup = fgMakeMultiUse(&op1); + GenTree* op2Dup = fgMakeMultiUse(&op2); - op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); - op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize); + op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize); - return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize); } break; } @@ -20488,10 +20364,10 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, GenTree* vecCon2 = gtCloneExpr(vecCon1); // op1 = op1 - constVector - op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, vecCon1, opJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, vecCon1, opJitType, simdSize); // op2 = op2 - constVector - op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, vecCon2, opJitType, simdSize, isSimdAsHWIntrinsic); + op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, vecCon2, opJitType, simdSize); } // This should have been mutated by the above path @@ -20552,28 +20428,25 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // // result = BitwiseOr(op1, op2) - GenTree* op1Dup1 = fgMakeMultiUse(&op1, clsHnd); - GenTree* op1Dup2 = fgMakeMultiUse(&op1Dup1, clsHnd); + GenTree* op1Dup1 = fgMakeMultiUse(&op1); + GenTree* op1Dup2 = fgMakeMultiUse(&op1Dup1); - GenTree* op2Dup1 = fgMakeMultiUse(&op2, clsHnd); - GenTree* op2Dup2 = fgMakeMultiUse(&op2Dup1, clsHnd); + GenTree* op2Dup1 = fgMakeMultiUse(&op2); + GenTree* op2Dup2 = fgMakeMultiUse(&op2Dup1); - GenTree* t = - gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); - GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize, - isSimdAsHWIntrinsic); - GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize, - 
isSimdAsHWIntrinsic); + GenTree* t = gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize); + GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize); + GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize); op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); + CORINFO_TYPE_INT, simdSize); u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); + CORINFO_TYPE_INT, simdSize); v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); + CORINFO_TYPE_INT, simdSize); - op2 = gtNewSimdBinOpNode(GT_AND, type, u, v, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); - return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op2 = gtNewSimdBinOpNode(GT_AND, type, u, v, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize); } } else @@ -20615,13 +20488,11 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, if (!varTypeIsLong(simdBaseType)) { assert(!varTypeIsFloating(simdBaseType)); - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); + GenTree* op1Dup = fgMakeMultiUse(&op1); // EQ(Min(op1, op2), op1) - GenTree* minNode = - gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); - return gtNewSimdCmpOpNode(GT_EQ, type, minNode, op1Dup, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + GenTree* minNode = gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize); + return gtNewSimdCmpOpNode(GT_EQ, type, minNode, op1Dup, simdBaseJitType, simdSize); } } @@ -20636,13 +20507,13 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // // result = BitwiseOr(op1, op2) - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); - GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd); + GenTree* op1Dup = fgMakeMultiUse(&op1); + GenTree* op2Dup = fgMakeMultiUse(&op2); - op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); - op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize); + op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize); - return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize); } break; } @@ -20719,10 +20590,10 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, GenTree* vecCon2 = gtCloneExpr(vecCon1); // op1 = op1 - constVector - op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, vecCon1, opJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, vecCon1, opJitType, simdSize); // op2 = op2 - constVector - op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, vecCon2, opJitType, simdSize, isSimdAsHWIntrinsic); + op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, vecCon2, opJitType, simdSize); } // This should have been mutated by the above path @@ -20783,28 +20654,25 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // // result = BitwiseOr(op1, op2) - GenTree* op1Dup1 = fgMakeMultiUse(&op1, clsHnd); - GenTree* op1Dup2 = fgMakeMultiUse(&op1Dup1, clsHnd); + GenTree* op1Dup1 = fgMakeMultiUse(&op1); + GenTree* op1Dup2 
= fgMakeMultiUse(&op1Dup1); - GenTree* op2Dup1 = fgMakeMultiUse(&op2, clsHnd); - GenTree* op2Dup2 = fgMakeMultiUse(&op2Dup1, clsHnd); + GenTree* op2Dup1 = fgMakeMultiUse(&op2); + GenTree* op2Dup2 = fgMakeMultiUse(&op2Dup1); - GenTree* t = - gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); - GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize, - isSimdAsHWIntrinsic); - GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize, - isSimdAsHWIntrinsic); + GenTree* t = gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize); + GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize); + GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize); op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); + CORINFO_TYPE_INT, simdSize); u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); + CORINFO_TYPE_INT, simdSize); v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle, - CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); + CORINFO_TYPE_INT, simdSize); - op2 = gtNewSimdBinOpNode(GT_AND, type, u, v, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); - return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op2 = gtNewSimdBinOpNode(GT_AND, type, u, v, simdBaseJitType, simdSize); + return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize); } } else @@ -20892,16 +20760,11 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdCmpOpAllNode(genTreeOps op, - var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdCmpOpAllNode( + genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); assert(type == TYP_BOOL); @@ -20961,7 +20824,7 @@ GenTree* Compiler::gtNewSimdCmpOpAllNode(genTreeOps op, intrinsic = NI_Vector128_op_Equality; } - op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize); op2 = gtNewAllBitsSetConNode(simdType); if (simdBaseType == TYP_FLOAT) @@ -21000,7 +20863,7 @@ GenTree* Compiler::gtNewSimdCmpOpAllNode(genTreeOps op, intrinsic = NI_Vector128_op_Equality; } - op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize); op2 = gtNewAllBitsSetConNode(simdType); if (simdBaseType == TYP_FLOAT) @@ -21026,16 +20889,11 @@ GenTree* Compiler::gtNewSimdCmpOpAllNode(genTreeOps op, } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); } -GenTree* 
Compiler::gtNewSimdCmpOpAnyNode(genTreeOps op, - var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdCmpOpAnyNode( + genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); assert(type == TYP_BOOL); @@ -21081,7 +20939,7 @@ GenTree* Compiler::gtNewSimdCmpOpAnyNode(genTreeOps op, intrinsic = NI_Vector128_op_Inequality; } - op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize); op2 = gtNewZeroConNode(simdType); if (simdBaseType == TYP_FLOAT) @@ -21124,7 +20982,7 @@ GenTree* Compiler::gtNewSimdCmpOpAnyNode(genTreeOps op, intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality; - op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize); op2 = gtNewZeroConNode(simdType); if (simdBaseType == TYP_FLOAT) @@ -21156,16 +21014,11 @@ GenTree* Compiler::gtNewSimdCmpOpAnyNode(genTreeOps op, } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdCndSelNode(var_types type, - GenTree* op1, - GenTree* op2, - GenTree* op3, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdCndSelNode( + var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -21189,10 +21042,9 @@ GenTree* Compiler::gtNewSimdCndSelNode(var_types type, #if defined(TARGET_XARCH) assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX)); intrinsic = (simdSize == 32) ? 
NI_Vector256_ConditionalSelect : NI_Vector128_ConditionalSelect; - return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, intrinsic, simdBaseJitType, simdSize); #elif defined(TARGET_ARM64) - return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseJitType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 @@ -21206,13 +21058,14 @@ GenTree* Compiler::gtNewSimdCndSelNode(var_types type, // op1 - The value of broadcast to every element of the simd value // simdBaseJitType - The base JIT type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic -// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false // // Returns: // The created CreateBroadcast node // -GenTree* Compiler::gtNewSimdCreateBroadcastNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdCreateBroadcastNode(var_types type, + GenTree* op1, + CorInfoType simdBaseJitType, + unsigned simdSize) { NamedIntrinsic hwIntrinsicID = NI_Vector128_Create; var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); @@ -21330,7 +21183,7 @@ GenTree* Compiler::gtNewSimdCreateBroadcastNode( #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 - return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -21341,13 +21194,14 @@ GenTree* Compiler::gtNewSimdCreateBroadcastNode( // op1 - The value of element 0 of the simd value // simdBaseJitType - The base JIT type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic -// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false // // Returns: // The created CreateScalar node // -GenTree* Compiler::gtNewSimdCreateScalarNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdCreateScalarNode(var_types type, + GenTree* op1, + CorInfoType simdBaseJitType, + unsigned simdSize) { NamedIntrinsic hwIntrinsicID = NI_Vector128_CreateScalar; var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); @@ -21442,7 +21296,7 @@ GenTree* Compiler::gtNewSimdCreateScalarNode( #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 - return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -21453,7 +21307,6 @@ GenTree* Compiler::gtNewSimdCreateScalarNode( // op1 - The value of element 0 of the simd value // simdBaseJitType - The base JIT type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic -// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false // // Returns: // The created CreateScalarUnsafe node @@ -21461,8 +21314,10 @@ GenTree* 
Compiler::gtNewSimdCreateScalarNode( // Remarks: // This API is unsafe as it leaves the upper-bits of the vector undefined // -GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode(var_types type, + GenTree* op1, + CorInfoType simdBaseJitType, + unsigned simdSize) { NamedIntrinsic hwIntrinsicID = NI_Vector128_CreateScalarUnsafe; var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); @@ -21586,15 +21441,11 @@ GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode( #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 - return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdDotProdNode(var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdDotProdNode( + var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -21636,11 +21487,10 @@ GenTree* Compiler::gtNewSimdDotProdNode(var_types type, #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdFloorNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdFloorNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -21679,15 +21529,11 @@ GenTree* Compiler::gtNewSimdFloorNode( #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdGetElementNode(var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdGetElementNode( + var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { NamedIntrinsic intrinsicId = NI_Vector128_GetElement; var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); @@ -21745,11 +21591,10 @@ GenTree* Compiler::gtNewSimdGetElementNode(var_types type, op2 = addRangeCheckForHWIntrinsic(op2, 0, immUpperBound); } - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdGetLowerNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdGetLowerNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) { var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); @@ -21775,11 +21620,10 @@ GenTree* Compiler::gtNewSimdGetLowerNode( #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsicId != 
NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsicId, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdGetUpperNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdGetUpperNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) { var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); @@ -21805,7 +21649,7 @@ GenTree* Compiler::gtNewSimdGetUpperNode( #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsicId != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsicId, simdBaseJitType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -21816,13 +21660,11 @@ GenTree* Compiler::gtNewSimdGetUpperNode( // op1 - The address of the value to be loaded // simdBaseJitType - The base JIT type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic -// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false // // Returns: // The created Load node // -GenTree* Compiler::gtNewSimdLoadNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdLoadNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) { assert(varTypeIsSIMD(type)); assert(getSIMDTypeForSize(simdSize) == type); @@ -21843,13 +21685,14 @@ GenTree* Compiler::gtNewSimdLoadNode( // op1 - The address of the value to be loaded // simdBaseJitType - The base JIT type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic -// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false // // Returns: // The created LoadAligned node // -GenTree* Compiler::gtNewSimdLoadAlignedNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdLoadAlignedNode(var_types type, + GenTree* op1, + CorInfoType simdBaseJitType, + unsigned simdSize) { #if defined(TARGET_XARCH) assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -21884,14 +21727,14 @@ GenTree* Compiler::gtNewSimdLoadAlignedNode( } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); #elif defined(TARGET_ARM64) // ARM64 doesn't have aligned loads, but aligned loads are only validated to be // aligned when optimizations are disable, so only skip the intrinsic handling // if optimizations are enabled assert(opts.OptimizationEnabled()); - return gtNewSimdLoadNode(type, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdLoadNode(type, op1, simdBaseJitType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 @@ -21905,13 +21748,14 @@ GenTree* Compiler::gtNewSimdLoadAlignedNode( // op1 - The address of the value to be loaded // simdBaseJitType - The base JIT type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic -// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; 
otherwise, false // // Returns: // The created LoadNonTemporal node // -GenTree* Compiler::gtNewSimdLoadNonTemporalNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdLoadNonTemporalNode(var_types type, + GenTree* op1, + CorInfoType simdBaseJitType, + unsigned simdSize) { #if defined(TARGET_XARCH) assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -21981,25 +21825,21 @@ GenTree* Compiler::gtNewSimdLoadNonTemporalNode( } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); #elif defined(TARGET_ARM64) // ARM64 doesn't have aligned loads, but aligned loads are only validated to be // aligned when optimizations are disable, so only skip the intrinsic handling // if optimizations are enabled assert(opts.OptimizationEnabled()); - return gtNewSimdLoadNode(type, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdLoadNode(type, op1, simdBaseJitType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 } -GenTree* Compiler::gtNewSimdMaxNode(var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdMaxNode( + var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -22015,8 +21855,7 @@ GenTree* Compiler::gtNewSimdMaxNode(var_types type, var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); - NamedIntrinsic intrinsic = NI_Illegal; - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); + NamedIntrinsic intrinsic = NI_Illegal; #if defined(TARGET_XARCH) if (simdSize == 32) @@ -22102,20 +21941,20 @@ GenTree* Compiler::gtNewSimdMaxNode(var_types type, // op1 = op1 - constVector // -or- // op1 = op1 + constVector - op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, vecCon1, opJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, vecCon1, opJitType, simdSize); // op2 = op2 - constVector // -or- // op2 = op2 + constVector - op2 = gtNewSimdBinOpNode(fixupOp1, type, op2, vecCon2, opJitType, simdSize, isSimdAsHWIntrinsic); + op2 = gtNewSimdBinOpNode(fixupOp1, type, op2, vecCon2, opJitType, simdSize); // op1 = Max(op1, op2) - op1 = gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize); // result = op1 + constVector // -or- // result = op1 - constVector - return gtNewSimdBinOpNode(fixupOp2, type, op1, vecCon3, opJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdBinOpNode(fixupOp2, type, op1, vecCon3, opJitType, simdSize); } case TYP_INT: @@ -22173,25 +22012,21 @@ GenTree* Compiler::gtNewSimdMaxNode(var_types type, if (intrinsic != NI_Illegal) { - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); } - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); - GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd); + GenTree* op1Dup = fgMakeMultiUse(&op1); + GenTree* op2Dup = fgMakeMultiUse(&op2); // op1 = op1 > op2 - op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, 
simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize); // result = ConditionalSelect(op1, op1Dup, op2Dup) - return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdMinNode(var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdMinNode( + var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -22207,8 +22042,7 @@ GenTree* Compiler::gtNewSimdMinNode(var_types type, var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); - NamedIntrinsic intrinsic = NI_Illegal; - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); + NamedIntrinsic intrinsic = NI_Illegal; #if defined(TARGET_XARCH) if (simdSize == 32) @@ -22282,31 +22116,28 @@ GenTree* Compiler::gtNewSimdMinNode(var_types type, assert(opJitType != simdBaseJitType); assert(opType != simdBaseType); - GenTree* constVector = - gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); + GenTree* constVector = gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize); - GenTree* constVectorDup1 = fgMakeMultiUse(&constVector, clsHnd); - GenTree* constVectorDup2 = fgMakeMultiUse(&constVectorDup1, clsHnd); + GenTree* constVectorDup1 = fgMakeMultiUse(&constVector); + GenTree* constVectorDup2 = fgMakeMultiUse(&constVectorDup1); // op1 = op1 - constVector // -or- // op1 = op1 + constVector - op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize); // op2 = op2 - constVectorDup1 // -or- // op2 = op2 + constVectorDup1 - op2 = - gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic); + op2 = gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize); // op1 = Min(op1, op2) - op1 = gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize); // result = op1 + constVectorDup2 // -or- // result = op1 - constVectorDup2 - return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize); } case TYP_INT: @@ -22364,25 +22195,21 @@ GenTree* Compiler::gtNewSimdMinNode(var_types type, if (intrinsic != NI_Illegal) { - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize); } - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); - GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd); + GenTree* op1Dup = fgMakeMultiUse(&op1); + GenTree* op2Dup = fgMakeMultiUse(&op2); // op1 = op1 < op2 - op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize); // result = ConditionalSelect(op1, op1Dup, op2Dup) - return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, 
simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdNarrowNode(var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdNarrowNode( + var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -22533,19 +22360,18 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, } } - tmp1 = gtNewSimdHWIntrinsicNode(tmpSimdType, op1, intrinsicId, opBaseJitType, simdSize, isSimdAsHWIntrinsic); - tmp2 = gtNewSimdHWIntrinsicNode(tmpSimdType, op2, intrinsicId, opBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(tmpSimdType, op1, intrinsicId, opBaseJitType, simdSize); + tmp2 = gtNewSimdHWIntrinsicNode(tmpSimdType, op2, intrinsicId, opBaseJitType, simdSize); if (simdSize == 16) { - return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, CORINFO_TYPE_FLOAT, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, CORINFO_TYPE_FLOAT, simdSize); } intrinsicId = (simdSize == 64) ? NI_Vector256_ToVector512Unsafe : NI_Vector128_ToVector256Unsafe; - tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsicId, simdBaseJitType, simdSize / 2, isSimdAsHWIntrinsic); - return gtNewSimdWithUpperNode(type, tmp1, tmp2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsicId, simdBaseJitType, simdSize / 2); + return gtNewSimdWithUpperNode(type, tmp1, tmp2, simdBaseJitType, simdSize); } else if (simdSize == 32) { @@ -22560,10 +22386,6 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, // This is the same in principle to the other comments below, however due to // code formatting, its too long to reasonably display here. - - CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT; - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic); - GenTreeVecCon* vecCon1 = gtNewVconNode(type); for (unsigned i = 0; i < (simdSize / 8); i++) @@ -22573,14 +22395,14 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, GenTree* vecCon2 = gtCloneExpr(vecCon1); - tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); - tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize); + tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize); tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE, - simdSize, isSimdAsHWIntrinsic); + simdSize); CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG; return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64, - permuteBaseJitType, simdSize, isSimdAsHWIntrinsic); + permuteBaseJitType, simdSize); } case TYP_SHORT: @@ -22602,9 +22424,6 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, // var tmp3 = Avx2.PackUnsignedSaturate(tmp1, tmp2); // return Avx2.Permute4x64(tmp3.AsUInt64(), SHUFFLE_WYZX).As(); - CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? 
CORINFO_TYPE_INT : CORINFO_TYPE_UINT; - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic); - GenTreeVecCon* vecCon1 = gtNewVconNode(type); for (unsigned i = 0; i < (simdSize / 8); i++) @@ -22614,14 +22433,14 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, GenTree* vecCon2 = gtCloneExpr(vecCon1); - tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); - tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize); + tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize); tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_PackUnsignedSaturate, CORINFO_TYPE_USHORT, - simdSize, isSimdAsHWIntrinsic); + simdSize); CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG; return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64, - permuteBaseJitType, simdSize, isSimdAsHWIntrinsic); + permuteBaseJitType, simdSize); } case TYP_INT: @@ -22642,21 +22461,17 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, // var tmp3 = Avx2.UnpackLow(tmp1, tmp2); // return Avx2.Permute4x64(tmp3.AsUInt64(), SHUFFLE_WYZX).AsUInt32(); - CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG; - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic); + CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG; - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); - GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd); + GenTree* op1Dup = fgMakeMultiUse(&op1); + GenTree* op2Dup = fgMakeMultiUse(&op2); - tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); - tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_AVX2_UnpackHigh, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); - tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize); + tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_AVX2_UnpackHigh, simdBaseJitType, simdSize); + tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize); return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64, - opBaseJitType, simdSize, isSimdAsHWIntrinsic); + opBaseJitType, simdSize); } case TYP_FLOAT: @@ -22674,14 +22489,13 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, CorInfoType opBaseJitType = CORINFO_TYPE_DOUBLE; - tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AVX_ConvertToVector128Single, opBaseJitType, - simdSize, isSimdAsHWIntrinsic); - tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_AVX_ConvertToVector128Single, opBaseJitType, - simdSize, isSimdAsHWIntrinsic); + tmp1 = + gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AVX_ConvertToVector128Single, opBaseJitType, simdSize); + tmp2 = + gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_AVX_ConvertToVector128Single, opBaseJitType, simdSize); - tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16, - isSimdAsHWIntrinsic); - return gtNewSimdWithUpperNode(type, tmp1, tmp2, simdBaseJitType, simdSize, 
isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16); + return gtNewSimdWithUpperNode(type, tmp1, tmp2, simdBaseJitType, simdSize); } default: @@ -22711,9 +22525,6 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, // var tmp2 = Sse2.And(op2.AsSByte(), vcns); // return Sse2.PackUnsignedSaturate(tmp1, tmp2).As(); - CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT; - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic); - GenTreeVecCon* vecCon1 = gtNewVconNode(type); for (unsigned i = 0; i < (simdSize / 8); i++) @@ -22723,11 +22534,11 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, GenTree* vecCon2 = gtCloneExpr(vecCon1); - tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); - tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize); + tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize); return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE, - simdSize, isSimdAsHWIntrinsic); + simdSize); } case TYP_SHORT: @@ -22737,10 +22548,6 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, // op2 = Elements 4, 5, 6, 7; 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U // // ... - - CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT; - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic); - if (compOpportunisticallyDependsOn(InstructionSet_SSE41)) { // ... @@ -22763,13 +22570,11 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, GenTree* vecCon2 = gtCloneExpr(vecCon1); - tmp1 = - gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); - tmp2 = - gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize); + tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize); return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE41_PackUnsignedSaturate, - CORINFO_TYPE_USHORT, simdSize, isSimdAsHWIntrinsic); + CORINFO_TYPE_USHORT, simdSize); } else { @@ -22787,26 +22592,21 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, // var tmp4 = Sse2.UnpackHigh(tmp1, tmp2); // return Sse2.UnpackLow(tmp3, tmp4).As(); - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); - GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd); - - tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); - tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + GenTree* op1Dup = fgMakeMultiUse(&op1); + GenTree* op2Dup = fgMakeMultiUse(&op2); - clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize); + tmp2 = + gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize); - GenTree* tmp1Dup = fgMakeMultiUse(&tmp1, clsHnd); - GenTree* tmp2Dup = fgMakeMultiUse(&tmp2, clsHnd); + GenTree* tmp1Dup = fgMakeMultiUse(&tmp1); + GenTree* tmp2Dup = fgMakeMultiUse(&tmp2); - tmp3 = 
gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); - tmp4 = gtNewSimdHWIntrinsicNode(type, tmp1Dup, tmp2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, - simdSize, isSimdAsHWIntrinsic); + tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize); + tmp4 = + gtNewSimdHWIntrinsicNode(type, tmp1Dup, tmp2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize); - return gtNewSimdHWIntrinsicNode(type, tmp3, tmp4, NI_SSE2_UnpackLow, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, tmp3, tmp4, NI_SSE2_UnpackLow, simdBaseJitType, simdSize); } } @@ -22824,19 +22624,13 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, // var tmp2 = Sse2.UnpackHigh(op1.AsUInt32(), op2.AsUInt32()); // return Sse2.UnpackLow(tmp1, tmp2).As(); - CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG; - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic); + GenTree* op1Dup = fgMakeMultiUse(&op1); + GenTree* op2Dup = fgMakeMultiUse(&op2); - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); - GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd); + tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize); + tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize); - tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); - tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); - - return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize); } case TYP_FLOAT: @@ -22854,13 +22648,10 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, CorInfoType opBaseJitType = CORINFO_TYPE_DOUBLE; - tmp1 = gtNewSimdHWIntrinsicNode(type, op1, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize, - isSimdAsHWIntrinsic); - tmp2 = gtNewSimdHWIntrinsicNode(type, op2, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize, - isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(type, op1, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize); + tmp2 = gtNewSimdHWIntrinsicNode(type, op2, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize); - return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, simdBaseJitType, simdSize); } default: @@ -22877,20 +22668,18 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, // var tmp1 = AdvSimd.Arm64.ConvertToSingleLower(op1); // return AdvSimd.Arm64.ConvertToSingleUpper(tmp1, op2); - tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, 8, - isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, 8); return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_Arm64_ConvertToSingleUpper, simdBaseJitType, - simdSize, isSimdAsHWIntrinsic); + simdSize); } else { // var tmp1 = AdvSimd.ExtractNarrowingLower(op1); // return AdvSimd.ExtractNarrowingUpper(tmp1, op2); - tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_ExtractNarrowingLower, 
simdBaseJitType, 8, - isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, 8); return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_ExtractNarrowingUpper, simdBaseJitType, - simdSize, isSimdAsHWIntrinsic); + simdSize); } } else if (varTypeIsFloating(simdBaseType)) @@ -22901,12 +22690,10 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, CorInfoType tmp2BaseJitType = CORINFO_TYPE_DOUBLE; - tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); - tmp2 = gtNewSimdWithUpperNode(TYP_SIMD16, tmp1, op2, tmp2BaseJitType, 16, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize); + tmp2 = gtNewSimdWithUpperNode(TYP_SIMD16, tmp1, op2, tmp2BaseJitType, 16); - return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, simdSize); } else { @@ -22917,24 +22704,18 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, CorInfoType tmp2BaseJitType = varTypeIsSigned(simdBaseType) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG; - tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); - tmp2 = gtNewSimdWithUpperNode(TYP_SIMD16, tmp1, op2, tmp2BaseJitType, 16, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize); + tmp2 = gtNewSimdWithUpperNode(TYP_SIMD16, tmp1, op2, tmp2BaseJitType, 16); - return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, simdSize); } #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 } -GenTree* Compiler::gtNewSimdShuffleNode(var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdShuffleNode( + var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -23057,8 +22838,6 @@ GenTree* Compiler::gtNewSimdShuffleNode(var_types type, // If we aren't crossing lanes, then we can decompose the byte/sbyte // and short/ushort operations into 2x 128-bit operations - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); - // We want to build what is essentially the following managed code: // var op1Lower = op1.GetLower(); // op1Lower = Ssse3.Shuffle(op1Lower, Vector128.Create(...)); @@ -23070,25 +22849,22 @@ GenTree* Compiler::gtNewSimdShuffleNode(var_types type, simdBaseJitType = varTypeIsUnsigned(simdBaseType) ? 
CORINFO_TYPE_UBYTE : CORINFO_TYPE_BYTE; - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); - GenTree* op1Lower = gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + GenTree* op1Dup = fgMakeMultiUse(&op1); + GenTree* op1Lower = gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); op2 = gtNewVconNode(TYP_SIMD16); op2->AsVecCon()->gtSimd16Val = vecCns.v128[0]; - op1Lower = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1Lower, op2, NI_SSSE3_Shuffle, simdBaseJitType, 16, - isSimdAsHWIntrinsic); + op1Lower = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1Lower, op2, NI_SSSE3_Shuffle, simdBaseJitType, 16); - GenTree* op1Upper = - gtNewSimdGetUpperNode(TYP_SIMD16, op1Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + GenTree* op1Upper = gtNewSimdGetUpperNode(TYP_SIMD16, op1Dup, simdBaseJitType, simdSize); op2 = gtNewVconNode(TYP_SIMD16); op2->AsVecCon()->gtSimd16Val = vecCns.v128[1]; - op1Upper = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1Upper, op2, NI_SSSE3_Shuffle, simdBaseJitType, 16, - isSimdAsHWIntrinsic); + op1Upper = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1Upper, op2, NI_SSSE3_Shuffle, simdBaseJitType, 16); - return gtNewSimdWithUpperNode(type, op1Lower, op1Upper, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdWithUpperNode(type, op1Lower, op1Upper, simdBaseJitType, simdSize); } if (elementSize == 4) @@ -23102,16 +22878,14 @@ GenTree* Compiler::gtNewSimdShuffleNode(var_types type, op2->AsVecCon()->gtSimdVal = vecCns; // swap the operands to match the encoding requirements - retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX2_PermuteVar8x32, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX2_PermuteVar8x32, simdBaseJitType, simdSize); } else { assert(elementSize == 8); cnsNode = gtNewIconNode(control); - retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_AVX2_Permute4x64, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_AVX2_Permute4x64, simdBaseJitType, simdSize); } } else @@ -23123,8 +22897,7 @@ GenTree* Compiler::gtNewSimdShuffleNode(var_types type, op2 = gtNewVconNode(type); op2->AsVecCon()->gtSimd16Val = vecCns.v128[0]; - return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSSE3_Shuffle, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSSE3_Shuffle, simdBaseJitType, simdSize); } if (varTypeIsLong(simdBaseType)) @@ -23142,22 +22915,18 @@ GenTree* Compiler::gtNewSimdShuffleNode(var_types type, if (varTypeIsIntegral(simdBaseType)) { - retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_SSE2_Shuffle, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_SSE2_Shuffle, simdBaseJitType, simdSize); } else if (compOpportunisticallyDependsOn(InstructionSet_AVX)) { - retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_AVX_Permute, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_AVX_Permute, simdBaseJitType, simdSize); } else { - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); // for double we need SSE2, but we can't use the integral path ^ because we still need op1Dup here NamedIntrinsic ni = simdBaseType == TYP_DOUBLE ? 
NI_SSE2_Shuffle : NI_SSE_Shuffle; - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); - retNode = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, cnsNode, ni, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + GenTree* op1Dup = fgMakeMultiUse(&op1); + retNode = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, cnsNode, ni, simdBaseJitType, simdSize); } } @@ -23171,7 +22940,7 @@ GenTree* Compiler::gtNewSimdShuffleNode(var_types type, op2->AsVecCon()->gtSimd16Val = mskCns.v128[0]; GenTree* zero = gtNewZeroConNode(type); - retNode = gtNewSimdCndSelNode(type, op2, retNode, zero, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + retNode = gtNewSimdCndSelNode(type, op2, retNode, zero, simdBaseJitType, simdSize); } return retNode; @@ -23205,8 +22974,7 @@ GenTree* Compiler::gtNewSimdShuffleNode(var_types type, { lookupIntrinsic = NI_AdvSimd_Arm64_VectorTableLookup; - op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128, simdBaseJitType, simdSize); } // VectorTableLookup is only valid on byte/sbyte @@ -23215,15 +22983,13 @@ GenTree* Compiler::gtNewSimdShuffleNode(var_types type, op2 = gtNewVconNode(type); op2->AsVecCon()->gtSimdVal = vecCns; - return gtNewSimdHWIntrinsicNode(type, op1, op2, lookupIntrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); - + return gtNewSimdHWIntrinsicNode(type, op1, op2, lookupIntrinsic, simdBaseJitType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 } -GenTree* Compiler::gtNewSimdSqrtNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdSqrtNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -23266,7 +23032,7 @@ GenTree* Compiler::gtNewSimdSqrtNode( #endif // !TARGET_XARCH && !TARGET_ARM64 assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); } //---------------------------------------------------------------------------------------------- @@ -23277,13 +23043,11 @@ GenTree* Compiler::gtNewSimdSqrtNode( // op2 - The SIMD value to be stored at op1 // simdBaseJitType - The base JIT type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic -// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false // // Returns: // The created Store node // -GenTree* Compiler::gtNewSimdStoreNode( - GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdStoreNode(GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { assert(op1 != nullptr); assert(op2 != nullptr); @@ -23306,13 +23070,11 @@ GenTree* Compiler::gtNewSimdStoreNode( // op2 - The SIMD value to be stored at op1 // simdBaseJitType - The base JIT type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic -// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false // // Returns: // The created StoreAligned node // -GenTree* Compiler::gtNewSimdStoreAlignedNode( - GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* 
Compiler::gtNewSimdStoreAlignedNode(GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { #if defined(TARGET_XARCH) assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -23347,14 +23109,14 @@ GenTree* Compiler::gtNewSimdStoreAlignedNode( intrinsic = NI_SSE_StoreAligned; } - return gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, intrinsic, simdBaseJitType, simdSize); #elif defined(TARGET_ARM64) // ARM64 doesn't have aligned stores, but aligned stores are only validated to be // aligned when optimizations are disable, so only skip the intrinsic handling // if optimizations are enabled assert(opts.OptimizationEnabled()); - return gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 @@ -23368,13 +23130,14 @@ GenTree* Compiler::gtNewSimdStoreAlignedNode( // op2 - The SIMD value to be stored at op1 // simdBaseJitType - The base JIT type of SIMD type of the intrinsic // simdSize - The size of the SIMD type of the intrinsic -// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false // // Returns: // The created StoreNonTemporal node // -GenTree* Compiler::gtNewSimdStoreNonTemporalNode( - GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdStoreNonTemporalNode(GenTree* op1, + GenTree* op2, + CorInfoType simdBaseJitType, + unsigned simdSize) { #if defined(TARGET_XARCH) assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -23409,21 +23172,20 @@ GenTree* Compiler::gtNewSimdStoreNonTemporalNode( intrinsic = NI_SSE_StoreAlignedNonTemporal; } - return gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, intrinsic, simdBaseJitType, simdSize); #elif defined(TARGET_ARM64) // ARM64 doesn't have aligned stores, but aligned stores are only validated to be // aligned when optimizations are disable, so only skip the intrinsic handling // if optimizations are enabled assert(opts.OptimizationEnabled()); - return gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 } -GenTree* Compiler::gtNewSimdSumNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -23436,9 +23198,8 @@ GenTree* Compiler::gtNewSimdSumNode( var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); - NamedIntrinsic intrinsic = NI_Illegal; - GenTree* tmp = nullptr; - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(simdType, simdBaseJitType, isSimdAsHWIntrinsic); + NamedIntrinsic intrinsic = NI_Illegal; + GenTree* tmp = nullptr; #if defined(TARGET_XARCH) assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType)); @@ -23476,22 +23237,22 @@ GenTree* Compiler::gtNewSimdSumNode( for (int i = 0; i < haddCount; i++) { - tmp = fgMakeMultiUse(&op1, clsHnd); - op1 = gtNewSimdHWIntrinsicNode(simdType, op1, 
tmp, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp = fgMakeMultiUse(&op1); + op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, intrinsic, simdBaseJitType, simdSize); } if (simdSize == 32) { intrinsic = (simdBaseType == TYP_FLOAT) ? NI_SSE_Add : NI_SSE2_Add; - tmp = fgMakeMultiUse(&op1, clsHnd); - op1 = gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp = fgMakeMultiUse(&op1); + op1 = gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); - tmp = gtNewSimdGetLowerNode(TYP_SIMD16, tmp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); - op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, tmp, intrinsic, simdBaseJitType, 16, isSimdAsHWIntrinsic); + tmp = gtNewSimdGetLowerNode(TYP_SIMD16, tmp, simdBaseJitType, simdSize); + op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, tmp, intrinsic, simdBaseJitType, 16); } - return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, 16, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, 16); #elif defined(TARGET_ARM64) switch (simdBaseType) { @@ -23500,9 +23261,8 @@ GenTree* Compiler::gtNewSimdSumNode( case TYP_SHORT: case TYP_USHORT: { - tmp = gtNewSimdHWIntrinsicNode(simdType, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); - return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic); + tmp = gtNewSimdHWIntrinsicNode(simdType, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8); } case TYP_INT: @@ -23510,16 +23270,14 @@ GenTree* Compiler::gtNewSimdSumNode( { if (simdSize == 8) { - tmp = fgMakeMultiUse(&op1, clsHnd); - tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + tmp = fgMakeMultiUse(&op1); + tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize); } else { - tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 16, - isSimdAsHWIntrinsic); + tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 16); } - return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8); } case TYP_FLOAT: @@ -23527,7 +23285,7 @@ GenTree* Compiler::gtNewSimdSumNode( if (simdSize == 8) { op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType, - simdSize, isSimdAsHWIntrinsic); + simdSize); } else { @@ -23536,13 +23294,12 @@ GenTree* Compiler::gtNewSimdSumNode( for (int i = 0; i < haddCount; i++) { - tmp = fgMakeMultiUse(&op1, clsHnd); + tmp = fgMakeMultiUse(&op1); op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType, - simdSize, isSimdAsHWIntrinsic); + simdSize); } } - return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize); } case TYP_DOUBLE: @@ -23552,9 +23309,9 @@ GenTree* Compiler::gtNewSimdSumNode( if (simdSize == 16) { op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType, - simdSize, isSimdAsHWIntrinsic); + simdSize); } - return 
gtNewSimdHWIntrinsicNode(type, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector64_ToScalar, simdBaseJitType, 8); } default: { @@ -23566,12 +23323,8 @@ GenTree* Compiler::gtNewSimdSumNode( #endif // !TARGET_XARCH && !TARGET_ARM64 } -GenTree* Compiler::gtNewSimdUnOpNode(genTreeOps op, - var_types type, - GenTree* op1, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdUnOpNode( + genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -23600,7 +23353,7 @@ GenTree* Compiler::gtNewSimdUnOpNode(genTreeOps op, op2 = gtNewZeroConNode(type); // Zero - op1 - return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize); } case GT_NOT: @@ -23615,7 +23368,7 @@ GenTree* Compiler::gtNewSimdUnOpNode(genTreeOps op, } op2 = gtNewAllBitsSetConNode(type); - return gtNewSimdBinOpNode(GT_XOR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdBinOpNode(GT_XOR, type, op1, op2, simdBaseJitType, simdSize); } #elif defined(TARGET_ARM64) case GT_NEG: @@ -23635,19 +23388,19 @@ GenTree* Compiler::gtNewSimdUnOpNode(genTreeOps op, intrinsic = NI_AdvSimd_Negate; } - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); } else { // Zero - op1 op2 = gtNewZeroConNode(type); - return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize); } } case GT_NOT: { - return gtNewSimdHWIntrinsicNode(type, op1, NI_AdvSimd_Not, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, NI_AdvSimd_Not, simdBaseJitType, simdSize); } #else #error Unsupported platform @@ -23660,8 +23413,7 @@ GenTree* Compiler::gtNewSimdUnOpNode(genTreeOps op, } } -GenTree* Compiler::gtNewSimdWidenLowerNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdWidenLowerNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -23683,7 +23435,7 @@ GenTree* Compiler::gtNewSimdWidenLowerNode( { assert(IsBaselineVector512IsaSupportedDebugOnly()); - tmp1 = gtNewSimdGetLowerNode(TYP_SIMD32, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdGetLowerNode(TYP_SIMD32, op1, simdBaseJitType, simdSize); switch (simdBaseType) { @@ -23736,14 +23488,14 @@ GenTree* Compiler::gtNewSimdWidenLowerNode( } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize); } else if (simdSize == 32) { assert(compIsaSupportedDebugOnly(InstructionSet_AVX)); assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2)); - tmp1 = gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); switch (simdBaseType) { @@ -23781,7 +23533,7 @@ GenTree* 
Compiler::gtNewSimdWidenLowerNode( } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize); } else if ((simdBaseType == TYP_FLOAT) || compOpportunisticallyDependsOn(InstructionSet_SSE41)) { @@ -23821,7 +23573,7 @@ GenTree* Compiler::gtNewSimdWidenLowerNode( } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); } else { @@ -23829,21 +23581,17 @@ GenTree* Compiler::gtNewSimdWidenLowerNode( if (varTypeIsSigned(simdBaseType)) { - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); - - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); + GenTree* op1Dup = fgMakeMultiUse(&op1); - tmp1 = gtNewSimdHWIntrinsicNode(type, op1Dup, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(type, op1Dup, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize); } - return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackLow, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackLow, simdBaseJitType, simdSize); } #elif defined(TARGET_ARM64) if (simdSize == 16) { - tmp1 = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize); } else { @@ -23866,11 +23614,11 @@ GenTree* Compiler::gtNewSimdWidenLowerNode( } assert(intrinsic != NI_Illegal); - tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, 8, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, 8); if (simdSize == 8) { - tmp1 = gtNewSimdGetLowerNode(TYP_SIMD8, tmp1, simdBaseJitType, 16, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdGetLowerNode(TYP_SIMD8, tmp1, simdBaseJitType, 16); } return tmp1; @@ -23879,8 +23627,7 @@ GenTree* Compiler::gtNewSimdWidenLowerNode( #endif // !TARGET_XARCH && !TARGET_ARM64 } -GenTree* Compiler::gtNewSimdWidenUpperNode( - var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize) { assert(IsBaselineSimdIsaSupportedDebugOnly()); @@ -23902,7 +23649,7 @@ GenTree* Compiler::gtNewSimdWidenUpperNode( { assert(IsBaselineVector512IsaSupportedDebugOnly()); - tmp1 = gtNewSimdGetUpperNode(TYP_SIMD32, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdGetUpperNode(TYP_SIMD32, op1, simdBaseJitType, simdSize); switch (simdBaseType) { @@ -23955,14 +23702,14 @@ GenTree* Compiler::gtNewSimdWidenUpperNode( } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize); } else if (simdSize == 32) { assert(compIsaSupportedDebugOnly(InstructionSet_AVX)); assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2)); - tmp1 = gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); switch (simdBaseType) 
{ @@ -24000,24 +23747,21 @@ GenTree* Compiler::gtNewSimdWidenUpperNode( } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize); } else if (varTypeIsFloating(simdBaseType)) { assert(simdBaseType == TYP_FLOAT); - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); + GenTree* op1Dup = fgMakeMultiUse(&op1); - tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_SSE_MoveHighToLow, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); - return gtNewSimdHWIntrinsicNode(type, tmp1, NI_SSE2_ConvertToVector128Double, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_SSE_MoveHighToLow, simdBaseJitType, simdSize); + return gtNewSimdHWIntrinsicNode(type, tmp1, NI_SSE2_ConvertToVector128Double, simdBaseJitType, simdSize); } else if (compOpportunisticallyDependsOn(InstructionSet_SSE41)) { tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(8), NI_SSE2_ShiftRightLogical128BitLane, - simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + simdBaseJitType, simdSize); switch (simdBaseType) { @@ -24049,7 +23793,7 @@ GenTree* Compiler::gtNewSimdWidenUpperNode( } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize); } else { @@ -24057,16 +23801,12 @@ GenTree* Compiler::gtNewSimdWidenUpperNode( if (varTypeIsSigned(simdBaseType)) { - CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); - - GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd); + GenTree* op1Dup = fgMakeMultiUse(&op1); - tmp1 = gtNewSimdHWIntrinsicNode(type, op1Dup, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(type, op1Dup, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize); } - return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize); } #elif defined(TARGET_ARM64) GenTree* zero; @@ -24088,7 +23828,7 @@ GenTree* Compiler::gtNewSimdWidenUpperNode( } assert(intrinsic != NI_Illegal); - return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize); } else { @@ -24111,22 +23851,17 @@ GenTree* Compiler::gtNewSimdWidenUpperNode( assert(intrinsic != NI_Illegal); - tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseJitType, simdSize); zero = gtNewZeroConNode(TYP_SIMD16); - return gtNewSimdGetUpperNode(TYP_SIMD8, tmp1, simdBaseJitType, 16, isSimdAsHWIntrinsic); + return gtNewSimdGetUpperNode(TYP_SIMD8, tmp1, simdBaseJitType, 16); } #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 } -GenTree* Compiler::gtNewSimdWithElementNode(var_types type, - GenTree* op1, - GenTree* op2, - GenTree* op3, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdWithElementNode( + 
var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize) { NamedIntrinsic hwIntrinsicID = NI_Vector128_WithElement; var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); @@ -24178,8 +23913,7 @@ GenTree* Compiler::gtNewSimdWithElementNode(var_types type, case TYP_DOUBLE: if (simdSize == 8) { - return gtNewSimdHWIntrinsicNode(type, op3, NI_Vector64_Create, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op3, NI_Vector64_Create, simdBaseJitType, simdSize); } break; @@ -24201,7 +23935,7 @@ GenTree* Compiler::gtNewSimdWithElementNode(var_types type, #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 - return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize); } #ifdef TARGET_ARM64 @@ -24236,12 +23970,8 @@ GenTreeFieldList* Compiler::gtConvertTableOpToFieldList(GenTree* op, unsigned fi } #endif // TARGET_ARM64 -GenTree* Compiler::gtNewSimdWithLowerNode(var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdWithLowerNode( + var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); @@ -24266,15 +23996,11 @@ GenTree* Compiler::gtNewSimdWithLowerNode(var_types type, #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize); } -GenTree* Compiler::gtNewSimdWithUpperNode(var_types type, - GenTree* op1, - GenTree* op2, - CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) +GenTree* Compiler::gtNewSimdWithUpperNode( + var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize) { var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); @@ -24299,21 +24025,21 @@ GenTree* Compiler::gtNewSimdWithUpperNode(var_types type, #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 - return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize); } GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID) { - return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, - CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false); + return new (this, GT_HWINTRINSIC) + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0); } GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID) { SetOpLclRelatedToSIMDIntrinsic(op1); - return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, - CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false, op1); + return new (this, GT_HWINTRINSIC) + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0, op1); } GenTreeHWIntrinsic* 
Compiler::gtNewScalarHWIntrinsicNode(var_types type, @@ -24325,8 +24051,7 @@ GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, SetOpLclRelatedToSIMDIntrinsic(op2); return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0, - /* isSimdAsHWIntrinsic */ false, op1, op2); + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0, op1, op2); } GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode( @@ -24337,8 +24062,7 @@ GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode( SetOpLclRelatedToSIMDIntrinsic(op3); return new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0, - /* isSimdAsHWIntrinsic */ false, op1, op2, op3); + GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0, op1, op2, op3); } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index 01cdc178f92f09..fcd83c40969be8 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -571,9 +571,6 @@ enum GenTreeFlags : unsigned int GTF_MDARRLEN_NONFAULTING = 0x20000000, // GT_MDARR_LENGTH -- An MD array length operation that cannot fault. Same as GT_IND_NONFAULTING. GTF_MDARRLOWERBOUND_NONFAULTING = 0x20000000, // GT_MDARR_LOWER_BOUND -- An MD array lower bound operation that cannot fault. Same as GT_IND_NONFAULTING. - - GTF_SIMDASHW_OP = 0x80000000, // GT_HWINTRINSIC -- Indicates that the structHandle should be gotten from gtGetStructHandleForSIMD - // rather than from gtGetStructHandleForHWSIMD. }; inline constexpr GenTreeFlags operator ~(GenTreeFlags a) @@ -6161,11 +6158,10 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, - unsigned simdSize, - bool isSimdAsHWIntrinsic) + unsigned simdSize) : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseJitType, simdSize) { - Initialize(hwIntrinsicID, isSimdAsHWIntrinsic); + Initialize(hwIntrinsicID); } template @@ -6174,11 +6170,10 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, - bool isSimdAsHWIntrinsic, Operands... operands) : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseJitType, simdSize, operands...) 
{ - Initialize(hwIntrinsicID, isSimdAsHWIntrinsic); + Initialize(hwIntrinsicID); } #if DEBUGGABLE_GENTREE @@ -6191,11 +6186,6 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic bool OperIsMemoryStore(GenTree** pAddr = nullptr) const; bool OperIsMemoryLoadOrStore() const; - bool IsSimdAsHWIntrinsic() const - { - return (gtFlags & GTF_SIMDASHW_OP) != 0; - } - unsigned GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3); ClassLayout* GetLayout(Compiler* compiler) const; @@ -6289,7 +6279,7 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic private: void SetHWIntrinsicId(NamedIntrinsic intrinsicId); - void Initialize(NamedIntrinsic intrinsicId, bool isSimdAsHWIntrinsic) + void Initialize(NamedIntrinsic intrinsicId) { SetHWIntrinsicId(intrinsicId); @@ -6305,11 +6295,6 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic gtFlags |= GTF_ASG; } } - - if (isSimdAsHWIntrinsic) - { - gtFlags |= GTF_SIMDASHW_OP; - } } }; #endif // FEATURE_HW_INTRINSICS diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp index 95b61635f87421..5ad88df28b9bd5 100644 --- a/src/coreclr/jit/hwintrinsicarm64.cpp +++ b/src/coreclr/jit/hwintrinsicarm64.cpp @@ -343,7 +343,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -357,8 +357,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -370,8 +369,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -424,8 +422,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(retType == TYP_SIMD8); op1 = impSIMDPopStack(TYP_SIMD16); - retNode = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize); break; } @@ -467,13 +464,11 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, GenTree* idx = gtNewIconNode(2, TYP_INT); GenTree* zero = gtNewZeroConNode(TYP_FLOAT); - op1 = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16, - /* isSimdAsHWIntrinsic */ false); + op1 = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16); idx = gtNewIconNode(3, TYP_INT); zero = gtNewZeroConNode(TYP_FLOAT); - retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16); break; } @@ -495,8 +490,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, GenTree* idx = gtNewIconNode(3, TYP_INT); GenTree* zero = gtNewZeroConNode(TYP_FLOAT); - retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16, - /* 
isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16); break; } @@ -531,8 +525,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -546,8 +539,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -558,7 +550,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(varTypeIsFloating(simdBaseType)); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -571,8 +563,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = - gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize); break; } @@ -874,8 +865,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass); - retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -891,8 +881,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -905,8 +894,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -921,8 +909,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -935,8 +922,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1042,8 +1028,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op3 = vecCon3; 
op2 = vecCon2; - op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op2, NI_AdvSimd_And, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op2, NI_AdvSimd_And, simdBaseJitType, simdSize); NamedIntrinsic shiftIntrinsic = NI_AdvSimd_ShiftLogical; @@ -1052,33 +1037,24 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, shiftIntrinsic = NI_AdvSimd_ShiftLogicalScalar; } - op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op3, shiftIntrinsic, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op3, shiftIntrinsic, simdBaseJitType, simdSize); if (varTypeIsByte(simdBaseType) && (simdSize == 16)) { - CORINFO_CLASS_HANDLE simdClsHnd = gtGetStructHandleForSimdOrHW(TYP_SIMD16, simdBaseJitType); - - op1 = impCloneExpr(op1, &op2, simdClsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op2, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector extractmostsignificantbits")); - op1 = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); - op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8, - /* isSimdAsHWIntrinsic */ false); - op1 = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, - /* isSimdAsHWIntrinsic */ false); + op1 = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize); + op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8); + op1 = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8); op1 = gtNewCastNode(TYP_INT, op1, /* isUnsigned */ true, TYP_INT); GenTree* zero = gtNewZeroConNode(TYP_SIMD16); ssize_t index = 8 / genTypeSize(simdBaseType); - op2 = gtNewSimdGetUpperNode(TYP_SIMD8, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); - op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8, - /* isSimdAsHWIntrinsic */ false); - op2 = gtNewSimdHWIntrinsicNode(simdBaseType, op2, NI_Vector64_ToScalar, simdBaseJitType, 8, - /* isSimdAsHWIntrinsic */ false); + op2 = gtNewSimdGetUpperNode(TYP_SIMD8, op2, simdBaseJitType, simdSize); + op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8); + op2 = gtNewSimdHWIntrinsicNode(simdBaseType, op2, NI_Vector64_ToScalar, simdBaseJitType, 8); op2 = gtNewCastNode(TYP_INT, op2, /* isUnsigned */ true, TYP_INT); op2 = gtNewOperNode(GT_LSH, TYP_INT, op2, gtNewIconNode(8)); @@ -1090,27 +1066,24 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { if ((simdSize == 8) && ((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT))) { - CORINFO_CLASS_HANDLE simdClsHnd = gtGetStructHandleForSimdOrHW(simdType, simdBaseJitType); - - op1 = impCloneExpr(op1, &op2, simdClsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op2, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector extractmostsignificantbits")); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, op2, NI_AdvSimd_AddPairwise, simdBaseJitType, - simdSize, /* isSimdAsHWIntrinsic */ false); + simdSize); } else { op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, - simdSize, /* isSimdAsHWIntrinsic */ false); + simdSize); } } else if (simdSize == 16) { op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType, - simdSize, /* isSimdAsHWIntrinsic */ false); + simdSize); } 
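For context on what the AdvSimd expansion in the hunks above computes (independently of the flag removal), here is a rough scalar model of ExtractMostSignificantBits. It is illustrative only: it does not use the JIT's GenTree builders, and the helper name ExtractMsbScalar is hypothetical.

// Illustrative scalar model: collect the most significant bit of each byte
// lane into a bitmask, mirroring the And + ShiftLogical + AddAcross sequence
// built by the importer code above.
#include <cstdint>
#include <cstddef>

static uint32_t ExtractMsbScalar(const uint8_t* lanes, size_t count)
{
    uint32_t mask = 0;
    for (size_t i = 0; i < count; i++)
    {
        // (lane >> 7) isolates the sign bit; placing it at position i plays
        // the role of the per-lane shift followed by the add-across reduction.
        mask |= static_cast<uint32_t>(lanes[i] >> 7) << i;
    }
    return mask;
}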
- retNode = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8); if ((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT)) { @@ -1127,7 +1100,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(varTypeIsFloating(simdBaseType)); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1164,8 +1137,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize)); - const bool isSimdAsHWIntrinsic = true; - retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1174,7 +1146,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 1); op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize)); - retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1183,7 +1155,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 1); op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize)); - retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1195,8 +1167,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1209,8 +1180,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1223,8 +1193,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1236,8 +1205,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1250,8 +1218,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = 
gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1264,8 +1231,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1277,8 +1243,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1291,8 +1256,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1305,8 +1269,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1318,8 +1281,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1332,8 +1294,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1346,8 +1307,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1382,7 +1342,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, op2); } - retNode = gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1407,8 +1367,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - retNode = gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1433,8 +1392,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - retNode = gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); 
+ retNode = gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1446,7 +1404,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1458,7 +1416,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1486,8 +1444,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass); - retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1499,8 +1456,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = - gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1510,9 +1466,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, case NI_Vector128_op_UnaryNegation: { assert(sig->numArgs == 1); - op1 = impSIMDPopStack(retType); - retNode = - gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + op1 = impSIMDPopStack(retType); + retNode = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize); break; } @@ -1522,9 +1477,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, case NI_Vector128_op_OnesComplement: { assert(sig->numArgs == 1); - op1 = impSIMDPopStack(retType); - retNode = - gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + op1 = impSIMDPopStack(retType); + retNode = gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize); break; } @@ -1537,8 +1491,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1560,8 +1513,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1573,8 +1525,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1587,8 +1538,7 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1600,8 +1550,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1624,8 +1573,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdShuffleNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdShuffleNode(retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1638,7 +1586,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsFloating(simdBaseType)) { op1 = impSIMDPopStack(retType); - retNode = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize); } break; } @@ -1659,7 +1607,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - retNode = gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize); break; } @@ -1703,7 +1651,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize); break; } @@ -1736,7 +1684,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize); break; } @@ -1769,8 +1717,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize); break; } @@ -1781,7 +1728,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, var_types simdType = getSIMDTypeForSize(simdSize); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1792,7 +1739,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(retType); - retNode = gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1803,7 +1750,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(retType); - retNode = gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = 
gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1833,8 +1780,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, impPopStack(); // pop the indexOp that we already have. GenTree* vectorOp = impSIMDPopStack(getSIMDTypeForSize(simdSize)); - retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseJitType, simdSize); break; } @@ -1844,8 +1790,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(TYP_SIMD8); op1 = impSIMDPopStack(TYP_SIMD16); - retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1855,8 +1800,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(TYP_SIMD8); op1 = impSIMDPopStack(TYP_SIMD16); - retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1870,8 +1814,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize); break; } diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp index 961c7741ade211..e9cef9eb2c2e69 100644 --- a/src/coreclr/jit/hwintrinsicxarch.cpp +++ b/src/coreclr/jit/hwintrinsicxarch.cpp @@ -612,7 +612,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, compExactlyDependsOn(InstructionSet_AVX2)) { op1 = impSIMDPopStack(retType); - retNode = gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize); } break; } @@ -631,8 +631,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -649,8 +648,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -775,13 +773,11 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, GenTree* idx = gtNewIconNode(2, TYP_INT); GenTree* zero = gtNewZeroConNode(TYP_FLOAT); - op1 = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16, - /* isSimdAsHWIntrinsic */ false); + op1 = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16); idx = gtNewIconNode(3, TYP_INT); zero = gtNewZeroConNode(TYP_FLOAT); - retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, 
simdBaseJitType, 16); break; } @@ -803,8 +799,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, GenTree* idx = gtNewIconNode(3, TYP_INT); GenTree* zero = gtNewZeroConNode(TYP_FLOAT); - retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16); break; } @@ -936,8 +931,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -953,8 +947,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -971,7 +964,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(retType); - retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -984,8 +977,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = - gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize); break; } @@ -1302,8 +1294,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass); - retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1341,8 +1332,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = - gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1356,8 +1346,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1376,8 +1365,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1394,8 +1382,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, 
op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1413,8 +1400,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_AVX512F_MoveMaskSpecial, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_AVX512F_MoveMaskSpecial, simdBaseJitType, simdSize); } break; } @@ -1498,8 +1484,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, simdType = TYP_SIMD16; - op1 = gtNewSimdGetLowerNode(simdType, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + op1 = gtNewSimdGetLowerNode(simdType, op1, simdBaseJitType, simdSize); simdSize = 16; } @@ -1535,8 +1520,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(moveMaskIntrinsic != NI_Illegal); assert(op1 != nullptr); - retNode = gtNewSimdHWIntrinsicNode(retType, op1, moveMaskIntrinsic, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdHWIntrinsicNode(retType, op1, moveMaskIntrinsic, simdBaseJitType, simdSize); } break; } @@ -1554,7 +1538,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(retType); - retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1619,8 +1603,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, GenTree* op2 = impPopStack().val; GenTree* op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize)); - retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1634,8 +1617,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1652,8 +1634,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1670,8 +1651,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1686,8 +1666,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1704,8 +1683,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); 
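The comparison cases in this file all reduce to the same shape after the refactor: pop both SIMD operands, then build the node with the five-argument form. A minimal sketch of that call shape, assuming simdType, retType, simdBaseJitType, and simdSize are in scope exactly as in the surrounding hunks:

// Sketch of the simplified importer pattern; the trailing
// isSimdAsHWIntrinsic argument is gone from every builder call.
op2     = impSIMDPopStack(simdType);
op1     = impSIMDPopStack(simdType);
retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize);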
op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1722,8 +1700,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1738,8 +1715,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1756,8 +1732,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1774,8 +1749,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1790,8 +1764,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1808,8 +1781,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1826,8 +1798,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1867,7 +1838,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, op2); } - retNode = gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1885,8 +1856,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - retNode = gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1904,8 +1874,7 @@ GenTree* 
Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - retNode = gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -1919,8 +1888,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = - gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1935,8 +1903,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = - gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -1977,8 +1944,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass))); op1 = getArgForHWIntrinsic(argType, argClass); - retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -1995,8 +1961,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = - gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -2010,9 +1975,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if ((simdSize != 32) || varTypeIsFloating(simdBaseType) || compExactlyDependsOn(InstructionSet_AVX2)) { - op1 = impSIMDPopStack(retType); - retNode = - gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + op1 = impSIMDPopStack(retType); + retNode = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize); } break; } @@ -2025,9 +1989,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, case NI_Vector512_op_OnesComplement: { assert(sig->numArgs == 1); - op1 = impSIMDPopStack(retType); - retNode = - gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + op1 = impSIMDPopStack(retType); + retNode = gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize); break; } @@ -2043,8 +2006,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -2071,8 +2033,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -2095,8 +2056,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = 
impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -2121,8 +2081,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -2145,8 +2104,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impPopStack().val; op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -2227,8 +2185,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdShuffleNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdShuffleNode(retType, op1, op2, simdBaseJitType, simdSize); } break; } @@ -2241,7 +2198,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, if (varTypeIsFloating(simdBaseType)) { op1 = impSIMDPopStack(retType); - retNode = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize); } break; } @@ -2265,7 +2222,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - retNode = gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize); break; } @@ -2311,7 +2268,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize); break; } @@ -2337,7 +2294,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize); break; } @@ -2363,8 +2320,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize); break; } @@ -2399,7 +2355,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, } op1 = impSIMDPopStack(simdType); - retNode = gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -2439,7 +2395,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(compIsaSupportedDebugOnly(InstructionSet_AVX)); op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize)); - retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdGetLowerNode(retType, op1, 
simdBaseJitType, simdSize); break; } @@ -2449,7 +2405,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(compIsaSupportedDebugOnly(InstructionSet_AVX)); op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize)); - retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -2459,7 +2415,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(IsBaselineVector512IsaSupportedDebugOnly()); op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize)); - retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -2469,7 +2425,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(IsBaselineVector512IsaSupportedDebugOnly()); op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize)); - retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize); break; } @@ -2498,8 +2454,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(retType); - retNode = - gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize); } break; } @@ -2516,8 +2471,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = impSIMDPopStack(retType); - retNode = - gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize); } break; } @@ -2580,8 +2534,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, impPopStack(); // Pop the indexOp now that we know its valid GenTree* vectorOp = impSIMDPopStack(getSIMDTypeForSize(simdSize)); - retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseJitType, simdSize); break; } @@ -2592,8 +2545,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(TYP_SIMD16); op1 = impSIMDPopStack(TYP_SIMD32); - retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -2604,8 +2556,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(TYP_SIMD16); op1 = impSIMDPopStack(TYP_SIMD32); - retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -2616,8 +2567,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(TYP_SIMD32); op1 = impSIMDPopStack(TYP_SIMD64); - retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -2628,8 +2578,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(TYP_SIMD32); op1 = impSIMDPopStack(TYP_SIMD64); 
- retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -2645,8 +2594,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op2 = impSIMDPopStack(retType); op1 = impSIMDPopStack(retType); - retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ false); + retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize); break; } @@ -2865,11 +2813,8 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, op1 = getArgForHWIntrinsic(argType, argClass); SetOpLclRelatedToSIMDIntrinsic(op1); - const bool isSimdAsHWIntrinsic = false; - - retNode = new (this, GT_HWINTRINSIC) - GenTreeHWIntrinsic(retType, getAllocator(CMK_ASTNode), intrinsic, simdBaseJitType, simdSize, - isSimdAsHWIntrinsic, op1, op2, op3, op4, op5); + retNode = new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(retType, getAllocator(CMK_ASTNode), intrinsic, + simdBaseJitType, simdSize, op1, op2, op3, op4, op5); retNode->AsHWIntrinsic()->SetAuxiliaryJitType(indexBaseJitType); break; } diff --git a/src/coreclr/jit/importervectorization.cpp b/src/coreclr/jit/importervectorization.cpp index 375c12035875b2..7ec92202b353b2 100644 --- a/src/coreclr/jit/importervectorization.cpp +++ b/src/coreclr/jit/importervectorization.cpp @@ -265,14 +265,14 @@ GenTree* Compiler::impExpandHalfConstEqualsSIMD( { // Apply ASCII-only ToLowerCase mask (bitwise OR 0x20 for all a-Z chars) assert((toLowerVec1 != nullptr) && (toLowerVec2 != nullptr)); - vec1 = gtNewSimdBinOpNode(GT_OR, simdType, vec1, toLowerVec1, baseType, simdSize, false); - vec2 = gtNewSimdBinOpNode(GT_OR, simdType, vec2, toLowerVec2, baseType, simdSize, false); + vec1 = gtNewSimdBinOpNode(GT_OR, simdType, vec1, toLowerVec1, baseType, simdSize); + vec2 = gtNewSimdBinOpNode(GT_OR, simdType, vec2, toLowerVec2, baseType, simdSize); } // ((v1 ^ cns1) | (v2 ^ cns2)) == zero - GenTree* xor1 = gtNewSimdBinOpNode(GT_XOR, simdType, vec1, cnsVec1, baseType, simdSize, false); - GenTree* xor2 = gtNewSimdBinOpNode(GT_XOR, simdType, vec2, cnsVec2, baseType, simdSize, false); - GenTree* orr = gtNewSimdBinOpNode(GT_OR, simdType, xor1, xor2, baseType, simdSize, false); + GenTree* xor1 = gtNewSimdBinOpNode(GT_XOR, simdType, vec1, cnsVec1, baseType, simdSize); + GenTree* xor2 = gtNewSimdBinOpNode(GT_XOR, simdType, vec2, cnsVec2, baseType, simdSize); + GenTree* orr = gtNewSimdBinOpNode(GT_OR, simdType, xor1, xor2, baseType, simdSize); return gtNewSimdHWIntrinsicNode(TYP_BOOL, useSingleVector ? 
xor1 : orr, zero, niEquals, baseType, simdSize); } #endif // defined(FEATURE_HW_INTRINSICS) @@ -865,7 +865,8 @@ GenTree* Compiler::impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* JITDUMP("Trying to unroll MemoryExtensions.Equals|SequenceEqual|StartsWith(op1, \"%ws\")...\n", str) } - CORINFO_CLASS_HANDLE spanCls = gtGetStructHandle(spanObj); + CORINFO_CLASS_HANDLE spanCls; + info.compCompHnd->getArgType(sig, sig->args, &spanCls); CORINFO_FIELD_HANDLE pointerHnd = info.compCompHnd->getFieldInClass(spanCls, 0); CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(spanCls, 1); const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd); diff --git a/src/coreclr/jit/lclmorph.cpp b/src/coreclr/jit/lclmorph.cpp index 27a517211a6e38..715b8b0211c997 100644 --- a/src/coreclr/jit/lclmorph.cpp +++ b/src/coreclr/jit/lclmorph.cpp @@ -1162,16 +1162,14 @@ class LocalAddressVisitor final : public GenTreeVisitor { GenTree* indexNode = m_compiler->gtNewIconNode(val.Offset() / genTypeSize(elementType)); hwiNode = m_compiler->gtNewSimdGetElementNode(elementType, lclNode, indexNode, CORINFO_TYPE_FLOAT, - genTypeSize(varDsc), - /* isSimdAsHWIntrinsic */ true); + genTypeSize(varDsc)); } else { assert(elementType == TYP_SIMD12); assert(genTypeSize(varDsc) == 16); - hwiNode = - m_compiler->gtNewSimdHWIntrinsicNode(elementType, lclNode, NI_Vector128_AsVector3, - CORINFO_TYPE_FLOAT, 16, /* isSimdAsHWIntrinsic */ true); + hwiNode = m_compiler->gtNewSimdHWIntrinsicNode(elementType, lclNode, NI_Vector128_AsVector3, + CORINFO_TYPE_FLOAT, 16); } indir = hwiNode; @@ -1192,9 +1190,9 @@ class LocalAddressVisitor final : public GenTreeVisitor if (elementType == TYP_FLOAT) { GenTree* indexNode = m_compiler->gtNewIconNode(val.Offset() / genTypeSize(elementType)); - hwiNode = m_compiler->gtNewSimdWithElementNode(varDsc->TypeGet(), simdLclNode, indexNode, - elementNode, CORINFO_TYPE_FLOAT, genTypeSize(varDsc), - /* isSimdAsHWIntrinsic */ true); + hwiNode = + m_compiler->gtNewSimdWithElementNode(varDsc->TypeGet(), simdLclNode, indexNode, elementNode, + CORINFO_TYPE_FLOAT, genTypeSize(varDsc)); } else { @@ -1204,9 +1202,8 @@ class LocalAddressVisitor final : public GenTreeVisitor // We inverse the operands here and take elementNode as the main value and simdLclNode[3] as the // new value. 
This gives us a new TYP_SIMD16 with all elements in the right spots GenTree* indexNode = m_compiler->gtNewIconNode(3, TYP_INT); - hwiNode = - m_compiler->gtNewSimdWithElementNode(TYP_SIMD16, elementNode, indexNode, simdLclNode, - CORINFO_TYPE_FLOAT, 16, /* isSimdAsHWIntrinsic */ true); + hwiNode = m_compiler->gtNewSimdWithElementNode(TYP_SIMD16, elementNode, indexNode, simdLclNode, + CORINFO_TYPE_FLOAT, 16); } user->AsOp()->gtOp2 = hwiNode; diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp index cc3732238bf1ab..7e21041683ff64 100644 --- a/src/coreclr/jit/lowerxarch.cpp +++ b/src/coreclr/jit/lowerxarch.cpp @@ -1883,11 +1883,10 @@ GenTree* Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cm // GenTree* Lowering::LowerHWIntrinsicCndSel(GenTreeHWIntrinsic* node) { - var_types simdType = node->gtType; - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - var_types simdBaseType = node->GetSimdBaseType(); - unsigned simdSize = node->GetSimdSize(); - bool isSimdAsHWIntrinsic = node->IsSimdAsHWIntrinsic(); + var_types simdType = node->gtType; + CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); + var_types simdBaseType = node->GetSimdBaseType(); + unsigned simdSize = node->GetSimdSize(); assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); @@ -1966,21 +1965,21 @@ GenTree* Lowering::LowerHWIntrinsicCndSel(GenTreeHWIntrinsic* node) // ... // tmp2 = op1 & op2 // ... - tmp2 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp2 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, op2, simdBaseJitType, simdSize); BlockRange().InsertAfter(op2, tmp2); LowerNode(tmp2); // ... // tmp3 = op3 & ~tmp1 // ... - tmp3 = comp->gtNewSimdBinOpNode(GT_AND_NOT, simdType, op3, tmp1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp3 = comp->gtNewSimdBinOpNode(GT_AND_NOT, simdType, op3, tmp1, simdBaseJitType, simdSize); BlockRange().InsertAfter(op3, tmp3); LowerNode(tmp3); // ... // tmp4 = tmp2 | tmp3 // ... - tmp4 = comp->gtNewSimdBinOpNode(GT_OR, simdType, tmp2, tmp3, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); + tmp4 = comp->gtNewSimdBinOpNode(GT_OR, simdType, tmp2, tmp3, simdBaseJitType, simdSize); BlockRange().InsertBefore(node, tmp4); LIR::Use use; @@ -2323,7 +2322,7 @@ GenTree* Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) // var tmp3 = tmp2.ToVector256Unsafe(); // return tmp3.WithUpper(tmp1); - tmp1 = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, op1, simdBaseJitType, 16, false); + tmp1 = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, op1, simdBaseJitType, 16); BlockRange().InsertAfter(op1, tmp1); node->Op(1) = tmp1; @@ -3292,7 +3291,7 @@ void Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node) imm8 -= count / 2; - tmp1 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, false); + tmp1 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); } @@ -3305,7 +3304,7 @@ void Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node) // ... 
// op1 = op1.GetLower(); - tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, false); + tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); } @@ -3524,7 +3523,7 @@ GenTree* Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) imm8 -= count / 2; - tmp1 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, false); + tmp1 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); } @@ -3539,7 +3538,7 @@ GenTree* Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) // ... // op1 = op1.GetLower(); - tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, false); + tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); } @@ -3862,15 +3861,15 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); - tmp3 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, tmp2, simdBaseJitType, simdSize, false); + tmp3 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, tmp2, simdBaseJitType, simdSize); BlockRange().InsertAfter(tmp2, tmp3); LowerNode(tmp3); - tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, tmp1, simdBaseJitType, simdSize, false); + tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, tmp1, simdBaseJitType, simdSize); BlockRange().InsertAfter(tmp3, tmp1); LowerNode(tmp1); - tmp2 = comp->gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, tmp3, tmp1, simdBaseJitType, 16, false); + tmp2 = comp->gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, tmp3, tmp1, simdBaseJitType, 16); BlockRange().InsertAfter(tmp1, tmp2); LowerNode(tmp2); @@ -4092,7 +4091,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) memcpy(&vecCon1->gtSimdVal, &simd16Val, sizeof(simd16_t)); BlockRange().InsertAfter(op1, vecCon1); - op1 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, vecCon1, simdBaseJitType, simdSize, false); + op1 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, vecCon1, simdBaseJitType, simdSize); BlockRange().InsertAfter(vecCon1, op1); LowerNode(vecCon1); @@ -4120,7 +4119,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) memcpy(&vecCon2->gtSimdVal, &simd16Val, sizeof(simd16_t)); BlockRange().InsertAfter(op2, vecCon2); - op2 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op2, vecCon2, simdBaseJitType, simdSize, false); + op2 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op2, vecCon2, simdBaseJitType, simdSize); BlockRange().InsertAfter(vecCon2, op2); LowerNode(vecCon2); @@ -4138,7 +4137,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // var tmp1 = Isa.Multiply(op1, op2); // ... - tmp1 = comp->gtNewSimdBinOpNode(GT_MUL, simdType, op1, op2, simdBaseJitType, simdSize, false); + tmp1 = comp->gtNewSimdBinOpNode(GT_MUL, simdType, op1, op2, simdBaseJitType, simdSize); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); @@ -4343,7 +4342,7 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) // tmp1 = Isa.Add(tmp1, tmp2); // ... 
- tmp1 = comp->gtNewSimdBinOpNode(GT_ADD, simdType, tmp1, tmp2, simdBaseJitType, simdSize, false); + tmp1 = comp->gtNewSimdBinOpNode(GT_ADD, simdType, tmp1, tmp2, simdBaseJitType, simdSize); } BlockRange().InsertAfter(tmp2, tmp1); @@ -4384,15 +4383,15 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); - tmp3 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, tmp2, simdBaseJitType, simdSize, false); + tmp3 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, tmp2, simdBaseJitType, simdSize); BlockRange().InsertAfter(tmp2, tmp3); LowerNode(tmp3); - tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, tmp1, simdBaseJitType, simdSize, false); + tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, tmp1, simdBaseJitType, simdSize); BlockRange().InsertAfter(tmp3, tmp1); LowerNode(tmp1); - tmp2 = comp->gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, tmp3, tmp1, simdBaseJitType, 16, false); + tmp2 = comp->gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, tmp3, tmp1, simdBaseJitType, 16); BlockRange().InsertAfter(tmp1, tmp2); LowerNode(tmp2); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 6361cd25a0cf19..34ff0903e7d020 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -1859,21 +1859,11 @@ void CallArgs::SetNeedsTemp(CallArg* arg) // 'TempInfo' data that contains the GT_ASG and GT_LCL_VAR nodes for assignment // and variable load respectively. // -TempInfo Compiler::fgMakeTemp(GenTree* rhs, CORINFO_CLASS_HANDLE structType /*= nullptr*/) +TempInfo Compiler::fgMakeTemp(GenTree* rhs) { unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgMakeTemp is creating a new local variable")); - - if (varTypeIsStruct(rhs)) - { - assert(structType != nullptr); - lvaSetStruct(lclNum, structType, false); - } - - // If rhs->TypeGet() == TYP_STRUCT, gtNewTempAssign() will create a GT_COPYBLK tree. - // The type of GT_COPYBLK is TYP_VOID. Therefore, we should use type of rhs for - // setting type of lcl vars created. - GenTree* asg = gtNewTempAssign(lclNum, rhs); - GenTree* load = gtNewLclvNode(lclNum, genActualType(rhs)); + GenTree* asg = gtNewTempAssign(lclNum, rhs); + GenTree* load = gtNewLclvNode(lclNum, genActualType(rhs)); TempInfo tempInfo{}; tempInfo.asg = asg; @@ -1890,8 +1880,6 @@ TempInfo Compiler::fgMakeTemp(GenTree* rhs, CORINFO_CLASS_HANDLE structType /*= // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // -// structType - value type handle if the temp created is of TYP_STRUCT. -// // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // @@ -1901,7 +1889,7 @@ TempInfo Compiler::fgMakeTemp(GenTree* rhs, CORINFO_CLASS_HANDLE structType /*= // original use and new use is possible. Otherwise, fgInsertCommaFormTemp // should be used directly. 
// -GenTree* Compiler::fgMakeMultiUse(GenTree** pOp, CORINFO_CLASS_HANDLE structType /*= nullptr*/) +GenTree* Compiler::fgMakeMultiUse(GenTree** pOp) { GenTree* const tree = *pOp; @@ -1910,7 +1898,7 @@ GenTree* Compiler::fgMakeMultiUse(GenTree** pOp, CORINFO_CLASS_HANDLE structType return gtCloneExpr(tree); } - return fgInsertCommaFormTemp(pOp, structType); + return fgInsertCommaFormTemp(pOp); } //------------------------------------------------------------------------------ @@ -1921,17 +1909,15 @@ GenTree* Compiler::fgMakeMultiUse(GenTree** pOp, CORINFO_CLASS_HANDLE structType // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // -// structType - value type handle if the temp created is of TYP_STRUCT. -// // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // -GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/) +GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree) { GenTree* subTree = *ppTree; - TempInfo tempInfo = fgMakeTemp(subTree, structType); + TempInfo tempInfo = fgMakeTemp(subTree); GenTree* asg = tempInfo.asg; GenTree* load = tempInfo.load; @@ -10908,9 +10894,7 @@ GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node) if (sqrt != nullptr) { - CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); - node = gtNewSimdSqrtNode(simdType, hwop1, simdBaseJitType, simdSize, node->IsSimdAsHWIntrinsic()) - ->AsHWIntrinsic(); + node = gtNewSimdSqrtNode(simdType, hwop1, node->GetSimdBaseJitType(), simdSize)->AsHWIntrinsic(); DEBUG_DESTROY_NODE(sqrt); } else diff --git a/src/coreclr/jit/simd.cpp b/src/coreclr/jit/simd.cpp index e7775e32a6dbaf..3b739b1f38853b 100644 --- a/src/coreclr/jit/simd.cpp +++ b/src/coreclr/jit/simd.cpp @@ -301,11 +301,6 @@ CorInfoType Compiler::getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeH JITDUMP(" Found Vector<%s>\n", varTypeName(JitType2PreciseVarType(simdBaseJitType))); size = getSIMDVectorRegisterByteLength(); - - uint32_t handleIndex = static_cast<uint32_t>(simdBaseJitType - CORINFO_TYPE_BYTE); - assert(handleIndex < SIMDHandlesCache::SupportedTypeCount); - - m_simdHandleCache->VectorTHandles[handleIndex] = typeHnd; break; } @@ -347,11 +342,6 @@ CorInfoType Compiler::getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeH } JITDUMP(" Found Vector64<%s>\n", varTypeName(JitType2PreciseVarType(simdBaseJitType))); - - uint32_t handleIndex = static_cast<uint32_t>(simdBaseJitType - CORINFO_TYPE_BYTE); - assert(handleIndex < SIMDHandlesCache::SupportedTypeCount); - - m_simdHandleCache->Vector64THandles[handleIndex] = typeHnd; break; } #endif // TARGET_ARM64 @@ -372,11 +362,6 @@ CorInfoType Compiler::getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeH } JITDUMP(" Found Vector128<%s>\n", varTypeName(JitType2PreciseVarType(simdBaseJitType))); - - uint32_t handleIndex = static_cast<uint32_t>(simdBaseJitType - CORINFO_TYPE_BYTE); - assert(handleIndex < SIMDHandlesCache::SupportedTypeCount); - - m_simdHandleCache->Vector128THandles[handleIndex] = typeHnd; break; } @@ -403,11 +388,6 @@ CorInfoType Compiler::getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeH } JITDUMP(" Found Vector256<%s>\n", varTypeName(JitType2PreciseVarType(simdBaseJitType))); - - uint32_t handleIndex = static_cast<uint32_t>(simdBaseJitType - CORINFO_TYPE_BYTE); - assert(handleIndex < SIMDHandlesCache::SupportedTypeCount); - - m_simdHandleCache->Vector256THandles[handleIndex] = typeHnd; break; } @@ -433,11 +413,6
@@ CorInfoType Compiler::getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeH } JITDUMP(" Found Vector512<%s>\n", varTypeName(JitType2PreciseVarType(simdBaseJitType))); - - uint32_t handleIndex = static_cast<uint32_t>(simdBaseJitType - CORINFO_TYPE_BYTE); - assert(handleIndex < SIMDHandlesCache::SupportedTypeCount); - - m_simdHandleCache->Vector512THandles[handleIndex] = typeHnd; break; } #endif // TARGET_XARCH @@ -459,34 +434,6 @@ CorInfoType Compiler::getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeH { assert(size == info.compCompHnd->getClassSize(typeHnd)); setUsesSIMDTypes(true); - - CORINFO_CLASS_HANDLE* pCanonicalHnd = nullptr; - - switch (size) - { - case 8: - pCanonicalHnd = &m_simdHandleCache->CanonicalSimd8Handle; - break; - case 12: - // There is no need for a canonical SIMD12 handle because it is always Vector3. - break; - case 16: - pCanonicalHnd = &m_simdHandleCache->CanonicalSimd16Handle; - break; - case 32: - pCanonicalHnd = &m_simdHandleCache->CanonicalSimd32Handle; - break; - case 64: - pCanonicalHnd = &m_simdHandleCache->CanonicalSimd64Handle; - break; - default: - unreached(); - } - - if ((pCanonicalHnd != nullptr) && (*pCanonicalHnd == NO_CLASS_HANDLE)) - { - *pCanonicalHnd = typeHnd; - } } return simdBaseJitType; diff --git a/src/coreclr/jit/simdashwintrinsic.cpp b/src/coreclr/jit/simdashwintrinsic.cpp index 836cd0a209512b..45f6c02fc09299 100644 --- a/src/coreclr/jit/simdashwintrinsic.cpp +++ b/src/coreclr/jit/simdashwintrinsic.cpp @@ -922,7 +922,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_Abs: #endif // TARGET_XARCH { - return gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + return gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize); } case NI_VectorT128_Ceiling: @@ -930,7 +930,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_Ceiling: #endif // TARGET_XARCH { - return gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize); } case NI_Quaternion_Conjugate: @@ -942,8 +942,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, vecCon->gtSimdVal.f32[2] = -1.0f; vecCon->gtSimdVal.f32[3] = +1.0f; - return gtNewSimdBinOpNode(GT_MUL, retType, op1, vecCon, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_MUL, retType, op1, vecCon, simdBaseJitType, simdSize); } case NI_VectorT128_Floor: @@ -951,7 +950,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_Floor: #endif // TARGET_XARCH { - return gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + return gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize); } case NI_Quaternion_Inverse: @@ -971,14 +970,11 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, vecCon->gtSimdVal.f32[2] = -1.0f; vecCon->gtSimdVal.f32[3] = +1.0f; - GenTree* conjugate = gtNewSimdBinOpNode(GT_MUL, retType, op1, vecCon, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + GenTree* conjugate = gtNewSimdBinOpNode(GT_MUL, retType, op1, vecCon, simdBaseJitType, simdSize); - op1 = gtNewSimdDotProdNode(retType, clonedOp1, clonedOp2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + op1 = gtNewSimdDotProdNode(retType, clonedOp1, clonedOp2, simdBaseJitType, simdSize); - return gtNewSimdBinOpNode(GT_DIV, 
retType, conjugate, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_DIV, retType, conjugate, op1, simdBaseJitType, simdSize); } case NI_Quaternion_Length: @@ -990,8 +986,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector length")); - op1 = gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + op1 = gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize); return new (this, GT_INTRINSIC) GenTreeIntrinsic(simdBaseType, op1, NI_System_Math_Sqrt, NO_METHOD_HANDLE); @@ -1006,8 +1001,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector length squared")); - return gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize); } case NI_VectorT128_Load: @@ -1023,7 +1017,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - return gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + return gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize); } case NI_VectorT128_LoadAligned: @@ -1037,8 +1031,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - return gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize); } case NI_VectorT128_LoadAlignedNonTemporal: @@ -1052,8 +1045,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, op1 = op1->gtGetOp1(); } - return gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize); } case NI_Quaternion_Negate: @@ -1071,8 +1063,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_UnaryNegation: #endif // TARGET_XARCH { - return gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize); } case NI_Quaternion_Normalize: @@ -1088,13 +1079,11 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, clonedOp1 = impCloneExpr(clonedOp1, &clonedOp2, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector normalize (2)")); - op1 = gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + op1 = gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize); - op1 = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + op1 = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize); - return gtNewSimdBinOpNode(GT_DIV, retType, clonedOp2, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_DIV, retType, clonedOp2, op1, simdBaseJitType, simdSize); } case NI_VectorT128_OnesComplement: @@ -1104,8 +1093,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_OnesComplement: 
#endif // TARGET_XARCH { - return gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize); } case NI_Vector2_Sqrt: @@ -1116,7 +1104,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_Sqrt: #endif // TARGET_XARCH { - return gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + return gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize); } case NI_VectorT128_Sum: @@ -1124,13 +1112,12 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_Sum: #endif // TARGET_XARCH { - return gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + return gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize); } case NI_VectorT128_ToScalar: { - return gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize); } case NI_VectorT128_op_UnaryPlus: @@ -1146,8 +1133,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_WidenLower: #endif // TARGET_XARCH { - return gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize); } case NI_VectorT128_WidenUpper: @@ -1155,8 +1141,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_WidenUpper: #endif // TARGET_XARCH { - return gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize); } #if defined(TARGET_XARCH) @@ -1166,8 +1151,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, assert(simdBaseType == TYP_FLOAT); NamedIntrinsic convert = (simdSize == 32) ? NI_AVX_ConvertToVector256Int32WithTruncation : NI_SSE2_ConvertToVector128Int32WithTruncation; - return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize); } case NI_VectorT128_ConvertToSingle: @@ -1176,56 +1160,54 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, assert(simdBaseType == TYP_INT); NamedIntrinsic convert = (simdSize == 32) ? 
NI_AVX_ConvertToVector256Single : NI_SSE2_ConvertToVector128Single; - return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize); } case NI_VectorT256_ToScalar: { - return gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector256_ToScalar, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector256_ToScalar, simdBaseJitType, simdSize); } #elif defined(TARGET_ARM64) case NI_VectorT128_ConvertToDouble: { assert((simdBaseType == TYP_LONG) || (simdBaseType == TYP_ULONG)); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_Arm64_ConvertToDouble, simdBaseJitType, - simdSize, /* isSimdAsHWIntrinsic */ true); + simdSize); } case NI_VectorT128_ConvertToInt32: { assert(simdBaseType == TYP_FLOAT); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToInt32RoundToZero, simdBaseJitType, - simdSize, /* isSimdAsHWIntrinsic */ true); + simdSize); } case NI_VectorT128_ConvertToInt64: { assert(simdBaseType == TYP_DOUBLE); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_Arm64_ConvertToInt64RoundToZero, - simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + simdBaseJitType, simdSize); } case NI_VectorT128_ConvertToSingle: { assert((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT)); - return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToSingle, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToSingle, simdBaseJitType, + simdSize); } case NI_VectorT128_ConvertToUInt32: { assert(simdBaseType == TYP_FLOAT); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToUInt32RoundToZero, - simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + simdBaseJitType, simdSize); } case NI_VectorT128_ConvertToUInt64: { assert(simdBaseType == TYP_DOUBLE); return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_Arm64_ConvertToUInt64RoundToZero, - simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + simdBaseJitType, simdSize); } #else #error Unsupported platform @@ -1288,8 +1270,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_Addition: #endif // TARGET_XARCH { - return gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_AndNot: @@ -1297,8 +1278,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_AndNot: #endif // TARGET_XARCH { - return gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_BitwiseAnd: @@ -1308,8 +1288,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_BitwiseAnd: #endif // TARGET_XARCH { - return gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_BitwiseOr: @@ -1319,8 +1298,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_BitwiseOr: #endif // TARGET_XARCH { - return 
gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize); } case NI_Vector2_CreateBroadcast: @@ -1334,8 +1312,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, assert(retType == TYP_VOID); copyBlkDst = op1; - copyBlkSrc = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + copyBlkSrc = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, simdSize); break; } @@ -1353,15 +1330,13 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_Vector3_Distance: case NI_Vector4_Distance: { - op1 = gtNewSimdBinOpNode(GT_SUB, simdType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + op1 = gtNewSimdBinOpNode(GT_SUB, simdType, op1, op2, simdBaseJitType, simdSize); GenTree* clonedOp1; op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone diff for vector distance")); - op1 = gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + op1 = gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize); return new (this, GT_INTRINSIC) GenTreeIntrinsic(retType, op1, NI_System_Math_Sqrt, NO_METHOD_HANDLE); @@ -1371,15 +1346,13 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_Vector3_DistanceSquared: case NI_Vector4_DistanceSquared: { - op1 = gtNewSimdBinOpNode(GT_SUB, simdType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + op1 = gtNewSimdBinOpNode(GT_SUB, simdType, op1, op2, simdBaseJitType, simdSize); GenTree* clonedOp1; op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone diff for vector distance squared")); - return gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize); } case NI_Quaternion_Divide: @@ -1396,8 +1369,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_Division: #endif // TARGET_XARCH { - return gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize); } case NI_Plane_Dot: @@ -1410,8 +1382,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_Dot: #endif // TARGET_XARCH { - return gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_Equals: @@ -1419,8 +1390,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_Equals: #endif // TARGET_XARCH { - return gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); } case NI_Plane_op_Equality: @@ -1435,8 +1405,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_Equality: #endif // TARGET_XARCH { - return gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, 
op2, simdBaseJitType, simdSize); } case NI_VectorT128_EqualsAny: @@ -1444,8 +1413,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_EqualsAny: #endif // TARGET_XARCH { - return gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_Xor: @@ -1455,8 +1423,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_ExclusiveOr: #endif // TARGET_XARCH { - return gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize); } case NI_Quaternion_get_Item: @@ -1470,8 +1437,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_GetElement: #endif // TARGET_XARCH { - return gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_GreaterThan: @@ -1479,8 +1445,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_GreaterThan: #endif // TARGET_XARCH { - return gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_GreaterThanAll: @@ -1488,8 +1453,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_GreaterThanAll: #endif // TARGET_XARCH { - return gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_GreaterThanAny: @@ -1497,8 +1461,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_GreaterThanAny: #endif // TARGET_XARCH { - return gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_GreaterThanOrEqual: @@ -1506,8 +1469,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_GreaterThanOrEqual: #endif // TARGET_XARCH { - return gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_GreaterThanOrEqualAll: @@ -1515,8 +1477,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_GreaterThanOrEqualAll: #endif // TARGET_XARCH { - return gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_GreaterThanOrEqualAny: @@ -1524,8 +1485,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_GreaterThanOrEqualAny: #endif // TARGET_XARCH { - return gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, 
simdBaseJitType, simdSize); } case NI_Plane_op_Inequality: @@ -1538,8 +1498,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_Inequality: #endif // TARGET_XARCH { - return gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_LessThan: @@ -1547,8 +1506,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_LessThan: #endif // TARGET_XARCH { - return gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_LessThanAll: @@ -1556,8 +1514,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_LessThanAll: #endif // TARGET_XARCH { - return gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_LessThanAny: @@ -1565,8 +1522,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_LessThanAny: #endif // TARGET_XARCH { - return gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_LessThanOrEqual: @@ -1574,8 +1530,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_LessThanOrEqual: #endif // TARGET_XARCH { - return gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_LessThanOrEqualAll: @@ -1583,8 +1538,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_LessThanOrEqualAll: #endif // TARGET_XARCH { - return gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_LessThanOrEqualAny: @@ -1592,8 +1546,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_LessThanOrEqualAny: #endif // TARGET_XARCH { - return gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_LoadUnsafeIndex: @@ -1613,7 +1566,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, op2 = gtNewOperNode(GT_MUL, op2->TypeGet(), op2, tmp); op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, op2); - return gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + return gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize); } case NI_Vector2_Max: @@ -1624,8 +1577,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_Max: #endif // TARGET_XARCH { - return gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize); } case 
NI_Vector2_Min: @@ -1636,8 +1588,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_Min: #endif // TARGET_XARCH { - return gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize); } case NI_Quaternion_Multiply: @@ -1655,8 +1606,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_Multiply: #endif // TARGET_XARCH { - return gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_Narrow: @@ -1664,8 +1614,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_Narrow: #endif // TARGET_XARCH { - return gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_ShiftLeft: @@ -1675,8 +1624,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_LeftShift: #endif // TARGET_XARCH { - return gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_ShiftRightArithmetic: @@ -1687,8 +1635,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, #endif // TARGET_XARCH { genTreeOps op = varTypeIsUnsigned(simdBaseType) ? GT_RSZ : GT_RSH; - return gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_ShiftRightLogical: @@ -1698,8 +1645,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_UnsignedRightShift: #endif // TARGET_XARCH { - return gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_Store: @@ -1717,7 +1663,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, op2 = op2->gtGetOp1(); } - return gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + return gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize); } case NI_VectorT128_StoreAligned: @@ -1733,8 +1679,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, op2 = op2->gtGetOp1(); } - return gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize); } case NI_VectorT128_StoreAlignedNonTemporal: @@ -1750,8 +1695,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, op2 = op2->gtGetOp1(); } - return gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize); } case NI_Quaternion_Subtract: @@ -1769,8 +1713,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_op_Subtraction: #endif // TARGET_XARCH { - return gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, 
simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize); } default: @@ -1828,10 +1771,8 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_Vector3_Clamp: case NI_Vector4_Clamp: { - GenTree* maxNode = gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); - return gtNewSimdMinNode(retType, maxNode, op3, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + GenTree* maxNode = gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize); + return gtNewSimdMinNode(retType, maxNode, op3, simdBaseJitType, simdSize); } case NI_VectorT128_ConditionalSelect: @@ -1839,8 +1780,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_ConditionalSelect: #endif // TARGET_XARCH { - return gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize); } case NI_Vector2_Lerp: @@ -1855,8 +1795,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, #if defined(TARGET_XARCH) // op3 = broadcast(op3) - op3 = gtNewSimdCreateBroadcastNode(retType, op3, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + op3 = gtNewSimdCreateBroadcastNode(retType, op3, simdBaseJitType, simdSize); #endif // TARGET_XARCH // clonedOp3 = op3 @@ -1867,8 +1806,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, #if defined(TARGET_XARCH) // op3 = 1.0f - op3 GenTree* oneCon = gtNewOneConNode(retType, simdBaseType); - op3 = gtNewSimdBinOpNode(GT_SUB, retType, oneCon, op3, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + op3 = gtNewSimdBinOpNode(GT_SUB, retType, oneCon, op3, simdBaseJitType, simdSize); #elif defined(TARGET_ARM64) // op3 = 1.0f - op3 GenTree* oneCon = gtNewOneConNode(simdBaseType); @@ -1878,16 +1816,13 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, #endif // op1 *= op3 - op1 = gtNewSimdBinOpNode(GT_MUL, retType, op1, op3, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + op1 = gtNewSimdBinOpNode(GT_MUL, retType, op1, op3, simdBaseJitType, simdSize); // op2 *= clonedOp3 - op2 = gtNewSimdBinOpNode(GT_MUL, retType, op2, clonedOp3, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + op2 = gtNewSimdBinOpNode(GT_MUL, retType, op2, clonedOp3, simdBaseJitType, simdSize); // return op1 + op2 - return gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize); } case NI_VectorT128_StoreUnsafeIndex: @@ -1908,7 +1843,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, op3 = gtNewOperNode(GT_MUL, op3->TypeGet(), op3, tmp); op2 = gtNewOperNode(GT_ADD, op2->TypeGet(), op2, op3); - return gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); + return gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize); } case NI_Vector2_Create: @@ -1944,10 +1879,10 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, nodeBuilder.AddOperand(3, gtNewZeroConNode(TYP_FLOAT)); copyBlkSrc = gtNewSimdHWIntrinsicNode(TYP_SIMD8, std::move(nodeBuilder), NI_Vector128_Create, - simdBaseJitType, 16, /* isSimdAsHWIntrinsic */ true); + simdBaseJitType, 16); #elif defined(TARGET_ARM64) - copyBlkSrc = 
gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, op3, NI_Vector64_Create, simdBaseJitType, - 8, /* isSimdAsHWIntrinsic */ true); + copyBlkSrc = + gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, op3, NI_Vector64_Create, simdBaseJitType, 8); #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 @@ -1988,8 +1923,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, else { GenTree* idx = gtNewIconNode((simdSize == 12) ? 2 : 3, TYP_INT); - copyBlkSrc = gtNewSimdWithElementNode(simdType, op2, idx, op3, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + copyBlkSrc = gtNewSimdWithElementNode(simdType, op2, idx, op3, simdBaseJitType, simdSize); } copyBlkDst = op1; @@ -2001,8 +1935,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT256_WithElement: #endif // TARGET_XARCH { - return gtNewSimdWithElementNode(retType, op1, op2, op3, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + return gtNewSimdWithElementNode(retType, op1, op2, op3, simdBaseJitType, simdSize); } default: @@ -2085,7 +2018,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, nodeBuilder.AddOperand(3, gtNewZeroConNode(TYP_FLOAT)); copyBlkSrc = gtNewSimdHWIntrinsicNode(TYP_SIMD12, std::move(nodeBuilder), NI_Vector128_Create, - simdBaseJitType, 16, /* isSimdAsHWIntrinsic */ true); + simdBaseJitType, 16); } copyBlkDst = op1; @@ -2114,12 +2047,10 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, else { GenTree* idx = gtNewIconNode(2, TYP_INT); - op2 = gtNewSimdWithElementNode(simdType, op2, idx, op3, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + op2 = gtNewSimdWithElementNode(simdType, op2, idx, op3, simdBaseJitType, simdSize); idx = gtNewIconNode(3, TYP_INT); - copyBlkSrc = gtNewSimdWithElementNode(simdType, op2, idx, op4, simdBaseJitType, simdSize, - /* isSimdAsHWIntrinsic */ true); + copyBlkSrc = gtNewSimdWithElementNode(simdType, op2, idx, op4, simdBaseJitType, simdSize); } copyBlkDst = op1; @@ -2214,7 +2145,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, nodeBuilder.AddOperand(3, op5); copyBlkSrc = gtNewSimdHWIntrinsicNode(TYP_SIMD16, std::move(nodeBuilder), NI_Vector128_Create, - simdBaseJitType, 16, /* isSimdAsHWIntrinsic */ true); + simdBaseJitType, 16); } copyBlkDst = op1;
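
Note (reviewer aside, not part of the patch): the LowerHWIntrinsicCndSel hunk above builds a conditional select out of plain bitwise nodes, tmp2 = op1 & op2, tmp3 = op3 & ~op1, tmp4 = tmp2 | tmp3, where op1 is a per-lane comparison mask. The following is a minimal scalar C++ sketch of that identity; the function name selectByMask and the test values are illustrative only and do not come from the JIT sources.

// Scalar sketch of the bitwise conditional-select identity used by
// LowerHWIntrinsicCndSel:  result = (mask & op2) | (op3 & ~mask)
#include <cstdint>
#include <cstdio>

static uint32_t selectByMask(uint32_t mask, uint32_t op2, uint32_t op3)
{
    uint32_t tmp2 = mask & op2;  // tmp2 = op1 & op2
    uint32_t tmp3 = op3 & ~mask; // tmp3 = op3 & ~op1
    return tmp2 | tmp3;          // tmp4 = tmp2 | tmp3
}

int main()
{
    // A comparison mask is all-ones (true) or all-zeros (false) per lane.
    std::printf("%u\n", selectByMask(0xFFFFFFFFu, 42u, 7u)); // prints 42 (mask set selects op2)
    std::printf("%u\n", selectByMask(0x00000000u, 42u, 7u)); // prints 7  (mask clear selects op3)
    return 0;
}

Because each lane's mask is either all ones or all zeros, the AND/ANDN/OR combination picks op2 where the mask is set and op3 elsewhere, which is exactly what the three gtNewSimdBinOpNode calls in that hunk materialize when no native blend instruction is used.
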