From 6a867351edc538bd8067133efab42b2240c4e53a Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Thu, 14 Jul 2022 22:18:17 +0300 Subject: [PATCH 1/8] Add tests --- .../JitBlue/Runtime_72133/Runtime_72133.il | 300 ++++++++++++++++++ .../Runtime_72133/Runtime_72133.ilproj | 11 + 2 files changed, 311 insertions(+) create mode 100644 src/tests/JIT/Regression/JitBlue/Runtime_72133/Runtime_72133.il create mode 100644 src/tests/JIT/Regression/JitBlue/Runtime_72133/Runtime_72133.ilproj diff --git a/src/tests/JIT/Regression/JitBlue/Runtime_72133/Runtime_72133.il b/src/tests/JIT/Regression/JitBlue/Runtime_72133/Runtime_72133.il new file mode 100644 index 0000000000000..074337b2f50f6 --- /dev/null +++ b/src/tests/JIT/Regression/JitBlue/Runtime_72133/Runtime_72133.il @@ -0,0 +1,300 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +.assembly extern System.Runtime { } +.assembly extern System.Console { } +.assembly extern System.Numerics.Vectors { } + +.assembly Runtime_72133 { } + +#define TRUE "1" +#define FALSE "0" + +.typedef [System.Numerics.Vectors]System.Numerics.Vector2 as Vector2 + +.class Runtime_72133 extends [System.Runtime]System.Object +{ + .method static int32 Main() + { + .entrypoint + .locals (int32 result) + + ldc.i4 100 + stloc result + + call bool .this::ProblemWithStructStObj() + brfalse VECTOR_STOBJ + + ldloca result + ldstr "STRUCT_STOBJ failed" + call void .this::ReportFailure(int32*, string) + + VECTOR_STOBJ: + call bool .this::ProblemWithVectorStObj() + brfalse CPBLK + + ldloca result + ldstr "VECTOR_STOBJ failed" + call void .this::ReportFailure(int32*, string) + + CPBLK: + call bool .this::ProblemWithCpBlk() + brfalse INITBLK + + ldloca result + ldstr "CPBLK failed" + call void .this::ReportFailure(int32*, string) + + INITBLK: + call bool .this::ProblemWithInitBlk() + brfalse INITOBJ + + ldloca result + ldstr "INITBLK failed" + call void .this::ReportFailure(int32*, string) + + INITOBJ: + call bool .this::ProblemWithInitObj() + brfalse DIRECT_INITOBJ + + ldloca result + ldstr "INITOBJ failed" + call void .this::ReportFailure(int32*, string) + + DIRECT_INITOBJ: + call bool .this::ProblemWithDirectInitObj() + brfalse RETURN + + ldloca result + ldstr "DIRECT_INITOBJ failed" + call void .this::ReportFailure(int32*, string) + + RETURN: + ldloc result + ret + } + + .method private static bool ProblemWithStructStObj() noinlining + { + .locals (int32 a, int32 b, int32 offs) + + ldc.i4 1 + call !!0 .this::Get<int32>(!!0) + stloc a + + ldc.i4 2 + call !!0 .this::Get<int32>(!!0) + stloc b + + ldc.i4 0 + call !!0 .this::Get<int32>(!!0) + stloc offs + + ldloc a + + ldloca a + ldloc offs + add + ldloca b + ldobj StructWithInt + stobj StructWithInt + + ldc.i4 1 + bne.un FAILURE + ldc.i4 FALSE + ret + + FAILURE: + ldc.i4 TRUE + ret + } + + .method private static bool ProblemWithVectorStObj() noinlining + { + .locals (int64 a, int64 b, int32 offs) + + ldc.i8 1 + call !!0 .this::Get<int64>(!!0) + stloc a + + ldc.i8 2 + call !!0 .this::Get<int64>(!!0) + stloc b + + ldc.i4 0 + call !!0 .this::Get<int32>(!!0) + stloc offs + + ldloc a + + ldloca a + ldloc offs + add + ldloca b + ldobj Vector2 + stobj Vector2 + + ldc.i8 1 + bne.un FAILURE + ldc.i4 FALSE + ret + + FAILURE: + ldc.i4 TRUE + ret + } + + .method private static bool ProblemWithCpBlk() noinlining + { + .locals (int32 a, int32 b, int32 offs) + + ldc.i4 1 + call !!0 .this::Get<int32>(!!0) + stloc a + + ldc.i4 2 + call !!0 .this::Get<int32>(!!0) + stloc b + + ldc.i4 0 + call !!0 .this::Get<int32>(!!0) +
stloc offs + + ldloc a + + ldloca a + ldloc offs + add + ldloca b + sizeof int32 + cpblk + + ldc.i4 1 + bne.un FAILURE + ldc.i4 FALSE + ret + + FAILURE: + ldc.i4 TRUE + ret + } + + .method private static bool ProblemWithInitBlk() noinlining + { + .locals (int32 a, int32 b, int32 offs) + + ldc.i4 1 + call !!0 .this::Get<int32>(!!0) + stloc a + + ldc.i4 2 + call !!0 .this::Get<int32>(!!0) + stloc b + + ldc.i4 0 + call !!0 .this::Get<int32>(!!0) + stloc offs + + ldloc a + + ldloca a + ldloc offs + add + ldloc b + sizeof int32 + initblk + + ldc.i4 1 + bne.un FAILURE + ldc.i4 FALSE + ret + + FAILURE: + ldc.i4 TRUE + ret + } + + .method private static bool ProblemWithInitObj() noinlining + { + .locals (object a, int32 offs) + + newobj instance void object::.ctor() + call !!0 .this::Get<object>(!!0) + stloc a + + ldc.i4 0 + call !!0 .this::Get<int32>(!!0) + stloc offs + + ldloc a + + ldloca a + ldloc offs + add + initobj object + + ldnull + beq FAILURE + ldc.i4 FALSE + ret + + FAILURE: + ldc.i4 TRUE + ret + } + + .method private static bool ProblemWithDirectInitObj() noinlining + { + .locals (valuetype StructWithInt a, valuetype StructWithInt* pA) + + ldloca a + ldflda int32 StructWithInt::Value + ldc.i4 1 + call !!0 .this::Get<int32>(!!0) + stfld int32 StructWithInt::Value + + ldloca a + stloc pA + + ldloc pA + ldfld int32 StructWithInt::Value + + ldloca a + initobj StructWithInt + + ldc.i4 1 + bne.un FAILURE + ldc.i4 FALSE + ret + + FAILURE: + ldc.i4 TRUE + ret + } + + .method private static !!T Get<T>(!!T arg) noinlining + { + ldarg arg + ret + } + + .method private static void ReportFailure(int32* pResult, string msg) noinlining + { + ldarg pResult + ldarg pResult + ldind.i4 + ldc.i4 1 + add + stind.i4 + + ldarg msg + call void [System.Console]System.Console::WriteLine(string) + + ret + } +} + +.class sealed sequential StructWithInt extends [System.Runtime]System.ValueType +{ + .field public int32 Value +} diff --git a/src/tests/JIT/Regression/JitBlue/Runtime_72133/Runtime_72133.ilproj b/src/tests/JIT/Regression/JitBlue/Runtime_72133/Runtime_72133.ilproj new file mode 100644 index 0000000000000..c61c0c5d312f4 --- /dev/null +++ b/src/tests/JIT/Regression/JitBlue/Runtime_72133/Runtime_72133.ilproj @@ -0,0 +1,11 @@ +<Project Sdk="Microsoft.NET.Sdk.IL"> + <PropertyGroup> + <OutputType>Exe</OutputType> + </PropertyGroup> + <PropertyGroup> + <Optimize>True</Optimize> + </PropertyGroup> + <ItemGroup> + <Compile Include="$(MSBuildProjectName).il" /> + </ItemGroup> +</Project> From 98543f15a1605d4917a37d8d2446fc1696a1751f Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Thu, 14 Jul 2022 00:26:43 +0300 Subject: [PATCH 2/8] Fix losing GLOB_REF on the LHS The comment states we don't need it, which is incorrect. Diffs are improvements because we block forward substitution of calls into "ASG(BLK(ADDR(LCL_VAR, ...)))", which allows morph to leave the "can be replaced with its field" local alone. --- src/coreclr/jit/importer.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index bf9d81921021b..4127401e26089 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -1500,11 +1500,6 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, { dest = gtNewObjNode(structHnd, destAddr); gtSetObjGcInfo(dest->AsObj()); - // Although an obj as a call argument was always assumed to be a globRef - // (which is itself overly conservative), that is not true of the operands - // of a block assignment.
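All of the tests above share one shape, which is exactly what the deletion here mishandled: the old value of a local is pushed on the evaluation stack, and the following store goes through an address that only aliases that local at runtime. A rough C++ rendering of ProblemWithStructStObj (illustrative names only; Get stands in for the opaque identity helper from the IL, and this sketch is not part of the patch):

    struct StructWithInt { int Value; };

    template <typename T>
    T Get(T arg) { return arg; } // "noinlining" in the IL test

    bool ProblemWithStructStObj()
    {
        int a    = Get(1);
        int b    = Get(2);
        int offs = Get(0); // not a constant the importer can see through

        int old = a; // a pending load of "a" sits on the evaluation stack

        // stobj through an address that aliases "a"; the pending load must
        // be spilled first, or it will observe the new value.
        *(StructWithInt*)((char*)&a + offs) = *(StructWithInt*)&b;

        return old != 1; // "true" means the reordering bug reproduced
    }

Clearing GTF_GLOB_REF on the destination, as the lines deleted just below did, meant the importer no longer saw the store as something that could touch the value the pending load reads: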
- dest->gtFlags &= ~GTF_GLOB_REF; - dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF); } else { From d855559fa57ec1f3f12afaaf49b67d314dd6ad0b Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Fri, 15 Jul 2022 15:25:30 +0300 Subject: [PATCH 3/8] Prospective fix Spill "glob refs" on stores to "aliased" locals. --- src/coreclr/jit/compiler.h | 9 +-- src/coreclr/jit/importer.cpp | 103 +++++++++++++++-------------------- 2 files changed, 47 insertions(+), 65 deletions(-) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index f397d4bcd9e88..d07ba5455ee9e 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -3799,11 +3799,8 @@ class Compiler Statement* impLastStmt; // The last statement for the current BB. public: - enum - { - CHECK_SPILL_ALL = -1, - CHECK_SPILL_NONE = -2 - }; + static const unsigned CHECK_SPILL_ALL = static_cast<unsigned>(-1); + static const unsigned CHECK_SPILL_NONE = static_cast<unsigned>(-2); void impBeginTreeList(); void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt); @@ -4003,7 +4000,7 @@ class Compiler void impSpillSpecialSideEff(); void impSpillSideEffect(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); - void impSpillLclRefs(unsigned lclNum); + void impSpillLclRefs(ssize_t lclNum, unsigned chkLevel = CHECK_SPILL_ALL); BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter); diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 4127401e26089..aadc0cc937dc3 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -490,7 +490,7 @@ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) // Arguments: // stmt - The statement to add. // chkLevel - [0..chkLevel) is the portion of the stack which we will check -// for interference with stmt and spill if needed. +// for interference with stmt and spilled if needed. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, @@ -509,61 +509,40 @@ void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsu { assert(chkLevel <= verCurrentState.esStackDepth); - /* If the statement being appended has any side-effects, check the stack - to see if anything needs to be spilled to preserve correct ordering. */ + // If the statement being appended has any side-effects, check the stack to see if anything + // needs to be spilled to preserve correct ordering. GenTree* expr = stmt->GetRootNode(); GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT; - // Assignment to (unaliased) locals don't count as a side-effect as - // we handle them specially using impSpillLclRefs(). Temp locals should - // be fine too. - - if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && - ((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2)) - { - GenTreeFlags op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT; - assert(flags == (op2Flags | GTF_ASG)); - flags = op2Flags; - } - - if (flags != 0) + // If the only side effect of this tree is an assignment to an unaliased local, we can avoid + // spilling pending loads from the stack.
Note we still need to spill LCL_VAR/LCL_FLD nodes + // that refer to this local in such a case. + // + if (expr->OperIs(GT_ASG) && expr->AsOp()->gtOp1->OperIsLocal()) { - bool spillGlobEffects = false; + LclVarDsc* spillVarDsc = lvaGetDesc(expr->AsOp()->gtOp1->AsLclVarCommon()); - if ((flags & GTF_CALL) != 0) - { - // If there is a call, we have to spill global refs - spillGlobEffects = true; - } - else if (!expr->OperIs(GT_ASG)) - { - if ((flags & GTF_ASG) != 0) - { - // The expression is not an assignment node but it has an assignment side effect, it - // must be an atomic op, HW intrinsic or some other kind of node that stores to memory. - // Since we don't know what it assigns to, we need to spill global refs. - spillGlobEffects = true; - } - } - else + // We make two assumptions here: + // + // 1. All locals which can be accessed indirectly are marked as address-exposed or with + // "lvHasLdAddrOp". + // 2. Trees that assign to unaliased locals are always top-level (this avoids having to + // walk down the tree here). + // + // If any of the above are violated (say for some temps), the relevant code must spill + // any possible pending references manually. + // + if (!spillVarDsc->IsAddressExposed() && !spillVarDsc->lvHasLdAddrOp) { - GenTree* lhs = expr->gtGetOp1(); - GenTree* rhs = expr->gtGetOp2(); - - if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0) - { - // Either side of the assignment node has an assignment side effect. - // Since we don't know what it assigns to, we need to spill global refs. - spillGlobEffects = true; - } - else if ((lhs->gtFlags & GTF_GLOB_REF) != 0) - { - spillGlobEffects = true; - } + impSpillLclRefs(lvaGetLclNum(spillVarDsc), chkLevel); + flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT; } + } - impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt")); + if (flags != 0) + { + impSpillSideEffects((flags & (GTF_ASG | GTF_CALL)) != 0, chkLevel DEBUGARG("impAppendStmt")); } else { @@ -2578,21 +2557,27 @@ inline void Compiler::impSpillSpecialSideEff() } } -/***************************************************************************** - * - * If the stack contains any trees with references to local #lclNum, assign - * those trees to temps and replace their place on the stack with refs to - * their temps. - */ - -void Compiler::impSpillLclRefs(unsigned lclNum) +//------------------------------------------------------------------------ +// impSpillLclRefs: Spill all trees referencing the given local. +// +// Arguments: +// lclNum - The local's number +// chkLevel - Height (exclusive) of the portion of the stack to check +// +void Compiler::impSpillLclRefs(ssize_t lclNum, unsigned chkLevel /* = CHECK_SPILL_ALL */) { - /* Before we make any appends to the tree list we must spill the - * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */ - + // Before we make any appends to the tree list we must spill the + // "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG. 
impSpillSpecialSideEff(); - for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) + if (chkLevel == CHECK_SPILL_ALL) + { + chkLevel = verCurrentState.esStackDepth; + } + + assert(chkLevel <= verCurrentState.esStackDepth); + + for (unsigned level = 0; level < chkLevel; level++) { GenTree* tree = verCurrentState.esStack[level].val; From 5bbde994cf98adfd5890de002ca0d2bf15a4a869 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Fri, 15 Jul 2022 15:27:15 +0300 Subject: [PATCH 4/8] Delete now-not-necessary code --- src/coreclr/jit/importer.cpp | 100 +++++++---------------------------- 1 file changed, 18 insertions(+), 82 deletions(-) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index aadc0cc937dc3..6252893a20d30 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -516,26 +516,29 @@ void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsu GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT; // If the only side effect of this tree is an assignment to an unaliased local, we can avoid - // spilling pending loads from the stack. Note we still need to spill LCL_VAR/LCL_FLD nodes - // that refer to this local in such a case. + // spilling all pending loads from the stack. Instead we only spill trees with LCL_[VAR|FLD] + // nodes that refer to the local. // if (expr->OperIs(GT_ASG) && expr->AsOp()->gtOp1->OperIsLocal()) { - LclVarDsc* spillVarDsc = lvaGetDesc(expr->AsOp()->gtOp1->AsLclVarCommon()); + LclVarDsc* dstVarDsc = lvaGetDesc(expr->AsOp()->gtOp1->AsLclVarCommon()); // We make two assumptions here: // - // 1. All locals which can be accessed indirectly are marked as address-exposed or with - // "lvHasLdAddrOp". + // 1. All locals which can be modified indirectly are marked as address-exposed or with + // "lvHasLdAddrOp" -- we will rely on "impSpillSideEffects(spillGlobEffects: true)" + // below to spill them. // 2. Trees that assign to unaliased locals are always top-level (this avoids having to // walk down the tree here). // // If any of the above are violated (say for some temps), the relevant code must spill // any possible pending references manually. // - if (!spillVarDsc->IsAddressExposed() && !spillVarDsc->lvHasLdAddrOp) + if (!dstVarDsc->IsAddressExposed() && !dstVarDsc->lvHasLdAddrOp) { - impSpillLclRefs(lvaGetLclNum(spillVarDsc), chkLevel); + impSpillLclRefs(lvaGetLclNum(dstVarDsc), chkLevel); + + // We still needs to spill things that the RHS could modify/interfere with. flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT; } } @@ -2331,14 +2334,6 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken return gtNewLclvNode(tmp, TYP_I_IMPL); } -/****************************************************************************** - * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp. - * If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum, - * else, grab a new temp. - * For structs (which can be pushed on the stack using obj, etc), - * special handling is needed - */ - struct RecursiveGuard { public: @@ -13141,56 +13136,14 @@ void Compiler::impImportBlockCode(BasicBlock* block) goto DECODE_OPCODE; SPILL_APPEND: - - // We need to call impSpillLclRefs() for a struct type lclVar. - // This is because there may be loads of that lclVar on the evaluation stack, and - // we need to ensure that those loads are completed before we modify it. 
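The special case removed below duplicated, for struct assignments only, what impAppendStmt now does for every store to an unaliased local: any pending stack entry that references the stored-to local gets moved into a temp before the store is appended. For context, the body of the loop in impSpillLclRefs, which the earlier hunk elides as unchanged context, is roughly the following (a sketch reconstructed from the surrounding code, not an exact quote):

    for (unsigned level = 0; level < chkLevel; level++)
    {
        GenTree* tree = verCurrentState.esStack[level].val;

        // Entries that read the local (or contain calls, whose effects must
        // stay ordered) are evaluated into fresh temps, so that later uses
        // observe the value from before the store.
        if (((tree->gtFlags & GTF_CALL) != 0) || gtHasRef(tree, lclNum))
        {
            impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
        }
    }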
- if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1())) - { - GenTree* lhs = op1->gtGetOp1(); - GenTreeLclVarCommon* lclVar = nullptr; - if (lhs->gtOper == GT_LCL_VAR) - { - lclVar = lhs->AsLclVarCommon(); - } - else if (lhs->OperIsBlk()) - { - // Check if LHS address is within some struct local, to catch - // cases where we're updating the struct by something other than a stfld - GenTree* addr = lhs->AsBlk()->Addr(); - - // Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT)) - lclVar = addr->IsLocalAddrExpr(); - - // Catches ADDR(FIELD(... ADDR(LCL_VAR))) - if (lclVar == nullptr) - { - GenTree* lclTree = nullptr; - if (impIsAddressInLocal(addr, &lclTree)) - { - lclVar = lclTree->AsLclVarCommon(); - } - } - } - if (lclVar != nullptr) - { - impSpillLclRefs(lclVar->GetLclNum()); - } - } - - /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); goto DONE_APPEND; APPEND: - - /* Append 'op1' to the list of statements */ - impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); goto DONE_APPEND; DONE_APPEND: - #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); @@ -13468,25 +13421,16 @@ void Compiler::impImportBlockCode(BasicBlock* block) } } - /* Create the assignment node */ - op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1)); - /* If the local is aliased or pinned, we need to spill calls and - indirections from the stack. */ - - if ((lvaTable[lclNum].IsAddressExposed() || lvaTable[lclNum].lvHasLdAddrOp || - lvaTable[lclNum].lvPinned) && - (verCurrentState.esStackDepth > 0)) + // Stores to pinned locals can have the implicit side effect of "unpinning", so we must spill + // things that could depend on the pin. TODO-Bug: which can actually be anything, including + // unpinned unaliased locals, not just side-effecting trees. 
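To make the pinned case above concrete: storing to a pinned local ends the pin established by its previous value, so even a pending tree with no side effects of its own can become invalid if it is evaluated after the store. An illustrative shape, assuming "p" holds the only reference keeping its object pinned (comments only, not code from the patch):

    // addr = p + offs   // pending stack entry derived from the pinned object
    // p    = q          // this store "unpins" the old object
    // *addr = 42        // if "addr" was not spilled first, it may now point
    //                   // into an object that has moved or been collected

Hence the conservative spill before such stores: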
+ if (lvaTable[lclNum].lvPinned) { - impSpillSideEffects(false, - (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned")); + impSpillSideEffects(false, CHECK_SPILL_ALL DEBUGARG("Spill before store to pinned local")); } - /* Spill any refs to the local from the stack */ - - impSpillLclRefs(lclNum); - // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op2' type // @@ -13498,13 +13442,12 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (varTypeIsStruct(lclTyp)) { - op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL); + op1 = impAssignStruct(op2, op1, clsHnd, CHECK_SPILL_ALL); } else { op1 = gtNewAssignNode(op2, op1); } - goto SPILL_APPEND; case CEE_LDLOCA: @@ -15119,6 +15062,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) #endif op1 = gtNewOperNode(GT_IND, lclTyp, op1); + op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; if (prefixFlags & PREFIX_VOLATILE) { @@ -15134,15 +15078,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } op1 = gtNewAssignNode(op1, op2); - op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; - - // Spill side-effects AND global-data-accesses - if (verCurrentState.esStackDepth > 0) - { - impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND")); - } - - goto APPEND; + goto SPILL_APPEND; case CEE_LDIND_I1: lclTyp = TYP_BYTE; From 4b43613ee42f860ed16616b1820d533f0dcb10b0 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Fri, 15 Jul 2022 20:26:33 +0300 Subject: [PATCH 5/8] Fix up asserts --- src/coreclr/jit/importer.cpp | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 6252893a20d30..dde9dfe6abfd9 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -453,25 +453,23 @@ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) } } - if (tree->gtOper == GT_ASG) + if (tree->OperIs(GT_ASG)) { // For an assignment to a local variable, all references of that // variable have to be spilled. If it is aliased, all calls and // indirect accesses have to be spilled - if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) + if (tree->AsOp()->gtOp1->OperIsLocal()) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); for (unsigned level = 0; level < chkLevel; level++) { - assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum)); - assert(!lvaTable[lclNum].IsAddressExposed() || - (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0); + GenTree* stkTree = verCurrentState.esStack[level].val; + assert(!gtHasRef(stkTree, lclNum) || impIsInvariant(stkTree)); + assert(!lvaTable[lclNum].IsAddressExposed() || ((stkTree->gtFlags & GTF_SIDE_EFFECT) == 0)); } } - // If the access may be to global memory, all side effects have to be spilled. 
- else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) { for (unsigned level = 0; level < chkLevel; level++) From c56cbe8f0838c1659b48ea21bd7a635461009d6a Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Fri, 15 Jul 2022 20:27:24 +0300 Subject: [PATCH 6/8] Clean out '(unsigned)CHECK_SPILL_ALL/NONE' casts --- src/coreclr/jit/compiler.h | 2 +- src/coreclr/jit/fginline.cpp | 6 +- src/coreclr/jit/gentree.cpp | 107 ++++++------ src/coreclr/jit/hwintrinsic.cpp | 2 +- src/coreclr/jit/hwintrinsicarm64.cpp | 6 +- src/coreclr/jit/hwintrinsicxarch.cpp | 4 +- src/coreclr/jit/importer.cpp | 183 ++++++++++----------- src/coreclr/jit/importer_vectorization.cpp | 2 +- src/coreclr/jit/simd.cpp | 2 +- 9 files changed, 153 insertions(+), 161 deletions(-) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index d07ba5455ee9e..4feb70a9090ee 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -3813,7 +3813,7 @@ class Compiler void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore); void impAssignTempGen(unsigned tmp, GenTree* val, - unsigned curLevel = (unsigned)CHECK_SPILL_NONE, + unsigned curLevel = CHECK_SPILL_NONE, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index 9891317505e82..b06f0d11ec68b 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -1611,7 +1611,7 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) // argTmpNum here since in-linee compiler instance // would have iterated over these and marked them // accordingly. - impAssignTempGen(tmpNum, argNode, structHnd, (unsigned)CHECK_SPILL_NONE, &afterStmt, callDI, block); + impAssignTempGen(tmpNum, argNode, structHnd, CHECK_SPILL_NONE, &afterStmt, callDI, block); // We used to refine the temp type here based on // the actual arg, but we now do this up front, when @@ -1818,8 +1818,8 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) { // Unsafe value cls check is not needed here since in-linee compiler instance would have // iterated over locals and marked accordingly. 
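The call-site edits below, like the rest of this commit, only drop casts that became unnecessary once CHECK_SPILL_ALL and CHECK_SPILL_NONE turned into typed constants in the "Prospective fix" commit. The two forms side by side (a sketch restating the compiler.h hunk above):

    // Before: enumerators of an anonymous enum, with int type, so call sites
    // had to write (unsigned)CHECK_SPILL_ALL to match "unsigned chkLevel":
    enum { CHECK_SPILL_ALL = -1, CHECK_SPILL_NONE = -2 };

    // After: already unsigned, usable directly:
    static const unsigned CHECK_SPILL_ALL  = static_cast<unsigned>(-1);
    static const unsigned CHECK_SPILL_NONE = static_cast<unsigned>(-2);

Semantically, CHECK_SPILL_ALL asks impAppendTree and friends to check the whole evaluation stack, [0..esStackDepth), for interference and spill as needed, while CHECK_SPILL_NONE says the caller has already ruled interference out, so nothing is checked.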
- impAssignTempGen(tmpNum, gtNewZeroConNode(genActualType(lclTyp)), NO_CLASS_HANDLE, - (unsigned)CHECK_SPILL_NONE, &afterStmt, callDI, block); + impAssignTempGen(tmpNum, gtNewZeroConNode(genActualType(lclTyp)), NO_CLASS_HANDLE, CHECK_SPILL_NONE, + &afterStmt, callDI, block); } else { diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index b15378b8e9b97..e8375d3b0f2c7 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -15354,7 +15354,7 @@ GenTree* Compiler::gtNewTempAssign( } dest->gtFlags |= GTF_DONT_CSE; valx->gtFlags |= GTF_DONT_CSE; - asg = impAssignStruct(dest, val, valStructHnd, (unsigned)CHECK_SPILL_NONE, pAfterStmt, di, block); + asg = impAssignStruct(dest, val, valStructHnd, CHECK_SPILL_NONE, pAfterStmt, di, block); } else { @@ -15408,7 +15408,7 @@ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr, if (pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT) { assert(structType != nullptr); - assg = impGetStructAddr(assg, structType, (unsigned)CHECK_SPILL_ALL, true); + assg = impGetStructAddr(assg, structType, CHECK_SPILL_ALL, true); } else if (lclTyp == TYP_DOUBLE && assg->TypeGet() == TYP_FLOAT) { @@ -15484,7 +15484,7 @@ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr, if (!varTypeIsStruct(lclTyp)) { // get the result as primitive type - result = impGetStructAddr(result, structType, (unsigned)CHECK_SPILL_ALL, true); + result = impGetStructAddr(result, structType, CHECK_SPILL_ALL, true); result = gtNewOperNode(GT_IND, lclTyp, result); } } @@ -15514,7 +15514,7 @@ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr, { if (varTypeIsStruct(lclTyp)) { - result = impAssignStructPtr(result, assg, structType, (unsigned)CHECK_SPILL_ALL); + result = impAssignStructPtr(result, assg, structType, CHECK_SPILL_ALL); } else { @@ -18767,12 +18767,11 @@ GenTree* Compiler::gtNewSimdAbsNode( CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); GenTree* op1Dup1; - op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL, - nullptr DEBUGARG("Clone op1 for vector abs")); + op1 = impCloneExpr(op1, &op1Dup1, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector abs")); GenTree* op1Dup2; - op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL, - nullptr DEBUGARG("Clone op1 for vector abs")); + op1Dup1 = + impCloneExpr(op1Dup1, &op1Dup2, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector abs")); // op1 = op1 < Zero tmp = gtNewZeroConNode(type, simdBaseJitType); @@ -19072,12 +19071,12 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, { // op1Dup = op1 GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector multiply")); // op2Dup = op2 GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector multiply")); // op1 = Sse2.ShiftRightLogical128BitLane(op1, 4) @@ -19613,7 +19612,7 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, GenTree* tmp = gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); - tmp = impCloneExpr(tmp, &op1, clsHnd, (unsigned)CHECK_SPILL_ALL, + tmp = impCloneExpr(tmp, &op1, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp for vector Equals")); op2 = gtNewSimdHWIntrinsicNode(type, tmp, gtNewIconNode(SHUFFLE_ZWXY), NI_SSE2_Shuffle, @@ 
-19663,11 +19662,11 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // result = BitwiseOr(op1, op2) GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector GreaterThanOrEqual")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector GreaterThanOrEqual")); op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); @@ -19746,7 +19745,7 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic); GenTree* constVectorDup; - constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL, + constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone constVector for vector GreaterThan")); // op1 = op1 - constVector @@ -19815,19 +19814,19 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // result = BitwiseOr(op1, op2) GenTree* op1Dup1; - op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup1, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector GreaterThan")); GenTree* op1Dup2; - op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL, + op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector GreaterThan")); GenTree* op2Dup1; - op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup1, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector GreaterThan")); GenTree* op2Dup2; - op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL, + op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 vector GreaterThan")); GenTree* t = @@ -19889,11 +19888,11 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // result = BitwiseOr(op1, op2) GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector LessThanOrEqual")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector LessThanOrEqual")); op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); @@ -19972,7 +19971,7 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic); GenTree* constVectorDup; - constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL, + constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone constVector for vector LessThan")); // op1 = op1 - constVector @@ -20041,19 +20040,19 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // result = BitwiseOr(op1, op2) GenTree* op1Dup1; - op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup1, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector LessThan")); GenTree* op1Dup2; - op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL, + 
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector LessThan")); GenTree* op2Dup1; - op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup1, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector LessThan")); GenTree* op2Dup2; - op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL, + op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 vector LessThan")); GenTree* t = @@ -20758,11 +20757,11 @@ GenTree* Compiler::gtNewSimdMaxNode(var_types type, gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); GenTree* constVectorDup1; - constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL, + constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone constVector for vector Max")); GenTree* constVectorDup2; - constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL, + constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone constVector for vector Max")); // op1 = op1 - constVector @@ -20836,10 +20835,10 @@ GenTree* Compiler::gtNewSimdMaxNode(var_types type, } GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Max")); + op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Max")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Max")); + op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Max")); // op1 = op1 > op2 op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); @@ -20942,11 +20941,11 @@ GenTree* Compiler::gtNewSimdMinNode(var_types type, gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); GenTree* constVectorDup1; - constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL, + constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone constVector for vector Min")); GenTree* constVectorDup2; - constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL, + constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone constVector for vector Min")); // op1 = op1 - constVector @@ -21020,10 +21019,10 @@ GenTree* Compiler::gtNewSimdMinNode(var_types type, } GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Min")); + op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Min")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Min")); + op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Min")); // op1 = op1 < op2 op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); @@ -21081,7 +21080,7 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, simdSize, isSimdAsHWIntrinsic); GenTree* tmp1Dup; - tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, 
(unsigned)CHECK_SPILL_ALL, + tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp1 for vector narrow")); tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize, @@ -21122,7 +21121,7 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, simdSize, isSimdAsHWIntrinsic); GenTree* tmp1Dup; - tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp1 for vector narrow")); tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize, @@ -21159,11 +21158,11 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic); GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector narrow")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector narrow")); tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize, @@ -21233,7 +21232,7 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, simdSize, isSimdAsHWIntrinsic); GenTree* tmp1Dup; - tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp1 for vector narrow")); tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize, @@ -21273,7 +21272,7 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, simdSize, isSimdAsHWIntrinsic); GenTree* tmp1Dup; - tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp1 for vector narrow")); tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize, @@ -21301,11 +21300,11 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, // return Sse2.UnpackLow(tmp3, tmp4).As(); GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector narrow")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector narrow")); tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize, @@ -21316,11 +21315,11 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); GenTree* tmp1Dup; - tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp1 for vector narrow")); GenTree* tmp2Dup; - tmp2 = impCloneExpr(tmp2, &tmp2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + tmp2 = impCloneExpr(tmp2, &tmp2Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp2 for vector narrow")); tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize, @@ -21351,11 +21350,11 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, 
opBaseJitType, isSimdAsHWIntrinsic); GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector narrow")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector narrow")); tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize, @@ -21854,7 +21853,7 @@ GenTree* Compiler::gtNewSimdSumNode( for (int i = 0; i < haddCount; i++) { - op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum")); + op1 = impCloneExpr(op1, &tmp, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum")); op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } @@ -21862,7 +21861,7 @@ GenTree* Compiler::gtNewSimdSumNode( { intrinsic = (simdBaseType == TYP_FLOAT) ? NI_SSE_Add : NI_SSE2_Add; - op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum")); + op1 = impCloneExpr(op1, &tmp, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum")); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode(0x01, TYP_INT), NI_AVX_ExtractVector128, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); @@ -21890,8 +21889,7 @@ GenTree* Compiler::gtNewSimdSumNode( { if (simdSize == 8) { - op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, - nullptr DEBUGARG("Clone op1 for vector sum")); + op1 = impCloneExpr(op1, &tmp, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum")); tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } @@ -21917,8 +21915,8 @@ GenTree* Compiler::gtNewSimdSumNode( for (int i = 0; i < haddCount; i++) { - op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, - nullptr DEBUGARG("Clone op1 for vector sum")); + op1 = + impCloneExpr(op1, &tmp, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum")); op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } @@ -22148,7 +22146,7 @@ GenTree* Compiler::gtNewSimdWidenLowerNode( CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector widen lower")); tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize, @@ -22271,8 +22269,7 @@ GenTree* Compiler::gtNewSimdWidenUpperNode( CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, - nullptr DEBUGARG("Clone op1 for vector widen upper")); + op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector widen upper")); tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_SSE_MoveHighToLow, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); @@ -22325,7 +22322,7 @@ GenTree* Compiler::gtNewSimdWidenUpperNode( CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); GenTree* 
op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector widen upper")); tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize, diff --git a/src/coreclr/jit/hwintrinsic.cpp b/src/coreclr/jit/hwintrinsic.cpp index 00bd37ec5112f..f654d8d2b3b86 100644 --- a/src/coreclr/jit/hwintrinsic.cpp +++ b/src/coreclr/jit/hwintrinsic.cpp @@ -580,7 +580,7 @@ GenTree* Compiler::addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound GenTree* immOpDup = nullptr; - immOp = impCloneExpr(immOp, &immOpDup, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + immOp = impCloneExpr(immOp, &immOpDup, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone an immediate operand for immediate value bounds check")); if (immLowerBound != 0) diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp index 247c546668429..db17cf51f2696 100644 --- a/src/coreclr/jit/hwintrinsicarm64.cpp +++ b/src/coreclr/jit/hwintrinsicarm64.cpp @@ -957,7 +957,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { CORINFO_CLASS_HANDLE simdClsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType); - op1 = impCloneExpr(op1, &op2, simdClsHnd, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op2, simdClsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector extractmostsignificantbits")); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_Vector128_GetLower, simdBaseJitType, simdSize, @@ -992,7 +992,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { CORINFO_CLASS_HANDLE simdClsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType); - op1 = impCloneExpr(op1, &op2, simdClsHnd, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op2, simdClsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector extractmostsignificantbits")); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, op2, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); @@ -1810,7 +1810,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(HWIntrinsicInfo::IsMultiReg(intrinsic)); const unsigned lclNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg intrinsic")); - impAssignTempGen(lclNum, loadIntrinsic, sig->retTypeSigClass, (unsigned)CHECK_SPILL_ALL); + impAssignTempGen(lclNum, loadIntrinsic, sig->retTypeSigClass, CHECK_SPILL_ALL); LclVarDsc* varDsc = lvaGetDesc(lclNum); // The following is to exclude the fields of the local to have SSA. 
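Most of the gentree.cpp and hwintrinsic changes in this commit are mechanical edits at impCloneExpr call sites, so its contract is worth recalling (described loosely here, not quoted from the sources): the call returns a tree to use in place of the original and, through the out parameter, a second use of the same value; when the tree is too complex to duplicate cheaply, it is spilled to a fresh temp at the given spill level and both results become uses of that temp. The typical pattern, as in the hunks above:

    GenTree* op1Dup;
    op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL,
                       nullptr DEBUGARG("Clone op1 for vector abs"));
    // "op1" and "op1Dup" may now each be consumed exactly once; if a spill
    // happened, a "tmp = <original op1>" statement was appended first to
    // preserve evaluation order.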
diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp index 01629a3080b1e..e1d25a621789a 100644 --- a/src/coreclr/jit/hwintrinsicxarch.cpp +++ b/src/coreclr/jit/hwintrinsicxarch.cpp @@ -2540,7 +2540,7 @@ GenTree* Compiler::impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAND else { GenTree* clonedOp1 = nullptr; - op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for Sse.CompareScalarGreaterThan")); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, simdBaseJitType, simdSize); @@ -2620,7 +2620,7 @@ GenTree* Compiler::impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAN else { GenTree* clonedOp1 = nullptr; - op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for Sse2.CompareScalarGreaterThan")); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, simdBaseJitType, simdSize); diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index dde9dfe6abfd9..26450b28fd2a3 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -431,12 +431,12 @@ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) return; #else - if (chkLevel == (unsigned)CHECK_SPILL_ALL) + if (chkLevel == CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } - if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE) + if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == CHECK_SPILL_NONE) { return; } @@ -498,12 +498,12 @@ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) // void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo) { - if (chkLevel == (unsigned)CHECK_SPILL_ALL) + if (chkLevel == CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } - if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE)) + if ((chkLevel != 0) && (chkLevel != CHECK_SPILL_NONE)) { assert(chkLevel <= verCurrentState.esStackDepth); @@ -2180,7 +2180,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken if (pRuntimeLookup->testForNull) { - slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup slot")); } @@ -2192,7 +2192,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken { if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { - indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } @@ -2219,7 +2219,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken { if (isLastIndirectionWithSizeCheck) { - lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } @@ -2247,7 +2247,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* 
pResolvedToken impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0")); unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test")); - impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI); + impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr, impCurStmtDI); GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); // downcast the pointer to a TYP_INT on 64-bit targets @@ -2267,7 +2267,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken GenTree* asg = gtNewAssignNode(slot, indir); GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg); GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon); - impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(qmark, CHECK_SPILL_NONE, impCurStmtDI); return gtNewLclvNode(slotLclNum, TYP_I_IMPL); } @@ -2328,7 +2328,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree")); - impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE); + impAssignTempGen(tmp, result, CHECK_SPILL_NONE); return gtNewLclvNode(tmp, TYP_I_IMPL); } @@ -2467,7 +2467,7 @@ void Compiler::impSpillStackEnsure(bool spillLeaves) inline void Compiler::impEvalSideEffects() { - impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); + impSpillSideEffects(false, CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); verCurrentState.esStackDepth = 0; } @@ -2504,14 +2504,14 @@ void Compiler::impSpillSideEffect(bool spillGlobEffects, unsigned i DEBUGARG(con inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)) { - assert(chkLevel != (unsigned)CHECK_SPILL_NONE); + assert(chkLevel != CHECK_SPILL_NONE); /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */ impSpillSpecialSideEff(); - if (chkLevel == (unsigned)CHECK_SPILL_ALL) + if (chkLevel == CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } @@ -2842,7 +2842,7 @@ void Compiler::impNoteBranchOffs() { if (opts.compDbgCode) { - impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(gtNewNothingNode(), CHECK_SPILL_NONE, impCurStmtDI); } } @@ -3558,8 +3558,8 @@ GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue); // Now append a few statements the initialize the span - impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); - impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(lengthFieldAsg, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(pointerFieldAsg, CHECK_SPILL_NONE, impCurStmtDI); // And finally create a tree that points at the span. 
return impCreateLocalNode(spanTempNum DEBUGARG(0)); @@ -3877,7 +3877,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); - impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE); + impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, CHECK_SPILL_NONE); GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL); GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar); @@ -3930,9 +3930,9 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, #endif // defined(DEBUG) // We need to use both index and ptr-to-span twice, so clone or spill. - index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item index")); - ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item ptrToSpan")); // Bounds check @@ -4424,8 +4424,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, if (cnsNode->IsFloatNaN()) { - impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( - "spill side effects before propagating NaN")); + impSpillSideEffects(false, CHECK_SPILL_ALL DEBUGARG("spill side effects before propagating NaN")); // maxsd, maxss, minsd, and minss all return op2 if either is NaN // we require NaN to be propagated so ensure the known NaN is op2 @@ -4676,7 +4675,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, GenTree* gtArrClone = nullptr; if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound)) { - gtArr = impCloneExpr(gtArr, >ArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + gtArr = impCloneExpr(gtArr, >ArrClone, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("MD intrinsics array")); } @@ -6562,7 +6561,7 @@ void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGA GenTree* op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewIconNode(block->bbCodeOffs)); // verCurrentState.esStackDepth = 0; - impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(op1, CHECK_SPILL_NONE, impCurStmtDI); // The inliner is not able to handle methods that require throw block, so // make sure this methods never gets inlined. @@ -7798,8 +7797,7 @@ int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, GenTree* objToBox = impPopStack().val; // Spill struct to get its address (to access hasValue field) - objToBox = - impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true); + objToBox = impGetStructAddr(objToBox, nullableCls, CHECK_SPILL_ALL, true); impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0), typeInfo(TI_INT)); @@ -7997,7 +7995,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) // Assign the boxed object to the box temp. // GenTree* asg = gtNewTempAssign(impBoxTemp, op1); - Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + Statement* asgStmt = impAppendTree(asg, CHECK_SPILL_NONE, impCurStmtDI); // If the exprToBox is a call that returns its value via a ret buf arg, // move the assignment statement(s) before the call (which must be a top level tree). 
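For orientation in the next hunks: impImportAndPushBox expands a box into two top-level statements plus a result, roughly the following shape (a sketch; "payloadOffset" stands for the offset of the boxed payload past the object header and is illustrative, not a name from the sources):

    // asgStmt:  allocate and cache the box object in the shared temp
    //     boxTemp = ALLOCOBJ(boxedClass);
    // (pending side effects on the evaluation stack are spilled here)
    // copyStmt: copy the value being boxed into the payload
    //     *(boxTemp + payloadOffset) = exprToBox;
    // result pushed on the IL stack:
    //     boxTemp

The explicit impSpillSideEffects call between the two appends is what lets copyStmt be appended with CHECK_SPILL_NONE.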
@@ -8079,7 +8077,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) if (varTypeIsStruct(exprToBox)) { assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls)); - op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL); + op1 = impAssignStructPtr(op1, exprToBox, operCls, CHECK_SPILL_ALL); } else { @@ -8116,10 +8114,10 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) } // Spill eval stack to flush out any pending side effects. - impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox")); + impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox")); // Set up this copy as a second assignment. - Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + Statement* copyStmt = impAppendTree(op1, CHECK_SPILL_NONE, impCurStmtDI); op1 = gtNewLclvNode(impBoxTemp, TYP_REF); @@ -8151,8 +8149,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) return; } - op1 = gtNewHelperCallNode(boxHelper, TYP_REF, op2, - impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true)); + op1 = gtNewHelperCallNode(boxHelper, TYP_REF, op2, impGetStructAddr(exprToBox, operCls, CHECK_SPILL_ALL, true)); } /* Push the result back on the stack, */ @@ -8208,7 +8205,7 @@ void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORI // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments // to one allocation at a time. - impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray")); + impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray")); // // The arguments of the CORINFO_HELP_NEW_MDARR helper are: @@ -9164,7 +9161,7 @@ void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo) callout->gtArgs.PushFront(this, NewCallArg::Primitive(currentArg)); } - impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(callout, CHECK_SPILL_NONE, impCurStmtDI); } //------------------------------------------------------------------------ @@ -9645,7 +9642,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // it may cause registered args to be spilled. Simply spill it. 
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup")); - impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE); + impAssignTempGen(lclNum, stubAddr, CHECK_SPILL_NONE); stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node @@ -9732,7 +9729,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // Clone the (possibly transformed) "this" pointer GenTree* thisPtrCopy; - thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("LDVIRTFTN this pointer")); GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo); @@ -9744,7 +9741,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer")); - impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); + impAssignTempGen(lclNum, fptr, CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call->AsCall()->gtCallAddr = fptr; @@ -9815,7 +9812,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); - impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); + impAssignTempGen(lclNum, fptr, CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, di); @@ -10335,7 +10332,7 @@ var_types Compiler::impImportCall(OPCODE opcode, } // append the call node. - impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(call, CHECK_SPILL_ALL, impCurStmtDI); // Now push the value of the 'new onto the stack @@ -10652,7 +10649,7 @@ var_types Compiler::impImportCall(OPCODE opcode, } else { - impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(call, CHECK_SPILL_ALL, impCurStmtDI); } } else @@ -10705,7 +10702,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // important if we give up on the inline, in which case the // call will typically end up in the statement that contains // the GT_RET_EXPR that we leave on the stack. - impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false); + impAppendTree(call, CHECK_SPILL_ALL, impCurStmtDI, false); // TODO: Still using the widened type. GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags); @@ -10746,7 +10743,7 @@ var_types Compiler::impImportCall(OPCODE opcode, unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli")); LclVarDsc* varDsc = lvaGetDesc(calliSlot); varDsc->lvVerTypeInfo = tiRetVal; - impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE); + impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), CHECK_SPILL_NONE); // impAssignTempGen can change src arg list and return type for call that returns struct. var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); @@ -11131,7 +11128,7 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer")); // No need to spill anything as we're about to return. 
- impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE); + impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, CHECK_SPILL_NONE); op = gtNewLclvNode(tmpNum, info.compRetType); JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n"); @@ -11172,7 +11169,7 @@ void Compiler::impImportLeave(BasicBlock* block) // LEAVE clears the stack, spill side effects, and set stack to 0 - impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); + impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); @@ -11246,7 +11243,7 @@ void Compiler::impImportLeave(BasicBlock* block) callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) - impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(endCatches, CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) @@ -11333,7 +11330,7 @@ void Compiler::impImportLeave(BasicBlock* block) block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) - impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(endCatches, CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) @@ -11432,7 +11429,7 @@ void Compiler::impImportLeave(BasicBlock* block) // LEAVE clears the stack, spill side effects, and set stack to 0 - impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); + impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); @@ -12428,7 +12425,7 @@ GenTree* Compiler::impCastClassOrIsInstToTree( // This can replace op1 with a GT_COMMA that evaluates op1 into a local // - op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); + op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); // // op1 is now known to be a non-complex tree // thus we can use gtClone(op1) from now on @@ -12512,7 +12509,7 @@ GenTree* Compiler::impCastClassOrIsInstToTree( // Make QMark node a top level node by spilling it. unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2")); - impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE); + impAssignTempGen(tmp, qmarkNull, CHECK_SPILL_NONE); // TODO-CQ: Is it possible op1 has a better type? 
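The "spilling QMark2" change above keeps the invariant that a QMARK (conditional value) must be a top-level tree: the conditional is evaluated into a temp, and downstream trees reference only the local. A rough analogue under that assumption (the names are illustrative):

static bool LookupFailed() { return false; }
static int  HelperCall()   { return 42; }

int main()
{
    // The conditional is the temp's defining statement; nothing nests inside it.
    int tmp = LookupFailed() ? HelperCall() : 7;
    return tmp; // later code sees only the simple local, never the conditional
}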
// @@ -12967,7 +12964,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (impCurStmtDI.IsValid() && opts.compDbgCode) { GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); - impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(placeHolder, CHECK_SPILL_NONE, impCurStmtDI); assert(!impCurStmtDI.IsValid()); } @@ -13134,11 +13131,11 @@ void Compiler::impImportBlockCode(BasicBlock* block) goto DECODE_OPCODE; SPILL_APPEND: - impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(op1, CHECK_SPILL_ALL, impCurStmtDI); goto DONE_APPEND; APPEND: - impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(op1, CHECK_SPILL_NONE, impCurStmtDI); goto DONE_APPEND; DONE_APPEND: @@ -13933,8 +13930,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT) { - impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( - "Strict ordering of exceptions for Array store")); + impSpillSideEffects(false, + CHECK_SPILL_ALL DEBUGARG("Strict ordering of exceptions for Array store")); } // Pull the new value from the stack. @@ -13962,7 +13959,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Create the assignment node and append it. if (varTypeIsStruct(op1)) { - op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL); + op1 = impAssignStruct(op1, op2, stelemClsHnd, CHECK_SPILL_ALL); } else { @@ -14497,15 +14494,15 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (op1->gtFlags & GTF_GLOB_EFFECT) { - impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( - "Branch to next Optimization, op1 side effect")); - impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impSpillSideEffects(false, + CHECK_SPILL_ALL DEBUGARG("Branch to next Optimization, op1 side effect")); + impAppendTree(gtUnusedValNode(op1), CHECK_SPILL_NONE, impCurStmtDI); } if (op2->gtFlags & GTF_GLOB_EFFECT) { - impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( - "Branch to next Optimization, op2 side effect")); - impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impSpillSideEffects(false, + CHECK_SPILL_ALL DEBUGARG("Branch to next Optimization, op2 side effect")); + impAppendTree(gtUnusedValNode(op2), CHECK_SPILL_NONE, impCurStmtDI); } #ifdef DEBUG @@ -14871,7 +14868,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } else { - op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false); + op1 = impGetStructAddr(op1, clsHnd, CHECK_SPILL_ALL, false); } JITDUMP("\n ... 
optimized to ...\n"); @@ -14949,7 +14946,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (!cloneExpr) { const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill")); - impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); + impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), CHECK_SPILL_ALL); var_types type = genActualType(lvaTable[tmpNum].TypeGet()); op1 = gtNewLclvNode(tmpNum, type); @@ -14963,7 +14960,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } } - op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), CHECK_SPILL_ALL, nullptr DEBUGARG("DUP instruction")); assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT)); @@ -15263,7 +15260,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); - impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(op1, CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } @@ -15273,7 +15270,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); - impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(op1, CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } @@ -15491,7 +15488,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock - impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(newObjThisPtr, CHECK_SPILL_NONE, impCurStmtDI); } else { @@ -15515,7 +15512,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (hasSideEffects) { JITDUMP("\nSpilling stack for finalizable newobj\n"); - impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill")); + impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill")); } const bool useParent = true; @@ -15538,7 +15535,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes // without exhaustive walk over all expressions. - impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE); + impAssignTempGen(lclNum, op1, CHECK_SPILL_NONE); assert(lvaTable[lclNum].lvSingleDef == 0); lvaTable[lclNum].lvSingleDef = 1; @@ -15842,7 +15839,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); - impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(obj, CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } @@ -15868,7 +15865,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) { assert(opcode == CEE_LDFLD && objType != nullptr); - obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true); + obj = impGetStructAddr(obj, objType, CHECK_SPILL_ALL, true); } /* Create the data member node */ @@ -16151,7 +16148,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); - impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(obj, CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } @@ -16343,11 +16340,11 @@ void Compiler::impImportBlockCode(BasicBlock* block) // An indirect store such as "st[s]fld" interferes with indirect accesses, so we must spill // global refs and potentially aliased locals. 
- impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD")); + impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD")); if (deferStructAssign) { - op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL); + op1 = impAssignStruct(op1, op2, clsHnd, CHECK_SPILL_ALL); } } goto APPEND; @@ -16647,7 +16644,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op1 = impPopStack().val; // make certain it is normalized; - op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); + op1 = impNormStructVal(op1, impGetRefAnyClass(), CHECK_SPILL_ALL); // Call helper GETREFANY(classHandle, op1); GenTreeCall* helperCall = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF); @@ -16665,7 +16662,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op1 = impPopStack().val; // make certain it is normalized; - op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); + op1 = impNormStructVal(op1, impGetRefAnyClass(), CHECK_SPILL_ALL); if (op1->gtOper == GT_OBJ) { @@ -16684,7 +16681,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // The pointer may have side-effects if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT) { - impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(op1->AsOp()->gtOp1, CHECK_SPILL_ALL, impCurStmtDI); #ifdef DEBUG impNoteLastILoffs(); #endif @@ -16824,7 +16821,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (opcode == CEE_UNBOX) { GenTree* cloneOperand; - op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("optimized unbox clone")); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); @@ -16865,13 +16862,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) // push(clone + TARGET_POINTER_SIZE) // GenTree* cloneOperand; - op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone1")); op1 = gtNewMethodTableLookup(op1); GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2); - op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, + op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone2")); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) @@ -16887,7 +16884,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // may be other trees on the evaluation stack that side-effect the // sources of the UNBOX operation we must spill the stack. - impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(op1, CHECK_SPILL_ALL, impCurStmtDI); // Create the address-expression to reference past the object header // to the beginning of the value-type. Today this means adjusting @@ -16949,7 +16946,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); - op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); + op1 = impAssignStruct(op2, op1, resolvedToken.hClass, CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. 
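The comment above spells out why the spill happens before an st[s]fld is appended: a pending stack entry may read a location the store is about to overwrite, so the read must be forced to evaluate first. A small self-contained illustration of the ordering requirement (plain C++, not JIT code):

#include <cstdio>

struct S { int x; };

int main()
{
    S s{1};
    int pending = s.x;            // stack entry: must be spilled before the store
    s.x = 2;                      // the "stfld"
    std::printf("%d\n", pending); // prints 1, the value IL stack ordering requires
    return 0;
}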
op2 = gtNewLclvNode(tmp, TYP_STRUCT); @@ -16987,7 +16984,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); - op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); + op1 = impAssignStruct(op2, op1, resolvedToken.hClass, CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); @@ -17371,7 +17368,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) assertImp(varTypeIsStruct(op2)); - op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); + op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, CHECK_SPILL_ALL); if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED)) { @@ -17720,7 +17717,7 @@ GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return")); - impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL); + impAssignTempGen(tmpNum, op, hClass, CHECK_SPILL_ALL); GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. @@ -17915,8 +17912,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) } } - impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), - (unsigned)CHECK_SPILL_ALL); + impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); @@ -17961,8 +17957,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); - impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), - (unsigned)CHECK_SPILL_ALL); + impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), CHECK_SPILL_ALL); } #if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) @@ -18067,12 +18062,12 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) { impInlineInfo->retExpr = impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), - retClsHnd, (unsigned)CHECK_SPILL_ALL); + retClsHnd, CHECK_SPILL_ALL); } } else { - impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); + impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, CHECK_SPILL_ALL); } } } @@ -18100,8 +18095,8 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset())); - op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); - impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, CHECK_SPILL_ALL); + impAppendTree(op2, CHECK_SPILL_NONE, impCurStmtDI); // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX). 
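The return-buffer hunks above (and the RAX remark) describe a struct-return convention where the caller passes a hidden destination pointer and, on some ABIs, the callee also returns that pointer. A simplified model with the hidden argument made explicit — retBuf and MakeBig are illustrative names, and the exact ABI rules are assumed rather than quoted:

struct Big { long a, b, c, d; };

static Big* MakeBig(Big* retBuf) // the implicit ret buf, written out explicitly
{
    *retBuf = Big{1, 2, 3, 4};
    return retBuf; // "address of the implicit RetBuf ... returned explicitly"
}

int main()
{
    Big  slot;
    Big* p = MakeBig(&slot);
    return (p->a == 1) ? 0 : 1;
}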
CLANG_FORMAT_COMMENT_ANCHOR; @@ -18173,7 +18168,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) } } - impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(op1, CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); @@ -18736,7 +18731,7 @@ void Compiler::impImportBlock(BasicBlock* block) if (addStmt != nullptr) { - impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE); + impAppendStmt(addStmt, CHECK_SPILL_NONE); } } diff --git a/src/coreclr/jit/importer_vectorization.cpp b/src/coreclr/jit/importer_vectorization.cpp index a3f2142b1f727..008b8f308c2cb 100644 --- a/src/coreclr/jit/importer_vectorization.cpp +++ b/src/coreclr/jit/importer_vectorization.cpp @@ -857,7 +857,7 @@ GenTree* Compiler::impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* if (unrolled != nullptr) { // We succeeded, fill the placeholders: - impAssignTempGen(spanObjRef, impGetStructAddr(spanObj, spanCls, (unsigned)CHECK_SPILL_NONE, true)); + impAssignTempGen(spanObjRef, impGetStructAddr(spanObj, spanCls, CHECK_SPILL_NONE, true)); impAssignTempGen(spanDataTmp, spanData); if (unrolled->OperIs(GT_QMARK)) { diff --git a/src/coreclr/jit/simd.cpp b/src/coreclr/jit/simd.cpp index 1cd9583cfd7e8..1682c43461fb9 100644 --- a/src/coreclr/jit/simd.cpp +++ b/src/coreclr/jit/simd.cpp @@ -1255,7 +1255,7 @@ GenTree* Compiler::impSIMDPopStack(var_types type, bool expectAddr, CORINFO_CLAS structHandle = ti.GetClassHandleForValueClass(); } - tree = impNormStructVal(tree, structHandle, (unsigned)CHECK_SPILL_ALL); + tree = impNormStructVal(tree, structHandle, CHECK_SPILL_ALL); } // Now set the type of the tree to the specialized SIMD struct type, if applicable. From 79e362bc252fbba51804dca88e81c6f2e07d3185 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Fri, 15 Jul 2022 23:32:22 +0300 Subject: [PATCH 7/8] Don't manually spill for 'st[s]fld' --- src/coreclr/jit/compiler.h | 2 +- src/coreclr/jit/importer.cpp | 20 ++++++-------------- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 4feb70a9090ee..0a9164fc3482f 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -4000,7 +4000,7 @@ class Compiler void impSpillSpecialSideEff(); void impSpillSideEffect(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); - void impSpillLclRefs(ssize_t lclNum, unsigned chkLevel = CHECK_SPILL_ALL); + void impSpillLclRefs(unsigned lclNum, unsigned chkLevel); BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter); diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 26450b28fd2a3..e3008167c3fbc 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -2557,7 +2557,7 @@ inline void Compiler::impSpillSpecialSideEff() // lclNum - The local's number // chkLevel - Height (exclusive) of the portion of the stack to check // -void Compiler::impSpillLclRefs(ssize_t lclNum, unsigned chkLevel /* = CHECK_SPILL_ALL */) +void Compiler::impSpillLclRefs(unsigned lclNum, unsigned chkLevel) { // Before we make any appends to the tree list we must spill the // "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG. 
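Per the new signature, impSpillLclRefs takes the local's number and an exclusive stack height. A toy version of the operation the doc comment describes — assuming chkLevel counts from the bottom of the stack and "spilling" means evaluating the entry into a temp; Entry and SpillLclRefs are sketch names only:

#include <cstdio>
#include <vector>

struct Entry { unsigned lclNum; bool spilled; };

static void SpillLclRefs(std::vector<Entry>& stack, unsigned lclNum, unsigned chkLevel)
{
    // Only entries strictly below the exclusive height are examined.
    for (unsigned i = 0; i < chkLevel && i < stack.size(); i++)
    {
        if (stack[i].lclNum == lclNum)
        {
            stack[i].spilled = true; // in the JIT this would create a new temp
        }
    }
}

int main()
{
    std::vector<Entry> stack = {{3, false}, {5, false}, {3, false}};
    SpillLclRefs(stack, 3, 2); // the reference at height 2 is left alone
    std::printf("%d %d %d\n", stack[0].spilled ? 1 : 0, stack[1].spilled ? 1 : 0,
                stack[2].spilled ? 1 : 0); // 1 0 0
    return 0;
}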
@@ -16230,11 +16230,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) assert(!"Unexpected fieldAccessor"); } - // "impAssignStruct" will back-substitute the field address tree into calls that return things via - // return buffers, so we have to delay calling it until after we have spilled everything needed. - bool deferStructAssign = (lclTyp == TYP_STRUCT); - - if (!deferStructAssign) + if (lclTyp != TYP_STRUCT) { assert(op1->OperIs(GT_FIELD, GT_IND)); @@ -16323,8 +16319,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op1 = gtNewAssignNode(op1, op2); } - /* Check if the class needs explicit initialization */ - + // Check if the class needs explicit initialization. if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); @@ -16338,16 +16333,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) } } - // An indirect store such as "st[s]fld" interferes with indirect accesses, so we must spill - // global refs and potentially aliased locals. - impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD")); - - if (deferStructAssign) + if (lclTyp == TYP_STRUCT) { op1 = impAssignStruct(op1, op2, clsHnd, CHECK_SPILL_ALL); } + + goto SPILL_APPEND; } - goto APPEND; case CEE_NEWARR: { From a22fb7c3aeec99234bbcd0f42d6f75eae0b300df Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Mon, 18 Jul 2022 20:58:59 +0300 Subject: [PATCH 8/8] Revert 'Clean out '(unsigned)CHECK_SPILL_ALL/NONE' casts' --- src/coreclr/jit/compiler.h | 2 +- src/coreclr/jit/fginline.cpp | 6 +- src/coreclr/jit/gentree.cpp | 107 ++++++------ src/coreclr/jit/hwintrinsic.cpp | 2 +- src/coreclr/jit/hwintrinsicarm64.cpp | 6 +- src/coreclr/jit/hwintrinsicxarch.cpp | 4 +- src/coreclr/jit/importer.cpp | 180 +++++++++++---------- src/coreclr/jit/importer_vectorization.cpp | 2 +- src/coreclr/jit/simd.cpp | 2 +- 9 files changed, 159 insertions(+), 152 deletions(-) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 0a9164fc3482f..03d0bc001abc6 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -3813,7 +3813,7 @@ class Compiler void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore); void impAssignTempGen(unsigned tmp, GenTree* val, - unsigned curLevel = CHECK_SPILL_NONE, + unsigned curLevel = (unsigned)CHECK_SPILL_NONE, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index b06f0d11ec68b..9891317505e82 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -1611,7 +1611,7 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) // argTmpNum here since in-linee compiler instance // would have iterated over these and marked them // accordingly. - impAssignTempGen(tmpNum, argNode, structHnd, CHECK_SPILL_NONE, &afterStmt, callDI, block); + impAssignTempGen(tmpNum, argNode, structHnd, (unsigned)CHECK_SPILL_NONE, &afterStmt, callDI, block); // We used to refine the temp type here based on // the actual arg, but we now do this up front, when @@ -1818,8 +1818,8 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) { // Unsafe value cls check is not needed here since in-linee compiler instance would have // iterated over locals and marked accordingly. 
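One plausible reason the casts being restored below are needed at all — an assumption, since the actual sentinel definitions live in the JIT headers — is that the spill constants are negative while the chkLevel parameters are unsigned, so passing them uncast draws sign-conversion warnings. A minimal repro of that situation (the TOY_ macro values here are illustrative, not the real definitions):

#define TOY_CHECK_SPILL_ALL  (-1)
#define TOY_CHECK_SPILL_NONE (-2)

static void AppendStub(int /*tree*/, unsigned /*chkLevel*/) {}

int main()
{
    // Without the casts, -Wsign-conversion-style warnings fire at every call site.
    AppendStub(0, (unsigned)TOY_CHECK_SPILL_ALL);
    AppendStub(0, (unsigned)TOY_CHECK_SPILL_NONE);
    return 0;
}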
- impAssignTempGen(tmpNum, gtNewZeroConNode(genActualType(lclTyp)), NO_CLASS_HANDLE, CHECK_SPILL_NONE, - &afterStmt, callDI, block); + impAssignTempGen(tmpNum, gtNewZeroConNode(genActualType(lclTyp)), NO_CLASS_HANDLE, + (unsigned)CHECK_SPILL_NONE, &afterStmt, callDI, block); } else { diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index e8375d3b0f2c7..b15378b8e9b97 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -15354,7 +15354,7 @@ GenTree* Compiler::gtNewTempAssign( } dest->gtFlags |= GTF_DONT_CSE; valx->gtFlags |= GTF_DONT_CSE; - asg = impAssignStruct(dest, val, valStructHnd, CHECK_SPILL_NONE, pAfterStmt, di, block); + asg = impAssignStruct(dest, val, valStructHnd, (unsigned)CHECK_SPILL_NONE, pAfterStmt, di, block); } else { @@ -15408,7 +15408,7 @@ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr, if (pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT) { assert(structType != nullptr); - assg = impGetStructAddr(assg, structType, CHECK_SPILL_ALL, true); + assg = impGetStructAddr(assg, structType, (unsigned)CHECK_SPILL_ALL, true); } else if (lclTyp == TYP_DOUBLE && assg->TypeGet() == TYP_FLOAT) { @@ -15484,7 +15484,7 @@ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr, if (!varTypeIsStruct(lclTyp)) { // get the result as primitive type - result = impGetStructAddr(result, structType, CHECK_SPILL_ALL, true); + result = impGetStructAddr(result, structType, (unsigned)CHECK_SPILL_ALL, true); result = gtNewOperNode(GT_IND, lclTyp, result); } } @@ -15514,7 +15514,7 @@ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr, { if (varTypeIsStruct(lclTyp)) { - result = impAssignStructPtr(result, assg, structType, CHECK_SPILL_ALL); + result = impAssignStructPtr(result, assg, structType, (unsigned)CHECK_SPILL_ALL); } else { @@ -18767,11 +18767,12 @@ GenTree* Compiler::gtNewSimdAbsNode( CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); GenTree* op1Dup1; - op1 = impCloneExpr(op1, &op1Dup1, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector abs")); + op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL, + nullptr DEBUGARG("Clone op1 for vector abs")); GenTree* op1Dup2; - op1Dup1 = - impCloneExpr(op1Dup1, &op1Dup2, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector abs")); + op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL, + nullptr DEBUGARG("Clone op1 for vector abs")); // op1 = op1 < Zero tmp = gtNewZeroConNode(type, simdBaseJitType); @@ -19071,12 +19072,12 @@ GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op, { // op1Dup = op1 GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector multiply")); // op2Dup = op2 GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector multiply")); // op1 = Sse2.ShiftRightLogical128BitLane(op1, 4) @@ -19612,7 +19613,7 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, GenTree* tmp = gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); - tmp = impCloneExpr(tmp, &op1, clsHnd, CHECK_SPILL_ALL, + tmp = impCloneExpr(tmp, &op1, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp for vector Equals")); op2 = gtNewSimdHWIntrinsicNode(type, tmp, gtNewIconNode(SHUFFLE_ZWXY), NI_SSE2_Shuffle, @@ 
-19662,11 +19663,11 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // result = BitwiseOr(op1, op2) GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector GreaterThanOrEqual")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector GreaterThanOrEqual")); op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); @@ -19745,7 +19746,7 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic); GenTree* constVectorDup; - constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, CHECK_SPILL_ALL, + constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone constVector for vector GreaterThan")); // op1 = op1 - constVector @@ -19814,19 +19815,19 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // result = BitwiseOr(op1, op2) GenTree* op1Dup1; - op1 = impCloneExpr(op1, &op1Dup1, clsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector GreaterThan")); GenTree* op1Dup2; - op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, CHECK_SPILL_ALL, + op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector GreaterThan")); GenTree* op2Dup1; - op2 = impCloneExpr(op2, &op2Dup1, clsHnd, CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector GreaterThan")); GenTree* op2Dup2; - op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, CHECK_SPILL_ALL, + op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 vector GreaterThan")); GenTree* t = @@ -19888,11 +19889,11 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // result = BitwiseOr(op1, op2) GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector LessThanOrEqual")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector LessThanOrEqual")); op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); @@ -19971,7 +19972,7 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic); GenTree* constVectorDup; - constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, CHECK_SPILL_ALL, + constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone constVector for vector LessThan")); // op1 = op1 - constVector @@ -20040,19 +20041,19 @@ GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op, // result = BitwiseOr(op1, op2) GenTree* op1Dup1; - op1 = impCloneExpr(op1, &op1Dup1, clsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector LessThan")); GenTree* op1Dup2; - op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, CHECK_SPILL_ALL, + op1Dup1 = 
impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector LessThan")); GenTree* op2Dup1; - op2 = impCloneExpr(op2, &op2Dup1, clsHnd, CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector LessThan")); GenTree* op2Dup2; - op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, CHECK_SPILL_ALL, + op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 vector LessThan")); GenTree* t = @@ -20757,11 +20758,11 @@ GenTree* Compiler::gtNewSimdMaxNode(var_types type, gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); GenTree* constVectorDup1; - constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, CHECK_SPILL_ALL, + constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone constVector for vector Max")); GenTree* constVectorDup2; - constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, CHECK_SPILL_ALL, + constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone constVector for vector Max")); // op1 = op1 - constVector @@ -20835,10 +20836,10 @@ GenTree* Compiler::gtNewSimdMaxNode(var_types type, } GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Max")); + op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Max")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Max")); + op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Max")); // op1 = op1 > op2 op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); @@ -20941,11 +20942,11 @@ GenTree* Compiler::gtNewSimdMinNode(var_types type, gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic); GenTree* constVectorDup1; - constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, CHECK_SPILL_ALL, + constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone constVector for vector Min")); GenTree* constVectorDup2; - constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, CHECK_SPILL_ALL, + constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone constVector for vector Min")); // op1 = op1 - constVector @@ -21019,10 +21020,10 @@ GenTree* Compiler::gtNewSimdMinNode(var_types type, } GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Min")); + op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Min")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Min")); + op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Min")); // op1 = op1 < op2 op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); @@ -21080,7 +21081,7 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, simdSize, isSimdAsHWIntrinsic); GenTree* tmp1Dup; - tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, 
CHECK_SPILL_ALL, + tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp1 for vector narrow")); tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize, @@ -21121,7 +21122,7 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, simdSize, isSimdAsHWIntrinsic); GenTree* tmp1Dup; - tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, CHECK_SPILL_ALL, + tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp1 for vector narrow")); tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize, @@ -21158,11 +21159,11 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic); GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector narrow")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector narrow")); tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize, @@ -21232,7 +21233,7 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, simdSize, isSimdAsHWIntrinsic); GenTree* tmp1Dup; - tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, CHECK_SPILL_ALL, + tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp1 for vector narrow")); tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize, @@ -21272,7 +21273,7 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, simdSize, isSimdAsHWIntrinsic); GenTree* tmp1Dup; - tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, CHECK_SPILL_ALL, + tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp1 for vector narrow")); tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize, @@ -21300,11 +21301,11 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, // return Sse2.UnpackLow(tmp3, tmp4).As(); GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector narrow")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector narrow")); tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize, @@ -21315,11 +21316,11 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); GenTree* tmp1Dup; - tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, CHECK_SPILL_ALL, + tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp1 for vector narrow")); GenTree* tmp2Dup; - tmp2 = impCloneExpr(tmp2, &tmp2Dup, clsHnd, CHECK_SPILL_ALL, + tmp2 = impCloneExpr(tmp2, &tmp2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone tmp2 for vector narrow")); tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize, @@ -21350,11 +21351,11 @@ GenTree* Compiler::gtNewSimdNarrowNode(var_types type, CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, 
opBaseJitType, isSimdAsHWIntrinsic); GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector narrow")); GenTree* op2Dup; - op2 = impCloneExpr(op2, &op2Dup, clsHnd, CHECK_SPILL_ALL, + op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector narrow")); tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize, @@ -21853,7 +21854,7 @@ GenTree* Compiler::gtNewSimdSumNode( for (int i = 0; i < haddCount; i++) { - op1 = impCloneExpr(op1, &tmp, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum")); + op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum")); op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } @@ -21861,7 +21862,7 @@ GenTree* Compiler::gtNewSimdSumNode( { intrinsic = (simdBaseType == TYP_FLOAT) ? NI_SSE_Add : NI_SSE2_Add; - op1 = impCloneExpr(op1, &tmp, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum")); + op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum")); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode(0x01, TYP_INT), NI_AVX_ExtractVector128, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); @@ -21889,7 +21890,8 @@ GenTree* Compiler::gtNewSimdSumNode( { if (simdSize == 8) { - op1 = impCloneExpr(op1, &tmp, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum")); + op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, + nullptr DEBUGARG("Clone op1 for vector sum")); tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } @@ -21915,8 +21917,8 @@ GenTree* Compiler::gtNewSimdSumNode( for (int i = 0; i < haddCount; i++) { - op1 = - impCloneExpr(op1, &tmp, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum")); + op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, + nullptr DEBUGARG("Clone op1 for vector sum")); op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } @@ -22146,7 +22148,7 @@ GenTree* Compiler::gtNewSimdWidenLowerNode( CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector widen lower")); tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize, @@ -22269,7 +22271,8 @@ GenTree* Compiler::gtNewSimdWidenUpperNode( CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); GenTree* op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector widen upper")); + op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, + nullptr DEBUGARG("Clone op1 for vector widen upper")); tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_SSE_MoveHighToLow, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); @@ -22322,7 +22325,7 @@ GenTree* Compiler::gtNewSimdWidenUpperNode( CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic); GenTree* 
op1Dup; - op1 = impCloneExpr(op1, &op1Dup, clsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector widen upper")); tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize, diff --git a/src/coreclr/jit/hwintrinsic.cpp b/src/coreclr/jit/hwintrinsic.cpp index f654d8d2b3b86..00bd37ec5112f 100644 --- a/src/coreclr/jit/hwintrinsic.cpp +++ b/src/coreclr/jit/hwintrinsic.cpp @@ -580,7 +580,7 @@ GenTree* Compiler::addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound GenTree* immOpDup = nullptr; - immOp = impCloneExpr(immOp, &immOpDup, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + immOp = impCloneExpr(immOp, &immOpDup, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone an immediate operand for immediate value bounds check")); if (immLowerBound != 0) diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp index db17cf51f2696..247c546668429 100644 --- a/src/coreclr/jit/hwintrinsicarm64.cpp +++ b/src/coreclr/jit/hwintrinsicarm64.cpp @@ -957,7 +957,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { CORINFO_CLASS_HANDLE simdClsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType); - op1 = impCloneExpr(op1, &op2, simdClsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op2, simdClsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector extractmostsignificantbits")); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_Vector128_GetLower, simdBaseJitType, simdSize, @@ -992,7 +992,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { CORINFO_CLASS_HANDLE simdClsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType); - op1 = impCloneExpr(op1, &op2, simdClsHnd, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op2, simdClsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector extractmostsignificantbits")); op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, op2, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false); @@ -1810,7 +1810,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(HWIntrinsicInfo::IsMultiReg(intrinsic)); const unsigned lclNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg intrinsic")); - impAssignTempGen(lclNum, loadIntrinsic, sig->retTypeSigClass, CHECK_SPILL_ALL); + impAssignTempGen(lclNum, loadIntrinsic, sig->retTypeSigClass, (unsigned)CHECK_SPILL_ALL); LclVarDsc* varDsc = lvaGetDesc(lclNum); // The following is to exclude the fields of the local to have SSA. 
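The multi-register intrinsic path above stores the whole result to one temp before its fields are touched. A rough model of that shape — a two-register return approximated by a small struct; LoadPair is a stand-in, not the intrinsic:

#include <cstdio>

struct Pair { long lo, hi; };

static Pair LoadPair() { return Pair{1, 2}; } // result arrives in two registers

int main()
{
    Pair tmp = LoadPair(); // plays the role of impAssignTempGen(lclNum, loadIntrinsic, ...)
    std::printf("%ld %ld\n", tmp.lo, tmp.hi); // fields read back from the temp
    return 0;
}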
diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp index e1d25a621789a..01629a3080b1e 100644 --- a/src/coreclr/jit/hwintrinsicxarch.cpp +++ b/src/coreclr/jit/hwintrinsicxarch.cpp @@ -2540,7 +2540,7 @@ GenTree* Compiler::impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAND else { GenTree* clonedOp1 = nullptr; - op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for Sse.CompareScalarGreaterThan")); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, simdBaseJitType, simdSize); @@ -2620,7 +2620,7 @@ GenTree* Compiler::impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAN else { GenTree* clonedOp1 = nullptr; - op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for Sse2.CompareScalarGreaterThan")); retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, simdBaseJitType, simdSize); diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index e3008167c3fbc..4d5305933f104 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -431,12 +431,12 @@ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) return; #else - if (chkLevel == CHECK_SPILL_ALL) + if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } - if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == CHECK_SPILL_NONE) + if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE) { return; } @@ -498,12 +498,12 @@ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) // void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo) { - if (chkLevel == CHECK_SPILL_ALL) + if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } - if ((chkLevel != 0) && (chkLevel != CHECK_SPILL_NONE)) + if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE)) { assert(chkLevel <= verCurrentState.esStackDepth); @@ -2180,7 +2180,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken if (pRuntimeLookup->testForNull) { - slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup slot")); } @@ -2192,7 +2192,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken { if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { - indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } @@ -2219,7 +2219,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken { if (isLastIndirectionWithSizeCheck) { - lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } @@ -2247,7 +2247,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* 
pResolvedToken impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0")); unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test")); - impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr, impCurStmtDI); + impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI); GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); // downcast the pointer to a TYP_INT on 64-bit targets @@ -2267,7 +2267,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken GenTree* asg = gtNewAssignNode(slot, indir); GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg); GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon); - impAppendTree(qmark, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); return gtNewLclvNode(slotLclNum, TYP_I_IMPL); } @@ -2328,7 +2328,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree")); - impAssignTempGen(tmp, result, CHECK_SPILL_NONE); + impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE); return gtNewLclvNode(tmp, TYP_I_IMPL); } @@ -2467,7 +2467,7 @@ void Compiler::impSpillStackEnsure(bool spillLeaves) inline void Compiler::impEvalSideEffects() { - impSpillSideEffects(false, CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); + impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); verCurrentState.esStackDepth = 0; } @@ -2504,14 +2504,14 @@ void Compiler::impSpillSideEffect(bool spillGlobEffects, unsigned i DEBUGARG(con inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)) { - assert(chkLevel != CHECK_SPILL_NONE); + assert(chkLevel != (unsigned)CHECK_SPILL_NONE); /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */ impSpillSpecialSideEff(); - if (chkLevel == CHECK_SPILL_ALL) + if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } @@ -2842,7 +2842,7 @@ void Compiler::impNoteBranchOffs() { if (opts.compDbgCode) { - impAppendTree(gtNewNothingNode(), CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } } @@ -3558,8 +3558,8 @@ GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue); // Now append a few statements the initialize the span - impAppendTree(lengthFieldAsg, CHECK_SPILL_NONE, impCurStmtDI); - impAppendTree(pointerFieldAsg, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // And finally create a tree that points at the span. 
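The span-creation hunk above appends exactly two field assignments, one for the data pointer and one for the length. A sketch of the value those statements build, assuming the usual pointer-plus-length layout (ToySpan and its field names are illustrative):

#include <cstdio>

struct ToySpan { const int* ptr; int length; };

int main()
{
    static const int data[] = {1, 2, 3};
    ToySpan span;
    span.ptr    = data; // the "pointerFieldAsg" statement
    span.length = 3;    // the "lengthFieldAsg" statement
    std::printf("%d\n", span.ptr[span.length - 1]); // 3
    return 0;
}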
return impCreateLocalNode(spanTempNum DEBUGARG(0)); @@ -3877,7 +3877,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); - impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, CHECK_SPILL_NONE); + impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE); GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL); GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar); @@ -3930,9 +3930,9 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, #endif // defined(DEBUG) // We need to use both index and ptr-to-span twice, so clone or spill. - index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item index")); - ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item ptrToSpan")); // Bounds check @@ -4424,7 +4424,8 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, if (cnsNode->IsFloatNaN()) { - impSpillSideEffects(false, CHECK_SPILL_ALL DEBUGARG("spill side effects before propagating NaN")); + impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( + "spill side effects before propagating NaN")); // maxsd, maxss, minsd, and minss all return op2 if either is NaN // we require NaN to be propagated so ensure the known NaN is op2 @@ -4675,7 +4676,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, GenTree* gtArrClone = nullptr; if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound)) { - gtArr = impCloneExpr(gtArr, >ArrClone, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + gtArr = impCloneExpr(gtArr, >ArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("MD intrinsics array")); } @@ -6561,7 +6562,7 @@ void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGA GenTree* op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewIconNode(block->bbCodeOffs)); // verCurrentState.esStackDepth = 0; - impAppendTree(op1, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // The inliner is not able to handle methods that require throw block, so // make sure this methods never gets inlined. @@ -7797,7 +7798,8 @@ int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, GenTree* objToBox = impPopStack().val; // Spill struct to get its address (to access hasValue field) - objToBox = impGetStructAddr(objToBox, nullableCls, CHECK_SPILL_ALL, true); + objToBox = + impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true); impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0), typeInfo(TI_INT)); @@ -7995,7 +7997,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) // Assign the boxed object to the box temp. // GenTree* asg = gtNewTempAssign(impBoxTemp, op1); - Statement* asgStmt = impAppendTree(asg, CHECK_SPILL_NONE, impCurStmtDI); + Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // If the exprToBox is a call that returns its value via a ret buf arg, // move the assignment statement(s) before the call (which must be a top level tree). 
@@ -8077,7 +8079,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) if (varTypeIsStruct(exprToBox)) { assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls)); - op1 = impAssignStructPtr(op1, exprToBox, operCls, CHECK_SPILL_ALL); + op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL); } else { @@ -8114,10 +8116,10 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) } // Spill eval stack to flush out any pending side effects. - impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox")); + impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox")); // Set up this copy as a second assignment. - Statement* copyStmt = impAppendTree(op1, CHECK_SPILL_NONE, impCurStmtDI); + Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); op1 = gtNewLclvNode(impBoxTemp, TYP_REF); @@ -8149,7 +8151,8 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) return; } - op1 = gtNewHelperCallNode(boxHelper, TYP_REF, op2, impGetStructAddr(exprToBox, operCls, CHECK_SPILL_ALL, true)); + op1 = gtNewHelperCallNode(boxHelper, TYP_REF, op2, + impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true)); } /* Push the result back on the stack, */ @@ -8205,7 +8208,7 @@ void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORI // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments // to one allocation at a time. - impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray")); + impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray")); // // The arguments of the CORINFO_HELP_NEW_MDARR helper are: @@ -9161,7 +9164,7 @@ void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo) callout->gtArgs.PushFront(this, NewCallArg::Primitive(currentArg)); } - impAppendTree(callout, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } //------------------------------------------------------------------------ @@ -9642,7 +9645,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // it may cause registered args to be spilled. Simply spill it. 
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup")); - impAssignTempGen(lclNum, stubAddr, CHECK_SPILL_NONE); + impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE); stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node @@ -9729,7 +9732,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // Clone the (possibly transformed) "this" pointer GenTree* thisPtrCopy; - thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("LDVIRTFTN this pointer")); GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo); @@ -9741,7 +9744,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer")); - impAssignTempGen(lclNum, fptr, CHECK_SPILL_ALL); + impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call->AsCall()->gtCallAddr = fptr; @@ -9812,7 +9815,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); - impAssignTempGen(lclNum, fptr, CHECK_SPILL_ALL); + impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, di); @@ -10332,7 +10335,7 @@ var_types Compiler::impImportCall(OPCODE opcode, } // append the call node. - impAppendTree(call, CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Now push the value of the 'new onto the stack @@ -10649,7 +10652,7 @@ var_types Compiler::impImportCall(OPCODE opcode, } else { - impAppendTree(call, CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } } else @@ -10702,7 +10705,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // important if we give up on the inline, in which case the // call will typically end up in the statement that contains // the GT_RET_EXPR that we leave on the stack. - impAppendTree(call, CHECK_SPILL_ALL, impCurStmtDI, false); + impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false); // TODO: Still using the widened type. GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags); @@ -10743,7 +10746,7 @@ var_types Compiler::impImportCall(OPCODE opcode, unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli")); LclVarDsc* varDsc = lvaGetDesc(calliSlot); varDsc->lvVerTypeInfo = tiRetVal; - impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), CHECK_SPILL_NONE); + impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE); // impAssignTempGen can change src arg list and return type for call that returns struct. var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); @@ -11128,7 +11131,7 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer")); // No need to spill anything as we're about to return. 
- impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, CHECK_SPILL_NONE); + impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE); op = gtNewLclvNode(tmpNum, info.compRetType); JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n"); @@ -11169,7 +11172,7 @@ void Compiler::impImportLeave(BasicBlock* block) // LEAVE clears the stack, spill side effects, and set stack to 0 - impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); + impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); @@ -11243,7 +11246,7 @@ void Compiler::impImportLeave(BasicBlock* block) callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) - impAppendTree(endCatches, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) @@ -11330,7 +11333,7 @@ void Compiler::impImportLeave(BasicBlock* block) block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) - impAppendTree(endCatches, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) @@ -11429,7 +11432,7 @@ void Compiler::impImportLeave(BasicBlock* block) // LEAVE clears the stack, spill side effects, and set stack to 0 - impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); + impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); @@ -12425,7 +12428,7 @@ GenTree* Compiler::impCastClassOrIsInstToTree( // This can replace op1 with a GT_COMMA that evaluates op1 into a local // - op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); + op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); // // op1 is now known to be a non-complex tree // thus we can use gtClone(op1) from now on @@ -12509,7 +12512,7 @@ GenTree* Compiler::impCastClassOrIsInstToTree( // Make QMark node a top level node by spilling it. unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2")); - impAssignTempGen(tmp, qmarkNull, CHECK_SPILL_NONE); + impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE); // TODO-CQ: Is it possible op1 has a better type? 
// @@ -12964,7 +12967,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (impCurStmtDI.IsValid() && opts.compDbgCode) { GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); - impAppendTree(placeHolder, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); assert(!impCurStmtDI.IsValid()); } @@ -13131,11 +13134,11 @@ void Compiler::impImportBlockCode(BasicBlock* block) goto DECODE_OPCODE; SPILL_APPEND: - impAppendTree(op1, CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); goto DONE_APPEND; APPEND: - impAppendTree(op1, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); goto DONE_APPEND; DONE_APPEND: @@ -13930,8 +13933,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT) { - impSpillSideEffects(false, - CHECK_SPILL_ALL DEBUGARG("Strict ordering of exceptions for Array store")); + impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( + "Strict ordering of exceptions for Array store")); } // Pull the new value from the stack. @@ -13959,7 +13962,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Create the assignment node and append it. if (varTypeIsStruct(op1)) { - op1 = impAssignStruct(op1, op2, stelemClsHnd, CHECK_SPILL_ALL); + op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL); } else { @@ -14494,15 +14497,15 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (op1->gtFlags & GTF_GLOB_EFFECT) { - impSpillSideEffects(false, - CHECK_SPILL_ALL DEBUGARG("Branch to next Optimization, op1 side effect")); - impAppendTree(gtUnusedValNode(op1), CHECK_SPILL_NONE, impCurStmtDI); + impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( + "Branch to next Optimization, op1 side effect")); + impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } if (op2->gtFlags & GTF_GLOB_EFFECT) { - impSpillSideEffects(false, - CHECK_SPILL_ALL DEBUGARG("Branch to next Optimization, op2 side effect")); - impAppendTree(gtUnusedValNode(op2), CHECK_SPILL_NONE, impCurStmtDI); + impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( + "Branch to next Optimization, op2 side effect")); + impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } #ifdef DEBUG @@ -14868,7 +14871,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } else { - op1 = impGetStructAddr(op1, clsHnd, CHECK_SPILL_ALL, false); + op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false); } JITDUMP("\n ... 
optimized to ...\n"); @@ -14946,7 +14949,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (!cloneExpr) { const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill")); - impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), CHECK_SPILL_ALL); + impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types type = genActualType(lvaTable[tmpNum].TypeGet()); op1 = gtNewLclvNode(tmpNum, type); @@ -14960,7 +14963,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } } - op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("DUP instruction")); assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT)); @@ -15260,7 +15263,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); - impAppendTree(op1, CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } @@ -15270,7 +15273,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); - impAppendTree(op1, CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } @@ -15488,7 +15491,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock - impAppendTree(newObjThisPtr, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } else { @@ -15512,7 +15515,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (hasSideEffects) { JITDUMP("\nSpilling stack for finalizable newobj\n"); - impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill")); + impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill")); } const bool useParent = true; @@ -15535,7 +15538,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes // without exhaustive walk over all expressions. 
- impAssignTempGen(lclNum, op1, CHECK_SPILL_NONE); + impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE); assert(lvaTable[lclNum].lvSingleDef == 0); lvaTable[lclNum].lvSingleDef = 1; @@ -15839,7 +15842,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); - impAppendTree(obj, CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } @@ -15865,7 +15868,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) { assert(opcode == CEE_LDFLD && objType != nullptr); - obj = impGetStructAddr(obj, objType, CHECK_SPILL_ALL, true); + obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true); } /* Create the data member node */ @@ -16148,7 +16151,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); - impAppendTree(obj, CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } @@ -16337,7 +16340,6 @@ void Compiler::impImportBlockCode(BasicBlock* block) { op1 = impAssignStruct(op1, op2, clsHnd, CHECK_SPILL_ALL); } - goto SPILL_APPEND; } @@ -16636,7 +16638,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op1 = impPopStack().val; // make certain it is normalized; - op1 = impNormStructVal(op1, impGetRefAnyClass(), CHECK_SPILL_ALL); + op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); // Call helper GETREFANY(classHandle, op1); GenTreeCall* helperCall = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF); @@ -16654,7 +16656,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op1 = impPopStack().val; // make certain it is normalized; - op1 = impNormStructVal(op1, impGetRefAnyClass(), CHECK_SPILL_ALL); + op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); if (op1->gtOper == GT_OBJ) { @@ -16673,7 +16675,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // The pointer may have side-effects if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT) { - impAppendTree(op1->AsOp()->gtOp1, CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); #ifdef DEBUG impNoteLastILoffs(); #endif @@ -16813,7 +16815,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (opcode == CEE_UNBOX) { GenTree* cloneOperand; - op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("optimized unbox clone")); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); @@ -16854,13 +16856,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) // push(clone + TARGET_POINTER_SIZE) // GenTree* cloneOperand; - op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone1")); op1 = gtNewMethodTableLookup(op1); GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2); - op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, CHECK_SPILL_ALL, + op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone2")); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) @@ -16876,7 +16878,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // may be other trees on the evaluation stack that side-effect 
the // sources of the UNBOX operation we must spill the stack. - impAppendTree(op1, CHECK_SPILL_ALL, impCurStmtDI); + impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Create the address-expression to reference past the object header // to the beginning of the value-type. Today this means adjusting @@ -16938,7 +16940,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); - op1 = impAssignStruct(op2, op1, resolvedToken.hClass, CHECK_SPILL_ALL); + op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); @@ -16976,7 +16978,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); - op1 = impAssignStruct(op2, op1, resolvedToken.hClass, CHECK_SPILL_ALL); + op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); @@ -17360,7 +17362,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) assertImp(varTypeIsStruct(op2)); - op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, CHECK_SPILL_ALL); + op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED)) { @@ -17709,7 +17711,7 @@ GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return")); - impAssignTempGen(tmpNum, op, hClass, CHECK_SPILL_ALL); + impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL); GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. 
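A note on a convention that appears throughout these hunks: call sites of the form "impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("reason"))" are not missing a comma. DEBUGARG supplies its own leading comma in DEBUG builds and expands to nothing otherwise, which is why the cast sits directly against it. A simplified reconstruction of the idea (the real definitions live in the JIT headers and may differ in detail):

    #ifdef DEBUG
    #define DEBUGARG(x) , x  // DEBUG builds pass the extra argument (usually a reason string)
    #else
    #define DEBUGARG(x)      // release builds compile the argument away entirely
    #endif

    // One call site serves both build flavors:
    //   impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("reason"));
    // expands to
    //   impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL, "reason");  // DEBUG
    //   impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL);            // release
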
@@ -17904,7 +17906,8 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) } } - impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), CHECK_SPILL_ALL); + impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), + (unsigned)CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); @@ -17949,7 +17952,8 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); - impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), CHECK_SPILL_ALL); + impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), + (unsigned)CHECK_SPILL_ALL); } #if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) @@ -18054,12 +18058,12 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) { impInlineInfo->retExpr = impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), - retClsHnd, CHECK_SPILL_ALL); + retClsHnd, (unsigned)CHECK_SPILL_ALL); } } else { - impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, CHECK_SPILL_ALL); + impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); } } } @@ -18087,8 +18091,8 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset())); - op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, CHECK_SPILL_ALL); - impAppendTree(op2, CHECK_SPILL_NONE, impCurStmtDI); + op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); + impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX). 
CLANG_FORMAT_COMMENT_ANCHOR; @@ -18160,7 +18164,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) } } - impAppendTree(op1, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); @@ -18723,7 +18727,7 @@ void Compiler::impImportBlock(BasicBlock* block) if (addStmt != nullptr) { - impAppendStmt(addStmt, CHECK_SPILL_NONE); + impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE); } } diff --git a/src/coreclr/jit/importer_vectorization.cpp b/src/coreclr/jit/importer_vectorization.cpp index 008b8f308c2cb..a3f2142b1f727 100644 --- a/src/coreclr/jit/importer_vectorization.cpp +++ b/src/coreclr/jit/importer_vectorization.cpp @@ -857,7 +857,7 @@ GenTree* Compiler::impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* if (unrolled != nullptr) { // We succeeded, fill the placeholders: - impAssignTempGen(spanObjRef, impGetStructAddr(spanObj, spanCls, CHECK_SPILL_NONE, true)); + impAssignTempGen(spanObjRef, impGetStructAddr(spanObj, spanCls, (unsigned)CHECK_SPILL_NONE, true)); impAssignTempGen(spanDataTmp, spanData); if (unrolled->OperIs(GT_QMARK)) { diff --git a/src/coreclr/jit/simd.cpp b/src/coreclr/jit/simd.cpp index 1682c43461fb9..1cd9583cfd7e8 100644 --- a/src/coreclr/jit/simd.cpp +++ b/src/coreclr/jit/simd.cpp @@ -1255,7 +1255,7 @@ GenTree* Compiler::impSIMDPopStack(var_types type, bool expectAddr, CORINFO_CLAS structHandle = ti.GetClassHandleForValueClass(); } - tree = impNormStructVal(tree, structHandle, CHECK_SPILL_ALL); + tree = impNormStructVal(tree, structHandle, (unsigned)CHECK_SPILL_ALL); } // Now set the type of the tree to the specialized SIMD struct type, if applicable.
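Taken together, the hunks in this patch are mechanical: every spot that passed CHECK_SPILL_ALL or CHECK_SPILL_NONE to a parameter typed "unsigned" now spells that conversion out with an explicit cast. Below is a minimal standalone sketch of the pattern, under the assumption (hypothetical; the actual JIT definitions and the names used here, such as impAppendTreeSketch, are illustration only) that the sentinels are signed constants:

    #include <cstdio>

    // Hypothetical signed sentinel definitions, for illustration only.
    static const int CHECK_SPILL_ALL  = -1;
    static const int CHECK_SPILL_NONE = -2;

    // Stand-in for the importer entry points, which take the check level as unsigned.
    static void impAppendTreeSketch(unsigned chkLevel)
    {
        // After conversion the sentinels become the two largest unsigned values,
        // so they can never collide with a real evaluation-stack depth.
        if (chkLevel == (unsigned)CHECK_SPILL_ALL)
        {
            std::printf("spill all pending stack entries\n");
        }
        else if (chkLevel == (unsigned)CHECK_SPILL_NONE)
        {
            std::printf("spill nothing\n");
        }
    }

    int main()
    {
        // Mirrors the call-site pattern introduced above: the signed-to-unsigned
        // conversion is written explicitly rather than left implicit.
        impAppendTreeSketch((unsigned)CHECK_SPILL_ALL);
        impAppendTreeSketch((unsigned)CHECK_SPILL_NONE);
        return 0;
    }

If that assumption holds, the cast is a no-op at runtime: it only documents the wrap-around and keeps the call sites quiet under compilers that warn on implicit sign conversion.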