diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp
index dfc29a621d4652..a748e130a128c0 100644
--- a/src/coreclr/jit/codegenarmarch.cpp
+++ b/src/coreclr/jit/codegenarmarch.cpp
@@ -3285,7 +3285,7 @@ void CodeGen::genCall(GenTreeCall* call)
 
     // If there is nothing next, that means the result is thrown away, so this value is not live.
    // However, for minopts or debuggable code, we keep it live to support managed return value debugging.
-    if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
+    if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.DbgCode())
     {
         gcInfo.gcMarkRegSetNpt(RBM_INTRET);
     }
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index 1be119806114c4..93a6659a15707f 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -1799,7 +1799,7 @@ void CodeGen::genGenerateMachineCode()
         {
             printf("; EnC code\n");
         }
-        else if (compiler->opts.compDbgCode)
+        else if (compiler->opts.DbgCode())
         {
             printf("; debuggable code\n");
         }
@@ -4588,7 +4588,7 @@ void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg,
             // or when in debug code
 
             noway_assert(varTypeIsGC(varDsc->TypeGet()) || (varDsc->TypeGet() == TYP_STRUCT) ||
-                         compiler->info.compInitMem || compiler->opts.compDbgCode);
+                         compiler->info.compInitMem || compiler->opts.DbgCode());
 
             if (!varDsc->lvOnFrame)
             {
@@ -6918,7 +6918,7 @@ void CodeGen::genSetScopeInfo(unsigned which,
         PREFIX_ASSUME(compiler->lvaVarargsHandleArg < compiler->info.compArgsCount);
         if (!compiler->lvaGetDesc(compiler->lvaVarargsHandleArg)->lvOnFrame)
         {
-            noway_assert(!compiler->opts.compDbgCode);
+            noway_assert(!compiler->opts.DbgCode());
             return;
         }
@@ -7198,7 +7198,7 @@ void CodeGen::genIPmappingAddToFront(IPmappingDscKind kind, const DebugInfo& di,
 
 void CodeGen::genEnsureCodeEmitted(const DebugInfo& di)
 {
-    if (!compiler->opts.compDbgCode)
+    if (!compiler->opts.DbgCode())
     {
         return;
     }
@@ -7343,7 +7343,7 @@ void CodeGen::genIPmappingGen()
     //different source lines. As a result, we have all sorts of latent problems with how we emit debug
     //info, but very few actual ones. Whenever someone wants to tackle that problem in general, turn this
     //assert back on.
-    if (compiler->opts.compDbgCode)
+    if (compiler->opts.DbgCode())
     {
         //Assert that the first instruction of every basic block with more than one incoming edge has a
         //different sequence point from each incoming block.
diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp
index c11671168675bb..37c5a71a722c0d 100644
--- a/src/coreclr/jit/codegenloongarch64.cpp
+++ b/src/coreclr/jit/codegenloongarch64.cpp
@@ -7084,7 +7084,7 @@ void CodeGen::genCall(GenTreeCall* call)
 
     // If there is nothing next, that means the result is thrown away, so this value is not live.
     // However, for minopts or debuggable code, we keep it live to support managed return value debugging.
-    if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
+    if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.DbgCode())
     {
         gcInfo.gcMarkRegSetNpt(RBM_INTRET);
     }
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index 671d5692ac4576..f8197d7dc2f696 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -1914,10 +1914,10 @@ void Compiler::compInit(ArenaAllocator* pAlloc,
     compRationalIRForm = false;
 
 #ifdef DEBUG
-    compCodeGenDone        = false;
-    opts.compMinOptsIsUsed = false;
+    compCodeGenDone = false;
 #endif
-    opts.compMinOptsIsSet = false;
+
+    opts.InitializeMinOpts();
 
     // Used by fgFindJumpTargets for inlining heuristics.
     opts.instrCount = 0;
@@ -2428,7 +2428,7 @@ void Compiler::compInitOptions(JitFlags* jitFlags)
 
     //-------------------------------------------------------------------------
 
-    opts.compDbgCode = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE);
+    opts.SetDbgCode(jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE));
     opts.compDbgInfo = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_INFO);
     opts.compDbgEnC  = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC);
 
@@ -2487,7 +2487,7 @@ void Compiler::compInitOptions(JitFlags* jitFlags)
 
 #if REGEN_SHORTCUTS || REGEN_CALLPAT
     // We never want to have debugging enabled when regenerating GC encoding patterns
-    opts.compDbgCode = false;
+    opts.SetDbgCode(false);
     opts.compDbgInfo = false;
     opts.compDbgEnC  = false;
 #endif
@@ -3199,8 +3199,8 @@ void Compiler::compInitOptions(JitFlags* jitFlags)
 
     if (opts.compProcedureSplitting)
     {
-        // Note that opts.compdbgCode is true under ngen for checked assemblies!
-        opts.compProcedureSplitting = !opts.compDbgCode || enableFakeSplitting;
+        // Note that opts.DbgCode() is true under ngen for checked assemblies!
+        opts.compProcedureSplitting = !opts.DbgCode() || enableFakeSplitting;
 
 #ifdef DEBUG
         // JitForceProcedureSplitting is used to force procedure splitting on checked assemblies.
@@ -3289,7 +3289,7 @@ void Compiler::compInitOptions(JitFlags* jitFlags)
                    : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE"
                    : (opts.compCodeOpt == FAST_CODE)  ? "FAST_CODE"
                                                       : "UNKNOWN_CODE");
-        printf("OPTIONS: compDbgCode = %s\n", dspBool(opts.compDbgCode));
+        printf("OPTIONS: compDbgCode = %s\n", dspBool(opts.DbgCode()));
         printf("OPTIONS: compDbgInfo = %s\n", dspBool(opts.compDbgInfo));
         printf("OPTIONS: compDbgEnC  = %s\n", dspBool(opts.compDbgEnC));
         printf("OPTIONS: compProcedureSplitting = %s\n", dspBool(opts.compProcedureSplitting));
@@ -3555,12 +3555,12 @@ void Compiler::compInitDebuggingInfo()
 
     compInitVarScopeMap();
 
-    if (opts.compScopeInfo || opts.compDbgCode)
+    if (opts.compScopeInfo || opts.DbgCode())
     {
         compInitScopeLists();
     }
 
-    if (opts.compDbgCode && (info.compVarScopesCount > 0))
+    if (opts.DbgCode() && (info.compVarScopesCount > 0))
     {
         /* Create a new empty basic block. fgExtendDbgLifetimes() may
           add initialization of variables which are in scope right from the
@@ -3858,7 +3858,7 @@ void Compiler::compSetOptimizationLevel()
 
     // Notify the VM if MinOpts is being used when not requested
     if (theMinOptsValue && !compIsForInlining() && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) &&
-        !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) && !opts.compDbgCode)
+        !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) && !opts.DbgCode())
     {
         info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_SWITCHED_TO_MIN_OPT);
         opts.jitFlags->Clear(JitFlags::JIT_FLAG_TIER1);
@@ -4110,9 +4110,9 @@ const char* Compiler::compGetTieringName(bool wantShortName) const
     const bool tier1         = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1);
     const bool instrumenting = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR);
 
-    if (!opts.compMinOptsIsSet)
+    if (!opts.IsMinOptsSet())
     {
-        // If 'compMinOptsIsSet' is not set, just return here. Otherwise, if this method is called
+        // If 'IsMinOptsSet()' is not true, just return here. Otherwise, if this method is called
         // by the assertAbort(), we would recursively call assert while trying to get MinOpts()
         // and eventually stackoverflow.
         return "Optimization-Level-Not-Yet-Set";
@@ -4164,7 +4164,7 @@ const char* Compiler::compGetTieringName(bool wantShortName) const
             return "MinOpts";
         }
     }
-    else if (opts.compDbgCode)
+    else if (opts.DbgCode())
     {
         return "Debug";
     }
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index 5b451505a6ec2a..e4caebe6c04835 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -9191,39 +9191,61 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 // Maximum number of locals before turning off the inlining
 #define MAX_LV_NUM_COUNT_FOR_INLINING 512
 
+private:
     bool compMinOpts;
     bool compMinOptsIsSet;
+    bool compDbgCode;             // Generate debugger-friendly code?
+    bool compOptimizationEnabled; // A cached, composite value
+
 #ifdef DEBUG
     mutable bool compMinOptsIsUsed;
+#endif // !DEBUG
+
+    void SetOptimizationEnabled()
+    {
+        // We want to be careful with the `compMinOptsIsSet` and `compMinOptsIsUsed` values.
+        // Caching the value shouldn't touch these. However, using this value should have the
+        // same asserts as using `MinOpts()`.
+        compOptimizationEnabled = !compMinOpts && !compDbgCode;
+    }
+
+public:
+    void InitializeMinOpts()
+    {
+        INDEBUG(compMinOptsIsUsed = false);
+        compMinOptsIsSet = false;
+    }
 
+#ifdef DEBUG
     bool MinOpts() const
     {
         assert(compMinOptsIsSet);
         compMinOptsIsUsed = true;
         return compMinOpts;
     }
-    bool IsMinOptsSet() const
-    {
-        return compMinOptsIsSet;
-    }
 #else // !DEBUG
     bool MinOpts() const
     {
         return compMinOpts;
     }
+#endif // !DEBUG
+
     bool IsMinOptsSet() const
     {
         return compMinOptsIsSet;
     }
-#endif // !DEBUG
 
     bool OptimizationDisabled() const
     {
-        return MinOpts() || compDbgCode;
+        assert(compMinOptsIsSet);
+        INDEBUG(compMinOptsIsUsed = true);
+        return !compOptimizationEnabled;
     }
     bool OptimizationEnabled() const
     {
-        return !OptimizationDisabled();
+        assert(compMinOptsIsSet);
+        INDEBUG(compMinOptsIsUsed = true);
+        return compOptimizationEnabled;
     }
 
     void SetMinOpts(bool val)
@@ -9232,6 +9254,20 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
         assert(!compMinOptsIsSet || (compMinOpts == val));
         compMinOpts      = val;
         compMinOptsIsSet = true;
+
+        SetOptimizationEnabled(); // Update compOptimizationEnabled
+    }
+
+    bool DbgCode() const
+    {
+        return compDbgCode;
+    }
+
+    void SetDbgCode(bool val)
+    {
+        compDbgCode = val;
+
+        SetOptimizationEnabled(); // Update compOptimizationEnabled
     }
 
     // true if the CLFLG_* for an optimization is set.
@@ -9325,7 +9361,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
     }
 
     bool compScopeInfo; // Generate the LocalVar info ?
-    bool compDbgCode;   // Generate debugger-friendly code?
     bool compDbgInfo;   // Gather debugging info?
     bool compDbgEnC;
 
@@ -9932,7 +9967,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
         if (opts.IsOSR())
             return false;
 #endif
-        return !info.compInitMem && opts.compDbgCode;
+        return !info.compInitMem && opts.DbgCode();
     }
 
     // Returns true if the jit supports having patchpoints in this method.
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 41c1ae5ebefbbc..4b796cc63a3d82 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -1983,7 +1983,7 @@ inline bool Compiler::lvaKeepAliveAndReportThis()
     if (info.compXcptnsCount > 0)
         return true;
 
-    if (opts.compDbgCode)
+    if (opts.DbgCode())
         return true;
 
     if (lvaGenericsContextInUse)
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index 294a92ab8020ff..dd5ed3ba18f292 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -2280,7 +2280,7 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed
 
                 const bool notStruct    = !varTypeIsStruct(lvaGetDesc(varNum));
                 const bool notLastInstr = (codeAddr < codeEndp - sz);
-                const bool notDebugCode = !opts.compDbgCode;
+                const bool notDebugCode = !opts.DbgCode();
 
                 if (notStruct && notLastInstr && notDebugCode && impILConsumesAddr(codeAddr + sz))
                 {
@@ -2809,7 +2809,7 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F
 
     // Keep track of where we are in the scope lists, as we will also
     // create blocks at scope boundaries.
-    if (opts.compDbgCode && (info.compVarScopesCount > 0))
+    if (opts.DbgCode() && (info.compVarScopesCount > 0))
     {
         compResetScopeLists();
 
@@ -3186,7 +3186,7 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F
 
         bool foundScope = false;
 
-        if (opts.compDbgCode && (info.compVarScopesCount > 0))
+        if (opts.DbgCode() && (info.compVarScopesCount > 0))
         {
             while (compGetNextEnterScope(nxtBBoffs))
             {
@@ -6888,5 +6888,5 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
 // true if 'throw' helper block should be created.
 bool Compiler::fgUseThrowHelperBlocks()
 {
-    return !opts.compDbgCode;
+    return !opts.DbgCode();
 }
diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp
index ff2a171f065648..21544e96b57c2a 100644
--- a/src/coreclr/jit/fgehopt.cpp
+++ b/src/coreclr/jit/fgehopt.cpp
@@ -52,7 +52,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally()
         return PhaseStatus::MODIFIED_NOTHING;
     }
 
-    if (opts.compDbgCode)
+    if (opts.DbgCode())
    {
         JITDUMP("Method compiled with debug codegen, no removal.\n");
         return PhaseStatus::MODIFIED_NOTHING;
@@ -320,7 +320,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
         return PhaseStatus::MODIFIED_NOTHING;
     }
 
-    if (opts.compDbgCode)
+    if (opts.DbgCode())
     {
         JITDUMP("Method compiled with debug codegen, no removal.\n");
         return PhaseStatus::MODIFIED_NOTHING;
@@ -657,7 +657,7 @@ PhaseStatus Compiler::fgCloneFinally()
         return PhaseStatus::MODIFIED_NOTHING;
     }
 
-    if (opts.compDbgCode)
+    if (opts.DbgCode())
     {
         JITDUMP("Method compiled with debug codegen, no cloning.\n");
         return PhaseStatus::MODIFIED_NOTHING;
@@ -1657,7 +1657,7 @@ PhaseStatus Compiler::fgMergeFinallyChains()
         return PhaseStatus::MODIFIED_NOTHING;
     }
 
-    if (opts.compDbgCode)
+    if (opts.DbgCode())
     {
         JITDUMP("Method compiled with debug codegen, no merging.\n");
         return PhaseStatus::MODIFIED_NOTHING;
diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp
index f26a27ce30b0fb..a55d198c1be708 100644
--- a/src/coreclr/jit/fgopt.cpp
+++ b/src/coreclr/jit/fgopt.cpp
@@ -4761,7 +4761,7 @@ bool Compiler::fgExpandRarelyRunBlocks()
 //
 bool Compiler::fgReorderBlocks(bool useProfile)
 {
-    noway_assert(opts.compDbgCode == false);
+    noway_assert(!opts.DbgCode());
 
 #if defined(FEATURE_EH_FUNCLETS)
     assert(fgFuncletsCreated);
diff --git a/src/coreclr/jit/fgstmt.cpp b/src/coreclr/jit/fgstmt.cpp
index 57124f6f3d306a..1579e21c177ba3 100644
--- a/src/coreclr/jit/fgstmt.cpp
+++ b/src/coreclr/jit/fgstmt.cpp
@@ -458,7 +458,7 @@ void Compiler::fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isU
     }
 #endif // DEBUG
 
-    if (opts.compDbgCode && stmt->GetPrevStmt() != stmt && stmt->GetDebugInfo().IsValid())
+    if (opts.DbgCode() && stmt->GetPrevStmt() != stmt && stmt->GetDebugInfo().IsValid())
     {
         /* TODO: For debuggable code, should we remove significant
            statement boundaries. Or should we leave a GT_NO_OP in its place? */
@@ -546,7 +546,7 @@ inline bool OperIsControlFlow(genTreeOps oper)
 
 bool Compiler::fgCheckRemoveStmt(BasicBlock* block, Statement* stmt)
 {
-    if (opts.compDbgCode)
+    if (opts.DbgCode())
     {
         return false;
     }
diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp
index 0bad2bc849a220..ebd6780803a7b0 100644
--- a/src/coreclr/jit/flowgraph.cpp
+++ b/src/coreclr/jit/flowgraph.cpp
@@ -453,7 +453,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
 bool Compiler::fgCanSwitchToOptimized()
 {
     bool result = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) &&
-                  !opts.compDbgCode && !compIsForInlining();
+                  !opts.DbgCode() && !compIsForInlining();
     if (result)
     {
         // Ensure that it would be safe to change the opt level
@@ -2303,7 +2303,7 @@ class MergedReturns
 
         // Do not look for mergeable constant returns in debug codegen as
         // we may lose track of sequence points.
-        if ((returnBlock != nullptr) && (maxReturns > 1) && !comp->opts.compDbgCode)
+        if ((returnBlock != nullptr) && (maxReturns > 1) && !comp->opts.DbgCode())
         {
             // Check to see if this is a constant return so that we can search
             // for and/or create a constant return block for it.
@@ -2695,7 +2695,7 @@ PhaseStatus Compiler::fgAddInternal()
 
     CORINFO_JUST_MY_CODE_HANDLE* pDbgHandle = nullptr;
     CORINFO_JUST_MY_CODE_HANDLE  dbgHandle  = nullptr;
-    if (opts.compDbgCode && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
+    if (opts.DbgCode() && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
     {
         dbgHandle = info.compCompHnd->getJustMyCodeHandle(info.compMethodHnd, &pDbgHandle);
     }
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index dd1dc0e70d371e..5202542005b382 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -7478,7 +7478,7 @@ GenTreeCall* Compiler::gtNewCallNode(gtCallTypes callType,
     // Implementation note: if not generating MRV info genCallSite2ILOffsetMap will be NULL and
     // codegen will pass DebugInfo() to emitter, which will cause emitter
     // not to emit IP mapping entry.
-    if (opts.compDbgCode && opts.compDbgInfo && di.IsValid())
+    if (opts.DbgCode() && opts.compDbgInfo && di.IsValid())
     {
         // Managed Retval - IL offset of the call. This offset is used to emit a
         // CALL_INSTRUCTION type sequence point while emitting corresponding native call.
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 4b556fa9634697..065dfecc3dd3ed 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -2040,7 +2040,7 @@ bool Compiler::impSpillStackEntry(unsigned level,
 
 void Compiler::impSpillStackEnsure(bool spillLeaves)
 {
-    assert(!spillLeaves || opts.compDbgCode);
+    assert(!spillLeaves || opts.DbgCode());
 
     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
     {
@@ -2447,7 +2447,7 @@ void Compiler::impNoteLastILoffs()
 
 void Compiler::impNoteBranchOffs()
 {
-    if (opts.compDbgCode)
+    if (opts.DbgCode())
     {
         impAppendTree(gtNewNothingNode(), CHECK_SPILL_NONE, impCurStmtDI);
     }
@@ -6379,7 +6379,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
             {
                 assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
 
-                if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
+                if (verCurrentState.esStackDepth != 0 && opts.DbgCode())
                 {
                     /* We need to provide accurate IP-mapping at this point.
                        So spill anything on the stack so that it will form
@@ -6390,7 +6390,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
 
                 // Have we reported debug info for any tree?
 
-                if (impCurStmtDI.IsValid() && opts.compDbgCode)
+                if (impCurStmtDI.IsValid() && opts.DbgCode())
                 {
                     GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
                     impAppendTree(placeHolder, CHECK_SPILL_NONE, impCurStmtDI);
@@ -6454,7 +6454,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                 {
                     impCurStmtOffsSet(opcodeOffs);
                 }
-                else if (opts.compDbgCode)
+                else if (opts.DbgCode())
                 {
                     impSpillStackEnsure(true);
                     impCurStmtOffsSet(opcodeOffs);
@@ -6462,7 +6462,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
             }
             else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
             {
-                if (opts.compDbgCode)
+                if (opts.DbgCode())
                 {
                     impSpillStackEnsure(true);
                 }
@@ -6816,7 +6816,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
 
                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum())
                 {
-                    if (opts.compDbgCode)
+                    if (opts.DbgCode())
                     {
                         op1 = gtNewNothingNode();
                         goto SPILL_APPEND;
@@ -7624,8 +7624,8 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
                 {
                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
-                       unreachable under compDbgCode */
-                    assert(!opts.compDbgCode);
+                       unreachable under DbgCode() */
+                    assert(!opts.DbgCode());
 
                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
                     assertImp((block->bbJumpKind == BBJ_COND) // normal case
@@ -7655,7 +7655,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
                    in impImportBlock(block). For correct line numbers, spill stack. */
 
-                if (opts.compDbgCode && impCurStmtDI.IsValid())
+                if (opts.DbgCode() && impCurStmtDI.IsValid())
                 {
                     impSpillStackEnsure(true);
                 }
@@ -8143,7 +8143,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
 
                 /* Does the value have any side effects? */
 
-                if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
+                if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.DbgCode())
                 {
                     // Since we are throwing away the value, just normalize
                     // it to its address. This is more efficient.
@@ -10700,7 +10700,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                 goto SPILL_APPEND;
 
             case CEE_NOP:
-                if (opts.compDbgCode)
+                if (opts.DbgCode())
                 {
                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
                     goto SPILL_APPEND;
diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp
index 5e35d39aa7de67..f536f226c10596 100644
--- a/src/coreclr/jit/importercalls.cpp
+++ b/src/coreclr/jit/importercalls.cpp
@@ -4391,7 +4391,7 @@ GenTree* Compiler::impTransformThis(GenTree* thisPtr,
 
 bool Compiler::impCanPInvokeInline()
 {
-    return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
+    return getInlinePInvokeEnabled() && (!opts.DbgCode()) && (compCodeOpt() != SMALL_CODE) &&
           (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
        ;
 }
@@ -5317,7 +5317,7 @@ void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call,
     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
 
     // Don't inline if not optimizing root method
-    if (opts.compDbgCode)
+    if (opts.DbgCode())
     {
         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
         return;
diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp
index ad958d63f89091..213ac74eae0cb1 100644
--- a/src/coreclr/jit/liveness.cpp
+++ b/src/coreclr/jit/liveness.cpp
@@ -849,7 +849,7 @@ void Compiler::fgExtendDbgLifetimes()
     }
 #endif // DEBUG
 
-    noway_assert(opts.compDbgCode && (info.compVarScopesCount > 0));
+    noway_assert(opts.DbgCode() && (info.compVarScopesCount > 0));
 
     /*-------------------------------------------------------------------------
      *  Extend the lifetimes over the entire reported scope of the variable.
@@ -871,7 +871,7 @@ void Compiler::fgExtendDbgLifetimes()
 
     fgLiveVarAnalysis(true);
 
-    /* For compDbgCode, we prepend an empty BB which will hold the
+    /* For DbgCode(), we prepend an empty BB which will hold the
       initializations of variables which are in scope at IL offset 0 (but not
       initialized by the IL code). Since they will currently be marked as
       live on entry to fgFirstBB, unmark the liveness so that
@@ -1418,7 +1418,7 @@ class LiveVarAnalysis
        /* Only update BBF_INTERNAL blocks as they may be
          syntactically out of sequence. */
 
-        noway_assert(m_compiler->opts.compDbgCode && (m_compiler->info.compVarScopesCount > 0));
+        noway_assert(m_compiler->opts.DbgCode() && (m_compiler->info.compVarScopesCount > 0));
 
        if (!(block->bbFlags & BBF_INTERNAL))
        {
@@ -2651,7 +2651,7 @@ void Compiler::fgInterBlockLocalVarLiveness()
      * reported scope, so that it will be visible over the entire scope
      */
 
-    if (opts.compDbgCode && (info.compVarScopesCount > 0))
+    if (opts.DbgCode() && (info.compVarScopesCount > 0))
     {
         fgExtendDbgLifetimes();
     }
diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp
index 659d630f8d326b..f036a532cc8836 100644
--- a/src/coreclr/jit/lsra.cpp
+++ b/src/coreclr/jit/lsra.cpp
@@ -1533,7 +1533,7 @@ bool LinearScan::isRegCandidate(LclVarDsc* varDsc)
     {
         case TYP_FLOAT:
         case TYP_DOUBLE:
-            return !compiler->opts.compDbgCode;
+            return !compiler->opts.DbgCode();
 
         case TYP_INT:
         case TYP_LONG:
diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp
index 3908f1998792a9..63cdc4789d047f 100644
--- a/src/coreclr/jit/lsrabuild.cpp
+++ b/src/coreclr/jit/lsrabuild.cpp
@@ -2179,7 +2179,7 @@ void LinearScan::buildIntervals()
         // Use lvRefCnt instead of checking bbLiveIn because if it's volatile we
        // won't have done dataflow on it, but it needs to be marked as live-in so
        // it will get saved in the prolog.
-        if (!compiler->compJmpOpUsed && argDsc->lvRefCnt() == 0 && !compiler->opts.compDbgCode)
+        if (!compiler->compJmpOpUsed && argDsc->lvRefCnt() == 0 && !compiler->opts.DbgCode())
        {
            continue;
        }
@@ -2226,7 +2226,7 @@ void LinearScan::buildIntervals()
         {
             // We can overwrite the register (i.e. codegen saves it on entry)
             assert(argDsc->lvRefCnt() == 0 || !argDsc->lvIsRegArg || argDsc->lvDoNotEnregister ||
-                   !argDsc->lvLRACandidate || (varTypeIsFloating(argDsc->TypeGet()) && compiler->opts.compDbgCode));
+                   !argDsc->lvLRACandidate || (varTypeIsFloating(argDsc->TypeGet()) && compiler->opts.DbgCode()));
         }
     }
 
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index c16730d655a20c..762571864b9f7b 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -968,7 +968,7 @@ void CallArgs::ArgsComplete(Compiler* comp, GenTreeCall* call)
        // conservative, but I want to avoid as much special-case debug-only code
        // as possible, so leveraging the GTF_CALL flag is the easiest.
        //
-        if (!treatLikeCall && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) && comp->opts.compDbgCode)
+        if (!treatLikeCall && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) && comp->opts.DbgCode())
        {
            exceptionFlags = comp->gtCollectExceptions(argx);
            if ((exceptionFlags & (ExceptionSetFlags::IndexOutOfRangeException |
@@ -4297,7 +4297,7 @@ BasicBlock* Compiler::fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay)
         delay = false;
     }
 
-    if (!opts.compDbgCode)
+    if (!opts.DbgCode())
     {
         if (!delay && !compIsForInlining())
         {
@@ -13614,7 +13614,7 @@ bool Compiler::fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(cons
 
             // The rest of block has been removed and we will always throw an exception.
             //
-            // For compDbgCode, we prepend an empty BB as the firstBB, it is BBJ_NONE.
+            // For DbgCode(), we prepend an empty BB as the firstBB, it is BBJ_NONE.
             // We should not convert it to a ThrowBB.
             if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0))
             {
@@ -14074,7 +14074,7 @@ void Compiler::fgSetOptions()
     }
 #endif
 
-    if (opts.compDbgCode)
+    if (opts.DbgCode())
     {
         assert(!codeGen->isGCTypeFixed());
         SetInterruptible(true); // debugging is easier this way ...
diff --git a/src/coreclr/jit/regalloc.cpp b/src/coreclr/jit/regalloc.cpp
index aa45c85a070d9f..7b13de83f9cf18 100644
--- a/src/coreclr/jit/regalloc.cpp
+++ b/src/coreclr/jit/regalloc.cpp
@@ -333,7 +333,7 @@ void Compiler::raMarkStkVars()
            an issue as fgExtendDbgLifetimes() adds an initialization and
           variables in scope will not have a zero ref-cnt.
         */
-        if (opts.compDbgCode && !varDsc->lvIsParam && varDsc->lvTracked)
+        if (opts.DbgCode() && !varDsc->lvIsParam && varDsc->lvTracked)
        {
            for (unsigned scopeNum = 0; scopeNum < info.compVarScopesCount; scopeNum++)
            {
@@ -347,7 +347,7 @@ void Compiler::raMarkStkVars()
           So we set lvMustInit and verify it has a nonzero ref-cnt.
          */
 
-        if (opts.compDbgCode && !stkFixedArgInVarArgs && lclNum < info.compLocalsCount)
+        if (opts.DbgCode() && !stkFixedArgInVarArgs && lclNum < info.compLocalsCount)
        {
            if (varDsc->lvRefCnt() == 0)
            {
diff --git a/src/coreclr/jit/scopeinfo.cpp b/src/coreclr/jit/scopeinfo.cpp
index 97df205af41bb3..091f8d522af2d8 100644
--- a/src/coreclr/jit/scopeinfo.cpp
+++ b/src/coreclr/jit/scopeinfo.cpp
@@ -775,7 +775,7 @@ void CodeGen::siOpenScopesForNonTrackedVars(const BasicBlock* block, unsigned in
         LclVarDsc* lclVarDsc = compiler->lvaGetDesc(varScope->vsdVarNum);
 
         // Only report locals that were referenced, if we're not doing debug codegen
-        if (compiler->opts.compDbgCode || (lclVarDsc->lvRefCnt() > 0))
+        if (compiler->opts.DbgCode() || (lclVarDsc->lvRefCnt() > 0))
         {
             // brace-matching editor workaround for following line: (
             JITDUMP("Scope info: opening scope, LVnum=%u [%03X..%03X)\n", varScope->vsdLVnum, varScope->vsdLifeBeg,
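The core idea of the change above is to hide the raw compMinOpts/compDbgCode flags behind setters that refresh a cached composite (compOptimizationEnabled), so OptimizationEnabled()/OptimizationDisabled() become a single cached read guarded by the same "is it set yet?" assert. Below is a minimal standalone sketch of that pattern, assuming a hypothetical OptFlags class (it is not the JIT's Options struct and omits the DEBUG-only compMinOptsIsUsed bookkeeping):

#include <cassert>

class OptFlags
{
    bool minOpts             = false;
    bool minOptsIsSet        = false;
    bool dbgCode             = false;
    bool optimizationEnabled = false; // cached composite: !minOpts && !dbgCode

    // Recompute the cached composite whenever either input flag changes.
    void UpdateCache()
    {
        optimizationEnabled = !minOpts && !dbgCode;
    }

public:
    void SetMinOpts(bool val)
    {
        minOpts      = val;
        minOptsIsSet = true;
        UpdateCache();
    }

    void SetDbgCode(bool val)
    {
        dbgCode = val;
        UpdateCache();
    }

    bool DbgCode() const
    {
        return dbgCode;
    }

    bool OptimizationEnabled() const
    {
        // Reading before SetMinOpts() would observe a composite built from an
        // uninitialized MinOpts decision, so assert it was set first.
        assert(minOptsIsSet);
        return optimizationEnabled;
    }

    bool OptimizationDisabled() const
    {
        assert(minOptsIsSet);
        return !optimizationEnabled;
    }
};

The design point this illustrates: because both setters funnel through UpdateCache(), the composite can never go stale, and the frequently called OptimizationEnabled()/OptimizationDisabled() queries avoid re-evaluating the boolean expression while still asserting that the optimization level was decided before anyone relies on it.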