diff --git a/eng/pipelines/common/templates/runtimes/run-test-job.yml b/eng/pipelines/common/templates/runtimes/run-test-job.yml
index 04399d26a6a22e..214b56547b7aca 100644
--- a/eng/pipelines/common/templates/runtimes/run-test-job.yml
+++ b/eng/pipelines/common/templates/runtimes/run-test-job.yml
@@ -546,6 +546,7 @@ jobs:
- jitosr
- jitosr_stress
- jitosr_pgo
+ - jitosr_stress_random
- jitpartialcompilation
- jitpartialcompilation_osr
- jitpartialcompilation_osr_pgo
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index 0c479168f4fee4..39368081e4b2b2 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -6235,15 +6235,9 @@ void CodeGen::genEnregisterOSRArgsAndLocals()
// This local was part of the live tier0 state and is enregistered in the
// OSR method. Initialize the register from the right frame slot.
//
- // We currently don't expect to see enregistered multi-reg args in OSR methods,
- // as struct promotion is disabled. So any struct arg just uses the location
- // on the tier0 frame.
- //
// If we ever enable promotion we'll need to generalize what follows to copy each
// field from the tier0 frame to its OSR home.
//
- assert(!varDsc->lvIsMultiRegArg);
-
if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
{
// This arg or local is not live at entry to the OSR method.
@@ -7542,6 +7536,16 @@ void CodeGen::genFnProlog()
#endif // PROFILING_SUPPORTED
+ // For OSR we may have a zero-length prolog. That's not supported
+ // when the method must report a generics context, so add a nop if so.
+ //
+ if (compiler->opts.IsOSR() && (GetEmitter()->emitGetPrologOffsetEstimate() == 0) &&
+ (compiler->lvaReportParamTypeArg() || compiler->lvaKeepAliveAndReportThis()))
+ {
+ JITDUMP("OSR: prolog was zero length and has generic context to report: adding nop to pad prolog.\n");
+ instGen(INS_nop);
+ }
+
if (!GetInterruptible())
{
// The 'real' prolog ends here for non-interruptible methods.
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index 394aca1719aeb5..d62718ff3c8ced 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -2786,15 +2786,19 @@ void Compiler::compInitOptions(JitFlags* jitFlags)
verboseDump = (JitConfig.JitDumpTier0() > 0);
}
- // Optionally suppress dumping some OSR jit requests.
+ // Optionally suppress dumping except for a specific OSR jit request.
//
- if (verboseDump && jitFlags->IsSet(JitFlags::JIT_FLAG_OSR))
- {
- const int desiredOffset = JitConfig.JitDumpAtOSROffset();
+ const int dumpAtOSROffset = JitConfig.JitDumpAtOSROffset();
- if (desiredOffset != -1)
+ if (verboseDump && (dumpAtOSROffset != -1))
+ {
+ if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR))
+ {
+ verboseDump = (((IL_OFFSET)dumpAtOSROffset) == info.compILEntry);
+ }
+ else
{
- verboseDump = (((IL_OFFSET)desiredOffset) == info.compILEntry);
+ verboseDump = false;
}
}
diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp
index 7fd7b5d5625636..3c9913f106d69a 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -2151,11 +2151,11 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 *
{
if (block == fgEntryBB)
{
- printf("original-entry");
+ printf(" original-entry");
}
if (block == fgOSREntryBB)
{
- printf("osr-entry");
+ printf(" osr-entry");
}
}
diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp
index f94cb8615be404..3295111d7edb56 100644
--- a/src/coreclr/jit/fgprofile.cpp
+++ b/src/coreclr/jit/fgprofile.cpp
@@ -1721,12 +1721,23 @@ PhaseStatus Compiler::fgPrepareToInstrumentMethod()
// jitting on PGO. If we ever implement a broader pattern of deferral -- say deferring
// based on static PGO -- we will need to reconsider.
//
+ // Under OSR stress we may add patchpoints even without backedges. So we also
+ // need to change the PGO instrumentation approach if OSR stress is enabled.
+ //
CLANG_FORMAT_COMMENT_ANCHOR;
+#if defined(DEBUG)
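+ // Both stress knobs are off by default (-1 and 0, per jitconfigvalues.h).
+ //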
+ const bool mayHaveStressPatchpoints =
+ (JitConfig.JitOffsetOnStackReplacement() >= 0) || (JitConfig.JitRandomOnStackReplacement() > 0);
+#else
+ const bool mayHaveStressPatchpoints = false;
+#endif
+
+ const bool mayHavePatchpoints =
+ (JitConfig.TC_OnStackReplacement() > 0) && (compHasBackwardJump || mayHaveStressPatchpoints);
const bool prejit = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT);
- const bool tier0WithPatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) &&
- (JitConfig.TC_OnStackReplacement() > 0) && compHasBackwardJump;
- const bool osrMethod = opts.IsOSR();
+ const bool tier0WithPatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && mayHavePatchpoints;
+ const bool osrMethod = opts.IsOSR();
const bool useEdgeProfiles = (JitConfig.JitEdgeProfiling() > 0) && !prejit && !tier0WithPatchpoints && !osrMethod;
if (useEdgeProfiles)
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 33e5443c1eee4a..8a2ba996ede397 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -11792,28 +11792,30 @@ void Compiler::impImportBlockCode(BasicBlock* block)
#ifdef FEATURE_ON_STACK_REPLACEMENT
- // Are there any places in the method where we might add a patchpoint?
+ // Is OSR enabled?
//
- if (compHasBackwardJump)
+ if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0))
{
- // Is OSR enabled?
+ // We don't inline at Tier0; if we ever do, we may need to rethink our approach.
+ // Could probably support inlines that don't introduce flow.
//
- if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0))
+ assert(!compIsForInlining());
+
+ // OSR is not yet supported for methods with explicit tail calls.
+ //
+ // But we also do not need to switch these methods to optimized code, as normal
+ // call counting should keep them from getting trapped in Tier0 code.
+ // So instead, just suppress adding patchpoints.
+ //
+ if (!compTailPrefixSeen)
{
- // OSR is not yet supported for methods with explicit tail calls.
- //
- // But we also may not switch methods to be optimized as we should be
- // able to avoid getting trapped in Tier0 code by normal call counting.
- // So instead, just suppress adding patchpoints.
+ assert(compCanHavePatchpoints());
+
+ // The normal policy is to add patchpoints only at the targets of lexically
+ // backward branches.
//
- if (!compTailPrefixSeen)
+ if (compHasBackwardJump)
{
- assert(compCanHavePatchpoints());
-
- // We don't inline at Tier0, if we do, we may need rethink our approach.
- // Could probably support inlines that don't introduce flow.
- assert(!compIsForInlining());
-
// Is the start of this block a suitable patchpoint?
//
if (((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) && (verCurrentState.esStackDepth == 0))
@@ -11826,12 +11828,64 @@ void Compiler::impImportBlockCode(BasicBlock* block)
setMethodHasPatchpoint();
}
}
+ else
+ {
+ // Should not see backward branch targets w/o backwards branches
+ assert((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == 0);
+ }
}
- }
- else
- {
- // Should not see backward branch targets w/o backwards branches
- assert((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == 0);
+
+#ifdef DEBUG
+ // As a stress test, we can place patchpoints at the start of any block
+ // that is a stack empty point and is not within a handler.
+ //
+ // Todo: enable for mid-block stack empty points too.
+ //
+ const int offsetOSR = JitConfig.JitOffsetOnStackReplacement();
+ const int randomOSR = JitConfig.JitRandomOnStackReplacement();
+ const bool tryOffsetOSR = offsetOSR >= 0;
+ const bool tryRandomOSR = randomOSR > 0;
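+ // When both are set, the explicit offset takes precedence over random placement.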
+
+ if ((tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() &&
+ ((block->bbFlags & BBF_PATCHPOINT) == 0))
+ {
+ // Block start can have a patchpoint. See if we should add one.
+ //
+ bool addPatchpoint = false;
+
+ // Specific offset?
+ //
+ if (tryOffsetOSR)
+ {
+ if (impCurOpcOffs == (unsigned)offsetOSR)
+ {
+ addPatchpoint = true;
+ }
+ }
+ // Random?
+ //
+ else
+ {
+ // Reuse the random inliner's random state.
+ // Note m_inlineStrategy is always created, even if we're not inlining.
+ //
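+ // Note: randomOSR is passed to GetRandom (presumably as a seed) and reused
+ // as the percent cutoff below.
+ //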
+ CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(randomOSR);
+ const int randomValue = (int)random->Next(100);
+
+ addPatchpoint = (randomValue < randomOSR);
+ }
+
+ if (addPatchpoint)
+ {
+ block->bbFlags |= BBF_PATCHPOINT;
+ setMethodHasPatchpoint();
+ }
+
+ JITDUMP("\n** %s patchpoint%s added to " FMT_BB " (il offset %u)\n", tryOffsetOSR ? "offset" : "random",
+ addPatchpoint ? "" : " not", block->bbNum, impCurOpcOffs);
+ }
+
+#endif // DEBUG
}
// Mark stack-empty rare blocks to be considered for partial compilation.
diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h
index 73b71a07a8c492..615b520c5b2c98 100644
--- a/src/coreclr/jit/jitconfigvalues.h
+++ b/src/coreclr/jit/jitconfigvalues.h
@@ -521,6 +521,12 @@ CONFIG_INTEGER(TC_OnStackReplacement, W("TC_OnStackReplacement"), 0)
CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, W("TC_OnStackReplacement_InitialCounter"), 1000)
// Enable partial compilation for Tier0 methods
CONFIG_INTEGER(TC_PartialCompilation, W("TC_PartialCompilation"), 0)
+#if defined(DEBUG)
+// Randomly sprinkle patchpoints. Value is the percent likelihood that any given stack-empty point becomes a patchpoint.
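+// For example, a value of 20 gives each eligible point a roughly 20% chance of becoming a patchpoint.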
+CONFIG_INTEGER(JitRandomOnStackReplacement, W("JitRandomOnStackReplacement"), 0)
+// Place patchpoint at the specified IL offset, if possible. Overrides random placement.
+CONFIG_INTEGER(JitOffsetOnStackReplacement, W("JitOffsetOnStackReplacement"), -1)
+#endif // DEBUG
#if defined(DEBUG)
CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange")) // Enable osr for only some methods
diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp
index 7b329e0a86a461..eb76cd663f287b 100644
--- a/src/coreclr/jit/lclvars.cpp
+++ b/src/coreclr/jit/lclvars.cpp
@@ -305,7 +305,9 @@ void Compiler::lvaInitTypeRef()
JITDUMP("-- V%02u is OSR exposed\n", varNum);
varDsc->lvHasLdAddrOp = 1;
- if (varDsc->lvType != TYP_STRUCT) // Why does it apply only to non-structs?
+ // Todo: why does address exposure apply only to non-structs?
+ //
+ if (!varTypeIsStruct(varDsc) && !varTypeIsSIMD(varDsc))
{
lvaSetVarAddrExposed(varNum DEBUGARG(AddressExposedReason::OSR_EXPOSED));
}
diff --git a/src/tests/Common/testenvironment.proj b/src/tests/Common/testenvironment.proj
index c8d6e0d31e59e9..aae081a1871265 100644
--- a/src/tests/Common/testenvironment.proj
+++ b/src/tests/Common/testenvironment.proj
@@ -62,6 +62,7 @@
COMPlus_JitEdgeProfiling;
COMPlus_JitRandomGuardedDevirtualization;
COMPlus_JitRandomEdgeCounts;
+ COMPlus_JitRandomOnStackReplacement;
RunningIlasmRoundTrip
@@ -187,6 +188,7 @@
+