diff --git a/THIRD-PARTY-NOTICES.TXT b/THIRD-PARTY-NOTICES.TXT
index 53855810b5af9f..4b40333f72ce4f 100644
--- a/THIRD-PARTY-NOTICES.TXT
+++ b/THIRD-PARTY-NOTICES.TXT
@@ -480,8 +480,8 @@ Foundation, Inc., Hewlett-Packard Company, Microsoft, nor Digital
Equipment Corporation makes any representations about the
suitability of this software for any purpose."
-License notice for The LLVM Compiler Infrastructure
----------------------------------------------------
+License notice for The LLVM Compiler Infrastructure (Legacy License)
+--------------------------------------------------------------------
Developed by:
@@ -986,8 +986,8 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-License for remote stack unwind (https://github.com/llvm/llvm-project/blob/main/lldb/source/Symbol/CompactUnwindInfo.cpp)
---------------------------------------
+License notice for The LLVM Project
+-----------------------------------
Copyright 2019 LLVM Project
diff --git a/docs/coding-guidelines/libraries-packaging.md b/docs/coding-guidelines/libraries-packaging.md
index 25a2894cfe9ae5..f7c926890bf8e8 100644
--- a/docs/coding-guidelines/libraries-packaging.md
+++ b/docs/coding-guidelines/libraries-packaging.md
@@ -55,11 +55,47 @@ Packages can include a Markdown Readme file with a short usage documentation. To
The package Readme is displayed on the package details page on [NuGet gallery](https://nuget.org/). You can include the following content in it:
- A description of the package purpose.
+- A list of the package's key features.
+- A code example that demonstrates how to use the package.
- Information about when the package should be used. For example, if the library is included in the shared framework in .NET but needs to be installed via NuGet on .NET Framework, that should be mentioned.
-- Information on how to get started with the package.
-- Links to related documentation.
- A list of common entry-point types for the package, with links to their API docs under [.NET API Browser](https://learn.microsoft.com/dotnet/api/).
-- A short code example that demostrates the package usage.
+- Links to related documentation.
+- Information about how to provide feedback on the package and contribute to it.
+
+Use the following Markdown template for a package Readme:
+
+```
+## About
+
+<!-- A description of the package and where to find more documentation -->
+
+## Key Features
+
+<!-- The key features of this package -->
+
+## How to Use
+
+<!-- A short code example showing how to use the package, plus any guidelines for when to use it -->
+
+## Main Types
+
+<!-- The main types provided by this library -->
+
+## Additional Documentation
+
+* [Conceptual documentation](...)
+* [API documentation](...)
+
+## Related Packages
+
+<!-- Links to related packages -->
+
+## Feedback & Contributing
+
+<!-- How to provide feedback on the package and contribute to it -->
+
+ExamplePackage is released as open source under the [MIT license](https://licenses.nuget.org/MIT). Bug reports and contributions are welcome at [the GitHub repository](https://github.com/dotnet/runtime).
+```
For a list of supported Markdown features, see [NuGet documentation](https://learn.microsoft.com/nuget/nuget-org/package-readme-on-nuget-org#supported-markdown-features).
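
As an illustration of the "How to Use" snippet the template asks for, here is a hypothetical example for the fictional ExamplePackage referenced above (the `ExampleType` API is invented purely for illustration):

```csharp
using System;
using ExamplePackage; // hypothetical namespace, for illustration only

// The smallest end-to-end usage: construct the package's main type and call it.
var widget = new ExampleType();
Console.WriteLine(widget.Describe());
```

Keeping the snippet self-contained and compilable makes it easy to copy directly from the gallery page.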
diff --git a/docs/project/list-of-diagnostics.md b/docs/project/list-of-diagnostics.md
index 4f78e9e711653d..4b46d49f5813f0 100644
--- a/docs/project/list-of-diagnostics.md
+++ b/docs/project/list-of-diagnostics.md
@@ -142,10 +142,10 @@ The diagnostic id values reserved for .NET Libraries analyzer warnings are `SYSL
| __`SYSLIB1023`__ | Generating more than 6 arguments is not supported |
| __`SYSLIB1024`__ | Argument is using the unsupported out parameter modifier |
| __`SYSLIB1025`__ | Multiple logging methods cannot use the same event name within a class |
-| __`SYSLIB1026`__ | _`SYSLIB1026`-`SYSLIB1029` reserved for logging._ |
-| __`SYSLIB1027`__ | _`SYSLIB1026`-`SYSLIB1029` reserved for logging._ |
-| __`SYSLIB1028`__ | _`SYSLIB1026`-`SYSLIB1029` reserved for logging._ |
-| __`SYSLIB1029`__ | _`SYSLIB1026`-`SYSLIB1029` reserved for logging._ |
+| __`SYSLIB1026`__ | C# language version not supported by the logging source generator. |
+| __`SYSLIB1027`__ | _`SYSLIB1001`-`SYSLIB1029` reserved for logging._ |
+| __`SYSLIB1028`__ | _`SYSLIB1001`-`SYSLIB1029` reserved for logging._ |
+| __`SYSLIB1029`__ | _`SYSLIB1001`-`SYSLIB1029` reserved for logging._ |
| __`SYSLIB1030`__ | JsonSourceGenerator did not generate serialization metadata for type |
| __`SYSLIB1031`__ | JsonSourceGenerator encountered a duplicate JsonTypeInfo property name |
| __`SYSLIB1032`__ | JsonSourceGenerator encountered a context class that is not partial |
@@ -208,7 +208,7 @@ The diagnostic id values reserved for .NET Libraries analyzer warnings are `SYSL
| __`SYSLIB1089`__ | _`SYSLIB1070`-`SYSLIB1089` reserved for System.Runtime.InteropServices.JavaScript.JSImportGenerator._ |
| __`SYSLIB1090`__ | Invalid 'GeneratedComInterfaceAttribute' usage |
| __`SYSLIB1091`__ | Method is declared in different partial declaration than the 'GeneratedComInterface' attribute. |
-| __`SYSLIB1092`__ | 'GenerateComInterfaceAttribute' usage not recommended. See aka.ms/GeneratedComInterfaceUsage for recommended usage. |
+| __`SYSLIB1092`__ | Usage of '[LibraryImport|GeneratedComInterface]' does not follow recommendation. See aka.ms/[LibraryImport|GeneratedComInterface]Usage for best practices. |
| __`SYSLIB1093`__ | Analysis for COM interface generation has failed |
| __`SYSLIB1094`__ | The base COM interface failed to generate source. Code will not be generated for this interface. |
| __`SYSLIB1095`__ | Invalid 'GeneratedComClassAttribute' usage |
@@ -250,7 +250,7 @@ The diagnostic id values reserved for .NET Libraries analyzer warnings are `SYSL
| __`SYSLIB1213`__ | Options validation generator: Member potentially missing enumerable validation. |
| __`SYSLIB1214`__ | Options validation generator: Can't validate constants, static fields or properties. |
| __`SYSLIB1215`__ | Options validation generator: Validation attribute on the member is inaccessible from the validator type. |
-| __`SYSLIB1216`__ | *_`SYSLIB1201`-`SYSLIB1219` reserved for Microsoft.Extensions.Options.SourceGeneration.* |
+| __`SYSLIB1216`__ | C# language version not supported by the options validation source generator. |
| __`SYSLIB1217`__ | *_`SYSLIB1201`-`SYSLIB1219` reserved for Microsoft.Extensions.Options.SourceGeneration.* |
| __`SYSLIB1218`__ | *_`SYSLIB1201`-`SYSLIB1219` reserved for Microsoft.Extensions.Options.SourceGeneration.* |
| __`SYSLIB1219`__ | *_`SYSLIB1201`-`SYSLIB1219` reserved for Microsoft.Extensions.Options.SourceGeneration.* |
@@ -270,3 +270,5 @@ The diagnostic id values reserved for .NET Libraries analyzer warnings are `SYSL
| Suppression ID | Suppressed Diagnostic ID | Description |
| :----------------------- | :----------------------- | :---------- |
| __`SYSLIBSUPPRESS0001`__ | CA1822 | Do not offer to make methods static when the methods need to be instance methods for a custom marshaller shape. |
+| __`SYSLIBSUPPRESS0002`__ | IL2026 | ConfigurationBindingGenerator: suppress RequiresUnreferencedCode diagnostic for binding call that has been intercepted by a generated static variant. |
+| __`SYSLIBSUPPRESS0003`__ | IL3050 | ConfigurationBindingGenerator: suppress RequiresDynamicCode diagnostic for binding call that has been intercepted by a generated static variant. |
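
For context on the new SYSLIB1026 entry above: the logging source generator fills in the bodies of `[LoggerMessage]` partial methods at compile time, and the diagnostic fires when the project's C# language version is too old for the generated code. A minimal sketch of the pattern (assumes the Microsoft.Extensions.Logging.Abstractions package):

```csharp
using Microsoft.Extensions.Logging;

internal static partial class Log
{
    // The generator emits the body of this partial method; with an
    // unsupported C# language version it reports SYSLIB1026 instead.
    [LoggerMessage(EventId = 1, Level = LogLevel.Information,
        Message = "Processed {itemCount} items")]
    public static partial void ItemsProcessed(ILogger logger, int itemCount);
}
```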
diff --git a/docs/workflow/trimming/feature-switches.md b/docs/workflow/trimming/feature-switches.md
index 87d8fa4c5ec425..635187684116b5 100644
--- a/docs/workflow/trimming/feature-switches.md
+++ b/docs/workflow/trimming/feature-switches.md
@@ -13,6 +13,7 @@ configurations but their defaults might vary as any SDK can set the defaults dif
| EnableUnsafeBinaryFormatterSerialization | System.Runtime.Serialization.EnableUnsafeBinaryFormatterSerialization | BinaryFormatter serialization support is trimmed when set to false |
| EventSourceSupport | System.Diagnostics.Tracing.EventSource.IsSupported | Any EventSource related code or logic is trimmed when set to false |
| InvariantGlobalization | System.Globalization.Invariant | All globalization specific code and data is trimmed when set to true |
+| MetricsSupport | System.Diagnostics.Metrics.Meter.IsSupported | Any Metrics related code or logic is trimmed when set to false |
| PredefinedCulturesOnly | System.Globalization.PredefinedCulturesOnly | Don't allow creating a culture for which the platform does not have data |
| HybridGlobalization | System.Globalization.Hybrid | Properties connected with the mixed (platform-specific + ICU-based) globalization will be trimmed |
| UseSystemResourceKeys | System.Resources.UseSystemResourceKeys | Any localizable resources for system assemblies are trimmed when set to true |
@@ -27,6 +28,7 @@ configurations but their defaults might vary as any SDK can set the defaults dif
| MetadataUpdaterSupport | System.Reflection.Metadata.MetadataUpdater.IsSupported | Metadata update related code is trimmed when set to false |
| _EnableConsumingManagedCodeFromNativeHosting | System.Runtime.InteropServices.EnableConsumingManagedCodeFromNativeHosting | Getting a managed function from native hosting is disabled when set to false and related functionality can be trimmed. |
| VerifyDependencyInjectionOpenGenericServiceTrimmability | Microsoft.Extensions.DependencyInjection.VerifyOpenGenericServiceTrimmability | When set to true, DependencyInjection will verify trimming annotations applied to open generic services are correct |
+| DisableDependencyInjectionDynamicEngine | Microsoft.Extensions.DependencyInjection.DisableDynamicEngine | When set to true, DependencyInjection will avoid using System.Reflection.Emit when realizing services |
| NullabilityInfoContextSupport | System.Reflection.NullabilityInfoContext.IsSupported | Nullable attributes can be trimmed when set to false |
| DynamicCodeSupport | System.Runtime.CompilerServices.RuntimeFeature.IsDynamicCodeSupported | Changes RuntimeFeature.IsDynamicCodeSupported to false to allow testing AOT-safe fallback code without publishing for Native AOT. |
| _AggressiveAttributeTrimming | System.AggressiveAttributeTrimming | When set to true, aggressively trims attributes to allow for the most size savings possible, even if it could result in runtime behavior changes |
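
Each MSBuild property in this table maps to the AppContext switch in the second column: library code reads the switch at run time, and the linker substitutes a constant when trimming. A rough sketch of the consuming side, using the switch name from the new MetricsSupport row (the helper type is invented for illustration; the runtime uses internal helpers for this):

```csharp
using System;

internal static class MetricsFeature
{
    // Feature-switch checks conventionally default to "enabled"
    // when the switch has not been set by the host or the SDK.
    internal static bool IsSupported =>
        !AppContext.TryGetSwitch("System.Diagnostics.Metrics.Meter.IsSupported",
                                 out bool enabled) || enabled;
}
```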
diff --git a/eng/SourceBuildPrebuiltBaseline.xml b/eng/SourceBuildPrebuiltBaseline.xml
index 74f6be96543a5e..458b2d756cba9a 100644
--- a/eng/SourceBuildPrebuiltBaseline.xml
+++ b/eng/SourceBuildPrebuiltBaseline.xml
@@ -10,12 +10,18 @@
+
-
+
+
+
+
diff --git a/eng/Subsets.props b/eng/Subsets.props
index 77268ffa7b5d09..eb4151f3a185d6 100644
--- a/eng/Subsets.props
+++ b/eng/Subsets.props
@@ -502,7 +502,7 @@
-
+
@@ -512,7 +512,7 @@
-
+
diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml
index 76bf5019cd8ed2..9c18388aa5346d 100644
--- a/eng/Version.Details.xml
+++ b/eng/Version.Details.xml
@@ -1,80 +1,80 @@
-
+
https://github.com/dotnet/icu
- 92124838d3f0efde3ac483a904691a611babb9a0
+ ac7697a28716e68e986d3db72cbc10324f8961fe
-
+
https://github.com/dotnet/msquic
- a880e93af4e50d19110d228e698900c110e2b0e9
+ bbb1252b31e3a194be3163982d972e4583c75476
https://github.com/dotnet/wcf
7f504aabb1988e9a093c1e74d8040bd52feb2f01
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
https://github.com/dotnet/command-line-api
@@ -85,209 +85,209 @@
02fe27cd6a9b001c8feb7938e6ef4b3799745759b
-
+
https://github.com/dotnet/cecil
- 2f4ef297939628143389ddeea569874ded0b1c1b
+ 64a8874f3c485657e732ca56a5f24e2095740103
-
+
https://github.com/dotnet/emsdk
- abfa03c97f4175d4d209435cd0e71f558e36c3fd
+ ae4eaab4a9415d7f87ca7c6dc0b41ea482fa6337
-
+
https://github.com/dotnet/source-build-reference-packages
- 5a1492557c8717b428b69fd4b7ca8c91d5d18cd3
+ 7b55da982fc6e71c1776c4de89111aee0eecb45a
-
+
https://github.com/dotnet/source-build-externals
- de4dda48d0cf31e13182bc24107b2246c61ed483
+ ed17956dbc31097b7ba6a66be086f4a70a97d84f
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/xliff-tasks
- 493329204079519072f0241ed26f692bdee0d60c
+ 194f32828726c3f1f63f79f3dc09b9e99c157b11
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
-
+
https://github.com/dotnet/llvm-project
- 9b77c16a6061fb1160ec12bd307badb4c58dff98
+ 2e6bfc3d59a6c80e3fa90a703e23bd4dab707756
https://github.com/dotnet/runtime
@@ -330,67 +330,67 @@
https://github.com/dotnet/xharness
480b9159eb7e69b182a87581d5a336e97e0b6dae
-
+
https://github.com/dotnet/arcade
- 9b2af35a6702526dc8a7c5fcadcc44efd0dca170
+ 1d451c32dda2314c721adbf8829e1c0cd4e681ff
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- 068998a5d91f55a619d1d072ab3094dacd5d6a4f
+ 492f7464d31d9599531fab2a67bc2422046f5133
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- 068998a5d91f55a619d1d072ab3094dacd5d6a4f
+ 492f7464d31d9599531fab2a67bc2422046f5133
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- 068998a5d91f55a619d1d072ab3094dacd5d6a4f
+ 492f7464d31d9599531fab2a67bc2422046f5133
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- 068998a5d91f55a619d1d072ab3094dacd5d6a4f
+ 492f7464d31d9599531fab2a67bc2422046f5133
-
+
https://github.com/dotnet/hotreload-utils
- 696312fd2a60671797b12311a4cf387d3cd14dd0
+ 94979ba9c2904e39ddb9aa255a288caacd6ce166
-
+
https://github.com/dotnet/runtime-assets
- 48270e734aa881c737b80c4fe0459e68aaf08ad6
+ 99168dcff56809205e7ef8530d1256f3a07fab1f
-
+
https://github.com/dotnet/roslyn
- 1fd4ff9d594b227baa3fc0962e2251323311ec19
+ 0d735148bbb4cb511be547fbc1db63a2c81a821d
-
+
https://github.com/dotnet/roslyn
- 1fd4ff9d594b227baa3fc0962e2251323311ec19
+ 0d735148bbb4cb511be547fbc1db63a2c81a821d
-
+
https://github.com/dotnet/roslyn
- 1fd4ff9d594b227baa3fc0962e2251323311ec19
+ 0d735148bbb4cb511be547fbc1db63a2c81a821d
-
+
https://github.com/dotnet/roslyn-analyzers
- 755a4f888d64fc7c0f2802adca731f301a53283d
+ 4a7701fd72094614897b33e4cb1d9640c221d862
-
+
https://github.com/dotnet/roslyn-analyzers
- 755a4f888d64fc7c0f2802adca731f301a53283d
+ 4a7701fd72094614897b33e4cb1d9640c221d862
https://github.com/dotnet/sdk
d10b02ae5cc670609d920a672985ed4456bdd6b6
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- 068998a5d91f55a619d1d072ab3094dacd5d6a4f
+ 492f7464d31d9599531fab2a67bc2422046f5133
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- 068998a5d91f55a619d1d072ab3094dacd5d6a4f
+ 492f7464d31d9599531fab2a67bc2422046f5133
@@ -398,5 +398,9 @@
https://github.com/NuGet/NuGet.Client
8fef55f5a55a3b4f2c96cd1a9b5ddc51d4b927f8
+
+ https://github.com/dotnet/installer
+ 46a7370763921ded24dcb70c585ee97883c615d4
+
diff --git a/eng/Versions.props b/eng/Versions.props
index 3cccff5761371c..ef9555ba255cbe 100644
--- a/eng/Versions.props
+++ b/eng/Versions.props
@@ -7,18 +7,22 @@
0
0
8.0.100
- 7.0.8
+ 7.0.12
6.0.$([MSBuild]::Add($([System.Version]::Parse('$(PackageVersionNet7)').Build),11))
- rc
- 1
- -$(PreReleaseVersionLabel).$(PreReleaseVersionIteration)
+ rtm
+
+
+
+ false
+ release
+ -$(PreReleaseVersionLabel)
+ -$(PreReleaseVersionLabel).$(PreReleaseVersionIteration)
$(SdkBandVersion)$(WorkloadVersionSuffix)
+
+ false
$(MajorVersion).$(MinorVersion).0.0
-
- false
- release
true
false
@@ -32,17 +36,17 @@
- 3.11.0-beta1.23412.1
- 8.0.0-preview.23412.1
+ 3.11.0-beta1.23472.1
+ 8.0.0-preview.23472.1
- 4.8.0-1.23408.8
- 4.8.0-1.23408.8
- 4.8.0-1.23408.8
+ 4.8.0-3.23501.1
+ 4.8.0-3.23501.1
+ 4.8.0-3.23501.1
8.0.100-preview.7.23329.3
- 8.0.0-beta.23411.1
- 8.0.0-beta.23411.1
- 8.0.0-beta.23411.1
- 8.0.0-beta.23411.1
- 8.0.0-beta.23411.1
- 2.5.1-beta.23411.1
- 8.0.0-beta.23411.1
- 8.0.0-beta.23411.1
- 8.0.0-beta.23411.1
- 8.0.0-beta.23411.1
- 8.0.0-beta.23411.1
- 8.0.0-beta.23411.1
- 8.0.0-beta.23411.1
- 8.0.0-beta.23411.1
- 8.0.0-beta.23411.1
+ 8.0.0-beta.23463.1
+ 8.0.0-beta.23463.1
+ 8.0.0-beta.23463.1
+ 8.0.0-beta.23463.1
+ 8.0.0-beta.23463.1
+ 2.5.1-beta.23463.1
+ 8.0.0-beta.23463.1
+ 8.0.0-beta.23463.1
+ 8.0.0-beta.23463.1
+ 8.0.0-beta.23463.1
+ 8.0.0-beta.23463.1
+ 8.0.0-beta.23463.1
+ 8.0.0-beta.23463.1
+ 8.0.0-beta.23463.1
+ 8.0.0-beta.23463.1
6.0.0-preview.1.102
@@ -106,14 +110,14 @@
8.0.0-rc.1.23406.6
8.0.0-preview.7.23325.2
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
6.0.0
1.1.1
@@ -139,29 +143,29 @@
4.5.0
8.0.0-rc.1.23406.6
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
- 8.0.0-beta.23408.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
+ 8.0.0-beta.23421.1
- 1.0.0-prerelease.23362.5
- 1.0.0-prerelease.23362.5
- 1.0.0-prerelease.23362.5
- 1.0.0-prerelease.23362.5
- 1.0.0-prerelease.23362.5
- 1.0.0-prerelease.23362.5
+ 1.0.0-prerelease.23478.3
+ 1.0.0-prerelease.23478.3
+ 1.0.0-prerelease.23478.3
+ 1.0.0-prerelease.23478.3
+ 1.0.0-prerelease.23478.3
+ 1.0.0-prerelease.23478.3
- 16.11.27-beta1.23180.1
+ 16.11.29-beta1.23404.4
2.0.0-beta4.23307.1
3.0.3
2.1.0
@@ -182,14 +186,14 @@
8.0.0-prerelease.23407.2
8.0.0-prerelease.23407.2
8.0.0-prerelease.23407.2
- 8.0.0-alpha.0.23407.2
+ 8.0.0-alpha.0.23502.1
2.4.2
1.0.0
2.4.5
3.12.0
4.1.0
6.0.0
- 13.0.1
+ 13.0.3
1.0.2
2.0.4
4.18.4
@@ -203,57 +207,58 @@
2.46.3
2.45.0
2.45.0
-
-
1.1.2-beta1.23323.1
- 7.0.0-preview-20221010.1
+ 8.0.0-preview-20230918.1
8.0.0-rc.1.23406.6
- 0.11.4-alpha.23407.2
+ 0.11.4-alpha.23502.1
8.0.0-rc.1.23406.6
- 8.0.0-rc.1.23407.2
+ 8.0.0-rc.2.23468.2
2.2.2
- 8.0.0-alpha.1.23180.2
+ 8.0.0-alpha.1.23468.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
- 8.0.0-rc.1.23411.2
+ 8.0.0-rtm.23477.1
$(MicrosoftNETWorkloadEmscriptenCurrentManifest80100TransportVersion)
1.1.87-gba258badda
1.0.0-v3.14.0.5722
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
- 16.0.5-alpha.1.23408.1
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
+ 16.0.5-alpha.1.23472.2
3.1.7
1.0.406601
+
+ 8.0.100-rtm.23478.7
+ $(MicrosoftDotnetSdkInternalVersion)
diff --git a/eng/common/SetupNugetSources.ps1 b/eng/common/SetupNugetSources.ps1
index 6e99723945183e..6c65e81925f2a3 100644
--- a/eng/common/SetupNugetSources.ps1
+++ b/eng/common/SetupNugetSources.ps1
@@ -153,7 +153,7 @@ if ($dotnet31Source -ne $null) {
AddPackageSource -Sources $sources -SourceName "dotnet3.1-internal-transport" -SourceEndPoint "https://pkgs.dev.azure.com/dnceng/_packaging/dotnet3.1-internal-transport/nuget/v2" -Creds $creds -Username $userName -Password $Password
}
-$dotnetVersions = @('5','6','7')
+$dotnetVersions = @('5','6','7','8')
foreach ($dotnetVersion in $dotnetVersions) {
$feedPrefix = "dotnet" + $dotnetVersion;
diff --git a/eng/common/SetupNugetSources.sh b/eng/common/SetupNugetSources.sh
index 8af7d899db1212..d387c7eac95e54 100644
--- a/eng/common/SetupNugetSources.sh
+++ b/eng/common/SetupNugetSources.sh
@@ -105,7 +105,7 @@ if [ "$?" == "0" ]; then
PackageSources+=('dotnet3.1-internal-transport')
fi
-DotNetVersions=('5' '6' '7')
+DotNetVersions=('5' '6' '7' '8')
for DotNetVersion in ${DotNetVersions[@]} ; do
FeedPrefix="dotnet${DotNetVersion}";
diff --git a/eng/common/cross/toolchain.cmake b/eng/common/cross/toolchain.cmake
index a88d643c8a765e..0998e875e5f78d 100644
--- a/eng/common/cross/toolchain.cmake
+++ b/eng/common/cross/toolchain.cmake
@@ -207,6 +207,7 @@ elseif(ILLUMOS)
set(CMAKE_CXX_STANDARD_LIBRARIES "${CMAKE_CXX_STANDARD_LIBRARIES} -lssp")
elseif(HAIKU)
set(CMAKE_SYSROOT "${CROSS_ROOTFS}")
+ set(CMAKE_PROGRAM_PATH "${CMAKE_PROGRAM_PATH};${CROSS_ROOTFS}/cross-tools-x86_64/bin")
set(TOOLSET_PREFIX ${TOOLCHAIN}-)
function(locate_toolchain_exec exec var)
@@ -217,7 +218,6 @@ elseif(HAIKU)
endif()
find_program(EXEC_LOCATION_${exec}
- PATHS "${CROSS_ROOTFS}/cross-tools-x86_64/bin"
NAMES
"${TOOLSET_PREFIX}${exec}${CLR_CMAKE_COMPILER_FILE_NAME_VERSION}"
"${TOOLSET_PREFIX}${exec}")
diff --git a/eng/common/loc/P22DotNetHtmlLocalization.lss b/eng/common/loc/P22DotNetHtmlLocalization.lss
index 858a0b237c62ce..5d892d619398f9 100644
Binary files a/eng/common/loc/P22DotNetHtmlLocalization.lss and b/eng/common/loc/P22DotNetHtmlLocalization.lss differ
diff --git a/eng/common/native/init-compiler.sh b/eng/common/native/init-compiler.sh
index 517401b688bf76..f5c1ec7eafeb28 100644
--- a/eng/common/native/init-compiler.sh
+++ b/eng/common/native/init-compiler.sh
@@ -63,7 +63,7 @@ if [ -z "$CLR_CC" ]; then
# Set default versions
if [ -z "$majorVersion" ]; then
    # note: gcc (all versions) and clang versions higher than 6 do not include the minor version in the file name when it is zero.
- if [ "$compiler" = "clang" ]; then versions="16 15 14 13 12 11 10 9 8 7 6.0 5.0 4.0 3.9 3.8 3.7 3.6 3.5"
+ if [ "$compiler" = "clang" ]; then versions="17 16 15 14 13 12 11 10 9 8 7 6.0 5.0 4.0 3.9 3.8 3.7 3.6 3.5"
elif [ "$compiler" = "gcc" ]; then versions="13 12 11 10 9 8 7 6 5 4.9"; fi
for version in $versions; do
diff --git a/eng/common/native/init-distro-rid.sh b/eng/common/native/init-distro-rid.sh
index aba9fe24028b0f..de1687b2ccbe79 100644
--- a/eng/common/native/init-distro-rid.sh
+++ b/eng/common/native/init-distro-rid.sh
@@ -79,7 +79,6 @@ getNonPortableDistroRid()
# Input:
# os: (str)
# arch: (str)
-# isPortable: (int)
# rootfsDir?: (nullable:string)
#
# Return:
@@ -97,10 +96,9 @@ initDistroRidGlobal()
{
local targetOs="$1"
local targetArch="$2"
- local isPortable="$3"
local rootfsDir=""
- if [ "$#" -ge 4 ]; then
- rootfsDir="$4"
+ if [ "$#" -ge 3 ]; then
+ rootfsDir="$3"
fi
if [ -n "${rootfsDir}" ]; then
diff --git a/eng/common/sdl/trim-assets-version.ps1 b/eng/common/sdl/trim-assets-version.ps1
new file mode 100644
index 00000000000000..a2e0048770452f
--- /dev/null
+++ b/eng/common/sdl/trim-assets-version.ps1
@@ -0,0 +1,75 @@
+<#
+.SYNOPSIS
+Install and run the 'Microsoft.DotNet.VersionTools.Cli' tool with the 'trim-assets-version' command to trim the version from the NuGet assets file name.
+
+.PARAMETER InputPath
+Full path to directory where artifact packages are stored
+
+.PARAMETER Recursive
+Search for NuGet packages recursively
+
+#>
+
+Param(
+ [string] $InputPath,
+ [bool] $Recursive = $true
+)
+
+$CliToolName = "Microsoft.DotNet.VersionTools.Cli"
+
+function Install-VersionTools-Cli {
+ param(
+ [Parameter(Mandatory=$true)][string]$Version
+ )
+
+ Write-Host "Installing the package '$CliToolName' with a version of '$version' ..."
+ $feed = "https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-eng/nuget/v3/index.json"
+
+ $argumentList = @("tool", "install", "--local", "$CliToolName", "--add-source $feed", "--no-cache", "--version $Version", "--create-manifest-if-needed")
+ Start-Process "$dotnet" -Verbose -ArgumentList $argumentList -NoNewWindow -Wait
+}
+
+# -------------------------------------------------------------------
+
+if (!(Test-Path $InputPath)) {
+ Write-Host "Input Path '$InputPath' does not exist"
+ ExitWithExitCode 1
+}
+
+$ErrorActionPreference = 'Stop'
+Set-StrictMode -Version 2.0
+
+$disableConfigureToolsetImport = $true
+$global:LASTEXITCODE = 0
+
+# `tools.ps1` checks $ci to perform some actions. Since the SDL
+# scripts don't necessarily execute in the same agent that run the
+# build.ps1/sh script this variable isn't automatically set.
+$ci = $true
+. $PSScriptRoot\..\tools.ps1
+
+try {
+ $dotnetRoot = InitializeDotNetCli -install:$true
+ $dotnet = "$dotnetRoot\dotnet.exe"
+
+ $toolsetVersion = Read-ArcadeSdkVersion
+ Install-VersionTools-Cli -Version $toolsetVersion
+
+ $cliToolFound = (& "$dotnet" tool list --local | Where-Object {$_.Split(' ')[0] -eq $CliToolName})
+ if ($null -eq $cliToolFound) {
+ Write-PipelineTelemetryError -Force -Category 'Sdl' -Message "The '$CliToolName' tool is not installed."
+ ExitWithExitCode 1
+ }
+
+ Exec-BlockVerbosely {
+ & "$dotnet" $CliToolName trim-assets-version `
+ --assets-path $InputPath `
+ --recursive $Recursive
+ Exit-IfNZEC "Sdl"
+ }
+}
+catch {
+ Write-Host $_
+ Write-PipelineTelemetryError -Force -Category 'Sdl' -Message $_
+ ExitWithExitCode 1
+}
\ No newline at end of file
diff --git a/eng/common/templates/job/execute-sdl.yml b/eng/common/templates/job/execute-sdl.yml
index 7aabaa18017bf6..7870f93bc17652 100644
--- a/eng/common/templates/job/execute-sdl.yml
+++ b/eng/common/templates/job/execute-sdl.yml
@@ -105,6 +105,11 @@ jobs:
downloadPath: $(Build.ArtifactStagingDirectory)\artifacts
checkDownloadedFiles: true
+ - powershell: eng/common/sdl/trim-assets-version.ps1
+ -InputPath $(Build.ArtifactStagingDirectory)\artifacts
+ displayName: Trim the version from the NuGet packages
+ continueOnError: ${{ parameters.sdlContinueOnError }}
+
- powershell: eng/common/sdl/extract-artifact-packages.ps1
-InputPath $(Build.ArtifactStagingDirectory)\artifacts\BlobArtifacts
-ExtractPath $(Build.ArtifactStagingDirectory)\artifacts\BlobArtifacts
diff --git a/eng/common/tools.ps1 b/eng/common/tools.ps1
index c9eced9f7df4c6..aa74ab4a81e782 100644
--- a/eng/common/tools.ps1
+++ b/eng/common/tools.ps1
@@ -671,6 +671,10 @@ function InitializeNativeTools() {
}
}
+function Read-ArcadeSdkVersion() {
+ return $GlobalJson.'msbuild-sdks'.'Microsoft.DotNet.Arcade.Sdk'
+}
+
function InitializeToolset() {
if (Test-Path variable:global:_ToolsetBuildProj) {
return $global:_ToolsetBuildProj
@@ -678,7 +682,7 @@ function InitializeToolset() {
$nugetCache = GetNuGetPackageCachePath
- $toolsetVersion = $GlobalJson.'msbuild-sdks'.'Microsoft.DotNet.Arcade.Sdk'
+ $toolsetVersion = Read-ArcadeSdkVersion
$toolsetLocationFile = Join-Path $ToolsetDir "$toolsetVersion.txt"
if (Test-Path $toolsetLocationFile) {
diff --git a/eng/liveBuilds.targets b/eng/liveBuilds.targets
index 118601229cf8b7..370e19805cc3a0 100644
--- a/eng/liveBuilds.targets
+++ b/eng/liveBuilds.targets
@@ -260,10 +260,4 @@
DependsOnTargets="
ResolveLibrariesRefAssembliesFromLocalBuild;
ResolveLibrariesRuntimeFilesFromLocalBuild" />
-
-
-
- $([MSBuild]::NormalizePath('$(ArtifactsBinDir)', 'Microsoft.NETCore.Platforms', 'runtime.json'))
- $([MSBuild]::NormalizePath('$(LibrariesProjectRoot)', 'Microsoft.NETCore.Platforms', 'src', 'runtime.json'))
-
diff --git a/eng/native/configureplatform.cmake b/eng/native/configureplatform.cmake
index 2f6ca03db863fb..e6e0273bc75c46 100644
--- a/eng/native/configureplatform.cmake
+++ b/eng/native/configureplatform.cmake
@@ -2,7 +2,7 @@ include(${CMAKE_CURRENT_LIST_DIR}/functions.cmake)
# If set, indicates that this is not an officially supported release.
# Release branches should set this to false.
-set(PRERELEASE 1)
+set(PRERELEASE 0)
#----------------------------------------
# Detect and set platform variable names
diff --git a/eng/pipelines/common/evaluate-default-paths.yml b/eng/pipelines/common/evaluate-default-paths.yml
index 5fb74a3741f413..0e4279a9697b94 100644
--- a/eng/pipelines/common/evaluate-default-paths.yml
+++ b/eng/pipelines/common/evaluate-default-paths.yml
@@ -241,6 +241,7 @@ jobs:
- src/mono/tools/*
- src/mono/wasi/*
- src/mono/wasm/debugger/*
+ - src/mono/wasm/host/*
- src/mono/wasm/Wasm.Build.Tests/*
- ${{ parameters._const_paths._wasm_pipelines }}
- ${{ parameters._const_paths._always_exclude }}
@@ -258,6 +259,7 @@ jobs:
- eng/testing/workloads-testing.targets
- src/mono/mono/component/mini-wasm-debugger.c
- src/mono/wasm/debugger/*
+ - src/mono/wasm/host/*
- src/mono/wasm/Wasm.Build.Tests/*
- src/mono/nuget/Microsoft.NET.Runtime*
src/mono/nuget/Microsoft.NET.Sdk.WebAssembly.Pack/*
diff --git a/eng/pipelines/common/global-build-job.yml b/eng/pipelines/common/global-build-job.yml
index 41cce9e1534f94..39e7d9b5c53ced 100644
--- a/eng/pipelines/common/global-build-job.yml
+++ b/eng/pipelines/common/global-build-job.yml
@@ -68,6 +68,7 @@ jobs:
variables:
- ${{ if eq(variables['System.TeamProject'], 'internal') }}:
- group: DotNet-HelixApi-Access
+ - group: AzureDevOps-Artifact-Feeds-Pats
- name: _osParameter
value: -os ${{ parameters.osGroup }}
@@ -144,13 +145,37 @@ jobs:
- ${{ each variable in parameters.variables }}:
- ${{ variable }}
steps:
+ - ${{ if eq(parameters.osGroup, 'windows') }}:
+ - template: /eng/pipelines/common/templates/disable-vsupdate-or-failfast.yml
+
- checkout: self
clean: true
- fetchDepth: $(checkoutFetchDepth)
+ # If running in source build mode, a git stash will be used for the inner clone. Avoid setting a fetch depth,
+ # as a stash of a shallow cloned repo is not currently supported.
+ ${{ if ne(parameters.isSourceBuild, true) }}:
+ fetchDepth: $(checkoutFetchDepth)
- ${{ if and(eq(parameters.isOfficialBuild, true), notin(parameters.osGroup, 'osx', 'maccatalyst', 'ios', 'iossimulator', 'tvos', 'tvossimulator')) }}:
- template: /eng/pipelines/common/restore-internal-tools.yml
+ - ${{ if ne(variables['System.TeamProject'], 'public') }}:
+ - ${{ if and(ne(parameters.osGroup, 'windows'), ne(parameters.hostedOs, 'windows')) }}:
+ - task: Bash@3
+ displayName: Setup Private Feeds Credentials
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/SetupNugetSources.sh
+ arguments: $(Build.SourcesDirectory)/NuGet.config $Token
+ env:
+ Token: $(dn-bot-dnceng-artifact-feeds-rw)
+ - ${{ else }}:
+ - task: PowerShell@2
+ displayName: Setup Private Feeds Credentials
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/SetupNugetSources.ps1
+ arguments: -ConfigFile $(Build.SourcesDirectory)/NuGet.config -Password $Env:Token
+ env:
+ Token: $(dn-bot-dnceng-artifact-feeds-rw)
+
- ${{ each monoCrossAOTTargetOS in parameters.monoCrossAOTTargetOS }}:
- task: DownloadPipelineArtifact@2
displayName: Download ${{monoCrossAOTTargetOS}} AOT offset files
diff --git a/eng/pipelines/common/templates/disable-vsupdate-or-failfast.yml b/eng/pipelines/common/templates/disable-vsupdate-or-failfast.yml
new file mode 100644
index 00000000000000..7b9eab0bafdb50
--- /dev/null
+++ b/eng/pipelines/common/templates/disable-vsupdate-or-failfast.yml
@@ -0,0 +1,38 @@
+# This step tries to disable VSIX auto-update. If an update is detected as already running,
+# it exits with an error so the job fails fast and can be retried.
+steps:
+ - powershell: |
+ schtasks /change /tn "\Microsoft\VisualStudio\VSIX Auto Update" /disable
+
+ $vswhere = "C:\Program Files (x86)\Microsoft Visual Studio\Installer\vswhere.exe"
+ if (-not (Test-Path -Path "$vswhere" -PathType Leaf))
+ {
+ Write-Error "Couldn't locate vswhere at $vswhere"
+ exit 1
+ }
+
+ $vsdir = &"$vswhere" -latest -prerelease -products * -requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64 -property installationPath
+ $vsregedit = "$vsdir\Common7\IDE\VsRegEdit.exe"
+
+ if (-not (Test-Path -Path "$vsregedit" ))
+ {
+ Write-Error "VSWhere returned path: $vsdir, but regedit $vsregedit doesn't exist."
+ exit 1
+ }
+
+ Write-Output "VSWhere returned path: $vsdir, using regedit $vsregedit"
+ Write-Output "Disabling updates through VS Registry:"
+
+ &"$vsdir\Common7\IDE\VsRegEdit.exe" set local HKCU ExtensionManager AutomaticallyCheckForUpdates2Override dword 0
+ &"$vsdir\Common7\IDE\VsRegEdit.exe" read local HKCU ExtensionManager AutomaticallyCheckForUpdates2Override dword
+
+ $processes = Get-Process -Name VSIXAutoUpdate -ErrorAction SilentlyContinue
+
+ if ($processes -ne $null -and $processes.Count -gt 0)
+ {
+ Write-Error "VSIXAutoUpdate has already spawned. Failfast to allow retry"
+ exit 1
+ }
+
+ displayName: Disable VSIX updates or fail-fast
+ condition: always()
diff --git a/eng/pipelines/common/templates/pipeline-with-resources.yml b/eng/pipelines/common/templates/pipeline-with-resources.yml
index c30ce8597808d8..02242394fba671 100644
--- a/eng/pipelines/common/templates/pipeline-with-resources.yml
+++ b/eng/pipelines/common/templates/pipeline-with-resources.yml
@@ -85,12 +85,12 @@ resources:
image: mcr.microsoft.com/dotnet-buildtools/prereqs:centos-stream8
- container: browser_wasm
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:cbl-mariner-2.0-webassembly
+ image: mcr.microsoft.com/dotnet-buildtools/prereqs:cbl-mariner-2.0-webassembly-20230913040940-1edc1c6
env:
ROOTFS_DIR: /crossrootfs/x64
- container: wasi_wasm
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:cbl-mariner-2.0-webassembly
+ image: mcr.microsoft.com/dotnet-buildtools/prereqs:cbl-mariner-2.0-webassembly-20230913040940-1edc1c6
env:
ROOTFS_DIR: /crossrootfs/x64
diff --git a/eng/pipelines/common/templates/runtimes/xplat-job.yml b/eng/pipelines/common/templates/runtimes/xplat-job.yml
index 7249125648cf32..f4ac7e82957123 100644
--- a/eng/pipelines/common/templates/runtimes/xplat-job.yml
+++ b/eng/pipelines/common/templates/runtimes/xplat-job.yml
@@ -106,6 +106,9 @@ jobs:
- ${{insert}}: ${{ variable }}
steps:
+ - ${{ if eq(parameters.osGroup, 'windows') }}:
+ - template: /eng/pipelines/common/templates/disable-vsupdate-or-failfast.yml
+
- checkout: self
clean: true
fetchDepth: $(checkoutFetchDepth)
diff --git a/eng/pipelines/coreclr/templates/build-job.yml b/eng/pipelines/coreclr/templates/build-job.yml
index 99379f80a5d9ce..365c1432aa41a1 100644
--- a/eng/pipelines/coreclr/templates/build-job.yml
+++ b/eng/pipelines/coreclr/templates/build-job.yml
@@ -79,6 +79,8 @@ jobs:
# Variables used by arcade to gather asset manifests
- name: _DotNetPublishToBlobFeed
value: true
+ - ${{ if eq(variables['System.TeamProject'], 'internal') }}:
+ - group: AzureDevOps-Artifact-Feeds-Pats
- name: officialBuildIdArg
value: ''
- ${{ if eq(parameters.isOfficialBuild, true) }}:
@@ -162,6 +164,24 @@ jobs:
continueOnError: false
condition: and(succeeded(), in(variables['SignType'], 'real', 'test'))
+ - ${{ if ne(variables['System.TeamProject'], 'public') }}:
+ - ${{ if ne(parameters.osGroup, 'windows') }}:
+ - task: Bash@3
+ displayName: Setup Private Feeds Credentials
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/SetupNugetSources.sh
+ arguments: $(Build.SourcesDirectory)/NuGet.config $Token
+ env:
+ Token: $(dn-bot-dnceng-artifact-feeds-rw)
+ - ${{ if eq(parameters.osGroup, 'windows') }}:
+ - task: PowerShell@2
+ displayName: Setup Private Feeds Credentials
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/SetupNugetSources.ps1
+ arguments: -ConfigFile $(Build.SourcesDirectory)/NuGet.config -Password $Env:Token
+ env:
+ Token: $(dn-bot-dnceng-artifact-feeds-rw)
+
- ${{ if in(parameters.osGroup, 'osx', 'ios', 'tvos') }}:
- script: |
du -sh $(Build.SourcesDirectory)/*
diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml
index 13d3352393fa7f..c279c318e34d59 100644
--- a/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml
+++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml
@@ -193,6 +193,44 @@ jobs:
eq(dependencies.evaluate_paths.outputs['SetPathVars_coreclr.containsChange'], true),
eq(variables['isRollingBuild'], true))
+#
+# CoreCLR NativeAOT checked build and Pri0 tests
+# Only when CoreCLR is changed
+#
+- template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/common/global-build-job.yml
+ helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
+ buildConfig: Checked
+ platforms:
+ - windows_x64
+ - linux_x64
+ variables:
+ - name: timeoutPerTestInMinutes
+ value: 60
+ - name: timeoutPerTestCollectionInMinutes
+ value: 180
+ jobParameters:
+ timeoutInMinutes: 240
+ nameSuffix: NativeAOT_Pri0
+ buildArgs: -s clr.aot+host.native+libs -rc $(_BuildConfig) -lc Release -hc Release
+ extraStepsTemplate: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml
+ extraStepsParameters:
+ creator: dotnet-bot
+ testBuildArgs: 'nativeaot /p:IlcUseServerGc=false'
+ liveLibrariesBuildConfig: Release
+ testRunNamePrefixSuffix: NativeAOT_Pri0_$(_BuildConfig)
+ extraVariablesTemplates:
+ - template: /eng/pipelines/common/templates/runtimes/test-variables.yml
+ parameters:
+ testGroup: innerloop
+ liveLibrariesBuildConfig: Release
+ condition: >-
+ or(
+ eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true),
+ eq(dependencies.evaluate_paths.outputs['SetPathVars_coreclr.containsChange'], true),
+ eq(variables['isRollingBuild'], true))
+
# Run net48 tests on win-x64
- template: /eng/pipelines/common/platform-matrix.yml
parameters:
diff --git a/eng/pipelines/installer/jobs/build-job.yml b/eng/pipelines/installer/jobs/build-job.yml
index 5a0e37157e45c5..1d89cfad70eb20 100644
--- a/eng/pipelines/installer/jobs/build-job.yml
+++ b/eng/pipelines/installer/jobs/build-job.yml
@@ -293,9 +293,30 @@ jobs:
parameters.archType,
parameters.liveLibrariesBuildConfig) }}
steps:
+ - ${{ if eq(parameters.osGroup, 'windows') }}:
+ - template: /eng/pipelines/common/templates/disable-vsupdate-or-failfast.yml
- checkout: self
clean: true
fetchDepth: $(checkoutFetchDepth)
+
+ - ${{ if ne(variables['System.TeamProject'], 'public') }}:
+ - ${{ if ne(parameters.osGroup, 'windows') }}:
+ - task: Bash@3
+ displayName: Setup Private Feeds Credentials
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/SetupNugetSources.sh
+ arguments: $(Build.SourcesDirectory)/NuGet.config $Token
+ env:
+ Token: $(dn-bot-dnceng-artifact-feeds-rw)
+ - ${{ else }}:
+ - task: PowerShell@2
+ displayName: Setup Private Feeds Credentials
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/SetupNugetSources.ps1
+ arguments: -ConfigFile $(Build.SourcesDirectory)/NuGet.config -Password $Env:Token
+ env:
+ Token: $(dn-bot-dnceng-artifact-feeds-rw)
+
- ${{ if ne(parameters.liveRuntimeBuildConfig, '') }}:
- template: /eng/pipelines/common/download-artifact-step.yml
parameters:
diff --git a/eng/pipelines/libraries/base-job.yml b/eng/pipelines/libraries/base-job.yml
index 9dea30f61c455d..2448124a7bc62d 100644
--- a/eng/pipelines/libraries/base-job.yml
+++ b/eng/pipelines/libraries/base-job.yml
@@ -48,6 +48,7 @@ jobs:
variables:
- ${{ if eq(variables['System.TeamProject'], 'internal') }}:
- group: DotNet-HelixApi-Access
+ - group: AzureDevOps-Artifact-Feeds-Pats
- _buildScriptFileName: build
@@ -136,4 +137,22 @@ jobs:
artifactName: '$(_runtimeArtifactName)'
displayName: '$(runtimeFlavorName) build drop'
+ - ${{ if ne(variables['System.TeamProject'], 'public') }}:
+ - ${{ if ne(parameters.osGroup, 'windows') }}:
+ - task: Bash@3
+ displayName: Setup Private Feeds Credentials
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/SetupNugetSources.sh
+ arguments: $(Build.SourcesDirectory)/NuGet.config $Token
+ env:
+ Token: $(dn-bot-dnceng-artifact-feeds-rw)
+ - ${{ if eq(parameters.osGroup, 'windows') }}:
+ - task: PowerShell@2
+ displayName: Setup Private Feeds Credentials
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/SetupNugetSources.ps1
+ arguments: -ConfigFile $(Build.SourcesDirectory)/NuGet.config -Password $Env:Token
+ env:
+ Token: $(dn-bot-dnceng-artifact-feeds-rw)
+
- ${{ parameters.steps }}
diff --git a/eng/pipelines/libraries/helix-queues-setup.yml b/eng/pipelines/libraries/helix-queues-setup.yml
index 72d8d53cd94ddd..987d7f99c41f4a 100644
--- a/eng/pipelines/libraries/helix-queues-setup.yml
+++ b/eng/pipelines/libraries/helix-queues-setup.yml
@@ -62,13 +62,13 @@ jobs:
- ${{ if and(eq(parameters.jobParameters.testScope, 'outerloop'), eq(parameters.jobParameters.runtimeFlavor, 'mono')) }}:
- SLES.15.Amd64.Open
- (Centos.8.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:centos-stream8-helix
- - (Fedora.36.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:fedora-36-helix
+ - (Fedora.38.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:fedora-38-helix
- (Ubuntu.2204.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-22.04-helix-amd64
- (Debian.11.Amd64.Open)Ubuntu.2204.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:debian-11-helix-amd64
- ${{ if or(ne(parameters.jobParameters.testScope, 'outerloop'), ne(parameters.jobParameters.runtimeFlavor, 'mono')) }}:
- ${{ if or(eq(parameters.jobParameters.isExtraPlatforms, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}:
- SLES.15.Amd64.Open
- - (Fedora.36.Amd64.Open)ubuntu.1804.amd64.open@mcr.microsoft.com/dotnet-buildtools/prereqs:fedora-36-helix
+ - (Fedora.38.Amd64.Open)ubuntu.1804.amd64.open@mcr.microsoft.com/dotnet-buildtools/prereqs:fedora-38-helix
- Ubuntu.2204.Amd64.Open
- (Debian.11.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:debian-11-helix-amd64
- (Mariner.2.0.Amd64.Open)ubuntu.1804.amd64.open@mcr.microsoft.com/dotnet-buildtools/prereqs:cbl-mariner-2.0-helix-amd64
diff --git a/eng/pipelines/libraries/stress/http.yml b/eng/pipelines/libraries/stress/http.yml
index 6c740e49d04d47..f4f9c45de36e48 100644
--- a/eng/pipelines/libraries/stress/http.yml
+++ b/eng/pipelines/libraries/stress/http.yml
@@ -13,6 +13,7 @@ schedules:
- main
- release/6.0
- release/7.0
+ - release/8.0
variables:
- template: ../variables.yml
diff --git a/eng/pipelines/libraries/stress/ssl.yml b/eng/pipelines/libraries/stress/ssl.yml
index 791251030f5753..ab93994400d346 100644
--- a/eng/pipelines/libraries/stress/ssl.yml
+++ b/eng/pipelines/libraries/stress/ssl.yml
@@ -13,6 +13,7 @@ schedules:
- main
- release/6.0
- release/7.0
+ - release/8.0
variables:
- template: ../variables.yml
diff --git a/eng/pipelines/runtime-llvm.yml b/eng/pipelines/runtime-llvm.yml
index e31e623a0353c8..9d358e5f793086 100644
--- a/eng/pipelines/runtime-llvm.yml
+++ b/eng/pipelines/runtime-llvm.yml
@@ -119,7 +119,7 @@ extends:
testGroup: innerloop
nameSuffix: AllSubsets_Mono_LLVMAOT
buildArgs: -s mono+libs+host+packs -c $(_BuildConfig)
- /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true
+ /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true
condition: >-
or(
eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true),
@@ -138,7 +138,7 @@ extends:
testGroup: innerloop
nameSuffix: AllSubsets_Mono_LLVMAOT
buildArgs: -s mono+libs+host+packs -c $(_BuildConfig)
- /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true
+ /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true
condition: >-
or(
eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true),
diff --git a/eng/pipelines/runtime-official.yml b/eng/pipelines/runtime-official.yml
index 172a40e24d169f..3a9fd8d89ac4b0 100644
--- a/eng/pipelines/runtime-official.yml
+++ b/eng/pipelines/runtime-official.yml
@@ -41,13 +41,13 @@ extends:
# Localization build
#
- # disabled due to https://github.com/dotnet/runtime/issues/90466
- #- ${{ if eq(variables['Build.SourceBranch'], 'refs/heads/main') }}:
- # - template: /eng/common/templates/job/onelocbuild.yml
- # parameters:
- # MirrorRepo: runtime
- # LclSource: lclFilesfromPackage
- # LclPackageId: 'LCL-JUNO-PROD-RUNTIME'
+ - ${{ if eq(variables['Build.SourceBranch'], 'refs/heads/release/8.0') }}:
+ - template: /eng/common/templates/job/onelocbuild.yml
+ parameters:
+ MirrorRepo: runtime
+ MirrorBranch: release/8.0
+ LclSource: lclFilesfromPackage
+ LclPackageId: 'LCL-JUNO-PROD-RUNTIME'
#
# Source Index Build
@@ -334,7 +334,7 @@ extends:
runtimeFlavor: mono
jobParameters:
buildArgs: -s mono+libs+host+packs -c $(_BuildConfig)
- /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true
+ /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true
nameSuffix: AllSubsets_Mono_LLVMAOT
runtimeVariant: LLVMAOT
isOfficialBuild: ${{ variables.isOfficialBuild }}
diff --git a/eng/pipelines/runtime-wasm-perf.yml b/eng/pipelines/runtime-wasm-perf.yml
index bd6a6d979e3e40..69039fb3e2a473 100644
--- a/eng/pipelines/runtime-wasm-perf.yml
+++ b/eng/pipelines/runtime-wasm-perf.yml
@@ -3,6 +3,7 @@
# UI to this, and thus avoid any scheduled triggers
trigger: none
+pr: none
variables:
- template: /eng/pipelines/common/variables.yml
diff --git a/eng/pipelines/runtime.yml b/eng/pipelines/runtime.yml
index 0f1f9610c60349..3aa0b6504819a7 100644
--- a/eng/pipelines/runtime.yml
+++ b/eng/pipelines/runtime.yml
@@ -556,6 +556,47 @@ extends:
extraBuildArgs: /p:AotHostArchitecture=x64 /p:AotHostOS=$(_hostedOS)
alwaysRun: ${{ variables.isRollingBuild }}
+ #
+ # Android devices
+ # Build the whole product using Mono and run libraries tests
+ #
+ - template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/common/global-build-job.yml
+ helixQueuesTemplate: /eng/pipelines/libraries/helix-queues-setup.yml
+ buildConfig: Release
+ runtimeFlavor: mono
+ platforms:
+ - android_arm
+ - android_arm64
+ variables:
+ # map dependencies variables to local variables
+ - name: librariesContainsChange
+ value: $[ dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'] ]
+ - name: monoContainsChange
+ value: $[ dependencies.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'] ]
+ jobParameters:
+ testGroup: innerloop
+ nameSuffix: AllSubsets_Mono
+ buildArgs: -s mono+libs+libs.tests+host+packs -c $(_BuildConfig) /p:ArchiveTests=true /p:RunSmokeTestsOnly=true /p:EnableAdditionalTimezoneChecks=true
+ timeoutInMinutes: 480
+ condition: >-
+ or(
+ eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true),
+ eq(dependencies.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'], true),
+ eq(dependencies.evaluate_paths.outputs['SetPathVars_installer.containsChange'], true),
+ eq(variables['isRollingBuild'], true))
+ # extra steps, run tests
+ extraStepsTemplate: /eng/pipelines/libraries/helix.yml
+ extraStepsParameters:
+ creator: dotnet-bot
+ testRunNamePrefixSuffix: Mono_$(_BuildConfig)
+ condition: >-
+ or(
+ eq(variables['librariesContainsChange'], true),
+ eq(variables['monoContainsChange'], true),
+ eq(variables['isRollingBuild'], true))
+
#
# iOS/tvOS devices - Full AOT + AggressiveTrimming to reduce size
# Build the whole product using Mono and run libraries tests
@@ -739,7 +780,7 @@ extends:
testGroup: innerloop
nameSuffix: AllSubsets_Mono_LLVMAOT
buildArgs: -s mono+libs+host+packs -c $(_BuildConfig)
- /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true
+ /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true
condition: >-
or(
eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true),
@@ -758,7 +799,7 @@ extends:
testGroup: innerloop
nameSuffix: AllSubsets_Mono_LLVMAOT
buildArgs: -s mono+libs+host+packs -c $(_BuildConfig)
- /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true
+ /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true
condition: >-
or(
eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true),
@@ -1277,7 +1318,7 @@ extends:
testGroup: innerloop
nameSuffix: AllSubsets_Mono_LLVMAot_RuntimeTests
runtimeVariant: llvmaot
- buildArgs: -s mono+libs+clr.hosts+clr.iltools -c Release /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true
+ buildArgs: -s mono+libs+clr.hosts+clr.iltools -c Release /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true
timeoutInMinutes: 180
condition: >-
diff --git a/eng/testing/ProvisioningVersions.props b/eng/testing/ProvisioningVersions.props
index 251fa2d85d3165..3105078bdc3b59 100644
--- a/eng/testing/ProvisioningVersions.props
+++ b/eng/testing/ProvisioningVersions.props
@@ -44,20 +44,20 @@
-
+ false
true
- <ChromeVersion>113.0.5672.63</ChromeVersion>
- <ChromeRevision>1121455</ChromeRevision>
- <_ChromeBaseSnapshotUrl>https://storage.googleapis.com/chromium-browser-snapshots/Linux_x64/1121461</_ChromeBaseSnapshotUrl>
+ <ChromeVersion>115.0.5790.170</ChromeVersion>
+ <ChromeRevision>1148114</ChromeRevision>
+ <_ChromeBaseSnapshotUrl>https://storage.googleapis.com/chromium-browser-snapshots/Linux_x64/1148123</_ChromeBaseSnapshotUrl>
- <ChromeVersion>113.0.5672.64</ChromeVersion>
- <ChromeRevision>1121455</ChromeRevision>
- <_ChromeBaseSnapshotUrl>https://storage.googleapis.com/chromium-browser-snapshots/Win_x64/1121477</_ChromeBaseSnapshotUrl>
+ <ChromeVersion>115.0.5790.171</ChromeVersion>
+ <ChromeRevision>1148114</ChromeRevision>
+ <_ChromeBaseSnapshotUrl>https://storage.googleapis.com/chromium-browser-snapshots/Win_x64/1148119</_ChromeBaseSnapshotUrl>
diff --git a/eng/testing/tests.ioslike.targets b/eng/testing/tests.ioslike.targets
index 9151c7c7db05e5..f93afcf1dfb31b 100644
--- a/eng/testing/tests.ioslike.targets
+++ b/eng/testing/tests.ioslike.targets
@@ -15,8 +15,8 @@
<_AOTBuildCommand Condition="'$(ContinuousIntegrationBuild)' != 'true'">$(_AOTBuildCommand) /p:RuntimeSrcDir=$(RepoRoot) /p:RuntimeConfig=$(Configuration)</_AOTBuildCommand>
-
- <_AOTBuildCommand>$(_AOTBuildCommand) /p:XHARNESS_EXECUTION_DIR="$XHARNESS_EXECUTION_DIR" /p:RunAOTCompilation=$(RunAOTCompilation) /p:UseNativeAOTRuntime=$(UseNativeAOTRuntime) /p:TargetOS=$(TargetOS) /p:TargetArchitecture=$(TargetArchitecture) /p:MonoForceInterpreter=$(MonoForceInterpreter) /p:DevTeamProvisioning=$(DevTeamProvisioning) /p:UsePortableRuntimePack=true /p:Configuration=$(Configuration)</_AOTBuildCommand>
+
+ <_AOTBuildCommand>$(_AOTBuildCommand) /p:XHARNESS_EXECUTION_DIR="$XHARNESS_EXECUTION_DIR" /p:RunAOTCompilation=$(RunAOTCompilation) /p:UseNativeAOTRuntime=$(UseNativeAOTRuntime) /p:TargetOS=$(TargetOS) /p:TargetArchitecture=$(TargetArchitecture) /p:MonoForceInterpreter=$(MonoForceInterpreter) /p:MonoEnableLLVM=true /p:DevTeamProvisioning=$(DevTeamProvisioning) /p:UsePortableRuntimePack=true /p:Configuration=$(Configuration)</_AOTBuildCommand>
<_AOTBuildCommand>$(_AOTBuildCommand)
<_ResetSimulatorSwitch Condition="'$(TargetOS)' == 'iossimulator' or '$(TargetOS)' == 'tvossimulator'">--reset-simulator</_ResetSimulatorSwitch>
diff --git a/eng/testing/workloads-testing.targets b/eng/testing/workloads-testing.targets
index 2961313c84973d..df5526e3f11583 100644
--- a/eng/testing/workloads-testing.targets
+++ b/eng/testing/workloads-testing.targets
@@ -76,6 +76,7 @@
Command="chmod +x $(_DotNetInstallScriptPath); $(_DotNetInstallCommand)" />
diff --git a/global.json b/global.json
index b4ca83d356c283..2b41b42c3256e6 100644
--- a/global.json
+++ b/global.json
@@ -8,9 +8,9 @@
"dotnet": "8.0.100-preview.7.23376.3"
},
"msbuild-sdks": {
- "Microsoft.DotNet.Arcade.Sdk": "8.0.0-beta.23411.1",
- "Microsoft.DotNet.Helix.Sdk": "8.0.0-beta.23411.1",
- "Microsoft.DotNet.SharedFramework.Sdk": "8.0.0-beta.23411.1",
+ "Microsoft.DotNet.Arcade.Sdk": "8.0.0-beta.23463.1",
+ "Microsoft.DotNet.Helix.Sdk": "8.0.0-beta.23463.1",
+ "Microsoft.DotNet.SharedFramework.Sdk": "8.0.0-beta.23463.1",
"Microsoft.Build.NoTargets": "3.7.0",
"Microsoft.Build.Traversal": "3.4.0",
"Microsoft.NET.Sdk.IL": "8.0.0-rc.1.23406.6"
diff --git a/src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs
index dbbb6758593b0c..e3e091bb872a2b 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/GC.CoreCLR.cs
@@ -899,13 +899,10 @@ internal enum RefreshMemoryStatus
///
/// This API will only handle configs that could be handled when the runtime is loaded, for example, for configs that don't have any effects on 32-bit systems (like the GCHeapHardLimit* ones), this API will not handle it.
///
- /// As of now, this API is feature preview only and subject to changes as necessary.
- ///
/// <exception cref="InvalidOperationException">If the hard limit is too low. This can happen if the heap hard limit that the refresh will set, either because of new AppData settings or implied by the container memory limit changes, is lower than what is already committed.</exception>
/// <exception cref="ArgumentOutOfRangeException">If the hard limit is invalid. This can happen, for example, with negative heap hard limit percentages.</exception>
///
///
- [RequiresPreviewFeatures("RefreshMemoryLimit is in preview.")]
public static void RefreshMemoryLimit()
{
ulong heapHardLimit = (AppContext.GetData("GCHeapHardLimit") as ulong?) ?? ulong.MaxValue;
diff --git a/src/coreclr/debug/createdump/crashinfo.cpp b/src/coreclr/debug/createdump/crashinfo.cpp
index ef903767ba0279..8af6ec4a54f5bd 100644
--- a/src/coreclr/debug/createdump/crashinfo.cpp
+++ b/src/coreclr/debug/createdump/crashinfo.cpp
@@ -195,7 +195,7 @@ CrashInfo::GatherCrashInfo(DumpType dumpType)
return false;
}
// Add the special (fake) memory region for the special diagnostics info
- MemoryRegion special(PF_R, SpecialDiagInfoAddress, SpecialDiagInfoAddress + PAGE_SIZE);
+ MemoryRegion special(PF_R, SpecialDiagInfoAddress, SpecialDiagInfoAddress + SpecialDiagInfoSize);
m_memoryRegions.insert(special);
#ifdef __APPLE__
InitializeOtherMappings();
diff --git a/src/coreclr/debug/createdump/specialdiaginfo.h b/src/coreclr/debug/createdump/specialdiaginfo.h
index 3a04a9f551e6d7..a857129c9c91ff 100644
--- a/src/coreclr/debug/createdump/specialdiaginfo.h
+++ b/src/coreclr/debug/createdump/specialdiaginfo.h
@@ -24,6 +24,8 @@ const uint64_t SpecialDiagInfoAddress = 0x7fff1000;
#endif
#endif
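+// One 4K page - the same size createdump previously hardcoded as PAGE_SIZE for this region.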
+const uint64_t SpecialDiagInfoSize = 0x1000;
+
struct SpecialDiagInfoHeader
{
char Signature[16];
diff --git a/src/coreclr/debug/daccess/dacdbiimpl.cpp b/src/coreclr/debug/daccess/dacdbiimpl.cpp
index 67d5b1e60d948c..07208001b0c3b8 100644
--- a/src/coreclr/debug/daccess/dacdbiimpl.cpp
+++ b/src/coreclr/debug/daccess/dacdbiimpl.cpp
@@ -7788,8 +7788,9 @@ HRESULT DacStackReferenceWalker::Next(ULONG count, DacGcReference stackRefs[], U
stackRefs[i].i64ExtraData = 0;
const SOSStackRefData &sosStackRef = mList.Get(i);
- if (sosStackRef.Flags & GC_CALL_INTERIOR)
+ if (sosStackRef.Flags & GC_CALL_INTERIOR || sosStackRef.Address == 0)
{
+ // Direct pointer case - interior pointer, Frame ref, or enregistered var.
stackRefs[i].pObject = CLRDATA_ADDRESS_TO_TADDR(sosStackRef.Object) | 1;
}
else
diff --git a/src/coreclr/debug/daccess/request.cpp b/src/coreclr/debug/daccess/request.cpp
index 868593fae4651e..e5cc22d8c708a5 100644
--- a/src/coreclr/debug/daccess/request.cpp
+++ b/src/coreclr/debug/daccess/request.cpp
@@ -135,11 +135,17 @@ BOOL DacValidateEEClass(PTR_EEClass pEEClass)
BOOL DacValidateMethodTable(PTR_MethodTable pMT, BOOL &bIsFree)
{
+ bIsFree = FALSE;
+
+ if ((pMT == NULL) || (dac_cast<TADDR>(pMT) == (TADDR)-1))
+ {
+ return FALSE;
+ }
+
// Verify things are right.
BOOL retval = FALSE;
EX_TRY
{
- bIsFree = FALSE;
if (HOST_CDADDR(pMT) == HOST_CDADDR(g_pFreeObjectMethodTable))
{
bIsFree = TRUE;
@@ -182,7 +188,7 @@ BadMethodTable: ;
BOOL DacValidateMD(PTR_MethodDesc pMD)
{
- if (pMD == NULL)
+ if ((pMD == NULL) || (dac_cast<TADDR>(pMD) == (TADDR)-1))
{
return FALSE;
}
@@ -2642,8 +2648,7 @@ ClrDataAccess::GetAssemblyLocation(CLRDATA_ADDRESS assembly, int count, _Inout_u
// Turn from bytes to wide characters
if (!pAssembly->GetPEAssembly()->GetPath().IsEmpty())
{
- if (!pAssembly->GetPEAssembly()->GetPath().
- DacGetUnicode(count, location, pNeeded))
+ if (!pAssembly->GetPEAssembly()->GetPath().DacGetUnicode(count, location, pNeeded))
{
hr = E_FAIL;
}
diff --git a/src/coreclr/debug/daccess/stack.cpp b/src/coreclr/debug/daccess/stack.cpp
index 9402d529eb8ea3..6b9f1a491c291c 100644
--- a/src/coreclr/debug/daccess/stack.cpp
+++ b/src/coreclr/debug/daccess/stack.cpp
@@ -1253,14 +1253,19 @@ ClrDataFrame::GetLocalSig(MetaSig** sig,
{
// It turns out we cannot really get rid of this check. Dynamic methods
// (including IL stubs) do not have their local sigs available after JIT time.
- if (!m_methodDesc->IsIL())
+ // IL methods with dynamically generated IL (for example, UnsafeAccessors) may
+ // not have an IL header.
+ COR_ILMETHOD* ilHeader = m_methodDesc->IsIL()
+ ? m_methodDesc->GetILHeader()
+ : NULL;
+ if (ilHeader == NULL)
{
*sig = NULL;
*count = 0;
return E_FAIL;
}
- COR_ILMETHOD_DECODER methodDecoder(m_methodDesc->GetILHeader());
+ COR_ILMETHOD_DECODER methodDecoder(ilHeader);
mdSignature localSig = methodDecoder.GetLocalVarSigTok() ?
methodDecoder.GetLocalVarSigTok() : mdSignatureNil;
if (localSig == mdSignatureNil)
diff --git a/src/coreclr/debug/di/process.cpp b/src/coreclr/debug/di/process.cpp
index eb0f4ad5f1c262..db8f2a4badd67f 100644
--- a/src/coreclr/debug/di/process.cpp
+++ b/src/coreclr/debug/di/process.cpp
@@ -180,7 +180,11 @@ STDAPI DLLEXPORT OpenVirtualProcessImpl2(
IUnknown ** ppInstance,
CLR_DEBUGGING_PROCESS_FLAGS* pFlagsOut)
{
+#ifdef TARGET_WINDOWS
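+ // LOAD_WITH_ALTERED_SEARCH_PATH resolves the DAC's own dependencies relative to
+ // pDacModulePath instead of the host process's directory.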
+ HMODULE hDac = LoadLibraryExW(pDacModulePath, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
+#else
HMODULE hDac = LoadLibraryW(pDacModulePath);
+#endif // TARGET_WINDOWS
if (hDac == NULL)
{
return HRESULT_FROM_WIN32(GetLastError());
diff --git a/src/coreclr/debug/di/rsclass.cpp b/src/coreclr/debug/di/rsclass.cpp
index ec52823c07af5f..55f83b48a6d211 100644
--- a/src/coreclr/debug/di/rsclass.cpp
+++ b/src/coreclr/debug/di/rsclass.cpp
@@ -132,6 +132,7 @@ HRESULT CordbClass::GetStaticFieldValue(mdFieldDef fieldDef,
IMetaDataImport * pImport = NULL;
EX_TRY
{
+ RSLockHolder lockHolder(GetProcess()->GetProcessLock());
pImport = GetModule()->GetMetaDataImporter(); // throws
// Validate the token.
@@ -1191,4 +1192,3 @@ HRESULT CordbClass::SearchFieldInfo(
// Well, the field doesn't even belong to this class...
ThrowHR(E_INVALIDARG);
}
-
diff --git a/src/coreclr/debug/ee/functioninfo.cpp b/src/coreclr/debug/ee/functioninfo.cpp
index 19910c6429a9c6..6eaa02d2c6de6f 100644
--- a/src/coreclr/debug/ee/functioninfo.cpp
+++ b/src/coreclr/debug/ee/functioninfo.cpp
@@ -1575,7 +1575,11 @@ DebuggerJitInfo *DebuggerMethodInfo::FindOrCreateInitAndAddJitInfo(MethodDesc* f
if (startAddr == NULL)
{
startAddr = g_pEEInterface->GetFunctionAddress(fd);
- _ASSERTE(startAddr != NULL);
+ if (startAddr == NULL)
+ {
+ // The only case where this should happen is if we are trying to get the DJI for a method that has not been jitted yet.
+ return NULL;
+ }
}
else
{
diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp
index 02a9b8f26c2f56..7351954070725e 100644
--- a/src/coreclr/gc/gc.cpp
+++ b/src/coreclr/gc/gc.cpp
@@ -823,6 +823,11 @@ class t_join
join_struct.r_join_lock = n_th;
}
+ int get_num_threads()
+ {
+ return join_struct.n_threads;
+ }
+
void destroy ()
{
dprintf (JOIN_LOG, ("Destroying join structure"));
@@ -887,6 +892,8 @@ class t_join
// avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent()
if (color == join_struct.lock_color.LoadWithoutBarrier())
{
+ dprintf (9999, ("---h%d %d j%d %d - respin!!! (c:%d-%d)",
+ gch->heap_number, join_id, join_struct.n_threads, color, join_struct.lock_color.LoadWithoutBarrier()));
goto respin;
}
@@ -1117,6 +1124,25 @@ t_join bgc_t_join;
} \
}
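+// spin_and_wait keeps evaluating expr, spinning up to count_to_spin iterations with
+// YieldProcessor between checks and yielding the thread whenever a full spin round
+// still leaves expr false.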
+#define spin_and_wait(count_to_spin, expr) \
+{ \
+ while (!(expr)) \
+ { \
+ for (int j = 0; j < count_to_spin; j++) \
+ { \
+ if (expr) \
+ { \
+ break; \
+ } \
+ YieldProcessor (); \
+ } \
+ if (!(expr)) \
+ { \
+ GCToOSInterface::YieldThread (0); \
+ } \
+ } \
+}
+
#ifdef BACKGROUND_GC
#define max_pending_allocs 64
@@ -1429,8 +1455,6 @@ enter_msl_status gc_heap::enter_spin_lock_msl_helper (GCSpinLock* msl)
{
#ifdef DYNAMIC_HEAP_COUNT
uint64_t start = GetHighPrecisionTimeStamp();
-
- msl->msl_wait_count++;
#endif //DYNAMIC_HEAP_COUNT
unsigned int i = 0;
@@ -1485,7 +1509,7 @@ enter_msl_status gc_heap::enter_spin_lock_msl_helper (GCSpinLock* msl)
#ifdef DYNAMIC_HEAP_COUNT
uint64_t end = GetHighPrecisionTimeStamp();
Interlocked::ExchangeAdd64 (&msl->msl_wait_time, end - start);
- dprintf (6666, ("wait for msl lock total time: %zd, total count: %zd, this time: %zd, this count: %u", msl->msl_wait_time, msl->msl_wait_count, end - start, i));
+ dprintf (3, ("h%d wait for msl lock wait time %zd, total wait time: %zd", heap_number, (end - start), msl->msl_wait_time));
#endif //DYNAMIC_HEAP_COUNT
}
while (Interlocked::CompareExchange (&msl->lock, lock_taken, lock_free) != lock_free);
@@ -2318,9 +2342,6 @@ sorted_table* gc_heap::seg_table;
#ifdef MULTIPLE_HEAPS
GCEvent gc_heap::ee_suspend_event;
-#ifdef DYNAMIC_HEAP_COUNT
-GCEvent gc_heap::gc_idle_thread_event;
-#endif //DYNAMIC_HEAP_COUNT
size_t gc_heap::min_gen0_balance_delta = 0;
size_t gc_heap::min_balance_threshold = 0;
#endif //MULTIPLE_HEAPS
@@ -2919,6 +2940,12 @@ BOOL gc_heap::should_expand_in_full_gc = FALSE;
#ifdef DYNAMIC_HEAP_COUNT
int gc_heap::dynamic_adaptation_mode = dynamic_adaptation_default;
gc_heap::dynamic_heap_count_data_t SVR::gc_heap::dynamic_heap_count_data;
+uint64_t gc_heap::last_suspended_end_time = 0;
+size_t gc_heap::gc_index_full_gc_end = 0;
+
+#ifdef STRESS_DYNAMIC_HEAP_COUNT
+int gc_heap::heaps_in_this_gc = 0;
+#endif //STRESS_DYNAMIC_HEAP_COUNT
#endif // DYNAMIC_HEAP_COUNT
// Provisional mode related stuff.
@@ -6967,12 +6994,6 @@ BOOL gc_heap::create_thread_support (int number_of_heaps)
{
goto cleanup;
}
-#ifdef DYNAMIC_HEAP_COUNT
- if (!gc_idle_thread_event.CreateOSManualEventNoThrow (FALSE))
- {
- goto cleanup;
- }
-#endif //DYNAMIC_HEAP_COUNT
if (!ee_suspend_event.CreateOSAutoEventNoThrow (FALSE))
{
goto cleanup;
@@ -7020,10 +7041,6 @@ bool gc_heap::create_gc_thread ()
return GCToEEInterface::CreateThread(gc_thread_stub, this, false, ".NET Server GC");
}
-#ifdef DYNAMIC_HEAP_COUNT
-static size_t prev_change_heap_count_gc_index;
-#endif //DYNAMIC_HEAP_COUNT
-
#ifdef _MSC_VER
#pragma warning(disable:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
#endif //_MSC_VER
@@ -7042,18 +7059,87 @@ void gc_heap::gc_thread_function ()
if (heap_number == 0)
{
- uint32_t wait_result = gc_heap::ee_suspend_event.Wait(gradual_decommit_in_progress_p ? DECOMMIT_TIME_STEP_MILLISECONDS : INFINITE, FALSE);
+ bool wait_on_time_out_p = gradual_decommit_in_progress_p;
+ uint32_t wait_time = DECOMMIT_TIME_STEP_MILLISECONDS;
+#ifdef DYNAMIC_HEAP_COUNT
+ // background_running_p can only change from false to true during suspension.
+ if (!gc_heap::background_running_p () && dynamic_heap_count_data.should_change_heap_count)
+ {
+ assert (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes);
+
+ dynamic_heap_count_data_t::sample& sample = dynamic_heap_count_data.samples[dynamic_heap_count_data.sample_index];
+ wait_time = min (wait_time, (uint32_t)(sample.elapsed_between_gcs / 1000 / 3));
+ wait_time = max (wait_time, 1);
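+ // i.e. wake up after about a third of the recent time between GCs, converted to ms and clamped to at least 1 ms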
+
+ dprintf (6666, ("gc#0 thread waiting for %d ms (betwen GCs %I64d)", wait_time, sample.elapsed_between_gcs));
+ }
+#endif //DYNAMIC_HEAP_COUNT
+ uint32_t wait_result = gc_heap::ee_suspend_event.Wait(wait_on_time_out_p ? wait_time : INFINITE, FALSE);
+ dprintf (9999, ("waiting for ee done res %d (timeout %d, %I64d ms since last suspend end)(should_change_heap_count is %d) (gradual_decommit_in_progress_p %d)",
+ wait_result, wait_time, ((GetHighPrecisionTimeStamp() - last_suspended_end_time) / 1000),
+ dynamic_heap_count_data.should_change_heap_count, gradual_decommit_in_progress_p));
if (wait_result == WAIT_TIMEOUT)
{
- decommit_lock.Enter();
- gradual_decommit_in_progress_p = decommit_step (DECOMMIT_TIME_STEP_MILLISECONDS);
- decommit_lock.Leave();
+#ifdef DYNAMIC_HEAP_COUNT
+ if (dynamic_heap_count_data.should_change_heap_count)
+ {
+#ifdef BACKGROUND_GC
+ if (!gc_heap::background_running_p ())
+#endif //BACKGROUND_GC
+ {
+ dprintf (6666, ("changing heap count due to timeout"));
+ check_heap_count();
+ }
+ }
+#endif //DYNAMIC_HEAP_COUNT
+
+ if (gradual_decommit_in_progress_p)
+ {
+ decommit_lock.Enter ();
+ gradual_decommit_in_progress_p = decommit_step (DECOMMIT_TIME_STEP_MILLISECONDS);
+ decommit_lock.Leave ();
+ }
continue;
}
+#ifdef DYNAMIC_HEAP_COUNT
+ // We might want to consider also doing this when a BGC finishes.
+ if (dynamic_heap_count_data.should_change_heap_count)
+ {
+#ifdef BACKGROUND_GC
+ if (!gc_heap::background_running_p ())
+#endif //BACKGROUND_GC
+ {
+ // this was a request to do a GC so make sure we follow through with one.
+ dprintf (6666, ("changing heap count at a GC start"));
+ check_heap_count ();
+ }
+ }
+
+ // wait until the threads that should have gone idle have at least reached the point where they are about to wait on the idle event.
+ if ((gc_heap::dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) &&
+ (n_heaps != dynamic_heap_count_data.last_n_heaps))
+ {
+ int spin_count = 1024;
+ int idle_thread_count = n_max_heaps - n_heaps;
+ dprintf (9999, ("heap count changed %d->%d, idle should be %d and is %d", dynamic_heap_count_data.last_n_heaps, n_heaps,
+ idle_thread_count, VolatileLoadWithoutBarrier (&dynamic_heap_count_data.idle_thread_count)));
+ if (idle_thread_count != dynamic_heap_count_data.idle_thread_count)
+ {
+ spin_and_wait (spin_count, (idle_thread_count == dynamic_heap_count_data.idle_thread_count));
+ dprintf (9999, ("heap count changed %d->%d, now idle is %d", dynamic_heap_count_data.last_n_heaps, n_heaps,
+ VolatileLoadWithoutBarrier (&dynamic_heap_count_data.idle_thread_count)));
+ }
+
+ dynamic_heap_count_data.last_n_heaps = n_heaps;
+ }
+#endif //DYNAMIC_HEAP_COUNT
+
suspended_start_time = GetHighPrecisionTimeStamp();
BEGIN_TIMING(suspend_ee_during_log);
+ dprintf (9999, ("h0 suspending EE in GC!"));
GCToEEInterface::SuspendEE(SUSPEND_FOR_GC);
+ dprintf (9999, ("h0 suspended EE in GC!"));
END_TIMING(suspend_ee_during_log);
proceed_with_gc_p = TRUE;
@@ -7067,46 +7153,74 @@ void gc_heap::gc_thread_function ()
{
settings.init_mechanisms();
#ifdef DYNAMIC_HEAP_COUNT
- // make sure the other gc threads cannot see this as a request to change heap count
- // see explanation below about the cases when we return from gc_start_event.Wait
- assert (dynamic_heap_count_data.new_n_heaps == n_heaps);
+ if (gc_heap::dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes)
+ {
+ // make sure the other gc threads cannot see this as a request to change heap count
+ // see explanation below about the cases when we return from gc_start_event.Wait
+ assert (dynamic_heap_count_data.new_n_heaps == n_heaps);
+ }
#endif //DYNAMIC_HEAP_COUNT
+ dprintf (9999, ("GC thread %d setting_gc_start_in_gc(h%d)", heap_number, n_heaps));
gc_start_event.Set();
}
dprintf (3, (ThreadStressLog::gcServerThread0StartMsg(), heap_number));
}
else
{
+ dprintf (9999, ("GC thread %d waiting_for_gc_start(%d)(gc%Id)", heap_number, n_heaps, VolatileLoadWithoutBarrier(&settings.gc_index)));
gc_start_event.Wait(INFINITE, FALSE);
#ifdef DYNAMIC_HEAP_COUNT
- // we have a couple different cases to handle here when we come back from the wait:
- // 1. We are starting a GC. Signaled by dynamic_heap_count_data.new_n_heaps == n_heaps
- // a) We are starting a GC, but this thread is idle. Signaled by n_heaps <= heap_number
- // b) We are starting a GC, and this thread is participating. Signaled by heap_number < n_heaps
- // 2. We are changing heap count. Signaled by dynamic_heap_count_data.new_n_heaps != n_heaps
- // a) We are changing heap count, but this thread is idle. Signaled by n_heaps <= heap_number.
- // b) We are changing heap count, and this thread is participating. Signaled by heap_number < n_heaps.
-
- // check for 1.a) and 2.a) cases above
- if (n_heaps <= heap_number)
- {
- dprintf (2, ("GC thread %d idle", heap_number));
-
- // make sure GC is complete so we know the gc_idle_thread_event has been reset
- g_theGCHeap->WaitUntilGCComplete();
+ dprintf (9999, ("GC thread %d waiting_done_gc_start(%d-%d)(i: %d)(gc%Id)",
+ heap_number, n_heaps, dynamic_heap_count_data.new_n_heaps, dynamic_heap_count_data.init_only_p, VolatileLoadWithoutBarrier (&settings.gc_index)));
+
+ if ((gc_heap::dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) &&
+ (dynamic_heap_count_data.new_n_heaps != n_heaps))
+ {
+ // The reason we need to do this is:
+ // + for threads that were participating, we need them to do work for change_heap_count
+ // + for threads that were not participating but will need to participate, we need to make sure they are woken now instead of
+ // randomly sometime later.
+ int old_n_heaps = n_heaps;
+ int new_n_heaps = dynamic_heap_count_data.new_n_heaps;
+ int num_threads_to_wake = max (new_n_heaps, old_n_heaps);
+ if (heap_number < num_threads_to_wake)
+ {
+ dprintf (9999, ("h%d < %d, calling change", heap_number, num_threads_to_wake));
+ change_heap_count (dynamic_heap_count_data.new_n_heaps);
+ if (new_n_heaps < old_n_heaps)
+ {
+ dprintf (9999, ("h%d after change", heap_number));
+ // at the end of change_heap_count we've changed join's heap count to the new one if it's smaller. So we need to make sure
+ // only that many threads will participate in the following GCs.
+ if (heap_number < new_n_heaps)
+ {
+ dprintf (9999, ("h%d < %d participating (dec)", heap_number, new_n_heaps));
+ }
+ else
+ {
+ Interlocked::Increment (&dynamic_heap_count_data.idle_thread_count);
+ dprintf (9999, ("GC thread %d wait_on_idle(%d < %d)(gc%Id), total idle %d", heap_number, old_n_heaps, new_n_heaps,
+ VolatileLoadWithoutBarrier (&settings.gc_index), VolatileLoadWithoutBarrier (&dynamic_heap_count_data.idle_thread_count)));
+ gc_idle_thread_event.Wait (INFINITE, FALSE);
+ dprintf (9999, ("GC thread %d waking_from_idle(%d)(gc%Id) after doing change", heap_number, n_heaps, VolatileLoadWithoutBarrier (&settings.gc_index)));
+ }
+ }
+ else
+ {
+ dprintf (9999, ("h%d < %d participating (inc)", heap_number, new_n_heaps));
+ }
+ }
+ else
+ {
+ Interlocked::Increment (&dynamic_heap_count_data.idle_thread_count);
+ dprintf (9999, ("GC thread %d wait_on_idle(< max %d)(gc%Id), total idle %d", heap_number, num_threads_to_wake,
+ VolatileLoadWithoutBarrier (&settings.gc_index), VolatileLoadWithoutBarrier (&dynamic_heap_count_data.idle_thread_count)));
+ gc_idle_thread_event.Wait (INFINITE, FALSE);
+ dprintf (9999, ("GC thread %d waking_from_idle(%d)(gc%Id)", heap_number, n_heaps, VolatileLoadWithoutBarrier (&settings.gc_index)));
+ }
- // now wait on the gc_idle_thread_event
- gc_idle_thread_event.Wait(INFINITE, FALSE);
- dprintf (2, ("GC thread %d waking from idle", heap_number));
- continue;
- }
- // case 2.b) above: is this a request to change heap count?
- if (dynamic_heap_count_data.new_n_heaps != n_heaps)
- {
- change_heap_count (dynamic_heap_count_data.new_n_heaps);
continue;
}
- // case 1.b) above: we're starting a GC.
#endif //DYNAMIC_HEAP_COUNT
dprintf (3, (ThreadStressLog::gcServerThreadNStartMsg(), heap_number));
}
@@ -7191,10 +7305,6 @@ void gc_heap::gc_thread_function ()
{
gradual_decommit_in_progress_p = decommit_step (DECOMMIT_TIME_STEP_MILLISECONDS);
}
-#ifdef DYNAMIC_HEAP_COUNT
- // check if we should adjust the number of heaps
- check_heap_count();
-#endif //DYNAMIC_HEAP_COUNT
}
else
{
@@ -9955,6 +10065,20 @@ BOOL gc_heap::insert_ro_segment (heap_segment* seg)
return TRUE;
}
+void gc_heap::update_ro_segment (heap_segment* seg, uint8_t* allocated, uint8_t* committed)
+{
+ enter_spin_lock (&gc_heap::gc_lock);
+
+ assert (use_frozen_segments_p);
+ assert (heap_segment_read_only_p (seg));
+ assert (allocated <= committed);
+ assert (committed <= heap_segment_reserved (seg));
+ heap_segment_allocated (seg) = allocated;
+ heap_segment_committed (seg) = committed;
+
+ leave_spin_lock (&gc_heap::gc_lock);
+}
+
// No one is calling this function right now. If this is getting called we need
// to take care of decommitting the mark array for it - we will need to remember
// which portion of the mark array was committed and only decommit that.
@@ -12513,6 +12637,16 @@ void gc_heap::rearrange_uoh_segments()
freeable_uoh_segment = 0;
}
+void gc_heap::delay_free_segments()
+{
+ rearrange_uoh_segments();
+#ifdef BACKGROUND_GC
+ background_delay_delete_uoh_segments();
+ if (!gc_heap::background_running_p())
+ rearrange_small_heap_segments();
+#endif //BACKGROUND_GC
+}
+
#ifndef USE_REGIONS
void gc_heap::rearrange_heap_segments(BOOL compacting)
{
@@ -14846,6 +14980,25 @@ gc_heap::init_gc_heap (int h_number)
gc_done_event_lock = -1;
gc_done_event_set = false;
+#ifdef DYNAMIC_HEAP_COUNT
+ if (h_number != 0)
+ {
+ if (!gc_idle_thread_event.CreateAutoEventNoThrow (FALSE))
+ {
+ return 0;
+ }
+
+#ifdef BACKGROUND_GC
+ if (!bgc_idle_thread_event.CreateAutoEventNoThrow (FALSE))
+ {
+ return 0;
+ }
+#endif //BACKGROUND_GC
+
+ dprintf (9999, ("creating idle events for h%d", h_number));
+ }
+#endif //DYNAMIC_HEAP_COUNT
+
if (!init_dynamic_data())
{
return 0;
@@ -16024,7 +16177,6 @@ void min_fl_list_info::thread_item_no_prev (uint8_t* item)
tail = item;
}
-// This is only implemented for gen2 right now!!!!
// the min_fl_list array is arranged as chunks of n_heaps min_fl_list_info, the 1st chunk corresponds to the 1st bucket,
// and so on.
void allocator::rethread_items (size_t* num_total_fl_items, size_t* num_total_fl_items_rethreaded, gc_heap* current_heap,
@@ -17392,6 +17544,7 @@ BOOL gc_heap::a_fit_free_list_uoh_p (size_t size,
gen_number, align_const);
dd_new_allocation (dynamic_data_of (gen_number)) -= limit;
+ size_t saved_free_list_size = free_list_size;
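+ // remember the original item size - free_list_size may be adjusted below, but the
+ // free list accounting at the end needs the value we actually removed from the list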
#ifdef FEATURE_LOH_COMPACTION
if (loh_pad)
{
@@ -17420,7 +17573,7 @@ BOOL gc_heap::a_fit_free_list_uoh_p (size_t size,
{
generation_free_obj_space (gen) += remain_size;
}
- generation_free_list_space (gen) -= free_list_size;
+ generation_free_list_space (gen) -= saved_free_list_size;
assert ((ptrdiff_t)generation_free_list_space (gen) >= 0);
generation_free_list_allocated (gen) += limit;
@@ -21986,11 +22139,70 @@ BOOL gc_heap::should_proceed_with_gc()
void gc_heap::update_end_gc_time_per_heap()
{
+#ifdef DYNAMIC_HEAP_COUNT
+ size_t prev_gen2_end_time = 0;
+ if ((heap_number == 0) && (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes) && (settings.condemned_generation == max_generation))
+ {
+ dynamic_data* dd = dynamic_data_of (max_generation);
+ prev_gen2_end_time = dd_previous_time_clock (dd) + dd_gc_elapsed_time (dd);
+ }
+#endif //DYNAMIC_HEAP_COUNT
+
for (int gen_number = 0; gen_number <= settings.condemned_generation; gen_number++)
{
dynamic_data* dd = dynamic_data_of (gen_number);
+
+ if (heap_number == 0)
+ {
+ dprintf (6666, ("prev gen%d GC end time: prev start %I64d + prev gc elapsed %Id = %I64d",
+ gen_number, dd_previous_time_clock (dd), dd_gc_elapsed_time (dd), (dd_previous_time_clock (dd) + dd_gc_elapsed_time (dd))));
+ }
+
dd_gc_elapsed_time (dd) = (size_t)(end_gc_time - dd_time_clock (dd));
+
+ if (heap_number == 0)
+ {
+ dprintf (6666, ("updated NGC%d %Id elapsed time to %I64d - %I64d = %I64d", gen_number, dd_gc_clock (dd), end_gc_time, dd_time_clock (dd), dd_gc_elapsed_time (dd)));
+ }
}
+
+#ifdef DYNAMIC_HEAP_COUNT
+ if ((heap_number == 0) && (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes))
+ {
+ dynamic_heap_count_data_t::sample& sample = dynamic_heap_count_data.samples[dynamic_heap_count_data.sample_index];
+ sample.elapsed_between_gcs = end_gc_time - last_suspended_end_time;
+ sample.gc_pause_time = dd_gc_elapsed_time (dynamic_data_of (0));
+ sample.msl_wait_time = get_msl_wait_time();
+
+ dprintf (6666, ("sample#%d: this GC end %I64d - last sus end %I64d = %I64d, this GC pause %I64d, msl wait %I64d",
+ dynamic_heap_count_data.sample_index, end_gc_time, last_suspended_end_time, sample.elapsed_between_gcs, sample.gc_pause_time, sample.msl_wait_time));
+
+ last_suspended_end_time = end_gc_time;
+
+ GCEventFireHeapCountSample_V1 (
+ (uint64_t)VolatileLoadWithoutBarrier (&settings.gc_index),
+ sample.elapsed_between_gcs,
+ sample.gc_pause_time,
+ sample.msl_wait_time);
+
+ dynamic_heap_count_data.sample_index = (dynamic_heap_count_data.sample_index + 1) % dynamic_heap_count_data_t::sample_size;
+
+ if (settings.condemned_generation == max_generation)
+ {
+ gc_index_full_gc_end = dd_gc_clock (dynamic_data_of (0));
+ size_t elapsed_between_gen2_gcs = end_gc_time - prev_gen2_end_time;
+ size_t gen2_elapsed_time = sample.gc_pause_time;
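+ // gen2 overhead = this gen2 GC's pause as a percentage of the wall clock time since the previous gen2 GC ended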
+ dynamic_heap_count_data.gen2_gc_percents[dynamic_heap_count_data.gen2_sample_index] = (float)gen2_elapsed_time * 100.0f / elapsed_between_gen2_gcs;
+
+ dprintf (6666, ("gen2 sample#%d: this GC end %I64d - last gen2 end %I64d = %I64d, GC elapsed %I64d, percent %.3f",
+ dynamic_heap_count_data.gen2_sample_index, end_gc_time, prev_gen2_end_time, elapsed_between_gen2_gcs,
+ gen2_elapsed_time, dynamic_heap_count_data.gen2_gc_percents[dynamic_heap_count_data.gen2_sample_index]));
+ dynamic_heap_count_data.gen2_sample_index = (dynamic_heap_count_data.gen2_sample_index + 1) % dynamic_heap_count_data_t::sample_size;
+ }
+
+ calculate_new_heap_count ();
+ }
+#endif //DYNAMIC_HEAP_COUNT
}
void gc_heap::update_end_ngc_time()
@@ -22137,7 +22349,31 @@ void gc_heap::gc1()
{
dynamic_data* dd = dynamic_data_of (n);
end_gc_time = GetHighPrecisionTimeStamp();
+ size_t time_since_last_gen2 = 0;
+
+#ifdef DYNAMIC_HEAP_COUNT
+ if ((heap_number == 0) && (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes))
+ {
+ time_since_last_gen2 = (size_t)(end_gc_time - (dd_previous_time_clock (dd) + dd_gc_elapsed_time (dd)));
+ dprintf (6666, ("BGC %Id end %I64d - (prev gen2 start %I64d + elapsed %Id = %I64d) = time inbewteen gen2 %Id",
+ dd_gc_clock (dd), end_gc_time, dd_previous_time_clock (dd), dd_gc_elapsed_time (dd), (dd_previous_time_clock (dd) + dd_gc_elapsed_time (dd)), time_since_last_gen2));
+ }
+#endif //DYNAMIC_HEAP_COUNT
+
dd_gc_elapsed_time (dd) = (size_t)(end_gc_time - dd_time_clock (dd));
+#ifdef DYNAMIC_HEAP_COUNT
+ if ((heap_number == 0) && (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes))
+ {
+ dprintf (6666, ("updating BGC %Id elapsed time to %I64d - %I64d = %I64d", dd_gc_clock (dd), end_gc_time, dd_time_clock (dd), dd_gc_elapsed_time (dd)));
+
+ float bgc_percent = (float)dd_gc_elapsed_time (dd) * 100.0f / (float)time_since_last_gen2;
+ dynamic_heap_count_data.gen2_gc_percents[dynamic_heap_count_data.gen2_sample_index] = bgc_percent;
+ dprintf (6666, ("gen2 sample %d elapsed %Id * 100 / time inbetween gen2 %Id = %.3f",
+ dynamic_heap_count_data.gen2_sample_index, dd_gc_elapsed_time (dd), time_since_last_gen2, bgc_percent));
+ dynamic_heap_count_data.gen2_sample_index = (dynamic_heap_count_data.gen2_sample_index + 1) % dynamic_heap_count_data_t::sample_size;
+ gc_index_full_gc_end = dd_gc_clock (dynamic_data_of (0));
+ }
+#endif //DYNAMIC_HEAP_COUNT
#ifdef HEAP_BALANCE_INSTRUMENTATION
if (heap_number == 0)
@@ -22744,7 +22980,12 @@ void gc_heap::merge_fl_from_other_heaps (int gen_idx, int to_n_heaps, int from_n
assert (free_list_space_decrease <= generation_free_list_space (gen));
generation_free_list_space (gen) -= free_list_space_decrease;
- assert (free_list_space_decrease <= dd_fragmentation (dd));
+ // TODO - I'm seeing that for gen2, free_list_space_decrease can be a bit larger than frag.
+ // Need to fix this later.
+ if (gen_idx != max_generation)
+ {
+ assert (free_list_space_decrease <= dd_fragmentation (dd));
+ }
size_t free_list_space_increase = 0;
for (int from_hn = 0; from_hn < from_n_heaps; from_hn++)
@@ -23719,9 +23960,6 @@ void gc_heap::garbage_collect (int n)
#ifdef MULTIPLE_HEAPS
gc_start_event.Reset();
-#ifdef DYNAMIC_HEAP_COUNT
- gc_idle_thread_event.Reset();
-#endif //DYNAMIC_HEAP_COUNT
gc_t_join.restart();
#endif //MULTIPLE_HEAPS
}
@@ -23743,6 +23981,9 @@ void gc_heap::garbage_collect (int n)
#endif // STRESS_HEAP
#ifdef MULTIPLE_HEAPS
+#ifdef STRESS_DYNAMIC_HEAP_COUNT
+ Interlocked::Increment (&heaps_in_this_gc);
+#endif //STRESS_DYNAMIC_HEAP_COUNT
//align all heaps on the max generation to condemn
dprintf (3, ("Joining for max generation to condemn"));
condemned_generation_num = generation_to_condemn (n,
@@ -23758,30 +23999,31 @@ void gc_heap::garbage_collect (int n)
#endif //FEATURE_BASICFREEZE
#ifdef MULTIPLE_HEAPS
+#ifdef STRESS_DYNAMIC_HEAP_COUNT
+ dprintf (9999, ("%d heaps, join sees %d, actually joined %d, %d idle threads (%d)",
+ n_heaps, gc_t_join.get_num_threads (), heaps_in_this_gc,
+ VolatileLoadWithoutBarrier(&dynamic_heap_count_data.idle_thread_count), (n_max_heaps - n_heaps)));
+ if (heaps_in_this_gc != n_heaps)
+ {
+ dprintf (9999, ("should have %d heaps but actually have %d!!", n_heaps, heaps_in_this_gc));
+ GCToOSInterface::DebugBreak ();
+ }
+
+ heaps_in_this_gc = 0;
+#endif //STRESS_DYNAMIC_HEAP_COUNT
+
for (int i = 0; i < n_heaps; i++)
{
gc_heap* hp = g_heaps[i];
// check for card table growth
if (g_gc_card_table != hp->card_table)
hp->copy_brick_card_table();
-
- hp->rearrange_uoh_segments();
-#ifdef BACKGROUND_GC
- hp->background_delay_delete_uoh_segments();
- if (!gc_heap::background_running_p())
- hp->rearrange_small_heap_segments();
-#endif //BACKGROUND_GC
+ hp->delay_free_segments();
}
#else //MULTIPLE_HEAPS
if (g_gc_card_table != card_table)
copy_brick_card_table();
-
- rearrange_uoh_segments();
-#ifdef BACKGROUND_GC
- background_delay_delete_uoh_segments();
- if (!gc_heap::background_running_p())
- rearrange_small_heap_segments();
-#endif //BACKGROUND_GC
+ delay_free_segments();
#endif //MULTIPLE_HEAPS
BOOL should_evaluate_elevation = TRUE;
@@ -23868,10 +24110,8 @@ void gc_heap::garbage_collect (int n)
do_pre_gc();
#ifdef MULTIPLE_HEAPS
+ dprintf (9999, ("in GC, resetting gc_start"));
gc_start_event.Reset();
-#ifdef DYNAMIC_HEAP_COUNT
- gc_idle_thread_event.Reset();
-#endif //DYNAMIC_HEAP_COUNT
dprintf(3, ("Starting all gc threads for gc"));
gc_t_join.restart();
#endif //MULTIPLE_HEAPS
@@ -24327,7 +24567,7 @@ void gc_heap::equalize_promoted_bytes(int condemned_gen_number)
// hope is to achieve better work balancing in relocate and compact phases
// this is also used when the heap count changes to balance regions between heaps
int highest_gen_number = ((condemned_gen_number == max_generation) ?
- (total_generation_count - 1) : condemned_gen_number);
+ (total_generation_count - 1) : condemned_gen_number);
int stop_gen_idx = get_stop_generation_index (condemned_gen_number);
for (int gen_idx = highest_gen_number; gen_idx >= stop_gen_idx; gen_idx--)
@@ -25036,285 +25276,332 @@ void gc_heap::recommission_heap()
#endif //RECORD_LOH_STATE
}
-void gc_heap::check_heap_count ()
+float median_of_3 (float a, float b, float c)
+{
+#define compare_and_swap(i, j) \
+ { \
+ if (i < j) \
+ { \
+ float t = i; \
+ i = j; \
+ j = t; \
+ } \
+ }
+ compare_and_swap (b, a);
+ compare_and_swap (c, a);
+ compare_and_swap (c, b);
+#undef compare_and_swap
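+ // after the three swaps, c >= b >= a, so b holds the median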
+ return b;
+}
+
+size_t gc_heap::get_num_completed_gcs ()
{
- dynamic_heap_count_data.new_n_heaps = n_heaps;
+ size_t num_completed_gcs = settings.gc_index;
+#ifdef BACKGROUND_GC
+ if (g_heaps[0]->is_bgc_in_progress ())
+ {
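+ // a BGC that's still in progress is already counted in gc_index but hasn't completed yet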
+ num_completed_gcs--;
+ dprintf (6666, ("BGC in prog, completed GCs -> %Id", num_completed_gcs));
+ }
+#endif //BACKGROUND_GC
+
+ return num_completed_gcs;
+}
+
+int gc_heap::calculate_new_heap_count ()
+{
+ assert (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes);
+
+ size_t num_completed_gcs = get_num_completed_gcs ();
- if (dynamic_adaptation_mode != dynamic_adaptation_to_application_sizes)
+ dprintf (6666, ("current GC %Id(completed: %Id), prev completed GCs %Id, last full GC happened at index %Id",
+ VolatileLoadWithoutBarrier (&settings.gc_index), num_completed_gcs, dynamic_heap_count_data.prev_num_completed_gcs, gc_index_full_gc_end));
+
+ if (num_completed_gcs < (dynamic_heap_count_data.prev_num_completed_gcs + dynamic_heap_count_data_t::sample_size))
{
- return;
+ dprintf (6666, ("not enough GCs, skipping"));
+ return n_heaps;
}
- // we should be calling this only on the main GC thread
- assert (heap_number == 0);
+ float median_gen2_tcp_percent = 0.0f;
+ if (gc_index_full_gc_end >= (settings.gc_index - dynamic_heap_count_data_t::sample_size))
+ {
+ median_gen2_tcp_percent = dynamic_heap_count_data.get_median_gen2_gc_percent ();
+ }
- // acquire data for the current sample
- uint64_t soh_msl_wait_time = 0;
- uint64_t uoh_msl_wait_time = 0;
- size_t allocating_thread_count = 0;
- size_t heap_size = 0;
- for (int i = 0; i < n_heaps; i++)
+ // If there was a blocking gen2 GC, its overhead would be very large and the median below would most likely not pick it. So we
+ // rely on the gen2 sample's overhead calculated above.
+ float throughput_cost_percents[dynamic_heap_count_data_t::sample_size];
+ for (int i = 0; i < dynamic_heap_count_data_t::sample_size; i++)
{
- gc_heap* hp = g_heaps[i];
+ dynamic_heap_count_data_t::sample& sample = dynamic_heap_count_data.samples[i];
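+ // throughput cost = (msl wait time averaged over the heaps + GC pause time) as a percentage of the elapsed time between GCs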
+ throughput_cost_percents[i] = (sample.elapsed_between_gcs ? (((float)sample.msl_wait_time / n_heaps + sample.gc_pause_time) * 100.0f / (float)sample.elapsed_between_gcs) : 0.0f);
+ assert (throughput_cost_percents[i] >= 0.0);
+ if (throughput_cost_percents[i] > 100.0)
+ throughput_cost_percents[i] = 100.0;
+ dprintf (6666, ("sample %d: msl %I64d / %d + pause %I64d / elapsed %I64d = throughput_cost_percent: %.3f", i,
+ sample.msl_wait_time, n_heaps, sample.gc_pause_time, sample.elapsed_between_gcs, throughput_cost_percents[i]));
+ }
- allocating_thread_count += hp->alloc_contexts_used;
+ float median_throughput_cost_percent = median_of_3 (throughput_cost_percents[0], throughput_cost_percents[1], throughput_cost_percents[2]);
- soh_msl_wait_time += hp->more_space_lock_soh.msl_wait_time;
- hp->more_space_lock_soh.msl_wait_time = 0;
- hp->more_space_lock_soh.msl_wait_count = 0;
+ // apply exponential smoothing and use 1/3 for the smoothing factor
+ const float smoothing = 3;
+ float smoothed_median_throughput_cost_percent = dynamic_heap_count_data.smoothed_median_throughput_cost_percent;
+ if (smoothed_median_throughput_cost_percent != 0.0f)
+ {
+ // average it with the previous value
+ smoothed_median_throughput_cost_percent = median_throughput_cost_percent / smoothing + (smoothed_median_throughput_cost_percent / smoothing) * (smoothing - 1);
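+ // e.g. a new median of 3% with a previous smoothed value of 6% yields 3/3 + (6/3) * 2 = 5%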
+ }
+ else
+ {
+ smoothed_median_throughput_cost_percent = median_throughput_cost_percent;
+ }
- uoh_msl_wait_time += hp->more_space_lock_uoh.msl_wait_time;
- hp->more_space_lock_uoh.msl_wait_time = 0;
- hp->more_space_lock_uoh.msl_wait_count = 0;
+ dprintf (6666, ("median tcp: %.3f, smoothed tcp: %.3f, gen2 tcp %.3f(%.3f, %.3f, %.3f)",
+ median_throughput_cost_percent, smoothed_median_throughput_cost_percent, median_gen2_tcp_percent,
+ dynamic_heap_count_data.gen2_gc_percents[0], dynamic_heap_count_data.gen2_gc_percents[1], dynamic_heap_count_data.gen2_gc_percents[2]));
+
+ size_t heap_size = 0;
+ for (int i = 0; i < n_heaps; i++)
+ {
+ gc_heap* hp = g_heaps[i];
for (int gen_idx = 0; gen_idx < total_generation_count; gen_idx++)
{
dynamic_data* dd = hp->dynamic_data_of (gen_idx);
// estimate the size of each generation as the live data size plus the budget
- heap_size += dd_promoted_size (dd) + dd_desired_allocation (dd);
- dprintf (6666, ("h%d g%d promoted: %zd desired allocation: %zd", i, gen_idx, dd_promoted_size (dd), dd_desired_allocation (dd)));
+ heap_size += dd_current_size (dd) + dd_desired_allocation (dd);
+ dprintf (3, ("h%d g%d current: %zd desired allocation: %zd", i, gen_idx, dd_promoted_size (dd), dd_desired_allocation (dd)));
}
}
- dynamic_data* hp0_dd0 = g_heaps[0]->dynamic_data_of (0);
+ // estimate the space cost of adding a heap as the min gen0 budget
+ size_t heap_space_cost_per_heap = dd_min_size (g_heaps[0]->dynamic_data_of (0));
- // persist data for the current sample
- dynamic_heap_count_data_t::sample& sample = dynamic_heap_count_data.samples[dynamic_heap_count_data.sample_index];
+ // compute the % space cost of adding a heap
+ float percent_heap_space_cost_per_heap = heap_space_cost_per_heap * 100.0f / heap_size;
- sample.soh_msl_wait_time = soh_msl_wait_time / n_heaps;
- sample.uoh_msl_wait_time = uoh_msl_wait_time / n_heaps;
- sample.elapsed_between_gcs = dd_time_clock (hp0_dd0) - dd_previous_time_clock (hp0_dd0);
- sample.gc_elapsed_time = dd_gc_elapsed_time (hp0_dd0);
- sample.allocating_thread_count = allocating_thread_count;
- sample.heap_size = heap_size;
+ // compute reasonable step sizes for the heap count
+ //
+ // on the way up, we essentially multiply the heap count by 1.5, so we go 1, 2, 3, 5, 8 ...
+ // we don't go all the way to the number of CPUs, but stay 1 or 2 short
+ int step_up = (n_heaps + 1) / 2;
+ int extra_heaps = 1 + (n_max_heaps >= 32);
+ step_up = min (step_up, n_max_heaps - extra_heaps - n_heaps);
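+ // e.g. with n_max_heaps = 40, extra_heaps is 2, so we never step up past 38 heaps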
- dprintf (6666, ("sample %d: soh_msl_wait_time: %zd, uoh_msl_wait_time: %zd, elapsed_between_gcs: %zd, gc_elapsed_time: %d, heap_size: %zd MB",
- dynamic_heap_count_data.sample_index,
- sample.soh_msl_wait_time,
- sample.uoh_msl_wait_time,
- sample.elapsed_between_gcs,
- sample.gc_elapsed_time,
- sample.heap_size/(1024*1024)));
+ // on the way down, we essentially divide the heap count by 1.5
+ int step_down = (n_heaps + 1) / 3;
- dynamic_heap_count_data.sample_index = (dynamic_heap_count_data.sample_index + 1) % dynamic_heap_count_data_t::sample_size;
+ // estimate the potential time benefit of going up a step
+ float tcp_reduction_per_step_up = smoothed_median_throughput_cost_percent * step_up / (n_heaps + step_up);
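+ // e.g. a smoothed tcp of 6% at 4 heaps with step_up = 2 predicts a 6 * 2 / 6 = 2% reduction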
- GCEventFireHeapCountSample_V1(
- sample.gc_elapsed_time,
- sample.soh_msl_wait_time,
- sample.uoh_msl_wait_time,
- sample.elapsed_between_gcs
- );
+ // estimate the potential time cost of going down a step
+ float tcp_increase_per_step_down = smoothed_median_throughput_cost_percent * step_down / (n_heaps - step_down);
+
+ // estimate the potential space cost of going up a step
+ float scp_increase_per_step_up = percent_heap_space_cost_per_heap * step_up;
+
+ // estimate the potential space saving of going down a step
+ float scp_decrease_per_step_down = percent_heap_space_cost_per_heap * step_down;
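+ // these four estimates drive the decision below: step up if the time saved outweighs
+ // the space cost, step down if the space saved outweighs the time cost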
- if (settings.gc_index < prev_change_heap_count_gc_index + 3)
+ dprintf (6666, ("[CHP] u %d, d %d | space cost %Id / heap %Id(%.2fmb) = scp %.3f (u: %.3f, d: %.3f) | stcp %.3f, u * %.1f = %.3f, d * %.1f = %.3f",
+ step_up, step_down,
+ heap_space_cost_per_heap, heap_size, ((float)heap_size / (float)1000 / (float)1000), percent_heap_space_cost_per_heap,
+ scp_increase_per_step_up, scp_decrease_per_step_down,
+ smoothed_median_throughput_cost_percent,
+ ((float)step_up / (float)(n_heaps + step_up)), tcp_reduction_per_step_up,
+ ((float)step_down / (float)(n_heaps - step_down)), tcp_increase_per_step_down));
+
+#ifdef STRESS_DYNAMIC_HEAP_COUNT
+ // quick hack for initial testing
+ int new_n_heaps = (int)gc_rand::get_rand (n_max_heaps - 1) + 1;
+
+ // if we are adjusting down, make sure we adjust lower than the lowest uoh msl heap
+ if ((new_n_heaps < n_heaps) && (dynamic_heap_count_data.lowest_heap_with_msl_uoh != -1))
{
- // reconsider the decision every few gcs
- return;
+ new_n_heaps = min (dynamic_heap_count_data.lowest_heap_with_msl_uoh, new_n_heaps);
+ new_n_heaps = max (new_n_heaps, 1);
}
-
- if (gc_heap::background_running_p())
+ dprintf (6666, ("stress %d -> %d", n_heaps, new_n_heaps));
+#else //STRESS_DYNAMIC_HEAP_COUNT
+ int new_n_heaps = n_heaps;
+ if (median_throughput_cost_percent > 10.0f)
{
- // can't have background gc running while we change the number of heaps
- // so it's useless to compute a new number of heaps here
+ // ramp up more aggressively - use as many heaps as it would take to bring
+ // the tcp down to 5%
+ new_n_heaps = (int)(n_heaps * (median_throughput_cost_percent / 5.0));
+ dprintf (6666, ("[CHP0] tcp %.3f -> %d * %.3f = %d", median_throughput_cost_percent, n_heaps, (median_throughput_cost_percent / 5.0), new_n_heaps));
+ new_n_heaps = min (new_n_heaps, n_max_heaps - extra_heaps);
}
- else
+ // if the median tcp is 10% or less, react slower
+ else if ((smoothed_median_throughput_cost_percent > 5.0f) || (median_gen2_tcp_percent > 10.0f))
{
- // compute the % overhead from msl waiting time and gc time for each of the samples
- float percent_overhead[dynamic_heap_count_data_t::sample_size];
- for (int i = 0; i < dynamic_heap_count_data_t::sample_size; i++)
- {
- dynamic_heap_count_data_t::sample& sample = dynamic_heap_count_data.samples[i];
- uint64_t overhead_time = sample.soh_msl_wait_time + sample.uoh_msl_wait_time + sample.gc_elapsed_time;
- percent_overhead[i] = overhead_time * 100.0f / sample.elapsed_between_gcs;
- if (percent_overhead[i] < 0)
- percent_overhead[i] = 0;
- else if (percent_overhead[i] > 100)
- percent_overhead[i] = 100;
- dprintf (6666, ("sample %d: percent_overhead: %d%%", i, (int)percent_overhead[i]));
- }
- // compute the median of the percent overhead samples
- #define compare_and_swap(i, j) \
- { \
- if (percent_overhead[i] < percent_overhead[j]) \
- { \
- float t = percent_overhead[i]; \
- percent_overhead[i] = percent_overhead[j]; \
- percent_overhead[j] = t; \
- } \
- }
- compare_and_swap (1, 0);
- compare_and_swap (2, 0);
- compare_and_swap (2, 1);
- #undef compare_and_swap
-
- // the middle element is the median overhead percentage
- float median_percent_overhead = percent_overhead[1];
-
- // apply exponential smoothing and use 1/3 for the smoothing factor
- const float smoothing = 3;
- float smoothed_median_percent_overhead = dynamic_heap_count_data.smoothed_median_percent_overhead;
- if (smoothed_median_percent_overhead != 0.0f)
- {
- // average it with the previous value
- smoothed_median_percent_overhead = median_percent_overhead / smoothing + (smoothed_median_percent_overhead / smoothing) * (smoothing - 1);
+ if (smoothed_median_throughput_cost_percent > 5.0f)
+ {
+ dprintf (6666, ("[CHP1] stcp %.3f > 5, %d + %d = %d", smoothed_median_throughput_cost_percent, n_heaps, step_up, (n_heaps + step_up)));
}
else
{
- // first time? initialize to the median
- smoothed_median_percent_overhead = median_percent_overhead;
+ dprintf (6666, ("[CHP2] tcp %.3f > 10, %d + %d = %d", median_gen2_tcp_percent, n_heaps, step_up, (n_heaps + step_up)));
}
+ new_n_heaps += step_up;
+ }
+ // if we can save at least 1% more in time than we spend in space, increase number of heaps
+ else if ((tcp_reduction_per_step_up - scp_increase_per_step_up) >= 1.0f)
+ {
+ dprintf (6666, ("[CHP3] % .3f - % .3f = % .3f, % d + % d = % d",
+ tcp_reduction_per_step_up, scp_increase_per_step_up, (tcp_reduction_per_step_up - scp_increase_per_step_up),
+ n_heaps, step_up, (n_heaps + step_up)));
+ new_n_heaps += step_up;
+ }
+ // if we can save at least 1% more in space than we spend in time, decrease number of heaps
+ else if ((smoothed_median_throughput_cost_percent < 1.0f) &&
+ (median_gen2_tcp_percent < 5.0f) &&
+ ((scp_decrease_per_step_down - tcp_increase_per_step_down) >= 1.0f))
+ {
+ dprintf (6666, ("[CHP4] stcp %.3f tcp %.3f, %.3f - %.3f = %.3f, %d + %d = %d",
+ smoothed_median_throughput_cost_percent, median_gen2_tcp_percent,
+ scp_decrease_per_step_down, tcp_increase_per_step_down, (scp_decrease_per_step_down - tcp_increase_per_step_down),
+ n_heaps, step_up, (n_heaps + step_up)));
+ new_n_heaps -= step_down;
+ }
- dprintf (6666, ("median overhead: %d%% smoothed median overhead: %d%%", (int)(median_percent_overhead*1000), (int)(smoothed_median_percent_overhead*1000)));
-
- // estimate the space cost of adding a heap as the min gen0 size
- size_t heap_space_cost_per_heap = dd_min_size (hp0_dd0);
-
- // compute the % space cost of adding a heap
- float percent_heap_space_cost_per_heap = heap_space_cost_per_heap * 100.0f / heap_size;
-
- // compute reasonable step sizes for the heap count
+ assert (new_n_heaps >= 1);
+ assert (new_n_heaps <= n_max_heaps);
+#endif //STRESS_DYNAMIC_HEAP_COUNT
- // on the way up, we essentially multiply the heap count by 1.5, so we go 1, 2, 3, 5, 8 ...
- // we don't go all the way to the number of CPUs, but stay 1 or 2 short
- int step_up = (n_heaps + 1) / 2;
- int extra_heaps = 1 + (n_max_heaps >= 32);
- step_up = min (step_up, n_max_heaps - extra_heaps - n_heaps);
+ // store data used for decision to emit in ETW event
+ dynamic_heap_count_data.median_throughput_cost_percent = median_throughput_cost_percent;
+ dynamic_heap_count_data.smoothed_median_throughput_cost_percent = smoothed_median_throughput_cost_percent;
+ dynamic_heap_count_data.percent_heap_space_cost_per_heap = percent_heap_space_cost_per_heap;
+ dynamic_heap_count_data.tcp_reduction_per_step_up = tcp_reduction_per_step_up;
+ dynamic_heap_count_data.tcp_increase_per_step_down = tcp_increase_per_step_down;
+ dynamic_heap_count_data.scp_increase_per_step_up = scp_increase_per_step_up;
+ dynamic_heap_count_data.scp_decrease_per_step_down = scp_decrease_per_step_down;
+
+ GCEventFireHeapCountTuning_V1 (
+ (uint16_t)dynamic_heap_count_data.new_n_heaps,
+ (uint64_t)VolatileLoadWithoutBarrier (&settings.gc_index),
+ dynamic_heap_count_data.median_throughput_cost_percent,
+ dynamic_heap_count_data.smoothed_median_throughput_cost_percent,
+ dynamic_heap_count_data.tcp_reduction_per_step_up,
+ dynamic_heap_count_data.tcp_increase_per_step_down,
+ dynamic_heap_count_data.scp_increase_per_step_up,
+ dynamic_heap_count_data.scp_decrease_per_step_down
+ );
- // on the way down, we essentially divide the heap count by 1.5
- int step_down = (n_heaps + 1) / 3;
+ dynamic_heap_count_data.prev_num_completed_gcs = num_completed_gcs;
- // estimate the potential time benefit of going up a step
- float overhead_reduction_per_step_up = smoothed_median_percent_overhead * step_up / (n_heaps + step_up);
+ if (new_n_heaps != n_heaps)
+ {
+ dprintf (6666, ("should change! %d->%d", n_heaps, new_n_heaps));
+ dynamic_heap_count_data.heap_count_to_change_to = new_n_heaps;
+ dynamic_heap_count_data.should_change_heap_count = true;
+ }
- // estimate the potential time cost of going down a step
- float overhead_increase_per_step_down = smoothed_median_percent_overhead * step_down / (n_heaps - step_down);
+ return new_n_heaps;
+}
- // estimate the potential space cost of going up a step
- float space_cost_increase_per_step_up = percent_heap_space_cost_per_heap * step_up;
+void gc_heap::check_heap_count ()
+{
+ dynamic_heap_count_data.new_n_heaps = dynamic_heap_count_data.heap_count_to_change_to;
- // estimate the potential space saving of going down a step
- float space_cost_decrease_per_step_down = percent_heap_space_cost_per_heap * step_down;
+ assert (dynamic_heap_count_data.new_n_heaps != n_heaps);
-#ifdef STRESS_DYNAMIC_HEAP_COUNT
- // quick hack for initial testing
- int new_n_heaps = (int)gc_rand::get_rand (n_max_heaps - 1) + 1;
+ if (dynamic_heap_count_data.new_n_heaps != n_heaps)
+ {
+ dprintf (9999, ("h0 suspending EE in check"));
+ // can't have threads allocating while we change the number of heaps
+ GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
+ dprintf (9999, ("h0 suspended EE in check"));
- // if we are adjusting down, make sure we adjust lower than the lowest uoh msl heap
- if ((new_n_heaps < n_heaps) && (dynamic_heap_count_data.lowest_heap_with_msl_uoh != -1))
+#ifdef BACKGROUND_GC
+ if (gc_heap::background_running_p())
{
- new_n_heaps = min (dynamic_heap_count_data.lowest_heap_with_msl_uoh, new_n_heaps);
+ // background GC is running - reset the new heap count
+ dynamic_heap_count_data.new_n_heaps = n_heaps;
+ dprintf (6666, ("can't change heap count! BGC in progress"));
- // but not down to zero, obviously...
- new_n_heaps = max (new_n_heaps, 1);
- }
-#else //STRESS_DYNAMIC_HEAP_COUNT
- int new_n_heaps = n_heaps;
- if (median_percent_overhead > 10.0f)
- {
- // ramp up more agressively - use as many heaps as it would take to bring
- // the overhead down to 5%
- new_n_heaps = (int)(n_heaps * (median_percent_overhead / 5.0));
- new_n_heaps = min (new_n_heaps, n_max_heaps - extra_heaps);
- }
- // if the median overhead is 10% or less, react slower
- else if (smoothed_median_percent_overhead > 5.0f)
- {
- new_n_heaps += step_up;
- }
- // if we can save at least 1% more in time than we spend in space, increase number of heaps
- else if (overhead_reduction_per_step_up - space_cost_increase_per_step_up >= 1.0f)
- {
- new_n_heaps += step_up;
- }
- // if we can save at least 1% more in space than we spend in time, decrease number of heaps
- else if (smoothed_median_percent_overhead < 1.0f && space_cost_decrease_per_step_down - overhead_increase_per_step_down >= 1.0f)
- {
- new_n_heaps -= step_down;
+ GCToEEInterface::RestartEE(TRUE);
}
+#endif //BACKGROUND_GC
+ }
- dprintf (6666, ("or: %d, si: %d, sd: %d, oi: %d => %d -> %d",
- (int)overhead_reduction_per_step_up,
- (int)space_cost_increase_per_step_up,
- (int)space_cost_decrease_per_step_down,
- (int)overhead_increase_per_step_down,
- n_heaps,
- new_n_heaps));
-
- assert (1 <= new_n_heaps);
- assert (new_n_heaps <= n_max_heaps);
-#endif //STRESS_DYNAMIC_HEAP_COUNT
-
- dynamic_heap_count_data.new_n_heaps = new_n_heaps;
-
- // store data used for decision to emit in ETW event
- dynamic_heap_count_data.median_percent_overhead = median_percent_overhead;
- dynamic_heap_count_data.smoothed_median_percent_overhead = smoothed_median_percent_overhead;
- dynamic_heap_count_data.percent_heap_space_cost_per_heap = percent_heap_space_cost_per_heap;
- dynamic_heap_count_data.overhead_reduction_per_step_up = overhead_reduction_per_step_up;
- dynamic_heap_count_data.overhead_increase_per_step_down = overhead_increase_per_step_down;
- dynamic_heap_count_data.space_cost_increase_per_step_up = space_cost_increase_per_step_up;
- dynamic_heap_count_data.space_cost_decrease_per_step_down = space_cost_decrease_per_step_down;
-
- GCEventFireHeapCountTuning_V1(
- (uint16_t)dynamic_heap_count_data.new_n_heaps,
- (uint64_t)VolatileLoad(&settings.gc_index),
- dynamic_heap_count_data.median_percent_overhead,
- dynamic_heap_count_data.smoothed_median_percent_overhead,
- dynamic_heap_count_data.overhead_reduction_per_step_up,
- dynamic_heap_count_data.overhead_increase_per_step_down,
- dynamic_heap_count_data.space_cost_increase_per_step_up,
- dynamic_heap_count_data.space_cost_decrease_per_step_down
- );
-
- if (new_n_heaps != n_heaps)
+ if (dynamic_heap_count_data.new_n_heaps != n_heaps)
+ {
+ dprintf (6666, ("prep to change from %d to %d", n_heaps, dynamic_heap_count_data.new_n_heaps));
+ if (!prepare_to_change_heap_count (dynamic_heap_count_data.new_n_heaps))
{
- // can't have threads allocating while we change the number of heaps
- GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
-
- if (gc_heap::background_running_p())
- {
- // background GC is running - reset the new heap count
- dynamic_heap_count_data.new_n_heaps = n_heaps;
-
- GCToEEInterface::RestartEE(TRUE);
- }
+ // we don't have sufficient resources - reset the new heap count
+ dynamic_heap_count_data.new_n_heaps = n_heaps;
}
}
if (dynamic_heap_count_data.new_n_heaps == n_heaps)
{
// heap count stays the same, no work to do
- dprintf (6666, ("heap count stays the same, no work to do %d == %d", dynamic_heap_count_data.new_n_heaps, n_heaps));
+ dynamic_heap_count_data.prev_num_completed_gcs = get_num_completed_gcs ();
+ dynamic_heap_count_data.should_change_heap_count = false;
- // come back after 3 GCs to reconsider
- prev_change_heap_count_gc_index = settings.gc_index;
+ dprintf (6666, ("heap count stays the same %d, no work to do, set prev completed to %Id", dynamic_heap_count_data.new_n_heaps, dynamic_heap_count_data.prev_num_completed_gcs));
return;
}
- if (GCScan::GetGcRuntimeStructuresValid())
+ int new_n_heaps = dynamic_heap_count_data.new_n_heaps;
+
+ assert (!(dynamic_heap_count_data.init_only_p));
+
{
+ // At this point we are guaranteed to be able to change the heap count to the new one.
+ // Change the heap count for joins here because we will need to join new_n_heaps threads together.
+ dprintf (9999, ("changing join hp %d->%d", n_heaps, new_n_heaps));
+ int max_threads_to_wake = max (n_heaps, new_n_heaps);
+ gc_t_join.update_n_threads (max_threads_to_wake);
+
// make sure the other gc threads cannot see this as a request to GC
assert (dynamic_heap_count_data.new_n_heaps != n_heaps);
+
+ if (n_heaps < new_n_heaps)
+ {
+ int saved_idle_thread_count = dynamic_heap_count_data.idle_thread_count;
+ Interlocked::ExchangeAdd (&dynamic_heap_count_data.idle_thread_count, (n_heaps - new_n_heaps));
+ dprintf (9999, ("GC thread %d setting idle events for h%d-h%d, total idle %d -> %d", heap_number, n_heaps, (new_n_heaps - 1),
+ saved_idle_thread_count, VolatileLoadWithoutBarrier (&dynamic_heap_count_data.idle_thread_count)));
+
+ for (int heap_idx = n_heaps; heap_idx < new_n_heaps; heap_idx++)
+ {
+ g_heaps[heap_idx]->gc_idle_thread_event.Set();
+#ifdef BACKGROUND_GC
+ g_heaps[heap_idx]->bgc_idle_thread_event.Set();
+#endif //BACKGROUND_GC
+ }
+ }
+
gc_start_event.Set();
}
int old_n_heaps = n_heaps;
+ (dynamic_heap_count_data.heap_count_change_count)++;
change_heap_count (dynamic_heap_count_data.new_n_heaps);
GCToEEInterface::RestartEE(TRUE);
- prev_change_heap_count_gc_index = settings.gc_index;
+ dprintf (9999, ("h0 restarted EE"));
// we made changes to the heap count that will change the overhead,
// so change the smoothed overhead to reflect that
- int new_n_heaps = n_heaps;
- dynamic_heap_count_data.smoothed_median_percent_overhead = dynamic_heap_count_data.smoothed_median_percent_overhead/new_n_heaps*old_n_heaps;
+ dynamic_heap_count_data.smoothed_median_throughput_cost_percent = dynamic_heap_count_data.smoothed_median_throughput_cost_percent / n_heaps * old_n_heaps;
+
+ dprintf (6666, ("h0 finished changing, set should change to false!"));
+ dynamic_heap_count_data.should_change_heap_count = false;
}
bool gc_heap::prepare_to_change_heap_count (int new_n_heaps)
{
- dprintf (6666, ("trying to change heap count %d -> %d", n_heaps, new_n_heaps));
+ dprintf (9999, ("trying to change heap count %d -> %d", n_heaps, new_n_heaps));
// use this variable for clarity - n_heaps will change during the transition
int old_n_heaps = n_heaps;
@@ -25357,6 +25644,17 @@ bool gc_heap::prepare_to_change_heap_count (int new_n_heaps)
}
}
+ // Before we look at whether we have sufficient regions, we should return the regions that should be
+ // deleted to the free list so we don't lose them when we decommission heaps. We could do this only for
+ // the heaps we are about to decommission, but it's better to do it for all heaps: we don't need to worry
+ // about adding these regions to the heaps that remain (freeable uoh/soh regions), and we get rid of
+ // regions with the heap_segment_flags_uoh_delete flag, because background_delay_delete_uoh_segments
+ // assumes such a region can't be the start region.
+ for (int i = 0; i < old_n_heaps; i++)
+ {
+ gc_heap* hp = g_heaps[i];
+ hp->delay_free_segments ();
+ }
+
// if we want to increase the number of heaps, we have to make sure we can give
// each heap a region for each generation. If we cannot do that, we have to give up
ptrdiff_t region_count_in_gen[total_generation_count];
@@ -25437,39 +25735,34 @@ bool gc_heap::prepare_to_change_heap_count (int new_n_heaps)
bool gc_heap::change_heap_count (int new_n_heaps)
{
+ dprintf (9999, ("BEG heap%d changing %d->%d", heap_number, n_heaps, new_n_heaps));
+
// use this variable for clarity - n_heaps will change during the transition
int old_n_heaps = n_heaps;
+ bool init_only_p = dynamic_heap_count_data.init_only_p;
- if (heap_number == 0)
- {
- if (!prepare_to_change_heap_count (new_n_heaps))
- {
- // we don't have sufficient resources - reset the new heap count
- dynamic_heap_count_data.new_n_heaps = n_heaps;
- }
- }
-
- if (GCScan::GetGcRuntimeStructuresValid())
{
- // join for sufficient resources decision
gc_t_join.join (this, gc_join_merge_temp_fl);
if (gc_t_join.joined ())
{
+ // BGC is not running, so we can safely change its join's heap count.
+#ifdef BACKGROUND_GC
+ bgc_t_join.update_n_threads (new_n_heaps);
+#endif //BACKGROUND_GC
+
+ dynamic_heap_count_data.init_only_p = false;
+ dprintf (9999, ("in change h%d resetting gc_start, update bgc join to %d heaps", heap_number, new_n_heaps));
gc_start_event.Reset();
gc_t_join.restart ();
}
}
- // gc_heap::n_heaps may have changed by now, compare to the snapshot *before* the join
- if (dynamic_heap_count_data.new_n_heaps == old_n_heaps)
- {
- dprintf (6666, ("failed to change heap count, no work to do %d == %d", dynamic_heap_count_data.new_n_heaps, old_n_heaps));
- return false;
- }
+ assert (dynamic_heap_count_data.new_n_heaps != old_n_heaps);
+
+ dprintf (9999, ("Waiting h0 heap%d changing %d->%d", heap_number, n_heaps, new_n_heaps));
if (heap_number == 0)
{
- // after having checked for sufficient resources, we are now committed to actually change the heap count
dprintf (3, ("switching heap count from %d to %d heaps", old_n_heaps, new_n_heaps));
// spread finalization data out to heaps coming into service
@@ -25490,17 +25783,23 @@ bool gc_heap::change_heap_count (int new_n_heaps)
from_heap_number = (from_heap_number + 1) % old_n_heaps;
}
- // prepare for the switch by fixing the allocation contexts on the old heaps,
+ // prepare for the switch by fixing the allocation contexts on the old heaps, unifying the gen0_bricks_cleared flag,
// and setting the survived size for the existing regions to their allocated size
+ BOOL unified_gen0_bricks_cleared = TRUE;
for (int i = 0; i < old_n_heaps; i++)
{
gc_heap* hp = g_heaps[i];
- if (GCScan::GetGcRuntimeStructuresValid())
+ if (!init_only_p)
{
hp->fix_allocation_contexts (TRUE);
}
+ if (unified_gen0_bricks_cleared && (hp->gen0_bricks_cleared == FALSE))
+ {
+ unified_gen0_bricks_cleared = FALSE;
+ }
+
for (int gen_idx = 0; gen_idx < total_generation_count; gen_idx++)
{
generation* gen = hp->generation_of (gen_idx);
@@ -25600,7 +25899,7 @@ bool gc_heap::change_heap_count (int new_n_heaps)
hpd->free_regions[kind].transfer_regions(&hp->free_regions[kind]);
}
}
- // update number of heaps
+ dprintf (9999, ("h%d changing %d->%d", heap_number, n_heaps, new_n_heaps));
n_heaps = new_n_heaps;
// even out the regions over the current number of heaps
@@ -25611,6 +25910,8 @@ bool gc_heap::change_heap_count (int new_n_heaps)
{
gc_heap* hp = g_heaps[i];
+ hp->gen0_bricks_cleared = unified_gen0_bricks_cleared;
+
// establish invariants regarding the ephemeral segment
generation* gen0 = hp->generation_of (0);
if ((hp->ephemeral_heap_segment == nullptr) ||
@@ -25639,7 +25940,9 @@ bool gc_heap::change_heap_count (int new_n_heaps)
}
}
- if (GCScan::GetGcRuntimeStructuresValid())
+ dprintf (3, ("individual heap%d changing %d->%d", heap_number, n_heaps, new_n_heaps));
+
+ if (!init_only_p)
{
// join for rethreading the free lists
gc_t_join.join (this, gc_join_merge_temp_fl);
@@ -25651,7 +25954,11 @@ bool gc_heap::change_heap_count (int new_n_heaps)
// rethread the free lists
for (int gen_idx = 0; gen_idx < total_generation_count; gen_idx++)
{
- rethread_fl_items (gen_idx);
+ if (heap_number < old_n_heaps)
+ {
+ dprintf (3, ("h%d calling per heap work!", heap_number));
+ rethread_fl_items (gen_idx);
+ }
// join for merging the free lists
gc_t_join.join (this, gc_join_merge_temp_fl);
@@ -25662,18 +25969,14 @@ bool gc_heap::change_heap_count (int new_n_heaps)
gc_t_join.restart ();
}
}
+#ifdef BACKGROUND_GC
// there should be no items in the bgc_alloc_lock
bgc_alloc_lock->check();
+#endif //BACKGROUND_GC
}
if (heap_number == 0)
{
- // udate the number of heaps in the joins
- gc_t_join.update_n_threads(new_n_heaps);
- #ifdef BACKGROUND_GC
- bgc_t_join.update_n_threads(new_n_heaps);
- #endif //BACKGROUND_GC
-
// compute the total budget per generation over the old heaps
// and figure out what the new budget per heap is
ptrdiff_t budget_per_heap[total_generation_count];
@@ -25733,21 +26036,50 @@ bool gc_heap::change_heap_count (int new_n_heaps)
hp->decommission_heap();
}
- if (GCScan::GetGcRuntimeStructuresValid())
+ if (!init_only_p)
{
// make sure no allocation contexts point to idle heaps
fix_allocation_contexts_heaps();
}
- if (old_n_heaps < new_n_heaps)
+ dynamic_heap_count_data.last_n_heaps = old_n_heaps;
+ }
+
+ // join one last time so we can shrink the join's heap count if needed.
+ if (new_n_heaps < old_n_heaps)
+ {
+ gc_t_join.join (this, gc_join_merge_temp_fl);
+ if (gc_t_join.joined ())
{
- // wake up threads for the new heaps
- gc_idle_thread_event.Set();
+ dprintf (9999, ("now changing the join heap count to the smaller one %d", new_n_heaps));
+ gc_t_join.update_n_threads (new_n_heaps);
+
+ gc_t_join.restart ();
}
}
return true;
}
+
+size_t gc_heap::get_msl_wait_time()
+{
+ assert (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes);
+
+ size_t msl_wait_since_pause = 0;
+
+ for (int i = 0; i < n_heaps; i++)
+ {
+ gc_heap* hp = g_heaps[i];
+
+ msl_wait_since_pause += hp->more_space_lock_soh.msl_wait_time;
+ hp->more_space_lock_soh.msl_wait_time = 0;
+
+ msl_wait_since_pause += hp->more_space_lock_uoh.msl_wait_time;
+ hp->more_space_lock_uoh.msl_wait_time = 0;
+ }
+
+ return msl_wait_since_pause;
+}
#endif //DYNAMIC_HEAP_COUNT
#endif //USE_REGIONS
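
The park/wake handshake used in the heap-count change above is worth spelling out: server GC threads whose heap_number is at or beyond n_heaps park on a per-heap idle event (gc_idle_thread_event, plus bgc_idle_thread_event for background GC threads), and heap 0 wakes exactly the heaps being brought into service while adjusting idle_thread_count by the (negative) delta. A minimal, self-contained sketch of that pattern, using std::condition_variable as a stand-in for the runtime's GCEvent; the auto-reset details and the real bookkeeping are elided:

```cpp
#include <atomic>
#include <condition_variable>
#include <mutex>

struct IdleEvent   // stand-in for GCEvent, with rough auto-reset semantics
{
    std::mutex m;
    std::condition_variable cv;
    bool signaled = false;
    void Set ()  { { std::lock_guard<std::mutex> g (m); signaled = true; } cv.notify_one (); }
    void Wait () { std::unique_lock<std::mutex> l (m); cv.wait (l, [&]{ return signaled; }); signaled = false; }
};

constexpr int max_heaps = 8;            // illustrative only
IdleEvent idle_events[max_heaps];       // one per heap, like gc_idle_thread_event
std::atomic<int> idle_thread_count{0};

// mirrors the wake loop in check_heap_count: when growing from n_heaps to
// new_n_heaps, the delta added is negative because fewer threads stay idle
void wake_new_heaps (int n_heaps, int new_n_heaps)
{
    idle_thread_count.fetch_add (n_heaps - new_n_heaps);
    for (int heap_idx = n_heaps; heap_idx < new_n_heaps; heap_idx++)
        idle_events[heap_idx].Set ();
}
```
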
@@ -32791,17 +33123,17 @@ void gc_heap::plan_phase (int condemned_gen_number)
}
else
{
- dprintf (2, ("gen2 didn't grow (end seg alloc: %zd, , condemned alloc: %zd, gen1 c alloc: %zd",
+ dprintf (1, ("gen2 didn't grow (end seg alloc: %zd, , condemned alloc: %zd, gen1 c alloc: %zd",
end_seg_allocated, condemned_allocated,
generation_condemned_allocated (generation_of (max_generation - 1))));
}
- dprintf (1, ("older gen's free alloc: %zd->%zd, seg alloc: %zd->%zd, condemned alloc: %zd->%zd",
+ dprintf (2, ("older gen's free alloc: %zd->%zd, seg alloc: %zd->%zd, condemned alloc: %zd->%zd",
r_older_gen_free_list_allocated, generation_free_list_allocated (older_gen),
r_older_gen_end_seg_allocated, generation_end_seg_allocated (older_gen),
r_older_gen_condemned_allocated, generation_condemned_allocated (older_gen)));
- dprintf (1, ("this GC did %zd free list alloc(%zd bytes free space rejected)",
+ dprintf (2, ("this GC did %zd free list alloc(%zd bytes free space rejected)",
free_list_allocated, rejected_free_space));
maxgen_size_increase* maxgen_size_info = &(get_gc_data_per_heap()->maxgen_size_info);
@@ -38894,9 +39226,9 @@ void gc_heap::bgc_thread_function()
{
// this is the case where we have more background GC threads than heaps
// - wait until we're told to continue...
- dprintf (3, ("BGC thread %d idle", heap_number));
- gc_idle_thread_event.Wait(INFINITE, FALSE);
- dprintf (3, ("BGC thread %d waking from idle", heap_number));
+ dprintf (9999, ("BGC thread %d idle (%d heaps) (gc%Id)", heap_number, n_heaps, VolatileLoadWithoutBarrier (&settings.gc_index)));
+ bgc_idle_thread_event.Wait(INFINITE, FALSE);
+ dprintf (9999, ("BGC thread %d waking from idle (%d heaps) (gc%Id)", heap_number, n_heaps, VolatileLoadWithoutBarrier (&settings.gc_index)));
continue;
}
#endif //DYNAMIC_HEAP_COUNT
@@ -38968,7 +39300,7 @@ void gc_heap::bgc_thread_function()
dprintf (SPINLOCK_LOG, ("bgc Lgc"));
leave_spin_lock (&gc_lock);
#ifdef MULTIPLE_HEAPS
- dprintf(1, ("End of BGC - starting all BGC threads"));
+ dprintf(1, ("End of BGC"));
bgc_t_join.restart();
#endif //MULTIPLE_HEAPS
}
@@ -42845,6 +43177,9 @@ bool gc_heap::init_dynamic_data()
{
process_start_time = now;
smoothed_desired_total[0] = dynamic_data_of (0)->min_size * n_heaps;
+#ifdef DYNAMIC_HEAP_COUNT
+ last_suspended_end_time = now;
+#endif //DYNAMIC_HEAP_COUNT
#ifdef HEAP_BALANCE_INSTRUMENTATION
last_gc_end_time_us = now;
dprintf (HEAP_BALANCE_LOG, ("qpf=%zd, start: %zd(%d)", qpf, start_raw_ts, now));
@@ -47943,6 +48278,7 @@ HRESULT GCHeap::Initialize()
uint32_t nhp = 1;
uint32_t nhp_from_config = 0;
+ uint32_t max_nhp_from_config = (uint32_t)GCConfig::GetMaxHeapCount();
#ifndef MULTIPLE_HEAPS
GCConfig::SetServerGC(false);
@@ -48137,6 +48473,10 @@ HRESULT GCHeap::Initialize()
#ifdef MULTIPLE_HEAPS
assert (nhp <= g_num_processors);
+ if (max_nhp_from_config)
+ {
+ nhp = min (nhp, max_nhp_from_config);
+ }
gc_heap::n_max_heaps = nhp;
gc_heap::n_heaps = nhp;
hr = gc_heap::initialize_gc (seg_size, large_seg_size, pin_seg_size, nhp);
@@ -48287,9 +48627,32 @@ HRESULT GCHeap::Initialize()
{
// start with only 1 heap
gc_heap::smoothed_desired_total[0] /= gc_heap::n_heaps;
- gc_heap::g_heaps[0]->change_heap_count (1);
+ int initial_n_heaps = 1;
+ dprintf (9999, ("gc_heap::n_heaps is %d, initial %d", gc_heap::n_heaps, initial_n_heaps));
+
+ {
+ if (!gc_heap::prepare_to_change_heap_count (initial_n_heaps))
+ {
+ // we don't have sufficient resources.
+ return E_FAIL;
+ }
+
+ gc_heap::dynamic_heap_count_data.new_n_heaps = initial_n_heaps;
+ gc_heap::dynamic_heap_count_data.idle_thread_count = 0;
+ gc_heap::dynamic_heap_count_data.init_only_p = true;
+
+ int max_threads_to_wake = max (gc_heap::n_heaps, initial_n_heaps);
+ gc_t_join.update_n_threads (max_threads_to_wake);
+ gc_heap::gc_start_event.Set ();
+ }
+
+ gc_heap::g_heaps[0]->change_heap_count (initial_n_heaps);
+ gc_heap::gc_start_event.Reset ();
+
+ // This needs to be different from our initial heap count so we can make sure we wait for
+ // the idle threads correctly in gc_thread_function.
+ gc_heap::dynamic_heap_count_data.last_n_heaps = 0;
}
- gc_heap::dynamic_heap_count_data.new_n_heaps = gc_heap::n_heaps;
#endif //DYNAMIC_HEAP_COUNT
GCScan::GcRuntimeStructuresValid (TRUE);
@@ -49861,10 +50224,16 @@ void gc_heap::do_post_gc()
}
#endif //BGC_SERVO_TUNING
+#ifdef BACKGROUND_GC
+ const char* str_gc_type = (settings.concurrent ? "BGC" : (gc_heap::background_running_p () ? "FGC" : "NGC"));
+#else
+ const char* str_gc_type = "NGC";
+#endif //BACKGROUND_GC
+
dprintf (1, (ThreadStressLog::gcDetailedEndMsg(),
- VolatileLoad(&settings.gc_index),
- dd_collection_count(hp->dynamic_data_of(0)),
- (size_t)(GetHighPrecisionTimeStamp() / 1000),
+ VolatileLoad (&settings.gc_index),
+ dd_collection_count (hp->dynamic_data_of (0)),
+ (size_t)(GetHighPrecisionTimeStamp () / 1000),
settings.condemned_generation,
(settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC")),
(settings.compaction ? "C" : "S"),
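
get_msl_wait_time above drains the per-heap more-space-lock wait counters into a single number per pause. Per the sampling comments in gcpriv.h further down, that number is combined with the GC pause duration into a throughput cost percentage. A rough, self-contained sketch of such a computation; the per-heap averaging and the exact formula are assumptions for illustration, not lifted from this change:

```cpp
#include <cstdint>

struct sample
{
    uint64_t elapsed_between_gcs; // microseconds between pauses
    uint64_t gc_pause_time;       // pause time for this GC
    uint64_t msl_wait_time;       // allocator wait, summed over all heaps
};

float throughput_cost_percent (const sample& s, int n_heaps)
{
    if (s.elapsed_between_gcs == 0)
        return 0.0f;
    // average the lock wait per heap, add the pause, and express the total
    // as a share of the wall-clock time between pauses
    uint64_t cost = (s.msl_wait_time / (uint64_t)n_heaps) + s.gc_pause_time;
    return (float)cost * 100.0f / (float)s.elapsed_between_gcs;
}
```
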
diff --git a/src/coreclr/gc/gcconfig.h b/src/coreclr/gc/gcconfig.h
index 72786778d5a978..aeded6bc97f17f 100644
--- a/src/coreclr/gc/gcconfig.h
+++ b/src/coreclr/gc/gcconfig.h
@@ -83,6 +83,7 @@ class GCConfigStringHolder
INT_CONFIG (BGCSpinCount, "BGCSpinCount", NULL, 140, "Specifies the bgc spin count") \
INT_CONFIG (BGCSpin, "BGCSpin", NULL, 2, "Specifies the bgc spin time") \
INT_CONFIG (HeapCount, "GCHeapCount", "System.GC.HeapCount", 0, "Specifies the number of server GC heaps") \
+ INT_CONFIG (MaxHeapCount, "GCMaxHeapCount", "System.GC.MaxHeapCount", 0, "Specifies the max number of server GC heaps to adjust to") \
INT_CONFIG (Gen0Size, "GCgen0size", NULL, 0, "Specifies the smallest gen0 budget") \
INT_CONFIG (SegmentSize, "GCSegmentSize", NULL, 0, "Specifies the managed heap segment size") \
INT_CONFIG (LatencyMode, "GCLatencyMode", NULL, -1, "Specifies the GC latency mode - batch, interactive or low latency (note that the same " \
diff --git a/src/coreclr/gc/gcee.cpp b/src/coreclr/gc/gcee.cpp
index 6dbbfd64a7a514..32738da9b603ab 100644
--- a/src/coreclr/gc/gcee.cpp
+++ b/src/coreclr/gc/gcee.cpp
@@ -510,9 +510,12 @@ bool GCHeap::IsInFrozenSegment(Object *object)
void GCHeap::UpdateFrozenSegment(segment_handle seg, uint8_t* allocated, uint8_t* committed)
{
#ifdef FEATURE_BASICFREEZE
- heap_segment* heap_seg = reinterpret_cast<heap_segment*>(seg);
- heap_segment_committed(heap_seg) = committed;
- heap_segment_allocated(heap_seg) = allocated;
+#ifdef MULTIPLE_HEAPS
+ gc_heap* heap = gc_heap::g_heaps[0];
+#else
+ gc_heap* heap = pGenGCHeap;
+#endif //MULTIPLE_HEAPS
+ heap->update_ro_segment (reinterpret_cast<heap_segment*>(seg), allocated, committed);
#endif // FEATURE_BASICFREEZE
}
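
update_ro_segment is only declared in this diff (see gcpriv.h below); its body is not shown. Judging from the deleted lines, a plausible sketch, with stand-in types and any synchronization elided, is simply stamping the new bounds on the frozen segment:

```cpp
#include <cstdint>

struct heap_segment { uint8_t* committed; uint8_t* allocated; };

// hypothetical body: the deleted code wrote these two fields directly,
// so the new helper presumably does the same on the owning heap
void update_ro_segment (heap_segment* seg, uint8_t* allocated, uint8_t* committed)
{
    seg->committed = committed; // was: heap_segment_committed (heap_seg) = committed
    seg->allocated = allocated; // was: heap_segment_allocated (heap_seg) = allocated
}
```
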
diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h
index da0085ce19610d..cce6c5ee28adf0 100644
--- a/src/coreclr/gc/gcpriv.h
+++ b/src/coreclr/gc/gcpriv.h
@@ -402,8 +402,6 @@ struct GCDebugSpinLock {
#if defined(DYNAMIC_HEAP_COUNT)
// time in microseconds we wait for the more space lock
uint64_t msl_wait_time;
- // number of times we wait for the more space lock
- uint64_t msl_wait_count;
#endif //DYNAMIC_HEAP_COUNT
GCDebugSpinLock()
@@ -415,7 +413,7 @@ struct GCDebugSpinLock {
, num_switch_thread(0), num_wait_longer(0), num_switch_thread_w(0), num_disable_preemptive_w(0)
#endif
#if defined(DYNAMIC_HEAP_COUNT)
- , msl_wait_time(0), msl_wait_count(0)
+ , msl_wait_time(0)
#endif //DYNAMIC_HEAP_COUNT
{
}
@@ -1148,15 +1146,12 @@ class dynamic_data
//
// The following 3 fields are updated at the beginning of each GC, if that GC condemns this generation.
//
- // The number of GC that condemned this generation. The only difference between this
- // and collection_count is just that collection_count is maintained for all physical generations
- // (currently there are 5) whereas this is only updated for logical generations (there are 3).
- size_t gc_clock;
- uint64_t time_clock; //time when this gc started
+ size_t gc_clock; // the gc index
+ uint64_t time_clock; // time when this gc started
uint64_t previous_time_clock; // time when previous gc started
// Updated at the end of a GC, if that GC condemns this generation.
- size_t gc_elapsed_time; // Time it took for the gc to complete
+ size_t gc_elapsed_time; // time it took for the gc to complete
//
// The following fields (and fields in sdata) are initialized during GC init time and do not change.
@@ -1495,6 +1490,8 @@ class mark_queue_t
void verify_empty();
};
+float median_of_3 (float a, float b, float c);
+
//class definition of the internal class
class gc_heap
{
@@ -2380,6 +2377,7 @@ class gc_heap
#ifdef FEATURE_BASICFREEZE
PER_HEAP_METHOD BOOL insert_ro_segment (heap_segment* seg);
PER_HEAP_METHOD void remove_ro_segment (heap_segment* seg);
+ PER_HEAP_METHOD void update_ro_segment (heap_segment* seg, uint8_t* allocated, uint8_t* committed);
#endif //FEATURE_BASICFREEZE
PER_HEAP_METHOD BOOL set_ro_segment_in_range (heap_segment* seg);
#ifndef USE_REGIONS
@@ -2421,6 +2419,7 @@ class gc_heap
#ifndef USE_REGIONS
PER_HEAP_METHOD void rearrange_heap_segments(BOOL compacting);
#endif //!USE_REGIONS
+ PER_HEAP_METHOD void delay_free_segments();
PER_HEAP_ISOLATED_METHOD void distribute_free_regions();
#ifdef BACKGROUND_GC
PER_HEAP_ISOLATED_METHOD void reset_write_watch_for_gc_heap(void* base_address, size_t region_size);
@@ -2596,11 +2595,17 @@ class gc_heap
// re-initialize a heap in preparation to putting it back into service
PER_HEAP_METHOD void recommission_heap();
+ PER_HEAP_ISOLATED_METHOD size_t get_num_completed_gcs();
+
+ PER_HEAP_ISOLATED_METHOD int calculate_new_heap_count();
+
// check if we should change the heap count
PER_HEAP_METHOD void check_heap_count();
- PER_HEAP_METHOD bool prepare_to_change_heap_count (int new_n_heaps);
+ PER_HEAP_ISOLATED_METHOD bool prepare_to_change_heap_count (int new_n_heaps);
PER_HEAP_METHOD bool change_heap_count (int new_n_heaps);
+
+ PER_HEAP_ISOLATED_METHOD size_t get_msl_wait_time();
#endif //DYNAMIC_HEAP_COUNT
#endif //USE_REGIONS
@@ -3777,6 +3782,13 @@ class gc_heap
PER_HEAP_FIELD_MAINTAINED mark* loh_pinned_queue;
#endif //FEATURE_LOH_COMPACTION
+#ifdef DYNAMIC_HEAP_COUNT
+ PER_HEAP_FIELD_MAINTAINED GCEvent gc_idle_thread_event;
+#ifdef BACKGROUND_GC
+ PER_HEAP_FIELD_MAINTAINED GCEvent bgc_idle_thread_event;
+#endif //BACKGROUND_GC
+#endif //DYNAMIC_HEAP_COUNT
+
/******************************************/
// PER_HEAP_FIELD_MAINTAINED_ALLOC fields //
/******************************************/
@@ -4083,7 +4095,6 @@ class gc_heap
// These 2 fields' values do not change but are set/unset per GC
PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent gc_start_event;
PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent ee_suspend_event;
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent gc_idle_thread_event;
// Also updated on the heap#0 GC thread because that's where we are actually doing the decommit.
PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL gradual_decommit_in_progress_p;
@@ -4162,6 +4173,10 @@ class gc_heap
PER_HEAP_ISOLATED_FIELD_SINGLE_GC uint8_t* gc_high; // high end of the highest region being condemned
#endif //USE_REGIONS
+#ifdef STRESS_DYNAMIC_HEAP_COUNT
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC int heaps_in_this_gc;
+#endif //STRESS_DYNAMIC_HEAP_COUNT
+
/**************************************************/
// PER_HEAP_ISOLATED_FIELD_SINGLE_GC_ALLOC fields //
/**************************************************/
@@ -4260,37 +4275,65 @@ class gc_heap
#endif //USE_REGIONS
#ifdef DYNAMIC_HEAP_COUNT
+ // Sample collection -
+ //
+ // For every GC, we collect the msl wait time + GC pause duration info and use both to calculate the
+ // throughput cost percentage. We will also be using the wait time and the GC pause duration separately
+ // for other purposes in the future.
+ //
+ // For gen2 GCs we also keep a separate array, currently just for the GC cost. This serves as a backstop
+ // to smooth things out when gen2 GCs only rarely show up in the first array.
struct dynamic_heap_count_data_t
{
static const int sample_size = 3;
struct sample
{
- uint64_t elapsed_between_gcs; // time between gcs in microseconds
- uint64_t gc_elapsed_time; // time the gc took
- uint64_t soh_msl_wait_time; // time the allocator spent waiting for the soh msl lock
- uint64_t uoh_msl_wait_time; // time the allocator spent waiting for the uoh msl lock
- size_t allocating_thread_count;// number of allocating threads
- size_t heap_size;
+ uint64_t elapsed_between_gcs; // time between gcs in microseconds (this should really be between_pauses)
+ uint64_t gc_pause_time; // pause time for this GC
+ uint64_t msl_wait_time;
};
- unsigned sample_index;
+ uint32_t sample_index;
sample samples[sample_size];
+ size_t prev_num_completed_gcs;
+
+ uint32_t gen2_sample_index;
+ // This is (gc_elapsed_time / time in between this and the last gen2 GC)
+ float gen2_gc_percents[sample_size];
- float median_percent_overhead; // estimated overhead of allocator + gc
- float smoothed_median_percent_overhead; // exponentially smoothed version
- float percent_heap_space_cost_per_heap; // percent space cost of adding a heap
- float overhead_reduction_per_step_up; // percentage effect on overhead of increasing heap count
- float overhead_increase_per_step_down; // percentage effect on overhead of decreasing heap count
- float space_cost_increase_per_step_up; // percentage effect on space of increasing heap count
- float space_cost_decrease_per_step_down;// percentage effect on space of decreasing heap count
+ float median_throughput_cost_percent; // estimated overhead of allocator + gc
+ float smoothed_median_throughput_cost_percent; // exponentially smoothed version
+ float percent_heap_space_cost_per_heap; // percent space cost of adding a heap
+ float tcp_reduction_per_step_up; // throughput cost percent effect of increasing heap count
+ float tcp_increase_per_step_down; // throughput cost percent effect of decreasing heap count
+ float scp_increase_per_step_up; // space cost percent effect of increasing heap count
+ float scp_decrease_per_step_down; // space cost percent effect of decreasing heap count
int new_n_heaps;
+ // the heap count we changed from
+ int last_n_heaps;
+ // don't start a GC until we see (n_max_heaps - new_n_heaps) threads idling
+ VOLATILE(int32_t) idle_thread_count;
+ bool init_only_p;
+
+ bool should_change_heap_count;
+ int heap_count_to_change_to;
+ int heap_count_change_count;
#ifdef STRESS_DYNAMIC_HEAP_COUNT
int lowest_heap_with_msl_uoh;
#endif //STRESS_DYNAMIC_HEAP_COUNT
+
+ float get_median_gen2_gc_percent()
+ {
+ return median_of_3 (gen2_gc_percents[0], gen2_gc_percents[1], gen2_gc_percents[2]);
+ }
};
PER_HEAP_ISOLATED_FIELD_MAINTAINED dynamic_heap_count_data_t dynamic_heap_count_data;
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED uint64_t last_suspended_end_time;
+ // If the last full GC is blocking, this is that GC's index; for BGC, this is the settings.gc_index
+ // when the BGC ended.
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED size_t gc_index_full_gc_end;
#endif //DYNAMIC_HEAP_COUNT
/****************************************************/
@@ -4866,7 +4909,6 @@ uint64_t& dd_previous_time_clock (dynamic_data* inst)
return inst->previous_time_clock;
}
-
inline
size_t& dd_gc_clock_interval (dynamic_data* inst)
{
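
median_of_3 is declared above but not defined in this diff. A definition consistent with its use in get_median_gen2_gc_percent would be the following (a sketch, not necessarily the PR's exact code):

```cpp
#include <algorithm>

float median_of_3 (float a, float b, float c)
{
    // the median is what survives after discarding the smallest and largest
    return std::max (std::min (a, b), std::min (std::max (a, b), c));
}
```
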
diff --git a/src/coreclr/ilasm/assem.cpp b/src/coreclr/ilasm/assem.cpp
index dd2c91ac093acb..2bd90fadb8f916 100644
--- a/src/coreclr/ilasm/assem.cpp
+++ b/src/coreclr/ilasm/assem.cpp
@@ -1324,7 +1324,12 @@ OPCODE Assembler::DecodeOpcode(const BYTE *pCode, DWORD *pdwLen)
char* Assembler::ReflectionNotation(mdToken tk)
{
+ // We break the global static `wzUniBuf` into 2 equal parts: the first part is used for a Unicode
+ // string, the second part is used for a converted-into-multibyte (MB) string. Note that the MB string
+ // length is in bytes.
char *sz = (char*)&wzUniBuf[dwUniBuf>>1], *pc;
+ const size_t szSizeBytes = (dwUniBuf * sizeof(WCHAR)) / 2; // sizeof(WCHAR) is 2, so this is just `dwUniBuf`
+ const size_t cchUniBuf = dwUniBuf / 2; // only use the first 1/2 of wzUniBuf
*sz=0;
switch(TypeFromToken(tk))
{
@@ -1333,7 +1338,7 @@ char* Assembler::ReflectionNotation(mdToken tk)
Class *pClass = m_lstClass.PEEK(RidFromToken(tk)-1);
if(pClass)
{
- strcpy_s(sz,dwUniBuf>>1,pClass->m_szFQN);
+ strcpy_s(sz,szSizeBytes,pClass->m_szFQN);
pc = sz;
while((pc = strchr(pc,NESTING_SEP)) != NULL)
{
@@ -1348,31 +1353,80 @@ char* Assembler::ReflectionNotation(mdToken tk)
{
ULONG N;
mdToken tkResScope;
- if(SUCCEEDED(m_pImporter->GetTypeRefProps(tk,&tkResScope,wzUniBuf,dwUniBuf>>1,&N)))
+ if(SUCCEEDED(m_pImporter->GetTypeRefProps(tk,&tkResScope,wzUniBuf,cchUniBuf,&N)))
{
- WszWideCharToMultiByte(CP_UTF8,0,wzUniBuf,-1,sz,dwUniBuf>>1,NULL,NULL);
+ int ret = WszWideCharToMultiByte(CP_UTF8,0,wzUniBuf,-1,sz,szSizeBytes,NULL,NULL);
if(TypeFromToken(tkResScope)==mdtAssemblyRef)
{
AsmManAssembly *pAsmRef = m_pManifest->m_AsmRefLst.PEEK(RidFromToken(tkResScope)-1);
if(pAsmRef)
{
- pc = &sz[strlen(sz)];
- pc+=sprintf_s(pc,(dwUniBuf >> 1),", %s, Version=%d.%d.%d.%d, Culture=",pAsmRef->szName,
+ // We assume below that if sprintf_s fails due to buffer overrun,
+ // execution fails fast and sprintf_s doesn't return.
+ int sprintf_ret;
+ const size_t szLen = strlen(sz);
+ pc = &sz[szLen];
+ size_t szRemainingSizeBytes = szSizeBytes - szLen;
+
+ sprintf_ret = sprintf_s(pc,szRemainingSizeBytes,", %s, Version=%d.%d.%d.%d, Culture=",pAsmRef->szName,
pAsmRef->usVerMajor,pAsmRef->usVerMinor,pAsmRef->usBuild,pAsmRef->usRevision);
- ULONG L=0;
- if(pAsmRef->pLocale && (L=pAsmRef->pLocale->length()))
+ pc += sprintf_ret;
+ szRemainingSizeBytes -= (size_t)sprintf_ret;
+
+ unsigned L=0;
+ if(pAsmRef->pLocale && ((L=pAsmRef->pLocale->length()) > 0))
+ {
+ // L is in bytes and doesn't include the terminating null.
+ if (L > (cchUniBuf - 1) * sizeof(WCHAR))
+ {
+ report->error("Locale too long (%d characters, %d allowed).\n",L / sizeof(WCHAR), cchUniBuf - 1);
+ *sz=0;
+ break;
+ }
+ else if (szRemainingSizeBytes == 0)
+ {
+ report->error("TypeRef too long.\n");
+ *sz=0;
+ break;
+ }
+
+ if (szRemainingSizeBytes > 0)
+ {
+ memcpy(wzUniBuf,pAsmRef->pLocale->ptr(),L);
+ wzUniBuf[L>>1] = 0;
+ ret = WszWideCharToMultiByte(CP_UTF8,0,wzUniBuf,-1,pc,(int)szRemainingSizeBytes,NULL,NULL);
+ if (ret <= 0)
+ {
+ report->error("Locale too long.\n");
+ *sz=0;
+ break;
+ }
+ else
+ {
+ pc += ret;
+ szRemainingSizeBytes -= (size_t)ret;
+ }
+ }
+ }
+ else
{
- memcpy(wzUniBuf,pAsmRef->pLocale->ptr(),L);
- wzUniBuf[L>>1] = 0;
- WszWideCharToMultiByte(CP_UTF8,0,wzUniBuf,-1,pc,dwUniBuf>>1,NULL,NULL);
+ sprintf_ret = sprintf_s(pc,szRemainingSizeBytes,"neutral");
+ pc += sprintf_ret;
+ szRemainingSizeBytes -= (size_t)sprintf_ret;
}
- else pc+=sprintf_s(pc,(dwUniBuf >> 1),"neutral");
- pc = &sz[strlen(sz)];
- if(pAsmRef->pPublicKeyToken && (L=pAsmRef->pPublicKeyToken->length()))
+ if(pAsmRef->pPublicKeyToken && ((L=pAsmRef->pPublicKeyToken->length()) > 0))
{
- pc+=sprintf_s(pc,(dwUniBuf >> 1),", Publickeytoken=");
+ sprintf_ret = sprintf_s(pc,szRemainingSizeBytes,", Publickeytoken=");
+ pc += sprintf_ret;
+ szRemainingSizeBytes -= (size_t)sprintf_ret;
+
BYTE* pb = (BYTE*)(pAsmRef->pPublicKeyToken->ptr());
-                            for(N=0; N<L; N++,pb++) pc+=sprintf_s(pc,(dwUniBuf >> 1),"%2.2x",*pb);
+                            for(unsigned i=0; i<L; i++,pb++)
+                            {
+                                sprintf_ret = sprintf_s(pc,szRemainingSizeBytes,"%2.2x",*pb);
+                                pc += sprintf_ret;
+                                szRemainingSizeBytes -= (size_t)sprintf_ret;
+                            }
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
-        compiler->m_pLinearScan->dumpLsraStatsSummary(jitstdout);
+ compiler->m_pLinearScan->dumpLsraStatsSummary(jitstdout());
}
#endif // TRACK_LSRA_STATS
@@ -2104,7 +2104,7 @@ void CodeGen::genEmitUnwindDebugGCandEH()
genCreateAndStoreGCInfo(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
#ifdef DEBUG
- FILE* dmpf = jitstdout;
+ FILE* dmpf = jitstdout();
compiler->opts.dmpHex = false;
    if (!strcmp(compiler->info.compMethodName, "<name of method you want the hex dump for"))
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
        if (percentage >= 0.5)
{
- fprintf(fout, " GT_%-17s %7u (%4.1lf%%) %3u bytes each\n", GenTree::OpName(oper.Oper), count,
- percentage, size);
+ jitprintf(" GT_%-17s %7u (%4.1lf%%) %3u bytes each\n", GenTree::OpName(oper.Oper), count,
+ percentage, size);
remainingCount -= count;
}
else
@@ -1484,14 +1482,14 @@ void Compiler::compShutdown()
if (remainingCount > 0)
{
- fprintf(fout, " All other GT_xxx ... %7u (%4.1lf%%) ... %4.1lf%% small + %4.1lf%% large\n",
- remainingCount, 100.0 * remainingCount / totalCount, 100.0 * remainingCountSmall / totalCount,
- 100.0 * remainingCountLarge / totalCount);
+ jitprintf(" All other GT_xxx ... %7u (%4.1lf%%) ... %4.1lf%% small + %4.1lf%% large\n", remainingCount,
+ 100.0 * remainingCount / totalCount, 100.0 * remainingCountSmall / totalCount,
+ 100.0 * remainingCountLarge / totalCount);
}
- fprintf(fout, " -----------------------------------------------------\n");
- fprintf(fout, " Total ....... %11u --ALL-- ... %4.1lf%% small + %4.1lf%% large\n", totalCount,
- 100.0 * countSmall / totalCount, 100.0 * countLarge / totalCount);
- fprintf(fout, "\n");
+ jitprintf(" -----------------------------------------------------\n");
+ jitprintf(" Total ....... %11u --ALL-- ... %4.1lf%% small + %4.1lf%% large\n", totalCount,
+ 100.0 * countSmall / totalCount, 100.0 * countLarge / totalCount);
+ jitprintf("\n");
}
#endif // COUNT_AST_OPERS
@@ -1500,49 +1498,49 @@ void Compiler::compShutdown()
if (grossVMsize && grossNCsize)
{
- fprintf(fout, "\n");
- fprintf(fout, "--------------------------------------\n");
- fprintf(fout, "Function and GC info size stats\n");
- fprintf(fout, "--------------------------------------\n");
+ jitprintf("\n");
+ jitprintf("--------------------------------------\n");
+ jitprintf("Function and GC info size stats\n");
+ jitprintf("--------------------------------------\n");
- fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, grossNCsize, Target::g_tgtCPUName,
- 100 * grossNCsize / grossVMsize, "Total (excluding GC info)");
+ jitprintf("[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, grossNCsize, Target::g_tgtCPUName,
+ 100 * grossNCsize / grossVMsize, "Total (excluding GC info)");
- fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, totalNCsize, Target::g_tgtCPUName,
- 100 * totalNCsize / grossVMsize, "Total (including GC info)");
+ jitprintf("[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, totalNCsize, Target::g_tgtCPUName,
+ 100 * totalNCsize / grossVMsize, "Total (including GC info)");
if (gcHeaderISize || gcHeaderNSize)
{
- fprintf(fout, "\n");
+ jitprintf("\n");
- fprintf(fout, "GC tables : [%7uI,%7uN] %7u byt (%u%% of IL, %u%% of %s).\n",
- gcHeaderISize + gcPtrMapISize, gcHeaderNSize + gcPtrMapNSize, totalNCsize - grossNCsize,
- 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize,
- Target::g_tgtCPUName);
+ jitprintf("GC tables : [%7uI,%7uN] %7u byt (%u%% of IL, %u%% of %s).\n", gcHeaderISize + gcPtrMapISize,
+ gcHeaderNSize + gcPtrMapNSize, totalNCsize - grossNCsize,
+ 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize,
+ Target::g_tgtCPUName);
- fprintf(fout, "GC headers : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcHeaderISize,
- gcHeaderNSize, gcHeaderISize + gcHeaderNSize, (float)gcHeaderISize / (genMethodICnt + 0.001),
- (float)gcHeaderNSize / (genMethodNCnt + 0.001),
- (float)(gcHeaderISize + gcHeaderNSize) / genMethodCnt);
+ jitprintf("GC headers : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcHeaderISize,
+ gcHeaderNSize, gcHeaderISize + gcHeaderNSize, (float)gcHeaderISize / (genMethodICnt + 0.001),
+ (float)gcHeaderNSize / (genMethodNCnt + 0.001),
+ (float)(gcHeaderISize + gcHeaderNSize) / genMethodCnt);
- fprintf(fout, "GC ptr maps : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcPtrMapISize,
- gcPtrMapNSize, gcPtrMapISize + gcPtrMapNSize, (float)gcPtrMapISize / (genMethodICnt + 0.001),
- (float)gcPtrMapNSize / (genMethodNCnt + 0.001),
- (float)(gcPtrMapISize + gcPtrMapNSize) / genMethodCnt);
+ jitprintf("GC ptr maps : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcPtrMapISize,
+ gcPtrMapNSize, gcPtrMapISize + gcPtrMapNSize, (float)gcPtrMapISize / (genMethodICnt + 0.001),
+ (float)gcPtrMapNSize / (genMethodNCnt + 0.001),
+ (float)(gcPtrMapISize + gcPtrMapNSize) / genMethodCnt);
}
else
{
- fprintf(fout, "\n");
+ jitprintf("\n");
- fprintf(fout, "GC tables take up %u bytes (%u%% of instr, %u%% of %6s code).\n",
- totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize,
- 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName);
+ jitprintf("GC tables take up %u bytes (%u%% of instr, %u%% of %6s code).\n", totalNCsize - grossNCsize,
+ 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize,
+ Target::g_tgtCPUName);
}
#ifdef DEBUG
#if DOUBLE_ALIGN
- fprintf(fout, "%u out of %u methods generated with double-aligned stack\n",
- Compiler::s_lvaDoubleAlignedProcsCount, genMethodCnt);
+ jitprintf("%u out of %u methods generated with double-aligned stack\n", Compiler::s_lvaDoubleAlignedProcsCount,
+ genMethodCnt);
#endif
#endif
}
@@ -1550,110 +1548,110 @@ void Compiler::compShutdown()
#endif // DISPLAY_SIZES
#if CALL_ARG_STATS
- compDispCallArgStats(fout);
+ compDispCallArgStats(jitstdout());
#endif
#if COUNT_BASIC_BLOCKS
- fprintf(fout, "--------------------------------------------------\n");
- fprintf(fout, "Basic block count frequency table:\n");
- fprintf(fout, "--------------------------------------------------\n");
- bbCntTable.dump(fout);
- fprintf(fout, "--------------------------------------------------\n");
-
- fprintf(fout, "\n");
-
- fprintf(fout, "--------------------------------------------------\n");
- fprintf(fout, "IL method size frequency table for methods with a single basic block:\n");
- fprintf(fout, "--------------------------------------------------\n");
- bbOneBBSizeTable.dump(fout);
- fprintf(fout, "--------------------------------------------------\n");
-
- fprintf(fout, "--------------------------------------------------\n");
- fprintf(fout, "fgComputeDoms `while (change)` iterations:\n");
- fprintf(fout, "--------------------------------------------------\n");
- domsChangedIterationTable.dump(fout);
- fprintf(fout, "--------------------------------------------------\n");
-
- fprintf(fout, "--------------------------------------------------\n");
- fprintf(fout, "fgComputeReachabilitySets `while (change)` iterations:\n");
- fprintf(fout, "--------------------------------------------------\n");
- computeReachabilitySetsIterationTable.dump(fout);
- fprintf(fout, "--------------------------------------------------\n");
-
- fprintf(fout, "--------------------------------------------------\n");
- fprintf(fout, "fgComputeReachability `while (change)` iterations:\n");
- fprintf(fout, "--------------------------------------------------\n");
- computeReachabilityIterationTable.dump(fout);
- fprintf(fout, "--------------------------------------------------\n");
+ jitprintf("--------------------------------------------------\n");
+ jitprintf("Basic block count frequency table:\n");
+ jitprintf("--------------------------------------------------\n");
+ bbCntTable.dump(jitstdout());
+ jitprintf("--------------------------------------------------\n");
+
+ jitprintf("\n");
+
+ jitprintf("--------------------------------------------------\n");
+ jitprintf("IL method size frequency table for methods with a single basic block:\n");
+ jitprintf("--------------------------------------------------\n");
+ bbOneBBSizeTable.dump(jitstdout());
+ jitprintf("--------------------------------------------------\n");
+
+ jitprintf("--------------------------------------------------\n");
+ jitprintf("fgComputeDoms `while (change)` iterations:\n");
+ jitprintf("--------------------------------------------------\n");
+ domsChangedIterationTable.dump(jitstdout());
+ jitprintf("--------------------------------------------------\n");
+
+ jitprintf("--------------------------------------------------\n");
+ jitprintf("fgComputeReachabilitySets `while (change)` iterations:\n");
+ jitprintf("--------------------------------------------------\n");
+ computeReachabilitySetsIterationTable.dump(jitstdout());
+ jitprintf("--------------------------------------------------\n");
+
+ jitprintf("--------------------------------------------------\n");
+ jitprintf("fgComputeReachability `while (change)` iterations:\n");
+ jitprintf("--------------------------------------------------\n");
+ computeReachabilityIterationTable.dump(jitstdout());
+ jitprintf("--------------------------------------------------\n");
#endif // COUNT_BASIC_BLOCKS
#if COUNT_LOOPS
- fprintf(fout, "\n");
- fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout, "Loop stats\n");
- fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout, "Total number of methods with loops is %5u\n", totalLoopMethods);
- fprintf(fout, "Total number of loops is %5u\n", totalLoopCount);
- fprintf(fout, "Maximum number of loops per method is %5u\n", maxLoopsPerMethod);
- fprintf(fout, "# of methods overflowing nat loop table is %5u\n", totalLoopOverflows);
- fprintf(fout, "Total number of 'unnatural' loops is %5u\n", totalUnnatLoopCount);
- fprintf(fout, "# of methods overflowing unnat loop limit is %5u\n", totalUnnatLoopOverflows);
- fprintf(fout, "Total number of loops with an iterator is %5u\n", iterLoopCount);
- fprintf(fout, "Total number of loops with a constant iterator is %5u\n", constIterLoopCount);
-
- fprintf(fout, "--------------------------------------------------\n");
- fprintf(fout, "Loop count frequency table:\n");
- fprintf(fout, "--------------------------------------------------\n");
- loopCountTable.dump(fout);
- fprintf(fout, "--------------------------------------------------\n");
- fprintf(fout, "Loop exit count frequency table:\n");
- fprintf(fout, "--------------------------------------------------\n");
- loopExitCountTable.dump(fout);
- fprintf(fout, "--------------------------------------------------\n");
+ jitprintf("\n");
+ jitprintf("---------------------------------------------------\n");
+ jitprintf("Loop stats\n");
+ jitprintf("---------------------------------------------------\n");
+ jitprintf("Total number of methods with loops is %5u\n", totalLoopMethods);
+ jitprintf("Total number of loops is %5u\n", totalLoopCount);
+ jitprintf("Maximum number of loops per method is %5u\n", maxLoopsPerMethod);
+ jitprintf("# of methods overflowing nat loop table is %5u\n", totalLoopOverflows);
+ jitprintf("Total number of 'unnatural' loops is %5u\n", totalUnnatLoopCount);
+ jitprintf("# of methods overflowing unnat loop limit is %5u\n", totalUnnatLoopOverflows);
+ jitprintf("Total number of loops with an iterator is %5u\n", iterLoopCount);
+ jitprintf("Total number of loops with a constant iterator is %5u\n", constIterLoopCount);
+
+ jitprintf("--------------------------------------------------\n");
+ jitprintf("Loop count frequency table:\n");
+ jitprintf("--------------------------------------------------\n");
+ loopCountTable.dump(jitstdout());
+ jitprintf("--------------------------------------------------\n");
+ jitprintf("Loop exit count frequency table:\n");
+ jitprintf("--------------------------------------------------\n");
+ loopExitCountTable.dump(jitstdout());
+ jitprintf("--------------------------------------------------\n");
#endif // COUNT_LOOPS
#if MEASURE_NODE_SIZE
- fprintf(fout, "\n");
- fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout, "GenTree node allocation stats\n");
- fprintf(fout, "---------------------------------------------------\n");
+ jitprintf("\n");
+ jitprintf("---------------------------------------------------\n");
+ jitprintf("GenTree node allocation stats\n");
+ jitprintf("---------------------------------------------------\n");
- fprintf(fout, "Allocated %6I64u tree nodes (%7I64u bytes total, avg %4I64u bytes per method)\n",
- genNodeSizeStats.genTreeNodeCnt, genNodeSizeStats.genTreeNodeSize,
- genNodeSizeStats.genTreeNodeSize / genMethodCnt);
+ jitprintf("Allocated %6I64u tree nodes (%7I64u bytes total, avg %4I64u bytes per method)\n",
+ genNodeSizeStats.genTreeNodeCnt, genNodeSizeStats.genTreeNodeSize,
+ genNodeSizeStats.genTreeNodeSize / genMethodCnt);
- fprintf(fout, "Allocated %7I64u bytes of unused tree node space (%3.2f%%)\n",
- genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize,
- (float)(100 * (genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize)) /
- genNodeSizeStats.genTreeNodeSize);
+ jitprintf("Allocated %7I64u bytes of unused tree node space (%3.2f%%)\n",
+ genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize,
+ (float)(100 * (genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize)) /
+ genNodeSizeStats.genTreeNodeSize);
- fprintf(fout, "\n");
- fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout, "Distribution of per-method GenTree node counts:\n");
- genTreeNcntHist.dump(fout);
+ jitprintf("\n");
+ jitprintf("---------------------------------------------------\n");
+ jitprintf("Distribution of per-method GenTree node counts:\n");
+ genTreeNcntHist.dump(jitstdout());
- fprintf(fout, "\n");
- fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout, "Distribution of per-method GenTree node allocations (in bytes):\n");
- genTreeNsizHist.dump(fout);
+ jitprintf("\n");
+ jitprintf("---------------------------------------------------\n");
+ jitprintf("Distribution of per-method GenTree node allocations (in bytes):\n");
+ genTreeNsizHist.dump(jitstdout());
#endif // MEASURE_NODE_SIZE
#if MEASURE_BLOCK_SIZE
- fprintf(fout, "\n");
- fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout, "BasicBlock and FlowEdge/BasicBlockList allocation stats\n");
- fprintf(fout, "---------------------------------------------------\n");
+ jitprintf("\n");
+ jitprintf("---------------------------------------------------\n");
+ jitprintf("BasicBlock and FlowEdge/BasicBlockList allocation stats\n");
+ jitprintf("---------------------------------------------------\n");
- fprintf(fout, "Allocated %6u basic blocks (%7u bytes total, avg %4u bytes per method)\n", BasicBlock::s_Count,
- BasicBlock::s_Size, BasicBlock::s_Size / genMethodCnt);
- fprintf(fout, "Allocated %6u flow nodes (%7u bytes total, avg %4u bytes per method)\n", genFlowNodeCnt,
- genFlowNodeSize, genFlowNodeSize / genMethodCnt);
+ jitprintf("Allocated %6u basic blocks (%7u bytes total, avg %4u bytes per method)\n", BasicBlock::s_Count,
+ BasicBlock::s_Size, BasicBlock::s_Size / genMethodCnt);
+ jitprintf("Allocated %6u flow nodes (%7u bytes total, avg %4u bytes per method)\n", genFlowNodeCnt, genFlowNodeSize,
+ genFlowNodeSize / genMethodCnt);
#endif // MEASURE_BLOCK_SIZE
@@ -1661,21 +1659,21 @@ void Compiler::compShutdown()
if (s_dspMemStats)
{
- fprintf(fout, "\nAll allocations:\n");
- ArenaAllocator::dumpAggregateMemStats(jitstdout);
+ jitprintf("\nAll allocations:\n");
+ ArenaAllocator::dumpAggregateMemStats(jitstdout());
- fprintf(fout, "\nLargest method:\n");
- ArenaAllocator::dumpMaxMemStats(jitstdout);
+ jitprintf("\nLargest method:\n");
+ ArenaAllocator::dumpMaxMemStats(jitstdout());
- fprintf(fout, "\n");
- fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout, "Distribution of total memory allocated per method (in KB):\n");
- memAllocHist.dump(fout);
+ jitprintf("\n");
+ jitprintf("---------------------------------------------------\n");
+ jitprintf("Distribution of total memory allocated per method (in KB):\n");
+ memAllocHist.dump(jitstdout());
- fprintf(fout, "\n");
- fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout, "Distribution of total memory used per method (in KB):\n");
- memUsedHist.dump(fout);
+ jitprintf("\n");
+ jitprintf("---------------------------------------------------\n");
+ jitprintf("Distribution of total memory used per method (in KB):\n");
+ memUsedHist.dump(jitstdout());
}
#endif // MEASURE_MEM_ALLOC
@@ -1685,29 +1683,29 @@ void Compiler::compShutdown()
if (JitConfig.DisplayLoopHoistStats() != 0)
#endif // DEBUG
{
- PrintAggregateLoopHoistStats(jitstdout);
+ PrintAggregateLoopHoistStats(jitstdout());
}
#endif // LOOP_HOIST_STATS
#if TRACK_ENREG_STATS
if (JitConfig.JitEnregStats() != 0)
{
- s_enregisterStats.Dump(fout);
+ s_enregisterStats.Dump(jitstdout());
}
#endif // TRACK_ENREG_STATS
#if MEASURE_PTRTAB_SIZE
- fprintf(fout, "\n");
- fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout, "GC pointer table stats\n");
- fprintf(fout, "---------------------------------------------------\n");
+ jitprintf("\n");
+ jitprintf("---------------------------------------------------\n");
+ jitprintf("GC pointer table stats\n");
+ jitprintf("---------------------------------------------------\n");
- fprintf(fout, "Reg pointer descriptor size (internal): %8u (avg %4u per method)\n", GCInfo::s_gcRegPtrDscSize,
- GCInfo::s_gcRegPtrDscSize / genMethodCnt);
+ jitprintf("Reg pointer descriptor size (internal): %8u (avg %4u per method)\n", GCInfo::s_gcRegPtrDscSize,
+ GCInfo::s_gcRegPtrDscSize / genMethodCnt);
- fprintf(fout, "Total pointer table size: %8u (avg %4u per method)\n", GCInfo::s_gcTotalPtrTabSize,
- GCInfo::s_gcTotalPtrTabSize / genMethodCnt);
+ jitprintf("Total pointer table size: %8u (avg %4u per method)\n", GCInfo::s_gcTotalPtrTabSize,
+ GCInfo::s_gcTotalPtrTabSize / genMethodCnt);
#endif // MEASURE_PTRTAB_SIZE
@@ -1715,37 +1713,37 @@ void Compiler::compShutdown()
if (genMethodCnt != 0)
{
- fprintf(fout, "\n");
- fprintf(fout, "A total of %6u methods compiled", genMethodCnt);
+ jitprintf("\n");
+ jitprintf("A total of %6u methods compiled", genMethodCnt);
#if DISPLAY_SIZES
if (genMethodICnt || genMethodNCnt)
{
- fprintf(fout, " (%u interruptible, %u non-interruptible)", genMethodICnt, genMethodNCnt);
+ jitprintf(" (%u interruptible, %u non-interruptible)", genMethodICnt, genMethodNCnt);
}
#endif // DISPLAY_SIZES
- fprintf(fout, ".\n");
+ jitprintf(".\n");
}
#endif // MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES
#if EMITTER_STATS
- emitterStats(fout);
+ emitterStats(jitstdout());
#endif
#if MEASURE_FATAL
- fprintf(fout, "\n");
- fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout, "Fatal errors stats\n");
- fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout, " badCode: %u\n", fatal_badCode);
- fprintf(fout, " noWay: %u\n", fatal_noWay);
- fprintf(fout, " implLimitation: %u\n", fatal_implLimitation);
- fprintf(fout, " NOMEM: %u\n", fatal_NOMEM);
- fprintf(fout, " noWayAssertBody: %u\n", fatal_noWayAssertBody);
+ jitprintf("\n");
+ jitprintf("---------------------------------------------------\n");
+ jitprintf("Fatal errors stats\n");
+ jitprintf("---------------------------------------------------\n");
+ jitprintf(" badCode: %u\n", fatal_badCode);
+ jitprintf(" noWay: %u\n", fatal_noWay);
+ jitprintf(" implLimitation: %u\n", fatal_implLimitation);
+ jitprintf(" NOMEM: %u\n", fatal_NOMEM);
+ jitprintf(" noWayAssertBody: %u\n", fatal_noWayAssertBody);
#ifdef DEBUG
- fprintf(fout, " noWayAssertBodyArgs: %u\n", fatal_noWayAssertBodyArgs);
+ jitprintf(" noWayAssertBodyArgs: %u\n", fatal_noWayAssertBodyArgs);
#endif // DEBUG
- fprintf(fout, " NYI: %u\n", fatal_NYI);
+ jitprintf(" NYI: %u\n", fatal_NYI);
#endif // MEASURE_FATAL
}
@@ -1754,14 +1752,14 @@ void Compiler::compShutdown()
*/
/* static */
-void Compiler::compDisplayStaticSizes(FILE* fout)
+void Compiler::compDisplayStaticSizes()
{
#if MEASURE_NODE_SIZE
- GenTree::DumpNodeSizes(fout);
+ GenTree::DumpNodeSizes();
#endif
#if EMITTER_STATS
- emitterStaticStats(fout);
+ emitterStaticStats();
#endif
}
@@ -5177,7 +5175,7 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl
#if TRACK_LSRA_STATS
if (JitConfig.DisplayLsraStats() == 2)
{
- m_pLinearScan->dumpLsraStatsCsv(jitstdout);
+ m_pLinearScan->dumpLsraStatsCsv(jitstdout());
}
#endif // TRACK_LSRA_STATS
@@ -5282,6 +5280,13 @@ PhaseStatus Compiler::placeLoopAlignInstructions()
weight_t minBlockSoFar = BB_MAX_WEIGHT;
BasicBlock* bbHavingAlign = nullptr;
BasicBlock::loopNumber currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP;
+ bool visitedLoopNum[BasicBlock::MAX_LOOP_NUM];
+ memset(visitedLoopNum, false, sizeof(visitedLoopNum));
+
+#ifdef DEBUG
+ unsigned visitedBlockForLoopNum[BasicBlock::MAX_LOOP_NUM];
+ memset(visitedBlockForLoopNum, 0, sizeof(visitedBlockForLoopNum));
+#endif
if ((fgFirstBB != nullptr) && fgFirstBB->isLoopAlign())
{
@@ -5304,7 +5309,7 @@ PhaseStatus Compiler::placeLoopAlignInstructions()
}
}
- // If there is a unconditional jump (which is not part of callf/always pair)
+ // If there is an unconditional jump (which is not part of callf/always pair)
if (opts.compJitHideAlignBehindJmp && (block->bbJumpKind == BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail())
{
// Track the lower weight blocks
@@ -5358,12 +5363,19 @@ PhaseStatus Compiler::placeLoopAlignInstructions()
madeChanges = true;
unmarkedLoopAlign = true;
}
- else if ((block->bbNatLoopNum != BasicBlock::NOT_IN_LOOP) && (block->bbNatLoopNum == loopTop->bbNatLoopNum))
+ else if ((loopTop->bbNatLoopNum != BasicBlock::NOT_IN_LOOP) && visitedLoopNum[loopTop->bbNatLoopNum])
{
+#ifdef DEBUG
+ char buffer[100];
+ sprintf_s(buffer, 100, "loop block " FMT_BB " appears before top of loop",
+ visitedBlockForLoopNum[loopTop->bbNatLoopNum]);
+#endif
+
// In some odd cases we may see blocks within the loop before we see the
// top block of the loop. Just bail on aligning such loops.
//
- loopTop->unmarkLoopAlign(this DEBUG_ARG("loop block appears before top of loop"));
+
+ loopTop->unmarkLoopAlign(this DEBUG_ARG(buffer));
madeChanges = true;
unmarkedLoopAlign = true;
}
@@ -5398,6 +5410,20 @@ PhaseStatus Compiler::placeLoopAlignInstructions()
break;
}
}
+
+ if (block->bbNatLoopNum != BasicBlock::NOT_IN_LOOP)
+ {
+#ifdef DEBUG
+ if (!visitedLoopNum[block->bbNatLoopNum])
+ {
+ // Record the first block for which bbNatLoopNum was seen, for
+ // debugging purposes.
+ visitedBlockForLoopNum[block->bbNatLoopNum] = block->bbNum;
+ }
+#endif
+ // If this block is part of a loop, mark its loopNum as visited.
+ visitedLoopNum[block->bbNatLoopNum] = true;
+ }
}
assert(loopsToProcess == 0);
@@ -5879,7 +5905,7 @@ int Compiler::compCompile(CORINFO_MODULE_HANDLE classPtr,
}
#endif // FUNC_INFO_LOGGING
- // if (s_compMethodsCount==0) setvbuf(jitstdout, NULL, _IONBF, 0);
+ // if (s_compMethodsCount==0) setvbuf(jitstdout(), NULL, _IONBF, 0);
if (compIsForInlining())
{
@@ -6363,7 +6389,7 @@ void Compiler::compCompileFinish()
if (s_dspMemStats || verbose)
{
printf("\nAllocations for %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash());
- compArenaAllocator->dumpMemStats(jitstdout);
+ compArenaAllocator->dumpMemStats(jitstdout());
}
#endif // DEBUG
#endif // MEASURE_MEM_ALLOC
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index 645e54258a637a..8339d6d274f4f5 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -10345,7 +10345,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
InlineInfo* inlineInfo);
void compDone();
- static void compDisplayStaticSizes(FILE* fout);
+ static void compDisplayStaticSizes();
//------------ Some utility functions --------------
diff --git a/src/coreclr/jit/disasm.cpp b/src/coreclr/jit/disasm.cpp
index e1926a3f640b7b..fd5c98eb068810 100644
--- a/src/coreclr/jit/disasm.cpp
+++ b/src/coreclr/jit/disasm.cpp
@@ -1478,12 +1478,12 @@ void DisAssembler::disAsmCode(BYTE* hotCodePtr, size_t hotCodeSize, BYTE* coldCo
}
#else // !DEBUG
// NOTE: non-DEBUG builds always use jitstdout currently!
- disAsmFile = jitstdout;
+ disAsmFile = jitstdout();
#endif // !DEBUG
if (disAsmFile == nullptr)
{
- disAsmFile = jitstdout;
+ disAsmFile = jitstdout();
}
// As this writes to a common file, this is not reentrant.
@@ -1519,7 +1519,7 @@ void DisAssembler::disAsmCode(BYTE* hotCodePtr, size_t hotCodeSize, BYTE* coldCo
DisasmBuffer(disAsmFile, /* printIt */ true);
fprintf(disAsmFile, "\n");
- if (disAsmFile != jitstdout)
+ if (disAsmFile != jitstdout())
{
fclose(disAsmFile);
}
diff --git a/src/coreclr/jit/ee_il_dll.cpp b/src/coreclr/jit/ee_il_dll.cpp
index bcf7c7be401c21..57c52855ea8393 100644
--- a/src/coreclr/jit/ee_il_dll.cpp
+++ b/src/coreclr/jit/ee_il_dll.cpp
@@ -31,8 +31,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/*****************************************************************************/
-FILE* jitstdout = nullptr;
-
ICorJitHost* g_jitHost = nullptr;
bool g_jitInitialized = false;
@@ -72,15 +70,28 @@ extern "C" DLLEXPORT void jitStartup(ICorJitHost* jitHost)
assert(!JitConfig.isInitialized());
JitConfig.initialize(jitHost);
+#ifdef FEATURE_TRACELOGGING
+ JitTelemetry::NotifyDllProcessAttach();
+#endif
+ Compiler::compStartup();
+
+ g_jitInitialized = true;
+}
+
+static FILE* volatile s_jitstdout;
+
+static FILE* jitstdoutInit()
+{
const WCHAR* jitStdOutFile = JitConfig.JitStdOutFile();
+ FILE* file = nullptr;
if (jitStdOutFile != nullptr)
{
- jitstdout = _wfopen(jitStdOutFile, W("a"));
- assert(jitstdout != nullptr);
+ file = _wfopen(jitStdOutFile, W("a"));
+ assert(file != nullptr);
}
#if !defined(HOST_UNIX)
- if (jitstdout == nullptr)
+ if (file == nullptr)
{
int stdoutFd = _fileno(procstdout());
// Check fileno error output(s) -1 may overlap with errno result
@@ -89,46 +100,61 @@ extern "C" DLLEXPORT void jitStartup(ICorJitHost* jitHost)
// or bogus and avoid making further calls.
if ((stdoutFd != -1) && (stdoutFd != -2) && (errno != EINVAL))
{
- int jitstdoutFd = _dup(_fileno(procstdout()));
+ int jitstdoutFd = _dup(stdoutFd);
// Check the error status returned by dup.
if (jitstdoutFd != -1)
{
_setmode(jitstdoutFd, _O_TEXT);
- jitstdout = _fdopen(jitstdoutFd, "w");
- assert(jitstdout != nullptr);
+ file = _fdopen(jitstdoutFd, "w");
+ assert(file != nullptr);
// Prevent the FILE* from buffering its output in order to avoid calls to
// `fflush()` throughout the code.
- setvbuf(jitstdout, nullptr, _IONBF, 0);
+ setvbuf(file, nullptr, _IONBF, 0);
}
}
}
#endif // !HOST_UNIX
- // If jitstdout is still null, fallback to whatever procstdout() was
- // initially set to.
- if (jitstdout == nullptr)
+ if (file == nullptr)
{
- jitstdout = procstdout();
+ file = procstdout();
}
-#ifdef FEATURE_TRACELOGGING
- JitTelemetry::NotifyDllProcessAttach();
-#endif
- Compiler::compStartup();
+ FILE* observed = InterlockedCompareExchangeT(&s_jitstdout, file, nullptr);
- g_jitInitialized = true;
+ if (observed != nullptr)
+ {
+ if (file != procstdout())
+ {
+ fclose(file);
+ }
+
+ return observed;
+ }
+
+ return file;
}
-#ifndef DEBUG
+FILE* jitstdout()
+{
+ FILE* file = s_jitstdout;
+ if (file != nullptr)
+ {
+ return file;
+ }
+
+ return jitstdoutInit();
+}
+
+// Like printf/logf, but only outputs to jitstdout -- skips the callback into the EE.
void jitprintf(const char* fmt, ...)
{
va_list vl;
va_start(vl, fmt);
- vfprintf(jitstdout, fmt, vl);
+ vfprintf(jitstdout(), fmt, vl);
va_end(vl);
}
-#endif
void jitShutdown(bool processIsTerminating)
{
@@ -139,14 +165,15 @@ void jitShutdown(bool processIsTerminating)
Compiler::compShutdown();
- if (jitstdout != procstdout())
+ FILE* file = s_jitstdout;
+ if ((file != nullptr) && (file != procstdout()))
{
// When the process is terminating, the fclose call is unnecessary and is also prone to
// crashing since the UCRT itself often frees the backing memory earlier on in the
// termination sequence.
if (!processIsTerminating)
{
- fclose(jitstdout);
+ fclose(file);
}
}
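
The lazy initialization above is a publish-once-by-CAS pattern: any number of threads may race to create a FILE*, exactly one wins the compare-exchange, and each loser closes its copy and adopts the winner's. A self-contained sketch of the same shape using std::atomic; the file name and fallback here are placeholders, not the JIT's:

```cpp
#include <atomic>
#include <cstdio>

static std::atomic<FILE*> s_out{nullptr};

static FILE* out_init ()
{
    FILE* file = fopen ("out.log", "a"); // placeholder name
    if (file == nullptr)
        file = stdout;                   // fall back to process stdout

    FILE* expected = nullptr;
    if (!s_out.compare_exchange_strong (expected, file))
    {
        // another thread won the race; discard our copy and use the winner's
        if (file != stdout)
            fclose (file);
        return expected;
    }
    return file;
}

FILE* out ()
{
    FILE* file = s_out.load (std::memory_order_acquire);
    return (file != nullptr) ? file : out_init ();
}
```
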
diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp
index c85724f9240b98..8a671b7a757b88 100644
--- a/src/coreclr/jit/emit.cpp
+++ b/src/coreclr/jit/emit.cpp
@@ -215,7 +215,7 @@ unsigned emitter::emitInt32CnsCnt;
unsigned emitter::emitNegCnsCnt;
unsigned emitter::emitPow2CnsCnt;
-void emitterStaticStats(FILE* fout)
+void emitterStaticStats()
{
// The IG buffer size depends on whether we are storing a debug info pointer or not. For our purposes
// here, do not include that.
@@ -227,6 +227,8 @@ void emitterStaticStats(FILE* fout)
insGroup* igDummy = nullptr;
+ FILE* fout = jitstdout();
+
fprintf(fout, "\n");
fprintf(fout, "insGroup:\n");
fprintf(fout, "Offset / size of igNext = %3zu / %2zu\n", offsetof(insGroup, igNext),
diff --git a/src/coreclr/jit/emitarm.cpp b/src/coreclr/jit/emitarm.cpp
index a0dc786782b0bc..784f797bc5efed 100644
--- a/src/coreclr/jit/emitarm.cpp
+++ b/src/coreclr/jit/emitarm.cpp
@@ -4850,6 +4850,7 @@ void emitter::emitIns_Call(EmitCallType callType,
dispIns(id);
appendToCurIG(id);
+ emitLastMemBarrier = nullptr; // Cannot optimize away future memory barriers
}
/*****************************************************************************
diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp
index b4e81322e2a696..ef1220e325e47c 100644
--- a/src/coreclr/jit/emitarm64.cpp
+++ b/src/coreclr/jit/emitarm64.cpp
@@ -8886,6 +8886,7 @@ void emitter::emitIns_Call(EmitCallType callType,
dispIns(id);
appendToCurIG(id);
+ emitLastMemBarrier = nullptr; // Cannot optimize away future memory barriers
}
/*****************************************************************************
@@ -16615,6 +16616,15 @@ emitter::RegisterOrder emitter::IsOptimizableLdrStrWithPair(
emitAttr prevSize = emitLastIns->idOpSize();
ssize_t prevImm = emitGetInsSC(emitLastIns);
+ // If we have this format, the 'imm' and/or 'prevImm' are not scaled (encoded),
+ // so we cannot proceed.
+ // TODO: In this context, 'imm' and 'prevImm' are assumed to be scaled (encoded).
+ // They should never be scaled (encoded) until they are about to be written to the buffer.
+ if (fmt == IF_LS_2C || lastInsFmt == IF_LS_2C)
+ {
+ return eRO_none;
+ }
+
// Signed, *raw* immediate value fits in 7 bits, so for LDP/ STP the raw value is from -64 to +63.
// For LDR/ STR, there are 9 bits, so we need to limit the range explicitly in software.
if ((imm < -64) || (imm > 63) || (prevImm < -64) || (prevImm > 63))
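The `emitLastMemBarrier = nullptr` lines added to `emitIns_Call` on arm and arm64 protect a peephole that merges back-to-back memory barriers: once a call is emitted, the callee may perform arbitrary memory operations, so a barrier emitted after the call can no longer be proven redundant. A self-contained sketch of the idea, with illustrative names rather than the JIT's real emitter API:

```cpp
#include <string>
#include <vector>

// Consecutive memory barriers can be merged, but a call in between has
// unknown memory effects, so the tracking state must be reset.
struct BarrierPeephole
{
    std::vector<std::string> code;
    bool lastInsWasBarrier = false;

    void EmitBarrier()
    {
        if (lastInsWasBarrier)
        {
            return; // a back-to-back barrier is redundant; elide it
        }
        code.push_back("dmb ish");
        lastInsWasBarrier = true;
    }

    void EmitCall(const std::string& target)
    {
        code.push_back("bl " + target);
        lastInsWasBarrier = false; // the callee may touch memory arbitrarily
    }
};
```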
diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp
index 980d40a47ac318..1c48d1c52f0bb2 100644
--- a/src/coreclr/jit/emitxarch.cpp
+++ b/src/coreclr/jit/emitxarch.cpp
@@ -5485,6 +5485,13 @@ void emitter::emitInsRMW(instruction ins, emitAttr attr, GenTreeStoreInd* storeI
{
assert(!src->isContained()); // there must be one non-contained src
+ if (addr->isContained() && addr->OperIs(GT_LCL_ADDR))
+ {
+ GenTreeLclVarCommon* lclVar = addr->AsLclVarCommon();
+ emitIns_S_R(ins, attr, src->GetRegNum(), lclVar->GetLclNum(), lclVar->GetLclOffs());
+ return;
+ }
+
// ind, reg
id = emitNewInstrAmd(attr, offset);
emitHandleMemOp(storeInd, id, emitInsModeFormat(ins, IF_ARD_RRD), ins);
diff --git a/src/coreclr/jit/error.cpp b/src/coreclr/jit/error.cpp
index 01e6f734b89f89..06635f5d582a1d 100644
--- a/src/coreclr/jit/error.cpp
+++ b/src/coreclr/jit/error.cpp
@@ -387,7 +387,7 @@ int logf(const char* fmt, ...)
{
// if the EE refuses to log it, we try to send it to stdout
va_start(args, fmt);
- written = vflogf(jitstdout, fmt, args);
+ written = vflogf(jitstdout(), fmt, args);
va_end(args);
}
#if 0 // Enable this only when you need it
@@ -448,7 +448,7 @@ void gcDump_logf(const char* fmt, ...)
{
// if the EE refuses to log it, we try to send it to stdout
va_start(args, fmt);
- vflogf(jitstdout, fmt, args);
+ vflogf(jitstdout(), fmt, args);
va_end(args);
}
#if 0 // Enable this only when you need it
diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp
index e79bdb0e46368a..6dbc5e9a654845 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -674,7 +674,7 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePositi
}
else if (strcmp(filename, "stdout") == 0)
{
- fgxFile = jitstdout;
+ fgxFile = jitstdout();
*wbDontClose = true;
}
else if (strcmp(filename, "stderr") == 0)
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index d02ce4714d8a2e..d6d524f375ea73 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -481,10 +481,12 @@ void GenTree::ReportOperBashing(FILE* f)
#if MEASURE_NODE_SIZE
-void GenTree::DumpNodeSizes(FILE* fp)
+void GenTree::DumpNodeSizes()
{
// Dump the sizes of the various GenTree flavors
+ FILE* fp = jitstdout();
+
fprintf(fp, "Small tree node size = %zu bytes\n", TREE_NODE_SZ_SMALL);
fprintf(fp, "Large tree node size = %zu bytes\n", TREE_NODE_SZ_LARGE);
fprintf(fp, "\n");
@@ -19637,8 +19639,8 @@ GenTree* Compiler::gtNewSimdBinOpNode(
}
else
{
- assert(op2->TypeIs(type, simdBaseType, genActualType(simdBaseType)) ||
- (op2->TypeIs(TYP_SIMD12) && type == TYP_SIMD16));
+ assert((genActualType(op2) == genActualType(type)) || (genActualType(op2) == genActualType(simdBaseType)) ||
+ (op2->TypeIs(TYP_SIMD12) && (type == TYP_SIMD16)));
}
NamedIntrinsic intrinsic = NI_Illegal;
diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h
index 27688c3d41790d..109da6a15c30d8 100644
--- a/src/coreclr/jit/gentree.h
+++ b/src/coreclr/jit/gentree.h
@@ -2311,7 +2311,7 @@ struct GenTree
void SetIndirExceptionFlags(Compiler* comp);
#if MEASURE_NODE_SIZE
- static void DumpNodeSizes(FILE* fp);
+ static void DumpNodeSizes();
#endif
#ifdef DEBUG
diff --git a/src/coreclr/jit/host.h b/src/coreclr/jit/host.h
index c99a0601e499b0..0ccefae924e637 100644
--- a/src/coreclr/jit/host.h
+++ b/src/coreclr/jit/host.h
@@ -3,6 +3,8 @@
/*****************************************************************************/
+void jitprintf(const char* fmt, ...);
+
#ifdef DEBUG
#undef printf
@@ -44,7 +46,6 @@ extern "C" void ANALYZER_NORETURN __cdecl assertAbort(const char* why, const cha
// Re-define printf in Release to use jitstdout (can be overwritten with DOTNET_JitStdOutFile=file)
#undef printf
#define printf jitprintf
-void jitprintf(const char* fmt, ...);
#undef assert
#define assert(p) (void)0
@@ -55,7 +56,7 @@ void jitprintf(const char* fmt, ...);
#define _HOST_H_
/*****************************************************************************/
-extern FILE* jitstdout;
+FILE* jitstdout();
inline FILE* procstdout()
{
diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp
index 488f65b5ac008d..065999982a87a9 100644
--- a/src/coreclr/jit/hwintrinsicxarch.cpp
+++ b/src/coreclr/jit/hwintrinsicxarch.cpp
@@ -3602,17 +3602,19 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
op2 = impSIMDPopStack();
op1 = impSIMDPopStack();
- if (unusedVal1)
+ // Consume operands we won't use, in case they have side effects.
+ //
+ if (unusedVal1 && !(*val1)->IsVectorZero())
{
impAppendTree(gtUnusedValNode(*val1), CHECK_SPILL_ALL, impCurStmtDI);
}
- if (unusedVal2)
+ if (unusedVal2 && !(*val2)->IsVectorZero())
{
impAppendTree(gtUnusedValNode(*val2), CHECK_SPILL_ALL, impCurStmtDI);
}
- if (unusedVal3)
+ if (unusedVal3 && !(*val3)->IsVectorZero())
{
impAppendTree(gtUnusedValNode(*val3), CHECK_SPILL_ALL, impCurStmtDI);
}
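The new `IsVectorZero()` guards keep the importer from spilling operands that cannot have side effects: an unused operand normally has to be appended as an unused-value node so its side effects still execute, but a vector-zero constant carries none. The rule, expressed as a hypothetical helper (the real logic sits inline in `impSpecialIntrinsic`, and this assumes the JIT's `compiler.h`):

```cpp
// Hypothetical helper over the JIT's importer API; names other than the
// Compiler members visible in the hunk above are illustrative.
static void ConsumeIfUnused(Compiler* comp, GenTree* op, bool unused)
{
    // A vector-zero constant has no side effects, so it needs no
    // unused-value spill; any other unused operand must still be evaluated.
    if (unused && !op->IsVectorZero())
    {
        comp->impAppendTree(comp->gtUnusedValNode(op), CHECK_SPILL_ALL, comp->impCurStmtDI);
    }
}
```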
diff --git a/src/coreclr/jit/inline.cpp b/src/coreclr/jit/inline.cpp
index bae5755707594d..0ded3ef3482a70 100644
--- a/src/coreclr/jit/inline.cpp
+++ b/src/coreclr/jit/inline.cpp
@@ -480,7 +480,7 @@ void InlineContext::DumpData(unsigned indent)
{
const char* inlineReason = InlGetObservationString(m_Observation);
printf("%*s%u,\"%s\",\"%s\",", indent, "", GetOrdinal(), inlineReason, calleeName);
- m_Policy->DumpData(jitstdout);
+ m_Policy->DumpData(jitstdout());
printf("\n");
}
diff --git a/src/coreclr/jit/layout.cpp b/src/coreclr/jit/layout.cpp
index 113414ddfd7f7a..918fd4ab6521d4 100644
--- a/src/coreclr/jit/layout.cpp
+++ b/src/coreclr/jit/layout.cpp
@@ -421,6 +421,7 @@ void ClassLayout::InitializeGCPtrs(Compiler* compiler)
//
// Return value:
// true if at least one GC ByRef, false otherwise.
+//
bool ClassLayout::HasGCByRef() const
{
unsigned slots = GetSlotCount();
@@ -435,6 +436,39 @@ bool ClassLayout::HasGCByRef() const
return false;
}
+//------------------------------------------------------------------------
+// IntersectsGCPtr: check if the specified interval intersects with a GC
+// pointer.
+//
+// Parameters:
+// offset - The start offset of the interval
+// size - The size of the interval
+//
+// Return value:
+// True if the interval overlaps at least one GC pointer slot; false otherwise.
+//
+bool ClassLayout::IntersectsGCPtr(unsigned offset, unsigned size) const
+{
+ if (!HasGCPtr())
+ {
+ return false;
+ }
+
+ unsigned startSlot = offset / TARGET_POINTER_SIZE;
+ unsigned endSlot = (offset + size - 1) / TARGET_POINTER_SIZE;
+ assert((startSlot < GetSlotCount()) && (endSlot < GetSlotCount()));
+
+ for (unsigned i = startSlot; i <= endSlot; i++)
+ {
+ if (IsGCPtr(i))
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
//------------------------------------------------------------------------
// AreCompatible: check if 2 layouts are the same for copying.
//
diff --git a/src/coreclr/jit/layout.h b/src/coreclr/jit/layout.h
index 0e9d6ed65d03d3..59ecaa9405485d 100644
--- a/src/coreclr/jit/layout.h
+++ b/src/coreclr/jit/layout.h
@@ -216,6 +216,8 @@ class ClassLayout
}
}
+ bool IntersectsGCPtr(unsigned offset, unsigned size) const;
+
static bool AreCompatible(const ClassLayout* layout1, const ClassLayout* layout2);
private:
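`IntersectsGCPtr` maps a byte interval onto pointer-sized slots and checks each slot's GC-ness. A worked example of the slot arithmetic, assuming an 8-byte `TARGET_POINTER_SIZE`:

```cpp
#include <cassert>

int main()
{
    const unsigned TARGET_POINTER_SIZE = 8; // 64-bit target assumed

    // A 4-byte access at offset 12 spans only slot 1 ...
    unsigned offset = 12, size = 4;
    assert(offset / TARGET_POINTER_SIZE == 1);
    assert((offset + size - 1) / TARGET_POINTER_SIZE == 1);

    // ... while a 16-byte access at offset 4 spans slots 0..2, so a GC
    // pointer in any of the first three slots makes the query return true.
    offset = 4;
    size   = 16;
    assert(offset / TARGET_POINTER_SIZE == 0);
    assert((offset + size - 1) / TARGET_POINTER_SIZE == 2);
    return 0;
}
```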
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 81df694f05e1ff..2e454e64c14eb1 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -7484,6 +7484,28 @@ bool Lowering::CheckMultiRegLclVar(GenTreeLclVar* lclNode, int registerCount)
if (registerCount == varDsc->lvFieldCnt)
{
canEnregisterAsMultiReg = true;
+
+#ifdef FEATURE_SIMD
+ // TYP_SIMD12 breaks the above invariant that "we won't have
+ // matching reg and field counts"; for example, consider
+ //
+ // * STORE_LCL_VAR(CALL)
+ // * RETURN(LCL_VAR)
+ //
+ // These return in two GPR registers, while the fields of the
+ // local are stored in a SIMD register and a GPR, so registerCount
+ // == varDsc->lvFieldCnt == 2. But the backend cannot handle
+ // this.
+
+ for (int i = 0; i < varDsc->lvFieldCnt; i++)
+ {
+ if (comp->lvaGetDesc(varDsc->lvFieldLclStart + i)->TypeGet() == TYP_SIMD12)
+ {
+ canEnregisterAsMultiReg = false;
+ break;
+ }
+ }
+#endif
}
}
}
diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp
index 86a5b2b1ab4cb4..2536d44aa00c56 100644
--- a/src/coreclr/jit/lowerarmarch.cpp
+++ b/src/coreclr/jit/lowerarmarch.cpp
@@ -1900,6 +1900,10 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
{
use.ReplaceWith(tmp2);
}
+ else
+ {
+ tmp2->SetUnusedValue();
+ }
BlockRange().Remove(node);
return tmp2->gtNext;
diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp
index eba7bdb93e20fb..150ad04a55d99f 100644
--- a/src/coreclr/jit/lowerxarch.cpp
+++ b/src/coreclr/jit/lowerxarch.cpp
@@ -3754,17 +3754,14 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node)
// We want to optimize GetElement down to an Indir where possible as
// this unlocks additional containment opportunities for various nodes
- var_types newType;
- GenTree* newBase;
- GenTree* newIndex;
- uint32_t newScale;
- int32_t newOffset;
+ GenTree* newBase;
+ GenTree* newIndex;
+ uint32_t newScale;
+ int32_t newOffset;
GenTreeIndir* indir = op1->AsIndir();
GenTree* addr = indir->Addr();
- newType = simdBaseType;
-
if (addr->OperIsAddrMode())
{
// We have an existing addressing mode, so we want to try and
@@ -3860,7 +3857,8 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node)
new (comp, GT_LEA) GenTreeAddrMode(addr->TypeGet(), newBase, newIndex, newScale, newOffset);
BlockRange().InsertBefore(node, newAddr);
- GenTreeIndir* newIndir = comp->gtNewIndir(newType, newAddr, (indir->gtFlags & GTF_IND_FLAGS));
+ GenTreeIndir* newIndir =
+ comp->gtNewIndir(JITtype2varType(simdBaseJitType), newAddr, (indir->gtFlags & GTF_IND_FLAGS));
BlockRange().InsertBefore(node, newIndir);
LIR::Use use;
@@ -3868,6 +3866,10 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node)
{
use.ReplaceWith(newIndir);
}
+ else
+ {
+ newIndir->SetUnusedValue();
+ }
BlockRange().Remove(op1);
BlockRange().Remove(node);
@@ -3907,8 +3909,8 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node)
if (lclDsc->lvDoNotEnregister && (lclOffs <= 0xFFFF) && ((lclOffs + elemSize) <= lclDsc->lvExactSize()))
{
- GenTree* lclFld =
- comp->gtNewLclFldNode(lclVar->GetLclNum(), simdBaseType, static_cast<uint16_t>(lclOffs));
+ GenTree* lclFld = comp->gtNewLclFldNode(lclVar->GetLclNum(), JITtype2varType(simdBaseJitType),
+ static_cast<uint16_t>(lclOffs));
BlockRange().InsertBefore(node, lclFld);
LIR::Use use;
@@ -3916,6 +3918,10 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node)
{
use.ReplaceWith(lclFld);
}
+ else
+ {
+ lclFld->SetUnusedValue();
+ }
BlockRange().Remove(op1);
BlockRange().Remove(op2);
@@ -4158,6 +4164,11 @@ GenTree* Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node)
{
use.ReplaceWith(cast);
}
+ else
+ {
+ node->ClearUnusedValue();
+ cast->SetUnusedValue();
+ }
next = LowerNode(cast);
}
@@ -4737,6 +4748,10 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
{
use.ReplaceWith(tmp1);
}
+ else
+ {
+ tmp1->SetUnusedValue();
+ }
BlockRange().Remove(node);
return LowerNode(tmp1);
@@ -5267,6 +5282,10 @@ GenTree* Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
{
use.ReplaceWith(tmp1);
}
+ else
+ {
+ tmp1->SetUnusedValue();
+ }
BlockRange().Remove(node);
return tmp1->gtNext;
@@ -5306,7 +5325,8 @@ GenTree* Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node)
GenTreeIndir* indir = op1->AsIndir();
- GenTreeIndir* newIndir = comp->gtNewIndir(simdBaseType, indir->Addr(), (indir->gtFlags & GTF_IND_FLAGS));
+ GenTreeIndir* newIndir =
+ comp->gtNewIndir(JITtype2varType(simdBaseJitType), indir->Addr(), (indir->gtFlags & GTF_IND_FLAGS));
BlockRange().InsertBefore(node, newIndir);
LIR::Use use;
@@ -5314,6 +5334,10 @@ GenTree* Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node)
{
use.ReplaceWith(newIndir);
}
+ else
+ {
+ newIndir->SetUnusedValue();
+ }
BlockRange().Remove(op1);
BlockRange().Remove(node);
@@ -5334,7 +5358,8 @@ GenTree* Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node)
if (lclDsc->lvDoNotEnregister && (lclOffs <= 0xFFFF) && ((lclOffs + elemSize) <= lclDsc->lvExactSize()))
{
- GenTree* lclFld = comp->gtNewLclFldNode(lclVar->GetLclNum(), simdBaseType, lclVar->GetLclOffs());
+ GenTree* lclFld =
+ comp->gtNewLclFldNode(lclVar->GetLclNum(), JITtype2varType(simdBaseJitType), lclVar->GetLclOffs());
BlockRange().InsertBefore(node, lclFld);
LIR::Use use;
@@ -5342,6 +5367,10 @@ GenTree* Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node)
{
use.ReplaceWith(lclFld);
}
+ else
+ {
+ lclFld->SetUnusedValue();
+ }
BlockRange().Remove(op1);
BlockRange().Remove(node);
@@ -5426,6 +5455,11 @@ GenTree* Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node)
{
use.ReplaceWith(cast);
}
+ else
+ {
+ node->ClearUnusedValue();
+ cast->SetUnusedValue();
+ }
next = LowerNode(cast);
}
@@ -6470,7 +6504,7 @@ void Lowering::ContainCheckStoreIndir(GenTreeStoreInd* node)
case NI_AVX2_ConvertToUInt32:
{
// These intrinsics are "ins reg/mem, xmm"
- isContainable = varTypeIsIntegral(simdBaseType);
+ isContainable = varTypeIsIntegral(simdBaseType) && (genTypeSize(src) == genTypeSize(node));
break;
}
@@ -6534,7 +6568,8 @@ void Lowering::ContainCheckStoreIndir(GenTreeStoreInd* node)
size_t numArgs = hwintrinsic->GetOperandCount();
GenTree* lastOp = hwintrinsic->Op(numArgs);
- isContainable = HWIntrinsicInfo::isImmOp(intrinsicId, lastOp) && lastOp->IsCnsIntOrI();
+ isContainable = HWIntrinsicInfo::isImmOp(intrinsicId, lastOp) && lastOp->IsCnsIntOrI() &&
+ (genTypeSize(simdBaseType) == genTypeSize(node));
if (isContainable && (intrinsicId == NI_SSE2_Extract))
{
@@ -7922,6 +7957,9 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre
// The memory form of this already takes a pointer and should be treated like a MemoryLoad
supportsGeneralLoads = !childNode->OperIsHWIntrinsic();
}
+
+ supportsGeneralLoads =
+ supportsGeneralLoads && (genTypeSize(childNode) >= genTypeSize(parentNode->GetSimdBaseType()));
break;
}
@@ -8101,7 +8139,16 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre
case NI_Vector128_ToScalar:
case NI_Vector256_ToScalar:
case NI_Vector512_ToScalar:
+ case NI_SSE2_ConvertToInt32:
+ case NI_SSE2_ConvertToUInt32:
+ case NI_SSE2_X64_ConvertToInt64:
+ case NI_SSE2_X64_ConvertToUInt64:
+ case NI_SSE2_Extract:
+ case NI_SSE41_Extract:
+ case NI_SSE41_X64_Extract:
case NI_AVX_ExtractVector128:
+ case NI_AVX2_ConvertToInt32:
+ case NI_AVX2_ConvertToUInt32:
case NI_AVX2_ExtractVector128:
case NI_AVX512F_ExtractVector128:
case NI_AVX512F_ExtractVector256:
@@ -8144,15 +8191,24 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre
return false;
}
+ case NI_Vector128_get_Zero:
+ case NI_Vector256_get_Zero:
+ {
+ // These are only containable as part of Sse41.Insert
+ return false;
+ }
+
case NI_SSE3_MoveAndDuplicate:
case NI_AVX2_BroadcastScalarToVector128:
case NI_AVX2_BroadcastScalarToVector256:
case NI_AVX512F_BroadcastScalarToVector512:
{
- var_types baseType = hwintrinsic->GetSimdBaseType();
- if (varTypeIsSmall(baseType))
+ var_types parentBaseType = parentNode->GetSimdBaseType();
+ var_types childBaseType = hwintrinsic->GetSimdBaseType();
+
+ if (varTypeIsSmall(parentBaseType) || (genTypeSize(parentBaseType) != genTypeSize(childBaseType)))
{
- // early return if the base type is not embedded broadcast compatible.
+ // early return if either base type is not embedded broadcast compatible.
return false;
}
@@ -8160,7 +8216,7 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre
if (intrinsicId == NI_SSE3_MoveAndDuplicate)
{
// NI_SSE3_MoveAndDuplicate is for Vector128 only.
- assert(baseType == TYP_DOUBLE);
+ assert(childBaseType == TYP_DOUBLE);
}
if (comp->compOpportunisticallyDependsOn(InstructionSet_AVX512F_VL) &&
@@ -8193,6 +8249,15 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre
case NI_AVX_BroadcastScalarToVector128:
case NI_AVX_BroadcastScalarToVector256:
{
+ var_types parentBaseType = parentNode->GetSimdBaseType();
+ var_types childBaseType = hwintrinsic->GetSimdBaseType();
+
+ if (varTypeIsSmall(parentBaseType) || (genTypeSize(parentBaseType) != genTypeSize(childBaseType)))
+ {
+ // early return if either base type is not embedded broadcast compatible.
+ return false;
+ }
+
return parentNode->OperIsEmbBroadcastCompatible();
}
@@ -8332,8 +8397,15 @@ void Lowering::TryFoldCnsVecForEmbeddedBroadcast(GenTreeHWIntrinsic* parentNode,
BlockRange().InsertBefore(broadcastNode, createScalar);
BlockRange().InsertBefore(createScalar, constScalar);
LIR::Use use;
- BlockRange().TryGetUse(childNode, &use);
- use.ReplaceWith(broadcastNode);
+ if (BlockRange().TryGetUse(childNode, &use))
+ {
+ use.ReplaceWith(broadcastNode);
+ }
+ else
+ {
+ broadcastNode->SetUnusedValue();
+ }
+
BlockRange().Remove(childNode);
LowerNode(createScalar);
LowerNode(broadcastNode);
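Several hunks above repeat the same fix: when lowering replaces a node, the replacement must either take over the old node's use or, if the old node had no use, be flagged as an unused value so liveness and dead-code elimination stay sound. The recurring shape, as a hypothetical helper over the JIT's LIR types (assumes `lir.h`; each hunk spells this out inline):

```cpp
void ReplaceOrMarkUnused(LIR::Range& blockRange, GenTree* oldNode, GenTree* newNode)
{
    LIR::Use use;
    if (blockRange.TryGetUse(oldNode, &use))
    {
        // The old node had a user; redirect that use to the replacement.
        use.ReplaceWith(newNode);
    }
    else
    {
        // No user: mark the replacement unused so liveness and DCE are sound.
        newNode->SetUnusedValue();
    }
    blockRange.Remove(oldNode);
}
```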
diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp
index 5c8fe4aae77889..0a33ba3faba9bd 100644
--- a/src/coreclr/jit/lsra.cpp
+++ b/src/coreclr/jit/lsra.cpp
@@ -1421,7 +1421,7 @@ PhaseStatus LinearScan::doLinearScan()
#endif
)
{
- dumpLsraStats(jitstdout);
+ dumpLsraStats(jitstdout());
}
#endif // TRACK_LSRA_STATS
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index f29d6a57148136..3deada8eec085b 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -8919,6 +8919,14 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
break;
#endif
+ case GT_COMMA:
+ if (op2->OperIsStore() || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) || fgIsThrow(op2))
+ {
+ typ = tree->gtType = TYP_VOID;
+ }
+
+ break;
+
default:
break;
}
@@ -10770,6 +10778,12 @@ GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node)
break;
}
+ // Must be working with the same types of vectors.
+ if (hwop1->TypeGet() != node->TypeGet())
+ {
+ break;
+ }
+
if (toScalar != nullptr)
{
DEBUG_DESTROY_NODE(toScalar);
@@ -10793,8 +10807,6 @@ GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node)
}
#if defined(TARGET_XARCH)
- case NI_AVX512F_Add:
- case NI_AVX512BW_Add:
case NI_AVX512F_And:
case NI_AVX512DQ_And:
case NI_AVX512F_AndNot:
@@ -10836,13 +10848,6 @@ GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node)
switch (intrinsicId)
{
- case NI_AVX512F_Add:
- case NI_AVX512BW_Add:
- {
- maskIntrinsicId = NI_AVX512F_AddMask;
- break;
- }
-
case NI_AVX512F_And:
case NI_AVX512DQ_And:
{
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp
index 684f71b898f313..2ca5526476b82d 100644
--- a/src/coreclr/jit/optimizer.cpp
+++ b/src/coreclr/jit/optimizer.cpp
@@ -8875,9 +8875,15 @@ GenTree* Compiler::optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma,
}
#endif
- // Extract side effects
+ // TODO-Bug: We really should be extracting all side effects from the
+ // length and index here, but the length typically involves a GT_ARR_LENGTH
+ // that we would preserve. Usually, as part of proving that the range check
+ // passes, we have also proven that the ARR_LENGTH is non-faulting. We need
+ // a good way to communicate to this function that it is ok to ignore side
+ // effects of the ARR_LENGTH.
GenTree* sideEffList = nullptr;
- gtExtractSideEffList(check, &sideEffList, GTF_ASG);
+ gtExtractSideEffList(check->GetArrayLength(), &sideEffList, GTF_ASG);
+ gtExtractSideEffList(check->GetIndex(), &sideEffList);
if (sideEffList != nullptr)
{
@@ -9031,9 +9037,9 @@ void Compiler::optRemoveRedundantZeroInits()
CompAllocator allocator(getAllocator(CMK_ZeroInit));
LclVarRefCounts refCounts(allocator);
BitVecTraits bitVecTraits(lvaCount, this);
- BitVec zeroInitLocals = BitVecOps::MakeEmpty(&bitVecTraits);
- bool hasGCSafePoint = false;
- bool canThrow = false;
+ BitVec zeroInitLocals = BitVecOps::MakeEmpty(&bitVecTraits);
+ bool hasGCSafePoint = false;
+ bool hasImplicitControlFlow = false;
assert(fgNodeThreading == NodeThreading::AllTrees);
@@ -9044,6 +9050,8 @@ void Compiler::optRemoveRedundantZeroInits()
CompAllocator allocator(getAllocator(CMK_ZeroInit));
LclVarRefCounts defsInBlock(allocator);
bool removedTrackedDefs = false;
+ bool hasEHSuccs = block->HasPotentialEHSuccs(this);
+
for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr;)
{
Statement* next = stmt->GetNextStmt();
@@ -9054,10 +9062,7 @@ void Compiler::optRemoveRedundantZeroInits()
hasGCSafePoint = true;
}
- if ((tree->gtFlags & GTF_EXCEPT) != 0)
- {
- canThrow = true;
- }
+ hasImplicitControlFlow |= hasEHSuccs && ((tree->gtFlags & GTF_EXCEPT) != 0);
switch (tree->gtOper)
{
@@ -9203,7 +9208,8 @@ void Compiler::optRemoveRedundantZeroInits()
}
}
- if (!removedExplicitZeroInit && isEntire && (!canThrow || !lclDsc->lvLiveInOutOfHndlr))
+ if (!removedExplicitZeroInit && isEntire &&
+ (!hasImplicitControlFlow || (lclDsc->lvTracked && !lclDsc->lvLiveInOutOfHndlr)))
{
// If compMethodRequiresPInvokeFrame() returns true, lower may later
// insert a call to CORINFO_HELP_INIT_PINVOKE_FRAME which is a gc-safe point.
diff --git a/src/coreclr/jit/promotion.cpp b/src/coreclr/jit/promotion.cpp
index e2c4e797a3c9d8..52163f4db0cceb 100644
--- a/src/coreclr/jit/promotion.cpp
+++ b/src/coreclr/jit/promotion.cpp
@@ -621,6 +621,38 @@ class LocalUses
bool EvaluateReplacement(
Compiler* comp, unsigned lclNum, const Access& access, unsigned inducedCount, weight_t inducedCountWtd)
{
+ // Verify that this replacement has the proper GCness relative to the
+ // layout. While reinterpreting GC fields as integers can be considered
+ // UB, there are scenarios where it can happen safely:
+ //
+ // * The user code could have guarded the access with a dynamic check
+ // that it doesn't contain a GC pointer, so that the access is actually
+ // in dead code. This happens e.g. in span functions in SPC.
+ //
+ // * For byrefs, reinterpreting as an integer could be ok in a
+ // restricted scope due to pinning.
+ //
+ // In theory we could allow these promotions in the restricted scope,
+ // but currently physical promotion works on a function-wide basis.
+
+ LclVarDsc* lcl = comp->lvaGetDesc(lclNum);
+ ClassLayout* layout = lcl->GetLayout();
+ if (layout->IntersectsGCPtr(access.Offset, genTypeSize(access.AccessType)))
+ {
+ if (((access.Offset % TARGET_POINTER_SIZE) != 0) ||
+ (layout->GetGCPtrType(access.Offset / TARGET_POINTER_SIZE) != access.AccessType))
+ {
+ return false;
+ }
+ }
+ else
+ {
+ if (varTypeIsGC(access.AccessType))
+ {
+ return false;
+ }
+ }
+
unsigned countOverlappedCallArg = 0;
unsigned countOverlappedStoredFromCall = 0;
@@ -678,9 +710,8 @@ class LocalUses
// Now look at the overlapping struct uses that promotion will make more expensive.
- unsigned countReadBacks = 0;
- weight_t countReadBacksWtd = 0;
- LclVarDsc* lcl = comp->lvaGetDesc(lclNum);
+ unsigned countReadBacks = 0;
+ weight_t countReadBacksWtd = 0;
// For parameters or OSR locals we always need one read back.
if (lcl->lvIsParam || lcl->lvIsOSRLocal)
{
@@ -2309,8 +2340,36 @@ void ReplaceVisitor::ReadBackAfterCall(GenTreeCall* call, GenTree* user)
//
// If the remainder of the struct local is dying, then we expect that this
// entire struct local is now dying, since all field accesses are going to be
-// replaced with other locals. The exception is if there is a queued read
-// back for any of the fields.
+// replaced with other locals.
+//
+// There are two exceptions to the above:
+//
+// 1) If there is a queued readback for any of the fields, then there is
+// live state in the struct local, so it is not dying.
+//
+// 2) If there are further uses of the local in the same statement then we cannot
+// actually act on the last-use information we would provide here. That's because
+// uses of locals occur at the user node, which we do not model here; in the real
+// model there are cases where there is no place to insert IR between the two uses.
+// For example, consider:
+//
+// ▌ CALL void Program:Foo(Program+S,Program+S)
+// ├──▌ LCL_VAR struct V01 loc0
+// └──▌ LCL_VAR struct V01 loc0
+//
+// If V01 is promoted fully then both uses of V01 are last uses here; but
+// replacing the IR with
+//
+// ▌ CALL void Program:Foo(Program+S,Program+S)
+// ├──▌ LCL_VAR struct V01 loc0 (last use)
+// └──▌ COMMA struct
+// ├──▌ STORE_LCL_FLD int V01 loc0 [+0]
+// │ └──▌ LCL_VAR int V02 tmp0
+// └──▌ LCL_VAR struct V01 loc0 (last use)
+//
+// would be illegal since the created store overlaps with the first use of the local,
+// and does not take into account that both uses occur simultaneously at
+// the position of the CALL node.
//
bool ReplaceVisitor::IsPromotedStructLocalDying(GenTreeLclVarCommon* lcl)
{
@@ -2331,6 +2390,15 @@ bool ReplaceVisitor::IsPromotedStructLocalDying(GenTreeLclVarCommon* lcl)
}
}
+ for (GenTree* cur = lcl->gtNext; cur != nullptr; cur = cur->gtNext)
+ {
+ assert(cur->OperIsAnyLocal());
+ if (cur->TypeIs(TYP_STRUCT) && (cur->AsLclVarCommon()->GetLclNum() == lcl->GetLclNum()))
+ {
+ return false;
+ }
+ }
+
return true;
}
@@ -2546,7 +2614,7 @@ void ReplaceVisitor::WriteBackBeforeCurrentStatement(unsigned lcl, unsigned offs
GenTree* readBack = Promotion::CreateWriteBack(m_compiler, lcl, rep);
Statement* stmt = m_compiler->fgNewStmtFromTree(readBack);
- JITDUMP("Writing back %s before " FMT_STMT "\n", rep.Description, stmt->GetID());
+ JITDUMP("Writing back %s before " FMT_STMT "\n", rep.Description, m_currentStmt->GetID());
DISPSTMT(stmt);
m_compiler->fgInsertStmtBefore(m_currentBlock, m_currentStmt, stmt);
ClearNeedsWriteBack(rep);
diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp
index 3ec8e71c159e46..2e1c0a52a3d81b 100644
--- a/src/coreclr/jit/utils.cpp
+++ b/src/coreclr/jit/utils.cpp
@@ -2734,7 +2734,7 @@ float FloatingPointUtils::maximumNumber(float x, float y)
//
// It propagates NaN inputs back to the caller and
// otherwise returns the lesser of the inputs. It
-// treats +0 as lesser than -0 as per the specification.
+// treats +0 as greater than -0 as per the specification.
//
// Arguments:
// val1 - left operand
@@ -2763,7 +2763,7 @@ double FloatingPointUtils::minimum(double val1, double val2)
//
// It propagates NaN inputs back to the caller and
// otherwise returns the input with a lesser magnitude.
-// It treats +0 as lesser than -0 as per the specification.
+// It treats +0 as greater than -0 as per the specification.
//
// Arguments:
// x - left operand
@@ -2856,7 +2856,7 @@ double FloatingPointUtils::minimumNumber(double x, double y)
//
// It propagates NaN inputs back to the caller and
// otherwise returns the lesser of the inputs. It
-// treats +0 as lesser than -0 as per the specification.
+// treats +0 as greater than -0 as per the specification.
//
// Arguments:
// val1 - left operand
@@ -2885,7 +2885,7 @@ float FloatingPointUtils::minimum(float val1, float val2)
//
// It propagates NaN inputs back to the caller and
// otherwise returns the input with a lesser magnitude.
-// It treats +0 as lesser than -0 as per the specification.
+// It treats +0 as greater than -0 as per the specification.
//
// Arguments:
// x - left operand
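The corrected comments describe the IEEE 754 `minimum` sign-of-zero rule: `+0` compares greater than `-0`, so `minimum(+0, -0)` is `-0`. A minimal sketch of those semantics, not the `FloatingPointUtils` implementation:

```cpp
#include <cassert>
#include <cmath>

double ieee_minimum(double x, double y)
{
    if (std::isnan(x) || std::isnan(y))
    {
        return x + y; // propagate a NaN back to the caller
    }
    if (x == y)
    {
        // +0.0 == -0.0 under operator==, so break the tie on the sign bit.
        return std::signbit(x) ? x : y;
    }
    return (x < y) ? x : y;
}

int main()
{
    assert(std::signbit(ieee_minimum(+0.0, -0.0))); // -0 is the lesser value
    assert(std::signbit(ieee_minimum(-0.0, +0.0)));
    return 0;
}
```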
diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp
index 2ea2ee7845b8b2..2c638535d786e4 100644
--- a/src/coreclr/jit/valuenum.cpp
+++ b/src/coreclr/jit/valuenum.cpp
@@ -7891,7 +7891,7 @@ ValueNum ValueNumStore::EvalHWIntrinsicFunBinary(var_types type,
#endif
{
// Handle `x ^ x == 0`
- return arg0VN;
+ return VNZeroForType(type);
}
default:
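The value-numbering fix makes folding `x ^ x` produce the zero value number of the node's type; returning `arg0VN` effectively claimed `x ^ x == x`, which holds only when `x` is zero. The scalar analogue:

```cpp
#include <cassert>

int main()
{
    int x = 42;
    assert((x ^ x) == 0); // the correct fold: zero of the result type
    assert((x ^ x) != x); // the identity the old code effectively asserted
    return 0;
}
```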
diff --git a/src/coreclr/md/compiler/mdutil.cpp b/src/coreclr/md/compiler/mdutil.cpp
index 8fb1551ef7ceea..05b56a25875bb0 100644
--- a/src/coreclr/md/compiler/mdutil.cpp
+++ b/src/coreclr/md/compiler/mdutil.cpp
@@ -265,11 +265,7 @@ HRESULT LOADEDMODULES::FindCachedReadOnlyEntry(
{
// If the name matches...
LPCWSTR pszName = pRegMeta->GetNameOfDBFile();
- #ifdef FEATURE_CASE_SENSITIVE_FILESYSTEM
- if (u16_strcmp(szName, pszName) == 0)
- #else
if (SString::_wcsicmp(szName, pszName) == 0)
- #endif
{
ULONG cRefs;
@@ -299,11 +295,7 @@ HRESULT LOADEDMODULES::FindCachedReadOnlyEntry(
{
// If the name matches...
LPCWSTR pszName = pRegMeta->GetNameOfDBFile();
- #ifdef FEATURE_CASE_SENSITIVE_FILESYSTEM
- if (u16_strcmp(szName, pszName) == 0)
- #else
if (SString::_wcsicmp(szName, pszName) == 0)
- #endif
{
ULONG cRefs;
diff --git a/src/coreclr/nativeaot/Bootstrap/main.cpp b/src/coreclr/nativeaot/Bootstrap/main.cpp
index cc78cf8d6710a9..c2ff85b50e81fd 100644
--- a/src/coreclr/nativeaot/Bootstrap/main.cpp
+++ b/src/coreclr/nativeaot/Bootstrap/main.cpp
@@ -93,7 +93,7 @@ static char& __unbox_z = __stop___unbox;
#endif // _MSC_VER
-extern "C" bool RhInitialize();
+extern "C" bool RhInitialize(bool isDll);
extern "C" void RhSetRuntimeInitializationCallback(int (*fPtr)());
extern "C" bool RhRegisterOSModule(void * pModule,
@@ -164,7 +164,13 @@ extern "C" void __managed__Startup();
static int InitializeRuntime()
{
- if (!RhInitialize())
+ if (!RhInitialize(
+#ifdef NATIVEAOT_DLL
+ /* isDll */ true
+#else
+ /* isDll */ false
+#endif
+ ))
return -1;
void * osModule = PalGetModuleHandleFromPointer((void*)&NATIVEAOT_ENTRYPOINT);
diff --git a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Publish.targets b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Publish.targets
index 647aee4993d960..da6c90642f6f13 100644
--- a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Publish.targets
+++ b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Publish.targets
@@ -50,6 +50,7 @@
Text="RuntimeIdentifier is required for native compilation. Try running dotnet publish with the -r option value specified." />
+
@@ -94,7 +95,10 @@
+
+
diff --git a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets
index 409fcb654e919d..1f5b2cd681095c 100644
--- a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets
+++ b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets
@@ -50,6 +50,8 @@ The .NET Foundation licenses this file to you under the MIT license.
libeventpipe-disabled
libeventpipe-enabled
+
+ true
@@ -121,7 +123,7 @@ The .NET Foundation licenses this file to you under the MIT license.
-
+
diff --git a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets
index a4f34ef2225483..e9462399741c5e 100644
--- a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets
+++ b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets
@@ -263,7 +263,7 @@ The .NET Foundation licenses this file to you under the MIT license.
-
+
diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs
index 3c9d6c86ffc323..5c11243bbad99b 100644
--- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs
+++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs
@@ -62,12 +62,12 @@ internal static class InternalCalls
[RuntimeExport("RhCollect")]
internal static void RhCollect(int generation, InternalGCCollectionMode mode, bool lowMemoryP = false)
{
- RhpCollect(generation, mode, lowMemoryP);
+ RhpCollect(generation, mode, lowMemoryP ? Interop.BOOL.TRUE : Interop.BOOL.FALSE);
}
[DllImport(Redhawk.BaseName)]
[UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvCdecl) })]
- private static extern void RhpCollect(int generation, InternalGCCollectionMode mode, bool lowMemoryP);
+ private static extern void RhpCollect(int generation, InternalGCCollectionMode mode, Interop.BOOL lowMemoryP);
[RuntimeExport("RhGetGcTotalMemory")]
internal static long RhGetGcTotalMemory()
diff --git a/src/coreclr/nativeaot/Runtime/Full/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/Full/CMakeLists.txt
index 3cbaa6e2f253a6..f3d48797c2184a 100644
--- a/src/coreclr/nativeaot/Runtime/Full/CMakeLists.txt
+++ b/src/coreclr/nativeaot/Runtime/Full/CMakeLists.txt
@@ -6,7 +6,7 @@ project(Runtime)
# Include auto-generated files on include path
set(CMAKE_INCLUDE_CURRENT_DIR ON)
-if (CLR_CMAKE_TARGET_APPLE AND NOT CLR_CMAKE_TARGET_OSX)
+if (CLR_CMAKE_TARGET_APPLE)
list(APPEND RUNTIME_SOURCES_ARCH_ASM
${ARCH_SOURCES_DIR}/ThunkPoolThunks.${ASM_SUFFIX}
)
diff --git a/src/coreclr/nativeaot/Runtime/GCMemoryHelpers.cpp b/src/coreclr/nativeaot/Runtime/GCMemoryHelpers.cpp
index 27126acbdb839f..30f2c5c5fd3e9a 100644
--- a/src/coreclr/nativeaot/Runtime/GCMemoryHelpers.cpp
+++ b/src/coreclr/nativeaot/Runtime/GCMemoryHelpers.cpp
@@ -10,7 +10,6 @@
#include "PalRedhawkCommon.h"
#include "CommonMacros.inl"
-#include "GCMemoryHelpers.h"
#include "GCMemoryHelpers.inl"
// This function clears a piece of memory in a GC safe way.
@@ -31,11 +30,26 @@ COOP_PINVOKE_CDECL_HELPER(void *, RhpGcSafeZeroMemory, (void * mem, size_t size)
return mem;
}
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
+ //
+ // Memory writes are already ordered
+ //
+ #define GCHeapMemoryBarrier()
+#else
+ #define GCHeapMemoryBarrier() MemoryBarrier()
+#endif
+
// Move memory, in a way that is compatible with a move onto the heap, but
// does not require the destination pointer to be on the heap.
COOP_PINVOKE_HELPER(void, RhBulkMoveWithWriteBarrier, (uint8_t* pDest, uint8_t* pSrc, size_t cbDest))
{
+ // It is possible that the bulk write is publishing, to shared memory, object references
+ // that until now were accessible only by the current thread.
+ // The memory model requires that writes performed by the current thread are observable no
+ // later than the writes that will actually publish the references.
+ GCHeapMemoryBarrier();
+
if (pDest <= pSrc || pSrc + cbDest <= pDest)
InlineForwardGCSafeCopy(pDest, pSrc, cbDest);
else
@@ -43,8 +57,3 @@ COOP_PINVOKE_HELPER(void, RhBulkMoveWithWriteBarrier, (uint8_t* pDest, uint8_t*
InlinedBulkWriteBarrier(pDest, cbDest);
}
-
-void REDHAWK_CALLCONV RhpBulkWriteBarrier(void* pMemStart, uint32_t cbMemSize)
-{
- InlinedBulkWriteBarrier(pMemStart, cbMemSize);
-}
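The new `GCHeapMemoryBarrier()` before the bulk move is a publication barrier: the copy may make object references visible to other threads, so the current thread's earlier initializing writes must become visible no later than the copied references. A minimal sketch of the hazard with illustrative types, not the runtime's API (a reader would pair this with an acquire on its side):

```cpp
#include <atomic>
#include <cstring>

struct Node { int payload; };

// Thread A initializes an object, then bulk-copies a reference to it into
// memory visible to thread B.
void Publish(Node** sharedSlot, Node* node)
{
    node->payload = 42; // initializing write by the current thread

    // Corresponds to GCHeapMemoryBarrier(): prior writes must be ordered
    // before the writes that publish the reference. A no-op on x86/x64,
    // a real fence on weakly ordered architectures such as arm64.
    std::atomic_thread_fence(std::memory_order_release);

    std::memcpy(sharedSlot, &node, sizeof(node)); // the "bulk move"
}
```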
diff --git a/src/coreclr/nativeaot/Runtime/MiscHelpers.cpp b/src/coreclr/nativeaot/Runtime/MiscHelpers.cpp
index ec2fabcc540f1f..6df37cf23b9d36 100644
--- a/src/coreclr/nativeaot/Runtime/MiscHelpers.cpp
+++ b/src/coreclr/nativeaot/Runtime/MiscHelpers.cpp
@@ -35,7 +35,6 @@
#include "MethodTable.inl"
#include "CommonMacros.inl"
#include "volatile.h"
-#include "GCMemoryHelpers.h"
#include "GCMemoryHelpers.inl"
#include "yieldprocessornormalized.h"
#include "RhConfig.h"
diff --git a/src/coreclr/nativeaot/Runtime/arm64/AllocFast.S b/src/coreclr/nativeaot/Runtime/arm64/AllocFast.S
index 2bab323e65abca..79ffed2b05210d 100644
--- a/src/coreclr/nativeaot/Runtime/arm64/AllocFast.S
+++ b/src/coreclr/nativeaot/Runtime/arm64/AllocFast.S
@@ -46,7 +46,7 @@ OFFSETOF__Thread__m_alloc_context__alloc_limit = OFFSETOF__Thread__m_rgbAll
add x2, x2, x12
ldr x13, [x1, #OFFSETOF__Thread__m_alloc_context__alloc_limit]
cmp x2, x13
- bhi RhpNewFast_RarePath
+ bhi LOCAL_LABEL(RhpNewFast_RarePath)
// Update the alloc pointer to account for the allocation.
str x2, [x1, #OFFSETOF__Thread__m_alloc_context__alloc_ptr]
@@ -57,7 +57,7 @@ OFFSETOF__Thread__m_alloc_context__alloc_limit = OFFSETOF__Thread__m_rgbAll
mov x0, x12
ret
-RhpNewFast_RarePath:
+LOCAL_LABEL(RhpNewFast_RarePath):
mov x1, #0
b C_FUNC(RhpNewObject)
LEAF_END RhpNewFast, _TEXT
@@ -88,12 +88,12 @@ RhpNewFast_RarePath:
bl C_FUNC(RhpGcAlloc)
// Set the new object's MethodTable pointer on success.
- cbz x0, NewOutOfMemory
+ cbz x0, LOCAL_LABEL(NewOutOfMemory)
POP_COOP_PINVOKE_FRAME
EPILOG_RETURN
-NewOutOfMemory:
+LOCAL_LABEL(NewOutOfMemory):
// This is the OOM failure path. We are going to tail-call to a managed helper that will throw
// an out of memory exception that the caller of this allocator understands.
@@ -113,7 +113,7 @@ NewOutOfMemory:
movz x2, MAX_STRING_LENGTH & 0xFFFF
movk x2, MAX_STRING_LENGTH >> 16, lsl 16
cmp x1, x2
- bhi StringSizeOverflow
+ bhi LOCAL_LABEL(StringSizeOverflow)
// Compute overall allocation size (align(base size + (element size * elements), 8)).
mov w2, #STRING_COMPONENT_SIZE
@@ -139,7 +139,7 @@ NewOutOfMemory:
add x2, x2, x12
ldr x12, [x3, #OFFSETOF__Thread__m_alloc_context__alloc_limit]
cmp x2, x12
- bhi C_FUNC(RhpNewArrayRare)
+ bhi LOCAL_LABEL(RhNewString_Rare)
// Reload new object address into r12.
ldr x12, [x3, #OFFSETOF__Thread__m_alloc_context__alloc_ptr]
@@ -156,7 +156,7 @@ NewOutOfMemory:
ret
-StringSizeOverflow:
+LOCAL_LABEL(StringSizeOverflow):
// We get here if the length of the final string object can not be represented as an unsigned
// 32-bit value. We are going to tail-call to a managed helper that will throw
// an OOM exception that the caller of this allocator understands.
@@ -164,6 +164,9 @@ StringSizeOverflow:
// x0 holds MethodTable pointer already
mov x1, #1 // Indicate that we should throw OverflowException
b C_FUNC(RhExceptionHandling_FailedAllocation)
+
+LOCAL_LABEL(RhNewString_Rare):
+ b C_FUNC(RhpNewArrayRare)
LEAF_END RhNewString, _Text
// Allocate one dimensional, zero based array (SZARRAY).
@@ -177,7 +180,7 @@ StringSizeOverflow:
// case (32 dimensional MdArray) is less than 0xffff, and thus the product fits in 64 bits.
mov x2, #0x7FFFFFFF
cmp x1, x2
- bhi ArraySizeOverflow
+ bhi LOCAL_LABEL(ArraySizeOverflow)
ldrh w2, [x0, #OFFSETOF__MethodTable__m_usComponentSize]
umull x2, w1, w2
@@ -204,7 +207,7 @@ StringSizeOverflow:
add x2, x2, x12
ldr x12, [x3, #OFFSETOF__Thread__m_alloc_context__alloc_limit]
cmp x2, x12
- bhi C_FUNC(RhpNewArrayRare)
+ bhi LOCAL_LABEL(RhpNewArray_Rare)
// Reload new object address into x12.
ldr x12, [x3, #OFFSETOF__Thread__m_alloc_context__alloc_ptr]
@@ -221,7 +224,7 @@ StringSizeOverflow:
ret
-ArraySizeOverflow:
+LOCAL_LABEL(ArraySizeOverflow):
// We get here if the size of the final array object can not be represented as an unsigned
// 32-bit value. We are going to tail-call to a managed helper that will throw
// an overflow exception that the caller of this allocator understands.
@@ -229,6 +232,9 @@ ArraySizeOverflow:
// x0 holds MethodTable pointer already
mov x1, #1 // Indicate that we should throw OverflowException
b C_FUNC(RhExceptionHandling_FailedAllocation)
+
+LOCAL_LABEL(RhpNewArray_Rare):
+ b C_FUNC(RhpNewArrayRare)
LEAF_END RhpNewArray, _TEXT
// Allocate one dimensional, zero based array (SZARRAY) using the slow path that calls a runtime helper.
@@ -254,12 +260,12 @@ ArraySizeOverflow:
bl C_FUNC(RhpGcAlloc)
// Set the new object's MethodTable pointer and length on success.
- cbz x0, ArrayOutOfMemory
+ cbz x0, LOCAL_LABEL(ArrayOutOfMemory)
POP_COOP_PINVOKE_FRAME
EPILOG_RETURN
-ArrayOutOfMemory:
+LOCAL_LABEL(ArrayOutOfMemory):
// This is the OOM failure path. We are going to tail-call to a managed helper that will throw
// an out of memory exception that the caller of this allocator understands.
diff --git a/src/coreclr/nativeaot/Runtime/arm64/ExceptionHandling.S b/src/coreclr/nativeaot/Runtime/arm64/ExceptionHandling.S
index d0425171e1d191..7c04f15ad3b858 100644
--- a/src/coreclr/nativeaot/Runtime/arm64/ExceptionHandling.S
+++ b/src/coreclr/nativeaot/Runtime/arm64/ExceptionHandling.S
@@ -275,7 +275,7 @@
// where the tail-calling thread had saved LR, which may not match where we have saved LR.
ldr x1, [x2, #OFFSETOF__Thread__m_pvHijackedReturnAddress]
- cbz x1, NotHijacked
+ cbz x1, LOCAL_LABEL(NotHijacked)
ldr x3, [x2, #OFFSETOF__Thread__m_ppvHijackedReturnAddressLocation]
@@ -286,13 +286,13 @@
add x12, sp, #(STACKSIZEOF_ExInfo + SIZEOF__PAL_LIMITED_CONTEXT) // re-compute SP at callsite
cmp x3, x12 // if (m_ppvHijackedReturnAddressLocation < SP at callsite)
- blo TailCallWasHijacked
+ blo LOCAL_LABEL(TailCallWasHijacked)
// normal case where a valid return address location is hijacked
str x1, [x3]
- b ClearThreadState
+ b LOCAL_LABEL(ClearThreadState)
-TailCallWasHijacked:
+LOCAL_LABEL(TailCallWasHijacked):
// Abnormal case where the return address location is now invalid because we ended up here via a tail
// call. In this case, our hijacked return address should be the correct caller of this method.
@@ -302,13 +302,13 @@ TailCallWasHijacked:
str lr, [sp, #(rsp_offsetof_Context + OFFSETOF__PAL_LIMITED_CONTEXT__LR)]
str lr, [sp, #(rsp_offsetof_Context + OFFSETOF__PAL_LIMITED_CONTEXT__IP)]
-ClearThreadState:
+LOCAL_LABEL(ClearThreadState):
// clear the Thread's hijack state
str xzr, [x2, #OFFSETOF__Thread__m_ppvHijackedReturnAddressLocation]
str xzr, [x2, #OFFSETOF__Thread__m_pvHijackedReturnAddress]
-NotHijacked:
+LOCAL_LABEL(NotHijacked):
add x1, sp, #rsp_offsetof_ExInfo // x1 <- ExInfo*
str xzr, [x1, #OFFSETOF__ExInfo__m_exception] // pExInfo->m_exception = null
@@ -429,13 +429,13 @@ NotHijacked:
add x12, x5, #OFFSETOF__Thread__m_ThreadStateFlags
-ClearRetry_Catch:
+LOCAL_LABEL(ClearRetry_Catch):
ldxr w4, [x12]
bic w4, w4, #TSF_DoNotTriggerGc
stxr w6, w4, [x12]
- cbz w6, ClearSuccess_Catch
- b ClearRetry_Catch
-ClearSuccess_Catch:
+ cbz w6, LOCAL_LABEL(ClearSuccess_Catch)
+ b LOCAL_LABEL(ClearRetry_Catch)
+LOCAL_LABEL(ClearSuccess_Catch):
//
// set preserved regs to the values expected by the funclet
@@ -487,21 +487,21 @@ ClearSuccess_Catch:
ldr x3, [sp, #rsp_offset_x3] // x3 <- current ExInfo*
ldr x2, [x2, #OFFSETOF__REGDISPLAY__SP] // x2 <- resume SP value
-PopExInfoLoop:
+LOCAL_LABEL(PopExInfoLoop):
ldr x3, [x3, #OFFSETOF__ExInfo__m_pPrevExInfo] // x3 <- next ExInfo
- cbz x3, DonePopping // if (pExInfo == null) { we're done }
+ cbz x3, LOCAL_LABEL(DonePopping) // if (pExInfo == null) { we're done }
cmp x3, x2
- blt PopExInfoLoop // if (pExInfo < resume SP} { keep going }
+ blt LOCAL_LABEL(PopExInfoLoop) // if (pExInfo < resume SP) { keep going }
-DonePopping:
+LOCAL_LABEL(DonePopping):
str x3, [x1, #OFFSETOF__Thread__m_pExInfoStackHead] // store the new head on the Thread
PREPARE_EXTERNAL_VAR_INDIRECT_W RhpTrapThreads, 3
- tbz x3, #TrapThreadsFlags_AbortInProgress_Bit, NoAbort
+ tbz x3, #TrapThreadsFlags_AbortInProgress_Bit, LOCAL_LABEL(NoAbort)
ldr x3, [sp, #rsp_offset_is_not_handling_thread_abort]
- cbnz x3, NoAbort
+ cbnz x3, LOCAL_LABEL(NoAbort)
// It was the ThreadAbortException, so rethrow it
// reset SP
@@ -510,7 +510,7 @@ DonePopping:
mov sp, x2
b C_FUNC(RhpThrowHwEx)
-NoAbort:
+LOCAL_LABEL(NoAbort):
// reset SP and jump to continuation address
mov sp, x2
br x0
@@ -564,13 +564,13 @@ NoAbort:
add x12, x2, #OFFSETOF__Thread__m_ThreadStateFlags
-ClearRetry:
+LOCAL_LABEL(ClearRetry):
ldxr w4, [x12]
bic w4, w4, #TSF_DoNotTriggerGc
stxr w3, w4, [x12]
- cbz w3, ClearSuccess
- b ClearRetry
-ClearSuccess:
+ cbz w3, LOCAL_LABEL(ClearSuccess)
+ b LOCAL_LABEL(ClearRetry)
+LOCAL_LABEL(ClearSuccess):
//
// set preserved regs to the values expected by the funclet
@@ -602,13 +602,13 @@ ClearSuccess:
ldr x2, [sp, rsp_FinallyFunclet_offset_thread]
add x12, x2, #OFFSETOF__Thread__m_ThreadStateFlags
-SetRetry:
+LOCAL_LABEL(SetRetry):
ldxr w1, [x12]
orr w1, w1, #TSF_DoNotTriggerGc
stxr w3, w1, [x12]
- cbz w3, SetSuccess
- b SetRetry
-SetSuccess:
+ cbz w3, LOCAL_LABEL(SetSuccess)
+ b LOCAL_LABEL(SetRetry)
+LOCAL_LABEL(SetSuccess):
ldp d8, d9, [sp, #0x00]
ldp d10, d11, [sp, #0x10]
@@ -707,13 +707,13 @@ SetSuccess:
add x12, x5, #OFFSETOF__Thread__m_ThreadStateFlags
-ClearRetry_Propagate:
+LOCAL_LABEL(ClearRetry_Propagate):
ldxr w4, [x12]
bic w4, w4, #TSF_DoNotTriggerGc
stxr w6, w4, [x12]
- cbz w6, ClearSuccess_Propagate
- b ClearRetry_Propagate
-ClearSuccess_Propagate:
+ cbz w6, LOCAL_LABEL(ClearSuccess_Propagate)
+ b LOCAL_LABEL(ClearRetry_Propagate)
+LOCAL_LABEL(ClearSuccess_Propagate):
//
// set preserved regs to the values expected by the funclet
@@ -749,13 +749,13 @@ ClearSuccess_Propagate:
ldr x3, [sp, #rsp_offset_x3] // x3 <- current ExInfo*
ldr x2, [x2, #OFFSETOF__REGDISPLAY__SP] // x2 <- resume SP value
-Propagate_PopExInfoLoop:
+LOCAL_LABEL(Propagate_PopExInfoLoop):
ldr x3, [x3, #OFFSETOF__ExInfo__m_pPrevExInfo] // x3 <- next ExInfo
- cbz x3, Propagate_DonePopping // if (pExInfo == null) { we're done }
+ cbz x3, LOCAL_LABEL(Propagate_DonePopping) // if (pExInfo == null) { we're done }
cmp x3, x2
- blt Propagate_PopExInfoLoop // if (pExInfo < resume SP} { keep going }
+ blt LOCAL_LABEL(Propagate_PopExInfoLoop) // if (pExInfo < resume SP) { keep going }
-Propagate_DonePopping:
+LOCAL_LABEL(Propagate_DonePopping):
str x3, [x1, #OFFSETOF__Thread__m_pExInfoStackHead] // store the new head on the Thread
// restore preemptive mode
diff --git a/src/coreclr/nativeaot/Runtime/arm64/GcProbe.S b/src/coreclr/nativeaot/Runtime/arm64/GcProbe.S
index e27834bae6fedd..abe7555b761134 100644
--- a/src/coreclr/nativeaot/Runtime/arm64/GcProbe.S
+++ b/src/coreclr/nativeaot/Runtime/arm64/GcProbe.S
@@ -127,10 +127,10 @@ NESTED_ENTRY RhpGcProbeHijack, _TEXT, NoHandler
FixupHijackedCallstack
PREPARE_EXTERNAL_VAR_INDIRECT_W RhpTrapThreads, 3
- tbnz x3, #TrapThreadsFlags_TrapThreads_Bit, WaitForGC
+ tbnz x3, #TrapThreadsFlags_TrapThreads_Bit, LOCAL_LABEL(WaitForGC)
ret
-WaitForGC:
+LOCAL_LABEL(WaitForGC):
orr x12, x12, DEFAULT_FRAME_SAVE_FLAGS + PTFF_SAVE_X0 + PTFF_SAVE_X1
b C_FUNC(RhpWaitForGC)
NESTED_END RhpGcProbeHijack
@@ -144,11 +144,11 @@ NESTED_ENTRY RhpWaitForGC, _TEXT, NoHandler
bl C_FUNC(RhpWaitForGC2)
ldr x2, [sp, #OFFSETOF__PInvokeTransitionFrame__m_Flags]
- tbnz x2, #PTFF_THREAD_ABORT_BIT, ThrowThreadAbort
+ tbnz x2, #PTFF_THREAD_ABORT_BIT, LOCAL_LABEL(ThrowThreadAbort)
POP_PROBE_FRAME
EPILOG_RETURN
-ThrowThreadAbort:
+LOCAL_LABEL(ThrowThreadAbort):
POP_PROBE_FRAME
mov w0, #STATUS_REDHAWK_THREAD_ABORT
mov x1, lr // return address as exception PC
@@ -159,8 +159,10 @@ NESTED_END RhpWaitForGC
LEAF_ENTRY RhpGcPoll
PREPARE_EXTERNAL_VAR_INDIRECT_W RhpTrapThreads, 0
- cbnz w0, C_FUNC(RhpGcPollRare) // TrapThreadsFlags_None = 0
+ cbnz w0, LOCAL_LABEL(RhpGcPoll_Rare) // TrapThreadsFlags_None = 0
ret
+LOCAL_LABEL(RhpGcPoll_Rare):
+ b C_FUNC(RhpGcPollRare)
LEAF_END RhpGcPoll
NESTED_ENTRY RhpGcPollRare, _TEXT, NoHandler
diff --git a/src/coreclr/nativeaot/Runtime/arm64/WriteBarriers.S b/src/coreclr/nativeaot/Runtime/arm64/WriteBarriers.S
index d00ffb3a4a9978..835466c3b9e7e4 100644
--- a/src/coreclr/nativeaot/Runtime/arm64/WriteBarriers.S
+++ b/src/coreclr/nativeaot/Runtime/arm64/WriteBarriers.S
@@ -224,9 +224,11 @@ LEAF_END RhpByRefAssignRefArm64, _TEXT
PREPARE_EXTERNAL_VAR_INDIRECT g_highest_address, x12
ccmp x14, x12, #0x2, hs
- blo C_FUNC(RhpAssignRefArm64)
+ bhs LOCAL_LABEL(NotInHeap)
-NotInHeap:
+ b C_FUNC(RhpAssignRefArm64)
+
+LOCAL_LABEL(NotInHeap):
ALTERNATE_ENTRY RhpCheckedAssignRefAVLocation
str x15, [x14], 8
ret
@@ -293,44 +295,44 @@ LEAF_END RhpAssignRef, _TEXT
#ifndef LSE_INSTRUCTIONS_ENABLED_BY_DEFAULT
PREPARE_EXTERNAL_VAR_INDIRECT_W g_cpuFeatures, 16
- tbz w16, #ARM64_ATOMICS_FEATURE_FLAG_BIT, CmpXchgRetry
+ tbz w16, #ARM64_ATOMICS_FEATURE_FLAG_BIT, LOCAL_LABEL(CmpXchgRetry)
#endif
mov x10, x2
ALTERNATE_ENTRY RhpCheckedLockCmpXchgAVLocation
casal x10, x1, [x0] // exchange
cmp x2, x10
- bne CmpXchgNoUpdate
+ bne LOCAL_LABEL(CmpXchgNoUpdate)
#ifndef LSE_INSTRUCTIONS_ENABLED_BY_DEFAULT
- b DoCardsCmpXchg
-CmpXchgRetry:
+ b LOCAL_LABEL(DoCardsCmpXchg)
+LOCAL_LABEL(CmpXchgRetry):
// Check location value is what we expect.
ALTERNATE_ENTRY RhpCheckedLockCmpXchgAVLocation2
ldaxr x10, [x0]
cmp x10, x2
- bne CmpXchgNoUpdate
+ bne LOCAL_LABEL(CmpXchgNoUpdate)
// Current value matches comparand, attempt to update with the new value.
stlxr w12, x1, [x0]
- cbnz w12, CmpXchgRetry
+ cbnz w12, LOCAL_LABEL(CmpXchgRetry)
#endif
-DoCardsCmpXchg:
+LOCAL_LABEL(DoCardsCmpXchg):
// We have successfully updated the value of the objectref so now we need a GC write barrier.
// The following barrier code takes the destination in x0 and the value in x1 so the arguments are
// already correctly set up.
INSERT_CHECKED_WRITE_BARRIER_CORE x0, x1
-CmpXchgNoUpdate:
+LOCAL_LABEL(CmpXchgNoUpdate):
// x10 still contains the original value.
mov x0, x10
#ifndef LSE_INSTRUCTIONS_ENABLED_BY_DEFAULT
- tbnz w16, #ARM64_ATOMICS_FEATURE_FLAG_BIT, NoBarrierCmpXchg
+ tbnz w16, #ARM64_ATOMICS_FEATURE_FLAG_BIT, LOCAL_LABEL(NoBarrierCmpXchg)
InterlockedOperationBarrier
-NoBarrierCmpXchg:
+LOCAL_LABEL(NoBarrierCmpXchg):
#endif
ret lr
@@ -357,25 +359,25 @@ NoBarrierCmpXchg:
#ifndef LSE_INSTRUCTIONS_ENABLED_BY_DEFAULT
PREPARE_EXTERNAL_VAR_INDIRECT_W g_cpuFeatures, 16
- tbz w16, #ARM64_ATOMICS_FEATURE_FLAG_BIT, ExchangeRetry
+ tbz w16, #ARM64_ATOMICS_FEATURE_FLAG_BIT, LOCAL_LABEL(ExchangeRetry)
#endif
ALTERNATE_ENTRY RhpCheckedXchgAVLocation
swpal x1, x10, [x0] // exchange
#ifndef LSE_INSTRUCTIONS_ENABLED_BY_DEFAULT
- b DoCardsXchg
-ExchangeRetry:
+ b LOCAL_LABEL(DoCardsXchg)
+LOCAL_LABEL(ExchangeRetry):
// Read the existing memory location.
ALTERNATE_ENTRY RhpCheckedXchgAVLocation2
ldaxr x10, [x0]
// Attempt to update with the new value.
stlxr w12, x1, [x0]
- cbnz w12, ExchangeRetry
+ cbnz w12, LOCAL_LABEL(ExchangeRetry)
#endif
-DoCardsXchg:
+LOCAL_LABEL(DoCardsXchg):
// We have successfully updated the value of the objectref so now we need a GC write barrier.
// The following barrier code takes the destination in x0 and the value in x1 so the arguments are
// already correctly set up.
@@ -386,9 +388,9 @@ DoCardsXchg:
mov x0, x10
#ifndef LSE_INSTRUCTIONS_ENABLED_BY_DEFAULT
- tbnz w16, #ARM64_ATOMICS_FEATURE_FLAG_BIT, NoBarrierXchg
+ tbnz w16, #ARM64_ATOMICS_FEATURE_FLAG_BIT, LOCAL_LABEL(NoBarrierXchg)
InterlockedOperationBarrier
-NoBarrierXchg:
+LOCAL_LABEL(NoBarrierXchg):
#endif
ret
diff --git a/src/coreclr/nativeaot/Runtime/gcrhenv.cpp b/src/coreclr/nativeaot/Runtime/gcrhenv.cpp
index 3d0990962b7c99..3ec488605c1b33 100644
--- a/src/coreclr/nativeaot/Runtime/gcrhenv.cpp
+++ b/src/coreclr/nativeaot/Runtime/gcrhenv.cpp
@@ -42,7 +42,6 @@
#include "daccess.h"
-#include "GCMemoryHelpers.h"
#include "interoplibinterface.h"
#include "holder.h"
diff --git a/src/coreclr/nativeaot/Runtime/portable.cpp b/src/coreclr/nativeaot/Runtime/portable.cpp
index d45b3d062d00e3..8b425bfe2dff12 100644
--- a/src/coreclr/nativeaot/Runtime/portable.cpp
+++ b/src/coreclr/nativeaot/Runtime/portable.cpp
@@ -31,7 +31,6 @@
#include "MethodTable.inl"
#include "ObjectLayout.h"
-#include "GCMemoryHelpers.h"
#include "GCMemoryHelpers.inl"
#if defined(USE_PORTABLE_HELPERS)
diff --git a/src/coreclr/nativeaot/Runtime/startup.cpp b/src/coreclr/nativeaot/Runtime/startup.cpp
index 32cbab53cb8304..5db04aa27766dd 100644
--- a/src/coreclr/nativeaot/Runtime/startup.cpp
+++ b/src/coreclr/nativeaot/Runtime/startup.cpp
@@ -297,6 +297,10 @@ static void UninitDLL()
Thread* g_threadPerformingShutdown = NULL;
#endif
+#if defined(_WIN32) && defined(FEATURE_PERFTRACING)
+bool g_safeToShutdownTracing;
+#endif
+
static void __cdecl OnProcessExit()
{
#ifdef _WIN32
@@ -309,8 +313,16 @@ static void __cdecl OnProcessExit()
#endif
#ifdef FEATURE_PERFTRACING
- EventPipe_Shutdown();
- DiagnosticServer_Shutdown();
+#ifdef _WIN32
+ // We forgo shutting down EventPipe when it wouldn't be safe, since doing so could lead to a hang.
+ // If there was an active trace session, the trace will likely be corrupted without
+ // orderly shutdown. See https://github.com/dotnet/runtime/issues/89346.
+ if (g_safeToShutdownTracing)
+#endif
+ {
+ EventPipe_Shutdown();
+ DiagnosticServer_Shutdown();
+ }
#endif
}
@@ -348,7 +360,7 @@ void RuntimeThreadShutdown(void* thread)
#endif
}
-extern "C" bool RhInitialize()
+extern "C" bool RhInitialize(bool isDll)
{
if (!PalInit())
return false;
@@ -357,6 +369,10 @@ extern "C" bool RhInitialize()
atexit(&OnProcessExit);
#endif
+#if defined(_WIN32) && defined(FEATURE_PERFTRACING)
+ g_safeToShutdownTracing = !isDll;
+#endif
+
if (!InitDLL(PalGetModuleHandleFromPointer((void*)&RhInitialize)))
return false;
diff --git a/src/coreclr/nativeaot/Runtime/threadstore.cpp b/src/coreclr/nativeaot/Runtime/threadstore.cpp
index 67a6949fd7fb06..2e8369f9175fc5 100644
--- a/src/coreclr/nativeaot/Runtime/threadstore.cpp
+++ b/src/coreclr/nativeaot/Runtime/threadstore.cpp
@@ -24,7 +24,6 @@
#include "yieldprocessornormalized.h"
#include "slist.inl"
-#include "GCMemoryHelpers.h"
EXTERN_C volatile uint32_t RhpTrapThreads;
volatile uint32_t RhpTrapThreads = (uint32_t)TrapThreadsFlags::None;
diff --git a/src/coreclr/nativeaot/Runtime/unix/unixasmmacros.inc b/src/coreclr/nativeaot/Runtime/unix/unixasmmacros.inc
index ef6d393fd248b1..bde1d517b7e823 100644
--- a/src/coreclr/nativeaot/Runtime/unix/unixasmmacros.inc
+++ b/src/coreclr/nativeaot/Runtime/unix/unixasmmacros.inc
@@ -3,6 +3,11 @@
#define INVALIDGCVALUE 0xCCCCCCCD
+// Enforce subsections via symbols to work around bugs in the Xcode 15 linker.
+#if defined(__APPLE__)
+.subsections_via_symbols
+#endif
+
#if defined(__APPLE__)
#define C_FUNC(name) _##name
#define EXTERNAL_C_FUNC(name) C_FUNC(name)
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/GC.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/GC.NativeAot.cs
index 5ebcfc6c0771c1..3d263d5de6b63d 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/GC.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/GC.NativeAot.cs
@@ -869,7 +869,6 @@ public static TimeSpan GetTotalPauseDuration()
return new TimeSpan(RuntimeImports.RhGetTotalPauseDuration());
}
- [System.Runtime.Versioning.RequiresPreviewFeaturesAttribute("RefreshMemoryLimit is in preview.")]
public static void RefreshMemoryLimit()
{
ulong heapHardLimit = (AppContext.GetData("GCHeapHardLimit") as ulong?) ?? ulong.MaxValue;
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.MetadataSignatureParsing.cs b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.MetadataSignatureParsing.cs
index 07674dded3541c..1a1cd76cc4ddfd 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.MetadataSignatureParsing.cs
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.MetadataSignatureParsing.cs
@@ -173,12 +173,23 @@ internal static NativeParser GetNativeParserForSignature(RuntimeSignature signat
private bool CompareTypeSigWithType(ref NativeParser parser, TypeManagerHandle moduleHandle, Handle typeHandle)
{
- while (typeHandle.HandleType == HandleType.TypeSpecification)
+ while (typeHandle.HandleType == HandleType.TypeSpecification
+ || typeHandle.HandleType == HandleType.ModifiedType)
{
- typeHandle = typeHandle
- .ToTypeSpecificationHandle(_metadataReader)
- .GetTypeSpecification(_metadataReader)
- .Signature;
+ if (typeHandle.HandleType == HandleType.TypeSpecification)
+ {
+ typeHandle = typeHandle
+ .ToTypeSpecificationHandle(_metadataReader)
+ .GetTypeSpecification(_metadataReader)
+ .Signature;
+ }
+ else
+ {
+ typeHandle = typeHandle
+ .ToModifiedTypeHandle(_metadataReader)
+ .GetModifiedType(_metadataReader)
+ .Type;
+ }
}
// startOffset lets us backtrack to the TypeSignatureKind for external types since the TypeLoader
diff --git a/src/coreclr/pal/inc/unixasmmacros.inc b/src/coreclr/pal/inc/unixasmmacros.inc
index 658a65bb4b35aa..120b26543e3faa 100644
--- a/src/coreclr/pal/inc/unixasmmacros.inc
+++ b/src/coreclr/pal/inc/unixasmmacros.inc
@@ -3,6 +3,11 @@
#define INVALIDGCVALUE 0xCCCCCCCD
+// Enforce subsections via symbols to work around bugs in the Xcode 15 linker.
+#if defined(__APPLE__)
+.subsections_via_symbols
+#endif
+
#if defined(__APPLE__)
#define C_FUNC(name) _##name
#define EXTERNAL_C_FUNC(name) C_FUNC(name)
diff --git a/src/coreclr/tools/Common/CommandLineHelpers.cs b/src/coreclr/tools/Common/CommandLineHelpers.cs
index 205592c1c91dca..3fb977a3047a6a 100644
--- a/src/coreclr/tools/Common/CommandLineHelpers.cs
+++ b/src/coreclr/tools/Common/CommandLineHelpers.cs
@@ -210,7 +210,7 @@ public static void MakeReproPackage(string makeReproPath, string outputFilePath,
foreach (CliOption option in res.CommandResult.Command.Options)
{
OptionResult optionResult = res.GetResult(option);
- if (optionResult is null || option.Name == "make-repro-path")
+ if (optionResult is null || option.Name == "--make-repro-path")
{
continue;
}
@@ -233,7 +233,7 @@ public static void MakeReproPackage(string makeReproPath, string outputFilePath,
}
foreach (string inputFile in dictionary.Values)
{
- rspFile.Add($"--{option.Name}:{ConvertFromOriginalPathToReproPackagePath(input: true, inputFile)}");
+ rspFile.Add($"{option.Name}:{ConvertFromOriginalPathToReproPackagePath(input: true, inputFile)}");
}
}
else
@@ -241,7 +241,7 @@ public static void MakeReproPackage(string makeReproPath, string outputFilePath,
foreach (string optInList in values)
{
if (!string.IsNullOrEmpty(optInList))
- rspFile.Add($"--{option.Name}:{optInList}");
+ rspFile.Add($"{option.Name}:{optInList}");
}
}
}
@@ -254,11 +254,11 @@ public static void MakeReproPackage(string makeReproPath, string outputFilePath,
// if output option is used, overwrite the path to the repro package
stringVal = ConvertFromOriginalPathToReproPackagePath(input: false, stringVal);
}
- rspFile.Add($"--{option.Name}:{stringVal}");
+ rspFile.Add($"{option.Name}:{stringVal}");
}
else
{
- rspFile.Add($"--{option.Name}:{val}");
+ rspFile.Add($"{option.Name}:{val}");
}
}
}
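
For context, these edits track a System.CommandLine update in which `CliOption.Name` includes the option's prefix, so prepending `--` manually would double it. A hedged sketch (assuming one of the 2.0 previews with the Cli-prefixed type names):

```csharp
using System;
using System.CommandLine;

// Name now returns the option's primary name including its "--" prefix.
var option = new CliOption<string>("--make-repro-path");
Console.WriteLine(option.Name); // prints "--make-repro-path"
```
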
diff --git a/src/coreclr/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs b/src/coreclr/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs
index 8d2b8a4e3fd3fe..330296b18dbfa4 100644
--- a/src/coreclr/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs
+++ b/src/coreclr/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs
@@ -614,6 +614,8 @@ private static MethodDesc ResolveInterfaceMethodToVirtualMethodOnType(MethodDesc
{
Debug.Assert(!interfaceMethod.Signature.IsStatic);
+ // This would be a default interface method resolution. The algorithm below would sort of work, but it doesn't
+ // handle things like diamond cases, so it's better not to let it resolve as such.
if (currentType.IsInterface)
return null;
@@ -781,7 +783,7 @@ private static DefaultInterfaceMethodResolution ResolveInterfaceMethodToDefaultI
// If we're asking about an interface, include the interface in the list.
consideredInterfaces = new DefType[currentType.RuntimeInterfaces.Length + 1];
Array.Copy(currentType.RuntimeInterfaces, consideredInterfaces, currentType.RuntimeInterfaces.Length);
- consideredInterfaces[consideredInterfaces.Length - 1] = (DefType)currentType.InstantiateAsOpen();
+ consideredInterfaces[consideredInterfaces.Length - 1] = currentType.IsGenericDefinition ? (DefType)currentType.InstantiateAsOpen() : currentType;
}
foreach (MetadataType runtimeInterface in consideredInterfaces)
@@ -921,6 +923,11 @@ public static IEnumerable<MethodDesc> EnumAllVirtualSlots(MetadataType type)
/// <returns>MethodDesc of the resolved virtual static method, null when not found (runtime lookup must be used)</returns>
public static MethodDesc ResolveInterfaceMethodToStaticVirtualMethodOnType(MethodDesc interfaceMethod, MetadataType currentType)
{
+ // This would be a default interface method resolution. The algorithm below would sort of work, but it doesn't
+ // handle things like diamond cases, so it's better not to let it resolve as such.
+ if (currentType.IsInterface)
+ return null;
+
// Search for match on a per-level in the type hierarchy
for (MetadataType typeToCheck = currentType; typeToCheck != null; typeToCheck = typeToCheck.MetadataBaseType)
{
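
For context, the diamond cases the new comments refer to look roughly like this (illustrative names):

```csharp
interface IRoot { void M() { } }
interface ILeft : IRoot { void IRoot.M() { } }
interface IRight : IRoot { void IRoot.M() { } }

// Neither ILeft.M nor IRight.M is more specific for IRoot.M here, so
// resolving IRoot.M "on" the interface itself must fail rather than
// silently picking one of the two overrides.
interface IDiamond : ILeft, IRight { }
```
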
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/Dataflow/CompilerGeneratedState.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/Dataflow/CompilerGeneratedState.cs
index 8150abeca61b1e..6a5e2ed77cdf08 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/Dataflow/CompilerGeneratedState.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/Dataflow/CompilerGeneratedState.cs
@@ -158,8 +158,10 @@ referencedMethod.OwningType is MetadataType generatedType &&
break;
case ILOpcode.stsfld:
+ case ILOpcode.ldsfld:
{
// Same as above, but stsfld instead of a call to the constructor
+ // Ldsfld may also trigger a cctor that creates a closure environment
FieldDesc? field = methodBody.GetObject(reader.ReadILToken()) as FieldDesc;
if (field == null)
continue;
@@ -417,6 +419,7 @@ void MapGeneratedTypeTypeParameters(
break;
case ILOpcode.stsfld:
+ case ILOpcode.ldsfld:
{
if (body.GetObject(reader.ReadILToken()) is FieldDesc { OwningType: MetadataType owningType }
&& compilerGeneratedType == owningType.GetTypeDefinition())
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/EETypeNode.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/EETypeNode.cs
index 1e9f7679b44cc3..61520b4bfadaff 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/EETypeNode.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/EETypeNode.cs
@@ -372,16 +372,8 @@ public sealed override IEnumerable<CombinedDependencyListEntry> GetConditionalSt
DefType defType = _type.GetClosestDefType();
- // Interfaces don't have vtables and we don't need to track their slot use.
- // The only exception are those interfaces that provide IDynamicInterfaceCastable implementations;
- // those have slots and we dispatch on them.
- bool needsDependenciesForVirtualMethodImpls = !defType.IsInterface
- || ((MetadataType)defType).IsDynamicInterfaceCastableImplementation();
-
// If we're producing a full vtable, none of the dependencies are conditional.
- needsDependenciesForVirtualMethodImpls &= !factory.VTable(defType).HasFixedSlots;
-
- if (needsDependenciesForVirtualMethodImpls)
+ if (!factory.VTable(defType).HasFixedSlots)
{
bool isNonInterfaceAbstractType = !defType.IsInterface && ((MetadataType)defType).IsAbstract;
@@ -436,6 +428,12 @@ public sealed override IEnumerable<CombinedDependencyListEntry> GetConditionalSt
((System.Collections.IStructuralEquatable)defType.RuntimeInterfaces).Equals(_type.RuntimeInterfaces,
EqualityComparer<DefType>.Default));
+ // Interfaces don't have vtables and we don't need to track their instance method slot use.
+ // The only exception are those interfaces that provide IDynamicInterfaceCastable implementations;
+ // those have slots and we dispatch on them.
+ bool needsDependenciesForInstanceInterfaceMethodImpls = !defType.IsInterface
+ || ((MetadataType)defType).IsDynamicInterfaceCastableImplementation();
+
// Add conditional dependencies for interface methods the type implements. For example, if the type T implements
// interface IFoo which has a method M1, add a dependency on T.M1 dependent on IFoo.M1 being called, since it's
// possible for any IFoo object to actually be an instance of T.
@@ -456,6 +454,9 @@ public sealed override IEnumerable<CombinedDependencyListEntry> GetConditionalSt
bool isStaticInterfaceMethod = interfaceMethod.Signature.IsStatic;
+ if (!isStaticInterfaceMethod && !needsDependenciesForInstanceInterfaceMethodImpls)
+ continue;
+
MethodDesc implMethod = isStaticInterfaceMethod ?
defType.ResolveInterfaceMethodToStaticVirtualMethodOnType(interfaceMethod) :
defType.ResolveInterfaceMethodToVirtualMethodOnType(interfaceMethod);
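
For context, the conditional dependency described in the comment above arises from code shaped like this (illustrative names):

```csharp
interface IFoo { void M1(); }
class T : IFoo { public void M1() { } }

class Program
{
    static void Main()
    {
        // Any IFoo at runtime may really be a T, so once IFoo.M1 is
        // called anywhere, T's implementation must be kept alive; the
        // dependency is conditional on IFoo.M1 actually being used.
        IFoo foo = new T();
        foo.M1(); // dispatches to T.M1
    }
}
```
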
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/InterfaceDispatchMapNode.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/InterfaceDispatchMapNode.cs
index 38104d7ab015c9..c2ac4568c748c9 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/InterfaceDispatchMapNode.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/InterfaceDispatchMapNode.cs
@@ -73,7 +73,7 @@ public static bool MightHaveInterfaceDispatchMap(TypeDesc type, NodeFactory fact
if (!type.IsArray && !type.IsDefType)
return false;
- // Interfaces don't have a dispatch map because we dispatch them based on the
+ // Interfaces don't have a dispatch map for instance methods because we dispatch them based on the
// dispatch map of the implementing class.
// The only exception are IDynamicInterfaceCastable scenarios that dispatch
// using the interface dispatch map.
@@ -83,8 +83,9 @@ public static bool MightHaveInterfaceDispatchMap(TypeDesc type, NodeFactory fact
// wasn't marked as [DynamicInterfaceCastableImplementation]" and "we couldn't find an
// implementation". We don't want to use the custom attribute for that at runtime because
// that's reflection and this should work without reflection.
- if (type.IsInterface)
- return ((MetadataType)type).IsDynamicInterfaceCastableImplementation();
+ bool isInterface = type.IsInterface;
+ if (isInterface && ((MetadataType)type).IsDynamicInterfaceCastableImplementation())
+ return true;
DefType declType = type.GetClosestDefType();
@@ -112,6 +113,11 @@ public static bool MightHaveInterfaceDispatchMap(TypeDesc type, NodeFactory fact
Debug.Assert(declMethod.IsVirtual);
+ // Only static methods get placed in dispatch maps of interface types (modulo the
+ // IDynamicInterfaceCastable case we already handled above).
+ if (isInterface && !declMethod.Signature.IsStatic)
+ continue;
+
if (interfaceOnDefinitionType != null)
declMethod = factory.TypeSystemContext.GetMethodForInstantiatedType(declMethod.GetTypicalMethodDefinition(), interfaceOnDefinitionType);
@@ -154,6 +160,10 @@ private void EmitDispatchMap(ref ObjectDataBuilder builder, NodeFactory factory)
var staticImplementations = new List<(int InterfaceIndex, int InterfaceMethodSlot, int ImplMethodSlot, int Context)>();
var staticDefaultImplementations = new List<(int InterfaceIndex, int InterfaceMethodSlot, int ImplMethodSlot, int Context)>();
+ bool isInterface = declType.IsInterface;
+ bool needsEntriesForInstanceInterfaceMethodImpls = !isInterface
+ || ((MetadataType)declType).IsDynamicInterfaceCastableImplementation();
+
// Resolve all the interfaces, but only emit non-static and non-default implementations
for (int interfaceIndex = 0; interfaceIndex < declTypeRuntimeInterfaces.Length; interfaceIndex++)
{
@@ -166,6 +176,10 @@ private void EmitDispatchMap(ref ObjectDataBuilder builder, NodeFactory factory)
for (int interfaceMethodSlot = 0; interfaceMethodSlot < virtualSlots.Count; interfaceMethodSlot++)
{
MethodDesc declMethod = virtualSlots[interfaceMethodSlot];
+
+ if (!declMethod.Signature.IsStatic && !needsEntriesForInstanceInterfaceMethodImpls)
+ continue;
+
if (!interfaceType.IsTypeDefinition)
declMethod = factory.TypeSystemContext.GetMethodForInstantiatedType(declMethod.GetTypicalMethodDefinition(), (InstantiatedType)interfaceDefinitionType);
@@ -244,9 +258,17 @@ private void EmitDispatchMap(ref ObjectDataBuilder builder, NodeFactory factory)
// For default interface methods, the generic context is acquired by indexing
// into the interface list of the owning type.
Debug.Assert(providingInterfaceDefinitionType != null);
- int indexOfInterface = Array.IndexOf(declTypeDefinitionRuntimeInterfaces, providingInterfaceDefinitionType);
- Debug.Assert(indexOfInterface >= 0);
- genericContext = StaticVirtualMethodContextSource.ContextFromFirstInterface + indexOfInterface;
+ if (declTypeDefinition.HasSameTypeDefinition(providingInterfaceDefinitionType) &&
+ providingInterfaceDefinitionType == declTypeDefinition.InstantiateAsOpen())
+ {
+ genericContext = StaticVirtualMethodContextSource.ContextFromThisClass;
+ }
+ else
+ {
+ int indexOfInterface = Array.IndexOf(declTypeDefinitionRuntimeInterfaces, providingInterfaceDefinitionType);
+ Debug.Assert(indexOfInterface >= 0);
+ genericContext = StaticVirtualMethodContextSource.ContextFromFirstInterface + indexOfInterface;
+ }
}
staticDefaultImplementations.Add((
interfaceIndex,
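
For context, the new `ContextFromThisClass` branch appears to cover a static virtual whose default body is provided by the open definition of the interface being compiled itself, roughly (illustrative names):

```csharp
interface IWidget<TSelf> where TSelf : IWidget<TSelf>
{
    // A static virtual with a default body: when IWidget<TSelf> is itself
    // the type whose dispatch map is being emitted, the generic context
    // comes from the type's own instantiation rather than from an entry
    // in its interface list.
    static virtual string Describe() => typeof(TSelf).Name;
}
```
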
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/SealedVTableNode.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/SealedVTableNode.cs
index bb67f884264dd3..a8460e80d0b413 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/SealedVTableNode.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/SealedVTableNode.cs
@@ -108,17 +108,21 @@ public bool BuildSealedVTableSlots(NodeFactory factory, bool relocsOnly)
_sealedVTableEntries = new List<SealedVTableEntry>();
- // Interfaces don't have any virtual slots with the exception of interfaces that provide
+ // Interfaces don't have any instance virtual slots with the exception of interfaces that provide
// IDynamicInterfaceCastable implementation.
// Normal interfaces don't need one because the dispatch is done at the class level.
// For IDynamicInterfaceCastable, we don't have an implementing class.
- if (_type.IsInterface && !((MetadataType)_type).IsDynamicInterfaceCastableImplementation())
- return true;
+ bool isInterface = declType.IsInterface;
+ bool needsEntriesForInstanceInterfaceMethodImpls = !isInterface
+ || ((MetadataType)declType).IsDynamicInterfaceCastableImplementation();
IReadOnlyList<MethodDesc> virtualSlots = factory.VTable(declType).Slots;
for (int i = 0; i < virtualSlots.Count; i++)
{
+ if (!virtualSlots[i].Signature.IsStatic && !needsEntriesForInstanceInterfaceMethodImpls)
+ continue;
+
MethodDesc implMethod = declType.FindVirtualFunctionTargetMethodOnObjectType(virtualSlots[i]);
if (implMethod.CanMethodBeInSealedVTable())
@@ -143,6 +147,10 @@ public bool BuildSealedVTableSlots(NodeFactory factory, bool relocsOnly)
for (int interfaceMethodSlot = 0; interfaceMethodSlot < virtualSlots.Count; interfaceMethodSlot++)
{
MethodDesc declMethod = virtualSlots[interfaceMethodSlot];
+
+ if (!declMethod.Signature.IsStatic && !needsEntriesForInstanceInterfaceMethodImpls)
+ continue;
+
if (!interfaceType.IsTypeDefinition)
declMethod = factory.TypeSystemContext.GetMethodForInstantiatedType(declMethod.GetTypicalMethodDefinition(), (InstantiatedType)interfaceDefinitionType);
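
For context, the IDynamicInterfaceCastable exception these checks preserve looks like this (illustrative names; the attribute is System.Runtime.InteropServices.DynamicInterfaceCastableImplementationAttribute):

```csharp
using System.Runtime.InteropServices;

interface IFoo { void M(); }

// Interfaces marked this way carry real (sealed) vtable slots for their
// instance methods, because there is no implementing class to dispatch on.
[DynamicInterfaceCastableImplementation]
interface IFooImpl : IFoo
{
    void IFoo.M() { /* used when an IDynamicInterfaceCastable object claims to be IFoo */ }
}
```
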
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/UsageBasedMetadataManager.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/UsageBasedMetadataManager.cs
index e680bf80f2dfa7..0d4a855e736ed0 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/UsageBasedMetadataManager.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/UsageBasedMetadataManager.cs
@@ -1093,7 +1093,7 @@ private void ProcessAttribute(TypeDesc type, XPathNavigator nav)
string internalValue = GetAttribute(nav, "internal");
if (!string.IsNullOrEmpty(internalValue))
{
- if (!IsRemoveAttributeInstances(internalValue) || !nav.IsEmptyElement)
+ if (!IsRemoveAttributeInstances(internalValue))
{
LogWarning(nav, DiagnosticId.UnrecognizedInternalAttribute, internalValue);
}
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/VirtualMethodCallHelper.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/VirtualMethodCallHelper.cs
index a9de1fce5e3a52..ec398d37433966 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/VirtualMethodCallHelper.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/VirtualMethodCallHelper.cs
@@ -93,9 +93,8 @@ private static int GetNumberOfSlotsInCurrentType(NodeFactory factory, TypeDesc i
{
if (implType.IsInterface)
{
- // We normally don't need to ask about vtable slots of interfaces. It's not wrong to ask
- // that question, but we currently only ask it for IDynamicInterfaceCastable implementations.
- Debug.Assert(((MetadataType)implType).IsDynamicInterfaceCastableImplementation());
+ // Interface types don't have physically assigned virtual slots, so the number of slots
+ // is always 0. They may have sealed slots.
return (implType.HasGenericDictionarySlot() && countDictionarySlots) ? 1 : 0;
}
diff --git a/src/coreclr/tools/aot/Mono.Linker.Tests/TestCases/TestDatabase.cs b/src/coreclr/tools/aot/Mono.Linker.Tests/TestCases/TestDatabase.cs
index 07147b1110323d..31cd4455663e2e 100644
--- a/src/coreclr/tools/aot/Mono.Linker.Tests/TestCases/TestDatabase.cs
+++ b/src/coreclr/tools/aot/Mono.Linker.Tests/TestCases/TestDatabase.cs
@@ -29,6 +29,11 @@ public static IEnumerable