diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 221d2dc2be6316..1c96ecae3b22f9 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -2471,6 +2471,10 @@ size_t gc_heap::mark_stack_array_length = 0; mark* gc_heap::mark_stack_array = 0; +uint32_t gc_heap::saved_pinned_queue_index = 0; + +saved_pinned_queue gc_heap::saved_pinned_queues[MAX_SAVED_PINNED_QUEUES]; + #if defined (_DEBUG) && defined (VERIFY_HEAP) BOOL gc_heap::verify_pinned_queue_p = FALSE; #endif //_DEBUG && VERIFY_HEAP @@ -2711,7 +2715,7 @@ alloc_list gc_heap::poh_alloc_list [NUM_POH_ALIST-1]; #ifdef DOUBLY_LINKED_FL // size we removed with no undo; only for recording purpose size_t gc_heap::gen2_removed_no_undo = 0; -size_t gc_heap::saved_pinned_plug_index = 0; +size_t gc_heap::saved_pinned_plug_index = INVALID_SAVED_PINNED_PLUG_INDEX; #endif //DOUBLY_LINKED_FL #ifdef FEATURE_EVENT_TRACE @@ -7808,6 +7812,7 @@ void gc_heap::make_mark_stack (mark* arr) #ifdef MH_SC_MARK mark_stack_busy() = 0; #endif //MH_SC_MARK + saved_pinned_queue_index = 0; } #ifdef BACKGROUND_GC @@ -14138,7 +14143,20 @@ void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen) uint8_t* old_loc = generation_last_free_list_allocated (gen); // check if old_loc happens to be in a saved plug_and_gap with a pinned plug after it - uint8_t* saved_plug_and_gap = pinned_plug (pinned_plug_of (saved_pinned_plug_index)) - sizeof(plug_and_gap); + uint8_t* saved_plug_and_gap = nullptr; + if (saved_pinned_plug_index != INVALID_SAVED_PINNED_PLUG_INDEX) + { + saved_plug_and_gap = pinned_plug (pinned_plug_of (saved_pinned_plug_index)) - sizeof(plug_and_gap); + + dprintf (5555, ("[h%d] sppi: %Id mtos: %Id old_loc: %Ix pp: %Ix(%Id) offs: %Id", + heap_number, + saved_pinned_plug_index, + mark_stack_tos, + old_loc, + pinned_plug (pinned_plug_of (saved_pinned_plug_index)), + pinned_len (pinned_plug_of (saved_pinned_plug_index)), + old_loc - saved_plug_and_gap)); + } size_t offset = old_loc - 
saved_plug_and_gap; - if (offset < sizeof(gap_reloc_pair)) + if ((saved_plug_and_gap != nullptr) && (offset < sizeof(gap_reloc_pair))) { @@ -14157,7 +14175,7 @@ void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen) set_free_obj_in_compact_bit (old_loc); } - dprintf (3333, ("[h%d] ac: %Ix->%Ix((%Id < %Id), Pset %Ix s->%Id", heap_number, + dprintf (5555, ("[h%d] ac: %Ix->%Ix((%Id < %Id), Pset %Ix s->%Id", heap_number, generation_allocation_context_start_region (gen), generation_allocation_pointer (gen), allocated_size, min_free_item_no_prev, filler_free_obj_size_location, filler_free_obj_size)); } @@ -27783,7 +27801,7 @@ void gc_heap::plan_phase (int condemned_gen_number) #ifdef DOUBLY_LINKED_FL gen2_removed_no_undo = 0; - saved_pinned_plug_index = 0; + saved_pinned_plug_index = INVALID_SAVED_PINNED_PLUG_INDEX; #endif //DOUBLY_LINKED_FL while (1) @@ -32077,6 +32095,45 @@ size_t gc_heap::recover_saved_pinned_info() return total_recovered_sweep_size; } +void saved_pinned_queue::save(gc_phase phase, size_t gc_index, size_t tos, size_t length, mark* array) +{ + if (mark_stack_array_length < length) + { + if (mark_stack_array != nullptr) + { + delete[] mark_stack_array; + mark_stack_array = nullptr; + } + mark_stack_array = new (nothrow) mark[length]; + if (mark_stack_array == nullptr) + { + mark_stack_array_length = 0; + mark_stack_tos = 0; + saved_gc_index = gc_index; + saved_phase = invalid; + return; + } + mark_stack_array_length = length; + } + assert (mark_stack_array_length >= length); + memcpy (mark_stack_array, array, sizeof(mark_stack_array[0])*tos); + mark_stack_tos = tos; + saved_gc_index = gc_index; + saved_phase = phase; +} + +void gc_heap::save_pinned_queue(saved_pinned_queue::gc_phase phase) +{ + saved_pinned_queues[saved_pinned_queue_index].save( + phase, + settings.gc_index, + mark_stack_tos, + mark_stack_array_length, + mark_stack_array); + + saved_pinned_queue_index = (saved_pinned_queue_index + 1) % MAX_SAVED_PINNED_QUEUES; +} + void gc_heap::compact_phase (int condemned_gen_number, uint8_t* first_condemned_address, BOOL clear_cards) @@ -32112,6 +32167,10
@@ void gc_heap::compact_phase (int condemned_gen_number, #endif //FEATURE_LOH_COMPACTION reset_pinned_queue_bos(); + if (condemned_gen_number == 1) + { + save_pinned_queue(saved_pinned_queue::start_compact); + } update_oldest_pinned_plug(); BOOL reused_seg = expand_reused_seg_p(); if (reused_seg) @@ -32219,6 +32278,11 @@ void gc_heap::compact_phase (int condemned_gen_number, recover_saved_pinned_info(); + if (condemned_gen_number == 1) + { + save_pinned_queue(saved_pinned_queue::end_compact); + } + concurrent_print_time_delta ("compact end"); dprintf (2, (ThreadStressLog::gcEndCompactMsg(), heap_number)); @@ -44287,7 +44351,7 @@ void gc_heap::do_pre_gc() #ifdef TRACE_GC size_t total_allocated_since_last_gc = get_total_allocated_since_last_gc(); #ifdef BACKGROUND_GC - dprintf (1, (ThreadStressLog::gcDetailedStartMsg(), + dprintf (5555, (ThreadStressLog::gcDetailedStartMsg(), VolatileLoad(&settings.gc_index), dd_collection_count (hp->dynamic_data_of (0)), settings.condemned_generation, @@ -44295,7 +44359,7 @@ void gc_heap::do_pre_gc() (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? 
"FGC" : "NGC")), settings.b_state)); #else - dprintf (1, ("*GC* %d(gen0:%d)(%d)(alloc: %Id)", + dprintf (5555, ("*GC* %d(gen0:%d)(%d)(alloc: %Id)", VolatileLoad(&settings.gc_index), dd_collection_count(hp->dynamic_data_of(0)), settings.condemned_generation, @@ -44306,7 +44370,7 @@ void gc_heap::do_pre_gc() { size_t total_heap_committed = get_total_committed_size(); size_t total_heap_committed_recorded = current_total_committed - current_total_committed_bookkeeping; - dprintf (1, ("(%d)GC commit BEG #%Id: %Id (recorded: %Id = %Id-%Id)", + dprintf (5555, ("(%d)GC commit BEG #%Id: %Id (recorded: %Id = %Id-%Id)", settings.condemned_generation, (size_t)settings.gc_index, total_heap_committed, total_heap_committed_recorded, current_total_committed, current_total_committed_bookkeeping)); @@ -44706,7 +44770,7 @@ void gc_heap::do_post_gc() } #endif //BGC_SERVO_TUNING - dprintf (1, (ThreadStressLog::gcDetailedEndMsg(), + dprintf (5555, (ThreadStressLog::gcDetailedEndMsg(), VolatileLoad(&settings.gc_index), dd_collection_count(hp->dynamic_data_of(0)), (size_t)(GetHighPrecisionTimeStamp() / 1000), diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h index 051dd993014906..db9a182356a81d 100644 --- a/src/coreclr/gc/gcpriv.h +++ b/src/coreclr/gc/gcpriv.h @@ -53,7 +53,7 @@ inline void FATAL_GC_ERROR() // Server GC we will balance regions between heaps. // For now enable regions by default for only StandAlone GC builds #if defined (HOST_64BIT) && defined (BUILD_AS_STANDALONE) -#define USE_REGIONS +//#define USE_REGIONS #endif //HOST_64BIT && BUILD_AS_STANDALONE #ifdef USE_REGIONS @@ -137,7 +137,7 @@ inline void FATAL_GC_ERROR() #define MAX_LONGPATH 1024 #endif // MAX_LONGPATH -//#define TRACE_GC +#define TRACE_GC //#define SIMPLE_DPRINTF //#define JOIN_STATS //amount of time spent in the join @@ -261,7 +261,7 @@ void GCLog (const char *fmt, ... ); // wanting to inspect GC logs on unmodified builds, we can use this define here // to do so. 
//#define dprintf(l, x) -#define dprintf(l,x) STRESS_LOG_VA(l,x); +#define dprintf(l,x) do { if ((l) == 5555) { STRESS_LOG_VA(l,x); } } while (0) #endif //SIMPLE_DPRINTF @@ -1200,6 +1200,37 @@ enum bookkeeping_element total_bookkeeping_elements }; +class saved_pinned_queue +{ +public: + enum gc_phase + { + invalid, + start_compact, + end_compact, + }; + +private: + size_t saved_gc_index; + gc_phase saved_phase; + size_t mark_stack_tos; + size_t mark_stack_array_length; + mark* mark_stack_array; + +public: + + saved_pinned_queue() : + saved_gc_index(0), + saved_phase(invalid), + mark_stack_tos(0), + mark_stack_array_length(0), + mark_stack_array(nullptr) + { + } + + void save(gc_phase phase, size_t gc_index, size_t tos, size_t length, mark* array); +}; + //class definition of the internal class class gc_heap { @@ -2310,6 +2341,8 @@ class gc_heap PER_HEAP void reset_pinned_queue_bos(); PER_HEAP + void save_pinned_queue(saved_pinned_queue::gc_phase phase); + PER_HEAP void set_allocator_next_pin (generation* gen); PER_HEAP void enque_pinned_plug (generation* gen, uint8_t* plug, size_t len); @@ -4070,6 +4103,14 @@ class gc_heap PER_HEAP mark* mark_stack_array; +#define MAX_SAVED_PINNED_QUEUES 8 + + PER_HEAP + uint32_t saved_pinned_queue_index; + + PER_HEAP + saved_pinned_queue saved_pinned_queues[MAX_SAVED_PINNED_QUEUES]; + #if defined (_DEBUG) && defined (VERIFY_HEAP) PER_HEAP BOOL verify_pinned_queue_p; @@ -4528,6 +4569,8 @@ class gc_heap PER_HEAP size_t gen2_removed_no_undo; +#define INVALID_SAVED_PINNED_PLUG_INDEX (~(size_t)0) + PER_HEAP size_t saved_pinned_plug_index; #endif //DOUBLY_LINKED_FL