author     Matt Valentine-House <matt@eightbitraptor.com>    2023-08-11 15:16:30 +0100
committer  Matt Valentine-House <matt@eightbitraptor.com>    2023-09-18 14:34:38 +0100
commit     8792e421ce5e501f87bdb449fe885e3a436a88f0 (patch)
tree       46ee19f7f3bbe48019e981dd8cb584d8d4be3ae0 /gc.c
parent     404a1c032a334768607d1d72b111201d0d243cfe (diff)
Allow pages to be sorted by pinned slot count
By compacting into slots on pages that already contain pinned objects first, we improve the efficiency of compaction: it becomes less likely that pages containing only pinned objects will remain after compaction. This increases the number of free pages left after compaction and enables us to release them.

This used to be the default compaction method before it was (inadvertently?) removed during the introduction of auto_compaction.

This commit sorts the pages by pinned slot count at the start of a major GC that has been triggered by explicitly calling GC.compact (and thus setting objspace->flags.during_compacting). It works using the same mechanism by which we sort the heap by empty slot count during GC.verify_compaction_references.
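As a rough illustration of the sorting step, here is a minimal, self-contained sketch. It is not CRuby's actual gc_sort_heap_by_compare_func: it uses plain qsort instead of ruby_qsort, a stripped-down stand-in for struct heap_page, and made-up pinned slot counts; only the ordering logic matches the comparator added below.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for struct heap_page; the real struct has many more fields. */
    struct heap_page {
        short pinned_slots;
    };

    /* Same ordering as the comparator added in this commit: ascending by pinned_slots.
     * (The real comparator takes an extra void *dummy argument because it is passed
     * to ruby_qsort rather than qsort.) */
    static int
    compare_pinned_slots(const void *left, const void *right)
    {
        const struct heap_page *left_page = *(struct heap_page *const *)left;
        const struct heap_page *right_page = *(struct heap_page *const *)right;

        return left_page->pinned_slots - right_page->pinned_slots;
    }

    int
    main(void)
    {
        struct heap_page pages[] = { { 5 }, { 0 }, { 3 } };
        struct heap_page *page_list[] = { &pages[0], &pages[1], &pages[2] };

        qsort(page_list, 3, sizeof(struct heap_page *), compare_pinned_slots);

        for (int i = 0; i < 3; i++) {
            printf("pinned_slots: %d\n", page_list[i]->pinned_slots); /* prints 0, 3, 5 */
        }
        return 0;
    }

In gc.c the sorted page pointers are then re-linked into each size pool's page list, so the heap pages are ordered by pinned slot count before sweeping (and therefore compaction) begins.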
Diffstat (limited to 'gc.c')
-rw-r--r--  gc.c  32
1 file changed, 26 insertions, 6 deletions
diff --git a/gc.c b/gc.c
index 2fee157877..9142a59bfc 100644
--- a/gc.c
+++ b/gc.c
@@ -966,6 +966,7 @@ struct heap_page {
    short total_slots;
    short free_slots;
    short final_slots;
+    short pinned_slots;
    struct {
        unsigned int before_sweep : 1;
        unsigned int has_remembered_objects : 1;
@@ -5672,12 +5673,21 @@ __attribute__((noinline))
#endif
static void gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func);
+static int compare_pinned_slots(const void *left, const void *right, void *d);
+
static void
gc_sweep_start(rb_objspace_t *objspace)
{
    gc_mode_transition(objspace, gc_mode_sweeping);
    objspace->rincgc.pooled_slots = 0;
+    if (objspace->flags.during_compacting) {
+        gc_sort_heap_by_compare_func(
+            objspace,
+            objspace->rcompactor.compare_func ? objspace->rcompactor.compare_func : compare_pinned_slots
+        );
+    }
+
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
@@ -6863,7 +6873,10 @@ gc_pin(rb_objspace_t *objspace, VALUE obj)
    GC_ASSERT(is_markable_object(obj));
    if (UNLIKELY(objspace->flags.during_compacting)) {
        if (LIKELY(during_gc)) {
-            MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
+            if (!MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj)) {
+                GET_HEAP_PAGE(obj)->pinned_slots++;
+                MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
+            }
        }
    }
}
@@ -9376,11 +9389,6 @@ gc_start(rb_objspace_t *objspace, unsigned int reason)
    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
    GC_ASSERT(during_gc);
-    if (objspace->flags.during_compacting &&
-        objspace->rcompactor.compare_func) {
-        gc_sort_heap_by_compare_func(objspace, objspace->rcompactor.compare_func);
-    }
-
    gc_prof_timer_start(objspace);
    {
        if (gc_marks(objspace, do_full_mark)) {
@@ -9933,6 +9941,18 @@ gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, s
#if GC_CAN_COMPILE_COMPACTION
static int
+compare_pinned_slots(const void *left, const void *right, void *dummy)
+{
+    struct heap_page *left_page;
+    struct heap_page *right_page;
+
+    left_page = *(struct heap_page * const *)left;
+    right_page = *(struct heap_page * const *)right;
+
+    return left_page->pinned_slots - right_page->pinned_slots;
+}
+
+static int
compare_free_slots(const void *left, const void *right, void *dummy)
{
    struct heap_page *left_page;