summaryrefslogtreecommitdiff
path: root/gc.c
diff options
context:
space:
mode:
authorKoichi Sasada <ko1@atdot.net>2020-12-10 10:28:53 +0900
committerKoichi Sasada <ko1@atdot.net>2020-12-10 13:05:43 +0900
commiteafe000af3d363f6cb65be99edcfccedf35d801c (patch)
tree0dc504d5770938d68d7cde30bedb1a0690cf89d1 /gc.c
parent2544f7196ee7d157000244ca9cac55514e3b193d (diff)
lazy sweep tries to collect 2048 slots
Lazy sweep tries to collect free (unused) slots incrementally, and it only collects a few pages. This patch makes lazy sweep collect more objects (at least 2048 objects), which reduces GC overhead during multi-ractor execution.
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/3875
Diffstat (limited to 'gc.c')
-rw-r--r--gc.c11
1 files changed, 8 insertions, 3 deletions
diff --git a/gc.c b/gc.c
index f7d00cb1dc..e1031ee05f 100644
--- a/gc.c
+++ b/gc.c
@@ -5082,6 +5082,8 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
{
struct heap_page *sweep_page = heap->sweeping_page;
int unlink_limit = 3;
+ int swept_slots = 0;
+
#if GC_ENABLE_INCREMENTAL_MARK
int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
@@ -5119,7 +5121,10 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
}
else {
if (heap_add_freepage(heap, sweep_page)) {
- break;
+ swept_slots += free_slots;
+ if (swept_slots > 2048) {
+ break;
+ }
}
}
#else
@@ -5140,8 +5145,8 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
gc_prof_sweep_timer_stop(objspace);
#endif
- GC_ASSERT(gc_mode(objspace) == gc_mode_sweeping ?
- heap->free_pages != NULL : 1);
+ GC_ASSERT(gc_mode(objspace) == gc_mode_sweeping ? heap->free_pages != NULL : 1);
+
return heap->free_pages != NULL;
}