summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPeter Zhu <peter@peterzhu.ca>2023-08-18 09:56:46 -0400
committerPeter Zhu <peter@peterzhu.ca>2023-09-05 10:52:35 -0400
commit7a930cf0e4ff4bf7d70ef927d6f20d3189edcd21 (patch)
treea32f720f6876e1d6e1e55fcba2a3dd1701aa276d
parent790df7d3837af6554b26664d6de9a4d167cea1c7 (diff)
Pool more slots for large size pools
We always sweep at least 2048 slots per sweep step, but only pool one page. For large size pools, 2048 slots is many pages but one page is very few slots. This commit changes it so that at least 1024 slots are placed in the pooled pages per sweep step.
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/8249
-rw-r--r--gc.c9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/gc.c b/gc.c
index b529a472ad..fb2d209466 100644
--- a/gc.c
+++ b/gc.c
@@ -1233,6 +1233,7 @@ total_freed_objects(rb_objspace_t *objspace)
#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
#define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
+#define GC_INCREMENTAL_SWEEP_POOL_SLOT_COUT 1024
#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
#if SIZEOF_LONG == SIZEOF_VOIDP
@@ -5781,9 +5782,7 @@ gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *hea
struct heap_page *sweep_page = heap->sweeping_page;
int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
int swept_slots = 0;
- bool need_pool = TRUE;
-
- gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
+ int pooled_slots = 0;
if (sweep_page == NULL) return FALSE;
@@ -5818,9 +5817,9 @@ gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *hea
size_pool->freed_slots += ctx.freed_slots;
size_pool->empty_slots += ctx.empty_slots;
- if (need_pool) {
+ if (pooled_slots < GC_INCREMENTAL_SWEEP_POOL_SLOT_COUT) {
heap_add_poolpage(objspace, heap, sweep_page);
- need_pool = FALSE;
+ pooled_slots += free_slots;
}
else {
heap_add_freepage(heap, sweep_page);