path: root/gc.c
author    Peter Zhu <peter@peterzhu.ca>    2023-08-30 15:28:22 -0400
committer Peter Zhu <peter@peterzhu.ca>    2023-08-31 09:28:31 -0400
commit    4f0d58260a7f506ead198064d12a967edd11fe5e (patch)
tree      060ea8fef61b6b4240ee74dbe3cff410fe44e6b8 /gc.c
parent    eb3d94f4baff70d2e120c9472a3851a4aa9c90d9 (diff)
Correctly calculate initial pages
The old algorithm could calculate an undercount for the initial pages due to two issues:

1. It did not take into account that some heap pages will have one less slot due to alignment. It assumed that every heap page could be completely filled with slots, but pages that are unaligned with the slot size lose one slot. The new algorithm assumes that every page will be unaligned.
2. It performed integer division, which truncates toward zero, so the computed number of pages might not actually provide the requested number of slots.

This undercount can cause the heap to grow in `gc_sweep_finish_size_pool` after allocating all of the allocatable pages, because the total number of slots would be less than the initially configured number of slots.
Notes:
Merged: https://github.com/ruby/ruby/pull/8333
Diffstat (limited to 'gc.c')
-rw-r--r--  gc.c | 30 +++++++++++++++++++++---------
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/gc.c b/gc.c
index e2c2779b47..59c507369e 100644
--- a/gc.c
+++ b/gc.c
@@ -2305,11 +2305,22 @@ heap_add_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *he
#endif
static size_t
-minimum_pages_for_size_pool(rb_objspace_t *objspace, int size_pool_idx)
+slots_to_pages_for_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t slots)
{
- rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
- int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
- return gc_params.size_pool_init_slots[size_pool_idx] * multiple / HEAP_PAGE_OBJ_LIMIT;
+ size_t multiple = size_pool->slot_size / BASE_SLOT_SIZE;
+ /* Due to alignment, heap pages may have one less slot. We should
+ * ensure there are enough pages to guarantee that we will have at
+ * least the required number of slots after allocating all the pages. */
+ size_t slots_per_page = (HEAP_PAGE_OBJ_LIMIT / multiple) - 1;
+ return CEILDIV(slots, slots_per_page);
+}
+
+static size_t
+minimum_pages_for_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
+{
+ size_t size_pool_idx = size_pool - size_pools;
+ size_t init_slots = gc_params.size_pool_init_slots[size_pool_idx];
+ return slots_to_pages_for_size_pool(objspace, size_pool, init_slots);
}
static size_t
@@ -2322,7 +2333,7 @@ heap_extend_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t fre
next_used = (size_t)(used * gc_params.growth_factor);
}
else if (total_slots == 0) {
- next_used = minimum_pages_for_size_pool(objspace, (int)(size_pool - size_pools));
+ next_used = minimum_pages_for_size_pool(objspace, size_pool);
}
else {
/* Find `f' where free_slots = f * total_slots * goal_ratio
@@ -3728,10 +3739,12 @@ Init_heap(void)
/* Set size pools allocatable pages. */
for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+ rb_size_pool_t *size_pool = &size_pools[i];
+
/* Set the default value of size_pool_init_slots. */
gc_params.size_pool_init_slots[i] = GC_HEAP_INIT_SLOTS;
- size_pools[i].allocatable_pages = minimum_pages_for_size_pool(objspace, i);
+ size_pool->allocatable_pages = minimum_pages_for_size_pool(objspace, size_pool);
}
heap_pages_expand_sorted(objspace);
@@ -10829,7 +10842,7 @@ gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE do
size_t minimum_pages = 0;
if (RTEST(expand_heap)) {
- minimum_pages = minimum_pages_for_size_pool(objspace, i);
+ minimum_pages = minimum_pages_for_size_pool(objspace, size_pool);
}
heap_add_pages(objspace, size_pool, heap, MAX(minimum_pages, heap->total_pages));
@@ -11643,8 +11656,7 @@ gc_set_initial_pages(rb_objspace_t *objspace)
if (size_pool_init_slots > size_pool->eden_heap.total_slots) {
size_t slots = size_pool_init_slots - size_pool->eden_heap.total_slots;
- int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
- size_pool->allocatable_pages = slots * multiple / HEAP_PAGE_OBJ_LIMIT;
+ size_pool->allocatable_pages = slots_to_pages_for_size_pool(objspace, size_pool, slots);
}
else {
/* We already have more slots than size_pool_init_slots allows, so