summaryrefslogtreecommitdiff
path: root/gc.c
diff options
context:
space:
mode:
authorPeter Zhu <peter@peterzhu.ca>2023-07-31 09:24:48 -0400
committerPeter Zhu <peter@peterzhu.ca>2023-07-31 11:46:53 -0400
commitb98838b65cbf8fa2d228f9e0249414cba4e3197d (patch)
tree342e39f04c293d898f0d0e7d1d2997df30cb8fe7 /gc.c
parent1bda22f5fedea5364c6df321a3004c47c732144e (diff)
Store initial slots per size pool
This commit stores the initial slots per size pool, configured with the environment variables `RUBY_GC_HEAP_INIT_SIZE_%d_SLOTS`. This ensures that the configured initial slots remain a lower bound for the number of slots in the heap, which can prevent heaps from thrashing in size.
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/8116
Diffstat (limited to 'gc.c')
-rw-r--r--gc.c90
1 files changed, 45 insertions, 45 deletions
diff --git a/gc.c b/gc.c
index b686bbbaa4..fc36acf32a 100644
--- a/gc.c
+++ b/gc.c
@@ -341,7 +341,7 @@ rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
#define TICK_TYPE 1
typedef struct {
- size_t heap_init_slots;
+ size_t size_pool_init_slots[SIZE_POOL_COUNT];
size_t heap_free_slots;
double growth_factor;
size_t growth_max_slots;
@@ -364,7 +364,7 @@ typedef struct {
} ruby_gc_params_t;
static ruby_gc_params_t gc_params = {
- GC_HEAP_INIT_SLOTS,
+ { 0 },
GC_HEAP_FREE_SLOTS,
GC_HEAP_GROWTH_FACTOR,
GC_HEAP_GROWTH_MAX_SLOTS,
@@ -2264,6 +2264,14 @@ heap_add_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *he
}
static size_t
+minimum_pages_for_size_pool(rb_objspace_t *objspace, int size_pool_idx)
+{
+ rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
+ int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
+ return gc_params.size_pool_init_slots[size_pool_idx] * multiple / HEAP_PAGE_OBJ_LIMIT;
+}
+
+static size_t
heap_extend_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t free_slots, size_t total_slots, size_t used)
{
double goal_ratio = gc_params.heap_free_slots_goal_ratio;
@@ -2273,8 +2281,7 @@ heap_extend_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t fre
next_used = (size_t)(used * gc_params.growth_factor);
}
else if (total_slots == 0) {
- int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
- next_used = (gc_params.heap_init_slots * multiple) / HEAP_PAGE_OBJ_LIMIT;
+ next_used = minimum_pages_for_size_pool(objspace, (int)(size_pool - size_pools));
}
else {
/* Find `f' where free_slots = f * total_slots * goal_ratio
@@ -3716,9 +3723,10 @@ Init_heap(void)
/* Set size pools allocatable pages. */
for (int i = 0; i < SIZE_POOL_COUNT; i++) {
- rb_size_pool_t *size_pool = &size_pools[i];
- int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
- size_pool->allocatable_pages = gc_params.heap_init_slots * multiple / HEAP_PAGE_OBJ_LIMIT;
+ /* Set the default value of size_pool_init_slots. */
+ gc_params.size_pool_init_slots[i] = GC_HEAP_INIT_SLOTS;
+
+ size_pools[i].allocatable_pages = minimum_pages_for_size_pool(objspace, i);
}
heap_pages_expand_sorted(objspace);
@@ -5709,14 +5717,15 @@ gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
- size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
+ size_t init_slots = gc_params.size_pool_init_slots[size_pool - size_pools];
+ size_t min_free_slots = (size_t)(MAX(total_slots, init_slots) * gc_params.heap_free_slots_min_ratio);
/* If we don't have enough slots and we have pages on the tomb heap, move
* pages from the tomb heap to the eden heap. This may prevent page
* creation thrashing (frequently allocating and deallocating pages) and
* GC thrashing (running GC more frequently than required). */
struct heap_page *resurrected_page;
- while ((swept_slots < min_free_slots || swept_slots < gc_params.heap_init_slots) &&
+ while (swept_slots < min_free_slots &&
(resurrected_page = heap_page_resurrect(objspace, size_pool))) {
swept_slots += resurrected_page->free_slots;
@@ -5724,17 +5733,6 @@ gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
heap_add_freepage(heap, resurrected_page);
}
- /* Some size pools may have very few pages (or even no pages). These size pools
- * should still have allocatable pages. */
- if (min_free_slots < gc_params.heap_init_slots && swept_slots < gc_params.heap_init_slots) {
- int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
- size_t extra_slots = gc_params.heap_init_slots - swept_slots;
- size_t extend_page_count = CEILDIV(extra_slots * multiple, HEAP_PAGE_OBJ_LIMIT);
- if (extend_page_count > size_pool->allocatable_pages) {
- size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
- }
- }
-
if (swept_slots < min_free_slots) {
bool grow_heap = is_full_marking(objspace);
@@ -5745,7 +5743,11 @@ gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
size_pool->freed_slots > size_pool->empty_slots) &&
size_pool->allocatable_pages == 0;
- if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
+ /* Grow this heap if we haven't run at least RVALUE_OLD_AGE minor
+ * GC since the last major GC or if this heap is smaller than the
+ * configured initial size. */
+ if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE ||
+ total_slots < init_slots) {
grow_heap = TRUE;
}
else if (is_growth_heap) { /* Only growth heaps are allowed to start a major GC. */
@@ -8202,9 +8204,14 @@ gc_marks_finish(rb_objspace_t *objspace)
GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
- /* setup free-able page counts */
- if (max_free_slots < gc_params.heap_init_slots * r_mul) {
- max_free_slots = gc_params.heap_init_slots * r_mul;
+ /* Setup freeable slots. */
+ size_t total_init_slots = 0;
+ for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+ total_init_slots += gc_params.size_pool_init_slots[i] * r_mul;
+ }
+
+ if (max_free_slots < total_init_slots) {
+ max_free_slots = total_init_slots;
}
if (sweep_slots > max_free_slots) {
@@ -10781,7 +10788,6 @@ gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE do
/* Clear the heap. */
gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qfalse);
- size_t growth_slots = gc_params.heap_init_slots;
if (RTEST(double_heap)) {
rb_warn("double_heap is deprecated, please use expand_heap instead");
@@ -10799,8 +10805,7 @@ gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE do
size_t minimum_pages = 0;
if (RTEST(expand_heap)) {
- int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
- minimum_pages = growth_slots * multiple / HEAP_PAGE_OBJ_LIMIT;
+ minimum_pages = minimum_pages_for_size_pool(objspace, i);
}
heap_add_pages(objspace, size_pool, heap, MAX(minimum_pages, heap->total_pages));
@@ -11580,7 +11585,7 @@ get_envparam_double(const char *name, double *default_value, double lower_bound,
}
static void
-gc_set_initial_pages(rb_objspace_t *objspace, int global_heap_init_slots)
+gc_set_initial_pages(rb_objspace_t *objspace)
{
gc_rest(objspace);
@@ -11589,24 +11594,18 @@ gc_set_initial_pages(rb_objspace_t *objspace, int global_heap_init_slots)
char env_key[sizeof("RUBY_GC_HEAP_INIT_SIZE_" "_SLOTS") + DECIMAL_SIZE_OF_BITS(sizeof(size_pool->slot_size) * CHAR_BIT)];
snprintf(env_key, sizeof(env_key), "RUBY_GC_HEAP_INIT_SIZE_%d_SLOTS", size_pool->slot_size);
- size_t pool_init_slots = 0;
- if (!get_envparam_size(env_key, &pool_init_slots, 0)) {
- if (global_heap_init_slots) {
- // If we use the global init slot we ponderate it by slot size
- pool_init_slots = gc_params.heap_init_slots / (size_pool->slot_size / BASE_SLOT_SIZE);
- }
- else {
- continue;
- }
+ size_t size_pool_init_slots = gc_params.size_pool_init_slots[i];
+ if (get_envparam_size(env_key, &size_pool_init_slots, 0)) {
+ gc_params.size_pool_init_slots[i] = size_pool_init_slots;
}
- if (pool_init_slots > size_pool->eden_heap.total_slots) {
- size_t slots = pool_init_slots - size_pool->eden_heap.total_slots;
+ if (size_pool_init_slots > size_pool->eden_heap.total_slots) {
+ size_t slots = size_pool_init_slots - size_pool->eden_heap.total_slots;
int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
size_pool->allocatable_pages = slots * multiple / HEAP_PAGE_OBJ_LIMIT;
}
else {
- /* We already have more slots than heap_init_slots allows, so
+ /* We already have more slots than size_pool_init_slots allows, so
* prevent creating more pages. */
size_pool->allocatable_pages = 0;
}
@@ -11666,12 +11665,13 @@ ruby_gc_set_params(void)
}
/* RUBY_GC_HEAP_INIT_SLOTS */
- if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
- gc_set_initial_pages(objspace, TRUE);
- }
- else {
- gc_set_initial_pages(objspace, FALSE);
+ size_t global_init_slots;
+ if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &global_init_slots, 0)) {
+ for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+ gc_params.size_pool_init_slots[i] = global_init_slots;
+ }
}
+ gc_set_initial_pages(objspace);
get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);