summaryrefslogtreecommitdiff
path: root/gc.c
diff options
context:
space:
mode:
authorPeter Zhu <peter@peterzhu.ca>2023-08-21 14:13:24 -0400
committerPeter Zhu <peter@peterzhu.ca>2023-08-27 09:39:29 -0400
commitb7237e3bbd36e7c520c4cbaf1f866b6dcc265a99 (patch)
tree45a80f00e101e5460f210d6c55c0a1a9b42a825c /gc.c
parent5c98ee02d2ac7f20ab978be7645801adf03e4302 (diff)
Free all empty heap pages in Process.warmup
This commit adds `free_empty_pages` which frees all empty heap pages and moves the number of pages freed to the allocatable pages counter. This is used in Process.warmup to improve performance because page invalidation from copy-on-write is slower than allocating a new page.
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/8257
Diffstat (limited to 'gc.c')
-rw-r--r--gc.c44
1 file changed, 44 insertions, 0 deletions
diff --git a/gc.c b/gc.c
index 6c4bffa95d..34334f3278 100644
--- a/gc.c
+++ b/gc.c
@@ -9647,11 +9647,55 @@ gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE
return Qnil;
}
+static void
+free_empty_pages(void)
+{
+ rb_objspace_t *objspace = &rb_objspace;
+
+ for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+ /* Move all empty pages to the tomb heap for freeing. */
+ rb_size_pool_t *size_pool = &size_pools[i];
+ rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
+ rb_heap_t *tomb_heap = SIZE_POOL_TOMB_HEAP(size_pool);
+
+ size_t freed_pages = 0;
+
+ struct heap_page **next_page_ptr = &heap->free_pages;
+ struct heap_page *page = heap->free_pages;
+ while (page) {
+        /* All finalizers should have been run in gc_start_internal, so there
+         * should be no objects that require finalization. */
+ GC_ASSERT(page->final_slots == 0);
+
+ struct heap_page *next_page = page->free_next;
+
+ if (page->free_slots == page->total_slots) {
+ heap_unlink_page(objspace, heap, page);
+ heap_add_page(objspace, size_pool, tomb_heap, page);
+ freed_pages++;
+ }
+ else {
+ *next_page_ptr = page;
+ next_page_ptr = &page->free_next;
+ }
+
+ page = next_page;
+ }
+
+ *next_page_ptr = NULL;
+
+ size_pool_allocatable_pages_set(objspace, size_pool, size_pool->allocatable_pages + freed_pages);
+ }
+
+ heap_pages_free_unused_pages(objspace);
+}
+
void
rb_gc_prepare_heap(void)
{
rb_objspace_each_objects(gc_set_candidate_object_i, NULL);
gc_start_internal(NULL, Qtrue, Qtrue, Qtrue, Qtrue, Qtrue);
+ free_empty_pages();
}
static int