From 4a627dbdfd1165022fa9e716ba845e937b03773d Mon Sep 17 00:00:00 2001
From: Peter Zhu
Date: Tue, 29 Jun 2021 14:32:50 -0400
Subject: [Bug #18014] Fix memory leak in GC when using Ractors

When a Ractor is removed, the freelist in the Ractor cache is not
returned to the GC, leaving the freelist permanently lost. This commit
recycles the freelist when the Ractor is destroyed, preventing a memory
leak from occurring.
---
 gc.c | 116 +++++++++++++++++++++++++++++++------------------------------------
 1 file changed, 54 insertions(+), 62 deletions(-)

(limited to 'gc.c')

diff --git a/gc.c b/gc.c
index c7e3034240..9af83de055 100644
--- a/gc.c
+++ b/gc.c
@@ -1817,45 +1817,34 @@ heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj
     gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
 }
 
-static inline bool
+static inline void
 heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
 {
     asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
     GC_ASSERT(page->free_slots != 0);
+    GC_ASSERT(page->freelist != NULL);
 
-    if (page->freelist) {
-        page->free_next = heap->free_pages;
-        heap->free_pages = page;
+    page->free_next = heap->free_pages;
+    heap->free_pages = page;
 
-        RUBY_DEBUG_LOG("page:%p freelist:%p", page, page->freelist);
+    RUBY_DEBUG_LOG("page:%p freelist:%p", page, page->freelist);
 
-        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
-        return true;
-    }
-    else {
-        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
-        return false;
-    }
+    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
 }
 
 #if GC_ENABLE_INCREMENTAL_MARK
-static inline int
+static inline void
 heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
 {
     asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
-    if (page->freelist) {
-        page->free_next = heap->pooled_pages;
-        heap->pooled_pages = page;
-        objspace->rincgc.pooled_slots += page->free_slots;
-        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
+    GC_ASSERT(page->free_slots != 0);
+    GC_ASSERT(page->freelist != NULL);
 
-        return TRUE;
-    }
-    else {
-        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
+    page->free_next = heap->pooled_pages;
+    heap->pooled_pages = page;
+    objspace->rincgc.pooled_slots += page->free_slots;
 
-        return FALSE;
-    }
+    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
 }
 #endif
 
@@ -2317,7 +2306,7 @@ rvargc_find_contiguous_slots(int slots, RVALUE *freelist)
 }
 #endif
 
-static inline bool heap_add_freepage(rb_heap_t *heap, struct heap_page *page);
+static inline void heap_add_freepage(rb_heap_t *heap, struct heap_page *page);
 static struct heap_page * heap_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap);
 static inline void ractor_set_cache(rb_ractor_t *cr, struct heap_page *page);
 
@@ -5563,32 +5552,7 @@ gc_sweep_start(rb_objspace_t *objspace)
 
     rb_ractor_t *r = NULL;
     list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
-        struct heap_page *page = r->newobj_cache.using_page;
-        RVALUE *freelist = r->newobj_cache.freelist;
-        RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", page, freelist);
-
-        if (page && freelist) {
-            asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
-            if (page->freelist) {
-                RVALUE *p = page->freelist;
-                asan_unpoison_object((VALUE)p, false);
-                while (p->as.free.next) {
-                    RVALUE *prev = p;
-                    p = p->as.free.next;
-                    asan_poison_object((VALUE)prev);
-                    asan_unpoison_object((VALUE)p, false);
-                }
-                p->as.free.next = freelist;
-                asan_poison_object((VALUE)p);
-            }
-            else {
-                page->freelist = freelist;
-            }
-            asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
-        }
-
-        r->newobj_cache.using_page = NULL;
-        r->newobj_cache.freelist = NULL;
+        rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
     }
 }
 
@@ -5651,22 +5615,19 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
         else if (free_slots > 0) {
 #if GC_ENABLE_INCREMENTAL_MARK
             if (need_pool) {
-                if (heap_add_poolpage(objspace, heap, sweep_page)) {
-                    need_pool = FALSE;
-                }
+                heap_add_poolpage(objspace, heap, sweep_page);
+                need_pool = FALSE;
             }
             else {
-                if (heap_add_freepage(heap, sweep_page)) {
-                    swept_slots += free_slots;
-                    if (swept_slots > 2048) {
-                        break;
-                    }
+                heap_add_freepage(heap, sweep_page);
+                swept_slots += free_slots;
+                if (swept_slots > 2048) {
+                    break;
                 }
             }
 #else
-            if (heap_add_freepage(heap, sweep_page)) {
-                break;
-            }
+            heap_add_freepage(heap, sweep_page);
+            break;
 #endif
         }
         else {
@@ -8642,6 +8603,37 @@ rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
 
 /* GC */
 
+void
+rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
+{
+    struct heap_page *page = newobj_cache->using_page;
+    RVALUE *freelist = newobj_cache->freelist;
+    RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", page, freelist);
+
+    if (page && freelist) {
+        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
+        if (page->freelist) {
+            RVALUE *p = page->freelist;
+            asan_unpoison_object((VALUE)p, false);
+            while (p->as.free.next) {
+                RVALUE *prev = p;
+                p = p->as.free.next;
+                asan_poison_object((VALUE)prev);
+                asan_unpoison_object((VALUE)p, false);
+            }
+            p->as.free.next = freelist;
+            asan_poison_object((VALUE)p);
+        }
+        else {
+            page->freelist = freelist;
+        }
+        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
+    }
+
+    newobj_cache->using_page = NULL;
+    newobj_cache->freelist = NULL;
+}
+
 void
 rb_gc_force_recycle(VALUE obj)
 {
--
cgit v1.2.3
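
For readers who want to see the list manipulation in isolation, below is a minimal,
self-contained sketch of the splice that rb_gc_ractor_newobj_cache_clear performs.
The types (free_cell, page, cache) and the function name cache_clear are simplified
stand-ins, not the real RVALUE, heap_page, or rb_ractor_newobj_cache_t, and the ASAN
poison/unpoison bookkeeping is omitted. The idea is the same: walk to the tail of the
page's freelist, append the Ractor cache's freelist, and clear the cache so the free
slots are owned by the page (and hence the GC) again.

    #include <assert.h>
    #include <stddef.h>

    /* Simplified stand-ins for RVALUE free cells, a heap page, and the
     * per-Ractor newobj cache. */
    struct free_cell { struct free_cell *next; };
    struct page      { struct free_cell *freelist; };
    struct cache     { struct page *using_page; struct free_cell *freelist; };

    /* Return the cached freelist to its page, mirroring the splice done by
     * rb_gc_ractor_newobj_cache_clear (minus the ASAN bookkeeping). */
    static void
    cache_clear(struct cache *c)
    {
        struct page *page = c->using_page;
        struct free_cell *freelist = c->freelist;

        if (page && freelist) {
            if (page->freelist) {
                /* Walk to the tail of the page's list, then append the
                 * cache's list behind it. */
                struct free_cell *p = page->freelist;
                while (p->next) p = p->next;
                p->next = freelist;
            }
            else {
                page->freelist = freelist;
            }
        }

        /* The cache no longer owns any slots. */
        c->using_page = NULL;
        c->freelist = NULL;
    }

    int
    main(void)
    {
        struct free_cell a = {0}, b = {0}, x = {0};
        struct page pg = { &a };        /* page already holds one free cell */
        a.next = &b;                    /* ...actually two                   */
        struct cache c = { &pg, &x };   /* Ractor cache holds one more       */

        cache_clear(&c);

        /* The page's freelist now ends with the cell from the cache. */
        assert(pg.freelist == &a && b.next == &x && x.next == NULL);
        assert(c.using_page == NULL && c.freelist == NULL);
        return 0;
    }

In the real function the walk is bracketed by asan_unpoison_object/asan_poison_object
calls because free slots are poisoned in AddressSanitizer builds; the splice itself is
the same operation as in this sketch.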