author    nagachika <nagachika@ruby-lang.org>    2021-07-18 15:23:09 +0900
committer nagachika <nagachika@ruby-lang.org>    2021-07-22 11:47:00 +0900
commit    a215c6d0448764131cbbb48b476dc698b51c2273 (patch)
tree      93492686fdf93e14d7694f543bb872cb37543dbf /gc.c
parent    c4b602e80fdc1824c68602302a4ed82a0e90d5b7 (diff)
partially merge revision(s) 119697f61e2b2b157816a8aa33aada5863959900, 4a627dbdfd1165022fa9e716ba845e937b03773d: [Backport #18014]

[Bug #18014] Fix rb_gc_force_recycle unmark before sweep

If we force recycle an object before the page is swept, we should clear it
in the mark bitmap. If we don't clear it in the bitmap, then during sweeping
we won't account for this free slot, so the `free_slots` count of the page
will be incorrect.
---
 gc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

[Bug #18014] Fix memory leak in GC when using Ractors

When a Ractor is removed, the freelist in the Ractor cache is not returned
to the GC, leaving the freelist permanently lost. This commit recycles the
freelist when the Ractor is destroyed, preventing a memory leak.
---
 gc.c          | 116 +++++++++++++++++++++++++++-------------
 internal/gc.h |   6 +++
 ractor.c      |   3 ++
 ractor_core.h |   5 +--
 4 files changed, 64 insertions(+), 66 deletions(-)

Co-authored-by: Peter Zhu <peter@peterzhu.ca>
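The heart of the leak fix is a list splice: instead of dropping the Ractor cache's freelist, it is walked back onto the owning page's freelist. A minimal standalone sketch of that splice, using simplified stand-in types (`free_cell` and `page` are illustrative, not the real `RVALUE` and `struct heap_page`):

#include <stdio.h>

/* Illustrative stand-ins for the real GC structures. */
struct free_cell {
    struct free_cell *next;
};

struct page {
    struct free_cell *freelist;
};

/* Splice a cached freelist back onto its page, as the leak fix does:
 * walk to the tail of the page's list, then hang the cached list there. */
static void
splice_freelist(struct page *pg, struct free_cell *cached)
{
    if (cached == NULL) return;
    if (pg->freelist == NULL) {
        pg->freelist = cached;
        return;
    }
    struct free_cell *p = pg->freelist;
    while (p->next) {
        p = p->next;
    }
    p->next = cached;
}

int
main(void)
{
    struct free_cell a = {NULL}, b = {NULL}, c = {NULL};
    struct page pg = {&a};
    b.next = &c;                 /* cached list: b -> c */

    splice_freelist(&pg, &b);    /* page list becomes a -> b -> c */

    int n = 0;
    for (struct free_cell *p = pg.freelist; p; p = p->next) n++;
    printf("%d cells on the page freelist\n", n);   /* prints 3 */
    return 0;
}

`rb_gc_ractor_newobj_cache_clear` in the diff below performs the same walk, with additional ASan poisoning around each cell it touches.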
Diffstat (limited to 'gc.c')
-rw-r--r--  gc.c  102
1 file changed, 53 insertions(+), 49 deletions(-)
diff --git a/gc.c b/gc.c
index b9ca9c11fa..15faf4df3c 100644
--- a/gc.c
+++ b/gc.c
@@ -1713,45 +1713,34 @@ heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj
     gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
 }
 
-static inline bool
+static inline void
 heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
 {
     asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
     GC_ASSERT(page->free_slots != 0);
+    GC_ASSERT(page->freelist != NULL);
 
-    if (page->freelist) {
-        page->free_next = heap->free_pages;
-        heap->free_pages = page;
+    page->free_next = heap->free_pages;
+    heap->free_pages = page;
 
-        RUBY_DEBUG_LOG("page:%p freelist:%p", page, page->freelist);
+    RUBY_DEBUG_LOG("page:%p freelist:%p", page, page->freelist);
 
-        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
-        return true;
-    }
-    else {
-        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
-        return false;
-    }
+    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
 }
 
 #if GC_ENABLE_INCREMENTAL_MARK
-static inline int
+static inline void
 heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
 {
     asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
+    GC_ASSERT(page->free_slots != 0);
+    GC_ASSERT(page->freelist != NULL);
 
-    if (page->freelist) {
-        page->free_next = heap->pooled_pages;
-        heap->pooled_pages = page;
-        objspace->rincgc.pooled_slots += page->free_slots;
-        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
+    page->free_next = heap->pooled_pages;
+    heap->pooled_pages = page;
+    objspace->rincgc.pooled_slots += page->free_slots;
 
-        return TRUE;
-    }
-    else {
-        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
-
-        return FALSE;
-    }
+    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
 }
 #endif
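Both helpers bracket every access to `page->freelist` with `asan_unpoison_memory_region`/`asan_poison_memory_region`, so any touch of the freelist outside such a window is caught by AddressSanitizer. A minimal sketch of that discipline using the public ASan macros; the `slot` struct is a hypothetical stand-in, and the checks are only live when built with -fsanitize=address:

#include <sanitizer/asan_interface.h>
#include <stdlib.h>

struct slot { struct slot *next; };   /* hypothetical freelist cell */

int
main(void)
{
    struct slot *head = malloc(sizeof(*head));
    head->next = NULL;

    /* Poison the link so any unsanctioned read trips ASan. */
    ASAN_POISON_MEMORY_REGION(&head->next, sizeof(head->next));

    /* Sanctioned access: unpoison, touch, re-poison. */
    ASAN_UNPOISON_MEMORY_REGION(&head->next, sizeof(head->next));
    struct slot *n = head->next;      /* fine inside the window */
    ASAN_POISON_MEMORY_REGION(&head->next, sizeof(head->next));
    (void)n;

    /* Reading head->next here, while poisoned, would abort with a
     * use-after-poison report. Unpoison before handing back to malloc. */
    ASAN_UNPOISON_MEMORY_REGION(&head->next, sizeof(head->next));
    free(head);
    return 0;
}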
@@ -5060,20 +5049,7 @@ gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
     rb_ractor_t *r = NULL;
     list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
-        struct heap_page *page = r->newobj_cache.using_page;
-        RVALUE *freelist = r->newobj_cache.freelist;
-        RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", page, freelist);
-
-        if (page && freelist) {
-            RVALUE **p = &page->freelist;
-            while (*p) {
-                p = &(*p)->as.free.next;
-            }
-            *p = freelist;
-        }
-
-        r->newobj_cache.using_page = NULL;
-        r->newobj_cache.freelist = NULL;
+        rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
     }
 }
@@ -5146,21 +5122,18 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
         else if (free_slots > 0) {
 #if GC_ENABLE_INCREMENTAL_MARK
             if (need_pool) {
-                if (heap_add_poolpage(objspace, heap, sweep_page)) {
-                    need_pool = FALSE;
-                }
+                heap_add_poolpage(objspace, heap, sweep_page);
+                need_pool = FALSE;
             }
             else {
-                if (heap_add_freepage(heap, sweep_page)) {
-                    swept_slots += free_slots;
-                    if (swept_slots > 2048) {
-                        break;
-                    }
-                }
+                heap_add_freepage(heap, sweep_page);
+                swept_slots += free_slots;
+                if (swept_slots > 2048) {
+                    break;
+                }
             }
 #else
             heap_add_freepage(heap, sweep_page);
             break;
 #endif
         }
         else {
@@ -7979,6 +7952,37 @@ rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
 /* GC */
 
 void
+rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
+{
+    struct heap_page *page = newobj_cache->using_page;
+    RVALUE *freelist = newobj_cache->freelist;
+    RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", page, freelist);
+
+    if (page && freelist) {
+        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
+        if (page->freelist) {
+            RVALUE *p = page->freelist;
+            asan_unpoison_object((VALUE)p, false);
+            while (p->as.free.next) {
+                RVALUE *prev = p;
+                p = p->as.free.next;
+                asan_poison_object((VALUE)prev);
+                asan_unpoison_object((VALUE)p, false);
+            }
+            p->as.free.next = freelist;
+            asan_poison_object((VALUE)p);
+        }
+        else {
+            page->freelist = freelist;
+        }
+        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
+    }
+
+    newobj_cache->using_page = NULL;
+    newobj_cache->freelist = NULL;
+}
+
+void
 rb_gc_force_recycle(VALUE obj)
 {
     rb_objspace_t *objspace = &rb_objspace;
@@ -8007,7 +8011,7 @@ rb_gc_force_recycle(VALUE obj)
     }
     else {
 #endif
-        if (is_old || !GET_HEAP_PAGE(obj)->flags.before_sweep) {
+        if (is_old || GET_HEAP_PAGE(obj)->flags.before_sweep) {
            CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
        }
        CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
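This last hunk is the one-line [Bug #18014] fix itself: the `before_sweep` test was inverted, so an object force-recycled on a page still awaiting sweep kept its mark bit, and sweeping then under-counted that page's `free_slots`. A simplified, self-contained sketch of this kind of mark-bitmap bookkeeping; the helpers mirror the spirit of `CLEAR_IN_BITMAP`, not its exact definition:

#include <stdint.h>
#include <stdio.h>

/* One bit per object slot, packed into machine words,
 * loosely modeled on the GC's mark bitmaps. */
#define BITS_PER_WORD (sizeof(uintptr_t) * 8)
#define WORD_OF(i)  ((i) / BITS_PER_WORD)
#define MASK_OF(i)  ((uintptr_t)1 << ((i) % BITS_PER_WORD))

static void mark_slot(uintptr_t *bits, size_t i)  { bits[WORD_OF(i)] |= MASK_OF(i); }
static void clear_slot(uintptr_t *bits, size_t i) { bits[WORD_OF(i)] &= ~MASK_OF(i); }
static int  marked_p(const uintptr_t *bits, size_t i) { return (bits[WORD_OF(i)] & MASK_OF(i)) != 0; }

int
main(void)
{
    uintptr_t mark_bits[4] = {0};   /* 256 slots on a 64-bit build */

    mark_slot(mark_bits, 42);       /* object at slot 42 is marked */

    /* Force-recycling slot 42 before the page is swept: clear its
     * mark bit so the sweeper counts the slot as free. */
    clear_slot(mark_bits, 42);

    printf("slot 42 marked: %d\n", marked_p(mark_bits, 42));  /* prints 0 */
    return 0;
}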