diff options
author | Aaron Patterson <tenderlove@ruby-lang.org> | 2019-10-30 16:28:21 -0700 |
---|---|---|
committer | Aaron Patterson <tenderlove@ruby-lang.org> | 2019-10-30 17:28:55 -0700 |
commit | e1bf29314feee6aaf535917da0178e868e7ff3fa (patch) | |
tree | c0a1ac3fab64003143a18d100e5ab19373d00ff7 /gc.c | |
parent | e08f2e47e302db71621cfe2e770c087a5cf5146d (diff) |
Fix zero free objects assertion
This commit attempts to fix the following error:
http://ci.rvm.jp/results/trunk-gc-asserts@ruby-sky1/2353281
Each non-full heap_page struct contains a reference to the next page
that contains free slots. Compaction could fill any page, including
pages that happen to be linked to as "pages which contain free slots".
To fix this, we'll iterate each page, and rebuild the "free page list"
depending on the number of actual free slots on that page. If there are
no free slots on the page, we'll set the free_next pointer to NULL.
Finally we'll pop one page off the "free page list" and set it as the
"using page" for the next allocation.
Diffstat (limited to 'gc.c')
-rw-r--r-- | gc.c | 44 |
1 file changed, 29 insertions, 15 deletions
@@ -8553,14 +8553,14 @@ gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_doubl heap_add_pages(objspace, heap_eden, heap_allocated_pages); } - VALUE moved_list; + VALUE moved_list_head; VALUE disabled = rb_gc_disable(); if (use_toward_empty) { - moved_list = gc_compact_heap(objspace, compare_free_slots); + moved_list_head = gc_compact_heap(objspace, compare_free_slots); } else { - moved_list = gc_compact_heap(objspace, compare_pinned); + moved_list_head = gc_compact_heap(objspace, compare_pinned); } heap_eden->freelist = NULL; @@ -8573,32 +8573,46 @@ gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_doubl rb_clear_method_cache_by_class(rb_cObject); rb_clear_constant_cache(); - heap_eden->free_pages = NULL; - heap_eden->using_page = NULL; - while (moved_list) { + /* Clear RMOVED manual rather than rely on GC */ + while (moved_list_head) { VALUE next_moved; struct heap_page *page; - page = GET_HEAP_PAGE(moved_list); - next_moved = RMOVED(moved_list)->next; + page = GET_HEAP_PAGE(moved_list_head); + next_moved = RMOVED(moved_list_head)->next; - RMOVED(moved_list)->flags = 0; - RMOVED(moved_list)->destination = 0; - RMOVED(moved_list)->next = 0; + RMOVED(moved_list_head)->flags = 0; + RMOVED(moved_list_head)->destination = 0; + RMOVED(moved_list_head)->next = 0; page->free_slots++; - heap_page_add_freeobj(objspace, page, moved_list); + heap_page_add_freeobj(objspace, page, moved_list_head); if (page->free_slots == page->total_slots && heap_pages_freeable_pages > 0) { heap_pages_freeable_pages--; heap_unlink_page(objspace, heap_eden, page); heap_add_page(objspace, heap_tomb, page); - } else if (page->free_slots == page->total_slots) { - page->free_next = NULL; } objspace->profile.total_freed_objects++; - moved_list = next_moved; + moved_list_head = next_moved; } + heap_eden->free_pages = NULL; + + /* Rebuild free_pages linked list */ + size_t i; + for (i = 0; i < heap_allocated_pages; ++i) { + struct heap_page *page = 
heap_pages_sorted[i]; + if (page->free_slots > 0) { + page->free_next = heap_eden->free_pages; + heap_eden->free_pages = page; + } else { + page->free_next = NULL; + } + } + + heap_eden->using_page = heap_eden->free_pages; + heap_eden->free_pages = heap_eden->free_pages->free_next; + if (use_verifier) { gc_verify_internal_consistency(objspace); } |