path: root/gc.c
Diffstat (limited to 'gc.c')
-rw-r--r--  gc.c  116
1 file changed, 54 insertions, 62 deletions
diff --git a/gc.c b/gc.c
index c7e3034240..9af83de055 100644
--- a/gc.c
+++ b/gc.c
@@ -1817,45 +1817,34 @@ heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj
gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
}
-static inline bool
+static inline void
heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
{
asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
GC_ASSERT(page->free_slots != 0);
+ GC_ASSERT(page->freelist != NULL);
- if (page->freelist) {
- page->free_next = heap->free_pages;
- heap->free_pages = page;
+ page->free_next = heap->free_pages;
+ heap->free_pages = page;
- RUBY_DEBUG_LOG("page:%p freelist:%p", page, page->freelist);
+ RUBY_DEBUG_LOG("page:%p freelist:%p", page, page->freelist);
- asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
- return true;
- }
- else {
- asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
- return false;
- }
+ asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
}
#if GC_ENABLE_INCREMENTAL_MARK
-static inline int
+static inline void
heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
- if (page->freelist) {
- page->free_next = heap->pooled_pages;
- heap->pooled_pages = page;
- objspace->rincgc.pooled_slots += page->free_slots;
- asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
+ GC_ASSERT(page->free_slots != 0);
+ GC_ASSERT(page->freelist != NULL);
- return TRUE;
- }
- else {
- asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
+ page->free_next = heap->pooled_pages;
+ heap->pooled_pages = page;
+ objspace->rincgc.pooled_slots += page->free_slots;
- return FALSE;
- }
+ asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
}
#endif
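
The hunk above tightens the contract of heap_add_freepage() and heap_add_poolpage(): instead of checking page->freelist and reporting success, they now assert that callers only pass pages with a non-empty freelist and return void. The following stand-alone sketch (hypothetical, with simplified types rather than the real rb_heap_t / struct heap_page, ASAN bookkeeping omitted) illustrates that caller-side invariant:

#include <assert.h>
#include <stddef.h>

struct free_cell { struct free_cell *next; };

struct page {
    struct free_cell *freelist;   /* head of this page's free-slot list */
    struct page      *free_next;  /* link in the heap's free-page list  */
    int               free_slots; /* number of free slots on this page  */
};

struct heap { struct page *free_pages; };

/* New-style helper: assert the invariant instead of returning a flag. */
static void
add_freepage(struct heap *heap, struct page *page)
{
    assert(page->free_slots != 0);
    assert(page->freelist != NULL);

    page->free_next  = heap->free_pages;
    heap->free_pages = page;
}

/* Caller side: only pages that actually produced free slots are enqueued,
 * so the helper never has to report failure. */
static void
after_sweep(struct heap *heap, struct page *page, int free_slots)
{
    page->free_slots = free_slots;
    if (free_slots > 0) {
        add_freepage(heap, page);   /* no return value left to check */
    }
}

int
main(void)
{
    struct free_cell cell = { NULL };
    struct page pg = { &cell, NULL, 0 };
    struct heap hp = { NULL };

    after_sweep(&hp, &pg, 1);       /* the page yielded one free slot */
    return hp.free_pages == &pg ? 0 : 1;
}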
@@ -2317,7 +2306,7 @@ rvargc_find_contiguous_slots(int slots, RVALUE *freelist)
}
#endif
-static inline bool heap_add_freepage(rb_heap_t *heap, struct heap_page *page);
+static inline void heap_add_freepage(rb_heap_t *heap, struct heap_page *page);
static struct heap_page * heap_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap);
static inline void ractor_set_cache(rb_ractor_t *cr, struct heap_page *page);
@@ -5563,32 +5552,7 @@ gc_sweep_start(rb_objspace_t *objspace)
rb_ractor_t *r = NULL;
list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
- struct heap_page *page = r->newobj_cache.using_page;
- RVALUE *freelist = r->newobj_cache.freelist;
- RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", page, freelist);
-
- if (page && freelist) {
- asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
- if (page->freelist) {
- RVALUE *p = page->freelist;
- asan_unpoison_object((VALUE)p, false);
- while (p->as.free.next) {
- RVALUE *prev = p;
- p = p->as.free.next;
- asan_poison_object((VALUE)prev);
- asan_unpoison_object((VALUE)p, false);
- }
- p->as.free.next = freelist;
- asan_poison_object((VALUE)p);
- }
- else {
- page->freelist = freelist;
- }
- asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
- }
-
- r->newobj_cache.using_page = NULL;
- r->newobj_cache.freelist = NULL;
+ rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
}
}
@@ -5651,22 +5615,19 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
else if (free_slots > 0) {
#if GC_ENABLE_INCREMENTAL_MARK
if (need_pool) {
- if (heap_add_poolpage(objspace, heap, sweep_page)) {
- need_pool = FALSE;
- }
+ heap_add_poolpage(objspace, heap, sweep_page);
+ need_pool = FALSE;
}
else {
- if (heap_add_freepage(heap, sweep_page)) {
- swept_slots += free_slots;
- if (swept_slots > 2048) {
- break;
- }
+ heap_add_freepage(heap, sweep_page);
+ swept_slots += free_slots;
+ if (swept_slots > 2048) {
+ break;
}
}
#else
- if (heap_add_freepage(heap, sweep_page)) {
- break;
- }
+ heap_add_freepage(heap, sweep_page);
+ break;
#endif
}
else {
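
With the add functions now infallible, gc_sweep_step() no longer branches on their return value: every swept page with free_slots > 0 is enqueued, swept_slots is counted unconditionally, and the non-incremental build stops at the first such page. A hypothetical stand-alone driver (simplified page model, not the real objspace types) sketching the resulting control flow:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical page model: just the free slots recovered by sweeping it. */
struct swept_page { int free_slots; };

/* Returns how many pages the step touches before it stops. */
static int
sweep_step(const struct swept_page *pages, int npages,
           bool incremental_mark, bool need_pool)
{
    int swept_slots = 0;

    for (int i = 0; i < npages; i++) {
        int free_slots = pages[i].free_slots;
        if (free_slots <= 0) continue;          /* page is still full: keep going */

        if (!incremental_mark) return i + 1;    /* first free page ends the step  */

        if (need_pool) {
            need_pool = false;                  /* first free page feeds the pool */
        }
        else {
            swept_slots += free_slots;          /* counted unconditionally now    */
            if (swept_slots > 2048) return i + 1;
        }
    }
    return npages;                              /* swept everything available */
}

int
main(void)
{
    struct swept_page pages[] = { {0}, {300}, {1200}, {900}, {500} };
    int n = (int)(sizeof(pages) / sizeof(pages[0]));

    printf("non-incremental: stops after %d pages\n", sweep_step(pages, n, false, false));
    printf("incremental:     stops after %d pages\n", sweep_step(pages, n, true, false));
    return 0;
}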
@@ -8643,6 +8604,37 @@ rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
/* GC */
void
+rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
+{
+ struct heap_page *page = newobj_cache->using_page;
+ RVALUE *freelist = newobj_cache->freelist;
+ RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", page, freelist);
+
+ if (page && freelist) {
+ asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
+ if (page->freelist) {
+ RVALUE *p = page->freelist;
+ asan_unpoison_object((VALUE)p, false);
+ while (p->as.free.next) {
+ RVALUE *prev = p;
+ p = p->as.free.next;
+ asan_poison_object((VALUE)prev);
+ asan_unpoison_object((VALUE)p, false);
+ }
+ p->as.free.next = freelist;
+ asan_poison_object((VALUE)p);
+ }
+ else {
+ page->freelist = freelist;
+ }
+ asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
+ }
+
+ newobj_cache->using_page = NULL;
+ newobj_cache->freelist = NULL;
+}
+
+void
rb_gc_force_recycle(VALUE obj)
{
rb_objspace_t *objspace = &rb_objspace;
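
The new rb_gc_ractor_newobj_cache_clear() splices a ractor's cached freelist back onto its using_page: it walks to the tail of the page's own freelist and appends the cached list, or installs the cached list directly when the page's list is empty. A minimal stand-alone sketch of that splice (hypothetical simplified types, ASAN poisoning omitted):

#include <stddef.h>
#include <stdio.h>

struct cell { struct cell *next; };            /* one free slot          */
struct page { struct cell *freelist; };        /* page owning a freelist */

/* Append a ractor's cached freelist to the page it was taken from. */
static void
return_cached_freelist(struct page *page, struct cell *cached)
{
    if (page == NULL || cached == NULL) return;

    if (page->freelist) {
        struct cell *p = page->freelist;
        while (p->next) {                      /* walk to the tail           */
            p = p->next;
        }
        p->next = cached;                      /* append the cached list     */
    }
    else {
        page->freelist = cached;               /* empty list: adopt it whole */
    }
}

int
main(void)
{
    struct cell a = { NULL }, b = { NULL }, c = { NULL };
    struct page pg = { &a };                   /* page already has one cell */
    b.next = &c;                               /* cache holds two cells     */

    return_cached_freelist(&pg, &b);

    /* Expect the spliced chain a -> b -> c. */
    printf("%s\n", (pg.freelist == &a && a.next == &b && b.next == &c) ? "ok" : "broken");
    return 0;
}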