author     Koichi Sasada <ko1@atdot.net>  2020-12-10 10:58:25 +0900
committer  Koichi Sasada <ko1@atdot.net>  2020-12-10 13:05:43 +0900
commit     da3bca513f437b05b3953c3712ff48621fc5e008
tree       0bf9db6a75215f431132668d365040f4e6deb54a
parent     554c0949777cd495e5a1296bd6719fcf508a70d0
cache free pages per ractor
The per-ractor object allocation cache (GH-#3842) cached only one page; this patch caches several pages so that at least 512 free slots are kept available per ractor when possible. Note that even if the number of cached free slots is increased, all cached slots are returned when GC is invoked.
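For context, here is a minimal, self-contained sketch of the technique this patch applies. It is not the gc.c code itself: every type and function below (slot_t, page_t, newobj_cache_t, heap_take_free_pages, cache_alloc, ...) is a simplified stand-in for the real RVALUE, struct heap_page, and rb_ractor_t newobj_cache structures. Each per-ractor cache keeps a pop-only freelist plus a chain of reserved free pages; the fast path pops a slot, and the slow path refills the freelist from the next cached page before falling back to the shared heap.

#include <stddef.h>
#include <stdio.h>

#define SLOT_CACHE_NUM 512   /* mirrors RACTOR_SLOT_CACHE_NUM in the patch */
#define SLOTS_PER_PAGE 128   /* hypothetical page capacity */

/* Simplified stand-ins for RVALUE and struct heap_page. */
typedef struct slot {
    struct slot *next;               /* freelist link (as.free.next in gc.c) */
} slot_t;

typedef struct page {
    struct page *free_next;          /* chain of pages with free slots */
    slot_t *freelist;
    size_t free_slots;
    slot_t slots[SLOTS_PER_PAGE];
} page_t;

/* Per-ractor allocation cache, like rb_ractor_t's newobj_cache. */
typedef struct {
    page_t *using_page;
    slot_t *freelist;
    page_t *free_pages;              /* the extra cached pages this patch adds */
} newobj_cache_t;

typedef struct {
    page_t *free_pages;              /* shared heap's free page chain */
} heap_t;

/* Move one page's freelist into the cache (cf. ractor_cache_fill_freelist). */
static void cache_fill_freelist(newobj_cache_t *cache, page_t *page)
{
    cache->using_page = page;
    cache->freelist = page->freelist;
    page->free_slots = 0;
    page->freelist = NULL;
}

/* Detach enough pages from the heap to cover ~SLOT_CACHE_NUM free slots
 * (cf. heap_next_freepages); in gc.c this step runs under the VM lock. */
static page_t *heap_take_free_pages(heap_t *heap)
{
    page_t *head = heap->free_pages;
    if (head == NULL) return NULL;

    size_t free_slots = head->free_slots;
    page_t *p = head;
    while (p->free_next && free_slots < SLOT_CACHE_NUM) {
        p = p->free_next;
        free_slots += p->free_slots;
    }
    heap->free_pages = p->free_next; /* the heap keeps the remainder */
    p->free_next = NULL;             /* the cache now owns head..p */
    return head;
}

/* Fast path: pop a slot; slow path: refill from cached pages, then the heap. */
static slot_t *cache_alloc(newobj_cache_t *cache, heap_t *heap)
{
  retry:;
    slot_t *s = cache->freelist;
    if (s != NULL) {
        cache->freelist = s->next;
        return s;
    }
    if (cache->free_pages == NULL) {
        cache->free_pages = heap_take_free_pages(heap);
        if (cache->free_pages == NULL) return NULL;   /* heap exhausted */
    }
    page_t *page = cache->free_pages;
    cache->free_pages = page->free_next;
    cache_fill_freelist(cache, page);
    goto retry;
}

int main(void)
{
    /* Build a toy heap of fully free pages, then drain it. */
    static page_t pages[8];                 /* zero-initialized */
    heap_t heap = { NULL };
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < SLOTS_PER_PAGE - 1; j++)
            pages[i].slots[j].next = &pages[i].slots[j + 1];
        pages[i].freelist = &pages[i].slots[0];
        pages[i].free_slots = SLOTS_PER_PAGE;
        pages[i].free_next = heap.free_pages;
        heap.free_pages = &pages[i];
    }

    newobj_cache_t cache = { NULL, NULL, NULL };
    size_t n = 0;
    while (cache_alloc(&cache, &heap)) n++;
    printf("allocated %zu slots\n", n);     /* 8 * SLOTS_PER_PAGE = 1024 */
    return 0;
}

The design choice mirrors the patch: detaching several pages from the shared heap in one step amortizes the refill cost across roughly 512 allocations, while the per-ractor fast path stays a single unsynchronized pointer pop.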
Notes:
    Merged: https://github.com/ruby/ruby/pull/3875
Diffstat (limited to 'gc.c')
-rw-r--r--  gc.c  69
1 file changed, 57 insertions(+), 12 deletions(-)
diff --git a/gc.c b/gc.c
index 5adb336be8..d2e4e7cbbc 100644
--- a/gc.c
+++ b/gc.c
@@ -2157,34 +2157,70 @@ newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace,
return obj;
}
+static inline void
+ractor_cache_fill_freelist(rb_objspace_t *objspace, rb_ractor_t *cr, struct heap_page *page)
+{
+ cr->newobj_cache.using_page = page;
+ cr->newobj_cache.freelist = page->freelist;
+ page->free_slots = 0;
+ page->freelist = NULL;
+}
+
static inline VALUE
ractor_cached_freeobj(rb_objspace_t *objspace, rb_ractor_t *cr)
{
+ retry:;
RVALUE *p = cr->newobj_cache.freelist;
- if (p) {
+ if (LIKELY(p != NULL)) {
VALUE obj = (VALUE)p;
cr->newobj_cache.freelist = p->as.free.next;
asan_unpoison_object(obj, true);
return obj;
}
else {
- return Qfalse;
+ if (cr->newobj_cache.free_pages) {
+ struct heap_page *page = cr->newobj_cache.free_pages;
+ cr->newobj_cache.free_pages = page->free_next;
+ ractor_cache_fill_freelist(objspace, cr, page);
+ goto retry;
+ }
+ return false;
}
}
+#define RACTOR_SLOT_CACHE_NUM 512
+
static struct heap_page *
-heap_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
+heap_next_freepages(rb_objspace_t *objspace, rb_heap_t *heap)
{
ASSERT_vm_locking();
- struct heap_page *page;
-
+ // find at least one page
while (heap->free_pages == NULL) {
heap_prepare(objspace, heap);
}
- page = heap->free_pages;
- heap->free_pages = page->free_next;
+
+ // cache more pages if available
+ struct heap_page *page = heap->free_pages;
+ size_t free_slots = page->free_slots;
+ struct heap_page *p = page;
+
+ int page_cnt = 1;
+
+ while (p->free_next) {
+ if (free_slots >= RACTOR_SLOT_CACHE_NUM) {
+ break;
+ }
+ free_slots += p->free_slots;
+ p = p->free_next;
+ page_cnt++;
+ }
+
+ heap->free_pages = p->free_next;
+ p->free_next = NULL;
+
+ RUBY_DEBUG_LOG("free_slots:%d pages:%d", page->free_next ? (int)free_slots : (int)page->free_slots, page_cnt);
GC_ASSERT(page->free_slots != 0);
RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", page, page->freelist, page->free_slots);
@@ -2199,13 +2235,13 @@ ractor_cache_slots(rb_objspace_t *objspace, rb_ractor_t *cr)
{
ASSERT_vm_locking();
GC_ASSERT(cr->newobj_cache.freelist == NULL);
+ GC_ASSERT(cr->newobj_cache.free_pages == NULL);
- struct heap_page *page = heap_next_freepage(objspace, heap_eden);
+ struct heap_page *page = heap_next_freepages(objspace, heap_eden);
+ struct heap_page *pages = page->free_next;
- cr->newobj_cache.using_page = page;
- cr->newobj_cache.freelist = page->freelist;
- page->free_slots = 0;
- page->freelist = NULL;
+ ractor_cache_fill_freelist(objspace, cr, page);
+ cr->newobj_cache.free_pages = pages;
GC_ASSERT(RB_TYPE_P((VALUE)cr->newobj_cache.freelist, T_NONE));
}
@@ -5041,8 +5077,17 @@ gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
*p = freelist;
}
+#if 0
+ int free_slots = 0;
+ for (RVALUE *p = freelist; p; p = p->as.free.next) free_slots++;
+ for (struct heap_page *page = r->newobj_cache.free_pages; page;
+ page = page->free_next) free_slots += page->free_slots;
+ fprintf(stderr, "r:%d unused free_slots:%d\n", r->id, free_slots);
+#endif
+
r->newobj_cache.using_page = NULL;
r->newobj_cache.freelist = NULL;
+ r->newobj_cache.free_pages = NULL;
}
}