author     Étienne Barrié <etienne.barrie@gmail.com>   2025-04-28 11:53:49 +0200
committer  Jean Boussier <jean.boussier@gmail.com>     2025-05-06 19:13:59 +0200
commit     cb772247e72ce3d3c7718018539508b0204c1f2c (patch)
tree       f01a550cd11dd57455f7e3116892363952cdd224
parent     925da368763487c495f47c9e7f17a252ff38a105 (diff)
Improve correctness of allocated object counts without increasing contention
Currently the count of allocated objects for a heap is incremented without
regard to parallelism, which leads to incorrect counts.

By maintaining a local counter in the ractor newobj cache, and only syncing
it atomically at some granularity, we can improve correctness without
increasing contention.

The allocated object count is also synced when the ractor is freed.

Co-authored-by: Jean Boussier <jean.boussier@gmail.com>
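For illustration, the batching pattern described above can be sketched in
isolation with C11 atomics. This is a minimal sketch, not the patch itself:
counter_cache_t, global_count, COUNT_STEP, count_alloc, and count_flush are
hypothetical names, and standard atomics stand in for Ruby's
RUBY_ATOMIC_SIZE_ADD.

#include <stdatomic.h>
#include <stddef.h>

/* Flush granularity: bounds how far the shared counter can lag
 * behind the true count for any single cache. */
#define COUNT_STEP 1024

/* Per-thread (per-ractor) cache: bumped without synchronization. */
typedef struct {
    size_t local_count;
} counter_cache_t;

/* Shared counter: touched only once every COUNT_STEP allocations. */
static atomic_size_t global_count;

static inline void
count_alloc(counter_cache_t *cache)
{
    if (++cache->local_count >= COUNT_STEP) {
        /* One atomic read-modify-write per COUNT_STEP allocations
         * instead of one per object. */
        atomic_fetch_add_explicit(&global_count, cache->local_count,
                                  memory_order_relaxed);
        cache->local_count = 0;
    }
}

/* Called on cache teardown (the analogue of rb_gc_impl_ractor_cache_free)
 * so that no allocations go uncounted when a thread exits. */
static inline void
count_flush(counter_cache_t *cache)
{
    atomic_fetch_add_explicit(&global_count, cache->local_count,
                              memory_order_relaxed);
    cache->local_count = 0;
}

The trade-off is bounded staleness: global_count may run behind by at most
COUNT_STEP - 1 per live cache until the owner flushes or exits.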
Notes:
    Merged: https://github.com/ruby/ruby/pull/13192
-rw-r--r--  gc/default/default.c | 33
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/gc/default/default.c b/gc/default/default.c
index b2bbd79784..6eee53f8ba 100644
--- a/gc/default/default.c
+++ b/gc/default/default.c
@@ -161,6 +161,7 @@
 typedef struct ractor_newobj_heap_cache {
     struct free_slot *freelist;
     struct heap_page *using_page;
+    size_t allocated_objects_count;
 } rb_ractor_newobj_heap_cache_t;
 
 typedef struct ractor_newobj_cache {
@@ -2287,6 +2288,8 @@ rb_gc_impl_size_allocatable_p(size_t size)
     return size <= heap_slot_size(HEAP_COUNT - 1);
 }
 
+static const size_t ALLOCATED_COUNT_STEP = 1024;
+
 static inline VALUE
 ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
                            size_t heap_idx)
@@ -2309,6 +2312,22 @@ ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *ca
         VALUE obj = (VALUE)p;
         rb_asan_unpoison_object(obj, true);
         heap_cache->freelist = p->next;
+
+        if (rb_gc_multi_ractor_p()) {
+            heap_cache->allocated_objects_count++;
+            rb_heap_t *heap = &heaps[heap_idx];
+            if (heap_cache->allocated_objects_count >= ALLOCATED_COUNT_STEP) {
+                RUBY_ATOMIC_SIZE_ADD(heap->total_allocated_objects, heap_cache->allocated_objects_count);
+                heap_cache->allocated_objects_count = 0;
+            }
+        }
+        else {
+            rb_heap_t *heap = &heaps[heap_idx];
+            heap->total_allocated_objects++;
+            GC_ASSERT(heap->total_slots >=
+                      (heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count));
+        }
+
 #if RGENGC_CHECK_MODE
         GC_ASSERT(rb_gc_impl_obj_slot_size(obj) == heap_slot_size(heap_idx));
         // zero clear
@@ -2461,12 +2480,6 @@ newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t he
         obj = newobj_cache_miss(objspace, cache, heap_idx, vm_locked);
     }
 
-    rb_heap_t *heap = &heaps[heap_idx];
-    heap->total_allocated_objects++;
-    GC_ASSERT(rb_gc_multi_ractor_p() ||
-              heap->total_slots >=
-              (heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count));
-
     return obj;
 }
 
@@ -6261,6 +6274,14 @@ rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache)
     rb_objspace_t *objspace = objspace_ptr;
    objspace->live_ractor_cache_count--;
 
+    rb_ractor_newobj_cache_t *newobj_cache = (rb_ractor_newobj_cache_t *)cache;
+
+    for (size_t heap_idx = 0; heap_idx < HEAP_COUNT; heap_idx++) {
+        rb_heap_t *heap = &heaps[heap_idx];
+        rb_ractor_newobj_heap_cache_t *heap_cache = &newobj_cache->heap_caches[heap_idx];
+        RUBY_ATOMIC_SIZE_ADD(heap->total_allocated_objects, heap_cache->allocated_objects_count);
+        heap_cache->allocated_objects_count = 0;
+    }
     gc_ractor_newobj_cache_clear(cache, NULL);
     free(cache);
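Accuracy trade-off of the patch above: each live ractor newobj cache can
retain up to ALLOCATED_COUNT_STEP - 1 = 1023 unflushed allocations per heap,
so a concurrent reader of heap->total_allocated_objects can lag the true
count by at most 1023 × live_ractor_cache_count (e.g., roughly 4k objects
per heap with 4 ractors). The flush added to rb_gc_impl_ractor_cache_free
ensures nothing is lost once a ractor's cache is freed.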