-rw-r--r--  bootstraptest/test_ractor.rb  23
-rw-r--r--  gc.c                          24
-rw-r--r--  gc/default/default.c          36
-rw-r--r--  gc/gc_impl.h                   6
-rw-r--r--  ractor.c                       5
-rw-r--r--  ractor_core.h                  1
-rw-r--r--  ractor_sync.c                  1
7 files changed, 66 insertions(+), 30 deletions(-)
diff --git a/bootstraptest/test_ractor.rb b/bootstraptest/test_ractor.rb
index 4a58ece8ac..fcf14259e8 100644
--- a/bootstraptest/test_ractor.rb
+++ b/bootstraptest/test_ractor.rb
@@ -2372,3 +2372,26 @@ assert_equal 'ok', <<~'RUBY'
ractors.each(&:join)
:ok
RUBY
+
+# This test checks that we do not trigger a GC when malloc is called while a
+# Ractor lock is held. We cannot trigger a GC while holding a Ractor lock
+# because GC requires the VM lock and the Ractor barrier: if another Ractor is
+# waiting on this Ractor's lock, it can never join the barrier, so we deadlock.
+#
+# Creating Ractor::Port requires locking the Ractor and inserting into an
+# st_table, which can call malloc.
+assert_equal 'ok', <<~'RUBY'
+ r = Ractor.new do
+ loop do
+ Ractor::Port.new
+ end
+ end
+
+ 10.times do
+ 10_000.times do
+ r.send(nil)
+ end
+ sleep(0.01)
+ end
+ :ok
+RUBY
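
The comment in the test above captures the allocation-side rule this commit enforces: a malloc performed while a Ractor lock is held must not fall into the GC trigger path. The following standalone sketch only illustrates that gate and is not the VM's code; gc_disabled, xmalloc_sketch, and maybe_trigger_gc are hypothetical analogues of r->malloc_gc_disabled, ruby_xmalloc, and the GC entry point.

/* Illustrative sketch only, not the VM's implementation: a per-thread
 * "GC disabled" flag consulted by an allocation wrapper. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static _Thread_local bool gc_disabled;   /* analogue of r->malloc_gc_disabled */

static void
maybe_trigger_gc(bool gc_allowed)
{
    if (gc_allowed) {
        puts("a collection could run here");   /* the VM would GC here */
    }
}

static void *
xmalloc_sketch(size_t size)
{
    bool gc_allowed = !gc_disabled;   /* analogue of malloc_gc_allowed() */
    void *mem = malloc(size);
    maybe_trigger_gc(gc_allowed);     /* skipped while the flag is set */
    return mem;
}

int
main(void)
{
    gc_disabled = true;               /* entering a RACTOR_LOCK-like section */
    void *p = xmalloc_sketch(64);     /* must not reach the GC path */
    gc_disabled = false;              /* leaving the critical section */
    free(p);
    return 0;
}

Sampling the flag into a local gc_allowed at the top of the wrapper mirrors how gc.c below computes malloc_gc_allowed() once and passes it down to the rb_gc_impl_* functions.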
diff --git a/gc.c b/gc.c
index f3c9e2e3f3..97367039af 100644
--- a/gc.c
+++ b/gc.c
@@ -625,9 +625,9 @@ typedef struct gc_function_map {
size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
bool (*size_allocatable_p)(size_t size);
// Malloc
- void *(*malloc)(void *objspace_ptr, size_t size);
- void *(*calloc)(void *objspace_ptr, size_t size);
- void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size);
+ void *(*malloc)(void *objspace_ptr, size_t size, bool gc_allowed);
+ void *(*calloc)(void *objspace_ptr, size_t size, bool gc_allowed);
+ void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed);
void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
// Marking
@@ -5118,6 +5118,14 @@ ruby_xmalloc(size_t size)
return handle_malloc_failure(ruby_xmalloc_body(size));
}
+static bool
+malloc_gc_allowed(void)
+{
+ rb_ractor_t *r = rb_current_ractor_raw(false);
+
+ return r == NULL || !r->malloc_gc_disabled;
+}
+
static void *
ruby_xmalloc_body(size_t size)
{
@@ -5125,7 +5133,7 @@ ruby_xmalloc_body(size_t size)
negative_size_allocation_error("too large allocation size");
}
- return rb_gc_impl_malloc(rb_gc_get_objspace(), size);
+ return rb_gc_impl_malloc(rb_gc_get_objspace(), size, malloc_gc_allowed());
}
void
@@ -5155,7 +5163,7 @@ ruby_xmalloc2(size_t n, size_t size)
static void *
ruby_xmalloc2_body(size_t n, size_t size)
{
- return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
+ return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
}
static void *ruby_xcalloc_body(size_t n, size_t size);
@@ -5169,7 +5177,7 @@ ruby_xcalloc(size_t n, size_t size)
static void *
ruby_xcalloc_body(size_t n, size_t size)
{
- return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
+ return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
}
static void *ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size);
@@ -5190,7 +5198,7 @@ ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size)
negative_size_allocation_error("too large allocation size");
}
- return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size);
+ return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size, malloc_gc_allowed());
}
void *
@@ -5214,7 +5222,7 @@ static void *
ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n)
{
size_t len = xmalloc2_size(n, size);
- return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size);
+ return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size, malloc_gc_allowed());
}
void *
diff --git a/gc/default/default.c b/gc/default/default.c
index d2ed2244e1..e443a0727e 100644
--- a/gc/default/default.c
+++ b/gc/default/default.c
@@ -7970,7 +7970,7 @@ objspace_malloc_gc_stress(rb_objspace_t *objspace)
}
static inline bool
-objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
+objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type, bool gc_allowed)
{
if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
mem,
@@ -7982,7 +7982,7 @@ objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_s
}
static bool
-objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
+objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type, bool gc_allowed)
{
if (new_size > old_size) {
RUBY_ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
@@ -7997,7 +7997,7 @@ objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_siz
#endif
}
- if (type == MEMOP_TYPE_MALLOC) {
+ if (type == MEMOP_TYPE_MALLOC && gc_allowed) {
retry:
if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
@@ -8079,10 +8079,10 @@ malloc_during_gc_p(rb_objspace_t *objspace)
}
static inline void *
-objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
+objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size, bool gc_allowed)
{
size = objspace_malloc_size(objspace, mem, size);
- objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC) {}
+ objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC, gc_allowed) {}
#if CALC_EXACT_MALLOC_SIZE
{
@@ -8114,10 +8114,10 @@ objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
GPR_FLAG_MALLOC; \
objspace_malloc_gc_stress(objspace); \
\
- if (RB_LIKELY((expr))) { \
+ if (RB_LIKELY((expr))) { \
/* Success on 1st try */ \
} \
- else if (!garbage_collect_with_gvl(objspace, gpr)) { \
+ else if (gc_allowed && !garbage_collect_with_gvl(objspace, gpr)) { \
/* @shyouhei thinks this doesn't happen */ \
GC_MEMERROR("TRY_WITH_GC: could not GC"); \
} \
@@ -8160,7 +8160,7 @@ rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
#endif
old_size = objspace_malloc_size(objspace, ptr, old_size);
- objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
+ objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE, true) {
free(ptr);
ptr = NULL;
RB_DEBUG_COUNTER_INC(heap_xfree);
@@ -8168,7 +8168,7 @@ rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
}
void *
-rb_gc_impl_malloc(void *objspace_ptr, size_t size)
+rb_gc_impl_malloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
rb_objspace_t *objspace = objspace_ptr;
check_malloc_not_in_gc(objspace, "malloc");
@@ -8179,11 +8179,11 @@ rb_gc_impl_malloc(void *objspace_ptr, size_t size)
TRY_WITH_GC(size, mem = malloc(size));
RB_DEBUG_COUNTER_INC(heap_xmalloc);
if (!mem) return mem;
- return objspace_malloc_fixup(objspace, mem, size);
+ return objspace_malloc_fixup(objspace, mem, size, gc_allowed);
}
void *
-rb_gc_impl_calloc(void *objspace_ptr, size_t size)
+rb_gc_impl_calloc(void *objspace_ptr, size_t size, bool gc_allowed)
{
rb_objspace_t *objspace = objspace_ptr;
@@ -8199,11 +8199,11 @@ rb_gc_impl_calloc(void *objspace_ptr, size_t size)
size = objspace_malloc_prepare(objspace, size);
TRY_WITH_GC(size, mem = calloc1(size));
if (!mem) return mem;
- return objspace_malloc_fixup(objspace, mem, size);
+ return objspace_malloc_fixup(objspace, mem, size, gc_allowed);
}
void *
-rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size)
+rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed)
{
rb_objspace_t *objspace = objspace_ptr;
@@ -8211,7 +8211,7 @@ rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_si
void *mem;
- if (!ptr) return rb_gc_impl_malloc(objspace, new_size);
+ if (!ptr) return rb_gc_impl_malloc(objspace, new_size, gc_allowed);
/*
* The behavior of realloc(ptr, 0) is implementation defined.
@@ -8219,7 +8219,7 @@ rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_si
* see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
*/
if (new_size == 0) {
- if ((mem = rb_gc_impl_malloc(objspace, 0)) != NULL) {
+ if ((mem = rb_gc_impl_malloc(objspace, 0, gc_allowed)) != NULL) {
/*
* - OpenBSD's malloc(3) man page says that when 0 is passed, it
* returns a non-NULL pointer to an access-protected memory page.
@@ -8278,7 +8278,7 @@ rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_si
}
#endif
- objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
+ objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC, gc_allowed);
RB_DEBUG_COUNTER_INC(heap_xrealloc);
return mem;
@@ -8290,10 +8290,10 @@ rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff)
rb_objspace_t *objspace = objspace_ptr;
if (diff > 0) {
- objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
+ objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC, true);
}
else if (diff < 0) {
- objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
+ objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC, true);
}
}
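
The TRY_WITH_GC hunk above only shows the guard that was added; the surrounding macro (allocation attempt, GC-stress hook, memory-error handling) is not part of this diff. As a rough, hedged approximation of the retry shape with the new gc_allowed gate, and explicitly not the real macro:

/* Hedged, simplified stand-in for the retry logic -- not the real
 * TRY_WITH_GC macro, which also handles GC stress and GC_MEMERROR. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool
toy_collect(void)                     /* placeholder for a real collection */
{
    puts("collecting...");
    return true;                      /* pretend the GC ran */
}

static void *
try_with_gc(size_t size, bool gc_allowed)
{
    void *mem = malloc(size);
    if (mem != NULL) {
        return mem;                   /* success on the first try */
    }
    if (!gc_allowed || !toy_collect()) {
        return NULL;                  /* GC forbidden here, or it could not run */
    }
    return malloc(size);              /* one retry after collecting */
}

int
main(void)
{
    void *p = try_with_gc(128, /* gc_allowed = */ false);
    free(p);
    return 0;
}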
diff --git a/gc/gc_impl.h b/gc/gc_impl.h
index d1ae7983a2..b461f3a305 100644
--- a/gc/gc_impl.h
+++ b/gc/gc_impl.h
@@ -72,9 +72,9 @@ GC_IMPL_FN bool rb_gc_impl_size_allocatable_p(size_t size);
* memory just return NULL (with appropriate errno set).
* The caller side takes care of that situation.
*/
-GC_IMPL_FN void *rb_gc_impl_malloc(void *objspace_ptr, size_t size);
-GC_IMPL_FN void *rb_gc_impl_calloc(void *objspace_ptr, size_t size);
-GC_IMPL_FN void *rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size);
+GC_IMPL_FN void *rb_gc_impl_malloc(void *objspace_ptr, size_t size, bool gc_allowed);
+GC_IMPL_FN void *rb_gc_impl_calloc(void *objspace_ptr, size_t size, bool gc_allowed);
+GC_IMPL_FN void *rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed);
GC_IMPL_FN void rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size);
GC_IMPL_FN void rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff);
// Marking
diff --git a/ractor.c b/ractor.c
index c4d748e69c..d40d0e61bf 100644
--- a/ractor.c
+++ b/ractor.c
@@ -71,6 +71,7 @@ ractor_lock(rb_ractor_t *r, const char *file, int line)
ASSERT_ractor_unlocking(r);
rb_native_mutex_lock(&r->sync.lock);
+ r->malloc_gc_disabled = true;
#if RACTOR_CHECK_MODE > 0
if (rb_current_execution_context(false) != NULL) {
@@ -99,6 +100,10 @@ ractor_unlock(rb_ractor_t *r, const char *file, int line)
#if RACTOR_CHECK_MODE > 0
r->sync.locked_by = Qnil;
#endif
+
+ VM_ASSERT(r->malloc_gc_disabled);
+
+ r->malloc_gc_disabled = false;
rb_native_mutex_unlock(&r->sync.lock);
RUBY_DEBUG_LOG2(file, line, "r:%u%s", r->pub.id, rb_current_ractor_raw(false) == r ? " (self)" : "");
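
On the lock side, ractor.c raises malloc_gc_disabled right after taking r->sync.lock and clears it (with a VM_ASSERT) just before releasing it, so every allocation in the critical section sees the flag. A standalone sketch of that bracketing, using hypothetical toy_* names rather than the real ractor_lock/ractor_unlock:

/* Standalone sketch with hypothetical names -- not ractor.c itself. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct toy_ractor {
    pthread_mutex_t lock;             /* stands in for r->sync.lock */
    bool malloc_gc_disabled;
};

static void
toy_lock(struct toy_ractor *r)
{
    pthread_mutex_lock(&r->lock);
    r->malloc_gc_disabled = true;     /* allocations below must not trigger GC */
}

static void
toy_unlock(struct toy_ractor *r)
{
    assert(r->malloc_gc_disabled);    /* mirrors the VM_ASSERT above */
    r->malloc_gc_disabled = false;
    pthread_mutex_unlock(&r->lock);
}

int
main(void)
{
    struct toy_ractor r = { PTHREAD_MUTEX_INITIALIZER, false };
    toy_lock(&r);
    /* ...st_insert()-like work that may call malloc() would go here... */
    toy_unlock(&r);
    return 0;
}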
diff --git a/ractor_core.h b/ractor_core.h
index 0656ce00a0..130ccb1118 100644
--- a/ractor_core.h
+++ b/ractor_core.h
@@ -98,6 +98,7 @@ struct rb_ractor_struct {
VALUE verbose;
VALUE debug;
+ bool malloc_gc_disabled;
void *newobj_cache;
}; // rb_ractor_t is defined in vm_core.h
diff --git a/ractor_sync.c b/ractor_sync.c
index 52ce953851..c208ee6b2d 100644
--- a/ractor_sync.c
+++ b/ractor_sync.c
@@ -395,7 +395,6 @@ ractor_add_port(rb_ractor_t *r, st_data_t id)
RACTOR_LOCK(r);
{
- // memo: can cause GC, but GC doesn't use ractor locking.
st_insert(r->sync.ports, id, (st_data_t)rq);
}
RACTOR_UNLOCK(r);