author    Koichi Sasada <ko1@atdot.net>  2020-03-10 02:22:11 +0900
committer Koichi Sasada <ko1@atdot.net>  2020-09-03 21:11:06 +0900
commit    79df14c04b452411b9d17e26a398e491bca1a811 (patch)
tree      7598cee0f105439efd5bb328a727b0fe27d7c666 /gc.c
parent    eeb5325d3bfd71301896360c17e8f51abcb9a7e5 (diff)
Introduce Ractor mechanism for parallel execution
This commit introduces the Ractor mechanism to run Ruby programs in parallel. See doc/ractor.md for more details about Ractor, and ticket [Feature #17100] for the implementation details and discussions. This commit does not complete the implementation; you may find many bugs when using Ractor, and the specification may still change, so this feature is experimental: you will see a warning when you create the first Ractor with `Ractor.new`. I hope this feature can help protect programmers from thread-safety issues.
Notes: Merged: https://github.com/ruby/ruby/pull/3365
Diffstat (limited to 'gc.c')
-rw-r--r--  gc.c | 347
1 file changed, 223 insertions(+), 124 deletions(-)
diff --git a/gc.c b/gc.c
index 9b453cb6ed..535f526391 100644
--- a/gc.c
+++ b/gc.c
@@ -101,7 +101,9 @@
#include "symbol.h"
#include "transient_heap.h"
#include "vm_core.h"
+#include "vm_sync.h"
#include "vm_callinfo.h"
+#include "ractor.h"
#include "builtin.h"
@@ -402,7 +404,7 @@ int ruby_rgengc_debug;
* 2: enable profiling for each types
*/
#ifndef RGENGC_PROFILE
-#define RGENGC_PROFILE 0
+#define RGENGC_PROFILE 1
#endif
/* RGENGC_ESTIMATE_OLDMALLOC
@@ -884,7 +886,6 @@ VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define heap_eden (&objspace->eden_heap)
#define heap_tomb (&objspace->tomb_heap)
-#define dont_gc objspace->flags.dont_gc
#define during_gc objspace->flags.during_gc
#define finalizing objspace->atomic_flags.finalizing
#define finalizer_table objspace->finalizer_table
@@ -897,6 +898,18 @@ VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#define stress_to_class 0
#endif
+#if 0
+#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
+#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
+#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)b, __FILE__, __LINE__), objspace->flags.dont_gc = (b))
+#define dont_gc_val() (objspace->flags.dont_gc)
+#else
+#define dont_gc_on() (objspace->flags.dont_gc = 1)
+#define dont_gc_off() (objspace->flags.dont_gc = 0)
+#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
+#define dont_gc_val() (objspace->flags.dont_gc)
+#endif
+
static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
{
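The new dont_gc_on/off/set/val macro family above replaces direct writes to objspace->flags.dont_gc; flipping the `#if 0` to `#if 1` turns every toggle into an fprintf trace carrying the call site. A minimal standalone sketch of the same comma-expression pattern, with a local `flags` struct standing in for objspace:

    #include <stdio.h>

    static struct { unsigned int dont_gc : 1; } flags;

    /* Flip the 0 to 1 to trace every toggle with its call site. */
    #if 0
    #define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", \
                                    (int)(b), __FILE__, __LINE__),     \
                            flags.dont_gc = (b))
    #else
    #define dont_gc_set(b) (((int)(b)), flags.dont_gc = (b))
    #endif

    int
    main(void)
    {
        int prev = flags.dont_gc;   /* save, as objspace_allrefs() does */
        dont_gc_set(1);             /* GC off for a critical region */
        dont_gc_set(prev);          /* restore the previous state */
        return 0;
    }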
@@ -984,8 +997,8 @@ static int garbage_collect(rb_objspace_t *, int reason);
static int gc_start(rb_objspace_t *objspace, int reason);
static void gc_rest(rb_objspace_t *objspace);
-static inline void gc_enter(rb_objspace_t *objspace, const char *event);
-static inline void gc_exit(rb_objspace_t *objspace, const char *event);
+static inline void gc_enter(rb_objspace_t *objspace, const char *event, unsigned int *lock_lev);
+static inline void gc_exit(rb_objspace_t *objspace, const char *event, unsigned int *lock_lev);
static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
@@ -1233,6 +1246,7 @@ check_rvalue_consistency_force(const VALUE obj, int terminate)
goto skip;
}
}
+ bp();
fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
err++;
skip:
@@ -1561,7 +1575,7 @@ rb_objspace_alloc(void)
malloc_limit = gc_params.malloc_limit_min;
list_head_init(&objspace->eden_heap.pages);
list_head_init(&objspace->tomb_heap.pages);
- dont_gc = TRUE;
+ dont_gc_on();
return objspace;
}
@@ -2032,7 +2046,7 @@ heap_get_freeobj_head(rb_objspace_t *objspace, rb_heap_t *heap)
{
RVALUE *p = heap->freelist;
if (LIKELY(p != NULL)) {
- heap->freelist = p->as.free.next;
+ heap->freelist = p->as.free.next;
}
asan_unpoison_object((VALUE)p, true);
return (VALUE)p;
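For context, heap_get_freeobj_head pops the head of an intrusive free list: each free slot stores the link to the next one, so the allocation fast path is a single pointer swap. A self-contained sketch of the idiom (the slot type here is simplified, not the real RVALUE):

    #include <stddef.h>

    struct slot {
        struct slot *next;   /* link stored inside the free slot itself */
    };

    /* Pop the first free slot, or return NULL when the list is empty. */
    static struct slot *
    freelist_pop(struct slot **freelist)
    {
        struct slot *p = *freelist;
        if (p != NULL) {
            *freelist = p->next;
        }
        return p;
    }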
@@ -2108,6 +2122,10 @@ newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_prote
};
MEMCPY(RANY(obj), &buf, RVALUE, 1);
+#if RACTOR_CHECK_MODE
+ rb_ractor_setup_belonging(obj);
+#endif
+
#if RGENGC_CHECK_MODE
GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
@@ -2115,58 +2133,57 @@ newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_prote
GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
if (flags & FL_PROMOTED1) {
- if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
+ if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
}
else {
- if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
+ if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
}
if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
#endif
if (UNLIKELY(wb_protected == FALSE)) {
- MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
+ MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
}
#if RGENGC_PROFILE
if (wb_protected) {
- objspace->profile.total_generated_normal_object_count++;
+ objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2
- objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
+ objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
}
else {
- objspace->profile.total_generated_shady_object_count++;
+ objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
- objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
+ objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
}
#endif
+ objspace->total_allocated_objects++;
#if GC_DEBUG
RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
#endif
- objspace->total_allocated_objects++;
-
gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
#if RGENGC_OLD_NEWOBJ_CHECK > 0
{
- static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
+ static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
- if (!is_incremental_marking(objspace) &&
- flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
- ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
- if (--newobj_cnt == 0) {
- newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
+ if (!is_incremental_marking(objspace) &&
+ flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
+ ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
+ if (--newobj_cnt == 0) {
+ newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
- gc_mark_set(objspace, obj);
- RVALUE_AGE_SET_OLD(objspace, obj);
+ gc_mark_set(objspace, obj);
+ RVALUE_AGE_SET_OLD(objspace, obj);
- rb_gc_writebarrier_remember(obj);
- }
- }
+ rb_gc_writebarrier_remember(obj);
+ }
+ }
}
#endif
check_rvalue_consistency(obj);
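The RGENGC_OLD_NEWOBJ_CHECK block above deliberately promotes every Nth eligible new object straight to the old generation, which makes a missing write barrier fail fast. The countdown idiom in isolation, a sketch in which promote_to_old stands in for the gc.c sequence:

    #define OLD_NEWOBJ_CHECK 100   /* stands in for RGENGC_OLD_NEWOBJ_CHECK */

    static void
    promote_to_old(void *obj)
    {
        (void)obj;  /* in gc.c: gc_mark_set + RVALUE_AGE_SET_OLD +
                       rb_gc_writebarrier_remember */
    }

    /* Every OLD_NEWOBJ_CHECK-th eligible allocation is promoted straight
     * to the old generation, so a write barrier missing on a young-looking
     * object surfaces quickly. */
    static void
    maybe_stress_promote(void *obj)
    {
        static int countdown = OLD_NEWOBJ_CHECK;
        if (--countdown == 0) {
            countdown = OLD_NEWOBJ_CHECK;
            promote_to_old(obj);
        }
    }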
@@ -2179,20 +2196,21 @@ newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objsp
VALUE obj;
if (UNLIKELY(during_gc || ruby_gc_stressful)) {
- if (during_gc) {
- dont_gc = 1;
- during_gc = 0;
- rb_bug("object allocation during garbage collection phase");
- }
+ if (during_gc) {
+ dont_gc_on();
+ during_gc = 0;
+ rb_bug("object allocation during garbage collection phase");
+ }
- if (ruby_gc_stressful) {
- if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
- rb_memerror();
- }
- }
+ if (ruby_gc_stressful) {
+ if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
+ rb_memerror();
+ }
+ }
}
obj = heap_get_freeobj(objspace, heap_eden);
+
newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj);
return obj;
@@ -2219,30 +2237,36 @@ newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protect
rb_objspace_t *objspace = &rb_objspace;
VALUE obj;
- RB_DEBUG_COUNTER_INC(obj_newobj);
- (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
+ RB_VM_LOCK_ENTER();
+ {
+
+ RB_DEBUG_COUNTER_INC(obj_newobj);
+ (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
#if GC_DEBUG_STRESS_TO_CLASS
- if (UNLIKELY(stress_to_class)) {
- long i, cnt = RARRAY_LEN(stress_to_class);
- for (i = 0; i < cnt; ++i) {
- if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
+ if (UNLIKELY(stress_to_class)) {
+ long i, cnt = RARRAY_LEN(stress_to_class);
+ for (i = 0; i < cnt; ++i) {
+ if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
+ }
}
- }
#endif
- if (!(during_gc ||
- ruby_gc_stressful ||
- gc_event_hook_available_p(objspace)) &&
- (obj = heap_get_freeobj_head(objspace, heap_eden)) != Qfalse) {
- return newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
+ if (!(during_gc ||
+ ruby_gc_stressful ||
+ gc_event_hook_available_p(objspace)) &&
+ (obj = heap_get_freeobj_head(objspace, heap_eden)) != Qfalse) {
+ newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
+ }
+ else {
+ RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
+ obj = wb_protected ?
+ newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) :
+ newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace);
+ }
}
- else {
- RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
+ RB_VM_LOCK_LEAVE();
- return wb_protected ?
- newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) :
- newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace);
- }
+ return obj;
}
VALUE
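newobj_of is now bracketed by RB_VM_LOCK_ENTER / RB_VM_LOCK_LEAVE. Because those macros open and close a shared block, the old early `return`s become assignments to obj that flow past the LEAVE; returning from inside would skip the unlock. A sketch of the shape using a plain pthread mutex (the real macros additionally support recursive entry):

    #include <pthread.h>

    static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

    #define VM_LOCK_ENTER() do { pthread_mutex_lock(&vm_lock)
    #define VM_LOCK_LEAVE() pthread_mutex_unlock(&vm_lock); } while (0)

    long
    alloc_object(int fast_path_ok)
    {
        long obj;                 /* result escapes the locked region */

        VM_LOCK_ENTER();
        {
            if (fast_path_ok) {
                obj = 1;          /* fast path: pop from the free list */
            }
            else {
                obj = 2;          /* slow path: GC or heap growth first */
            }
            /* no `return` here: it would skip VM_LOCK_LEAVE() */
        }
        VM_LOCK_LEAVE();

        return obj;
    }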
@@ -2273,6 +2297,18 @@ rb_newobj_of(VALUE klass, VALUE flags)
return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED);
}
+VALUE
+rb_newobj_with(VALUE src)
+{
+ VALUE klass = RBASIC_CLASS(src);
+ VALUE flags = RBASIC(src)->flags;
+
+ VALUE v1 = RANY(src)->as.values.v1;
+ VALUE v2 = RANY(src)->as.values.v2;
+ VALUE v3 = RANY(src)->as.values.v3;
+ return newobj_of(klass, flags & ~FL_WB_PROTECTED, v1, v2, v3, flags & FL_WB_PROTECTED);
+}
+
#define UNEXPECTED_NODE(func) \
rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
@@ -3597,10 +3633,11 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
}
/* prohibit GC because force T_DATA finalizers can break an object graph consistency */
- dont_gc = 1;
+ dont_gc_on();
/* running data/file finalizers are part of garbage collection */
- gc_enter(objspace, "rb_objspace_call_finalizer");
+ unsigned int lock_lev;
+ gc_enter(objspace, "rb_objspace_call_finalizer", &lock_lev);
/* run data/file object's finalizers */
for (i = 0; i < heap_allocated_pages; i++) {
@@ -3642,7 +3679,7 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
}
}
- gc_exit(objspace, "rb_objspace_call_finalizer");
+ gc_exit(objspace, "rb_objspace_call_finalizer", &lock_lev);
if (heap_pages_deferred_final) {
finalize_list(objspace, heap_pages_deferred_final);
@@ -4529,15 +4566,16 @@ gc_sweep_rest(rb_objspace_t *objspace)
static void
gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
{
- GC_ASSERT(dont_gc == FALSE);
+ GC_ASSERT(dont_gc_val() == FALSE);
if (!GC_ENABLE_LAZY_SWEEP) return;
- gc_enter(objspace, "sweep_continue");
+ unsigned int lock_lev;
+ gc_enter(objspace, "sweep_continue", &lock_lev);
if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE && heap_increment(objspace, heap)) {
gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n");
}
gc_sweep_step(objspace, heap);
- gc_exit(objspace, "sweep_continue");
+ gc_exit(objspace, "sweep_continue", &lock_lev);
}
static void
@@ -5944,8 +5982,8 @@ objspace_allrefs(rb_objspace_t *objspace)
struct allrefs data;
struct mark_func_data_struct mfd;
VALUE obj;
- int prev_dont_gc = dont_gc;
- dont_gc = TRUE;
+ int prev_dont_gc = dont_gc_val();
+ dont_gc_on();
data.objspace = objspace;
data.references = st_init_numtable();
@@ -5966,7 +6004,7 @@ objspace_allrefs(rb_objspace_t *objspace)
}
free_stack_chunks(&data.mark_stack);
- dont_gc = prev_dont_gc;
+ dont_gc_set(prev_dont_gc);
return data.references;
}
@@ -6288,8 +6326,8 @@ gc_verify_heap_pages(rb_objspace_t *objspace)
static VALUE
gc_verify_internal_consistency_m(VALUE dummy)
{
+ ASSERT_vm_locking();
gc_verify_internal_consistency(&rb_objspace);
-
return Qnil;
}
@@ -6639,10 +6677,11 @@ gc_marks_rest(rb_objspace_t *objspace)
static void
gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
{
- GC_ASSERT(dont_gc == FALSE);
+ GC_ASSERT(dont_gc_val() == FALSE);
#if GC_ENABLE_INCREMENTAL_MARK
- gc_enter(objspace, "marks_continue");
+ unsigned int lock_lev;
+ gc_enter(objspace, "marks_continue", &lock_lev);
PUSH_MARK_FUNC_DATA(NULL);
{
@@ -6674,7 +6713,7 @@ gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
}
POP_MARK_FUNC_DATA();
- gc_exit(objspace, "marks_continue");
+ gc_exit(objspace, "marks_continue", &lock_lev);
#endif
}
@@ -6973,21 +7012,50 @@ void
rb_gc_writebarrier(VALUE a, VALUE b)
{
rb_objspace_t *objspace = &rb_objspace;
+ bool retry;
if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");
+ retry_:
+ retry = false;
if (!is_incremental_marking(objspace)) {
- if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
- return;
- }
- else {
- gc_writebarrier_generational(a, b, objspace);
- }
+ if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
+ // do nothing
+ }
+ else {
+ RB_VM_LOCK_ENTER(); // can change GC state
+ {
+ if (!is_incremental_marking(objspace)) {
+ if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
+ // do nothing
+ }
+ else {
+ gc_writebarrier_generational(a, b, objspace);
+ }
+ }
+ else {
+ retry = true;
+ }
+ }
+ RB_VM_LOCK_LEAVE();
+ }
}
else { /* slow path */
- gc_writebarrier_incremental(a, b, objspace);
+ RB_VM_LOCK_ENTER(); // can change GC state
+ {
+ if (is_incremental_marking(objspace)) {
+ gc_writebarrier_incremental(a, b, objspace);
+ }
+ else {
+ retry = true;
+ }
+ }
+ RB_VM_LOCK_LEAVE();
}
+ if (retry) goto retry_;
+
+ return;
}
void
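The rewritten write barrier is double-checked: it reads the GC phase without the lock, takes the VM lock only when there is work to do, re-checks the phase under the lock, and retries from the top when the phase changed in between. The control flow in isolation, with stand-in predicates for is_incremental_marking and the old-to-young check:

    #include <stdbool.h>
    #include <pthread.h>

    static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-ins for is_incremental_marking(), the RVALUE_OLD_P checks,
     * and the two barrier bodies in gc.c. */
    static bool phase_is_incremental(void)      { return false; }
    static bool needs_generational_work(void)   { return true;  }
    static void barrier_generational(void)      { }
    static void barrier_incremental(void)       { }

    void
    write_barrier(void)
    {
        bool retry;

      retry_:
        retry = false;
        if (!phase_is_incremental()) {        /* unlocked, possibly stale */
            if (needs_generational_work()) {
                pthread_mutex_lock(&vm_lock);
                if (!phase_is_incremental())  /* re-check under the lock */
                    barrier_generational();
                else
                    retry = true;             /* phase flipped: start over */
                pthread_mutex_unlock(&vm_lock);
            }
            /* else: nothing to record, and no lock was needed */
        }
        else {
            pthread_mutex_lock(&vm_lock);
            if (phase_is_incremental())
                barrier_incremental();
            else
                retry = true;
            pthread_mutex_unlock(&vm_lock);
        }
        if (retry) goto retry_;
    }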
@@ -7154,46 +7222,49 @@ void
rb_gc_force_recycle(VALUE obj)
{
rb_objspace_t *objspace = &rb_objspace;
+ RB_VM_LOCK_ENTER();
+ {
+ int is_old = RVALUE_OLD_P(obj);
- int is_old = RVALUE_OLD_P(obj);
-
- gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
+ gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
- if (is_old) {
- if (RVALUE_MARKED(obj)) {
- objspace->rgengc.old_objects--;
- }
- }
- CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
- CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
+ if (is_old) {
+ if (RVALUE_MARKED(obj)) {
+ objspace->rgengc.old_objects--;
+ }
+ }
+ CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
+ CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
#if GC_ENABLE_INCREMENTAL_MARK
- if (is_incremental_marking(objspace)) {
- if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj)) {
- invalidate_mark_stack(&objspace->mark_stack, obj);
- CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
- }
- CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
- }
- else {
+ if (is_incremental_marking(objspace)) {
+ if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj)) {
+ invalidate_mark_stack(&objspace->mark_stack, obj);
+ CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
+ }
+ CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
+ }
+ else {
#endif
- if (is_old || !GET_HEAP_PAGE(obj)->flags.before_sweep) {
- CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
- }
- CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
+ if (is_old || !GET_HEAP_PAGE(obj)->flags.before_sweep) {
+ CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
+ }
+ CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
#if GC_ENABLE_INCREMENTAL_MARK
- }
+ }
#endif
- objspace->profile.total_freed_objects++;
+ objspace->profile.total_freed_objects++;
- heap_page_add_freeobj(objspace, GET_HEAP_PAGE(obj), obj);
+ heap_page_add_freeobj(objspace, GET_HEAP_PAGE(obj), obj);
- /* Disable counting swept_slots because there are no meaning.
- * if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
- * objspace->heap.swept_slots++;
- * }
- */
+ /* Disable counting swept_slots because there are no meaning.
+ * if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
+ * objspace->heap.swept_slots++;
+ * }
+ */
+ }
+ RB_VM_LOCK_LEAVE();
}
#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
@@ -7281,7 +7352,7 @@ heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
static int
ready_to_gc(rb_objspace_t *objspace)
{
- if (dont_gc || during_gc || ruby_disable_gc) {
+ if (dont_gc_val() || during_gc || ruby_disable_gc) {
heap_ready_to_gc(objspace, heap_eden);
return FALSE;
}
@@ -7361,17 +7432,25 @@ gc_reset_malloc_info(rb_objspace_t *objspace)
static int
garbage_collect(rb_objspace_t *objspace, int reason)
{
+ int ret;
+
+ RB_VM_LOCK_ENTER();
+ {
#if GC_PROFILE_MORE_DETAIL
- objspace->profile.prepare_time = getrusage_time();
+ objspace->profile.prepare_time = getrusage_time();
#endif
- gc_rest(objspace);
+ gc_rest(objspace);
#if GC_PROFILE_MORE_DETAIL
- objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
+ objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
#endif
- return gc_start(objspace, reason);
+ ret = gc_start(objspace, reason);
+ }
+ RB_VM_LOCK_LEAVE();
+
+ return ret;
}
static int
@@ -7389,12 +7468,14 @@ gc_start(rb_objspace_t *objspace, int reason)
GC_ASSERT(gc_mode(objspace) == gc_mode_none);
GC_ASSERT(!is_lazy_sweeping(heap_eden));
GC_ASSERT(!is_incremental_marking(objspace));
+
+ unsigned int lock_lev;
+ gc_enter(objspace, "gc_start", &lock_lev);
+
#if RGENGC_CHECK_MODE >= 2
gc_verify_internal_consistency(objspace);
#endif
- gc_enter(objspace, "gc_start");
-
if (ruby_gc_stressful) {
int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
@@ -7478,7 +7559,7 @@ gc_start(rb_objspace_t *objspace, int reason)
}
gc_prof_timer_stop(objspace);
- gc_exit(objspace, "gc_start");
+ gc_exit(objspace, "gc_start", &lock_lev);
return TRUE;
}
@@ -7489,7 +7570,8 @@ gc_rest(rb_objspace_t *objspace)
int sweeping = is_lazy_sweeping(heap_eden);
if (marking || sweeping) {
- gc_enter(objspace, "gc_rest");
+ unsigned int lock_lev;
+ gc_enter(objspace, "gc_rest", &lock_lev);
if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
@@ -7501,7 +7583,7 @@ gc_rest(rb_objspace_t *objspace)
if (is_lazy_sweeping(heap_eden)) {
gc_sweep_rest(objspace);
}
- gc_exit(objspace, "gc_rest");
+ gc_exit(objspace, "gc_rest", &lock_lev);
}
}
@@ -7587,30 +7669,38 @@ gc_record(rb_objspace_t *objspace, int direction, const char *event)
#endif /* PRINT_ENTER_EXIT_TICK */
static inline void
-gc_enter(rb_objspace_t *objspace, const char *event)
+gc_enter(rb_objspace_t *objspace, const char *event, unsigned int *lock_lev)
{
+ // stop other ractors
+
+ RB_VM_LOCK_ENTER_LEV(lock_lev);
+ rb_vm_barrier();
+
GC_ASSERT(during_gc == 0);
if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
mjit_gc_start_hook();
during_gc = TRUE;
+ RUBY_DEBUG_LOG("%s (%s)", event, gc_current_status(objspace));
gc_report(1, objspace, "gc_enter: %s [%s]\n", event, gc_current_status(objspace));
gc_record(objspace, 0, event);
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
}
static inline void
-gc_exit(rb_objspace_t *objspace, const char *event)
+gc_exit(rb_objspace_t *objspace, const char *event, unsigned int *lock_lev)
{
GC_ASSERT(during_gc != 0);
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
gc_record(objspace, 1, event);
+ RUBY_DEBUG_LOG("%s (%s)", event, gc_current_status(objspace));
gc_report(1, objspace, "gc_exit: %s [%s]\n", event, gc_current_status(objspace));
during_gc = FALSE;
mjit_gc_exit_hook();
+ RB_VM_LOCK_LEAVE_LEV(lock_lev);
}
static void *
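gc_enter now takes the VM lock at a recorded recursion level and runs rb_vm_barrier() so all other ractors are stopped before the collector touches shared state; gc_exit unwinds to exactly that level. A sketch of the leveled enter/exit bookkeeping (the lock itself is elided; only the depth pairing is modeled):

    #include <assert.h>

    static unsigned int lock_depth;   /* recursion depth of the VM lock */
    static int during_gc;

    static void
    vm_lock_enter_lev(unsigned int *lev)
    {
        /* the real macro takes the global VM lock; here we model only
         * the depth bookkeeping that lets enter/exit pair up exactly */
        *lev = ++lock_depth;
    }

    static void
    vm_lock_leave_lev(unsigned int *lev)
    {
        assert(lock_depth == *lev);   /* exits must unwind in enter order */
        --lock_depth;
    }

    static void
    gc_enter_sketch(unsigned int *lock_lev)
    {
        vm_lock_enter_lev(lock_lev);
        /* rb_vm_barrier() runs here: every other ractor is parked before
         * the collector mutates shared heap state */
        during_gc = 1;
    }

    static void
    gc_exit_sketch(unsigned int *lock_lev)
    {
        during_gc = 0;
        vm_lock_leave_lev(lock_lev);
    }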
@@ -7623,7 +7713,7 @@ gc_with_gvl(void *ptr)
static int
garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
{
- if (dont_gc) return TRUE;
+ if (dont_gc_val()) return TRUE;
if (ruby_thread_has_gvl_p()) {
return garbage_collect(objspace, reason);
}
@@ -8647,7 +8737,7 @@ static VALUE
rb_gc_compact(rb_execution_context_t *ec, VALUE self)
{
rb_objspace_t *objspace = &rb_objspace;
- if (dont_gc) return Qnil;
+ if (dont_gc_val()) return Qnil;
gc_compact(objspace, FALSE, FALSE, FALSE);
return gc_compact_stats(objspace);
@@ -9349,9 +9439,9 @@ rb_gc_enable(void)
VALUE
rb_objspace_gc_enable(rb_objspace_t *objspace)
{
- int old = dont_gc;
+ int old = dont_gc_val();
- dont_gc = FALSE;
+ dont_gc_off();
return old ? Qtrue : Qfalse;
}
@@ -9371,8 +9461,8 @@ rb_gc_disable_no_rest(void)
static VALUE
gc_disable_no_rest(rb_objspace_t *objspace)
{
- int old = dont_gc;
- dont_gc = TRUE;
+ int old = dont_gc_val();
+ dont_gc_on();
return old ? Qtrue : Qfalse;
}
@@ -9742,7 +9832,10 @@ rb_memerror(void)
sleep(60);
}
- if (during_gc) gc_exit(objspace, "rb_memerror");
+ if (during_gc) {
+ // TODO: OMG!! How to implement it?
+ gc_exit(objspace, "rb_memerror", NULL);
+ }
exc = nomem_error;
if (!exc ||
@@ -9869,7 +9962,7 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si
if (type == MEMOP_TYPE_MALLOC) {
retry:
- if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) {
+ if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
gc_rest(objspace); /* gc_rest can reduce malloc_increase */
goto retry;
@@ -11607,6 +11700,8 @@ rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
}
}
+bool rb_ractor_p(VALUE rv);
+
const char *
rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
{
@@ -11750,6 +11845,10 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
(iseq = vm_block_iseq(block)) != NULL) {
rb_raw_iseq_info(BUFF_ARGS, iseq);
}
+ else if (rb_ractor_p(obj)) {
+ rb_ractor_t *r = (void *)DATA_PTR(obj);
+ APPENDF((BUFF_ARGS, "r:%d", r->id));
+ }
else {
const char * const type_name = rb_objspace_data_type_name(obj);
if (type_name) {