diff options
author | usa <usa@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2012-11-13 10:02:45 +0000 |
---|---|---|
committer | usa <usa@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2012-11-13 10:02:45 +0000 |
commit | ae2df330bc7662a50bea6385e53d49078592b07e (patch) | |
tree | 2ed43e3747edf8c02f2e1c19980b010127bac309 /gc.c | |
parent | 28a3dae8080985b4d6a9cb22d997b7f71c214051 (diff) |
merged revision(s) 37075,37076,37082,37083,37088: [Backport #7123]
* gc.c: Use the non-recursive marking instead of recursion. The
recursive marking of CRuby needs stack-overflow checking and a
fail-safe system, but these systems are not good in some cases,
for example, marking deep tree structures. [ruby-dev:46184]
[Feature #7095]
* configure.in (GC_MARK_STACKFRAME_WORD): removed. It was used for
checking stack overflow during marking.
* win32/Makefile.sub (GC_MARK_STACKFRAME_WORD): ditto.
* gc.c (free_stack_chunks): it is used only when per-VM object space
is enabled.
* gc.c (rb_objspace_call_finalizer): mark self-referencing finalizers
before running finalizers, to fix SEGV from btest on 32bit.
* gc.c (gc_mark_stacked_objects): extract from gc_marks().
* gc.c (rb_objspace_call_finalizer): call gc_mark_stacked_objects
at suitable point.
* gc.c (init_heap): call init_mark_stack before allocating the
altstack. This change avoids the stack overflow at the signal
handler on 32bit, but I don't understand the reason... [Feature #7095]
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_9_3@37648 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Diffstat (limited to 'gc.c')
-rw-r--r-- | gc.c | 363 |
1 file changed, 225 insertions, 138 deletions
@@ -98,8 +98,6 @@ ruby_gc_params_t initial_params = { #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory] -#define MARK_STACK_MAX 1024 - int ruby_gc_debug_indent = 0; /* for GC profile */ @@ -319,6 +317,22 @@ struct gc_list { struct gc_list *next; }; +#define STACK_CHUNK_SIZE 500 + +typedef struct stack_chunk { + VALUE data[STACK_CHUNK_SIZE]; + struct stack_chunk *next; +} stack_chunk_t; + +typedef struct mark_stack { + stack_chunk_t *chunk; + stack_chunk_t *cache; + size_t index; + size_t limit; + size_t cache_size; + size_t unused_cache_size; +} mark_stack_t; + #define CALC_EXACT_MALLOC_SIZE 0 typedef struct rb_objspace { @@ -355,11 +369,7 @@ typedef struct rb_objspace { st_table *table; RVALUE *deferred; } final; - struct { - VALUE buffer[MARK_STACK_MAX]; - VALUE *ptr; - int overflow; - } markstack; + mark_stack_t mark_stack; struct { int run; gc_profile_record *record; @@ -394,9 +404,6 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress; #define during_gc objspace->flags.during_gc #define finalizer_table objspace->final.table #define deferred_final_list objspace->final.deferred -#define mark_stack objspace->markstack.buffer -#define mark_stack_ptr objspace->markstack.ptr -#define mark_stack_overflow objspace->markstack.overflow #define global_List objspace->global_list #define ruby_gc_stress objspace->gc_stress #define initial_malloc_limit initial_params.initial_malloc_limit @@ -419,6 +426,7 @@ rb_objspace_alloc(void) #endif static void initial_expand_heap(rb_objspace_t *objspace); +static void init_mark_stack(mark_stack_t *stack); void rb_gc_set_params(void) @@ -465,6 +473,7 @@ rb_gc_set_params(void) static void gc_sweep(rb_objspace_t *); static void slot_sweep(rb_objspace_t *, struct heaps_slot *); static void rest_sweep(rb_objspace_t *); +static void free_stack_chunks(mark_stack_t *); void rb_objspace_free(rb_objspace_t *objspace) @@ -491,6 +500,7 @@ rb_objspace_free(rb_objspace_t *objspace) heaps_used = 0; heaps = 0; } + 
free_stack_chunks(&objspace->mark_stack); free(objspace); } #endif @@ -1100,6 +1110,7 @@ static void init_heap(rb_objspace_t *objspace) { add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT); + init_mark_stack(&objspace->mark_stack); #ifdef USE_SIGALTSTACK { /* altstack of another threads are allocated in another place */ @@ -1296,8 +1307,136 @@ ruby_get_stack_grow_direction(volatile VALUE *addr) } #endif -#define GC_LEVEL_MAX 250 -#define STACKFRAME_FOR_GC_MARK (GC_LEVEL_MAX * GC_MARK_STACKFRAME_WORD) +/* Marking stack */ + +static void push_mark_stack(mark_stack_t *, VALUE); +static int pop_mark_stack(mark_stack_t *, VALUE *); +static void shrink_stack_chunk_cache(mark_stack_t *stack); + +static stack_chunk_t * +stack_chunk_alloc(void) +{ + stack_chunk_t *res; + + res = malloc(sizeof(stack_chunk_t)); + if (!res) + rb_memerror(); + + return res; +} + +static inline int +is_mark_stask_empty(mark_stack_t *stack) +{ + return stack->chunk == NULL; +} + +static void +add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk) +{ + chunk->next = stack->cache; + stack->cache = chunk; + stack->cache_size++; +} + +static void +shrink_stack_chunk_cache(mark_stack_t *stack) +{ + stack_chunk_t *chunk; + + if (stack->unused_cache_size > (stack->cache_size/2)) { + chunk = stack->cache; + stack->cache = stack->cache->next; + stack->cache_size--; + free(chunk); + } + stack->unused_cache_size = stack->cache_size; +} + +static void +push_mark_stack_chunk(mark_stack_t *stack) +{ + stack_chunk_t *next; + + if (stack->cache_size > 0) { + next = stack->cache; + stack->cache = stack->cache->next; + stack->cache_size--; + if (stack->unused_cache_size > stack->cache_size) + stack->unused_cache_size = stack->cache_size; + } + else { + next = stack_chunk_alloc(); + } + next->next = stack->chunk; + stack->chunk = next; + stack->index = 0; +} + +static void +pop_mark_stack_chunk(mark_stack_t *stack) +{ + stack_chunk_t *prev; + + prev = stack->chunk->next; + 
add_stack_chunk_cache(stack, stack->chunk); + stack->chunk = prev; + stack->index = stack->limit; +} + +#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE +static void +free_stack_chunks(mark_stack_t *stack) +{ + stack_chunk_t *chunk = stack->chunk; + stack_chunk_t *next = NULL; + + while (chunk != NULL) { + next = chunk->next; + free(chunk); + chunk = next; + } +} +#endif + +static void +push_mark_stack(mark_stack_t *stack, VALUE data) +{ + if (stack->index == stack->limit) { + push_mark_stack_chunk(stack); + } + stack->chunk->data[stack->index++] = data; +} + +static int +pop_mark_stack(mark_stack_t *stack, VALUE *data) +{ + if (is_mark_stask_empty(stack)) { + return FALSE; + } + if (stack->index == 1) { + *data = stack->chunk->data[--stack->index]; + pop_mark_stack_chunk(stack); + return TRUE; + } + *data = stack->chunk->data[--stack->index]; + return TRUE; +} + +static void +init_mark_stack(mark_stack_t *stack) +{ + int i; + + push_mark_stack_chunk(stack); + stack->limit = STACK_CHUNK_SIZE; + + for(i=0; i < 4; i++) { + add_stack_chunk_cache(stack, stack_chunk_alloc()); + } + stack->unused_cache_size = stack->cache_size; +} + size_t ruby_stack_length(VALUE **p) @@ -1308,6 +1447,7 @@ ruby_stack_length(VALUE **p) return STACK_LENGTH; } +#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)) static int stack_check(int water_mark) { @@ -1323,6 +1463,7 @@ stack_check(int water_mark) #endif return ret; } +#endif #define STACKFRAME_FOR_CALL_CFUNC 512 @@ -1336,51 +1477,22 @@ ruby_stack_check(void) #endif } -static void -init_mark_stack(rb_objspace_t *objspace) -{ - mark_stack_overflow = 0; - mark_stack_ptr = mark_stack; -} - #define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack) -static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev); -static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev); +static void gc_mark(rb_objspace_t *objspace, VALUE ptr); +static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr); 
static void -gc_mark_all(rb_objspace_t *objspace) +gc_mark_stacked_objects(rb_objspace_t *objspace) { - RVALUE *p, *pend; - size_t i; + mark_stack_t *mstack = &objspace->mark_stack; + VALUE obj = 0; - init_mark_stack(objspace); - for (i = 0; i < heaps_used; i++) { - p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end; - while (p < pend) { - if ((p->as.basic.flags & FL_MARK) && - (p->as.basic.flags != FL_MARK)) { - gc_mark_children(objspace, (VALUE)p, 0); - } - p++; - } - } -} - -static void -gc_mark_rest(rb_objspace_t *objspace) -{ - VALUE tmp_arry[MARK_STACK_MAX]; - VALUE *p; - - p = (mark_stack_ptr - mark_stack) + tmp_arry; - MEMCPY(tmp_arry, mark_stack, VALUE, p - tmp_arry); - - init_mark_stack(objspace); - while (p != tmp_arry) { - p--; - gc_mark_children(objspace, *p, 0); + if (!mstack->index) return; + while (pop_mark_stack(mstack, &obj)) { + gc_mark_children(objspace, obj); } + shrink_stack_chunk_cache(mstack); } static inline int @@ -1419,7 +1531,7 @@ mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n v = *x; VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)); if (is_pointer_to_heap(objspace, (void *)v)) { - gc_mark(objspace, v, 0); + gc_mark(objspace, v); } x++; } @@ -1445,24 +1557,22 @@ rb_gc_mark_locations(VALUE *start, VALUE *end) struct mark_tbl_arg { rb_objspace_t *objspace; - int lev; }; static int mark_entry(ID key, VALUE value, st_data_t data) { struct mark_tbl_arg *arg = (void*)data; - gc_mark(arg->objspace, value, arg->lev); + gc_mark(arg->objspace, value); return ST_CONTINUE; } static void -mark_tbl(rb_objspace_t *objspace, st_table *tbl, int lev) +mark_tbl(rb_objspace_t *objspace, st_table *tbl) { struct mark_tbl_arg arg; if (!tbl || tbl->num_entries == 0) return; arg.objspace = objspace; - arg.lev = lev; st_foreach(tbl, mark_entry, (st_data_t)&arg); } @@ -1470,68 +1580,66 @@ static int mark_key(VALUE key, VALUE value, st_data_t data) { struct mark_tbl_arg *arg = (void*)data; - gc_mark(arg->objspace, key, 
arg->lev); + gc_mark(arg->objspace, key); return ST_CONTINUE; } static void -mark_set(rb_objspace_t *objspace, st_table *tbl, int lev) +mark_set(rb_objspace_t *objspace, st_table *tbl) { struct mark_tbl_arg arg; if (!tbl) return; arg.objspace = objspace; - arg.lev = lev; st_foreach(tbl, mark_key, (st_data_t)&arg); } void rb_mark_set(st_table *tbl) { - mark_set(&rb_objspace, tbl, 0); + mark_set(&rb_objspace, tbl); } static int mark_keyvalue(VALUE key, VALUE value, st_data_t data) { struct mark_tbl_arg *arg = (void*)data; - gc_mark(arg->objspace, key, arg->lev); - gc_mark(arg->objspace, value, arg->lev); + gc_mark(arg->objspace, key); + gc_mark(arg->objspace, value); return ST_CONTINUE; } static void -mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev) +mark_hash(rb_objspace_t *objspace, st_table *tbl) { struct mark_tbl_arg arg; if (!tbl) return; arg.objspace = objspace; - arg.lev = lev; st_foreach(tbl, mark_keyvalue, (st_data_t)&arg); } void rb_mark_hash(st_table *tbl) { - mark_hash(&rb_objspace, tbl, 0); + mark_hash(&rb_objspace, tbl); } static void -mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me, int lev) +mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me) { const rb_method_definition_t *def = me->def; - gc_mark(objspace, me->klass, lev); + gc_mark(objspace, me->klass); if (!def) return; switch (def->type) { case VM_METHOD_TYPE_ISEQ: - gc_mark(objspace, def->body.iseq->self, lev); + gc_mark(objspace, def->body.iseq->self); break; case VM_METHOD_TYPE_BMETHOD: - gc_mark(objspace, def->body.proc, lev); + gc_mark(objspace, def->body.proc); break; case VM_METHOD_TYPE_ATTRSET: case VM_METHOD_TYPE_IVAR: - gc_mark(objspace, def->body.attr.location, lev); + gc_mark(objspace, def->body.attr.location); break; default: break; /* ignore */ @@ -1541,24 +1649,23 @@ mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me, int lev) void rb_mark_method_entry(const rb_method_entry_t *me) { - 
mark_method_entry(&rb_objspace, me, 0); + mark_method_entry(&rb_objspace, me); } static int mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data) { struct mark_tbl_arg *arg = (void*)data; - mark_method_entry(arg->objspace, me, arg->lev); + mark_method_entry(arg->objspace, me); return ST_CONTINUE; } static void -mark_m_tbl(rb_objspace_t *objspace, st_table *tbl, int lev) +mark_m_tbl(rb_objspace_t *objspace, st_table *tbl) { struct mark_tbl_arg arg; if (!tbl) return; arg.objspace = objspace; - arg.lev = lev; st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg); } @@ -1580,17 +1687,16 @@ static int mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data) { struct mark_tbl_arg *arg = (void*)data; - gc_mark(arg->objspace, ce->value, arg->lev); + gc_mark(arg->objspace, ce->value); return ST_CONTINUE; } static void -mark_const_tbl(rb_objspace_t *objspace, st_table *tbl, int lev) +mark_const_tbl(rb_objspace_t *objspace, st_table *tbl) { struct mark_tbl_arg arg; if (!tbl) return; arg.objspace = objspace; - arg.lev = lev; st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg); } @@ -1611,19 +1717,19 @@ rb_free_const_table(st_table *tbl) void rb_mark_tbl(st_table *tbl) { - mark_tbl(&rb_objspace, tbl, 0); + mark_tbl(&rb_objspace, tbl); } void rb_gc_mark_maybe(VALUE obj) { if (is_pointer_to_heap(&rb_objspace, (void *)obj)) { - gc_mark(&rb_objspace, obj, 0); + gc_mark(&rb_objspace, obj); } } static void -gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev) +gc_mark(rb_objspace_t *objspace, VALUE ptr) { register RVALUE *obj; @@ -1634,29 +1740,17 @@ gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev) obj->as.basic.flags |= FL_MARK; objspace->heap.live_num++; - if (lev > GC_LEVEL_MAX || (lev == 0 && stack_check(STACKFRAME_FOR_GC_MARK))) { - if (!mark_stack_overflow) { - if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) { - *mark_stack_ptr = ptr; - mark_stack_ptr++; - } - else { - mark_stack_overflow = 1; - } - } - return; - } - 
gc_mark_children(objspace, ptr, lev+1); + push_mark_stack(&objspace->mark_stack, ptr); } void rb_gc_mark(VALUE ptr) { - gc_mark(&rb_objspace, ptr, 0); + gc_mark(&rb_objspace, ptr); } static void -gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) +gc_mark_children(rb_objspace_t *objspace, VALUE ptr) { register RVALUE *obj = RANY(ptr); @@ -1692,7 +1786,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) case NODE_RESBODY: case NODE_CLASS: case NODE_BLOCK_PASS: - gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev); + gc_mark(objspace, (VALUE)obj->as.node.u2.node); /* fall through */ case NODE_BLOCK: /* 1,3 */ case NODE_OPTBLOCK: @@ -1706,7 +1800,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) case NODE_DEFS: case NODE_OP_ASGN1: case NODE_ARGS: - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); + gc_mark(objspace, (VALUE)obj->as.node.u1.node); /* fall through */ case NODE_SUPER: /* 3 */ case NODE_FCALL: @@ -1733,7 +1827,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) case NODE_ALIAS: case NODE_VALIAS: case NODE_ARGSCAT: - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); + gc_mark(objspace, (VALUE)obj->as.node.u1.node); /* fall through */ case NODE_GASGN: /* 2 */ case NODE_LASGN: @@ -1769,7 +1863,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) case NODE_SCOPE: /* 2,3 */ case NODE_CDECL: case NODE_OPT_ARG: - gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev); + gc_mark(objspace, (VALUE)obj->as.node.u3.node); ptr = (VALUE)obj->as.node.u2.node; goto again; @@ -1801,26 +1895,26 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) default: /* unlisted NODE */ if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) { - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); + gc_mark(objspace, (VALUE)obj->as.node.u1.node); } if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) { - gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev); + gc_mark(objspace, 
(VALUE)obj->as.node.u2.node); } if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) { - gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev); + gc_mark(objspace, (VALUE)obj->as.node.u3.node); } } return; /* no need to mark class. */ } - gc_mark(objspace, obj->as.basic.klass, lev); + gc_mark(objspace, obj->as.basic.klass); switch (BUILTIN_TYPE(obj)) { case T_ICLASS: case T_CLASS: case T_MODULE: - mark_m_tbl(objspace, RCLASS_M_TBL(obj), lev); - mark_tbl(objspace, RCLASS_IV_TBL(obj), lev); - mark_const_tbl(objspace, RCLASS_CONST_TBL(obj), lev); + mark_m_tbl(objspace, RCLASS_M_TBL(obj)); + mark_tbl(objspace, RCLASS_IV_TBL(obj)); + mark_const_tbl(objspace, RCLASS_CONST_TBL(obj)); ptr = RCLASS_SUPER(obj); goto again; @@ -1833,13 +1927,13 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) long i, len = RARRAY_LEN(obj); VALUE *ptr = RARRAY_PTR(obj); for (i=0; i < len; i++) { - gc_mark(objspace, *ptr++, lev); + gc_mark(objspace, *ptr++); } } break; case T_HASH: - mark_hash(objspace, obj->as.hash.ntbl, lev); + mark_hash(objspace, obj->as.hash.ntbl); ptr = obj->as.hash.ifnone; goto again; @@ -1866,25 +1960,25 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) long i, len = ROBJECT_NUMIV(obj); VALUE *ptr = ROBJECT_IVPTR(obj); for (i = 0; i < len; i++) { - gc_mark(objspace, *ptr++, lev); + gc_mark(objspace, *ptr++); } } break; case T_FILE: if (obj->as.file.fptr) { - gc_mark(objspace, obj->as.file.fptr->pathv, lev); - gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing, lev); - gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat, lev); - gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts, lev); - gc_mark(objspace, obj->as.file.fptr->encs.ecopts, lev); - gc_mark(objspace, obj->as.file.fptr->write_lock, lev); + gc_mark(objspace, obj->as.file.fptr->pathv); + gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing); + gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat); + gc_mark(objspace, 
obj->as.file.fptr->writeconv_pre_ecopts); + gc_mark(objspace, obj->as.file.fptr->encs.ecopts); + gc_mark(objspace, obj->as.file.fptr->write_lock); } break; case T_REGEXP: - gc_mark(objspace, obj->as.regexp.src, lev); - break; + ptr = obj->as.regexp.src; + goto again; case T_FLOAT: case T_BIGNUM: @@ -1892,7 +1986,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) break; case T_MATCH: - gc_mark(objspace, obj->as.match.regexp, lev); + gc_mark(objspace, obj->as.match.regexp); if (obj->as.match.str) { ptr = obj->as.match.str; goto again; @@ -1900,14 +1994,14 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) break; case T_RATIONAL: - gc_mark(objspace, obj->as.rational.num, lev); - gc_mark(objspace, obj->as.rational.den, lev); - break; + gc_mark(objspace, obj->as.rational.num); + ptr = obj->as.rational.den; + goto again; case T_COMPLEX: - gc_mark(objspace, obj->as.complex.real, lev); - gc_mark(objspace, obj->as.complex.imag, lev); - break; + gc_mark(objspace, obj->as.complex.real); + ptr = obj->as.complex.imag; + goto again; case T_STRUCT: { @@ -1915,7 +2009,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) VALUE *ptr = RSTRUCT_PTR(obj); while (len--) { - gc_mark(objspace, *ptr++, lev); + gc_mark(objspace, *ptr++); } } break; @@ -2441,11 +2535,9 @@ gc_marks(rb_objspace_t *objspace) SET_STACK_END; - init_mark_stack(objspace); - th->vm->self ? 
rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm); - mark_tbl(objspace, finalizer_table, 0); + mark_tbl(objspace, finalizer_table); mark_current_machine_context(objspace, th); rb_gc_mark_symbols(); @@ -2458,7 +2550,7 @@ gc_marks(rb_objspace_t *objspace) rb_mark_end_proc(); rb_gc_mark_global_tbl(); - mark_tbl(objspace, rb_class_tbl, 0); + mark_tbl(objspace, rb_class_tbl); /* mark generic instance variables for special constants */ rb_mark_generic_ivar_tbl(); @@ -2467,15 +2559,9 @@ gc_marks(rb_objspace_t *objspace) rb_gc_mark_unlinked_live_method_entries(th->vm); - /* gc_mark objects whose marking are not completed*/ - while (!MARK_STACK_EMPTY) { - if (mark_stack_overflow) { - gc_mark_all(objspace); - } - else { - gc_mark_rest(objspace); - } - } + /* marking-loop */ + gc_mark_stacked_objects(objspace); + GC_PROF_MARK_TIMER_STOP; } @@ -3008,7 +3094,8 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace) /* XXX: this loop will make no sense */ /* because mark will not be removed */ finalize_deferred(objspace); - mark_tbl(objspace, finalizer_table, 0); + mark_tbl(objspace, finalizer_table); + gc_mark_stacked_objects(objspace); st_foreach(finalizer_table, chain_finalized_object, (st_data_t)&deferred_final_list); } while (deferred_final_list); |