summaryrefslogtreecommitdiff
path: root/vm_method.c
diff options
context:
space:
mode:
authorko1 <ko1@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>2010-05-05 17:51:21 +0000
committerko1 <ko1@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>2010-05-05 17:51:21 +0000
commit833cade2dce8ee8a9dd2091fcc84880030a51d54 (patch)
tree4e6e8437899658e77c877fa3e783e76322e34a9c /vm_method.c
parent34ed81ea8948d8095389d38a4818c99b25155183 (diff)
* vm_method.c (rb_unlink_method_entry, rb_sweep_method_entry):
added. Unlinked method entries are collected to vm->unlinked_method_entry_list. At GC time, mark all method entries which are on all living threads. Only non-marked method entries are collected. This hack prevents releasing a living method entry. [Performance Consideration] Since this Method Entry GC (MEGC) doesn't occur frequently, MEGC will not be a performance bottleneck. However, to traverse living method entries, every control frame push needs to clear the cfp->me field. This may be a performance issue (because pushing a control frame occurs frequently). Bug #2777 [ruby-dev:40457] * cont.c (fiber_init): init cfp->me. * gc.c (garbage_collect): kick rb_sweep_method_entry(). * method.h (rb_method_entry_t): add a mark field. * vm.c (invoke_block_from_c): set passed me. * vm.c (rb_thread_mark): mark cfp->me. * vm_core.h (rb_thread_t): add a field passed_me. * vm_core.h (rb_vm_t): add a field unlinked_method_entry_list. * vm_insnhelper.c (vm_push_frame): clear cfp->me at all times. * vm_insnhelper.c (vm_call_bmethod): pass me. * bootstraptest/test_method.rb: add a test. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@27634 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Diffstat (limited to 'vm_method.c')
-rw-r--r--vm_method.c59
1 files changed, 43 insertions, 16 deletions
diff --git a/vm_method.c b/vm_method.c
index 4c3f16000a..69d50d969b 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -126,6 +126,44 @@ rb_add_method_cfunc(VALUE klass, ID mid, VALUE (*func)(ANYARGS), int argc, rb_me
}
}
+static void
+rb_unlink_method_entry(rb_method_entry_t *me)
+{
+ struct unlinked_method_entry_list_entry *ume = ALLOC(struct unlinked_method_entry_list_entry);
+ ume->me = me;
+ ume->next = GET_VM()->unlinked_method_entry_list;
+ GET_VM()->unlinked_method_entry_list = ume;
+}
+
+void
+rb_sweep_method_entry(void *pvm)
+{
+ rb_vm_t *vm = pvm;
+ struct unlinked_method_entry_list_entry *ume = vm->unlinked_method_entry_list, *prev_ume = 0, *curr_ume;
+
+ while (ume) {
+ if (ume->me->mark) {
+ ume->me->mark = 0;
+ prev_ume = ume;
+ ume = ume->next;
+ }
+ else {
+ rb_free_method_entry(ume->me);
+
+ if (prev_ume == 0) {
+ vm->unlinked_method_entry_list = ume->next;
+ }
+ else {
+ prev_ume->next = ume->next;
+ }
+
+ curr_ume = ume;
+ ume = ume->next;
+ xfree(curr_ume);
+ }
+ }
+}
+
void
rb_free_method_entry(rb_method_entry_t *me)
{
@@ -214,26 +252,15 @@ rb_method_entry_make(VALUE klass, ID mid, rb_method_type_t type,
}
}
- /* FIXME: this avoid to free methods used in cfp, but reusing may cause
- * another problem when the usage is changed.
- */
- me = old_me;
-
- if (me->def) {
- if (me->def->alias_count == 0)
- xfree(me->def);
- else if (me->def->alias_count > 0)
- me->def->alias_count--;
- me->def = 0;
- }
- }
- else {
- me = ALLOC(rb_method_entry_t);
+ rb_unlink_method_entry(old_me);
}
+ me = ALLOC(rb_method_entry_t);
+
rb_clear_cache_by_id(mid);
me->flag = NOEX_WITH_SAFE(noex);
+ me->mark = 0;
me->called_id = mid;
me->klass = klass;
me->def = def;
@@ -453,7 +480,7 @@ remove_method(VALUE klass, ID mid)
rb_vm_check_redefinition_opt_method(me);
rb_clear_cache_for_undef(klass, mid);
- rb_free_method_entry(me);
+ rb_unlink_method_entry(me);
CALL_METHOD_HOOK(klass, removed, mid);
}