-rw-r--r--  iseq.c                            25
-rw-r--r--  iseq.h                             2
-rw-r--r--  test/ruby/test_settracefunc.rb   122
-rw-r--r--  vm.c                              73
-rw-r--r--  vm_insnhelper.c                   35
-rw-r--r--  vm_trace.c                        13
6 files changed, 210 insertions, 60 deletions
diff --git a/iseq.c b/iseq.c
index f0117d94ce..e42525cb9f 100644
--- a/iseq.c
+++ b/iseq.c
@@ -3310,6 +3310,23 @@ rb_iseq_trace_flag_cleared(const rb_iseq_t *iseq, size_t pos)
encoded_iseq_trace_instrument(&iseq_encoded[pos], 0, false);
}
+// We need to fire call events on instructions with b_call events if the block
+// is running as a method. So, if we are listening for call events, then
+// instructions that have b_call events need to become trace variants.
+// Use this function when making decisions about recompiling to trace variants.
+static inline rb_event_flag_t
+add_bmethod_events(rb_event_flag_t events)
+{
+ if (events & RUBY_EVENT_CALL) {
+ events |= RUBY_EVENT_B_CALL;
+ }
+ if (events & RUBY_EVENT_RETURN) {
+ events |= RUBY_EVENT_B_RETURN;
+ }
+ return events;
+}
+
+// Note, to support call/return events for bmethods, turnon_event can have more events than tpval.
static int
iseq_add_local_tracepoint(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line)
{
@@ -3365,9 +3382,12 @@ iseq_add_local_tracepoint_i(const rb_iseq_t *iseq, void *p)
}
int
-rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line)
+rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line, bool target_bmethod)
{
struct trace_set_local_events_struct data;
+ if (target_bmethod) {
+ turnon_events = add_bmethod_events(turnon_events);
+ }
data.turnon_events = turnon_events;
data.tpval = tpval;
data.target_line = target_line;
@@ -3399,6 +3419,7 @@ iseq_remove_local_tracepoint(const rb_iseq_t *iseq, VALUE tpval)
((rb_iseq_t *)iseq)->aux.exec.local_hooks = NULL;
}
+ local_events = add_bmethod_events(local_events);
for (pc = 0; pc<body->iseq_size;) {
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pc);
pc += encoded_iseq_trace_instrument(&iseq_encoded[pc], pc_events & (local_events | iseq->aux.exec.global_trace_events), false);
@@ -3449,7 +3470,7 @@ rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events)
rb_event_flag_t enabled_events;
rb_event_flag_t local_events = iseq->aux.exec.local_hooks ? iseq->aux.exec.local_hooks->events : 0;
((rb_iseq_t *)iseq)->aux.exec.global_trace_events = turnon_events;
- enabled_events = turnon_events | local_events;
+ enabled_events = add_bmethod_events(turnon_events | local_events);
for (pc=0; pc<body->iseq_size;) {
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pc);
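The add_bmethod_events change above matters because a block promoted to a method via define_method only has b_call/b_return event sites in its bytecode; to observe :call/:return on such a method, those sites must be recompiled to trace variants. A minimal Ruby sketch of the resulting user-visible behaviour (the object and method names are invented for illustration):

    obj = Object.new
    obj.define_singleton_method(:greet) { :hello }   # a bmethod: a block running as a method

    events = []
    tp = TracePoint.new(:call, :return) { |t| events << [t.event, t.method_id] }
    tp.enable(target: obj.method(:greet)) { obj.greet }

    # Expected with this change: [[:call, :greet], [:return, :greet]]
    p events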
diff --git a/iseq.h b/iseq.h
index eb46367379..cc68b8d3f8 100644
--- a/iseq.h
+++ b/iseq.h
@@ -160,7 +160,7 @@ const rb_iseq_t *rb_iseq_ibf_load(VALUE str);
const rb_iseq_t *rb_iseq_ibf_load_bytes(const char *cstr, size_t);
VALUE rb_iseq_ibf_load_extra_data(VALUE str);
void rb_iseq_init_trace(rb_iseq_t *iseq);
-int rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line);
+int rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line, bool target_bmethod);
int rb_iseq_remove_local_tracepoint_recursively(const rb_iseq_t *iseq, VALUE tpval);
const rb_iseq_t *rb_iseq_load_iseq(VALUE fname);
diff --git a/test/ruby/test_settracefunc.rb b/test/ruby/test_settracefunc.rb
index 4a37426531..d737c84af5 100644
--- a/test/ruby/test_settracefunc.rb
+++ b/test/ruby/test_settracefunc.rb
@@ -393,7 +393,7 @@ class TestSetTraceFunc < Test::Unit::TestCase
[["c-return", 3, :set_trace_func, Kernel],
["line", 6, __method__, self.class],
["call", 1, :foobar, FooBar],
- ["return", 6, :foobar, FooBar],
+ ["return", 1, :foobar, FooBar],
["line", 7, __method__, self.class],
["c-call", 7, :set_trace_func, Kernel]].each{|e|
assert_equal(e, events.shift)
@@ -1951,7 +1951,7 @@ CODE
end
define_method(:f_break_defined) do
- return :f_break_defined
+ break :f_break_defined
end
define_method(:f_raise_defined) do
@@ -1972,27 +1972,44 @@ CODE
tp_return_value(:f_last_defined),
'[Bug #13369]'
- assert_equal [[:b_return, :f_return_defined, nil], # current limitation
+ assert_equal [[:b_return, :f_return_defined, :f_return_defined],
[:return, :f_return_defined, :f_return_defined]],
tp_return_value(:f_return_defined),
'[Bug #13369]'
- assert_equal [[:b_return, :f_break_defined, nil],
+ assert_equal [[:b_return, :f_break_defined, :f_break_defined],
[:return, :f_break_defined, :f_break_defined]],
tp_return_value(:f_break_defined),
'[Bug #13369]'
- assert_equal [[:b_return, :f_raise_defined, nil],
+ assert_equal [[:b_return, :f_raise_defined, f_raise_defined],
[:return, :f_raise_defined, f_raise_defined]],
tp_return_value(:f_raise_defined),
'[Bug #13369]'
- assert_equal [[:b_return, :f_break_in_rescue_defined, nil],
+ assert_equal [[:b_return, :f_break_in_rescue_defined, f_break_in_rescue_defined],
[:return, :f_break_in_rescue_defined, f_break_in_rescue_defined]],
tp_return_value(:f_break_in_rescue_defined),
'[Bug #13369]'
end
+ define_method(:just_yield) do |&block|
+ block.call
+ end
+
+ define_method(:unwind_multiple_bmethods) do
+ just_yield { return :unwind_multiple_bmethods }
+ end
+
+ def test_non_local_return_across_multiple_define_methods
+ assert_equal [[:b_return, :unwind_multiple_bmethods, nil],
+ [:b_return, :just_yield, nil],
+ [:return, :just_yield, nil],
+ [:b_return, :unwind_multiple_bmethods, :unwind_multiple_bmethods],
+ [:return, :unwind_multiple_bmethods, :unwind_multiple_bmethods]],
+ tp_return_value(:unwind_multiple_bmethods)
+ end
+
def f_iter
yield
end
@@ -2403,6 +2420,99 @@ CODE
assert_equal Array.new(2){th}, events
end
+ def test_return_bmethod_location
+ bug13392 = "[ruby-core:80515] incorrect bmethod return location"
+ actual = nil
+ obj = Object.new
+ expected = __LINE__ + 1
+ obj.define_singleton_method(:t){}
+ tp = TracePoint.new(:return) do
+ next unless target_thread?
+ actual = tp.lineno
+ end
+ tp.enable {obj.t}
+ assert_equal(expected, actual, bug13392)
+ end
+
+ def test_b_tracepoints_going_away
+ # test that call and return TracePoints continue to work
+ # when b_call and b_return TracePoints stop
+ events = []
+ record_events = ->(tp) do
+ next unless target_thread?
+ events << [tp.event, tp.method_id]
+ end
+
+ call_ret_tp = TracePoint.new(:call, :return, &record_events)
+ block_call_ret_tp = TracePoint.new(:b_call, :b_return, &record_events)
+
+ obj = Object.new
+ obj.define_singleton_method(:foo) {} # a bmethod
+
+ foo = obj.method(:foo)
+ call_ret_tp.enable(target: foo) do
+ block_call_ret_tp.enable(target: foo) do
+ obj.foo
+ end
+ obj.foo
+ end
+
+ assert_equal(
+ [
+ [:call, :foo],
+ [:b_call, :foo],
+ [:b_return, :foo],
+ [:return, :foo],
+ [:call, :foo],
+ [:return, :foo],
+ ],
+ events,
+ )
+ end
+
+ def test_target_different_bmethod_same_iseq
+ # make two bmethods that share the same block iseq
+ block = Proc.new {}
+ obj = Object.new
+ obj.define_singleton_method(:one, &block)
+ obj.define_singleton_method(:two, &block)
+
+ events = []
+ record_events = ->(tp) do
+ next unless target_thread?
+ events << [tp.event, tp.method_id]
+ end
+ tp_one = TracePoint.new(:call, :return, &record_events)
+ tp_two = TracePoint.new(:call, :return, &record_events)
+
+ tp_one.enable(target: obj.method(:one)) do
+ obj.one
+ obj.two # not targeted
+ end
+ assert_equal([[:call, :one], [:return, :one]], events)
+ events.clear
+
+ tp_one.enable(target: obj.method(:one)) do
+ obj.one
+ tp_two.enable(target: obj.method(:two)) do
+ obj.two
+ end
+ obj.two
+ obj.one
+ end
+ assert_equal(
+ [
+ [:call, :one],
+ [:return, :one],
+ [:call, :two],
+ [:return, :two],
+ [:call, :one],
+ [:return, :one],
+ ],
+ events
+ )
+ end
+
def test_return_event_with_rescue
obj = Object.new
def obj.example
diff --git a/vm.c b/vm.c
index 53f16275c3..19f5ce95f0 100644
--- a/vm.c
+++ b/vm.c
@@ -1313,7 +1313,6 @@ invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, co
/* bmethod */
int arg_size = iseq->body->param.size;
VALUE ret;
- rb_hook_list_t *hooks;
VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
@@ -1325,24 +1324,9 @@ invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, co
iseq->body->local_table_size - arg_size,
iseq->body->stack_max);
- RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
- EXEC_EVENT_HOOK(ec, RUBY_EVENT_CALL, self, me->def->original_id, me->called_id, me->owner, Qnil);
-
- if (UNLIKELY((hooks = me->def->body.bmethod.hooks) != NULL) &&
- hooks->events & RUBY_EVENT_CALL) {
- rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_CALL, self,
- me->def->original_id, me->called_id, me->owner, Qnil, FALSE);
- }
VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
ret = vm_exec(ec, true);
- EXEC_EVENT_HOOK(ec, RUBY_EVENT_RETURN, self, me->def->original_id, me->called_id, me->owner, ret);
- if ((hooks = me->def->body.bmethod.hooks) != NULL &&
- hooks->events & RUBY_EVENT_RETURN) {
- rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_RETURN, self,
- me->def->original_id, me->called_id, me->owner, ret, FALSE);
- }
- RUBY_DTRACE_METHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
return ret;
}
@@ -2033,9 +2017,11 @@ frame_name(const rb_control_frame_t *cfp)
}
#endif
+// cfp_returning_with_value:
+// Whether cfp is the last frame in the unwinding process for a non-local return.
static void
hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
- int will_finish_vm_exec, int state, struct vm_throw_data *err)
+ bool cfp_returning_with_value, int state, struct vm_throw_data *err)
{
if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) {
return;
@@ -2058,32 +2044,36 @@ hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
break;
case VM_FRAME_MAGIC_BLOCK:
if (VM_FRAME_BMETHOD_P(ec->cfp)) {
- EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
+ VALUE bmethod_return_value = frame_return_value(err);
+ if (cfp_returning_with_value) {
+ // Non-local return terminating at a BMETHOD control frame.
+ bmethod_return_value = THROW_DATA_VAL(err);
+ }
+
+
+ EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, bmethod_return_value);
if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
- ec->cfp->self, 0, 0, 0, frame_return_value(err), FALSE);
+ ec->cfp->self, 0, 0, 0, bmethod_return_value, FALSE);
}
- if (!will_finish_vm_exec) {
- const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
-
- /* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */
- EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
- rb_vm_frame_method_entry(ec->cfp)->def->original_id,
- rb_vm_frame_method_entry(ec->cfp)->called_id,
- rb_vm_frame_method_entry(ec->cfp)->owner,
- frame_return_value(err));
-
- VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
- local_hooks = me->def->body.bmethod.hooks;
-
- if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
- rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
- rb_vm_frame_method_entry(ec->cfp)->def->original_id,
- rb_vm_frame_method_entry(ec->cfp)->called_id,
- rb_vm_frame_method_entry(ec->cfp)->owner,
- frame_return_value(err), TRUE);
- }
+ const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
+
+ EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
+ rb_vm_frame_method_entry(ec->cfp)->def->original_id,
+ rb_vm_frame_method_entry(ec->cfp)->called_id,
+ rb_vm_frame_method_entry(ec->cfp)->owner,
+ bmethod_return_value);
+
+ VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
+ local_hooks = me->def->body.bmethod.hooks;
+
+ if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
+ rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
+ rb_vm_frame_method_entry(ec->cfp)->def->original_id,
+ rb_vm_frame_method_entry(ec->cfp)->called_id,
+ rb_vm_frame_method_entry(ec->cfp)->owner,
+ bmethod_return_value, TRUE);
}
THROW_DATA_CONSUMED_SET(err);
}
@@ -2284,7 +2274,8 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
if (catch_iseq == NULL) {
ec->errinfo = Qnil;
THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
- hook_before_rewind(ec, ec->cfp, TRUE, state, err);
+ // cfp == escape_cfp here so calling with cfp_returning_with_value = true
+ hook_before_rewind(ec, ec->cfp, true, state, err);
rb_vm_pop_frame(ec);
return THROW_DATA_VAL(err);
}
@@ -2425,7 +2416,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
return Qundef;
}
else {
- hook_before_rewind(ec, ec->cfp, FALSE, state, err);
+ hook_before_rewind(ec, ec->cfp, (cfp == escape_cfp), state, err);
if (VM_FRAME_FINISHED_P(ec->cfp)) {
rb_vm_pop_frame(ec);
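The hook_before_rewind changes above mean that when a non-local return unwinds to a bmethod frame, both the :b_return and the :return events fire before the frame is popped, and they carry the value actually being returned rather than nil. A minimal sketch, with invented names, of the behaviour the updated tests assert:

    class Example
      define_method(:bail) { return :bailed }   # `return` unwinds the bmethod frame
    end

    values = []
    TracePoint.new(:b_return, :return) { |t|
      values << [t.event, t.return_value] if t.method_id == :bail
    }.enable { Example.new.bail }

    # Expected with this change: [[:b_return, :bailed], [:return, :bailed]]
    p values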
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 348bfea5aa..741deb6b42 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -5638,6 +5638,7 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
{
const VALUE *pc = reg_cfp->pc;
rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
+ rb_event_flag_t global_events = enabled_flags;
if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
return;
@@ -5647,12 +5648,25 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
size_t pos = pc - iseq->body->iseq_encoded;
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
- rb_event_flag_t local_hook_events = local_hooks != NULL ? local_hooks->events : 0;
- enabled_flags |= local_hook_events;
+ rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
+ rb_hook_list_t *bmethod_local_hooks = NULL;
+ rb_event_flag_t bmethod_local_events = 0;
+ bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
+ enabled_flags |= iseq_local_events;
+
+ VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
+
+ if (bmethod_frame) {
+ const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
+ VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
+ bmethod_local_hooks = me->def->body.bmethod.hooks;
+ if (bmethod_local_hooks) {
+ bmethod_local_events = bmethod_local_hooks->events;
+ }
+ }
- VM_ASSERT((local_hook_events & ~ISEQ_TRACE_EVENTS) == 0);
- if ((pc_events & enabled_flags) == 0) {
+ if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
#if 0
/* disable trace */
/* TODO: incomplete */
@@ -5670,6 +5684,9 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
}
else {
rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
+ /* Note, not considering iseq local events here since the same
+ * iseq could be used in multiple bmethods. */
+ rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
if (0) {
ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
@@ -5681,17 +5698,25 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
}
VM_ASSERT(reg_cfp->pc == pc);
VM_ASSERT(pc_events != 0);
- VM_ASSERT(enabled_flags & pc_events);
/* check traces */
+ if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
+ /* b_call instruction running as a method. Fire call event. */
+ vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks, Qundef);
+ }
VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
+ if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
+ /* b_return instruction running as a method. Fire return event. */
+ vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks, TOPN(0));
+ }
}
}
}
+#undef VM_TRACE_HOOK
#if VM_CHECK_MODE > 0
NORETURN( NOINLINE( COLDFUNC
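With the vm_trace changes above, an instruction carrying a b_call event also fires :call when its frame is running as a bmethod (and likewise :return at the b_return site), so listeners see method and block events in natural nesting order. A minimal sketch with invented names:

    obj = Object.new
    obj.define_singleton_method(:step) { }

    order = []
    TracePoint.new(:call, :b_call, :b_return, :return) { |t|
      order << t.event if t.method_id == :step
    }.enable { obj.step }

    # Expected order with this change: [:call, :b_call, :b_return, :return]
    p order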
diff --git a/vm_trace.c b/vm_trace.c
index 66ea7b24b7..3a0f7a7aad 100644
--- a/vm_trace.c
+++ b/vm_trace.c
@@ -1180,8 +1180,9 @@ rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
{
rb_tp_t *tp = tpptr(tpval);
const rb_iseq_t *iseq = iseq_of(target);
- int n;
+ int n = 0;
unsigned int line = 0;
+ bool target_bmethod = false;
if (tp->tracing > 0) {
rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
@@ -1199,10 +1200,6 @@ rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
VM_ASSERT(tp->local_target_set == Qfalse);
tp->local_target_set = rb_obj_hide(rb_ident_hash_new());
- /* iseq */
- n = rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line);
- rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);
-
/* bmethod */
if (rb_obj_is_method(target)) {
rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
@@ -1211,11 +1208,17 @@ rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
def->body.bmethod.hooks = ZALLOC(rb_hook_list_t);
rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval, 0);
rb_hash_aset(tp->local_target_set, target, Qfalse);
+ target_bmethod = true;
n++;
}
}
+ /* iseq */
+ n += rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line, target_bmethod);
+ rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);
+
+
if (n == 0) {
rb_raise(rb_eArgError, "can not enable any hooks");
}
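In rb_tracepoint_enable_for_target, the bmethod hook list is now connected before the iseq is instrumented, so the iseq recompilation above knows it is targeting a bmethod. The per-method hook list also keeps events scoped to the targeted method even when another method is defined from the very same block, as in this sketch (names invented):

    shared = proc { :done }
    obj = Object.new
    obj.define_singleton_method(:fast, &shared)
    obj.define_singleton_method(:slow, &shared)   # same block iseq as :fast

    seen = []
    tp = TracePoint.new(:call, :return) { |t| seen << [t.event, t.method_id] }
    tp.enable(target: obj.method(:fast)) do
      obj.fast
      obj.slow   # shares the iseq, but is not targeted
    end

    # Expected: [[:call, :fast], [:return, :fast]]
    p seen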