author     Jeremy Evans <code@jeremyevans.net>  2021-08-23 15:22:14 -0700
committer  Jeremy Evans <code@jeremyevans.net>  2021-08-29 07:23:39 -0700
commit     2d98593bf54a37397c6e4886ccc7e3654c2eaf85 (patch)
tree       33d08b3e99dcd4b00ee76f0a286bdbacbabcc85f
parent     5f7c2291d6b3ba890d62c7e3a686202dffb14759 (diff)
Support tracing of attr_reader and attr_writer
In vm_call_method_each_type, check for c_call and c_return events before dispatching to vm_call_ivar and vm_call_attrset. With this approach, the call cache will still dispatch directly to those functions, so this change will only decrease performance for the first (uncached) call, and even then, the performance decrease is very minimal.

This approach requires that we clear the call caches when tracing is enabled or disabled. The approach currently switches all vm_call_ivar and vm_call_attrset call caches to vm_call_general any time tracing is enabled or disabled, so it could theoretically result in a slowdown for code that constantly enables or disables tracing.

This approach does not handle targeted tracepoints, but from my testing, c_call and c_return events are not supported for targeted tracepoints, so that shouldn't matter.

This includes a benchmark showing the performance decrease is minimal, if detectable at all.

Fixes [Bug #16383]
Fixes [Bug #10470]

Co-authored-by: Takashi Kokubun <takashikkbn@gmail.com>
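For context, a minimal Ruby sketch of the user-visible effect (the class and variable names below are illustrative, not part of the patch): with this change, calling an attr_accessor-generated reader or writer fires :c_call and :c_return TracePoint events, which previously were not emitted for these optimized methods.

    # Illustrative example; any class using attr_accessor behaves the same.
    class Point
      attr_accessor :x
    end

    point = Point.new
    trace = TracePoint.new(:c_call, :c_return) do |tp|
      puts "#{tp.event} #{tp.method_id}"
    end
    trace.enable do
      point.x = 1   # now reports c_call/c_return for :x=
      point.x       # now reports c_call/c_return for :x
    end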
Notes:
    Merged: https://github.com/ruby/ruby/pull/4767
-rw-r--r--  benchmark/attr_accessor.yml     29
-rw-r--r--  iseq.c                          29
-rw-r--r--  test/ruby/test_settracefunc.rb  50
-rw-r--r--  vm_eval.c                       22
-rw-r--r--  vm_insnhelper.c                 47
-rw-r--r--  vm_trace.c                       5
6 files changed, 176 insertions, 6 deletions
diff --git a/benchmark/attr_accessor.yml b/benchmark/attr_accessor.yml
new file mode 100644
index 0000000000..82134cdf9b
--- /dev/null
+++ b/benchmark/attr_accessor.yml
@@ -0,0 +1,29 @@
+prelude: |
+  class C
+    attr_accessor :x
+    def initialize
+      @x = nil
+    end
+    class_eval <<-END
+      def ar
+        #{'x;'*256}
+      end
+      def aw
+        #{'self.x = nil;'*256}
+      end
+      def arm
+        m = method(:x)
+        #{'m.call;'*256}
+      end
+      def awm
+        m = method(:x=)
+        #{'m.call(nil);'*256}
+      end
+    END
+  end
+  obj = C.new
+benchmark:
+  attr_reader: "obj.ar"
+  attr_writer: "obj.aw"
+  attr_reader_method: "obj.arm"
+  attr_writer_method: "obj.awm"
diff --git a/iseq.c b/iseq.c
index b08f1bb497..3dd7beaf0d 100644
--- a/iseq.c
+++ b/iseq.c
@@ -3407,6 +3407,32 @@ rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events)
}
}
+bool rb_vm_call_ivar_attrset_p(const vm_call_handler ch);
+void rb_vm_cc_general(const struct rb_callcache *cc);
+
+static int
+clear_attr_ccs_i(void *vstart, void *vend, size_t stride, void *data)
+{
+    VALUE v = (VALUE)vstart;
+    for (; v != (VALUE)vend; v += stride) {
+        void *ptr = asan_poisoned_object_p(v);
+        asan_unpoison_object(v, false);
+
+        if (imemo_type_p(v, imemo_callcache) && rb_vm_call_ivar_attrset_p(((const struct rb_callcache *)v)->call_)) {
+            rb_vm_cc_general((struct rb_callcache *)v);
+        }
+
+        asan_poison_object_if(ptr, v);
+    }
+    return 0;
+}
+
+void
+rb_clear_attr_ccs(void)
+{
+    rb_objspace_each_objects(clear_attr_ccs_i, NULL);
+}
+
static int
trace_set_i(void *vstart, void *vend, size_t stride, void *data)
{
@@ -3420,6 +3446,9 @@ trace_set_i(void *vstart, void *vend, size_t stride, void *data)
        if (rb_obj_is_iseq(v)) {
            rb_iseq_trace_set(rb_iseq_check((rb_iseq_t *)v), turnon_events);
        }
+        else if (imemo_type_p(v, imemo_callcache) && rb_vm_call_ivar_attrset_p(((const struct rb_callcache *)v)->call_)) {
+            rb_vm_cc_general((struct rb_callcache *)v);
+        }
        asan_poison_object_if(ptr, v);
    }
diff --git a/test/ruby/test_settracefunc.rb b/test/ruby/test_settracefunc.rb
index 1393caaf91..88488aef5c 100644
--- a/test/ruby/test_settracefunc.rb
+++ b/test/ruby/test_settracefunc.rb
@@ -831,6 +831,56 @@ class TestSetTraceFunc < Test::Unit::TestCase
    }
  end
+  def test_tracepoint_attr
+    c = Class.new do
+      attr_accessor :x
+      alias y x
+      alias y= x=
+    end
+    obj = c.new
+
+    ar_meth = obj.method(:x)
+    aw_meth = obj.method(:x=)
+    aar_meth = obj.method(:y)
+    aaw_meth = obj.method(:y=)
+    events = []
+    trace = TracePoint.new(:c_call, :c_return){|tp|
+      next if !target_thread?
+      next if tp.path != __FILE__
+      next if tp.method_id == :call
+      case tp.event
+      when :c_call
+        assert_raise(RuntimeError) {tp.return_value}
+        events << [tp.event, tp.method_id, tp.callee_id]
+      when :c_return
+        events << [tp.event, tp.method_id, tp.callee_id, tp.return_value]
+      end
+    }
+    test_proc = proc do
+      obj.x = 1
+      obj.x
+      obj.y = 2
+      obj.y
+      aw_meth.call(1)
+      ar_meth.call
+      aaw_meth.call(2)
+      aar_meth.call
+    end
+    test_proc.call # populate call caches
+    trace.enable(&test_proc)
+    expected = [
+      [:c_call, :x=, :x=],
+      [:c_return, :x=, :x=, 1],
+      [:c_call, :x, :x],
+      [:c_return, :x, :x, 1],
+      [:c_call, :x=, :y=],
+      [:c_return, :x=, :y=, 2],
+      [:c_call, :x, :y],
+      [:c_return, :x, :y, 2],
+    ]
+    assert_equal(expected*2, events)
+  end
+
  class XYZZYException < Exception; end
  def method_test_tracepoint_raised_exception err
    raise err
diff --git a/vm_eval.c b/vm_eval.c
index 8a0166e1ee..a727ea8776 100644
--- a/vm_eval.c
+++ b/vm_eval.c
@@ -190,7 +190,16 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const
        }
        rb_check_arity(calling->argc, 1, 1);
-        ret = rb_ivar_set(calling->recv, vm_cc_cme(cc)->def->body.attr.id, argv[0]);
+        if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv,
+                            vm_cc_cme(cc)->def->original_id, vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef);
+            ret = rb_ivar_set(calling->recv, vm_cc_cme(cc)->def->body.attr.id, argv[0]);
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv,
+                            vm_cc_cme(cc)->def->original_id, vm_ci_mid(ci), vm_cc_cme(cc)->owner, ret);
+        }
+        else {
+            ret = rb_ivar_set(calling->recv, vm_cc_cme(cc)->def->body.attr.id, argv[0]);
+        }
        goto success;
      case VM_METHOD_TYPE_IVAR:
        if (calling->kw_splat &&
@@ -201,7 +210,16 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const
        }
        rb_check_arity(calling->argc, 0, 0);
-        ret = rb_attr_get(calling->recv, vm_cc_cme(cc)->def->body.attr.id);
+        if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv,
+                            vm_cc_cme(cc)->def->original_id, vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef);
+            ret = rb_attr_get(calling->recv, vm_cc_cme(cc)->def->body.attr.id);
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv,
+                            vm_cc_cme(cc)->def->original_id, vm_ci_mid(ci), vm_cc_cme(cc)->owner, ret);
+        }
+        else {
+            ret = rb_attr_get(calling->recv, vm_cc_cme(cc)->def->body.attr.id);
+        }
        goto success;
      case VM_METHOD_TYPE_BMETHOD:
        ret = vm_call_bmethod_body(ec, calling, argv);
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index e186376b24..e08ba34082 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -3029,6 +3029,12 @@ vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_c
    return vm_setivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, val, NULL, NULL, cc, 1);
}
+bool
+rb_vm_call_ivar_attrset_p(const vm_call_handler ch)
+{
+    return (ch == vm_call_ivar || ch == vm_call_attrset);
+}
+
static inline VALUE
vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
{
@@ -3484,16 +3490,40 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
        rb_check_arity(calling->argc, 1, 1);
        vm_cc_attr_index_set(cc, 0);
-        CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG)));
-        return vm_call_attrset(ec, cfp, calling);
+        if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
+            const struct rb_callinfo *ci = calling->ci;
+            VALUE v;
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id,
+                            vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef);
+            v = vm_call_attrset(ec, cfp, calling);
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id,
+                            vm_ci_mid(ci), vm_cc_cme(cc)->owner, v);
+            return v;
+        }
+        else {
+            CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG)));
+            return vm_call_attrset(ec, cfp, calling);
+        }
      case VM_METHOD_TYPE_IVAR:
        CALLER_SETUP_ARG(cfp, calling, ci);
        CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
        rb_check_arity(calling->argc, 0, 0);
        vm_cc_attr_index_set(cc, 0);
-        CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT)));
-        return vm_call_ivar(ec, cfp, calling);
+        if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
+            const struct rb_callinfo *ci = calling->ci;
+            VALUE v;
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id,
+                            vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef);
+            v = vm_call_ivar(ec, cfp, calling);
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id,
+                            vm_ci_mid(ci), vm_cc_cme(cc)->owner, v);
+            return v;
+        }
+        else {
+            CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT)));
+            return vm_call_ivar(ec, cfp, calling);
+        }
      case VM_METHOD_TYPE_MISSING:
        vm_cc_method_missing_reason_set(cc, 0);
@@ -3615,6 +3645,15 @@ vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
    return vm_call_method(ec, reg_cfp, calling);
}
+void
+rb_vm_cc_general(const struct rb_callcache *cc)
+{
+    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+    VM_ASSERT(cc != vm_cc_empty());
+
+    *(vm_call_handler *)&cc->call_ = vm_call_general;
+}
+
static VALUE
vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
{
diff --git a/vm_trace.c b/vm_trace.c
index ed218b8120..cec5e42e31 100644
--- a/vm_trace.c
+++ b/vm_trace.c
@@ -74,6 +74,8 @@ rb_hook_list_free(rb_hook_list_t *hooks)
/* ruby_vm_event_flags management */
+void rb_clear_attr_ccs(void);
+
static void
update_global_event_hook(rb_event_flag_t vm_events)
{
@@ -91,6 +93,9 @@ update_global_event_hook(rb_event_flag_t vm_events)
        /* write all ISeqs if and only if new events are added */
        rb_iseq_trace_set_all(new_iseq_events | enabled_iseq_events);
    }
+    else {
+        rb_clear_attr_ccs();
+    }
    ruby_vm_event_flags = vm_events;
    ruby_vm_event_enabled_global_flags |= vm_events;