path: root/iseq.c
author:    Jeremy Evans <code@jeremyevans.net>  2021-08-23 15:22:14 -0700
committer: Jeremy Evans <code@jeremyevans.net>  2021-08-29 07:23:39 -0700
commit:    2d98593bf54a37397c6e4886ccc7e3654c2eaf85 (patch)
tree:      33d08b3e99dcd4b00ee76f0a286bdbacbabcc85f /iseq.c
parent:    5f7c2291d6b3ba890d62c7e3a686202dffb14759 (diff)
Support tracing of attr_reader and attr_writer
In vm_call_method_each_type, check for c_call and c_return events before
dispatching to vm_call_ivar and vm_call_attrset. With this approach, the call
cache will still dispatch directly to those functions, so this change will
only decrease performance for the first (uncached) call, and even then, the
performance decrease is very minimal.

This approach requires that we clear the call caches when tracing is enabled
or disabled. The approach currently switches all vm_call_ivar and
vm_call_attrset call caches to vm_call_general any time tracing is enabled or
disabled, so it could theoretically result in a slowdown for code that
constantly enables or disables tracing.

This approach does not handle targeted tracepoints, but from my testing,
c_call and c_return events are not supported for targeted tracepoints, so
that shouldn't matter.

This includes a benchmark showing the performance decrease is minimal, if
detectable at all.

Fixes [Bug #16383]
Fixes [Bug #10470]

Co-authored-by: Takashi Kokubun <takashikkbn@gmail.com>
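For illustration only (this snippet is not part of the commit, and the Point
class and its reader are made-up example names): after this change, calls to
attr_reader/attr_writer-generated methods emit c_call and c_return events, so
a standard TracePoint can observe them.

    class Point
      attr_reader :x
      def initialize(x)
        @x = x
      end
    end

    tp = TracePoint.new(:c_call, :c_return) do |t|
      puts "#{t.event}: #{t.method_id}" if t.method_id == :x
    end

    # Before this commit the block never fires for the attr_reader;
    # with it, this prints "c_call: x" followed by "c_return: x".
    tp.enable { Point.new(1).x }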
Notes:
Merged: https://github.com/ruby/ruby/pull/4767
Diffstat (limited to 'iseq.c')
-rw-r--r--  iseq.c | 29
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/iseq.c b/iseq.c
index b08f1bb497..3dd7beaf0d 100644
--- a/iseq.c
+++ b/iseq.c
@@ -3407,6 +3407,32 @@ rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events)
     }
 }
+bool rb_vm_call_ivar_attrset_p(const vm_call_handler ch);
+void rb_vm_cc_general(const struct rb_callcache *cc);
+
+static int
+clear_attr_ccs_i(void *vstart, void *vend, size_t stride, void *data)
+{
+    VALUE v = (VALUE)vstart;
+    for (; v != (VALUE)vend; v += stride) {
+        void *ptr = asan_poisoned_object_p(v);
+        asan_unpoison_object(v, false);
+
+        if (imemo_type_p(v, imemo_callcache) && rb_vm_call_ivar_attrset_p(((const struct rb_callcache *)v)->call_)) {
+            rb_vm_cc_general((struct rb_callcache *)v);
+        }
+
+        asan_poison_object_if(ptr, v);
+    }
+    return 0;
+}
+
+void
+rb_clear_attr_ccs(void)
+{
+    rb_objspace_each_objects(clear_attr_ccs_i, NULL);
+}
+
 static int
 trace_set_i(void *vstart, void *vend, size_t stride, void *data)
 {
@@ -3420,6 +3446,9 @@ trace_set_i(void *vstart, void *vend, size_t stride, void *data)
         if (rb_obj_is_iseq(v)) {
             rb_iseq_trace_set(rb_iseq_check((rb_iseq_t *)v), turnon_events);
         }
+        else if (imemo_type_p(v, imemo_callcache) && rb_vm_call_ivar_attrset_p(((const struct rb_callcache *)v)->call_)) {
+            rb_vm_cc_general((struct rb_callcache *)v);
+        }
         asan_poison_object_if(ptr, v);
     }