author     Alan Wu <[email protected]>   2021-07-13 19:01:09 -0400
committer  Alan Wu <[email protected]>   2021-12-01 17:42:33 -0500
commit     9121e57a5f50bc91bae48b3b91edb283bf96cb6b
tree       a90f8d855b86e508f7a9928395c85b4001fb6c29
parent     3b2b28d035c9635b9473c7a03ede04fa6ac57a34 (diff)
Rework tracing for blocks running as methods
The main impetus for this change is to fix [Bug #13392]. Previously, we fired the "return" TracePoint event after popping the stack frame for the block running as a method (BMETHOD). This gave undesirable source location output, since the return event normally fires right before the frame goes away.

The iseq for each block can run both as a block and as a method. To accommodate that, this commit makes vm_trace() fire call/return events for instructions that have b_call/b_return events attached when the iseq is running as a BMETHOD. The logic for rewriting to "trace_*" instructions is tweaked so that when the user listens to call/return events, instructions with b_call/b_return become their trace variants.

To continue to provide the return value for non-local returns done using the "return" or "break" keyword inside BMETHODs, the stack unwinding code is tweaked. b_return events now provide the same return value as return events for these non-local cases. A pre-existing test had treated the missing return value for these b_return events as a known limitation.

This commit also removes the checks for call/return TracePoint events that happen when calling into BMETHODs while no TracePoints are active. Technically, migrating just the return event is enough to fix the bug, but migrating both call and return removes our reliance on `VM_FRAME_FLAG_FINISH` and on re-entering the interpreter when the caller is already in the interpreter.
Notes: Merged: https://github.com/ruby/ruby/pull/4637
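For context, a minimal Ruby sketch of the user-visible behavior described above (the class and method names are illustrative, not taken from the patch): a method defined with define_method is a block running as a method, and its :return TracePoint event should report the block's own source location rather than a location in the caller, which is what it reported before this change.

    # Illustrative sketch of [Bug #13392]; Greeter/greet are made-up names.
    class Greeter
      define_method(:greet) do   # greet is a BMETHOD: a block running as a method
        "hello"
      end                        # the :return event should point here...
    end

    TracePoint.new(:return) do |tp|
      puts "#{tp.event} #{tp.method_id} #{tp.path}:#{tp.lineno}"
    end.enable do
      Greeter.new.greet          # ...not at the caller, as it did before the fix
    end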
-rw-r--r--  iseq.c                           25
-rw-r--r--  iseq.h                            2
-rw-r--r--  test/ruby/test_settracefunc.rb  122
-rw-r--r--  vm.c                             73
-rw-r--r--  vm_insnhelper.c                  35
-rw-r--r--  vm_trace.c                       13
6 files changed, 210 insertions, 60 deletions
@@ -3310,6 +3310,23 @@ rb_iseq_trace_flag_cleared(const rb_iseq_t *iseq, size_t pos)
encoded_iseq_trace_instrument(&iseq_encoded[pos], 0, false);
}
static int
iseq_add_local_tracepoint(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line)
{
@@ -3365,9 +3382,12 @@ iseq_add_local_tracepoint_i(const rb_iseq_t *iseq, void *p)
}
int
-rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line)
{
struct trace_set_local_events_struct data;
data.turnon_events = turnon_events;
data.tpval = tpval;
data.target_line = target_line;
@@ -3399,6 +3419,7 @@ iseq_remove_local_tracepoint(const rb_iseq_t *iseq, VALUE tpval)
((rb_iseq_t *)iseq)->aux.exec.local_hooks = NULL;
}
for (pc = 0; pc<body->iseq_size;) {
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pc);
pc += encoded_iseq_trace_instrument(&iseq_encoded[pc], pc_events & (local_events | iseq->aux.exec.global_trace_events), false);
@@ -3449,7 +3470,7 @@ rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events)
rb_event_flag_t enabled_events;
rb_event_flag_t local_events = iseq->aux.exec.local_hooks ? iseq->aux.exec.local_hooks->events : 0;
((rb_iseq_t *)iseq)->aux.exec.global_trace_events = turnon_events;
- enabled_events = turnon_events | local_events;
for (pc=0; pc<body->iseq_size;) {
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pc);
@@ -160,7 +160,7 @@ const rb_iseq_t *rb_iseq_ibf_load(VALUE str);
const rb_iseq_t *rb_iseq_ibf_load_bytes(const char *cstr, size_t);
VALUE rb_iseq_ibf_load_extra_data(VALUE str);
void rb_iseq_init_trace(rb_iseq_t *iseq);
-int rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line);
int rb_iseq_remove_local_tracepoint_recursively(const rb_iseq_t *iseq, VALUE tpval);
const rb_iseq_t *rb_iseq_load_iseq(VALUE fname);
@@ -393,7 +393,7 @@ class TestSetTraceFunc < Test::Unit::TestCase
[["c-return", 3, :set_trace_func, Kernel],
["line", 6, __method__, self.class],
["call", 1, :foobar, FooBar],
- ["return", 6, :foobar, FooBar],
["line", 7, __method__, self.class],
["c-call", 7, :set_trace_func, Kernel]].each{|e|
assert_equal(e, events.shift)
@@ -1951,7 +1951,7 @@ CODE
end
define_method(:f_break_defined) do
- return :f_break_defined
end
define_method(:f_raise_defined) do
@@ -1972,27 +1972,44 @@ CODE
tp_return_value(:f_last_defined),
'[Bug #13369]'
- assert_equal [[:b_return, :f_return_defined, nil], # current limitation
[:return, :f_return_defined, :f_return_defined]],
tp_return_value(:f_return_defined),
'[Bug #13369]'
- assert_equal [[:b_return, :f_break_defined, nil],
[:return, :f_break_defined, :f_break_defined]],
tp_return_value(:f_break_defined),
'[Bug #13369]'
- assert_equal [[:b_return, :f_raise_defined, nil],
[:return, :f_raise_defined, :f_raise_defined]],
tp_return_value(:f_raise_defined),
'[Bug #13369]'
- assert_equal [[:b_return, :f_break_in_rescue_defined, nil],
[:return, :f_break_in_rescue_defined, :f_break_in_rescue_defined]],
tp_return_value(:f_break_in_rescue_defined),
'[Bug #13369]'
end
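As a rough Ruby sketch of what the updated assertions above check (a hypothetical snippet, not the suite's tp_return_value helper): a BMETHOD that exits via the "break" keyword now reports the same value on its :b_return event as on its :return event, where :b_return used to report nil.

    # Hypothetical example; obj and f_break_defined mirror the test's naming.
    obj = Object.new
    obj.define_singleton_method(:f_break_defined) do
      break :f_break_defined   # non-local exit out of the block running as a method
    end

    events = []
    tp = TracePoint.new(:b_return, :return) { |t| events << [t.event, t.return_value] }
    tp.enable
    obj.f_break_defined
    tp.disable

    # With this commit both events carry the value; previously :b_return saw nil:
    #   events #=> [[:b_return, :f_break_defined], [:return, :f_break_defined]]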
def f_iter
yield
end
@@ -2403,6 +2420,99 @@ CODE
assert_equal Array.new(2){th}, events
end
def test_return_event_with_rescue
obj = Object.new
def obj.example
@@ -1313,7 +1313,6 @@ invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, co
/* bmethod */
int arg_size = iseq->body->param.size;
VALUE ret;
- rb_hook_list_t *hooks;
VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
@@ -1325,24 +1324,9 @@ invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, co
iseq->body->local_table_size - arg_size,
iseq->body->stack_max);
- RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
- EXEC_EVENT_HOOK(ec, RUBY_EVENT_CALL, self, me->def->original_id, me->called_id, me->owner, Qnil);
-
- if (UNLIKELY((hooks = me->def->body.bmethod.hooks) != NULL) &&
- hooks->events & RUBY_EVENT_CALL) {
- rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_CALL, self,
- me->def->original_id, me->called_id, me->owner, Qnil, FALSE);
- }
VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
ret = vm_exec(ec, true);
- EXEC_EVENT_HOOK(ec, RUBY_EVENT_RETURN, self, me->def->original_id, me->called_id, me->owner, ret);
- if ((hooks = me->def->body.bmethod.hooks) != NULL &&
- hooks->events & RUBY_EVENT_RETURN) {
- rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_RETURN, self,
- me->def->original_id, me->called_id, me->owner, ret, FALSE);
- }
- RUBY_DTRACE_METHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
return ret;
}
@@ -2033,9 +2017,11 @@ frame_name(const rb_control_frame_t *cfp)
}
#endif
static void
hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
- int will_finish_vm_exec, int state, struct vm_throw_data *err)
{
if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) {
return;
@@ -2058,32 +2044,36 @@ hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
break;
case VM_FRAME_MAGIC_BLOCK:
if (VM_FRAME_BMETHOD_P(ec->cfp)) {
- EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
- ec->cfp->self, 0, 0, 0, frame_return_value(err), FALSE);
}
- if (!will_finish_vm_exec) {
- const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
-
- /* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */
- EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
- rb_vm_frame_method_entry(ec->cfp)->def->original_id,
- rb_vm_frame_method_entry(ec->cfp)->called_id,
- rb_vm_frame_method_entry(ec->cfp)->owner,
- frame_return_value(err));
-
- VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
- local_hooks = me->def->body.bmethod.hooks;
-
- if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
- rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
- rb_vm_frame_method_entry(ec->cfp)->def->original_id,
- rb_vm_frame_method_entry(ec->cfp)->called_id,
- rb_vm_frame_method_entry(ec->cfp)->owner,
- frame_return_value(err), TRUE);
- }
}
THROW_DATA_CONSUMED_SET(err);
}
@@ -2284,7 +2274,8 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
if (catch_iseq == NULL) {
ec->errinfo = Qnil;
THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
- hook_before_rewind(ec, ec->cfp, TRUE, state, err);
rb_vm_pop_frame(ec);
return THROW_DATA_VAL(err);
}
@@ -2425,7 +2416,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
return Qundef;
}
else {
- hook_before_rewind(ec, ec->cfp, FALSE, state, err);
if (VM_FRAME_FINISHED_P(ec->cfp)) {
rb_vm_pop_frame(ec);
@@ -5638,6 +5638,7 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
{
const VALUE *pc = reg_cfp->pc;
rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
return;
@@ -5647,12 +5648,25 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
size_t pos = pc - iseq->body->iseq_encoded;
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
- rb_event_flag_t local_hook_events = local_hooks != NULL ? local_hooks->events : 0;
- enabled_flags |= local_hook_events;
- VM_ASSERT((local_hook_events & ~ISEQ_TRACE_EVENTS) == 0);
- if ((pc_events & enabled_flags) == 0) {
#if 0
/* disable trace */
/* TODO: incomplete */
@@ -5670,6 +5684,9 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
}
else {
rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
if (0) {
ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
@@ -5681,17 +5698,25 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
}
VM_ASSERT(reg_cfp->pc == pc);
VM_ASSERT(pc_events != 0);
- VM_ASSERT(enabled_flags & pc_events);
/* check traces */
VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
}
}
}
#if VM_CHECK_MODE > 0
NORETURN( NOINLINE( COLDFUNC
@@ -1180,8 +1180,9 @@ rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
{
rb_tp_t *tp = tpptr(tpval);
const rb_iseq_t *iseq = iseq_of(target);
- int n;
unsigned int line = 0;
if (tp->tracing > 0) {
rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
@@ -1199,10 +1200,6 @@ rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
VM_ASSERT(tp->local_target_set == Qfalse);
tp->local_target_set = rb_obj_hide(rb_ident_hash_new());
- /* iseq */
- n = rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line);
- rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);
-
/* bmethod */
if (rb_obj_is_method(target)) {
rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
@@ -1211,11 +1208,17 @@ rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
def->body.bmethod.hooks = ZALLOC(rb_hook_list_t);
rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval, 0);
rb_hash_aset(tp->local_target_set, target, Qfalse);
n++;
}
}
if (n == 0) {
rb_raise(rb_eArgError, "can not enable any hooks");
}
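For reference, a hedged sketch of the user-facing path this targeting code serves (the object and method names are invented): enabling a TracePoint only for one bmethod, via its Method object, so that unrelated code stays untraced.

    # Hypothetical usage; only calls into the targeted bmethod are traced.
    obj = Object.new
    obj.define_singleton_method(:example) { :done }

    tp = TracePoint.new(:return) { |t| p [t.method_id, t.return_value] }
    tp.enable(target: obj.method(:example)) do
      obj.example        # prints [:example, :done]
      Object.new.to_s    # unrelated calls produce no events
    end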