path: root/vm_insnhelper.c
-rw-r--r--   vm_insnhelper.c | 814
1 file changed, 424 insertions(+), 390 deletions(-)
@@ -115,9 +115,9 @@ callable_class_p(VALUE klass)
}
static int
-callable_method_entry_p(const rb_callable_method_entry_t *me)
{
- if (me == NULL || callable_class_p(me->defined_class)) {
return TRUE;
}
else {
@@ -221,8 +221,6 @@ static bool vm_stack_canary_was_born = false;
MJIT_FUNC_EXPORTED void
vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
- return;
-
const struct rb_control_frame_struct *reg_cfp = ec->cfp;
const struct rb_iseq_struct *iseq;
@@ -1024,9 +1022,9 @@ vm_search_const_defined_class(const VALUE cbase, ID id)
return 0;
}
-ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, IVC, struct rb_call_cache *, int));
static inline VALUE
-vm_getivar(VALUE obj, ID id, IVC ic, struct rb_call_cache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
VALUE val = Qundef;
@@ -1035,10 +1033,10 @@ vm_getivar(VALUE obj, ID id, IVC ic, struct rb_call_cache *cc, int is_attr)
// frozen?
}
else if (LIKELY(is_attr ?
- RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, cc->aux.index > 0) :
RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial,
ic->ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
- st_index_t index = !is_attr ? ic->index : (cc->aux.index - 1);
RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
@@ -1076,7 +1074,7 @@ vm_getivar(VALUE obj, ID id, IVC ic, struct rb_call_cache *cc, int is_attr)
ic->ic_serial = RCLASS_SERIAL(RBASIC(obj)->klass);
}
else { /* call_info */
- cc->aux.index = (int)index + 1;
}
if (index < numiv) {
@@ -1124,7 +1122,7 @@ vm_getivar(VALUE obj, ID id, IVC ic, struct rb_call_cache *cc, int is_attr)
}
static inline VALUE
-vm_setivar(VALUE obj, ID id, VALUE val, IVC ic, struct rb_call_cache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
rb_check_frozen_internal(obj);
@@ -1135,9 +1133,9 @@ vm_setivar(VALUE obj, ID id, VALUE val, IVC ic, struct rb_call_cache *cc, int is
if (LIKELY(
(!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, ic->ic_serial == RCLASS_SERIAL(klass))) ||
- ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, cc->aux.index > 0)))) {
VALUE *ptr = ROBJECT_IVPTR(obj);
- index = !is_attr ? ic->index : cc->aux.index-1;
if (RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_oorange, index < ROBJECT_NUMIV(obj))) {
RB_OBJ_WRITE(obj, &ptr[index], val);
@@ -1157,7 +1155,7 @@ vm_setivar(VALUE obj, ID id, VALUE val, IVC ic, struct rb_call_cache *cc, int is
rb_raise(rb_eArgError, "too many instance variables");
}
else {
- cc->aux.index = (int)(index + 1);
}
}
/* fall through */
@@ -1440,210 +1438,199 @@ vm_expandarray(VALUE *sp, VALUE ary, rb_num_t num, int flag)
static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, struct rb_call_data *cd);
-#if __has_attribute(artificial)
-__attribute__((__artificial__))
-#endif
-static inline vm_call_handler
-calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me)
{
- const struct rb_callinfo *ci = cd->ci;
- const struct rb_call_cache *cc = &cd->cc;
- if (UNLIKELY(!me)) {
- RB_DEBUG_COUNTER_INC(mc_miss_by_nome);
- return vm_call_general; /* vm_call_method_nome() situation */
- }
- else if (LIKELY(cc->me != me)) {
- RB_DEBUG_COUNTER_INC(mc_miss_by_distinct);
- return vm_call_general; /* normal cases */
- }
- else if (UNLIKELY(cc->method_serial != me->def->method_serial)) {
- RB_DEBUG_COUNTER_INC(mc_miss_by_refine);
- return vm_call_general; /* cc->me was refined elsewhere */
- }
- /* "Calling a formerly-public method, which is now privatised, with an
- * explicit receiver" is the only situation we have to check here. A
- * formerly-private method now publicised is an absolutely safe thing.
- * Calling a private method without specifying a receiver is also safe. */
- else if ((METHOD_ENTRY_VISI(cc->me) != METHOD_VISI_PUBLIC) &&
- !(vm_ci_flag(ci) & VM_CALL_FCALL)) {
- RB_DEBUG_COUNTER_INC(mc_miss_by_visi);
- return vm_call_general;
}
- else {
- RB_DEBUG_COUNTER_INC(mc_miss_spurious);
- (void)RB_DEBUG_COUNTER_INC_IF(mc_miss_reuse_call, cc->call != vm_call_general);
- return cc->call;
}
}
-MJIT_FUNC_EXPORTED void
-rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass)
{
- const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
-
- const rb_callable_method_entry_t *me =
- rb_callable_method_entry(klass, vm_ci_mid(ci));
- const vm_call_handler call = calccall(cd, me);
- struct rb_call_cache buf = {
- GET_GLOBAL_METHOD_STATE(),
- { RCLASS_SERIAL(klass) },
- me,
- me ? me->def->method_serial : 0,
- call,
- };
- if (call != vm_call_general) {
- for (int i = 0; i < numberof(cc->class_serial) - 1; i++) {
- buf.class_serial[i + 1] = cc->class_serial[i];
- }
}
- MEMCPY(cc, &buf, struct rb_call_cache, 1);
- VM_ASSERT(callable_method_entry_p(cc->me));
-}
-
-/* # Description of what `vm_cache_check_for_class_serial()` is doing #########
- *
- * - Let's assume a `struct rb_call_cache` has its `class_serial` as an array
- * of length 3 (typical situation for 64 bit environments):
- *
- * ```C
- * struct rb_call_cache {
- * rb_serial_t method_state;
- * rb_serial_t class_serial[3];
- * rb_callable_method_entry_t *me;
- * rb_method_definition_struct *def;
- * vm_call_handler call;
- * union { ... snip ... } aux;
- * };
- * ```
- *
- * - Initially, the `cc->class_serial` array is filled with zeros.
- *
- * - If the cache mishits, and if that was due to mc_miss_spurious situation,
- * `rb_vm_search_method_slowpath()` pushes the newest class serial at the
- * leftmost position of the `cc->class_serial`.
- *
- * ```
- * from: +--------------+-----+-----+-----+----+-----+------+-----+
- * | method_state | (x) | (y) | (z) | me | def | call | aux |
- * +--------------+-----+-----+-----+----+-----+------+-----+
- * \ \
- * \ \
- * \ \
- * \ \
- * \ \
- * v v
- * to: +--------------+-----+-----+-----+----+-----+------+-----+
- * | method_state | NEW | (x) | (y) | me | def | call | aux |
- * +--------------+-----+-----+-----+----+-----+------+-----+
- * ^^^
- * fill RCLASS_SERIAL(klass)
- * ```
- *
- * - Eventually, the `cc->class_serial` is filled with a series of classes that
- * share the same method entry for the same call site.
- *
- * - `vm_cache_check_for_class_serial()` can say that the cache now hits if
- * _any_ of the class serials stored inside of `cc->class_serial` is equal to
- * the given `class_serial` value.
- *
- * - It scans the array from left to right, looking for the expected class
- * serial. If it finds that at `cc->class_serial[0]` (this branch
- * probability is 98% according to @shyouhei's experiment), just returns
- * true. If it reaches the end of the array without finding anything,
- * returns false. This is done in the #1 loop below.
- *
- * - What needs to be complicated is when the class serial is found at either
- * `cc->class_serial[1]` or `cc->class_serial[2]`. When that happens, its
- * return value is true because `cc->me` and `cc->call` are valid. But
- * `cc->aux` might be invalid. Also the found class serial is expected to
- * hit next time. In this case we reorder the array and wipe out `cc->aux`.
- * This is done in the #2 loop below.
- *
- * ```
- * from: +--------------+-----+-----+-----+----+-----+------+-----+
- * | method_state | (x) | (y) | (z) | me | def | call | aux |
- * +--------------+-----+-----+-----+----+-----+------+-----+
- * \ \ |
- * \ \ |
- * +- \ --- \ -+
- * | \ \
- * | \ \
- * v v v
- * to: +--------------+-----+-----+-----+----+-----+------+-----+
- * | method_state | (z) | (x) | (y) | me | def | call | 000 |
- * +--------------+-----+-----+-----+----+-----+------+-----+
- * ^^^
- * wipe out
- * ```
- *
- */
-static inline bool
-vm_cache_check_for_class_serial(struct rb_call_cache *cc, rb_serial_t class_serial)
{
- int i;
- rb_serial_t j;
- /* This is the loop #1 in above description. */
- for (i = 0; i < numberof(cc->class_serial); i++) {
- j = cc->class_serial[i];
- if (! j) {
- break;
- }
- else if (j != class_serial) {
- continue;
- }
- else if (! i) {
- return true;
}
- else {
- goto hit;
}
}
- RB_DEBUG_COUNTER_INC(mc_class_serial_miss);
- return false;
- hit:
- /* This is the loop #2 in above description. */
- for (; i > 0; i--) {
- cc->class_serial[i] = cc->class_serial[i - 1];
}
- cc->class_serial[0] = j;
- MEMZERO(&cc->aux, cc->aux, 1); /* cc->call is valid, but cc->aux might not. */
- return true;
}
static void
-vm_search_method_fastpath(struct rb_call_data *cd, VALUE klass)
{
- struct rb_call_cache *cc = &cd->cc;
#if OPT_INLINE_METHOD_CACHE
- if (LIKELY(RB_DEBUG_COUNTER_INC_UNLESS(mc_global_state_miss,
- GET_GLOBAL_METHOD_STATE() == cc->method_state) &&
- vm_cache_check_for_class_serial(cc, RCLASS_SERIAL(klass)))) {
- /* cache hit! */
- VM_ASSERT(cc->call != NULL);
- RB_DEBUG_COUNTER_INC(mc_inline_hit);
- return;
}
- RB_DEBUG_COUNTER_INC(mc_inline_miss);
#endif
- rb_vm_search_method_slowpath(cd, klass);
}
static void
-vm_search_method(struct rb_call_data *cd, VALUE recv)
{
VALUE klass = CLASS_OF(recv);
-
VM_ASSERT(klass != Qfalse);
VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
- vm_search_method_fastpath(cd, klass);
}
static inline int
@@ -1659,16 +1646,16 @@ check_cfunc(const rb_callable_method_entry_t *me, VALUE (*func)())
}
static inline int
-vm_method_cfunc_is(CALL_DATA cd, VALUE recv, VALUE (*func)())
{
- vm_search_method(cd, recv);
- return check_cfunc(cd->cc.me, func);
}
static VALUE
-opt_equal_fallback(VALUE recv, VALUE obj, CALL_DATA cd)
{
- if (vm_method_cfunc_is(cd, recv, rb_obj_equal)) {
return recv == obj ? Qtrue : Qfalse;
}
@@ -1728,7 +1715,7 @@ static
inline
#endif
VALUE
-opt_eq_func(VALUE recv, VALUE obj, CALL_DATA cd)
{
switch (comparable_by_identity(recv, obj)) {
case 1:
@@ -1751,7 +1738,7 @@ opt_eq_func(VALUE recv, VALUE obj, CALL_DATA cd)
}
fallback:
- return opt_equal_fallback(recv, obj, cd);
}
static
@@ -1781,7 +1768,7 @@ opt_eql_func(VALUE recv, VALUE obj, CALL_DATA cd)
}
fallback:
- return opt_equal_fallback(recv, obj, cd);
}
#undef BUILTIN_CLASS_P
#undef EQ_UNREDEFINED_P
@@ -1797,14 +1784,14 @@ rb_equal_opt(VALUE obj1, VALUE obj2)
rb_gc_register_mark_object((VALUE)ci);
}
- struct rb_call_data cd = { .ci = ci, };
- return opt_eq_func(obj1, obj2, &cd);
}
VALUE
rb_eql_opt(VALUE obj1, VALUE obj2)
{
- struct rb_call_data cd = { .ci = vm_ci_new_id(idEqlP), };
return opt_eql_func(obj1, obj2, &cd);
}
@@ -1929,11 +1916,11 @@ vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t
{
RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
- struct rb_call_cache *cc = &cd->cc;
- const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
int param = iseq->body->param.size;
int local = iseq->body->local_table_size;
- return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, param, local);
}
MJIT_STATIC bool
@@ -2043,8 +2030,8 @@ vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame
struct rb_calling_info *calling,
struct rb_call_data *cd)
{
- const struct rb_call_cache *cc = &cd->cc;
- const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
const int lead_num = iseq->body->param.lead_num;
const int opt = calling->argc - lead_num;
const int opt_num = iseq->body->param.opt_num;
@@ -2064,7 +2051,7 @@ vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame
}
#endif
- return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, opt_pc, param - delta, local);
}
static VALUE
@@ -2072,8 +2059,8 @@ vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_fra
struct rb_calling_info *calling,
struct rb_call_data *cd)
{
- const struct rb_call_cache *cc = &cd->cc;
- const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
const int lead_num = iseq->body->param.lead_num;
const int opt = calling->argc - lead_num;
const int opt_pc = (int)iseq->body->param.opt_table[opt];
@@ -2103,12 +2090,12 @@ vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *
struct rb_call_data *cd)
{
const struct rb_callinfo *ci = cd->ci;
- const struct rb_call_cache *cc = &cd->cc;
VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
- const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
const int ci_kw_len = kw_arg->keyword_len;
@@ -2122,7 +2109,7 @@ vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *
int param = iseq->body->param.size;
int local = iseq->body->local_table_size;
- return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, param, local);
}
static VALUE
@@ -2131,12 +2118,12 @@ vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t
struct rb_call_data *cd)
{
const struct rb_callinfo *MAYBE_UNUSED(ci) = cd->ci;
- const struct rb_call_cache *cc = &cd->cc;
VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
- const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
VALUE * const argv = cfp->sp - calling->argc;
VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
@@ -2152,7 +2139,7 @@ vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t
int param = iseq->body->param.size;
int local = iseq->body->local_table_size;
- return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, param, local);
}
static inline int
@@ -2160,7 +2147,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
{
const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
if (LIKELY(rb_simple_iseq_p(iseq))) {
@@ -2172,7 +2159,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
}
- CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), vm_call_iseq_optimizable_p(cd->ci, &cd->cc));
return 0;
}
else if (rb_iseq_only_optparam_p(iseq)) {
@@ -2192,12 +2179,12 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));
}
else {
CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));
}
/* initialize opt vars for self-references */
@@ -2225,7 +2212,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
- !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));
return 0;
}
@@ -2238,7 +2225,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
if (klocals[kw_param->num] == INT2FIX(0)) {
/* copy from default_values */
CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
- !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));
}
return 0;
@@ -2254,11 +2241,11 @@ vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct r
{
RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
- const struct rb_call_cache *cc = &cd->cc;
- const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
const int param_size = iseq->body->param.size;
const int local_size = iseq->body->local_table_size;
- const int opt_pc = vm_callee_setup_arg(ec, calling, cd, def_iseq_ptr(cc->me->def), cfp->sp - calling->argc, param_size, local_size);
return vm_call_iseq_setup_2(ec, cfp, calling, cd, opt_pc, param_size, local_size);
}
@@ -2267,10 +2254,10 @@ vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct
int opt_pc, int param_size, int local_size)
{
const struct rb_callinfo *ci = cd->ci;
- const struct rb_call_cache *cc = &cd->cc;
if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
- return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, opt_pc, param_size, local_size);
}
else {
return vm_call_iseq_setup_tailcall(ec, cfp, calling, cd, opt_pc);
@@ -2298,10 +2285,10 @@ static inline VALUE
vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd,
int opt_pc)
{
- const struct rb_call_cache *cc = &cd->cc;
unsigned int i;
VALUE *argv = cfp->sp - calling->argc;
- const rb_callable_method_entry_t *me = cc->me;
const rb_iseq_t *iseq = def_iseq_ptr(me->def);
VALUE *src_argv = argv;
VALUE *sp_orig, *sp;
@@ -2501,9 +2488,9 @@ static VALUE
vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
const struct rb_callinfo *ci = cd->ci;
- const struct rb_call_cache *cc = &cd->cc;
VALUE val;
- const rb_callable_method_entry_t *me = cc->me;
const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
int len = cfunc->argc;
@@ -2553,20 +2540,20 @@ vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb
static VALUE
vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
- struct rb_call_cache *cc = &cd->cc;
RB_DEBUG_COUNTER_INC(ccf_ivar);
cfp->sp -= 1;
- return vm_getivar(calling->recv, cc->me->def->body.attr.id, NULL, cc, TRUE);
}
static VALUE
vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
- struct rb_call_cache *cc = &cd->cc;
RB_DEBUG_COUNTER_INC(ccf_attrset);
VALUE val = *(cfp->sp - 1);
cfp->sp -= 2;
- return vm_setivar(calling->recv, cc->me->def->body.attr.id, val, NULL, cc, 1);
}
static inline VALUE
@@ -2574,11 +2561,11 @@ vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling
{
rb_proc_t *proc;
VALUE val;
- const struct rb_call_cache *cc = &cd->cc;
/* control block frame */
- GetProcPtr(cc->me->def->body.bmethod.proc, proc);
- val = rb_vm_invoke_bmethod(ec, proc, calling->recv, calling->argc, argv, calling->kw_splat, calling->block_handler, cc->me);
return val;
}
@@ -2601,6 +2588,65 @@ vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_c
return vm_call_bmethod_body(ec, calling, cd, argv);
}
static enum method_missing_reason
ci_missing_reason(const struct rb_callinfo *ci)
{
@@ -2619,12 +2665,10 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
int i;
VALUE sym;
ID mid;
- const struct rb_callinfo *orig_ci = orig_cd->ci;
- const struct rb_call_cache *orig_cc = &orig_cd->cc;
- struct rb_call_cache *cc;
struct rb_call_data cd;
- CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
i = calling->argc - 1;
@@ -2632,9 +2676,6 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
rb_raise(rb_eArgError, "no method name given");
}
- cd.cc = *orig_cc;
- cc = &cd.cc;
-
sym = TOPN(i);
if (!(mid = rb_check_id(&sym))) {
@@ -2642,12 +2683,12 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
VALUE exc =
rb_make_no_method_exception(rb_eNoMethodError, 0, calling->recv,
rb_long2int(calling->argc), &TOPN(i),
- vm_ci_flag(orig_ci) & (VM_CALL_FCALL|VM_CALL_VCALL));
rb_exc_raise(exc);
}
TOPN(i) = rb_str_intern(sym);
mid = idMethodMissing;
- ec->method_missing_reason = cc->aux.method_missing_reason = ci_missing_reason(orig_ci);
}
else {
/* shift arguments */
@@ -2658,10 +2699,14 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
DEC_SP(1);
}
- CC_SET_ME(cc, rb_callable_method_entry_with_refinements(CLASS_OF(calling->recv), mid, NULL));
unsigned int new_flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
- cd.ci = vm_ci_new_runtime(mid, new_flag, 0 /* not accessed (calling->argc is used) */, vm_ci_kwarg(orig_ci));
-
return vm_call_method(ec, reg_cfp, calling, (CALL_DATA)&cd);
}
@@ -2706,20 +2751,19 @@ vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
}
else {
calling->recv = rb_vm_bh_to_procval(ec, block_handler);
- vm_search_method(cd, calling->recv);
return vm_call_general(ec, reg_cfp, calling, cd);
}
}
static VALUE
-vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, struct rb_call_data *orig_cd)
{
RB_DEBUG_COUNTER_INC(ccf_method_missing);
- const struct rb_callinfo *orig_ci = orig_cd->ci;
- const struct rb_call_cache *orig_cc = &orig_cd->cc;
VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
- struct rb_call_data cd = *orig_cd;
unsigned int argc;
CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
@@ -2727,8 +2771,11 @@ vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
unsigned int flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
cd.ci = vm_ci_new_runtime(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
- cd.cc.me = rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv),
- idMethodMissing, NULL);
calling->argc = argc;
@@ -2741,29 +2788,39 @@ vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
argv[0] = ID2SYM(vm_ci_mid(orig_ci));
INC_SP(1);
- ec->method_missing_reason = orig_cc->aux.method_missing_reason;
return vm_call_method(ec, reg_cfp, calling, &cd);
}
static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
static VALUE
vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd, VALUE klass)
{
- RB_DEBUG_COUNTER_INC(ccf_method_missing);
-
- const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
klass = RCLASS_SUPER(klass);
- CC_SET_ME(cc, klass ? rb_callable_method_entry(klass, vm_ci_mid(ci)) : NULL);
- if (!cc->me) {
return vm_call_method_nome(ec, cfp, calling, cd);
}
- if (cc->me->def->type == VM_METHOD_TYPE_REFINED &&
- cc->me->def->body.refined.orig_me) {
- CC_SET_ME(cc, refined_method_callable_without_refinement(cc->me));
}
- return vm_call_method_each_type(ec, cfp, calling, cd);
}
static inline VALUE
@@ -2795,53 +2852,6 @@ current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
return cfp;
}
-static VALUE
-find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
-{
- VALUE klass = current_class;
-
- /* for prepended Module, then start from cover class */
- if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN)) klass = RBASIC_CLASS(klass);
-
- while (RTEST(klass)) {
- VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
- if (owner == target_owner) {
- return klass;
- }
- klass = RCLASS_SUPER(klass);
- }
-
- return current_class; /* maybe module function */
-}
-
-static const rb_callable_method_entry_t *
-aliased_callable_method_entry(const rb_callable_method_entry_t *me)
-{
- const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
- const rb_callable_method_entry_t *cme;
-
- if (orig_me->defined_class == 0) {
- VALUE defined_class = find_defined_class_by_owner(me->defined_class, orig_me->owner);
- VM_ASSERT(RB_TYPE_P(orig_me->owner, T_MODULE));
- cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
-
- if (me->def->alias_count + me->def->complemented_count == 0) {
- RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
- }
- else {
- rb_method_definition_t *def =
- rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
- rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
- }
- }
- else {
- cme = (const rb_callable_method_entry_t *)orig_me;
- }
-
- VM_ASSERT(callable_method_entry_p(cme));
- return cme;
-}
-
static const rb_callable_method_entry_t *
refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
{
@@ -2865,57 +2875,78 @@ refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
return cme;
}
-static int
-search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, ID mid, struct rb_call_cache *cc)
{
const rb_cref_t *cref = vm_get_cref(cfp->ep);
for (; cref; cref = CREF_NEXT(cref)) {
- const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), cc->me->owner);
if (NIL_P(refinement)) continue;
const rb_callable_method_entry_t *const ref_me =
rb_callable_method_entry(refinement, mid);
if (ref_me) {
- if (cc->call == vm_call_super_method) {
const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
continue;
}
}
- if (cc->me->def->type != VM_METHOD_TYPE_REFINED ||
- cc->me->def != ref_me->def) {
- CC_SET_ME(cc, ref_me);
}
if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
- return TRUE;
}
}
else {
- CC_SET_ME(cc, NULL);
- return FALSE;
}
}
- if (cc->me->def->body.refined.orig_me) {
- CC_SET_ME(cc, refined_method_callable_without_refinement(cc->me));
}
else {
- VALUE klass = RCLASS_SUPER(cc->me->defined_class);
- CC_SET_ME(cc, klass ? rb_callable_method_entry(klass, mid) : NULL);
}
- return TRUE;
}
static VALUE
vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
- switch (cc->me->def->type) {
case VM_METHOD_TYPE_ISEQ:
CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
return vm_call_iseq_setup(ec, cfp, calling, cd);
@@ -2930,20 +2961,20 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
rb_check_arity(calling->argc, 1, 1);
- cc->aux.index = 0;
- CC_SET_FASTPATH(cc, vm_call_attrset, !((vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT) || (vm_ci_flag(ci) & VM_CALL_KWARG)));
return vm_call_attrset(ec, cfp, calling, cd);
case VM_METHOD_TYPE_IVAR:
CALLER_SETUP_ARG(cfp, calling, ci);
CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
rb_check_arity(calling->argc, 0, 0);
- cc->aux.index = 0;
- CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT));
return vm_call_ivar(ec, cfp, calling, cd);
case VM_METHOD_TYPE_MISSING:
- cc->aux.method_missing_reason = 0;
CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
return vm_call_method_missing(ec, cfp, calling, cd);
@@ -2952,12 +2983,11 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
return vm_call_bmethod(ec, cfp, calling, cd);
case VM_METHOD_TYPE_ALIAS:
- CC_SET_ME(cc, aliased_callable_method_entry(cc->me));
- VM_ASSERT(cc->me != NULL);
- return vm_call_method_each_type(ec, cfp, calling, cd);
case VM_METHOD_TYPE_OPTIMIZED:
- switch (cc->me->def->body.optimize_type) {
case OPTIMIZED_METHOD_TYPE_SEND:
CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
return vm_call_opt_send(ec, cfp, calling, cd);
@@ -2969,23 +2999,22 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
return vm_call_opt_block_call(ec, cfp, calling, cd);
default:
rb_bug("vm_call_method: unsupported optimized method type (%d)",
- cc->me->def->body.optimize_type);
}
case VM_METHOD_TYPE_UNDEF:
break;
case VM_METHOD_TYPE_ZSUPER:
- return vm_call_zsuper(ec, cfp, calling, cd, RCLASS_ORIGIN(cc->me->defined_class));
case VM_METHOD_TYPE_REFINED:
- if (search_refined_method(ec, cfp, vm_ci_mid(ci), cc))
- return vm_call_method(ec, cfp, calling, cd);
- else
- return vm_call_method_nome(ec, cfp, calling, cd);
}
- rb_bug("vm_call_method: unsupported method type (%d)", cc->me->def->type);
}
NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
@@ -2995,7 +3024,6 @@ vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct
{
/* method missing */
const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
const int stat = ci_missing_reason(ci);
if (vm_ci_mid(ci) == idMethodMissing) {
@@ -3004,9 +3032,7 @@ vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct
vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
}
else {
- cc->aux.method_missing_reason = stat;
- CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
- return vm_call_method_missing(ec, cfp, calling, cd);
}
}
@@ -3014,12 +3040,12 @@ static inline VALUE
vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
- VM_ASSERT(callable_method_entry_p(cc->me));
- if (cc->me != NULL) {
- switch (METHOD_ENTRY_VISI(cc->me)) {
case METHOD_VISI_PUBLIC: /* likely */
return vm_call_method_each_type(ec, cfp, calling, cd);
@@ -3028,7 +3054,7 @@ vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
enum method_missing_reason stat = MISSING_PRIVATE;
if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
- cc->aux.method_missing_reason = stat;
CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
return vm_call_method_missing(ec, cfp, calling, cd);
}
@@ -3036,15 +3062,19 @@ vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
case METHOD_VISI_PROTECTED:
if (!(vm_ci_flag(ci) & VM_CALL_OPT_SEND)) {
- if (!rb_obj_is_kind_of(cfp->self, cc->me->defined_class)) {
- cc->aux.method_missing_reason = MISSING_PROTECTED;
return vm_call_method_missing(ec, cfp, calling, cd);
}
else {
/* caching method info to dummy cc */
- VM_ASSERT(cc->me != NULL);
- struct rb_call_data cd_entry = *cd;
- return vm_call_method_each_type(ec, cfp, calling, &cd_entry);
}
}
return vm_call_method_each_type(ec, cfp, calling, cd);
@@ -3071,8 +3101,8 @@ vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, st
RB_DEBUG_COUNTER_INC(ccf_super_method);
/* this check is required to distinguish with other functions. */
- const struct rb_call_cache *cc = &cd->cc;
- if (cc->call != vm_call_super_method) rb_bug("bug");
return vm_call_method(ec, reg_cfp, calling, cd);
}
@@ -3145,30 +3175,34 @@ vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *c
if (!klass) {
/* bound instance method of module */
- cd->cc.aux.method_missing_reason = MISSING_SUPER;
- CC_SET_FASTPATH(&cd->cc, vm_call_method_missing, TRUE);
}
else {
- struct rb_call_cache *cc = &cd->cc;
-#if OPT_INLINE_METHOD_CACHE
- /* Unlike normal method search, we only consider the first class
- * serial. Since we're testing defined_class rather than receiver,
- * there's only one valid "warm" value. */
- if (LIKELY(RB_DEBUG_COUNTER_INC_UNLESS(mc_global_state_miss,
- GET_GLOBAL_METHOD_STATE() == cc->method_state) &&
- cc->class_serial[0] == RCLASS_SERIAL(klass)) &&
- cc->me && vm_ci_mid(cd->ci) == cc->me->called_id) {
- VM_ASSERT(cc->call != NULL);
- RB_DEBUG_COUNTER_INC(mc_inline_hit);
- return;
- }
-#endif
-
- CC_SET_ME(cc, rb_callable_method_entry(klass, vm_ci_mid(cd->ci)));
- CC_SET_FASTPATH(cc, vm_call_super_method, TRUE);
-
- cc->method_state = GET_GLOBAL_METHOD_STATE();
- cc->class_serial[0] = RCLASS_SERIAL(klass);
}
}
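The removed comment above notes that super dispatch is keyed on the current method's `defined_class` rather than the receiver's class, so only the first `class_serial` slot can ever be warm. A minimal standalone sketch of that single-slot check, using hypothetical `mini_` stand-ins rather than the real VM types:

```c
#include <stdbool.h>

typedef unsigned long long rb_serial_t;    /* stand-in for the real typedef */

/* Simplified stand-in for the cache fields consulted on the super path. */
struct mini_super_cache {
    rb_serial_t method_state;      /* snapshot of the global method state */
    rb_serial_t class_serial[3];   /* only slot 0 is meaningful for super */
};

/* Warm iff the global method state is unchanged and slot 0 matches the
 * serial of the defined_class the call site was cached against. */
static bool
mini_super_cache_warm_p(const struct mini_super_cache *cc,
                        rb_serial_t global_method_state,
                        rb_serial_t defined_class_serial)
{
    return cc->method_state == global_method_state
        && cc->class_serial[0] == defined_class_serial;
}
```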
@@ -3958,7 +3992,7 @@ vm_search_method_wrap(
struct rb_call_data *cd,
VALUE recv)
{
- vm_search_method(cd, recv);
}
static void
@@ -3999,9 +4033,8 @@ vm_sendish(
struct rb_call_data *cd,
VALUE recv))
{
- const struct rb_callinfo *ci = cd->ci;
- CALL_CACHE cc = &cd->cc;
VALUE val;
int argc = vm_ci_argc(ci);
VALUE recv = TOPN(argc);
struct rb_calling_info calling;
@@ -4012,8 +4045,9 @@ vm_sendish(
calling.argc = argc;
method_explorer(GET_CFP(), cd, recv);
- val = cc->call(ec, GET_CFP(), &calling, cd);
if (val != Qundef) {
return val; /* CFUNC normal return */
@@ -4356,10 +4390,10 @@ vm_opt_mod(VALUE recv, VALUE obj)
}
static VALUE
-vm_opt_neq(CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
{
- if (vm_method_cfunc_is(cd, recv, rb_obj_not_equal)) {
- VALUE val = opt_eq_func(recv, obj, cd_eq);
if (val != Qundef) {
return RTEST(val) ? Qfalse : Qtrue;
@@ -4630,13 +4664,13 @@ vm_opt_empty_p(VALUE recv)
VALUE rb_false(VALUE obj);
static VALUE
-vm_opt_nil_p(CALL_DATA cd, VALUE recv)
{
if (recv == Qnil &&
BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
return Qtrue;
}
- else if (vm_method_cfunc_is(cd, recv, rb_false)) {
return Qfalse;
}
else {
@@ -4692,9 +4726,9 @@ vm_opt_succ(VALUE recv)
}
static VALUE
-vm_opt_not(CALL_DATA cd, VALUE recv)
{
- if (vm_method_cfunc_is(cd, recv, rb_obj_not)) {
return RTEST(recv) ? Qfalse : Qtrue;
}
else {