-rw-r--r--	vm_insnhelper.c	490
1 file changed, 146 insertions(+), 344 deletions(-)
@@ -50,11 +50,6 @@ MJIT_STATIC VALUE
 ruby_vm_special_exception_copy(VALUE exc)
 {
     VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
-    rb_shape_t * shape = rb_shape_get_shape(exc);
-    if (rb_shape_frozen_shape_p(shape)) {
-        shape = shape->parent;
-    }
-    rb_shape_set_shape(e, shape);
     rb_obj_copy_ivar(e, exc);
     return e;
 }
@@ -1090,19 +1085,35 @@ vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_l
     return klass;
 }

-ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
 static inline void
-fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
 {
-    if (is_attr) {
-        if (vm_cc_markable(cc)) {
-            vm_cc_attr_index_set(cc, index, shape_id, shape_id);
-            RB_OBJ_WRITTEN(cc, Qundef, rb_shape_get_shape_by_id(shape_id));
-        }
     } else {
-        vm_ic_attr_index_set(iseq, ic, index, shape_id, shape_id);
-        RB_OBJ_WRITTEN(iseq, Qundef, rb_shape_get_shape_by_id(shape_id));
     }
 }
@@ -1112,120 +1123,68 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
 {
 #if OPT_IC_FOR_IVAR
     VALUE val = Qundef;
-    shape_id_t shape_id;
-    VALUE * ivar_list;

     if (SPECIAL_CONST_P(obj)) {
-        return Qnil;
     }

-#if SHAPE_IN_BASIC_FLAGS
-    shape_id = RBASIC_SHAPE_ID(obj);
-#endif
-
-    switch (BUILTIN_TYPE(obj)) {
-      case T_OBJECT:
-        ivar_list = ROBJECT_IVPTR(obj);
-        VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
-#if !SHAPE_IN_BASIC_FLAGS
-        shape_id = ROBJECT_SHAPE_ID(obj);
-#endif
-        break;
-      case T_CLASS:
-      case T_MODULE:
-        {
-            goto general_path;
-        }
-      default:
-        if (FL_TEST_RAW(obj, FL_EXIVAR)) {
-            struct gen_ivtbl *ivtbl;
-            rb_gen_ivtbl_get(obj, id, &ivtbl);
-#if !SHAPE_IN_BASIC_FLAGS
-            shape_id = ivtbl->shape_id;
-#endif
-            ivar_list = ivtbl->ivptr;
-        } else {
-            return Qnil;
-        }
-    }
-
-    shape_id_t cached_id;
-    if (is_attr) {
-        cached_id = vm_cc_attr_shape_id(cc);
     } else {
-        cached_id = vm_ic_attr_shape_id(ic);
-    }
-
-    attr_index_t index;
-    if (LIKELY(cached_id == shape_id)) {
-        RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
-        if (is_attr && vm_cc_attr_index_p(cc)) {
-            index = vm_cc_attr_index(cc);
-        }
-        else if (!is_attr && vm_ic_attr_index_p(ic)) {
-            index = vm_ic_attr_index(ic);
-        }
-        else {
-            return Qnil;
         }
-        val = ivar_list[index];
-        VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
-    }
-    else { // cache miss case
-#if RUBY_DEBUG
-        if (is_attr) {
-            if (cached_id != INVALID_SHAPE_ID) {
-                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
-            } else {
-                RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
             }
         } else {
-            if (cached_id != INVALID_SHAPE_ID) {
-                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
-            } else {
-                RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
-            }
         }
-#endif
-
-        attr_index_t index;
-        rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
-        if (rb_shape_get_iv_index(shape, id, &index)) {
-            // This fills in the cache with the shared cache object.
-            // "ent" is the shared cache object
-            fill_ivar_cache(iseq, ic, cc, is_attr, index, shape_id);
-
-            // We fetched the ivar list above
-            val = ivar_list[index];
         } else {
-            if (is_attr) {
-                if (vm_cc_markable(cc)) {
-                    vm_cc_attr_index_initialize(cc, shape_id);
-                }
-            }
-            else {
-                vm_ic_attr_index_initialize(ic, shape_id);
-            }
-
-            val = Qnil;
         }
-    }
-
-    RUBY_ASSERT(val != Qundef);
-
-    return val;
-
-general_path:
 #endif /* OPT_IC_FOR_IVAR */
     RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
@@ -1237,24 +1196,6 @@ general_path:
     }
 }

-static void
-populate_cache(attr_index_t index, rb_shape_t *shape, rb_shape_t *next_shape, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
-{
-    // Cache population code
-    if (is_attr) {
-        if (vm_cc_markable(cc)) {
-            vm_cc_attr_index_set(cc, index, SHAPE_ID(shape), SHAPE_ID(next_shape));
-            RB_OBJ_WRITTEN(cc, Qundef, (VALUE)shape);
-            RB_OBJ_WRITTEN(cc, Qundef, (VALUE)next_shape);
-        }
-    }
-    else {
-        vm_ic_attr_index_set(iseq, ic, index, SHAPE_ID(shape), SHAPE_ID(next_shape));
-        RB_OBJ_WRITTEN(iseq, Qundef, (VALUE)shape);
-        RB_OBJ_WRITTEN(iseq, Qundef, (VALUE)next_shape);
-    }
-}
-
 ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
 NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
 NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
@@ -1262,66 +1203,35 @@ NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, cons
 static VALUE
 vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
 {
-#if OPT_IC_FOR_IVAR
-    switch (BUILTIN_TYPE(obj)) {
-      case T_OBJECT:
-        {
-            rb_check_frozen_internal(obj);
-
-            attr_index_t index;
-
-            uint32_t num_iv = ROBJECT_NUMIV(obj);
-            rb_shape_t* shape = rb_shape_get_shape(obj);
-            rb_shape_t* next_shape = rb_shape_get_next(shape, obj, id);
-            if (shape != next_shape) {
-                rb_shape_set_shape(obj, next_shape);
-            }
-
-            if (rb_shape_get_iv_index(next_shape, id, &index)) { // based off the hash stored in the transition tree
-                if (index >= MAX_IVARS) {
-                    rb_raise(rb_eArgError, "too many instance variables");
-                }
-
-                populate_cache(index, shape, next_shape, id, iseq, ic, cc, is_attr);
-            }
-            else {
-                rb_bug("Didn't find instance variable %s\n", rb_id2name(id));
-            }
-
-            // Ensure the IV buffer is wide enough to store the IV
-            if (UNLIKELY(index >= num_iv)) {
-                rb_init_iv_list(obj);
-            }
-            VALUE *ptr = ROBJECT_IVPTR(obj);
-            RB_OBJ_WRITE(obj, &ptr[index], val);
-            RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit);
-            return val;
         }
-      case T_CLASS:
-      case T_MODULE:
-        break;
-      default:
-        {
-            rb_shape_t * shape = rb_shape_get_shape(obj);
-            rb_ivar_set(obj, id, val);
-            rb_shape_t * next_shape = rb_shape_get_shape(obj);
-            attr_index_t index;
-
-            if (rb_shape_get_iv_index(next_shape, id, &index)) { // based off the hash stored in the transition tree
-                if (index >= MAX_IVARS) {
-                    rb_raise(rb_eArgError, "too many instance variables");
-                }
-                populate_cache(index, shape, next_shape, id, iseq, ic, cc, is_attr);
-            }
-            else {
-                rb_bug("didn't find the id\n");
-            }
-            return val;
         }
     }
 #endif
     RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
@@ -1340,99 +1250,39 @@ vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache
     return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
 }

-NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index));
-static VALUE
-vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index)
-{
-#if SHAPE_IN_BASIC_FLAGS
-    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
-#else
-    shape_id_t shape_id = rb_generic_shape_id(obj);
-#endif
-
-    // Cache hit case
-    if (shape_id == source_shape_id) {
-        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
-
-        struct gen_ivtbl *ivtbl = 0;
-        if (dest_shape_id != shape_id) {
-            ivtbl = rb_ensure_generic_iv_list_size(obj, index + 1);
-#if SHAPE_IN_BASIC_FLAGS
-            RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
-#else
-            ivtbl->shape_id = dest_shape_id;
-#endif
-            RB_OBJ_WRITTEN(obj, Qundef, rb_shape_get_shape_by_id(dest_shape_id));
-        }
-        else {
-            // Just get the IV table
-            RUBY_ASSERT(GET_VM()->shape_list[dest_shape_id]);
-            rb_gen_ivtbl_get(obj, 0, &ivtbl);
-        }
-
-        VALUE *ptr = ivtbl->ivptr;
-
-        RB_OBJ_WRITE(obj, &ptr[index], val);
-
-        RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
-
-        return val;
-    }
-
-    return Qundef;
-}
-
 static inline VALUE
-vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index)
 {
 #if OPT_IC_FOR_IVAR
-    switch (BUILTIN_TYPE(obj)) {
-      case T_OBJECT:
-        {
-            VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
-            // If object's shape id is the same as the source
-            // then do the shape transition and write the ivar
-            // If object's shape id is the same as the dest
-            // then write the ivar
-            shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);
-
-            // Do we have a cache hit *and* is the CC intitialized
-            if (shape_id == source_shape_id) {
-                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
-
-                VM_ASSERT(!rb_ractor_shareable_p(obj));
-
-                if (dest_shape_id != shape_id) {
-                    if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
-                        rb_init_iv_list(obj);
-                    }
-                    ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);
-                }
-                else {
-                    RUBY_ASSERT(GET_VM()->shape_list[dest_shape_id]);
-                }
-
-                RUBY_ASSERT(index < ROBJECT_NUMIV(obj));
-                VALUE *ptr = ROBJECT_IVPTR(obj);
-                RB_OBJ_WRITE(obj, &ptr[index], val);
-                RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
-
-                return val;
-            }
         }
-        break;
-      case T_CLASS:
-      case T_MODULE:
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
-      default:
-        break;
     }
-
-    return Qundef;
 #endif /* OPT_IC_FOR_IVAR */
 }

 static VALUE
@@ -1527,22 +1377,7 @@ vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
 static inline void
 vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
 {
-    shape_id_t source_shape_id = vm_ic_attr_index_source_shape_id(ic);
-    attr_index_t index = vm_ic_attr_index(ic);
-    shape_id_t dest_shape_id = vm_ic_attr_index_dest_shape_id(ic);
-    if (UNLIKELY(vm_setivar(obj, id, val, source_shape_id, dest_shape_id, index) == Qundef)) {
-        switch (BUILTIN_TYPE(obj)) {
-          case T_OBJECT:
-          case T_CLASS:
-          case T_MODULE:
-            break;
-          default:
-            if (vm_setivar_default(obj, id, val, source_shape_id, dest_shape_id, index) != Qundef) {
-                return;
-            }
-        }
-        vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
-    }
 }

 void
@@ -1551,6 +1386,28 @@ rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IV
     vm_setinstancevariable(iseq, obj, id, val, ic);
 }

 static VALUE
 vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
 {
@@ -3243,45 +3100,17 @@ vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_call
     const struct rb_callcache *cc = calling->cc;
     RB_DEBUG_COUNTER_INC(ccf_ivar);
     cfp->sp -= 1;
-    VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE);
-    return ivar;
 }

 static VALUE
-vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
 {
     RB_DEBUG_COUNTER_INC(ccf_attrset);
     VALUE val = *(cfp->sp - 1);
     cfp->sp -= 2;
-    shape_id_t source_shape_id = vm_cc_attr_index_source_shape_id(cc);
-    attr_index_t index = vm_cc_attr_index(cc);
-    shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
-    ID id = vm_cc_cme(cc)->def->body.attr.id;
-    rb_check_frozen_internal(obj);
-    VALUE res = vm_setivar(obj, id, val, source_shape_id, dest_shape_id, index);
-    if (res == Qundef) {
-        switch (BUILTIN_TYPE(obj)) {
-          case T_OBJECT:
-          case T_CLASS:
-          case T_MODULE:
-            break;
-          default:
-            {
-                res = vm_setivar_default(obj, id, val, source_shape_id, dest_shape_id, index);
-                if (res != Qundef) {
-                    return res;
-                }
-            }
-        }
-        res = vm_setivar_slowpath_attr(obj, id, val, cc);
-    }
-    return res;
-}
-
-static VALUE
-vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
-{
-    return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
 }

 bool
@@ -3390,7 +3219,7 @@ vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_cal
 {
     calling->cc = &VM_CC_ON_STACK(Qundef,
                                   vm_call_general,
-                                  {{0}},
                                   aliased_callable_method_entry(vm_cc_cme(calling->cc)));

     return vm_call_method_each_type(ec, cfp, calling);
@@ -3560,7 +3389,7 @@ vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_
     ec->method_missing_reason = reason;
     calling->ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
-    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
                                   rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
     return vm_call_method(ec, reg_cfp, calling);
 }
@@ -3586,7 +3415,7 @@ vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
         cme = refined_method_callable_without_refinement(cme);
     }
-    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);

     return vm_call_method_each_type(ec, cfp, calling);
 }
@@ -3693,7 +3522,7 @@ search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struc
 static VALUE
 vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
 {
-    struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
                                                   search_refined_method(ec, cfp, calling));

     if (vm_cc_cme(ref_cc)) {
@@ -3873,45 +3702,18 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
         CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);

         rb_check_arity(calling->argc, 1, 1);
-        const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG);
-
-        if (vm_cc_markable(cc)) {
-            vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
-            VM_CALL_METHOD_ATTR(v,
-                                vm_call_attrset_direct(ec, cfp, cc, calling->recv),
-                                CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
-        } else {
-            cc = &((struct rb_callcache) {
-                .flags = T_IMEMO |
-                    (imemo_callcache << FL_USHIFT) |
-                    VM_CALLCACHE_UNMARKABLE |
-                    ((VALUE)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT) |
-                    VM_CALLCACHE_ON_STACK,
-                .klass = cc->klass,
-                .cme_ = cc->cme_,
-                .call_ = cc->call_,
-                .aux_ = {
-                    .attr = {
-                        .index = 0,
-                        .dest_shape_id = INVALID_SHAPE_ID,
-                    }
-                },
-            });
-
-            VM_CALL_METHOD_ATTR(v,
-                                vm_call_attrset_direct(ec, cfp, cc, calling->recv),
-                                CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
-        }
         return v;

       case VM_METHOD_TYPE_IVAR:
         CALLER_SETUP_ARG(cfp, calling, ci);
         CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
         rb_check_arity(calling->argc, 0, 0);
-        if (vm_cc_markable(cc)) {
-            vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
-        }
         const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT);
         VM_CALL_METHOD_ATTR(v,
                             vm_call_ivar(ec, cfp, calling),
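For context on what the removed code did: object shapes record instance-variable layout in a transition tree, so a call site can cache "if the receiver has shape S, the ivar lives at index I, and writing it moves the object to shape D" and skip any hash lookup on the fast path. The sketch below is a minimal, self-contained model of that idea, not CRuby's implementation: the toy_* names are invented for illustration, the tree is a flat array rather than CRuby's rb_shape_t graph, and frozen objects, generic ivars, and GC write barriers are deliberately not modeled.

#include <stdio.h>
#include <string.h>

#define MAX_SHAPES 64
#define MAX_IVARS  8

typedef unsigned int shape_id_t;
typedef unsigned int attr_index_t;

/* One node in the shape transition tree: an object whose shape is `parent`
 * gains ivar `edge_name` and becomes this shape, storing it at `index`. */
struct toy_shape {
    shape_id_t   parent;
    const char  *edge_name;
    attr_index_t index;
};

static struct toy_shape shapes[MAX_SHAPES];
static shape_id_t nshapes = 1;       /* shapes[0] is the empty root shape */

/* Analogue of rb_shape_get_iv_index: walk the ancestry looking for `name`. */
static int
shape_get_iv_index(shape_id_t s, const char *name, attr_index_t *index)
{
    for (; s != 0; s = shapes[s].parent) {
        if (strcmp(shapes[s].edge_name, name) == 0) {
            *index = shapes[s].index;
            return 1;
        }
    }
    return 0;
}

/* Analogue of rb_shape_get_next: find or create the child reached by
 * adding `name` (no bounds checks; this is only a sketch). */
static shape_id_t
shape_get_next(shape_id_t s, const char *name)
{
    for (shape_id_t i = 1; i < nshapes; i++) {
        if (shapes[i].parent == s && strcmp(shapes[i].edge_name, name) == 0) {
            return i;
        }
    }
    shapes[nshapes] = (struct toy_shape){
        .parent = s,
        .edge_name = name,
        .index = (s == 0) ? 0 : shapes[s].index + 1,
    };
    return nshapes++;
}

struct toy_obj {
    shape_id_t shape;
    double     ivars[MAX_IVARS];
};

/* Per-call-site inline cache: the (source shape, dest shape, index) triple
 * that vm_ic_attr_index_set / vm_cc_attr_index_set store in the diff above. */
struct toy_ic {
    shape_id_t   source_shape_id;
    shape_id_t   dest_shape_id;
    attr_index_t index;
};

static void
toy_setivar(struct toy_obj *o, const char *name, double val, struct toy_ic *ic)
{
    /* Fast path, as in vm_setivar: receiver matches the cached source shape. */
    if (o->shape == ic->source_shape_id) {
        if (ic->dest_shape_id != ic->source_shape_id) {
            o->shape = ic->dest_shape_id;   /* replay the cached transition */
        }
        o->ivars[ic->index] = val;
        return;
    }

    /* Slow path, as in vm_setivar_slowpath: consult (and maybe extend) the
     * transition tree, then fill the cache so the next write is a hit. */
    attr_index_t index;
    shape_id_t next = o->shape;      /* already present: no transition */
    if (!shape_get_iv_index(o->shape, name, &index)) {
        next = shape_get_next(o->shape, name);
        index = shapes[next].index;
    }
    ic->source_shape_id = o->shape;
    ic->dest_shape_id = next;
    ic->index = index;
    o->shape = next;
    o->ivars[index] = val;
}

int
main(void)
{
    struct toy_ic ic = { .source_shape_id = (shape_id_t)-1 };
    struct toy_obj a = {0}, b = {0};

    toy_setivar(&a, "@x", 1.0, &ic);  /* miss: creates the root->"@x" shape  */
    toy_setivar(&b, "@x", 2.0, &ic);  /* hit: b starts with the same shape   */
    toy_setivar(&a, "@x", 3.0, &ic);  /* miss, re-cached with source == dest */
    printf("a.@x = %.1f, b.@x = %.1f\n", a.ivars[0], b.ivars[0]);
    return 0;
}

The read path (vm_getivar in the diff) is the symmetric half of the same technique: compare the receiver's shape id against the cached one and, on a hit, index straight into the ivar array with the cached attr_index_t, falling back to a tree lookup that refills the cache on a miss.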