-rw-r--r--  class.c                                   |  45
-rw-r--r--  common.mk                                 |   1
-rw-r--r--  compile.c                                 |  38
-rw-r--r--  debug_counter.h                           |  92
-rw-r--r--  eval.c                                    |   2
-rw-r--r--  ext/objspace/objspace.c                   |   1
-rw-r--r--  gc.c                                      | 204
-rw-r--r--  id_table.c                                |   2
-rw-r--r--  insns.def                                 |  13
-rw-r--r--  internal/class.h                          |   2
-rw-r--r--  internal/imemo.h                          |   4
-rw-r--r--  internal/vm.h                             |  41
-rw-r--r--  iseq.c                                    |  17
-rw-r--r--  method.h                                  |  11
-rw-r--r--  mjit.c                                    |  19
-rw-r--r--  mjit.h                                    |  29
-rw-r--r--  mjit_compile.c                            |  42
-rw-r--r--  mjit_worker.c                             |  30
-rw-r--r--  test/-ext-/tracepoint/test_tracepoint.rb  |  12
-rw-r--r--  test/ruby/test_gc.rb                      |   3
-rw-r--r--  test/ruby/test_inlinecache.rb             |  64
-rw-r--r--  tool/mk_call_iseq_optimized.rb            |   2
-rw-r--r--  tool/ruby_vm/views/_mjit_compile_send.erb |  23
-rw-r--r--  tool/ruby_vm/views/mjit_compile.inc.erb   |   2
-rw-r--r--  vm.c                                      |  26
-rw-r--r--  vm_callinfo.h                             | 235
-rw-r--r--  vm_core.h                                 |   3
-rw-r--r--  vm_dump.c                                 |   4
-rw-r--r--  vm_eval.c                                 |  50
-rw-r--r--  vm_insnhelper.c                           | 814
-rw-r--r--  vm_insnhelper.h                           |  15
-rw-r--r--  vm_method.c                               | 630

32 files changed, 1606 insertions(+), 870 deletions(-)
diff --git a/class.c b/class.c
@@ -894,12 +894,21 @@ add_refined_method_entry_i(ID key, VALUE value, void *data)
 static void ensure_origin(VALUE klass);

 static int
 include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super)
 {
     VALUE p, iclass;
     int method_changed = 0, constant_changed = 0;
     struct rb_id_table *const klass_m_tbl = RCLASS_M_TBL(RCLASS_ORIGIN(klass));

     if (FL_TEST(module, RCLASS_REFINED_BY_ANY)) {
         ensure_origin(module);

@@ -912,7 +921,7 @@ include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super)
         if (klass_m_tbl && klass_m_tbl == RCLASS_M_TBL(module))
             return -1;
         /* ignore if the module included already in superclasses */
-        for (p = RCLASS_SUPER(klass); p; p = RCLASS_SUPER(p)) {
             int type = BUILTIN_TYPE(p);
             if (type == T_ICLASS) {
                 if (RCLASS_M_TBL(p) == RCLASS_M_TBL(module)) {

@@ -924,37 +933,53 @@ include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super)
             }
             else if (type == T_CLASS) {
                 if (!search_super) break;
-                superclass_seen = TRUE;
             }
         }
-        iclass = rb_include_class_new(module, RCLASS_SUPER(c));
         c = RCLASS_SET_SUPER(c, iclass);
         RCLASS_SET_INCLUDER(iclass, klass);

         {
             VALUE m = module;
-            if (BUILTIN_TYPE(m) == T_ICLASS) m = RBASIC(m)->klass;
-            rb_module_add_to_subclasses_list(m, iclass);
         }

         if (FL_TEST(klass, RMODULE_IS_REFINEMENT)) {
             VALUE refined_class = rb_refinement_module_get_refined_class(klass);
-            rb_id_table_foreach(RMODULE_M_TBL(module), add_refined_method_entry_i, (void *)refined_class);
             FL_SET(c, RMODULE_INCLUDED_INTO_REFINEMENT);
         }
-        tbl = RMODULE_M_TBL(module);
-        if (tbl && rb_id_table_size(tbl)) method_changed = 1;
-        tbl = RMODULE_CONST_TBL(module);
         if (tbl && rb_id_table_size(tbl)) constant_changed = 1;
       skip:
         module = RCLASS_SUPER(module);
     }
-    if (method_changed) rb_clear_method_cache_by_class(klass);
     if (constant_changed) rb_clear_constant_cache();

     return method_changed;

diff --git a/common.mk b/common.mk
@@ -2946,6 +2946,7 @@ mjit.$(OBJEXT): {$(VPATH)}thread.h
 mjit.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
 mjit.$(OBJEXT): {$(VPATH)}thread_native.h
 mjit.$(OBJEXT): {$(VPATH)}util.h
 mjit.$(OBJEXT): {$(VPATH)}vm_core.h
 mjit.$(OBJEXT): {$(VPATH)}vm_opts.h
 mjit_compile.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h

diff --git a/compile.c b/compile.c
@@ -566,6 +566,8 @@
 static void
 verify_call_cache(rb_iseq_t *iseq)
 {
 #if CPDEBUG
     VALUE *original = rb_iseq_original_iseq(iseq);
     size_t i = 0;
     while (i < iseq->body->iseq_size) {

@@ -574,16 +576,27 @@ verify_call_cache(rb_iseq_t *iseq)
         for (int j=0; types[j]; j++) {
             if (types[j] == TS_CALLDATA) {
-                struct rb_call_cache cc;
                 struct rb_call_data *cd = (struct rb_call_data *)original[i+j+1];
-                MEMZERO(&cc, cc, 1);
-                if (memcmp(&cc, &cd->cc, sizeof(cc))) {
-                    rb_bug("call cache not zero for fresh iseq");
                 }
             }
         }
         i += insn_len(insn);
     }
 #endif
 }

@@ -661,7 +674,7 @@ rb_iseq_compile_node(rb_iseq_t *iseq, const NODE *node)
     DECL_ANCHOR(ret);
     INIT_ANCHOR(ret);

-    if (imemo_type_p((VALUE)node, imemo_ifunc)) {
         rb_raise(rb_eArgError, "unexpected imemo_ifunc");
     }

@@ -1212,6 +1225,7 @@ new_callinfo(rb_iseq_t *iseq, ID mid, int argc, unsigned int flag, struct rb_cal
         argc += kw_arg->keyword_len;
     }

     iseq->body->ci_size++;
     const struct rb_callinfo *ci = vm_ci_new(mid, flag, argc, kw_arg);
     RB_OBJ_WRITTEN(iseq, Qundef, ci);

@@ -2223,6 +2237,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
                         struct rb_call_data *cd = &body->call_data[ISEQ_COMPILE_DATA(iseq)->ci_index++];
                         assert(ISEQ_COMPILE_DATA(iseq)->ci_index <= body->ci_size);
                         cd->ci = source_ci;
                         generated_iseq[code_index + 1 + j] = (VALUE)cd;
                         break;
                     }
@@ -10301,16 +10316,18 @@
 ibf_dump_ci_entries(struct ibf_dump *dump, const rb_iseq_t *iseq)
 }

 /* note that we dump out rb_call_info but load back rb_call_data */
-static struct rb_call_data *
 ibf_load_ci_entries(const struct ibf_load *load,
                     ibf_offset_t ci_entries_offset,
-                    unsigned int ci_size)
 {
     ibf_offset_t reading_pos = ci_entries_offset;
     unsigned int i;

     struct rb_call_data *cds = ZALLOC_N(struct rb_call_data, ci_size);

     for (i = 0; i < ci_size; i++) {
         VALUE mid_index = ibf_load_small_value(load, &reading_pos);

@@ -10331,10 +10348,9 @@ ibf_load_ci_entries(const struct ibf_load *load,
         cds[i].ci = vm_ci_new(mid, flag, argc, kwarg);
         RB_OBJ_WRITTEN(load->iseq, Qundef, cds[i].ci);
     }
-
-    return cds;
-}

 static ibf_offset_t
 ibf_dump_iseq_each(struct ibf_dump *dump, const rb_iseq_t *iseq)

@@ -10588,7 +10604,7 @@ ibf_load_iseq_each(struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t offset)
     load_body->catch_except_p = catch_except_p;

     load_body->is_entries = ZALLOC_N(union iseq_inline_storage_entry, is_size);
-    load_body->call_data = ibf_load_ci_entries(load, ci_entries_offset, ci_size);
     load_body->param.opt_table = ibf_load_param_opt_table(load, param_opt_table_offset, param_opt_num);
     load_body->param.keyword = ibf_load_param_keyword(load, param_keyword_offset);
     load_body->param.flags.has_kw = (param_flags >> 4) & 1;

diff --git a/debug_counter.h b/debug_counter.h
@@ -14,46 +14,45 @@
 #ifdef RB_DEBUG_COUNTER

-/*
- * method cache (mc) counts.
- *
- * * mc_inline_hit/miss: inline mc hit/miss counts (VM send insn)
- * * mc_global_hit/miss: global method cache hit/miss counts
- *                       two types: (1) inline cache miss (VM send insn)
- *                                  (2) called from C (rb_funcall).
- * * mc_global_state_miss: inline mc miss by global_state miss.
- * * mc_class_serial_miss: ... by mc_class_serial_miss
- * * mc_cme_complement: callable_method_entry complement counts.
- * * mc_cme_complement_hit: callable_method_entry cache hit counts.
- * * mc_search_super: search_method() call counts.
- * * mc_miss_by_nome: inline mc miss by no ment.
- * * mc_miss_by_distinct: ... by distinct ment.
- * * mc_miss_by_refine: ... by ment being refined.
- * * mc_miss_by_visi: ... by visibility change.
- * * mc_miss_spurious: spurious inline mc misshit.
- * * mc_miss_reuse_call: count of reuse of cc->call.
- */
-RB_DEBUG_COUNTER(mc_inline_hit)
-RB_DEBUG_COUNTER(mc_inline_miss)
-RB_DEBUG_COUNTER(mc_global_hit)
-RB_DEBUG_COUNTER(mc_global_miss)
-RB_DEBUG_COUNTER(mc_global_state_miss)
-RB_DEBUG_COUNTER(mc_class_serial_miss)
-RB_DEBUG_COUNTER(mc_cme_complement)
-RB_DEBUG_COUNTER(mc_cme_complement_hit)
-RB_DEBUG_COUNTER(mc_search_super)
-RB_DEBUG_COUNTER(mc_miss_by_nome)
-RB_DEBUG_COUNTER(mc_miss_by_distinct)
-RB_DEBUG_COUNTER(mc_miss_by_refine)
-RB_DEBUG_COUNTER(mc_miss_by_visi)
-RB_DEBUG_COUNTER(mc_miss_spurious)
-RB_DEBUG_COUNTER(mc_miss_reuse_call)

 // callinfo
 RB_DEBUG_COUNTER(ci_packed)
 RB_DEBUG_COUNTER(ci_kw)
 RB_DEBUG_COUNTER(ci_nokw)
 RB_DEBUG_COUNTER(ci_runtime)

 /*
  * call cache fastpath usage

@@ -289,6 +288,7 @@ RB_DEBUG_COUNTER(obj_imemo_ifunc)
 RB_DEBUG_COUNTER(obj_imemo_memo)
 RB_DEBUG_COUNTER(obj_imemo_parser_strterm)
 RB_DEBUG_COUNTER(obj_imemo_callinfo)

 /* ar_table */
 RB_DEBUG_COUNTER(artable_hint_hit)

@@ -375,17 +375,33 @@ rb_debug_counter_add(enum rb_debug_counter_type type, int add, int cond)
     return cond;
 }

 VALUE rb_debug_counter_reset(VALUE klass);
 VALUE rb_debug_counter_show(VALUE klass);

 #define RB_DEBUG_COUNTER_INC(type)                rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, 1)
 #define RB_DEBUG_COUNTER_INC_UNLESS(type, cond) (!rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, !(cond)))
 #define RB_DEBUG_COUNTER_INC_IF(type, cond)       rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, (cond))

 #else

 #define RB_DEBUG_COUNTER_INC(type)              ((void)0)
 #define RB_DEBUG_COUNTER_INC_UNLESS(type, cond) (cond)
 #define RB_DEBUG_COUNTER_INC_IF(type, cond)     (cond)

 #endif

 void rb_debug_counter_show_results(const char *msg);
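Note: the macro definitions above make the counter API self-describing; a minimal usage sketch (not part of the diff, wrapper function hypothetical) looks like this. The counters compile to no-ops when RB_DEBUG_COUNTER is undefined, and the `_IF` form passes its condition through so it can guard a branch:

    static inline int
    lookup_with_stats(int hit)   /* hypothetical caller, for illustration only */
    {
        RB_DEBUG_COUNTER_INC(ci_packed);          /* unconditional bump */
        /* bump artable_hint_hit only when `hit` is true; returns `hit` */
        return RB_DEBUG_COUNTER_INC_IF(artable_hint_hit, hit);
    }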
diff --git a/eval.c b/eval.c
@@ -1476,7 +1476,7 @@ rb_using_module(const rb_cref_t *cref, VALUE module)
 {
     Check_Type(module, T_MODULE);
     using_module_recursive(cref, module);
-    rb_clear_method_cache_by_class(rb_cObject);
 }

 /*! \private */

diff --git a/ext/objspace/objspace.c b/ext/objspace/objspace.c
@@ -638,6 +638,7 @@ count_imemo_objects(int argc, VALUE *argv, VALUE self)
         imemo_type_ids[9] = rb_intern("imemo_ast");
         imemo_type_ids[10] = rb_intern("imemo_parser_strterm");
         imemo_type_ids[11] = rb_intern("imemo_callinfo");
     }

     rb_objspace_each_objects(count_imemo_objects_i, (void *)hash);

diff --git a/gc.c b/gc.c
@@ -2530,6 +2530,116 @@ rb_free_const_table(struct rb_id_table *tbl)
     rb_id_table_free(tbl);
 }

 static inline void
 make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
 {

@@ -2621,6 +2731,7 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
       case T_CLASS:
         mjit_remove_class_serial(RCLASS_SERIAL(obj));
         rb_id_table_free(RCLASS_M_TBL(obj));
         if (RCLASS_IV_TBL(obj)) {
             st_free_table(RCLASS_IV_TBL(obj));
         }

@@ -2805,6 +2916,7 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
             rb_class_detach_subclasses(obj);
             RCLASS_EXT(obj)->subclasses = NULL;
         }
         rb_class_remove_from_module_subclasses(obj);
         rb_class_remove_from_super_subclasses(obj);
         xfree(RANY(obj)->as.klass.ptr);

@@ -2896,6 +3008,9 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
           case imemo_callinfo:
             RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
             break;
           default:
             /* unreachable */
             break;

@@ -5335,6 +5450,13 @@ gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
         return;
       case imemo_callinfo:
         return;
 #if VM_CHECK_MODE > 0
       default:
         VM_UNREACHABLE(gc_mark_imemo);

@@ -5383,7 +5505,9 @@ gc_mark_children(rb_objspace_t *objspace, VALUE obj)
             gc_mark(objspace, RCLASS_SUPER(obj));
         }
         if (!RCLASS_EXT(obj)) break;
         mark_m_tbl(objspace, RCLASS_M_TBL(obj));
         mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
         mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
         break;

@@ -5397,6 +5521,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE obj)
         }
         if (!RCLASS_EXT(obj)) break;
         mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
         break;

       case T_ARRAY:

@@ -8126,6 +8251,13 @@ gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
       case imemo_ast:
         rb_ast_update_references((rb_ast_t *)obj);
         break;
       case imemo_parser_strterm:
       case imemo_tmpbuf:
       case imemo_callinfo:

@@ -8202,6 +8334,39 @@ update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
 }

 static enum rb_id_table_iterator_result
 update_const_table(VALUE value, void *data)
 {
     rb_const_entry_t *ce = (rb_const_entry_t *)value;

@@ -8257,7 +8422,10 @@ gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
         }
         if (!RCLASS_EXT(obj)) break;
         update_m_tbl(objspace, RCLASS_M_TBL(obj));
         gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
         update_class_ext(objspace, RCLASS_EXT(obj));
         update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
         break;

@@ -8275,6 +8443,7 @@ gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
         }
         update_class_ext(objspace, RCLASS_EXT(obj));
         update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
         break;

       case T_IMEMO:

@@ -8607,7 +8776,6 @@ gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_doubl
         gc_check_references_for_moved(objspace);
     }
-    rb_clear_method_cache_by_class(rb_cObject);
     rb_clear_constant_cache();
     heap_eden->free_pages = NULL;
     heap_eden->using_page = NULL;

@@ -11550,6 +11718,9 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
             if (!NIL_P(class_path)) {
                 APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));
             }
             break;
           }
           case T_ICLASS:

@@ -11606,21 +11777,31 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
                 IMEMO_NAME(ast);
                 IMEMO_NAME(parser_strterm);
                 IMEMO_NAME(callinfo);
 #undef IMEMO_NAME
               default: UNREACHABLE;
             }
-            APPENDF((BUFF_ARGS, "/%s", imemo_name));

             switch (imemo_type(obj)) {
               case imemo_ment: {
                 const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
                 if (me->def) {
-                    APPENDF((BUFF_ARGS, "(called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)",
                              rb_id2name(me->called_id),
                              rb_method_type_name(me->def->type),
-                             me->def->alias_count,
-                             obj_info(me->owner),
-                             obj_info(me->defined_class)));
                 }
                 else {
                     APPENDF((BUFF_ARGS, "%s", rb_id2name(me->called_id)));

@@ -11642,6 +11823,17 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
                          vm_ci_kwarg(ci) ? "available" : "NULL"));
                 break;
               }
               default:
                 break;
             }

diff --git a/id_table.c b/id_table.c
@@ -229,7 +229,7 @@ rb_id_table_lookup(struct rb_id_table *tbl, ID id, VALUE *valp)
     int index = hash_table_index(tbl, key);

     if (index >= 0) {
-        *valp = tbl->items[index].val;
         return TRUE;
     }
     else {

diff --git a/insns.def b/insns.def
@@ -827,7 +827,7 @@ opt_nil_p
 (VALUE recv)
 (VALUE val)
 {
-    val = vm_opt_nil_p(cd, recv);

     if (val == Qundef) {
         CALL_SIMPLE_METHOD();

@@ -903,8 +903,9 @@ invokeblock
 // attr rb_snum_t sp_inc = sp_inc_of_invokeblock(cd->ci);
 // attr rb_snum_t comptime_sp_inc = sp_inc_of_invokeblock(ci);
 {
-    if (UNLIKELY(cd->cc.call != vm_invokeblock_i)) {
-        cd->cc.call = vm_invokeblock_i; // check before setting to avoid CoW
     }

     VALUE bh = VM_BLOCK_HANDLER_NONE;

@@ -1167,7 +1168,7 @@ opt_eq
 (VALUE recv, VALUE obj)
 (VALUE val)
 {
-    val = opt_eq_func(recv, obj, cd);

     if (val == Qundef) {
         CALL_SIMPLE_METHOD();

@@ -1181,7 +1182,7 @@ opt_neq
 (VALUE recv, VALUE obj)
 (VALUE val)
 {
-    val = vm_opt_neq(cd, cd_eq, recv, obj);

     if (val == Qundef) {
         CALL_SIMPLE_METHOD();

@@ -1431,7 +1432,7 @@ opt_not
 (VALUE recv)
 (VALUE val)
 {
-    val = vm_opt_not(cd, recv);

     if (val == Qundef) {
         CALL_SIMPLE_METHOD();
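Note on the removed invokeblock lines above: the "check before setting to avoid CoW" comment refers to fork copy-on-write friendliness. A hedged sketch of the pattern (names hypothetical, not from the diff):

    /* Writing an unchanged value still dirties the memory page, which breaks
     * copy-on-write sharing between a parent and a fork()ed child. Comparing
     * first keeps the page clean when the cached handler is already correct. */
    if (cache->call != handler) {
        cache->call = handler;   /* only write (and dirty the page) on change */
    }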
diff --git a/internal/class.h b/internal/class.h
@@ -41,6 +41,7 @@ struct rb_classext_struct {
 #endif
     struct rb_id_table *const_tbl;
     struct rb_id_table *callable_m_tbl;
     struct rb_subclass_entry *subclasses;
     struct rb_subclass_entry **parent_subclasses;
     /**

@@ -83,6 +84,7 @@ typedef struct rb_classext_struct rb_classext_t;
 #   define RCLASS_M_TBL(c) (RCLASS(c)->m_tbl)
 #endif
 #define RCLASS_CALLABLE_M_TBL(c) (RCLASS_EXT(c)->callable_m_tbl)
 #define RCLASS_IV_INDEX_TBL(c) (RCLASS_EXT(c)->iv_index_tbl)
 #define RCLASS_ORIGIN(c) (RCLASS_EXT(c)->origin_)
 #define RCLASS_REFINED_CLASS(c) (RCLASS_EXT(c)->refined_class)

diff --git a/internal/imemo.h b/internal/imemo.h
@@ -29,6 +29,7 @@
 #define IMEMO_FL_USER2 FL_USER6
 #define IMEMO_FL_USER3 FL_USER7
 #define IMEMO_FL_USER4 FL_USER8

 enum imemo_type {
     imemo_env            =  0,

@@ -43,6 +44,7 @@ enum imemo_type {
     imemo_ast            =  9,
     imemo_parser_strterm = 10,
     imemo_callinfo       = 11,
 };

 /* CREF (Class REFerence) is defined in method.h */

@@ -171,6 +173,8 @@ imemo_type_p(VALUE imemo, enum imemo_type imemo_type)
     }
 }

 static inline bool
 imemo_throw_data_p(VALUE imemo)
 {

diff --git a/internal/vm.h b/internal/vm.h
@@ -52,44 +52,6 @@ enum method_missing_reason {
     MISSING_NONE      = 0x40
 };

-struct rb_call_cache {
-    /* inline cache: keys */
-    rb_serial_t method_state;
-    rb_serial_t class_serial[
-        (CACHELINE
-         - sizeof(rb_serial_t)                                   /* method_state */
-         - sizeof(struct rb_callable_method_entry_struct *)      /* me */
-         - sizeof(uintptr_t)                                     /* method_serial */
-         - sizeof(enum method_missing_reason)                    /* aux */
-         - sizeof(VALUE (*)(                                     /* call */
-               struct rb_execution_context_struct *e,
-               struct rb_control_frame_struct *,
-               struct rb_calling_info *,
-               const struct rb_call_data *)))
-        / sizeof(rb_serial_t)
-    ];
-
-    /* inline cache: values */
-    const struct rb_callable_method_entry_struct *me;
-    uintptr_t method_serial; /* me->def->method_serial */
-
-    VALUE (*call)(struct rb_execution_context_struct *ec,
-                  struct rb_control_frame_struct *cfp,
-                  struct rb_calling_info *calling,
-                  struct rb_call_data *cd);
-
-    union {
-        unsigned int index; /* used by ivar */
-        enum method_missing_reason method_missing_reason; /* used by method_missing */
-    } aux;
-};
-STATIC_ASSERT(cachelined, sizeof(struct rb_call_cache) <= CACHELINE);
-
-struct rb_call_data {
-    const struct rb_callinfo *ci;
-    struct rb_call_cache cc;
-};
-
 /* vm_insnhelper.h */
 rb_serial_t rb_next_class_serial(void);

@@ -139,8 +101,9 @@ MJIT_SYMBOL_EXPORT_END
 VALUE rb_equal_opt(VALUE obj1, VALUE obj2);
 VALUE rb_eql_opt(VALUE obj1, VALUE obj2);

 MJIT_SYMBOL_EXPORT_BEGIN
-void rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass);
 MJIT_SYMBOL_EXPORT_END

 /* vm_dump.c */
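Note on the removed struct above: before this commit, a cache entry was considered live only while two global serial numbers matched. A minimal sketch of that validity test (simplified; function name hypothetical, fields taken from the removed struct):

    /* A hit required the global method state, the receiver class's serial,
     * and a non-NULL cached method entry. Bumping either serial anywhere in
     * the process invalidated every cache of this shape at once. */
    static inline bool
    call_cache_valid_p(const struct rb_call_cache *cc, VALUE klass)
    {
        return cc->method_state == GET_GLOBAL_METHOD_STATE()   /* global serial  */
            && cc->class_serial[0] == RCLASS_SERIAL(klass)     /* class serial   */
            && cc->me != NULL;                                 /* cached entry   */
    }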
diff --git a/iseq.c b/iseq.c
@@ -247,6 +247,7 @@ rb_iseq_update_references(rb_iseq_t *iseq)
             if (!SPECIAL_CONST_P(cds[i].ci)) {
                 cds[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)cds[i].ci);
             }
         }
     }
     if (FL_TEST(iseq, ISEQ_MARKABLE_ISEQ)) {

@@ -323,6 +324,11 @@ rb_iseq_mark(const rb_iseq_t *iseq)
         struct rb_call_data *cds = (struct rb_call_data *)body->call_data;
         for (unsigned int i=0; i<body->ci_size; i++) {
             rb_gc_mark_movable((VALUE)cds[i].ci);
         }
     }

@@ -351,6 +357,14 @@ rb_iseq_mark(const rb_iseq_t *iseq)
             }
         }
     }
 }

 if (FL_TEST_RAW(iseq, ISEQ_NOT_LOADED_YET)) {

@@ -663,6 +677,9 @@ finish_iseq_build(rb_iseq_t *iseq)
         rb_exc_raise(err);
     }

     rb_iseq_init_trace(iseq);
     return Qtrue;
 }

diff --git a/method.h b/method.h
@@ -69,8 +69,12 @@ typedef struct rb_callable_method_entry_struct { /* same fields with rb_method_entry_t */
 #define METHOD_ENTRY_VISI(me)  (rb_method_visibility_t)(((me)->flags & (IMEMO_FL_USER0 | IMEMO_FL_USER1)) >> (IMEMO_FL_USHIFT+0))
 #define METHOD_ENTRY_BASIC(me) (int)                   (((me)->flags & (IMEMO_FL_USER2                 )) >> (IMEMO_FL_USHIFT+2))
-#define METHOD_ENTRY_COMPLEMENTED(me)     ((me)->flags & IMEMO_FL_USER3)
-#define METHOD_ENTRY_COMPLEMENTED_SET(me) ((me)->flags = (me)->flags | IMEMO_FL_USER3)

 static inline void
 METHOD_ENTRY_VISI_SET(rb_method_entry_t *me, rb_method_visibility_t visi)

@@ -229,4 +233,7 @@ void rb_scope_visibility_set(rb_method_visibility_t);
 VALUE rb_unnamed_parameters(int arity);

 #endif /* RUBY_METHOD_H */

diff --git a/mjit.c b/mjit.c
@@ -25,6 +25,9 @@
 #include "internal/warnings.h"
 #include "mjit_worker.c"

 // Copy ISeq's states so that race condition does not happen on compilation.
 static void

@@ -51,14 +54,18 @@ mjit_copy_job_handler(void *data)
     }

     const struct rb_iseq_constant_body *body = job->iseq->body;
-    if (job->cc_entries) {
-        unsigned int i;
-        struct rb_call_cache *sink = job->cc_entries;
-        const struct rb_call_data *calls = body->call_data;
-        for (i = 0; i < body->ci_size; i++) {
-            *sink++ = calls[i].cc;
         }
     }
     if (job->is_entries) {
         memcpy(job->is_entries, body->is_entries, sizeof(union iseq_inline_storage_entry) * body->is_size);
     }

diff --git a/mjit.h b/mjit.h
@@ -70,6 +70,35 @@ struct rb_mjit_compile_info {
     bool disable_inlining;
 };

 typedef VALUE (*mjit_func_t)(rb_execution_context_t *, rb_control_frame_t *);

 RUBY_SYMBOL_EXPORT_BEGIN

diff --git a/mjit_compile.c b/mjit_compile.c
@@ -41,9 +41,9 @@ call_data_index(CALL_DATA cd, const struct rb_iseq_constant_body *body)
 // For propagating information needed for lazily pushing a frame.
 struct inlined_call_context {
     int orig_argc; // ci->orig_argc
-    VALUE me; // cc->me
-    int param_size; // def_iseq_ptr(cc->me->def)->body->param.size
-    int local_size; // def_iseq_ptr(cc->me->def)->body->local_table_size
 };

@@ -57,7 +57,6 @@ struct compile_status {
     bool local_stack_p;
     // Safely-accessible cache entries copied from main thread.
     union iseq_inline_storage_entry *is_entries;
-    struct rb_call_cache *cc_entries;
     // Mutated optimization levels
     struct rb_mjit_compile_info *compile_info;
     // If `inlined_iseqs[pos]` is not NULL, `mjit_compile_body` tries to inline ISeq there.

@@ -79,13 +78,11 @@ struct case_dispatch_var {
     VALUE last_value;
 };

-// Returns true if call cache is still not obsoleted and cc->me->def->type is available.
 static bool
 has_valid_method_type(CALL_CACHE cc)
 {
-    extern bool mjit_valid_class_serial_p(rb_serial_t class_serial);
-    return GET_GLOBAL_METHOD_STATE() == cc->method_state
-        && mjit_valid_class_serial_p(cc->class_serial[0]) && cc->me;
 }

 // Returns true if iseq can use fastpath for setup, otherwise NULL. This becomes true in the same condition

@@ -276,7 +273,8 @@ compile_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct
     fprintf(f, "    return Qundef;\n");
 }

-extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries);

 static bool
 mjit_compile_body(FILE *f, const rb_iseq_t *iseq, struct compile_status *status)

@@ -368,8 +366,6 @@ inlinable_iseq_p(const struct rb_iseq_constant_body *body)
     .stack_size_for_pos = (int *)alloca(sizeof(int) * body->iseq_size), \
     .inlined_iseqs = compile_root_p ? \
         alloca(sizeof(const struct rb_iseq_constant_body *) * body->iseq_size) : NULL, \
-    .cc_entries = body->ci_size > 0 ? \
-        alloca(sizeof(struct rb_call_cache) * body->ci_size) : NULL, \
     .is_entries = (body->is_size > 0) ? \
         alloca(sizeof(union iseq_inline_storage_entry) * body->is_size) : NULL, \
     .compile_info = compile_root_p ? \

@@ -394,17 +390,18 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
 #else
         int insn = (int)body->iseq_encoded[pos];
 #endif
-        if (insn == BIN(opt_send_without_block)) { // `compile_inlined_cancel_handler` supports only `opt_send_without_block`
             CALL_DATA cd = (CALL_DATA)body->iseq_encoded[pos + 1];
             const struct rb_callinfo *ci = cd->ci;
-            CALL_CACHE cc_copy = status->cc_entries + call_data_index(cd, body); // use copy to avoid race condition

             const rb_iseq_t *child_iseq;
-            if (has_valid_method_type(cc_copy) &&
-                !(vm_ci_flag(ci) & VM_CALL_TAILCALL) && // inlining only non-tailcall path
-                cc_copy->me->def->type == VM_METHOD_TYPE_ISEQ && fastpath_applied_iseq_p(ci, cc_copy, child_iseq = def_iseq_ptr(cc_copy->me->def)) && // CC_SET_FASTPATH in vm_callee_setup_arg
-                inlinable_iseq_p(child_iseq->body)) {
                 status->inlined_iseqs[pos] = child_iseq->body;

                 if (mjit_opts.verbose >= 1) // print beforehand because ISeq may be GCed during copy job.

@@ -418,12 +415,12 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
             INIT_COMPILE_STATUS(child_status, child_iseq->body, false);
             child_status.inline_context = (struct inlined_call_context){
                 .orig_argc = vm_ci_argc(ci),
-                .me = (VALUE)cc_copy->me,
                 .param_size = child_iseq->body->param.size,
                 .local_size = child_iseq->body->local_table_size
             };
-            if ((child_status.cc_entries != NULL || child_status.is_entries != NULL)
-                && !mjit_copy_cache_from_main_thread(child_iseq, child_status.cc_entries, child_status.is_entries))
                 return false;

             fprintf(f, "ALWAYS_INLINE(static VALUE _mjit_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq));\n", pos);

@@ -454,9 +451,10 @@ mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname)
     struct compile_status status;
     INIT_COMPILE_STATUS(status, iseq->body, true);
-    if ((status.cc_entries != NULL || status.is_entries != NULL)
-        && !mjit_copy_cache_from_main_thread(iseq, status.cc_entries, status.is_entries))
         return false;

     if (!status.compile_info->disable_send_cache && !status.compile_info->disable_inlining) {
         if (!precompile_inlinable_iseqs(f, iseq, &status))

diff --git a/mjit_worker.c b/mjit_worker.c
@@ -122,32 +122,6 @@ typedef intptr_t pid_t;

 #define MJIT_TMP_PREFIX "_ruby_mjit_"

-// The unit structure that holds metadata of ISeq for MJIT.
-struct rb_mjit_unit {
-    // Unique order number of unit.
-    int id;
-    // Dlopen handle of the loaded object file.
-    void *handle;
-    rb_iseq_t *iseq;
-#ifndef _MSC_VER
-    // This value is always set for `compact_all_jit_code`. Also used for lazy deletion.
-    char *o_file;
-    // true if it's inherited from parent Ruby process and lazy deletion should be skipped.
-    // `o_file = NULL` can't be used to skip lazy deletion because `o_file` could be used
-    // by child for `compact_all_jit_code`.
-    bool o_file_inherited_p;
-#endif
-#if defined(_WIN32)
-    // DLL cannot be removed while loaded on Windows. If this is set, it'll be lazily deleted.
-    char *so_file;
-#endif
-    // Only used by unload_units. Flag to check this unit is currently on stack or not.
-    char used_code_p;
-    struct list_node unode;
-    // mjit_compile's optimization switches
-    struct rb_mjit_compile_info compile_info;
-};
-
 // Linked list of struct rb_mjit_unit.
 struct rb_mjit_unit_list {
     struct list_head head;

@@ -1117,7 +1091,6 @@ convert_unit_to_func(struct rb_mjit_unit *unit)

 typedef struct {
     const rb_iseq_t *iseq;
-    struct rb_call_cache *cc_entries;
     union iseq_inline_storage_entry *is_entries;
     bool finish_p;
 } mjit_copy_job_t;

@@ -1138,7 +1111,7 @@
 int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t , void *);

 // We're lazily copying cache values from main thread because these cache values
 // could be different between ones on enqueue timing and ones on dequeue timing.
 bool
-mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries)
 {
     mjit_copy_job_t *job = &mjit_copy_job; // just a short hand

@@ -1146,7 +1119,6 @@ mjit_copy_cache_from_main_thread
     job->finish_p = true; // disable dispatching this job in mjit_copy_job_handler while it's being modified
     CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread");
-    job->cc_entries = cc_entries;
     job->is_entries = is_entries;

     CRITICAL_SECTION_START(3, "in mjit_copy_cache_from_main_thread");
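Note on the copy-job above: the MJIT worker cannot read inline-storage entries directly because the VM thread mutates them concurrently, so it asks the main thread to take a snapshot. A rough sketch of the main-thread side of that handshake (simplified from the hunks above; error handling and the cancelled-job path are elided):

    /* Runs on the main (VM) thread via the postponed-job queue, so it can
     * read body->is_entries without racing against the interpreter. */
    static void
    copy_job_handler(void *data)
    {
        mjit_copy_job_t *job = data;
        if (job->finish_p) return;   /* request was withdrawn meanwhile */

        const struct rb_iseq_constant_body *body = job->iseq->body;
        if (job->is_entries) {
            memcpy(job->is_entries, body->is_entries,
                   sizeof(union iseq_inline_storage_entry) * body->is_size);
        }
        job->finish_p = true;        /* signal the waiting MJIT worker */
    }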
- fprintf(f, " if (UNLIKELY(GET_GLOBAL_METHOD_STATE() != %"PRI_SERIALT_PREFIX"u ||\n", cc_copy->method_state); - fprintf(f, " RCLASS_SERIAL(CLASS_OF(stack[%d])) != %"PRI_SERIALT_PREFIX"u)) {\n", b->stack_size - 1 - argc, cc_copy->class_serial[0]); fprintf(f, " reg_cfp->pc = original_body_iseq + %d;\n", pos); fprintf(f, " reg_cfp->sp = vm_base_ptr(reg_cfp) + %d;\n", b->stack_size); fprintf(f, " goto send_cancel;\n"); @@ -59,18 +64,18 @@ fprintf(f, " {\n"); fprintf(f, " struct rb_calling_info calling;\n"); % if insn.name == 'send' - fprintf(f, " calling.block_handler = vm_caller_setup_arg_block(ec, reg_cfp, (CALL_INFO)0x%"PRIxVALUE", (rb_iseq_t *)0x%"PRIxVALUE", FALSE);\n", (VALUE)ci, (VALUE)blockiseq); % else fprintf(f, " calling.block_handler = VM_BLOCK_HANDLER_NONE;\n"); % end fprintf(f, " calling.argc = %d;\n", vm_ci_argc(ci)); fprintf(f, " calling.recv = stack[%d];\n", b->stack_size - 1 - argc); -% # JIT: Special CALL_METHOD. Bypass cc_copy->call and inline vm_call_iseq_setup_normal for vm_call_iseq_setup_func FASTPATH. fprintf(f, " {\n"); fprintf(f, " VALUE v;\n"); - fprintf(f, " vm_call_iseq_setup_normal(ec, reg_cfp, &calling, (const rb_callable_method_entry_t *)0x%"PRIxVALUE", 0, %d, %d);\n", - (VALUE)cc_copy->me, param_size, iseq->body->local_table_size); // fastpath_applied_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE if (iseq->body->catch_except_p) { fprintf(f, " VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);\n"); fprintf(f, " v = vm_exec(ec, TRUE);\n"); @@ -57,7 +57,7 @@ switch (insn) { % when *send_compatible_opt_insns % # To avoid cancel, just emit `opt_send_without_block` instead of `opt_*` insn if call cache is populated. % cd_index = insn.opes.index { |o| o.fetch(:type) == 'CALL_DATA' } - if (has_valid_method_type(status->cc_entries + call_data_index((CALL_DATA)operands[<%= cd_index %>], body))) { <%= render 'mjit_compile_send', locals: { insn: opt_send_without_block } -%> <%= render 'mjit_compile_insn', locals: { insn: opt_send_without_block } -%> break; @@ -386,6 +386,8 @@ rb_serial_t ruby_vm_global_method_state = 1; rb_serial_t ruby_vm_global_constant_state = 1; rb_serial_t ruby_vm_class_serial = 1; static void thread_free(void *ptr); void @@ -2806,8 +2808,9 @@ static VALUE m_core_undef_method(VALUE self, VALUE cbase, VALUE sym) { REWIND_CFP({ - rb_undef(cbase, SYM2ID(sym)); - rb_clear_method_cache_by_class(self); }); return Qnil; } @@ -2962,6 +2965,13 @@ f_lambda(VALUE _) return rb_block_lambda(); } void Init_VM(void) { @@ -3249,9 +3259,11 @@ Init_VM(void) #if VMDEBUG rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0); rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0); #else (void)sdr; (void)nsdr; #endif /* VM bootstrap: phase 2 */ @@ -3348,6 +3360,10 @@ Init_vm_objects(void) vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000); rb_objspace_gc_enable(vm->objspace); } /* top self */ @@ -3716,6 +3732,12 @@ vm_collect_usage_register(int reg, int isset) } #endif #endif /* #ifndef MJIT_HEADER */ #include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */ @@ -75,13 +75,13 @@ struct rb_callinfo { #define CI_EMBED_FLAG 0x01 #define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits) -#define CI_EMBED_ARGC_MASK ((1UL<<CI_EMBED_ARGC_bits) - 1) #define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits) -#define CI_EMBED_FLAG_MASK ((1UL<<CI_EMBED_FLAG_bits) - 1) #define CI_EMBED_ID_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits) -#define CI_EMBED_ID_MASK ((1UL<<CI_EMBED_ID_bits) - 1) -static inline 
diff --git a/vm.c b/vm.c
@@ -386,6 +386,8 @@
 rb_serial_t ruby_vm_global_method_state = 1;
 rb_serial_t ruby_vm_global_constant_state = 1;
 rb_serial_t ruby_vm_class_serial = 1;

 static void thread_free(void *ptr);

 void

@@ -2806,8 +2808,9 @@
 static VALUE
 m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
 {
     REWIND_CFP({
-        rb_undef(cbase, SYM2ID(sym));
-        rb_clear_method_cache_by_class(self);
     });
     return Qnil;
 }

@@ -2962,6 +2965,13 @@ f_lambda(VALUE _)
     return rb_block_lambda();
 }

 void
 Init_VM(void)
 {

@@ -3249,9 +3259,11 @@ Init_VM(void)
 #if VMDEBUG
     rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
     rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
 #else
     (void)sdr;
     (void)nsdr;
 #endif

     /* VM bootstrap: phase 2 */

@@ -3348,6 +3360,10 @@ Init_vm_objects(void)
     vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000);

     rb_objspace_gc_enable(vm->objspace);
 }

 /* top self */

@@ -3716,6 +3732,12 @@ vm_collect_usage_register(int reg, int isset)
 }
 #endif

 #endif /* #ifndef MJIT_HEADER */

 #include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */

diff --git a/vm_callinfo.h b/vm_callinfo.h
@@ -75,13 +75,13 @@ struct rb_callinfo {
 #define CI_EMBED_FLAG 0x01
 #define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
-#define CI_EMBED_ARGC_MASK ((1UL<<CI_EMBED_ARGC_bits) - 1)
 #define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
-#define CI_EMBED_FLAG_MASK ((1UL<<CI_EMBED_FLAG_bits) - 1)
 #define CI_EMBED_ID_SHFT   (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
-#define CI_EMBED_ID_MASK   ((1UL<<CI_EMBED_ID_bits) - 1)

-static inline
 int vm_ci_packed_p(const struct rb_callinfo *ci)
 {
 #if USE_EMBED_CI

@@ -89,7 +89,7 @@ vm_ci_packed_p(const struct rb_callinfo *ci)
         return 1;
     }
     else {
-        VM_ASSERT(imemo_type_p((VALUE)ci, imemo_callinfo));
         return 0;
     }
 #else

@@ -97,6 +97,17 @@ vm_ci_packed_p(const struct rb_callinfo *ci)
 #endif
 }

 static inline ID
 vm_ci_mid(const struct rb_callinfo *ci)
 {

@@ -141,7 +152,6 @@ vm_ci_kwarg(const struct rb_callinfo *ci)
     }
 }

-#if 0 // for debug
 static inline void
 vm_ci_dump(const struct rb_callinfo *ci)
 {

@@ -153,7 +163,6 @@ vm_ci_dump(const struct rb_callinfo *ci)
         rp(ci);
     }
 }
-#endif

 #define vm_ci_new(mid, flag, argc, kwarg)         vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
 #define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)

@@ -162,12 +171,11 @@
 static inline const struct rb_callinfo *
 vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
 {
 #if USE_EMBED_CI
-    if ((mid & ~CI_EMBED_ID_MASK) == 0 && (argc & ~CI_EMBED_ARGC_MASK) == 0 && kwarg == NULL) {
         VALUE embed_ci =
-            1L |
             ((VALUE)argc << CI_EMBED_ARGC_SHFT) |
             ((VALUE)flag << CI_EMBED_FLAG_SHFT) |
             ((VALUE)mid  << CI_EMBED_ID_SHFT);

@@ -175,8 +183,11 @@ vm_ci_new_
         return (const struct rb_callinfo *)embed_ci;
     }
 #endif
     const bool debug = 0;
     if (debug) fprintf(stderr, "%s:%d ", file, line);

     const struct rb_callinfo *ci = (const struct rb_callinfo *)
         rb_imemo_new(imemo_callinfo,
                      (VALUE)mid,

@@ -204,3 +215,209 @@
 vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
     RB_DEBUG_COUNTER_INC(ci_runtime);
     return vm_ci_new_(mid, flag, argc, kwarg, file, line);
 }
 [the roughly 200 lines this hunk appends were not captured in this dump]
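Note on the CI_EMBED machinery above: when USE_EMBED_CI holds, a whole call-info fits into a single tagged VALUE instead of a heap-allocated imemo. A minimal sketch of the packing, derived from the vm_ci_new_ hunk above (the standalone helper name is ours, not from the source):

    /* Bit 1 tags the word as a packed callinfo; argc, flag and method id
     * are then stacked at the CI_EMBED_*_SHFT positions defined above. */
    static VALUE
    pack_ci(ID mid, unsigned int flag, unsigned int argc)
    {
        return 1L                                   /* CI_EMBED tag bit */
             | ((VALUE)argc << CI_EMBED_ARGC_SHFT)
             | ((VALUE)flag << CI_EMBED_FLAG_SHFT)
             | ((VALUE)mid  << CI_EMBED_ID_SHFT);
    }
    /* Unpacking mirrors it, e.g. mid = (ci >> CI_EMBED_ID_SHFT) & CI_EMBED_ID_MASK,
     * which is why vm_ci_new_ refuses to embed when mid or argc overflow the masks. */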
diff --git a/vm_core.h b/vm_core.h
@@ -253,7 +253,6 @@ struct rb_calling_info {
 };

 struct rb_execution_context_struct;
-typedef VALUE (*vm_call_handler)(struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, struct rb_call_data *cd);

 #if 1
 #define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)

@@ -1088,7 +1087,7 @@
 typedef struct iseq_inline_cache_entry *IC;
 typedef struct iseq_inline_iv_cache_entry *IVC;
 typedef union iseq_inline_storage_entry *ISE;
 typedef const struct rb_callinfo *CALL_INFO;
-typedef struct rb_call_cache *CALL_CACHE;
 typedef struct rb_call_data *CALL_DATA;

 typedef VALUE CDHASH;

diff --git a/vm_dump.c b/vm_dump.c
@@ -111,7 +111,7 @@ control_frame_dump(const rb_execution_context_t *ec, const rb_control_frame_t *c
     }

     if (cfp->iseq != 0) {
-#define RUBY_VM_IFUNC_P(ptr) imemo_type_p((VALUE)ptr, imemo_ifunc)
         if (RUBY_VM_IFUNC_P(cfp->iseq)) {
             iseq_name = "<ifunc>";
         }

@@ -167,7 +167,7 @@ control_frame_dump(const rb_execution_context_t *ec, const rb_control_frame_t *c
         char buff[0x100];

         if (me) {
-            if (imemo_type_p((VALUE)me, imemo_ment)) {
                 fprintf(stderr, "  me:\n");
                 fprintf(stderr, "    called_id: %s, type: %s\n", rb_id2name(me->called_id), rb_method_type_name(me->def->type));
                 fprintf(stderr, "    owner class: %s\n", rb_raw_obj_info(buff, 0x100, me->owner));

diff --git a/vm_eval.c b/vm_eval.c
@@ -47,7 +47,8 @@ rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE
 {
     struct rb_calling_info calling = { Qundef, recv, argc, kw_splat, };
     const struct rb_callinfo *ci = vm_ci_new_runtime(id, kw_splat ? VM_CALL_KW_SPLAT : 0, argc, NULL);
-    const struct rb_call_cache cc = { 0, { 0, }, me, me->def->method_serial, vm_call_general, { 0, }, };
     struct rb_call_data cd = { ci, cc, };
     return vm_call0_body(ec, &calling, &cd, argv);
 }

@@ -56,9 +57,9 @@
 static VALUE
 vm_call0_cfunc_with_frame(rb_execution_context_t* ec, struct rb_calling_info *calling, struct rb_call_data *cd, const VALUE *argv)
 {
     const struct rb_callinfo *ci = cd->ci;
-    const struct rb_call_cache *cc = &cd->cc;
     VALUE val;
-    const rb_callable_method_entry_t *me = cc->me;
     const rb_method_cfunc_t *cfunc = UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
     int len = cfunc->argc;
     VALUE recv = calling->recv;

@@ -109,14 +110,14 @@
 static VALUE
 vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struct rb_call_data *cd, const VALUE *argv)
 {
     const struct rb_callinfo *ci = cd->ci;
-    struct rb_call_cache *cc = &cd->cc;
     VALUE ret;

     calling->block_handler = vm_passed_block_handler(ec);

   again:
-    switch (cc->me->def->type) {
       case VM_METHOD_TYPE_ISEQ:
         {
             rb_control_frame_t *reg_cfp = ec->cfp;

@@ -147,7 +148,7 @@
         }
         rb_check_arity(calling->argc, 1, 1);
-        ret = rb_ivar_set(calling->recv, cc->me->def->body.attr.id, argv[0]);
         goto success;
       case VM_METHOD_TYPE_IVAR:
         if (calling->kw_splat &&

@@ -158,7 +159,7 @@
         }
         rb_check_arity(calling->argc, 0, 0);
-        ret = rb_attr_get(calling->recv, cc->me->def->body.attr.id);
         goto success;
       case VM_METHOD_TYPE_BMETHOD:
         ret = vm_call_bmethod_body(ec, calling, cd, argv);

@@ -166,21 +167,21 @@
       case VM_METHOD_TYPE_ZSUPER:
       case VM_METHOD_TYPE_REFINED:
         {
-            const rb_method_type_t type = cc->me->def->type;
-            VALUE super_class = cc->me->defined_class;

             if (type == VM_METHOD_TYPE_ZSUPER) {
                 super_class = RCLASS_ORIGIN(super_class);
             }
-            else if (cc->me->def->body.refined.orig_me) {
-                CC_SET_ME(cc, refined_method_callable_without_refinement(cc->me));
-                goto again;
             }

             super_class = RCLASS_SUPER(super_class);

             if (super_class) {
-                CC_SET_ME(cc, rb_callable_method_entry(super_class, vm_ci_mid(ci)));
-                if (cc->me) {
                     RUBY_VM_CHECK_INTS(ec);
                     goto again;
                 }

@@ -191,7 +192,7 @@
             goto success;
         }
       case VM_METHOD_TYPE_ALIAS:
-        CC_SET_ME(cc, aliased_callable_method_entry(cc->me));
         goto again;
       case VM_METHOD_TYPE_MISSING:
         {

@@ -200,7 +201,7 @@
                                  argv, MISSING_NOENTRY, calling->kw_splat);
         }
       case VM_METHOD_TYPE_OPTIMIZED:
-        switch (cc->me->def->body.optimize_type) {
           case OPTIMIZED_METHOD_TYPE_SEND:
             ret = send_internal(calling->argc, argv, calling->recv, calling->kw_splat ? CALL_FCALL_KW : CALL_FCALL);
             goto success;

@@ -212,13 +213,13 @@
             goto success;
           }
           default:
-            rb_bug("vm_call0: unsupported optimized method type (%d)", cc->me->def->body.optimize_type);
         }
         break;
       case VM_METHOD_TYPE_UNDEF:
         break;
     }
-    rb_bug("vm_call0: unsupported method type (%d)", cc->me->def->type);
     return Qundef;

   success:

@@ -359,7 +360,7 @@ struct rescue_funcall_args {
     VALUE recv;
     ID mid;
     rb_execution_context_t *ec;
-    const rb_method_entry_t *me;
     unsigned int respond: 1;
     unsigned int respond_to_missing: 1;
     int argc;

@@ -373,7 +374,7 @@ check_funcall_exec(VALUE v)
     struct rescue_funcall_args *args = (void *)v;
     return call_method_entry(args->ec, args->defined_class,
                              args->recv, idMethodMissing,
-                             args->me, args->argc, args->argv, args->kw_splat);
 }

 static VALUE

@@ -417,7 +418,7 @@ check_funcall_missing(rb_execution_context_t *ec, VALUE klass, VALUE recv, ID mi
 {
     struct rescue_funcall_args args;
-    const rb_method_entry_t *me;
     VALUE ret = Qundef;

     ret = basic_obj_respond_to_missing(ec, klass, recv,

@@ -426,8 +427,9 @@
     args.respond = respond > 0;
     args.respond_to_missing = (ret != Qundef);
     ret = def;
-    me = method_entry_get(klass, idMethodMissing, &args.defined_class);
-    if (me && !METHOD_ENTRY_BASIC(me)) {
         VALUE argbuf, *new_args = ALLOCV_N(VALUE, argbuf, argc+1);

         new_args[0] = ID2SYM(mid);

@@ -442,7 +444,7 @@
         ec->method_missing_reason = MISSING_NOENTRY;
         args.ec = ec;
         args.recv = recv;
-        args.me = me;
         args.mid = mid;
         args.argc = argc + 1;
         args.argv = new_args;

diff --git a/vm_insnhelper.c b/vm_insnhelper.c
@@ -115,9 +115,9 @@ callable_class_p(VALUE klass)
 }

 static int
-callable_method_entry_p(const rb_callable_method_entry_t *me)
 {
-    if (me == NULL || callable_class_p(me->defined_class)) {
         return TRUE;
     }
     else {

@@ -221,8 +221,6 @@ static bool vm_stack_canary_was_born = false;
 MJIT_FUNC_EXPORTED void
 vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
 {
-    return;
-
     const struct rb_control_frame_struct *reg_cfp = ec->cfp;
     const struct rb_iseq_struct *iseq;

@@ -1024,9 +1022,9 @@ vm_search_const_defined_class(const VALUE cbase, ID id)
     return 0;
 }

-ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, IVC, struct rb_call_cache *, int));
 static inline VALUE
-vm_getivar(VALUE obj, ID id, IVC ic, struct rb_call_cache *cc, int is_attr)
 {
 #if OPT_IC_FOR_IVAR
     VALUE val = Qundef;

@@ -1035,10 +1033,10 @@ vm_getivar
         // frozen?
     }
     else if (LIKELY(is_attr ?
-                    RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, cc->aux.index > 0) :
                     RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial,
                                                 ic->ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
         st_index_t index = !is_attr ? ic->index : (cc->aux.index - 1);

         RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);

@@ -1076,7 +1074,7 @@ vm_getivar
                 ic->ic_serial = RCLASS_SERIAL(RBASIC(obj)->klass);
             }
             else { /* call_info */
-                cc->aux.index = (int)index + 1;
             }

             if (index < numiv) {

@@ -1124,7 +1122,7 @@
 }

 static inline VALUE
-vm_setivar(VALUE obj, ID id, VALUE val, IVC ic, struct rb_call_cache *cc, int is_attr)
 {
 #if OPT_IC_FOR_IVAR
     rb_check_frozen_internal(obj);

@@ -1135,9 +1133,9 @@ vm_setivar
         if (LIKELY(
             (!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, ic->ic_serial == RCLASS_SERIAL(klass))) ||
-            ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, cc->aux.index > 0)))) {
             VALUE *ptr = ROBJECT_IVPTR(obj);
-            index = !is_attr ? ic->index : cc->aux.index-1;

             if (RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_oorange, index < ROBJECT_NUMIV(obj))) {
                 RB_OBJ_WRITE(obj, &ptr[index], val);

@@ -1157,7 +1155,7 @@ vm_setivar
                 rb_raise(rb_eArgError, "too many instance variables");
             }
             else {
-                cc->aux.index = (int)(index + 1);
             }
         }
         /* fall through */
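Note on vm_getivar/vm_setivar above: both use the same inline-cache shape. A condensed sketch of the get fast path, pieced together from the hunks above (variable setup elided; this is not the full function):

    /* The cache is keyed by the receiver class's serial. On a hit, the ivar
     * is read straight out of the object's ivar array at the cached index.
     * For the attr_reader case the index lives in cc->aux.index with a +1
     * bias, because 0 is reserved to mean "not cached yet". */
    if (ic->ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass)) {
        st_index_t index = !is_attr ? ic->index : (cc->aux.index - 1);
        if (LIKELY(index < ROBJECT_NUMIV(obj))) {
            val = ROBJECT_IVPTR(obj)[index];   /* cache hit: one load, no lookup */
        }
    }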
@@ -1440,210 +1438,199 @@ vm_expandarray(VALUE *sp, VALUE ary, rb_num_t num, int flag)

 static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, struct rb_call_data *cd);

-#if __has_attribute(artificial)
-__attribute__((__artificial__))
-#endif
-static inline vm_call_handler
-calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me)
-{
-    const struct rb_callinfo *ci = cd->ci;
-    const struct rb_call_cache *cc = &cd->cc;
-
-    if (UNLIKELY(!me)) {
-        RB_DEBUG_COUNTER_INC(mc_miss_by_nome);
-        return vm_call_general; /* vm_call_method_nome() situation */
-    }
-    else if (LIKELY(cc->me != me)) {
-        RB_DEBUG_COUNTER_INC(mc_miss_by_distinct);
-        return vm_call_general; /* normal cases */
-    }
-    else if (UNLIKELY(cc->method_serial != me->def->method_serial)) {
-        RB_DEBUG_COUNTER_INC(mc_miss_by_refine);
-        return vm_call_general; /* cc->me was refined elsewhere */
-    }
-    /* "Calling a formerly-public method, which is now privatised, with an
-     * explicit receiver" is the only situation we have to check here.  A
-     * formerly-private method now publicised is an absolutely safe thing.
-     * Calling a private method without specifying a receiver is also safe. */
-    else if ((METHOD_ENTRY_VISI(cc->me) != METHOD_VISI_PUBLIC) &&
-             !(vm_ci_flag(ci) & VM_CALL_FCALL)) {
-        RB_DEBUG_COUNTER_INC(mc_miss_by_visi);
-        return vm_call_general;
-    }
-    else {
-        RB_DEBUG_COUNTER_INC(mc_miss_spurious);
-        (void)RB_DEBUG_COUNTER_INC_IF(mc_miss_reuse_call, cc->call != vm_call_general);
-        return cc->call;
-    }
-}
-
-MJIT_FUNC_EXPORTED void
-rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass)
-{
-    const struct rb_callinfo *ci = cd->ci;
-    struct rb_call_cache *cc = &cd->cc;
-
-    const rb_callable_method_entry_t *me =
-        rb_callable_method_entry(klass, vm_ci_mid(ci));
-    const vm_call_handler call = calccall(cd, me);
-    struct rb_call_cache buf = {
-        GET_GLOBAL_METHOD_STATE(),
-        { RCLASS_SERIAL(klass) },
-        me,
-        me ? me->def->method_serial : 0,
-        call,
-    };
-    if (call != vm_call_general) {
-        for (int i = 0; i < numberof(cc->class_serial) - 1; i++) {
-            buf.class_serial[i + 1] = cc->class_serial[i];
-        }
-    }
-    MEMCPY(cc, &buf, struct rb_call_cache, 1);
-    VM_ASSERT(callable_method_entry_p(cc->me));
-}
-
-/* # Description of what `vm_cache_check_for_class_serial()` is doing #########
- *
- * - Let's assume a `struct rb_call_cache` has its `class_serial` as an array
- *   of length 3 (typical situation for 64 bit environments):
- *
- *   ```C
- *   struct rb_call_cache {
- *       rb_serial_t method_state;
- *       rb_serial_t class_serial[3];
- *       rb_callable_method_entry_t *me;
- *       rb_method_definition_struct *def;
- *       vm_call_handler call;
- *       union { ... snip ... } aux;
- *   };
- *   ```
- *
- * - Initially, the `cc->class_serial` array is filled with zeros.
- *
- * - If the cache mishits, and if that was due to mc_miss_spurious situation,
- *   `rb_vm_search_method_slowpath()` pushes the newest class serial at the
- *   leftmost position of the `cc->class_serial`.
- *
- *   ```
- *   from: +--------------+-----+-----+-----+----+-----+------+-----+
- *         | method_state | (x) | (y) | (z) | me | def | call | aux |
- *         +--------------+-----+-----+-----+----+-----+------+-----+
- *                            \     \
- *                             \     \
- *                              v     v
- *   to:   +--------------+-----+-----+-----+----+-----+------+-----+
- *         | method_state | NEW | (x) | (y) | me | def | call | aux |
- *         +--------------+-----+-----+-----+----+-----+------+-----+
- *                          ^^^
- *                          fill RCLASS_SERIAL(klass)
- *   ```
- *
- * - Eventually, the `cc->class_serial` is filled with a series of classes that
- *   share the same method entry for the same call site.
- *
- * - `vm_cache_check_for_class_serial()` can say that the cache now hits if
- *   _any_ of the class serials stored inside of `cc->class_serial` is equal to
- *   the given `class_serial` value.
- *
- * - It scans the array from left to right, looking for the expected class
- *   serial.  If it finds that at `cc->class_serial[0]` (this branch
- *   probability is 98% according to @shyouhei's experiment), just returns
- *   true.  If it reaches the end of the array without finding anything,
- *   returns false.  This is done in the #1 loop below.
- *
- * - What needs to be complicated is when the class serial is found at either
- *   `cc->class_serial[1]` or `cc->class_serial[2]`.  When that happens, its
- *   return value is true because `cc->me` and `cc->call` are valid.  But
- *   `cc->aux` might be invalid.  Also the found class serial is expected to
- *   hit next time.  In this case we reorder the array and wipe out `cc->aux`.
- *   This is done in the #2 loop below.
- *
- *   ```
- *   from: +--------------+-----+-----+-----+----+-----+------+-----+
- *         | method_state | (x) | (y) | (z) | me | def | call | aux |
- *         +--------------+-----+-----+-----+----+-----+------+-----+
- *                            \     \     |
- *                             \     \    |
- *                           +- \ --- \ -+
- *                           |   \     \
- *                           |    \     \
- *                           v     v     v
- *   to:   +--------------+-----+-----+-----+----+-----+------+-----+
- *         | method_state | (z) | (x) | (y) | me | def | call | 000 |
- *         +--------------+-----+-----+-----+----+-----+------+-----+
- *                                                             ^^^
- *                                                             wipe out
- *   ```
- */
-static inline bool
-vm_cache_check_for_class_serial(struct rb_call_cache *cc, rb_serial_t class_serial)
-{
-    int i;
-    rb_serial_t j;
-
-    /* This is the loop #1 in above description. */
-    for (i = 0; i < numberof(cc->class_serial); i++) {
-        j = cc->class_serial[i];
-
-        if (! j) {
-            break;
-        }
-        else if (j != class_serial) {
-            continue;
-        }
-        else if (! i) {
-            return true;
-        }
-        else {
-            goto hit;
-        }
-    }
-
-    RB_DEBUG_COUNTER_INC(mc_class_serial_miss);
-    return false;
-
-  hit:
-    /* This is the loop #2 in above description. */
-    for (; i > 0; i--) {
-        cc->class_serial[i] = cc->class_serial[i - 1];
-    }
-
-    cc->class_serial[0] = j;
-    MEMZERO(&cc->aux, cc->aux, 1); /* cc->call is valid, but cc->aux might not. */
-    return true;
-}

 static void
-vm_search_method_fastpath(struct rb_call_data *cd, VALUE klass)
 {
-    struct rb_call_cache *cc = &cd->cc;

 #if OPT_INLINE_METHOD_CACHE
-    if (LIKELY(RB_DEBUG_COUNTER_INC_UNLESS(mc_global_state_miss,
-                                           GET_GLOBAL_METHOD_STATE() == cc->method_state) &&
-               vm_cache_check_for_class_serial(cc, RCLASS_SERIAL(klass)))) {
-        /* cache hit! */
-        VM_ASSERT(cc->call != NULL);
-        RB_DEBUG_COUNTER_INC(mc_inline_hit);
-        return;
     }
-    RB_DEBUG_COUNTER_INC(mc_inline_miss);
 #endif
-    rb_vm_search_method_slowpath(cd, klass);
 }

 static void
-vm_search_method(struct rb_call_data *cd, VALUE recv)
 {
     VALUE klass = CLASS_OF(recv);
-    VM_ASSERT(klass != Qfalse);
     VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
-    vm_search_method_fastpath(cd, klass);
 }

 static inline int

@@ -1659,16 +1646,16 @@ check_cfunc(const rb_callable_method_entry_t *me, VALUE (*func)())
 }

 static inline int
-vm_method_cfunc_is(CALL_DATA cd, VALUE recv, VALUE (*func)())
 {
-    vm_search_method(cd, recv);
-    return check_cfunc(cd->cc.me, func);
 }

 static VALUE
-opt_equal_fallback(VALUE recv, VALUE obj, CALL_DATA cd)
 {
-    if (vm_method_cfunc_is(cd, recv, rb_obj_equal)) {
         return recv == obj ? Qtrue : Qfalse;
     }

@@ -1728,7 +1715,7 @@
 static inline
 #endif
 VALUE
-opt_eq_func(VALUE recv, VALUE obj, CALL_DATA cd)
 {
     switch (comparable_by_identity(recv, obj)) {
       case 1:

@@ -1751,7 +1738,7 @@ opt_eq_func(VALUE recv, VALUE obj, CALL_DATA cd)
     }

   fallback:
-    return opt_equal_fallback(recv, obj, cd);
 }

 static

@@ -1781,7 +1768,7 @@ opt_eql_func(VALUE recv, VALUE obj, CALL_DATA cd)
     }

   fallback:
-    return opt_equal_fallback(recv, obj, cd);
 }
 #undef BUILTIN_CLASS_P
 #undef EQ_UNREDEFINED_P

@@ -1797,14 +1784,14 @@ rb_equal_opt(VALUE obj1, VALUE obj2)
         rb_gc_register_mark_object((VALUE)ci);
     }

-    struct rb_call_data cd = { .ci = ci, };
-    return opt_eq_func(obj1, obj2, &cd);
 }

 VALUE
 rb_eql_opt(VALUE obj1, VALUE obj2)
 {
-    struct rb_call_data cd = { .ci = vm_ci_new_id(idEqlP), };
     return opt_eql_func(obj1, obj2, &cd);
 }

@@ -1929,11 +1916,11 @@ vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t
 {
     RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);

-    struct rb_call_cache *cc = &cd->cc;
-    const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
     int param = iseq->body->param.size;
     int local = iseq->body->local_table_size;
-    return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, param, local);
 }

 MJIT_STATIC bool

@@ -2043,8 +2030,8 @@ vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame
                                     struct rb_calling_info *calling,
                                     struct rb_call_data *cd)
 {
-    const struct rb_call_cache *cc = &cd->cc;
-    const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
     const int lead_num = iseq->body->param.lead_num;
     const int opt = calling->argc - lead_num;
     const int opt_num = iseq->body->param.opt_num;

@@ -2064,7 +2051,7 @@
     }
 #endif

-    return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, opt_pc, param - delta, local);
 }

 static VALUE

@@ -2072,8 +2059,8 @@ vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_fra
                                       struct rb_calling_info *calling,
                                       struct rb_call_data *cd)
 {
-    const struct rb_call_cache *cc = &cd->cc;
-    const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
     const int lead_num = iseq->body->param.lead_num;
     const int opt = calling->argc - lead_num;
     const int opt_pc = (int)iseq->body->param.opt_table[opt];

@@ -2103,12 +2090,12 @@ vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *
                                 struct rb_call_data *cd)
 {
     const struct rb_callinfo *ci = cd->ci;
-    const struct rb_call_cache *cc = &cd->cc;

     VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
     RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);

-    const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
     const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
     const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
     const int ci_kw_len = kw_arg->keyword_len;

@@ -2122,7 +2109,7 @@ vm_call_iseq_setup_kwparm_kwarg
     int param = iseq->body->param.size;
     int local = iseq->body->local_table_size;
-    return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, param, local);
 }

 static VALUE

@@ -2131,12 +2118,12 @@ vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t
                                   struct rb_call_data *cd)
 {
     const struct rb_callinfo *MAYBE_UNUSED(ci) = cd->ci;
-    const struct rb_call_cache *cc = &cd->cc;

     VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
     RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);

-    const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
     const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
     VALUE * const argv = cfp->sp - calling->argc;
     VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;

@@ -2152,7 +2139,7 @@ vm_call_iseq_setup_kwparm_nokwarg
     int param = iseq->body->param.size;
     int local = iseq->body->local_table_size;
-    return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, param, local);
 }

 static inline int

@@ -2160,7 +2147,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
                     const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
 {
     const struct rb_callinfo *ci = cd->ci;
-    struct rb_call_cache *cc = &cd->cc;

     if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
         if (LIKELY(rb_simple_iseq_p(iseq))) {

@@ -2172,7 +2159,7 @@ vm_callee_setup_arg
                 argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
             }
-            CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), vm_call_iseq_optimizable_p(cd->ci, &cd->cc));
             return 0;
         }
         else if (rb_iseq_only_optparam_p(iseq)) {

@@ -2192,12 +2179,12 @@ vm_callee_setup_arg
             if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
                 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
                                 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
-                                !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));
             }
             else {
                 CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
                                 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
-                                !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));
             }

             /* initialize opt vars for self-references */

@@ -2225,7 +2212,7 @@ vm_callee_setup_arg
                 args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);

                 CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
-                                !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));

                 return 0;
             }

@@ -2238,7 +2225,7 @@ vm_callee_setup_arg
                 if (klocals[kw_param->num] == INT2FIX(0)) {
                     /* copy from default_values */
                     CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
-                                    !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));
                 }

                 return 0;

@@ -2254,11 +2241,11 @@ vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct r
 {
     RB_DEBUG_COUNTER_INC(ccf_iseq_setup);

-    const struct rb_call_cache *cc = &cd->cc;
-    const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
     const int param_size = iseq->body->param.size;
     const int local_size = iseq->body->local_table_size;
-    const int opt_pc = vm_callee_setup_arg(ec, calling, cd, def_iseq_ptr(cc->me->def), cfp->sp - calling->argc, param_size, local_size);
     return vm_call_iseq_setup_2(ec, cfp, calling, cd, opt_pc, param_size, local_size);
 }

@@ -2267,10 +2254,10 @@ vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct
                      int opt_pc, int param_size, int local_size)
 {
     const struct rb_callinfo *ci = cd->ci;
-    const struct rb_call_cache *cc = &cd->cc;

     if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
-        return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, opt_pc, param_size, local_size);
     }
     else {
         return vm_call_iseq_setup_tailcall(ec, cfp, calling, cd, opt_pc);

@@ -2298,10 +2285,10 @@
 static inline VALUE
 vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd, int opt_pc)
 {
-    const struct rb_call_cache *cc = &cd->cc;
     unsigned int i;
     VALUE *argv = cfp->sp - calling->argc;
-    const rb_callable_method_entry_t *me = cc->me;
     const rb_iseq_t *iseq = def_iseq_ptr(me->def);
     VALUE *src_argv = argv;
     VALUE *sp_orig, *sp;

@@ -2501,9 +2488,9 @@
 static VALUE
 vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
 {
     const struct rb_callinfo *ci = cd->ci;
-    const struct rb_call_cache *cc = &cd->cc;
     VALUE val;
-    const rb_callable_method_entry_t *me = cc->me;
     const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
     int len = cfunc->argc;

@@ -2553,20 +2540,20 @@ vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb
 static VALUE
 vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
 {
-    struct rb_call_cache *cc = &cd->cc;
     RB_DEBUG_COUNTER_INC(ccf_ivar);
     cfp->sp -= 1;
-    return vm_getivar(calling->recv, cc->me->def->body.attr.id, NULL, cc, TRUE);
 }

 static VALUE
 vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
 {
-    struct rb_call_cache *cc = &cd->cc;
     RB_DEBUG_COUNTER_INC(ccf_attrset);
     VALUE val = *(cfp->sp - 1);
     cfp->sp -= 2;
-    return vm_setivar(calling->recv, cc->me->def->body.attr.id, val, NULL, cc, 1);
 }

 static inline VALUE

@@ -2574,11 +2561,11 @@ vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling
 {
     rb_proc_t *proc;
     VALUE val;
-    const struct rb_call_cache *cc = &cd->cc;

     /* control block frame */
-    GetProcPtr(cc->me->def->body.bmethod.proc, proc);
-    val = rb_vm_invoke_bmethod(ec, proc, calling->recv, calling->argc, argv, calling->kw_splat, calling->block_handler, cc->me);

     return val;
 }

@@ -2601,6 +2588,65 @@ vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_c
     return vm_call_bmethod_body(ec, calling, cd, argv);
 }

 static enum method_missing_reason
 ci_missing_reason(const struct rb_callinfo *ci)
 {

@@ -2619,12 +2665,10 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
     int i;
     VALUE sym;
     ID mid;
-    const struct rb_callinfo *orig_ci = orig_cd->ci;
-    const struct rb_call_cache *orig_cc = &orig_cd->cc;
-    struct rb_call_cache *cc;
     struct rb_call_data cd;

-    CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);

     i = calling->argc - 1;

@@ -2632,9 +2676,6 @@ vm_call_opt_send
         rb_raise(rb_eArgError, "no method name given");
     }

-    cd.cc = *orig_cc;
-    cc = &cd.cc;
-
     sym = TOPN(i);

     if (!(mid = rb_check_id(&sym))) {

@@ -2642,12 +2683,12 @@ vm_call_opt_send
             VALUE exc = rb_make_no_method_exception(rb_eNoMethodError, 0,
                                                     calling->recv,
                                                     rb_long2int(calling->argc),
                                                     &TOPN(i),
-                                                    vm_ci_flag(orig_ci) & (VM_CALL_FCALL|VM_CALL_VCALL));
             rb_exc_raise(exc);
         }
         TOPN(i) = rb_str_intern(sym);
         mid = idMethodMissing;
-        ec->method_missing_reason = cc->aux.method_missing_reason = ci_missing_reason(orig_ci);
     }
     else {
         /* shift arguments */

@@ -2658,10 +2699,14 @@ vm_call_opt_send
         DEC_SP(1);
     }

-    CC_SET_ME(cc, rb_callable_method_entry_with_refinements(CLASS_OF(calling->recv), mid, NULL));
     unsigned int new_flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
-    cd.ci = vm_ci_new_runtime(mid, new_flag, 0 /* not accessed (calling->argc is used) */, vm_ci_kwarg(orig_ci));
-    return vm_call_method(ec, reg_cfp, calling, (CALL_DATA)&cd);
 }

@@ -2706,20 +2751,19 @@ vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
     }
     else {
         calling->recv = rb_vm_bh_to_procval(ec, block_handler);
-        vm_search_method(cd, calling->recv);
         return vm_call_general(ec, reg_cfp, calling, cd);
     }
 }

 static VALUE
-vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, struct rb_call_data *orig_cd)
 {
     RB_DEBUG_COUNTER_INC(ccf_method_missing);

-    const struct rb_callinfo *orig_ci = orig_cd->ci;
-    const struct rb_call_cache *orig_cc = &orig_cd->cc;
     VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
-    struct rb_call_data cd = *orig_cd;
     unsigned int argc;

     CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);

@@ -2727,8 +2771,11 @@ vm_call_method_missing
     unsigned int flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);

     cd.ci = vm_ci_new_runtime(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
-    cd.cc.me = rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv),
-                                                            idMethodMissing, NULL);

     calling->argc = argc;

@@ -2741,29 +2788,39 @@ vm_call_method_missing
     argv[0] = ID2SYM(vm_ci_mid(orig_ci));
     INC_SP(1);

-    ec->method_missing_reason = orig_cc->aux.method_missing_reason;
     return vm_call_method(ec, reg_cfp, calling, &cd);
 }

 static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
 static VALUE
 vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd, VALUE klass)
 {
-    RB_DEBUG_COUNTER_INC(ccf_method_missing);
-
-    const struct rb_callinfo *ci = cd->ci;
-    struct rb_call_cache *cc = &cd->cc;
     klass = RCLASS_SUPER(klass);
-    CC_SET_ME(cc, klass ? rb_callable_method_entry(klass, vm_ci_mid(ci)) : NULL);

-    if (!cc->me) {
         return vm_call_method_nome(ec, cfp, calling, cd);
     }
-    if (cc->me->def->type == VM_METHOD_TYPE_REFINED &&
-        cc->me->def->body.refined.orig_me) {
-        CC_SET_ME(cc, refined_method_callable_without_refinement(cc->me));
     }
-    return vm_call_method_each_type(ec, cfp, calling, cd);
 }

 static inline VALUE

@@ -2795,53 +2852,6 @@ current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
     return cfp;
 }

-static VALUE
-find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
-{
-    VALUE klass = current_class;
-
-    /* for prepended Module, then start from cover class */
-    if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN)) klass = RBASIC_CLASS(klass);
-
-    while (RTEST(klass)) {
-        VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
-        if (owner == target_owner) {
-            return klass;
-        }
-        klass = RCLASS_SUPER(klass);
-    }
-
-    return current_class; /* maybe module function */
-}
-
-static const rb_callable_method_entry_t *
-aliased_callable_method_entry(const rb_callable_method_entry_t *me)
-{
-    const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
-    const rb_callable_method_entry_t *cme;
-
-    if (orig_me->defined_class == 0) {
-        VALUE defined_class = find_defined_class_by_owner(me->defined_class, orig_me->owner);
-        VM_ASSERT(RB_TYPE_P(orig_me->owner, T_MODULE));
-        cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
-
-        if (me->def->alias_count + me->def->complemented_count == 0) {
-            RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
-        }
-        else {
-            rb_method_definition_t *def =
-                rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
-            rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
-        }
-    }
-    else {
-        cme = (const rb_callable_method_entry_t *)orig_me;
-    }
-
-    VM_ASSERT(callable_method_entry_p(cme));
-    return cme;
-}
-
 static const rb_callable_method_entry_t *
 refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
 {

@@ -2865,57 +2875,78 @@ refined_method_callable_without_refinement
     return cme;
 }

-static int
-search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, ID mid, struct rb_call_cache *cc)
 {
     const rb_cref_t *cref = vm_get_cref(cfp->ep);

     for (; cref; cref = CREF_NEXT(cref)) {
-        const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), cc->me->owner);
         if (NIL_P(refinement)) continue;

         const rb_callable_method_entry_t *const ref_me =
             rb_callable_method_entry(refinement, mid);

         if (ref_me) {
-            if (cc->call == vm_call_super_method) {
                 const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
                 const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
                 if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
                     continue;
                 }
             }
-            if (cc->me->def->type != VM_METHOD_TYPE_REFINED ||
-                cc->me->def != ref_me->def) {
-                CC_SET_ME(cc, ref_me);
             }
             if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
-                return TRUE;
             }
         }
        else {
-            CC_SET_ME(cc, NULL);
-            return FALSE;
        }
    }

-    if (cc->me->def->body.refined.orig_me) {
-        CC_SET_ME(cc, refined_method_callable_without_refinement(cc->me));
    }
    else {
-        VALUE klass = RCLASS_SUPER(cc->me->defined_class);
-        CC_SET_ME(cc, klass ?
rb_callable_method_entry(klass, mid) : NULL); } - return TRUE; } static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd) { const struct rb_callinfo *ci = cd->ci; - struct rb_call_cache *cc = &cd->cc; - switch (cc->me->def->type) { case VM_METHOD_TYPE_ISEQ: CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE); return vm_call_iseq_setup(ec, cfp, calling, cd); @@ -2930,20 +2961,20 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci); rb_check_arity(calling->argc, 1, 1); - cc->aux.index = 0; - CC_SET_FASTPATH(cc, vm_call_attrset, !((vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT) || (vm_ci_flag(ci) & VM_CALL_KWARG))); return vm_call_attrset(ec, cfp, calling, cd); case VM_METHOD_TYPE_IVAR: CALLER_SETUP_ARG(cfp, calling, ci); CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci); rb_check_arity(calling->argc, 0, 0); - cc->aux.index = 0; - CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT)); return vm_call_ivar(ec, cfp, calling, cd); case VM_METHOD_TYPE_MISSING: - cc->aux.method_missing_reason = 0; CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE); return vm_call_method_missing(ec, cfp, calling, cd); @@ -2952,12 +2983,11 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st return vm_call_bmethod(ec, cfp, calling, cd); case VM_METHOD_TYPE_ALIAS: - CC_SET_ME(cc, aliased_callable_method_entry(cc->me)); - VM_ASSERT(cc->me != NULL); - return vm_call_method_each_type(ec, cfp, calling, cd); case VM_METHOD_TYPE_OPTIMIZED: - switch (cc->me->def->body.optimize_type) { case OPTIMIZED_METHOD_TYPE_SEND: CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE); return vm_call_opt_send(ec, cfp, calling, cd); @@ -2969,23 +2999,22 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st return vm_call_opt_block_call(ec, cfp, calling, cd); default: rb_bug("vm_call_method: unsupported optimized method type (%d)", - cc->me->def->body.optimize_type); } case VM_METHOD_TYPE_UNDEF: break; case VM_METHOD_TYPE_ZSUPER: - return vm_call_zsuper(ec, cfp, calling, cd, RCLASS_ORIGIN(cc->me->defined_class)); case VM_METHOD_TYPE_REFINED: - if (search_refined_method(ec, cfp, vm_ci_mid(ci), cc)) - return vm_call_method(ec, cfp, calling, cd); - else - return vm_call_method_nome(ec, cfp, calling, cd); } - rb_bug("vm_call_method: unsupported method type (%d)", cc->me->def->type); } NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status)); @@ -2995,7 +3024,6 @@ vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct { /* method missing */ const struct rb_callinfo *ci = cd->ci; - struct rb_call_cache *cc = &cd->cc; const int stat = ci_missing_reason(ci); if (vm_ci_mid(ci) == idMethodMissing) { @@ -3004,9 +3032,7 @@ vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat); } else { - cc->aux.method_missing_reason = stat; - CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE); - return vm_call_method_missing(ec, cfp, calling, cd); } } @@ -3014,12 +3040,12 @@ static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd) { const struct rb_callinfo *ci = cd->ci; - struct rb_call_cache *cc = &cd->cc; - VM_ASSERT(callable_method_entry_p(cc->me)); - if (cc->me != NULL) 
{ - switch (METHOD_ENTRY_VISI(cc->me)) { case METHOD_VISI_PUBLIC: /* likely */ return vm_call_method_each_type(ec, cfp, calling, cd); @@ -3028,7 +3054,7 @@ vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca enum method_missing_reason stat = MISSING_PRIVATE; if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL; - cc->aux.method_missing_reason = stat; CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE); return vm_call_method_missing(ec, cfp, calling, cd); } @@ -3036,15 +3062,19 @@ vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca case METHOD_VISI_PROTECTED: if (!(vm_ci_flag(ci) & VM_CALL_OPT_SEND)) { - if (!rb_obj_is_kind_of(cfp->self, cc->me->defined_class)) { - cc->aux.method_missing_reason = MISSING_PROTECTED; return vm_call_method_missing(ec, cfp, calling, cd); } else { /* caching method info to dummy cc */ - VM_ASSERT(cc->me != NULL); - struct rb_call_data cd_entry = *cd; - return vm_call_method_each_type(ec, cfp, calling, &cd_entry); } } return vm_call_method_each_type(ec, cfp, calling, cd); @@ -3071,8 +3101,8 @@ vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, st RB_DEBUG_COUNTER_INC(ccf_super_method); /* this check is required to distinguish with other functions. */ - const struct rb_call_cache *cc = &cd->cc; - if (cc->call != vm_call_super_method) rb_bug("bug"); return vm_call_method(ec, reg_cfp, calling, cd); } @@ -3145,30 +3175,34 @@ vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *c if (!klass) { /* bound instance method of module */ - cd->cc.aux.method_missing_reason = MISSING_SUPER; - CC_SET_FASTPATH(&cd->cc, vm_call_method_missing, TRUE); } else { - struct rb_call_cache *cc = &cd->cc; -#if OPT_INLINE_METHOD_CACHE - /* Unlike normal method search, we only consider the first class - * serial. Since we're testing defined_class rather than receiver, - * there's only one valid "warm" value. */ - if (LIKELY(RB_DEBUG_COUNTER_INC_UNLESS(mc_global_state_miss, - GET_GLOBAL_METHOD_STATE() == cc->method_state) && - cc->class_serial[0] == RCLASS_SERIAL(klass)) && - cc->me && vm_ci_mid(cd->ci) == cc->me->called_id) { - VM_ASSERT(cc->call != NULL); - RB_DEBUG_COUNTER_INC(mc_inline_hit); - return; - } -#endif - - CC_SET_ME(cc, rb_callable_method_entry(klass, vm_ci_mid(cd->ci))); - CC_SET_FASTPATH(cc, vm_call_super_method, TRUE); - - cc->method_state = GET_GLOBAL_METHOD_STATE(); - cc->class_serial[0] = RCLASS_SERIAL(klass); } } @@ -3958,7 +3992,7 @@ vm_search_method_wrap( struct rb_call_data *cd, VALUE recv) { - vm_search_method(cd, recv); } static void @@ -3999,9 +4033,8 @@ vm_sendish( struct rb_call_data *cd, VALUE recv)) { - const struct rb_callinfo *ci = cd->ci; - CALL_CACHE cc = &cd->cc; VALUE val; int argc = vm_ci_argc(ci); VALUE recv = TOPN(argc); struct rb_calling_info calling; @@ -4012,8 +4045,9 @@ vm_sendish( calling.argc = argc; method_explorer(GET_CFP(), cd, recv); - val = cc->call(ec, GET_CFP(), &calling, cd); if (val != Qundef) { return val; /* CFUNC normal return */ @@ -4356,10 +4390,10 @@ vm_opt_mod(VALUE recv, VALUE obj) } static VALUE -vm_opt_neq(CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj) { - if (vm_method_cfunc_is(cd, recv, rb_obj_not_equal)) { - VALUE val = opt_eq_func(recv, obj, cd_eq); if (val != Qundef) { return RTEST(val) ? 
Qfalse : Qtrue; @@ -4630,13 +4664,13 @@ vm_opt_empty_p(VALUE recv) VALUE rb_false(VALUE obj); static VALUE -vm_opt_nil_p(CALL_DATA cd, VALUE recv) { if (recv == Qnil && BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) { return Qtrue; } - else if (vm_method_cfunc_is(cd, recv, rb_false)) { return Qfalse; } else { @@ -4692,9 +4726,9 @@ vm_opt_succ(VALUE recv) } static VALUE -vm_opt_not(CALL_DATA cd, VALUE recv) { - if (vm_method_cfunc_is(cd, recv, rb_obj_not)) { return RTEST(recv) ? Qfalse : Qtrue; } else { @@ -121,20 +121,13 @@ enum vm_regan_acttype { */ static inline void -CC_SET_FASTPATH(CALL_CACHE cc, vm_call_handler func, bool enabled) { if (LIKELY(enabled)) { - cc->call = func; } } -static inline void -CC_SET_ME(CALL_CACHE cc, const rb_callable_method_entry_t *me) -{ - cc->me = me; - cc->method_serial = me ? me->def->method_serial : 0; -} - #define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL]) /**********************************************************/ @@ -258,10 +251,10 @@ THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj) /* If this returns true, an optimized function returned by `vm_call_iseq_setup_func` can be used as a fastpath. */ static bool -vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_call_cache *cc) { return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) && - !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED); } #endif /* RUBY_INSNHELPER_H */ @@ -6,25 +6,6 @@ #define METHOD_DEBUG 0 -#if OPT_GLOBAL_METHOD_CACHE -#ifndef GLOBAL_METHOD_CACHE_SIZE -#define GLOBAL_METHOD_CACHE_SIZE 0x800 -#endif -#define LSB_ONLY(x) ((x) & ~((x) - 1)) -#define POWER_OF_2_P(x) ((x) == LSB_ONLY(x)) -#if !POWER_OF_2_P(GLOBAL_METHOD_CACHE_SIZE) -# error GLOBAL_METHOD_CACHE_SIZE must be power of 2 -#endif -#ifndef GLOBAL_METHOD_CACHE_MASK -#define GLOBAL_METHOD_CACHE_MASK (GLOBAL_METHOD_CACHE_SIZE-1) -#endif - -#define GLOBAL_METHOD_CACHE_KEY(c,m) ((((c)>>3)^(m))&(global_method_cache.mask)) -#define GLOBAL_METHOD_CACHE(c,m) (global_method_cache.entries + GLOBAL_METHOD_CACHE_KEY(c,m)) -#else -#define GLOBAL_METHOD_CACHE(c,m) (rb_bug("global method cache disabled improperly"), NULL) -#endif - static int vm_redefinition_check_flag(VALUE klass); static void rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass); @@ -37,50 +18,108 @@ static void rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VAL #define singleton_undefined idSingleton_method_undefined #define attached id__attached__ -struct cache_entry { - rb_serial_t method_state; - rb_serial_t class_serial; - ID mid; - rb_method_entry_t* me; - VALUE defined_class; -}; - -#if OPT_GLOBAL_METHOD_CACHE -static struct { - unsigned int size; - unsigned int mask; - struct cache_entry *entries; -} global_method_cache = { - GLOBAL_METHOD_CACHE_SIZE, - GLOBAL_METHOD_CACHE_MASK, -}; -#endif - #define ruby_running (GET_VM()->running) /* int ruby_running = 0; */ -static void -rb_class_clear_method_cache(VALUE klass, VALUE arg) { - rb_serial_t old_serial = *(rb_serial_t *)arg; - if (RCLASS_SERIAL(klass) > old_serial) { - return; } - mjit_remove_class_serial(RCLASS_SERIAL(klass)); - RCLASS_SERIAL(klass) = rb_next_class_serial(); - if (BUILTIN_TYPE(klass) == T_ICLASS) { - struct rb_id_table *table = RCLASS_CALLABLE_M_TBL(klass); - if (table) { - rb_id_table_clear(table); - } } - else { - VM_ASSERT(RCLASS_CALLABLE_M_TBL(klass) == 0); } - rb_class_foreach_subclass(klass, rb_class_clear_method_cache, arg); } void @@ -89,31 +128,131 @@ rb_clear_constant_cache(void) 
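Note: the vm_method.c deletions that begin here remove the class-serial bookkeeping. The old scheme trusted a cached method entry only while two counters still matched: a VM-global method state and the class's own serial, either of which could be bumped to invalidate the entry. A minimal standalone sketch of that invalidation idea (the toy_* names and cache_lookup are hypothetical stand-ins, not Ruby's real structs):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t serial_t;

    /* Bumped on "global" invalidations (e.g. redefining on Object/Kernel). */
    static serial_t global_method_state = 1;

    struct toy_class {
        serial_t serial;        /* bumped when this class's methods change */
        const char *method;     /* stand-in for the class's method table  */
    };

    struct toy_cache {
        serial_t method_state;  /* snapshot of global_method_state */
        serial_t class_serial;  /* snapshot of the class's serial  */
        const char *me;         /* cached "method entry"           */
    };

    static const char *
    cache_lookup(struct toy_cache *cc, const struct toy_class *klass)
    {
        if (cc->method_state == global_method_state &&
            cc->class_serial == klass->serial) {
            puts("hit: reuse cached entry");
            return cc->me;
        }
        puts("miss: search and refill");
        cc->method_state = global_method_state;
        cc->class_serial = klass->serial;
        cc->me = klass->method;  /* real code would walk the ancestry here */
        return cc->me;
    }

    int
    main(void)
    {
        struct toy_class klass = { 42, "foo" };
        struct toy_cache cc = { 0, 0, NULL };

        cache_lookup(&cc, &klass);  /* miss */
        cache_lookup(&cc, &klass);  /* hit  */
        klass.serial++;             /* simulate a method (re)definition */
        cache_lookup(&cc, &klass);  /* miss again */
        return 0;
    }

Comparing two integers is what made the warm path cheap; the cost of the scheme was that bumping a serial discarded every cache entry keyed to that class at once, which is part of what this commit is reworking.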
@@ -89,31 +128,131 @@ rb_clear_constant_cache(void)
INC_GLOBAL_CONSTANT_STATE();
}

-void
-rb_clear_method_cache_by_class(VALUE klass)
{
- if (klass && klass != Qundef) {
- int global = klass == rb_cBasicObject || klass == rb_cObject || klass == rb_mKernel;

- RUBY_DTRACE_HOOK(METHOD_CACHE_CLEAR, (global ? "global" : rb_class2name(klass)));

- if (global) {
- INC_GLOBAL_METHOD_STATE();
- }
- else {
- rb_serial_t old_serial = PREV_CLASS_SERIAL();
- rb_class_clear_method_cache(klass, (VALUE)&old_serial);
- }
}

- if (klass == rb_mKernel) {
- rb_subclass_entry_t *entry = RCLASS_EXT(klass)->subclasses;

- for (; entry != NULL; entry = entry->next) {
- struct rb_id_table *table = RCLASS_CALLABLE_M_TBL(entry->klass);
- if (table)rb_id_table_clear(table);
- }
}
}

VALUE
@@ -138,7 +277,7 @@ rb_add_method_cfunc(VALUE klass, ID mid, VALUE (*func)(ANYARGS), int argc, rb_me
rb_method_cfunc_t opt;
opt.func = func;
opt.argc = argc;
- rb_add_method(klass, mid, VM_METHOD_TYPE_CFUNC, &opt, visi);
}
else {
rb_define_notimplement_method_id(klass, mid, visi);
@@ -161,8 +300,13 @@ rb_method_definition_release(rb_method_definition_t *def, int complemented)
xfree(def);
}
else {
- if (complemented) def->complemented_count--;
- else if (def->alias_count > 0) def->alias_count--;

if (METHOD_DEBUG) fprintf(stderr, "-%p-%s:%d->%d,%d->%d (dec)\n", (void *)def, rb_id2name(def->original_id),
alias_count, def->alias_count,
complemented_count, def->complemented_count);
@@ -179,20 +323,6 @@ rb_free_method_entry(const rb_method_entry_t *me)
static inline rb_method_entry_t *search_method(VALUE klass, ID id, VALUE *defined_class_ptr);
extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);

-static inline rb_method_entry_t *
-lookup_method_table(VALUE klass, ID id)
-{
- st_data_t body;
- struct rb_id_table *m_tbl = RCLASS_M_TBL(klass);
-
- if (rb_id_table_lookup(m_tbl, id, &body)) {
- return (rb_method_entry_t *) body;
- }
- else {
- return 0;
- }
-}
-
static VALUE
(*call_cfunc_invoker_func(int argc))(VALUE recv, int argc, const VALUE *, VALUE (*func)(ANYARGS))
{
@@ -406,7 +536,11 @@ const rb_method_entry_t *
rb_method_entry_clone(const rb_method_entry_t *src_me)
{
rb_method_entry_t *me = rb_method_entry_alloc(src_me->called_id, src_me->owner, src_me->defined_class,
- method_definition_addref(src_me->def));

METHOD_ENTRY_FLAGS_COPY(me, src_me);
return me;
}
@@ -487,6 +621,20 @@ make_method_entry_refined(VALUE owner, rb_method_entry_t *me)
}
}

void
rb_add_refined_method_entry(VALUE refined_class, ID mid)
{
@@ -494,7 +642,7 @@ rb_add_refined_method_entry(VALUE refined_class, ID mid)
if (me) {
make_method_entry_refined(refined_class, me);
- rb_clear_method_cache_by_class(refined_class);
}
else {
rb_add_method(refined_class, mid, VM_METHOD_TYPE_REFINED, 0, METHOD_VISI_PUBLIC);
@@ -615,7 +763,7 @@ rb_method_entry_make(VALUE klass, ID mid, VALUE defined_class, rb_method_visibil
if (def == NULL) def = rb_method_definition_create(type, original_id);
rb_method_definition_set(me, def, opts);
- rb_clear_method_cache_by_class(klass);

/* check mid */
if (klass == rb_cObject) {
@@ -737,149 +885,169 @@ rb_get_alloc_func(VALUE klass)
return 0;
}

static inline rb_method_entry_t*
search_method(VALUE klass, ID id, VALUE *defined_class_ptr)
{
rb_method_entry_t *me;

for (; klass; klass = RCLASS_SUPER(klass)) {
RB_DEBUG_COUNTER_INC(mc_search_super);
- if ((me = lookup_method_table(klass, id)) != 0) break;
}

- if (defined_class_ptr)
- *defined_class_ptr = klass;
return me;
}

-const rb_method_entry_t *
-rb_method_entry_at(VALUE klass, ID id)
{
- return lookup_method_table(klass, id);
}

-/*
- * search method entry without the method cache.
- *
- * if you need method entry with method cache (normal case), use
- * rb_method_entry() simply.
- */
-static rb_method_entry_t *
-method_entry_get_without_cache(VALUE klass, ID id,
- VALUE *defined_class_ptr)
{
- VALUE defined_class;
- rb_method_entry_t *me = search_method(klass, id, &defined_class);

- if (ruby_running) {
- if (OPT_GLOBAL_METHOD_CACHE) {
- struct cache_entry *ent;
- ent = GLOBAL_METHOD_CACHE(klass, id);
- ent->class_serial = RCLASS_SERIAL(klass);
- ent->method_state = GET_GLOBAL_METHOD_STATE();
- ent->defined_class = defined_class;
- ent->mid = id;

- if (UNDEFINED_METHOD_ENTRY_P(me)) {
- me = ent->me = NULL;
- }
- else {
- ent->me = me;
- }
- }
- else if (UNDEFINED_METHOD_ENTRY_P(me)) {
- me = NULL;
- }
}
- else if (UNDEFINED_METHOD_ENTRY_P(me)) {
- me = NULL;
}

- if (defined_class_ptr)
- *defined_class_ptr = defined_class;
- return me;
}

-static void
-verify_method_cache(VALUE klass, ID id, VALUE defined_class, rb_method_entry_t *me)
{
- if (!VM_DEBUG_VERIFY_METHOD_CACHE) return;
- VALUE actual_defined_class;
- rb_method_entry_t *actual_me =
- method_entry_get_without_cache(klass, id, &actual_defined_class);

- if (me != actual_me || defined_class != actual_defined_class) {
- rb_bug("method cache verification failed");
}
}

-static rb_method_entry_t *
-method_entry_get(VALUE klass, ID id, VALUE *defined_class_ptr)
{
- struct cache_entry *ent;
- if (!OPT_GLOBAL_METHOD_CACHE) goto nocache;
- ent = GLOBAL_METHOD_CACHE(klass, id);
- if (ent->method_state == GET_GLOBAL_METHOD_STATE() &&
- ent->class_serial == RCLASS_SERIAL(klass) &&
- ent->mid == id) {
- verify_method_cache(klass, id, ent->defined_class, ent->me);
- if (defined_class_ptr) *defined_class_ptr = ent->defined_class;
- RB_DEBUG_COUNTER_INC(mc_global_hit);
- return ent->me;
- }

- nocache:
- RB_DEBUG_COUNTER_INC(mc_global_miss);
- return method_entry_get_without_cache(klass, id, defined_class_ptr);
-}

-MJIT_FUNC_EXPORTED const rb_method_entry_t *
-rb_method_entry(VALUE klass, ID id)
-{
- return method_entry_get(klass, id, NULL);
}

static const rb_callable_method_entry_t *
-prepare_callable_method_entry(VALUE defined_class, ID id, const rb_method_entry_t *me)
{
- struct rb_id_table *mtbl;
- const rb_callable_method_entry_t *cme;

- if (me && me->defined_class == 0) {
- RB_DEBUG_COUNTER_INC(mc_cme_complement);
- VM_ASSERT(RB_TYPE_P(defined_class, T_ICLASS) || RB_TYPE_P(defined_class, T_MODULE));
- VM_ASSERT(me->defined_class == 0);
-
- mtbl = RCLASS_CALLABLE_M_TBL(defined_class);
-
- if (mtbl && rb_id_table_lookup(mtbl, id, (VALUE *)&me)) {
- RB_DEBUG_COUNTER_INC(mc_cme_complement_hit);
- cme = (rb_callable_method_entry_t *)me;
- VM_ASSERT(callable_method_entry_p(cme));
- }
- else {
- if (!mtbl) {
- mtbl = RCLASS_EXT(defined_class)->callable_m_tbl = rb_id_table_create(0);
- }
- cme = rb_method_entry_complement_defined_class(me, me->called_id, defined_class);
- rb_id_table_insert(mtbl, id, (VALUE)cme);
- VM_ASSERT(callable_method_entry_p(cme));
- }
}
else {
- cme = (const rb_callable_method_entry_t *)me;
- VM_ASSERT(callable_method_entry_p(cme));
}

return cme;
}

MJIT_FUNC_EXPORTED const rb_callable_method_entry_t *
-rb_callable_method_entry(VALUE klass, ID id)
{
- VALUE defined_class;
- rb_method_entry_t *me = method_entry_get(klass, id, &defined_class);
- return prepare_callable_method_entry(defined_class, id, me);
}

static const rb_method_entry_t *resolve_refined_method(VALUE refinements, const rb_method_entry_t *me, VALUE *defined_class_ptr);
@@ -887,7 +1055,7 @@ static const rb_method_entry_t *resolve_refined_method(VALUE refinements, const
static const rb_method_entry_t *
method_entry_resolve_refinement(VALUE klass, ID id, int with_refinement, VALUE *defined_class_ptr)
{
- const rb_method_entry_t *me = method_entry_get(klass, id, defined_class_ptr);

if (me) {
if (me->def->type == VM_METHOD_TYPE_REFINED) {
@@ -916,9 +1084,15 @@ rb_method_entry_with_refinements(VALUE klass, ID id, VALUE *defined_class_ptr)
MJIT_FUNC_EXPORTED const rb_callable_method_entry_t *
rb_callable_method_entry_with_refinements(VALUE klass, ID id, VALUE *defined_class_ptr)
{
- VALUE defined_class, *dcp = defined_class_ptr ? defined_class_ptr : &defined_class;
- const rb_method_entry_t *me = method_entry_resolve_refinement(klass, id, TRUE, dcp);
- return prepare_callable_method_entry(*dcp, id, me);
}

const rb_method_entry_t *
@@ -932,7 +1106,7 @@ rb_callable_method_entry_without_refinements(VALUE klass, ID id, VALUE *defined_
{
VALUE defined_class, *dcp = defined_class_ptr ? defined_class_ptr : &defined_class;
const rb_method_entry_t *me = method_entry_resolve_refinement(klass, id, FALSE, dcp);
- return prepare_callable_method_entry(*dcp, id, me);
}

static const rb_method_entry_t *
@@ -945,7 +1119,7 @@ resolve_refined_method(VALUE refinements, const rb_method_entry_t *me, VALUE *de
refinement = find_refinement(refinements, me->owner);
if (!NIL_P(refinement)) {
- tmp_me = method_entry_get(refinement, me->called_id, defined_class_ptr);

if (tmp_me && tmp_me->def->type != VM_METHOD_TYPE_REFINED) {
return tmp_me;
@@ -963,7 +1137,7 @@ resolve_refined_method(VALUE refinements, const rb_method_entry_t *me, VALUE *de
return 0;
}

- me = method_entry_get(super, me->called_id, defined_class_ptr);
}

return me;
}
@@ -1010,10 +1184,10 @@ remove_method(VALUE klass, ID mid)
klass, ID2SYM(mid));
}

rb_id_table_delete(RCLASS_M_TBL(klass), mid);
rb_vm_check_redefinition_opt_method(me, klass);
- rb_clear_method_cache_by_class(klass);

if (me->def->type == VM_METHOD_TYPE_REFINED) {
rb_add_refined_method_entry(klass, mid);
@@ -1069,6 +1243,7 @@ rb_export_method(VALUE klass, ID name, rb_method_visibility_t visi)
VALUE origin_class = RCLASS_ORIGIN(klass);

me = search_method(origin_class, name, &defined_class);

if (!me && RB_TYPE_P(klass, T_MODULE)) {
me = search_method(rb_cObject, name, &defined_class);
}
@@ -1087,7 +1262,7 @@ rb_export_method(VALUE klass, ID name, rb_method_visibility_t visi)
if (me->def->type == VM_METHOD_TYPE_REFINED && me->def->body.refined.orig_me) {
METHOD_ENTRY_VISI_SET((rb_method_entry_t *)me->def->body.refined.orig_me, visi);
}
- rb_clear_method_cache_by_class(klass);
}
else {
rb_add_method(klass, name, VM_METHOD_TYPE_ZSUPER, 0, visi);
@@ -1110,8 +1285,8 @@ rb_method_boundp(VALUE klass, ID id, int ex)
me = rb_method_entry_without_refinements(klass, id, NULL);
}

- if (me != 0) {
- if ((ex & ~BOUND_RESPONDS) &&
((METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE) ||
((ex & BOUND_RESPONDS) && (METHOD_ENTRY_VISI(me) == METHOD_VISI_PROTECTED)))) {
return 0;
@@ -1593,6 +1768,7 @@ rb_alias(VALUE klass, ID alias_name, ID original_name)
again:
orig_me = search_method(klass, original_name, &defined_class);

if (orig_me && orig_me->def->type == VM_METHOD_TYPE_REFINED) {
orig_me = rb_resolve_refined_method(Qnil, orig_me);
}
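Note: with method_entry_get() and the global cache deleted, the lookup paths above all bottom out in search_method(), a plain walk up the superclass chain that also reports which class defined the method. A self-contained sketch of that walk, with hypothetical toy_* types standing in for Ruby's classes and id tables:

    #include <stdio.h>
    #include <string.h>

    struct toy_method {
        const char *name;
        const struct toy_method *next;  /* per-class method list (toy m_tbl) */
    };

    struct toy_class {
        const char *name;
        const struct toy_method *m_tbl;
        const struct toy_class *super;
    };

    /* Walk the ancestor chain; record the defining class the way
     * search_method() fills *defined_class_ptr. */
    static const struct toy_method *
    toy_search_method(const struct toy_class *klass, const char *mid,
                      const struct toy_class **defined_class_ptr)
    {
        for (; klass; klass = klass->super) {
            for (const struct toy_method *me = klass->m_tbl; me; me = me->next) {
                if (strcmp(me->name, mid) == 0) {
                    if (defined_class_ptr) *defined_class_ptr = klass;
                    return me;
                }
            }
        }
        if (defined_class_ptr) *defined_class_ptr = NULL;
        return NULL;
    }

    int
    main(void)
    {
        struct toy_method m_to_s = { "to_s", NULL };
        struct toy_class object = { "Object", &m_to_s, NULL };
        struct toy_class string = { "String", NULL, &object };

        const struct toy_class *dc;
        const struct toy_method *me = toy_search_method(&string, "to_s", &dc);
        if (me) printf("found %s in %s\n", me->name, dc->name); /* in Object */
        return 0;
    }

The real search_method() does a hash lookup (rb_id_table) per class rather than a linear scan, but the ancestor-chain traversal and the *defined_class_ptr out-parameter work the same way.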
@@ -1841,7 +2017,7 @@ rb_mod_ruby2_keywords(int argc, VALUE *argv, VALUE module)
!me->def->body.iseq.iseqptr->body->param.flags.has_kw &&
!me->def->body.iseq.iseqptr->body->param.flags.has_kwrest) {
me->def->body.iseq.iseqptr->body->param.flags.ruby2_keywords = 1;
- rb_clear_method_cache_by_class(module);
}
else {
rb_warn("Skipping set of ruby2_keywords flag for %s (method accepts keywords or method does not accept argument splat)", rb_id2name(name));
@@ -1860,7 +2036,7 @@ rb_mod_ruby2_keywords(int argc, VALUE *argv, VALUE module)
!iseq->body->param.flags.has_kw &&
!iseq->body->param.flags.has_kwrest) {
iseq->body->param.flags.ruby2_keywords = 1;
- rb_clear_method_cache_by_class(module);
}
else {
rb_warn("Skipping set of ruby2_keywords flag for %s (method accepts keywords or method does not accept argument splat)", rb_id2name(name));
@@ -2061,10 +2237,10 @@ rb_mod_modfunc(int argc, VALUE *argv, VALUE module)
int
rb_method_basic_definition_p(VALUE klass, ID id)
{
- const rb_method_entry_t *me;

if (!klass) return TRUE; /* hidden object cannot be overridden */

- me = rb_method_entry(klass, id);
- return (me && METHOD_ENTRY_BASIC(me)) ? TRUE : FALSE;
}
#ifdef __GNUC__
#pragma pop_macro("rb_method_basic_definition_p")
@@ -2072,10 +2248,8 @@ rb_method_basic_definition_p(VALUE klass, ID id)
static VALUE
call_method_entry(rb_execution_context_t *ec, VALUE defined_class, VALUE obj, ID id,
- const rb_method_entry_t *me, int argc, const VALUE *argv, int kw_splat)
{
- const rb_callable_method_entry_t *cme =
- prepare_callable_method_entry(defined_class, id, me);
VALUE passed_block_handler = vm_passed_block_handler(ec);
VALUE result = rb_vm_call_kw(ec, obj, id, argc, argv, cme, kw_splat);
vm_passed_block_handler_set(ec, passed_block_handler);
@@ -2088,13 +2262,12 @@ basic_obj_respond_to_missing(rb_execution_context_t *ec, VALUE klass, VALUE obj,
{
VALUE defined_class, args[2];
const ID rtmid = idRespond_to_missing;
- const rb_method_entry_t *const me =
- method_entry_get(klass, rtmid, &defined_class);

- if (!me || METHOD_ENTRY_BASIC(me)) return Qundef;

args[0] = mid;
args[1] = priv;
- return call_method_entry(ec, defined_class, obj, rtmid, me, 2, args, RB_NO_KEYWORDS);
}

static inline int
@@ -2120,11 +2293,10 @@ vm_respond_to(rb_execution_context_t *ec, VALUE klass, VALUE obj, ID id, int pri
{
VALUE defined_class;
const ID resid = idRespond_to;
- const rb_method_entry_t *const me =
- method_entry_get(klass, resid, &defined_class);

- if (!me) return -1;
- if (METHOD_ENTRY_BASIC(me)) {
return -1;
}
else {
@@ -2135,7 +2307,7 @@ vm_respond_to(rb_execution_context_t *ec, VALUE klass, VALUE obj, ID id, int pri
args[0] = ID2SYM(id);
args[1] = Qtrue;
if (priv) {
- argc = rb_method_entry_arity(me);
if (argc > 2) {
rb_raise(rb_eArgError,
"respond_to? must accept 1 or 2 arguments (requires %d)",
@@ -2145,7 +2317,7 @@ vm_respond_to(rb_execution_context_t *ec, VALUE klass, VALUE obj, ID id, int pri
argc = 2;
}
else if (!NIL_P(ruby_verbose)) {
- VALUE location = rb_method_entry_location(me);
rb_warn("%"PRIsVALUE"%c""respond_to?(:%"PRIsVALUE") uses"
" the deprecated method signature, which takes one parameter",
(FL_TEST(klass, FL_SINGLETON) ? obj : klass),
@@ -2161,7 +2333,7 @@ vm_respond_to(rb_execution_context_t *ec, VALUE klass, VALUE obj, ID id, int pri
}
}
}
- result = call_method_entry(ec, defined_class, obj, resid, me, argc, args, RB_NO_KEYWORDS);
return RTEST(result);
}
}
@@ -2246,25 +2418,7 @@ obj_respond_to_missing(VALUE obj, VALUE mid, VALUE priv)
void
Init_Method(void)
{
- if (!OPT_GLOBAL_METHOD_CACHE) return;
- char *ptr = getenv("RUBY_GLOBAL_METHOD_CACHE_SIZE");
- int val;
-
- if (ptr != NULL && (val = atoi(ptr)) > 0) {
- if ((val & (val - 1)) == 0) { /* ensure val is a power of 2 */
- global_method_cache.size = val;
- global_method_cache.mask = val - 1;
- }
- else {
- fprintf(stderr, "RUBY_GLOBAL_METHOD_CACHE_SIZE was set to %d but ignored because the value is not a power of 2.\n", val);
- }
- }
-
- global_method_cache.entries = (struct cache_entry *)calloc(global_method_cache.size, sizeof(struct cache_entry));
- if (global_method_cache.entries == NULL) {
- fprintf(stderr, "[FATAL] failed to allocate memory\n");
- exit(EXIT_FAILURE);
- }
}

void
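Note: the deleted Init_Method() body above sized the global method cache from RUBY_GLOBAL_METHOD_CACHE_SIZE, accepting the override only for positive powers of two so that a slot index could be computed with a bit mask instead of a modulo. A standalone sketch of that validation (TOY_CACHE_SIZE is a made-up environment variable used only for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
        unsigned int size = 0x800;  /* default, as in the old code */
        const char *ptr = getenv("TOY_CACHE_SIZE");  /* hypothetical */
        int val;

        if (ptr != NULL && (val = atoi(ptr)) > 0) {
            if ((val & (val - 1)) == 0) {  /* power of 2: one bit set */
                size = (unsigned int)val;
            }
            else {
                fprintf(stderr, "TOY_CACHE_SIZE=%d ignored (not a power of 2)\n", val);
            }
        }
        printf("cache size %u, mask 0x%x\n", size, size - 1);
        return 0;
    }

With the global method cache removed by this commit, the environment variable parsing disappears along with it.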