-rw-r--r-- | gc.c                  | 87
-rw-r--r-- | include/ruby/atomic.h | 44
2 files changed, 99 insertions, 32 deletions
diff --git a/gc.c b/gc.c
@@ -1772,13 +1772,31 @@ rb_gc_pointer_to_heap_p(VALUE obj)
 }
 
 #define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
-#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT)
-
-static unsigned long long next_object_id = OBJ_ID_INITIAL;
 
 static VALUE id2ref_value = 0;
 static st_table *id2ref_tbl = NULL;
 static bool id2ref_tbl_built = false;
 
 void
 rb_gc_obj_id_moved(VALUE obj)
 {
@@ -1815,7 +1833,7 @@ static void
 id2ref_tbl_mark(void *data)
 {
     st_table *table = (st_table *)data;
-    if (UNLIKELY(!RB_POSFIXABLE(next_object_id))) {
         // It's very unlikely, but if enough object ids were generated, keys may be T_BIGNUM
         rb_mark_set(table);
     }
@@ -1833,7 +1851,7 @@ static void
 id2ref_tbl_compact(void *data)
 {
     st_table *table = (st_table *)data;
-    if (LIKELY(RB_POSFIXABLE(next_object_id))) {
         // We know keys are all FIXNUM, so no need to update them.
         gc_ref_update_table_values_only(table);
     }
@@ -1869,8 +1887,7 @@ class_object_id(VALUE klass)
     VALUE id = RUBY_ATOMIC_VALUE_LOAD(RCLASS(klass)->object_id);
     if (!id) {
         unsigned int lock_lev = rb_gc_vm_lock();
-        id = ULL2NUM(next_object_id);
-        next_object_id += OBJ_ID_INCREMENT;
         VALUE existing_id = RUBY_ATOMIC_VALUE_CAS(RCLASS(klass)->object_id, 0, id);
         if (existing_id) {
             id = existing_id;
@@ -1884,6 +1901,30 @@ class_object_id(VALUE klass)
 }
 
 static VALUE
 object_id(VALUE obj)
 {
     switch (BUILTIN_TYPE(obj)) {
@@ -1897,32 +1938,14 @@ object_id(VALUE obj)
         break;
     }
 
-    VALUE id = Qfalse;
-    unsigned int lock_lev;
-
-    // We could avoid locking if the object isn't shareable
-    // but we'll lock anyway to lookup the next shape, and
-    // we'd at least need to generate the object_id using atomics.
-    lock_lev = rb_gc_vm_lock();
-
-    shape_id_t shape_id = rb_obj_shape_id(obj);
-    shape_id_t object_id_shape_id = rb_shape_transition_object_id(obj);
-
-    if (shape_id >= object_id_shape_id) {
-        id = rb_obj_field_get(obj, object_id_shape_id);
-    }
-    else {
-        id = ULL2NUM(next_object_id);
-        next_object_id += OBJ_ID_INCREMENT;
-
-        rb_obj_field_set(obj, object_id_shape_id, id);
-        if (RB_UNLIKELY(id2ref_tbl)) {
-            st_insert(id2ref_tbl, (st_data_t)id, (st_data_t)obj);
-        }
     }
-
-    rb_gc_vm_unlock(lock_lev);
-
-    return id;
 }
 
 static void
@@ -1973,7 +1996,7 @@ object_id_to_ref(void *objspace_ptr, VALUE object_id)
         return obj;
     }
 
-    if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(next_object_id))) {
         rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
     }
     else {
diff --git a/include/ruby/atomic.h b/include/ruby/atomic.h
@@ -198,6 +198,18 @@ typedef unsigned int rb_atomic_t;
 #define RUBY_ATOMIC_DEC(var) rbimpl_atomic_dec(&(var))
 
 /**
  * Identical to #RUBY_ATOMIC_INC, except it expects its argument is `size_t`.
  * There are cases where ::rb_atomic_t is 32bit while `size_t` is 64bit. This
  * should be used for size related operations to support such platforms.
@@ -401,6 +413,38 @@ rbimpl_atomic_fetch_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
 #endif
 }
 
 RBIMPL_ATTR_ARTIFICIAL()
 RBIMPL_ATTR_NOALIAS()
 RBIMPL_ATTR_NONNULL((1))
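For readers skimming the diff: the gc.c hunks retire the global next_object_id counter that was previously bumped while holding the VM lock, and the surrounding context (RUBY_ATOMIC_VALUE_LOAD, RUBY_ATOMIC_VALUE_CAS, plus the new additions in include/ruby/atomic.h) points at an atomics-based replacement. Below is a minimal, self-contained C11 sketch of that general pattern, written with <stdatomic.h> rather than Ruby's RUBY_ATOMIC_* wrappers; the names generate_object_id, object_id_for, obj_slot and the placeholder increment of 8 are invented for illustration and are not taken from the patch.

/*
 * Sketch only: reserve an id with an atomic fetch-add (replacing the
 * lock-protected `next_object_id += OBJ_ID_INCREMENT`), then publish it
 * into a per-object slot with a compare-and-swap.
 */
#include <stdatomic.h>
#include <stdio.h>

#define OBJ_ID_INCREMENT 8ULL  /* stand-in for (RUBY_IMMEDIATE_MASK + 1) */

static _Atomic unsigned long long next_object_id = OBJ_ID_INCREMENT;

/* Reserve a fresh id: read the counter and advance it in one atomic step,
 * so concurrent callers never receive the same value. */
static unsigned long long
generate_object_id(void)
{
    return atomic_fetch_add_explicit(&next_object_id, OBJ_ID_INCREMENT,
                                     memory_order_relaxed);
}

/* Publish an id into a per-object slot exactly once. If another thread
 * already stored one, adopt that value instead of ours. */
static unsigned long long
object_id_for(_Atomic unsigned long long *obj_slot)
{
    unsigned long long id = atomic_load_explicit(obj_slot, memory_order_acquire);
    if (id == 0) {
        unsigned long long fresh = generate_object_id();
        unsigned long long expected = 0;
        if (atomic_compare_exchange_strong(obj_slot, &expected, fresh)) {
            id = fresh;
        }
        else {
            id = expected;  /* another thread won the race */
        }
    }
    return id;
}

int
main(void)
{
    static _Atomic unsigned long long slot = 0;
    printf("%llu\n", object_id_for(&slot));  /* assigns the first id */
    printf("%llu\n", object_id_for(&slot));  /* returns the same id */
    return 0;
}

A property of this pattern worth noting: a thread that loses the compare-and-swap simply discards the id it reserved, so no retry loop is needed; a few ids are wasted on races but every published id stays unique, similar to how class_object_id() in the diff adopts existing_id when its RUBY_ATOMIC_VALUE_CAS() finds a value already in place.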