author    Jean Boussier <[email protected]>  2023-10-10 13:12:17 +0200
committer Jean Boussier <[email protected]>  2023-10-10 14:47:54 +0200
commit    5cc44f48c51974a84a40480477a4afd8901ae7e4
tree      9c252adf59e2e0b932a74a7d0cf000969039e97d /shape.c
parent    fd21460898d2d5044c1bcc140927142921424791
Refactor rb_shape_transition_shape_capa to not accept capacity
This way the growth factor is encapsulated, which allows rb_shape_transition_shape_capa to be smarter about ideal sizes.
-rw-r--r--  shape.c  14
1 file changed, 10 insertions(+), 4 deletions(-)
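Sketch of the resulting interface (not the verbatim patch, and leaning on the types in shape.h): the public function drops its capacity parameter and owns the growth policy, while an explicit-capacity variant remains for internal callers. The helper name rb_shape_transition_shape_capa_create and the doubling factor below are assumptions inferred from the commit message and the removed call sites.

/* Sketch only -- the helper name and the doubling factor are assumptions. */

/* Before (removed): callers picked the capacity themselves:
 *   rb_shape_t *rb_shape_transition_shape_capa(rb_shape_t *shape, uint32_t new_capacity);
 */

/* After: an explicit-capacity variant stays available internally (name assumed)... */
rb_shape_t *rb_shape_transition_shape_capa_create(rb_shape_t *shape, uint32_t new_capacity);

/* ...and the public entry point encapsulates the growth factor, so call sites
 * no longer hard-code "capacity * 2". */
rb_shape_t *
rb_shape_transition_shape_capa(rb_shape_t *shape)
{
    return rb_shape_transition_shape_capa_create(shape, shape->capacity * 2);
}

Keeping the factor in one place is what lets rb_shape_transition_shape_capa later pick smarter ideal sizes without touching its callers.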
@@ -417,8 +417,8 @@ rb_shape_get_next(rb_shape_t* shape, VALUE obj, ID id)
     return new_shape;
 }
 
-rb_shape_t *
-rb_shape_transition_shape_capa(rb_shape_t* shape, uint32_t new_capacity)
 {
     ID edge_name = rb_make_temporary_id(new_capacity);
     bool dont_care;
@@ -427,6 +427,12 @@ rb_shape_transition_shape_capa(rb_shape_t* shape, uint32_t new_capacity)
     return new_shape;
 }
 
 bool
 rb_shape_get_iv_index(rb_shape_t * shape, ID id, attr_index_t *value)
 {
@@ -541,7 +547,7 @@ rb_shape_rebuild_shape(rb_shape_t * initial_shape, rb_shape_t * dest_shape)
       case SHAPE_IVAR:
         if (midway_shape->capacity <= midway_shape->next_iv_index) {
             // There isn't enough room to write this IV, so we need to increase the capacity
-            midway_shape = rb_shape_transition_shape_capa(midway_shape, midway_shape->capacity * 2);
+            midway_shape = rb_shape_transition_shape_capa(midway_shape);
         }
         midway_shape = rb_shape_get_next_iv_shape(midway_shape, dest_shape->edge_name);
@@ -828,7 +834,7 @@ Init_default_shapes(void)
     // Shapes by size pool
     for (int i = 1; i < SIZE_POOL_COUNT; i++) {
         uint32_t capa = (uint32_t)((rb_size_pool_slot_size(i) - offsetof(struct RObject, as.ary)) / sizeof(VALUE));
-        rb_shape_t * new_shape = rb_shape_transition_shape_capa(root, capa);
         new_shape->type = SHAPE_INITIAL_CAPACITY;
         new_shape->size_pool_index = i;
         RUBY_ASSERT(rb_shape_id(new_shape) == (shape_id_t)i);