summaryrefslogtreecommitdiff
path: root/shape.c
diff options
context:
space:
mode:
authorAaron Patterson <[email protected]>2023-02-07 17:46:42 -0800
committerAaron Patterson <[email protected]>2023-10-24 10:52:06 -0700
commit84e4453436c3549b4fda6014cdd5fcc9e0b80755 ()
treef0fdc61a51a8f1cfa5eb9b534103d1534a29b5ae /shape.c
parent5c4978c11c4ea9569d5d99a86936fbef0ab7fa52 (diff)
Use a functional red-black tree for indexing the shapes
This is an experimental commit that uses a functional red-black tree to create an index of the ancestor shapes. It uses an Okasaki-style functional red-black tree: https://www.cs.tufts.edu/comp/150FP/archive/chris-okasaki/redblack99.pdf This tree is advantageous because: * It offers O(log n) insertions and O(log n) lookups. * It shares memory with previous "versions" of the tree. When we insert a node in the tree, only the parts of the tree that need to be rebalanced are newly allocated. Parts of the tree that don't need to be rebalanced are not reallocated, so "new trees" are able to share memory with old trees. This is in contrast to a sorted set, where we would have to duplicate the set and also re-sort it on each insertion. I've added a new stat to RubyVM.stat so we can understand how the red-black tree grows.
-rw-r--r--shape.c309
1 files changed, 295 insertions, 14 deletions
@@ -11,6 +11,10 @@
#include "variable.h"
#include <stdbool.h>
#ifndef SHAPE_DEBUG
#define SHAPE_DEBUG (VM_CHECK_MODE > 0)
#endif
@@ -20,11 +24,235 @@
#define SINGLE_CHILD_MASK (~((uintptr_t)SINGLE_CHILD_TAG))
#define SINGLE_CHILD_P(x) (((uintptr_t)x) & SINGLE_CHILD_TAG)
#define SINGLE_CHILD(x) (rb_shape_t *)((uintptr_t)x & SINGLE_CHILD_MASK)
static ID id_frozen;
static ID id_t_object;
static ID size_pool_edge_names[SIZE_POOL_COUNT];
rb_shape_tree_t *rb_shape_tree_ptr = NULL;
/*
@@ -152,6 +380,34 @@ rb_shape_alloc(ID edge_name, rb_shape_t * parent, enum shape_type type)
return shape;
}
static rb_shape_t *
rb_shape_alloc_new_child(ID id, rb_shape_t * shape, enum shape_type shape_type)
{
@@ -160,6 +416,9 @@ rb_shape_alloc_new_child(ID id, rb_shape_t * shape, enum shape_type shape_type)
switch (shape_type) {
case SHAPE_IVAR:
new_shape->next_iv_index = shape->next_iv_index + 1;
break;
case SHAPE_CAPACITY_CHANGE:
case SHAPE_FROZEN:
@@ -443,23 +702,37 @@ rb_shape_get_iv_index(rb_shape_t * shape, ID id, attr_index_t *value)
RUBY_ASSERT(rb_shape_id(shape) != OBJ_TOO_COMPLEX_SHAPE_ID);
while (shape->parent_id != INVALID_SHAPE_ID) {
- if (shape->edge_name == id) {
- enum shape_type shape_type;
- shape_type = (enum shape_type)shape->type;
-
- switch (shape_type) {
- case SHAPE_IVAR:
- RUBY_ASSERT(shape->next_iv_index > 0);
*value = shape->next_iv_index - 1;
return true;
- case SHAPE_CAPACITY_CHANGE:
- case SHAPE_ROOT:
- case SHAPE_INITIAL_CAPACITY:
- case SHAPE_T_OBJECT:
return false;
- case SHAPE_OBJ_TOO_COMPLEX:
- case SHAPE_FROZEN:
- rb_bug("Ivar should not exist on transition");
}
}
shape = rb_shape_get_parent(shape);
@@ -820,6 +1093,12 @@ Init_default_shapes(void)
id_frozen = rb_make_internal_id();
id_t_object = rb_make_internal_id();
// Shapes by size pool
for (int i = 0; i < SIZE_POOL_COUNT; i++) {
size_pool_edge_names[i] = rb_make_internal_id();
@@ -839,6 +1118,7 @@ Init_default_shapes(void)
rb_shape_t * new_shape = rb_shape_transition_shape_capa_create(root, capa);
new_shape->type = SHAPE_INITIAL_CAPACITY;
new_shape->size_pool_index = i;
RUBY_ASSERT(rb_shape_id(new_shape) == (shape_id_t)i);
}
@@ -849,6 +1129,7 @@ Init_default_shapes(void)
rb_shape_t * t_object_shape =
get_next_shape_internal(shape, id_t_object, SHAPE_T_OBJECT, &dont_care, true, false);
t_object_shape->edges = rb_id_table_create(0);
RUBY_ASSERT(rb_shape_id(t_object_shape) == (shape_id_t)(i + SIZE_POOL_COUNT));
}