diff options
| author | Matt Valentine-House <matt@eightbitraptor.com> | 2026-03-31 12:57:56 +0100 |
|---|---|---|
| committer | Matt Valentine-House <matt@eightbitraptor.com> | 2026-04-09 13:24:09 +0100 |
| commit | 2567e76ec376fee3b6d6e98e9578bcbb44e23034 (patch) | |
| tree | 48eb7c1b82f72945a79f9a26b89bd51a21de8133 | |
| parent | 93710423e13814b8be725d369ed29dac081cb89f (diff) | |
Handle small pools in shape capacity calculation
When pool slot sizes can be smaller than sizeof(struct RBasic) (e.g. an
8-byte pool on 64-bit, where RBasic is 16 bytes), the capacity
calculation would underflow. Guard against this by setting capacity to
0 for pools too small to hold fields.
| -rw-r--r-- | shape.c | 14 | ||||
| -rw-r--r-- | shape.h | 1 |
2 files changed, 11 insertions, 4 deletions
@@ -477,14 +477,14 @@ static attr_index_t shape_grow_capa(attr_index_t current_capa) { const attr_index_t *capacities = rb_shape_tree.capacities; + size_t heaps_count = rb_shape_tree.heaps_count; // First try to use the next size that will be embeddable in a larger object slot. - attr_index_t capa; - while ((capa = *capacities)) { + for (size_t i = 0; i < heaps_count; i++) { + attr_index_t capa = capacities[i]; if (capa > current_capa) { return capa; } - capacities++; } return (attr_index_t)rb_malloc_grow_capa(current_capa, sizeof(VALUE)); @@ -1543,8 +1543,14 @@ Init_default_shapes(void) capacities[heaps_count] = 0; size_t index; for (index = 0; index < heaps_count; index++) { - capacities[index] = (heap_sizes[index] - sizeof(struct RBasic)) / sizeof(VALUE); + if (heap_sizes[index] > sizeof(struct RBasic)) { + capacities[index] = (heap_sizes[index] - sizeof(struct RBasic)) / sizeof(VALUE); + } + else { + capacities[index] = 0; + } } + rb_shape_tree.heaps_count = heaps_count; rb_shape_tree.capacities = capacities; #ifdef HAVE_MMAP @@ -115,6 +115,7 @@ typedef struct { rb_shape_t *shape_list; rb_shape_t *root_shape; const attr_index_t *capacities; + size_t heaps_count; rb_atomic_t next_shape_id; redblack_node_t *shape_cache; |
