author    nagachika <nagachika@ruby-lang.org>    2020-10-01 21:02:05 +0900
committer nagachika <nagachika@ruby-lang.org>    2020-10-01 21:02:05 +0900
commit  b35bfa6abb7760e4323a4341dff840f59ddcfde1 (patch)
tree    f99c71873c8d91e0388040e1858e0de5b203a25e /node.c
parent  811b2b0df5e670ad8db7951191232ce3f5b0d978 (diff)
merge revision(s) 35ba2783fe6b3316a6bbc6f00bf975ad7185d6e0,e8edc34f0abe176b24975a1fed1f2c3782f0a252: [Backport #16807]
Use a linked list to eliminate imemo tmp bufs for managing local tables

This patch changes local table memory to be managed by a linked list rather than via the garbage collector. It reduces allocations from the GC and also fixes a use-after-free bug in the concurrent-with-sweep compactor I'm working on.

Remove unused struct member

I accidentally added this in 35ba2783fe6b3316a6bbc6f00bf975ad7185d6e0, and it's making the size of RVALUE be too big. I'm sorry! orz
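For readers following the scheme in the hunks below: each local table is a plain ID array whose slot 0 holds the entry count, and the patch reuses slot size + 1 as a "next" pointer, so all tables form an intrusive singly linked list that can be freed in one walk instead of being tracked by the GC. A minimal standalone sketch of that layout (illustrative names only, not Ruby API; ID is approximated as a pointer-sized integer):

    #include <stdlib.h>
    #include <stdint.h>

    typedef uintptr_t ID;            /* stand-in for Ruby's ID type */

    static ID *table_list_head;      /* plays the role of nb->local_tables */

    /* Allocate a table with room for `size` entries plus the size slot
     * and the next-pointer slot, then push it onto the intrusive list. */
    static ID *
    new_local_table(unsigned int size)
    {
        ID *buf = malloc((2 + size) * sizeof(ID));
        buf[0] = size;                         /* slot 0: entry count */
        buf[size + 1] = (ID)table_list_head;   /* last slot: next pointer */
        table_list_head = buf;
        return buf;
    }

    /* Free every table in one walk, mirroring the loop the patch adds
     * to rb_node_buffer_free(). */
    static void
    free_local_tables(void)
    {
        ID *table = table_list_head;
        while (table) {
            unsigned int size = (unsigned int)table[0];
            ID *next = (ID *)table[size + 1];
            free(table);
            table = next;
        }
        table_list_head = NULL;
    }

Because the next pointer lives inside the buffer itself, no extra bookkeeping objects (the imemo tmp bufs of the commit title) are needed, and NODE_SCOPE nodes no longer have to be marked or updated by the GC, which is why the mark_ast_value/update_ast_value cases are deleted below.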
Diffstat (limited to 'node.c')
-rw-r--r--  node.c  36
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/node.c b/node.c
index 3514060ecb..2cdf4c9dda 100644
--- a/node.c
+++ b/node.c
@@ -1120,6 +1120,7 @@ typedef struct {
 struct node_buffer_struct {
     node_buffer_list_t unmarkable;
     node_buffer_list_t markable;
+    ID *local_tables;
     VALUE mark_hash;
 };
@@ -1145,6 +1146,7 @@ rb_node_buffer_new(void)
     node_buffer_t *nb = ruby_xmalloc(alloc_size);
     init_node_buffer_list(&nb->unmarkable, (node_buffer_elem_t*)&nb[1]);
     init_node_buffer_list(&nb->markable, (node_buffer_elem_t*)((size_t)nb->unmarkable.head + bucket_size));
+    nb->local_tables = 0;
     nb->mark_hash = Qnil;
     return nb;
 }
@@ -1166,6 +1168,13 @@ rb_node_buffer_free(node_buffer_t *nb)
 {
     node_buffer_list_free(&nb->unmarkable);
     node_buffer_list_free(&nb->markable);
+    ID * local_table = nb->local_tables;
+    while (local_table) {
+        unsigned int size = (unsigned int)*local_table;
+        ID * next_table = (ID *)local_table[size + 1];
+        xfree(local_table);
+        local_table = next_table;
+    }
     xfree(nb);
 }
@@ -1199,7 +1208,6 @@ rb_ast_newnode(rb_ast_t *ast, enum node_type type)
       case NODE_DREGX:
       case NODE_DSYM:
       case NODE_ARGS:
-      case NODE_SCOPE:
       case NODE_ARYPTN:
         return ast_newnode_in_bucket(&nb->markable);
       default:
@@ -1208,6 +1216,14 @@ rb_ast_newnode(rb_ast_t *ast, enum node_type type)
 }

 void
+rb_ast_add_local_table(rb_ast_t *ast, ID *buf)
+{
+    unsigned int size = (unsigned int)*buf;
+    buf[size + 1] = (ID)ast->node_buffer->local_tables;
+    ast->node_buffer->local_tables = buf;
+}
+
+void
 rb_ast_delete_node(rb_ast_t *ast, NODE *n)
 {
     (void)ast;
@@ -1253,15 +1269,6 @@ static void
 mark_ast_value(void *ctx, NODE * node)
 {
     switch (nd_type(node)) {
-      case NODE_SCOPE:
-        {
-            ID *buf = node->nd_tbl;
-            if (buf) {
-                unsigned int size = (unsigned int)*buf;
-                rb_gc_mark_movable((VALUE)buf[size + 1]);
-            }
-            break;
-        }
       case NODE_ARYPTN:
         {
             struct rb_ary_pattern_info *apinfo = node->nd_apinfo;
@@ -1293,15 +1300,6 @@ static void
 update_ast_value(void *ctx, NODE * node)
 {
     switch (nd_type(node)) {
-      case NODE_SCOPE:
-        {
-            ID *buf = node->nd_tbl;
-            if (buf) {
-                unsigned int size = (unsigned int)*buf;
-                buf[size + 1] = rb_gc_location((VALUE)buf[size + 1]);
-            }
-            break;
-        }
       case NODE_ARYPTN:
         {
             struct rb_ary_pattern_info *apinfo = node->nd_apinfo;