summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: nagachika <nagachika@ruby-lang.org> 2020-10-01 12:02:05 (GMT)
committer: nagachika <nagachika@ruby-lang.org> 2020-10-01 12:02:05 (GMT)
commit: b35bfa6abb7760e4323a4341dff840f59ddcfde1 (patch)
tree: f99c71873c8d91e0388040e1858e0de5b203a25e
parent: 811b2b0df5e670ad8db7951191232ce3f5b0d978 (diff)
merge revision(s) 35ba2783fe6b3316a6bbc6f00bf975ad7185d6e0,e8edc34f0abe176b24975a1fed1f2c3782f0a252: [Backport #16807]
Use a linked list to eliminate imemo tmp bufs for managing local tables: this patch changes local table memory to be managed by a linked list rather than via the garbage collector. It reduces allocations from the GC and also fixes a use-after-free bug in the concurrent-with-sweep compactor I'm working on. — Remove unused struct member: I accidentally added this in 35ba2783fe6b3316a6bbc6f00bf975ad7185d6e0, and it's making the size of RVALUE be too big. I'm sorry! orz
-rw-r--r--node.c36
-rw-r--r--node.h1
-rw-r--r--parse.y12
-rw-r--r--version.h2
4 files changed, 21 insertions(+), 30 deletions(-)
diff --git a/node.c b/node.c
index 3514060..2cdf4c9 100644
--- a/node.c
+++ b/node.c
@@ -1120,6 +1120,7 @@ typedef struct {
struct node_buffer_struct {
node_buffer_list_t unmarkable;
node_buffer_list_t markable;
+ ID *local_tables;
VALUE mark_hash;
};
@@ -1145,6 +1146,7 @@ rb_node_buffer_new(void)
node_buffer_t *nb = ruby_xmalloc(alloc_size);
init_node_buffer_list(&nb->unmarkable, (node_buffer_elem_t*)&nb[1]);
init_node_buffer_list(&nb->markable, (node_buffer_elem_t*)((size_t)nb->unmarkable.head + bucket_size));
+ nb->local_tables = 0;
nb->mark_hash = Qnil;
return nb;
}
@@ -1166,6 +1168,13 @@ rb_node_buffer_free(node_buffer_t *nb)
{
node_buffer_list_free(&nb->unmarkable);
node_buffer_list_free(&nb->markable);
+ ID * local_table = nb->local_tables;
+ while (local_table) {
+ unsigned int size = (unsigned int)*local_table;
+ ID * next_table = (ID *)local_table[size + 1];
+ xfree(local_table);
+ local_table = next_table;
+ }
xfree(nb);
}
@@ -1199,7 +1208,6 @@ rb_ast_newnode(rb_ast_t *ast, enum node_type type)
case NODE_DREGX:
case NODE_DSYM:
case NODE_ARGS:
- case NODE_SCOPE:
case NODE_ARYPTN:
return ast_newnode_in_bucket(&nb->markable);
default:
@@ -1208,6 +1216,14 @@ rb_ast_newnode(rb_ast_t *ast, enum node_type type)
}
void
+rb_ast_add_local_table(rb_ast_t *ast, ID *buf)
+{
+ unsigned int size = (unsigned int)*buf;
+ buf[size + 1] = (ID)ast->node_buffer->local_tables;
+ ast->node_buffer->local_tables = buf;
+}
+
+void
rb_ast_delete_node(rb_ast_t *ast, NODE *n)
{
(void)ast;
@@ -1253,15 +1269,6 @@ static void
mark_ast_value(void *ctx, NODE * node)
{
switch (nd_type(node)) {
- case NODE_SCOPE:
- {
- ID *buf = node->nd_tbl;
- if (buf) {
- unsigned int size = (unsigned int)*buf;
- rb_gc_mark_movable((VALUE)buf[size + 1]);
- }
- break;
- }
case NODE_ARYPTN:
{
struct rb_ary_pattern_info *apinfo = node->nd_apinfo;
@@ -1293,15 +1300,6 @@ static void
update_ast_value(void *ctx, NODE * node)
{
switch (nd_type(node)) {
- case NODE_SCOPE:
- {
- ID *buf = node->nd_tbl;
- if (buf) {
- unsigned int size = (unsigned int)*buf;
- buf[size + 1] = rb_gc_location((VALUE)buf[size + 1]);
- }
- break;
- }
case NODE_ARYPTN:
{
struct rb_ary_pattern_info *apinfo = node->nd_apinfo;
diff --git a/node.h b/node.h
index 217b344..f688f23 100644
--- a/node.h
+++ b/node.h
@@ -404,6 +404,7 @@ typedef struct rb_ast_struct {
rb_ast_t *rb_ast_new(void);
void rb_ast_mark(rb_ast_t*);
void rb_ast_update_references(rb_ast_t*);
+void rb_ast_add_local_table(rb_ast_t*, ID *buf);
void rb_ast_dispose(rb_ast_t*);
void rb_ast_free(rb_ast_t*);
size_t rb_ast_memsize(const rb_ast_t*);
diff --git a/parse.y b/parse.y
index 54515f9..0fd0c46 100644
--- a/parse.y
+++ b/parse.y
@@ -2872,11 +2872,9 @@ primary : literal
ID id = internal_id(p);
NODE *m = NEW_ARGS_AUX(0, 0, &NULL_LOC);
NODE *args, *scope, *internal_var = NEW_DVAR(id, &@2);
- VALUE tmpbuf = rb_imemo_tmpbuf_auto_free_pointer();
ID *tbl = ALLOC_N(ID, 3);
- rb_imemo_tmpbuf_set_ptr(tmpbuf, tbl);
tbl[0] = 1 /* length of local var table */; tbl[1] = id /* internal id */;
- tbl[2] = tmpbuf;
+ rb_ast_add_local_table(p->ast, tbl);
switch (nd_type($2)) {
case NODE_LASGN:
@@ -2896,7 +2894,6 @@ primary : literal
/* {|*internal_id| <m> = internal_id; ... } */
args = new_args(p, m, 0, id, 0, new_args_tail(p, 0, 0, 0, &@2), &@2);
scope = NEW_NODE(NODE_SCOPE, tbl, $5, args, &@$);
- RB_OBJ_WRITTEN(p->ast, Qnil, tmpbuf);
$$ = NEW_FOR($4, scope, &@$);
fixpos($$, $2);
/*% %*/
@@ -11825,12 +11822,9 @@ local_tbl(struct parser_params *p)
int cnt = cnt_args + cnt_vars;
int i, j;
ID *buf;
- VALUE tbl = 0;
if (cnt <= 0) return 0;
- tbl = rb_imemo_tmpbuf_auto_free_pointer();
buf = ALLOC_N(ID, cnt + 2);
- rb_imemo_tmpbuf_set_ptr(tbl, buf);
MEMCPY(buf+1, p->lvtbl->args->tbl, ID, cnt_args);
/* remove IDs duplicated to warn shadowing */
for (i = 0, j = cnt_args+1; i < cnt_vars; ++i) {
@@ -11841,11 +11835,9 @@ local_tbl(struct parser_params *p)
}
if (--j < cnt) {
REALLOC_N(buf, ID, (cnt = j) + 2);
- rb_imemo_tmpbuf_set_ptr(tbl, buf);
}
buf[0] = cnt;
- buf[cnt + 1] = (ID)tbl;
- RB_OBJ_WRITTEN(p->ast, Qnil, tbl);
+ rb_ast_add_local_table(p->ast, buf);
return buf;
}
diff --git a/version.h b/version.h
index 01c8010..5d0a5e0 100644
--- a/version.h
+++ b/version.h
@@ -2,7 +2,7 @@
# define RUBY_VERSION_MINOR RUBY_API_VERSION_MINOR
#define RUBY_VERSION_TEENY 2
#define RUBY_RELEASE_DATE RUBY_RELEASE_YEAR_STR"-"RUBY_RELEASE_MONTH_STR"-"RUBY_RELEASE_DAY_STR
-#define RUBY_PATCHLEVEL 135
+#define RUBY_PATCHLEVEL 136
#define RUBY_RELEASE_YEAR 2020
#define RUBY_RELEASE_MONTH 10