author     nagachika <nagachika@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2017-08-05 06:35:02 +0000
committer  nagachika <nagachika@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2017-08-05 06:35:02 +0000
commit     01cfae3beb0b649ca2ae879825349cf0330a3549 (patch)
tree       f7556d2c60e217dffe751979cb2edffb78d69cfe
parent     90645fd4371abcb63d4fdce7ed75d14fb265125e (diff)
merge revision(s) 59462,59474: [Backport #13772]
release VM stack properly.

* cont.c: r55766 changed how a Fiber's VM stack is handled.
  A resumed Fiber points to NULL as its VM stack, and the running
  Thread has the responsibility to manage it (marking and releasing).
  However, thread_start_func_2()@thread.c and thread_free()@vm.c
  do not free the VM stack if a corresponding root Fiber exists.
  This causes a memory leak. [Bug #13772]

* cont.c (root_fiber_alloc): fib->cont.saved_thread.ec.stack should be
  NULL because the running thread has the responsibility to manage
  this stack.

* vm.c (rb_thread_recycle_stack_release): assert that the given stack
  is not NULL (callers should take care of it).

fix stack storing for root fibers.

* cont.c (root_fiber_alloc): this function is called by fiber_current()
  and fiber_store(). fiber_current() should clear the VM stack
  information in the fiber data because the running thread knows the
  stack information and has the responsibility to manage it.
  However, fiber_store() requires the VM stack information to remain in
  the fiber data because the responsibility to manage the VM stack is
  moved from the Thread to the Fiber (before switching to another
  fiber).

* cont.c (root_fiber_alloc): save the thread's fiber and root_fiber
  information.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_2_4@59516 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
-rw-r--r--  cont.c   | 10
-rw-r--r--  thread.c |  6
-rw-r--r--  vm.c     | 12
3 files changed, 19 insertions(+), 9 deletions(-)
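The ownership convention behind this fix, as a minimal sketch (the toy_thread/toy_fiber types and helpers below are hypothetical illustrations, not the actual Ruby internals): once the root fiber is running, the thread owns the VM stack, so thread teardown must release th->stack unconditionally instead of skipping the release whenever a root fiber exists.

/* Minimal sketch of the stack-ownership convention; toy_thread, toy_fiber
 * and these helpers are hypothetical, not the real Ruby structures. */
#include <assert.h>
#include <stdlib.h>

typedef struct { void *vm_stack; } toy_fiber;
typedef struct { void *vm_stack; toy_fiber *root_fiber; } toy_thread;

/* When the root fiber starts running, the thread takes over its VM stack:
 * the fiber's saved pointer is cleared and the thread becomes responsible
 * for marking and releasing the stack. */
static void
toy_resume_root_fiber(toy_thread *th, toy_fiber *fib)
{
    th->vm_stack = fib->vm_stack;
    fib->vm_stack = NULL;     /* running thread now owns the stack */
    th->root_fiber = fib;
}

/* The leak: teardown used to skip the release whenever root_fiber was set,
 * even though the fiber no longer owned the stack.  The fix releases the
 * thread's stack unconditionally and asserts that it is non-NULL. */
static void
toy_thread_teardown(toy_thread *th)
{
    assert(th->vm_stack != NULL);
    free(th->vm_stack);
    th->vm_stack = NULL;
}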
diff --git a/cont.c b/cont.c
index ab4f2bc830..c56ea0f6e6 100644
--- a/cont.c
+++ b/cont.c
@@ -575,6 +575,8 @@ cont_restore_thread(rb_context_t *cont)
th->root_lep = sth->root_lep;
th->root_svar = sth->root_svar;
th->ensure_list = sth->ensure_list;
+ VM_ASSERT(th->stack != NULL);
+ VM_ASSERT(sth->status == THREAD_RUNNABLE);
}
#if FIBER_USE_NATIVE
@@ -1316,6 +1318,7 @@ root_fiber_alloc(rb_thread_t *th)
#endif
fib->status = RUNNING;
+ th->root_fiber = th->fiber = fib;
return fib;
}
@@ -1324,9 +1327,9 @@ fiber_current(void)
{
rb_thread_t *th = GET_THREAD();
if (th->fiber == 0) {
- /* save root */
rb_fiber_t *fib = root_fiber_alloc(th);
- th->root_fiber = th->fiber = fib;
+ /* Running thread object has stack management responsibility */
+ fib->cont.saved_thread.stack = NULL;
}
return th->fiber;
}
@@ -1367,9 +1370,8 @@ fiber_store(rb_fiber_t *next_fib, rb_thread_t *th)
cont_save_thread(&fib->cont, th);
}
else {
- /* create current fiber */
+ /* create root fiber */
fib = root_fiber_alloc(th);
- th->root_fiber = th->fiber = fib;
}
#if FIBER_USE_NATIVE
diff --git a/thread.c b/thread.c
index 4fb46dc43a..5d9b8fcd58 100644
--- a/thread.c
+++ b/thread.c
@@ -688,10 +688,8 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
rb_threadptr_unlock_all_locking_mutexes(th);
rb_check_deadlock(th->vm);
- if (!th->root_fiber) {
- rb_thread_recycle_stack_release(th->stack);
- th->stack = 0;
- }
+ rb_thread_recycle_stack_release(th->stack);
+ th->stack = NULL;
}
native_mutex_lock(&th->vm->thread_destruct_lock);
/* make sure vm->running_thread never point me after this point.*/
diff --git a/vm.c b/vm.c
index 13308f3fc9..e44f7d824d 100644
--- a/vm.c
+++ b/vm.c
@@ -90,6 +90,8 @@ VM_CFP_IN_HEAP_P(const rb_thread_t *th, const rb_control_frame_t *cfp)
{
const VALUE *start = th->stack;
const VALUE *end = (VALUE *)th->stack + th->stack_size;
+ VM_ASSERT(start != NULL);
+
if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
return FALSE;
}
@@ -103,6 +105,8 @@ VM_EP_IN_HEAP_P(const rb_thread_t *th, const VALUE *ep)
{
const VALUE *start = th->stack;
const VALUE *end = (VALUE *)th->cfp;
+ VM_ASSERT(start != NULL);
+
if (start <= ep && ep < end) {
return FALSE;
}
@@ -2315,7 +2319,7 @@ static int thread_recycle_stack_count = 0;
static VALUE *
thread_recycle_stack(size_t size)
{
- if (thread_recycle_stack_count) {
+ if (thread_recycle_stack_count > 0) {
/* TODO: check stack size if stack sizes are variable */
return thread_recycle_stack_slot[--thread_recycle_stack_count];
}
@@ -2331,6 +2335,8 @@ thread_recycle_stack(size_t size)
void
rb_thread_recycle_stack_release(VALUE *stack)
{
+ VM_ASSERT(stack != NULL);
+
#if USE_THREAD_DATA_RECYCLE
if (thread_recycle_stack_count < RECYCLE_MAX) {
thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
@@ -2414,6 +2420,10 @@ thread_free(void *ptr)
if (ptr) {
th = ptr;
+ if (th->stack != NULL) {
+ rb_thread_recycle_stack_release(th->stack);
+ th->stack = NULL;
+ }
if (!th->root_fiber) {
RUBY_FREE_UNLESS_NULL(th->stack);