-rw-r--r--  cont.c    | 10
-rw-r--r--  thread.c  |  6
-rw-r--r--  vm.c      | 12
3 files changed, 19 insertions, 9 deletions
diff --git a/cont.c b/cont.c
index ab4f2bc830..c56ea0f6e6 100644
--- a/cont.c
+++ b/cont.c
@@ -575,6 +575,8 @@ cont_restore_thread(rb_context_t *cont)
th->root_lep = sth->root_lep;
th->root_svar = sth->root_svar;
th->ensure_list = sth->ensure_list;
+ VM_ASSERT(th->stack != NULL);
+ VM_ASSERT(sth->status == THREAD_RUNNABLE);
}
#if FIBER_USE_NATIVE
@@ -1316,6 +1318,7 @@ root_fiber_alloc(rb_thread_t *th)
#endif
fib->status = RUNNING;
+ th->root_fiber = th->fiber = fib;
return fib;
}
@@ -1324,9 +1327,9 @@ fiber_current(void)
{
rb_thread_t *th = GET_THREAD();
if (th->fiber == 0) {
- /* save root */
rb_fiber_t *fib = root_fiber_alloc(th);
- th->root_fiber = th->fiber = fib;
+ /* Running thread object has stack management responsibility */
+ fib->cont.saved_thread.stack = NULL;
}
return th->fiber;
}
@@ -1367,9 +1370,8 @@ fiber_store(rb_fiber_t *next_fib, rb_thread_t *th)
cont_save_thread(&fib->cont, th);
}
else {
- /* create current fiber */
+ /* create root fiber */
fib = root_fiber_alloc(th);
- th->root_fiber = th->fiber = fib;
}
#if FIBER_USE_NATIVE
diff --git a/thread.c b/thread.c
index 4fb46dc43a..5d9b8fcd58 100644
--- a/thread.c
+++ b/thread.c
@@ -688,10 +688,8 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_s
rb_threadptr_unlock_all_locking_mutexes(th);
rb_check_deadlock(th->vm);
- if (!th->root_fiber) {
- rb_thread_recycle_stack_release(th->stack);
- th->stack = 0;
- }
+ rb_thread_recycle_stack_release(th->stack);
+ th->stack = NULL;
}
native_mutex_lock(&th->vm->thread_destruct_lock);
/* make sure vm->running_thread never point me after this point.*/
diff --git a/vm.c b/vm.c
index 13308f3fc9..e44f7d824d 100644
--- a/vm.c
+++ b/vm.c
@@ -90,6 +90,8 @@ VM_CFP_IN_HEAP_P(const rb_thread_t *th, const rb_control_frame_t *cfp)
{
const VALUE *start = th->stack;
const VALUE *end = (VALUE *)th->stack + th->stack_size;
+ VM_ASSERT(start != NULL);
+
if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
return FALSE;
}
@@ -103,6 +105,8 @@ VM_EP_IN_HEAP_P(const rb_thread_t *th, const VALUE *ep)
{
const VALUE *start = th->stack;
const VALUE *end = (VALUE *)th->cfp;
+ VM_ASSERT(start != NULL);
+
if (start <= ep && ep < end) {
return FALSE;
}
@@ -2315,7 +2319,7 @@ static int thread_recycle_stack_count = 0;
static VALUE *
thread_recycle_stack(size_t size)
{
- if (thread_recycle_stack_count) {
+ if (thread_recycle_stack_count > 0) {
/* TODO: check stack size if stack sizes are variable */
return thread_recycle_stack_slot[--thread_recycle_stack_count];
}
@@ -2331,6 +2335,8 @@ thread_recycle_stack(size_t size)
void
rb_thread_recycle_stack_release(VALUE *stack)
{
+ VM_ASSERT(stack != NULL);
+
#if USE_THREAD_DATA_RECYCLE
if (thread_recycle_stack_count < RECYCLE_MAX) {
thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
@@ -2414,6 +2420,10 @@ thread_free(void *ptr)
if (ptr) {
th = ptr;
+ if (th->stack != NULL) {
+ rb_thread_recycle_stack_release(th->stack);
+ th->stack = NULL;
+ }
if (!th->root_fiber) {
RUBY_FREE_UNLESS_NULL(th->stack);