summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--ChangeLog17
-rw-r--r--thread.c25
2 files changed, 29 insertions, 13 deletions
diff --git a/ChangeLog b/ChangeLog
index 4697476365..25c48002c4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,20 @@
+Thu Jun 30 02:28:11 2011 KOSAKI Motohiro <kosaki.motohiro@gmail.com>
+
+ * thread.c (rb_thread_schedule_rec): move interrupt_flag check to
+ rb_thread_schedule().
+ And also rename to rb_thread_schedule_limits() and remove
+ sched_depth argument. It's no longer called recursively.
+ * thread.c (rb_thread_schedule): add a check of interrupt_flag as
+ explained above.
+
+ * thread.c (rb_threadptr_execute_interrupts_rec): rename to
+ rb_threadptr_execute_interrupts_common() and remove sched_depth
+ argument. It's no longer called recursively.
+
+ * thread.c (rb_thread_sleep): adapt the renaming.
+ * thread.c (rb_threadptr_execute_interrupts): ditto.
+ * thread.c (rb_thread_execute_interrupts): ditto.
+
Thu Jun 30 01:31:33 2011 KOSAKI Motohiro <kosaki.motohiro@gmail.com>
* thread.c (thread_s_pass): change RDoc description and remove
diff --git a/thread.c b/thread.c
index 0284ed6369..31b9d4fb68 100644
--- a/thread.c
+++ b/thread.c
@@ -999,10 +999,10 @@ rb_thread_sleep(int sec)
rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}
-static void rb_threadptr_execute_interrupts_rec(rb_thread_t *, int);
+static void rb_threadptr_execute_interrupts_common(rb_thread_t *);
static void
-rb_thread_schedule_rec(int sched_depth, unsigned long limits_us)
+rb_thread_schedule_limits(unsigned long limits_us)
{
thread_debug("rb_thread_schedule\n");
if (!rb_thread_alone()) {
@@ -1017,17 +1017,17 @@ rb_thread_schedule_rec(int sched_depth, unsigned long limits_us)
rb_thread_set_current(th);
thread_debug("rb_thread_schedule/switch done\n");
-
- if (UNLIKELY(!sched_depth && GET_THREAD()->interrupt_flag)) {
- rb_threadptr_execute_interrupts_rec(GET_THREAD(), sched_depth+1);
- }
}
}
void
rb_thread_schedule(void)
{
- rb_thread_schedule_rec(0, 0);
+ rb_thread_schedule_limits(0);
+
+ if (UNLIKELY(GET_THREAD()->interrupt_flag)) {
+ rb_threadptr_execute_interrupts_common(GET_THREAD());
+ }
}
/* blocking region */
@@ -1261,7 +1261,7 @@ thread_s_pass(VALUE klass)
*/
static void
-rb_threadptr_execute_interrupts_rec(rb_thread_t *th, int sched_depth)
+rb_threadptr_execute_interrupts_common(rb_thread_t *th)
{
rb_atomic_t interrupt;
@@ -1305,7 +1305,7 @@ rb_threadptr_execute_interrupts_rec(rb_thread_t *th, int sched_depth)
rb_gc_finalize_deferred();
}
- if (!sched_depth && timer_interrupt) {
+ if (timer_interrupt) {
unsigned long limits_us = 250 * 1000;
if (th->priority > 0)
@@ -1316,10 +1316,9 @@ rb_threadptr_execute_interrupts_rec(rb_thread_t *th, int sched_depth)
if (status == THREAD_RUNNABLE)
th->running_time_us += TIME_QUANTUM_USEC;
- sched_depth++;
EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
- rb_thread_schedule_rec(sched_depth+1, limits_us);
+ rb_thread_schedule_limits(limits_us);
}
}
}
@@ -1327,7 +1326,7 @@ rb_threadptr_execute_interrupts_rec(rb_thread_t *th, int sched_depth)
void
rb_threadptr_execute_interrupts(rb_thread_t *th)
{
- rb_threadptr_execute_interrupts_rec(th, 0);
+ rb_threadptr_execute_interrupts_common(th);
}
void
@@ -1335,7 +1334,7 @@ rb_thread_execute_interrupts(VALUE thval)
{
rb_thread_t *th;
GetThreadPtr(thval, th);
- rb_threadptr_execute_interrupts_rec(th, 0);
+ rb_threadptr_execute_interrupts_common(th);
}
void