summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--ChangeLog33
-rw-r--r--cont.c97
-rw-r--r--eval.c10
-rw-r--r--hash.c10
-rw-r--r--internal.h1
-rw-r--r--vm_core.h17
6 files changed, 166 insertions, 2 deletions
diff --git a/ChangeLog b/ChangeLog
index ebce55f698..64c15806f6 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,36 @@
+Sat Nov 16 02:13:56 2013 Masaya Tarui <tarui@ruby-lang.org>
+
+	* cont.c : Introduce ensure rollback mechanism. Please see below.
+
+ * internal.h (ruby_register_rollback_func_for_ensure): catch up above change.
+ Add rollback mechanism API.
+
+ * vm_core.h (typedef struct rb_vm_struct): catch up above change.
+	  Introduce ensure-rollback relation table.
+
+ * vm_core.h (typedef struct rb_thread_struct): catch up above change.
+	  Introduce ensure stack.
+
+ * eval.c (rb_ensure): catch up above change.
+	  Introduce ensure stack.
+
+ * hash.c : New function for rollback ensure, and register it to
+ ensure-rollback relation table. [ruby-dev:47803] [Bug #9105]
+
+	  Ensure Rollback Mechanism:
+	  A rollback function is a function that rolls back the state to what
+	  it was before the corresponding ensure function was executed.
+	  When a callcc jump crosses the scope of rb_ensure,
+	  the ensure functions and rollback functions are executed appropriately
+	  to keep the state consistent.
+
+	  The current API is unstable and for internal use only.
+
+ ruby_register_rollback_func_for_ensure(ensure_func,rollback_func)
+	  This API creates a relation from an ensure function to a rollback
+	  function. The registered rollback function is executed when jumping
+	  into the corresponding rb_ensure scope.
+
Sat Nov 16 00:18:36 2013 Masaki Matsushita <glass.saga@gmail.com>
* eval_jump.c (rb_exec_end_proc): fix double free or corruption error
diff --git a/cont.c b/cont.c
index 6ac7a1ea57..22eabb6451 100644
--- a/cont.c
+++ b/cont.c
@@ -107,6 +107,8 @@ typedef struct rb_context_struct {
rb_thread_t saved_thread;
rb_jmpbuf_t jmpbuf;
size_t machine_stack_size;
+ rb_ensure_entry_t *ensure_array;
+ rb_ensure_list_t *ensure_list;
} rb_context_t;
enum fiber_status {
@@ -223,6 +225,7 @@ cont_free(void *ptr)
#if FIBER_USE_NATIVE
if (cont->type == CONTINUATION_CONTEXT) {
/* cont */
+ ruby_xfree(cont->ensure_array);
RUBY_FREE_UNLESS_NULL(cont->machine_stack);
}
else {
@@ -253,6 +256,7 @@ cont_free(void *ptr)
#endif
}
#else /* not FIBER_USE_NATIVE */
+ ruby_xfree(cont->ensure_array);
RUBY_FREE_UNLESS_NULL(cont->machine_stack);
#endif
#ifdef __ia64
@@ -485,6 +489,22 @@ cont_capture(volatile int *stat)
cont_save_machine_stack(th, cont);
+ /* backup ensure_list to array for search in another context */
+ {
+ rb_ensure_list_t *p;
+ int size = 0;
+ rb_ensure_entry_t *entry;
+ for (p=th->ensure_list; p; p=p->next)
+ size++;
+ entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
+ for (p=th->ensure_list; p; p=p->next) {
+ if (!p->entry.marker)
+ p->entry.marker = rb_ary_tmp_new(0); /* dummy object */
+ *entry++ = p->entry;
+ }
+ entry->marker = 0;
+ }
+
if (ruby_setjmp(cont->jmpbuf)) {
volatile VALUE value;
@@ -546,6 +566,8 @@ cont_restore_thread(rb_context_t *cont)
th->first_proc = sth->first_proc;
th->root_lep = sth->root_lep;
th->root_svar = sth->root_svar;
+ th->ensure_list = sth->ensure_list;
+
}
#if FIBER_USE_NATIVE
@@ -917,6 +939,80 @@ make_passing_arg(int argc, VALUE *argv)
}
}
+/* CAUTION!! : Currently, errors in rollback_func are not supported */
+/* same as rb_protect if rollback_func is set to NULL */
+void
+ruby_register_rollback_func_for_ensure(VALUE (*ensure_func)(ANYARGS), VALUE (*rollback_func)(ANYARGS))
+{
+ st_table **table_p = &GET_VM()->ensure_rollback_table;
+ if (UNLIKELY(*table_p == NULL)) {
+ *table_p = st_init_numtable();
+ }
+ st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
+}
+
+static inline VALUE
+lookup_rollback_func(VALUE (*ensure_func)(ANYARGS))
+{
+ st_table *table = GET_VM()->ensure_rollback_table;
+ st_data_t val;
+ if (table && st_lookup(table, (st_data_t)ensure_func, &val))
+ return (VALUE) val;
+ return Qundef;
+}
+
+
+static inline void
+rollback_ensure_stack(VALUE self,rb_ensure_list_t *current,rb_ensure_entry_t *target)
+{
+ rb_ensure_list_t *p;
+ rb_ensure_entry_t *entry;
+ size_t i;
+ size_t cur_size;
+ size_t target_size;
+ size_t base_point;
+ VALUE (*func)(ANYARGS);
+
+ cur_size = 0;
+ for (p=current; p; p=p->next)
+ cur_size++;
+ target_size = 0;
+ for (entry=target; entry->marker; entry++)
+ target_size++;
+
+ /* search common stack point */
+ p = current;
+ base_point = cur_size;
+ while (base_point) {
+ if (target_size >= base_point &&
+ p->entry.marker == target[target_size - base_point].marker)
+ break;
+ base_point --;
+ p = p->next;
+ }
+
+ /* rollback function check */
+ for (i=0; i < target_size - base_point; i++) {
+ if (!lookup_rollback_func(target[i].e_proc)) {
+ rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
+ }
+ }
+ /* pop ensure stack */
+ while (cur_size > base_point) {
+ /* escape from ensure block */
+ (*current->entry.e_proc)(current->entry.data2);
+ current = current->next;
+ cur_size--;
+ }
+ /* push ensure stack */
+ while (i--) {
+ func = (VALUE (*)(ANYARGS)) lookup_rollback_func(target[i].e_proc);
+ if ((VALUE)func != Qundef) {
+ (*func)(target[i].data2);
+ }
+ }
+}
+
/*
* call-seq:
* cont.call(args, ...)
@@ -954,6 +1050,7 @@ rb_cont_call(int argc, VALUE *argv, VALUE contval)
rb_raise(rb_eRuntimeError, "continuation called across fiber");
}
}
+ rollback_ensure_stack(contval, th->ensure_list, cont->ensure_array);
cont->argc = argc;
cont->value = make_passing_arg(argc, argv);
diff --git a/eval.c b/eval.c
index 5b67e1a003..15b0db2ce8 100644
--- a/eval.c
+++ b/eval.c
@@ -840,7 +840,12 @@ rb_ensure(VALUE (*b_proc)(ANYARGS), VALUE data1, VALUE (*e_proc)(ANYARGS), VALUE
volatile VALUE result = Qnil;
volatile VALUE errinfo;
rb_thread_t *const th = GET_THREAD();
-
+ rb_ensure_list_t ensure_list;
+ ensure_list.entry.marker = 0;
+ ensure_list.entry.e_proc = e_proc;
+ ensure_list.entry.data2 = data2;
+ ensure_list.next = th->ensure_list;
+ th->ensure_list = &ensure_list;
PUSH_TAG();
if ((state = EXEC_TAG()) == 0) {
result = (*b_proc) (data1);
@@ -849,7 +854,8 @@ rb_ensure(VALUE (*b_proc)(ANYARGS), VALUE data1, VALUE (*e_proc)(ANYARGS), VALUE
/* TODO: fix me */
/* retval = prot_tag ? prot_tag->retval : Qnil; */ /* save retval */
errinfo = th->errinfo;
- (*e_proc) (data2);
+ th->ensure_list=ensure_list.next;
+ (*ensure_list.entry.e_proc)(ensure_list.entry.data2);
th->errinfo = errinfo;
if (state)
JUMP_TAG(state);
diff --git a/hash.c b/hash.c
index a37318be85..4ae939294f 100644
--- a/hash.c
+++ b/hash.c
@@ -201,6 +201,13 @@ hash_foreach_iter(st_data_t key, st_data_t value, st_data_t argp, int error)
}
static VALUE
+hash_foreach_ensure_rollback(VALUE hash)
+{
+ RHASH_ITER_LEV(hash)++;
+ return 0;
+}
+
+static VALUE
hash_foreach_ensure(VALUE hash)
{
if (--RHASH_ITER_LEV(hash) == 0) {
@@ -3765,4 +3772,7 @@ Init_Hash(void)
* See ENV (the class) for more details.
*/
rb_define_global_const("ENV", envtbl);
+
+ /* for callcc */
+ ruby_register_rollback_func_for_ensure(hash_foreach_ensure, hash_foreach_ensure_rollback);
}
diff --git a/internal.h b/internal.h
index 508e77d02e..cd2177afef 100644
--- a/internal.h
+++ b/internal.h
@@ -342,6 +342,7 @@ VALUE rb_insns_name_array(void);
/* cont.c */
VALUE rb_obj_is_fiber(VALUE);
void rb_fiber_reset_root_local_storage(VALUE);
+void ruby_register_rollback_func_for_ensure(VALUE (*ensure_func)(ANYARGS), VALUE (*rollback_func)(ANYARGS));
/* debug.c */
PRINTF_ARGS(void ruby_debug_printf(const char*, ...), 1, 2);
diff --git a/vm_core.h b/vm_core.h
index 44f871dd6f..94527bbfa6 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -387,6 +387,9 @@ typedef struct rb_vm_struct {
/* hook */
rb_hook_list_t event_hooks;
+ /* relation table of ensure - rollback for callcc */
+ struct st_table *ensure_rollback_table;
+
/* postponed_job */
struct rb_postponed_job_struct *postponed_job_buffer;
int postponed_job_index;
@@ -507,6 +510,17 @@ typedef struct rb_thread_list_struct{
} rb_thread_list_t;
+typedef struct rb_ensure_entry {
+ VALUE marker;
+ VALUE (*e_proc)(ANYARGS);
+ VALUE data2;
+} rb_ensure_entry_t;
+
+typedef struct rb_ensure_list {
+ struct rb_ensure_list *next;
+ struct rb_ensure_entry entry;
+} rb_ensure_list_t;
+
typedef struct rb_thread_struct {
VALUE self;
rb_vm_t *vm;
@@ -626,6 +640,9 @@ typedef struct rb_thread_struct {
VALUE root_fiber;
rb_jmpbuf_t root_jmpbuf;
+ /* ensure & callcc */
+ rb_ensure_list_t *ensure_list;
+
/* misc */
int method_missing_reason;
int abort_on_exception;