summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--eval_intern.h14
-rw-r--r--vm_core.h14
-rw-r--r--vm_sync.c22
3 files changed, 49 insertions, 1 deletion
diff --git a/eval_intern.h b/eval_intern.h
index aa07ce3..0e5a8ae 100644
--- a/eval_intern.h
+++ b/eval_intern.h
@@ -127,14 +127,26 @@ LONG WINAPI rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *);
rb_fiber_start(); \
} while (0)
+void rb_ec_vm_lock_rec_release(rb_execution_context_t *ec, int lock_rec);
+
+static inline void
+rb_ec_vm_lock_rec_check(rb_execution_context_t *ec, int lock_rec)
+{
+ if (rb_ec_vm_lock_rec(ec) != lock_rec) {
+ rb_ec_vm_lock_rec_release(ec, lock_rec);
+ }
+}
+
#define EC_PUSH_TAG(ec) do { \
rb_execution_context_t * const _ec = (ec); \
struct rb_vm_tag _tag; \
_tag.state = TAG_NONE; \
_tag.tag = Qundef; \
- _tag.prev = _ec->tag;
+ _tag.prev = _ec->tag; \
+ _tag.lock_rec = rb_ec_vm_lock_rec(_ec); \
#define EC_POP_TAG() \
+ rb_ec_vm_lock_rec_check(_ec, _tag.lock_rec); \
_ec->tag = _tag.prev; \
} while (0)
diff --git a/vm_core.h b/vm_core.h
index 44f85ff..f783bd5 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -794,6 +794,7 @@ struct rb_vm_tag {
rb_jmpbuf_t buf;
struct rb_vm_tag *prev;
enum ruby_tag_type state;
+ int lock_rec;
};
STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
@@ -1797,6 +1798,19 @@ rb_current_vm(void)
return ruby_current_vm_ptr;
}
+static inline int
+rb_ec_vm_lock_rec(rb_execution_context_t *ec)
+{
+ rb_vm_t *vm = rb_ec_vm_ptr(ec);
+
+ if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
+ return 0;
+ }
+ else {
+ return vm->ractor.sync.lock_rec;
+ }
+}
+
#else
#error "unsupported thread model"
#endif
diff --git a/vm_sync.c b/vm_sync.c
index e3d0ffe..6b17ce8 100644
--- a/vm_sync.c
+++ b/vm_sync.c
@@ -246,3 +246,25 @@ rb_vm_barrier(void)
}
}
}
+
+void
+rb_ec_vm_lock_rec_release(rb_execution_context_t *ec, int recorded_lock_rec)
+{
+ int current_lock_rec = rb_ec_vm_lock_rec(ec);
+ unsigned int lev;
+
+ bp();
+
+ if (recorded_lock_rec > current_lock_rec) {
+ for (; recorded_lock_rec > current_lock_rec; current_lock_rec++) {
+ RB_VM_LOCK_ENTER_LEV(&lev);
+ }
+ }
+ else {
+ for (; recorded_lock_rec < current_lock_rec; current_lock_rec--) {
+ RB_VM_LOCK_LEAVE_LEV(&lev);
+ }
+ }
+
+ VM_ASSERT(recorded_lock_rec == rb_ec_vm_lock_rec(ec));
+}