summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKoichi Sasada <ko1@atdot.net>2020-10-14 14:21:57 +0900
committerKoichi Sasada <ko1@atdot.net>2020-10-14 16:36:55 +0900
commitae693fff748c68ca2500bbc2c0a8802d50f269dc (patch)
treee96405ea3df4d2c03dce9cbe52503118005fc346
parent0714cb760c2b16f7d40e563b6ab9894553baf32c (diff)
fix releasing timing.
(1) recorded_lock_rec > current_lock_rec should not occur on rb_ec_vm_lock_rec_release(). (2) should release the VM lock at EXEC_TAG(), not POP_TAG(). (3) some refactoring.
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/3655
-rw-r--r--eval_intern.h26
-rw-r--r--vm_core.h10
-rw-r--r--vm_sync.c21
3 files changed, 30 insertions, 27 deletions
diff --git a/eval_intern.h b/eval_intern.h
index 0e5a8ae692..48e9c890c7 100644
--- a/eval_intern.h
+++ b/eval_intern.h
@@ -127,16 +127,6 @@ LONG WINAPI rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *);
rb_fiber_start(); \
} while (0)
-void rb_ec_vm_lock_rec_release(rb_execution_context_t *ec, int lock_rec);
-
-static inline void
-rb_ec_vm_lock_rec_check(rb_execution_context_t *ec, int lock_rec)
-{
- if (rb_ec_vm_lock_rec(ec) != lock_rec) {
- rb_ec_vm_lock_rec_release(ec, lock_rec);
- }
-}
-
#define EC_PUSH_TAG(ec) do { \
rb_execution_context_t * const _ec = (ec); \
struct rb_vm_tag _tag; \
@@ -146,7 +136,6 @@ rb_ec_vm_lock_rec_check(rb_execution_context_t *ec, int lock_rec)
_tag.lock_rec = rb_ec_vm_lock_rec(_ec); \
#define EC_POP_TAG() \
- rb_ec_vm_lock_rec_check(_ec, _tag.lock_rec); \
_ec->tag = _tag.prev; \
} while (0)
@@ -169,12 +158,23 @@ rb_ec_vm_lock_rec_check(rb_execution_context_t *ec, int lock_rec)
# define VAR_NOCLOBBERED(var) var
#endif
+static inline void
+rb_ec_vm_lock_rec_check(const rb_execution_context_t *ec, unsigned int recorded_lock_rec)
+{
+ unsigned int current_lock_rec = rb_ec_vm_lock_rec(ec);
+ if (current_lock_rec != recorded_lock_rec) {
+ rb_ec_vm_lock_rec_release(ec, recorded_lock_rec, current_lock_rec);
+ }
+}
+
/* clear ec->tag->state, and return the value */
static inline int
rb_ec_tag_state(const rb_execution_context_t *ec)
{
- enum ruby_tag_type state = ec->tag->state;
- ec->tag->state = TAG_NONE;
+ struct rb_vm_tag *tag = ec->tag;
+ enum ruby_tag_type state = tag->state;
+ tag->state = TAG_NONE;
+ rb_ec_vm_lock_rec_check(ec, tag->lock_rec);
return state;
}
diff --git a/vm_core.h b/vm_core.h
index f783bd5b99..e62d43d4aa 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -794,7 +794,7 @@ struct rb_vm_tag {
rb_jmpbuf_t buf;
struct rb_vm_tag *prev;
enum ruby_tag_type state;
- int lock_rec;
+ unsigned int lock_rec;
};
STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
@@ -1798,8 +1798,12 @@ rb_current_vm(void)
return ruby_current_vm_ptr;
}
-static inline int
-rb_ec_vm_lock_rec(rb_execution_context_t *ec)
+void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
+ unsigned int recorded_lock_rec,
+ unsigned int current_lock_rec);
+
+static inline unsigned int
+rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
{
rb_vm_t *vm = rb_ec_vm_ptr(ec);
diff --git a/vm_sync.c b/vm_sync.c
index 6b17ce83b5..b670984a13 100644
--- a/vm_sync.c
+++ b/vm_sync.c
@@ -116,6 +116,7 @@ vm_lock_leave(rb_vm_t *vm, unsigned int *lev APPEND_LOCATION_ARGS)
VM_ASSERT(vm->ractor.sync.lock_rec == *lev);
vm->ractor.sync.lock_rec--;
+ *lev = vm->ractor.sync.lock_rec;
if (vm->ractor.sync.lock_rec == 0) {
vm->ractor.sync.lock_owner = NULL;
@@ -248,21 +249,19 @@ rb_vm_barrier(void)
}
void
-rb_ec_vm_lock_rec_release(rb_execution_context_t *ec, int recorded_lock_rec)
+rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
+ unsigned int recorded_lock_rec,
+ unsigned int current_lock_rec)
{
- int current_lock_rec = rb_ec_vm_lock_rec(ec);
- unsigned int lev;
-
- bp();
+ VM_ASSERT(recorded_lock_rec != current_lock_rec);
- if (recorded_lock_rec > current_lock_rec) {
- for (; recorded_lock_rec > current_lock_rec; current_lock_rec++) {
- RB_VM_LOCK_ENTER_LEV(&lev);
- }
+ if (UNLIKELY(recorded_lock_rec > current_lock_rec)) {
+ rb_bug("unexpected situation - recordd:%u current:%u",
+ recorded_lock_rec, current_lock_rec);
}
else {
- for (; recorded_lock_rec < current_lock_rec; current_lock_rec--) {
- RB_VM_LOCK_LEAVE_LEV(&lev);
+ while (recorded_lock_rec < current_lock_rec) {
+ RB_VM_LOCK_LEAVE_LEV(&current_lock_rec);
}
}