summaryrefslogtreecommitdiff
path: root/vm_sync.h
diff options
context:
space:
mode:
authorKoichi Sasada <ko1@atdot.net>2020-12-07 11:27:25 +0900
committerKoichi Sasada <ko1@atdot.net>2020-12-07 11:27:25 +0900
commit0ebf6bd0a26b637f019d480ecd7f00a73c416b58 (patch)
treeeae25239f844c5ef47c4ef6c6b3a2d1ded077841 /vm_sync.h
parent8dd03e5cf0d583ffb836cf27be5645a7d88ac736 (diff)
RB_VM_LOCK_ENTER_NO_BARRIER
The write barrier requires the VM lock because it accesses the VM-global bitmap, but RB_VM_LOCK_ENTER() can trigger GC because another ractor may be waiting to invoke GC and RB_VM_LOCK_ENTER() is a barrier point. This means that GC can run before the write barrier has taken effect. To prevent this situation, RB_VM_LOCK_ENTER_NO_BARRIER() is introduced. This lock primitive does not act as a GC barrier point.
Diffstat (limited to 'vm_sync.h')
-rw-r--r--vm_sync.h13
1 file changed, 13 insertions, 0 deletions
diff --git a/vm_sync.h b/vm_sync.h
index 14b63f3017..8712e1a1ca 100644
--- a/vm_sync.h
+++ b/vm_sync.h
@@ -22,6 +22,7 @@ void rb_vm_unlock_body(LOCATION_ARGS);
struct rb_ractor_struct;
void rb_vm_lock_enter_body_cr(struct rb_ractor_struct *cr, unsigned int *lev APPEND_LOCATION_ARGS);
+void rb_vm_lock_enter_body_nb(unsigned int *lev APPEND_LOCATION_ARGS);
void rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS);
void rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS);
void rb_vm_barrier(void);
@@ -72,6 +73,14 @@ rb_vm_lock_enter(unsigned int *lev, const char *file, int line)
}
static inline void
+rb_vm_lock_enter_nb(unsigned int *lev, const char *file, int line)
+{
+ if (rb_multi_ractor_p()) {
+ rb_vm_lock_enter_body_nb(lev APPEND_LOCATION_PARAMS);
+ }
+}
+
+static inline void
rb_vm_lock_leave(unsigned int *lev, const char *file, int line)
{
if (rb_multi_ractor_p()) {
@@ -104,6 +113,10 @@ rb_vm_lock_leave_cr(struct rb_ractor_struct *cr, unsigned int *levp, const char
#define RB_VM_LOCK_ENTER() { unsigned int _lev; RB_VM_LOCK_ENTER_LEV(&_lev);
#define RB_VM_LOCK_LEAVE() RB_VM_LOCK_LEAVE_LEV(&_lev); }
+#define RB_VM_LOCK_ENTER_LEV_NB(levp) rb_vm_lock_enter_nb(levp, __FILE__, __LINE__)
+#define RB_VM_LOCK_ENTER_NO_BARRIER() { unsigned int _lev; RB_VM_LOCK_ENTER_LEV_NB(&_lev);
+#define RB_VM_LOCK_LEAVE_NO_BARRIER() RB_VM_LOCK_LEAVE_LEV(&_lev); }
+
#if RUBY_DEBUG > 0
void ASSERT_vm_locking(void);
void ASSERT_vm_unlocking(void);