summary refs log tree commit diff
path: root/thread_pthread.h
diff options
context:
space:
mode:
Diffstat (limited to 'thread_pthread.h')
-rw-r--r--  thread_pthread.h  183
1 files changed, 152 insertions, 31 deletions
diff --git a/thread_pthread.h b/thread_pthread.h
index b3e7c36cf0..992e9fb080 100644
--- a/thread_pthread.h
+++ b/thread_pthread.h
@@ -1,3 +1,5 @@
+#ifndef RUBY_THREAD_PTHREAD_H
+#define RUBY_THREAD_PTHREAD_H
/**********************************************************************
thread_pthread.h -
@@ -8,49 +10,168 @@
**********************************************************************/
-#ifndef RUBY_THREAD_PTHREAD_H
-#define RUBY_THREAD_PTHREAD_H
-
-#include <pthread.h>
#ifdef HAVE_PTHREAD_NP_H
#include <pthread_np.h>
#endif
-typedef pthread_t rb_thread_id_t;
-typedef pthread_mutex_t rb_thread_lock_t;
-typedef struct rb_thread_cond_struct {
- pthread_cond_t cond;
-#ifdef HAVE_CLOCKID_T
- clockid_t clockid;
+#define RB_NATIVETHREAD_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
+#define RB_NATIVETHREAD_COND_INIT PTHREAD_COND_INITIALIZER
+
+// TLS cannot be accessed across .so boundaries on arm64 and perhaps ppc64le too.
+#if defined(__arm64__) || defined(__aarch64__) || defined(__powerpc64__)
+# define RB_THREAD_CURRENT_EC_NOINLINE
+#endif
+
+// this data should be protected by timer_th.waiting_lock
+struct rb_thread_sched_waiting {
+ enum thread_sched_waiting_flag {
+ thread_sched_waiting_none = 0x00,
+ thread_sched_waiting_timeout = 0x01,
+ thread_sched_waiting_io_read = 0x02,
+ thread_sched_waiting_io_write = 0x08,
+ thread_sched_waiting_io_force = 0x40, // ignore readable
+ } flags;
+
+ struct {
+        // should be compatible with hrtime.h
+#ifdef MY_RUBY_BUILD_MAY_TIME_TRAVEL
+ int128_t timeout;
+#else
+ uint64_t timeout;
+#endif
+ uint32_t event_serial;
+ int fd; // -1 for timeout only
+ int result;
+ } data;
+
+ // connected to timer_th.waiting
+ struct ccan_list_node node;
+};
+
+// per-Thread scheduler helper data
+struct rb_thread_sched_item {
+ struct {
+ struct ccan_list_node ubf;
+
+        // connected to ractor->threads.sched.readyq
+ // locked by ractor->threads.sched.lock
+ struct ccan_list_node readyq;
+
+ // connected to vm->ractor.sched.timeslice_threads
+ // locked by vm->ractor.sched.lock
+ struct ccan_list_node timeslice_threads;
+
+ // connected to vm->ractor.sched.running_threads
+ // locked by vm->ractor.sched.lock
+ struct ccan_list_node running_threads;
+
+ // connected to vm->ractor.sched.zombie_threads
+ struct ccan_list_node zombie_threads;
+ } node;
+
+ struct rb_thread_sched_waiting waiting_reason;
+ uint32_t event_serial;
+
+ bool finished;
+ bool malloc_stack;
+ void *context_stack;
+ struct coroutine_context *context;
+};
+
+struct rb_native_thread {
+ rb_atomic_t serial;
+ struct rb_vm_struct *vm;
+
+ rb_nativethread_id_t thread_id;
+
+#ifdef RB_THREAD_T_HAS_NATIVE_ID
+ int tid;
+#endif
+
+ struct rb_thread_struct *running_thread;
+
+ // to control native thread
+#if defined(__GLIBC__) || defined(__FreeBSD__)
+ union
+#else
+ /*
+ * assume the platform condvars are badly implemented and have a
+ * "memory" of which mutex they're associated with
+ */
+ struct
+#endif
+ {
+ rb_nativethread_cond_t intr; /* th->interrupt_lock */
+ rb_nativethread_cond_t readyq; /* use sched->lock */
+ } cond;
+
+#ifdef USE_SIGALTSTACK
+ void *altstack;
#endif
-} rb_thread_cond_t;
-typedef struct native_thread_data_struct {
- void *signal_thread_list;
- rb_thread_cond_t sleep_cond;
-} native_thread_data_t;
+ struct coroutine_context *nt_context;
+ int dedicated;
-#include <semaphore.h>
+ size_t machine_stack_maxsize;
+};
#undef except
#undef try
#undef leave
#undef finally
-typedef struct rb_global_vm_lock_struct {
- /* fast path */
- unsigned long acquired;
- pthread_mutex_t lock;
-
- /* slow path */
- volatile unsigned long waiting;
- rb_thread_cond_t cond;
-
- /* yield */
- rb_thread_cond_t switch_cond;
- rb_thread_cond_t switch_wait_cond;
- int need_yield;
- int wait_yield;
-} rb_global_vm_lock_t;
+// per-Ractor
+struct rb_thread_sched {
+ rb_nativethread_lock_t lock_;
+#if VM_CHECK_MODE
+ struct rb_thread_struct *lock_owner;
+#endif
+ struct rb_thread_struct *running; // running thread or NULL
+ bool is_running;
+ bool is_running_timeslice;
+ bool enable_mn_threads;
+
+ struct ccan_list_head readyq;
+ int readyq_cnt;
+ // ractor scheduling
+ struct ccan_list_node grq_node;
+};
+
+#ifdef RB_THREAD_LOCAL_SPECIFIER
+ NOINLINE(void rb_current_ec_set(struct rb_execution_context_struct *));
+
+ # ifdef RB_THREAD_CURRENT_EC_NOINLINE
+ NOINLINE(struct rb_execution_context_struct *rb_current_ec(void));
+ # else
+ RUBY_EXTERN RB_THREAD_LOCAL_SPECIFIER struct rb_execution_context_struct *ruby_current_ec;
+
+ // for RUBY_DEBUG_LOG()
+ RUBY_EXTERN RB_THREAD_LOCAL_SPECIFIER rb_atomic_t ruby_nt_serial;
+ #define RUBY_NT_SERIAL 1
+ # endif
+#else
+typedef pthread_key_t native_tls_key_t;
+
+static inline void *
+native_tls_get(native_tls_key_t key)
+{
+ // return value should be checked by caller
+ return pthread_getspecific(key);
+}
+
+static inline void
+native_tls_set(native_tls_key_t key, void *ptr)
+{
+ if (UNLIKELY(pthread_setspecific(key, ptr) != 0)) {
+ rb_bug("pthread_setspecific error");
+ }
+}
+
+RUBY_EXTERN native_tls_key_t ruby_current_ec_key;
+#endif
+
+struct rb_ractor_struct;
+void rb_ractor_sched_wait(struct rb_execution_context_struct *ec, struct rb_ractor_struct *cr, rb_unblock_function_t *ubf, void *ptr);
+void rb_ractor_sched_wakeup(struct rb_ractor_struct *r, struct rb_thread_struct *th);
#endif /* RUBY_THREAD_PTHREAD_H */