Diffstat (limited to 'thread_pthread.h')
-rw-r--r--    thread_pthread.h    174
1 files changed, 138 insertions, 36 deletions
diff --git a/thread_pthread.h b/thread_pthread.h
index f2b7e598f7..992e9fb080 100644
--- a/thread_pthread.h
+++ b/thread_pthread.h
@@ -1,3 +1,5 @@
+#ifndef RUBY_THREAD_PTHREAD_H
+#define RUBY_THREAD_PTHREAD_H
 /**********************************************************************
 
   thread_pthread.h -
@@ -8,9 +10,6 @@
 
 **********************************************************************/
 
-#ifndef RUBY_THREAD_PTHREAD_H
-#define RUBY_THREAD_PTHREAD_H
-
 #ifdef HAVE_PTHREAD_NP_H
 #include <pthread_np.h>
 #endif
@@ -18,13 +17,80 @@
 #define RB_NATIVETHREAD_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
 #define RB_NATIVETHREAD_COND_INIT PTHREAD_COND_INITIALIZER
 
-typedef pthread_cond_t rb_nativethread_cond_t;
+// TLS can not be accessed across .so on arm64 and perhaps ppc64le too.
+#if defined(__arm64__) || defined(__aarch64__) || defined(__powerpc64__)
+# define RB_THREAD_CURRENT_EC_NOINLINE
+#endif
+
+// this data should be protected by timer_th.waiting_lock
+struct rb_thread_sched_waiting {
+    enum thread_sched_waiting_flag {
+        thread_sched_waiting_none     = 0x00,
+        thread_sched_waiting_timeout  = 0x01,
+        thread_sched_waiting_io_read  = 0x02,
+        thread_sched_waiting_io_write = 0x08,
+        thread_sched_waiting_io_force = 0x40, // ignore readable
+    } flags;
+
+    struct {
+        // should be compat with hrtime.h
+#ifdef MY_RUBY_BUILD_MAY_TIME_TRAVEL
+        int128_t timeout;
+#else
+        uint64_t timeout;
+#endif
+        uint32_t event_serial;
+        int fd; // -1 for timeout only
+        int result;
+    } data;
+
+    // connected to timer_th.waiting
+    struct ccan_list_node node;
+};
+
+// per-Thread scheduler helper data
+struct rb_thread_sched_item {
+    struct {
+        struct ccan_list_node ubf;
+
+        // connected to ractor->threads.sched.reqdyq
+        // locked by ractor->threads.sched.lock
+        struct ccan_list_node readyq;
+
+        // connected to vm->ractor.sched.timeslice_threads
+        // locked by vm->ractor.sched.lock
+        struct ccan_list_node timeslice_threads;
+
+        // connected to vm->ractor.sched.running_threads
+        // locked by vm->ractor.sched.lock
+        struct ccan_list_node running_threads;
 
-typedef struct native_thread_data_struct {
-    union {
-        struct list_node ubf;
-        struct list_node gvl;
+        // connected to vm->ractor.sched.zombie_threads
+        struct ccan_list_node zombie_threads;
     } node;
+
+    struct rb_thread_sched_waiting waiting_reason;
+    uint32_t event_serial;
+
+    bool finished;
+    bool malloc_stack;
+    void *context_stack;
+    struct coroutine_context *context;
+};
+
+struct rb_native_thread {
+    rb_atomic_t serial;
+    struct rb_vm_struct *vm;
+
+    rb_nativethread_id_t thread_id;
+
+#ifdef RB_THREAD_T_HAS_NATIVE_ID
+    int tid;
+#endif
+
+    struct rb_thread_struct *running_thread;
+
+    // to control native thread
 #if defined(__GLIBC__) || defined(__FreeBSD__)
     union
 #else
@@ -34,42 +100,78 @@ typedef struct native_thread_data_struct {
      */
     struct
 #endif
-    {
+    {
         rb_nativethread_cond_t intr; /* th->interrupt_lock */
-        rb_nativethread_cond_t gvlq; /* vm->gvl.lock */
+        rb_nativethread_cond_t readyq; /* use sched->lock */
     } cond;
-} native_thread_data_t;
+
+#ifdef USE_SIGALTSTACK
+    void *altstack;
+#endif
+
+    struct coroutine_context *nt_context;
+    int dedicated;
+
+    size_t machine_stack_maxsize;
+};
 
 #undef except
 #undef try
 #undef leave
 #undef finally
 
-typedef struct rb_global_vm_lock_struct {
-    /* fast path */
-    const struct rb_thread_struct *owner;
-    rb_nativethread_lock_t lock; /* AKA vm->gvl.lock */
+// per-Ractor
+struct rb_thread_sched {
+    rb_nativethread_lock_t lock_;
+#if VM_CHECK_MODE
+    struct rb_thread_struct *lock_owner;
+#endif
+    struct rb_thread_struct *running; // running thread or NULL
+    bool is_running;
+    bool is_running_timeslice;
+    bool enable_mn_threads;
 
-    /*
-     * slow path, protected by vm->gvl.lock
-     * - @waitq - FIFO queue of threads waiting for GVL
-     * - @timer - it handles timeslices for @owner. It is any one thread
-     *   in @waitq, there is no @timer if @waitq is empty, but always
-     *   a @timer if @waitq has entries
-     * - @timer_err tracks timeslice limit, the timeslice only resets
-     *   when pthread_cond_timedwait returns ETIMEDOUT, so frequent
-     *   switching between contended/uncontended GVL won't reset the
-     *   timer.
-     */
-    struct list_head waitq; /* <=> native_thread_data_t.node.ubf */
-    const struct rb_thread_struct *timer;
-    int timer_err;
-
-    /* yield */
-    rb_nativethread_cond_t switch_cond;
-    rb_nativethread_cond_t switch_wait_cond;
-    int need_yield;
-    int wait_yield;
-} rb_global_vm_lock_t;
+    struct ccan_list_head readyq;
+    int readyq_cnt;
+    // ractor scheduling
+    struct ccan_list_node grq_node;
+};
+
+#ifdef RB_THREAD_LOCAL_SPECIFIER
+  NOINLINE(void rb_current_ec_set(struct rb_execution_context_struct *));
+
+  # ifdef RB_THREAD_CURRENT_EC_NOINLINE
+  NOINLINE(struct rb_execution_context_struct *rb_current_ec(void));
+  # else
+  RUBY_EXTERN RB_THREAD_LOCAL_SPECIFIER struct rb_execution_context_struct *ruby_current_ec;
+
+  // for RUBY_DEBUG_LOG()
+  RUBY_EXTERN RB_THREAD_LOCAL_SPECIFIER rb_atomic_t ruby_nt_serial;
+  #define RUBY_NT_SERIAL 1
+  # endif
+#else
+typedef pthread_key_t native_tls_key_t;
+
+static inline void *
+native_tls_get(native_tls_key_t key)
+{
+    // return value should be checked by caller
+    return pthread_getspecific(key);
+}
+
+static inline void
+native_tls_set(native_tls_key_t key, void *ptr)
+{
+    if (UNLIKELY(pthread_setspecific(key, ptr) != 0)) {
+        rb_bug("pthread_setspecific error");
+    }
+}
+
+RUBY_EXTERN native_tls_key_t ruby_current_ec_key;
+#endif
+
+struct rb_ractor_struct;
+void rb_ractor_sched_wait(struct rb_execution_context_struct *ec, struct rb_ractor_struct *cr, rb_unblock_function_t *ubf, void *ptr);
+void rb_ractor_sched_wakeup(struct rb_ractor_struct *r, struct rb_thread_struct *th);
 
 #endif /* RUBY_THREAD_PTHREAD_H */
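
Note on the pthread_key_t fallback at the end of the diff: when RB_THREAD_LOCAL_SPECIFIER is unavailable, the header stores the current execution context in pthread-specific data via native_tls_get()/native_tls_set() keyed by ruby_current_ec_key. The following is a minimal standalone sketch of that same pattern, not Ruby code; the names current_ctx_key, ctx_set, ctx_get, and worker are illustrative only. Build with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t current_ctx_key; /* stands in for ruby_current_ec_key */

static void
ctx_set(void *ptr)
{
    /* mirrors native_tls_set(): treat a failed store as fatal */
    if (pthread_setspecific(current_ctx_key, ptr) != 0) {
        fprintf(stderr, "pthread_setspecific failed\n");
        abort();
    }
}

static void *
ctx_get(void)
{
    /* mirrors native_tls_get(): the caller must check for NULL */
    return pthread_getspecific(current_ctx_key);
}

static void *
worker(void *arg)
{
    ctx_set(arg); /* each native thread stores its own context pointer */
    printf("this thread sees: %s\n", (const char *)ctx_get());
    return NULL;
}

int
main(void)
{
    pthread_t t1, t2;
    static char a[] = "context-A", b[] = "context-B";

    pthread_key_create(&current_ctx_key, NULL); /* one key shared by all threads */
    pthread_create(&t1, NULL, worker, a);
    pthread_create(&t2, NULL, worker, b);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}

Each thread gets its own slot behind the shared key, which is why the per-thread lookup works even where a compiler-level thread-local (the RB_THREAD_LOCAL_SPECIFIER branch) cannot be used.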
