author     shyouhei <shyouhei@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2018-01-02 06:41:56 +0000
committer  shyouhei <shyouhei@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2018-01-02 06:41:56 +0000
commit     5471bf9cc2ee4cab19f9f306aced64d0649fd672 (patch)
tree       01ef4f1a10e7da2d773c1a07de3e9608da43f139
parent     8dc0c7c035eb2ef22608968aeca63f24ade0eee0 (diff)
offsetof(type, foo.bar) is (arguably) a GCCism
TL;DR: see http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2031.htm

Suppose we have:

    struct X { struct Y { z_t z; } y; } x;

Then you _can't_ portably write offsetof(struct X, y.z).  ISO C99
section 7.17 says nothing about such a situation.  At least clang
warns that this is an extension to the language (-Wextended-offsetof).

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61560 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
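For illustration, a minimal standalone sketch of the strictly conforming
alternative (names are hypothetical, not part of this commit; assumes a
C99 <stddef.h>):

    #include <stddef.h>

    typedef int z_t;                  /* stand-in for the commit's z_t */

    struct X { struct Y { z_t z; } y; };

    /* offsetof(struct X, y.z) is the extended form clang flags with
     * -Wextended-offsetof.  ISO C99 7.17 only defines offsetof for a
     * direct member, so the conforming spelling chains two of them;
     * the sum is identical because struct Y is embedded by value. */
    enum { X_Y_Z_OFFSET = offsetof(struct X, y) + offsetof(struct Y, z) };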
-rw-r--r--  thread_pthread.c   6
-rw-r--r--  variable.c        16
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/thread_pthread.c b/thread_pthread.c
index 686c219ecb..469c9748b2 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -1221,11 +1221,13 @@ static void
 ubf_wakeup_all_threads(void)
 {
     rb_thread_t *th;
+    native_thread_data_t *dat;
 
     if (!ubf_threads_empty()) {
         native_mutex_lock(&ubf_list_lock);
-        list_for_each(&ubf_list_head, th,
-                      native_thread_data.ubf_list) {
+        list_for_each(&ubf_list_head, dat, ubf_list) {
+            th = (rb_thread_t *)(
+                ((char *)dat) - offsetof(rb_thread_t, native_thread_data));
             ubf_wakeup_thread(th);
         }
         native_mutex_unlock(&ubf_list_lock);
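The replacement is the classic container-of idiom; a self-contained
sketch (hypothetical names, mirroring the hunk rather than quoting it):

    #include <stddef.h>

    struct member { int payload; };
    struct outer  { long before; struct member m; };

    /* Recover the enclosing struct from a pointer to its embedded
     * member by stepping back offsetof bytes.  offsetof on a direct
     * member is strictly conforming, unlike the nested foo.bar form. */
    static struct outer *
    outer_of(struct member *p)
    {
        return (struct outer *)((char *)p - offsetof(struct outer, m));
    }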
diff --git a/variable.c b/variable.c
index 0047338cb5..5a0c45159e 100644
--- a/variable.c
+++ b/variable.c
@@ -1846,10 +1846,8 @@ struct autoload_state {
     VALUE result;
     ID id;
     VALUE thread;
-    union {
-        struct list_node node;
-        struct list_head head;
-    } waitq;
+    struct list_node node;
+    struct list_head head;
 };
 
 struct autoload_data_i {
@@ -2102,11 +2100,11 @@ autoload_reset(VALUE arg)
     if (need_wakeups) {
         struct autoload_state *cur = 0, *nxt;
 
-        list_for_each_safe(&state->waitq.head, cur, nxt, waitq.node) {
+        list_for_each_safe(&state->head, cur, nxt, node) {
             VALUE th = cur->thread;
 
             cur->thread = Qfalse;
-            list_del_init(&cur->waitq.node); /* idempotent */
+            list_del_init(&cur->node); /* idempotent */
 
             /*
              * cur is stored on the stack of cur->waiting_th,
@@ -2141,7 +2139,7 @@ autoload_sleep_done(VALUE arg)
     struct autoload_state *state = (struct autoload_state *)arg;
 
     if (state->thread != Qfalse && rb_thread_to_be_killed(state->thread)) {
-        list_del(&state->waitq.node); /* idempotent after list_del_init */
+        list_del(&state->node); /* idempotent after list_del_init */
     }
 
     return Qfalse;
@@ -2177,13 +2175,13 @@ rb_autoload_load(VALUE mod, ID id)
          * autoload_reset will wake up any threads added to this
          * iff the GVL is released during autoload_require
          */
-        list_head_init(&state.waitq.head);
+        list_head_init(&state.head);
     }
     else if (state.thread == ele->state->thread) {
         return Qfalse;
     }
     else {
-        list_add_tail(&ele->state->waitq.head, &state.waitq.node);
+        list_add_tail(&ele->state->head, &state.node);
         rb_ensure(autoload_sleep, (VALUE)&state,
                   autoload_sleep_done, (VALUE)&state);
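The union flattening above matters because list-iteration macros paste
the member token straight into offsetof: with the old layout that token
was waitq.node, i.e. exactly the extended-offsetof form this commit
removes.  A sketch of roughly what a list_entry-style macro expands to
(assuming a ccan/list-style implementation; names hypothetical):

    #include <stddef.h>

    struct node_s  { struct node_s *next, *prev; };
    struct state_s { struct node_s node; };      /* now a direct member */

    /* The member argument lands inside offsetof, so it must name a
     * direct member for the expansion to stay within ISO C99 7.17. */
    #define ENTRY_OF(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* ENTRY_OF(n, struct state_s, node) is strictly conforming;
     * passing a nested designator such as waitq.node would not be. */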