author     Nobuyoshi Nakada <nobu@ruby-lang.org>  2022-03-30 16:36:31 +0900
committer  GitHub <noreply@github.com>            2022-03-30 20:36:31 +1300
commit     42a0bed351979cb4a59c641fa5f03e49609561fd (patch)
tree       615a891e8fa9fbf6f3747d070fc03302341974dc
parent     8d27d00af514153819e44eb8e5f4f8631830ae55 (diff)
Prefix ccan headers (#4568)
* Prefixed ccan headers
* Remove unprefixed names in ccan/build_assert
* Remove unprefixed names in ccan/check_type
* Remove unprefixed names in ccan/container_of
* Remove unprefixed names in ccan/list

Co-authored-by: Samuel Williams <samuel.williams@oriontransfer.co.nz>
Notes:
    Merged-By: ioquatix <samuel@codeotaku.com>
-rw-r--r--  ccan/build_assert/build_assert.h    12
-rw-r--r--  ccan/check_type/check_type.h        26
-rw-r--r--  ccan/container_of/container_of.h    48
-rw-r--r--  ccan/list/list.h                   585
-rw-r--r--  ccan/str/str.h                       9
-rw-r--r--  gc.c                                58
-rw-r--r--  io.c                                10
-rw-r--r--  mjit.c                              14
-rw-r--r--  mjit_worker.c                       36
-rw-r--r--  process.c                           38
-rw-r--r--  ractor.c                            18
-rw-r--r--  ractor_core.h                        4
-rw-r--r--  thread.c                            50
-rw-r--r--  thread_pthread.c                    50
-rw-r--r--  thread_pthread.h                     6
-rw-r--r--  thread_sync.c                       76
-rw-r--r--  variable.c                          32
-rw-r--r--  vm.c                                14
-rw-r--r--  vm_core.h                           22
-rw-r--r--  vm_dump.c                            2
-rw-r--r--  vm_sync.c                            4
-rw-r--r--  vm_trace.c                          20
22 files changed, 568 insertions, 566 deletions
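The rename is mechanical: every public ccan identifier gains a ccan_/CCAN_ prefix, presumably to keep these embedded headers from colliding with identically named symbols elsewhere, and all Ruby call sites are updated in lockstep. An illustrative before/after at a typical call site (the struct child/children names come from the list.h doc comments below, not from Ruby itself):

    /* Before: unprefixed ccan names. */
    struct list_head children;
    list_head_init(&children);
    list_add_tail(&children, &child->list);

    /* After this commit: same behavior, prefixed names. */
    struct ccan_list_head children;
    ccan_list_head_init(&children);
    ccan_list_add_tail(&children, &child->list);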
diff --git a/ccan/build_assert/build_assert.h b/ccan/build_assert/build_assert.h
index a04d1d4709..b846849241 100644
--- a/ccan/build_assert/build_assert.h
+++ b/ccan/build_assert/build_assert.h
@@ -3,7 +3,7 @@
#define CCAN_BUILD_ASSERT_H
/**
- * BUILD_ASSERT - assert a build-time dependency.
+ * CCAN_BUILD_ASSERT - assert a build-time dependency.
* @cond: the compile-time condition which must be true.
*
* Your compile will fail if the condition isn't true, or can't be evaluated
@@ -15,15 +15,15 @@
* static char *foo_to_char(struct foo *foo)
* {
* // This code needs string to be at start of foo.
- * BUILD_ASSERT(offsetof(struct foo, string) == 0);
+ * CCAN_BUILD_ASSERT(offsetof(struct foo, string) == 0);
* return (char *)foo;
* }
*/
-#define BUILD_ASSERT(cond) \
+#define CCAN_BUILD_ASSERT(cond) \
do { (void) sizeof(char [1 - 2*!(cond)]); } while(0)
/**
- * BUILD_ASSERT_OR_ZERO - assert a build-time dependency, as an expression.
+ * CCAN_BUILD_ASSERT_OR_ZERO - assert a build-time dependency, as an expression.
* @cond: the compile-time condition which must be true.
*
* Your compile will fail if the condition isn't true, or can't be evaluated
@@ -32,9 +32,9 @@
* Example:
* #define foo_to_char(foo) \
* ((char *)(foo) \
- * + BUILD_ASSERT_OR_ZERO(offsetof(struct foo, string) == 0))
+ * + CCAN_BUILD_ASSERT_OR_ZERO(offsetof(struct foo, string) == 0))
*/
-#define BUILD_ASSERT_OR_ZERO(cond) \
+#define CCAN_BUILD_ASSERT_OR_ZERO(cond) \
(sizeof(char [1 - 2*!(cond)]) - 1)
#endif /* CCAN_BUILD_ASSERT_H */
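The trick behind both macros is the array size: sizeof(char [1 - 2*!(cond)]) is sizeof(char [1]) when @cond holds, but the ill-formed char [-1] when it does not, so a false condition stops the build. A minimal usage sketch of the prefixed macro (the struct packet layout is a made-up example, not from this commit):

    #include <stdint.h>
    #include "ccan/build_assert/build_assert.h"

    struct packet {
        uint32_t length;
        unsigned char payload[60];
    };

    static void check_layout(void)
    {
        /* Compiles only while the wire format stays 64 bytes;
         * a false condition yields char[-1] and a build error. */
        CCAN_BUILD_ASSERT(sizeof(struct packet) == 64);
    }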
diff --git a/ccan/check_type/check_type.h b/ccan/check_type/check_type.h
index 1f77a535e4..e795ad71d0 100644
--- a/ccan/check_type/check_type.h
+++ b/ccan/check_type/check_type.h
@@ -3,7 +3,7 @@
#define CCAN_CHECK_TYPE_H
/**
- * check_type - issue a warning or build failure if type is not correct.
+ * ccan_check_type - issue a warning or build failure if type is not correct.
* @expr: the expression whose type we should check (not evaluated).
* @type: the exact type we expect the expression to be.
*
@@ -11,7 +11,7 @@
* argument is of the expected type. No type promotion of the expression is
* done: an unsigned int is not the same as an int!
*
- * check_type() always evaluates to 0.
+ * ccan_check_type() always evaluates to 0.
*
* If your compiler does not support typeof, then the best we can do is fail
* to compile if the sizes of the types are unequal (a less complete check).
@@ -19,11 +19,11 @@
* Example:
* // They should always pass a 64-bit value to _set_some_value!
* #define set_some_value(expr) \
- * _set_some_value((check_type((expr), uint64_t), (expr)))
+ * _set_some_value((ccan_check_type((expr), uint64_t), (expr)))
*/
/**
- * check_types_match - issue a warning or build failure if types are not same.
+ * ccan_check_types_match - issue a warning or build failure if types are not same.
* @expr1: the first expression (not evaluated).
* @expr2: the second expression (not evaluated).
*
@@ -31,7 +31,7 @@
* arguments are of identical types. No type promotion of the expressions is
* done: an unsigned int is not the same as an int!
*
- * check_types_match() always evaluates to 0.
+ * ccan_check_types_match() always evaluates to 0.
*
* If your compiler does not support typeof, then the best we can do is fail
* to compile if the sizes of the types are unequal (a less complete check).
@@ -39,25 +39,25 @@
* Example:
* // Do subtraction to get to enclosing type, but make sure that
* // pointer is of correct type for that member.
- * #define container_of(mbr_ptr, encl_type, mbr) \
- * (check_types_match((mbr_ptr), &((encl_type *)0)->mbr), \
+ * #define ccan_container_of(mbr_ptr, encl_type, mbr) \
+ * (ccan_check_types_match((mbr_ptr), &((encl_type *)0)->mbr), \
* ((encl_type *) \
* ((char *)(mbr_ptr) - offsetof(enclosing_type, mbr))))
*/
#if HAVE_TYPEOF
-#define check_type(expr, type) \
+#define ccan_check_type(expr, type) \
((typeof(expr) *)0 != (type *)0)
-#define check_types_match(expr1, expr2) \
+#define ccan_check_types_match(expr1, expr2) \
((typeof(expr1) *)0 != (typeof(expr2) *)0)
#else
#include "ccan/build_assert/build_assert.h"
/* Without typeof, we can only test the sizes. */
-#define check_type(expr, type) \
- BUILD_ASSERT_OR_ZERO(sizeof(expr) == sizeof(type))
+#define ccan_check_type(expr, type) \
+ CCAN_BUILD_ASSERT_OR_ZERO(sizeof(expr) == sizeof(type))
-#define check_types_match(expr1, expr2) \
- BUILD_ASSERT_OR_ZERO(sizeof(expr1) == sizeof(expr2))
+#define ccan_check_types_match(expr1, expr2) \
+ CCAN_BUILD_ASSERT_OR_ZERO(sizeof(expr1) == sizeof(expr2))
#endif /* HAVE_TYPEOF */
#endif /* CCAN_CHECK_TYPE_H */
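With typeof available, the prefixed macros compare pointer types ((typeof(expr) *)0 != (type *)0), which the compiler warns about or rejects on mismatch; without typeof they degrade to a size check via CCAN_BUILD_ASSERT_OR_ZERO. A small sketch of the wrapper pattern from the doc comment (the add_to_total wrapper itself is hypothetical):

    #include <stdint.h>
    #include "ccan/check_type/check_type.h"

    static uint64_t total;

    /* ccan_check_type() always evaluates to 0, so the comma expression
     * type-checks the argument and then passes the value through. */
    #define add_to_total(expr) \
        (total += (ccan_check_type((expr), uint64_t), (expr)))

    static void demo(void)
    {
        add_to_total((uint64_t)42);   /* fine: exactly uint64_t */
        /* add_to_total(42); would warn or fail: int is not uint64_t */
    }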
diff --git a/ccan/container_of/container_of.h b/ccan/container_of/container_of.h
index ae3e1fc81f..b30c347d57 100644
--- a/ccan/container_of/container_of.h
+++ b/ccan/container_of/container_of.h
@@ -4,7 +4,7 @@
#include "ccan/check_type/check_type.h"
/**
- * container_of - get pointer to enclosing structure
+ * ccan_container_of - get pointer to enclosing structure
* @member_ptr: pointer to the structure member
* @containing_type: the type this member is within
* @member: the name of this member within the structure.
@@ -24,18 +24,18 @@
*
* static struct info *foo_to_info(struct foo *foo)
* {
- * return container_of(foo, struct info, my_foo);
+ * return ccan_container_of(foo, struct info, my_foo);
* }
*/
-#define container_of(member_ptr, containing_type, member) \
+#define ccan_container_of(member_ptr, containing_type, member) \
((containing_type *) \
((char *)(member_ptr) \
- - container_off(containing_type, member)) \
- + check_types_match(*(member_ptr), ((containing_type *)0)->member))
+ - ccan_container_off(containing_type, member)) \
+ + ccan_check_types_match(*(member_ptr), ((containing_type *)0)->member))
/**
- * container_of_or_null - get pointer to enclosing structure, or NULL
+ * ccan_container_of_or_null - get pointer to enclosing structure, or NULL
* @member_ptr: pointer to the structure member
* @containing_type: the type this member is within
* @member: the name of this member within the structure.
@@ -56,21 +56,21 @@
*
* static struct info *foo_to_info_allowing_null(struct foo *foo)
* {
- * return container_of_or_null(foo, struct info, my_foo);
+ * return ccan_container_of_or_null(foo, struct info, my_foo);
* }
*/
static inline char *container_of_or_null_(void *member_ptr, size_t offset)
{
return member_ptr ? (char *)member_ptr - offset : NULL;
}
-#define container_of_or_null(member_ptr, containing_type, member) \
+#define ccan_container_of_or_null(member_ptr, containing_type, member) \
((containing_type *) \
- container_of_or_null_(member_ptr, \
- container_off(containing_type, member)) \
- + check_types_match(*(member_ptr), ((containing_type *)0)->member))
+ ccan_container_of_or_null_(member_ptr, \
+ ccan_container_off(containing_type, member)) \
+ + ccan_check_types_match(*(member_ptr), ((containing_type *)0)->member))
/**
- * container_off - get offset to enclosing structure
+ * ccan_container_off - get offset to enclosing structure
* @containing_type: the type this member is within
* @member: the name of this member within the structure.
*
@@ -89,15 +89,15 @@ static inline char *container_of_or_null_(void *member_ptr, size_t offset)
*
* static struct info *foo_to_info(struct foo *foo)
* {
- * size_t off = container_off(struct info, my_foo);
+ * size_t off = ccan_container_off(struct info, my_foo);
* return (void *)((char *)foo - off);
* }
*/
-#define container_off(containing_type, member) \
+#define ccan_container_off(containing_type, member) \
offsetof(containing_type, member)
/**
- * container_of_var - get pointer to enclosing structure using a variable
+ * ccan_container_of_var - get pointer to enclosing structure using a variable
* @member_ptr: pointer to the structure member
* @container_var: a pointer of same type as this member's container
* @member: the name of this member within the structure.
@@ -108,21 +108,21 @@ static inline char *container_of_or_null_(void *member_ptr, size_t offset)
* Example:
* static struct info *foo_to_i(struct foo *foo)
* {
- * struct info *i = container_of_var(foo, i, my_foo);
+ * struct info *i = ccan_container_of_var(foo, i, my_foo);
* return i;
* }
*/
#if HAVE_TYPEOF
-#define container_of_var(member_ptr, container_var, member) \
- container_of(member_ptr, typeof(*container_var), member)
+#define ccan_container_of_var(member_ptr, container_var, member) \
+ ccan_container_of(member_ptr, typeof(*container_var), member)
#else
-#define container_of_var(member_ptr, container_var, member) \
+#define ccan_container_of_var(member_ptr, container_var, member) \
((void *)((char *)(member_ptr) - \
- container_off_var(container_var, member)))
+ ccan_container_off_var(container_var, member)))
#endif
/**
- * container_off_var - get offset of a field in enclosing structure
+ * ccan_container_off_var - get offset of a field in enclosing structure
* @container_var: a pointer to a container structure
* @member: the name of a member within the structure.
*
@@ -132,10 +132,10 @@ static inline char *container_of_or_null_(void *member_ptr, size_t offset)
*
*/
#if HAVE_TYPEOF
-#define container_off_var(var, member) \
- container_off(typeof(*var), member)
+#define ccan_container_off_var(var, member) \
+ ccan_container_off(typeof(*var), member)
#else
-#define container_off_var(var, member) \
+#define ccan_container_off_var(var, member) \
((const char *)&(var)->member - (const char *)(var))
#endif
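All of these reduce to pointer arithmetic around offsetof, with ccan_check_types_match() bolted on so the member pointer must really have the member's type. A self-contained sketch of the common case (the types are illustrative, following the doc-comment examples):

    #include <stddef.h>
    #include "ccan/container_of/container_of.h"

    struct foo { int x; };

    struct info {
        int status;
        struct foo my_foo;
    };

    /* Recover the enclosing struct info from a pointer to its member. */
    static struct info *foo_to_info(struct foo *foo)
    {
        return ccan_container_of(foo, struct info, my_foo);
    }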
diff --git a/ccan/list/list.h b/ccan/list/list.h
index c434ad8106..91787bfdb3 100644
--- a/ccan/list/list.h
+++ b/ccan/list/list.h
@@ -7,7 +7,7 @@
#include "ccan/check_type/check_type.h"
/**
- * struct list_node - an entry in a doubly-linked list
+ * struct ccan_list_node - an entry in a doubly-linked list
* @next: next entry (self if empty)
* @prev: previous entry (self if empty)
*
@@ -16,209 +16,209 @@
* struct child {
* const char *name;
* // Linked list of all us children.
- * struct list_node list;
+ * struct ccan_list_node list;
* };
*/
-struct list_node
+struct ccan_list_node
{
- struct list_node *next, *prev;
+ struct ccan_list_node *next, *prev;
};
/**
- * struct list_head - the head of a doubly-linked list
- * @h: the list_head (containing next and prev pointers)
+ * struct ccan_list_head - the head of a doubly-linked list
+ * @h: the ccan_list_head (containing next and prev pointers)
*
* This is used as the head of a linked list.
* Example:
* struct parent {
* const char *name;
- * struct list_head children;
+ * struct ccan_list_head children;
* unsigned int num_children;
* };
*/
-struct list_head
+struct ccan_list_head
{
- struct list_node n;
+ struct ccan_list_node n;
};
-#define LIST_LOC __FILE__ ":" stringify(__LINE__)
-#define list_debug(h, loc) ((void)loc, h)
-#define list_debug_node(n, loc) ((void)loc, n)
+#define CCAN_LIST_LOC __FILE__ ":" ccan_stringify(__LINE__)
+#define ccan_list_debug(h, loc) ((void)loc, h)
+#define ccan_list_debug_node(n, loc) ((void)loc, n)
/**
- * LIST_HEAD_INIT - initializer for an empty list_head
+ * CCAN_LIST_HEAD_INIT - initializer for an empty ccan_list_head
* @name: the name of the list.
*
* Explicit initializer for an empty list.
*
* See also:
- * LIST_HEAD, list_head_init()
+ * CCAN_LIST_HEAD, ccan_list_head_init()
*
* Example:
- * static struct list_head my_list = LIST_HEAD_INIT(my_list);
+ * static struct ccan_list_head my_list = CCAN_LIST_HEAD_INIT(my_list);
*/
-#define LIST_HEAD_INIT(name) { { &(name).n, &(name).n } }
+#define CCAN_LIST_HEAD_INIT(name) { { &(name).n, &(name).n } }
/**
- * LIST_HEAD - define and initialize an empty list_head
+ * CCAN_LIST_HEAD - define and initialize an empty ccan_list_head
* @name: the name of the list.
*
- * The LIST_HEAD macro defines a list_head and initializes it to an empty
- * list. It can be prepended by "static" to define a static list_head.
+ * The CCAN_LIST_HEAD macro defines a ccan_list_head and initializes it to an empty
+ * list. It can be prepended by "static" to define a static ccan_list_head.
*
* See also:
- * LIST_HEAD_INIT, list_head_init()
+ * CCAN_LIST_HEAD_INIT, ccan_list_head_init()
*
* Example:
- * static LIST_HEAD(my_global_list);
+ * static CCAN_LIST_HEAD(my_global_list);
*/
-#define LIST_HEAD(name) \
- struct list_head name = LIST_HEAD_INIT(name)
+#define CCAN_LIST_HEAD(name) \
+ struct ccan_list_head name = CCAN_LIST_HEAD_INIT(name)
/**
- * list_head_init - initialize a list_head
- * @h: the list_head to set to the empty list
+ * ccan_list_head_init - initialize a ccan_list_head
+ * @h: the ccan_list_head to set to the empty list
*
* Example:
* ...
* struct parent *parent = malloc(sizeof(*parent));
*
- * list_head_init(&parent->children);
+ * ccan_list_head_init(&parent->children);
* parent->num_children = 0;
*/
-static inline void list_head_init(struct list_head *h)
+static inline void ccan_list_head_init(struct ccan_list_head *h)
{
h->n.next = h->n.prev = &h->n;
}
/**
- * list_node_init - initialize a list_node
- * @n: the list_node to link to itself.
+ * ccan_list_node_init - initialize a ccan_list_node
+ * @n: the ccan_list_node to link to itself.
*
- * You don't need to use this normally! But it lets you list_del(@n)
+ * You don't need to use this normally! But it lets you ccan_list_del(@n)
* safely.
*/
-static inline void list_node_init(struct list_node *n)
+static inline void ccan_list_node_init(struct ccan_list_node *n)
{
n->next = n->prev = n;
}
/**
- * list_add_after - add an entry after an existing node in a linked list
- * @h: the list_head to add the node to (for debugging)
- * @p: the existing list_node to add the node after
- * @n: the new list_node to add to the list.
+ * ccan_list_add_after - add an entry after an existing node in a linked list
+ * @h: the ccan_list_head to add the node to (for debugging)
+ * @p: the existing ccan_list_node to add the node after
+ * @n: the new ccan_list_node to add to the list.
*
- * The existing list_node must already be a member of the list.
- * The new list_node does not need to be initialized; it will be overwritten.
+ * The existing ccan_list_node must already be a member of the list.
+ * The new ccan_list_node does not need to be initialized; it will be overwritten.
*
* Example:
* struct child c1, c2, c3;
- * LIST_HEAD(h);
+ * CCAN_LIST_HEAD(h);
*
- * list_add_tail(&h, &c1.list);
- * list_add_tail(&h, &c3.list);
- * list_add_after(&h, &c1.list, &c2.list);
+ * ccan_list_add_tail(&h, &c1.list);
+ * ccan_list_add_tail(&h, &c3.list);
+ * ccan_list_add_after(&h, &c1.list, &c2.list);
*/
-#define list_add_after(h, p, n) list_add_after_(h, p, n, LIST_LOC)
-static inline void list_add_after_(struct list_head *h,
- struct list_node *p,
- struct list_node *n,
+#define ccan_list_add_after(h, p, n) ccan_list_add_after_(h, p, n, CCAN_LIST_LOC)
+static inline void ccan_list_add_after_(struct ccan_list_head *h,
+ struct ccan_list_node *p,
+ struct ccan_list_node *n,
const char *abortstr)
{
n->next = p->next;
n->prev = p;
p->next->prev = n;
p->next = n;
- (void)list_debug(h, abortstr);
+ (void)ccan_list_debug(h, abortstr);
}
/**
- * list_add - add an entry at the start of a linked list.
- * @h: the list_head to add the node to
- * @n: the list_node to add to the list.
+ * ccan_list_add - add an entry at the start of a linked list.
+ * @h: the ccan_list_head to add the node to
+ * @n: the ccan_list_node to add to the list.
*
- * The list_node does not need to be initialized; it will be overwritten.
+ * The ccan_list_node does not need to be initialized; it will be overwritten.
* Example:
* struct child *child = malloc(sizeof(*child));
*
* child->name = "marvin";
- * list_add(&parent->children, &child->list);
+ * ccan_list_add(&parent->children, &child->list);
* parent->num_children++;
*/
-#define list_add(h, n) list_add_(h, n, LIST_LOC)
-static inline void list_add_(struct list_head *h,
- struct list_node *n,
+#define ccan_list_add(h, n) ccan_list_add_(h, n, CCAN_LIST_LOC)
+static inline void ccan_list_add_(struct ccan_list_head *h,
+ struct ccan_list_node *n,
const char *abortstr)
{
- list_add_after_(h, &h->n, n, abortstr);
+ ccan_list_add_after_(h, &h->n, n, abortstr);
}
/**
- * list_add_before - add an entry before an existing node in a linked list
- * @h: the list_head to add the node to (for debugging)
- * @p: the existing list_node to add the node before
- * @n: the new list_node to add to the list.
+ * ccan_list_add_before - add an entry before an existing node in a linked list
+ * @h: the ccan_list_head to add the node to (for debugging)
+ * @p: the existing ccan_list_node to add the node before
+ * @n: the new ccan_list_node to add to the list.
*
- * The existing list_node must already be a member of the list.
- * The new list_node does not need to be initialized; it will be overwritten.
+ * The existing ccan_list_node must already be a member of the list.
+ * The new ccan_list_node does not need to be initialized; it will be overwritten.
*
* Example:
- * list_head_init(&h);
- * list_add_tail(&h, &c1.list);
- * list_add_tail(&h, &c3.list);
- * list_add_before(&h, &c3.list, &c2.list);
- */
-#define list_add_before(h, p, n) list_add_before_(h, p, n, LIST_LOC)
-static inline void list_add_before_(struct list_head *h,
- struct list_node *p,
- struct list_node *n,
+ * ccan_list_head_init(&h);
+ * ccan_list_add_tail(&h, &c1.list);
+ * ccan_list_add_tail(&h, &c3.list);
+ * ccan_list_add_before(&h, &c3.list, &c2.list);
+ */
+#define ccan_list_add_before(h, p, n) ccan_list_add_before_(h, p, n, CCAN_LIST_LOC)
+static inline void ccan_list_add_before_(struct ccan_list_head *h,
+ struct ccan_list_node *p,
+ struct ccan_list_node *n,
const char *abortstr)
{
n->next = p;
n->prev = p->prev;
p->prev->next = n;
p->prev = n;
- (void)list_debug(h, abortstr);
+ (void)ccan_list_debug(h, abortstr);
}
/**
- * list_add_tail - add an entry at the end of a linked list.
- * @h: the list_head to add the node to
- * @n: the list_node to add to the list.
+ * ccan_list_add_tail - add an entry at the end of a linked list.
+ * @h: the ccan_list_head to add the node to
+ * @n: the ccan_list_node to add to the list.
*
- * The list_node does not need to be initialized; it will be overwritten.
+ * The ccan_list_node does not need to be initialized; it will be overwritten.
* Example:
- * list_add_tail(&parent->children, &child->list);
+ * ccan_list_add_tail(&parent->children, &child->list);
* parent->num_children++;
*/
-#define list_add_tail(h, n) list_add_tail_(h, n, LIST_LOC)
-static inline void list_add_tail_(struct list_head *h,
- struct list_node *n,
+#define ccan_list_add_tail(h, n) ccan_list_add_tail_(h, n, CCAN_LIST_LOC)
+static inline void ccan_list_add_tail_(struct ccan_list_head *h,
+ struct ccan_list_node *n,
const char *abortstr)
{
- list_add_before_(h, &h->n, n, abortstr);
+ ccan_list_add_before_(h, &h->n, n, abortstr);
}
/**
- * list_empty - is a list empty?
- * @h: the list_head
+ * ccan_list_empty - is a list empty?
+ * @h: the ccan_list_head
*
* If the list is empty, returns true.
*
* Example:
- * assert(list_empty(&parent->children) == (parent->num_children == 0));
+ * assert(ccan_list_empty(&parent->children) == (parent->num_children == 0));
*/
-#define list_empty(h) list_empty_(h, LIST_LOC)
-static inline int list_empty_(const struct list_head *h, const char* abortstr)
+#define ccan_list_empty(h) ccan_list_empty_(h, CCAN_LIST_LOC)
+static inline int ccan_list_empty_(const struct ccan_list_head *h, const char* abortstr)
{
- (void)list_debug(h, abortstr);
+ (void)ccan_list_debug(h, abortstr);
return h->n.next == &h->n;
}
/**
- * list_empty_nodebug - is a list empty (and don't perform debug checks)?
- * @h: the list_head
+ * ccan_list_empty_nodebug - is a list empty (and don't perform debug checks)?
+ * @h: the ccan_list_head
*
* If the list is empty, returns true.
* This differs from list_empty() in that if CCAN_LIST_DEBUG is set it
@@ -226,20 +226,20 @@ static inline int list_empty_(const struct list_head *h, const char* abortstr)
* know what you're doing.
*
* Example:
- * assert(list_empty_nodebug(&parent->children) == (parent->num_children == 0));
+ * assert(ccan_list_empty_nodebug(&parent->children) == (parent->num_children == 0));
*/
#ifndef CCAN_LIST_DEBUG
-#define list_empty_nodebug(h) list_empty(h)
+#define ccan_list_empty_nodebug(h) ccan_list_empty(h)
#else
-static inline int list_empty_nodebug(const struct list_head *h)
+static inline int ccan_list_empty_nodebug(const struct ccan_list_head *h)
{
return h->n.next == &h->n;
}
#endif
/**
- * list_empty_nocheck - is a list empty?
- * @h: the list_head
+ * ccan_list_empty_nocheck - is a list empty?
+ * @h: the ccan_list_head
*
* If the list is empty, returns true. This doesn't perform any
* debug check for list consistency, so it can be called without
@@ -247,29 +247,29 @@ static inline int list_empty_nodebug(const struct list_head *h)
* checks where an incorrect result is not an issue (optimized
* bail out path for example).
*/
-static inline bool list_empty_nocheck(const struct list_head *h)
+static inline bool ccan_list_empty_nocheck(const struct ccan_list_head *h)
{
return h->n.next == &h->n;
}
/**
- * list_del - delete an entry from an (unknown) linked list.
- * @n: the list_node to delete from the list.
+ * ccan_list_del - delete an entry from an (unknown) linked list.
+ * @n: the ccan_list_node to delete from the list.
*
* Note that this leaves @n in an undefined state; it can be added to
* another list, but not deleted again.
*
* See also:
- * list_del_from(), list_del_init()
+ * ccan_list_del_from(), ccan_list_del_init()
*
* Example:
- * list_del(&child->list);
+ * ccan_list_del(&child->list);
* parent->num_children--;
*/
-#define list_del(n) list_del_(n, LIST_LOC)
-static inline void list_del_(struct list_node *n, const char* abortstr)
+#define ccan_list_del(n) ccan_list_del_(n, CCAN_LIST_LOC)
+static inline void ccan_list_del_(struct ccan_list_node *n, const char* abortstr)
{
- (void)list_debug_node(n, abortstr);
+ (void)ccan_list_debug_node(n, abortstr);
n->next->prev = n->prev;
n->prev->next = n->next;
#ifdef CCAN_LIST_DEBUG
@@ -279,80 +279,80 @@ static inline void list_del_(struct list_node *n, const char* abortstr)
}
/**
- * list_del_init - delete a node, and reset it so it can be deleted again.
- * @n: the list_node to be deleted.
+ * ccan_list_del_init - delete a node, and reset it so it can be deleted again.
+ * @n: the ccan_list_node to be deleted.
*
- * list_del(@n) or list_del_init() again after this will be safe,
+ * ccan_list_del(@n) or ccan_list_del_init() again after this will be safe,
* which can be useful in some cases.
*
* See also:
- * list_del_from(), list_del()
+ * ccan_list_del_from(), ccan_list_del()
*
* Example:
- * list_del_init(&child->list);
+ * ccan_list_del_init(&child->list);
* parent->num_children--;
*/
-#define list_del_init(n) list_del_init_(n, LIST_LOC)
-static inline void list_del_init_(struct list_node *n, const char *abortstr)
+#define ccan_list_del_init(n) ccan_list_del_init_(n, CCAN_LIST_LOC)
+static inline void ccan_list_del_init_(struct ccan_list_node *n, const char *abortstr)
{
- list_del_(n, abortstr);
- list_node_init(n);
+ ccan_list_del_(n, abortstr);
+ ccan_list_node_init(n);
}
/**
- * list_del_from - delete an entry from a known linked list.
- * @h: the list_head the node is in.
- * @n: the list_node to delete from the list.
+ * ccan_list_del_from - delete an entry from a known linked list.
+ * @h: the ccan_list_head the node is in.
+ * @n: the ccan_list_node to delete from the list.
*
* This explicitly indicates which list a node is expected to be in,
* which is better documentation and can catch more bugs.
*
- * See also: list_del()
+ * See also: ccan_list_del()
*
* Example:
- * list_del_from(&parent->children, &child->list);
+ * ccan_list_del_from(&parent->children, &child->list);
* parent->num_children--;
*/
-static inline void list_del_from(struct list_head *h, struct list_node *n)
+static inline void ccan_list_del_from(struct ccan_list_head *h, struct ccan_list_node *n)
{
#ifdef CCAN_LIST_DEBUG
{
/* Thorough check: make sure it was in list! */
- struct list_node *i;
+ struct ccan_list_node *i;
for (i = h->n.next; i != n; i = i->next)
assert(i != &h->n);
}
#endif /* CCAN_LIST_DEBUG */
/* Quick test that catches a surprising number of bugs. */
- assert(!list_empty(h));
- list_del(n);
+ assert(!ccan_list_empty(h));
+ ccan_list_del(n);
}
/**
- * list_swap - swap out an entry from an (unknown) linked list for a new one.
- * @o: the list_node to replace from the list.
- * @n: the list_node to insert in place of the old one.
+ * ccan_list_swap - swap out an entry from an (unknown) linked list for a new one.
+ * @o: the ccan_list_node to replace from the list.
+ * @n: the ccan_list_node to insert in place of the old one.
*
* Note that this leaves @o in an undefined state; it can be added to
* another list, but not deleted/swapped again.
*
* See also:
- * list_del()
+ * ccan_list_del()
*
* Example:
* struct child x1, x2;
- * LIST_HEAD(xh);
+ * CCAN_LIST_HEAD(xh);
*
- * list_add(&xh, &x1.list);
- * list_swap(&x1.list, &x2.list);
+ * ccan_list_add(&xh, &x1.list);
+ * ccan_list_swap(&x1.list, &x2.list);
*/
-#define list_swap(o, n) list_swap_(o, n, LIST_LOC)
-static inline void list_swap_(struct list_node *o,
- struct list_node *n,
+#define ccan_list_swap(o, n) ccan_list_swap_(o, n, CCAN_LIST_LOC)
+static inline void ccan_list_swap_(struct ccan_list_node *o,
+ struct ccan_list_node *n,
const char* abortstr)
{
- (void)list_debug_node(o, abortstr);
+ (void)ccan_list_debug_node(o, abortstr);
*n = *o;
n->next->prev = n;
n->prev->next = n;
@@ -363,135 +363,135 @@ static inline void list_swap_(struct list_node *o,
}
/**
- * list_entry - convert a list_node back into the structure containing it.
- * @n: the list_node
+ * ccan_list_entry - convert a ccan_list_node back into the structure containing it.
+ * @n: the ccan_list_node
* @type: the type of the entry
- * @member: the list_node member of the type
+ * @member: the ccan_list_node member of the type
*
* Example:
* // First list entry is children.next; convert back to child.
- * child = list_entry(parent->children.n.next, struct child, list);
+ * child = ccan_list_entry(parent->children.n.next, struct child, list);
*
* See Also:
- * list_top(), list_for_each()
+ * ccan_list_top(), ccan_list_for_each()
*/
-#define list_entry(n, type, member) container_of(n, type, member)
+#define ccan_list_entry(n, type, member) ccan_container_of(n, type, member)
/**
- * list_top - get the first entry in a list
- * @h: the list_head
+ * ccan_list_top - get the first entry in a list
+ * @h: the ccan_list_head
* @type: the type of the entry
- * @member: the list_node member of the type
+ * @member: the ccan_list_node member of the type
*
* If the list is empty, returns NULL.
*
* Example:
* struct child *first;
- * first = list_top(&parent->children, struct child, list);
+ * first = ccan_list_top(&parent->children, struct child, list);
* if (!first)
* printf("Empty list!\n");
*/
-#define list_top(h, type, member) \
- ((type *)list_top_((h), list_off_(type, member)))
+#define ccan_list_top(h, type, member) \
+ ((type *)ccan_list_top_((h), ccan_list_off_(type, member)))
-static inline const void *list_top_(const struct list_head *h, size_t off)
+static inline const void *ccan_list_top_(const struct ccan_list_head *h, size_t off)
{
- if (list_empty(h))
+ if (ccan_list_empty(h))
return NULL;
return (const char *)h->n.next - off;
}
/**
- * list_pop - remove the first entry in a list
- * @h: the list_head
+ * ccan_list_pop - remove the first entry in a list
+ * @h: the ccan_list_head
* @type: the type of the entry
- * @member: the list_node member of the type
+ * @member: the ccan_list_node member of the type
*
* If the list is empty, returns NULL.
*
* Example:
* struct child *one;
- * one = list_pop(&parent->children, struct child, list);
+ * one = ccan_list_pop(&parent->children, struct child, list);
* if (!one)
* printf("Empty list!\n");
*/
-#define list_pop(h, type, member) \
- ((type *)list_pop_((h), list_off_(type, member)))
+#define ccan_list_pop(h, type, member) \
+ ((type *)ccan_list_pop_((h), ccan_list_off_(type, member)))
-static inline const void *list_pop_(const struct list_head *h, size_t off)
+static inline const void *ccan_list_pop_(const struct ccan_list_head *h, size_t off)
{
- struct list_node *n;
+ struct ccan_list_node *n;
- if (list_empty(h))
+ if (ccan_list_empty(h))
return NULL;
n = h->n.next;
- list_del(n);
+ ccan_list_del(n);
return (const char *)n - off;
}
/**
- * list_tail - get the last entry in a list
- * @h: the list_head
+ * ccan_list_tail - get the last entry in a list
+ * @h: the ccan_list_head
* @type: the type of the entry
- * @member: the list_node member of the type
+ * @member: the ccan_list_node member of the type
*
* If the list is empty, returns NULL.
*
* Example:
* struct child *last;
- * last = list_tail(&parent->children, struct child, list);
+ * last = ccan_list_tail(&parent->children, struct child, list);
* if (!last)
* printf("Empty list!\n");
*/
-#define list_tail(h, type, member) \
- ((type *)list_tail_((h), list_off_(type, member)))
+#define ccan_list_tail(h, type, member) \
+ ((type *)ccan_list_tail_((h), ccan_list_off_(type, member)))
-static inline const void *list_tail_(const struct list_head *h, size_t off)
+static inline const void *ccan_list_tail_(const struct ccan_list_head *h, size_t off)
{
- if (list_empty(h))
+ if (ccan_list_empty(h))
return NULL;
return (const char *)h->n.prev - off;
}
/**
- * list_for_each - iterate through a list.
- * @h: the list_head (warning: evaluated multiple times!)
- * @i: the structure containing the list_node
- * @member: the list_node member of the structure
+ * ccan_list_for_each - iterate through a list.
+ * @h: the ccan_list_head (warning: evaluated multiple times!)
+ * @i: the structure containing the ccan_list_node
+ * @member: the ccan_list_node member of the structure
*
* This is a convenient wrapper to iterate @i over the entire list. It's
* a for loop, so you can break and continue as normal.
*
* Example:
- * list_for_each(&parent->children, child, list)
+ * ccan_list_for_each(&parent->children, child, list)
* printf("Name: %s\n", child->name);
*/
-#define list_for_each(h, i, member) \
- list_for_each_off(h, i, list_off_var_(i, member))
+#define ccan_list_for_each(h, i, member) \
+ ccan_list_for_each_off(h, i, ccan_list_off_var_(i, member))
/**
- * list_for_each_rev - iterate through a list backwards.
- * @h: the list_head
- * @i: the structure containing the list_node
- * @member: the list_node member of the structure
+ * ccan_list_for_each_rev - iterate through a list backwards.
+ * @h: the ccan_list_head
+ * @i: the structure containing the ccan_list_node
+ * @member: the ccan_list_node member of the structure
*
* This is a convenient wrapper to iterate @i over the entire list. It's
* a for loop, so you can break and continue as normal.
*
* Example:
- * list_for_each_rev(&parent->children, child, list)
+ * ccan_list_for_each_rev(&parent->children, child, list)
* printf("Name: %s\n", child->name);
*/
-#define list_for_each_rev(h, i, member) \
- list_for_each_rev_off(h, i, list_off_var_(i, member))
+#define ccan_list_for_each_rev(h, i, member) \
+ ccan_list_for_each_rev_off(h, i, ccan_list_off_var_(i, member))
/**
- * list_for_each_rev_safe - iterate through a list backwards,
+ * ccan_list_for_each_rev_safe - iterate through a list backwards,
* maybe during deletion
- * @h: the list_head
- * @i: the structure containing the list_node
- * @nxt: the structure containing the list_node
- * @member: the list_node member of the structure
+ * @h: the ccan_list_head
+ * @i: the structure containing the ccan_list_node
+ * @nxt: the structure containing the ccan_list_node
+ * @member: the ccan_list_node member of the structure
*
* This is a convenient wrapper to iterate @i over the entire list backwards.
* It's a for loop, so you can break and continue as normal. The extra
@@ -500,74 +500,74 @@ static inline const void *list_tail_(const struct list_head *h, size_t off)
*
* Example:
* struct child *next;
- * list_for_each_rev_safe(&parent->children, child, next, list) {
+ * ccan_list_for_each_rev_safe(&parent->children, child, next, list) {
* printf("Name: %s\n", child->name);
* }
*/
-#define list_for_each_rev_safe(h, i, nxt, member) \
- list_for_each_rev_safe_off(h, i, nxt, list_off_var_(i, member))
+#define ccan_list_for_each_rev_safe(h, i, nxt, member) \
+ ccan_list_for_each_rev_safe_off(h, i, nxt, ccan_list_off_var_(i, member))
/**
- * list_for_each_safe - iterate through a list, maybe during deletion
- * @h: the list_head
- * @i: the structure containing the list_node
- * @nxt: the structure containing the list_node
- * @member: the list_node member of the structure
+ * ccan_list_for_each_safe - iterate through a list, maybe during deletion
+ * @h: the ccan_list_head
+ * @i: the structure containing the ccan_list_node
+ * @nxt: the structure containing the ccan_list_node
+ * @member: the ccan_list_node member of the structure
*
* This is a convenient wrapper to iterate @i over the entire list. It's
* a for loop, so you can break and continue as normal. The extra variable
* @nxt is used to hold the next element, so you can delete @i from the list.
*
* Example:
- * list_for_each_safe(&parent->children, child, next, list) {
- * list_del(&child->list);
+ * ccan_list_for_each_safe(&parent->children, child, next, list) {
+ * ccan_list_del(&child->list);
* parent->num_children--;
* }
*/
-#define list_for_each_safe(h, i, nxt, member) \
- list_for_each_safe_off(h, i, nxt, list_off_var_(i, member))
+#define ccan_list_for_each_safe(h, i, nxt, member) \
+ ccan_list_for_each_safe_off(h, i, nxt, ccan_list_off_var_(i, member))
/**
- * list_next - get the next entry in a list
- * @h: the list_head
+ * ccan_list_next - get the next entry in a list
+ * @h: the ccan_list_head
* @i: a pointer to an entry in the list.
- * @member: the list_node member of the structure
+ * @member: the ccan_list_node member of the structure
*
* If @i was the last entry in the list, returns NULL.
*
* Example:
* struct child *second;
- * second = list_next(&parent->children, first, list);
+ * second = ccan_list_next(&parent->children, first, list);
* if (!second)
* printf("No second child!\n");
*/
-#define list_next(h, i, member) \
- ((list_typeof(i))list_entry_or_null(list_debug(h, \
- __FILE__ ":" stringify(__LINE__)), \
+#define ccan_list_next(h, i, member) \
+ ((ccan_list_typeof(i))ccan_list_entry_or_null(ccan_list_debug(h, \
+ __FILE__ ":" ccan_stringify(__LINE__)), \
(i)->member.next, \
- list_off_var_((i), member)))
+ ccan_list_off_var_((i), member)))
/**
- * list_prev - get the previous entry in a list
- * @h: the list_head
+ * ccan_list_prev - get the previous entry in a list
+ * @h: the ccan_list_head
* @i: a pointer to an entry in the list.
- * @member: the list_node member of the structure
+ * @member: the ccan_list_node member of the structure
*
* If @i was the first entry in the list, returns NULL.
*
* Example:
- * first = list_prev(&parent->children, second, list);
+ * first = ccan_list_prev(&parent->children, second, list);
* if (!first)
* printf("Can't go back to first child?!\n");
*/
-#define list_prev(h, i, member) \
- ((list_typeof(i))list_entry_or_null(list_debug(h, \
- __FILE__ ":" stringify(__LINE__)), \
+#define ccan_list_prev(h, i, member) \
+ ((ccan_list_typeof(i))ccan_list_entry_or_null(ccan_list_debug(h, \
+ __FILE__ ":" ccan_stringify(__LINE__)), \
(i)->member.prev, \
- list_off_var_((i), member)))
+ ccan_list_off_var_((i), member)))
/**
- * list_append_list - empty one list onto the end of another.
+ * ccan_list_append_list - empty one list onto the end of another.
* @to: the list to append into
* @from: the list to empty.
*
@@ -575,20 +575,20 @@ static inline const void *list_tail_(const struct list_head *h, size_t off)
* @to. After this @from will be empty.
*
* Example:
- * struct list_head adopter;
+ * struct ccan_list_head adopter;
*
- * list_append_list(&adopter, &parent->children);
- * assert(list_empty(&parent->children));
+ * ccan_list_append_list(&adopter, &parent->children);
+ * assert(ccan_list_empty(&parent->children));
* parent->num_children = 0;
*/
-#define list_append_list(t, f) list_append_list_(t, f, \
- __FILE__ ":" stringify(__LINE__))
-static inline void list_append_list_(struct list_head *to,
- struct list_head *from,
+#define ccan_list_append_list(t, f) ccan_list_append_list_(t, f, \
+ __FILE__ ":" ccan_stringify(__LINE__))
+static inline void ccan_list_append_list_(struct ccan_list_head *to,
+ struct ccan_list_head *from,
const char *abortstr)
{
- struct list_node *from_tail = list_debug(from, abortstr)->n.prev;
- struct list_node *to_tail = list_debug(to, abortstr)->n.prev;
+ struct ccan_list_node *from_tail = ccan_list_debug(from, abortstr)->n.prev;
+ struct ccan_list_node *to_tail = ccan_list_debug(to, abortstr)->n.prev;
/* Sew in head and entire list. */
to->n.prev = from_tail;
@@ -597,12 +597,12 @@ static inline void list_append_list_(struct list_head *to,
from->n.prev = to_tail;
/* Now remove head. */
- list_del(&from->n);
- list_head_init(from);
+ ccan_list_del(&from->n);
+ ccan_list_head_init(from);
}
/**
- * list_prepend_list - empty one list into the start of another.
+ * ccan_list_prepend_list - empty one list into the start of another.
* @to: the list to prepend into
* @from: the list to empty.
*
@@ -610,17 +610,17 @@ static inline void list_append_list_(struct list_head *to,
* of @to. After this @from will be empty.
*
* Example:
- * list_prepend_list(&adopter, &parent->children);
- * assert(list_empty(&parent->children));
+ * ccan_list_prepend_list(&adopter, &parent->children);
+ * assert(ccan_list_empty(&parent->children));
* parent->num_children = 0;
*/
-#define list_prepend_list(t, f) list_prepend_list_(t, f, LIST_LOC)
-static inline void list_prepend_list_(struct list_head *to,
- struct list_head *from,
+#define ccan_list_prepend_list(t, f) ccan_list_prepend_list_(t, f, CCAN_LIST_LOC)
+static inline void ccan_list_prepend_list_(struct ccan_list_head *to,
+ struct ccan_list_head *from,
const char *abortstr)
{
- struct list_node *from_tail = list_debug(from, abortstr)->n.prev;
- struct list_node *to_head = list_debug(to, abortstr)->n.next;
+ struct ccan_list_node *from_tail = ccan_list_debug(from, abortstr)->n.prev;
+ struct ccan_list_node *to_head = ccan_list_debug(to, abortstr)->n.next;
/* Sew in head and entire list. */
to->n.next = &from->n;
@@ -629,31 +629,31 @@ static inline void list_prepend_list_(struct list_head *to,
from_tail->next = to_head;
/* Now remove head. */
- list_del(&from->n);
- list_head_init(from);
+ ccan_list_del(&from->n);
+ ccan_list_head_init(from);
}
/* internal macros, do not use directly */
-#define list_for_each_off_dir_(h, i, off, dir) \
- for (i = list_node_to_off_(list_debug(h, LIST_LOC)->n.dir, \
+#define ccan_list_for_each_off_dir_(h, i, off, dir) \
+ for (i = ccan_list_node_to_off_(ccan_list_debug(h, CCAN_LIST_LOC)->n.dir, \
(off)); \
- list_node_from_off_((void *)i, (off)) != &(h)->n; \
- i = list_node_to_off_(list_node_from_off_((void *)i, (off))->dir, \
+ ccan_list_node_from_off_((void *)i, (off)) != &(h)->n; \
+ i = ccan_list_node_to_off_(ccan_list_node_from_off_((void *)i, (off))->dir, \
(off)))
-#define list_for_each_safe_off_dir_(h, i, nxt, off, dir) \
- for (i = list_node_to_off_(list_debug(h, LIST_LOC)->n.dir, \
+#define ccan_list_for_each_safe_off_dir_(h, i, nxt, off, dir) \
+ for (i = ccan_list_node_to_off_(ccan_list_debug(h, CCAN_LIST_LOC)->n.dir, \
(off)), \
- nxt = list_node_to_off_(list_node_from_off_(i, (off))->dir, \
+ nxt = ccan_list_node_to_off_(ccan_list_node_from_off_(i, (off))->dir, \
(off)); \
- list_node_from_off_(i, (off)) != &(h)->n; \
+ ccan_list_node_from_off_(i, (off)) != &(h)->n; \
i = nxt, \
- nxt = list_node_to_off_(list_node_from_off_(i, (off))->dir, \
+ nxt = ccan_list_node_to_off_(ccan_list_node_from_off_(i, (off))->dir, \
(off)))
/**
- * list_for_each_off - iterate through a list of memory regions.
- * @h: the list_head
+ * ccan_list_for_each_off - iterate through a list of memory regions.
+ * @h: the ccan_list_head
* @i: the pointer to a memory region which contains list node data.
* @off: offset(relative to @i) at which list node data resides.
*
@@ -664,125 +664,126 @@ static inline void list_prepend_list_(struct list_head *to,
* WARNING! Being the low-level macro that it is, this wrapper doesn't know
* nor care about the type of @i. The only assumption made is that @i points
* to a chunk of memory that at some @offset, relative to @i, contains a
- * properly filled `struct list_node' which in turn contains pointers to
+ * properly filled `struct ccan_list_node' which in turn contains pointers to
* memory chunks and it's turtles all the way down. With all that in mind
* remember that given the wrong pointer/offset couple this macro will
* happily churn all you memory until SEGFAULT stops it, in other words
* caveat emptor.
*
* It is worth mentioning that one of legitimate use-cases for that wrapper
- * is operation on opaque types with known offset for `struct list_node'
+ * is operation on opaque types with known offset for `struct ccan_list_node'
* member(preferably 0), because it allows you not to disclose the type of
* @i.
*
* Example:
- * list_for_each_off(&parent->children, child,
+ * ccan_list_for_each_off(&parent->children, child,
* offsetof(struct child, list))
* printf("Name: %s\n", child->name);
*/
-#define list_for_each_off(h, i, off) \
- list_for_each_off_dir_((h),(i),(off),next)
+#define ccan_list_for_each_off(h, i, off) \
+ ccan_list_for_each_off_dir_((h),(i),(off),next)
/**
- * list_for_each_rev_off - iterate through a list of memory regions backwards
- * @h: the list_head
+ * ccan_list_for_each_rev_off - iterate through a list of memory regions backwards
+ * @h: the ccan_list_head
* @i: the pointer to a memory region which contains list node data.
* @off: offset(relative to @i) at which list node data resides.
*
- * See list_for_each_off for details
+ * See ccan_list_for_each_off for details
*/
-#define list_for_each_rev_off(h, i, off) \
- list_for_each_off_dir_((h),(i),(off),prev)
+#define ccan_list_for_each_rev_off(h, i, off) \
+ ccan_list_for_each_off_dir_((h),(i),(off),prev)
/**
- * list_for_each_safe_off - iterate through a list of memory regions, maybe
+ * ccan_list_for_each_safe_off - iterate through a list of memory regions, maybe
* during deletion
- * @h: the list_head
+ * @h: the ccan_list_head
* @i: the pointer to a memory region which contains list node data.
- * @nxt: the structure containing the list_node
+ * @nxt: the structure containing the ccan_list_node
* @off: offset(relative to @i) at which list node data resides.
*
- * For details see `list_for_each_off' and `list_for_each_safe'
+ * For details see `ccan_list_for_each_off' and `ccan_list_for_each_safe'
* descriptions.
*
* Example:
- * list_for_each_safe_off(&parent->children, child,
+ * ccan_list_for_each_safe_off(&parent->children, child,
* next, offsetof(struct child, list))
* printf("Name: %s\n", child->name);
*/
-#define list_for_each_safe_off(h, i, nxt, off) \
- list_for_each_safe_off_dir_((h),(i),(nxt),(off),next)
+#define ccan_list_for_each_safe_off(h, i, nxt, off) \
+ ccan_list_for_each_safe_off_dir_((h),(i),(nxt),(off),next)
/**
- * list_for_each_rev_safe_off - iterate backwards through a list of
+ * ccan_list_for_each_rev_safe_off - iterate backwards through a list of
* memory regions, maybe during deletion
- * @h: the list_head
+ * @h: the ccan_list_head
* @i: the pointer to a memory region which contains list node data.
- * @nxt: the structure containing the list_node
+ * @nxt: the structure containing the ccan_list_node
* @off: offset(relative to @i) at which list node data resides.
*
- * For details see `list_for_each_rev_off' and `list_for_each_rev_safe'
+ * For details see `ccan_list_for_each_rev_off' and `ccan_list_for_each_rev_safe'
* descriptions.
*
* Example:
- * list_for_each_rev_safe_off(&parent->children, child,
+ * ccan_list_for_each_rev_safe_off(&parent->children, child,
* next, offsetof(struct child, list))
* printf("Name: %s\n", child->name);
*/
-#define list_for_each_rev_safe_off(h, i, nxt, off) \
- list_for_each_safe_off_dir_((h),(i),(nxt),(off),prev)
+#define ccan_list_for_each_rev_safe_off(h, i, nxt, off) \
+ ccan_list_for_each_safe_off_dir_((h),(i),(nxt),(off),prev)
/* Other -off variants. */
-#define list_entry_off(n, type, off) \
- ((type *)list_node_from_off_((n), (off)))
+#define ccan_list_entry_off(n, type, off) \
+ ((type *)ccan_list_node_from_off_((n), (off)))
-#define list_head_off(h, type, off) \
- ((type *)list_head_off((h), (off)))
+#define ccan_list_head_off(h, type, off) \
+ ((type *)ccan_list_head_off((h), (off)))
-#define list_tail_off(h, type, off) \
- ((type *)list_tail_((h), (off)))
+#define ccan_list_tail_off(h, type, off) \
+ ((type *)ccan_list_tail_((h), (off)))
-#define list_add_off(h, n, off) \
- list_add((h), list_node_from_off_((n), (off)))
+#define ccan_list_add_off(h, n, off) \
+ ccan_list_add((h), ccan_list_node_from_off_((n), (off)))
-#define list_del_off(n, off) \
- list_del(list_node_from_off_((n), (off)))
+#define ccan_list_del_off(n, off) \
+ ccan_list_del(ccan_list_node_from_off_((n), (off)))
-#define list_del_from_off(h, n, off) \
- list_del_from(h, list_node_from_off_((n), (off)))
+#define ccan_list_del_from_off(h, n, off) \
+ ccan_list_del_from(h, ccan_list_node_from_off_((n), (off)))
/* Offset helper functions so we only single-evaluate. */
-static inline void *list_node_to_off_(struct list_node *node, size_t off)
+static inline void *ccan_list_node_to_off_(struct ccan_list_node *node, size_t off)
{
return (void *)((char *)node - off);
}
-static inline struct list_node *list_node_from_off_(void *ptr, size_t off)
+static inline struct ccan_list_node *ccan_list_node_from_off_(void *ptr, size_t off)
{
- return (struct list_node *)((char *)ptr + off);
+ return (struct ccan_list_node *)((char *)ptr + off);
}
-/* Get the offset of the member, but make sure it's a list_node. */
-#define list_off_(type, member) \
- (container_off(type, member) + \
- check_type(((type *)0)->member, struct list_node))
+/* Get the offset of the member, but make sure it's a ccan_list_node. */
+#define ccan_list_off_(type, member) \
+ (ccan_container_off(type, member) + \
+ ccan_check_type(((type *)0)->member, struct ccan_list_node))
-#define list_off_var_(var, member) \
- (container_off_var(var, member) + \
- check_type(var->member, struct list_node))
+#define ccan_list_off_var_(var, member) \
+ (ccan_container_off_var(var, member) + \
+ ccan_check_type(var->member, struct ccan_list_node))
#if HAVE_TYPEOF
-#define list_typeof(var) typeof(var)
+#define ccan_list_typeof(var) typeof(var)
#else
-#define list_typeof(var) void *
+#define ccan_list_typeof(var) void *
#endif
/* Returns member, or NULL if at end of list. */
-static inline void *list_entry_or_null(const struct list_head *h,
- const struct list_node *n,
+static inline void *ccan_list_entry_or_null(const struct ccan_list_head *h,
+ const struct ccan_list_node *n,
size_t off)
{
if (n == &h->n)
return NULL;
return (char *)n - off;
}
+
#endif /* CCAN_LIST_H */
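Putting the renamed pieces together, a complete program against the prefixed list API looks like this (a minimal sketch; struct child follows the doc-comment examples above):

    #include <stdio.h>
    #include "ccan/list/list.h"

    struct child {
        const char *name;
        struct ccan_list_node list;
    };

    int main(void)
    {
        CCAN_LIST_HEAD(children);   /* define and initialize an empty list */
        struct child c1 = { "alice", { NULL, NULL } };
        struct child c2 = { "bob",   { NULL, NULL } };
        struct child *i;

        ccan_list_add_tail(&children, &c1.list);
        ccan_list_add_tail(&children, &c2.list);

        /* Prints alice, then bob. */
        ccan_list_for_each(&children, i, list)
            printf("Name: %s\n", i->name);

        ccan_list_del(&c2.list);
        return 0;
    }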
diff --git a/ccan/str/str.h b/ccan/str/str.h
index 9a9da9cd3f..6d4cf62423 100644
--- a/ccan/str/str.h
+++ b/ccan/str/str.h
@@ -2,15 +2,16 @@
#ifndef CCAN_STR_H
#define CCAN_STR_H
/**
- * stringify - Turn expression into a string literal
+ * ccan_stringify - Turn expression into a string literal
* @expr: any C expression
*
* Example:
* #define PRINT_COND_IF_FALSE(cond) \
- * ((cond) || printf("%s is false!", stringify(cond)))
+ * ((cond) || printf("%s is false!", ccan_stringify(cond)))
*/
-#define stringify(expr) stringify_1(expr)
+#define stringify(expr) ccan_stringify_1(expr)
+#define ccan_stringify(expr) ccan_stringify_1(expr)
/* Double-indirection required to stringify expansions */
-#define stringify_1(expr) #expr
+#define ccan_stringify_1(expr) #expr
#endif /* CCAN_STR_H */
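Note that str.h keeps the unprefixed stringify as an alias alongside the new ccan_stringify, so existing call sites keep building. The double indirection matters because it stringifies the argument after macro expansion; a minimal sketch:

    #include <stdio.h>
    #include "ccan/str/str.h"

    #define VERSION_MAJOR 3

    int main(void)
    {
        /* ccan_stringify expands VERSION_MAJOR first, then stringifies:
         * prints "3", not "VERSION_MAJOR". */
        printf("%s\n", ccan_stringify(VERSION_MAJOR));
        return 0;
    }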
diff --git a/gc.c b/gc.c
index b11e39c2e0..98c8f4c370 100644
--- a/gc.c
+++ b/gc.c
@@ -673,7 +673,7 @@ typedef struct mark_stack {
typedef struct rb_heap_struct {
struct heap_page *free_pages;
- struct list_head pages;
+ struct ccan_list_head pages;
struct heap_page *sweeping_page; /* iterator for .pages */
struct heap_page *compact_cursor;
uintptr_t compact_cursor_index;
@@ -918,7 +918,7 @@ struct heap_page {
struct heap_page *free_next;
uintptr_t start;
RVALUE *freelist;
- struct list_node page_node;
+ struct ccan_list_node page_node;
bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
/* the following three bitmaps are cleared at the beginning of full GC */
@@ -1423,7 +1423,7 @@ check_rvalue_consistency_force(const VALUE obj, int terminate)
struct heap_page *page = NULL;
for (int i = 0; i < SIZE_POOL_COUNT; i++) {
rb_size_pool_t *size_pool = &size_pools[i];
- list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
+ ccan_list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
if (page->start <= (uintptr_t)obj &&
(uintptr_t)obj < (page->start + (page->total_slots * size_pool->slot_size))) {
fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
@@ -1768,8 +1768,8 @@ rb_objspace_alloc(void)
size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;
- list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
- list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
+ ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
+ ccan_list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
}
dont_gc_on();
@@ -1941,7 +1941,7 @@ heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *pa
static void
heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
- list_del(&page->page_node);
+ ccan_list_del(&page->page_node);
heap->total_pages--;
heap->total_slots -= page->total_slots;
}
@@ -1964,7 +1964,7 @@ heap_pages_free_unused_pages(rb_objspace_t *objspace)
bool has_pages_in_tomb_heap = FALSE;
for (i = 0; i < SIZE_POOL_COUNT; i++) {
- if (!list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
+ if (!ccan_list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
has_pages_in_tomb_heap = TRUE;
break;
}
@@ -2102,7 +2102,7 @@ heap_page_resurrect(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
{
struct heap_page *page = 0, *next;
- list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
+ ccan_list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
if (page->freelist != NULL) {
heap_unlink_page(objspace, &size_pool->tomb_heap, page);
@@ -2142,7 +2142,7 @@ heap_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *hea
/* Adding to eden heap during incremental sweeping is forbidden */
GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
- list_add_tail(&heap->pages, &page->page_node);
+ ccan_list_add_tail(&heap->pages, &page->page_node);
heap->total_pages++;
heap->total_slots += page->total_slots;
}
@@ -3610,7 +3610,7 @@ objspace_each_objects_try(VALUE arg)
* an infinite loop. */
struct heap_page *page = 0;
size_t pages_count = 0;
- list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
+ ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
pages[pages_count] = page;
pages_count++;
}
@@ -3624,7 +3624,7 @@ objspace_each_objects_try(VALUE arg)
size_t pages_count = data->pages_counts[i];
struct heap_page **pages = data->pages[i];
- struct heap_page *page = list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
+ struct heap_page *page = ccan_list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
for (size_t i = 0; i < pages_count; i++) {
/* If we have reached the end of the linked list then there are no
* more pages, so break. */
@@ -3641,7 +3641,7 @@ objspace_each_objects_try(VALUE arg)
break;
}
- page = list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
+ page = ccan_list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
}
}
@@ -5028,7 +5028,7 @@ try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page,
struct heap_page * next;
- next = list_prev(&heap->pages, cursor, page_node);
+ next = ccan_list_prev(&heap->pages, cursor, page_node);
/* Protect the current cursor since it probably has T_MOVED slots. */
lock_page_body(objspace, GET_PAGE_BODY(cursor->start));
@@ -5055,7 +5055,7 @@ gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
while (cursor) {
unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
- cursor = list_next(&heap->pages, cursor, page_node);
+ cursor = ccan_list_next(&heap->pages, cursor, page_node);
}
}
@@ -5610,7 +5610,7 @@ heap_page_freelist_append(struct heap_page *page, RVALUE *freelist)
static void
gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
{
- heap->sweeping_page = list_top(&heap->pages, struct heap_page, page_node);
+ heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
heap->free_pages = NULL;
#if GC_ENABLE_INCREMENTAL_MARK
heap->pooled_pages = NULL;
@@ -5636,7 +5636,7 @@ gc_sweep_start(rb_objspace_t *objspace)
}
rb_ractor_t *r = NULL;
- list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
+ ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
}
}
@@ -5763,7 +5763,7 @@ gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *hea
gc_sweep_page(objspace, size_pool, heap, &ctx);
int free_slots = ctx.freed_slots + ctx.empty_slots;
- heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);
+ heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
heap_pages_freeable_pages > 0 &&
@@ -5936,11 +5936,11 @@ gc_compact_start(rb_objspace_t *objspace)
for (int i = 0; i < SIZE_POOL_COUNT; i++) {
rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
- list_for_each(&heap->pages, page, page_node) {
+ ccan_list_for_each(&heap->pages, page, page_node) {
page->flags.before_sweep = TRUE;
}
- heap->compact_cursor = list_tail(&heap->pages, struct heap_page, page_node);
+ heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
heap->compact_cursor_index = 0;
}
@@ -5986,7 +5986,7 @@ gc_sweep(rb_objspace_t *objspace)
}
for (int i = 0; i < SIZE_POOL_COUNT; i++) {
- list_for_each(&(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages), page, page_node) {
+ ccan_list_for_each(&(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages), page, page_node) {
page->flags.before_sweep = TRUE;
}
}
@@ -7767,12 +7767,12 @@ gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
}
static int
-gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
+gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
{
int remembered_old_objects = 0;
struct heap_page *page = 0;
- list_for_each(head, page, page_node) {
+ ccan_list_for_each(head, page, page_node) {
asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
RVALUE *p = page->freelist;
while (p) {
@@ -8008,7 +8008,7 @@ gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
{
struct heap_page *page = 0;
- list_for_each(&heap->pages, page, page_node) {
+ ccan_list_for_each(&heap->pages, page, page_node) {
bits_t *mark_bits = page->mark_bits;
bits_t *wbun_bits = page->wb_unprotected_bits;
uintptr_t p = page->start;
@@ -8425,7 +8425,7 @@ rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
#endif
gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
- list_for_each(&heap->pages, page, page_node) {
+ ccan_list_for_each(&heap->pages, page, page_node) {
if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
uintptr_t p = page->start;
bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
@@ -8472,7 +8472,7 @@ rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
{
struct heap_page *page = 0;
- list_for_each(&heap->pages, page, page_node) {
+ ccan_list_for_each(&heap->pages, page, page_node) {
memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
@@ -9586,7 +9586,7 @@ gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
struct heap_page *page = 0, **page_list = malloc(size);
size_t i = 0;
- list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
+ ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
page_list[i++] = page;
GC_ASSERT(page);
}
@@ -9598,10 +9598,10 @@ gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);
/* Reset the eden heap */
- list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
+ ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
for (i = 0; i < total_pages; i++) {
- list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
+ ccan_list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
if (page_list[i]->free_slots != 0) {
heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
}
@@ -10276,7 +10276,7 @@ gc_update_references(rb_objspace_t *objspace)
rb_size_pool_t *size_pool = &size_pools[i];
rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
- list_for_each(&heap->pages, page, page_node) {
+ ccan_list_for_each(&heap->pages, page, page_node) {
uintptr_t start = (uintptr_t)page->start;
uintptr_t end = start + (page->total_slots * size_pool->slot_size);
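
Every gc.c hunk above is a mechanical rename of the same ccan list cursor idiom. As a minimal self-contained sketch of that idiom — assuming only ccan/list/list.h, with an illustrative struct page standing in for Ruby's real struct heap_page:

#include <stdio.h>
#include "ccan/list/list.h"

struct page {
    struct ccan_list_node page_node;  /* intrusive link, as in struct heap_page */
    int id;
};

static void sweep_like_walk(struct ccan_list_head *pages)
{
    /* ccan_list_top()/ccan_list_tail() return NULL on an empty list. */
    struct page *first = ccan_list_top(pages, struct page, page_node);
    struct page *last = ccan_list_tail(pages, struct page, page_node);

    /* ccan_list_next()/ccan_list_prev() return NULL past either end, so a
     * sweep cursor and a compact cursor can walk toward each other until
     * one falls off the list, as gc_sweep_step()/gc_compact_start() do. */
    for (struct page *p = first; p; p = ccan_list_next(pages, p, page_node))
        printf("page %d (tail is %d)\n", p->id, last ? last->id : -1);
}
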
diff --git a/io.c b/io.c
index b5582db5af..a2e15e7ba4 100644
--- a/io.c
+++ b/io.c
@@ -5191,7 +5191,7 @@ static void clear_codeconv(rb_io_t *fptr);
static void
fptr_finalize_flush(rb_io_t *fptr, int noraise, int keepgvl,
- struct list_head *busy)
+ struct ccan_list_head *busy)
{
VALUE err = Qnil;
int fd = fptr->fd;
@@ -5233,7 +5233,7 @@ fptr_finalize_flush(rb_io_t *fptr, int noraise, int keepgvl,
// Ensure waiting_fd users do not hit EBADF.
if (busy) {
// Wait for them to exit before we call close().
- do rb_thread_schedule(); while (!list_empty(busy));
+ do rb_thread_schedule(); while (!ccan_list_empty(busy));
}
// Disable for now.
@@ -5378,16 +5378,16 @@ rb_io_memsize(const rb_io_t *fptr)
# define KEEPGVL FALSE
#endif
-int rb_notify_fd_close(int fd, struct list_head *);
+int rb_notify_fd_close(int fd, struct ccan_list_head *);
static rb_io_t *
io_close_fptr(VALUE io)
{
rb_io_t *fptr;
VALUE write_io;
rb_io_t *write_fptr;
- struct list_head busy;
+ struct ccan_list_head busy;
- list_head_init(&busy);
+ ccan_list_head_init(&busy);
write_io = GetWriteIO(io);
if (io != write_io) {
write_fptr = RFILE(write_io)->fptr;
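
The io.c side pairs an on-stack list head with an emptiness test. A hedged stand-alone sketch of that shape (struct waiting_fd_like is illustrative, and a direct pop stands in for waiting on other threads to unlink themselves):

#include <stdio.h>
#include "ccan/list/list.h"

struct waiting_fd_like { struct ccan_list_node wfd_node; int fd; };

int main(void)
{
    struct ccan_list_head busy;   /* on-stack head, like `busy` in io_close_fptr() */
    struct waiting_fd_like w = { .fd = 3 };

    ccan_list_head_init(&busy);
    ccan_list_add(&busy, &w.wfd_node);

    /* The real loop is `do rb_thread_schedule(); while (!ccan_list_empty(&busy));`,
     * each waiter removing its own node; here we drain directly. */
    while (!ccan_list_empty(&busy)) {
        struct waiting_fd_like *p = ccan_list_pop(&busy, struct waiting_fd_like, wfd_node);
        printf("fd %d released\n", p->fd);
    }
    return 0;
}
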
diff --git a/mjit.c b/mjit.c
index 2870b2b304..414088e9e4 100644
--- a/mjit.c
+++ b/mjit.c
@@ -117,7 +117,7 @@ mjit_update_references(const rb_iseq_t *iseq)
// `ISEQ_BODY(iseq)->jit_unit` anymore (because a new one replaces it). So we need to check them too.
// TODO: we should be able to reduce the number of units checked here.
struct rb_mjit_unit *unit = NULL;
- list_for_each(&stale_units.head, unit, unode) {
+ ccan_list_for_each(&stale_units.head, unit, unode) {
if (unit->iseq == iseq) {
unit->iseq = (rb_iseq_t *)rb_gc_location((VALUE)unit->iseq);
}
@@ -145,7 +145,7 @@ mjit_free_iseq(const rb_iseq_t *iseq)
// `ISEQ_BODY(iseq)->jit_unit` anymore (because a new one replaces it). So we need to check them too.
// TODO: we should be able to reduce the number of units checked here.
struct rb_mjit_unit *unit = NULL;
- list_for_each(&stale_units.head, unit, unode) {
+ ccan_list_for_each(&stale_units.head, unit, unode) {
if (unit->iseq == iseq) {
unit->iseq = NULL;
}
@@ -161,8 +161,8 @@ free_list(struct rb_mjit_unit_list *list, bool close_handle_p)
{
struct rb_mjit_unit *unit = 0, *next;
- list_for_each_safe(&list->head, unit, next, unode) {
- list_del(&unit->unode);
+ ccan_list_for_each_safe(&list->head, unit, next, unode) {
+ ccan_list_del(&unit->unode);
if (!close_handle_p) unit->handle = NULL; /* Skip dlclose in free_unit() */
if (list == &stale_units) { // `free_unit(unit)` crashes after GC.compact on `stale_units`
@@ -886,7 +886,7 @@ skip_cleaning_object_files(struct rb_mjit_unit_list *list)
struct rb_mjit_unit *unit = NULL, *next;
// No mutex for list, assuming MJIT worker does not exist yet since it's immediately after fork.
- list_for_each_safe(&list->head, unit, next, unode) {
+ ccan_list_for_each_safe(&list->head, unit, next, unode) {
#if defined(_WIN32) // mswin doesn't reach here either. This is for MinGW.
if (unit->so_file) unit->so_file = NULL;
#endif
@@ -930,7 +930,7 @@ mjit_dump_total_calls(void)
{
struct rb_mjit_unit *unit;
fprintf(stderr, "[MJIT_COUNTER] total_calls of active_units:\n");
- list_for_each(&active_units.head, unit, unode) {
+ ccan_list_for_each(&active_units.head, unit, unode) {
const rb_iseq_t *iseq = unit->iseq;
fprintf(stderr, "%8ld: %s@%s:%d\n", ISEQ_BODY(iseq)->total_calls, RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
RSTRING_PTR(rb_iseq_path(iseq)), FIX2INT(ISEQ_BODY(iseq)->location.first_lineno));
@@ -1036,7 +1036,7 @@ mjit_mark(void)
i++;
}
}
- list_for_each(&active_units.head, unit, unode) {
+ ccan_list_for_each(&active_units.head, unit, unode) {
iseqs[i] = unit->iseq;
i++;
}
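
free_list() above shows the deletion-safe iterator. A minimal sketch of it, assuming heap-allocated illustrative units:

#include <stdlib.h>
#include "ccan/list/list.h"

struct unit_like { struct ccan_list_node unode; };

/* ccan_list_for_each_safe() caches the following node in `next`, so
 * deleting (or freeing) the current element mid-loop is legal -- the
 * plain ccan_list_for_each() would read freed memory here. */
static void drain(struct ccan_list_head *head)
{
    struct unit_like *u, *next;
    ccan_list_for_each_safe(head, u, next, unode) {
        ccan_list_del(&u->unode);
        free(u);
    }
}
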
diff --git a/mjit_worker.c b/mjit_worker.c
index 986ce93cca..6261ddc317 100644
--- a/mjit_worker.c
+++ b/mjit_worker.c
@@ -152,7 +152,7 @@ typedef intptr_t pid_t;
// The unit structure that holds metadata of ISeq for MJIT.
struct rb_mjit_unit {
- struct list_node unode;
+ struct ccan_list_node unode;
// Unique order number of unit.
int id;
// Dlopen handle of the loaded object file.
@@ -175,7 +175,7 @@ struct rb_mjit_unit {
// Linked list of struct rb_mjit_unit.
struct rb_mjit_unit_list {
- struct list_head head;
+ struct ccan_list_head head;
int length; // the list length
};
@@ -206,13 +206,13 @@ bool mjit_call_p = false;
// Priority queue of iseqs waiting for JIT compilation.
// This variable is a pointer to head unit of the queue.
-static struct rb_mjit_unit_list unit_queue = { LIST_HEAD_INIT(unit_queue.head) };
+static struct rb_mjit_unit_list unit_queue = { CCAN_LIST_HEAD_INIT(unit_queue.head) };
// List of units which are successfully compiled.
-static struct rb_mjit_unit_list active_units = { LIST_HEAD_INIT(active_units.head) };
+static struct rb_mjit_unit_list active_units = { CCAN_LIST_HEAD_INIT(active_units.head) };
// List of compacted so files which will be cleaned up by `free_list()` in `mjit_finish()`.
-static struct rb_mjit_unit_list compact_units = { LIST_HEAD_INIT(compact_units.head) };
+static struct rb_mjit_unit_list compact_units = { CCAN_LIST_HEAD_INIT(compact_units.head) };
// List of units before recompilation and just waiting for dlclose().
-static struct rb_mjit_unit_list stale_units = { LIST_HEAD_INIT(stale_units.head) };
+static struct rb_mjit_unit_list stale_units = { CCAN_LIST_HEAD_INIT(stale_units.head) };
// The number of so far processed ISEQs, used to generate unique id.
static int current_unit_num;
// A mutex for conditionals and critical sections.
@@ -370,7 +370,7 @@ add_to_list(struct rb_mjit_unit *unit, struct rb_mjit_unit_list *list)
(void)RB_DEBUG_COUNTER_INC_IF(mjit_length_compact_units, list == &compact_units);
(void)RB_DEBUG_COUNTER_INC_IF(mjit_length_stale_units, list == &stale_units);
- list_add_tail(&list->head, &unit->unode);
+ ccan_list_add_tail(&list->head, &unit->unode);
list->length++;
}
@@ -384,7 +384,7 @@ remove_from_list(struct rb_mjit_unit *unit, struct rb_mjit_unit_list *list)
rb_debug_counter_add(RB_DEBUG_COUNTER_mjit_length_stale_units, -1, list == &stale_units);
#endif
- list_del(&unit->unode);
+ ccan_list_del(&unit->unode);
list->length--;
}
@@ -503,7 +503,7 @@ get_from_list(struct rb_mjit_unit_list *list)
// Find iseq with max total_calls
struct rb_mjit_unit *unit = NULL, *next, *best = NULL;
- list_for_each_safe(&list->head, unit, next, unode) {
+ ccan_list_for_each_safe(&list->head, unit, next, unode) {
if (unit->iseq == NULL) { // ISeq is GCed.
remove_from_list(unit, list);
free_unit(unit);
@@ -977,7 +977,7 @@ compile_compact_jit_code(char* c_file)
// We need to check again here because we could've waited on GC above
bool iseq_gced = false;
struct rb_mjit_unit *child_unit = 0, *next;
- list_for_each_safe(&active_units.head, child_unit, next, unode) {
+ ccan_list_for_each_safe(&active_units.head, child_unit, next, unode) {
if (child_unit->iseq == NULL) { // ISeq is GC-ed
iseq_gced = true;
verbose(1, "JIT compaction: A method for JIT code u%d is obsoleted. Compaction will be skipped.", child_unit->id);
@@ -1002,7 +1002,7 @@ compile_compact_jit_code(char* c_file)
// TODO: Consider using a more granular lock after we implement inlining across
// compacted functions (not done yet).
bool success = true;
- list_for_each(&active_units.head, child_unit, unode) {
+ ccan_list_for_each(&active_units.head, child_unit, unode) {
CRITICAL_SECTION_START(3, "before set_compiling_iseqs");
success &= set_compiling_iseqs(child_unit->iseq);
CRITICAL_SECTION_FINISH(3, "after set_compiling_iseqs");
@@ -1080,7 +1080,7 @@ compact_all_jit_code(void)
remove_so_file(so_file, unit);
CRITICAL_SECTION_START(3, "in compact_all_jit_code to read list");
- list_for_each(&active_units.head, cur, unode) {
+ ccan_list_for_each(&active_units.head, cur, unode) {
void *func;
char funcname[MAXPATHLEN];
sprint_funcname(funcname, cur);
@@ -1347,7 +1347,7 @@ unload_units(void)
// For now, we don't unload units when ISeq is GCed. We should
// unload such ISeqs first here.
- list_for_each_safe(&active_units.head, unit, next, unode) {
+ ccan_list_for_each_safe(&active_units.head, unit, next, unode) {
if (unit->iseq == NULL) { // ISeq is GCed.
remove_from_list(unit, &active_units);
free_unit(unit);
@@ -1355,7 +1355,7 @@ unload_units(void)
}
// Detect units which are in use and can't be unloaded.
- list_for_each(&active_units.head, unit, unode) {
+ ccan_list_for_each(&active_units.head, unit, unode) {
assert(unit->iseq != NULL && unit->handle != NULL);
unit->used_code_p = false;
}
@@ -1372,7 +1372,7 @@ unload_units(void)
while (true) {
// Calculate the next max total_calls in unit_queue
long unsigned max_queue_calls = 0;
- list_for_each(&unit_queue.head, unit, unode) {
+ ccan_list_for_each(&unit_queue.head, unit, unode) {
if (unit->iseq != NULL && max_queue_calls < ISEQ_BODY(unit->iseq)->total_calls
&& ISEQ_BODY(unit->iseq)->total_calls < prev_queue_calls) {
max_queue_calls = ISEQ_BODY(unit->iseq)->total_calls;
@@ -1381,7 +1381,7 @@ unload_units(void)
prev_queue_calls = max_queue_calls;
bool unloaded_p = false;
- list_for_each_safe(&active_units.head, unit, next, unode) {
+ ccan_list_for_each_safe(&active_units.head, unit, next, unode) {
if (unit->used_code_p) // We can't unload code on stack.
continue;
@@ -1441,7 +1441,7 @@ mjit_worker(void)
// Wait until a unit becomes available
CRITICAL_SECTION_START(3, "in worker dequeue");
- while ((list_empty(&unit_queue.head) || active_units.length >= mjit_opts.max_cache_size) && !stop_worker_p) {
+ while ((ccan_list_empty(&unit_queue.head) || active_units.length >= mjit_opts.max_cache_size) && !stop_worker_p) {
rb_native_cond_wait(&mjit_worker_wakeup, &mjit_engine_mutex);
verbose(3, "Getting wakeup from client");
@@ -1449,7 +1449,7 @@ mjit_worker(void)
if (pending_stale_p) {
pending_stale_p = false;
struct rb_mjit_unit *next;
- list_for_each_safe(&active_units.head, unit, next, unode) {
+ ccan_list_for_each_safe(&active_units.head, unit, next, unode) {
if (unit->stale_p) {
unit->stale_p = false;
remove_from_list(unit, &active_units);
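
The four unit lists above rely on compile-time initialization. A sketch of that pattern, with an illustrative wrapper struct in place of rb_mjit_unit_list:

#include "ccan/list/list.h"

struct unit_list_like {
    struct ccan_list_head head;
    int length;
};

/* CCAN_LIST_HEAD_INIT(name) expands to links pointing back at `name`,
 * so a file-scope list is valid before any code runs -- no init call
 * needed, exactly as with unit_queue/active_units above. */
static struct unit_list_like queue = { CCAN_LIST_HEAD_INIT(queue.head), 0 };

static void push(struct unit_list_like *list, struct ccan_list_node *n)
{
    ccan_list_add_tail(&list->head, n);   /* FIFO order, as in add_to_list() */
    list->length++;
}
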
diff --git a/process.c b/process.c
index 2282fc92dc..da663f9765 100644
--- a/process.c
+++ b/process.c
@@ -1076,7 +1076,7 @@ do_waitpid(rb_pid_t pid, int *st, int flags)
#define WAITPID_LOCK_ONLY ((struct waitpid_state *)-1)
struct waitpid_state {
- struct list_node wnode;
+ struct ccan_list_node wnode;
rb_execution_context_t *ec;
rb_nativethread_cond_t *cond;
rb_pid_t ret;
@@ -1110,12 +1110,12 @@ waitpid_signal(struct waitpid_state *w)
// Used for VM memsize reporting. Returns the size of a list of waitpid_state
// structs. Defined here because the struct definition lives here as well.
size_t
-rb_vm_memsize_waiting_list(struct list_head *waiting_list)
+rb_vm_memsize_waiting_list(struct ccan_list_head *waiting_list)
{
struct waitpid_state *waitpid = 0;
size_t size = 0;
- list_for_each(waiting_list, waitpid, wnode) {
+ ccan_list_for_each(waiting_list, waitpid, wnode) {
size += sizeof(struct waitpid_state);
}
@@ -1132,10 +1132,10 @@ sigwait_fd_migrate_sleeper(rb_vm_t *vm)
{
struct waitpid_state *w = 0;
- list_for_each(&vm->waiting_pids, w, wnode) {
+ ccan_list_for_each(&vm->waiting_pids, w, wnode) {
if (waitpid_signal(w)) return;
}
- list_for_each(&vm->waiting_grps, w, wnode) {
+ ccan_list_for_each(&vm->waiting_grps, w, wnode) {
if (waitpid_signal(w)) return;
}
}
@@ -1152,18 +1152,18 @@ rb_sigwait_fd_migrate(rb_vm_t *vm)
extern volatile unsigned int ruby_nocldwait; /* signal.c */
/* called by timer thread or thread which acquired sigwait_fd */
static void
-waitpid_each(struct list_head *head)
+waitpid_each(struct ccan_list_head *head)
{
struct waitpid_state *w = 0, *next;
- list_for_each_safe(head, w, next, wnode) {
+ ccan_list_for_each_safe(head, w, next, wnode) {
rb_pid_t ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
if (!ret) continue;
if (ret == -1) w->errnum = errno;
w->ret = ret;
- list_del_init(&w->wnode);
+ ccan_list_del_init(&w->wnode);
waitpid_signal(w);
}
}
@@ -1177,11 +1177,11 @@ ruby_waitpid_all(rb_vm_t *vm)
#if RUBY_SIGCHLD
rb_native_mutex_lock(&vm->waitpid_lock);
waitpid_each(&vm->waiting_pids);
- if (list_empty(&vm->waiting_pids)) {
+ if (ccan_list_empty(&vm->waiting_pids)) {
waitpid_each(&vm->waiting_grps);
}
/* emulate SA_NOCLDWAIT */
- if (list_empty(&vm->waiting_pids) && list_empty(&vm->waiting_grps)) {
+ if (ccan_list_empty(&vm->waiting_pids) && ccan_list_empty(&vm->waiting_grps)) {
while (ruby_nocldwait && do_waitpid(-1, 0, WNOHANG) > 0)
; /* keep looping */
}
@@ -1222,7 +1222,7 @@ ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options,
assert(!ruby_thread_has_gvl_p() && "must not have GVL");
waitpid_state_init(&w, pid, options);
- if (w.pid > 0 || list_empty(&vm->waiting_pids))
+ if (w.pid > 0 || ccan_list_empty(&vm->waiting_pids))
w.ret = do_waitpid(w.pid, &w.status, w.options | WNOHANG);
if (w.ret) {
if (w.ret == -1) w.errnum = errno;
@@ -1231,7 +1231,7 @@ ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options,
int sigwait_fd = -1;
w.ec = 0;
- list_add(w.pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w.wnode);
+ ccan_list_add(w.pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w.wnode);
do {
if (sigwait_fd < 0)
sigwait_fd = rb_sigwait_fd_get(0);
@@ -1247,7 +1247,7 @@ ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options,
rb_native_cond_wait(w.cond, &vm->waitpid_lock);
}
} while (!w.ret);
- list_del(&w.wnode);
+ ccan_list_del(&w.wnode);
/* we're done, maybe other waitpid callers are not: */
if (sigwait_fd >= 0) {
@@ -1280,14 +1280,14 @@ waitpid_cleanup(VALUE x)
struct waitpid_state *w = (struct waitpid_state *)x;
/*
- * XXX w->ret is sometimes set but list_del is still needed, here,
- * Not sure why, so we unconditionally do list_del here:
+ * XXX w->ret is sometimes set but ccan_list_del is still needed here;
+ * not sure why, so we unconditionally do ccan_list_del here:
*/
if (TRUE || w->ret == 0) {
rb_vm_t *vm = rb_ec_vm_ptr(w->ec);
rb_native_mutex_lock(&vm->waitpid_lock);
- list_del(&w->wnode);
+ ccan_list_del(&w->wnode);
rb_native_mutex_unlock(&vm->waitpid_lock);
}
@@ -1307,7 +1307,7 @@ waitpid_wait(struct waitpid_state *w)
*/
rb_native_mutex_lock(&vm->waitpid_lock);
- if (w->pid > 0 || list_empty(&vm->waiting_pids)) {
+ if (w->pid > 0 || ccan_list_empty(&vm->waiting_pids)) {
w->ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
}
@@ -1323,7 +1323,7 @@ waitpid_wait(struct waitpid_state *w)
if (need_sleep) {
w->cond = 0;
/* order matters, favor specified PIDs rather than -1 or 0 */
- list_add(w->pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w->wnode);
+ ccan_list_add(w->pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w->wnode);
}
rb_native_mutex_unlock(&vm->waitpid_lock);
@@ -4229,7 +4229,7 @@ retry_fork_async_signal_safe(struct rb_process_status *status, int *ep,
if (waitpid_lock) {
if (pid > 0 && w != WAITPID_LOCK_ONLY) {
w->pid = pid;
- list_add(&GET_VM()->waiting_pids, &w->wnode);
+ ccan_list_add(&GET_VM()->waiting_pids, &w->wnode);
}
rb_native_mutex_unlock(waitpid_lock);
}
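
waitpid_each() unlinks with ccan_list_del_init() precisely so that waitpid_cleanup()'s unconditional ccan_list_del() stays safe. A minimal demonstration of that property, with an illustrative struct:

#include "ccan/list/list.h"

struct waitpid_like { struct ccan_list_node wnode; };

int main(void)
{
    struct ccan_list_head waiting;
    struct waitpid_like w;

    ccan_list_head_init(&waiting);
    ccan_list_add(&waiting, &w.wnode);

    /* ccan_list_del() alone poisons the node, so a second del would be a
     * bug; ccan_list_del_init() relinks the node to itself instead. */
    ccan_list_del_init(&w.wnode);  /* e.g. waitpid_each() reaping the pid */
    ccan_list_del(&w.wnode);       /* e.g. waitpid_cleanup(); now a no-op */
    return 0;
}
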
diff --git a/ractor.c b/ractor.c
index ccdfd32710..f010574028 100644
--- a/ractor.c
+++ b/ractor.c
@@ -202,7 +202,7 @@ ractor_mark(void *ptr)
if (r->threads.cnt > 0) {
rb_thread_t *th = 0;
- list_for_each(&r->threads.set, th, lt_node) {
+ ccan_list_for_each(&r->threads.set, th, lt_node) {
VM_ASSERT(th != NULL);
rb_gc_mark(th->self);
}
@@ -1414,7 +1414,7 @@ vm_insert_ractor0(rb_vm_t *vm, rb_ractor_t *r, bool single_ractor_mode)
RUBY_DEBUG_LOG("r:%u ractor.cnt:%u++", r->pub.id, vm->ractor.cnt);
VM_ASSERT(single_ractor_mode || RB_VM_LOCKED_P());
- list_add_tail(&vm->ractor.set, &r->vmlr_node);
+ ccan_list_add_tail(&vm->ractor.set, &r->vmlr_node);
vm->ractor.cnt++;
}
@@ -1483,7 +1483,7 @@ vm_remove_ractor(rb_vm_t *vm, rb_ractor_t *cr)
vm->ractor.cnt, vm->ractor.sync.terminate_waiting);
VM_ASSERT(vm->ractor.cnt > 0);
- list_del(&cr->vmlr_node);
+ ccan_list_del(&cr->vmlr_node);
if (vm->ractor.cnt <= 2 && vm->ractor.sync.terminate_waiting) {
rb_native_cond_signal(&vm->ractor.sync.terminate_cond);
@@ -1550,7 +1550,7 @@ void rb_gvl_init(rb_global_vm_lock_t *gvl);
void
rb_ractor_living_threads_init(rb_ractor_t *r)
{
- list_head_init(&r->threads.set);
+ ccan_list_head_init(&r->threads.set);
r->threads.cnt = 0;
r->threads.blocking_cnt = 0;
}
@@ -1741,7 +1741,7 @@ rb_ractor_thread_list(rb_ractor_t *r)
ts = ALLOCA_N(VALUE, r->threads.cnt);
ts_cnt = 0;
- list_for_each(&r->threads.set, th, lt_node) {
+ ccan_list_for_each(&r->threads.set, th, lt_node) {
switch (th->status) {
case THREAD_RUNNABLE:
case THREAD_STOPPED:
@@ -1770,7 +1770,7 @@ rb_ractor_living_threads_insert(rb_ractor_t *r, rb_thread_t *th)
RACTOR_LOCK(r);
{
RUBY_DEBUG_LOG("r(%d)->threads.cnt:%d++", r->pub.id, r->threads.cnt);
- list_add_tail(&r->threads.set, &th->lt_node);
+ ccan_list_add_tail(&r->threads.set, &th->lt_node);
r->threads.cnt++;
}
RACTOR_UNLOCK(r);
@@ -1853,7 +1853,7 @@ rb_ractor_living_threads_remove(rb_ractor_t *cr, rb_thread_t *th)
else {
RACTOR_LOCK(cr);
{
- list_del(&th->lt_node);
+ ccan_list_del(&th->lt_node);
cr->threads.cnt--;
}
RACTOR_UNLOCK(cr);
@@ -1940,7 +1940,7 @@ ractor_terminal_interrupt_all(rb_vm_t *vm)
if (vm->ractor.cnt > 1) {
// send terminate notification to all ractors
rb_ractor_t *r = 0;
- list_for_each(&vm->ractor.set, r, vmlr_node) {
+ ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
if (r != vm->ractor.main_ractor) {
rb_ractor_terminate_interrupt_main_thread(r);
}
@@ -2119,7 +2119,7 @@ rb_ractor_dump(void)
rb_vm_t *vm = GET_VM();
rb_ractor_t *r = 0;
- list_for_each(&vm->ractor.set, r, vmlr_node) {
+ ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
if (r != vm->ractor.main_ractor) {
fprintf(stderr, "r:%u (%s)\n", r->pub.id, ractor_status_str(r->status_));
}
diff --git a/ractor_core.h b/ractor_core.h
index 7673607a00..a3bc90febf 100644
--- a/ractor_core.h
+++ b/ractor_core.h
@@ -91,7 +91,7 @@ struct rb_ractor_struct {
// thread management
struct {
- struct list_head set;
+ struct ccan_list_head set;
unsigned int cnt;
unsigned int blocking_cnt;
unsigned int sleeper;
@@ -126,7 +126,7 @@ struct rb_ractor_struct {
ractor_terminated,
} status_;
- struct list_node vmlr_node;
+ struct ccan_list_node vmlr_node;
// ractor local data
diff --git a/thread.c b/thread.c
index 26d9bb4b14..5df4d68b45 100644
--- a/thread.c
+++ b/thread.c
@@ -150,7 +150,7 @@ void rb_sigwait_fd_migrate(rb_vm_t *); /* process.c */
static volatile int system_working = 1;
struct waiting_fd {
- struct list_node wfd_node; /* <=> vm.waiting_fds */
+ struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
rb_thread_t *th;
int fd;
};
@@ -500,7 +500,7 @@ terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
{
rb_thread_t *th = 0;
- list_for_each(&r->threads.set, th, lt_node) {
+ ccan_list_for_each(&r->threads.set, th, lt_node) {
if (th != main_thread) {
thread_debug("terminate_all: begin (thid: %"PRI_THREAD_ID", status: %s)\n",
thread_id_str(th), thread_status_name(th, TRUE));
@@ -1799,7 +1799,7 @@ rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
RB_VM_LOCK_ENTER();
{
- list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &waiting_fd.wfd_node);
+ ccan_list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &waiting_fd.wfd_node);
}
RB_VM_LOCK_LEAVE();
@@ -1814,11 +1814,11 @@ rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
/*
* must be deleted before jump
- * this will delete either from waiting_fds or on-stack LIST_HEAD(busy)
+ * this will delete either from waiting_fds or on-stack CCAN_LIST_HEAD(busy)
*/
RB_VM_LOCK_ENTER();
{
- list_del(&waiting_fd.wfd_node);
+ ccan_list_del(&waiting_fd.wfd_node);
}
RB_VM_LOCK_LEAVE();
@@ -2574,20 +2574,20 @@ rb_ec_reset_raised(rb_execution_context_t *ec)
}
int
-rb_notify_fd_close(int fd, struct list_head *busy)
+rb_notify_fd_close(int fd, struct ccan_list_head *busy)
{
rb_vm_t *vm = GET_THREAD()->vm;
struct waiting_fd *wfd = 0, *next;
RB_VM_LOCK_ENTER();
{
- list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
+ ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
if (wfd->fd == fd) {
rb_thread_t *th = wfd->th;
VALUE err;
- list_del(&wfd->wfd_node);
- list_add(busy, &wfd->wfd_node);
+ ccan_list_del(&wfd->wfd_node);
+ ccan_list_add(busy, &wfd->wfd_node);
err = th->vm->special_exceptions[ruby_error_stream_closed];
rb_threadptr_pending_interrupt_enque(th, err);
@@ -2597,17 +2597,17 @@ rb_notify_fd_close(int fd, struct list_head *busy)
}
RB_VM_LOCK_LEAVE();
- return !list_empty(busy);
+ return !ccan_list_empty(busy);
}
void
rb_thread_fd_close(int fd)
{
- struct list_head busy;
+ struct ccan_list_head busy;
- list_head_init(&busy);
+ ccan_list_head_init(&busy);
if (rb_notify_fd_close(fd, &busy)) {
- do rb_thread_schedule(); while (!list_empty(&busy));
+ do rb_thread_schedule(); while (!ccan_list_empty(&busy));
}
}
@@ -4353,7 +4353,7 @@ rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
RB_VM_LOCK_ENTER();
{
- list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
+ ccan_list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
}
RB_VM_LOCK_LEAVE();
@@ -4404,7 +4404,7 @@ rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
RB_VM_LOCK_ENTER();
{
- list_del(&wfd.wfd_node);
+ ccan_list_del(&wfd.wfd_node);
}
RB_VM_LOCK_LEAVE();
@@ -4480,7 +4480,7 @@ select_single_cleanup(VALUE ptr)
{
struct select_args *args = (struct select_args *)ptr;
- list_del(&args->wfd.wfd_node);
+ ccan_list_del(&args->wfd.wfd_node);
if (args->read) rb_fd_term(args->read);
if (args->write) rb_fd_term(args->write);
if (args->except) rb_fd_term(args->except);
@@ -4506,7 +4506,7 @@ rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
RB_VM_LOCK_ENTER();
{
- list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
+ ccan_list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
}
RB_VM_LOCK_LEAVE();
@@ -4702,8 +4702,8 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r
ubf_list_atfork();
// OK. Only this thread accesses:
- list_for_each(&vm->ractor.set, r, vmlr_node) {
- list_for_each(&r->threads.set, i, lt_node) {
+ ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
+ ccan_list_for_each(&r->threads.set, i, lt_node) {
atfork(i, th);
}
}
@@ -4843,7 +4843,7 @@ thgroup_list(VALUE group)
rb_thread_t *th = 0;
rb_ractor_t *r = GET_RACTOR();
- list_for_each(&r->threads.set, th, lt_node) {
+ ccan_list_for_each(&r->threads.set, th, lt_node) {
if (th->thgroup == group) {
rb_ary_push(ary, th->self);
}
@@ -5513,7 +5513,7 @@ debug_deadlock_check(rb_ractor_t *r, VALUE msg)
rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
(void *)GET_THREAD(), (void *)r->threads.main);
- list_for_each(&r->threads.set, th, lt_node) {
+ ccan_list_for_each(&r->threads.set, th, lt_node) {
rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
"native:%"PRI_THREAD_ID" int:%u",
th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);
@@ -5551,13 +5551,13 @@ rb_check_deadlock(rb_ractor_t *r)
if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
if (patrol_thread && patrol_thread != GET_THREAD()) return;
- list_for_each(&r->threads.set, th, lt_node) {
+ ccan_list_for_each(&r->threads.set, th, lt_node) {
if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
found = 1;
}
else if (th->locking_mutex) {
rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
- if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !list_empty(&mutex->waitq))) {
+ if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
found = 1;
}
}
@@ -5578,12 +5578,12 @@ rb_check_deadlock(rb_ractor_t *r)
// Used for VM memsize reporting. Returns the size of a list of waiting_fd
// structs. Defined here because the struct definition lives here as well.
size_t
-rb_vm_memsize_waiting_fds(struct list_head *waiting_fds)
+rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
{
struct waiting_fd *waitfd = 0;
size_t size = 0;
- list_for_each(waiting_fds, waitfd, wfd_node) {
+ ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
size += sizeof(struct waiting_fd);
}
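
rb_notify_fd_close() above is the one place a node migrates between lists. A sketch of that move, reusing the illustrative type from the io.c note:

#include "ccan/list/list.h"

struct waiting_fd_like { struct ccan_list_node wfd_node; int fd; };

/* A node can sit on only one list at a time, so del-then-add is the whole
 * move: matching waiters leave the VM-wide list for the caller's on-stack
 * `busy` list, as in rb_notify_fd_close(). */
static int steal_matching(struct ccan_list_head *from,
                          struct ccan_list_head *busy, int fd)
{
    struct waiting_fd_like *wfd, *next;

    ccan_list_for_each_safe(from, wfd, next, wfd_node) {
        if (wfd->fd == fd) {
            ccan_list_del(&wfd->wfd_node);
            ccan_list_add(busy, &wfd->wfd_node);
        }
    }
    return !ccan_list_empty(busy);
}
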
diff --git a/thread_pthread.c b/thread_pthread.c
index 55289de73a..60853cd0a3 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -228,7 +228,7 @@ designate_timer_thread(rb_global_vm_lock_t *gvl)
{
native_thread_data_t *last;
- last = list_tail(&gvl->waitq, native_thread_data_t, node.ubf);
+ last = ccan_list_tail(&gvl->waitq, native_thread_data_t, node.ubf);
if (last) {
rb_native_cond_signal(&last->cond.gvlq);
return TRUE;
@@ -289,7 +289,7 @@ gvl_acquire_common(rb_global_vm_lock_t *gvl, rb_thread_t *th)
VM_ASSERT(th->unblock.func == 0 &&
"we must not be in ubf_list and GVL waitq at the same time");
- list_add_tail(&gvl->waitq, &nd->node.gvl);
+ ccan_list_add_tail(&gvl->waitq, &nd->node.gvl);
do {
if (!gvl->timer) {
@@ -300,7 +300,7 @@ gvl_acquire_common(rb_global_vm_lock_t *gvl, rb_thread_t *th)
}
} while (gvl->owner);
- list_del_init(&nd->node.gvl);
+ ccan_list_del_init(&nd->node.gvl);
if (gvl->need_yield) {
gvl->need_yield = 0;
@@ -331,7 +331,7 @@ gvl_release_common(rb_global_vm_lock_t *gvl)
{
native_thread_data_t *next;
gvl->owner = 0;
- next = list_top(&gvl->waitq, native_thread_data_t, node.ubf);
+ next = ccan_list_top(&gvl->waitq, native_thread_data_t, node.ubf);
if (next) rb_native_cond_signal(&next->cond.gvlq);
return next;
@@ -388,7 +388,7 @@ rb_gvl_init(rb_global_vm_lock_t *gvl)
rb_native_mutex_initialize(&gvl->lock);
rb_native_cond_initialize(&gvl->switch_cond);
rb_native_cond_initialize(&gvl->switch_wait_cond);
- list_head_init(&gvl->waitq);
+ ccan_list_head_init(&gvl->waitq);
gvl->owner = 0;
gvl->timer = 0;
gvl->timer_err = ETIMEDOUT;
@@ -690,7 +690,7 @@ native_thread_init(rb_thread_t *th)
th->tid = get_native_thread_id();
#endif
#ifdef USE_UBF_LIST
- list_node_init(&nd->node.ubf);
+ ccan_list_node_init(&nd->node.ubf);
#endif
rb_native_cond_initialize(&nd->cond.gvlq);
if (&nd->cond.gvlq != &nd->cond.intr)
@@ -1072,19 +1072,19 @@ struct cached_thread_entry {
rb_nativethread_id_t thread_id;
rb_thread_t *th;
void *altstack;
- struct list_node node;
+ struct ccan_list_node node;
};
#if USE_THREAD_CACHE
static rb_nativethread_lock_t thread_cache_lock = RB_NATIVETHREAD_LOCK_INIT;
-static LIST_HEAD(cached_thread_head);
+static CCAN_LIST_HEAD(cached_thread_head);
# if defined(HAVE_WORKING_FORK)
static void
thread_cache_reset(void)
{
rb_native_mutex_initialize(&thread_cache_lock);
- list_head_init(&cached_thread_head);
+ ccan_list_head_init(&cached_thread_head);
}
# endif
@@ -1111,12 +1111,12 @@ register_cached_thread_and_wait(void *altstack)
rb_native_mutex_lock(&thread_cache_lock);
{
- list_add(&cached_thread_head, &entry.node);
+ ccan_list_add(&cached_thread_head, &entry.node);
native_cond_timedwait(&entry.cond, &thread_cache_lock, &end);
if (entry.th == NULL) { /* unused */
- list_del(&entry.node);
+ ccan_list_del(&entry.node);
}
}
rb_native_mutex_unlock(&thread_cache_lock);
@@ -1141,7 +1141,7 @@ use_cached_thread(rb_thread_t *th)
struct cached_thread_entry *entry;
rb_native_mutex_lock(&thread_cache_lock);
- entry = list_pop(&cached_thread_head, struct cached_thread_entry, node);
+ entry = ccan_list_pop(&cached_thread_head, struct cached_thread_entry, node);
if (entry) {
entry->th = th;
/* th->thread_id must be set before signal for Thread#name= */
@@ -1162,7 +1162,7 @@ clear_thread_cache_altstack(void)
struct cached_thread_entry *entry;
rb_native_mutex_lock(&thread_cache_lock);
- list_for_each(&cached_thread_head, entry, node) {
+ ccan_list_for_each(&cached_thread_head, entry, node) {
void MAYBE_UNUSED(*altstack) = entry->altstack;
entry->altstack = 0;
RB_ALTSTACK_FREE(altstack);
@@ -1305,13 +1305,13 @@ native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
}
#ifdef USE_UBF_LIST
-static LIST_HEAD(ubf_list_head);
+static CCAN_LIST_HEAD(ubf_list_head);
static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;
static void
ubf_list_atfork(void)
{
- list_head_init(&ubf_list_head);
+ ccan_list_head_init(&ubf_list_head);
rb_native_mutex_initialize(&ubf_list_lock);
}
@@ -1319,11 +1319,11 @@ ubf_list_atfork(void)
static void
register_ubf_list(rb_thread_t *th)
{
- struct list_node *node = &th->native_thread_data.node.ubf;
+ struct ccan_list_node *node = &th->native_thread_data.node.ubf;
- if (list_empty((struct list_head*)node)) {
+ if (ccan_list_empty((struct ccan_list_head*)node)) {
rb_native_mutex_lock(&ubf_list_lock);
- list_add(&ubf_list_head, node);
+ ccan_list_add(&ubf_list_head, node);
rb_native_mutex_unlock(&ubf_list_lock);
}
}
@@ -1332,15 +1332,15 @@ register_ubf_list(rb_thread_t *th)
static void
unregister_ubf_list(rb_thread_t *th)
{
- struct list_node *node = &th->native_thread_data.node.ubf;
+ struct ccan_list_node *node = &th->native_thread_data.node.ubf;
/* we can't allow re-entry into ubf_list_head */
VM_ASSERT(th->unblock.func == 0);
- if (!list_empty((struct list_head*)node)) {
+ if (!ccan_list_empty((struct ccan_list_head*)node)) {
rb_native_mutex_lock(&ubf_list_lock);
- list_del_init(node);
- if (list_empty(&ubf_list_head) && !rb_signal_buff_size()) {
+ ccan_list_del_init(node);
+ if (ccan_list_empty(&ubf_list_head) && !rb_signal_buff_size()) {
ubf_timer_disarm();
}
rb_native_mutex_unlock(&ubf_list_lock);
@@ -1397,7 +1397,7 @@ ubf_select(void *ptr)
static int
ubf_threads_empty(void)
{
- return list_empty(&ubf_list_head);
+ return ccan_list_empty(&ubf_list_head);
}
static void
@@ -1408,8 +1408,8 @@ ubf_wakeup_all_threads(void)
if (!ubf_threads_empty()) {
rb_native_mutex_lock(&ubf_list_lock);
- list_for_each(&ubf_list_head, dat, node.ubf) {
- th = container_of(dat, rb_thread_t, native_thread_data);
+ ccan_list_for_each(&ubf_list_head, dat, node.ubf) {
+ th = ccan_container_of(dat, rb_thread_t, native_thread_data);
ubf_wakeup_thread(th);
}
rb_native_mutex_unlock(&ubf_list_lock);
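
ubf_wakeup_all_threads() needs the rename's other half, ccan_container_of(), because the list links a nested member rather than the thread itself. A sketch with simplified illustrative types (the real node sits inside a union):

#include "ccan/list/list.h"

struct native_data_like { struct ccan_list_node ubf; };
struct thread_like {
    int id;
    struct native_data_like native_thread_data;  /* node nested one level down */
};

/* The ubf list stores the nested node, so iteration yields the inner
 * struct; ccan_container_of() subtracts the member offset to recover the
 * enclosing thread, as ubf_wakeup_all_threads() does for rb_thread_t. */
static struct thread_like *thread_of(struct native_data_like *dat)
{
    return ccan_container_of(dat, struct thread_like, native_thread_data);
}
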
diff --git a/thread_pthread.h b/thread_pthread.h
index 2ac354046c..38a006627a 100644
--- a/thread_pthread.h
+++ b/thread_pthread.h
@@ -19,8 +19,8 @@
typedef struct native_thread_data_struct {
union {
- struct list_node ubf;
- struct list_node gvl;
+ struct ccan_list_node ubf;
+ struct ccan_list_node gvl;
} node;
#if defined(__GLIBC__) || defined(__FreeBSD__)
union
@@ -58,7 +58,7 @@ typedef struct rb_global_vm_lock_struct {
* switching between contended/uncontended GVL won't reset the
* timer.
*/
- struct list_head waitq; /* <=> native_thread_data_t.node.ubf */
+ struct ccan_list_head waitq; /* <=> native_thread_data_t.node.ubf */
const struct rb_thread_struct *timer;
int timer_err;
diff --git a/thread_sync.c b/thread_sync.c
index eaf2c025b9..9b466e6670 100644
--- a/thread_sync.c
+++ b/thread_sync.c
@@ -8,7 +8,7 @@ static VALUE rb_eClosedQueueError;
typedef struct rb_mutex_struct {
rb_fiber_t *fiber;
struct rb_mutex_struct *next_mutex;
- struct list_head waitq; /* protected by GVL */
+ struct ccan_list_head waitq; /* protected by GVL */
} rb_mutex_t;
/* sync_waiter is always on-stack */
@@ -16,18 +16,18 @@ struct sync_waiter {
VALUE self;
rb_thread_t *th;
rb_fiber_t *fiber;
- struct list_node node;
+ struct ccan_list_node node;
};
#define MUTEX_ALLOW_TRAP FL_USER1
static void
-sync_wakeup(struct list_head *head, long max)
+sync_wakeup(struct ccan_list_head *head, long max)
{
struct sync_waiter *cur = 0, *next;
- list_for_each_safe(head, cur, next, node) {
- list_del_init(&cur->node);
+ ccan_list_for_each_safe(head, cur, next, node) {
+ ccan_list_del_init(&cur->node);
if (cur->th->status != THREAD_KILLED) {
@@ -45,13 +45,13 @@ sync_wakeup(struct list_head *head, long max)
}
static void
-wakeup_one(struct list_head *head)
+wakeup_one(struct ccan_list_head *head)
{
sync_wakeup(head, 1);
}
static void
-wakeup_all(struct list_head *head)
+wakeup_all(struct ccan_list_head *head)
{
sync_wakeup(head, LONG_MAX);
}
@@ -95,7 +95,7 @@ rb_mutex_num_waiting(rb_mutex_t *mutex)
struct sync_waiter *w = 0;
size_t n = 0;
- list_for_each(&mutex->waitq, w, node) {
+ ccan_list_for_each(&mutex->waitq, w, node) {
n++;
}
@@ -152,7 +152,7 @@ mutex_alloc(VALUE klass)
obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
- list_head_init(&mutex->waitq);
+ ccan_list_head_init(&mutex->waitq);
return obj;
}
@@ -269,7 +269,7 @@ static VALUE
delete_from_waitq(VALUE value)
{
struct sync_waiter *sync_waiter = (void *)value;
- list_del(&sync_waiter->node);
+ ccan_list_del(&sync_waiter->node);
return Qnil;
}
@@ -302,7 +302,7 @@ do_mutex_lock(VALUE self, int interruptible_p)
.fiber = fiber
};
- list_add_tail(&mutex->waitq, &sync_waiter.node);
+ ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
rb_ensure(call_rb_fiber_scheduler_block, self, delete_from_waitq, (VALUE)&sync_waiter);
@@ -335,11 +335,11 @@ do_mutex_lock(VALUE self, int interruptible_p)
.fiber = fiber
};
- list_add_tail(&mutex->waitq, &sync_waiter.node);
+ ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
native_sleep(th, timeout); /* release GVL */
- list_del(&sync_waiter.node);
+ ccan_list_del(&sync_waiter.node);
if (!mutex->fiber) {
mutex->fiber = fiber;
@@ -427,8 +427,8 @@ rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
struct sync_waiter *cur = 0, *next;
mutex->fiber = 0;
- list_for_each_safe(&mutex->waitq, cur, next, node) {
- list_del_init(&cur->node);
+ ccan_list_for_each_safe(&mutex->waitq, cur, next, node) {
+ ccan_list_del_init(&cur->node);
if (cur->th->scheduler != Qnil && rb_fiberptr_blocking(cur->fiber) == 0) {
rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
@@ -491,7 +491,7 @@ rb_mutex_abandon_locking_mutex(rb_thread_t *th)
if (th->locking_mutex) {
rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
- list_head_init(&mutex->waitq);
+ ccan_list_head_init(&mutex->waitq);
th->locking_mutex = Qfalse;
}
}
@@ -506,7 +506,7 @@ rb_mutex_abandon_all(rb_mutex_t *mutexes)
mutexes = mutex->next_mutex;
mutex->fiber = 0;
mutex->next_mutex = 0;
- list_head_init(&mutex->waitq);
+ ccan_list_head_init(&mutex->waitq);
}
}
#endif
@@ -631,7 +631,7 @@ void rb_mutex_allow_trap(VALUE self, int val)
#define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
PACKED_STRUCT_UNALIGNED(struct rb_queue {
- struct list_head waitq;
+ struct ccan_list_head waitq;
rb_serial_t fork_gen;
const VALUE que;
int num_waiting;
@@ -642,7 +642,7 @@ PACKED_STRUCT_UNALIGNED(struct rb_queue {
PACKED_STRUCT_UNALIGNED(struct rb_szqueue {
struct rb_queue q;
int num_waiting_push;
- struct list_head pushq;
+ struct ccan_list_head pushq;
long max;
});
@@ -674,7 +674,7 @@ queue_alloc(VALUE klass)
struct rb_queue *q;
obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
- list_head_init(queue_waitq(q));
+ ccan_list_head_init(queue_waitq(q));
return obj;
}
@@ -688,7 +688,7 @@ queue_fork_check(struct rb_queue *q)
}
/* forked children can't reach into parent thread stacks */
q->fork_gen = fork_gen;
- list_head_init(queue_waitq(q));
+ ccan_list_head_init(queue_waitq(q));
q->num_waiting = 0;
return 1;
}
@@ -732,8 +732,8 @@ szqueue_alloc(VALUE klass)
struct rb_szqueue *sq;
VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
&szqueue_data_type, sq);
- list_head_init(szqueue_waitq(sq));
- list_head_init(szqueue_pushq(sq));
+ ccan_list_head_init(szqueue_waitq(sq));
+ ccan_list_head_init(szqueue_pushq(sq));
return obj;
}
@@ -744,7 +744,7 @@ szqueue_ptr(VALUE obj)
TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
if (queue_fork_check(&sq->q)) {
- list_head_init(szqueue_pushq(sq));
+ ccan_list_head_init(szqueue_pushq(sq));
sq->num_waiting_push = 0;
}
@@ -869,7 +869,7 @@ rb_queue_initialize(int argc, VALUE *argv, VALUE self)
initial = rb_to_array(initial);
}
RB_OBJ_WRITE(self, &q->que, ary_buf_new());
- list_head_init(queue_waitq(q));
+ ccan_list_head_init(queue_waitq(q));
if (argc == 1) {
rb_ary_concat(q->que, initial);
}
@@ -983,7 +983,7 @@ queue_sleep_done(VALUE p)
{
struct queue_waiter *qw = (struct queue_waiter *)p;
- list_del(&qw->w.node);
+ ccan_list_del(&qw->w.node);
qw->as.q->num_waiting--;
return Qfalse;
@@ -994,7 +994,7 @@ szqueue_sleep_done(VALUE p)
{
struct queue_waiter *qw = (struct queue_waiter *)p;
- list_del(&qw->w.node);
+ ccan_list_del(&qw->w.node);
qw->as.sq->num_waiting_push--;
return Qfalse;
@@ -1023,9 +1023,9 @@ queue_do_pop(VALUE self, struct rb_queue *q, int should_block)
.as = {.q = q}
};
- struct list_head *waitq = queue_waitq(q);
+ struct ccan_list_head *waitq = queue_waitq(q);
- list_add_tail(waitq, &queue_waiter.w.node);
+ ccan_list_add_tail(waitq, &queue_waiter.w.node);
queue_waiter.as.q->num_waiting++;
rb_ensure(queue_sleep, self, queue_sleep_done, (VALUE)&queue_waiter);
@@ -1152,8 +1152,8 @@ rb_szqueue_initialize(VALUE self, VALUE vmax)
}
RB_OBJ_WRITE(self, &sq->q.que, ary_buf_new());
- list_head_init(szqueue_waitq(sq));
- list_head_init(szqueue_pushq(sq));
+ ccan_list_head_init(szqueue_waitq(sq));
+ ccan_list_head_init(szqueue_pushq(sq));
sq->max = max;
return self;
@@ -1266,9 +1266,9 @@ rb_szqueue_push(int argc, VALUE *argv, VALUE self)
.as = {.sq = sq}
};
- struct list_head *pushq = szqueue_pushq(sq);
+ struct ccan_list_head *pushq = szqueue_pushq(sq);
- list_add_tail(pushq, &queue_waiter.w.node);
+ ccan_list_add_tail(pushq, &queue_waiter.w.node);
sq->num_waiting_push++;
rb_ensure(queue_sleep, self, szqueue_sleep_done, (VALUE)&queue_waiter);
@@ -1381,7 +1381,7 @@ rb_szqueue_empty_p(VALUE self)
/* ConditionalVariable */
struct rb_condvar {
- struct list_head waitq;
+ struct ccan_list_head waitq;
rb_serial_t fork_gen;
};
@@ -1436,7 +1436,7 @@ condvar_ptr(VALUE self)
/* forked children can't reach into parent thread stacks */
if (cv->fork_gen != fork_gen) {
cv->fork_gen = fork_gen;
- list_head_init(&cv->waitq);
+ ccan_list_head_init(&cv->waitq);
}
return cv;
@@ -1449,7 +1449,7 @@ condvar_alloc(VALUE klass)
VALUE obj;
obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
- list_head_init(&cv->waitq);
+ ccan_list_head_init(&cv->waitq);
return obj;
}
@@ -1464,7 +1464,7 @@ static VALUE
rb_condvar_initialize(VALUE self)
{
struct rb_condvar *cv = condvar_ptr(self);
- list_head_init(&cv->waitq);
+ ccan_list_head_init(&cv->waitq);
return self;
}
@@ -1510,7 +1510,7 @@ rb_condvar_wait(int argc, VALUE *argv, VALUE self)
.fiber = ec->fiber_ptr
};
- list_add_tail(&cv->waitq, &sync_waiter.node);
+ ccan_list_add_tail(&cv->waitq, &sync_waiter.node);
return rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&sync_waiter);
}
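
The sync_waiter hunks all follow one discipline: the waiter lives on the caller's stack, so it must leave the queue before the frame dies. A sketch of that shape, with a stubbed sleep standing in for native_sleep():

#include "ccan/list/list.h"

struct sync_waiter_like { struct ccan_list_node node; };

static void sleep_releasing_gvl(void) { /* native_sleep() stand-in */ }

/* do_mutex_lock()'s shape: queue FIFO with ccan_list_add_tail(), block,
 * then unlink unconditionally -- if the node outlived this frame, the
 * waitq would point into a dead stack. */
static void wait_on(struct ccan_list_head *waitq)
{
    struct sync_waiter_like w;

    ccan_list_add_tail(waitq, &w.node);
    sleep_releasing_gvl();
    ccan_list_del(&w.node);
}
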
diff --git a/variable.c b/variable.c
index 8cb507628c..1dd867cc0c 100644
--- a/variable.c
+++ b/variable.c
@@ -2104,7 +2104,7 @@ autoload_data(VALUE mod, ID id)
}
struct autoload_const {
- struct list_node cnode; /* <=> autoload_data_i.constants */
+ struct ccan_list_node cnode; /* <=> autoload_data_i.constants */
VALUE mod;
VALUE ad; /* autoload_data_i */
VALUE value;
@@ -2119,14 +2119,14 @@ struct autoload_state {
struct autoload_const *ac;
VALUE result;
VALUE thread;
- struct list_head waitq;
+ struct ccan_list_head waitq;
};
struct autoload_data_i {
VALUE feature;
struct autoload_state *state; /* points to on-stack struct */
rb_serial_t fork_gen;
- struct list_head constants; /* <=> autoload_const.cnode */
+ struct ccan_list_head constants; /* <=> autoload_const.cnode */
};
static void
@@ -2144,7 +2144,7 @@ autoload_i_mark(void *ptr)
rb_gc_mark_movable(p->feature);
/* allow GC to free us if no modules refer to this via autoload_const.ad */
- if (list_empty(&p->constants)) {
+ if (ccan_list_empty(&p->constants)) {
rb_hash_delete(autoload_featuremap, p->feature);
}
}
@@ -2155,7 +2155,7 @@ autoload_i_free(void *ptr)
struct autoload_data_i *p = ptr;
/* we may leak some memory at VM shutdown time, no big deal */
- if (list_empty(&p->constants)) {
+ if (ccan_list_empty(&p->constants)) {
xfree(p);
}
}
@@ -2198,7 +2198,7 @@ static void
autoload_c_free(void *ptr)
{
struct autoload_const *ac = ptr;
- list_del(&ac->cnode);
+ ccan_list_del(&ac->cnode);
xfree(ac);
}
@@ -2288,7 +2288,7 @@ rb_autoload_str(VALUE mod, ID id, VALUE file)
&autoload_data_i_type, ele);
ele->feature = file;
ele->state = 0;
- list_head_init(&ele->constants);
+ ccan_list_head_init(&ele->constants);
rb_hash_aset(autoload_featuremap, file, ad);
}
else {
@@ -2304,7 +2304,7 @@ rb_autoload_str(VALUE mod, ID id, VALUE file)
ac->value = Qundef;
ac->flag = CONST_PUBLIC;
ac->ad = ad;
- list_add_tail(&ele->constants, &ac->cnode);
+ ccan_list_add_tail(&ele->constants, &ac->cnode);
st_insert(tbl, (st_data_t)id, (st_data_t)acv);
}
}
@@ -2325,7 +2325,7 @@ autoload_delete(VALUE mod, ID id)
ele = get_autoload_data((VALUE)load, &ac);
VM_ASSERT(ele);
if (ele) {
- VM_ASSERT(!list_empty(&ele->constants));
+ VM_ASSERT(!ccan_list_empty(&ele->constants));
}
/*
@@ -2333,7 +2333,7 @@ autoload_delete(VALUE mod, ID id)
* with parallel autoload. Using list_del_init here so list_del
* works in autoload_c_free
*/
- list_del_init(&ac->cnode);
+ ccan_list_del_init(&ac->cnode);
if (tbl->num_entries == 0) {
n = autoload;
@@ -2480,7 +2480,7 @@ autoload_reset(VALUE arg)
if (RTEST(state->result)) {
struct autoload_const *next;
- list_for_each_safe(&ele->constants, ac, next, cnode) {
+ ccan_list_for_each_safe(&ele->constants, ac, next, cnode) {
if (ac->value != Qundef) {
autoload_const_set(ac);
}
@@ -2491,11 +2491,11 @@ autoload_reset(VALUE arg)
if (need_wakeups) {
struct autoload_state *cur = 0, *nxt;
- list_for_each_safe(&state->waitq, cur, nxt, waitq.n) {
+ ccan_list_for_each_safe(&state->waitq, cur, nxt, waitq.n) {
VALUE th = cur->thread;
cur->thread = Qfalse;
- list_del_init(&cur->waitq.n); /* idempotent */
+ ccan_list_del_init(&cur->waitq.n); /* idempotent */
/*
* cur is stored on the stack of cur->waiting_th,
@@ -2530,7 +2530,7 @@ autoload_sleep_done(VALUE arg)
struct autoload_state *state = (struct autoload_state *)arg;
if (state->thread != Qfalse && rb_thread_to_be_killed(state->thread)) {
- list_del(&state->waitq.n); /* idempotent after list_del_init */
+ ccan_list_del(&state->waitq.n); /* idempotent after ccan_list_del_init */
}
return Qfalse;
@@ -2575,13 +2575,13 @@ rb_autoload_load(VALUE mod, ID id)
* autoload_reset will wake up any threads added to this
* if and only if the GVL is released during autoload_require
*/
- list_head_init(&state.waitq);
+ ccan_list_head_init(&state.waitq);
}
else if (state.thread == ele->state->thread) {
return Qfalse;
}
else {
- list_add_tail(&ele->state->waitq, &state.waitq.n);
+ ccan_list_add_tail(&ele->state->waitq, &state.waitq.n);
rb_ensure(autoload_sleep, (VALUE)&state,
autoload_sleep_done, (VALUE)&state);
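
The autoload hunks lean on a layout fact worth spelling out: struct ccan_list_head is a wrapper around a single ccan_list_node named n, which is why `&state.waitq.n` can enqueue one state's head as a node on another's list. A sketch of that pun, with an illustrative struct:

#include "ccan/list/list.h"

struct autoload_state_like {
    struct ccan_list_head waitq;  /* head for the owner, node (via .n) for waiters */
};

static void enqueue_waiter(struct autoload_state_like *owner,
                           struct autoload_state_like *waiter)
{
    /* The owning thread ran ccan_list_head_init(&owner->waitq); each later
     * thread parks its own waitq's embedded node on that list, as
     * rb_autoload_load() does with &state.waitq.n. */
    ccan_list_add_tail(&owner->waitq, &waiter->waitq.n);
}
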
diff --git a/vm.c b/vm.c
index abeef4a635..8bfebcccef 100644
--- a/vm.c
+++ b/vm.c
@@ -2634,12 +2634,12 @@ rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx)
if (ptr) {
rb_vm_t *vm = ptr;
rb_ractor_t *r = 0;
- list_for_each(&vm->ractor.set, r, vmlr_node) {
+ ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
rb_ractor_status_p(r, ractor_running));
if (r->threads.cnt > 0) {
rb_thread_t *th = 0;
- list_for_each(&r->threads.set, th, lt_node) {
+ ccan_list_for_each(&r->threads.set, th, lt_node) {
VM_ASSERT(th != NULL);
rb_execution_context_t * ec = th->ec;
if (ec->vm_stack) {
@@ -2676,7 +2676,7 @@ rb_vm_mark(void *ptr)
long i, len;
const VALUE *obj_ary;
- list_for_each(&vm->ractor.set, r, vmlr_node) {
+ ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
// ractor.set only contains blocking or running ractors
VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
rb_ractor_status_p(r, ractor_running));
@@ -2808,10 +2808,10 @@ ruby_vm_destruct(rb_vm_t *vm)
return 0;
}
-size_t rb_vm_memsize_waiting_list(struct list_head *waiting_list); // process.c
-size_t rb_vm_memsize_waiting_fds(struct list_head *waiting_fds); // thread.c
+size_t rb_vm_memsize_waiting_list(struct ccan_list_head *waiting_list); // process.c
+size_t rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds); // thread.c
size_t rb_vm_memsize_postponed_job_buffer(void); // vm_trace.c
-size_t rb_vm_memsize_workqueue(struct list_head *workqueue); // vm_trace.c
+size_t rb_vm_memsize_workqueue(struct ccan_list_head *workqueue); // vm_trace.c
// Used for VM memsize reporting. Returns the size of the at_exit list by
// looping through the linked list and adding up the size of the structs.
@@ -2862,7 +2862,7 @@ vm_memsize(const void *ptr)
);
// TODO
- // struct { struct list_head set; } ractor;
+ // struct { struct ccan_list_head set; } ractor;
// void *main_altstack; #ifdef USE_SIGALTSTACK
// struct rb_objspace *objspace;
}
diff --git a/vm_core.h b/vm_core.h
index d985bd40ba..56013cf492 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -628,7 +628,7 @@ typedef struct rb_vm_struct {
VALUE self;
struct {
- struct list_head set;
+ struct ccan_list_head set;
unsigned int cnt;
unsigned int blocking_cnt;
@@ -658,9 +658,9 @@ typedef struct rb_vm_struct {
rb_serial_t fork_gen;
rb_nativethread_lock_t waitpid_lock;
- struct list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
- struct list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
- struct list_head waiting_fds; /* <=> struct waiting_fd */
+ struct ccan_list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
+ struct ccan_list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
+ struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */
/* set in single-threaded processes only: */
volatile int ubf_async_safe;
@@ -701,7 +701,7 @@ typedef struct rb_vm_struct {
int src_encoding_index;
/* workqueue (thread-safe, NOT async-signal-safe) */
- struct list_head workqueue; /* <=> rb_workqueue_job.jnode */
+ struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
rb_nativethread_lock_t workqueue_lock;
VALUE orig_progname, progname;
@@ -998,7 +998,7 @@ typedef struct rb_ractor_struct rb_ractor_t;
#endif
typedef struct rb_thread_struct {
- struct list_node lt_node; // managed by a ractor
+ struct ccan_list_node lt_node; // managed by a ractor
VALUE self;
rb_ractor_t *ractor;
rb_vm_t *vm;
@@ -1769,11 +1769,11 @@ void rb_thread_wakeup_timer_thread(int);
static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
- list_head_init(&vm->waiting_fds);
- list_head_init(&vm->waiting_pids);
- list_head_init(&vm->workqueue);
- list_head_init(&vm->waiting_grps);
- list_head_init(&vm->ractor.set);
+ ccan_list_head_init(&vm->waiting_fds);
+ ccan_list_head_init(&vm->waiting_pids);
+ ccan_list_head_init(&vm->workqueue);
+ ccan_list_head_init(&vm->waiting_grps);
+ ccan_list_head_init(&vm->ractor.set);
}
typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
diff --git a/vm_dump.c b/vm_dump.c
index 48f37808a5..d7223e9ef9 100644
--- a/vm_dump.c
+++ b/vm_dump.c
@@ -1188,7 +1188,7 @@ rb_vmdebug_stack_dump_all_threads(void)
rb_ractor_t *r = GET_RACTOR();
// TODO: now it only shows current ractor
- list_for_each(&r->threads.set, th, lt_node) {
+ ccan_list_for_each(&r->threads.set, th, lt_node) {
#ifdef NON_SCALAR_THREAD_ID
rb_thread_id_string_t buf;
ruby_fill_thread_id_string(th->thread_id, buf);
diff --git a/vm_sync.c b/vm_sync.c
index 038e87f53b..610bdb7b10 100644
--- a/vm_sync.c
+++ b/vm_sync.c
@@ -254,7 +254,7 @@ rb_vm_barrier(void)
// send signal
rb_ractor_t *r = 0;
- list_for_each(&vm->ractor.set, r, vmlr_node) {
+ ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
if (r != cr) {
rb_ractor_vm_barrier_interrupt_running_thread(r);
}
@@ -272,7 +272,7 @@ rb_vm_barrier(void)
vm->ractor.sync.barrier_waiting = false;
vm->ractor.sync.barrier_cnt++;
- list_for_each(&vm->ractor.set, r, vmlr_node) {
+ ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
rb_native_cond_signal(&r->barrier_wait_cond);
}
}
diff --git a/vm_trace.c b/vm_trace.c
index a9074c338e..aaa70d3ef0 100644
--- a/vm_trace.c
+++ b/vm_trace.c
@@ -1594,19 +1594,19 @@ typedef struct rb_postponed_job_struct {
#define MAX_POSTPONED_JOB_SPECIAL_ADDITION 24
struct rb_workqueue_job {
- struct list_node jnode; /* <=> vm->workqueue */
+ struct ccan_list_node jnode; /* <=> vm->workqueue */
rb_postponed_job_t job;
};
// Used for VM memsize reporting. Returns the size of a list of rb_workqueue_job
// structs. Defined here because the struct definition lives here as well.
size_t
-rb_vm_memsize_workqueue(struct list_head *workqueue)
+rb_vm_memsize_workqueue(struct ccan_list_head *workqueue)
{
struct rb_workqueue_job *work = 0;
size_t size = 0;
- list_for_each(workqueue, work, jnode) {
+ ccan_list_for_each(workqueue, work, jnode) {
size += sizeof(struct rb_workqueue_job);
}
@@ -1732,7 +1732,7 @@ rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
wq_job->job.data = data;
rb_nativethread_lock_lock(&vm->workqueue_lock);
- list_add_tail(&vm->workqueue, &wq_job->jnode);
+ ccan_list_add_tail(&vm->workqueue, &wq_job->jnode);
rb_nativethread_lock_unlock(&vm->workqueue_lock);
// TODO: current implementation affects only main ractor
@@ -1748,12 +1748,12 @@ rb_postponed_job_flush(rb_vm_t *vm)
const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK|TRAP_INTERRUPT_MASK;
volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
VALUE volatile saved_errno = ec->errinfo;
- struct list_head tmp;
+ struct ccan_list_head tmp;
- list_head_init(&tmp);
+ ccan_list_head_init(&tmp);
rb_nativethread_lock_lock(&vm->workqueue_lock);
- list_append_list(&tmp, &vm->workqueue);
+ ccan_list_append_list(&tmp, &vm->workqueue);
rb_nativethread_lock_unlock(&vm->workqueue_lock);
ec->errinfo = Qnil;
@@ -1771,7 +1771,7 @@ rb_postponed_job_flush(rb_vm_t *vm)
(*pjob->func)(pjob->data);
}
}
- while ((wq_job = list_pop(&tmp, struct rb_workqueue_job, jnode))) {
+ while ((wq_job = ccan_list_pop(&tmp, struct rb_workqueue_job, jnode))) {
rb_postponed_job_t pjob = wq_job->job;
free(wq_job);
@@ -1785,9 +1785,9 @@ rb_postponed_job_flush(rb_vm_t *vm)
ec->errinfo = saved_errno;
/* don't leak memory if a job threw an exception */
- if (!list_empty(&tmp)) {
+ if (!ccan_list_empty(&tmp)) {
rb_nativethread_lock_lock(&vm->workqueue_lock);
- list_prepend_list(&vm->workqueue, &tmp);
+ ccan_list_prepend_list(&vm->workqueue, &tmp);
rb_nativethread_lock_unlock(&vm->workqueue_lock);
RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
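
rb_postponed_job_flush() above combines three list moves. A minimal sketch of the handoff, with illustrative types and free() standing in for running the job:

#include <stdlib.h>
#include "ccan/list/list.h"

struct job_like { struct ccan_list_node jnode; };

static void flush(struct ccan_list_head *shared /* vm->workqueue stand-in */)
{
    struct ccan_list_head tmp;
    struct job_like *j;

    ccan_list_head_init(&tmp);
    ccan_list_append_list(&tmp, shared);  /* O(1) splice; `shared` is left empty */

    while ((j = ccan_list_pop(&tmp, struct job_like, jnode)))
        free(j);                          /* the real code runs the job first */

    if (!ccan_list_empty(&tmp))           /* only if draining aborted early */
        ccan_list_prepend_list(shared, &tmp);
}
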