-rw-r--r--  array.c                  420
-rw-r--r--  common.mk                  3
-rw-r--r--  compile.c                  6
-rw-r--r--  debug_counter.h           12
-rw-r--r--  enum.c                     8
-rw-r--r--  gc.c                     147
-rw-r--r--  include/ruby/ruby.h       30
-rw-r--r--  inits.c                    1
-rw-r--r--  insns.def                  2
-rw-r--r--  internal.h                28
-rw-r--r--  string.c                   6
-rw-r--r--  test/ruby/test_enum.rb    15
-rw-r--r--  variable.c               146
-rw-r--r--  vm_args.c                  8
-rw-r--r--  vm_eval.c                  2
-rw-r--r--  vm_insnhelper.c            2
16 files changed, 184 insertions, 652 deletions
diff --git a/array.c b/array.c
index d735879b0b..83a5ff41f5 100644
--- a/array.c
+++ b/array.c
@@ -14,14 +14,12 @@
#include "ruby/encoding.h"
#include "ruby/util.h"
#include "ruby/st.h"
+#include "internal.h"
#include "probes.h"
#include "id.h"
#include "debug_counter.h"
-#include "gc.h"
-#include "transient_heap.h"
-#include "internal.h"
-#if !ARRAY_DEBUG
+#ifndef ARRAY_DEBUG
# define NDEBUG
#endif
#include "ruby_assert.h"
@@ -44,21 +42,17 @@ VALUE rb_cArray;
#define ARY_HEAP_PTR(a) (assert(!ARY_EMBED_P(a)), RARRAY(a)->as.heap.ptr)
#define ARY_HEAP_LEN(a) (assert(!ARY_EMBED_P(a)), RARRAY(a)->as.heap.len)
-#define ARY_HEAP_CAPA(a) (assert(!ARY_EMBED_P(a)), RARRAY(a)->as.heap.aux.capa)
-
#define ARY_EMBED_PTR(a) (assert(ARY_EMBED_P(a)), RARRAY(a)->as.ary)
#define ARY_EMBED_LEN(a) \
(assert(ARY_EMBED_P(a)), \
(long)((RBASIC(a)->flags >> RARRAY_EMBED_LEN_SHIFT) & \
(RARRAY_EMBED_LEN_MASK >> RARRAY_EMBED_LEN_SHIFT)))
-#define ARY_HEAP_SIZE(a) (assert(!ARY_EMBED_P(a)), assert(ARY_OWNS_HEAP_P(a)), ARY_HEAP_CAPA(a) * sizeof(VALUE))
+#define ARY_HEAP_SIZE(a) (assert(!ARY_EMBED_P(a)), assert(ARY_OWNS_HEAP_P(a)), RARRAY(a)->as.heap.aux.capa * sizeof(VALUE))
#define ARY_OWNS_HEAP_P(a) (!FL_TEST((a), ELTS_SHARED|RARRAY_EMBED_FLAG))
#define FL_SET_EMBED(a) do { \
assert(!ARY_SHARED_P(a)); \
FL_SET((a), RARRAY_EMBED_FLAG); \
- FL_UNSET_RAW((a), RARRAY_TRANSIENT_FLAG); \
- ary_verify(a); \
} while (0)
#define FL_UNSET_EMBED(ary) FL_UNSET((ary), RARRAY_EMBED_FLAG|RARRAY_EMBED_LEN_MASK)
#define FL_SET_SHARED(ary) do { \
@@ -108,7 +102,7 @@ VALUE rb_cArray;
} while (0)
#define ARY_CAPA(ary) (ARY_EMBED_P(ary) ? RARRAY_EMBED_LEN_MAX : \
- ARY_SHARED_ROOT_P(ary) ? RARRAY_LEN(ary) : ARY_HEAP_CAPA(ary))
+ ARY_SHARED_ROOT_P(ary) ? RARRAY_LEN(ary) : RARRAY(ary)->as.heap.aux.capa)
#define ARY_SET_CAPA(ary, n) do { \
assert(!ARY_EMBED_P(ary)); \
assert(!ARY_SHARED_P(ary)); \
@@ -136,82 +130,11 @@ VALUE rb_cArray;
} while (0)
#define FL_SET_SHARED_ROOT(ary) do { \
assert(!ARY_EMBED_P(ary)); \
- assert(!RARRAY_TRANSIENT_P(ary)); \
FL_SET((ary), RARRAY_SHARED_ROOT_FLAG); \
} while (0)
#define ARY_SET(a, i, v) RARRAY_ASET((assert(!ARY_SHARED_P(a)), (a)), (i), (v))
-
-#if ARRAY_DEBUG
-#define ary_verify(ary) ary_verify_(ary, __FILE__, __LINE__)
-
-static VALUE
-ary_verify_(VALUE ary, const char *file, int line)
-{
- assert(RB_TYPE_P(ary, T_ARRAY));
-
- if (FL_TEST(ary, ELTS_SHARED)) {
- VALUE root = RARRAY(ary)->as.heap.aux.shared;
- const VALUE *ptr = ARY_HEAP_PTR(ary);
- const VALUE *root_ptr = RARRAY_CONST_PTR_TRANSIENT(root);
- long len = ARY_HEAP_LEN(ary), root_len = RARRAY_LEN(root);
- assert(FL_TEST(root, RARRAY_SHARED_ROOT_FLAG));
- assert(root_ptr <= ptr && ptr + len <= root_ptr + root_len);
- ary_verify(root);
- }
- else if (ARY_EMBED_P(ary)) {
- assert(!RARRAY_TRANSIENT_P(ary));
- assert(!ARY_SHARED_P(ary));
- assert(RARRAY_LEN(ary) <= RARRAY_EMBED_LEN_MAX);
- }
- else {
-#if 1
- const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
- long i, len = RARRAY_LEN(ary);
- volatile VALUE v;
- if (len > 1) len = 1; /* check only HEAD */
- for (i=0; i<len; i++) {
- v = ptr[i]; /* access check */
- }
- v = v;
-#endif
- }
-
- if (RARRAY_TRANSIENT_P(ary)) {
- assert(rb_transient_heap_managed_ptr_p(RARRAY_CONST_PTR_TRANSIENT(ary)));
- }
-
- rb_transient_heap_verify();
-
- return ary;
-}
-
-void
-rb_ary_verify(VALUE ary){
- ary_verify(ary);
-}
-#else
-#define ary_verify(ary) ((void)0)
-#endif
-
-VALUE *
-rb_ary_ptr_use_start(VALUE ary)
-{
-#if ARRAY_DEBUG
- FL_SET_RAW(ary, RARRAY_PTR_IN_USE_FLAG);
-#endif
- return (VALUE *)RARRAY_CONST_PTR_TRANSIENT(ary);
-}
-
-void
-rb_ary_ptr_use_end(VALUE ary)
-{
-#if ARRAY_DEBUG
- FL_UNSET_RAW(ary, RARRAY_PTR_IN_USE_FLAG);
-#endif
-}
-
void
rb_mem_clear(register VALUE *mem, register long size)
{
@@ -272,167 +195,49 @@ ary_memcpy(VALUE ary, long beg, long argc, const VALUE *argv)
ary_memcpy0(ary, beg, argc, argv, ary);
}
-static VALUE *
-ary_heap_alloc(VALUE ary, size_t capa)
-{
- VALUE *ptr = rb_transient_heap_alloc(ary, sizeof(VALUE) * capa);
-
- if (ptr != NULL) {
- FL_SET_RAW(ary, RARRAY_TRANSIENT_FLAG);
- }
- else {
- FL_UNSET_RAW(ary, RARRAY_TRANSIENT_FLAG);
- ptr = ALLOC_N(VALUE, capa);
- }
-
- return ptr;
-}
-
-static void
-ary_heap_free_ptr(VALUE ary, const VALUE *ptr, long size)
-{
- if (RARRAY_TRANSIENT_P(ary)) {
- /* ignore it */
- }
- else {
- ruby_sized_xfree((void *)ptr, size);
- }
-}
-
-static void
-ary_heap_free(VALUE ary)
-{
- if (RARRAY_TRANSIENT_P(ary)) {
- FL_UNSET_RAW(ary, RARRAY_TRANSIENT_FLAG);
- }
- else {
- ary_heap_free_ptr(ary, ARY_HEAP_PTR(ary), ARY_HEAP_SIZE(ary));
- }
-}
-
-static void
-ary_heap_realloc(VALUE ary, size_t new_capa)
-{
- size_t old_capa = ARY_HEAP_CAPA(ary);
-
- if (RARRAY_TRANSIENT_P(ary)) {
- if (new_capa <= old_capa) {
- /* do nothing */
- }
- else {
- VALUE *new_ptr = rb_transient_heap_alloc(ary, sizeof(VALUE) * new_capa);
-
- if (new_ptr == NULL) {
- new_ptr = ALLOC_N(VALUE, new_capa);
- FL_UNSET_RAW(ary, RARRAY_TRANSIENT_FLAG);
- }
-
- MEMCPY(new_ptr, ARY_HEAP_PTR(ary), VALUE, old_capa);
- ARY_SET_PTR(ary, new_ptr);
- }
- }
- else {
- SIZED_REALLOC_N(RARRAY(ary)->as.heap.ptr, VALUE, new_capa, old_capa);
- }
- ary_verify(ary);
-}
-
-static inline void
-rb_ary_transient_heap_evacuate_(VALUE ary, int transient, int promote)
-{
- if (transient) {
- VALUE *new_ptr;
- const VALUE *old_ptr = ARY_HEAP_PTR(ary);
- long capa = ARY_HEAP_CAPA(ary);
- long len = ARY_HEAP_LEN(ary);
-
- if (ARY_SHARED_ROOT_P(ary)) {
- capa = len;
- }
-
- assert(ARY_OWNS_HEAP_P(ary));
- assert(RARRAY_TRANSIENT_P(ary));
- assert(!ARY_PTR_USING_P(ary));
-
- if (promote) {
- new_ptr = ALLOC_N(VALUE, capa);
- FL_UNSET_RAW(ary, RARRAY_TRANSIENT_FLAG);
- }
- else {
- new_ptr = ary_heap_alloc(ary, capa);
- }
-
- MEMCPY(new_ptr, old_ptr, VALUE, capa);
- /* do not use ARY_SET_PTR() because they assert !frozen */
- RARRAY(ary)->as.heap.ptr = new_ptr;
- }
-
- ary_verify(ary);
-}
-
-void
-rb_ary_transient_heap_evacuate(VALUE ary, int promote)
-{
- rb_ary_transient_heap_evacuate_(ary, RARRAY_TRANSIENT_P(ary), promote);
-}
-
-void
-rb_ary_detransient(VALUE ary)
-{
- assert(RARRAY_TRANSIENT_P(ary));
- rb_ary_transient_heap_evacuate_(ary, TRUE, TRUE);
-}
-
static void
ary_resize_capa(VALUE ary, long capacity)
{
assert(RARRAY_LEN(ary) <= capacity);
assert(!OBJ_FROZEN(ary));
assert(!ARY_SHARED_P(ary));
-
if (capacity > RARRAY_EMBED_LEN_MAX) {
if (ARY_EMBED_P(ary)) {
long len = ARY_EMBED_LEN(ary);
- VALUE *ptr = ary_heap_alloc(ary, capacity);
-
+ VALUE *ptr = ALLOC_N(VALUE, (capacity));
MEMCPY(ptr, ARY_EMBED_PTR(ary), VALUE, len);
FL_UNSET_EMBED(ary);
ARY_SET_PTR(ary, ptr);
ARY_SET_HEAP_LEN(ary, len);
}
else {
- ary_heap_realloc(ary, capacity);
+ SIZED_REALLOC_N(RARRAY(ary)->as.heap.ptr, VALUE, capacity, RARRAY(ary)->as.heap.aux.capa);
}
- ARY_SET_CAPA(ary, capacity);
+ ARY_SET_CAPA(ary, (capacity));
}
else {
if (!ARY_EMBED_P(ary)) {
- long len = ARY_HEAP_LEN(ary);
- long old_capa = ARY_HEAP_CAPA(ary);
- const VALUE *ptr = ARY_HEAP_PTR(ary);
+ long len = RARRAY_LEN(ary);
+ const VALUE *ptr = RARRAY_CONST_PTR(ary);
- if (len > capacity) len = capacity;
+ if (len > capacity) len = capacity;
MEMCPY((VALUE *)RARRAY(ary)->as.ary, ptr, VALUE, len);
- ary_heap_free_ptr(ary, ptr, old_capa);
-
FL_SET_EMBED(ary);
ARY_SET_LEN(ary, len);
+ ruby_sized_xfree((VALUE *)ptr, RARRAY(ary)->as.heap.aux.capa);
}
}
-
- ary_verify(ary);
}
static inline void
ary_shrink_capa(VALUE ary)
{
long capacity = ARY_HEAP_LEN(ary);
- long old_capa = ARY_HEAP_CAPA(ary);
+ long old_capa = RARRAY(ary)->as.heap.aux.capa;
assert(!ARY_SHARED_P(ary));
assert(old_capa >= capacity);
- if (old_capa > capacity) ary_heap_realloc(ary, capacity);
-
- ary_verify(ary);
+ if (old_capa > capacity)
+ SIZED_REALLOC_N(RARRAY(ary)->as.heap.ptr, VALUE, capacity, old_capa);
}
static void
@@ -448,8 +253,6 @@ ary_double_capa(VALUE ary, long min)
}
new_capa += min;
ary_resize_capa(ary, new_capa);
-
- ary_verify(ary);
}
static void
@@ -505,7 +308,6 @@ static inline void
rb_ary_modify_check(VALUE ary)
{
rb_check_frozen(ary);
- ary_verify(ary);
}
void
@@ -515,9 +317,6 @@ rb_ary_modify(VALUE ary)
if (ARY_SHARED_P(ary)) {
long shared_len, len = RARRAY_LEN(ary);
VALUE shared = ARY_SHARED(ary);
-
- ary_verify(shared);
-
if (len <= RARRAY_EMBED_LEN_MAX) {
const VALUE *ptr = ARY_HEAP_PTR(ary);
FL_UNSET_SHARED(ary);
@@ -527,9 +326,9 @@ rb_ary_modify(VALUE ary)
ARY_SET_EMBED_LEN(ary, len);
}
else if (ARY_SHARED_OCCUPIED(shared) && len > ((shared_len = RARRAY_LEN(shared))>>1)) {
- long shift = RARRAY_CONST_PTR_TRANSIENT(ary) - RARRAY_CONST_PTR_TRANSIENT(shared);
+ long shift = RARRAY_CONST_PTR(ary) - RARRAY_CONST_PTR(shared);
FL_UNSET_SHARED(ary);
- ARY_SET_PTR(ary, RARRAY_CONST_PTR_TRANSIENT(shared));
+ ARY_SET_PTR(ary, RARRAY_CONST_PTR(shared));
ARY_SET_CAPA(ary, shared_len);
RARRAY_PTR_USE(ary, ptr, {
MEMMOVE(ptr, ptr+shift, VALUE, len);
@@ -538,8 +337,8 @@ rb_ary_modify(VALUE ary)
rb_ary_decrement_share(shared);
}
else {
- VALUE *ptr = ary_heap_alloc(ary, len);
- MEMCPY(ptr, ARY_HEAP_PTR(ary), VALUE, len);
+ VALUE *ptr = ALLOC_N(VALUE, len);
+ MEMCPY(ptr, RARRAY_CONST_PTR(ary), VALUE, len);
rb_ary_unshare(ary);
ARY_SET_CAPA(ary, len);
ARY_SET_PTR(ary, ptr);
@@ -547,7 +346,6 @@ rb_ary_modify(VALUE ary)
rb_gc_writebarrier_remember(ary);
}
- ary_verify(ary);
}
static VALUE
@@ -564,12 +362,9 @@ ary_ensure_room_for_push(VALUE ary, long add_len)
if (new_len > RARRAY_EMBED_LEN_MAX) {
VALUE shared = ARY_SHARED(ary);
if (ARY_SHARED_OCCUPIED(shared)) {
- if (ARY_HEAP_PTR(ary) - RARRAY_CONST_PTR_TRANSIENT(shared) + new_len <= RARRAY_LEN(shared)) {
+ if (RARRAY_CONST_PTR(ary) - RARRAY_CONST_PTR(shared) + new_len <= RARRAY_LEN(shared)) {
rb_ary_modify_check(ary);
-
- ary_verify(ary);
- ary_verify(shared);
- return shared;
+ return shared;
}
else {
/* if array is shared, then it is likely it participate in push/shift pattern */
@@ -578,13 +373,11 @@ ary_ensure_room_for_push(VALUE ary, long add_len)
if (new_len > capa - (capa >> 6)) {
ary_double_capa(ary, new_len);
}
- ary_verify(ary);
return ary;
}
}
}
- ary_verify(ary);
- rb_ary_modify(ary);
+ rb_ary_modify(ary);
}
else {
rb_ary_modify_check(ary);
@@ -594,7 +387,6 @@ ary_ensure_room_for_push(VALUE ary, long add_len)
ary_double_capa(ary, new_len);
}
- ary_verify(ary);
return ary;
}
@@ -667,7 +459,7 @@ ary_new(VALUE klass, long capa)
ary = ary_alloc(klass);
if (capa > RARRAY_EMBED_LEN_MAX) {
- ptr = ary_heap_alloc(ary, capa);
+ ptr = ALLOC_N(VALUE, capa);
FL_UNSET_EMBED(ary);
ARY_SET_PTR(ary, ptr);
ARY_SET_CAPA(ary, capa);
@@ -731,9 +523,7 @@ rb_ary_new_from_values(long n, const VALUE *elts)
VALUE
rb_ary_tmp_new(long capa)
{
- VALUE ary = ary_new(0, capa);
- rb_ary_transient_heap_evacuate(ary, TRUE);
- return ary;
+ return ary_new(0, capa);
}
VALUE
@@ -742,7 +532,6 @@ rb_ary_tmp_new_fill(long capa)
VALUE ary = ary_new(0, capa);
ary_memfill(ary, 0, capa, Qnil);
ARY_SET_LEN(ary, capa);
- rb_ary_transient_heap_evacuate(ary, TRUE);
return ary;
}
@@ -750,13 +539,8 @@ void
rb_ary_free(VALUE ary)
{
if (ARY_OWNS_HEAP_P(ary)) {
- if (RARRAY_TRANSIENT_P(ary)) {
- RB_DEBUG_COUNTER_INC(obj_ary_transient);
- }
- else {
- RB_DEBUG_COUNTER_INC(obj_ary_ptr);
- ary_heap_free(ary);
- }
+ RB_DEBUG_COUNTER_INC(obj_ary_ptr);
+ ruby_sized_xfree((void *)ARY_HEAP_PTR(ary), ARY_HEAP_SIZE(ary));
}
else {
RB_DEBUG_COUNTER_INC(obj_ary_embed);
@@ -779,15 +563,13 @@ ary_discard(VALUE ary)
{
rb_ary_free(ary);
RBASIC(ary)->flags |= RARRAY_EMBED_FLAG;
- RBASIC(ary)->flags &= ~(RARRAY_EMBED_LEN_MASK | RARRAY_TRANSIENT_FLAG);
+ RBASIC(ary)->flags &= ~RARRAY_EMBED_LEN_MASK;
}
static VALUE
ary_make_shared(VALUE ary)
{
assert(!ARY_EMBED_P(ary));
- ary_verify(ary);
-
if (ARY_SHARED_P(ary)) {
return ARY_SHARED(ary);
}
@@ -795,7 +577,6 @@ ary_make_shared(VALUE ary)
return ary;
}
else if (OBJ_FROZEN(ary)) {
- rb_ary_transient_heap_evacuate(ary, TRUE);
ary_shrink_capa(ary);
FL_SET_SHARED_ROOT(ary);
ARY_SET_SHARED_NUM(ary, 1);
@@ -803,25 +584,18 @@ ary_make_shared(VALUE ary)
}
else {
long capa = ARY_CAPA(ary), len = RARRAY_LEN(ary);
- const VALUE *ptr;
NEWOBJ_OF(shared, struct RArray, 0, T_ARRAY | (RGENGC_WB_PROTECTED_ARRAY ? FL_WB_PROTECTED : 0));
-
- rb_ary_transient_heap_evacuate(ary, TRUE);
- ptr = ARY_HEAP_PTR(ary);
-
FL_UNSET_EMBED(shared);
+
ARY_SET_LEN((VALUE)shared, capa);
- ARY_SET_PTR((VALUE)shared, ptr);
- ary_mem_clear((VALUE)shared, len, capa - len);
+ ARY_SET_PTR((VALUE)shared, RARRAY_CONST_PTR(ary));
+ ary_mem_clear((VALUE)shared, len, capa - len);
FL_SET_SHARED_ROOT(shared);
ARY_SET_SHARED_NUM((VALUE)shared, 1);
FL_SET_SHARED(ary);
ARY_SET_SHARED(ary, (VALUE)shared);
OBJ_FREEZE(shared);
-
- ary_verify((VALUE)shared);
- ary_verify(ary);
- return (VALUE)shared;
+ return (VALUE)shared;
}
}
@@ -832,7 +606,7 @@ ary_make_substitution(VALUE ary)
if (len <= RARRAY_EMBED_LEN_MAX) {
VALUE subst = rb_ary_new2(len);
- ary_memcpy(subst, 0, len, RARRAY_CONST_PTR_TRANSIENT(ary));
+ ary_memcpy(subst, 0, len, RARRAY_CONST_PTR(ary));
ARY_SET_EMBED_LEN(subst, len);
return subst;
}
@@ -955,8 +729,8 @@ rb_ary_initialize(int argc, VALUE *argv, VALUE ary)
rb_ary_modify(ary);
if (argc == 0) {
- if (ARY_OWNS_HEAP_P(ary) && ARY_HEAP_PTR(ary) != NULL) {
- ary_heap_free(ary);
+ if (ARY_OWNS_HEAP_P(ary) && RARRAY_CONST_PTR(ary) != 0) {
+ ruby_sized_xfree((void *)RARRAY_CONST_PTR(ary), ARY_HEAP_SIZE(ary));
}
rb_ary_unshare_safe(ary);
FL_SET_EMBED(ary);
@@ -1063,7 +837,7 @@ ary_make_partial(VALUE ary, VALUE klass, long offset, long len)
if (len <= RARRAY_EMBED_LEN_MAX) {
VALUE result = ary_alloc(klass);
- ary_memcpy(result, 0, len, RARRAY_CONST_PTR_TRANSIENT(ary) + offset);
+ ary_memcpy(result, 0, len, RARRAY_CONST_PTR(ary) + offset);
ARY_SET_EMBED_LEN(result, len);
return result;
}
@@ -1072,15 +846,12 @@ ary_make_partial(VALUE ary, VALUE klass, long offset, long len)
FL_UNSET_EMBED(result);
shared = ary_make_shared(ary);
- ARY_SET_PTR(result, RARRAY_CONST_PTR_TRANSIENT(ary));
+ ARY_SET_PTR(result, RARRAY_CONST_PTR(ary));
ARY_SET_LEN(result, RARRAY_LEN(ary));
rb_ary_set_shared(result, shared);
ARY_INCREASE_PTR(result, offset);
ARY_SET_LEN(result, len);
-
- ary_verify(shared);
- ary_verify(result);
return result;
}
}
@@ -1139,13 +910,12 @@ ary_take_first_or_last(int argc, const VALUE *argv, VALUE ary, enum ary_take_pos
VALUE
rb_ary_push(VALUE ary, VALUE item)
{
- long idx = RARRAY_LEN((ary_verify(ary), ary));
+ long idx = RARRAY_LEN(ary);
VALUE target_ary = ary_ensure_room_for_push(ary, 1);
RARRAY_PTR_USE(ary, ptr, {
RB_OBJ_WRITE(target_ary, &ptr[idx], item);
});
ARY_SET_LEN(ary, idx + 1);
- ary_verify(ary);
return ary;
}
@@ -1197,7 +967,6 @@ rb_ary_pop(VALUE ary)
}
--n;
ARY_SET_LEN(ary, n);
- ary_verify(ary);
return RARRAY_AREF(ary, n);
}
@@ -1231,7 +1000,6 @@ rb_ary_pop_m(int argc, VALUE *argv, VALUE ary)
rb_ary_modify_check(ary);
result = ary_take_first_or_last(argc, argv, ary, ARY_TAKE_LAST);
ARY_INCREASE_LEN(ary, -RARRAY_LEN(result));
- ary_verify(ary);
return result;
}
@@ -1250,7 +1018,6 @@ rb_ary_shift(VALUE ary)
MEMMOVE(ptr, ptr+1, VALUE, len-1);
}); /* WB: no new reference */
ARY_INCREASE_LEN(ary, -1);
- ary_verify(ary);
return top;
}
assert(!ARY_EMBED_P(ary)); /* ARY_EMBED_LEN_MAX < ARY_DEFAULT_SIZE */
@@ -1264,8 +1031,6 @@ rb_ary_shift(VALUE ary)
ARY_INCREASE_PTR(ary, 1); /* shift ptr */
ARY_INCREASE_LEN(ary, -1);
- ary_verify(ary);
-
return top;
}
@@ -1336,7 +1101,6 @@ rb_ary_behead(VALUE ary, long n)
}
ARY_INCREASE_LEN(ary, -n);
- ary_verify(ary);
return ary;
}
@@ -1356,8 +1120,8 @@ ary_ensure_room_for_unshift(VALUE ary, int argc)
VALUE shared = ARY_SHARED(ary);
capa = RARRAY_LEN(shared);
if (ARY_SHARED_OCCUPIED(shared) && capa > new_len) {
- head = RARRAY_CONST_PTR_TRANSIENT(ary);
- sharedp = RARRAY_CONST_PTR_TRANSIENT(shared);
+ head = RARRAY_CONST_PTR(ary);
+ sharedp = RARRAY_CONST_PTR(shared);
goto makeroom_if_need;
}
}
@@ -1370,13 +1134,11 @@ ary_ensure_room_for_unshift(VALUE ary, int argc)
/* use shared array for big "queues" */
if (new_len > ARY_DEFAULT_SIZE * 4) {
- ary_verify(ary);
-
- /* make a room for unshifted items */
+ /* make a room for unshifted items */
capa = ARY_CAPA(ary);
ary_make_shared(ary);
- head = sharedp = RARRAY_CONST_PTR_TRANSIENT(ary);
+ head = sharedp = RARRAY_CONST_PTR(ary);
goto makeroom;
makeroom_if_need:
if (head - sharedp < argc) {
@@ -1389,8 +1151,6 @@ ary_ensure_room_for_unshift(VALUE ary, int argc)
}
ARY_SET_PTR(ary, head - argc);
assert(ARY_SHARED_OCCUPIED(ARY_SHARED(ary)));
-
- ary_verify(ary);
return ARY_SHARED(ary);
}
else {
@@ -1399,7 +1159,6 @@ ary_ensure_room_for_unshift(VALUE ary, int argc)
MEMMOVE(ptr + argc, ptr, VALUE, len);
});
- ary_verify(ary);
return ary;
}
}
@@ -1817,7 +1576,7 @@ rb_ary_splice(VALUE ary, long beg, long len, const VALUE *rptr, long rlen)
}
{
- const VALUE *optr = RARRAY_CONST_PTR_TRANSIENT(ary);
+ const VALUE *optr = RARRAY_CONST_PTR(ary);
rofs = (rptr >= optr && rptr < optr + olen) ? rptr - optr : -1;
}
@@ -1830,7 +1589,7 @@ rb_ary_splice(VALUE ary, long beg, long len, const VALUE *rptr, long rlen)
len = beg + rlen;
ary_mem_clear(ary, olen, beg - olen);
if (rlen > 0) {
- if (rofs != -1) rptr = RARRAY_CONST_PTR_TRANSIENT(ary) + rofs;
+ if (rofs != -1) rptr = RARRAY_CONST_PTR(ary) + rofs;
ary_memcpy0(ary, beg, rlen, rptr, target_ary);
}
ARY_SET_LEN(ary, len);
@@ -1854,7 +1613,7 @@ rb_ary_splice(VALUE ary, long beg, long len, const VALUE *rptr, long rlen)
ARY_SET_LEN(ary, alen);
}
if (rlen > 0) {
- if (rofs != -1) rptr = RARRAY_CONST_PTR_TRANSIENT(ary) + rofs;
+ if (rofs != -1) rptr = RARRAY_CONST_PTR(ary) + rofs;
/* give up wb-protected ary */
MEMMOVE(RARRAY_PTR(ary) + beg, rptr, VALUE, rlen);
}
@@ -1914,12 +1673,11 @@ rb_ary_resize(VALUE ary, long len)
}
else {
if (olen > len + ARY_DEFAULT_SIZE) {
- ary_heap_realloc(ary, len);
+ SIZED_REALLOC_N(RARRAY(ary)->as.heap.ptr, VALUE, len, RARRAY(ary)->as.heap.aux.capa);
ARY_SET_CAPA(ary, len);
}
ARY_SET_HEAP_LEN(ary, len);
}
- ary_verify(ary);
return ary;
}
@@ -1980,7 +1738,7 @@ rb_ary_aset(int argc, VALUE *argv, VALUE ary)
/* check if idx is Range */
range:
rpl = rb_ary_to_ary(argv[argc-1]);
- rb_ary_splice(ary, beg, len, RARRAY_CONST_PTR_TRANSIENT(rpl), RARRAY_LEN(rpl));
+ rb_ary_splice(ary, beg, len, RARRAY_CONST_PTR(rpl), RARRAY_LEN(rpl));
RB_GC_GUARD(rpl);
return argv[argc-1];
}
@@ -2062,7 +1820,7 @@ VALUE
rb_ary_each(VALUE ary)
{
long i;
- ary_verify(ary);
+
RETURN_SIZED_ENUMERATOR(ary, 0, 0, ary_enum_length);
for (i=0; i<RARRAY_LEN(ary); i++) {
rb_yield(RARRAY_AREF(ary, i));
@@ -2172,18 +1930,15 @@ rb_ary_dup(VALUE ary)
{
long len = RARRAY_LEN(ary);
VALUE dup = rb_ary_new2(len);
- ary_memcpy(dup, 0, len, RARRAY_CONST_PTR_TRANSIENT(ary));
+ ary_memcpy(dup, 0, len, RARRAY_CONST_PTR(ary));
ARY_SET_LEN(dup, len);
-
- ary_verify(ary);
- ary_verify(dup);
return dup;
}
VALUE
rb_ary_resurrect(VALUE ary)
{
- return rb_ary_new4(RARRAY_LEN(ary), RARRAY_CONST_PTR_TRANSIENT(ary));
+ return rb_ary_new4(RARRAY_LEN(ary), RARRAY_CONST_PTR(ary));
}
extern VALUE rb_output_fs;
@@ -2524,8 +2279,8 @@ rb_ary_reverse_m(VALUE ary)
VALUE dup = rb_ary_new2(len);
if (len > 0) {
- const VALUE *p1 = RARRAY_CONST_PTR_TRANSIENT(ary);
- VALUE *p2 = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(dup) + len - 1;
+ const VALUE *p1 = RARRAY_CONST_PTR(ary);
+ VALUE *p2 = (VALUE *)RARRAY_CONST_PTR(dup) + len - 1;
do *p2-- = *p1++; while (--len > 0);
}
ARY_SET_LEN(dup, RARRAY_LEN(ary));
@@ -2627,7 +2382,7 @@ rb_ary_rotate_m(int argc, VALUE *argv, VALUE ary)
rotated = rb_ary_new2(len);
if (len > 0) {
cnt = rotate_count(cnt, len);
- ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
+ ptr = RARRAY_CONST_PTR(ary);
len -= cnt;
ary_memcpy(rotated, 0, len, ptr + cnt);
ary_memcpy(rotated, len, cnt, ptr);
@@ -2727,6 +2482,7 @@ rb_ary_sort_bang(VALUE ary)
VALUE tmp = ary_make_substitution(ary); /* only ary refers tmp */
struct ary_sort_data data;
long len = RARRAY_LEN(ary);
+
RBASIC_CLEAR_CLASS(tmp);
data.ary = tmp;
data.cmp_opt.opt_methods = 0;
@@ -2759,22 +2515,21 @@ rb_ary_sort_bang(VALUE ary)
rb_ary_unshare(ary);
}
else {
- ary_heap_free(ary);
+ ruby_sized_xfree((void *)ARY_HEAP_PTR(ary), ARY_HEAP_SIZE(ary));
}
- ARY_SET_PTR(ary, ARY_HEAP_PTR(tmp));
+ ARY_SET_PTR(ary, RARRAY_CONST_PTR(tmp));
ARY_SET_HEAP_LEN(ary, len);
- ARY_SET_CAPA(ary, ARY_HEAP_LEN(tmp));
+ ARY_SET_CAPA(ary, RARRAY_LEN(tmp));
}
/* tmp was lost ownership for the ptr */
FL_UNSET(tmp, FL_FREEZE);
FL_SET_EMBED(tmp);
ARY_SET_EMBED_LEN(tmp, 0);
FL_SET(tmp, FL_FREEZE);
- }
+ }
/* tmp will be GC'ed. */
RBASIC_SET_CLASS_RAW(tmp, rb_cArray); /* rb_cArray must be marked */
}
- ary_verify(ary);
return ary;
}
@@ -3078,7 +2833,7 @@ append_values_at_single(VALUE result, VALUE ary, long olen, VALUE idx)
/* check if idx is Range */
else if (rb_range_beg_len(idx, &beg, &len, olen, 1)) {
if (len > 0) {
- const VALUE *const src = RARRAY_CONST_PTR_TRANSIENT(ary);
+ const VALUE *const src = RARRAY_CONST_PTR(ary);
const long end = beg + len;
const long prevlen = RARRAY_LEN(result);
if (beg < olen) {
@@ -3320,7 +3075,6 @@ rb_ary_delete(VALUE ary, VALUE item)
ary_resize_smaller(ary, i2);
- ary_verify(ary);
return v;
}
@@ -3365,7 +3119,7 @@ rb_ary_delete_at(VALUE ary, long pos)
MEMMOVE(ptr+pos, ptr+pos+1, VALUE, len-pos-1);
});
ARY_INCREASE_LEN(ary, -1);
- ary_verify(ary);
+
return del;
}
@@ -3433,7 +3187,7 @@ rb_ary_slice_bang(int argc, VALUE *argv, VALUE ary)
len = orig_len - pos;
}
if (len == 0) return rb_ary_new2(0);
- arg2 = rb_ary_new4(len, RARRAY_CONST_PTR_TRANSIENT(ary)+pos);
+ arg2 = rb_ary_new4(len, RARRAY_CONST_PTR(ary)+pos);
RBASIC_SET_CLASS(arg2, rb_obj_class(ary));
rb_ary_splice(ary, pos, len, 0, 0);
return arg2;
@@ -3469,8 +3223,7 @@ ary_reject(VALUE orig, VALUE result)
for (i = 0; i < RARRAY_LEN(orig); i++) {
VALUE v = RARRAY_AREF(orig, i);
-
- if (!RTEST(rb_yield(v))) {
+ if (!RTEST(rb_yield(v))) {
rb_ary_push(result, v);
}
}
@@ -3499,6 +3252,7 @@ static VALUE
ary_reject_bang(VALUE ary)
{
struct select_bang_arg args;
+
rb_ary_modify_check(ary);
args.ary = ary;
args.len[0] = args.len[1] = 0;
@@ -3572,7 +3326,6 @@ rb_ary_reject(VALUE ary)
static VALUE
rb_ary_delete_if(VALUE ary)
{
- ary_verify(ary);
RETURN_SIZED_ENUMERATOR(ary, 0, 0, ary_enum_length);
ary_reject_bang(ary);
return ary;
@@ -3751,14 +3504,14 @@ rb_ary_replace(VALUE copy, VALUE orig)
VALUE shared = 0;
if (ARY_OWNS_HEAP_P(copy)) {
- ary_heap_free(copy);
+ RARRAY_PTR_USE(copy, ptr, ruby_sized_xfree(ptr, ARY_HEAP_SIZE(copy)));
}
else if (ARY_SHARED_P(copy)) {
shared = ARY_SHARED(copy);
FL_UNSET_SHARED(copy);
}
FL_SET_EMBED(copy);
- ary_memcpy(copy, 0, RARRAY_LEN(orig), RARRAY_CONST_PTR_TRANSIENT(orig));
+ ary_memcpy(copy, 0, RARRAY_LEN(orig), RARRAY_CONST_PTR(orig));
if (shared) {
rb_ary_decrement_share(shared);
}
@@ -3767,17 +3520,16 @@ rb_ary_replace(VALUE copy, VALUE orig)
else {
VALUE shared = ary_make_shared(orig);
if (ARY_OWNS_HEAP_P(copy)) {
- ary_heap_free(copy);
+ RARRAY_PTR_USE(copy, ptr, ruby_sized_xfree(ptr, ARY_HEAP_SIZE(copy)));
}
else {
rb_ary_unshare_safe(copy);
}
FL_UNSET_EMBED(copy);
- ARY_SET_PTR(copy, ARY_HEAP_PTR(orig));
- ARY_SET_LEN(copy, ARY_HEAP_LEN(orig));
+ ARY_SET_PTR(copy, RARRAY_CONST_PTR(orig));
+ ARY_SET_LEN(copy, RARRAY_LEN(orig));
rb_ary_set_shared(copy, shared);
}
- ary_verify(copy);
return copy;
}
@@ -3795,20 +3547,16 @@ VALUE
rb_ary_clear(VALUE ary)
{
rb_ary_modify_check(ary);
+ ARY_SET_LEN(ary, 0);
if (ARY_SHARED_P(ary)) {
if (!ARY_EMBED_P(ary)) {
rb_ary_unshare(ary);
FL_SET_EMBED(ary);
- ARY_SET_EMBED_LEN(ary, 0);
}
}
- else {
- ARY_SET_LEN(ary, 0);
- if (ARY_DEFAULT_SIZE * 2 < ARY_CAPA(ary)) {
- ary_resize_capa(ary, ARY_DEFAULT_SIZE * 2);
- }
+ else if (ARY_DEFAULT_SIZE * 2 < ARY_CAPA(ary)) {
+ ary_resize_capa(ary, ARY_DEFAULT_SIZE * 2);
}
- ary_verify(ary);
return ary;
}
@@ -3941,8 +3689,8 @@ rb_ary_plus(VALUE x, VALUE y)
len = xlen + ylen;
z = rb_ary_new2(len);
- ary_memcpy(z, 0, xlen, RARRAY_CONST_PTR_TRANSIENT(x));
- ary_memcpy(z, xlen, ylen, RARRAY_CONST_PTR_TRANSIENT(y));
+ ary_memcpy(z, 0, xlen, RARRAY_CONST_PTR(x));
+ ary_memcpy(z, xlen, ylen, RARRAY_CONST_PTR(y));
ARY_SET_LEN(z, len);
return z;
}
@@ -3952,7 +3700,7 @@ ary_append(VALUE x, VALUE y)
{
long n = RARRAY_LEN(y);
if (n > 0) {
- rb_ary_splice(x, RARRAY_LEN(x), 0, RARRAY_CONST_PTR_TRANSIENT(y), n);
+ rb_ary_splice(x, RARRAY_LEN(x), 0, RARRAY_CONST_PTR(y), n);
}
return x;
}
@@ -3994,7 +3742,6 @@ rb_ary_concat_multi(int argc, VALUE *argv, VALUE ary)
ary_append(ary, args);
}
- ary_verify(ary);
return ary;
}
@@ -4049,16 +3796,16 @@ rb_ary_times(VALUE ary, VALUE times)
ary2 = ary_new(rb_obj_class(ary), len);
ARY_SET_LEN(ary2, len);
- ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
+ ptr = RARRAY_CONST_PTR(ary);
t = RARRAY_LEN(ary);
if (0 < t) {
ary_memcpy(ary2, 0, t, ptr);
while (t <= len/2) {
- ary_memcpy(ary2, t, t, RARRAY_CONST_PTR_TRANSIENT(ary2));
+ ary_memcpy(ary2, t, t, RARRAY_CONST_PTR(ary2));
t *= 2;
}
if (t < len) {
- ary_memcpy(ary2, t, len-t, RARRAY_CONST_PTR_TRANSIENT(ary2));
+ ary_memcpy(ary2, t, len-t, RARRAY_CONST_PTR(ary2));
}
}
out:
@@ -4144,7 +3891,6 @@ recursive_equal(VALUE ary1, VALUE ary2, int recur)
if (recur) return Qtrue; /* Subtle! */
- /* rb_equal() can evacuate ptrs */
p1 = RARRAY_CONST_PTR(ary1);
p2 = RARRAY_CONST_PTR(ary2);
len1 = RARRAY_LEN(ary1);
@@ -4157,8 +3903,8 @@ recursive_equal(VALUE ary1, VALUE ary2, int recur)
return Qfalse;
if (len1 < i)
return Qtrue;
- p1 = RARRAY_CONST_PTR(ary1) + i;
- p2 = RARRAY_CONST_PTR(ary2) + i;
+ p1 = RARRAY_CONST_PTR(ary1) + i;
+ p2 = RARRAY_CONST_PTR(ary2) + i;
}
else {
return Qfalse;
@@ -4195,7 +3941,7 @@ rb_ary_equal(VALUE ary1, VALUE ary2)
return rb_equal(ary2, ary1);
}
if (RARRAY_LEN(ary1) != RARRAY_LEN(ary2)) return Qfalse;
- if (RARRAY_CONST_PTR_TRANSIENT(ary1) == RARRAY_CONST_PTR_TRANSIENT(ary2)) return Qtrue;
+ if (RARRAY_CONST_PTR(ary1) == RARRAY_CONST_PTR(ary2)) return Qtrue;
return rb_exec_recursive_paired(recursive_equal, ary1, ary2, ary2);
}
@@ -4226,7 +3972,7 @@ rb_ary_eql(VALUE ary1, VALUE ary2)
if (ary1 == ary2) return Qtrue;
if (!RB_TYPE_P(ary2, T_ARRAY)) return Qfalse;
if (RARRAY_LEN(ary1) != RARRAY_LEN(ary2)) return Qfalse;
- if (RARRAY_CONST_PTR_TRANSIENT(ary1) == RARRAY_CONST_PTR_TRANSIENT(ary2)) return Qtrue;
+ if (RARRAY_CONST_PTR(ary1) == RARRAY_CONST_PTR(ary2)) return Qtrue;
return rb_exec_recursive_paired(recursive_eql, ary1, ary2, ary2);
}
@@ -4933,14 +4679,14 @@ rb_ary_compact_bang(VALUE ary)
long n;
rb_ary_modify(ary);
- p = t = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(ary); /* WB: no new reference */
+ p = t = (VALUE *)RARRAY_CONST_PTR(ary); /* WB: no new reference */
end = p + RARRAY_LEN(ary);
while (t < end) {
if (NIL_P(*t)) t++;
else *p++ = *t++;
}
- n = p - RARRAY_CONST_PTR_TRANSIENT(ary);
+ n = p - RARRAY_CONST_PTR(ary);
if (RARRAY_LEN(ary) == n) {
return Qnil;
}
@@ -5203,8 +4949,8 @@ rb_ary_shuffle_bang(int argc, VALUE *argv, VALUE ary)
while (i) {
long j = RAND_UPTO(i);
VALUE tmp;
- if (len != RARRAY_LEN(ary) || ptr != RARRAY_CONST_PTR_TRANSIENT(ary)) {
- rb_raise(rb_eRuntimeError, "modified during shuffle");
+ if (len != RARRAY_LEN(ary) || ptr != RARRAY_CONST_PTR(ary)) {
+ rb_raise(rb_eRuntimeError, "modified during shuffle");
}
tmp = ptr[--i];
ptr[i] = ptr[j];
diff --git a/common.mk b/common.mk
index 9a8456fe67..42584f65a2 100644
--- a/common.mk
+++ b/common.mk
@@ -130,7 +130,6 @@ COMMONOBJS = array.$(OBJEXT) \
thread.$(OBJEXT) \
time.$(OBJEXT) \
transcode.$(OBJEXT) \
- transient_heap.$(OBJEXT) \
util.$(OBJEXT) \
variable.$(OBJEXT) \
version.$(OBJEXT) \
@@ -2899,8 +2898,6 @@ transcode.$(OBJEXT): {$(VPATH)}st.h
transcode.$(OBJEXT): {$(VPATH)}subst.h
transcode.$(OBJEXT): {$(VPATH)}transcode.c
transcode.$(OBJEXT): {$(VPATH)}transcode_data.h
-transient_heap.$(OBJEXT): {$(VPATH)}debug_counter.h
-transient_heap.$(OBJEXT): {$(VPATH)}transient_heap.c
util.$(OBJEXT): $(hdrdir)/ruby/ruby.h
util.$(OBJEXT): $(top_srcdir)/include/ruby.h
util.$(OBJEXT): {$(VPATH)}config.h
diff --git a/compile.c b/compile.c
index ec23fb6c18..ba6924e713 100644
--- a/compile.c
+++ b/compile.c
@@ -1649,7 +1649,7 @@ iseq_set_arguments(rb_iseq_t *iseq, LINK_ANCHOR *const optargs, const NODE *cons
opt_table = ALLOC_N(VALUE, i+1);
- MEMCPY(opt_table, RARRAY_CONST_PTR_TRANSIENT(labels), VALUE, i+1);
+ MEMCPY(opt_table, RARRAY_CONST_PTR(labels), VALUE, i+1);
for (j = 0; j < i+1; j++) {
opt_table[j] &= ~1;
}
@@ -2297,14 +2297,14 @@ iseq_set_exception_table(rb_iseq_t *iseq)
struct iseq_catch_table_entry *entry;
tlen = (int)RARRAY_LEN(ISEQ_COMPILE_DATA(iseq)->catch_table_ary);
- tptr = RARRAY_CONST_PTR_TRANSIENT(ISEQ_COMPILE_DATA(iseq)->catch_table_ary);
+ tptr = RARRAY_CONST_PTR(ISEQ_COMPILE_DATA(iseq)->catch_table_ary);
if (tlen > 0) {
struct iseq_catch_table *table = xmalloc(iseq_catch_table_bytes(tlen));
table->size = tlen;
for (i = 0; i < table->size; i++) {
- ptr = RARRAY_CONST_PTR_TRANSIENT(tptr[i]);
+ ptr = RARRAY_CONST_PTR(tptr[i]);
entry = &table->entries[i];
entry->type = (enum catch_type)(ptr[0] & 0xffff);
entry->start = label_get_position((LABEL *)(ptr[1] & ~1));
diff --git a/debug_counter.h b/debug_counter.h
index 1e7df10874..3fcb562f65 100644
--- a/debug_counter.h
+++ b/debug_counter.h
@@ -141,7 +141,6 @@ RB_DEBUG_COUNTER(gc_major_oldmalloc)
* * [attr]
* * _ptr: R?? is not embed.
* * _embed: R?? is embed.
- * * _transient: R?? uses transient heap.
* * type specific attr.
* * str_shared: str is shared.
* * str_nofree: nofree
@@ -163,9 +162,8 @@ RB_DEBUG_COUNTER(obj_free)
RB_DEBUG_COUNTER(obj_promote)
RB_DEBUG_COUNTER(obj_wb_unprotect)
-RB_DEBUG_COUNTER(obj_obj_embed)
-RB_DEBUG_COUNTER(obj_obj_transient)
RB_DEBUG_COUNTER(obj_obj_ptr)
+RB_DEBUG_COUNTER(obj_obj_embed)
RB_DEBUG_COUNTER(obj_str_ptr)
RB_DEBUG_COUNTER(obj_str_embed)
@@ -173,9 +171,8 @@ RB_DEBUG_COUNTER(obj_str_shared)
RB_DEBUG_COUNTER(obj_str_nofree)
RB_DEBUG_COUNTER(obj_str_fstr)
-RB_DEBUG_COUNTER(obj_ary_embed)
-RB_DEBUG_COUNTER(obj_ary_transient)
RB_DEBUG_COUNTER(obj_ary_ptr)
+RB_DEBUG_COUNTER(obj_ary_embed)
RB_DEBUG_COUNTER(obj_hash_empty)
RB_DEBUG_COUNTER(obj_hash_under4)
@@ -222,11 +219,6 @@ RB_DEBUG_COUNTER(heap_xmalloc)
RB_DEBUG_COUNTER(heap_xrealloc)
RB_DEBUG_COUNTER(heap_xfree)
-/* transient_heap */
-RB_DEBUG_COUNTER(theap_alloc)
-RB_DEBUG_COUNTER(theap_alloc_fail)
-RB_DEBUG_COUNTER(theap_evacuate)
-
/* load (not implemented yet) */
/*
RB_DEBUG_COUNTER(load_files)
diff --git a/enum.c b/enum.c
index 3be6941c3a..9485b7eb60 100644
--- a/enum.c
+++ b/enum.c
@@ -14,7 +14,6 @@
#include "ruby/util.h"
#include "id.h"
#include "symbol.h"
-#include "transient_heap.h"
#include <assert.h>
@@ -1172,10 +1171,9 @@ enum_sort_by(VALUE obj)
rb_ary_concat(ary, buf);
}
if (RARRAY_LEN(ary) > 2) {
- rb_ary_transient_heap_evacuate(ary, TRUE); /* should be malloc heap */
- RARRAY_PTR_USE(ary, ptr,
- ruby_qsort(ptr, RARRAY_LEN(ary)/2, 2*sizeof(VALUE),
- sort_by_cmp, (void *)ary));
+ RARRAY_PTR_USE(ary, ptr,
+ ruby_qsort(ptr, RARRAY_LEN(ary)/2, 2*sizeof(VALUE),
+ sort_by_cmp, (void *)ary));
}
if (RBASIC(ary)->klass) {
rb_raise(rb_eRuntimeError, "sort_by reentered");
diff --git a/gc.c b/gc.c
index 22335586a6..dc8f5808f2 100644
--- a/gc.c
+++ b/gc.c
@@ -35,7 +35,6 @@
#include <sys/types.h>
#include "ruby_assert.h"
#include "debug_counter.h"
-#include "transient_heap.h"
#include "mjit.h"
#undef rb_data_object_wrap
@@ -846,6 +845,8 @@ static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
static VALUE define_final0(VALUE obj, VALUE block);
static void negative_size_allocation_error(const char *);
+static void *aligned_malloc(size_t, size_t);
+static void aligned_free(void *);
static void init_mark_stack(mark_stack_t *stack);
@@ -1189,7 +1190,6 @@ RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *pag
{
MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
objspace->rgengc.old_objects++;
- rb_transient_heap_promote(obj);
#if RGENGC_PROFILE >= 2
objspace->profile.total_promoted_count++;
@@ -1486,7 +1486,7 @@ heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
{
heap_allocated_pages--;
objspace->profile.total_freed_pages++;
- rb_aligned_free(GET_PAGE_BODY(page->start));
+ aligned_free(GET_PAGE_BODY(page->start));
free(page);
}
@@ -1524,7 +1524,7 @@ heap_page_allocate(rb_objspace_t *objspace)
int limit = HEAP_PAGE_OBJ_LIMIT;
/* assign heap_page body (contains heap_page_header and RVALUEs) */
- page_body = (struct heap_page_body *)rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
+ page_body = (struct heap_page_body *)aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
if (page_body == 0) {
rb_memerror();
}
@@ -1532,7 +1532,7 @@ heap_page_allocate(rb_objspace_t *objspace)
/* assign heap_page entry */
page = (struct heap_page *)calloc(1, sizeof(struct heap_page));
if (page == 0) {
- rb_aligned_free(page_body);
+ aligned_free(page_body);
rb_memerror();
}
@@ -1954,10 +1954,10 @@ newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protect
#if GC_DEBUG_STRESS_TO_CLASS
if (UNLIKELY(stress_to_class)) {
- long i, cnt = RARRAY_LEN(stress_to_class);
- for (i = 0; i < cnt; ++i) {
+ long i, cnt = RARRAY_LEN(stress_to_class);
+ for (i = 0; i < cnt; ++i) {
if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
- }
+ }
}
#endif
if (!(during_gc ||
@@ -2215,17 +2215,14 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
- if ((RANY(obj)->as.basic.flags & ROBJECT_EMBED) ||
- RANY(obj)->as.object.as.heap.ivptr == NULL) {
- RB_DEBUG_COUNTER_INC(obj_obj_embed);
- }
- else if (ROBJ_TRANSIENT_P(obj)) {
- RB_DEBUG_COUNTER_INC(obj_obj_transient);
- }
- else {
+ if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
+ RANY(obj)->as.object.as.heap.ivptr) {
xfree(RANY(obj)->as.object.as.heap.ivptr);
- RB_DEBUG_COUNTER_INC(obj_obj_ptr);
- }
+ RB_DEBUG_COUNTER_INC(obj_obj_ptr);
+ }
+ else {
+ RB_DEBUG_COUNTER_INC(obj_obj_embed);
+ }
break;
case T_MODULE:
case T_CLASS:
@@ -2262,7 +2259,7 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
rb_str_free(obj);
break;
case T_ARRAY:
- rb_ary_free(obj);
+ rb_ary_free(obj);
break;
case T_HASH:
if (RANY(obj)->as.hash.ntbl) {
@@ -4518,7 +4515,6 @@ static void
gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
{
if (LIKELY(objspace->mark_func_data == NULL)) {
- if (RB_TYPE_P(obj, T_NONE)) rb_bug("...");
rgengc_check_relation(objspace, obj);
if (!gc_mark_set(objspace, obj)) return; /* already marked */
gc_aging(objspace, obj);
@@ -4675,24 +4671,16 @@ gc_mark_children(rb_objspace_t *objspace, VALUE obj)
break;
case T_ARRAY:
- if (FL_TEST(obj, ELTS_SHARED)) {
- VALUE root = any->as.array.as.heap.aux.shared;
- gc_mark(objspace, root);
+ if (FL_TEST(obj, ELTS_SHARED)) {
+ gc_mark(objspace, any->as.array.as.heap.aux.shared);
}
else {
long i, len = RARRAY_LEN(obj);
- const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(obj);
+ const VALUE *ptr = RARRAY_CONST_PTR(obj);
for (i=0; i < len; i++) {
- gc_mark(objspace, ptr[i]);
+ gc_mark(objspace, *ptr++);
}
-
- if (objspace->mark_func_data == NULL) {
- if (!FL_TEST_RAW(obj, RARRAY_EMBED_FLAG) &&
- RARRAY_TRANSIENT_P(obj)) {
- rb_transient_heap_mark(obj, ptr);
- }
- }
- }
+ }
break;
case T_HASH:
@@ -4720,18 +4708,10 @@ gc_mark_children(rb_objspace_t *objspace, VALUE obj)
case T_OBJECT:
{
- const VALUE * const ptr = ROBJECT_IVPTR(obj);
-
- if (ptr) {
- uint32_t i, len = ROBJECT_NUMIV(obj);
- for (i = 0; i < len; i++) {
- gc_mark(objspace, ptr[i]);
- }
-
- if (objspace->mark_func_data == NULL &&
- ROBJ_TRANSIENT_P(obj)) {
- rb_transient_heap_mark(obj, ptr);
- }
+ uint32_t i, len = ROBJECT_NUMIV(obj);
+ VALUE *ptr = ROBJECT_IVPTR(obj);
+ for (i = 0; i < len; i++) {
+ gc_mark(objspace, *ptr++);
}
}
break;
@@ -5475,13 +5455,6 @@ rb_gc_verify_internal_consistency(void)
gc_verify_internal_consistency(Qnil);
}
-static VALUE
-gc_verify_transient_heap_internal_consistency(VALUE dmy)
-{
- rb_transient_heap_verify();
- return Qnil;
-}
-
/* marks */
static void
@@ -5698,8 +5671,6 @@ gc_marks_finish(rb_objspace_t *objspace)
#endif
}
- rb_transient_heap_finish_marking();
-
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
return TRUE;
@@ -6591,7 +6562,6 @@ gc_start(rb_objspace_t *objspace, int reason)
objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
gc_prof_setup_new_record(objspace, reason);
gc_reset_malloc_info(objspace);
- rb_transient_heap_start_marking(do_full_mark);
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
GC_ASSERT(during_gc);
@@ -7842,8 +7812,8 @@ rb_memerror(void)
EC_JUMP_TAG(ec, TAG_RAISE);
}
-void *
-rb_aligned_malloc(size_t alignment, size_t size)
+static void *
+aligned_malloc(size_t alignment, size_t size)
{
void *res;
@@ -7876,8 +7846,8 @@ rb_aligned_malloc(size_t alignment, size_t size)
return res;
}
-void
-rb_aligned_free(void *ptr)
+static void
+aligned_free(void *ptr)
{
#if defined __MINGW32__
__mingw_aligned_free(ptr);
@@ -9581,21 +9551,13 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
#if USE_RGENGC
const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
- if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
- snprintf(buff, buff_size, "%p [%d%s%s%s%s] %s",
- (void *)obj, age,
- C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
- C(RVALUE_MARK_BITMAP(obj), "M"),
- C(RVALUE_MARKING_BITMAP(obj), "R"),
- C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
- obj_type_name(obj));
- }
- else {
- /* fake */
- snprintf(buff, buff_size, "%p [%dXXXX] %s",
- (void *)obj, age,
- obj_type_name(obj));
- }
+ snprintf(buff, buff_size, "%p [%d%s%s%s%s] %s",
+ (void *)obj, age,
+ C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
+ C(RVALUE_MARK_BITMAP(obj), "M"),
+ C(RVALUE_MARKING_BITMAP(obj), "R"),
+ C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
+ obj_type_name(obj));
#else
snprintf(buff, buff_size, "%p [%s] %s",
(void *)obj,
@@ -9625,25 +9587,10 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
UNEXPECTED_NODE(rb_raw_obj_info);
break;
case T_ARRAY:
- if (FL_TEST(obj, ELTS_SHARED)) {
- snprintf(buff, buff_size, "%s shared -> %s", buff,
- rb_obj_info(RARRAY(obj)->as.heap.aux.shared));
- }
- else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
- snprintf(buff, buff_size, "%s [%s%s] len: %d (embed)", buff,
- C(ARY_EMBED_P(obj), "E"),
- C(ARY_SHARED_P(obj), "S"),
- (int)RARRAY_LEN(obj));
- }
- else {
- snprintf(buff, buff_size, "%s [%s%s%s] len: %d, capa:%d ptr:%p", buff,
- C(ARY_EMBED_P(obj), "E"),
- C(ARY_SHARED_P(obj), "S"),
- C(RARRAY_TRANSIENT_P(obj), "T"),
- (int)RARRAY_LEN(obj),
- ARY_EMBED_P(obj) ? -1 : (int)RARRAY(obj)->as.heap.aux.capa,
- RARRAY_CONST_PTR_TRANSIENT(obj));
- }
+ snprintf(buff, buff_size, "%s [%s%s] len: %d", buff,
+ C(ARY_EMBED_P(obj), "E"),
+ C(ARY_SHARED_P(obj), "S"),
+ (int)RARRAY_LEN(obj));
break;
case T_STRING: {
snprintf(buff, buff_size, "%s %s", buff, RSTRING_PTR(obj));
@@ -9656,19 +9603,6 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
}
break;
}
- case T_OBJECT:
- {
- uint32_t len = ROBJECT_NUMIV(obj);
-
- if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
- snprintf(buff, buff_size, "%s (embed) len:%d", buff, len);
- }
- else {
- VALUE *ptr = ROBJECT_IVPTR(obj);
- snprintf(buff, buff_size, "%s len:%d ptr:%p", buff, len, ptr);
- }
- }
- break;
case T_DATA: {
const struct rb_block *block;
const rb_iseq_t *iseq;
@@ -10020,7 +9954,6 @@ Init_GC(void)
/* internal methods */
rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);
- rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
#if MALLOC_ALLOCATED_SIZE
rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
diff --git a/include/ruby/ruby.h b/include/ruby/ruby.h
index 1512e78179..a05651ac3b 100644
--- a/include/ruby/ruby.h
+++ b/include/ruby/ruby.h
@@ -1020,15 +1020,12 @@ enum ruby_rarray_flags {
RARRAY_EMBED_LEN_MASK = (RUBY_FL_USER4|RUBY_FL_USER3),
RARRAY_EMBED_LEN_SHIFT = (RUBY_FL_USHIFT+3),
- RARRAY_TRANSIENT_FLAG = RUBY_FL_USER13,
-
RARRAY_ENUM_END
};
#define RARRAY_EMBED_FLAG (VALUE)RARRAY_EMBED_FLAG
#define RARRAY_EMBED_LEN_MASK (VALUE)RARRAY_EMBED_LEN_MASK
#define RARRAY_EMBED_LEN_MAX RARRAY_EMBED_LEN_MAX
#define RARRAY_EMBED_LEN_SHIFT RARRAY_EMBED_LEN_SHIFT
-#define RARRAY_TRANSIENT_FLAG RARRAY_TRANSIENT_FLAG
struct RArray {
struct RBasic basic;
union {
@@ -1049,14 +1046,9 @@ struct RArray {
#define RARRAY_LEN(a) rb_array_len(a)
#define RARRAY_LENINT(ary) rb_long2int(RARRAY_LEN(ary))
#define RARRAY_CONST_PTR(a) rb_array_const_ptr(a)
-#define RARRAY_CONST_PTR_TRANSIENT(a) rb_array_const_ptr_transient(a)
-#define RARRAY_TRANSIENT_P(ary) FL_TEST_RAW((ary), RARRAY_TRANSIENT_FLAG)
-
-VALUE *rb_ary_ptr_use_start(VALUE ary);
-void rb_ary_ptr_use_end(VALUE ary);
-#define RARRAY_PTR_USE_START(a) rb_ary_ptr_use_start(a)
-#define RARRAY_PTR_USE_END(a) rb_ary_ptr_use_end(a)
+#define RARRAY_PTR_USE_START(a) ((VALUE *)RARRAY_CONST_PTR(a))
+#define RARRAY_PTR_USE_END(a) /* */
#define RARRAY_PTR_USE(ary, ptr_name, expr) do { \
const VALUE _ary = (ary); \
@@ -1065,12 +1057,11 @@ void rb_ary_ptr_use_end(VALUE ary);
RARRAY_PTR_USE_END(_ary); \
} while (0)
-#define RARRAY_AREF(a, i) (RARRAY_CONST_PTR_TRANSIENT(a)[i])
+#define RARRAY_AREF(a, i) (RARRAY_CONST_PTR(a)[i])
#define RARRAY_ASET(a, i, v) do { \
const VALUE _ary = (a); \
- const VALUE _v = (v); \
VALUE *ptr = (VALUE *)RARRAY_PTR_USE_START(_ary); \
- RB_OBJ_WRITE(_ary, &ptr[i], _v); \
+ RB_OBJ_WRITE(_ary, &ptr[i], (v)); \
RARRAY_PTR_USE_END(_ary); \
} while (0)
@@ -2119,23 +2110,12 @@ rb_array_len(VALUE a)
#endif
static inline const VALUE *
-rb_array_const_ptr_transient(VALUE a)
+rb_array_const_ptr(VALUE a)
{
return FIX_CONST_VALUE_PTR((RBASIC(a)->flags & RARRAY_EMBED_FLAG) ?
RARRAY(a)->as.ary : RARRAY(a)->as.heap.ptr);
}
-void rb_ary_detransient(VALUE a);
-
-static inline const VALUE *
-rb_array_const_ptr(VALUE a)
-{
- if (RARRAY_TRANSIENT_P(a)) {
- rb_ary_detransient(a);
- }
- return rb_array_const_ptr_transient(a);
-}
-
#if defined(EXTLIB) && defined(USE_DLN_A_OUT)
/* hook for external modules */
static char *dln_libs_to_be_linked[] = { EXTLIB, 0 };
diff --git a/inits.c b/inits.c
index 7eb543104f..c9687de516 100644
--- a/inits.c
+++ b/inits.c
@@ -16,7 +16,6 @@
void
rb_call_inits(void)
{
- CALL(TransientHeap);
CALL(Method);
CALL(RandomSeedCore);
CALL(sym);
diff --git a/insns.def b/insns.def
index 747e6ef8d4..132ce2f179 100644
--- a/insns.def
+++ b/insns.def
@@ -524,7 +524,7 @@ newhashfromarray
{
VM_ASSERT(num * 2 == (rb_num_t)RARRAY_LEN(ary));
hash = rb_hash_new_with_size(num);
- rb_hash_bulk_insert(num * 2, RARRAY_CONST_PTR_TRANSIENT(ary), hash);
+ rb_hash_bulk_insert(num * 2, RARRAY_CONST_PTR(ary), hash);
}
/* put new Range object.(Range.new(low, high, flag)) */
diff --git a/internal.h b/internal.h
index b680c8c79b..697a1196fa 100644
--- a/internal.h
+++ b/internal.h
@@ -1073,26 +1073,6 @@ VALUE rb_gvar_set(struct rb_global_entry *, VALUE);
VALUE rb_gvar_defined(struct rb_global_entry *);
/* array.c */
-
-#ifndef ARRAY_DEBUG
-#define ARRAY_DEBUG 0
-#endif
-
-#ifdef ARRAY_DEBUG
-#define RARRAY_PTR_IN_USE_FLAG FL_USER14
-#define ARY_PTR_USING_P(ary) FL_TEST_RAW((ary), RARRAY_PTR_IN_USE_FLAG)
-
-#else
-
-/* disable debug function */
-#undef RARRAY_PTR_USE_START
-#undef RARRAY_PTR_USE_END
-#define RARRAY_PTR_USE_START(a) ((VALUE *)RARRAY_CONST_PTR_TRANSIENT(a))
-#define RARRAY_PTR_USE_END(a)
-#define ARY_PTR_USING_P(ary) 0
-
-#endif
-
VALUE rb_ary_last(int, const VALUE *, VALUE);
void rb_ary_set_len(VALUE, long);
void rb_ary_delete_same(VALUE, VALUE);
@@ -1120,7 +1100,7 @@ static inline VALUE
rb_ary_entry_internal(VALUE ary, long offset)
{
long len = RARRAY_LEN(ary);
- const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
+ const VALUE *ptr = RARRAY_CONST_PTR(ary);
if (len == 0) return Qnil;
if (offset < 0) {
offset += len;
@@ -1357,9 +1337,6 @@ RUBY_SYMBOL_EXPORT_END
rb_wb_unprotected_newobj_of(klass, flags))
#define NEWOBJ_OF(obj,type,klass,flags) RB_NEWOBJ_OF(obj,type,klass,flags)
-void *rb_aligned_malloc(size_t, size_t);
-void rb_aligned_free(void *);
-
/* hash.c */
struct st_table *rb_hash_tbl_raw(VALUE hash);
VALUE rb_hash_new_with_size(st_index_t size);
@@ -1922,9 +1899,6 @@ extern rb_encoding OnigEncodingUTF_8;
#endif
/* variable.c */
-#define ROBJECT_TRANSIENT_FLAG FL_USER13
-#define ROBJ_TRANSIENT_P(obj) FL_TEST_RAW((obj), ROBJECT_TRANSIENT_FLAG)
-
void rb_gc_mark_global_tbl(void);
size_t rb_generic_ivar_memsize(VALUE);
VALUE rb_search_class_path(VALUE);
diff --git a/string.c b/string.c
index 48718f93ac..e654a3023d 100644
--- a/string.c
+++ b/string.c
@@ -2006,9 +2006,9 @@ rb_str_format_m(VALUE str, VALUE arg)
VALUE tmp = rb_check_array_type(arg);
if (!NIL_P(tmp)) {
- const long len = RARRAY_LENINT(tmp);
- VALUE rv = rb_str_format(len, RARRAY_CONST_PTR(tmp), str);
- return rv;
+ VALUE rv = rb_str_format(RARRAY_LENINT(tmp), RARRAY_CONST_PTR(tmp), str);
+ RB_GC_GUARD(tmp);
+ return rv;
}
return rb_str_format(1, &arg, str);
}
diff --git a/test/ruby/test_enum.rb b/test/ruby/test_enum.rb
index c56e280e06..a4eace2d57 100644
--- a/test/ruby/test_enum.rb
+++ b/test/ruby/test_enum.rb
@@ -1115,19 +1115,4 @@ class TestEnumerable < Test::Unit::TestCase
assert_equal([1, 2, 3, 4, 5, 10], (1..100).uniq{|x| (x**2) % 10 }.first(6))
assert_equal([1, [1, 2]], Foo.new.to_enum.uniq)
end
-
- def test_transient_heap_sort_by
- klass = Class.new do
- include Comparable
- attr_reader :i
- def initialize e
- @i = e
- end
- def <=> other
- GC.start
- i <=> other.i
- end
- end
- assert_equal [1, 2, 3, 4, 5], (1..5).sort_by{|e| klass.new e}
- end
end
diff --git a/variable.c b/variable.c
index 367fbcb6ef..1c23173d6c 100644
--- a/variable.c
+++ b/variable.c
@@ -22,7 +22,6 @@
#include "id_table.h"
#include "debug_counter.h"
#include "vm_core.h"
-#include "transient_heap.h"
static struct rb_id_table *rb_global_tbl;
static ID autoload, classpath, tmp_classpath, classid;
@@ -1334,124 +1333,53 @@ generic_ivar_set(VALUE obj, ID id, VALUE val)
RB_OBJ_WRITTEN(obj, Qundef, val);
}
-static VALUE *
-obj_ivar_heap_alloc(VALUE obj, size_t newsize)
-{
- VALUE *newptr = rb_transient_heap_alloc(obj, sizeof(VALUE) * newsize);
-
- if (newptr != NULL) {
- FL_SET_RAW(obj, ROBJECT_TRANSIENT_FLAG);
- }
- else {
- FL_UNSET_RAW(obj, ROBJECT_TRANSIENT_FLAG);
- newptr = ALLOC_N(VALUE, newsize);
- }
- return newptr;
-}
-
-static VALUE *
-obj_ivar_heap_realloc(VALUE obj, int32_t len, size_t newsize)
-{
- VALUE *newptr;
- int i;
-
- if (ROBJ_TRANSIENT_P(obj)) {
- const VALUE *orig_ptr = ROBJECT(obj)->as.heap.ivptr;
- if ((newptr = obj_ivar_heap_alloc(obj, newsize)) != NULL) {
- /* ok */
- }
- else {
- newptr = ALLOC_N(VALUE, newsize);
- FL_UNSET_RAW(obj, ROBJECT_TRANSIENT_FLAG);
- }
- ROBJECT(obj)->as.heap.ivptr = newptr;
- for (i=0; i<(int)len; i++) {
- newptr[i] = orig_ptr[i];
- }
- }
- else {
- REALLOC_N(ROBJECT(obj)->as.heap.ivptr, VALUE, newsize);
- newptr = ROBJECT(obj)->as.heap.ivptr;
- }
-
- return newptr;
-}
-
-void
-rb_obj_transient_heap_evacuate(VALUE obj, int promote)
-{
- if (ROBJ_TRANSIENT_P(obj)) {
- uint32_t len = ROBJECT_NUMIV(obj);
- const VALUE *old_ptr = ROBJECT_IVPTR(obj);
- VALUE *new_ptr;
-
- if (promote) {
- new_ptr = ALLOC_N(VALUE, len);
- FL_UNSET_RAW(obj, ROBJECT_TRANSIENT_FLAG);
- }
- else {
- new_ptr = obj_ivar_heap_alloc(obj, len);
- }
- MEMCPY(new_ptr, old_ptr, VALUE, len);
- ROBJECT(obj)->as.heap.ivptr = new_ptr;
- }
-}
-
-static VALUE
-obj_ivar_set(VALUE obj, ID id, VALUE val)
+VALUE
+rb_ivar_set(VALUE obj, ID id, VALUE val)
{
struct ivar_update ivup;
uint32_t i, len;
- ivup.iv_extended = 0;
- ivup.u.iv_index_tbl = iv_index_tbl_make(obj);
- iv_index_tbl_extend(&ivup, id);
- len = ROBJECT_NUMIV(obj);
- if (len <= ivup.index) {
- VALUE *ptr = ROBJECT_IVPTR(obj);
- if (ivup.index < ROBJECT_EMBED_LEN_MAX) {
- RBASIC(obj)->flags |= ROBJECT_EMBED;
- ptr = ROBJECT(obj)->as.ary;
- for (i = 0; i < ROBJECT_EMBED_LEN_MAX; i++) {
- ptr[i] = Qundef;
- }
- }
- else {
- VALUE *newptr;
- uint32_t newsize = iv_index_tbl_newsize(&ivup);
-
- if (RBASIC(obj)->flags & ROBJECT_EMBED) {
- newptr = obj_ivar_heap_alloc(obj, newsize);
- // newptr = ALLOC_N(VALUE, newsize);
- MEMCPY(newptr, ptr, VALUE, len);
- RBASIC(obj)->flags &= ~ROBJECT_EMBED;
- ROBJECT(obj)->as.heap.ivptr = newptr;
- }
- else {
- newptr = obj_ivar_heap_realloc(obj, len, newsize);
- }
- for (; len < newsize; len++) {
- newptr[len] = Qundef;
- }
- ROBJECT(obj)->as.heap.numiv = newsize;
- ROBJECT(obj)->as.heap.iv_index_tbl = ivup.u.iv_index_tbl;
- }
- }
- RB_OBJ_WRITE(obj, &ROBJECT_IVPTR(obj)[ivup.index], val);
-
- return val;
-}
-
-VALUE
-rb_ivar_set(VALUE obj, ID id, VALUE val)
-{
RB_DEBUG_COUNTER_INC(ivar_set_base);
rb_check_frozen(obj);
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
- return obj_ivar_set(obj, id, val);
+ ivup.iv_extended = 0;
+ ivup.u.iv_index_tbl = iv_index_tbl_make(obj);
+ iv_index_tbl_extend(&ivup, id);
+ len = ROBJECT_NUMIV(obj);
+ if (len <= ivup.index) {
+ VALUE *ptr = ROBJECT_IVPTR(obj);
+ if (ivup.index < ROBJECT_EMBED_LEN_MAX) {
+ RBASIC(obj)->flags |= ROBJECT_EMBED;
+ ptr = ROBJECT(obj)->as.ary;
+ for (i = 0; i < ROBJECT_EMBED_LEN_MAX; i++) {
+ ptr[i] = Qundef;
+ }
+ }
+ else {
+ VALUE *newptr;
+ uint32_t newsize = iv_index_tbl_newsize(&ivup);
+
+ if (RBASIC(obj)->flags & ROBJECT_EMBED) {
+ newptr = ALLOC_N(VALUE, newsize);
+ MEMCPY(newptr, ptr, VALUE, len);
+ RBASIC(obj)->flags &= ~ROBJECT_EMBED;
+ ROBJECT(obj)->as.heap.ivptr = newptr;
+ }
+ else {
+ REALLOC_N(ROBJECT(obj)->as.heap.ivptr, VALUE, newsize);
+ newptr = ROBJECT(obj)->as.heap.ivptr;
+ }
+ for (; len < newsize; len++)
+ newptr[len] = Qundef;
+ ROBJECT(obj)->as.heap.numiv = newsize;
+ ROBJECT(obj)->as.heap.iv_index_tbl = ivup.u.iv_index_tbl;
+ }
+ }
+ RB_OBJ_WRITE(obj, &ROBJECT_IVPTR(obj)[ivup.index], val);
+ break;
case T_CLASS:
case T_MODULE:
if (!RCLASS_IV_TBL(obj)) RCLASS_IV_TBL(obj) = st_init_numtable();
diff --git a/vm_args.c b/vm_args.c
index 5e505f8136..4e989574ea 100644
--- a/vm_args.c
+++ b/vm_args.c
@@ -164,7 +164,7 @@ args_copy(struct args_info *args)
static inline const VALUE *
args_rest_argv(struct args_info *args)
{
- return RARRAY_CONST_PTR_TRANSIENT(args->rest) + args->rest_index;
+ return RARRAY_CONST_PTR(args->rest) + args->rest_index;
}
static inline VALUE
@@ -314,7 +314,7 @@ args_setup_post_parameters(struct args_info *args, int argc, VALUE *locals)
{
long len;
len = RARRAY_LEN(args->rest);
- MEMCPY(locals, RARRAY_CONST_PTR_TRANSIENT(args->rest) + len - argc, VALUE, argc);
+ MEMCPY(locals, RARRAY_CONST_PTR(args->rest) + len - argc, VALUE, argc);
rb_ary_resize(args->rest, len - argc);
}
@@ -335,7 +335,7 @@ args_setup_opt_parameters(struct args_info *args, int opt_max, VALUE *locals)
if (args->rest) {
int len = RARRAY_LENINT(args->rest);
- const VALUE *argv = RARRAY_CONST_PTR_TRANSIENT(args->rest);
+ const VALUE *argv = RARRAY_CONST_PTR(args->rest);
for (; i<opt_max && args->rest_index < len; i++, args->rest_index++) {
locals[i] = argv[args->rest_index];
@@ -785,7 +785,7 @@ vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calli
cfp->sp--;
if (!NIL_P(ary)) {
- const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
+ const VALUE *ptr = RARRAY_CONST_PTR(ary);
long len = RARRAY_LEN(ary), i;
CHECK_VM_STACK_OVERFLOW(cfp, len);
diff --git a/vm_eval.c b/vm_eval.c
index a864b75712..1176c494e1 100644
--- a/vm_eval.c
+++ b/vm_eval.c
@@ -764,7 +764,7 @@ rb_apply(VALUE recv, ID mid, VALUE args)
return ret;
}
argv = ALLOCA_N(VALUE, argc);
- MEMCPY(argv, RARRAY_CONST_PTR_TRANSIENT(args), VALUE, argc);
+ MEMCPY(argv, RARRAY_CONST_PTR(args), VALUE, argc);
return rb_call(recv, mid, argc, argv, CALL_FCALL);
}
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index a7fec0b71f..e162e9d32f 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -1265,7 +1265,7 @@ vm_expandarray(VALUE *sp, VALUE ary, rb_num_t num, int flag)
len = 1;
}
else {
- ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
+ ptr = RARRAY_CONST_PTR(ary);
len = (rb_num_t)RARRAY_LEN(ary);
}