diff options
author | nobu <nobu@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2012-11-09 07:08:38 +0000 |
---|---|---|
committer | nobu <nobu@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2012-11-09 07:08:38 +0000 |
commit | 06de286c68f04b989cd7fd6e282065b3e8479d7e (patch) | |
tree | d625c51011d9d5dd184b530f0f4507e8705d7a3d | |
parent | 50c859715de864630d8c9259d6e6facc7c158bb8 (diff) |
array.c: steal shared array's container when ARY_SHARED_NUM == 1
* array.c (rb_ary_modify): steal shared array's container when
ARY_SHARED_NUM == 1. [Feature #6638]
- Do not allocate new memory in rb_ary_modify when ARY_SHARED_NUM == 1
and the length is almost the same.
- Store ARY_CAPA instead of RARRAY_LEN in ary_make_shared, to make
it useful.
- Fix rb_ary_sort_bang accordingly.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@37581 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
-rw-r--r-- | ChangeLog | 38 | ||||
-rw-r--r-- | array.c | 23 |
2 files changed, 55 insertions, 6 deletions
@@ -1,3 +1,41 @@ +Fri Nov 9 16:08:35 2012 Sokolov Yura funny-falcon <funny.falcon@gmail.com> + + * array.c (rb_ary_modify): steal shared array's container when + ARY_SHARED_NUM == 1. [Feature #6638] + - Do not allocate new memory in rb_ary_modify when ARY_SHARED_NUM == 1 + and length almost same. + - Store ARY_CAPA instead of RARRAY_LEN in ary_make_shared, to make + it useful. + - Fix rb_ary_sort_bang accordantly. + + * array.c: speedup Array#unshift by using space in shared array. + [Feature #6638] + - when array owns its shared array (ARY_SHARED_NUM == 1), and there + is enough space then try unshift values directly into shared + array. + - when resulting array is big (~>64 items) then make it shared with + enough room for future #unshifts, and then insert into shared + array. + + * array.c (rb_ary_splice): use shared array in rb_ary_slice. + [Feature #6638] + - use ary_ensure_room_for_push when rb_ary_slice used to add at the + end of array, cause rb_ary_concat use rb_ary_slice. + + * array.c (ary_ensure_room_for_push): make array really suitable for + queue. [Feature #6638] + when array is shared (which happens after Array#shift), and + ARY_SHARED_NUM == 1 (which is very often when array used as queue), + then make rb_ary_push push directly into shared array. + + * array.c (rb_ary_modify): steal shared array's container when + ARY_SHARED_NUM == 1. [Feature #6638] + - Do not allocate new memory in rb_ary_modify when ARY_SHARED_NUM == 1 + and length almost same. + - Store ARY_CAPA instead of RARRAY_LEN in ary_make_shared, to make + it useful. + - Fix rb_ary_sort_bang accordantly. 
+ Fri Nov 9 16:00:00 2012 Zachary Scott <zzak@zacharyscott.net> * ext/bigdecimal/bigdecimal.c: @@ -255,15 +255,24 @@ rb_ary_modify(VALUE ary) rb_ary_modify_check(ary); if (ARY_SHARED_P(ary)) { long len = RARRAY_LEN(ary); + VALUE shared = ARY_SHARED(ary); if (len <= RARRAY_EMBED_LEN_MAX) { VALUE *ptr = ARY_HEAP_PTR(ary); - VALUE shared = ARY_SHARED(ary); FL_UNSET_SHARED(ary); FL_SET_EMBED(ary); MEMCPY(ARY_EMBED_PTR(ary), ptr, VALUE, len); rb_ary_decrement_share(shared); ARY_SET_EMBED_LEN(ary, len); } + else if (ARY_SHARED_NUM(shared) == 1 && len > (RARRAY_LEN(shared)>>1)) { + long shift = RARRAY_PTR(ary) - RARRAY_PTR(shared); + ARY_SET_PTR(ary, RARRAY_PTR(shared)); + ARY_SET_CAPA(ary, RARRAY_LEN(shared)); + MEMMOVE(RARRAY_PTR(ary), RARRAY_PTR(ary)+shift, VALUE, len); + FL_UNSET_SHARED(ary); + FL_SET_EMBED(shared); + rb_ary_decrement_share(shared); + } else { VALUE *ptr = ALLOC_N(VALUE, len); MEMCPY(ptr, RARRAY_PTR(ary), VALUE, len); @@ -454,8 +463,9 @@ ary_make_shared(VALUE ary) NEWOBJ_OF(shared, struct RArray, 0, T_ARRAY); FL_UNSET_EMBED(shared); - ARY_SET_LEN((VALUE)shared, RARRAY_LEN(ary)); + ARY_SET_LEN((VALUE)shared, ARY_CAPA(ary)); ARY_SET_PTR((VALUE)shared, RARRAY_PTR(ary)); + rb_mem_clear(RARRAY_PTR(shared) + RARRAY_LEN(ary), ARY_CAPA(ary) - RARRAY_LEN(ary)); FL_SET_SHARED_ROOT(shared); ARY_SET_SHARED_NUM((VALUE)shared, 1); FL_SET_SHARED(ary); @@ -2188,12 +2198,13 @@ rb_ary_sort_bang(VALUE ary) if (RARRAY_LEN(ary) > 1) { VALUE tmp = ary_make_substitution(ary); /* only ary refers tmp */ struct ary_sort_data data; + long len = RARRAY_LEN(ary); RBASIC(tmp)->klass = 0; data.ary = tmp; data.opt_methods = 0; data.opt_inited = 0; - ruby_qsort(RARRAY_PTR(tmp), RARRAY_LEN(tmp), sizeof(VALUE), + ruby_qsort(RARRAY_PTR(tmp), len, sizeof(VALUE), rb_block_given_p()?sort_1:sort_2, &data); if (ARY_EMBED_P(tmp)) { @@ -2210,7 +2221,7 @@ rb_ary_sort_bang(VALUE ary) if (ARY_HEAP_PTR(ary) == ARY_HEAP_PTR(tmp)) { assert(!ARY_EMBED_P(ary)); FL_UNSET_SHARED(ary); - 
ARY_SET_CAPA(ary, ARY_CAPA(tmp)); + ARY_SET_CAPA(ary, RARRAY_LEN(tmp)); } else { assert(!ARY_SHARED_P(tmp)); @@ -2225,8 +2236,8 @@ rb_ary_sort_bang(VALUE ary) xfree(ARY_HEAP_PTR(ary)); } ARY_SET_PTR(ary, RARRAY_PTR(tmp)); - ARY_SET_HEAP_LEN(ary, RARRAY_LEN(tmp)); - ARY_SET_CAPA(ary, ARY_CAPA(tmp)); + ARY_SET_HEAP_LEN(ary, len); + ARY_SET_CAPA(ary, RARRAY_LEN(tmp)); } /* tmp was lost ownership for the ptr */ FL_UNSET(tmp, FL_FREEZE); |