author     nagachika <nagachika@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2013-06-22 15:32:54 +0000
committer  nagachika <nagachika@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2013-06-22 15:32:54 +0000
commit     4219cf6878fcbab7b0ed635926d68c21a3944c4f
tree       69ddc02a2489cc0f0b80cefc6509320f58b747f1 /gc.c
parent     2d3482ac7451df8c90cbf8dc076cae17ecfa136c
merge revision(s) 41325: [Backport #8554]

* gc.c: Fix up GC triggered by malloc. Add the allocated size to
  malloc_increase before GC so that after_gc_sweep updates the limit
  correctly, and reset malloc_increase inside garbage_collect() to
  prevent another GC from being triggered again immediately.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_2_0_0@41577 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Diffstat (limited to 'gc.c')
-rw-r--r--  gc.c | 14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/gc.c b/gc.c
index faf1ea7a5a..410722d66b 100644
--- a/gc.c
+++ b/gc.c
@@ -209,6 +209,7 @@ typedef struct rb_objspace {
     struct {
         size_t limit;
         size_t increase;
+        size_t increase2;
 #if CALC_EXACT_MALLOC_SIZE
         size_t allocated_size;
         size_t allocations;
@@ -271,6 +272,7 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
 #endif
 #define malloc_limit            objspace->malloc_params.limit
 #define malloc_increase         objspace->malloc_params.increase
+#define malloc_increase2        objspace->malloc_params.increase2
 #define heaps                   objspace->heap.ptr
 #define heaps_length            objspace->heap.length
 #define heaps_used              objspace->heap.used
@@ -1976,6 +1978,8 @@ before_gc_sweep(rb_objspace_t *objspace)
     objspace->heap.free_num = 0;
     objspace->heap.free_slots = NULL;

+    malloc_increase2 += ATOMIC_SIZE_EXCHANGE(malloc_increase,0);
+
     /* sweep unlinked method entries */
     if (GET_VM()->unlinked_method_entry_list) {
         rb_sweep_method_entry(GET_VM());
@@ -1994,6 +1998,9 @@ after_gc_sweep(rb_objspace_t *objspace)
     }

     inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
+    inc += malloc_increase2;
+    malloc_increase2 = 0;
+
     if (inc > malloc_limit) {
         malloc_limit +=
           (size_t)((inc - malloc_limit) * (double)objspace->heap.marked_num / (heaps_used * HEAP_OBJ_LIMIT));
@@ -3479,8 +3486,9 @@ vm_malloc_prepare(rb_objspace_t *objspace, size_t size)
     size += sizeof(size_t);
 #endif

+    ATOMIC_SIZE_ADD(malloc_increase, size);
     if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
-        (malloc_increase+size) > malloc_limit) {
+        malloc_increase > malloc_limit) {
         garbage_collect_with_gvl(objspace);
     }
@@ -3490,8 +3498,6 @@ vm_malloc_prepare(rb_objspace_t *objspace, size_t size)
 static inline void *
 vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
 {
-    ATOMIC_SIZE_ADD(malloc_increase, size);
-
 #if CALC_EXACT_MALLOC_SIZE
     ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size);
     ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
@@ -4077,7 +4083,7 @@ gc_prof_set_malloc_info(rb_objspace_t *objspace)
     if (objspace->profile.run) {
         gc_profile_record *record = &objspace->profile.record[objspace->profile.count];
         if (record) {
-            record->allocate_increase = malloc_increase;
+            record->allocate_increase = malloc_increase + malloc_increase2;
             record->allocate_limit = malloc_limit;
         }
     }
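The two vm_malloc hunks above are the heart of the fix: they move the ATOMIC_SIZE_ADD from vm_malloc_fixup (after the GC decision) into vm_malloc_prepare (before it). Below is a compilable sketch of why the ordering matters; the globals and the run_gc() stub are hypothetical stand-ins for garbage_collect_with_gvl(), and only the ordering mirrors the diff.

#include <stdatomic.h>
#include <stdio.h>

static atomic_size_t malloc_increase;
static size_t malloc_limit = 8u << 20;          /* 8 MiB, arbitrary */

static void run_gc(void)
{
    /* Stand-in for a collection: the sweep drains the counter (and would
     * recompute malloc_limit from it, as in after_gc_sweep above). */
    atomic_exchange(&malloc_increase, 0);
}

/* Pre-patch ordering: the GC runs without the pending request counted, and
 * the size is added only afterwards, so it survives the collection and the
 * very next allocation can trip the limit again ("GC again soon"). */
static void prepare_then_count(size_t size)
{
    if (atomic_load(&malloc_increase) + size > malloc_limit) run_gc();
    atomic_fetch_add(&malloc_increase, size);
}

/* Post-patch ordering: count first, then check, so the request that trips
 * the limit is visible to the sweep and drained along with everything else. */
static void count_then_prepare(size_t size)
{
    atomic_fetch_add(&malloc_increase, size);
    if (atomic_load(&malloc_increase) > malloc_limit) run_gc();
}

int main(void)
{
    prepare_then_count(9u << 20);               /* exceeds the 8 MiB limit */
    printf("old ordering: %zu bytes still pending after GC\n",
           atomic_load(&malloc_increase));      /* 9437184: never reset */
    atomic_store(&malloc_increase, 0);
    count_then_prepare(9u << 20);
    printf("new ordering: %zu bytes pending after GC\n",
           atomic_load(&malloc_increase));      /* 0: drained by the sweep */
    return 0;
}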