path: root/st.c
diff options
authormame <mame@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>2019-01-15 14:19:19 (GMT)
committermame <mame@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>2019-01-15 14:19:19 (GMT)
commitab2547d786572f4c14e0d849f5f64f006425c5ba (patch)
treeb9edc897559417730205ed04dafe9ce07e9e21d2 /st.c
parent10d85b19da5a9c94c5e7af16c53679981aee963b (diff)
st.c (rb_hash_bulk_insert_into_st_table): avoid out-of-bounds write
"hash_bulk_insert" first expands the table, but the target size was wrong: it was calculated by "num_entries + (size to bulk insert)", but it was wrong when "num_entries < entries_bound", i.e., it has a deleted entry. "hash_bulk_insert" adds the given entries from entries_bound, which led to out-of-bounds write access. [Bug #15536] As a simple fix, this commit changes the calculation to "entries_bound + size". I'm afraid this might be inefficient, but I think it is safe anyway. git-svn-id: svn+ssh:// b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Diffstat (limited to 'st.c')
1 files changed, 1 insertions, 1 deletions
diff --git a/st.c b/st.c
index c6b3644..ed235c6 100644
--- a/st.c
+++ b/st.c
@@ -2299,7 +2299,7 @@ rb_hash_bulk_insert_into_st_table(long argc, const VALUE *argv, VALUE hash)
st_table *tab = RHASH_ST_TABLE(hash);
tab = RHASH_TBL_RAW(hash);
- n = tab->num_entries + size;
+ n = tab->entries_bound + size;
st_expand_table(tab, n);
if (UNLIKELY(tab->num_entries))
st_insert_generic(tab, argc, argv, hash);