author    Alan Wu <XrXr@users.noreply.github.com>    2025-09-25 16:24:58 -0400
committer Alan Wu <XrXr@users.noreply.github.com>    2025-09-25 18:36:58 -0400
commit    00e6c10168596d4810f56430f18f778b66e30769 (patch)
tree      7a9338c74b8743c415d44ace0b2e037cbfd06389
parent    1a52c42e61878a1fe1d411a74108607766183b10 (diff)
ZJIT: Standardize to `Iterator::map` in `Invariants::update_references`
The old code did a manual HashSet/HashMap rebuild, with no clear performance advantage over `Iterator::map`. So let's use `map`: it reads more clearly, and it's easier to see that every reference was indeed updated. This also adds assertions the old code did not have, by way of as_iseq() and as_cme().
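For readers skimming the diff below, here is a minimal standalone sketch of the take/map/collect rebuild pattern the commit adopts. relocate() is a hypothetical stand-in for rb_gc_location(), and plain usize stands in for IseqPtr; this illustrates the pattern, not the committed code:

    use std::collections::HashSet;
    use std::mem;

    // Hypothetical stand-in for rb_gc_location(): returns the object's
    // possibly-moved address after GC compaction.
    fn relocate(old: usize) -> usize {
        old + 0x10 // pretend compaction slid every object by 0x10
    }

    fn update_refs(set: &mut HashSet<usize>) {
        // Take the set out, rewrite every element, and collect a fresh set.
        // Rebuilding wholesale is the safe way to "mutate" hashed elements,
        // since HashSet offers no in-place mutation of its members.
        *set = mem::take(set).into_iter().map(relocate).collect();
    }

    fn main() {
        let mut set: HashSet<usize> = [0x1000, 0x2000].into_iter().collect();
        update_refs(&mut set);
        assert!(set.contains(&0x1010) && set.contains(&0x2010));
    }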
-rw-r--r--  zjit/src/invariants.rs  43
1 file changed, 14 insertions(+), 29 deletions(-)
diff --git a/zjit/src/invariants.rs b/zjit/src/invariants.rs
index 80948c696e..2e67c33a6d 100644
--- a/zjit/src/invariants.rs
+++ b/zjit/src/invariants.rs
@@ -2,7 +2,7 @@
use std::{collections::{HashMap, HashSet}, mem};
-use crate::{backend::lir::{asm_comment, Assembler}, cruby::{iseq_name, rb_callable_method_entry_t, rb_gc_location, ruby_basic_operators, src_loc, with_vm_lock, IseqPtr, RedefinitionFlag, ID, VALUE}, gc::IseqPayload, hir::Invariant, options::debug, state::{zjit_enabled_p, ZJITState}, virtualmem::CodePtr};
+use crate::{backend::lir::{asm_comment, Assembler}, cruby::{iseq_name, rb_callable_method_entry_t, rb_gc_location, ruby_basic_operators, src_loc, with_vm_lock, IseqPtr, RedefinitionFlag, ID}, gc::IseqPayload, hir::Invariant, options::debug, state::{zjit_enabled_p, ZJITState}, virtualmem::CodePtr};
use crate::stats::with_time_stat;
use crate::stats::Counter::invalidation_time_ns;
use crate::gc::remove_gc_offsets;
@@ -70,38 +70,23 @@ impl Invariants {
/// Update ISEQ references in Invariants::ep_escape_iseqs
fn update_ep_escape_iseqs(&mut self) {
- let mut moved: Vec<IseqPtr> = Vec::with_capacity(self.ep_escape_iseqs.len());
-
- self.ep_escape_iseqs.retain(|&old_iseq| {
- let new_iseq = unsafe { rb_gc_location(VALUE(old_iseq as usize)) }.0 as IseqPtr;
- if old_iseq != new_iseq {
- moved.push(new_iseq);
- }
- old_iseq == new_iseq
- });
-
- for new_iseq in moved {
- self.ep_escape_iseqs.insert(new_iseq);
- }
+ let updated = std::mem::take(&mut self.ep_escape_iseqs)
+ .into_iter()
+ .map(|iseq| unsafe { rb_gc_location(iseq.into()) }.as_iseq())
+ .collect();
+ self.ep_escape_iseqs = updated;
}
/// Update ISEQ references in Invariants::no_ep_escape_iseq_patch_points
fn update_no_ep_escape_iseq_patch_points(&mut self) {
- let mut moved: Vec<(IseqPtr, HashSet<PatchPoint>)> = Vec::with_capacity(self.no_ep_escape_iseq_patch_points.len());
- let iseqs: Vec<IseqPtr> = self.no_ep_escape_iseq_patch_points.keys().cloned().collect();
-
- for old_iseq in iseqs {
- let new_iseq = unsafe { rb_gc_location(VALUE(old_iseq as usize)) }.0 as IseqPtr;
- if old_iseq != new_iseq {
- let patch_points = self.no_ep_escape_iseq_patch_points.remove(&old_iseq).unwrap();
- // Do not insert patch points to no_ep_escape_iseq_patch_points yet to avoid corrupting keys that had a different ISEQ
- moved.push((new_iseq, patch_points));
- }
- }
-
- for (new_iseq, patch_points) in moved {
- self.no_ep_escape_iseq_patch_points.insert(new_iseq, patch_points);
- }
+ let updated = std::mem::take(&mut self.no_ep_escape_iseq_patch_points)
+ .into_iter()
+ .map(|(iseq, patch_points)| {
+ let new_iseq = unsafe { rb_gc_location(iseq.into()) };
+ (new_iseq.as_iseq(), patch_points)
+ })
+ .collect();
+ self.no_ep_escape_iseq_patch_points = updated;
}
}
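The second hunk deserves a note: the old code deferred re-inserting moved entries "to avoid corrupting keys that had a different ISEQ", i.e. a moved key could collide with another entry's not-yet-updated key if inserted mid-iteration. Rebuilding the whole map via take/collect sidesteps that hazard, because nothing is ever inserted back into the map being drained. A minimal sketch under the same assumptions as above (hypothetical relocate() in place of rb_gc_location()):

    use std::collections::HashMap;
    use std::mem;

    // Hypothetical relocate(), as above.
    fn relocate(old: usize) -> usize {
        old + 0x10
    }

    fn update_keys<V>(map: &mut HashMap<usize, V>) {
        // Draining into a brand-new map means a relocated key can never
        // clobber a stale, not-yet-updated key in the map being rebuilt.
        *map = mem::take(map)
            .into_iter()
            .map(|(iseq, patch_points)| (relocate(iseq), patch_points))
            .collect();
    }

    fn main() {
        let mut map = HashMap::from([(0x1000usize, "a"), (0x2000, "b")]);
        update_keys(&mut map);
        assert_eq!(map.get(&0x1010), Some(&"a"));
    }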