| field | value | date |
|---|---|---|
| author | Alan Wu <XrXr@users.noreply.github.com> | 2025-10-23 15:22:23 -0400 |
| committer | Alan Wu <XrXr@users.noreply.github.com> | 2025-10-23 17:59:57 -0400 |
| commit | 8de628dc8055e1d812fdf326e4a6f74ce11a283d (patch) | |
| tree | 6f07bb35766d19dbc11883197460d91aa4cc22f8 | |
| parent | c2bce540f93aba77ddf89c7931a63b4e7108e466 (diff) | |
ZJIT: s/as_usize/to_usize/ to comply with rust API guidelines
When the name is `as_*`, the guideline expects the return type to be a
reference type. It is also good to have a naming contrast with the more
dangerous `as usize` cast, which `IntoUsize` is meant to be preferred
over.
See: https://rust-lang.github.io/api-guidelines/naming.html
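For reference, the linked guideline distinguishes the `as_*`, `to_*`, and `into_*` conversion prefixes by cost and ownership. The sketch below illustrates that convention with made-up types; nothing in it is taken from the ZJIT codebase.

```rust
// Illustrative only: invented `Packet` type showing the as_*/to_*/into_*
// naming convention from the Rust API guidelines.
struct Packet {
    bytes: Vec<u8>,
}

impl Packet {
    /// `as_*`: free conversion, borrowed -> borrowed, returns a reference.
    fn as_bytes(&self) -> &[u8] {
        &self.bytes
    }

    /// `to_*`: costlier or by-value conversion producing an owned value;
    /// an owned `usize` result fits here, hence `to_usize` over `as_usize`.
    fn to_hex(&self) -> String {
        self.bytes.iter().map(|b| format!("{b:02x}")).collect()
    }

    /// `into_*`: owned -> owned conversion that consumes the receiver.
    fn into_bytes(self) -> Vec<u8> {
        self.bytes
    }
}

fn main() {
    let p = Packet { bytes: vec![0xde, 0xad] };
    assert_eq!(p.as_bytes(), &[0xde_u8, 0xad]);
    assert_eq!(p.to_hex(), "dead");
    assert_eq!(p.into_bytes(), vec![0xde, 0xad]);
}
```

Under that reading, `as_usize` on a `u64` is a misnomer: it returns an owned `usize`, not a reference, so `to_usize` is the prefix the guideline prescribes.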
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | zjit/src/cast.rs | 10 |
| -rw-r--r-- | zjit/src/codegen.rs | 20 |
| -rw-r--r-- | zjit/src/hir.rs | 8 |

3 files changed, 19 insertions, 19 deletions
```diff
diff --git a/zjit/src/cast.rs b/zjit/src/cast.rs
index c6d11ef4af..52e2078cde 100644
--- a/zjit/src/cast.rs
+++ b/zjit/src/cast.rs
@@ -16,19 +16,19 @@
 /// the method `into()` also causes a name conflict.
 pub(crate) trait IntoUsize {
     /// Convert to usize. Implementation conditional on width of [usize].
-    fn as_usize(self) -> usize;
+    fn to_usize(self) -> usize;
 }
 
 #[cfg(target_pointer_width = "64")]
 impl IntoUsize for u64 {
-    fn as_usize(self) -> usize {
+    fn to_usize(self) -> usize {
         self as usize
     }
 }
 
 #[cfg(target_pointer_width = "64")]
 impl IntoUsize for u32 {
-    fn as_usize(self) -> usize {
+    fn to_usize(self) -> usize {
         self as usize
     }
 }
@@ -36,7 +36,7 @@ impl IntoUsize for u32 {
 impl IntoUsize for u16 {
     /// Alias for `.into()`. For convenience so you could use the trait for
     /// all unsgined types.
-    fn as_usize(self) -> usize {
+    fn to_usize(self) -> usize {
         self.into()
     }
 }
@@ -44,7 +44,7 @@ impl IntoUsize for u16 {
 impl IntoUsize for u8 {
     /// Alias for `.into()`. For convenience so you could use the trait for
     /// all unsgined types.
-    fn as_usize(self) -> usize {
+    fn to_usize(self) -> usize {
         self.into()
     }
 }
diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs
index 81608e5ae2..16b5e94d34 100644
--- a/zjit/src/codegen.rs
+++ b/zjit/src/codegen.rs
@@ -736,7 +736,7 @@ fn gen_ccall_with_frame(
     });
 
     asm_comment!(asm, "switch to new SP register");
-    let sp_offset = (caller_stack_size + VM_ENV_DATA_SIZE.as_usize()) * SIZEOF_VALUE;
+    let sp_offset = (caller_stack_size + VM_ENV_DATA_SIZE.to_usize()) * SIZEOF_VALUE;
     let new_sp = asm.add(SP, sp_offset.into());
     asm.mov(SP, new_sp);
 
@@ -792,7 +792,7 @@ fn gen_ccall_variadic(
     });
 
     asm_comment!(asm, "switch to new SP register");
-    let sp_offset = (state.stack().len() - args.len() + VM_ENV_DATA_SIZE.as_usize()) * SIZEOF_VALUE;
+    let sp_offset = (state.stack().len() - args.len() + VM_ENV_DATA_SIZE.to_usize()) * SIZEOF_VALUE;
     let new_sp = asm.add(SP, sp_offset.into());
     asm.mov(SP, new_sp);
 
@@ -975,7 +975,7 @@ fn gen_load_ivar_embedded(asm: &mut Assembler, self_val: Opnd, id: ID, index: u1
     // See ROBJECT_FIELDS() from include/ruby/internal/core/robject.h
     asm_comment!(asm, "Load embedded ivar id={} index={}", id.contents_lossy(), index);
-    let offs = ROBJECT_OFFSET_AS_ARY as i32 + (SIZEOF_VALUE * index.as_usize()) as i32;
+    let offs = ROBJECT_OFFSET_AS_ARY as i32 + (SIZEOF_VALUE * index.to_usize()) as i32;
     let self_val = asm.load(self_val);
     let ivar_opnd = Opnd::mem(64, self_val, offs);
     asm.load(ivar_opnd)
@@ -990,7 +990,7 @@ fn gen_load_ivar_extended(asm: &mut Assembler, self_val: Opnd, id: ID, index: u1
     let tbl_opnd = asm.load(Opnd::mem(64, self_val, ROBJECT_OFFSET_AS_HEAP_FIELDS as i32));
 
     // Read the ivar from the extended table
-    let ivar_opnd = Opnd::mem(64, tbl_opnd, (SIZEOF_VALUE * index.as_usize()) as i32);
+    let ivar_opnd = Opnd::mem(64, tbl_opnd, (SIZEOF_VALUE * index.to_usize()) as i32);
     asm.load(ivar_opnd)
 }
 
@@ -1174,8 +1174,8 @@ fn gen_send_without_block_direct(
 ) -> lir::Opnd {
     gen_incr_counter(asm, Counter::iseq_optimized_send_count);
 
-    let local_size = unsafe { get_iseq_body_local_table_size(iseq) }.as_usize();
-    let stack_growth = state.stack_size() + local_size + unsafe { get_iseq_body_stack_max(iseq) }.as_usize();
+    let local_size = unsafe { get_iseq_body_local_table_size(iseq) }.to_usize();
+    let stack_growth = state.stack_size() + local_size + unsafe { get_iseq_body_stack_max(iseq) }.to_usize();
     gen_stack_overflow_check(jit, asm, state, stack_growth);
 
     // Save cfp->pc and cfp->sp for the caller frame
@@ -1211,7 +1211,7 @@ fn gen_send_without_block_direct(
     });
 
     asm_comment!(asm, "switch to new SP register");
-    let sp_offset = (state.stack().len() + local_size - args.len() + VM_ENV_DATA_SIZE.as_usize()) * SIZEOF_VALUE;
+    let sp_offset = (state.stack().len() + local_size - args.len() + VM_ENV_DATA_SIZE.to_usize()) * SIZEOF_VALUE;
     let new_sp = asm.add(SP, sp_offset.into());
     asm.mov(SP, new_sp);
 
@@ -1889,7 +1889,7 @@ fn param_opnd(idx: usize) -> Opnd {
 /// Inverse of ep_offset_to_local_idx(). See ep_offset_to_local_idx() for details.
 pub fn local_idx_to_ep_offset(iseq: IseqPtr, local_idx: usize) -> i32 {
     let local_size = unsafe { get_iseq_body_local_table_size(iseq) };
-    local_size_and_idx_to_ep_offset(local_size.as_usize(), local_idx)
+    local_size_and_idx_to_ep_offset(local_size.to_usize(), local_idx)
 }
 
 /// Convert the number of locals and a local index to an offset from the EP
@@ -2005,8 +2005,8 @@ c_callable! {
             rb_set_cfp_sp(cfp, sp);
 
             // Fill nils to uninitialized (non-argument) locals
-            let local_size = get_iseq_body_local_table_size(iseq).as_usize();
-            let num_params = get_iseq_body_param_size(iseq).as_usize();
+            let local_size = get_iseq_body_local_table_size(iseq).to_usize();
+            let num_params = get_iseq_body_param_size(iseq).to_usize();
             let base = sp.offset(-local_size_and_idx_to_bp_offset(local_size, num_params) as isize);
             slice::from_raw_parts_mut(base, local_size - num_params).fill(Qnil);
         }
diff --git a/zjit/src/hir.rs b/zjit/src/hir.rs
index 7ee2308eb5..dbb9177ee3 100644
--- a/zjit/src/hir.rs
+++ b/zjit/src/hir.rs
@@ -1873,7 +1873,7 @@ impl Function {
     /// Set self.param_types. They are copied to the param types of jit_entry_blocks.
     fn set_param_types(&mut self) {
         let iseq = self.iseq;
-        let param_size = unsafe { get_iseq_body_param_size(iseq) }.as_usize();
+        let param_size = unsafe { get_iseq_body_param_size(iseq) }.to_usize();
         let rest_param_idx = iseq_rest_param_idx(iseq);
 
         self.param_types.push(types::BasicObject); // self
@@ -3885,7 +3885,7 @@ pub enum ParseError {
 
 /// Return the number of locals in the current ISEQ (includes parameters)
 fn num_locals(iseq: *const rb_iseq_t) -> usize {
-    (unsafe { get_iseq_body_local_table_size(iseq) }).as_usize()
+    (unsafe { get_iseq_body_local_table_size(iseq) }).to_usize()
 }
 
 /// If we can't handle the type of send (yet), bail out.
@@ -4896,7 +4896,7 @@ fn compile_entry_block(fun: &mut Function, jit_entry_insns: &[u32]) {
 /// Compile initial locals for an entry_block for the interpreter
 fn compile_entry_state(fun: &mut Function, entry_block: BlockId) -> (InsnId, FrameState) {
     let iseq = fun.iseq;
-    let param_size = unsafe { get_iseq_body_param_size(iseq) }.as_usize();
+    let param_size = unsafe { get_iseq_body_param_size(iseq) }.to_usize();
     let rest_param_idx = iseq_rest_param_idx(iseq);
 
     let self_param = fun.push_insn(entry_block, Insn::LoadSelf);
@@ -4929,7 +4929,7 @@ fn compile_jit_entry_block(fun: &mut Function, jit_entry_idx: usize, target_bloc
 /// Compile params and initial locals for a jit_entry_block
 fn compile_jit_entry_state(fun: &mut Function, jit_entry_block: BlockId) -> (InsnId, FrameState) {
     let iseq = fun.iseq;
-    let param_size = unsafe { get_iseq_body_param_size(iseq) }.as_usize();
+    let param_size = unsafe { get_iseq_body_param_size(iseq) }.to_usize();
 
     let self_param = fun.push_insn(jit_entry_block, Insn::Param);
     let mut entry_state = FrameState::new(iseq);
```
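A small caller-side sketch follows. The `IntoUsize` stand-in mirrors the shape of the trait in `zjit/src/cast.rs` after the rename but is re-declared here so the example is self-contained; the variable names and values are made up, not taken from the JIT.

```rust
use std::convert::TryFrom;

// Stand-in that mirrors zjit::cast::IntoUsize after the rename; not the
// actual crate code.
trait IntoUsize {
    /// Convert to usize.
    fn to_usize(self) -> usize;
}

#[cfg(target_pointer_width = "64")]
impl IntoUsize for u64 {
    fn to_usize(self) -> usize {
        // The raw cast is lossless here because this impl only exists on
        // targets where usize is 64 bits wide.
        self as usize
    }
}

impl IntoUsize for u32 {
    fn to_usize(self) -> usize {
        // Checked conversion so this sketch compiles on any target width.
        // (The real trait instead gates a plain cast behind
        // #[cfg(target_pointer_width = "64")].)
        usize::try_from(self).expect("u32 fits in usize on supported targets")
    }
}

fn main() {
    let local_table_size: u32 = 8; // made-up stand-in for an ISEQ field
    let big: u64 = 1 << 40;

    // Trait call: reads distinctly from a raw cast at the call site.
    let locals = local_table_size.to_usize();

    // A bare `as usize` compiles everywhere, even where it would truncate
    // (e.g. u64 -> usize on a 32-bit target); that silent truncation is
    // the hazard the commit message calls "more dangerous".
    let truncatable = big as usize;

    println!("locals = {locals}, truncatable = {truncatable}");
}
```

Keeping the conversion behind a trait also gives it a single grep-able name, which is what makes a mechanical rename like this one possible: the diff touches only the trait and its call sites, with no change in behavior (19 insertions, 19 deletions).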
