author    Jimmy Miller <jimmy.miller@shopify.com>    2022-09-30 10:14:55 -0500
committer GitHub <noreply@github.com>               2022-09-30 11:14:55 -0400
commit    31461c7e0eab4963ccc8649ea8ebf27979132c0c (patch)
tree      69a0378ba20e47c085928ede356431a65d8e60c3
parent    ad651925e365ca18645f05b5e9b2eca9cd5721bc (diff)
A bunch of clippy auto fixes for yjit (#6476)
Notes:
    Merged-By: maximecb <maximecb@ruby-lang.org>
-rw-r--r--  yjit/src/asm/arm64/arg/shifted_imm.rs   |  2
-rw-r--r--  yjit/src/asm/arm64/arg/truncate.rs      |  8
-rw-r--r--  yjit/src/asm/arm64/inst/branch_cond.rs  |  3
-rw-r--r--  yjit/src/asm/arm64/inst/halfword_imm.rs |  2
-rw-r--r--  yjit/src/asm/arm64/inst/nop.rs          |  2
-rw-r--r--  yjit/src/asm/arm64/inst/pc_rel.rs       |  2
-rw-r--r--  yjit/src/asm/arm64/mod.rs               |  8
-rw-r--r--  yjit/src/asm/arm64/opnd.rs              |  2
-rw-r--r--  yjit/src/asm/mod.rs                     | 10
-rw-r--r--  yjit/src/backend/ir.rs                  |  6
-rw-r--r--  yjit/src/backend/tests.rs               |  4
-rw-r--r--  yjit/src/codegen.rs                     | 42
-rw-r--r--  yjit/src/cruby.rs                       |  2
-rw-r--r--  yjit/src/disasm.rs                      |  6
-rw-r--r--  yjit/src/invariants.rs                  |  2
-rw-r--r--  yjit/src/stats.rs                       |  4
16 files changed, 53 insertions, 52 deletions
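
Every hunk below is a mechanical lint fix; given the commit title, these are presumably the machine-applicable suggestions from `cargo clippy --fix` (which also applies rustc's own `unused_mut`, `unused_variables`, `unused_imports`, and `unused_parens` fixes). As an illustrative sketch only — this function is not from the repository — the two most common before/after patterns look like this:

    // Hypothetical example of the recurring lint patterns, not commit code.
    fn lint_patterns(value: u64) -> u64 {
        // unused_mut: `let mut current = value;` loses the `mut`
        // because `current` is never reassigned.
        let current = value;
        // unused_parens: `(current & mask)` loses its outer parentheses.
        current & ((1 << 12) - 1)
    }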
diff --git a/yjit/src/asm/arm64/arg/shifted_imm.rs b/yjit/src/asm/arm64/arg/shifted_imm.rs
index 5d1eeaf26d..0dd7af25b5 100644
--- a/yjit/src/asm/arm64/arg/shifted_imm.rs
+++ b/yjit/src/asm/arm64/arg/shifted_imm.rs
@@ -18,7 +18,7 @@ impl TryFrom<u64> for ShiftedImmediate {
/// Attempt to convert a u64 into a BitmaskImm.
fn try_from(value: u64) -> Result<Self, Self::Error> {
- let mut current = value;
+ let current = value;
if current < 2_u64.pow(12) {
return Ok(ShiftedImmediate { shift: Shift::LSL0, value: current as u16 });
}
diff --git a/yjit/src/asm/arm64/arg/truncate.rs b/yjit/src/asm/arm64/arg/truncate.rs
index 52f2c012cb..0de562f808 100644
--- a/yjit/src/asm/arm64/arg/truncate.rs
+++ b/yjit/src/asm/arm64/arg/truncate.rs
@@ -31,7 +31,7 @@ pub fn truncate_imm<T: Into<i32>, const WIDTH: usize>(imm: T) -> u32 {
/// This should effectively be a no-op since we're just dropping leading zeroes.
pub fn truncate_uimm<T: Into<u32>, const WIDTH: usize>(uimm: T) -> u32 {
let value: u32 = uimm.into();
- let masked = (value & ((1 << WIDTH) - 1));
+ let masked = value & ((1 << WIDTH) - 1);
// Assert that we didn't drop any bits by truncating.
assert_eq!(value, masked);
@@ -46,21 +46,21 @@ mod tests {
#[test]
fn test_truncate_imm_positive() {
let inst = truncate_imm::<i32, 4>(5);
- let result: u32 = inst.into();
+ let result: u32 = inst;
assert_eq!(0b0101, result);
}
#[test]
fn test_truncate_imm_negative() {
let inst = truncate_imm::<i32, 4>(-5);
- let result: u32 = inst.into();
+ let result: u32 = inst;
assert_eq!(0b1011, result);
}
#[test]
fn test_truncate_uimm() {
let inst = truncate_uimm::<u32, 4>(5);
- let result: u32 = inst.into();
+ let result: u32 = inst;
assert_eq!(0b0101, result);
}
}
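
The test changes in truncate.rs fix `clippy::useless_conversion`: `truncate_imm` already returns a `u32`, so converting the result to `u32` with `.into()` is a no-op. A minimal reproduction of the pattern (assumed shape, not repository code):

    fn make_u32() -> u32 {
        5
    }

    fn demo() -> u32 {
        let inst = make_u32();
        // Before: `let result: u32 = inst.into();` converts u32 to u32,
        // which clippy::useless_conversion flags. After:
        let result: u32 = inst;
        result
    }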
diff --git a/yjit/src/asm/arm64/inst/branch_cond.rs b/yjit/src/asm/arm64/inst/branch_cond.rs
index 4338cf0f4f..fcc07f69aa 100644
--- a/yjit/src/asm/arm64/inst/branch_cond.rs
+++ b/yjit/src/asm/arm64/inst/branch_cond.rs
@@ -1,4 +1,4 @@
-use super::super::arg::{Condition, InstructionOffset, truncate_imm};
+use super::super::arg::{InstructionOffset, truncate_imm};
/// The struct that represents an A64 conditional branch instruction that can be
/// encoded.
@@ -50,6 +50,7 @@ impl From<BranchCond> for [u8; 4] {
#[cfg(test)]
mod tests {
use super::*;
+ use super::super::super::arg::Condition;
#[test]
fn test_b_eq() {
diff --git a/yjit/src/asm/arm64/inst/halfword_imm.rs b/yjit/src/asm/arm64/inst/halfword_imm.rs
index c31d1f8945..0ddae8e8de 100644
--- a/yjit/src/asm/arm64/inst/halfword_imm.rs
+++ b/yjit/src/asm/arm64/inst/halfword_imm.rs
@@ -95,7 +95,7 @@ const FAMILY: u32 = 0b111100;
impl From<HalfwordImm> for u32 {
/// Convert an instruction into a 32-bit value.
fn from(inst: HalfwordImm) -> Self {
- let (mut opc, imm) = match inst.index {
+ let (opc, imm) = match inst.index {
Index::None => {
assert_eq!(inst.imm & 1, 0, "immediate offset must be even");
let imm12 = truncate_imm::<_, 12>(inst.imm / 2);
diff --git a/yjit/src/asm/arm64/inst/nop.rs b/yjit/src/asm/arm64/inst/nop.rs
index a99f8d34b7..d58b3574a9 100644
--- a/yjit/src/asm/arm64/inst/nop.rs
+++ b/yjit/src/asm/arm64/inst/nop.rs
@@ -18,7 +18,7 @@ impl Nop {
impl From<Nop> for u32 {
/// Convert an instruction into a 32-bit value.
- fn from(inst: Nop) -> Self {
+ fn from(_inst: Nop) -> Self {
0b11010101000000110010000000011111
}
}
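
The nop.rs change is rustc's `unused_variables` lint: a NOP always encodes to the same 32-bit pattern, so the `From` parameter is never read, and prefixing it with `_` documents that while keeping the trait signature intact. The same `_`-prefix treatment recurs throughout the codegen.rs hunks below. A self-contained sketch of the pattern:

    struct Nop;

    impl From<Nop> for u32 {
        // The parameter must exist to satisfy the trait, but its value is
        // irrelevant: the encoding is a constant. The `_` prefix silences
        // the unused_variables warning.
        fn from(_inst: Nop) -> Self {
            0b11010101000000110010000000011111
        }
    }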
diff --git a/yjit/src/asm/arm64/inst/pc_rel.rs b/yjit/src/asm/arm64/inst/pc_rel.rs
index fa330cb9d6..bd1a2b9367 100644
--- a/yjit/src/asm/arm64/inst/pc_rel.rs
+++ b/yjit/src/asm/arm64/inst/pc_rel.rs
@@ -53,7 +53,7 @@ impl From<PCRelative> for u32 {
// Toggle the sign bit if necessary.
if inst.imm < 0 {
- immhi |= (1 << 18);
+ immhi |= 1 << 18;
}
0
diff --git a/yjit/src/asm/arm64/mod.rs b/yjit/src/asm/arm64/mod.rs
index 88431ce30a..9d85705ff8 100644
--- a/yjit/src/asm/arm64/mod.rs
+++ b/yjit/src/asm/arm64/mod.rs
@@ -15,7 +15,7 @@ pub use opnd::*;
/// Checks that a signed value fits within the specified number of bits.
pub const fn imm_fits_bits(imm: i64, num_bits: u8) -> bool {
- let minimum = if num_bits == 64 { i64::MIN } else { -2_i64.pow((num_bits as u32) - 1) };
+ let minimum = if num_bits == 64 { i64::MIN } else { -(2_i64.pow((num_bits as u32) - 1)) };
let maximum = if num_bits == 64 { i64::MAX } else { 2_i64.pow((num_bits as u32) - 1) - 1 };
imm >= minimum && imm <= maximum
@@ -1025,8 +1025,8 @@ mod tests {
assert!(imm_fits_bits(i32::MAX.into(), 32));
assert!(imm_fits_bits(i32::MIN.into(), 32));
- assert!(imm_fits_bits(i64::MAX.into(), 64));
- assert!(imm_fits_bits(i64::MIN.into(), 64));
+ assert!(imm_fits_bits(i64::MAX, 64));
+ assert!(imm_fits_bits(i64::MIN, 64));
}
#[test]
@@ -1034,7 +1034,7 @@ mod tests {
assert!(uimm_fits_bits(u8::MAX.into(), 8));
assert!(uimm_fits_bits(u16::MAX.into(), 16));
assert!(uimm_fits_bits(u32::MAX.into(), 32));
- assert!(uimm_fits_bits(u64::MAX.into(), 64));
+ assert!(uimm_fits_bits(u64::MAX, 64));
}
#[test]
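
The `imm_fits_bits` change above is the one fix worth pausing on: `clippy::precedence` flags `-2_i64.pow(...)` because a method call binds tighter than unary minus in Rust, so the expression already evaluates as `-(2_i64.pow(...))`. The added parentheses change nothing at runtime; they only make the order explicit for readers who might expect `(-2).pow(...)`. A standalone illustration:

    fn signed_range(num_bits: u32) -> (i64, i64) {
        // A method call binds tighter than unary minus, so this is -4,
        // not (-2) * (-2) == 4; clippy::precedence asks for explicit parens.
        assert_eq!(-2_i64.pow(2), -4);
        let minimum = -(2_i64.pow(num_bits - 1));
        let maximum = 2_i64.pow(num_bits - 1) - 1;
        (minimum, maximum)
    }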
diff --git a/yjit/src/asm/arm64/opnd.rs b/yjit/src/asm/arm64/opnd.rs
index 0dc614ab4e..108824e08d 100644
--- a/yjit/src/asm/arm64/opnd.rs
+++ b/yjit/src/asm/arm64/opnd.rs
@@ -1,4 +1,4 @@
-use crate::asm::{imm_num_bits, uimm_num_bits};
+
/// This operand represents a register.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
diff --git a/yjit/src/asm/mod.rs b/yjit/src/asm/mod.rs
index f5501a4bc7..2bc83ec059 100644
--- a/yjit/src/asm/mod.rs
+++ b/yjit/src/asm/mod.rs
@@ -220,7 +220,7 @@ impl CodeBlock {
/// Allocate a new label with a given name
pub fn new_label(&mut self, name: String) -> usize {
- assert!(!name.contains(" "), "use underscores in label names, not spaces");
+ assert!(!name.contains(' '), "use underscores in label names, not spaces");
// This label doesn't have an address yet
self.label_addrs.push(0);
@@ -378,8 +378,8 @@ mod tests
assert_eq!(imm_num_bits(i32::MIN.into()), 32);
assert_eq!(imm_num_bits(i32::MAX.into()), 32);
- assert_eq!(imm_num_bits(i64::MIN.into()), 64);
- assert_eq!(imm_num_bits(i64::MAX.into()), 64);
+ assert_eq!(imm_num_bits(i64::MIN), 64);
+ assert_eq!(imm_num_bits(i64::MAX), 64);
}
#[test]
@@ -393,7 +393,7 @@ mod tests
assert_eq!(uimm_num_bits(((u16::MAX as u32) + 1).into()), 32);
assert_eq!(uimm_num_bits(u32::MAX.into()), 32);
- assert_eq!(uimm_num_bits(((u32::MAX as u64) + 1).into()), 64);
- assert_eq!(uimm_num_bits(u64::MAX.into()), 64);
+ assert_eq!(uimm_num_bits(((u32::MAX as u64) + 1)), 64);
+ assert_eq!(uimm_num_bits(u64::MAX), 64);
}
}
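
The `new_label` assertion swaps a one-byte string pattern for a `char`: `clippy::single_char_pattern` prefers `contains(' ')` over `contains(" ")` because a `char` pattern compiles to a cheaper comparison than a substring search. The same fix appears again in backend/ir.rs below. Sketch:

    fn check_label_name(name: &str) {
        // clippy::single_char_pattern: a char pattern is cheaper than a
        // one-character &str pattern for the same test.
        assert!(!name.contains(' '), "use underscores in label names, not spaces");
    }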
diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs
index 609ca8eaf4..dfdc1deb0d 100644
--- a/yjit/src/backend/ir.rs
+++ b/yjit/src/backend/ir.rs
@@ -912,7 +912,7 @@ impl Assembler
/// Create a new label instance that we can jump to
pub fn new_label(&mut self, name: &str) -> Target
{
- assert!(!name.contains(" "), "use underscores in label names, not spaces");
+ assert!(!name.contains(' '), "use underscores in label names, not spaces");
let label_idx = self.label_names.len();
self.label_names.push(name.to_string());
@@ -1232,10 +1232,10 @@ impl AssemblerLookbackIterator {
impl fmt::Debug for Assembler {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- write!(fmt, "Assembler\n")?;
+ writeln!(fmt, "Assembler")?;
for (idx, insn) in self.insns.iter().enumerate() {
- write!(fmt, " {idx:03} {insn:?}\n")?;
+ writeln!(fmt, " {idx:03} {insn:?}")?;
}
Ok(())
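
The `Debug` impl rewrite is `clippy::write_with_newline`: a `write!` whose format string ends in `\n` is just `writeln!` without the escape, and the disasm.rs hunks below get the same treatment. A compilable sketch of the fixed shape, with a stand-in struct:

    use std::fmt;

    struct Assembler {
        insns: Vec<String>,
    }

    impl fmt::Debug for Assembler {
        fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
            // clippy::write_with_newline: use writeln! instead of a
            // format string with a trailing \n.
            writeln!(fmt, "Assembler")?;
            for (idx, insn) in self.insns.iter().enumerate() {
                writeln!(fmt, "    {idx:03} {insn:?}")?;
            }
            Ok(())
        }
    }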
diff --git a/yjit/src/backend/tests.rs b/yjit/src/backend/tests.rs
index 08e8849b4d..16f9375ba4 100644
--- a/yjit/src/backend/tests.rs
+++ b/yjit/src/backend/tests.rs
@@ -29,11 +29,11 @@ fn guard_object_is_heap(
asm.comment("guard object is heap");
// Test that the object is not an immediate
- asm.test(object_opnd.clone(), Opnd::UImm(RUBY_IMMEDIATE_MASK as u64));
+ asm.test(object_opnd, Opnd::UImm(RUBY_IMMEDIATE_MASK as u64));
asm.jnz(Target::CodePtr(side_exit));
// Test that the object is not false or nil
- asm.cmp(object_opnd.clone(), Opnd::UImm(Qnil.into()));
+ asm.cmp(object_opnd, Opnd::UImm(Qnil.into()));
asm.jbe(Target::CodePtr(side_exit));
}
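
Dropping `.clone()` on `object_opnd` is `clippy::clone_on_copy`: the backend's `Opnd` must be `Copy` for this fix to compile (the value is used by both `test` and `cmp`), so the clone was just a verbose spelling of the implicit copy that passing by value performs anyway. Illustrated with a stand-in type:

    #[derive(Clone, Copy)]
    struct Opnd(u64);

    fn consume(_opnd: Opnd) {}

    fn use_twice(opnd: Opnd) {
        // Because Opnd is Copy, each call copies the value implicitly;
        // writing `opnd.clone()` (clippy::clone_on_copy) adds nothing.
        consume(opnd);
        consume(opnd);
    }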
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index 4018a314fc..ceb834d4c7 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -13,7 +13,7 @@ use crate::utils::*;
use CodegenStatus::*;
use InsnOpnd::*;
-use std::cell::RefMut;
+
use std::cmp;
use std::collections::HashMap;
use std::ffi::CStr;
@@ -269,7 +269,7 @@ fn jit_save_pc(jit: &JITState, asm: &mut Assembler) {
/// This realigns the interpreter SP with the JIT SP
/// Note: this will change the current value of REG_SP,
/// which could invalidate memory operands
-fn gen_save_sp(jit: &JITState, asm: &mut Assembler, ctx: &mut Context) {
+fn gen_save_sp(_jit: &JITState, asm: &mut Assembler, ctx: &mut Context) {
if ctx.get_sp_offset() != 0 {
asm.comment("save SP to CFP");
let stack_pointer = ctx.sp_opnd(0);
@@ -515,7 +515,7 @@ pub fn jit_ensure_block_entry_exit(jit: &mut JITState, ocb: &mut OutlinedCb) {
// Generate the exit with the cache in jitstate.
block.entry_exit = Some(get_side_exit(jit, ocb, &block_ctx));
} else {
- let pc = unsafe { rb_iseq_pc_at_idx(blockid.iseq, blockid.idx) };
+ let _pc = unsafe { rb_iseq_pc_at_idx(blockid.iseq, blockid.idx) };
block.entry_exit = Some(gen_outlined_exit(jit.pc, &block_ctx, ocb));
}
}
@@ -913,7 +913,7 @@ fn gen_pop(
}
fn gen_dup(
- jit: &mut JITState,
+ _jit: &mut JITState,
ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
@@ -971,7 +971,7 @@ fn gen_swap(
}
fn stack_swap(
- jit: &mut JITState,
+ _jit: &mut JITState,
ctx: &mut Context,
asm: &mut Assembler,
offset0: u16,
@@ -1002,7 +1002,7 @@ fn gen_putnil(
KeepCompiling
}
-fn jit_putobject(jit: &mut JITState, ctx: &mut Context, asm: &mut Assembler, arg: VALUE) {
+fn jit_putobject(_jit: &mut JITState, ctx: &mut Context, asm: &mut Assembler, arg: VALUE) {
let val_type: Type = Type::from(arg);
let stack_top = ctx.stack_push(val_type);
asm.mov(stack_top, arg.into());
@@ -1927,7 +1927,7 @@ fn gen_set_ivar(
jit: &mut JITState,
ctx: &mut Context,
asm: &mut Assembler,
- recv: VALUE,
+ _recv: VALUE,
ivar_name: ID,
) -> CodegenStatus {
// Save the PC and SP because the callee may allocate
@@ -1943,7 +1943,7 @@ fn gen_set_ivar(
rb_vm_set_ivar_id as *const u8,
vec![
recv_opnd,
- Opnd::UImm(ivar_name.into()),
+ Opnd::UImm(ivar_name),
val_opnd,
],
);
@@ -2740,7 +2740,7 @@ fn gen_opt_aset(
// Get the operands from the stack
let recv = ctx.stack_opnd(2);
let key = ctx.stack_opnd(1);
- let val = ctx.stack_opnd(0);
+ let _val = ctx.stack_opnd(0);
if comptime_recv.class_of() == unsafe { rb_cArray } && comptime_key.fixnum_p() {
let side_exit = get_side_exit(jit, ocb, ctx);
@@ -3229,7 +3229,7 @@ fn gen_branchif(
let target = if result { jump_block } else { next_block };
gen_direct_jump(jit, ctx, target, asm);
} else {
- asm.test(val_opnd.into(), Opnd::Imm(!Qnil.as_i64()));
+ asm.test(val_opnd, Opnd::Imm(!Qnil.as_i64()));
// Generate the branch instructions
gen_branch(
@@ -3301,7 +3301,7 @@ fn gen_branchunless(
// RUBY_Qfalse /* ...0000 0000 */
// RUBY_Qnil /* ...0000 1000 */
let not_qnil = !Qnil.as_i64();
- asm.test(val_opnd.into(), not_qnil.into());
+ asm.test(val_opnd, not_qnil.into());
// Generate the branch instructions
gen_branch(
@@ -3563,7 +3563,7 @@ fn jit_guard_known_klass(
// Generate ancestry guard for protected callee.
// Calls to protected callees only go through when self.is_a?(klass_that_defines_the_callee).
fn jit_protected_callee_ancestry_guard(
- jit: &mut JITState,
+ _jit: &mut JITState,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
cme: *const rb_callable_method_entry_t,
@@ -3889,7 +3889,7 @@ fn jit_obj_respond_to(
ctx.get_opnd_type(StackOpnd(0)).known_truthy()
};
- let mut target_cme = unsafe { rb_callable_method_entry_or_negative(recv_class, mid) };
+ let target_cme = unsafe { rb_callable_method_entry_or_negative(recv_class, mid) };
// Should never be null, as in that case we will be returned a "negative CME"
assert!(!target_cme.is_null());
@@ -3935,7 +3935,7 @@ fn jit_obj_respond_to(
}
let sym_opnd = ctx.stack_pop(1);
- let recv_opnd = ctx.stack_pop(1);
+ let _recv_opnd = ctx.stack_pop(1);
// This is necessary because we have no guarantee that sym_opnd is a constant
asm.comment("guard known mid");
@@ -4036,8 +4036,8 @@ struct ControlFrame {
// * Stack overflow is not checked (should be done by the caller)
// * Interrupts are not checked (should be done by the caller)
fn gen_push_frame(
- jit: &mut JITState,
- ctx: &mut Context,
+ _jit: &mut JITState,
+ _ctx: &mut Context,
asm: &mut Assembler,
set_pc_cfp: bool, // if true CFP and SP will be switched to the callee
frame: ControlFrame,
@@ -4101,7 +4101,7 @@ fn gen_push_frame(
// For an iseq call PC may be None, in which case we will not set PC and will allow jitted code
// to set it as necessary.
- let pc = if let Some(pc) = frame.pc {
+ let _pc = if let Some(pc) = frame.pc {
asm.mov(cfp_opnd(RUBY_OFFSET_CFP_PC), pc.into());
};
asm.mov(cfp_opnd(RUBY_OFFSET_CFP_BP), sp);
@@ -4146,7 +4146,7 @@ fn gen_send_cfunc(
) -> CodegenStatus {
let cfunc = unsafe { get_cme_def_body_cfunc(cme) };
let cfunc_argc = unsafe { get_mct_argc(cfunc) };
- let mut argc = argc;
+ let argc = argc;
// Create a side-exit to fall back to the interpreter
let side_exit = get_side_exit(jit, ocb, ctx);
@@ -4417,7 +4417,7 @@ fn push_splat_args(required_args: i32, ctx: &mut Context, asm: &mut Assembler, o
let ary_opnd = asm.csel_nz(ary_opnd, heap_ptr_opnd);
- for i in (0..required_args as i32) {
+ for i in 0..required_args as i32 {
let top = ctx.stack_push(Type::Unknown);
asm.mov(top, Opnd::mem(64, ary_opnd, i * (SIZEOF_VALUE as i32)));
}
@@ -4662,7 +4662,7 @@ fn gen_send_iseq(
};
if let (None, Some(builtin_info)) = (block, leaf_builtin) {
let builtin_argc = unsafe { (*builtin_info).argc };
- if builtin_argc + 1 /* for self */ + 1 /* for ec */ <= (C_ARG_OPNDS.len() as i32) {
+ if builtin_argc + 1 < (C_ARG_OPNDS.len() as i32) {
asm.comment("inlined leaf builtin");
// Call the builtin func (ec, recv, arg1, arg2, ...)
@@ -5481,7 +5481,7 @@ fn gen_leave(
// Create a side-exit to fall back to the interpreter
let side_exit = get_side_exit(jit, ocb, ctx);
- let mut ocb_asm = Assembler::new();
+ let ocb_asm = Assembler::new();
// Check for interrupts
gen_check_ints(asm, counted_exit!(ocb, side_exit, leave_se_interrupt));
diff --git a/yjit/src/cruby.rs b/yjit/src/cruby.rs
index 65f398f075..4307937707 100644
--- a/yjit/src/cruby.rs
+++ b/yjit/src/cruby.rs
@@ -84,7 +84,7 @@
use std::convert::From;
use std::ffi::CString;
-use std::os::raw::{c_char, c_int, c_long, c_uint};
+use std::os::raw::{c_char, c_int, c_uint};
use std::panic::{catch_unwind, UnwindSafe};
// We check that we can do this with the configure script and a couple of
diff --git a/yjit/src/disasm.rs b/yjit/src/disasm.rs
index 3d1c5b33fd..c236d9055d 100644
--- a/yjit/src/disasm.rs
+++ b/yjit/src/disasm.rs
@@ -2,7 +2,7 @@ use crate::core::*;
use crate::cruby::*;
use crate::yjit::yjit_enabled_p;
use crate::asm::CodeBlock;
-use crate::codegen::CodePtr;
+
use std::fmt::Write;
/// Primitive called in yjit.rb
@@ -152,10 +152,10 @@ pub fn disasm_addr_range(cb: &CodeBlock, start_addr: *const u8, code_size: usize
// Comments for this block
if let Some(comment_list) = cb.comments_at(insn.address() as usize) {
for comment in comment_list {
- write!(&mut out, " \x1b[1m# {}\x1b[0m\n", comment).unwrap();
+ writeln!(&mut out, " \x1b[1m# {}\x1b[0m", comment).unwrap();
}
}
- write!(&mut out, " {}\n", insn).unwrap();
+ writeln!(&mut out, " {}", insn).unwrap();
}
return out;
diff --git a/yjit/src/invariants.rs b/yjit/src/invariants.rs
index ee79b2938a..c7c0701e74 100644
--- a/yjit/src/invariants.rs
+++ b/yjit/src/invariants.rs
@@ -165,7 +165,7 @@ pub fn assume_method_basic_definition(
mid: ID
) -> bool {
if unsafe { rb_method_basic_definition_p(klass, mid) } != 0 {
- let mut cme = unsafe { rb_callable_method_entry(klass, mid) };
+ let cme = unsafe { rb_callable_method_entry(klass, mid) };
assume_method_lookup_stable(jit, ocb, klass, cme);
true
} else {
diff --git a/yjit/src/stats.rs b/yjit/src/stats.rs
index 5fc83c3896..7669308f9b 100644
--- a/yjit/src/stats.rs
+++ b/yjit/src/stats.rs
@@ -428,7 +428,7 @@ pub extern "C" fn rb_yjit_record_exit_stack(exit_pc: *const VALUE)
const BUFF_LEN: usize = 2048;
// Create 2 array buffers to be used to collect frames and lines.
- let mut frames_buffer = [VALUE(0 as usize); BUFF_LEN];
+ let mut frames_buffer = [VALUE(0_usize); BUFF_LEN];
let mut lines_buffer = [0; BUFF_LEN];
// Records call frame and line information for each method entry into two
@@ -511,7 +511,7 @@ pub extern "C" fn rb_yjit_record_exit_stack(exit_pc: *const VALUE)
// Push number of times seen onto the stack, which is 1
// because it's the first time we've seen it.
- yjit_raw_samples.push(VALUE(1 as usize));
+ yjit_raw_samples.push(VALUE(1_usize));
yjit_line_samples.push(1);
}
}
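
Finally, the stats.rs hunks are `clippy::unnecessary_cast`: casting the integer literal `0 as usize` is redundant when the suffix form `0_usize` names the type directly. A last sketch, using a stand-in for cruby's `VALUE` wrapper:

    struct VALUE(usize);

    fn demo() -> VALUE {
        // clippy::unnecessary_cast: `VALUE(0 as usize)` casts a literal
        // that a suffix can type directly.
        VALUE(0_usize)
    }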