author	Kevin Newton <kddnewton@gmail.com>	2022-08-26 19:21:45 -0400
committer	Takashi Kokubun <takashikkbn@gmail.com>	2022-08-29 09:09:41 -0700
commit	d694f320e40e77ab432f4d21575251ac0ab4ab76 (patch)
tree	a7cae862ea2bd33a3bc71e77d4b966772b71af5b /yjit/src/asm/arm64/arg
parent	46007b88af82d6ff22fc01edb7c74922dfa5c68a (diff)
Fixed width immediates (https://github.com/Shopify/ruby/pull/437)
There are a lot of times when encoding AArch64 instructions that we need to represent an integer value with a custom fixed width. For example, the offset for a B instruction is 26 bits, so we store an i32 on the instruction struct and then mask it when we encode. We've been doing this masking everywhere, which has worked, but it's getting a bit copy-pasty all over the place. This commit centralizes that logic to make sure we stay consistent.
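As a rough before/after sketch of the pattern this centralizes (the encode_b_offset helper below is illustrative, not part of this diff, and assumes truncate_imm is in scope via the re-export added to arg/mod.rs further down):

// Illustrative only: encoding the 26-bit B offset mentioned above.
fn encode_b_offset(imm26: i32) -> u32 {
    // Before this commit, each encode site masked by hand:
    //     (imm26 as u32) & ((1 << 26) - 1)
    // With the new helper, the width is a const generic and any dropped
    // bits trigger a panic instead of silently mis-encoding:
    truncate_imm::<i32, 26>(imm26)
}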
Notes: Merged: https://github.com/ruby/ruby/pull/6289
Diffstat (limited to 'yjit/src/asm/arm64/arg')
-rw-r--r--	yjit/src/asm/arm64/arg/mod.rs	2
-rw-r--r--	yjit/src/asm/arm64/arg/truncate.rs	66
2 files changed, 68 insertions, 0 deletions
diff --git a/yjit/src/asm/arm64/arg/mod.rs b/yjit/src/asm/arm64/arg/mod.rs
index 30f3cc3dfe..9bf4a8ea13 100644
--- a/yjit/src/asm/arm64/arg/mod.rs
+++ b/yjit/src/asm/arm64/arg/mod.rs
@@ -6,9 +6,11 @@ mod condition;
mod sf;
mod shifted_imm;
mod sys_reg;
+mod truncate;
pub use bitmask_imm::BitmaskImmediate;
pub use condition::Condition;
pub use sf::Sf;
pub use shifted_imm::ShiftedImmediate;
pub use sys_reg::SystemRegister;
+pub use truncate::{truncate_imm, truncate_uimm};
diff --git a/yjit/src/asm/arm64/arg/truncate.rs b/yjit/src/asm/arm64/arg/truncate.rs
new file mode 100644
index 0000000000..52f2c012cb
--- /dev/null
+++ b/yjit/src/asm/arm64/arg/truncate.rs
@@ -0,0 +1,66 @@
+// There are many instances in AArch64 instruction encoding where you represent
+// an integer value with a particular bit width that isn't a power of 2. These
+// functions represent truncating those integer values down to the appropriate
+// number of bits.
+
+/// Truncate a signed immediate to fit into a compile-time known width. It is
+/// assumed before calling this function that the value fits into the correct
+/// size. If it doesn't, then this function will panic.
+///
+/// When the value is positive, this should effectively be a no-op since we're
+/// just dropping leading zeroes. When the value is negative we should only be
+/// dropping leading ones.
+pub fn truncate_imm<T: Into<i32>, const WIDTH: usize>(imm: T) -> u32 {
+ let value: i32 = imm.into();
+ let masked = (value as u32) & ((1 << WIDTH) - 1);
+
+ // Assert that we didn't drop any bits by truncating.
+ if value >= 0 {
+ assert_eq!(value as u32, masked);
+ } else {
+ assert_eq!(value as u32, masked | (u32::MAX << WIDTH));
+ }
+
+ masked
+}
+
+/// Truncate an unsigned immediate to fit into a compile-time known width. It is
+/// assumed before calling this function that the value fits into the correct
+/// size. If it doesn't, then this function will panic.
+///
+/// This should effectively be a no-op since we're just dropping leading zeroes.
+pub fn truncate_uimm<T: Into<u32>, const WIDTH: usize>(uimm: T) -> u32 {
+ let value: u32 = uimm.into();
+ let masked = value & ((1 << WIDTH) - 1);
+
+ // Assert that we didn't drop any bits by truncating.
+ assert_eq!(value, masked);
+
+ masked
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_truncate_imm_positive() {
+ let inst = truncate_imm::<i32, 4>(5);
+ let result: u32 = inst.into();
+ assert_eq!(0b0101, result);
+ }
+
+ #[test]
+ fn test_truncate_imm_negative() {
+ let inst = truncate_imm::<i32, 4>(-5);
+ let result: u32 = inst.into();
+ assert_eq!(0b1011, result);
+ }
+
+ #[test]
+ fn test_truncate_uimm() {
+ let inst = truncate_uimm::<u32, 4>(5);
+ let result: u32 = inst.into();
+ assert_eq!(0b0101, result);
+ }
+}
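As a usage sketch (not part of this diff; the b and add_imm12_field functions and the import path are assumptions for illustration), an instruction encoder can build fixed-width fields directly from the re-exported helpers:

// Hypothetical caller inside yjit; the exact use path depends on where the
// caller lives relative to arg/mod.rs.
use crate::asm::arm64::arg::{truncate_imm, truncate_uimm};

/// Unconditional B: bits [31:26] = 0b000101, bits [25:0] = signed word offset.
fn b(imm26: i32) -> u32 {
    (0b000101u32 << 26) | truncate_imm::<i32, 26>(imm26)
}

/// A 12-bit unsigned immediate placed at bits [21:10], as in ADD/SUB (immediate).
fn add_imm12_field(uimm12: u16) -> u32 {
    truncate_uimm::<u16, 12>(uimm12) << 10
}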