summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMaxime Chevalier-Boisvert <maxime.chevalierboisvert@shopify.com>2022-06-21 11:05:20 -0400
committerTakashi Kokubun <takashikkbn@gmail.com>2022-08-29 08:46:58 -0700
commit2eba6aef724f20162bd650d535be876aa4a19964 (patch)
tree421b04f546d2373cae2ea541a522a7ecd6438483
parent4254174ca76d8e64db29fbcbcfc99a81e2d50211 (diff)
Port over get_branch_target()
-rw-r--r--yjit/src/backend/arm64/mod.rs16
-rw-r--r--yjit/src/backend/tests.rs7
-rw-r--r--yjit/src/core.rs24
3 files changed, 32 insertions, 15 deletions
diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
index be67e2384d..4e4c553c9d 100644
--- a/yjit/src/backend/arm64/mod.rs
+++ b/yjit/src/backend/arm64/mod.rs
@@ -34,7 +34,7 @@ impl From<Opnd> for A64Opnd {
impl Assembler
{
/// Get the list of registers from which we can allocate on this platform
- pub fn get_scratch_regs() -> Vec<Reg>
+ pub fn get_alloc_regs() -> Vec<Reg>
{
vec![
X12_REG,
@@ -45,6 +45,11 @@ impl Assembler
/// Split platform-specific instructions
fn arm64_split(mut self) -> Assembler
{
+ // The transformations done here are meant to make our lives simpler in later
+ // stages of the compilation pipeline.
+ // Here we may want to make sure that all instructions (except load and store)
+ // have no memory operands.
+
todo!();
}
@@ -52,6 +57,15 @@ impl Assembler
/// Returns a list of GC offsets
pub fn arm64_emit(&mut self, cb: &mut CodeBlock) -> Vec<u32>
{
+ // NOTE: dear Kevin,
+ // for arm, you may want to reserve 1 or 2 caller-save registers
// to use as scratch registers (during the last phase of the codegen)
+ // These registers will not be allocated to anything by the register
+ // allocator, they're just useful because arm is slightly trickier
+ // than x86 to generate code for.
+ // For example, if you want to jump far away, you may want to store
+ // the jump target address in a register first.
+
todo!();
}
diff --git a/yjit/src/backend/tests.rs b/yjit/src/backend/tests.rs
index a8ae1bc97a..3a0f14e1f4 100644
--- a/yjit/src/backend/tests.rs
+++ b/yjit/src/backend/tests.rs
@@ -192,12 +192,15 @@ fn test_c_call()
let (mut asm, mut cb) = setup_asm();
- asm.ccall(
+ let ret_val = asm.ccall(
dummy_c_fun as *const u8,
vec![Opnd::mem(64, SP, 0), Opnd::UImm(1)]
);
- asm.compile_with_num_regs(&mut cb, 2);
+ // Make sure that the call's return value is usable
+ asm.mov(Opnd::mem(64, SP, 0), ret_val);
+
+ asm.compile_with_num_regs(&mut cb, 1);
}
#[test]
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index 10ef9c5151..a2659b55fd 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -1768,29 +1768,29 @@ fn get_branch_target(
// This means the branch stub owns its own reference to the branch
let branch_ptr: *const RefCell<Branch> = BranchRef::into_raw(branchref.clone());
+ let mut asm = Assembler::new();
-
-
-
- todo!("stub codegen with new assembler");
-
- /*
// Call branch_stub_hit(branch_idx, target_idx, ec)
- mov(ocb, C_ARG_REGS[2], REG_EC);
- mov(ocb, C_ARG_REGS[1], uimm_opnd(target_idx as u64));
- mov(ocb, C_ARG_REGS[0], const_ptr_opnd(branch_ptr as *const u8));
- call_ptr(ocb, REG0, branch_stub_hit as *mut u8);
+ let jump_addr = asm.ccall(
+ branch_stub_hit as *mut u8,
+ vec![
+ EC,
+ Opnd::UImm(target_idx as u64),
+ Opnd::const_ptr(branch_ptr as *const u8)
+ ]
+ );
// Jump to the address returned by the
// branch_stub_hit call
- jmp_rm(ocb, RAX);
+ asm.jmp_opnd(jump_addr);
+
+ asm.compile(ocb);
if ocb.has_dropped_bytes() {
None // No space
} else {
Some(stub_addr)
}
- */
}
pub fn gen_branch(