summaryrefslogtreecommitdiff
path: root/yjit/src/asm
diff options
context:
space:
mode:
authorAlan Wu <alanwu@ruby-lang.org>2022-04-19 14:40:21 -0400
committerAlan Wu <XrXr@users.noreply.github.com>2022-04-27 11:00:22 -0400
commitf90549cd38518231a6a74432fe1168c943a7cc18 (patch)
treec277bbfab47e230bd549bd5f607f60c3e812a714 /yjit/src/asm
parentf553180a86b71830a1de49dd04874b3880c5c698 (diff)
Rust YJIT
In December 2021, we opened an [issue] to solicit feedback regarding the porting of the YJIT codebase from C99 to Rust. There were some reservations, but this project was given the go ahead by Ruby core developers and Matz. Since then, we have successfully completed the port of YJIT to Rust. The new Rust version of YJIT has reached parity with the C version, in that it passes all the CRuby tests, is able to run all of the YJIT benchmarks, and performs similarly to the C version (because it works the same way and largely generates the same machine code). We've even incorporated some design improvements, such as a more fine-grained constant invalidation mechanism which we expect will make a big difference in Ruby on Rails applications. Because we want to be careful, YJIT is guarded behind a configure option: ```shell ./configure --enable-yjit # Build YJIT in release mode ./configure --enable-yjit=dev # Build YJIT in dev/debug mode ``` By default, YJIT does not get compiled and cargo/rustc is not required. If YJIT is built in dev mode, then `cargo` is used to fetch development dependencies, but when building in release, `cargo` is not required, only `rustc`. At the moment YJIT requires Rust 1.60.0 or newer. The YJIT command-line options remain mostly unchanged, and more details about the build process are documented in `doc/yjit/yjit.md`. The CI tests have been updated and do not take any more resources than before. The development history of the Rust port is available at the following commit for interested parties: https://github.com/Shopify/ruby/commit/1fd9573d8b4b65219f1c2407f30a0a60e537f8be Our hope is that Rust YJIT will be compiled and included as a part of system packages and compiled binaries of the Ruby 3.2 release. 
We do not anticipate any major problems as Rust is well supported on every platform which YJIT supports, but to make sure that this process works smoothly, we would like to reach out to those who take care of building systems packages before the 3.2 release is shipped and resolve any issues that may come up. [issue]: https://bugs.ruby-lang.org/issues/18481 Co-authored-by: Maxime Chevalier-Boisvert <maximechevalierb@gmail.com> Co-authored-by: Noah Gibbs <the.codefolio.guy@gmail.com> Co-authored-by: Kevin Newton <kddnewton@gmail.com>
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/5826
Diffstat (limited to 'yjit/src/asm')
-rw-r--r--yjit/src/asm/mod.rs392
-rw-r--r--yjit/src/asm/x86_64/mod.rs1395
-rw-r--r--yjit/src/asm/x86_64/tests.rs447
3 files changed, 2234 insertions, 0 deletions
diff --git a/yjit/src/asm/mod.rs b/yjit/src/asm/mod.rs
new file mode 100644
index 0000000000..0d61cd654a
--- /dev/null
+++ b/yjit/src/asm/mod.rs
@@ -0,0 +1,392 @@
+use std::collections::BTreeMap;
+use std::mem;
+
+// Lots of manual vertical alignment in there that rustfmt doesn't handle well.
+#[rustfmt::skip]
+pub mod x86_64;
+
/// Pointer to a piece of machine code
/// We may later change this to wrap an u32
/// Note: there is no NULL constant for CodePtr. You should use Option<CodePtr> instead.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Debug)]
#[repr(C)]
pub struct CodePtr(*const u8);

impl CodePtr {
    /// Expose the wrapped raw pointer.
    pub fn raw_ptr(&self) -> *const u8 {
        self.0
    }

    /// View the address as a signed 64-bit integer.
    fn into_i64(&self) -> i64 {
        self.0 as i64
    }

    /// View the address as an unsigned machine-word integer.
    fn into_usize(&self) -> usize {
        self.0 as usize
    }
}

impl From<*mut u8> for CodePtr {
    /// Wrap a raw pointer; NULL is rejected since CodePtr has no NULL notion.
    fn from(value: *mut u8) -> Self {
        assert!(!value.is_null());
        CodePtr(value)
    }
}
+
/// Compute an offset in bytes of a given struct field
///
/// NOTE(review): this is the classic C `offsetof` trick — form a field
/// address relative to a null base pointer and subtract the base.
/// Taking a reference through a null pointer is formally undefined
/// behavior in Rust — TODO: confirm this is acceptable for the targeted
/// toolchains or migrate to a safer formulation.
macro_rules! offset_of {
    ($struct_type:ty, $field_name:tt) => {{
        // Null pointer to our struct type
        let foo = (0 as *const $struct_type);

        unsafe {
            // The field's address relative to base 0 is exactly its byte offset
            let ptr_field = (&(*foo).$field_name as *const _ as usize);
            let ptr_base = (foo as usize);
            ptr_field - ptr_base
        }
    }};
}
pub(crate) use offset_of;
+
+//
+// TODO: need a field_size_of macro, to compute the size of a struct field in bytes
+//
+
// 1 is not aligned so this won't match any pages
// (page-aligned addresses are always multiples of the page size)
const ALIGNED_WRITE_POSITION_NONE: usize = 1;

/// Reference to an ASM label
/// Records a site that must later be patched with a 32-bit relative
/// offset once the label's address is known (see link_labels).
struct LabelRef {
    // Position in the code block where the label reference exists
    pos: usize,

    // Label which this refers to
    label_idx: usize,
}
+
/// Block of memory into which instructions can be assembled
pub struct CodeBlock {
    // Block of non-executable memory used for dummy code blocks
    // This memory is owned by this block and lives as long as the block
    dummy_block: Vec<u8>,

    // Pointer to memory we are writing into
    // (points into dummy_block for dummy blocks, otherwise an external buffer)
    mem_block: *mut u8,

    // Memory block size
    mem_size: usize,

    // Current writing position (byte offset into mem_block)
    write_pos: usize,

    // Table of registered label addresses
    label_addrs: Vec<usize>,

    // Table of registered label names
    label_names: Vec<String>,

    // References to labels
    label_refs: Vec<LabelRef>,

    // Comments for assembly instructions, if that feature is enabled
    // (keyed by the absolute code address they were written at)
    asm_comments: BTreeMap<usize, Vec<String>>,

    // Keep track of the current aligned write position.
    // Used for changing protection when writing to the JIT buffer
    current_aligned_write_pos: usize,

    // Memory protection works at page granularity and this is the
    // size of each page. Used to implement W^X.
    page_size: usize,

    // Set if the CodeBlock is unable to output some instructions,
    // for example, when there is not enough space or when a jump
    // target is too far away.
    dropped_bytes: bool,
}
+
+impl CodeBlock {
+ pub fn new_dummy(mem_size: usize) -> Self {
+ // Allocate some non-executable memory
+ let mut dummy_block = vec![0; mem_size];
+ let mem_ptr = dummy_block.as_mut_ptr();
+
+ Self {
+ dummy_block: dummy_block,
+ mem_block: mem_ptr,
+ mem_size: mem_size,
+ write_pos: 0,
+ label_addrs: Vec::new(),
+ label_names: Vec::new(),
+ label_refs: Vec::new(),
+ asm_comments: BTreeMap::new(),
+ current_aligned_write_pos: ALIGNED_WRITE_POSITION_NONE,
+ page_size: 4096,
+ dropped_bytes: false,
+ }
+ }
+
+ pub fn new(mem_block: *mut u8, mem_size: usize, page_size: usize) -> Self {
+ Self {
+ dummy_block: vec![0; 0],
+ mem_block: mem_block,
+ mem_size: mem_size,
+ write_pos: 0,
+ label_addrs: Vec::new(),
+ label_names: Vec::new(),
+ label_refs: Vec::new(),
+ asm_comments: BTreeMap::new(),
+ current_aligned_write_pos: ALIGNED_WRITE_POSITION_NONE,
+ page_size,
+ dropped_bytes: false,
+ }
+ }
+
+ // Check if this code block has sufficient remaining capacity
+ pub fn has_capacity(&self, num_bytes: usize) -> bool {
+ self.write_pos + num_bytes < self.mem_size
+ }
+
+ /// Add an assembly comment if the feature is on.
+ /// If not, this becomes an inline no-op.
+ #[inline]
+ pub fn add_comment(&mut self, comment: &str) {
+ if cfg!(feature = "asm_comments") {
+ let cur_ptr = self.get_write_ptr().into_usize();
+ let this_line_comments = self.asm_comments.get(&cur_ptr);
+
+ // If there's no current list of comments for this line number, add one.
+ if this_line_comments.is_none() {
+ let new_comments = Vec::new();
+ self.asm_comments.insert(cur_ptr, new_comments);
+ }
+ let this_line_comments = self.asm_comments.get_mut(&cur_ptr).unwrap();
+
+ // Unless this comment is the same as the last one at this same line, add it.
+ let string_comment = String::from(comment);
+ if this_line_comments.last() != Some(&string_comment) {
+ this_line_comments.push(string_comment);
+ }
+ }
+ }
+
+ pub fn comments_at(&self, pos: usize) -> Option<&Vec<String>> {
+ self.asm_comments.get(&pos)
+ }
+
+ pub fn get_mem_size(&self) -> usize {
+ self.mem_size
+ }
+
+ pub fn get_write_pos(&self) -> usize {
+ self.write_pos
+ }
+
+ // Set the current write position
+ pub fn set_pos(&mut self, pos: usize) {
+ // Assert here since while CodeBlock functions do bounds checking, there is
+ // nothing stopping users from taking out an out-of-bounds pointer and
+ // doing bad accesses with it.
+ assert!(pos < self.mem_size);
+ self.write_pos = pos;
+ }
+
+ // Align the current write pointer to a multiple of bytes
+ pub fn align_pos(&mut self, multiple: u32) {
+ // Compute the alignment boundary that is lower or equal
+ // Do everything with usize
+ let multiple: usize = multiple.try_into().unwrap();
+ let pos = self.get_write_ptr().raw_ptr() as usize;
+ let remainder = pos % multiple;
+ let prev_aligned = pos - remainder;
+
+ if prev_aligned == pos {
+ // Already aligned so do nothing
+ } else {
+ // Align by advancing
+ let pad = multiple - remainder;
+ self.set_pos(self.get_write_pos() + pad);
+ }
+ }
+
+ // Set the current write position from a pointer
+ pub fn set_write_ptr(&mut self, code_ptr: CodePtr) {
+ let pos = (code_ptr.raw_ptr() as usize) - (self.mem_block as usize);
+ self.set_pos(pos);
+ }
+
+ // Get a direct pointer into the executable memory block
+ pub fn get_ptr(&self, offset: usize) -> CodePtr {
+ unsafe {
+ let ptr = self.mem_block.offset(offset as isize);
+ CodePtr(ptr)
+ }
+ }
+
+ // Get a direct pointer to the current write position
+ pub fn get_write_ptr(&mut self) -> CodePtr {
+ self.get_ptr(self.write_pos)
+ }
+
+ // Write a single byte at the current position
+ pub fn write_byte(&mut self, byte: u8) {
+ if self.write_pos < self.mem_size {
+ self.mark_position_writable(self.write_pos);
+ unsafe { self.mem_block.add(self.write_pos).write(byte) };
+ self.write_pos += 1;
+ } else {
+ self.dropped_bytes = true;
+ }
+ }
+
+ // Read a single byte at the given position
+ pub fn read_byte(&self, pos: usize) -> u8 {
+ assert!(pos < self.mem_size);
+ unsafe { self.mem_block.add(pos).read() }
+ }
+
+ // Write multiple bytes starting from the current position
+ pub fn write_bytes(&mut self, bytes: &[u8]) {
+ for byte in bytes {
+ self.write_byte(*byte);
+ }
+ }
+
+ // Write a signed integer over a given number of bits at the current position
+ pub fn write_int(&mut self, val: u64, num_bits: u32) {
+ assert!(num_bits > 0);
+ assert!(num_bits % 8 == 0);
+
+ // Switch on the number of bits
+ match num_bits {
+ 8 => self.write_byte(val as u8),
+ 16 => self.write_bytes(&[(val & 0xff) as u8, ((val >> 8) & 0xff) as u8]),
+ 32 => self.write_bytes(&[
+ (val & 0xff) as u8,
+ ((val >> 8) & 0xff) as u8,
+ ((val >> 16) & 0xff) as u8,
+ ((val >> 24) & 0xff) as u8,
+ ]),
+ _ => {
+ let mut cur = val;
+
+ // Write out the bytes
+ for _byte in 0..(num_bits / 8) {
+ self.write_byte((cur & 0xff) as u8);
+ cur >>= 8;
+ }
+ }
+ }
+ }
+
+ /// Check if bytes have been dropped (unwritten because of insufficient space)
+ pub fn has_dropped_bytes(&self) -> bool {
+ self.dropped_bytes
+ }
+
+ /// Allocate a new label with a given name
+ pub fn new_label(&mut self, name: String) -> usize {
+ // This label doesn't have an address yet
+ self.label_addrs.push(0);
+ self.label_names.push(name);
+
+ return self.label_addrs.len() - 1;
+ }
+
+ /// Write a label at the current address
+ pub fn write_label(&mut self, label_idx: usize) {
+ // TODO: make sure that label_idx is valid
+ // TODO: add an asseer here
+
+ self.label_addrs[label_idx] = self.write_pos;
+ }
+
+ // Add a label reference at the current write position
+ pub fn label_ref(&mut self, label_idx: usize) {
+ // TODO: make sure that label_idx is valid
+ // TODO: add an asseer here
+
+ // Keep track of the reference
+ self.label_refs.push(LabelRef {
+ pos: self.write_pos,
+ label_idx,
+ });
+ }
+
+ // Link internal label references
+ pub fn link_labels(&mut self) {
+ let orig_pos = self.write_pos;
+
+ // For each label reference
+ for label_ref in mem::take(&mut self.label_refs) {
+ let ref_pos = label_ref.pos;
+ let label_idx = label_ref.label_idx;
+ assert!(ref_pos < self.mem_size);
+
+ let label_addr = self.label_addrs[label_idx];
+ assert!(label_addr < self.mem_size);
+
+ // Compute the offset from the reference's end to the label
+ let offset = (label_addr as i64) - ((ref_pos + 4) as i64);
+
+ self.set_pos(ref_pos);
+ self.write_int(offset as u64, 32);
+ }
+
+ self.write_pos = orig_pos;
+
+ // Clear the label positions and references
+ self.label_addrs.clear();
+ self.label_names.clear();
+ assert!(self.label_refs.is_empty());
+ }
+
+ pub fn mark_position_writable(&mut self, write_pos: usize) {
+ let page_size = self.page_size;
+ let aligned_position = (write_pos / page_size) * page_size;
+
+ if self.current_aligned_write_pos != aligned_position {
+ self.current_aligned_write_pos = aligned_position;
+
+ #[cfg(not(test))]
+ unsafe {
+ use core::ffi::c_void;
+ let page_ptr = self.get_ptr(aligned_position).raw_ptr() as *mut c_void;
+ crate::cruby::rb_yjit_mark_writable(page_ptr, page_size.try_into().unwrap());
+ }
+ }
+ }
+
+ pub fn mark_all_executable(&mut self) {
+ self.current_aligned_write_pos = ALIGNED_WRITE_POSITION_NONE;
+
+ #[cfg(not(test))]
+ unsafe {
+ use core::ffi::c_void;
+ // NOTE(alan): Right now we do allocate one big chunck and give the top half to the outlined codeblock
+ // The start of the top half of the region isn't necessarily a page boundary...
+ let cb_start = self.get_ptr(0).raw_ptr() as *mut c_void;
+ crate::cruby::rb_yjit_mark_executable(cb_start, self.mem_size.try_into().unwrap());
+ }
+ }
+}
+
+/// Wrapper struct so we can use the type system to distinguish
+/// Between the inlined and outlined code blocks
+pub struct OutlinedCb {
+ // This must remain private
+ cb: CodeBlock,
+}
+
+impl OutlinedCb {
+ pub fn wrap(cb: CodeBlock) -> Self {
+ OutlinedCb { cb: cb }
+ }
+
+ pub fn unwrap(&mut self) -> &mut CodeBlock {
+ &mut self.cb
+ }
+}
diff --git a/yjit/src/asm/x86_64/mod.rs b/yjit/src/asm/x86_64/mod.rs
new file mode 100644
index 0000000000..c748ec1154
--- /dev/null
+++ b/yjit/src/asm/x86_64/mod.rs
@@ -0,0 +1,1395 @@
+use std::io::{Result, Write};
+use std::mem;
+use crate::asm::*;
+
+// Import the assembler tests module
+mod tests;
+
/// Signed immediate operand
#[derive(Clone, Copy, Debug)]
pub struct X86Imm
{
    // Size in bits
    num_bits: u8,

    // The value of the immediate
    value: i64
}

/// Unsigned immediate operand
#[derive(Clone, Copy, Debug)]
pub struct X86UImm
{
    // Size in bits
    num_bits: u8,

    // The value of the immediate
    value: u64
}

/// Register class
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum RegType
{
    GP,
    //FP,
    //XMM,
    IP,
}

/// Register operand
#[derive(Clone, Copy, Debug)]
pub struct X86Reg
{
    // Size in bits
    num_bits: u8,

    // Register type
    reg_type: RegType,

    // Register index number
    // (4-bit x86-64 register numbering, 0-15)
    reg_no: u8,
}

/// Memory-location operand (base + optional scaled index + displacement)
#[derive(Clone, Copy, Debug)]
pub struct X86Mem
{
    // Size in bits
    num_bits: u8,

    /// Base register number
    base_reg_no: u8,

    /// Index register number
    idx_reg_no: Option<u8>,

    /// SIB scale exponent value (power of two, two bits)
    scale_exp: u8,

    /// Constant displacement from the base, not scaled
    disp: i32,
}

/// Any operand an x86-64 instruction can take
#[derive(Clone, Copy, Debug)]
pub enum X86Opnd
{
    // Dummy operand
    None,

    // Immediate value
    Imm(X86Imm),

    // Unsigned immediate
    UImm(X86UImm),

    // General-purpose register
    Reg(X86Reg),

    // Memory location
    Mem(X86Mem),

    // IP-relative memory location
    IPRel(i32)
}
+
+impl X86Opnd {
+ fn rex_needed(&self) -> bool {
+ match self {
+ X86Opnd::None => false,
+ X86Opnd::Imm(_) => false,
+ X86Opnd::UImm(_) => false,
+ X86Opnd::Reg(reg) => reg.reg_no > 7 || reg.num_bits == 8 && reg.reg_no >= 4,
+ X86Opnd::Mem(mem) => (mem.base_reg_no > 7 || (mem.idx_reg_no.unwrap_or(0) > 7)),
+ X86Opnd::IPRel(_) => false
+ }
+ }
+
+ // Check if an SIB byte is needed to encode this operand
+ fn sib_needed(&self) -> bool {
+ match self {
+ X86Opnd::Mem(mem) => {
+ mem.idx_reg_no.is_some() ||
+ mem.base_reg_no == RSP_REG_NO ||
+ mem.base_reg_no == R12_REG_NO
+ },
+ _ => false
+ }
+ }
+
+ fn disp_size(&self) -> u32 {
+ match self {
+ X86Opnd::IPRel(_) => 32,
+ X86Opnd::Mem(mem) => {
+ if mem.disp != 0 {
+ // Compute the required displacement size
+ let num_bits = sig_imm_size(mem.disp.into());
+ if num_bits > 32 {
+ panic!("displacement does not fit in 32 bits");
+ }
+
+ // x86 can only encode 8-bit and 32-bit displacements
+ if num_bits == 16 { 32 } else { 8 }
+ } else if mem.base_reg_no == RBP_REG_NO || mem.base_reg_no == R13_REG_NO {
+ // If EBP or RBP or R13 is used as the base, displacement must be encoded
+ 8
+ } else {
+ 0
+ }
+ },
+ _ => 0
+ }
+ }
+
+ pub fn num_bits(&self) -> u8 {
+ match self {
+ X86Opnd::Reg(reg) => reg.num_bits,
+ X86Opnd::Imm(imm) => imm.num_bits,
+ X86Opnd::UImm(uimm) => uimm.num_bits,
+ X86Opnd::Mem(mem) => mem.num_bits,
+ _ => unreachable!()
+ }
+ }
+}
+
// Instruction pointer
// (reg_no 5 shares the ModRM slot that signals RIP-relative addressing)
pub const RIP: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::IP, reg_no: 5 });

// 64-bit GP registers
// Register numbers that need special-casing in the encoder
// (SIB requirement and mandatory displacement) get named constants.
const RAX_REG_NO: u8 = 0;
const RSP_REG_NO: u8 = 4;
const RBP_REG_NO: u8 = 5;
const R12_REG_NO: u8 = 12;
const R13_REG_NO: u8 = 13;

pub const RAX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: RAX_REG_NO });
pub const RCX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 1 });
pub const RDX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 2 });
pub const RBX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 3 });
pub const RSP: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: RSP_REG_NO });
pub const RBP: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: RBP_REG_NO });
pub const RSI: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 6 });
pub const RDI: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 7 });
pub const R8: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 8 });
pub const R9: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 9 });
pub const R10: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 10 });
pub const R11: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 11 });
pub const R12: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: R12_REG_NO });
pub const R13: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: R13_REG_NO });
pub const R14: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 14 });
pub const R15: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 15 });

// 32-bit GP registers
pub const EAX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 0 });
pub const ECX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 1 });
pub const EDX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 2 });
pub const EBX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 3 });
pub const ESP: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 4 });
pub const EBP: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 5 });
pub const ESI: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 6 });
pub const EDI: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 7 });
pub const R8D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 8 });
pub const R9D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 9 });
pub const R10D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 10 });
pub const R11D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 11 });
pub const R12D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 12 });
pub const R13D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 13 });
pub const R14D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 14 });
pub const R15D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 15 });

// 16-bit GP registers
pub const AX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 0 });
pub const CX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 1 });
pub const DX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 2 });
pub const BX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 3 });
pub const SP: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 4 });
pub const BP: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 5 });
pub const SI: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 6 });
pub const DI: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 7 });
pub const R8W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 8 });
pub const R9W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 9 });
pub const R10W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 10 });
pub const R11W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 11 });
pub const R12W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 12 });
pub const R13W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 13 });
pub const R14W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 14 });
pub const R15W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 15 });

// 8-bit GP registers
pub const AL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 0 });
pub const CL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 1 });
pub const DL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 2 });
pub const BL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 3 });
pub const SPL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 4 });
pub const BPL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 5 });
pub const SIL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 6 });
pub const DIL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 7 });
pub const R8B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 8 });
pub const R9B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 9 });
pub const R10B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 10 });
pub const R11B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 11 });
pub const R12B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 12 });
pub const R13B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 13 });
pub const R14B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 14 });
pub const R15B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 15 });

// C argument registers
// (System V AMD64 calling convention order — TODO confirm no Win64 target)
pub const C_ARG_REGS: [X86Opnd; 6] = [RDI, RSI, RDX, RCX, R8, R9];
+
+//===========================================================================
+
/// Compute the number of bits needed to encode a signed value
pub fn sig_imm_size(imm: i64) -> u8 {
    // Return the width of the narrowest signed type that holds the value
    if i8::try_from(imm).is_ok() {
        8
    } else if i16::try_from(imm).is_ok() {
        16
    } else if i32::try_from(imm).is_ok() {
        32
    } else {
        64
    }
}
+
/// Compute the number of bits needed to encode an unsigned value
pub fn unsig_imm_size(imm: u64) -> u8 {
    // Return the width of the narrowest unsigned type that holds the value
    if u8::try_from(imm).is_ok() {
        8
    } else if u16::try_from(imm).is_ok() {
        16
    } else if u32::try_from(imm).is_ok() {
        32
    } else {
        64
    }
}
+
+/// Shorthand for memory operand with base register and displacement
+pub fn mem_opnd(num_bits: u8, base_reg: X86Opnd, disp: i32) -> X86Opnd
+{
+ let base_reg = match base_reg {
+ X86Opnd::Reg(reg) => reg,
+ _ => unreachable!()
+ };
+
+ if base_reg.reg_type == RegType::IP {
+ X86Opnd::IPRel(disp)
+ } else {
+ X86Opnd::Mem(
+ X86Mem {
+ num_bits: num_bits,
+ base_reg_no: base_reg.reg_no,
+ idx_reg_no: None,
+ scale_exp: 0,
+ disp: disp,
+ }
+ )
+ }
+}
+
+/// Memory operand with SIB (Scale Index Base) indexing
+pub fn mem_opnd_sib(num_bits: u8, base_opnd: X86Opnd, index_opnd: X86Opnd, scale: i32, disp: i32) -> X86Opnd {
+ if let (X86Opnd::Reg(base_reg), X86Opnd::Reg(index_reg)) = (base_opnd, index_opnd) {
+ let scale_exp: u8;
+
+ match scale {
+ 8 => { scale_exp = 3; },
+ 4 => { scale_exp = 2; },
+ 2 => { scale_exp = 1; },
+ 1 => { scale_exp = 0; },
+ _ => unreachable!()
+ };
+
+ X86Opnd::Mem(X86Mem {
+ num_bits,
+ base_reg_no: base_reg.reg_no,
+ idx_reg_no: Some(index_reg.reg_no),
+ scale_exp,
+ disp
+ })
+ } else {
+ unreachable!()
+ }
+}
+
+/*
+// Struct member operand
+#define member_opnd(base_reg, struct_type, member_name) mem_opnd( \
+ 8 * sizeof(((struct_type*)0)->member_name), \
+ base_reg, \
+ offsetof(struct_type, member_name) \
+)
+
+// Struct member operand with an array index
+#define member_opnd_idx(base_reg, struct_type, member_name, idx) mem_opnd( \
+ 8 * sizeof(((struct_type*)0)->member_name[0]), \
+ base_reg, \
+ (offsetof(struct_type, member_name) + \
+ sizeof(((struct_type*)0)->member_name[0]) * idx) \
+)
+*/
+
+/*
+// TODO: this should be a method, X86Opnd.resize() or X86Opnd.subreg()
+static x86opnd_t resize_opnd(x86opnd_t opnd, uint32_t num_bits)
+{
+ assert (num_bits % 8 == 0);
+ x86opnd_t sub = opnd;
+ sub.num_bits = num_bits;
+ return sub;
+}
+*/
+
/// Signed immediate operand, sized to the smallest fitting width.
pub fn imm_opnd(value: i64) -> X86Opnd
{
    X86Opnd::Imm(X86Imm { num_bits: sig_imm_size(value), value })
}

/// Unsigned immediate operand, sized to the smallest fitting width.
pub fn uimm_opnd(value: u64) -> X86Opnd
{
    X86Opnd::UImm(X86UImm { num_bits: unsig_imm_size(value), value })
}

/// Constant pointer operand, encoded as an unsigned immediate.
pub fn const_ptr_opnd(ptr: *const u8) -> X86Opnd
{
    uimm_opnd(ptr as u64)
}

/// Code pointer operand, encoded as an unsigned immediate.
pub fn code_ptr_opnd(code_ptr: CodePtr) -> X86Opnd
{
    uimm_opnd(code_ptr.raw_ptr() as u64)
}
+
+/// Write the REX byte
+fn write_rex(cb: &mut CodeBlock, w_flag: bool, reg_no: u8, idx_reg_no: u8, rm_reg_no: u8) {
+ // 0 1 0 0 w r x b
+ // w - 64-bit operand size flag
+ // r - MODRM.reg extension
+ // x - SIB.index extension
+ // b - MODRM.rm or SIB.base extension
+ let w: u8 = if w_flag { 1 } else { 0 };
+ let r: u8 = if (reg_no & 8) > 0 { 1 } else { 0 };
+ let x: u8 = if (idx_reg_no & 8) > 0 { 1 } else { 0 };
+ let b: u8 = if (rm_reg_no & 8) > 0 { 1 } else { 0 };
+
+ // Encode and write the REX byte
+ cb.write_byte(0x40 + (w << 3) + (r << 2) + (x << 1) + (b));
+}
+
+/// Write an opcode byte with an embedded register operand
+fn write_opcode(cb: &mut CodeBlock, opcode: u8, reg: X86Reg) {
+ let op_byte: u8 = opcode | (reg.reg_no & 7);
+ cb.write_byte(op_byte);
+}
+
/// Encode an RM instruction
///
/// Parameters:
/// - sz_pref: emit the 0x66 operand-size prefix (16-bit operands)
/// - rex_w: set REX.W (64-bit operand size)
/// - r_opnd: register operand for the MODRM.reg field, or None
/// - rm_opnd: register/memory operand for the MODRM.rm field
/// - op_ext: opcode extension digit placed in MODRM.reg, or 0xff for none
/// - bytes: the 1-3 opcode bytes
fn write_rm(cb: &mut CodeBlock, sz_pref: bool, rex_w: bool, r_opnd: X86Opnd, rm_opnd: X86Opnd, op_ext: u8, bytes: &[u8]) {
    let op_len = bytes.len();
    assert!(op_len > 0 && op_len <= 3);
    assert!(matches!(r_opnd, X86Opnd::Reg(_) | X86Opnd::None), "Can only encode an RM instruction with a register or a none");

    // Flag to indicate the REX prefix is needed
    let need_rex = rex_w || r_opnd.rex_needed() || rm_opnd.rex_needed();

    // Flag to indicate SIB byte is needed
    // (r_opnd is Reg or None so only rm_opnd can actually require SIB)
    let need_sib = r_opnd.sib_needed() || rm_opnd.sib_needed();

    // Add the operand-size prefix, if needed
    if sz_pref {
        cb.write_byte(0x66);
    }

    // Add the REX prefix, if needed
    if need_rex {
        // 0 1 0 0 w r x b
        // w - 64-bit operand size flag
        // r - MODRM.reg extension
        // x - SIB.index extension
        // b - MODRM.rm or SIB.base extension

        let w = if rex_w { 1 } else { 0 };
        let r = match r_opnd {
            X86Opnd::None => 0,
            X86Opnd::Reg(reg) => if (reg.reg_no & 8) > 0 { 1 } else { 0 },
            _ => unreachable!()
        };

        let x = match (need_sib, rm_opnd) {
            (true, X86Opnd::Mem(mem)) => if (mem.idx_reg_no.unwrap_or(0) & 8) > 0 { 1 } else { 0 },
            _ => 0
        };

        let b = match rm_opnd {
            X86Opnd::Reg(reg) => if (reg.reg_no & 8) > 0 { 1 } else { 0 },
            X86Opnd::Mem(mem) => if (mem.base_reg_no & 8) > 0 { 1 } else { 0 },
            _ => 0
        };

        // Encode and write the REX byte
        let rex_byte: u8 = 0x40 + (w << 3) + (r << 2) + (x << 1) + (b);
        cb.write_byte(rex_byte);
    }

    // Write the opcode bytes to the code block
    for byte in bytes {
        cb.write_byte(*byte)
    }

    // MODRM.mod (2 bits)
    // MODRM.reg (3 bits)
    // MODRM.rm (3 bits)

    // An opcode extension digit and a register operand would both want
    // the MODRM.reg field — they are mutually exclusive.
    assert!(
        !(op_ext != 0xff && !matches!(r_opnd, X86Opnd::None)),
        "opcode extension and register operand present"
    );

    // Encode the mod field
    let rm_mod = match rm_opnd {
        X86Opnd::Reg(_) => 3,
        X86Opnd::IPRel(_) => 0,
        X86Opnd::Mem(_mem) => {
            // mod selects the displacement width: 0 = none, 1 = 8-bit, 2 = 32-bit
            match rm_opnd.disp_size() {
                0 => 0,
                8 => 1,
                32 => 2,
                _ => unreachable!()
            }
        },
        _ => unreachable!()
    };

    // Encode the reg field
    let reg: u8;
    if op_ext != 0xff {
        reg = op_ext;
    } else {
        reg = match r_opnd {
            X86Opnd::Reg(reg) => reg.reg_no & 7,
            _ => 0
        };
    }

    // Encode the rm field
    // (rm = 4 signals a following SIB byte; rm = 0b101 with mod = 0 is RIP-relative)
    let rm = match rm_opnd {
        X86Opnd::Reg(reg) => reg.reg_no & 7,
        X86Opnd::Mem(mem) => if need_sib { 4 } else { mem.base_reg_no & 7 },
        X86Opnd::IPRel(_) => 0b101,
        _ => unreachable!()
    };

    // Encode and write the ModR/M byte
    let rm_byte: u8 = (rm_mod << 6) + (reg << 3) + (rm);
    cb.write_byte(rm_byte);

    // Add the SIB byte, if needed
    if need_sib {
        // SIB.scale (2 bits)
        // SIB.index (3 bits)
        // SIB.base (3 bits)

        match rm_opnd {
            X86Opnd::Mem(mem) => {
                // Encode the scale value
                let scale = mem.scale_exp;

                // Encode the index value
                // (index = 4 means "no index register")
                let index = mem.idx_reg_no.map(|no| no & 7).unwrap_or(4);

                // Encode the base register
                let base = mem.base_reg_no & 7;

                // Encode and write the SIB byte
                let sib_byte: u8 = (scale << 6) + (index << 3) + (base);
                cb.write_byte(sib_byte);
            },
            _ => panic!("Expected mem operand")
        }
    }

    // Add the displacement
    match rm_opnd {
        X86Opnd::Mem(mem) => {
            let disp_size = rm_opnd.disp_size();
            if disp_size > 0 {
                cb.write_int(mem.disp as u64, disp_size);
            }
        },
        X86Opnd::IPRel(rel) => {
            cb.write_int(rel as u64, 32);
        },
        _ => ()
    };
}
+
+// Encode a mul-like single-operand RM instruction
+fn write_rm_unary(cb: &mut CodeBlock, op_mem_reg_8: u8, op_mem_reg_pref: u8, op_ext: u8, opnd: X86Opnd) {
+ assert!(matches!(opnd, X86Opnd::Reg(_) | X86Opnd::Mem(_)));
+
+ let opnd_size = opnd.num_bits();
+ assert!(opnd_size == 8 || opnd_size == 16 || opnd_size == 32 || opnd_size == 64);
+
+ if opnd_size == 8 {
+ write_rm(cb, false, false, X86Opnd::None, opnd, op_ext, &[op_mem_reg_8]);
+ } else {
+ let sz_pref = opnd_size == 16;
+ let rex_w = opnd_size == 64;
+ write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd, op_ext, &[op_mem_reg_pref]);
+ }
+}
+
// Encode an add-like RM instruction with multiple possible encodings
//
// The op_* parameters are the opcode bytes for each addressing form:
// r/m+reg and reg+r/m (8-bit and prefixed variants), and the three
// immediate forms (8-bit operand, sign-extended imm8, and full-size
// immediate). op_ext_imm is the ModRM.reg opcode extension used by the
// immediate forms. Callers pass 0xFF for forms an instruction lacks.
fn write_rm_multi(cb: &mut CodeBlock, op_mem_reg8: u8, op_mem_reg_pref: u8, op_reg_mem8: u8, op_reg_mem_pref: u8, op_mem_imm8: u8, op_mem_imm_sml: u8, op_mem_imm_lrg: u8, op_ext_imm: u8, opnd0: X86Opnd, opnd1: X86Opnd) {
    // The first operand must be a register or memory location
    assert!(matches!(opnd0, X86Opnd::Reg(_) | X86Opnd::Mem(_)));

    // Check the size of opnd0
    let opnd_size = opnd0.num_bits();
    assert!(opnd_size == 8 || opnd_size == 16 || opnd_size == 32 || opnd_size == 64);

    // Check the size of opnd1
    // Reg/Mem operands must match opnd0's width exactly; immediates may be
    // narrower (they get sign/zero extended by the instruction)
    match opnd1 {
        X86Opnd::Reg(reg) => assert!(reg.num_bits == opnd_size),
        X86Opnd::Mem(mem) => assert!(mem.num_bits == opnd_size),
        X86Opnd::Imm(imm) => assert!(imm.num_bits <= opnd_size),
        X86Opnd::UImm(uimm) => assert!(uimm.num_bits <= opnd_size),
        _ => ()
    };

    // 16-bit operands need the 0x66 operand-size prefix; 64-bit needs REX.W
    let sz_pref = opnd_size == 16;
    let rex_w = opnd_size == 64;

    match (opnd0, opnd1) {
        // R/M + Reg
        (X86Opnd::Mem(_), X86Opnd::Reg(_)) | (X86Opnd::Reg(_), X86Opnd::Reg(_)) => {
            if opnd_size == 8 {
                write_rm(cb, false, false, opnd1, opnd0, 0xff, &[op_mem_reg8]);
            } else {
                write_rm(cb, sz_pref, rex_w, opnd1, opnd0, 0xff, &[op_mem_reg_pref]);
            }
        },
        // Reg + R/M/IPRel
        (X86Opnd::Reg(_), X86Opnd::Mem(_) | X86Opnd::IPRel(_)) => {
            if opnd_size == 8 {
                write_rm(cb, false, false, opnd0, opnd1, 0xff, &[op_reg_mem8]);
            } else {
                write_rm(cb, sz_pref, rex_w, opnd0, opnd1, 0xff, &[op_reg_mem_pref]);
            }
        },
        // R/M + Imm
        (_, X86Opnd::Imm(imm)) => {
            if imm.num_bits <= 8 {
                // 8-bit immediate
                // For wider operands this uses the sign-extended imm8 form

                if opnd_size == 8 {
                    write_rm(cb, false, false, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm8]);
                } else {
                    write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm_sml]);
                }

                cb.write_int(imm.value as u64, 8);
            } else if imm.num_bits <= 32 {
                // 32-bit immediate
                // 64-bit operands still take a 32-bit immediate (sign-extended)

                assert!(imm.num_bits <= opnd_size);
                write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm_lrg]);
                cb.write_int(imm.value as u64, if opnd_size > 32 { 32 } else { opnd_size.into() });
            } else {
                panic!("immediate value too large");
            }
        },
        // R/M + UImm
        (_, X86Opnd::UImm(uimm)) => {
            // Re-derive the smallest signed width that can hold the value
            let num_bits = sig_imm_size(uimm.value.try_into().unwrap());

            if num_bits <= 8 {
                // 8-bit immediate

                if opnd_size == 8 {
                    write_rm(cb, false, false, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm8]);
                } else {
                    write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm_sml]);
                }

                cb.write_int(uimm.value, 8);
            } else if num_bits <= 32 {
                // 32-bit immediate

                assert!(num_bits <= opnd_size);
                write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm_lrg]);
                cb.write_int(uimm.value, if opnd_size > 32 { 32 } else { opnd_size.into() });
            } else {
                panic!("immediate value too large");
            }
        },
        _ => unreachable!()
    };
}
+
+// LOCK - lock prefix for atomic shared memory operations
+pub fn write_lock_prefix(cb: &mut CodeBlock) {
+ cb.write_byte(0xf0);
+}
+
+/// add - Integer addition
+pub fn add(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_rm_multi(
+ cb,
+ 0x00, // opMemReg8
+ 0x01, // opMemRegPref
+ 0x02, // opRegMem8
+ 0x03, // opRegMemPref
+ 0x80, // opMemImm8
+ 0x83, // opMemImmSml
+ 0x81, // opMemImmLrg
+ 0x00, // opExtImm
+ opnd0,
+ opnd1
+ );
+}
+
+/// and - Bitwise AND
+pub fn and(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_rm_multi(
+ cb,
+ 0x20, // opMemReg8
+ 0x21, // opMemRegPref
+ 0x22, // opRegMem8
+ 0x23, // opRegMemPref
+ 0x80, // opMemImm8
+ 0x83, // opMemImmSml
+ 0x81, // opMemImmLrg
+ 0x04, // opExtImm
+ opnd0,
+ opnd1
+ );
+}
+
+/// call - Call to a pointer with a 32-bit displacement offset
+pub fn call_rel32(cb: &mut CodeBlock, rel32: i32) {
+ // Write the opcode
+ cb.write_byte(0xe8);
+
+ // Write the relative 32-bit jump offset
+ cb.write_bytes(&rel32.to_le_bytes());
+}
+
+/// call - Call a pointer, encode with a 32-bit offset if possible
+pub fn call_ptr(cb: &mut CodeBlock, scratch_opnd: X86Opnd, dst_ptr: *const u8) {
+ if let X86Opnd::Reg(_scratch_reg) = scratch_opnd {
+ // Pointer to the end of this call instruction
+ let end_ptr = cb.get_ptr(cb.write_pos + 5);
+
+ // Compute the jump offset
+ let rel64: i64 = dst_ptr as i64 - end_ptr.into_i64();
+
+ // If the offset fits in 32-bit
+ if rel64 >= i32::MIN.into() && rel64 <= i32::MAX.into() {
+ call_rel32(cb, rel64.try_into().unwrap());
+ return;
+ }
+
+ // Move the pointer into the scratch register and call
+ mov(cb, scratch_opnd, const_ptr_opnd(dst_ptr));
+ call(cb, scratch_opnd);
+ } else {
+ unreachable!();
+ }
+}
+
+/// call - Call to label with 32-bit offset
+pub fn call_label(cb: &mut CodeBlock, label_idx: usize) {
+ // Write the opcode
+ cb.write_byte(0xE8);
+
+ // Add a reference to the label
+ cb.label_ref(label_idx);
+
+ // Relative 32-bit offset to be patched
+ cb.write_int(0, 32);
+}
+
/// call - Indirect call with an R/M operand
pub fn call(cb: &mut CodeBlock, opnd: X86Opnd) {
    // FF /2: call the address held in the register/memory operand
    write_rm(cb, false, false, X86Opnd::None, opnd, 2, &[0xff]);
}
+
+/// Encode a conditional move instruction
+fn write_cmov(cb: &mut CodeBlock, opcode1: u8, dst: X86Opnd, src: X86Opnd) {
+ if let X86Opnd::Reg(reg) = dst {
+ match src {
+ X86Opnd::Reg(_) => (),
+ X86Opnd::Mem(_) => (),
+ _ => unreachable!()
+ };
+
+ assert!(reg.num_bits >= 16);
+ let sz_pref = reg.num_bits == 16;
+ let rex_w = reg.num_bits == 64;
+
+ write_rm(cb, sz_pref, rex_w, dst, src, 0xff, &[0x0f, opcode1]);
+ } else {
+ unreachable!()
+ }
+}
+
// cmovcc - Conditional move
// One wrapper per condition code; the opcode byte is 0x40 plus the 4-bit
// condition code. Aliases intentionally share an opcode (e.g. cmovb/cmovc/
// cmovnae are all 0x42, cmove/cmovz are 0x44).
pub fn cmova(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x47, dst, src); }
pub fn cmovae(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x43, dst, src); }
pub fn cmovb(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x42, dst, src); }
pub fn cmovbe(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x46, dst, src); }
pub fn cmovc(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x42, dst, src); }
pub fn cmove(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x44, dst, src); }
pub fn cmovg(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4f, dst, src); }
pub fn cmovge(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4d, dst, src); }
pub fn cmovl(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4c, dst, src); }
pub fn cmovle(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4e, dst, src); }
pub fn cmovna(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x46, dst, src); }
pub fn cmovnae(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x42, dst, src); }
pub fn cmovnb(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x43, dst, src); }
pub fn cmovnbe(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x47, dst, src); }
pub fn cmovnc(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x43, dst, src); }
pub fn cmovne(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x45, dst, src); }
pub fn cmovng(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4e, dst, src); }
pub fn cmovnge(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4c, dst, src); }
pub fn cmovnl(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4d, dst, src); }
pub fn cmovnle(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4f, dst, src); }
pub fn cmovno(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x41, dst, src); }
pub fn cmovnp(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4b, dst, src); }
pub fn cmovns(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x49, dst, src); }
pub fn cmovnz(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x45, dst, src); }
pub fn cmovo(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x40, dst, src); }
pub fn cmovp(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4a, dst, src); }
pub fn cmovpe(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4a, dst, src); }
pub fn cmovpo(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4b, dst, src); }
pub fn cmovs(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x48, dst, src); }
pub fn cmovz(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x44, dst, src); }
+
+/// cmp - Compare and set flags
+pub fn cmp(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_rm_multi(
+ cb,
+ 0x38, // opMemReg8
+ 0x39, // opMemRegPref
+ 0x3A, // opRegMem8
+ 0x3B, // opRegMemPref
+ 0x80, // opMemImm8
+ 0x83, // opMemImmSml
+ 0x81, // opMemImmLrg
+ 0x07, // opExtImm
+ opnd0,
+ opnd1
+ );
+}
+
+/// cdq - Convert doubleword to quadword
+pub fn cdq(cb: &mut CodeBlock) {
+ cb.write_byte(0x99);
+}
+
+/// cqo - Convert quadword to octaword
+pub fn cqo(cb: &mut CodeBlock) {
+ cb.write_bytes(&[0x48, 0x99]);
+}
+
+/// Interrupt 3 - trap to debugger
+pub fn int3(cb: &mut CodeBlock) {
+ cb.write_byte(0xcc);
+}
+
+// Encode a relative jump to a label (direct or conditional)
+// Note: this always encodes a 32-bit offset
+fn write_jcc(cb: &mut CodeBlock, op0: u8, op1: u8, label_idx: usize) {
+ // Write the opcode
+ if op0 != 0xff {
+ cb.write_byte(op0);
+ }
+
+ cb.write_byte(op1);
+
+ // Add a reference to the label
+ cb.label_ref(label_idx);
+
+ // Relative 32-bit offset to be patched
+ cb.write_int( 0, 32);
+}
+
/// jcc - relative jumps to a label
/// The second opcode byte is 0x80 plus the 4-bit condition code, mirroring
/// the cmovcc table. jmp_label passes 0xFF as a sentinel first byte so
/// write_jcc emits only the single-byte 0xE9 opcode.
pub fn ja_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x87, label_idx); }
pub fn jae_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x83, label_idx); }
pub fn jb_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x82, label_idx); }
pub fn jbe_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x86, label_idx); }
pub fn jc_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x82, label_idx); }
pub fn je_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x84, label_idx); }
pub fn jg_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x8F, label_idx); }
pub fn jge_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x8D, label_idx); }
pub fn jl_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x8C, label_idx); }
pub fn jle_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x8E, label_idx); }
pub fn jna_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x86, label_idx); }
pub fn jnae_label(cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x82, label_idx); }
pub fn jnb_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x83, label_idx); }
pub fn jnbe_label(cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x87, label_idx); }
pub fn jnc_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x83, label_idx); }
pub fn jne_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x85, label_idx); }
pub fn jng_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x8E, label_idx); }
pub fn jnge_label(cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x8C, label_idx); }
pub fn jnl_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x8D, label_idx); }
pub fn jnle_label(cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x8F, label_idx); }
pub fn jno_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x81, label_idx); }
pub fn jnp_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x8b, label_idx); }
pub fn jns_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x89, label_idx); }
pub fn jnz_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x85, label_idx); }
pub fn jo_label  (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x80, label_idx); }
pub fn jp_label  (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x8A, label_idx); }
pub fn jpe_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x8A, label_idx); }
pub fn jpo_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x8B, label_idx); }
pub fn js_label  (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x88, label_idx); }
pub fn jz_label  (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0x0F, 0x84, label_idx); }
pub fn jmp_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc(cb, 0xFF, 0xE9, label_idx); }
+
+/// Encode a relative jump to a pointer at a 32-bit offset (direct or conditional)
+fn write_jcc_ptr(cb: &mut CodeBlock, op0: u8, op1: u8, dst_ptr: CodePtr) {
+ // Write the opcode
+ if op0 != 0xFF {
+ cb.write_byte(op0);
+ }
+
+ cb.write_byte(op1);
+
+ // Pointer to the end of this jump instruction
+ let end_ptr = cb.get_ptr(cb.write_pos + 4);
+
+ // Compute the jump offset
+ let rel64 = (dst_ptr.0 as i64) - (end_ptr.0 as i64);
+
+ if rel64 >= i32::MIN.into() && rel64 <= i32::MAX.into() {
+ // Write the relative 32-bit jump offset
+ cb.write_int(rel64 as u64, 32);
+ }
+ else {
+ // Offset doesn't fit in 4 bytes. Report error.
+ cb.dropped_bytes = true;
+ }
+}
+
/// jcc - relative jumps to a pointer (32-bit offset)
/// Same condition-code table as the label variants above; jmp_ptr passes
/// 0xFF as a sentinel first byte so only the single-byte 0xE9 is emitted.
pub fn ja_ptr  (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x87, ptr); }
pub fn jae_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x83, ptr); }
pub fn jb_ptr  (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x82, ptr); }
pub fn jbe_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x86, ptr); }
pub fn jc_ptr  (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x82, ptr); }
pub fn je_ptr  (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x84, ptr); }
pub fn jg_ptr  (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8F, ptr); }
pub fn jge_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8D, ptr); }
pub fn jl_ptr  (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8C, ptr); }
pub fn jle_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8E, ptr); }
pub fn jna_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x86, ptr); }
pub fn jnae_ptr(cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x82, ptr); }
pub fn jnb_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x83, ptr); }
pub fn jnbe_ptr(cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x87, ptr); }
pub fn jnc_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x83, ptr); }
pub fn jne_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x85, ptr); }
pub fn jng_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8E, ptr); }
pub fn jnge_ptr(cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8C, ptr); }
pub fn jnl_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8D, ptr); }
pub fn jnle_ptr(cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8F, ptr); }
pub fn jno_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x81, ptr); }
pub fn jnp_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8b, ptr); }
pub fn jns_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x89, ptr); }
pub fn jnz_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x85, ptr); }
pub fn jo_ptr  (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x80, ptr); }
pub fn jp_ptr  (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8A, ptr); }
pub fn jpe_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8A, ptr); }
pub fn jpo_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8B, ptr); }
pub fn js_ptr  (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x88, ptr); }
pub fn jz_ptr  (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x84, ptr); }
pub fn jmp_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0xFF, 0xE9, ptr); }
+
/// jmp - Indirect jump near to an R/M operand.
pub fn jmp_rm(cb: &mut CodeBlock, opnd: X86Opnd) {
    // FF /4: jump to the address held in the register/memory operand
    write_rm(cb, false, false, X86Opnd::None, opnd, 4, &[0xff]);
}
+
+// jmp - Jump with relative 32-bit offset
+pub fn jmp32(cb: &mut CodeBlock, offset: i32) {
+ cb.write_byte(0xE9);
+ cb.write_int(offset as u64, 32);
+}
+
+/// lea - Load Effective Address
+pub fn lea(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) {
+ if let X86Opnd::Reg(reg) = dst {
+ assert!(reg.num_bits == 64);
+ write_rm(cb, false, true, dst, src, 0xff, &[0x8d]);
+ } else {
+ unreachable!();
+ }
+}
+
/// mov - Data move operation
///
/// Dispatches on the (dst, src) operand pair:
/// - register <- immediate uses the B8+r / B0+r opcode-embedded forms
/// - memory <- immediate uses C6 (8-bit) or C7 (wider)
/// - all other combinations use the generic 88/89/8A/8B R/M encodings
pub fn mov(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) {
    match (dst, src) {
        // R + Imm
        (X86Opnd::Reg(reg), X86Opnd::Imm(imm)) => {
            assert!(imm.num_bits <= reg.num_bits);

            // In case the source immediate could be zero extended to be 64
            // bit, we can use the 32-bit operands version of the instruction.
            // For example, we can turn mov(rax, 0x34) into the equivalent
            // mov(eax, 0x34).
            if (reg.num_bits == 64) && (imm.value > 0) && (imm.num_bits <= 32) {
                if dst.rex_needed() {
                    write_rex(cb, false, 0, 0, reg.reg_no);
                }
                write_opcode(cb, 0xB8, reg);
                cb.write_int(imm.value as u64, 32);
            } else {
                // 16-bit registers need the operand-size prefix
                if reg.num_bits == 16 {
                    cb.write_byte(0x66);
                }

                if dst.rex_needed() || reg.num_bits == 64 {
                    write_rex(cb, reg.num_bits == 64, 0, 0, reg.reg_no);
                }

                // B0+r for 8-bit, B8+r otherwise; the immediate is written
                // at the full register width (up to 64 bits)
                write_opcode(cb, if reg.num_bits == 8 { 0xb0 } else { 0xb8 }, reg);
                cb.write_int(imm.value as u64, reg.num_bits.into());
            }
        },
        // R + UImm
        (X86Opnd::Reg(reg), X86Opnd::UImm(uimm)) => {
            assert!(uimm.num_bits <= reg.num_bits);

            // In case the source immediate could be zero extended to be 64
            // bit, we can use the 32-bit operands version of the instruction.
            // For example, we can turn mov(rax, 0x34) into the equivalent
            // mov(eax, 0x34).
            if (reg.num_bits == 64) && (uimm.value <= u32::MAX.into()) {
                if dst.rex_needed() {
                    write_rex(cb, false, 0, 0, reg.reg_no);
                }
                write_opcode(cb, 0xB8, reg);
                cb.write_int(uimm.value, 32);
            } else {
                // 16-bit registers need the operand-size prefix
                if reg.num_bits == 16 {
                    cb.write_byte(0x66);
                }

                if dst.rex_needed() || reg.num_bits == 64 {
                    write_rex(cb, reg.num_bits == 64, 0, 0, reg.reg_no);
                }

                write_opcode(cb, if reg.num_bits == 8 { 0xb0 } else { 0xb8 }, reg);
                cb.write_int(uimm.value, reg.num_bits.into());
            }
        },
        // M + Imm
        (X86Opnd::Mem(mem), X86Opnd::Imm(imm)) => {
            assert!(imm.num_bits <= mem.num_bits);

            if mem.num_bits == 8 {
                // NOTE(review): this passes op_ext 0xff while the wider C7
                // form below passes 0; with a None reg operand both appear
                // to produce ModRM.reg = 0 — confirm against write_rm
                write_rm(cb, false, false, X86Opnd::None, dst, 0xff, &[0xc6]);
            } else {
                write_rm(cb, mem.num_bits == 16, mem.num_bits == 64, X86Opnd::None, dst, 0, &[0xc7]);
            }

            // Immediates are capped at 32 bits for memory destinations
            // (64-bit stores sign-extend a 32-bit immediate)
            let output_num_bits:u32 = if mem.num_bits > 32 { 32 } else { mem.num_bits.into() };
            assert!(sig_imm_size(imm.value) <= (output_num_bits as u8));
            cb.write_int(imm.value as u64, output_num_bits);
        },
        // M + UImm
        (X86Opnd::Mem(mem), X86Opnd::UImm(uimm)) => {
            assert!(uimm.num_bits <= mem.num_bits);

            if mem.num_bits == 8 {
                // NOTE(review): same 0xff-vs-0 op_ext asymmetry as the
                // signed-immediate arm above — confirm against write_rm
                write_rm(cb, false, false, X86Opnd::None, dst, 0xff, &[0xc6]);
            }
            else {
                write_rm(cb, mem.num_bits == 16, mem.num_bits == 64, X86Opnd::None, dst, 0, &[0xc7]);
            }

            let output_num_bits = if mem.num_bits > 32 { 32 } else { mem.num_bits.into() };
            assert!(sig_imm_size(uimm.value as i64) <= (output_num_bits as u8));
            cb.write_int(uimm.value, output_num_bits);
        },
        // * + Imm/UImm
        // Immediates into anything but a register or memory location
        (_, X86Opnd::Imm(_) | X86Opnd::UImm(_)) => unreachable!(),
        // * + *
        // Generic register/memory move; the immediate forms are marked
        // unavailable (0xFF) since they were handled above
        (_, _) => {
            write_rm_multi(
                cb,
                0x88, // opMemReg8
                0x89, // opMemRegPref
                0x8A, // opRegMem8
                0x8B, // opRegMemPref
                0xC6, // opMemImm8
                0xFF, // opMemImmSml (not available)
                0xFF, // opMemImmLrg
                0xFF, // opExtImm
                dst,
                src
            );
        }
    };
}
+
+/// movsx - Move with sign extension (signed integers)
+pub fn movsx(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) {
+ if let X86Opnd::Reg(_dst_reg) = dst {
+ assert!(matches!(src, X86Opnd::Reg(_) | X86Opnd::Mem(_)));
+
+ let src_num_bits = src.num_bits();
+ let dst_num_bits = dst.num_bits();
+ assert!(src_num_bits < dst_num_bits);
+
+ match src_num_bits {
+ 8 => write_rm(cb, dst_num_bits == 16, dst_num_bits == 64, dst, src, 0xff, &[0x0f, 0xbe]),
+ 16 => write_rm(cb, dst_num_bits == 16, dst_num_bits == 64, dst, src, 0xff, &[0x0f, 0xbf]),
+ 32 => write_rm(cb, false, true, dst, src, 0xff, &[0x63]),
+ _ => unreachable!()
+ };
+ } else {
+ unreachable!();
+ }
+}
+
+/*
+/// movzx - Move with zero extension (unsigned values)
+void movzx(codeblock_t *cb, x86opnd_t dst, x86opnd_t src)
+{
+ cb.writeASM("movzx", dst, src);
+
+ uint32_t dstSize;
+ if (dst.isReg)
+ dstSize = dst.reg.size;
+ else
+ assert (false, "movzx dst must be a register");
+
+ uint32_t srcSize;
+ if (src.isReg)
+ srcSize = src.reg.size;
+ else if (src.isMem)
+ srcSize = src.mem.size;
+ else
+ assert (false);
+
+ assert (
+ srcSize < dstSize,
+ "movzx: srcSize >= dstSize"
+ );
+
+ if (srcSize is 8)
+ {
+ cb.writeRMInstr!('r', 0xFF, 0x0F, 0xB6)(dstSize is 16, dstSize is 64, dst, src);
+ }
+ else if (srcSize is 16)
+ {
+ cb.writeRMInstr!('r', 0xFF, 0x0F, 0xB7)(dstSize is 16, dstSize is 64, dst, src);
+ }
+ else
+ {
+ assert (false, "invalid src operand size for movxz");
+ }
+}
+*/
+
/// nop - Noop, one or multiple bytes long
///
/// Lengths 1-9 use the standard multi-byte NOP encodings (0F 1F forms);
/// longer requests are filled with as many 9-byte NOPs as fit plus one
/// shorter NOP for the remainder.
pub fn nop(cb: &mut CodeBlock, length: u32) {
    match length {
        0 => {},
        1 => cb.write_byte(0x90),
        2 => cb.write_bytes(&[0x66, 0x90]),
        3 => cb.write_bytes(&[0x0f, 0x1f, 0x00]),
        4 => cb.write_bytes(&[0x0f, 0x1f, 0x40, 0x00]),
        5 => cb.write_bytes(&[0x0f, 0x1f, 0x44, 0x00, 0x00]),
        6 => cb.write_bytes(&[0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00]),
        7 => cb.write_bytes(&[0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00]),
        8 => cb.write_bytes(&[0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00]),
        9 => cb.write_bytes(&[0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00]),
        _ => {
            // Emit 9-byte NOPs while at least 9 bytes remain
            let mut written: u32 = 0;
            while written + 9 <= length {
                nop(cb, 9);
                written += 9;
            }
            // Cover the remainder (0-8 bytes) with one last NOP
            nop(cb, length - written);
        }
    };
}
+
+/// not - Bitwise NOT
+pub fn not(cb: &mut CodeBlock, opnd: X86Opnd) {
+ write_rm_unary(
+ cb,
+ 0xf6, // opMemReg8
+ 0xf7, // opMemRegPref
+ 0x02, // opExt
+ opnd
+ );
+}
+
+/// or - Bitwise OR
+pub fn or(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_rm_multi(
+ cb,
+ 0x08, // opMemReg8
+ 0x09, // opMemRegPref
+ 0x0A, // opRegMem8
+ 0x0B, // opRegMemPref
+ 0x80, // opMemImm8
+ 0x83, // opMemImmSml
+ 0x81, // opMemImmLrg
+ 0x01, // opExtImm
+ opnd0,
+ opnd1
+ );
+}
+
+/// pop - Pop a register off the stack
+pub fn pop(cb: &mut CodeBlock, opnd: X86Opnd) {
+ match opnd {
+ X86Opnd::Reg(reg) => {
+ assert!(reg.num_bits == 64);
+
+ if opnd.rex_needed() {
+ write_rex(cb, false, 0, 0, reg.reg_no);
+ }
+ write_opcode(cb, 0x58, reg);
+ },
+ X86Opnd::Mem(mem) => {
+ assert!(mem.num_bits == 64);
+
+ write_rm(cb, false, false, X86Opnd::None, opnd, 0, &[0x8f]);
+ },
+ _ => unreachable!()
+ };
+}
+
+/// popfq - Pop the flags register (64-bit)
+pub fn popfq(cb: &mut CodeBlock) {
+ // REX.W + 0x9D
+ cb.write_bytes(&[0x48, 0x9d]);
+}
+
+/// push - Push an operand on the stack
+pub fn push(cb: &mut CodeBlock, opnd: X86Opnd) {
+ match opnd {
+ X86Opnd::Reg(reg) => {
+ if opnd.rex_needed() {
+ write_rex(cb, false, 0, 0, reg.reg_no);
+ }
+ write_opcode(cb, 0x50, reg);
+ },
+ X86Opnd::Mem(_mem) => {
+ write_rm(cb, false, false, X86Opnd::None, opnd, 6, &[0xff]);
+ },
+ _ => unreachable!()
+ }
+}
+
+/// pushfq - Push the flags register (64-bit)
+pub fn pushfq(cb: &mut CodeBlock) {
+ cb.write_byte(0x9C);
+}
+
+/// ret - Return from call, popping only the return address
+pub fn ret(cb: &mut CodeBlock) {
+ cb.write_byte(0xC3);
+}
+
+// Encode a single-operand shift instruction
+fn write_shift(cb: &mut CodeBlock, op_mem_one_pref: u8, _op_mem_cl_pref: u8, op_mem_imm_pref: u8, op_ext: u8, opnd0: X86Opnd, opnd1: X86Opnd) {
+ assert!(matches!(opnd0, X86Opnd::Reg(_) | X86Opnd::Mem(_)));
+
+ // Check the size of opnd0
+ let opnd_size = opnd0.num_bits();
+ assert!(opnd_size == 16 || opnd_size == 32 || opnd_size == 64);
+
+ let sz_pref = opnd_size == 16;
+ let rex_w = opnd_size == 64;
+
+ if let X86Opnd::UImm(imm) = opnd1 {
+ if imm.value == 1 {
+ write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext, &[op_mem_one_pref]);
+ } else {
+ assert!(imm.num_bits <= 8);
+ write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext, &[op_mem_imm_pref]);
+ cb.write_byte(imm.value as u8);
+ }
+ } else {
+ unreachable!();
+ }
+}
+
+// sal - Shift arithmetic left
+pub fn sal(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_shift(
+ cb,
+ 0xD1, // opMemOnePref,
+ 0xD3, // opMemClPref,
+ 0xC1, // opMemImmPref,
+ 0x04,
+ opnd0,
+ opnd1
+ );
+}
+
+/// sar - Shift arithmetic right (signed)
+pub fn sar(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_shift(
+ cb,
+ 0xD1, // opMemOnePref,
+ 0xD3, // opMemClPref,
+ 0xC1, // opMemImmPref,
+ 0x07,
+ opnd0,
+ opnd1
+ );
+}
+
+// shl - Shift logical left
+pub fn shl(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_shift(
+ cb,
+ 0xD1, // opMemOnePref,
+ 0xD3, // opMemClPref,
+ 0xC1, // opMemImmPref,
+ 0x04,
+ opnd0,
+ opnd1
+ );
+}
+
+/// shr - Shift logical right (unsigned)
+pub fn shr(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_shift(
+ cb,
+ 0xD1, // opMemOnePref,
+ 0xD3, // opMemClPref,
+ 0xC1, // opMemImmPref,
+ 0x05,
+ opnd0,
+ opnd1
+ );
+}
+
+/// sub - Integer subtraction
+pub fn sub(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_rm_multi(
+ cb,
+ 0x28, // opMemReg8
+ 0x29, // opMemRegPref
+ 0x2A, // opRegMem8
+ 0x2B, // opRegMemPref
+ 0x80, // opMemImm8
+ 0x83, // opMemImmSml
+ 0x81, // opMemImmLrg
+ 0x05, // opExtImm
+ opnd0,
+ opnd1
+ );
+}
+
+fn resize_opnd(opnd: X86Opnd, num_bits: u8) -> X86Opnd {
+ match opnd {
+ X86Opnd::Reg(reg) => {
+ let mut cloned = reg.clone();
+ cloned.num_bits = num_bits;
+ X86Opnd::Reg(cloned)
+ },
+ X86Opnd::Mem(mem) => {
+ let mut cloned = mem.clone();
+ cloned.num_bits = num_bits;
+ X86Opnd::Mem(cloned)
+ },
+ _ => unreachable!()
+ }
+}
+
/// test - Logical Compare
///
/// Supports r/m with an unsigned immediate (shrunk to the immediate's
/// width), a 64-bit r/m with a signed 32-bit immediate, or r/m with a
/// same-width register.
pub fn test(cb: &mut CodeBlock, rm_opnd: X86Opnd, test_opnd: X86Opnd) {
    assert!(matches!(rm_opnd, X86Opnd::Reg(_) | X86Opnd::Mem(_)));
    let rm_num_bits = rm_opnd.num_bits();

    match test_opnd {
        X86Opnd::UImm(uimm) => {
            assert!(uimm.num_bits <= 32);
            assert!(uimm.num_bits <= rm_num_bits);

            // Use the smallest operand size possible
            // (shrink the r/m operand to the immediate's width;
            // uimm.num_bits is assumed to be a valid operand width here)
            assert!(rm_num_bits % 8 == 0);
            let rm_resized = resize_opnd(rm_opnd, uimm.num_bits);

            if uimm.num_bits == 8 {
                // F6 /0 with imm8
                write_rm(cb, false, false, X86Opnd::None, rm_resized, 0x00, &[0xf6]);
                cb.write_int(uimm.value, uimm.num_bits.into());
            } else {
                // F7 /0 with imm16/imm32
                write_rm(cb, uimm.num_bits == 16, false, X86Opnd::None, rm_resized, 0x00, &[0xf7]);
                cb.write_int(uimm.value, uimm.num_bits.into());
            }
        },
        X86Opnd::Imm(imm) => {
            // This mode only applies to 64-bit R/M operands with 32-bit signed immediates
            assert!(imm.num_bits <= 32);
            assert!(rm_num_bits == 64);

            // REX.W + F7 /0 with a sign-extended imm32
            write_rm(cb, false, true, X86Opnd::None, rm_opnd, 0x00, &[0xf7]);
            cb.write_int(imm.value as u64, 32);
        },
        X86Opnd::Reg(reg) => {
            assert!(reg.num_bits == rm_num_bits);

            if rm_num_bits == 8 {
                // 84 /r
                write_rm(cb, false, false, test_opnd, rm_opnd, 0xff, &[0x84]);
            } else {
                // 85 /r with size prefix / REX.W as needed
                write_rm(cb, rm_num_bits == 16, rm_num_bits == 64, test_opnd, rm_opnd, 0xff, &[0x85]);
            }
        },
        _ => unreachable!()
    };
}
+
+/// Undefined opcode
+pub fn ud2(cb: &mut CodeBlock) {
+ cb.write_bytes(&[0x0f, 0x0b]);
+}
+
+/// xchg - Exchange Register/Memory with Register
+pub fn xchg(cb: &mut CodeBlock, rm_opnd: X86Opnd, r_opnd: X86Opnd) {
+ if let (X86Opnd::Reg(rm_reg), X86Opnd::Reg(r_reg)) = (rm_opnd, r_opnd) {
+ assert!(rm_reg.num_bits == 64);
+ assert!(r_reg.num_bits == 64);
+
+ // If we're exchanging with RAX
+ if rm_reg.reg_no == RAX_REG_NO {
+ // Write the REX byte
+ write_rex(cb, true, 0, 0, r_reg.reg_no);
+
+ // Write the opcode and register number
+ cb.write_byte(0x90 + (r_reg.reg_no & 7));
+ } else {
+ write_rm(cb, false, true, r_opnd, rm_opnd, 0xff, &[0x87]);
+ }
+ } else {
+ unreachable!();
+ }
+}
+
+/// xor - Exclusive bitwise OR
+pub fn xor(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_rm_multi(
+ cb,
+ 0x30, // opMemReg8
+ 0x31, // opMemRegPref
+ 0x32, // opRegMem8
+ 0x33, // opRegMemPref
+ 0x80, // opMemImm8
+ 0x83, // opMemImmSml
+ 0x81, // opMemImmLrg
+ 0x06, // opExtImm
+ opnd0,
+ opnd1
+ );
+}
diff --git a/yjit/src/asm/x86_64/tests.rs b/yjit/src/asm/x86_64/tests.rs
new file mode 100644
index 0000000000..bb36468a34
--- /dev/null
+++ b/yjit/src/asm/x86_64/tests.rs
@@ -0,0 +1,447 @@
+#![cfg(test)]
+
+use crate::asm::x86_64::*;
+use std::fmt;
+
+/// Produce hex string output from the bytes in a code block:
+/// two lowercase hex digits per emitted byte, no separators. This is what
+/// lets tests compare generated machine code against a literal hex string.
+impl<'a> fmt::LowerHex for super::CodeBlock {
+ fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
+ // Only the bytes written so far (positions below write_pos) count.
+ for pos in 0..self.write_pos {
+ let byte = self.read_byte(pos);
+ fmtr.write_fmt(format_args!("{:02x}", byte))?;
+ }
+ Ok(())
+ }
+}
+
+/// Check that the bytes for an instruction sequence match a hex string.
+/// `run` receives a fresh 4 KiB dummy code block to assemble into; the
+/// emitted bytes are then formatted via the LowerHex impl above and
+/// compared against `bytes`.
+fn check_bytes<R>(bytes: &str, run: R) where R: FnOnce(&mut super::CodeBlock) {
+ let mut cb = super::CodeBlock::new_dummy(4096);
+ run(&mut cb);
+ assert_eq!(format!("{:x}", cb), bytes);
+}
+
+#[test]
+fn test_add() {
+ // ADD across 8/16/32/64-bit register, memory, and immediate operand
+ // forms; expected bytes are the canonical x86-64 encodings.
+ check_bytes("80c103", |cb| add(cb, CL, imm_opnd(3)));
+ check_bytes("00d9", |cb| add(cb, CL, BL));
+ check_bytes("4000e1", |cb| add(cb, CL, SPL));
+ check_bytes("6601d9", |cb| add(cb, CX, BX));
+ check_bytes("4801d8", |cb| add(cb, RAX, RBX));
+ check_bytes("01d1", |cb| add(cb, ECX, EDX));
+ check_bytes("4c01f2", |cb| add(cb, RDX, R14));
+ check_bytes("480110", |cb| add(cb, mem_opnd(64, RAX, 0), RDX));
+ check_bytes("480310", |cb| add(cb, RDX, mem_opnd(64, RAX, 0)));
+ check_bytes("48035008", |cb| add(cb, RDX, mem_opnd(64, RAX, 8)));
+ check_bytes("480390ff000000", |cb| add(cb, RDX, mem_opnd(64, RAX, 255)));
+ check_bytes("4881407fff000000", |cb| add(cb, mem_opnd(64, RAX, 127), imm_opnd(255)));
+ check_bytes("0110", |cb| add(cb, mem_opnd(32, RAX, 0), EDX));
+ check_bytes("4883c408", |cb| add(cb, RSP, imm_opnd(8)));
+ check_bytes("83c108", |cb| add(cb, ECX, imm_opnd(8)));
+ check_bytes("81c1ff000000", |cb| add(cb, ECX, imm_opnd(255)));
+}
+
+#[test]
+fn test_add_unsigned() {
+ // ADD with unsigned immediates (uimm_opnd) against the R8 register
+ // family, one pair per operand width.
+ // ADD r/m8, imm8
+ check_bytes("4180c001", |cb| add(cb, R8B, uimm_opnd(1)));
+ check_bytes("4180c07f", |cb| add(cb, R8B, imm_opnd(i8::MAX.try_into().unwrap())));
+
+ // ADD r/m16, imm16
+ check_bytes("664183c001", |cb| add(cb, R8W, uimm_opnd(1)));
+ check_bytes("664181c0ff7f", |cb| add(cb, R8W, uimm_opnd(i16::MAX.try_into().unwrap())));
+
+ // ADD r/m32, imm32
+ check_bytes("4183c001", |cb| add(cb, R8D, uimm_opnd(1)));
+ check_bytes("4181c0ffffff7f", |cb| add(cb, R8D, uimm_opnd(i32::MAX.try_into().unwrap())));
+
+ // ADD r/m64, imm32
+ check_bytes("4983c001", |cb| add(cb, R8, uimm_opnd(1)));
+ check_bytes("4981c0ffffff7f", |cb| add(cb, R8, uimm_opnd(i32::MAX.try_into().unwrap())));
+}
+
+#[test]
+fn test_and() {
+ // AND reg/reg and mem/imm forms.
+ check_bytes("4421e5", |cb| and(cb, EBP, R12D));
+ check_bytes("48832008", |cb| and(cb, mem_opnd(64, RAX, 0), imm_opnd(0x08)));
+}
+
+#[test]
+fn test_call_label() {
+ // e8 = CALL rel32; the -5 displacement (fbffffff) points back at the
+ // start of the 5-byte call itself, since the label sits at position 0.
+ check_bytes("e8fbffffff", |cb| {
+ let label_idx = cb.new_label("fn".to_owned());
+ call_label(cb, label_idx);
+ cb.link_labels();
+ });
+}
+
+#[test]
+fn test_call_ptr() {
+ // calling a lower address
+ // (the target is the current write position, so the rel32 is -5,
+ // same encoding as the label test above)
+ check_bytes("e8fbffffff", |cb| {
+ let ptr = cb.get_write_ptr();
+ call_ptr(cb, RAX, ptr.raw_ptr());
+ });
+}
+
+#[test]
+fn test_call_reg() {
+ // Indirect CALL through a register.
+ check_bytes("ffd0", |cb| call(cb, RAX));
+}
+
+#[test]
+fn test_call_mem() {
+ // Indirect CALL through a memory operand ([rsp+8]).
+ check_bytes("ff542408", |cb| call(cb, mem_opnd(64, RSP, 8)));
+}
+
+#[test]
+fn test_cmovcc() {
+ // Conditional moves (cmovg/cmovl/cmovle) with reg/reg and reg/mem
+ // sources, in 32- and 64-bit widths.
+ check_bytes("0f4ff7", |cb| cmovg(cb, ESI, EDI));
+ check_bytes("0f4f750c", |cb| cmovg(cb, ESI, mem_opnd(32, RBP, 12)));
+ check_bytes("0f4cc1", |cb| cmovl(cb, EAX, ECX));
+ check_bytes("480f4cdd", |cb| cmovl(cb, RBX, RBP));
+ check_bytes("0f4e742404", |cb| cmovle(cb, ESI, mem_opnd(32, RSP, 4)));
+}
+
+#[test]
+fn test_cmp() {
+ // CMP across reg/reg, reg/mem and reg/imm forms.
+ check_bytes("38d1", |cb| cmp(cb, CL, DL));
+ check_bytes("39f9", |cb| cmp(cb, ECX, EDI));
+ check_bytes("493b1424", |cb| cmp(cb, RDX, mem_opnd(64, R12, 0)));
+ check_bytes("4883f802", |cb| cmp(cb, RAX, imm_opnd(2)));
+}
+
+#[test]
+fn test_cqo() {
+ // CQO: sign-extend RAX into RDX:RAX (REX.W + 99).
+ check_bytes("4899", |cb| cqo(cb));
+}
+
+#[test]
+fn test_jge_label() {
+ // JGE rel32 back to a label at position 0 (displacement -6 covers the
+ // 6-byte 0f 8d instruction).
+ check_bytes("0f8dfaffffff", |cb| {
+ let label_idx = cb.new_label("loop".to_owned());
+ jge_label(cb, label_idx);
+ cb.link_labels();
+ });
+}
+
+#[test]
+fn test_jmp_label() {
+ // Forward jump
+ // (label sits immediately after the jump, so the rel32 is 0)
+ check_bytes("e900000000", |cb| {
+ let label_idx = cb.new_label("next".to_owned());
+ jmp_label(cb, label_idx);
+ cb.write_label(label_idx);
+ cb.link_labels();
+ });
+
+ // Backwards jump
+ // (label at position 0, so the rel32 is -5)
+ check_bytes("e9fbffffff", |cb| {
+ let label_idx = cb.new_label("loop".to_owned());
+ cb.write_label(label_idx);
+ jmp_label(cb, label_idx);
+ cb.link_labels();
+ });
+}
+
+#[test]
+fn test_jmp_rm() {
+ // Indirect JMP through a register (extended register needs REX.B).
+ check_bytes("41ffe4", |cb| jmp_rm(cb, R12));
+}
+
+#[test]
+fn test_jo_label() {
+ // JO rel32 (jump on overflow) back to a label at position 0.
+ check_bytes("0f80faffffff", |cb| {
+ let label_idx = cb.new_label("loop".to_owned());
+ jo_label(cb, label_idx);
+ cb.link_labels();
+ });
+}
+
+#[test]
+fn test_lea() {
+ // LEA with a base+displacement operand and with RIP-relative operands.
+ // NOTE(review): the mem_opnd size (8) is irrelevant to LEA's encoding
+ // here — only the address expression matters; confirm that is intended.
+ check_bytes("488d5108", |cb| lea(cb, RDX, mem_opnd(64, RCX, 8)));
+ check_bytes("488d0500000000", |cb| lea(cb, RAX, mem_opnd(8, RIP, 0)));
+ check_bytes("488d0505000000", |cb| lea(cb, RAX, mem_opnd(8, RIP, 5)));
+ check_bytes("488d3d05000000", |cb| lea(cb, RDI, mem_opnd(8, RIP, 5)));
+}
+
+#[test]
+fn test_mov() {
+ // MOV with signed immediates, reg/reg, reg/mem and mem/imm forms,
+ // including the small-immediate narrowing optimization noted below.
+ check_bytes("b807000000", |cb| mov(cb, EAX, imm_opnd(7)));
+ check_bytes("b8fdffffff", |cb| mov(cb, EAX, imm_opnd(-3)));
+ check_bytes("41bf03000000", |cb| mov(cb, R15, imm_opnd(3)));
+ check_bytes("89d8", |cb| mov(cb, EAX, EBX));
+ check_bytes("89c8", |cb| mov(cb, EAX, ECX));
+ check_bytes("8b9380000000", |cb| mov(cb, EDX, mem_opnd(32, RBX, 128)));
+ check_bytes("488b442404", |cb| mov(cb, RAX, mem_opnd(64, RSP, 4)));
+
+ // Test `mov rax, 3` => `mov eax, 3` optimization
+ check_bytes("41b834000000", |cb| mov(cb, R8, imm_opnd(0x34)));
+ check_bytes("49b80000008000000000", |cb| mov(cb, R8, imm_opnd(0x80000000)));
+ check_bytes("49b8ffffffffffffffff", |cb| mov(cb, R8, imm_opnd(-1)));
+
+ check_bytes("b834000000", |cb| mov(cb, RAX, imm_opnd(0x34)));
+ check_bytes("48b8020000000000c0ff", |cb| mov(cb, RAX, imm_opnd(-18014398509481982)));
+ check_bytes("48b80000008000000000", |cb| mov(cb, RAX, imm_opnd(0x80000000)));
+ check_bytes("48b8ccffffffffffffff", |cb| mov(cb, RAX, imm_opnd(-52))); // yasm thinks this could use a dword immediate instead of qword
+ check_bytes("48b8ffffffffffffffff", |cb| mov(cb, RAX, imm_opnd(-1))); // yasm thinks this could use a dword immediate instead of qword
+ check_bytes("4488c9", |cb| mov(cb, CL, R9B));
+ check_bytes("4889c3", |cb| mov(cb, RBX, RAX));
+ check_bytes("4889df", |cb| mov(cb, RDI, RBX));
+ check_bytes("40b60b", |cb| mov(cb, SIL, imm_opnd(11)));
+
+ check_bytes("c60424fd", |cb| mov(cb, mem_opnd(8, RSP, 0), imm_opnd(-3)));
+ check_bytes("48c7470801000000", |cb| mov(cb, mem_opnd(64, RDI, 8), imm_opnd(1)));
+ //check_bytes("67c7400411000000", |cb| mov(cb, mem_opnd(32, EAX, 4), imm_opnd(0x34))); // We don't distinguish between EAX and RAX here - that's probably fine?
+ check_bytes("c7400411000000", |cb| mov(cb, mem_opnd(32, RAX, 4), imm_opnd(17)));
+ check_bytes("41895814", |cb| mov(cb, mem_opnd(32, R8, 20), EBX));
+ check_bytes("4d8913", |cb| mov(cb, mem_opnd(64, R11, 0), R10));
+ check_bytes("48c742f8f4ffffff", |cb| mov(cb, mem_opnd(64, RDX, -8), imm_opnd(-12)));
+}
+
+#[test]
+fn test_mov_unsigned() {
+ // MOV with unsigned immediates: each section below pins one operand
+ // width, including the 64-bit -> 32-bit narrowing when the value fits.
+ // MOV AL, imm8
+ check_bytes("b001", |cb| mov(cb, AL, uimm_opnd(1)));
+ check_bytes("b0ff", |cb| mov(cb, AL, uimm_opnd(u8::MAX.into())));
+
+ // MOV AX, imm16
+ check_bytes("66b80100", |cb| mov(cb, AX, uimm_opnd(1)));
+ check_bytes("66b8ffff", |cb| mov(cb, AX, uimm_opnd(u16::MAX.into())));
+
+ // MOV EAX, imm32
+ check_bytes("b801000000", |cb| mov(cb, EAX, uimm_opnd(1)));
+ check_bytes("b8ffffffff", |cb| mov(cb, EAX, uimm_opnd(u32::MAX.into())));
+ check_bytes("41b800000000", |cb| mov(cb, R8, uimm_opnd(0)));
+ check_bytes("41b8ffffffff", |cb| mov(cb, R8, uimm_opnd(0xFF_FF_FF_FF)));
+
+ // MOV RAX, imm64, will move down into EAX since it fits into 32 bits
+ check_bytes("b801000000", |cb| mov(cb, RAX, uimm_opnd(1)));
+ check_bytes("b8ffffffff", |cb| mov(cb, RAX, uimm_opnd(u32::MAX.into())));
+
+ // MOV RAX, imm64, will not move down into EAX since it does not fit into 32 bits
+ check_bytes("48b80000000001000000", |cb| mov(cb, RAX, uimm_opnd(u32::MAX as u64 + 1)));
+ check_bytes("48b8ffffffffffffffff", |cb| mov(cb, RAX, uimm_opnd(u64::MAX.into())));
+ check_bytes("49b8ffffffffffffffff", |cb| mov(cb, R8, uimm_opnd(u64::MAX)));
+
+ // MOV r8, imm8
+ check_bytes("41b001", |cb| mov(cb, R8B, uimm_opnd(1)));
+ check_bytes("41b0ff", |cb| mov(cb, R8B, uimm_opnd(u8::MAX.into())));
+
+ // MOV r16, imm16
+ check_bytes("6641b80100", |cb| mov(cb, R8W, uimm_opnd(1)));
+ check_bytes("6641b8ffff", |cb| mov(cb, R8W, uimm_opnd(u16::MAX.into())));
+
+ // MOV r32, imm32
+ check_bytes("41b801000000", |cb| mov(cb, R8D, uimm_opnd(1)));
+ check_bytes("41b8ffffffff", |cb| mov(cb, R8D, uimm_opnd(u32::MAX.into())));
+
+ // MOV r64, imm64, will move down into 32 bit since it fits into 32 bits
+ check_bytes("41b801000000", |cb| mov(cb, R8, uimm_opnd(1)));
+
+ // MOV r64, imm64, will not move down into 32 bit since it does not fit into 32 bits
+ check_bytes("49b8ffffffffffffffff", |cb| mov(cb, R8, uimm_opnd(u64::MAX)));
+}
+
+#[test]
+fn test_mov_iprel() {
+ // MOV with RIP-relative (instruction-pointer-relative) memory operands.
+ check_bytes("8b0500000000", |cb| mov(cb, EAX, mem_opnd(32, RIP, 0)));
+ check_bytes("8b0505000000", |cb| mov(cb, EAX, mem_opnd(32, RIP, 5)));
+
+ check_bytes("488b0500000000", |cb| mov(cb, RAX, mem_opnd(64, RIP, 0)));
+ check_bytes("488b0505000000", |cb| mov(cb, RAX, mem_opnd(64, RIP, 5)));
+ check_bytes("488b3d05000000", |cb| mov(cb, RDI, mem_opnd(64, RIP, 5)));
+}
+
+#[test]
+fn test_movsx() {
+ // MOVSX: sign-extend 8/16/32-bit sources into wider destinations,
+ // from both register and memory operands.
+ check_bytes("660fbec0", |cb| movsx(cb, AX, AL));
+ check_bytes("0fbed0", |cb| movsx(cb, EDX, AL));
+ check_bytes("480fbec3", |cb| movsx(cb, RAX, BL));
+ check_bytes("0fbfc8", |cb| movsx(cb, ECX, AX));
+ check_bytes("4c0fbed9", |cb| movsx(cb, R11, CL));
+ check_bytes("4c6354240c", |cb| movsx(cb, R10, mem_opnd(32, RSP, 12)));
+ check_bytes("480fbe0424", |cb| movsx(cb, RAX, mem_opnd(8, RSP, 0)));
+ check_bytes("490fbf5504", |cb| movsx(cb, RDX, mem_opnd(16, R13, 4)));
+}
+
+#[test]
+fn test_nop() {
+ // Multi-byte NOP padding from 1 to 12 bytes; sizes above 9 are built
+ // from a 9-byte NOP followed by a smaller one.
+ check_bytes("90", |cb| nop(cb, 1));
+ check_bytes("6690", |cb| nop(cb, 2));
+ check_bytes("0f1f00", |cb| nop(cb, 3));
+ check_bytes("0f1f4000", |cb| nop(cb, 4));
+ check_bytes("0f1f440000", |cb| nop(cb, 5));
+ check_bytes("660f1f440000", |cb| nop(cb, 6));
+ check_bytes("0f1f8000000000", |cb| nop(cb, 7));
+ check_bytes("0f1f840000000000", |cb| nop(cb, 8));
+ check_bytes("660f1f840000000000", |cb| nop(cb, 9));
+ check_bytes("660f1f84000000000090", |cb| nop(cb, 10));
+ check_bytes("660f1f8400000000006690", |cb| nop(cb, 11));
+ check_bytes("660f1f8400000000000f1f00", |cb| nop(cb, 12));
+}
+
+#[test]
+fn test_not() {
+ // NOT on registers and on memory operands, exercising the tricky
+ // ModRM cases: RSP/R12 (need SIB), RBP/R13 (need disp), and both
+ // disp8 and disp32 positive/negative displacements.
+ check_bytes("66f7d0", |cb| not(cb, AX));
+ check_bytes("f7d0", |cb| not(cb, EAX));
+ check_bytes("49f71424", |cb| not(cb, mem_opnd(64, R12, 0)));
+ check_bytes("f794242d010000", |cb| not(cb, mem_opnd(32, RSP, 301)));
+ check_bytes("f71424", |cb| not(cb, mem_opnd(32, RSP, 0)));
+ check_bytes("f7542403", |cb| not(cb, mem_opnd(32, RSP, 3)));
+ check_bytes("f75500", |cb| not(cb, mem_opnd(32, RBP, 0)));
+ check_bytes("f7550d", |cb| not(cb, mem_opnd(32, RBP, 13)));
+ check_bytes("48f7d0", |cb| not(cb, RAX));
+ check_bytes("49f7d3", |cb| not(cb, R11));
+ check_bytes("f710", |cb| not(cb, mem_opnd(32, RAX, 0)));
+ check_bytes("f716", |cb| not(cb, mem_opnd(32, RSI, 0)));
+ check_bytes("f717", |cb| not(cb, mem_opnd(32, RDI, 0)));
+ check_bytes("f75237", |cb| not(cb, mem_opnd(32, RDX, 55)));
+ check_bytes("f79239050000", |cb| not(cb, mem_opnd(32, RDX, 1337)));
+ check_bytes("f752c9", |cb| not(cb, mem_opnd(32, RDX, -55)));
+ check_bytes("f792d5fdffff", |cb| not(cb, mem_opnd(32, RDX, -555)));
+}
+
+#[test]
+fn test_or() {
+ // OR reg/reg form.
+ check_bytes("09f2", |cb| or(cb, EDX, ESI));
+}
+
+#[test]
+fn test_pop() {
+ // POP into registers (short 58+r form, REX.B for R12) and into
+ // memory operands including SIB-scaled addresses.
+ check_bytes("58", |cb| pop(cb, RAX));
+ check_bytes("5b", |cb| pop(cb, RBX));
+ check_bytes("5c", |cb| pop(cb, RSP));
+ check_bytes("5d", |cb| pop(cb, RBP));
+ check_bytes("415c", |cb| pop(cb, R12));
+ check_bytes("8f00", |cb| pop(cb, mem_opnd(64, RAX, 0)));
+ check_bytes("418f00", |cb| pop(cb, mem_opnd(64, R8, 0)));
+ check_bytes("418f4003", |cb| pop(cb, mem_opnd(64, R8, 3)));
+ check_bytes("8f44c803", |cb| pop(cb, mem_opnd_sib(64, RAX, RCX, 8, 3)));
+ check_bytes("418f44c803", |cb| pop(cb, mem_opnd_sib(64, R8, RCX, 8, 3)));
+}
+
+#[test]
+fn test_push() {
+ // PUSH from registers (short 50+r form) and from memory operands
+ // including SIB-scaled addresses.
+ check_bytes("50", |cb| push(cb, RAX));
+ check_bytes("53", |cb| push(cb, RBX));
+ check_bytes("4154", |cb| push(cb, R12));
+ check_bytes("ff30", |cb| push(cb, mem_opnd(64, RAX, 0)));
+ check_bytes("41ff30", |cb| push(cb, mem_opnd(64, R8, 0)));
+ check_bytes("41ff7003", |cb| push(cb, mem_opnd(64, R8, 3)));
+ check_bytes("ff74c803", |cb| push(cb, mem_opnd_sib(64, RAX, RCX, 8, 3)));
+ check_bytes("41ff74c803", |cb| push(cb, mem_opnd_sib(64, R8, RCX, 8, 3)));
+}
+
+#[test]
+fn test_ret() {
+ // Near return, single-byte encoding.
+ check_bytes("c3", |cb| ret(cb));
+}
+
+#[test]
+fn test_sal() {
+ // SAL by constant: shift-by-1 uses the short d1 form, other counts
+ // use c1 with an imm8.
+ check_bytes("66d1e1", |cb| sal(cb, CX, uimm_opnd(1)));
+ check_bytes("d1e1", |cb| sal(cb, ECX, uimm_opnd(1)));
+ check_bytes("c1e505", |cb| sal(cb, EBP, uimm_opnd(5)));
+ check_bytes("d1642444", |cb| sal(cb, mem_opnd(32, RSP, 68), uimm_opnd(1)));
+}
+
+#[test]
+fn test_sar() {
+ // SAR (arithmetic right shift) by 1, short form.
+ check_bytes("d1fa", |cb| sar(cb, EDX, uimm_opnd(1)));
+}
+
+#[test]
+fn test_shr() {
+ // SHR (logical right shift) of a 64-bit extended register by imm8.
+ check_bytes("49c1ee07", |cb| shr(cb, R14, uimm_opnd(7)));
+}
+
+#[test]
+fn test_sub() {
+ // SUB reg/imm in 32- and 64-bit widths (imm8 short form).
+ check_bytes("83e801", |cb| sub(cb, EAX, imm_opnd(1)));
+ check_bytes("4883e802", |cb| sub(cb, RAX, imm_opnd(2)));
+}
+
+#[test]
+fn test_test() {
+ // TEST with reg/reg, mem/reg, and immediate forms at several widths.
+ // NOTE(review): test(RCX, uimm 8) below expects "f6c108", i.e. the
+ // same bytes as the CL case — the encoder narrows a small unsigned
+ // immediate and resizes the r/m operand to match; confirm intended.
+ check_bytes("84c0", |cb| test(cb, AL, AL));
+ check_bytes("6685c0", |cb| test(cb, AX, AX));
+ check_bytes("f6c108", |cb| test(cb, CL, uimm_opnd(8)));
+ check_bytes("f6c207", |cb| test(cb, DL, uimm_opnd(7)));
+ check_bytes("f6c108", |cb| test(cb, RCX, uimm_opnd(8)));
+ check_bytes("f6420808", |cb| test(cb, mem_opnd(8, RDX, 8), uimm_opnd(8)));
+ check_bytes("f64208ff", |cb| test(cb, mem_opnd(8, RDX, 8), uimm_opnd(255)));
+ check_bytes("66f7c2ffff", |cb| test(cb, DX, uimm_opnd(0xffff)));
+ check_bytes("66f74208ffff", |cb| test(cb, mem_opnd(16, RDX, 8), uimm_opnd(0xffff)));
+ check_bytes("f60601", |cb| test(cb, mem_opnd(8, RSI, 0), uimm_opnd(1)));
+ check_bytes("f6461001", |cb| test(cb, mem_opnd(8, RSI, 16), uimm_opnd(1)));
+ check_bytes("f646f001", |cb| test(cb, mem_opnd(8, RSI, -16), uimm_opnd(1)));
+ check_bytes("854640", |cb| test(cb, mem_opnd(32, RSI, 64), EAX));
+ check_bytes("4885472a", |cb| test(cb, mem_opnd(64, RDI, 42), RAX));
+ check_bytes("4885c0", |cb| test(cb, RAX, RAX));
+ check_bytes("4885f0", |cb| test(cb, RAX, RSI));
+ check_bytes("48f74640f7ffffff", |cb| test(cb, mem_opnd(64, RSI, 64), imm_opnd(!0x08)));
+ check_bytes("48f7464008000000", |cb| test(cb, mem_opnd(64, RSI, 64), imm_opnd(0x08)));
+ check_bytes("48f7c108000000", |cb| test(cb, RCX, imm_opnd(0x08)));
+ //check_bytes("48a9f7ffff0f", |cb| test(cb, RAX, imm_opnd(0x0FFFFFF7)));
+}
+
+#[test]
+fn test_xchg() {
+ // XCHG: short 90+r form when RAX is involved, generic 87 otherwise.
+ check_bytes("4891", |cb| xchg(cb, RAX, RCX));
+ check_bytes("4995", |cb| xchg(cb, RAX, R13));
+ check_bytes("4887d9", |cb| xchg(cb, RCX, RBX));
+ check_bytes("4d87f9", |cb| xchg(cb, R9, R15));
+}
+
+#[test]
+fn test_xor() {
+ // XOR reg,reg — the common zeroing idiom.
+ check_bytes("31c0", |cb| xor(cb, EAX, EAX));
+}
+
+#[test]
+#[cfg(feature = "disasm")]
+fn basic_capstone_usage() -> std::result::Result<(), capstone::Error> {
+ // Test drive Capstone with simple input
+ // (smoke test for the optional disassembler dependency: a single
+ // 0xCC byte must disassemble to exactly one int3 instruction)
+ extern crate capstone;
+ use capstone::prelude::*;
+ let cs = Capstone::new()
+ .x86()
+ .mode(arch::x86::ArchMode::Mode64)
+ .syntax(arch::x86::ArchSyntax::Intel)
+ .build()?;
+
+ let insns = cs.disasm_all(&[0xCC], 0x1000)?;
+
+ match insns.as_ref() {
+ [insn] => {
+ assert_eq!(Some("int3"), insn.mnemonic());
+ Ok(())
+ }
+ _ => Err(capstone::Error::CustomError(
+ "expected to disassemble to int3",
+ )),
+ }
+}
+
+#[test]
+#[cfg(feature = "asm_comments")]
+fn block_comments() {
+ // Comments attach to the write position current at add_comment time;
+ // duplicates at the same position are dropped, and comments at
+ // different positions are retrieved independently via comments_at.
+ let mut cb = super::CodeBlock::new_dummy(4096);
+
+ let first_write_ptr = cb.get_write_ptr().into_usize();
+ cb.add_comment("Beginning");
+ xor(&mut cb, EAX, EAX); // 2 bytes long
+ let second_write_ptr = cb.get_write_ptr().into_usize();
+ cb.add_comment("Two bytes in");
+ cb.add_comment("Still two bytes in");
+ cb.add_comment("Still two bytes in"); // Duplicate, should be ignored
+ test(&mut cb, mem_opnd(64, RSI, 64), imm_opnd(!0x08)); // 8 bytes long
+ let third_write_ptr = cb.get_write_ptr().into_usize();
+ cb.add_comment("Ten bytes in");
+
+ assert_eq!(&vec!( "Beginning".to_string() ), cb.comments_at(first_write_ptr).unwrap());
+ assert_eq!(&vec!( "Two bytes in".to_string(), "Still two bytes in".to_string() ), cb.comments_at(second_write_ptr).unwrap());
+ assert_eq!(&vec!( "Ten bytes in".to_string() ), cb.comments_at(third_write_ptr).unwrap());
+}