Diffstat (limited to 'yjit/src')
-rw-r--r--  yjit/src/asm/arm64/README.md | 16
-rw-r--r--  yjit/src/asm/arm64/arg/bitmask_imm.rs | 255
-rw-r--r--  yjit/src/asm/arm64/arg/condition.rs | 52
-rw-r--r--  yjit/src/asm/arm64/arg/inst_offset.rs | 47
-rw-r--r--  yjit/src/asm/arm64/arg/mod.rs | 18
-rw-r--r--  yjit/src/asm/arm64/arg/sf.rs | 19
-rw-r--r--  yjit/src/asm/arm64/arg/shifted_imm.rs | 81
-rw-r--r--  yjit/src/asm/arm64/arg/sys_reg.rs | 6
-rw-r--r--  yjit/src/asm/arm64/arg/truncate.rs | 66
-rw-r--r--  yjit/src/asm/arm64/inst/atomic.rs | 86
-rw-r--r--  yjit/src/asm/arm64/inst/branch.rs | 100
-rw-r--r--  yjit/src/asm/arm64/inst/branch_cond.rs | 78
-rw-r--r--  yjit/src/asm/arm64/inst/breakpoint.rs | 55
-rw-r--r--  yjit/src/asm/arm64/inst/call.rs | 104
-rw-r--r--  yjit/src/asm/arm64/inst/conditional.rs | 73
-rw-r--r--  yjit/src/asm/arm64/inst/data_imm.rs | 143
-rw-r--r--  yjit/src/asm/arm64/inst/data_reg.rs | 192
-rw-r--r--  yjit/src/asm/arm64/inst/halfword_imm.rs | 179
-rw-r--r--  yjit/src/asm/arm64/inst/load_literal.rs | 89
-rw-r--r--  yjit/src/asm/arm64/inst/load_register.rs | 108
-rw-r--r--  yjit/src/asm/arm64/inst/load_store.rs | 249
-rw-r--r--  yjit/src/asm/arm64/inst/load_store_exclusive.rs | 109
-rw-r--r--  yjit/src/asm/arm64/inst/logical_imm.rs | 154
-rw-r--r--  yjit/src/asm/arm64/inst/logical_reg.rs | 207
-rw-r--r--  yjit/src/asm/arm64/inst/madd.rs | 73
-rw-r--r--  yjit/src/asm/arm64/inst/mod.rs | 54
-rw-r--r--  yjit/src/asm/arm64/inst/mov.rs | 155
-rw-r--r--  yjit/src/asm/arm64/inst/nop.rs | 44
-rw-r--r--  yjit/src/asm/arm64/inst/pc_rel.rs | 107
-rw-r--r--  yjit/src/asm/arm64/inst/reg_pair.rs | 212
-rw-r--r--  yjit/src/asm/arm64/inst/sbfm.rs | 103
-rw-r--r--  yjit/src/asm/arm64/inst/shift_imm.rs | 147
-rw-r--r--  yjit/src/asm/arm64/inst/smulh.rs | 60
-rw-r--r--  yjit/src/asm/arm64/inst/sys_reg.rs | 86
-rw-r--r--  yjit/src/asm/arm64/inst/test_bit.rs | 133
-rw-r--r--  yjit/src/asm/arm64/mod.rs | 1680
-rw-r--r--  yjit/src/asm/arm64/opnd.rs | 195
-rw-r--r--  yjit/src/asm/mod.rs | 847
-rw-r--r--  yjit/src/asm/x86_64/mod.rs | 1456
-rw-r--r--  yjit/src/asm/x86_64/tests.rs | 460
-rw-r--r--  yjit/src/backend/arm64/mod.rs | 1829
-rw-r--r--  yjit/src/backend/ir.rs | 2154
-rw-r--r--  yjit/src/backend/mod.rs | 14
-rw-r--r--  yjit/src/backend/tests.rs | 329
-rw-r--r--  yjit/src/backend/x86_64/mod.rs | 1340
-rw-r--r--  yjit/src/codegen.rs | 11433
-rw-r--r--  yjit/src/core.rs | 4603
-rw-r--r--  yjit/src/cruby.rs | 831
-rw-r--r--  yjit/src/cruby_bindings.inc.rs | 1322
-rw-r--r--  yjit/src/disasm.rs | 400
-rw-r--r--  yjit/src/invariants.rs | 709
-rw-r--r--  yjit/src/lib.rs | 31
-rw-r--r--  yjit/src/log.rs | 179
-rw-r--r--  yjit/src/options.rs | 432
-rw-r--r--  yjit/src/stats.rs | 1064
-rw-r--r--  yjit/src/utils.rs | 287
-rw-r--r--  yjit/src/virtualmem.rs | 488
-rw-r--r--  yjit/src/yjit.rs | 277
58 files changed, 36020 insertions, 0 deletions
diff --git a/yjit/src/asm/arm64/README.md b/yjit/src/asm/arm64/README.md
new file mode 100644
index 0000000000..edae5773e8
--- /dev/null
+++ b/yjit/src/asm/arm64/README.md
@@ -0,0 +1,16 @@
+# Arm64
+
+This module is responsible for encoding YJIT operands into appropriate Arm64 machine code.
+
+## Architecture
+
+Every instruction in the Arm64 instruction set is 32 bits wide and is represented in little-endian order. Because they're all going to be the same size, we represent each instruction by a struct that implements `From<T> for u32`, which contains the mechanism for encoding that instruction. The encoding for each instruction is shown in the documentation for the struct that ends up being created.
+
+In general, each field inside the struct holds either a direct value (usually a `u8`/`u16`) or some kind of `enum` that can be converted directly into a `u32`. For more complicated pieces of encoding (e.g., bitmask immediates), a corresponding module under the `arg` namespace is available.
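+
+As a rough sketch of that pattern (this exact struct doesn't exist in the
+module, and the real instructions carry more fields and constant bits):
+
+```rust
+struct ExampleInst {
+    /// The destination register, encoded in bits 4-0.
+    rd: u8,
+
+    /// The source register, encoded in bits 9-5.
+    rn: u8
+}
+
+impl From<ExampleInst> for u32 {
+    /// Convert an instruction into a 32-bit value.
+    fn from(inst: ExampleInst) -> Self {
+        0
+        | ((inst.rn as u32) << 5)
+        | (inst.rd as u32)
+    }
+}
+```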
+
+## Helpful links
+
+* [Arm A64 Instruction Set Architecture](https://developer.arm.com/documentation/ddi0596/2021-12?lang=en) Official documentation
+* [armconverter.com](https://armconverter.com/) A website that encodes Arm assembly syntax
+* [hatstone](https://github.com/tenderlove/hatstone) A wrapper around the Capstone disassembler written in Ruby
+* [onlinedisassembler.com](https://onlinedisassembler.com/odaweb/) A web-based disassembler
diff --git a/yjit/src/asm/arm64/arg/bitmask_imm.rs b/yjit/src/asm/arm64/arg/bitmask_imm.rs
new file mode 100644
index 0000000000..70a439afd5
--- /dev/null
+++ b/yjit/src/asm/arm64/arg/bitmask_imm.rs
@@ -0,0 +1,255 @@
+/// Immediates used by the logical immediate instructions are not actually the
+/// immediate value, but instead are encoded into a 13-bit wide mask of 3
+/// elements. This allows many more values to be represented than 13 bits would
+/// normally allow, at the expense of not being able to represent every possible
+/// value.
+///
+/// In order for a number to be encodable in this form, its binary
+/// representation must consist of a single run of contiguous 1s. That pattern
+/// must then be able to be replicated across all of the bits either 1, 2, 4,
+/// 8, 16, or 32 times (rotated or not).
+///
+/// For example, 1 (0b1), 2 (0b10), 3 (0b11), and 4 (0b100) are all valid.
+/// However, 5 (0b101) is invalid, because it contains 2 sets of 1s and cannot
+/// be replicated across 64 bits.
+///
+/// Some more examples to illustrate the idea of replication:
+/// * 0x5555555555555555 is a valid value (0b0101...) because it consists of a
+/// single set of 1s which can be replicated across all of the bits 32 times.
+/// * 0xf0f0f0f0f0f0f0f0 is a valid value (0b1111000011110000...) because it
+/// consists of a single set of 1s which can be replicated across all of the
+/// bits 8 times (rotated by 4 bits).
+/// * 0x0ff00ff00ff00ff0 is a valid value (0b0000111111110000...) because it
+/// consists of a single set of 1s which can be replicated across all of the
+/// bits 4 times (rotated by 12 bits).
+///
+/// To encode the values, there are 3 elements:
+/// * n = 1 if the pattern is 64-bits wide, 0 otherwise
+/// * imms = the size of the pattern, a 0, and then one less than the number of
+/// sequential 1s
+/// * immr = the number of right rotations to apply to the pattern to get the
+/// target value
+///
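+/// For example, 3 (0b11) is a single run of two 1s with a pattern width of
+/// 64 bits, so it encodes as n = 1, imms = 0b000001 (one less than the two
+/// sequential 1s), and immr = 0b000000 (no rotation).
+///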
+pub struct BitmaskImmediate {
+ n: u8,
+ imms: u8,
+ immr: u8
+}
+
+impl TryFrom<u64> for BitmaskImmediate {
+ type Error = ();
+
+ /// Attempt to convert a u64 into a BitmaskImmediate.
+ ///
+ /// The implementation here is largely based on this blog post:
+ /// <https://dougallj.wordpress.com/2021/10/30/bit-twiddling-optimising-aarch64-logical-immediate-encoding-and-decoding/>
+ fn try_from(value: u64) -> Result<Self, Self::Error> {
+ if value == 0 || value == u64::MAX {
+ return Err(());
+ }
+
+ fn rotate_right(value: u64, rotations: u32) -> u64 {
+ (value >> (rotations & 0x3F)) |
+ (value << (rotations.wrapping_neg() & 0x3F))
+ }
+
+ let rotations = (value & (value + 1)).trailing_zeros();
+ let normalized = rotate_right(value, rotations & 0x3F);
+
+ let zeroes = normalized.leading_zeros();
+ let ones = (!normalized).trailing_zeros();
+ let size = zeroes + ones;
+
+ if rotate_right(value, size & 0x3F) != value {
+ return Err(());
+ }
+
+ Ok(BitmaskImmediate {
+ n: ((size >> 6) & 1) as u8,
+ imms: (((size << 1).wrapping_neg() | (ones - 1)) & 0x3F) as u8,
+ immr: ((rotations.wrapping_neg() & (size - 1)) & 0x3F) as u8
+ })
+ }
+}
+
+impl BitmaskImmediate {
+ /// Attempt to make a BitmaskImmediate for a 32 bit register.
+ /// The result has N==0, which is required for some 32-bit instructions.
+ /// Note that the exact same BitmaskImmediate produces different values
+ /// depending on the size of the target register.
+ pub fn new_32b_reg(value: u32) -> Result<Self, ()> {
+ // The same bit pattern replicated to u64
+ let value = value as u64;
+ let replicated: u64 = (value << 32) | value;
+ let converted = Self::try_from(replicated);
+ if let Ok(ref imm) = converted {
+ assert_eq!(0, imm.n);
+ }
+
+ converted
+ }
+}
+
+impl BitmaskImmediate {
+ /// Encode a bitmask immediate into a 32-bit value.
+ pub fn encode(self) -> u32 {
+ 0
+ | ((self.n as u32) << 12)
+ | ((self.immr as u32) << 6)
+ | (self.imms as u32)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_failures() {
+ [5, 9, 10, 11, 13, 17, 18, 19].iter().for_each(|&imm| {
+ assert!(BitmaskImmediate::try_from(imm).is_err());
+ });
+ }
+
+ #[test]
+ fn test_negative() {
+ let bitmask: BitmaskImmediate = (-9_i64 as u64).try_into().unwrap();
+ let encoded: u32 = bitmask.encode();
+ assert_eq!(7998, encoded);
+ }
+
+ #[test]
+ fn test_size_2_minimum() {
+ let bitmask = BitmaskImmediate::try_from(0x5555555555555555);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b000000, imms: 0b111100 })));
+ }
+
+ #[test]
+ fn test_size_2_maximum() {
+ let bitmask = BitmaskImmediate::try_from(0xaaaaaaaaaaaaaaaa);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b000001, imms: 0b111100 })));
+ }
+
+ #[test]
+ fn test_size_4_minimum() {
+ let bitmask = BitmaskImmediate::try_from(0x1111111111111111);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b000000, imms: 0b111000 })));
+ }
+
+ #[test]
+ fn test_size_4_rotated() {
+ let bitmask = BitmaskImmediate::try_from(0x6666666666666666);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b000011, imms: 0b111001 })));
+ }
+
+ #[test]
+ fn test_size_4_maximum() {
+ let bitmask = BitmaskImmediate::try_from(0xeeeeeeeeeeeeeeee);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b000011, imms: 0b111010 })));
+ }
+
+ #[test]
+ fn test_size_8_minimum() {
+ let bitmask = BitmaskImmediate::try_from(0x0101010101010101);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b000000, imms: 0b110000 })));
+ }
+
+ #[test]
+ fn test_size_8_rotated() {
+ let bitmask = BitmaskImmediate::try_from(0x1818181818181818);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b000101, imms: 0b110001 })));
+ }
+
+ #[test]
+ fn test_size_8_maximum() {
+ let bitmask = BitmaskImmediate::try_from(0xfefefefefefefefe);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b000111, imms: 0b110110 })));
+ }
+
+ #[test]
+ fn test_size_16_minimum() {
+ let bitmask = BitmaskImmediate::try_from(0x0001000100010001);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b000000, imms: 0b100000 })));
+ }
+
+ #[test]
+ fn test_size_16_rotated() {
+ let bitmask = BitmaskImmediate::try_from(0xff8fff8fff8fff8f);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b001001, imms: 0b101100 })));
+ }
+
+ #[test]
+ fn test_size_16_maximum() {
+ let bitmask = BitmaskImmediate::try_from(0xfffefffefffefffe);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b001111, imms: 0b101110 })));
+ }
+
+ #[test]
+ fn test_size_32_minimum() {
+ let bitmask = BitmaskImmediate::try_from(0x0000000100000001);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b000000, imms: 0b000000 })));
+ }
+
+ #[test]
+ fn test_size_32_rotated() {
+ let bitmask = BitmaskImmediate::try_from(0x3fffff003fffff00);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b011000, imms: 0b010101 })));
+ }
+
+ #[test]
+ fn test_size_32_maximum() {
+ let bitmask = BitmaskImmediate::try_from(0xfffffffefffffffe);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 0, immr: 0b011111, imms: 0b011110 })));
+ }
+
+ #[test]
+ fn test_size_64_minimum() {
+ let bitmask = BitmaskImmediate::try_from(0x0000000000000001);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 1, immr: 0b000000, imms: 0b000000 })));
+ }
+
+ #[test]
+ fn test_size_64_rotated() {
+ let bitmask = BitmaskImmediate::try_from(0x0000001fffff0000);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 1, immr: 0b110000, imms: 0b010100 })));
+ }
+
+ #[test]
+ fn test_size_64_maximum() {
+ let bitmask = BitmaskImmediate::try_from(0xfffffffffffffffe);
+ assert!(matches!(bitmask, Ok(BitmaskImmediate { n: 1, immr: 0b111111, imms: 0b111110 })));
+ }
+
+ #[test]
+ fn test_size_64_invalid() {
+ let bitmask = BitmaskImmediate::try_from(u64::MAX);
+ assert!(matches!(bitmask, Err(())));
+ }
+
+ #[test]
+ fn test_all_valid_32b_pattern() {
+ let mut patterns = vec![];
+ for pattern_size in [2, 4, 8, 16, 32_u64] {
+ for ones_count in 1..pattern_size {
+ for rotation in 0..pattern_size {
+ let ones = (1_u64 << ones_count) - 1;
+ let rotated = (ones >> rotation) |
+ ((ones & ((1 << rotation) - 1)) << (pattern_size - rotation));
+ let mut replicated = rotated;
+ let mut shift = pattern_size;
+ while shift < 32 {
+ replicated |= replicated << shift;
+ shift *= 2;
+ }
+ let replicated: u32 = replicated.try_into().unwrap();
+ assert!(BitmaskImmediate::new_32b_reg(replicated).is_ok());
+ patterns.push(replicated);
+ }
+ }
+ }
+ patterns.sort();
+ patterns.dedup();
+ // Up to {size}-1 ones, and a total of {size} possible rotations.
+ assert_eq!(1*2 + 3*4 + 7*8 + 15*16 + 31*32, patterns.len());
+ }
+}
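
The encoding rules documented at the top of this file can be sanity-checked independently of the encoder. Below is a standalone brute-force sketch of the rule; the `is_bitmask_encodable` helper is hypothetical and not part of this commit:

```rust
/// Brute-force restatement of the rule documented above: a value is
/// encodable iff it is some rotation of a single run of ones replicated
/// across 64 bits at an element size of 2, 4, 8, 16, 32, or 64.
fn is_bitmask_encodable(value: u64) -> bool {
    if value == 0 || value == u64::MAX {
        return false; // all-zeroes and all-ones are explicitly rejected
    }
    for size in [2u32, 4, 8, 16, 32, 64] {
        for ones in 1..size {
            // A single run of `ones` ones, replicated every `size` bits.
            let run = (1u64 << ones) - 1;
            let mut pattern = run;
            let mut shift = size;
            while shift < 64 {
                pattern |= pattern << shift;
                shift *= 2;
            }
            // The pattern is periodic in `size`, so `size` rotations suffice.
            for rotation in 0..size {
                if pattern.rotate_right(rotation) == value {
                    return true;
                }
            }
        }
    }
    false
}

fn main() {
    assert!(is_bitmask_encodable(0x5555555555555555)); // run of 1, size 2
    assert!(is_bitmask_encodable(0x0ff00ff00ff00ff0)); // run of 8, rotated
    assert!(!is_bitmask_encodable(5)); // 0b101: two separate runs of ones
    println!("ok");
}
```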
diff --git a/yjit/src/asm/arm64/arg/condition.rs b/yjit/src/asm/arm64/arg/condition.rs
new file mode 100644
index 0000000000..f711b8b0d8
--- /dev/null
+++ b/yjit/src/asm/arm64/arg/condition.rs
@@ -0,0 +1,52 @@
+/// Various instructions in A64 can have condition codes attached. This struct
+/// contains constants for each of the various kinds of conditions, along with
+/// their respective encodings.
+pub struct Condition;
+
+impl Condition {
+ pub const EQ: u8 = 0b0000; // equal to
+ pub const NE: u8 = 0b0001; // not equal to
+ pub const CS: u8 = 0b0010; // carry set (alias for HS)
+ pub const CC: u8 = 0b0011; // carry clear (alias for LO)
+ pub const MI: u8 = 0b0100; // minus, negative
+ pub const PL: u8 = 0b0101; // positive or zero
+ pub const VS: u8 = 0b0110; // signed overflow
+ pub const VC: u8 = 0b0111; // no signed overflow
+ pub const HI: u8 = 0b1000; // greater than (unsigned)
+ pub const LS: u8 = 0b1001; // less than or equal to (unsigned)
+ pub const GE: u8 = 0b1010; // greater than or equal to (signed)
+ pub const LT: u8 = 0b1011; // less than (signed)
+ pub const GT: u8 = 0b1100; // greater than (signed)
+ pub const LE: u8 = 0b1101; // less than or equal to (signed)
+ pub const AL: u8 = 0b1110; // always
+
+ pub const fn inverse(condition: u8) -> u8 {
+ match condition {
+ Condition::EQ => Condition::NE,
+ Condition::NE => Condition::EQ,
+
+ Condition::CS => Condition::CC,
+ Condition::CC => Condition::CS,
+
+ Condition::MI => Condition::PL,
+ Condition::PL => Condition::MI,
+
+ Condition::VS => Condition::VC,
+ Condition::VC => Condition::VS,
+
+ Condition::HI => Condition::LS,
+ Condition::LS => Condition::HI,
+
+ Condition::LT => Condition::GE,
+ Condition::GE => Condition::LT,
+
+ Condition::GT => Condition::LE,
+ Condition::LE => Condition::GT,
+
+ Condition::AL => Condition::AL,
+
+ _ => panic!("Unknown condition")
+
+ }
+ }
+}
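
The `inverse` table above is what lets a code generator flip the sense of a comparison, e.g. to branch around a block instead of into it. A minimal sketch, assuming `Condition` from this commit is in scope:

```rust
fn main() {
    // "less than" fails exactly when "greater than or equal" holds, so a
    // backend can rewrite `b.lt target` as `b.ge skip` plus a fallthrough.
    assert_eq!(Condition::inverse(Condition::LT), Condition::GE);

    // AL (always) is its own inverse by convention.
    assert_eq!(Condition::inverse(Condition::AL), Condition::AL);
}
```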
diff --git a/yjit/src/asm/arm64/arg/inst_offset.rs b/yjit/src/asm/arm64/arg/inst_offset.rs
new file mode 100644
index 0000000000..f4a6bc73a0
--- /dev/null
+++ b/yjit/src/asm/arm64/arg/inst_offset.rs
@@ -0,0 +1,47 @@
+/// There are a lot of instructions in the AArch64 architecture that take an
+/// offset in terms of number of instructions. Usually they are jump
+/// instructions or instructions that load a value relative to the current PC.
+///
+/// This struct is used to mark those locations instead of a generic operand in
+/// order to give better clarity to the developer when reading the AArch64
+/// backend code. It also helps to clarify that everything is in terms of a
+/// number of instructions and not a number of bytes (i.e., the offset is the
+/// number of bytes divided by 4).
+#[derive(Copy, Clone)]
+pub struct InstructionOffset(i32);
+
+impl InstructionOffset {
+ /// Create a new instruction offset.
+ pub fn from_insns(insns: i32) -> Self {
+ InstructionOffset(insns)
+ }
+
+ /// Create a new instruction offset from a number of bytes.
+ pub fn from_bytes(bytes: i32) -> Self {
+ assert_eq!(bytes % 4, 0, "Byte offset must be a multiple of 4");
+ InstructionOffset(bytes / 4)
+ }
+}
+
+impl From<i32> for InstructionOffset {
+    /// Convert an i32 into an instruction offset.
+ fn from(value: i32) -> Self {
+ InstructionOffset(value)
+ }
+}
+
+impl From<InstructionOffset> for i32 {
+ /// Convert an instruction offset into a number of instructions as an i32.
+ fn from(offset: InstructionOffset) -> Self {
+ offset.0
+ }
+}
+
+impl From<InstructionOffset> for i64 {
+ /// Convert an instruction offset into a number of instructions as an i64.
+ /// This is useful for when we're checking how many bits this offset fits
+ /// into.
+ fn from(offset: InstructionOffset) -> Self {
+ offset.0.into()
+ }
+}
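
A quick sketch of the byte/instruction distinction this type enforces, assuming `InstructionOffset` from this commit is in scope:

```rust
fn main() {
    // Every A64 instruction is 4 bytes, so 8 bytes ahead is 2 instructions.
    let offset = InstructionOffset::from_bytes(8);
    assert_eq!(i32::from(offset), 2);

    // from_bytes would panic on an offset that is not a multiple of 4.
    let same = InstructionOffset::from_insns(2);
    assert_eq!(i64::from(same), 2);
}
```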
diff --git a/yjit/src/asm/arm64/arg/mod.rs b/yjit/src/asm/arm64/arg/mod.rs
new file mode 100644
index 0000000000..7eb37834f9
--- /dev/null
+++ b/yjit/src/asm/arm64/arg/mod.rs
@@ -0,0 +1,18 @@
+// This module contains various A64 instruction arguments and the logic
+// necessary to encode them.
+
+mod bitmask_imm;
+mod condition;
+mod inst_offset;
+mod sf;
+mod shifted_imm;
+mod sys_reg;
+mod truncate;
+
+pub use bitmask_imm::BitmaskImmediate;
+pub use condition::Condition;
+pub use inst_offset::InstructionOffset;
+pub use sf::Sf;
+pub use shifted_imm::ShiftedImmediate;
+pub use sys_reg::SystemRegister;
+pub use truncate::{truncate_imm, truncate_uimm};
diff --git a/yjit/src/asm/arm64/arg/sf.rs b/yjit/src/asm/arm64/arg/sf.rs
new file mode 100644
index 0000000000..c2fd33302c
--- /dev/null
+++ b/yjit/src/asm/arm64/arg/sf.rs
@@ -0,0 +1,19 @@
+/// This is commonly the top-most bit in the encoding of the instruction, and
+/// represents whether register operands should be treated as 64-bit registers
+/// or 32-bit registers.
+pub enum Sf {
+ Sf32 = 0b0,
+ Sf64 = 0b1
+}
+
+/// A convenience function so that we can convert the number of bits of a
+/// register operand directly into an Sf enum variant.
+impl From<u8> for Sf {
+ fn from(num_bits: u8) -> Self {
+ match num_bits {
+ 64 => Sf::Sf64,
+ 32 => Sf::Sf32,
+ _ => panic!("Invalid number of bits: {}", num_bits)
+ }
+ }
+}
diff --git a/yjit/src/asm/arm64/arg/shifted_imm.rs b/yjit/src/asm/arm64/arg/shifted_imm.rs
new file mode 100644
index 0000000000..4602ac64ab
--- /dev/null
+++ b/yjit/src/asm/arm64/arg/shifted_imm.rs
@@ -0,0 +1,81 @@
+/// How much to shift the immediate by.
+pub enum Shift {
+ LSL0 = 0b0, // no shift
+ LSL12 = 0b1 // logical shift left by 12 bits
+}
+
+/// Some instructions accept a 12-bit immediate that has an optional shift
+/// attached to it. This allows encoding larger values than would fit into 12
+/// bits alone. We attempt to encode those here. If the value is too large we
+/// have to bail out.
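+///
+/// For example, 0xfff000 (0xfff << 12) is encodable as the value 0xfff with
+/// a 12-bit left shift, while 0xfff001 cannot be encoded at all.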
+pub struct ShiftedImmediate {
+ shift: Shift,
+ value: u16
+}
+
+impl TryFrom<u64> for ShiftedImmediate {
+ type Error = ();
+
+    /// Attempt to convert a u64 into a ShiftedImmediate.
+ fn try_from(value: u64) -> Result<Self, Self::Error> {
+ let current = value;
+ if current < 2_u64.pow(12) {
+ return Ok(ShiftedImmediate { shift: Shift::LSL0, value: current as u16 });
+ }
+
+ if (current & (2_u64.pow(12) - 1) == 0) && ((current >> 12) < 2_u64.pow(12)) {
+ return Ok(ShiftedImmediate { shift: Shift::LSL12, value: (current >> 12) as u16 });
+ }
+
+ Err(())
+ }
+}
+
+impl From<ShiftedImmediate> for u32 {
+    /// Encode a shifted immediate into a 32-bit value.
+ fn from(imm: ShiftedImmediate) -> Self {
+ 0
+ | (((imm.shift as u32) & 1) << 12)
+ | (imm.value as u32)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_no_shift() {
+ let expected_value = 256;
+ let result = ShiftedImmediate::try_from(expected_value);
+
+ match result {
+ Ok(ShiftedImmediate { shift: Shift::LSL0, value }) => assert_eq!(value as u64, expected_value),
+ _ => panic!("Unexpected shift value")
+ }
+ }
+
+ #[test]
+ fn test_maximum_no_shift() {
+ let expected_value = (1 << 12) - 1;
+ let result = ShiftedImmediate::try_from(expected_value);
+
+ match result {
+ Ok(ShiftedImmediate { shift: Shift::LSL0, value }) => assert_eq!(value as u64, expected_value),
+ _ => panic!("Unexpected shift value")
+ }
+ }
+
+ #[test]
+ fn test_with_shift() {
+ let result = ShiftedImmediate::try_from(256 << 12);
+
+ assert!(matches!(result, Ok(ShiftedImmediate { shift: Shift::LSL12, value: 256 })));
+ }
+
+ #[test]
+ fn test_unencodable() {
+ let result = ShiftedImmediate::try_from((256 << 12) + 1);
+ assert!(matches!(result, Err(())));
+ }
+}
diff --git a/yjit/src/asm/arm64/arg/sys_reg.rs b/yjit/src/asm/arm64/arg/sys_reg.rs
new file mode 100644
index 0000000000..6229d5c1fd
--- /dev/null
+++ b/yjit/src/asm/arm64/arg/sys_reg.rs
@@ -0,0 +1,6 @@
+/// The encoded representation of an A64 system register.
+/// <https://developer.arm.com/documentation/ddi0601/2022-06/AArch64-Registers/>
+pub enum SystemRegister {
+ /// <https://developer.arm.com/documentation/ddi0601/2022-06/AArch64-Registers/NZCV--Condition-Flags?lang=en>
+ NZCV = 0b1_011_0100_0010_000
+}
diff --git a/yjit/src/asm/arm64/arg/truncate.rs b/yjit/src/asm/arm64/arg/truncate.rs
new file mode 100644
index 0000000000..85d56ff202
--- /dev/null
+++ b/yjit/src/asm/arm64/arg/truncate.rs
@@ -0,0 +1,66 @@
+// There are many instances in AArch64 instruction encoding where you represent
+// an integer value with a particular bit width that isn't a power of 2. These
+// functions represent truncating those integer values down to the appropriate
+// number of bits.
+
+/// Truncate a signed immediate to fit into a compile-time known width. It is
+/// assumed before calling this function that the value fits into the correct
+/// size. If it doesn't, then this function will panic.
+///
+/// When the value is positive, this should effectively be a no-op since we're
+/// just dropping leading zeroes. When the value is negative we should only be
+/// dropping leading ones.
+pub fn truncate_imm<T: Into<i32>, const WIDTH: usize>(imm: T) -> u32 {
+ let value: i32 = imm.into();
+ let masked = (value as u32) & ((1 << WIDTH) - 1);
+
+ // Assert that we didn't drop any bits by truncating.
+ if value >= 0 {
+ assert_eq!(value as u32, masked);
+ } else {
+ assert_eq!(value as u32, masked | (u32::MAX << WIDTH));
+ }
+
+ masked
+}
+
+/// Truncate an unsigned immediate to fit into a compile-time known width. It is
+/// assumed before calling this function that the value fits into the correct
+/// size. If it doesn't, then this function will panic.
+///
+/// This should effectively be a no-op since we're just dropping leading zeroes.
+pub fn truncate_uimm<T: Into<u32>, const WIDTH: usize>(uimm: T) -> u32 {
+ let value: u32 = uimm.into();
+ let masked = value & ((1 << WIDTH) - 1);
+
+ // Assert that we didn't drop any bits by truncating.
+ assert_eq!(value, masked);
+
+ masked
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_truncate_imm_positive() {
+ let inst = truncate_imm::<i32, 4>(5);
+ let result: u32 = inst;
+ assert_eq!(0b0101, result);
+ }
+
+ #[test]
+ fn test_truncate_imm_negative() {
+ let inst = truncate_imm::<i32, 4>(-5);
+ let result: u32 = inst;
+ assert_eq!(0b1011, result);
+ }
+
+ #[test]
+ fn test_truncate_uimm() {
+ let inst = truncate_uimm::<u32, 4>(5);
+ let result: u32 = inst;
+ assert_eq!(0b0101, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/atomic.rs b/yjit/src/asm/arm64/inst/atomic.rs
new file mode 100644
index 0000000000..dce9affedf
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/atomic.rs
@@ -0,0 +1,86 @@
+/// The size of the register operands to this instruction.
+enum Size {
+ /// Using 32-bit registers.
+ Size32 = 0b10,
+
+ /// Using 64-bit registers.
+ Size64 = 0b11
+}
+
+/// A convenience function so that we can convert the number of bits of a
+/// register operand directly into a Size enum variant.
+impl From<u8> for Size {
+ fn from(num_bits: u8) -> Self {
+ match num_bits {
+ 64 => Size::Size64,
+ 32 => Size::Size32,
+ _ => panic!("Invalid number of bits: {}", num_bits)
+ }
+ }
+}
+
+/// The struct that represents an A64 atomic instruction that can be encoded.
+///
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// |        1  1    1  0  0  0    1  1  1                      0  0  0  0    0  0                                  |
+/// | size                             rs..............                       rn.............. rt.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct Atomic {
+ /// The register holding the value to be loaded.
+ rt: u8,
+
+ /// The base register.
+ rn: u8,
+
+ /// The register holding the data value to be operated on.
+ rs: u8,
+
+ /// The size of the registers used in this instruction.
+ size: Size
+}
+
+impl Atomic {
+ /// LDADDAL
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDADD--LDADDA--LDADDAL--LDADDL--Atomic-add-on-word-or-doubleword-in-memory-?lang=en>
+ pub fn ldaddal(rs: u8, rt: u8, rn: u8, num_bits: u8) -> Self {
+ Self { rt, rn, rs, size: num_bits.into() }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Loads-and-Stores?lang=en>
+const FAMILY: u32 = 0b0100;
+
+impl From<Atomic> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: Atomic) -> Self {
+ 0
+ | ((inst.size as u32) << 30)
+ | (0b11 << 28)
+ | (FAMILY << 25)
+ | (0b111 << 21)
+ | ((inst.rs as u32) << 16)
+ | ((inst.rn as u32) << 5)
+ | (inst.rt as u32)
+ }
+}
+
+impl From<Atomic> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: Atomic) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_ldaddal() {
+ let result: u32 = Atomic::ldaddal(20, 21, 22, 64).into();
+ assert_eq!(0xf8f402d5, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/branch.rs b/yjit/src/asm/arm64/inst/branch.rs
new file mode 100644
index 0000000000..14fcb2e9fd
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/branch.rs
@@ -0,0 +1,100 @@
+/// Which operation to perform.
+enum Op {
+ /// Perform a BR instruction.
+ BR = 0b00,
+
+ /// Perform a BLR instruction.
+ BLR = 0b01,
+
+ /// Perform a RET instruction.
+ RET = 0b10
+}
+
+/// The struct that represents an A64 branch instruction that can be encoded.
+///
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// |  1  1  0  1    0  1  1  0    0        1    1  1  1  1    0  0  0  0    0  0                0    0  0  0  0 |
+/// |                                 op...                                   rn..............                   |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct Branch {
+ /// The register holding the address to be branched to.
+ rn: u8,
+
+ /// The operation to perform.
+ op: Op
+}
+
+impl Branch {
+ /// BR
+ /// <https://developer.arm.com/documentation/ddi0602/2022-03/Base-Instructions/BR--Branch-to-Register-?lang=en>
+ pub fn br(rn: u8) -> Self {
+ Self { rn, op: Op::BR }
+ }
+
+ /// BLR
+ /// <https://developer.arm.com/documentation/ddi0602/2022-03/Base-Instructions/BLR--Branch-with-Link-to-Register-?lang=en>
+ pub fn blr(rn: u8) -> Self {
+ Self { rn, op: Op::BLR }
+ }
+
+ /// RET
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/RET--Return-from-subroutine-?lang=en>
+ pub fn ret(rn: u8) -> Self {
+ Self { rn, op: Op::RET }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Branches--Exception-Generating-and-System-instructions?lang=en>
+const FAMILY: u32 = 0b101;
+
+impl From<Branch> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: Branch) -> Self {
+ 0
+ | (0b11 << 30)
+ | (FAMILY << 26)
+ | (1 << 25)
+ | ((inst.op as u32) << 21)
+ | (0b11111 << 16)
+ | ((inst.rn as u32) << 5)
+ }
+}
+
+impl From<Branch> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: Branch) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_br() {
+ let result: u32 = Branch::br(0).into();
+ assert_eq!(0xd61f0000, result);
+ }
+
+ #[test]
+ fn test_blr() {
+ let result: u32 = Branch::blr(0).into();
+ assert_eq!(0xd63f0000, result);
+ }
+
+ #[test]
+ fn test_ret() {
+ let result: u32 = Branch::ret(30).into();
+        assert_eq!(0xd65f03c0, result);
+ }
+
+ #[test]
+ fn test_ret_rn() {
+ let result: u32 = Branch::ret(20).into();
+ assert_eq!(0xd65f0280, result);
+ }
+}
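
Since every encoder here funnels through `From<T> for u32` and then `to_le_bytes`, emitting an instruction is just appending four bytes. A small sketch, assuming `Branch` from this commit is in scope:

```rust
fn main() {
    // RET through x30, the conventional link register.
    let bytes: [u8; 4] = Branch::ret(30).into();

    // 0xd65f03c0 (see test_ret above), laid out little-endian in memory.
    assert_eq!(bytes, [0xc0, 0x03, 0x5f, 0xd6]);
}
```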
diff --git a/yjit/src/asm/arm64/inst/branch_cond.rs b/yjit/src/asm/arm64/inst/branch_cond.rs
new file mode 100644
index 0000000000..266e9ccb31
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/branch_cond.rs
@@ -0,0 +1,78 @@
+use super::super::arg::{InstructionOffset, truncate_imm};
+
+/// The struct that represents an A64 conditional branch instruction that can be
+/// encoded.
+///
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// |  0  1  0  1    0  1  0  0                                                              0                   |
+/// |                             imm19...........................................................      cond....... |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct BranchCond {
+ /// The kind of condition to check before branching.
+ cond: u8,
+
+ /// The instruction offset from this instruction to branch to.
+ offset: InstructionOffset
+}
+
+impl BranchCond {
+ /// B.cond
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/B-cond--Branch-conditionally->
+ pub fn bcond(cond: u8, offset: InstructionOffset) -> Self {
+ Self { cond, offset }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Branches--Exception-Generating-and-System-instructions?lang=en>
+const FAMILY: u32 = 0b101;
+
+impl From<BranchCond> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: BranchCond) -> Self {
+ 0
+ | (1 << 30)
+ | (FAMILY << 26)
+ | (truncate_imm::<_, 19>(inst.offset) << 5)
+ | (inst.cond as u32)
+ }
+}
+
+impl From<BranchCond> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: BranchCond) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use super::super::super::arg::Condition;
+
+ #[test]
+ fn test_b_eq() {
+ let result: u32 = BranchCond::bcond(Condition::EQ, 32.into()).into();
+ assert_eq!(0x54000400, result);
+ }
+
+ #[test]
+ fn test_b_vs() {
+ let result: u32 = BranchCond::bcond(Condition::VS, 32.into()).into();
+ assert_eq!(0x54000406, result);
+ }
+
+ #[test]
+ fn test_b_eq_max() {
+ let result: u32 = BranchCond::bcond(Condition::EQ, ((1 << 18) - 1).into()).into();
+ assert_eq!(0x547fffe0, result);
+ }
+
+ #[test]
+ fn test_b_eq_min() {
+ let result: u32 = BranchCond::bcond(Condition::EQ, (-(1 << 18)).into()).into();
+ assert_eq!(0x54800000, result);
+ }
+}
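
Putting the pieces together: the imm19 field counts instructions, not bytes, so a backend converts a byte displacement with `InstructionOffset::from_bytes` before encoding. A sketch with made-up addresses, assuming the types from this commit are in scope:

```rust
fn main() {
    // Branch from the instruction at `src` to the one at `dst`.
    let (src, dst): (i32, i32) = (0x1000, 0x1080);
    let offset = InstructionOffset::from_bytes(dst - src); // 128 bytes = 32 insns

    let insn: u32 = BranchCond::bcond(Condition::EQ, offset).into();
    assert_eq!(insn, 0x54000400); // same encoding as the offset-32 test above
}
```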
diff --git a/yjit/src/asm/arm64/inst/breakpoint.rs b/yjit/src/asm/arm64/inst/breakpoint.rs
new file mode 100644
index 0000000000..d66a35c4c6
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/breakpoint.rs
@@ -0,0 +1,55 @@
+/// The struct that represents an A64 breakpoint instruction that can be encoded.
+///
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// |  1  1  0  1    0  1  0  0    0  0  1                                                   0    0  0  0  0 |
+/// |                                       imm16..................................................           |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct Breakpoint {
+ /// The value to be captured by ESR_ELx.ISS
+ imm16: u16
+}
+
+impl Breakpoint {
+ /// BRK
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/BRK--Breakpoint-instruction->
+ pub fn brk(imm16: u16) -> Self {
+ Self { imm16 }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Branches--Exception-Generating-and-System-instructions?lang=en#control>
+const FAMILY: u32 = 0b101;
+
+impl From<Breakpoint> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: Breakpoint) -> Self {
+ let imm16 = inst.imm16 as u32;
+
+ 0
+ | (0b11 << 30)
+ | (FAMILY << 26)
+ | (1 << 21)
+ | (imm16 << 5)
+ }
+}
+
+impl From<Breakpoint> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: Breakpoint) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_brk() {
+ let result: u32 = Breakpoint::brk(7).into();
+ assert_eq!(0xd42000e0, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/call.rs b/yjit/src/asm/arm64/inst/call.rs
new file mode 100644
index 0000000000..fd26d09f8a
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/call.rs
@@ -0,0 +1,104 @@
+use super::super::arg::{InstructionOffset, truncate_imm};
+
+/// The operation to perform for this instruction.
+enum Op {
+ /// Branch directly, with a hint that this is not a subroutine call or
+ /// return.
+ Branch = 0,
+
+ /// Branch directly, with a hint that this is a subroutine call or return.
+ BranchWithLink = 1
+}
+
+/// The struct that represents an A64 branch with or without link instruction
+/// that can be encoded.
+///
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// |      0  0  1    0  1                                                                                        |
+/// | op                 imm26....................................................................................... |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct Call {
+ /// The PC-relative offset to jump to in terms of number of instructions.
+ offset: InstructionOffset,
+
+ /// The operation to perform for this instruction.
+ op: Op
+}
+
+impl Call {
+ /// B
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/B--Branch->
+ pub fn b(offset: InstructionOffset) -> Self {
+ Self { offset, op: Op::Branch }
+ }
+
+ /// BL
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/BL--Branch-with-Link-?lang=en>
+ pub fn bl(offset: InstructionOffset) -> Self {
+ Self { offset, op: Op::BranchWithLink }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Branches--Exception-Generating-and-System-instructions?lang=en>
+const FAMILY: u32 = 0b101;
+
+impl From<Call> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: Call) -> Self {
+ 0
+ | ((inst.op as u32) << 31)
+ | (FAMILY << 26)
+ | truncate_imm::<_, 26>(inst.offset)
+ }
+}
+
+impl From<Call> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: Call) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_bl() {
+ let result: u32 = Call::bl(0.into()).into();
+ assert_eq!(0x94000000, result);
+ }
+
+ #[test]
+ fn test_bl_positive() {
+ let result: u32 = Call::bl(256.into()).into();
+ assert_eq!(0x94000100, result);
+ }
+
+ #[test]
+ fn test_bl_negative() {
+ let result: u32 = Call::bl((-256).into()).into();
+ assert_eq!(0x97ffff00, result);
+ }
+
+ #[test]
+ fn test_b() {
+ let result: u32 = Call::b(0.into()).into();
+ assert_eq!(0x14000000, result);
+ }
+
+ #[test]
+ fn test_b_positive() {
+ let result: u32 = Call::b(((1 << 25) - 1).into()).into();
+ assert_eq!(0x15ffffff, result);
+ }
+
+ #[test]
+ fn test_b_negative() {
+ let result: u32 = Call::b((-(1 << 25)).into()).into();
+ assert_eq!(0x16000000, result);
+ }
+}
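
The imm26 field gives B/BL a range of ±2^25 instructions, i.e. ±128 MiB. A hypothetical reachability check (not part of this commit) might look like:

```rust
// Whether a byte displacement is reachable by an unconditional B/BL,
// whose imm26 field holds a signed number of instructions.
fn b_reachable(byte_offset: i64) -> bool {
    let insns = byte_offset / 4;
    byte_offset % 4 == 0 && insns >= -(1 << 25) && insns < (1 << 25)
}

fn main() {
    assert!(b_reachable(128 * 1024 * 1024 - 4)); // just under +128 MiB
    assert!(!b_reachable(128 * 1024 * 1024));    // one instruction too far
}
```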
diff --git a/yjit/src/asm/arm64/inst/conditional.rs b/yjit/src/asm/arm64/inst/conditional.rs
new file mode 100644
index 0000000000..1e26c7408b
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/conditional.rs
@@ -0,0 +1,73 @@
+use super::super::arg::Sf;
+
+/// The struct that represents an A64 conditional instruction that can be
+/// encoded.
+///
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// |      0  0  1    1  0  1  0    1  0  0                                    0  0                               |
+/// | sf                                    rm..............   cond.......         rn.............. rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct Conditional {
+ /// The number of the general-purpose destination register.
+ rd: u8,
+
+ /// The number of the first general-purpose source register.
+ rn: u8,
+
+ /// The condition to use for the conditional instruction.
+ cond: u8,
+
+ /// The number of the second general-purpose source register.
+ rm: u8,
+
+ /// The size of the registers of this instruction.
+ sf: Sf
+}
+
+impl Conditional {
+ /// CSEL
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/CSEL--Conditional-Select-?lang=en>
+ pub fn csel(rd: u8, rn: u8, rm: u8, cond: u8, num_bits: u8) -> Self {
+ Self { rd, rn, cond, rm, sf: num_bits.into() }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Data-Processing----Register?lang=en#condsel>
+const FAMILY: u32 = 0b101;
+
+impl From<Conditional> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: Conditional) -> Self {
+ 0
+ | ((inst.sf as u32) << 31)
+ | (1 << 28)
+ | (FAMILY << 25)
+ | (1 << 23)
+ | ((inst.rm as u32) << 16)
+ | ((inst.cond as u32) << 12)
+ | ((inst.rn as u32) << 5)
+ | (inst.rd as u32)
+ }
+}
+
+impl From<Conditional> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: Conditional) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use super::super::super::arg::Condition;
+
+ #[test]
+ fn test_csel() {
+ let result: u32 = Conditional::csel(0, 1, 2, Condition::NE, 64).into();
+ assert_eq!(0x9a821020, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/data_imm.rs b/yjit/src/asm/arm64/inst/data_imm.rs
new file mode 100644
index 0000000000..ea71705478
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/data_imm.rs
@@ -0,0 +1,143 @@
+use super::super::arg::{Sf, ShiftedImmediate};
+
+/// The operation being performed by this instruction.
+enum Op {
+ Add = 0b0,
+ Sub = 0b1
+}
+
+/// Whether or not to update the flags when this instruction is performed.
+enum S {
+ LeaveFlags = 0b0,
+ UpdateFlags = 0b1
+}
+
+/// The struct that represents an A64 data processing -- immediate instruction
+/// that can be encoded.
+///
+/// Add/subtract (immediate)
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// |            1    0  0  0  1    0                                                                             |
+/// | sf op  S                        sh imm12.................................... rn.............. rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct DataImm {
+ /// The register number of the destination register.
+ rd: u8,
+
+ /// The register number of the first operand register.
+ rn: u8,
+
+    /// The immediate value, optionally shifted, for this instruction.
+ imm: ShiftedImmediate,
+
+ /// Whether or not to update the flags when this instruction is performed.
+ s: S,
+
+ /// The opcode for this instruction.
+ op: Op,
+
+ /// Whether or not this instruction is operating on 64-bit operands.
+ sf: Sf
+}
+
+impl DataImm {
+ /// ADD (immediate)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/ADD--immediate---Add--immediate--?lang=en>
+ pub fn add(rd: u8, rn: u8, imm: ShiftedImmediate, num_bits: u8) -> Self {
+ Self { rd, rn, imm, s: S::LeaveFlags, op: Op::Add, sf: num_bits.into() }
+ }
+
+ /// ADDS (immediate, set flags)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/ADDS--immediate---Add--immediate---setting-flags-?lang=en>
+ pub fn adds(rd: u8, rn: u8, imm: ShiftedImmediate, num_bits: u8) -> Self {
+ Self { rd, rn, imm, s: S::UpdateFlags, op: Op::Add, sf: num_bits.into() }
+ }
+
+ /// CMP (immediate)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/CMP--immediate---Compare--immediate---an-alias-of-SUBS--immediate--?lang=en>
+ pub fn cmp(rn: u8, imm: ShiftedImmediate, num_bits: u8) -> Self {
+ Self::subs(31, rn, imm, num_bits)
+ }
+
+ /// SUB (immediate)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/SUB--immediate---Subtract--immediate--?lang=en>
+ pub fn sub(rd: u8, rn: u8, imm: ShiftedImmediate, num_bits: u8) -> Self {
+ Self { rd, rn, imm, s: S::LeaveFlags, op: Op::Sub, sf: num_bits.into() }
+ }
+
+ /// SUBS (immediate, set flags)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/SUBS--immediate---Subtract--immediate---setting-flags-?lang=en>
+ pub fn subs(rd: u8, rn: u8, imm: ShiftedImmediate, num_bits: u8) -> Self {
+ Self { rd, rn, imm, s: S::UpdateFlags, op: Op::Sub, sf: num_bits.into() }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Data-Processing----Immediate?lang=en>
+const FAMILY: u32 = 0b1000;
+
+impl From<DataImm> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: DataImm) -> Self {
+ let imm: u32 = inst.imm.into();
+
+ 0
+ | ((inst.sf as u32) << 31)
+ | ((inst.op as u32) << 30)
+ | ((inst.s as u32) << 29)
+ | (FAMILY << 25)
+ | (1 << 24)
+ | (imm << 10)
+ | ((inst.rn as u32) << 5)
+ | inst.rd as u32
+ }
+}
+
+impl From<DataImm> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: DataImm) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_add() {
+ let inst = DataImm::add(0, 1, 7.try_into().unwrap(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0x91001c20, result);
+ }
+
+ #[test]
+ fn test_adds() {
+ let inst = DataImm::adds(0, 1, 7.try_into().unwrap(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xb1001c20, result);
+ }
+
+ #[test]
+ fn test_cmp() {
+ let inst = DataImm::cmp(0, 7.try_into().unwrap(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf1001c1f, result);
+ }
+
+ #[test]
+ fn test_sub() {
+ let inst = DataImm::sub(0, 1, 7.try_into().unwrap(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xd1001c20, result);
+ }
+
+ #[test]
+ fn test_subs() {
+ let inst = DataImm::subs(0, 1, 7.try_into().unwrap(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf1001c20, result);
+ }
+}
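
The usual calling pattern pairs these constructors with `ShiftedImmediate::try_from`: immediates that fit imm12 (optionally shifted) encode directly, and anything else has to be materialized into a register first. A sketch of that flow; the fallback comment is illustrative, not this commit's backend code:

```rust
fn add_imm_or_fallback(rd: u8, rn: u8, value: u64) {
    match ShiftedImmediate::try_from(value) {
        Ok(imm) => {
            // Fits in imm12 (possibly shifted left by 12): encode directly.
            let _insn: u32 = DataImm::add(rd, rn, imm, 64).into();
            // ... append _insn's little-endian bytes to the code buffer ...
        }
        Err(()) => {
            // Too large: load `value` into a scratch register and use the
            // register-register ADD from data_reg.rs instead.
        }
    }
}
```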
diff --git a/yjit/src/asm/arm64/inst/data_reg.rs b/yjit/src/asm/arm64/inst/data_reg.rs
new file mode 100644
index 0000000000..ed4afa956b
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/data_reg.rs
@@ -0,0 +1,192 @@
+use super::super::arg::{Sf, truncate_uimm};
+
+/// The operation being performed by this instruction.
+enum Op {
+ Add = 0b0,
+ Sub = 0b1
+}
+
+/// Whether or not to update the flags when this instruction is performed.
+enum S {
+ LeaveFlags = 0b0,
+ UpdateFlags = 0b1
+}
+
+/// The type of shift to perform on the second operand register.
+enum Shift {
+ LSL = 0b00, // logical shift left (unsigned)
+ LSR = 0b01, // logical shift right (unsigned)
+ ASR = 0b10 // arithmetic shift right (signed)
+}
+
+/// The struct that represents an A64 data processing -- register instruction
+/// that can be encoded.
+///
+/// Add/subtract (shifted register)
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// |            0    1  0  1  1             0                                                                    |
+/// | sf op  S                    shift    rm..............    imm6............... rn.............. rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct DataReg {
+ /// The register number of the destination register.
+ rd: u8,
+
+ /// The register number of the first operand register.
+ rn: u8,
+
+ /// The amount to shift the second operand register by.
+ imm6: u8,
+
+ /// The register number of the second operand register.
+ rm: u8,
+
+ /// The type of shift to perform on the second operand register.
+ shift: Shift,
+
+ /// Whether or not to update the flags when this instruction is performed.
+ s: S,
+
+ /// The opcode for this instruction.
+ op: Op,
+
+ /// Whether or not this instruction is operating on 64-bit operands.
+ sf: Sf
+}
+
+impl DataReg {
+ /// ADD (shifted register)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/ADD--shifted-register---Add--shifted-register--?lang=en>
+ pub fn add(rd: u8, rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self {
+ rd,
+ rn,
+ imm6: 0,
+ rm,
+ shift: Shift::LSL,
+ s: S::LeaveFlags,
+ op: Op::Add,
+ sf: num_bits.into()
+ }
+ }
+
+ /// ADDS (shifted register, set flags)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/ADDS--shifted-register---Add--shifted-register---setting-flags-?lang=en>
+ pub fn adds(rd: u8, rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self {
+ rd,
+ rn,
+ imm6: 0,
+ rm,
+ shift: Shift::LSL,
+ s: S::UpdateFlags,
+ op: Op::Add,
+ sf: num_bits.into()
+ }
+ }
+
+ /// CMP (shifted register)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/CMP--shifted-register---Compare--shifted-register---an-alias-of-SUBS--shifted-register--?lang=en>
+ pub fn cmp(rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self::subs(31, rn, rm, num_bits)
+ }
+
+ /// SUB (shifted register)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/SUB--shifted-register---Subtract--shifted-register--?lang=en>
+ pub fn sub(rd: u8, rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self {
+ rd,
+ rn,
+ imm6: 0,
+ rm,
+ shift: Shift::LSL,
+ s: S::LeaveFlags,
+ op: Op::Sub,
+ sf: num_bits.into()
+ }
+ }
+
+ /// SUBS (shifted register, set flags)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/SUBS--shifted-register---Subtract--shifted-register---setting-flags-?lang=en>
+ pub fn subs(rd: u8, rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self {
+ rd,
+ rn,
+ imm6: 0,
+ rm,
+ shift: Shift::LSL,
+ s: S::UpdateFlags,
+ op: Op::Sub,
+ sf: num_bits.into()
+ }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Data-Processing----Register?lang=en>
+const FAMILY: u32 = 0b0101;
+
+impl From<DataReg> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: DataReg) -> Self {
+ 0
+ | ((inst.sf as u32) << 31)
+ | ((inst.op as u32) << 30)
+ | ((inst.s as u32) << 29)
+ | (FAMILY << 25)
+ | (1 << 24)
+ | ((inst.shift as u32) << 22)
+ | ((inst.rm as u32) << 16)
+ | (truncate_uimm::<_, 6>(inst.imm6) << 10)
+ | ((inst.rn as u32) << 5)
+ | inst.rd as u32
+ }
+}
+
+impl From<DataReg> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: DataReg) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_add() {
+ let inst = DataReg::add(0, 1, 2, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0x8b020020, result);
+ }
+
+ #[test]
+ fn test_adds() {
+ let inst = DataReg::adds(0, 1, 2, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xab020020, result);
+ }
+
+ #[test]
+ fn test_cmp() {
+ let inst = DataReg::cmp(0, 1, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xeb01001f, result);
+ }
+
+ #[test]
+ fn test_sub() {
+ let inst = DataReg::sub(0, 1, 2, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xcb020020, result);
+ }
+
+ #[test]
+ fn test_subs() {
+ let inst = DataReg::subs(0, 1, 2, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xeb020020, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/halfword_imm.rs b/yjit/src/asm/arm64/inst/halfword_imm.rs
new file mode 100644
index 0000000000..863ac947dd
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/halfword_imm.rs
@@ -0,0 +1,179 @@
+use super::super::arg::truncate_imm;
+
+/// Whether this is a load or a store.
+enum Op {
+ Load = 1,
+ Store = 0
+}
+
+/// The type of indexing to perform for this instruction.
+enum Index {
+ /// No indexing.
+ None = 0b00,
+
+    /// Mutate the base register after the memory access.
+ PostIndex = 0b01,
+
+    /// Mutate the base register before the memory access.
+ PreIndex = 0b11
+}
+
+/// The struct that represents an A64 halfword instruction that can be encoded.
+///
+/// LDRH/STRH
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// |  0  1  1  1    1  0  0  1    0                                                                              |
+/// |                                 op imm12.................................... rn.............. rt.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+/// LDRH (pre-index/post-index)
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// |  0  1  1  1    1  0  0  0    0     0                                                                        |
+/// |                                 op    imm9..........................   index rn.............. rt.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct HalfwordImm {
+    /// The number of the 32-bit register to be loaded or stored.
+ rt: u8,
+
+ /// The number of the 64-bit base register to calculate the memory address.
+ rn: u8,
+
+ /// The type of indexing to perform for this instruction.
+ index: Index,
+
+ /// The immediate offset from the base register.
+ imm: i16,
+
+ /// The operation to perform.
+ op: Op
+}
+
+impl HalfwordImm {
+ /// LDRH
+ /// <https://developer.arm.com/documentation/ddi0602/2022-06/Base-Instructions/LDRH--immediate---Load-Register-Halfword--immediate-->
+ pub fn ldrh(rt: u8, rn: u8, imm12: i16) -> Self {
+ Self { rt, rn, index: Index::None, imm: imm12, op: Op::Load }
+ }
+
+ /// LDRH (pre-index)
+ /// <https://developer.arm.com/documentation/ddi0602/2022-06/Base-Instructions/LDRH--immediate---Load-Register-Halfword--immediate-->
+ pub fn ldrh_pre(rt: u8, rn: u8, imm9: i16) -> Self {
+ Self { rt, rn, index: Index::PreIndex, imm: imm9, op: Op::Load }
+ }
+
+ /// LDRH (post-index)
+ /// <https://developer.arm.com/documentation/ddi0602/2022-06/Base-Instructions/LDRH--immediate---Load-Register-Halfword--immediate-->
+ pub fn ldrh_post(rt: u8, rn: u8, imm9: i16) -> Self {
+ Self { rt, rn, index: Index::PostIndex, imm: imm9, op: Op::Load }
+ }
+
+ /// STRH
+ /// <https://developer.arm.com/documentation/ddi0602/2022-06/Base-Instructions/STRH--immediate---Store-Register-Halfword--immediate-->
+ pub fn strh(rt: u8, rn: u8, imm12: i16) -> Self {
+ Self { rt, rn, index: Index::None, imm: imm12, op: Op::Store }
+ }
+
+ /// STRH (pre-index)
+ /// <https://developer.arm.com/documentation/ddi0602/2022-06/Base-Instructions/STRH--immediate---Store-Register-Halfword--immediate-->
+ pub fn strh_pre(rt: u8, rn: u8, imm9: i16) -> Self {
+ Self { rt, rn, index: Index::PreIndex, imm: imm9, op: Op::Store }
+ }
+
+ /// STRH (post-index)
+ /// <https://developer.arm.com/documentation/ddi0602/2022-06/Base-Instructions/STRH--immediate---Store-Register-Halfword--immediate-->
+ pub fn strh_post(rt: u8, rn: u8, imm9: i16) -> Self {
+ Self { rt, rn, index: Index::PostIndex, imm: imm9, op: Op::Store }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Loads-and-Stores?lang=en>
+const FAMILY: u32 = 0b111100;
+
+impl From<HalfwordImm> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: HalfwordImm) -> Self {
+ let (opc, imm) = match inst.index {
+ Index::None => {
+ assert_eq!(inst.imm & 1, 0, "immediate offset must be even");
+ let imm12 = truncate_imm::<_, 12>(inst.imm / 2);
+ (0b100, imm12)
+ },
+ Index::PreIndex | Index::PostIndex => {
+ let imm9 = truncate_imm::<_, 9>(inst.imm);
+ (0b000, (imm9 << 2) | (inst.index as u32))
+ }
+ };
+
+ 0
+ | (FAMILY << 25)
+ | ((opc | (inst.op as u32)) << 22)
+ | (imm << 10)
+ | ((inst.rn as u32) << 5)
+ | (inst.rt as u32)
+ }
+}
+
+impl From<HalfwordImm> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: HalfwordImm) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_ldrh() {
+ let inst = HalfwordImm::ldrh(0, 1, 8);
+ let result: u32 = inst.into();
+ assert_eq!(0x79401020, result);
+ }
+
+ #[test]
+ fn test_ldrh_pre() {
+ let inst = HalfwordImm::ldrh_pre(0, 1, 16);
+ let result: u32 = inst.into();
+ assert_eq!(0x78410c20, result);
+ }
+
+ #[test]
+ fn test_ldrh_post() {
+ let inst = HalfwordImm::ldrh_post(0, 1, 24);
+ let result: u32 = inst.into();
+ assert_eq!(0x78418420, result);
+ }
+
+ #[test]
+ fn test_ldrh_post_negative() {
+ let inst = HalfwordImm::ldrh_post(0, 1, -24);
+ let result: u32 = inst.into();
+ assert_eq!(0x785e8420, result);
+ }
+
+ #[test]
+ fn test_strh() {
+ let inst = HalfwordImm::strh(0, 1, 0);
+ let result: u32 = inst.into();
+ assert_eq!(0x79000020, result);
+ }
+
+ #[test]
+ fn test_strh_pre() {
+ let inst = HalfwordImm::strh_pre(0, 1, 0);
+ let result: u32 = inst.into();
+ assert_eq!(0x78000c20, result);
+ }
+
+ #[test]
+ fn test_strh_post() {
+ let inst = HalfwordImm::strh_post(0, 1, 0);
+ let result: u32 = inst.into();
+ assert_eq!(0x78000420, result);
+ }
+}
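
One encoding subtlety above is worth spelling out: the non-indexed forms store a scaled offset, halving the byte offset before packing it into imm12 (hence the assert on even offsets), while the pre/post-index forms store an unscaled 9-bit offset. A sketch, assuming `HalfwordImm` from this commit is in scope:

```rust
fn main() {
    // LDRH w0, [x1, #8]: the 8-byte offset is stored as imm12 = 4,
    // i.e. the offset in halfwords rather than bytes.
    let insn: u32 = HalfwordImm::ldrh(0, 1, 8).into();
    assert_eq!((insn >> 10) & 0xfff, 4);

    // HalfwordImm::ldrh(0, 1, 7) would panic: odd offsets are unencodable.
}
```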
diff --git a/yjit/src/asm/arm64/inst/load_literal.rs b/yjit/src/asm/arm64/inst/load_literal.rs
new file mode 100644
index 0000000000..817e893553
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/load_literal.rs
@@ -0,0 +1,89 @@
+use super::super::arg::{InstructionOffset, truncate_imm};
+
+/// The size of the operands being operated on.
+enum Opc {
+ Size32 = 0b00,
+ Size64 = 0b01,
+}
+
+/// A convenience function so that we can convert the number of bits of a
+/// register operand directly into an Opc enum variant.
+impl From<u8> for Opc {
+ fn from(num_bits: u8) -> Self {
+ match num_bits {
+ 64 => Opc::Size64,
+ 32 => Opc::Size32,
+ _ => panic!("Invalid number of bits: {}", num_bits)
+ }
+ }
+}
+
+/// The struct that represents an A64 load literal instruction that can be encoded.
+///
+/// LDR
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// |        0  1    1  0  0  0                                                                                   |
+/// | opc..                       imm19...........................................................  rt.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct LoadLiteral {
+ /// The number of the register to load the value into.
+ rt: u8,
+
+    /// The PC-relative offset, in instructions, to load the value from.
+ offset: InstructionOffset,
+
+ /// The size of the operands being operated on.
+ opc: Opc
+}
+
+impl LoadLiteral {
+ /// LDR (load literal)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDR--literal---Load-Register--literal--?lang=en>
+ pub fn ldr_literal(rt: u8, offset: InstructionOffset, num_bits: u8) -> Self {
+ Self { rt, offset, opc: num_bits.into() }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Loads-and-Stores?lang=en>
+const FAMILY: u32 = 0b0100;
+
+impl From<LoadLiteral> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: LoadLiteral) -> Self {
+ 0
+ | ((inst.opc as u32) << 30)
+ | (1 << 28)
+ | (FAMILY << 25)
+ | (truncate_imm::<_, 19>(inst.offset) << 5)
+ | (inst.rt as u32)
+ }
+}
+
+impl From<LoadLiteral> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: LoadLiteral) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_ldr_positive() {
+ let inst = LoadLiteral::ldr_literal(0, 5.into(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0x580000a0, result);
+ }
+
+ #[test]
+ fn test_ldr_negative() {
+ let inst = LoadLiteral::ldr_literal(0, (-5).into(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0x58ffff60, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/load_register.rs b/yjit/src/asm/arm64/inst/load_register.rs
new file mode 100644
index 0000000000..3d94e8da1f
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/load_register.rs
@@ -0,0 +1,108 @@
+/// Whether or not to shift the register.
+enum S {
+ Shift = 1,
+ NoShift = 0
+}
+
+/// The option for this instruction.
+enum Option {
+ UXTW = 0b010,
+ LSL = 0b011,
+ SXTW = 0b110,
+ SXTX = 0b111
+}
+
+/// The size of the operands of this instruction.
+enum Size {
+ Size32 = 0b10,
+ Size64 = 0b11
+}
+
+/// A convenience function so that we can convert the number of bits of a
+/// register operand directly into a Size enum variant.
+impl From<u8> for Size {
+ fn from(num_bits: u8) -> Self {
+ match num_bits {
+ 64 => Size::Size64,
+ 32 => Size::Size32,
+ _ => panic!("Invalid number of bits: {}", num_bits)
+ }
+ }
+}
+
+/// The struct that represents an A64 load instruction that can be encoded.
+///
+/// LDR
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 1 1 1 0 0 0 0 1 1 1 0 |
+/// | size. rm.............. option.. S rn.............. rt.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct LoadRegister {
+ /// The number of the register to load the value into.
+ rt: u8,
+
+ /// The base register with which to form the address.
+ rn: u8,
+
+ /// Whether or not to shift the value of the register.
+ s: S,
+
+ /// The option associated with this instruction that controls the shift.
+ option: Option,
+
+ /// The number of the offset register.
+ rm: u8,
+
+ /// The size of the operands.
+ size: Size
+}
+
+impl LoadRegister {
+ /// LDR
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDR--register---Load-Register--register--?lang=en>
+ pub fn ldr(rt: u8, rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self { rt, rn, s: S::NoShift, option: Option::LSL, rm, size: num_bits.into() }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Loads-and-Stores?lang=en>
+const FAMILY: u32 = 0b0100;
+
+impl From<LoadRegister> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: LoadRegister) -> Self {
+ 0
+ | ((inst.size as u32) << 30)
+ | (0b11 << 28)
+ | (FAMILY << 25)
+ | (0b11 << 21)
+ | ((inst.rm as u32) << 16)
+ | ((inst.option as u32) << 13)
+ | ((inst.s as u32) << 12)
+ | (0b10 << 10)
+ | ((inst.rn as u32) << 5)
+ | (inst.rt as u32)
+ }
+}
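+
+// Worked example (matches test_ldr below): ldr(0, 1, 2, 64) uses Option::LSL
+// (0b011) with S::NoShift, so the encoding is
+// (0b11 << 30) | (0b11 << 28) | (0b0100 << 25) | (0b11 << 21) | (2 << 16)
+//   | (0b011 << 13) | (0 << 12) | (0b10 << 10) | (1 << 5) | 0 = 0xf8626820.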
+
+impl From<LoadRegister> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: LoadRegister) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_ldr() {
+ let inst = LoadRegister::ldr(0, 1, 2, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf8626820, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/load_store.rs b/yjit/src/asm/arm64/inst/load_store.rs
new file mode 100644
index 0000000000..e27909ae35
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/load_store.rs
@@ -0,0 +1,249 @@
+use super::super::arg::truncate_imm;
+
+/// The size of the operands being operated on.
+enum Size {
+ Size8 = 0b00,
+ Size16 = 0b01,
+ Size32 = 0b10,
+ Size64 = 0b11,
+}
+
+/// A convenience function so that we can convert the number of bits of a
+/// register operand directly into a Size enum variant.
+impl From<u8> for Size {
+ fn from(num_bits: u8) -> Self {
+ match num_bits {
+ 64 => Size::Size64,
+ 32 => Size::Size32,
+ _ => panic!("Invalid number of bits: {}", num_bits)
+ }
+ }
+}
+
+/// The operation to perform for this instruction.
+enum Opc {
+ STR = 0b00,
+ LDR = 0b01,
+ LDURSW = 0b10
+}
+
+/// What kind of indexing to perform for this instruction.
+enum Index {
+ None = 0b00,
+ PostIndex = 0b01,
+ PreIndex = 0b11
+}
+
+/// The struct that represents an A64 load or store instruction that can be
+/// encoded.
+///
+/// LDR/LDUR/LDURSW/STR/STUR
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 1 1 1 0 0 0 0 |
+/// | size. opc.. imm9.......................... idx.. rn.............. rt.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct LoadStore {
+ /// The number of the register to load the value into.
+ rt: u8,
+
+ /// The base register with which to form the address.
+ rn: u8,
+
+ /// What kind of indexing to perform for this instruction.
+ idx: Index,
+
+ /// The optional signed immediate byte offset from the base register.
+ imm9: i16,
+
+ /// The operation to perform for this instruction.
+ opc: Opc,
+
+ /// The size of the operands being operated on.
+ size: Size
+}
+
+impl LoadStore {
+ /// LDR (immediate, post-index)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/LDR--immediate---Load-Register--immediate-->
+ pub fn ldr_post(rt: u8, rn: u8, imm9: i16, num_bits: u8) -> Self {
+ Self { rt, rn, idx: Index::PostIndex, imm9, opc: Opc::LDR, size: num_bits.into() }
+ }
+
+ /// LDR (immediate, pre-index)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/LDR--immediate---Load-Register--immediate-->
+ pub fn ldr_pre(rt: u8, rn: u8, imm9: i16, num_bits: u8) -> Self {
+ Self { rt, rn, idx: Index::PreIndex, imm9, opc: Opc::LDR, size: num_bits.into() }
+ }
+
+ /// LDUR (load register, unscaled)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDUR--Load-Register--unscaled--?lang=en>
+ pub fn ldur(rt: u8, rn: u8, imm9: i16, num_bits: u8) -> Self {
+ Self { rt, rn, idx: Index::None, imm9, opc: Opc::LDR, size: num_bits.into() }
+ }
+
+ /// LDURH (load register, halfword, unscaled)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDURH--Load-Register-Halfword--unscaled--?lang=en>
+ pub fn ldurh(rt: u8, rn: u8, imm9: i16) -> Self {
+ Self { rt, rn, idx: Index::None, imm9, opc: Opc::LDR, size: Size::Size16 }
+ }
+
+ /// LDURB (load register, byte, unscaled)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDURB--Load-Register-Byte--unscaled--?lang=en>
+ pub fn ldurb(rt: u8, rn: u8, imm9: i16) -> Self {
+ Self { rt, rn, idx: Index::None, imm9, opc: Opc::LDR, size: Size::Size8 }
+ }
+
+ /// LDURSW (load register, unscaled, signed)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDURSW--Load-Register-Signed-Word--unscaled--?lang=en>
+ pub fn ldursw(rt: u8, rn: u8, imm9: i16) -> Self {
+ Self { rt, rn, idx: Index::None, imm9, opc: Opc::LDURSW, size: Size::Size32 }
+ }
+
+ /// STR (immediate, post-index)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/STR--immediate---Store-Register--immediate-->
+ pub fn str_post(rt: u8, rn: u8, imm9: i16, num_bits: u8) -> Self {
+ Self { rt, rn, idx: Index::PostIndex, imm9, opc: Opc::STR, size: num_bits.into() }
+ }
+
+ /// STR (immediate, pre-index)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/STR--immediate---Store-Register--immediate-->
+ pub fn str_pre(rt: u8, rn: u8, imm9: i16, num_bits: u8) -> Self {
+ Self { rt, rn, idx: Index::PreIndex, imm9, opc: Opc::STR, size: num_bits.into() }
+ }
+
+ /// STUR (store register, unscaled)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/STUR--Store-Register--unscaled--?lang=en>
+ pub fn stur(rt: u8, rn: u8, imm9: i16, num_bits: u8) -> Self {
+ Self { rt, rn, idx: Index::None, imm9, opc: Opc::STR, size: num_bits.into() }
+ }
+
+ /// STURH (store register, halfword, unscaled)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/STURH--Store-Register-Halfword--unscaled--?lang=en>
+ pub fn sturh(rt: u8, rn: u8, imm9: i16) -> Self {
+ Self { rt, rn, idx: Index::None, imm9, opc: Opc::STR, size: Size::Size16 }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Loads-and-Stores?lang=en>
+const FAMILY: u32 = 0b0100;
+
+impl From<LoadStore> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: LoadStore) -> Self {
+ 0
+ | ((inst.size as u32) << 30)
+ | (0b11 << 28)
+ | (FAMILY << 25)
+ | ((inst.opc as u32) << 22)
+ | (truncate_imm::<_, 9>(inst.imm9) << 12)
+ | ((inst.idx as u32) << 10)
+ | ((inst.rn as u32) << 5)
+ | (inst.rt as u32)
+ }
+}
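+
+// Worked example (matches test_str_post below): str_post(0, 1, -16, 64) stores
+// imm9 as the 9-bit two's complement of -16, truncate_imm::<_, 9>(-16) =
+// 0b111110000 = 0x1f0, so the encoding is
+// (0b11 << 30) | (0b11 << 28) | (0b0100 << 25) | (0b00 << 22) | (0x1f0 << 12)
+//   | (0b01 << 10) | (1 << 5) | 0 = 0xf81f0420.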
+
+impl From<LoadStore> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: LoadStore) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_ldr_post() {
+ let inst = LoadStore::ldr_post(0, 1, 16, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf8410420, result);
+ }
+
+ #[test]
+ fn test_ldr_pre() {
+ let inst = LoadStore::ldr_pre(0, 1, 16, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf8410c20, result);
+ }
+
+ #[test]
+ fn test_ldur() {
+ let inst = LoadStore::ldur(0, 1, 0, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf8400020, result);
+ }
+
+ #[test]
+ fn test_ldurb() {
+ let inst = LoadStore::ldurb(0, 1, 0);
+ let result: u32 = inst.into();
+ assert_eq!(0x38400020, result);
+ }
+
+ #[test]
+ fn test_ldurh() {
+ let inst = LoadStore::ldurh(0, 1, 0);
+ let result: u32 = inst.into();
+ assert_eq!(0x78400020, result);
+ }
+
+ #[test]
+ fn test_ldur_with_imm() {
+ let inst = LoadStore::ldur(0, 1, 123, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf847b020, result);
+ }
+
+ #[test]
+ fn test_ldursw() {
+ let inst = LoadStore::ldursw(0, 1, 0);
+ let result: u32 = inst.into();
+ assert_eq!(0xb8800020, result);
+ }
+
+ #[test]
+ fn test_ldursw_with_imm() {
+ let inst = LoadStore::ldursw(0, 1, 123);
+ let result: u32 = inst.into();
+ assert_eq!(0xb887b020, result);
+ }
+
+ #[test]
+ fn test_str_post() {
+ let inst = LoadStore::str_post(0, 1, -16, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf81f0420, result);
+ }
+
+ #[test]
+ fn test_str_pre() {
+ let inst = LoadStore::str_pre(0, 1, -16, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf81f0c20, result);
+ }
+
+ #[test]
+ fn test_stur() {
+ let inst = LoadStore::stur(0, 1, 0, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf8000020, result);
+ }
+
+ #[test]
+ fn test_stur_negative_offset() {
+ let inst = LoadStore::stur(0, 1, -1, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf81ff020, result);
+ }
+
+ #[test]
+ fn test_stur_positive_offset() {
+ let inst = LoadStore::stur(0, 1, 255, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf80ff020, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/load_store_exclusive.rs b/yjit/src/asm/arm64/inst/load_store_exclusive.rs
new file mode 100644
index 0000000000..1106b4cb37
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/load_store_exclusive.rs
@@ -0,0 +1,109 @@
+/// The operation being performed for this instruction.
+enum Op {
+ Store = 0,
+ Load = 1
+}
+
+/// The size of the registers being operated on.
+enum Size {
+ Size32 = 0b10,
+ Size64 = 0b11
+}
+
+/// A convenience function so that we can convert the number of bits of a
+/// register operand directly into a Size enum variant.
+impl From<u8> for Size {
+ fn from(num_bits: u8) -> Self {
+ match num_bits {
+ 64 => Size::Size64,
+ 32 => Size::Size32,
+ _ => panic!("Invalid number of bits: {}", num_bits)
+ }
+ }
+}
+
+/// The struct that represents an A64 load or store exclusive instruction that
+/// can be encoded.
+///
+/// LDAXR/STLXR
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 1 0 0 1 0 0 0 0 0 1 1 1 1 1 1 |
+/// | size. op rs.............. rn.............. rt.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct LoadStoreExclusive {
+ /// The number of the register to be loaded.
+ rt: u8,
+
+ /// The base register with which to form the address.
+ rn: u8,
+
+ /// The register to be used for the status result if it applies to this
+ /// operation. Otherwise it's the zero register.
+ rs: u8,
+
+ /// The operation being performed for this instruction.
+ op: Op,
+
+ /// The size of the registers being operated on.
+ size: Size
+}
+
+impl LoadStoreExclusive {
+ /// LDAXR
+ /// <https://developer.arm.com/documentation/ddi0602/2021-12/Base-Instructions/LDAXR--Load-Acquire-Exclusive-Register->
+ pub fn ldaxr(rt: u8, rn: u8, num_bits: u8) -> Self {
+ Self { rt, rn, rs: 31, op: Op::Load, size: num_bits.into() }
+ }
+
+ /// STLXR
+ /// <https://developer.arm.com/documentation/ddi0602/2021-12/Base-Instructions/STLXR--Store-Release-Exclusive-Register->
+ pub fn stlxr(rs: u8, rt: u8, rn: u8, num_bits: u8) -> Self {
+ Self { rt, rn, rs, op: Op::Store, size: num_bits.into() }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Loads-and-Stores?lang=en>
+const FAMILY: u32 = 0b0100;
+
+impl From<LoadStoreExclusive> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: LoadStoreExclusive) -> Self {
+ 0
+ | ((inst.size as u32) << 30)
+ | (FAMILY << 25)
+ | ((inst.op as u32) << 22)
+ | ((inst.rs as u32) << 16)
+ | (0b111111 << 10)
+ | ((inst.rn as u32) << 5)
+ | (inst.rt as u32)
+ }
+}
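+
+// Worked example (matches test_ldaxr below): ldaxr(16, 0, 64) wires rs to the
+// zero register (31), so the encoding is
+// (0b11 << 30) | (0b0100 << 25) | (1 << 22) | (31 << 16) | (0b111111 << 10)
+//   | (0 << 5) | 16 = 0xc85ffc10.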
+
+impl From<LoadStoreExclusive> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: LoadStoreExclusive) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_ldaxr() {
+ let inst = LoadStoreExclusive::ldaxr(16, 0, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xc85ffc10, result);
+ }
+
+ #[test]
+ fn test_stlxr() {
+ let inst = LoadStoreExclusive::stlxr(17, 16, 0, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xc811fc10, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/logical_imm.rs b/yjit/src/asm/arm64/inst/logical_imm.rs
new file mode 100644
index 0000000000..d57ad5f5b7
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/logical_imm.rs
@@ -0,0 +1,154 @@
+use super::super::arg::{BitmaskImmediate, Sf};
+
+/// Which operation to perform.
+enum Opc {
+ /// The AND operation.
+ And = 0b00,
+
+ /// The ORR operation.
+ Orr = 0b01,
+
+ /// The EOR operation.
+ Eor = 0b10,
+
+ /// The ANDS operation.
+ Ands = 0b11
+}
+
+/// The struct that represents an A64 bitwise immediate instruction that can be
+/// encoded.
+///
+/// AND/ORR/EOR/ANDS (immediate)
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 1 0 0 1 0 0 |
+/// | sf opc.. N immr............... imms............... rn.............. rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct LogicalImm {
+ /// The register number of the destination register.
+ rd: u8,
+
+ /// The register number of the first operand register.
+ rn: u8,
+
+ /// The bitmask immediate value for this operation.
+ imm: BitmaskImmediate,
+
+ /// The opcode for this instruction.
+ opc: Opc,
+
+ /// Whether or not this instruction is operating on 64-bit operands.
+ sf: Sf
+}
+
+impl LogicalImm {
+ /// AND (bitmask immediate)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/AND--immediate---Bitwise-AND--immediate--?lang=en>
+ pub fn and(rd: u8, rn: u8, imm: BitmaskImmediate, num_bits: u8) -> Self {
+ Self { rd, rn, imm, opc: Opc::And, sf: num_bits.into() }
+ }
+
+ /// ANDS (bitmask immediate)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/ANDS--immediate---Bitwise-AND--immediate---setting-flags-?lang=en>
+ pub fn ands(rd: u8, rn: u8, imm: BitmaskImmediate, num_bits: u8) -> Self {
+ Self { rd, rn, imm, opc: Opc::Ands, sf: num_bits.into() }
+ }
+
+ /// EOR (bitmask immediate)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/EOR--immediate---Bitwise-Exclusive-OR--immediate-->
+ pub fn eor(rd: u8, rn: u8, imm: BitmaskImmediate, num_bits: u8) -> Self {
+ Self { rd, rn, imm, opc: Opc::Eor, sf: num_bits.into() }
+ }
+
+ /// MOV (bitmask immediate)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/MOV--bitmask-immediate---Move--bitmask-immediate---an-alias-of-ORR--immediate--?lang=en>
+ pub fn mov(rd: u8, imm: BitmaskImmediate, num_bits: u8) -> Self {
+ Self { rd, rn: 0b11111, imm, opc: Opc::Orr, sf: num_bits.into() }
+ }
+
+ /// ORR (bitmask immediate)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/ORR--immediate---Bitwise-OR--immediate-->
+ pub fn orr(rd: u8, rn: u8, imm: BitmaskImmediate, num_bits: u8) -> Self {
+ Self { rd, rn, imm, opc: Opc::Orr, sf: num_bits.into() }
+ }
+
+ /// TST (bitmask immediate)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/TST--immediate---Test-bits--immediate---an-alias-of-ANDS--immediate--?lang=en>
+ pub fn tst(rn: u8, imm: BitmaskImmediate, num_bits: u8) -> Self {
+ Self::ands(31, rn, imm, num_bits)
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Data-Processing----Immediate?lang=en#log_imm>
+const FAMILY: u32 = 0b1001;
+
+impl From<LogicalImm> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: LogicalImm) -> Self {
+ let imm: u32 = inst.imm.encode();
+
+ 0
+ | ((inst.sf as u32) << 31)
+ | ((inst.opc as u32) << 29)
+ | (FAMILY << 25)
+ | (imm << 10)
+ | ((inst.rn as u32) << 5)
+ | inst.rd as u32
+ }
+}
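+
+// Worked example (matches test_and below): the bitmask immediate 7 (a run of
+// three ones in a 64-bit element) encodes as N=1, immr=0b000000, imms=0b000010,
+// i.e. imm = 0b1_000000_000010 = 0x1002, so and(0, 1, 7, 64) is
+// (1 << 31) | (0b00 << 29) | (0b1001 << 25) | (0x1002 << 10) | (1 << 5) | 0
+//   = 0x92400820.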
+
+impl From<LogicalImm> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: LogicalImm) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_and() {
+ let inst = LogicalImm::and(0, 1, 7.try_into().unwrap(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0x92400820, result);
+ }
+
+ #[test]
+ fn test_ands() {
+ let inst = LogicalImm::ands(0, 1, 7.try_into().unwrap(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf2400820, result);
+ }
+
+ #[test]
+ fn test_eor() {
+ let inst = LogicalImm::eor(0, 1, 7.try_into().unwrap(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xd2400820, result);
+ }
+
+ #[test]
+ fn test_mov() {
+ let inst = LogicalImm::mov(0, 0x5555555555555555.try_into().unwrap(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xb200f3e0, result);
+ }
+
+ #[test]
+ fn test_orr() {
+ let inst = LogicalImm::orr(0, 1, 7.try_into().unwrap(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xb2400820, result);
+ }
+
+ #[test]
+ fn test_tst() {
+ let inst = LogicalImm::tst(1, 7.try_into().unwrap(), 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf240083f, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/logical_reg.rs b/yjit/src/asm/arm64/inst/logical_reg.rs
new file mode 100644
index 0000000000..18edff606f
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/logical_reg.rs
@@ -0,0 +1,207 @@
+use super::super::arg::{Sf, truncate_uimm};
+
+/// Whether or not this is a NOT instruction.
+enum N {
+ /// This is not a NOT instruction.
+ No = 0,
+
+ /// This is a NOT instruction.
+ Yes = 1
+}
+
+/// The type of shift to perform on the second operand register.
+enum Shift {
+ LSL = 0b00, // logical shift left (unsigned)
+ LSR = 0b01, // logical shift right (unsigned)
+ ASR = 0b10, // arithmetic shift right (signed)
+ ROR = 0b11 // rotate right (unsigned)
+}
+
+/// Which operation to perform.
+enum Opc {
+ /// The AND operation.
+ And = 0b00,
+
+ /// The ORR operation.
+ Orr = 0b01,
+
+ /// The EOR operation.
+ Eor = 0b10,
+
+ /// The ANDS operation.
+ Ands = 0b11
+}
+
+/// The struct that represents an A64 logical register instruction that can be
+/// encoded.
+///
+/// AND/ORR/EOR/ANDS (shifted register)
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 0 1 0 1 0 |
+/// | sf opc.. shift N rm.............. imm6............... rn.............. rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct LogicalReg {
+ /// The register number of the destination register.
+ rd: u8,
+
+ /// The register number of the first operand register.
+ rn: u8,
+
+ /// The amount to shift the second operand register.
+ imm6: u8,
+
+ /// The register number of the second operand register.
+ rm: u8,
+
+ /// Whether or not this is a NOT instruction.
+ n: N,
+
+ /// The type of shift to perform on the second operand register.
+ shift: Shift,
+
+ /// The opcode for this instruction.
+ opc: Opc,
+
+ /// Whether or not this instruction is operating on 64-bit operands.
+ sf: Sf
+}
+
+impl LogicalReg {
+ /// AND (shifted register)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/AND--shifted-register---Bitwise-AND--shifted-register--?lang=en>
+ pub fn and(rd: u8, rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self { rd, rn, imm6: 0, rm, n: N::No, shift: Shift::LSL, opc: Opc::And, sf: num_bits.into() }
+ }
+
+ /// ANDS (shifted register)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/ANDS--shifted-register---Bitwise-AND--shifted-register---setting-flags-?lang=en>
+ pub fn ands(rd: u8, rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self { rd, rn, imm6: 0, rm, n: N::No, shift: Shift::LSL, opc: Opc::Ands, sf: num_bits.into() }
+ }
+
+ /// EOR (shifted register)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/EOR--shifted-register---Bitwise-Exclusive-OR--shifted-register-->
+ pub fn eor(rd: u8, rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self { rd, rn, imm6: 0, rm, n: N::No, shift: Shift::LSL, opc: Opc::Eor, sf: num_bits.into() }
+ }
+
+ /// MOV (register)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/MOV--register---Move--register---an-alias-of-ORR--shifted-register--?lang=en>
+ pub fn mov(rd: u8, rm: u8, num_bits: u8) -> Self {
+ Self { rd, rn: 0b11111, imm6: 0, rm, n: N::No, shift: Shift::LSL, opc: Opc::Orr, sf: num_bits.into() }
+ }
+
+ /// MVN (shifted register)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/MVN--Bitwise-NOT--an-alias-of-ORN--shifted-register--?lang=en>
+ pub fn mvn(rd: u8, rm: u8, num_bits: u8) -> Self {
+ Self { rd, rn: 0b11111, imm6: 0, rm, n: N::Yes, shift: Shift::LSL, opc: Opc::Orr, sf: num_bits.into() }
+ }
+
+ /// ORN (shifted register)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/ORN--shifted-register---Bitwise-OR-NOT--shifted-register-->
+ pub fn orn(rd: u8, rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self { rd, rn, imm6: 0, rm, n: N::Yes, shift: Shift::LSL, opc: Opc::Orr, sf: num_bits.into() }
+ }
+
+ /// ORR (shifted register)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/ORR--shifted-register---Bitwise-OR--shifted-register-->
+ pub fn orr(rd: u8, rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self { rd, rn, imm6: 0, rm, n: N::No, shift: Shift::LSL, opc: Opc::Orr, sf: num_bits.into() }
+ }
+
+ /// TST (shifted register)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/TST--shifted-register---Test--shifted-register---an-alias-of-ANDS--shifted-register--?lang=en>
+ pub fn tst(rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self { rd: 31, rn, imm6: 0, rm, n: N::No, shift: Shift::LSL, opc: Opc::Ands, sf: num_bits.into() }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Data-Processing----Register?lang=en>
+const FAMILY: u32 = 0b0101;
+
+impl From<LogicalReg> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: LogicalReg) -> Self {
+ 0
+ | ((inst.sf as u32) << 31)
+ | ((inst.opc as u32) << 29)
+ | (FAMILY << 25)
+ | ((inst.shift as u32) << 22)
+ | ((inst.n as u32) << 21)
+ | ((inst.rm as u32) << 16)
+ | (truncate_uimm::<_, 6>(inst.imm6) << 10)
+ | ((inst.rn as u32) << 5)
+ | inst.rd as u32
+ }
+}
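+
+// Worked example (matches test_mvn below): mvn(0, 1, 64) is ORN with rn wired
+// to the zero register, so the encoding is
+// (1 << 31) | (0b01 << 29) | (0b0101 << 25) | (0b00 << 22) | (1 << 21)
+//   | (1 << 16) | (0 << 10) | (31 << 5) | 0 = 0xaa2103e0.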
+
+impl From<LogicalReg> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: LogicalReg) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_and() {
+ let inst = LogicalReg::and(0, 1, 2, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0x8a020020, result);
+ }
+
+ #[test]
+ fn test_ands() {
+ let inst = LogicalReg::ands(0, 1, 2, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xea020020, result);
+ }
+
+ #[test]
+ fn test_eor() {
+ let inst = LogicalReg::eor(0, 1, 2, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xca020020, result);
+ }
+
+ #[test]
+ fn test_mov() {
+ let inst = LogicalReg::mov(0, 1, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xaa0103e0, result);
+ }
+
+ #[test]
+ fn test_mvn() {
+ let inst = LogicalReg::mvn(0, 1, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xaa2103e0, result);
+ }
+
+ #[test]
+ fn test_orn() {
+ let inst = LogicalReg::orn(0, 1, 2, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xaa220020, result);
+ }
+
+ #[test]
+ fn test_orr() {
+ let inst = LogicalReg::orr(0, 1, 2, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xaa020020, result);
+ }
+
+ #[test]
+ fn test_tst() {
+ let inst = LogicalReg::tst(0, 1, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xea01001f, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/madd.rs b/yjit/src/asm/arm64/inst/madd.rs
new file mode 100644
index 0000000000..71f2ab230a
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/madd.rs
@@ -0,0 +1,73 @@
+use super::super::arg::Sf;
+
+/// The struct that represents an A64 multiply-add instruction that can be
+/// encoded.
+///
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 0 0 1 1 0 1 1 0 0 0 0 |
+/// | sf rm.............. ra.............. rn.............. rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct MAdd {
+ /// The number of the general-purpose destination register.
+ rd: u8,
+
+ /// The number of the first general-purpose source register.
+ rn: u8,
+
+ /// The number of the third general-purpose source register.
+ ra: u8,
+
+ /// The number of the second general-purpose source register.
+ rm: u8,
+
+ /// The size of the registers of this instruction.
+ sf: Sf
+}
+
+impl MAdd {
+ /// MUL
+ /// <https://developer.arm.com/documentation/ddi0602/2023-06/Base-Instructions/MUL--Multiply--an-alias-of-MADD->
+ pub fn mul(rd: u8, rn: u8, rm: u8, num_bits: u8) -> Self {
+ Self { rd, rn, ra: 0b11111, rm, sf: num_bits.into() }
+ }
+}
+
+impl From<MAdd> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: MAdd) -> Self {
+ 0
+ | ((inst.sf as u32) << 31)
+ | (0b11011 << 24)
+ | ((inst.rm as u32) << 16)
+ | ((inst.ra as u32) << 10)
+ | ((inst.rn as u32) << 5)
+ | (inst.rd as u32)
+ }
+}
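+
+// Worked example (matches test_mul_64 below): mul(0, 1, 2, 64) wires ra to the
+// zero register (31), so the encoding is
+// (1 << 31) | (0b11011 << 24) | (2 << 16) | (31 << 10) | (1 << 5) | 0
+//   = 0x9b027c20.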
+
+impl From<MAdd> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: MAdd) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_mul_32() {
+ let result: u32 = MAdd::mul(0, 1, 2, 32).into();
+ assert_eq!(0x1B027C20, result);
+ }
+
+ #[test]
+ fn test_mul_64() {
+ let result: u32 = MAdd::mul(0, 1, 2, 64).into();
+ assert_eq!(0x9B027C20, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/mod.rs b/yjit/src/asm/arm64/inst/mod.rs
new file mode 100644
index 0000000000..bfffd914ef
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/mod.rs
@@ -0,0 +1,54 @@
+// This module contains various A64 instructions and the logic necessary to
+// encode them into u32s.
+
+mod atomic;
+mod branch;
+mod branch_cond;
+mod breakpoint;
+mod call;
+mod conditional;
+mod data_imm;
+mod data_reg;
+mod halfword_imm;
+mod load_literal;
+mod load_register;
+mod load_store;
+mod load_store_exclusive;
+mod logical_imm;
+mod logical_reg;
+mod madd;
+mod smulh;
+mod mov;
+mod nop;
+mod pc_rel;
+mod reg_pair;
+mod sbfm;
+mod shift_imm;
+mod sys_reg;
+mod test_bit;
+
+pub use atomic::Atomic;
+pub use branch::Branch;
+pub use branch_cond::BranchCond;
+pub use breakpoint::Breakpoint;
+pub use call::Call;
+pub use conditional::Conditional;
+pub use data_imm::DataImm;
+pub use data_reg::DataReg;
+pub use halfword_imm::HalfwordImm;
+pub use load_literal::LoadLiteral;
+pub use load_register::LoadRegister;
+pub use load_store::LoadStore;
+pub use load_store_exclusive::LoadStoreExclusive;
+pub use logical_imm::LogicalImm;
+pub use logical_reg::LogicalReg;
+pub use madd::MAdd;
+pub use smulh::SMulH;
+pub use mov::Mov;
+pub use nop::Nop;
+pub use pc_rel::PCRelative;
+pub use reg_pair::RegisterPair;
+pub use sbfm::SBFM;
+pub use shift_imm::ShiftImm;
+pub use sys_reg::SysReg;
+pub use test_bit::TestBit;
diff --git a/yjit/src/asm/arm64/inst/mov.rs b/yjit/src/asm/arm64/inst/mov.rs
new file mode 100644
index 0000000000..eae4565c3a
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/mov.rs
@@ -0,0 +1,155 @@
+use super::super::arg::Sf;
+
+/// Which operation is being performed.
+enum Op {
+ /// A movz operation which zeroes out the other bits.
+ MOVZ = 0b10,
+
+ /// A movk operation which keeps the other bits in place.
+ MOVK = 0b11
+}
+
+/// How much to shift the immediate by.
+enum Hw {
+ LSL0 = 0b00,
+ LSL16 = 0b01,
+ LSL32 = 0b10,
+ LSL48 = 0b11
+}
+
+impl From<u8> for Hw {
+ fn from(shift: u8) -> Self {
+ match shift {
+ 0 => Hw::LSL0,
+ 16 => Hw::LSL16,
+ 32 => Hw::LSL32,
+ 48 => Hw::LSL48,
+ _ => panic!("Invalid value for shift: {}", shift)
+ }
+ }
+}
+
+/// The struct that represents a MOVK or MOVZ instruction.
+///
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 1 0 0 1 0 1 |
+/// | sf op... hw... imm16.................................................. rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct Mov {
+ /// The register number of the destination register.
+ rd: u8,
+
+ /// The value to move into the register.
+ imm16: u16,
+
+ /// The shift of the value to move.
+ hw: Hw,
+
+ /// Which operation is being performed.
+ op: Op,
+
+ /// Whether or not this instruction is operating on 64-bit operands.
+ sf: Sf
+}
+
+impl Mov {
+ /// MOVK
+ /// <https://developer.arm.com/documentation/ddi0602/2022-03/Base-Instructions/MOVK--Move-wide-with-keep-?lang=en>
+ pub fn movk(rd: u8, imm16: u16, hw: u8, num_bits: u8) -> Self {
+ Self { rd, imm16, hw: hw.into(), op: Op::MOVK, sf: num_bits.into() }
+ }
+
+ /// MOVZ
+ /// <https://developer.arm.com/documentation/ddi0602/2022-03/Base-Instructions/MOVZ--Move-wide-with-zero-?lang=en>
+ pub fn movz(rd: u8, imm16: u16, hw: u8, num_bits: u8) -> Self {
+ Self { rd, imm16, hw: hw.into(), op: Op::MOVZ, sf: num_bits.into() }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Data-Processing----Immediate?lang=en>
+const FAMILY: u32 = 0b1000;
+
+impl From<Mov> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: Mov) -> Self {
+ 0
+ | ((inst.sf as u32) << 31)
+ | ((inst.op as u32) << 29)
+ | (FAMILY << 25)
+ | (0b101 << 23)
+ | ((inst.hw as u32) << 21)
+ | ((inst.imm16 as u32) << 5)
+ | inst.rd as u32
+ }
+}
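+
+// Worked example (matches test_movz_shifted_16 below): movz(0, 123, 16, 64) is
+// (1 << 31) | (0b10 << 29) | (0b1000 << 25) | (0b101 << 23) | (0b01 << 21)
+//   | (123 << 5) | 0 = 0xd2a00f60.
+// A full 64-bit constant is typically materialized as one MOVZ followed by up
+// to three MOVKs, one per non-zero 16-bit chunk.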
+
+impl From<Mov> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: Mov) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_movk_unshifted() {
+ let inst = Mov::movk(0, 123, 0, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf2800f60, result);
+ }
+
+ #[test]
+ fn test_movk_shifted_16() {
+ let inst = Mov::movk(0, 123, 16, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf2A00f60, result);
+ }
+
+ #[test]
+ fn test_movk_shifted_32() {
+ let inst = Mov::movk(0, 123, 32, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf2C00f60, result);
+ }
+
+ #[test]
+ fn test_movk_shifted_48() {
+ let inst = Mov::movk(0, 123, 48, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xf2e00f60, result);
+ }
+
+ #[test]
+ fn test_movz_unshifted() {
+ let inst = Mov::movz(0, 123, 0, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xd2800f60, result);
+ }
+
+ #[test]
+ fn test_movz_shifted_16() {
+ let inst = Mov::movz(0, 123, 16, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xd2a00f60, result);
+ }
+
+ #[test]
+ fn test_movz_shifted_32() {
+ let inst = Mov::movz(0, 123, 32, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xd2c00f60, result);
+ }
+
+ #[test]
+ fn test_movz_shifted_48() {
+ let inst = Mov::movz(0, 123, 48, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xd2e00f60, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/nop.rs b/yjit/src/asm/arm64/inst/nop.rs
new file mode 100644
index 0000000000..081d8558f5
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/nop.rs
@@ -0,0 +1,44 @@
+/// The struct that represents an A64 nop instruction that can be encoded.
+///
+/// NOP
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 1 1 0 1 0 1 0 1 0 0 0 0 0 0 1 1 0 0 1 0 0 0 0 0 0 0 0 1 1 1 1 1 |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct Nop;
+
+impl Nop {
+ /// NOP
+ /// <https://developer.arm.com/documentation/ddi0602/2022-03/Base-Instructions/NOP--No-Operation->
+ pub fn nop() -> Self {
+ Self {}
+ }
+}
+
+impl From<Nop> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(_inst: Nop) -> Self {
+ 0b11010101000000110010000000011111
+ }
+}
+
+impl From<Nop> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: Nop) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_nop() {
+ let inst = Nop::nop();
+ let result: u32 = inst.into();
+ assert_eq!(0xd503201f, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/pc_rel.rs b/yjit/src/asm/arm64/inst/pc_rel.rs
new file mode 100644
index 0000000000..2ea586a778
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/pc_rel.rs
@@ -0,0 +1,107 @@
+/// Which operation to perform for the PC-relative instruction.
+enum Op {
+ /// Form a PC-relative address.
+ ADR = 0,
+
+ /// Form a PC-relative address to a 4KB page.
+ ADRP = 1
+}
+
+/// The struct that represents an A64 PC-relative address instruction that can
+/// be encoded.
+///
+/// ADR
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 1 0 0 0 0 |
+/// | op immlo immhi........................................................... rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct PCRelative {
+ /// The number for the general-purpose register to load the address into.
+ rd: u8,
+
+ /// The number of bytes to add to the PC to form the address.
+ imm: i32,
+
+ /// Which operation to perform for this instruction.
+ op: Op
+}
+
+impl PCRelative {
+ /// ADR
+ /// <https://developer.arm.com/documentation/ddi0602/2022-03/Base-Instructions/ADR--Form-PC-relative-address->
+ pub fn adr(rd: u8, imm: i32) -> Self {
+ Self { rd, imm, op: Op::ADR }
+ }
+
+ /// ADRP
+ /// <https://developer.arm.com/documentation/ddi0602/2022-03/Base-Instructions/ADRP--Form-PC-relative-address-to-4KB-page->
+ pub fn adrp(rd: u8, imm: i32) -> Self {
+ Self { rd, imm: imm >> 12, op: Op::ADRP }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Data-Processing----Immediate?lang=en>
+const FAMILY: u32 = 0b1000;
+
+impl From<PCRelative> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: PCRelative) -> Self {
+ let immlo = (inst.imm & 0b11) as u32;
+ let mut immhi = ((inst.imm >> 2) & ((1 << 18) - 1)) as u32;
+
+ // Toggle the sign bit if necessary.
+ if inst.imm < 0 {
+ immhi |= 1 << 18;
+ }
+
+ 0
+ | ((inst.op as u32) << 31)
+ | (immlo << 29)
+ | (FAMILY << 25)
+ | (immhi << 5)
+ | inst.rd as u32
+ }
+}
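+
+// Worked example (matches test_adr_negative below): adr(0, -5) splits the
+// offset into immlo = -5 & 0b11 = 0b11 and immhi = (-5 >> 2) & 0x3ffff =
+// 0x3fffe; the sign bit then makes immhi = 0x7fffe, so the encoding is
+// (0 << 31) | (0b11 << 29) | (0b1000 << 25) | (0x7fffe << 5) | 0 = 0x70ffffc0.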
+
+impl From<PCRelative> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: PCRelative) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_adr_positive() {
+ let inst = PCRelative::adr(0, 5);
+ let result: u32 = inst.into();
+ assert_eq!(0x30000020, result);
+ }
+
+ #[test]
+ fn test_adr_negative() {
+ let inst = PCRelative::adr(0, -5);
+ let result: u32 = inst.into();
+ assert_eq!(0x70ffffc0, result);
+ }
+
+ #[test]
+ fn test_adrp_positive() {
+ let inst = PCRelative::adrp(0, 0x4000);
+ let result: u32 = inst.into();
+ assert_eq!(0x90000020, result);
+ }
+
+ #[test]
+ fn test_adrp_negative() {
+ let inst = PCRelative::adrp(0, -0x4000);
+ let result: u32 = inst.into();
+ assert_eq!(0x90ffffe0, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/reg_pair.rs b/yjit/src/asm/arm64/inst/reg_pair.rs
new file mode 100644
index 0000000000..9bffcd8479
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/reg_pair.rs
@@ -0,0 +1,212 @@
+use super::super::arg::truncate_imm;
+
+/// The operation to perform for this instruction.
+enum Opc {
+ /// When the registers are 32-bits wide.
+ Opc32 = 0b00,
+
+ /// When the registers are 64-bits wide.
+ Opc64 = 0b10
+}
+
+/// The kind of indexing to perform for this instruction.
+enum Index {
+ StorePostIndex = 0b010,
+ LoadPostIndex = 0b011,
+ StoreSignedOffset = 0b100,
+ LoadSignedOffset = 0b101,
+ StorePreIndex = 0b110,
+ LoadPreIndex = 0b111
+}
+
+/// A convenience function so that we can convert the number of bits of a
+/// register operand directly into an Opc variant.
+impl From<u8> for Opc {
+ fn from(num_bits: u8) -> Self {
+ match num_bits {
+ 64 => Opc::Opc64,
+ 32 => Opc::Opc32,
+ _ => panic!("Invalid number of bits: {}", num_bits)
+ }
+ }
+}
+
+/// The struct that represents an A64 register pair instruction that can be
+/// encoded.
+///
+/// STP/LDP
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 0 1 0 1 0 0 |
+/// | opc index..... imm7.................... rt2............. rn.............. rt1............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct RegisterPair {
+ /// The number of the first register to be transferred.
+ rt1: u8,
+
+ /// The number of the base register.
+ rn: u8,
+
+ /// The number of the second register to be transferred.
+ rt2: u8,
+
+ /// The signed immediate byte offset, a multiple of 8.
+ imm7: i16,
+
+ /// The kind of indexing to use for this instruction.
+ index: Index,
+
+ /// The operation to be performed (in terms of size).
+ opc: Opc
+}
+
+impl RegisterPair {
+ /// Create a register pair instruction with a given indexing mode.
+ fn new(rt1: u8, rt2: u8, rn: u8, disp: i16, index: Index, num_bits: u8) -> Self {
+ Self { rt1, rn, rt2, imm7: disp / 8, index, opc: num_bits.into() }
+ }
+
+ /// LDP (signed offset)
+ /// `LDP <Xt1>, <Xt2>, [<Xn|SP>{, #<imm>}]`
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDP--Load-Pair-of-Registers-?lang=en>
+ pub fn ldp(rt1: u8, rt2: u8, rn: u8, disp: i16, num_bits: u8) -> Self {
+ Self::new(rt1, rt2, rn, disp, Index::LoadSignedOffset, num_bits)
+ }
+
+ /// LDP (pre-index)
+ /// `LDP <Xt1>, <Xt2>, [<Xn|SP>, #<imm>]!`
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDP--Load-Pair-of-Registers-?lang=en>
+ pub fn ldp_pre(rt1: u8, rt2: u8, rn: u8, disp: i16, num_bits: u8) -> Self {
+ Self::new(rt1, rt2, rn, disp, Index::LoadPreIndex, num_bits)
+ }
+
+ /// LDP (post-index)
+ /// `LDP <Xt1>, <Xt2>, [<Xn|SP>], #<imm>`
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDP--Load-Pair-of-Registers-?lang=en>
+ pub fn ldp_post(rt1: u8, rt2: u8, rn: u8, disp: i16, num_bits: u8) -> Self {
+ Self::new(rt1, rt2, rn, disp, Index::LoadPostIndex, num_bits)
+ }
+
+ /// STP (signed offset)
+ /// `STP <Xt1>, <Xt2>, [<Xn|SP>{, #<imm>}]`
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/STP--Store-Pair-of-Registers-?lang=en>
+ pub fn stp(rt1: u8, rt2: u8, rn: u8, disp: i16, num_bits: u8) -> Self {
+ Self::new(rt1, rt2, rn, disp, Index::StoreSignedOffset, num_bits)
+ }
+
+ /// STP (pre-index)
+ /// `STP <Xt1>, <Xt2>, [<Xn|SP>, #<imm>]!`
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/STP--Store-Pair-of-Registers-?lang=en>
+ pub fn stp_pre(rt1: u8, rt2: u8, rn: u8, disp: i16, num_bits: u8) -> Self {
+ Self::new(rt1, rt2, rn, disp, Index::StorePreIndex, num_bits)
+ }
+
+ /// STP (post-index)
+ /// `STP <Xt1>, <Xt2>, [<Xn|SP>], #<imm>`
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/STP--Store-Pair-of-Registers-?lang=en>
+ pub fn stp_post(rt1: u8, rt2: u8, rn: u8, disp: i16, num_bits: u8) -> Self {
+ Self::new(rt1, rt2, rn, disp, Index::StorePostIndex, num_bits)
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Loads-and-Stores?lang=en>
+const FAMILY: u32 = 0b0100;
+
+impl From<RegisterPair> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: RegisterPair) -> Self {
+ 0
+ | ((inst.opc as u32) << 30)
+ | (1 << 29)
+ | (FAMILY << 25)
+ | ((inst.index as u32) << 22)
+ | (truncate_imm::<_, 7>(inst.imm7) << 15)
+ | ((inst.rt2 as u32) << 10)
+ | ((inst.rn as u32) << 5)
+ | (inst.rt1 as u32)
+ }
+}
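+
+// Worked example (matches test_ldp_maximum_displacement below): a displacement
+// of 504 bytes is stored as imm7 = 504 / 8 = 63, so ldp(0, 1, 2, 504, 64) is
+// (0b10 << 30) | (1 << 29) | (0b0100 << 25) | (0b101 << 22) | (63 << 15)
+//   | (1 << 10) | (2 << 5) | 0 = 0xa95f8440.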
+
+impl From<RegisterPair> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: RegisterPair) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_ldp() {
+ let inst = RegisterPair::ldp(0, 1, 2, 0, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xa9400440, result);
+ }
+
+ #[test]
+ fn test_ldp_maximum_displacement() {
+ let inst = RegisterPair::ldp(0, 1, 2, 504, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xa95f8440, result);
+ }
+
+ #[test]
+ fn test_ldp_minimum_displacement() {
+ let inst = RegisterPair::ldp(0, 1, 2, -512, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xa9600440, result);
+ }
+
+ #[test]
+ fn test_ldp_pre() {
+ let inst = RegisterPair::ldp_pre(0, 1, 2, 256, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xa9d00440, result);
+ }
+
+ #[test]
+ fn test_ldp_post() {
+ let inst = RegisterPair::ldp_post(0, 1, 2, 256, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xa8d00440, result);
+ }
+
+ #[test]
+ fn test_stp() {
+ let inst = RegisterPair::stp(0, 1, 2, 0, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xa9000440, result);
+ }
+
+ #[test]
+ fn test_stp_maximum_displacement() {
+ let inst = RegisterPair::stp(0, 1, 2, 504, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xa91f8440, result);
+ }
+
+ #[test]
+ fn test_stp_minimum_displacement() {
+ let inst = RegisterPair::stp(0, 1, 2, -512, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xa9200440, result);
+ }
+
+ #[test]
+ fn test_stp_pre() {
+ let inst = RegisterPair::stp_pre(0, 1, 2, 256, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xa9900440, result);
+ }
+
+ #[test]
+ fn test_stp_post() {
+ let inst = RegisterPair::stp_post(0, 1, 2, 256, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xa8900440, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/sbfm.rs b/yjit/src/asm/arm64/inst/sbfm.rs
new file mode 100644
index 0000000000..12944ba722
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/sbfm.rs
@@ -0,0 +1,103 @@
+use super::super::arg::{Sf, truncate_uimm};
+
+/// The struct that represents an A64 signed bitfield move instruction that can
+/// be encoded.
+///
+/// SBFM
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 0 0 1 0 0 1 1 0 |
+/// | sf N immr............... imms............... rn.............. rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct SBFM {
+ /// The number for the general-purpose register to load the value into.
+ rd: u8,
+
+ /// The number for the general-purpose register to copy from.
+ rn: u8,
+
+ /// The leftmost bit number to be moved from the source.
+ imms: u8,
+
+ /// The right rotate amount.
+ immr: u8,
+
+ /// Whether or not this is a 64-bit operation.
+ n: bool,
+
+ /// The size of this operation.
+ sf: Sf
+}
+
+impl SBFM {
+ /// ASR
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/ASR--immediate---Arithmetic-Shift-Right--immediate---an-alias-of-SBFM-?lang=en>
+ pub fn asr(rd: u8, rn: u8, shift: u8, num_bits: u8) -> Self {
+ let (imms, n) = if num_bits == 64 {
+ (0b111111, true)
+ } else {
+ (0b011111, false)
+ };
+
+ Self { rd, rn, immr: shift, imms, n, sf: num_bits.into() }
+ }
+
+ /// SXTW
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/SXTW--Sign-Extend-Word--an-alias-of-SBFM-?lang=en>
+ pub fn sxtw(rd: u8, rn: u8) -> Self {
+ Self { rd, rn, immr: 0, imms: 31, n: true, sf: Sf::Sf64 }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Data-Processing----Immediate?lang=en#bitfield>
+const FAMILY: u32 = 0b1001;
+
+impl From<SBFM> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: SBFM) -> Self {
+ 0
+ | ((inst.sf as u32) << 31)
+ | (FAMILY << 25)
+ | (1 << 24)
+ | ((inst.n as u32) << 22)
+ | (truncate_uimm::<_, 6>(inst.immr) << 16)
+ | (truncate_uimm::<_, 6>(inst.imms) << 10)
+ | ((inst.rn as u32) << 5)
+ | inst.rd as u32
+ }
+}
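+
+// Worked example (matches test_sxtw below): sxtw(0, 1) copies bits 31..0 of the
+// source register with sign extension (immr = 0, imms = 31, n = true), so the
+// encoding is
+// (1 << 31) | (0b1001 << 25) | (1 << 24) | (1 << 22) | (0 << 16) | (31 << 10)
+//   | (1 << 5) | 0 = 0x93407c20.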
+
+impl From<SBFM> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: SBFM) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_asr_32_bits() {
+ let inst = SBFM::asr(0, 1, 2, 32);
+ let result: u32 = inst.into();
+ assert_eq!(0x13027c20, result);
+ }
+
+ #[test]
+ fn test_asr_64_bits() {
+ let inst = SBFM::asr(10, 11, 5, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0x9345fd6a, result);
+ }
+
+ #[test]
+ fn test_sxtw() {
+ let inst = SBFM::sxtw(0, 1);
+ let result: u32 = inst.into();
+ assert_eq!(0x93407c20, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/shift_imm.rs b/yjit/src/asm/arm64/inst/shift_imm.rs
new file mode 100644
index 0000000000..9dac9a1408
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/shift_imm.rs
@@ -0,0 +1,147 @@
+use super::super::arg::Sf;
+
+/// The operation to perform for this instruction.
+enum Opc {
+ /// Logical shift left
+ LSL,
+
+ /// Logical shift right
+ LSR
+}
+
+/// The struct that represents an A64 unsigned bitfield move instruction that
+/// can be encoded.
+///
+/// LSL (immediate)
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 1 0 1 0 0 1 1 0 |
+/// | sf N immr............... imms............... rn.............. rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct ShiftImm {
+ /// The register number of the destination register.
+ rd: u8,
+
+ /// The register number of the first operand register.
+ rn: u8,
+
+ /// The immediate value to shift by.
+ shift: u8,
+
+ /// The opcode for this instruction.
+ opc: Opc,
+
+ /// Whether or not this instruction is operating on 64-bit operands.
+ sf: Sf
+}
+
+impl ShiftImm {
+ /// LSL (immediate)
+ /// <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/LSL--immediate---Logical-Shift-Left--immediate---an-alias-of-UBFM-?lang=en>
+ pub fn lsl(rd: u8, rn: u8, shift: u8, num_bits: u8) -> Self {
+ ShiftImm { rd, rn, shift, opc: Opc::LSL, sf: num_bits.into() }
+ }
+
+ /// LSR (immediate)
+ /// <https://developer.arm.com/documentation/ddi0602/2021-12/Base-Instructions/LSR--immediate---Logical-Shift-Right--immediate---an-alias-of-UBFM-?lang=en>
+ pub fn lsr(rd: u8, rn: u8, shift: u8, num_bits: u8) -> Self {
+ ShiftImm { rd, rn, shift, opc: Opc::LSR, sf: num_bits.into() }
+ }
+
+ /// Returns a triplet of (n, immr, imms) encoded in u32s for this
+ /// instruction. This mirrors how they will be encoded in the actual bits.
+ fn bitmask(&self) -> (u32, u32, u32) {
+ match self.opc {
+ // The key insight is a little buried in the docs, but effectively:
+ // LSL <Wd>, <Wn>, #<shift> == UBFM <Wd>, <Wn>, #(-<shift> MOD 32), #(31-<shift>)
+ // LSL <Xd>, <Xn>, #<shift> == UBFM <Xd>, <Xn>, #(-<shift> MOD 64), #(63-<shift>)
+ Opc::LSL => {
+ let shift = -(self.shift as i16);
+
+ match self.sf {
+ Sf::Sf32 => (
+ 0,
+ (shift.rem_euclid(32) & 0x3f) as u32,
+ ((31 - self.shift) & 0x3f) as u32
+ ),
+ Sf::Sf64 => (
+ 1,
+ (shift.rem_euclid(64) & 0x3f) as u32,
+ ((63 - self.shift) & 0x3f) as u32
+ )
+ }
+ },
+ // Similar to LSL:
+ // LSR <Wd>, <Wn>, #<shift> == UBFM <Wd>, <Wn>, #<shift>, #31
+ // LSR <Xd>, <Xn>, #<shift> == UBFM <Xd>, <Xn>, #<shift>, #63
+ Opc::LSR => {
+ match self.sf {
+ Sf::Sf32 => (0, (self.shift & 0x3f) as u32, 31),
+ Sf::Sf64 => (1, (self.shift & 0x3f) as u32, 63)
+ }
+ }
+ }
+ }
+}
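+
+// Worked example (matches test_lsl_64 below): lsl(0, 1, 7, 64) maps to UBFM
+// with immr = (-7).rem_euclid(64) = 57 and imms = 63 - 7 = 56, so the encoding
+// is (1 << 31) | (1 << 30) | (0b10011 << 24) | (1 << 22) | (57 << 16)
+//   | (56 << 10) | (1 << 5) | 0 = 0xd379e020.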
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Data-Processing----Immediate?lang=en#bitfield>
+const FAMILY: u32 = 0b10011;
+
+impl From<ShiftImm> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: ShiftImm) -> Self {
+ let (n, immr, imms) = inst.bitmask();
+
+ 0
+ | ((inst.sf as u32) << 31)
+ | (1 << 30)
+ | (FAMILY << 24)
+ | (n << 22)
+ | (immr << 16)
+ | (imms << 10)
+ | ((inst.rn as u32) << 5)
+ | inst.rd as u32
+ }
+}
+
+impl From<ShiftImm> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: ShiftImm) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_lsl_32() {
+ let inst = ShiftImm::lsl(0, 1, 7, 32);
+ let result: u32 = inst.into();
+ assert_eq!(0x53196020, result);
+ }
+
+ #[test]
+ fn test_lsl_64() {
+ let inst = ShiftImm::lsl(0, 1, 7, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xd379e020, result);
+ }
+
+ #[test]
+ fn test_lsr_32() {
+ let inst = ShiftImm::lsr(0, 1, 7, 32);
+ let result: u32 = inst.into();
+ assert_eq!(0x53077c20, result);
+ }
+
+ #[test]
+ fn test_lsr_64() {
+ let inst = ShiftImm::lsr(0, 1, 7, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xd347fc20, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/smulh.rs b/yjit/src/asm/arm64/inst/smulh.rs
new file mode 100644
index 0000000000..f355cb6531
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/smulh.rs
@@ -0,0 +1,60 @@
+/// The struct that represents an A64 signed multiply high instruction
+///
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 1 0 0 1 1 0 1 1 0 1 0 0 |
+/// | rm.............. ra.............. rn.............. rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct SMulH {
+ /// The number of the general-purpose destination register.
+ rd: u8,
+
+ /// The number of the first general-purpose source register.
+ rn: u8,
+
+ /// The number of the third general-purpose source register.
+ ra: u8,
+
+ /// The number of the second general-purpose source register.
+ rm: u8,
+}
+
+impl SMulH {
+ /// SMULH
+ /// <https://developer.arm.com/documentation/ddi0602/2023-06/Base-Instructions/SMULH--Signed-Multiply-High->
+ pub fn smulh(rd: u8, rn: u8, rm: u8) -> Self {
+ Self { rd, rn, ra: 0b11111, rm }
+ }
+}
+
+impl From<SMulH> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: SMulH) -> Self {
+ 0
+ | (0b10011011010 << 21)
+ | ((inst.rm as u32) << 16)
+ | ((inst.ra as u32) << 10)
+ | ((inst.rn as u32) << 5)
+ | (inst.rd as u32)
+ }
+}
+
+impl From<SMulH> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: SMulH) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_smulh() {
+ let result: u32 = SMulH::smulh(0, 1, 2).into();
+ assert_eq!(0x9b427c20, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/sys_reg.rs b/yjit/src/asm/arm64/inst/sys_reg.rs
new file mode 100644
index 0000000000..7191dfbfd9
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/sys_reg.rs
@@ -0,0 +1,86 @@
+use super::super::arg::SystemRegister;
+
+/// Which operation to perform (loading or storing the system register value).
+enum L {
+ /// Store the value of a general-purpose register in a system register.
+ MSR = 0,
+
+ /// Store the value of a system register in a general-purpose register.
+ MRS = 1
+}
+
+/// The struct that represents an A64 system register instruction that can be
+/// encoded.
+///
+/// MSR/MRS (register)
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 1 1 0 1 0 1 0 1 0 0 1 |
+/// | L o0 op1..... CRn........ CRm........ op2..... rt.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct SysReg {
+ /// The register to load the system register value into.
+ rt: u8,
+
+ /// Which system register to load or store.
+ systemreg: SystemRegister,
+
+ /// Which operation to perform (loading or storing the system register value).
+ l: L
+}
+
+impl SysReg {
+ /// MRS (register)
+ /// <https://developer.arm.com/documentation/ddi0602/2022-03/Base-Instructions/MRS--Move-System-Register-?lang=en>
+ pub fn mrs(rt: u8, systemreg: SystemRegister) -> Self {
+ SysReg { rt, systemreg, l: L::MRS }
+ }
+
+ /// MSR (register)
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/MSR--register---Move-general-purpose-register-to-System-Register-?lang=en>
+ pub fn msr(systemreg: SystemRegister, rt: u8) -> Self {
+ SysReg { rt, systemreg, l: L::MSR }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Branches--Exception-Generating-and-System-instructions?lang=en#systemmove>
+const FAMILY: u32 = 0b110101010001;
+
+impl From<SysReg> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: SysReg) -> Self {
+ 0
+ | (FAMILY << 20)
+ | ((inst.l as u32) << 21)
+ | ((inst.systemreg as u32) << 5)
+ | inst.rt as u32
+ }
+}
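+
+// Worked example (matches test_mrs below): SystemRegister::NZCV packs
+// o0=1, op1=0b011, CRn=0b0100, CRm=0b0010, op2=0b000 into 0x5a10, so
+// mrs(0, SystemRegister::NZCV) is
+// (0b110101010001 << 20) | (1 << 21) | (0x5a10 << 5) | 0 = 0xd53b4200.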
+
+impl From<SysReg> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: SysReg) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_mrs() {
+ let inst = SysReg::mrs(0, SystemRegister::NZCV);
+ let result: u32 = inst.into();
+ assert_eq!(0xd53b4200, result);
+ }
+
+ #[test]
+ fn test_msr() {
+ let inst = SysReg::msr(SystemRegister::NZCV, 0);
+ let result: u32 = inst.into();
+ assert_eq!(0xd51b4200, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/test_bit.rs b/yjit/src/asm/arm64/inst/test_bit.rs
new file mode 100644
index 0000000000..f7aeca70fd
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/test_bit.rs
@@ -0,0 +1,133 @@
+use super::super::arg::truncate_imm;
+
+/// The upper bit of the bit number to test.
+#[derive(Debug)]
+enum B5 {
+ /// When the bit number is below 32.
+ B532 = 0,
+
+ /// When the bit number is equal to or above 32.
+ B564 = 1
+}
+
+/// A convenience conversion so that we can turn a bit number directly into a
+/// B5 variant.
+impl From<u8> for B5 {
+ fn from(bit_num: u8) -> Self {
+ match bit_num {
+ 0..=31 => B5::B532,
+ 32..=63 => B5::B564,
+ _ => panic!("Invalid bit number: {}", bit_num)
+ }
+ }
+}
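+
+// For example, testing bit 35 encodes as b5 = 1 (the bit number is >= 32) and
+// b40 = 35 & 0b11111 = 3.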
+
+/// The operation to perform for this instruction.
+enum Op {
+ /// The test bit zero operation.
+ TBZ = 0,
+
+ /// The test bit not zero operation.
+ TBNZ = 1
+}
+
+/// The struct that represents an A64 test bit instruction that can be encoded.
+///
+/// TBNZ/TBZ
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 0 1 1 0 1 1 |
+/// | b5 op b40............. imm14.......................................... rt.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct TestBit {
+ /// The number of the register to test.
+ rt: u8,
+
+ /// The PC-relative offset to the target instruction, in terms of the
+ /// number of instructions.
+ imm14: i16,
+
+ /// The lower 5 bits of the bit number to be tested.
+ b40: u8,
+
+ /// The operation to perform for this instruction.
+ op: Op,
+
+ /// The upper bit of the bit number to test.
+ b5: B5
+}
+
+impl TestBit {
+ /// TBNZ
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/TBNZ--Test-bit-and-Branch-if-Nonzero-?lang=en>
+ pub fn tbnz(rt: u8, bit_num: u8, offset: i16) -> Self {
+ Self { rt, imm14: offset, b40: bit_num & 0b11111, op: Op::TBNZ, b5: bit_num.into() }
+ }
+
+ /// TBZ
+ /// <https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/TBZ--Test-bit-and-Branch-if-Zero-?lang=en>
+ pub fn tbz(rt: u8, bit_num: u8, offset: i16) -> Self {
+ Self { rt, imm14: offset, b40: bit_num & 0b11111, op: Op::TBZ, b5: bit_num.into() }
+ }
+}
+
+/// <https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Branches--Exception-Generating-and-System-instructions?lang=en>
+const FAMILY: u32 = 0b11011;
+
+impl From<TestBit> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: TestBit) -> Self {
+ let b40 = (inst.b40 & 0b11111) as u32;
+ let imm14 = truncate_imm::<_, 14>(inst.imm14);
+
+ 0
+ | ((inst.b5 as u32) << 31)
+ | (FAMILY << 25)
+ | ((inst.op as u32) << 24)
+ | (b40 << 19)
+ | (imm14 << 5)
+ | inst.rt as u32
+ }
+}
+
+impl From<TestBit> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: TestBit) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_tbnz() {
+ let inst = TestBit::tbnz(0, 0, 0);
+ let result: u32 = inst.into();
+ assert_eq!(0x37000000, result);
+ }
+
+ #[test]
+ fn test_tbnz_negative() {
+ let inst = TestBit::tbnz(0, 0, -1);
+ let result: u32 = inst.into();
+ assert_eq!(0x3707ffe0, result);
+ }
+
+ #[test]
+ fn test_tbz() {
+ let inst = TestBit::tbz(0, 0, 0);
+ let result: u32 = inst.into();
+ assert_eq!(0x36000000, result);
+ }
+
+ #[test]
+ fn test_tbz_negative() {
+ let inst = TestBit::tbz(0, 0, -1);
+ let result: u32 = inst.into();
+ assert_eq!(0x3607ffe0, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/mod.rs b/yjit/src/asm/arm64/mod.rs
new file mode 100644
index 0000000000..18b5270f9d
--- /dev/null
+++ b/yjit/src/asm/arm64/mod.rs
@@ -0,0 +1,1680 @@
+#![allow(dead_code)] // For instructions and operands we're not currently using.
+
+use crate::asm::CodeBlock;
+
+mod arg;
+mod inst;
+mod opnd;
+
+use inst::*;
+
+// We're going to make these public to make using these things easier in the
+// backend (so they don't have to have knowledge about the submodule).
+pub use arg::*;
+pub use opnd::*;
+
+/// Checks that a signed value fits within the specified number of bits.
+pub const fn imm_fits_bits(imm: i64, num_bits: u8) -> bool {
+ let minimum = if num_bits == 64 { i64::MIN } else { -(2_i64.pow((num_bits as u32) - 1)) };
+ let maximum = if num_bits == 64 { i64::MAX } else { 2_i64.pow((num_bits as u32) - 1) - 1 };
+
+ imm >= minimum && imm <= maximum
+}
+
+/// Checks that an unsigned value fits within the specified number of bits.
+pub const fn uimm_fits_bits(uimm: u64, num_bits: u8) -> bool {
+ let maximum = if num_bits == 64 { u64::MAX } else { 2_u64.pow(num_bits as u32) - 1 };
+
+ uimm <= maximum
+}
+
+/// ADD - add rn and rm, put the result in rd, don't update flags
+pub fn add(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ DataReg::add(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(uimm12)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+
+ DataImm::add(rd.reg_no, rn.reg_no, uimm12.try_into().unwrap(), rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Imm(imm12)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+
+ if imm12 < 0 {
+ DataImm::sub(rd.reg_no, rn.reg_no, (-imm12 as u64).try_into().unwrap(), rd.num_bits).into()
+ } else {
+ DataImm::add(rd.reg_no, rn.reg_no, (imm12 as u64).try_into().unwrap(), rd.num_bits).into()
+ }
+ },
+ _ => panic!("Invalid operand combination to add instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// ADDS - add rn and rm, put the result in rd, update flags
+pub fn adds(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ DataReg::adds(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+
+ DataImm::adds(rd.reg_no, rn.reg_no, imm12.try_into().unwrap(), rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Imm(imm12)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+
+ if imm12 < 0 {
+ DataImm::subs(rd.reg_no, rn.reg_no, (-imm12 as u64).try_into().unwrap(), rd.num_bits).into()
+ } else {
+ DataImm::adds(rd.reg_no, rn.reg_no, (imm12 as u64).try_into().unwrap(), rd.num_bits).into()
+ }
+ },
+ _ => panic!("Invalid operand combination to adds instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// ADR - form a PC-relative address and load it into a register
+pub fn adr(cb: &mut CodeBlock, rd: A64Opnd, imm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, imm) {
+ (A64Opnd::Reg(rd), A64Opnd::Imm(imm)) => {
+ assert!(rd.num_bits == 64, "The destination register must be 64 bits.");
+ assert!(imm_fits_bits(imm, 21), "The immediate operand must be 21 bits or less.");
+
+ PCRelative::adr(rd.reg_no, imm as i32).into()
+ },
+ _ => panic!("Invalid operand combination to adr instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// ADRP - form a PC-relative address to a 4KB page and load it into a register.
+/// This is effectively the same as ADR except that the immediate must be a
+/// multiple of 4KB.
+pub fn adrp(cb: &mut CodeBlock, rd: A64Opnd, imm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, imm) {
+ (A64Opnd::Reg(rd), A64Opnd::Imm(imm)) => {
+ assert!(rd.num_bits == 64, "The destination register must be 64 bits.");
+ assert!(imm_fits_bits(imm, 32), "The immediate operand must be 32 bits or less.");
+
+ PCRelative::adrp(rd.reg_no, imm as i32).into()
+ },
+ _ => panic!("Invalid operand combination to adr instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// AND - and rn and rm, put the result in rd, don't update flags
+pub fn and(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ LogicalReg::and(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+ let bitmask_imm = if rd.num_bits == 32 {
+ BitmaskImmediate::new_32b_reg(imm.try_into().unwrap())
+ } else {
+ imm.try_into()
+ }.unwrap();
+
+ LogicalImm::and(rd.reg_no, rn.reg_no, bitmask_imm, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to and instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// ANDS - and rn and rm, put the result in rd, update flags
+pub fn ands(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ LogicalReg::ands(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+ let bitmask_imm = if rd.num_bits == 32 {
+ BitmaskImmediate::new_32b_reg(imm.try_into().unwrap())
+ } else {
+ imm.try_into()
+ }.unwrap();
+
+ LogicalImm::ands(rd.reg_no, rn.reg_no, bitmask_imm, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to ands instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// ASR - arithmetic shift right rn by shift, put the result in rd, don't update
+/// flags
+pub fn asr(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, shift: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, shift) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(shift)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+ assert!(uimm_fits_bits(shift, 6), "The shift operand must be 6 bits or less.");
+
+ SBFM::asr(rd.reg_no, rn.reg_no, shift.try_into().unwrap(), rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to asr instruction: asr {:?}, {:?}, {:?}", rd, rn, shift),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// Whether or not the offset between two instructions fits into the B or BL
+/// (branch with or without link) instruction. If it doesn't, then we have to
+/// load the value into a register first.
+pub const fn b_offset_fits_bits(offset: i64) -> bool {
+ imm_fits_bits(offset, 26)
+}
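+
+// For reference: 26 signed bits of instruction offset means B/BL can reach
+// +/- 2^25 instructions, i.e. +/- 128 MiB, since every instruction is 4 bytes.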
+
+/// B - branch without link (offset is number of instructions to jump)
+pub fn b(cb: &mut CodeBlock, offset: InstructionOffset) {
+ assert!(b_offset_fits_bits(offset.into()), "The immediate operand must be 26 bits or less.");
+ let bytes: [u8; 4] = Call::b(offset).into();
+
+ cb.write_bytes(&bytes);
+}
+
+/// Whether or not the offset in number of instructions between two instructions
+/// fits into the b.cond instruction. If it doesn't, then we have to load the
+/// value into a register first, then use the b.cond instruction to skip past a
+/// direct jump.
+pub const fn bcond_offset_fits_bits(offset: i64) -> bool {
+ imm_fits_bits(offset, 19)
+}
+
+/// CBZ and CBNZ also have a limit of 19 bits for the branch offset.
+pub use bcond_offset_fits_bits as cmp_branch_offset_fits_bits;
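+
+// For reference: 19 signed bits of instruction offset means B.cond (and
+// CBZ/CBNZ) can reach +/- 2^18 instructions, i.e. +/- 1 MiB.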
+
+/// B.cond - branch to target if condition is true
+pub fn bcond(cb: &mut CodeBlock, cond: u8, offset: InstructionOffset) {
+ assert!(bcond_offset_fits_bits(offset.into()), "The offset must be 19 bits or less.");
+ let bytes: [u8; 4] = BranchCond::bcond(cond, offset).into();
+
+ cb.write_bytes(&bytes);
+}
+
+/// BL - branch with link (offset is number of instructions to jump)
+pub fn bl(cb: &mut CodeBlock, offset: InstructionOffset) {
+ assert!(b_offset_fits_bits(offset.into()), "The offset must be 26 bits or less.");
+ let bytes: [u8; 4] = Call::bl(offset).into();
+
+ cb.write_bytes(&bytes);
+}
+
+/// BLR - branch with link to a register
+pub fn blr(cb: &mut CodeBlock, rn: A64Opnd) {
+ let bytes: [u8; 4] = match rn {
+ A64Opnd::Reg(rn) => Branch::blr(rn.reg_no).into(),
+ _ => panic!("Invalid operand to blr instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// BR - branch to a register
+pub fn br(cb: &mut CodeBlock, rn: A64Opnd) {
+ let bytes: [u8; 4] = match rn {
+ A64Opnd::Reg(rn) => Branch::br(rn.reg_no).into(),
+ _ => panic!("Invalid operand to br instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// BRK - create a breakpoint
+pub fn brk(cb: &mut CodeBlock, imm16: A64Opnd) {
+ let bytes: [u8; 4] = match imm16 {
+ A64Opnd::None => Breakpoint::brk(0xf000).into(),
+ A64Opnd::UImm(imm16) => {
+ assert!(uimm_fits_bits(imm16, 16), "The immediate operand must be 16 bits or less.");
+ Breakpoint::brk(imm16 as u16).into()
+ },
+ _ => panic!("Invalid operand combination to brk instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// CMP - compare rn and rm, update flags
+pub fn cmp(cb: &mut CodeBlock, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rn, rm) {
+ (A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ DataReg::cmp(rn.reg_no, rm.reg_no, rn.num_bits).into()
+ },
+ (A64Opnd::Reg(rn), A64Opnd::Imm(imm12)) => {
+ DataImm::cmp(rn.reg_no, (imm12 as u64).try_into().unwrap(), rn.num_bits).into()
+ },
+ (A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
+ DataImm::cmp(rn.reg_no, imm12.try_into().unwrap(), rn.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to cmp instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// CSEL - conditionally select between two registers
+pub fn csel(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd, cond: u8) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ Conditional::csel(rd.reg_no, rn.reg_no, rm.reg_no, cond, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to csel instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// EOR - perform a bitwise XOR of rn and rm, put the result in rd, don't update flags
+pub fn eor(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ LogicalReg::eor(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+ let bitmask_imm = if rd.num_bits == 32 {
+ BitmaskImmediate::new_32b_reg(imm.try_into().unwrap())
+ } else {
+ imm.try_into()
+ }.unwrap();
+
+ LogicalImm::eor(rd.reg_no, rn.reg_no, bitmask_imm, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to eor instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDADDAL - atomic add with acquire and release semantics
+pub fn ldaddal(cb: &mut CodeBlock, rs: A64Opnd, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rs, rt, rn) {
+ (A64Opnd::Reg(rs), A64Opnd::Reg(rt), A64Opnd::Reg(rn)) => {
+ assert!(
+ rs.num_bits == rt.num_bits && rt.num_bits == rn.num_bits,
+ "All operands must be of the same size."
+ );
+
+ Atomic::ldaddal(rs.reg_no, rt.reg_no, rn.reg_no, rs.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to ldaddal instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDAXR - atomic load with acquire semantics
+pub fn ldaxr(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Reg(rn)) => {
+ assert_eq!(rn.num_bits, 64, "rn must be a 64-bit register.");
+
+ LoadStoreExclusive::ldaxr(rt.reg_no, rn.reg_no, rt.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to ldaxr instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDP (signed offset) - load a pair of registers from memory
+pub fn ldp(cb: &mut CodeBlock, rt1: A64Opnd, rt2: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt1, rt2, rn) {
+ (A64Opnd::Reg(rt1), A64Opnd::Reg(rt2), A64Opnd::Mem(rn)) => {
+ assert!(rt1.num_bits == rt2.num_bits, "Expected source registers to be the same size");
+ assert!(imm_fits_bits(rn.disp.into(), 10), "The displacement must be 10 bits or less.");
+ assert_ne!(rt1.reg_no, rt2.reg_no, "Behavior is unpredictable with pairs of the same register");
+
+ RegisterPair::ldp(rt1.reg_no, rt2.reg_no, rn.base_reg_no, rn.disp as i16, rt1.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to ldp instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDP (pre-index) - load a pair of registers from memory, update the base pointer before loading it
+pub fn ldp_pre(cb: &mut CodeBlock, rt1: A64Opnd, rt2: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt1, rt2, rn) {
+ (A64Opnd::Reg(rt1), A64Opnd::Reg(rt2), A64Opnd::Mem(rn)) => {
+ assert!(rt1.num_bits == rt2.num_bits, "Expected source registers to be the same size");
+ assert!(imm_fits_bits(rn.disp.into(), 10), "The displacement must be 10 bits or less.");
+ assert_ne!(rt1.reg_no, rt2.reg_no, "Behavior is unpredictable with pairs of the same register");
+
+ RegisterPair::ldp_pre(rt1.reg_no, rt2.reg_no, rn.base_reg_no, rn.disp as i16, rt1.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to ldp instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDP (post-index) - load a pair of registers from memory, update the base pointer after loading it
+pub fn ldp_post(cb: &mut CodeBlock, rt1: A64Opnd, rt2: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt1, rt2, rn) {
+ (A64Opnd::Reg(rt1), A64Opnd::Reg(rt2), A64Opnd::Mem(rn)) => {
+ assert!(rt1.num_bits == rt2.num_bits, "Expected source registers to be the same size");
+ assert!(imm_fits_bits(rn.disp.into(), 10), "The displacement must be 10 bits or less.");
+ assert_ne!(rt1.reg_no, rt2.reg_no, "Behavior is unpredictable with pairs of the same register");
+
+ RegisterPair::ldp_post(rt1.reg_no, rt2.reg_no, rn.base_reg_no, rn.disp as i16, rt1.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to ldp instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDR (register) - load a value from memory into a register, where the
+/// address is the sum of a base register and a register offset
+pub fn ldr(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn, rm) {
+ (A64Opnd::Reg(rt), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(rt.num_bits == rn.num_bits, "Expected registers to be the same size");
+ assert!(rn.num_bits == rm.num_bits, "Expected registers to be the same size");
+
+ LoadRegister::ldr(rt.reg_no, rn.reg_no, rm.reg_no, rt.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to ldr instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDR (literal) - load the value at a PC-relative address into a register
+pub fn ldr_literal(cb: &mut CodeBlock, rt: A64Opnd, rn: InstructionOffset) {
+ let bytes: [u8; 4] = match rt {
+ A64Opnd::Reg(rt) => {
+ LoadLiteral::ldr_literal(rt.reg_no, rn, rt.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to ldr instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDRH - load a halfword from memory
+pub fn ldrh(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert_eq!(rt.num_bits, 32, "Expected to be loading a halfword");
+ assert!(imm_fits_bits(rn.disp.into(), 12), "The displacement must be 12 bits or less.");
+
+ HalfwordImm::ldrh(rt.reg_no, rn.base_reg_no, rn.disp as i16).into()
+ },
+ _ => panic!("Invalid operand combination to ldrh instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDRH (pre-index) - load a halfword from memory, update the base pointer before loading it
+pub fn ldrh_pre(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert_eq!(rt.num_bits, 32, "Expected to be loading a halfword");
+ assert!(imm_fits_bits(rn.disp.into(), 9), "The displacement must be 9 bits or less.");
+
+ HalfwordImm::ldrh_pre(rt.reg_no, rn.base_reg_no, rn.disp as i16).into()
+ },
+ _ => panic!("Invalid operand combination to ldrh instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDRH (post-index) - load a halfword from memory, update the base pointer after loading it
+pub fn ldrh_post(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert_eq!(rt.num_bits, 32, "Expected to be loading a halfword");
+ assert!(imm_fits_bits(rn.disp.into(), 9), "The displacement must be 9 bits or less.");
+
+ HalfwordImm::ldrh_post(rt.reg_no, rn.base_reg_no, rn.disp as i16).into()
+ },
+ _ => panic!("Invalid operand combination to ldrh instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// Whether or not a memory address displacement fits into the maximum number of
+/// bits such that it can be used without loading it into a register first.
+pub fn mem_disp_fits_bits(disp: i32) -> bool {
+ imm_fits_bits(disp.into(), 9)
+}
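+
+// For reference: 9 signed bits covers the unscaled displacement range of
+// -256..=255 bytes used by the LDUR/STUR-style encodings.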
+
+/// LDR (post-index) - load a register from memory, update the base pointer after loading it
+pub fn ldr_post(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert!(rt.num_bits == rn.num_bits, "All operands must be of the same size.");
+ assert!(mem_disp_fits_bits(rn.disp), "The displacement must be 9 bits or less.");
+
+ LoadStore::ldr_post(rt.reg_no, rn.base_reg_no, rn.disp as i16, rt.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to ldr instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDR (pre-index) - load a register from memory, update the base pointer before loading it
+pub fn ldr_pre(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert!(rt.num_bits == rn.num_bits, "All operands must be of the same size.");
+ assert!(mem_disp_fits_bits(rn.disp), "The displacement must be 9 bits or less.");
+
+ LoadStore::ldr_pre(rt.reg_no, rn.base_reg_no, rn.disp as i16, rt.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to ldr instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDUR - load a value from memory into a register
+pub fn ldur(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Reg(rn)) => {
+ assert!(rt.num_bits == rn.num_bits, "All operands must be of the same size.");
+
+ LoadStore::ldur(rt.reg_no, rn.reg_no, 0, rt.num_bits).into()
+ },
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert!(rt.num_bits == rn.num_bits, "Expected registers to be the same size");
+ assert!(mem_disp_fits_bits(rn.disp), "Expected displacement to be 9 bits or less");
+
+ LoadStore::ldur(rt.reg_no, rn.base_reg_no, rn.disp as i16, rt.num_bits).into()
+ },
+ _ => panic!("Invalid operands for LDUR")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDURH - load a halfword from memory, zero-extend it, and write it to a register
+pub fn ldurh(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert!(mem_disp_fits_bits(rn.disp), "Expected displacement to be 9 bits or less");
+
+ LoadStore::ldurh(rt.reg_no, rn.base_reg_no, rn.disp as i16).into()
+ },
+ _ => panic!("Invalid operands for LDURH")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDURB - load a byte from memory, zero-extend it, and write it to a register
+pub fn ldurb(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert!(rt.num_bits == rn.num_bits, "Expected registers to be the same size");
+ assert!(rt.num_bits == 8, "Expected registers to have size 8");
+ assert!(mem_disp_fits_bits(rn.disp), "Expected displacement to be 9 bits or less");
+
+ LoadStore::ldurb(rt.reg_no, rn.base_reg_no, rn.disp as i16).into()
+ },
+ _ => panic!("Invalid operands for LDURB")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDURSW - load a 32-bit value from memory, sign-extend it, and write it to a register
+pub fn ldursw(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert!(rt.num_bits == rn.num_bits, "Expected registers to be the same size");
+ assert!(mem_disp_fits_bits(rn.disp), "Expected displacement to be 9 bits or less");
+
+ LoadStore::ldursw(rt.reg_no, rn.base_reg_no, rn.disp as i16).into()
+ },
+ _ => panic!("Invalid operand combination to ldursw instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LSL - logical shift left a register by an immediate
+pub fn lsl(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, shift: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, shift) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(uimm)) => {
+ assert!(rd.num_bits == rn.num_bits, "Expected registers to be the same size");
+ assert!(uimm_fits_bits(uimm, 6), "Expected shift to be 6 bits or less");
+
+ ShiftImm::lsl(rd.reg_no, rn.reg_no, uimm as u8, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operands combination to lsl instruction")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LSR - logical shift right a register by an immediate
+pub fn lsr(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, shift: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, shift) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(uimm)) => {
+ assert!(rd.num_bits == rn.num_bits, "Expected registers to be the same size");
+ assert!(uimm_fits_bits(uimm, 6), "Expected shift to be 6 bits or less");
+
+ ShiftImm::lsr(rd.reg_no, rn.reg_no, uimm as u8, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operands combination to lsr instruction")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// MOV - move a value in a register to another register
+pub fn mov(cb: &mut CodeBlock, rd: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rm) {
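+ // Note: moves to or from the stack pointer cannot use the ORR-based MOV
+ // alias, because register number 31 selects XZR in that encoding, so the
+ // first two arms encode them as ADD <reg>, <reg>, #0 instead.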
+ (A64Opnd::Reg(A64Reg { reg_no: 31, num_bits: 64 }), A64Opnd::Reg(rm)) => {
+ assert!(rm.num_bits == 64, "Expected rm to be 64 bits");
+
+ DataImm::add(31, rm.reg_no, 0.try_into().unwrap(), 64).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(A64Reg { reg_no: 31, num_bits: 64 })) => {
+ assert!(rd.num_bits == 64, "Expected rd to be 64 bits");
+
+ DataImm::add(rd.reg_no, 31, 0.try_into().unwrap(), 64).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rm)) => {
+ assert!(rd.num_bits == rm.num_bits, "Expected registers to be the same size");
+
+ LogicalReg::mov(rd.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::UImm(0)) => {
+ LogicalReg::mov(rd.reg_no, XZR_REG.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::UImm(imm)) => {
+ let bitmask_imm = if rd.num_bits == 32 {
+ BitmaskImmediate::new_32b_reg(imm.try_into().unwrap())
+ } else {
+ imm.try_into()
+ }.unwrap();
+
+ LogicalImm::mov(rd.reg_no, bitmask_imm, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to mov instruction")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// MOVK - move a 16-bit immediate into a register, keeping the other bits in place
+pub fn movk(cb: &mut CodeBlock, rd: A64Opnd, imm16: A64Opnd, shift: u8) {
+ let bytes: [u8; 4] = match (rd, imm16) {
+ (A64Opnd::Reg(rd), A64Opnd::UImm(imm16)) => {
+ assert!(uimm_fits_bits(imm16, 16), "The immediate operand must be 16 bits or less.");
+
+ Mov::movk(rd.reg_no, imm16 as u16, shift, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to movk instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// MOVZ - move a 16-bit immediate into a register, zeroing the other bits
+pub fn movz(cb: &mut CodeBlock, rd: A64Opnd, imm16: A64Opnd, shift: u8) {
+ let bytes: [u8; 4] = match (rd, imm16) {
+ (A64Opnd::Reg(rd), A64Opnd::UImm(imm16)) => {
+ assert!(uimm_fits_bits(imm16, 16), "The immediate operand must be 16 bits or less.");
+
+ Mov::movz(rd.reg_no, imm16 as u16, shift, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to movz instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// MRS - move a system register into a general-purpose register
+pub fn mrs(cb: &mut CodeBlock, rt: A64Opnd, systemregister: SystemRegister) {
+ let bytes: [u8; 4] = match rt {
+ A64Opnd::Reg(rt) => {
+ SysReg::mrs(rt.reg_no, systemregister).into()
+ },
+ _ => panic!("Invalid operand combination to mrs instruction")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// MSR - move a general-purpose register into a system register
+pub fn msr(cb: &mut CodeBlock, systemregister: SystemRegister, rt: A64Opnd) {
+ let bytes: [u8; 4] = match rt {
+ A64Opnd::Reg(rt) => {
+ SysReg::msr(systemregister, rt.reg_no).into()
+ },
+ _ => panic!("Invalid operand combination to msr instruction")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// MUL - multiply two registers, put the result in a third register
+pub fn mul(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits, "Expected registers to be the same size");
+
+ MAdd::mul(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to mul instruction")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// SMULH - multiply two 64-bit registers to produce a 128-bit result, put the high 64-bits of the result into rd
+pub fn smulh(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits, "Expected registers to be the same size");
+ assert!(rd.num_bits == 64, "smulh only applicable to 64-bit registers");
+
+ SMulH::smulh(rd.reg_no, rn.reg_no, rm.reg_no).into()
+ },
+ _ => panic!("Invalid operand combination to mul instruction")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// MVN - move a value in a register to another register, negating it
+pub fn mvn(cb: &mut CodeBlock, rd: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rm)) => {
+ assert!(rd.num_bits == rm.num_bits, "Expected registers to be the same size");
+
+ LogicalReg::mvn(rd.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to mvn instruction")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// NOP - no-operation, used for alignment purposes
+pub fn nop(cb: &mut CodeBlock) {
+ let bytes: [u8; 4] = Nop::nop().into();
+
+ cb.write_bytes(&bytes);
+}
+
+/// ORN - perform a bitwise OR of rn and NOT rm, put the result in rd, don't update flags
+pub fn orn(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits, "Expected registers to be the same size");
+
+ LogicalReg::orn(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to orn instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// ORR - perform a bitwise OR of rn and rm, put the result in rd, don't update flags
+pub fn orr(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ LogicalReg::orr(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+ let bitmask_imm = if rd.num_bits == 32 {
+ BitmaskImmediate::new_32b_reg(imm.try_into().unwrap())
+ } else {
+ imm.try_into()
+ }.unwrap();
+
+ LogicalImm::orr(rd.reg_no, rn.reg_no, bitmask_imm, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to orr instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// STLXR - store a value to memory, release exclusive access
+pub fn stlxr(cb: &mut CodeBlock, rs: A64Opnd, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rs, rt, rn) {
+ (A64Opnd::Reg(rs), A64Opnd::Reg(rt), A64Opnd::Reg(rn)) => {
+ assert_eq!(rs.num_bits, 32, "rs must be a 32-bit register.");
+ assert_eq!(rn.num_bits, 64, "rn must be a 64-bit register.");
+
+ LoadStoreExclusive::stlxr(rs.reg_no, rt.reg_no, rn.reg_no, rn.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to stlxr instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// STP (signed offset) - store a pair of registers to memory
+pub fn stp(cb: &mut CodeBlock, rt1: A64Opnd, rt2: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt1, rt2, rn) {
+ (A64Opnd::Reg(rt1), A64Opnd::Reg(rt2), A64Opnd::Mem(rn)) => {
+ assert!(rt1.num_bits == rt2.num_bits, "Expected source registers to be the same size");
+ assert!(imm_fits_bits(rn.disp.into(), 10), "The displacement must be 10 bits or less.");
+ assert_ne!(rt1.reg_no, rt2.reg_no, "Behavior is unpredictable with pairs of the same register");
+
+ RegisterPair::stp(rt1.reg_no, rt2.reg_no, rn.base_reg_no, rn.disp as i16, rt1.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to stp instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// STP (pre-index) - store a pair of registers to memory, update the base pointer before storing
+pub fn stp_pre(cb: &mut CodeBlock, rt1: A64Opnd, rt2: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt1, rt2, rn) {
+ (A64Opnd::Reg(rt1), A64Opnd::Reg(rt2), A64Opnd::Mem(rn)) => {
+ assert!(rt1.num_bits == rt2.num_bits, "Expected source registers to be the same size");
+ assert!(imm_fits_bits(rn.disp.into(), 10), "The displacement must be 10 bits or less.");
+ assert_ne!(rt1.reg_no, rt2.reg_no, "Behavior is unpredictable with pairs of the same register");
+
+ RegisterPair::stp_pre(rt1.reg_no, rt2.reg_no, rn.base_reg_no, rn.disp as i16, rt1.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to stp instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// STP (post-index) - store a pair of registers to memory, update the base pointer after storing
+pub fn stp_post(cb: &mut CodeBlock, rt1: A64Opnd, rt2: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt1, rt2, rn) {
+ (A64Opnd::Reg(rt1), A64Opnd::Reg(rt2), A64Opnd::Mem(rn)) => {
+ assert!(rt1.num_bits == rt2.num_bits, "Expected source registers to be the same size");
+ assert!(imm_fits_bits(rn.disp.into(), 10), "The displacement must be 10 bits or less.");
+ assert_ne!(rt1.reg_no, rt2.reg_no, "Behavior is unpredictable with pairs of the same register");
+
+ RegisterPair::stp_post(rt1.reg_no, rt2.reg_no, rn.base_reg_no, rn.disp as i16, rt1.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to stp instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// STR (post-index) - store a register to memory, update the base pointer after storing
+pub fn str_post(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert!(rt.num_bits == rn.num_bits, "All operands must be of the same size.");
+ assert!(mem_disp_fits_bits(rn.disp), "The displacement must be 9 bits or less.");
+
+ LoadStore::str_post(rt.reg_no, rn.base_reg_no, rn.disp as i16, rt.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to str instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// STR (pre-index) - store a register to memory, update the base pointer before storing
+pub fn str_pre(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert!(rt.num_bits == rn.num_bits, "All operands must be of the same size.");
+ assert!(mem_disp_fits_bits(rn.disp), "The displacement must be 9 bits or less.");
+
+ LoadStore::str_pre(rt.reg_no, rn.base_reg_no, rn.disp as i16, rt.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to str instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// STRH - store a halfword into memory
+pub fn strh(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert_eq!(rt.num_bits, 32, "Expected to be loading a halfword");
+ assert!(imm_fits_bits(rn.disp.into(), 12), "The displacement must be 12 bits or less.");
+
+ HalfwordImm::strh(rt.reg_no, rn.base_reg_no, rn.disp as i16).into()
+ },
+ _ => panic!("Invalid operand combination to strh instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// STRH (pre-index) - store a halfword into memory, update the base pointer before storing
+pub fn strh_pre(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert_eq!(rt.num_bits, 32, "Expected to be loading a halfword");
+ assert!(imm_fits_bits(rn.disp.into(), 9), "The displacement must be 9 bits or less.");
+
+ HalfwordImm::strh_pre(rt.reg_no, rn.base_reg_no, rn.disp as i16).into()
+ },
+ _ => panic!("Invalid operand combination to strh instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// STRH (post-index) - store a halfword into memory, update the base pointer after storing
+pub fn strh_post(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert_eq!(rt.num_bits, 32, "Expected to be loading a halfword");
+ assert!(imm_fits_bits(rn.disp.into(), 9), "The displacement must be 9 bits or less.");
+
+ HalfwordImm::strh_post(rt.reg_no, rn.base_reg_no, rn.disp as i16).into()
+ },
+ _ => panic!("Invalid operand combination to strh instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// STUR - store a value in a register at a memory address
+pub fn stur(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert!(rn.num_bits == 32 || rn.num_bits == 64);
+ assert!(mem_disp_fits_bits(rn.disp), "Expected displacement to be 9 bits or less");
+
+ LoadStore::stur(rt.reg_no, rn.base_reg_no, rn.disp as i16, rn.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to stur instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// STURH - store a halfword from a register at a memory address
+pub fn sturh(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert!(rn.num_bits == 16);
+ assert!(mem_disp_fits_bits(rn.disp), "Expected displacement to be 9 bits or less");
+
+ LoadStore::sturh(rt.reg_no, rn.base_reg_no, rn.disp as i16).into()
+ },
+ _ => panic!("Invalid operand combination to stur instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// SUB - subtract rm from rn, put the result in rd, don't update flags
+pub fn sub(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ DataReg::sub(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(uimm12)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+
+ DataImm::sub(rd.reg_no, rn.reg_no, uimm12.try_into().unwrap(), rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Imm(imm12)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+
+ if imm12 < 0 {
+ DataImm::add(rd.reg_no, rn.reg_no, (-imm12 as u64).try_into().unwrap(), rd.num_bits).into()
+ } else {
+ DataImm::sub(rd.reg_no, rn.reg_no, (imm12 as u64).try_into().unwrap(), rd.num_bits).into()
+ }
+ },
+ _ => panic!("Invalid operand combination to sub instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// SUBS - subtract rm from rn, put the result in rd, update flags
+pub fn subs(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ DataReg::subs(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(uimm12)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+
+ DataImm::subs(rd.reg_no, rn.reg_no, uimm12.try_into().unwrap(), rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Imm(imm12)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+
+ if imm12 < 0 {
+ DataImm::adds(rd.reg_no, rn.reg_no, (-imm12 as u64).try_into().unwrap(), rd.num_bits).into()
+ } else {
+ DataImm::subs(rd.reg_no, rn.reg_no, (imm12 as u64).try_into().unwrap(), rd.num_bits).into()
+ }
+ },
+ _ => panic!("Invalid operand combination to subs instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// SXTW - sign extend a 32-bit register into a 64-bit register
+pub fn sxtw(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn)) => {
+ assert_eq!(rd.num_bits, 64, "rd must be 64 bits wide.");
+ assert_eq!(rn.num_bits, 32, "rn must be 32 bits wide.");
+
+ SBFM::sxtw(rd.reg_no, rn.reg_no).into()
+ },
+ _ => panic!("Invalid operand combination to sxtw instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// RET - unconditionally return to a location in a register, defaults to X30
+pub fn ret(cb: &mut CodeBlock, rn: A64Opnd) {
+ let bytes: [u8; 4] = match rn {
+ A64Opnd::None => Branch::ret(30).into(),
+ A64Opnd::Reg(reg) => Branch::ret(reg.reg_no).into(),
+ _ => panic!("Invalid operand to ret instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// TBNZ - test bit and branch if not zero
+pub fn tbnz(cb: &mut CodeBlock, rt: A64Opnd, bit_num: A64Opnd, offset: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, bit_num, offset) {
+ (A64Opnd::Reg(rt), A64Opnd::UImm(bit_num), A64Opnd::Imm(offset)) => {
+ TestBit::tbnz(rt.reg_no, bit_num.try_into().unwrap(), offset.try_into().unwrap()).into()
+ },
+ _ => panic!("Invalid operand combination to tbnz instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// TBZ - test bit and branch if zero
+pub fn tbz(cb: &mut CodeBlock, rt: A64Opnd, bit_num: A64Opnd, offset: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, bit_num, offset) {
+ (A64Opnd::Reg(rt), A64Opnd::UImm(bit_num), A64Opnd::Imm(offset)) => {
+ TestBit::tbz(rt.reg_no, bit_num.try_into().unwrap(), offset.try_into().unwrap()).into()
+ },
+ _ => panic!("Invalid operand combination to tbz instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// TST - test the bits of a register against a mask, then update flags
+pub fn tst(cb: &mut CodeBlock, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rn, rm) {
+ (A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(rn.num_bits == rm.num_bits, "All operands must be of the same size.");
+
+ LogicalReg::tst(rn.reg_no, rm.reg_no, rn.num_bits).into()
+ },
+ (A64Opnd::Reg(rn), A64Opnd::UImm(imm)) => {
+ let bitmask_imm = if rn.num_bits == 32 {
+ BitmaskImmediate::new_32b_reg(imm.try_into().unwrap())
+ } else {
+ imm.try_into()
+ }.unwrap();
+
+ LogicalImm::tst(rn.reg_no, bitmask_imm, rn.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to tst instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// CBZ - branch if a register is zero
+pub fn cbz(cb: &mut CodeBlock, rt: A64Opnd, offset: InstructionOffset) {
+ assert!(imm_fits_bits(offset.into(), 19), "jump offset for cbz must fit in 19 bits");
+ let bytes: [u8; 4] = if let A64Opnd::Reg(rt) = rt {
+ cbz_cbnz(rt.num_bits, false, offset, rt.reg_no)
+ } else {
+ panic!("Invalid operand combination to cbz instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// CBNZ - branch if a register is non-zero
+pub fn cbnz(cb: &mut CodeBlock, rt: A64Opnd, offset: InstructionOffset) {
+ assert!(imm_fits_bits(offset.into(), 19), "jump offset for cbz must fit in 19 bits");
+ let bytes: [u8; 4] = if let A64Opnd::Reg(rt) = rt {
+ cbz_cbnz(rt.num_bits, true, offset, rt.reg_no)
+ } else {
+ panic!("Invalid operand combination to cbnz instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// Encode Compare and Branch on Zero (CBZ) with `op=0` or Compare and Branch on Nonzero (CBNZ)
+/// with `op=1`.
+///
+/// <https://developer.arm.com/documentation/ddi0602/2024-03/Base-Instructions/CBZ--Compare-and-Branch-on-Zero->
+///
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | sf 0 1 1 0 1 0 op |
+/// | imm19........................................................... Rt.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+fn cbz_cbnz(num_bits: u8, op: bool, offset: InstructionOffset, rt: u8) -> [u8; 4] {
+ ((Sf::from(num_bits) as u32) << 31 |
+ 0b11010 << 25 |
+ u32::from(op) << 24 |
+ truncate_imm::<_, 19>(offset) << 5 |
+ rt as u32).to_le_bytes()
+}
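+
+// Worked example: encoding CBZ X0 with an offset of -1 instruction sets
+// sf = 1, op = 0, imm19 = 0x7ffff and rt = 0, producing 0xb4ffffe0; the
+// `test_cbz` case below checks exactly those bytes in little-endian order.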
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ /// Check that the bytes for an instruction sequence match a hex string
+ fn check_bytes<R>(bytes: &str, run: R) where R: FnOnce(&mut super::CodeBlock) {
+ let mut cb = super::CodeBlock::new_dummy(128);
+ run(&mut cb);
+ assert_eq!(format!("{:x}", cb), bytes);
+ }
+
+ #[test]
+ fn test_imm_fits_bits() {
+ assert!(imm_fits_bits(i8::MAX.into(), 8));
+ assert!(imm_fits_bits(i8::MIN.into(), 8));
+
+ assert!(imm_fits_bits(i16::MAX.into(), 16));
+ assert!(imm_fits_bits(i16::MIN.into(), 16));
+
+ assert!(imm_fits_bits(i32::MAX.into(), 32));
+ assert!(imm_fits_bits(i32::MIN.into(), 32));
+
+ assert!(imm_fits_bits(i64::MAX, 64));
+ assert!(imm_fits_bits(i64::MIN, 64));
+ }
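+
+ // A hedged boundary sketch (an added test, not from the original suite):
+ // with num_bits = 12, the width of the ADD/SUB immediate field, the
+ // accepted signed range is -2048..=2047.
+ #[test]
+ fn test_imm_fits_bits_12_bit_boundary() {
+ assert!(imm_fits_bits(2047, 12));
+ assert!(!imm_fits_bits(2048, 12));
+ assert!(imm_fits_bits(-2048, 12));
+ assert!(!imm_fits_bits(-2049, 12));
+ }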
+
+ #[test]
+ fn test_uimm_fits_bits() {
+ assert!(uimm_fits_bits(u8::MAX.into(), 8));
+ assert!(uimm_fits_bits(u16::MAX.into(), 16));
+ assert!(uimm_fits_bits(u32::MAX.into(), 32));
+ assert!(uimm_fits_bits(u64::MAX, 64));
+ }
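+
+ // A hedged boundary sketch (an added test, not from the original suite):
+ // a 12-bit unsigned field accepts exactly 0..=4095.
+ #[test]
+ fn test_uimm_fits_bits_12_bit_boundary() {
+ assert!(uimm_fits_bits(4095, 12));
+ assert!(!uimm_fits_bits(4096, 12));
+ }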
+
+ #[test]
+ fn test_add_reg() {
+ check_bytes("2000028b", |cb| add(cb, X0, X1, X2));
+ }
+
+ #[test]
+ fn test_add_uimm() {
+ check_bytes("201c0091", |cb| add(cb, X0, X1, A64Opnd::new_uimm(7)));
+ }
+
+ #[test]
+ fn test_add_imm_positive() {
+ check_bytes("201c0091", |cb| add(cb, X0, X1, A64Opnd::new_imm(7)));
+ }
+
+ #[test]
+ fn test_add_imm_negative() {
+ check_bytes("201c00d1", |cb| add(cb, X0, X1, A64Opnd::new_imm(-7)));
+ }
+
+ #[test]
+ fn test_adds_reg() {
+ check_bytes("200002ab", |cb| adds(cb, X0, X1, X2));
+ }
+
+ #[test]
+ fn test_adds_uimm() {
+ check_bytes("201c00b1", |cb| adds(cb, X0, X1, A64Opnd::new_uimm(7)));
+ }
+
+ #[test]
+ fn test_adds_imm_positive() {
+ check_bytes("201c00b1", |cb| adds(cb, X0, X1, A64Opnd::new_imm(7)));
+ }
+
+ #[test]
+ fn test_adds_imm_negative() {
+ check_bytes("201c00f1", |cb| adds(cb, X0, X1, A64Opnd::new_imm(-7)));
+ }
+
+ #[test]
+ fn test_adr() {
+ check_bytes("aa000010", |cb| adr(cb, X10, A64Opnd::new_imm(20)));
+ }
+
+ #[test]
+ fn test_adrp() {
+ check_bytes("4a000090", |cb| adrp(cb, X10, A64Opnd::new_imm(0x8000)));
+ }
+
+ #[test]
+ fn test_and_register() {
+ check_bytes("2000028a", |cb| and(cb, X0, X1, X2));
+ }
+
+ #[test]
+ fn test_and_immediate() {
+ check_bytes("20084092", |cb| and(cb, X0, X1, A64Opnd::new_uimm(7)));
+ }
+
+ #[test]
+ fn test_and_32b_immediate() {
+ check_bytes("404c0012", |cb| and(cb, W0, W2, A64Opnd::new_uimm(0xfffff)));
+ }
+
+ #[test]
+ fn test_ands_register() {
+ check_bytes("200002ea", |cb| ands(cb, X0, X1, X2));
+ }
+
+ #[test]
+ fn test_ands_immediate() {
+ check_bytes("200840f2", |cb| ands(cb, X0, X1, A64Opnd::new_uimm(7)));
+ }
+
+ #[test]
+ fn test_asr() {
+ check_bytes("b4fe4a93", |cb| asr(cb, X20, X21, A64Opnd::new_uimm(10)));
+ }
+
+ #[test]
+ fn test_bcond() {
+ let offset = InstructionOffset::from_insns(0x100);
+ check_bytes("01200054", |cb| bcond(cb, Condition::NE, offset));
+ }
+
+ #[test]
+ fn test_b() {
+ let offset = InstructionOffset::from_insns((1 << 25) - 1);
+ check_bytes("ffffff15", |cb| b(cb, offset));
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_b_too_big() {
+ // There are 26 bits available
+ let offset = InstructionOffset::from_insns(1 << 25);
+ check_bytes("", |cb| b(cb, offset));
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_b_too_small() {
+ // There are 26 bits available
+ let offset = InstructionOffset::from_insns(-(1 << 25) - 1);
+ check_bytes("", |cb| b(cb, offset));
+ }
+
+ #[test]
+ fn test_bl() {
+ let offset = InstructionOffset::from_insns(-(1 << 25));
+ check_bytes("00000096", |cb| bl(cb, offset));
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_bl_too_big() {
+ // There are 26 bits available
+ let offset = InstructionOffset::from_insns(1 << 25);
+ check_bytes("", |cb| bl(cb, offset));
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_bl_too_small() {
+ // There are 26 bits available
+ let offset = InstructionOffset::from_insns(-(1 << 25) - 1);
+ check_bytes("", |cb| bl(cb, offset));
+ }
+
+ #[test]
+ fn test_blr() {
+ check_bytes("80023fd6", |cb| blr(cb, X20));
+ }
+
+ #[test]
+ fn test_br() {
+ check_bytes("80021fd6", |cb| br(cb, X20));
+ }
+
+ #[test]
+ fn test_cbz() {
+ let offset = InstructionOffset::from_insns(-1);
+ check_bytes("e0ffffb4e0ffff34", |cb| {
+ cbz(cb, X0, offset);
+ cbz(cb, W0, offset);
+ });
+ }
+
+ #[test]
+ fn test_cbnz() {
+ let offset = InstructionOffset::from_insns(2);
+ check_bytes("540000b554000035", |cb| {
+ cbnz(cb, X20, offset);
+ cbnz(cb, W20, offset);
+ });
+ }
+
+ #[test]
+ fn test_brk_none() {
+ check_bytes("00003ed4", |cb| brk(cb, A64Opnd::None));
+ }
+
+ #[test]
+ fn test_brk_uimm() {
+ check_bytes("c00120d4", |cb| brk(cb, A64Opnd::new_uimm(14)));
+ }
+
+ #[test]
+ fn test_cmp_register() {
+ check_bytes("5f010beb", |cb| cmp(cb, X10, X11));
+ }
+
+ #[test]
+ fn test_cmp_immediate() {
+ check_bytes("5f3900f1", |cb| cmp(cb, X10, A64Opnd::new_uimm(14)));
+ }
+
+ #[test]
+ fn test_csel() {
+ check_bytes("6a018c9a", |cb| csel(cb, X10, X11, X12, Condition::EQ));
+ }
+
+ #[test]
+ fn test_eor_register() {
+ check_bytes("6a010cca", |cb| eor(cb, X10, X11, X12));
+ }
+
+ #[test]
+ fn test_eor_immediate() {
+ check_bytes("6a0940d2", |cb| eor(cb, X10, X11, A64Opnd::new_uimm(7)));
+ }
+
+ #[test]
+ fn test_eor_32b_immediate() {
+ check_bytes("29040152", |cb| eor(cb, W9, W1, A64Opnd::new_uimm(0x80000001)));
+ }
+
+ #[test]
+ fn test_ldaddal() {
+ check_bytes("8b01eaf8", |cb| ldaddal(cb, X10, X11, X12));
+ }
+
+ #[test]
+ fn test_ldaxr() {
+ check_bytes("6afd5fc8", |cb| ldaxr(cb, X10, X11));
+ }
+
+ #[test]
+ fn test_ldp() {
+ check_bytes("8a2d4da9", |cb| ldp(cb, X10, X11, A64Opnd::new_mem(64, X12, 208)));
+ }
+
+ #[test]
+ fn test_ldp_pre() {
+ check_bytes("8a2dcda9", |cb| ldp_pre(cb, X10, X11, A64Opnd::new_mem(64, X12, 208)));
+ }
+
+ #[test]
+ fn test_ldp_post() {
+ check_bytes("8a2dcda8", |cb| ldp_post(cb, X10, X11, A64Opnd::new_mem(64, X12, 208)));
+ }
+
+ #[test]
+ fn test_ldr() {
+ check_bytes("6a696cf8", |cb| ldr(cb, X10, X11, X12));
+ }
+
+ #[test]
+ fn test_ldr_literal() {
+ check_bytes("40010058", |cb| ldr_literal(cb, X0, 10.into()));
+ }
+
+ #[test]
+ fn test_ldr_post() {
+ check_bytes("6a0541f8", |cb| ldr_post(cb, X10, A64Opnd::new_mem(64, X11, 16)));
+ }
+
+ #[test]
+ fn test_ldr_pre() {
+ check_bytes("6a0d41f8", |cb| ldr_pre(cb, X10, A64Opnd::new_mem(64, X11, 16)));
+ }
+
+ #[test]
+ fn test_ldrh() {
+ check_bytes("6a194079", |cb| ldrh(cb, W10, A64Opnd::new_mem(64, X11, 12)));
+ }
+
+ #[test]
+ fn test_ldrh_pre() {
+ check_bytes("6acd4078", |cb| ldrh_pre(cb, W10, A64Opnd::new_mem(64, X11, 12)));
+ }
+
+ #[test]
+ fn test_ldrh_post() {
+ check_bytes("6ac54078", |cb| ldrh_post(cb, W10, A64Opnd::new_mem(64, X11, 12)));
+ }
+
+ #[test]
+ fn test_ldurh_memory() {
+ check_bytes("2a004078", |cb| ldurh(cb, W10, A64Opnd::new_mem(64, X1, 0)));
+ check_bytes("2ab04778", |cb| ldurh(cb, W10, A64Opnd::new_mem(64, X1, 123)));
+ }
+
+ #[test]
+ fn test_ldur_memory() {
+ check_bytes("20b047f8", |cb| ldur(cb, X0, A64Opnd::new_mem(64, X1, 123)));
+ }
+
+ #[test]
+ fn test_ldur_register() {
+ check_bytes("200040f8", |cb| ldur(cb, X0, X1));
+ }
+
+ #[test]
+ fn test_ldursw() {
+ check_bytes("6ab187b8", |cb| ldursw(cb, X10, A64Opnd::new_mem(64, X11, 123)));
+ }
+
+ #[test]
+ fn test_lsl() {
+ check_bytes("6ac572d3", |cb| lsl(cb, X10, X11, A64Opnd::new_uimm(14)));
+ }
+
+ #[test]
+ fn test_lsr() {
+ check_bytes("6afd4ed3", |cb| lsr(cb, X10, X11, A64Opnd::new_uimm(14)));
+ }
+
+ #[test]
+ fn test_mov_registers() {
+ check_bytes("ea030baa", |cb| mov(cb, X10, X11));
+ }
+
+ #[test]
+ fn test_mov_immediate() {
+ check_bytes("eaf300b2", |cb| mov(cb, X10, A64Opnd::new_uimm(0x5555555555555555)));
+ }
+
+ #[test]
+ fn test_mov_32b_immediate() {
+ check_bytes("ea070132", |cb| mov(cb, W10, A64Opnd::new_uimm(0x80000001)));
+ }
+
+ #[test]
+ fn test_mov_into_sp() {
+ check_bytes("1f000091", |cb| mov(cb, X31, X0));
+ }
+
+ #[test]
+ fn test_mov_from_sp() {
+ check_bytes("e0030091", |cb| mov(cb, X0, X31));
+ }
+
+ #[test]
+ fn test_movk() {
+ check_bytes("600fa0f2", |cb| movk(cb, X0, A64Opnd::new_uimm(123), 16));
+ }
+
+ #[test]
+ fn test_movz() {
+ check_bytes("600fa0d2", |cb| movz(cb, X0, A64Opnd::new_uimm(123), 16));
+ }
+
+ #[test]
+ fn test_mrs() {
+ check_bytes("0a423bd5", |cb| mrs(cb, X10, SystemRegister::NZCV));
+ }
+
+ #[test]
+ fn test_msr() {
+ check_bytes("0a421bd5", |cb| msr(cb, SystemRegister::NZCV, X10));
+ }
+
+ #[test]
+ fn test_mul() {
+ check_bytes("6a7d0c9b", |cb| mul(cb, X10, X11, X12));
+ }
+
+ #[test]
+ fn test_mvn() {
+ check_bytes("ea032baa", |cb| mvn(cb, X10, X11));
+ }
+
+ #[test]
+ fn test_nop() {
+ check_bytes("1f2003d5", |cb| nop(cb));
+ }
+
+ #[test]
+ fn test_orn() {
+ check_bytes("6a012caa", |cb| orn(cb, X10, X11, X12));
+ }
+
+ #[test]
+ fn test_orr_register() {
+ check_bytes("6a010caa", |cb| orr(cb, X10, X11, X12));
+ }
+
+ #[test]
+ fn test_orr_immediate() {
+ check_bytes("6a0940b2", |cb| orr(cb, X10, X11, A64Opnd::new_uimm(7)));
+ }
+
+ #[test]
+ fn test_orr_32b_immediate() {
+ check_bytes("6a010032", |cb| orr(cb, W10, W11, A64Opnd::new_uimm(1)));
+ }
+
+ #[test]
+ fn test_ret_none() {
+ check_bytes("c0035fd6", |cb| ret(cb, A64Opnd::None));
+ }
+
+ #[test]
+ fn test_ret_register() {
+ check_bytes("80025fd6", |cb| ret(cb, X20));
+ }
+
+ #[test]
+ fn test_stlxr() {
+ check_bytes("8bfd0ac8", |cb| stlxr(cb, W10, X11, X12));
+ }
+
+ #[test]
+ fn test_stp() {
+ check_bytes("8a2d0da9", |cb| stp(cb, X10, X11, A64Opnd::new_mem(64, X12, 208)));
+ }
+
+ #[test]
+ fn test_stp_pre() {
+ check_bytes("8a2d8da9", |cb| stp_pre(cb, X10, X11, A64Opnd::new_mem(64, X12, 208)));
+ }
+
+ #[test]
+ fn test_stp_post() {
+ check_bytes("8a2d8da8", |cb| stp_post(cb, X10, X11, A64Opnd::new_mem(64, X12, 208)));
+ }
+
+ #[test]
+ fn test_str_post() {
+ check_bytes("6a051ff8", |cb| str_post(cb, X10, A64Opnd::new_mem(64, X11, -16)));
+ }
+
+ #[test]
+ fn test_str_pre() {
+ check_bytes("6a0d1ff8", |cb| str_pre(cb, X10, A64Opnd::new_mem(64, X11, -16)));
+ }
+
+ #[test]
+ fn test_strh() {
+ check_bytes("6a190079", |cb| strh(cb, W10, A64Opnd::new_mem(64, X11, 12)));
+ }
+
+ #[test]
+ fn test_strh_pre() {
+ check_bytes("6acd0078", |cb| strh_pre(cb, W10, A64Opnd::new_mem(64, X11, 12)));
+ }
+
+ #[test]
+ fn test_strh_post() {
+ check_bytes("6ac50078", |cb| strh_post(cb, W10, A64Opnd::new_mem(64, X11, 12)));
+ }
+
+ #[test]
+ fn test_stur_64_bits() {
+ check_bytes("6a0108f8", |cb| stur(cb, X10, A64Opnd::new_mem(64, X11, 128)));
+ }
+
+ #[test]
+ fn test_stur_32_bits() {
+ check_bytes("6a0108b8", |cb| stur(cb, X10, A64Opnd::new_mem(32, X11, 128)));
+ }
+
+ #[test]
+ fn test_sub_reg() {
+ check_bytes("200002cb", |cb| sub(cb, X0, X1, X2));
+ }
+
+ #[test]
+ fn test_sub_uimm() {
+ check_bytes("201c00d1", |cb| sub(cb, X0, X1, A64Opnd::new_uimm(7)));
+ }
+
+ #[test]
+ fn test_sub_imm_positive() {
+ check_bytes("201c00d1", |cb| sub(cb, X0, X1, A64Opnd::new_imm(7)));
+ }
+
+ #[test]
+ fn test_sub_imm_negative() {
+ check_bytes("201c0091", |cb| sub(cb, X0, X1, A64Opnd::new_imm(-7)));
+ }
+
+ #[test]
+ fn test_subs_reg() {
+ check_bytes("200002eb", |cb| subs(cb, X0, X1, X2));
+ }
+
+ #[test]
+ fn test_subs_imm_positive() {
+ check_bytes("201c00f1", |cb| subs(cb, X0, X1, A64Opnd::new_imm(7)));
+ }
+
+ #[test]
+ fn test_subs_imm_negative() {
+ check_bytes("201c00b1", |cb| subs(cb, X0, X1, A64Opnd::new_imm(-7)));
+ }
+
+ #[test]
+ fn test_subs_uimm() {
+ check_bytes("201c00f1", |cb| subs(cb, X0, X1, A64Opnd::new_uimm(7)));
+ }
+
+ #[test]
+ fn test_sxtw() {
+ check_bytes("6a7d4093", |cb| sxtw(cb, X10, W11));
+ }
+
+ #[test]
+ fn test_tbnz() {
+ check_bytes("4a005037", |cb| tbnz(cb, X10, A64Opnd::UImm(10), A64Opnd::Imm(2)));
+ }
+
+ #[test]
+ fn test_tbz() {
+ check_bytes("4a005036", |cb| tbz(cb, X10, A64Opnd::UImm(10), A64Opnd::Imm(2)));
+ }
+
+ #[test]
+ fn test_tst_register() {
+ check_bytes("1f0001ea", |cb| tst(cb, X0, X1));
+ }
+
+ #[test]
+ fn test_tst_immediate() {
+ check_bytes("3f0840f2", |cb| tst(cb, X1, A64Opnd::new_uimm(7)));
+ }
+
+ #[test]
+ fn test_tst_32b_immediate() {
+ check_bytes("1f3c0072", |cb| tst(cb, W0, A64Opnd::new_uimm(0xffff)));
+ }
+}
diff --git a/yjit/src/asm/arm64/opnd.rs b/yjit/src/asm/arm64/opnd.rs
new file mode 100644
index 0000000000..108824e08d
--- /dev/null
+++ b/yjit/src/asm/arm64/opnd.rs
@@ -0,0 +1,195 @@
+
+
+/// This operand represents a register.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct A64Reg
+{
+ // Size in bits
+ pub num_bits: u8,
+
+ // Register index number
+ pub reg_no: u8,
+}
+
+impl A64Reg {
+ pub fn with_num_bits(&self, num_bits: u8) -> Self {
+ assert!(num_bits == 8 || num_bits == 16 || num_bits == 32 || num_bits == 64);
+ Self { num_bits, reg_no: self.reg_no }
+ }
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct A64Mem
+{
+ // Size in bits
+ pub num_bits: u8,
+
+ /// Base register number
+ pub base_reg_no: u8,
+
+ /// Constant displacement from the base, not scaled
+ pub disp: i32,
+}
+
+impl A64Mem {
+ pub fn new(num_bits: u8, reg: A64Opnd, disp: i32) -> Self {
+ match reg {
+ A64Opnd::Reg(reg) => {
+ Self { num_bits, base_reg_no: reg.reg_no, disp }
+ },
+ _ => panic!("Expected register operand")
+ }
+ }
+}
+
+#[derive(Clone, Copy, Debug)]
+pub enum A64Opnd
+{
+ // Dummy operand
+ None,
+
+ // Immediate value
+ Imm(i64),
+
+ // Unsigned immediate
+ UImm(u64),
+
+ // Register
+ Reg(A64Reg),
+
+ // Memory
+ Mem(A64Mem)
+}
+
+impl A64Opnd {
+ /// Create a new immediate value operand.
+ pub fn new_imm(value: i64) -> Self {
+ A64Opnd::Imm(value)
+ }
+
+ /// Create a new unsigned immediate value operand.
+ pub fn new_uimm(value: u64) -> Self {
+ A64Opnd::UImm(value)
+ }
+
+ /// Creates a new memory operand.
+ pub fn new_mem(num_bits: u8, reg: A64Opnd, disp: i32) -> Self {
+ A64Opnd::Mem(A64Mem::new(num_bits, reg, disp))
+ }
+
+ /// Convenience function to check if this operand is a register.
+ pub fn is_reg(&self) -> bool {
+ match self {
+ A64Opnd::Reg(_) => true,
+ _ => false
+ }
+ }
+
+ /// Unwrap a register from an operand.
+ pub fn unwrap_reg(&self) -> A64Reg {
+ match self {
+ A64Opnd::Reg(reg) => *reg,
+ _ => panic!("Expected register operand")
+ }
+ }
+}
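+
+// Illustrative sketch of how these constructors compose: new_mem requires
+// a register operand and panics on anything else, so callers pass one of
+// the register constants defined below.
+#[cfg(test)]
+#[test]
+fn test_opnd_constructors() {
+ assert!(X0.is_reg());
+ assert_eq!(X0.unwrap_reg().reg_no, 0);
+ match A64Opnd::new_mem(64, X0, 8) {
+ A64Opnd::Mem(A64Mem { num_bits: 64, base_reg_no: 0, disp: 8 }) => (),
+ _ => panic!("expected a 64-bit memory operand based on x0"),
+ }
+}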
+
+// argument registers
+pub const X0_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 0 };
+pub const X1_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 1 };
+pub const X2_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 2 };
+pub const X3_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 3 };
+pub const X4_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 4 };
+pub const X5_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 5 };
+
+// caller-save registers
+pub const X9_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 9 };
+pub const X10_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 10 };
+pub const X11_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 11 };
+pub const X12_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 12 };
+pub const X13_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 13 };
+pub const X14_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 14 };
+pub const X15_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 15 };
+pub const X16_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 16 };
+pub const X17_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 17 };
+
+// callee-save registers
+pub const X19_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 19 };
+pub const X20_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 20 };
+pub const X21_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 21 };
+pub const X22_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 22 };
+
+// zero register
+pub const XZR_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 31 };
+
+// 64-bit registers
+pub const X0: A64Opnd = A64Opnd::Reg(X0_REG);
+pub const X1: A64Opnd = A64Opnd::Reg(X1_REG);
+pub const X2: A64Opnd = A64Opnd::Reg(X2_REG);
+pub const X3: A64Opnd = A64Opnd::Reg(X3_REG);
+pub const X4: A64Opnd = A64Opnd::Reg(X4_REG);
+pub const X5: A64Opnd = A64Opnd::Reg(X5_REG);
+pub const X6: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 6 });
+pub const X7: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 7 });
+pub const X8: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 8 });
+pub const X9: A64Opnd = A64Opnd::Reg(X9_REG);
+pub const X10: A64Opnd = A64Opnd::Reg(X10_REG);
+pub const X11: A64Opnd = A64Opnd::Reg(X11_REG);
+pub const X12: A64Opnd = A64Opnd::Reg(X12_REG);
+pub const X13: A64Opnd = A64Opnd::Reg(X13_REG);
+pub const X14: A64Opnd = A64Opnd::Reg(X14_REG);
+pub const X15: A64Opnd = A64Opnd::Reg(X15_REG);
+pub const X16: A64Opnd = A64Opnd::Reg(X16_REG);
+pub const X17: A64Opnd = A64Opnd::Reg(X17_REG);
+pub const X18: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 18 });
+pub const X19: A64Opnd = A64Opnd::Reg(X19_REG);
+pub const X20: A64Opnd = A64Opnd::Reg(X20_REG);
+pub const X21: A64Opnd = A64Opnd::Reg(X21_REG);
+pub const X22: A64Opnd = A64Opnd::Reg(X22_REG);
+pub const X23: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 23 });
+pub const X24: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 24 });
+pub const X25: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 25 });
+pub const X26: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 26 });
+pub const X27: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 27 });
+pub const X28: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 28 });
+pub const X29: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 29 });
+pub const X30: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 30 });
+pub const X31: A64Opnd = A64Opnd::Reg(XZR_REG);
+
+// 32-bit registers
+pub const W0: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 0 });
+pub const W1: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 1 });
+pub const W2: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 2 });
+pub const W3: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 3 });
+pub const W4: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 4 });
+pub const W5: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 5 });
+pub const W6: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 6 });
+pub const W7: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 7 });
+pub const W8: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 8 });
+pub const W9: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 9 });
+pub const W10: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 10 });
+pub const W11: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 11 });
+pub const W12: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 12 });
+pub const W13: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 13 });
+pub const W14: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 14 });
+pub const W15: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 15 });
+pub const W16: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 16 });
+pub const W17: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 17 });
+pub const W18: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 18 });
+pub const W19: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 19 });
+pub const W20: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 20 });
+pub const W21: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 21 });
+pub const W22: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 22 });
+pub const W23: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 23 });
+pub const W24: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 24 });
+pub const W25: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 25 });
+pub const W26: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 26 });
+pub const W27: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 27 });
+pub const W28: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 28 });
+pub const W29: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 29 });
+pub const W30: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 30 });
+pub const W31: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 32, reg_no: 31 });
+
+// C argument registers
+pub const C_ARG_REGS: [A64Opnd; 4] = [X0, X1, X2, X3];
+pub const C_ARG_REGREGS: [A64Reg; 4] = [X0_REG, X1_REG, X2_REG, X3_REG];
diff --git a/yjit/src/asm/mod.rs b/yjit/src/asm/mod.rs
new file mode 100644
index 0000000000..9ef675b34d
--- /dev/null
+++ b/yjit/src/asm/mod.rs
@@ -0,0 +1,847 @@
+use std::fmt;
+use std::mem;
+use std::rc::Rc;
+use std::collections::BTreeMap;
+
+use crate::core::IseqPayload;
+use crate::core::for_each_off_stack_iseq_payload;
+use crate::core::for_each_on_stack_iseq_payload;
+use crate::invariants::rb_yjit_tracing_invalidate_all;
+use crate::stats::incr_counter;
+use crate::virtualmem::WriteError;
+use crate::codegen::CodegenGlobals;
+use crate::virtualmem::{VirtualMem, CodePtr};
+
+// Lots of manual vertical alignment in there that rustfmt doesn't handle well.
+#[rustfmt::skip]
+pub mod x86_64;
+
+pub mod arm64;
+
+//
+// TODO: need a field_size_of macro, to compute the size of a struct field in bytes
+//
+
+/// Reference to an ASM label
+#[derive(Clone)]
+pub struct LabelRef {
+ // Position in the code block where the label reference exists
+ pos: usize,
+
+ // Label which this refers to
+ label_idx: usize,
+
+ /// The number of bytes that this label reference takes up in memory.
+ /// It's necessary to know this ahead of time so that when we come back
+ /// to patch it, the patch takes up the same amount of space.
+ num_bytes: usize,
+
+ /// The function that knows how to encode the branch instruction.
+ encode: fn(&mut CodeBlock, i64, i64)
+}
+
+/// Block of memory into which instructions can be assembled
+pub struct CodeBlock {
+ // Memory for storing the encoded instructions
+ mem_block: Rc<VirtualMem>,
+
+ // Size of a code page in bytes. Each code page is split into an inlined and an outlined portion.
+ // Code GC collects code memory at this granularity.
+ // Must be a multiple of the OS page size.
+ page_size: usize,
+
+ // Memory block size
+ mem_size: usize,
+
+ // Current writing position
+ write_pos: usize,
+
+ // The index of the last page with written bytes
+ last_page_idx: usize,
+
+ // Total number of bytes written to past pages
+ past_page_bytes: usize,
+
+ // Size reserved for writing a jump to the next page
+ page_end_reserve: usize,
+
+ // Table of registered label addresses
+ label_addrs: Vec<usize>,
+
+ // Table of registered label names
+ label_names: Vec<String>,
+
+ // References to labels
+ label_refs: Vec<LabelRef>,
+
+ // A switch for keeping comments. They take up memory.
+ keep_comments: bool,
+
+ // Comments for assembly instructions, if that feature is enabled
+ asm_comments: BTreeMap<usize, Vec<String>>,
+
+ // True for OutlinedCb
+ pub outlined: bool,
+
+ // Set if the CodeBlock is unable to output some instructions,
+ // for example, when there is not enough space or when a jump
+ // target is too far away.
+ dropped_bytes: bool,
+
+ // Keeps track of what pages we can write to after code gc.
+ // `None` means all pages are free.
+ freed_pages: Rc<Option<Vec<usize>>>,
+}
+
+/// Set of CodeBlock label states. Used for recovering the previous state.
+pub struct LabelState {
+ label_addrs: Vec<usize>,
+ label_names: Vec<String>,
+ label_refs: Vec<LabelRef>,
+}
+
+impl CodeBlock {
+ /// Works for common AArch64 systems that have 16 KiB pages and
+ /// common x86_64 systems that use 4 KiB pages.
+ const PREFERRED_CODE_PAGE_SIZE: usize = 16 * 1024;
+
+ /// Make a new CodeBlock
+ pub fn new(mem_block: Rc<VirtualMem>, outlined: bool, freed_pages: Rc<Option<Vec<usize>>>, keep_comments: bool) -> Self {
+ // Pick the code page size
+ let system_page_size = mem_block.system_page_size();
+ let page_size = if 0 == Self::PREFERRED_CODE_PAGE_SIZE % system_page_size {
+ Self::PREFERRED_CODE_PAGE_SIZE
+ } else {
+ system_page_size
+ };
+
+ let mem_size = mem_block.virtual_region_size();
+ let mut cb = Self {
+ mem_block,
+ mem_size,
+ page_size,
+ write_pos: 0,
+ last_page_idx: 0,
+ past_page_bytes: 0,
+ page_end_reserve: 0,
+ label_addrs: Vec::new(),
+ label_names: Vec::new(),
+ label_refs: Vec::new(),
+ keep_comments,
+ asm_comments: BTreeMap::new(),
+ outlined,
+ dropped_bytes: false,
+ freed_pages,
+ };
+ cb.page_end_reserve = cb.jmp_ptr_bytes();
+ cb.write_pos = cb.page_start();
+
+ #[cfg(not(test))]
+ assert_eq!(0, mem_size % page_size, "partially in-bounds code pages should be impossible");
+
+ cb
+ }
+
+ /// Move the CodeBlock to the next page. If it's on the furthest page,
+ /// move the other CodeBlock to the next page as well.
+ #[must_use]
+ pub fn next_page<F: Fn(&mut CodeBlock, CodePtr)>(&mut self, base_ptr: CodePtr, jmp_ptr: F) -> bool {
+ let old_write_ptr = self.get_write_ptr();
+ self.set_write_ptr(base_ptr);
+
+ // Use the freed_pages list if code GC has been used. Otherwise use the next page.
+ let next_page_idx = if let Some(freed_pages) = self.freed_pages.as_ref() {
+ let current_page = self.write_pos / self.page_size;
+ freed_pages.iter().find(|&&page| current_page < page).map(|&page| page)
+ } else {
+ Some(self.write_pos / self.page_size + 1)
+ };
+
+ // Move self to the next page
+ if next_page_idx.is_none() || !self.set_page(next_page_idx.unwrap(), &jmp_ptr) {
+ self.set_write_ptr(old_write_ptr); // rollback if there are no more pages
+ return false;
+ }
+
+ // Move the other CodeBlock to the same page if it's on the furthest page
+ if cfg!(not(test)) {
+ self.other_cb().unwrap().set_page(next_page_idx.unwrap(), &jmp_ptr);
+ }
+
+ return !self.dropped_bytes;
+ }
+
+ /// Move the CodeBlock to page_idx only if it's not going backwards.
+ fn set_page<F: Fn(&mut CodeBlock, CodePtr)>(&mut self, page_idx: usize, jmp_ptr: &F) -> bool {
+ // Do not move the CodeBlock if page_idx points to an old position so that this
+ // CodeBlock will not overwrite existing code.
+ //
+ // Let's say this is the current situation:
+ // cb: [page1, page2, page3 (write_pos)], ocb: [page1, page2, page3 (write_pos)]
+ //
+ // When cb needs to patch page1, this will be temporarily changed to:
+ // cb: [page1 (write_pos), page2, page3], ocb: [page1, page2, page3 (write_pos)]
+ //
+ // While patching page1, cb may need to jump to page2. What set_page currently does is:
+ // cb: [page1, page2 (write_pos), page3], ocb: [page1, page2, page3 (write_pos)]
+ // instead of:
+ // cb: [page1, page2 (write_pos), page3], ocb: [page1, page2 (write_pos), page3]
+ // because moving ocb's write_pos from page3 to the beginning of page2 will let ocb's
+ // write_pos point to existing code in page2, which might let ocb overwrite it later.
+ //
+ // We could remember the last write_pos in page2 and let set_page use that position,
+ // but you need to waste some space for keeping write_pos for every single page.
+ // It doesn't seem necessary for performance either. So we're currently not doing it.
+ let dst_pos = self.get_page_pos(page_idx);
+ if self.write_pos < dst_pos {
+ // Fail if next page is out of bounds
+ if dst_pos >= self.mem_size {
+ return false;
+ }
+
+ // Reset dropped_bytes
+ self.dropped_bytes = false;
+
+ // Generate jmp_ptr from src_pos to dst_pos
+ let dst_ptr = self.get_ptr(dst_pos);
+ self.without_page_end_reserve(|cb| {
+ assert!(cb.has_capacity(cb.jmp_ptr_bytes()));
+ cb.add_comment("jump to next page");
+ jmp_ptr(cb, dst_ptr);
+ });
+
+ // Update past_page_bytes for code_size() if this is a new page
+ if self.last_page_idx < page_idx {
+ self.past_page_bytes += self.current_page_bytes();
+ }
+
+ // Start the next code from dst_pos
+ self.write_pos = dst_pos;
+ // Update the last_page_idx if page_idx points to the furthest page
+ self.last_page_idx = usize::max(self.last_page_idx, page_idx);
+ }
+ !self.dropped_bytes
+ }
+
+ /// Free the memory pages of given code page indexes
+ fn free_pages(&mut self, page_idxs: &Vec<usize>) {
+ let mut page_idxs = page_idxs.clone();
+ page_idxs.reverse(); // to loop with pop()
+
+ // Group adjacent page indexes and free them in batches to reduce the # of syscalls.
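+ // For example, page_idxs [2, 3, 4, 7] results in two free_bytes calls:
+ // one covering pages 2..=4 and one covering page 7.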
+ while let Some(page_idx) = page_idxs.pop() {
+ // Group first adjacent page indexes
+ let mut batch_idxs = vec![page_idx];
+ while page_idxs.last() == Some(&(batch_idxs.last().unwrap() + 1)) {
+ batch_idxs.push(page_idxs.pop().unwrap());
+ }
+
+ // Free the grouped pages at once
+ let start_ptr = self.mem_block.start_ptr().add_bytes(page_idx * self.page_size);
+ let batch_size = self.page_size * batch_idxs.len();
+ self.mem_block.free_bytes(start_ptr, batch_size as u32);
+ }
+ }
+
+ pub fn page_size(&self) -> usize {
+ self.page_size
+ }
+
+ pub fn mapped_region_size(&self) -> usize {
+ self.mem_block.mapped_region_size()
+ }
+
+ /// Size of the region in bytes where writes could be attempted.
+ #[cfg(target_arch = "aarch64")]
+ pub fn virtual_region_size(&self) -> usize {
+ self.mem_block.virtual_region_size()
+ }
+
+ /// Return the number of code pages that have been mapped by the VirtualMemory.
+ pub fn num_mapped_pages(&self) -> usize {
+ // CodeBlock's page size != VirtualMem's page size on Linux,
+ // so mapped_region_size % self.page_size may not be 0
+ ((self.mapped_region_size() - 1) / self.page_size) + 1
+ }
+
+ /// Return the number of code pages that have been reserved by the VirtualMemory.
+ pub fn num_virtual_pages(&self) -> usize {
+ let virtual_region_size = self.mem_block.virtual_region_size();
+ // CodeBlock's page size != VirtualMem's page size on Linux,
+ // so virtual_region_size % self.page_size may not be 0
+ ((virtual_region_size - 1) / self.page_size) + 1
+ }
+
+ /// Return the number of code pages that have been freed and not used yet.
+ pub fn num_freed_pages(&self) -> usize {
+ (0..self.num_mapped_pages()).filter(|&page_idx| self.has_freed_page(page_idx)).count()
+ }
+
+ pub fn has_freed_page(&self, page_idx: usize) -> bool {
+ self.freed_pages.as_ref().as_ref().map_or(false, |pages| pages.contains(&page_idx)) && // code GCed
+ self.write_pos < page_idx * self.page_size // and not written yet
+ }
+
+ /// Convert a page index to the write_pos for the page start.
+ fn get_page_pos(&self, page_idx: usize) -> usize {
+ self.page_size * page_idx + self.page_start()
+ }
+
+ /// write_pos of the current page start
+ pub fn page_start_pos(&self) -> usize {
+ self.get_write_pos() / self.page_size * self.page_size + self.page_start()
+ }
+
+ /// Offset of each page where CodeBlock should start writing
+ pub fn page_start(&self) -> usize {
+ let mut start = if self.inline() {
+ 0
+ } else {
+ self.page_size / 2
+ };
+ if cfg!(debug_assertions) && !cfg!(test) {
+ // Leave illegal instructions at the beginning of each page to assert
+ // we're not accidentally crossing page boundaries.
+ start += self.jmp_ptr_bytes();
+ }
+ start
+ }
+
+ /// Offset of each page where CodeBlock should stop writing (exclusive)
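+ /// For illustration, with the preferred 16 KiB code page the inline
+ /// block writes within roughly [0, 8192) of each page and the outlined
+ /// block within [8192, 16384), each minus the page_end_reserve bytes
+ /// kept for the jump to the next page.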
+ pub fn page_end(&self) -> usize {
+ let page_end = if self.inline() {
+ self.page_size / 2
+ } else {
+ self.page_size
+ };
+ page_end - self.page_end_reserve // reserve space to jump to the next page
+ }
+
+ /// Call a given function with page_end_reserve = 0
+ pub fn without_page_end_reserve<F: Fn(&mut Self)>(&mut self, block: F) {
+ let old_page_end_reserve = self.page_end_reserve;
+ self.page_end_reserve = 0;
+ block(self);
+ self.page_end_reserve = old_page_end_reserve;
+ }
+
+ /// Return the address ranges of a given address range that this CodeBlock can write.
+ #[allow(dead_code)]
+ pub fn writable_addrs(&self, start_ptr: CodePtr, end_ptr: CodePtr) -> Vec<(usize, usize)> {
+ let region_start = self.get_ptr(0).raw_addr(self);
+ let region_end = self.get_ptr(self.get_mem_size()).raw_addr(self);
+ let mut start = start_ptr.raw_addr(self);
+ let end = std::cmp::min(end_ptr.raw_addr(self), region_end);
+
+ let freed_pages = self.freed_pages.as_ref().as_ref();
+ let mut addrs = vec![];
+ while start < end {
+ let page_idx = start.saturating_sub(region_start) / self.page_size;
+ let current_page = region_start + (page_idx * self.page_size);
+ let page_end = std::cmp::min(end, current_page + self.page_end());
+ // If code GC has been used, skip pages that are used by past on-stack code
+ if freed_pages.map_or(true, |pages| pages.contains(&page_idx)) {
+ addrs.push((start, page_end));
+ }
+ start = current_page + self.page_size + self.page_start();
+ }
+ addrs
+ }
+
+ /// Return the number of bytes written by this CodeBlock.
+ pub fn code_size(&self) -> usize {
+ self.current_page_bytes() + self.past_page_bytes
+ }
+
+ /// Return the number of bytes written to the current page.
+ fn current_page_bytes(&self) -> usize {
+ (self.write_pos % self.page_size).saturating_sub(self.page_start())
+ }
+
+ /// Check if this code block has sufficient remaining capacity
+ pub fn has_capacity(&self, num_bytes: usize) -> bool {
+ let page_offset = self.write_pos % self.page_size;
+ let capacity = self.page_end().saturating_sub(page_offset);
+ num_bytes <= capacity
+ }
+
+ /// Add an assembly comment if the feature is on.
+ pub fn add_comment(&mut self, comment: &str) {
+ if !self.keep_comments {
+ return;
+ }
+
+ let cur_ptr = self.get_write_ptr().raw_addr(self);
+
+ // If there's no current list of comments for this address, add one.
+ let this_line_comments = self.asm_comments.entry(cur_ptr).or_default();
+
+ // Unless this comment is the same as the last one at this same address, add it.
+ if this_line_comments.last().map(String::as_str) != Some(comment) {
+ this_line_comments.push(comment.to_string());
+ }
+ }
+
+ pub fn comments_at(&self, pos: usize) -> Option<&Vec<String>> {
+ self.asm_comments.get(&pos)
+ }
+
+ pub fn remove_comments(&mut self, start_addr: CodePtr, end_addr: CodePtr) {
+ if self.asm_comments.is_empty() {
+ return;
+ }
+ for addr in start_addr.raw_addr(self)..end_addr.raw_addr(self) {
+ self.asm_comments.remove(&addr);
+ }
+ }
+
+ pub fn clear_comments(&mut self) {
+ self.asm_comments.clear();
+ }
+
+ pub fn get_mem_size(&self) -> usize {
+ self.mem_size
+ }
+
+ pub fn get_write_pos(&self) -> usize {
+ self.write_pos
+ }
+
+ pub fn write_mem(&self, write_ptr: CodePtr, byte: u8) -> Result<(), WriteError> {
+ self.mem_block.write_byte(write_ptr, byte)
+ }
+
+ // Set the current write position
+ pub fn set_pos(&mut self, pos: usize) {
+ // No bounds check here since we can be out of bounds
+ // when the code block fills up. We want to be able to
+ // restore to the filled up state after patching something
+ // in the middle.
+ self.write_pos = pos;
+ }
+
+ // Set the current write position from a pointer
+ pub fn set_write_ptr(&mut self, code_ptr: CodePtr) {
+ let pos = code_ptr.as_offset() - self.mem_block.start_ptr().as_offset();
+ self.set_pos(pos.try_into().unwrap());
+ }
+
+ /// Get a (possibly dangling) direct pointer into the executable memory block
+ pub fn get_ptr(&self, offset: usize) -> CodePtr {
+ self.mem_block.start_ptr().add_bytes(offset)
+ }
+
+ /// Convert an address range to memory page indexes against a num_pages()-sized array.
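+ /// For example, with 16 KiB pages the half-open range [0, 0x4000) maps
+ /// to page 0 only, [0, 0x4001) to pages 0 and 1, and an empty range to
+ /// no pages.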
+ pub fn addrs_to_pages(&self, start_addr: CodePtr, end_addr: CodePtr) -> impl Iterator<Item = usize> {
+ let mem_start = self.mem_block.start_ptr().raw_addr(self);
+ let mem_end = self.mem_block.mapped_end_ptr().raw_addr(self);
+ assert!(mem_start <= start_addr.raw_addr(self));
+ assert!(start_addr.raw_addr(self) <= end_addr.raw_addr(self));
+ assert!(end_addr.raw_addr(self) <= mem_end);
+
+ // Ignore empty code ranges
+ if start_addr == end_addr {
+ return 0..0;
+ }
+
+ let start_page = (start_addr.raw_addr(self) - mem_start) / self.page_size;
+ let end_page = (end_addr.raw_addr(self) - mem_start - 1) / self.page_size;
+ start_page..end_page + 1
+ }
+
+ /// Get a (possibly dangling) direct pointer to the current write position
+ pub fn get_write_ptr(&self) -> CodePtr {
+ self.get_ptr(self.write_pos)
+ }
+
+ /// Write a single byte at the current position.
+ pub fn write_byte(&mut self, byte: u8) {
+ let write_ptr = self.get_write_ptr();
+ if self.has_capacity(1) && self.mem_block.write_byte(write_ptr, byte).is_ok() {
+ self.write_pos += 1;
+ } else {
+ self.dropped_bytes = true;
+ }
+ }
+
+ /// Write multiple bytes starting from the current position.
+ pub fn write_bytes(&mut self, bytes: &[u8]) {
+ for byte in bytes {
+ self.write_byte(*byte);
+ }
+ }
+
+ /// Write an integer over the given number of bits at the current position.
+ fn write_int(&mut self, val: u64, num_bits: u32) {
+ assert!(num_bits > 0);
+ assert!(num_bits % 8 == 0);
+
+ // Switch on the number of bits
+ match num_bits {
+ 8 => self.write_byte(val as u8),
+ 16 => self.write_bytes(&[(val & 0xff) as u8, ((val >> 8) & 0xff) as u8]),
+ 32 => self.write_bytes(&[
+ (val & 0xff) as u8,
+ ((val >> 8) & 0xff) as u8,
+ ((val >> 16) & 0xff) as u8,
+ ((val >> 24) & 0xff) as u8,
+ ]),
+ _ => {
+ let mut cur = val;
+
+ // Write out the bytes
+ for _byte in 0..(num_bits / 8) {
+ self.write_byte((cur & 0xff) as u8);
+ cur >>= 8;
+ }
+ }
+ }
+ }
+
+ /// Check if bytes have been dropped (unwritten because of insufficient space)
+ pub fn has_dropped_bytes(&self) -> bool {
+ self.dropped_bytes
+ }
+
+ /// To patch code that straddles pages correctly, we need to start with
+ /// the dropped_bytes flag unset so we can detect when to switch to a new page.
+ pub fn set_dropped_bytes(&mut self, dropped_bytes: bool) {
+ self.dropped_bytes = dropped_bytes;
+ }
+
+ /// Allocate a new label with a given name
+ pub fn new_label(&mut self, name: String) -> usize {
+ assert!(!name.contains(' '), "use underscores in label names, not spaces");
+
+ // This label doesn't have an address yet
+ self.label_addrs.push(0);
+ self.label_names.push(name);
+
+ return self.label_addrs.len() - 1;
+ }
+
+ /// Write a label at the current address
+ pub fn write_label(&mut self, label_idx: usize) {
+ self.label_addrs[label_idx] = self.write_pos;
+ }
+
+ // Add a label reference at the current write position
+ pub fn label_ref(&mut self, label_idx: usize, num_bytes: usize, encode: fn(&mut CodeBlock, i64, i64)) {
+ assert!(label_idx < self.label_addrs.len());
+
+ // Keep track of the reference
+ self.label_refs.push(LabelRef { pos: self.write_pos, label_idx, num_bytes, encode });
+
+ // Move past however many bytes the instruction takes up
+ if self.has_capacity(num_bytes) {
+ self.write_pos += num_bytes;
+ } else {
+ self.dropped_bytes = true; // retry emitting the Insn after next_page
+ }
+ }
+
+ // Link internal label references
+ pub fn link_labels(&mut self) {
+ let orig_pos = self.write_pos;
+
+ // For each label reference
+ for label_ref in mem::take(&mut self.label_refs) {
+ let ref_pos = label_ref.pos;
+ let label_idx = label_ref.label_idx;
+ assert!(ref_pos < self.mem_size);
+
+ let label_addr = self.label_addrs[label_idx];
+ assert!(label_addr < self.mem_size);
+
+ self.set_pos(ref_pos);
+ (label_ref.encode)(self, (ref_pos + label_ref.num_bytes) as i64, label_addr as i64);
+
+ // Assert that we've written the same number of bytes that we
+ // expected to have written.
+ assert!(self.write_pos == ref_pos + label_ref.num_bytes);
+ }
+
+ self.write_pos = orig_pos;
+
+ // Clear the label positions and references
+ self.label_addrs.clear();
+ self.label_names.clear();
+ assert!(self.label_refs.is_empty());
+ }
+
+ pub fn clear_labels(&mut self) {
+ self.label_addrs.clear();
+ self.label_names.clear();
+ self.label_refs.clear();
+ }
+
+ pub fn get_label_state(&self) -> LabelState {
+ LabelState {
+ label_addrs: self.label_addrs.clone(),
+ label_names: self.label_names.clone(),
+ label_refs: self.label_refs.clone(),
+ }
+ }
+
+ pub fn set_label_state(&mut self, state: LabelState) {
+ self.label_addrs = state.label_addrs;
+ self.label_names = state.label_names;
+ self.label_refs = state.label_refs;
+ }
+
+ pub fn mark_all_writeable(&mut self) {
+ self.mem_block.mark_all_writeable();
+ }
+
+ pub fn mark_all_executable(&mut self) {
+ self.mem_block.mark_all_executable();
+ }
+
+ /// Code GC. Free code pages that are not on stack and reuse them.
+ pub fn code_gc(&mut self, ocb: &mut OutlinedCb) {
+ assert!(self.inline(), "must use on inline code block");
+
+ // The previous code GC failed to free any pages. Give up.
+ if self.freed_pages.as_ref() == &Some(vec![]) {
+ return;
+ }
+
+ // Check which pages are still in use
+ let mut pages_in_use = vec![false; self.num_mapped_pages()];
+ // For each ISEQ, we currently assume that only code pages used by inline code
+ // are used by outlined code, so we mark only code pages used by inlined code.
+ for_each_on_stack_iseq_payload(|iseq_payload| {
+ for page in &iseq_payload.pages {
+ pages_in_use[*page] = true;
+ }
+ });
+ // Avoid accumulating freed pages for future code GC
+ for_each_off_stack_iseq_payload(|iseq_payload: &mut IseqPayload| {
+ iseq_payload.pages = std::collections::HashSet::default();
+ });
+ // Outlined code generated by CodegenGlobals::init() should also be kept.
+ for page in CodegenGlobals::get_ocb_pages() {
+ pages_in_use[*page] = true;
+ }
+
+ // Invalidate everything to have more compact code after code GC.
+ // This currently patches every ISEQ, which works, but in the future,
+ // we could limit that to patch only on-stack ISEQs for optimizing code GC.
+ rb_yjit_tracing_invalidate_all();
+
+ // Assert that all code pages are freeable
+ assert_eq!(
+ 0,
+ self.mem_size % self.page_size,
+ "end of the last code page should be the end of the entire region"
+ );
+
+ // Let VirtualMem free the pages
+ let mut freed_pages: Vec<usize> = pages_in_use.iter().enumerate()
+ .filter(|&(_, &in_use)| !in_use).map(|(page, _)| page).collect();
+ // ObjectSpace API may trigger Ruby's GC, which marks gc_offsets in JIT code.
+ // So this should be called after for_each_*_iseq_payload and rb_yjit_tracing_invalidate_all.
+ self.free_pages(&freed_pages);
+
+ // Append virtual pages in case RubyVM::YJIT.code_gc is manually triggered.
+ let mut virtual_pages: Vec<usize> = (self.num_mapped_pages()..self.num_virtual_pages()).collect();
+ freed_pages.append(&mut virtual_pages);
+
+ if let Some(&first_page) = freed_pages.first() {
+ for cb in [&mut *self, ocb.unwrap()] {
+ cb.write_pos = cb.get_page_pos(first_page);
+ cb.past_page_bytes = 0;
+ cb.dropped_bytes = false;
+ cb.clear_comments();
+ }
+ }
+
+ // Track which pages are free.
+ let new_freed_pages = Rc::new(Some(freed_pages));
+ let old_freed_pages = mem::replace(&mut self.freed_pages, Rc::clone(&new_freed_pages));
+ ocb.unwrap().freed_pages = new_freed_pages;
+ assert_eq!(1, Rc::strong_count(&old_freed_pages)); // will deallocate
+
+ incr_counter!(code_gc_count);
+ }
+
+ pub fn inline(&self) -> bool {
+ !self.outlined
+ }
+
+ pub fn other_cb(&self) -> Option<&'static mut Self> {
+ if !CodegenGlobals::has_instance() {
+ None
+ } else if self.inline() {
+ Some(CodegenGlobals::get_outlined_cb().unwrap())
+ } else {
+ Some(CodegenGlobals::get_inline_cb())
+ }
+ }
+}
+
+#[cfg(test)]
+impl CodeBlock {
+ /// Stubbed CodeBlock for testing. Can't execute generated code.
+ pub fn new_dummy(mem_size: usize) -> Self {
+ use std::ptr::NonNull;
+ use crate::virtualmem::*;
+ use crate::virtualmem::tests::TestingAllocator;
+
+ let alloc = TestingAllocator::new(mem_size);
+ let mem_start: *const u8 = alloc.mem_start();
+ let virt_mem = VirtualMem::new(alloc, 1, NonNull::new(mem_start as *mut u8).unwrap(), mem_size, 128 * 1024 * 1024);
+
+ Self::new(Rc::new(virt_mem), false, Rc::new(None), true)
+ }
+
+ /// Stubbed CodeBlock for testing conditions that can arise due to code GC. Can't execute generated code.
+ #[cfg(target_arch = "aarch64")]
+ pub fn new_dummy_with_freed_pages(mut freed_pages: Vec<usize>) -> Self {
+ use std::ptr::NonNull;
+ use crate::virtualmem::*;
+ use crate::virtualmem::tests::TestingAllocator;
+
+ freed_pages.sort_unstable();
+ let mem_size = Self::PREFERRED_CODE_PAGE_SIZE *
+ (1 + freed_pages.last().expect("freed_pages vec should not be empty"));
+
+ let alloc = TestingAllocator::new(mem_size);
+ let mem_start: *const u8 = alloc.mem_start();
+ let virt_mem = VirtualMem::new(alloc, 1, NonNull::new(mem_start as *mut u8).unwrap(), mem_size, 128 * 1024 * 1024);
+
+ Self::new(Rc::new(virt_mem), false, Rc::new(Some(freed_pages)), true)
+ }
+}
+
+/// Produce hex string output from the bytes in a code block
+impl fmt::LowerHex for CodeBlock {
+ fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
+ for pos in 0..self.write_pos {
+ let mem_block = &*self.mem_block;
+ let byte = unsafe { mem_block.start_ptr().raw_ptr(mem_block).add(pos).read() };
+ fmtr.write_fmt(format_args!("{:02x}", byte))?;
+ }
+ Ok(())
+ }
+}
+
+impl crate::virtualmem::CodePtrBase for CodeBlock {
+ fn base_ptr(&self) -> std::ptr::NonNull<u8> {
+ self.mem_block.base_ptr()
+ }
+}
+
+/// Wrapper struct so we can use the type system to distinguish
+/// between the inlined and outlined code blocks
+pub struct OutlinedCb {
+ // This must remain private
+ cb: CodeBlock,
+}
+
+impl OutlinedCb {
+ pub fn wrap(cb: CodeBlock) -> Self {
+ OutlinedCb { cb }
+ }
+
+ pub fn unwrap(&mut self) -> &mut CodeBlock {
+ &mut self.cb
+ }
+}
+
+/// Compute the number of bits needed to encode a signed value
+pub fn imm_num_bits(imm: i64) -> u8
+{
+ // Compute the smallest size this immediate fits in
+ if imm >= i8::MIN.into() && imm <= i8::MAX.into() {
+ return 8;
+ }
+ if imm >= i16::MIN.into() && imm <= i16::MAX.into() {
+ return 16;
+ }
+ if imm >= i32::MIN.into() && imm <= i32::MAX.into() {
+ return 32;
+ }
+
+ return 64;
+}
+
+/// Compute the number of bits needed to encode an unsigned value
+pub fn uimm_num_bits(uimm: u64) -> u8
+{
+ // Compute the smallest size this immediate fits in
+ if uimm <= u8::MAX.into() {
+ return 8;
+ }
+ else if uimm <= u16::MAX.into() {
+ return 16;
+ }
+ else if uimm <= u32::MAX.into() {
+ return 32;
+ }
+
+ return 64;
+}
+
+#[cfg(test)]
+mod tests
+{
+ use super::*;
+
+ #[test]
+ fn test_imm_num_bits()
+ {
+ assert_eq!(imm_num_bits(i8::MIN.into()), 8);
+ assert_eq!(imm_num_bits(i8::MAX.into()), 8);
+
+ assert_eq!(imm_num_bits(i16::MIN.into()), 16);
+ assert_eq!(imm_num_bits(i16::MAX.into()), 16);
+
+ assert_eq!(imm_num_bits(i32::MIN.into()), 32);
+ assert_eq!(imm_num_bits(i32::MAX.into()), 32);
+
+ assert_eq!(imm_num_bits(i64::MIN), 64);
+ assert_eq!(imm_num_bits(i64::MAX), 64);
+ }
+
+ #[test]
+ fn test_uimm_num_bits() {
+ assert_eq!(uimm_num_bits(u8::MIN.into()), 8);
+ assert_eq!(uimm_num_bits(u8::MAX.into()), 8);
+
+ assert_eq!(uimm_num_bits(((u8::MAX as u16) + 1).into()), 16);
+ assert_eq!(uimm_num_bits(u16::MAX.into()), 16);
+
+ assert_eq!(uimm_num_bits(((u16::MAX as u32) + 1).into()), 32);
+ assert_eq!(uimm_num_bits(u32::MAX.into()), 32);
+
+ assert_eq!(uimm_num_bits((u32::MAX as u64) + 1), 64);
+ assert_eq!(uimm_num_bits(u64::MAX), 64);
+ }
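+
+ #[test]
+ fn test_write_int_little_endian() {
+ // Illustrative sanity check: write_int emits bytes in little-endian
+ // order regardless of the requested width.
+ let mut cb = CodeBlock::new_dummy(1024);
+ cb.write_int(0x11223344, 32);
+ assert_eq!(format!("{:x}", cb), "44332211");
+ }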
+
+ #[test]
+ fn test_code_size() {
+ // Write 4 bytes in the first page
+ let mut cb = CodeBlock::new_dummy(CodeBlock::PREFERRED_CODE_PAGE_SIZE * 2);
+ cb.write_bytes(&[0, 0, 0, 0]);
+ assert_eq!(cb.code_size(), 4);
+
+ // Moving to the next page should not increase code_size
+ assert!(cb.next_page(cb.get_write_ptr(), |_, _| {}));
+ assert_eq!(cb.code_size(), 4);
+
+ // Write 4 bytes in the second page
+ cb.write_bytes(&[0, 0, 0, 0]);
+ assert_eq!(cb.code_size(), 8);
+
+ // Rewrite 4 bytes in the first page
+ let old_write_pos = cb.get_write_pos();
+ cb.set_pos(0);
+ cb.write_bytes(&[1, 1, 1, 1]);
+
+ // Moving from an old page to the next page should not increase code_size
+ assert!(cb.next_page(cb.get_write_ptr(), |_, _| {}));
+ cb.set_pos(old_write_pos);
+ assert_eq!(cb.code_size(), 8);
+ }
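+
+ #[test]
+ fn test_label_ref_and_link() {
+ // Illustrative sketch of the two-pass label mechanism: label_ref()
+ // reserves 4 bytes at the reference site, and link_labels() later
+ // calls back into `encode` with the resolved addresses.
+ let mut cb = CodeBlock::new_dummy(1024);
+ let label_idx = cb.new_label("target".to_string());
+ cb.label_ref(label_idx, 4, |cb, src_addr, dst_addr| {
+ cb.write_int((dst_addr - src_addr) as u64, 32);
+ });
+ cb.write_label(label_idx);
+ cb.link_labels();
+ // The label sits right after the 4-byte reference, so the offset is 0
+ assert_eq!(format!("{:x}", cb), "00000000");
+ }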
+}
diff --git a/yjit/src/asm/x86_64/mod.rs b/yjit/src/asm/x86_64/mod.rs
new file mode 100644
index 0000000000..0ef5e92117
--- /dev/null
+++ b/yjit/src/asm/x86_64/mod.rs
@@ -0,0 +1,1456 @@
+#![allow(dead_code)] // For instructions we don't currently generate
+
+use crate::asm::*;
+
+// Import the assembler tests module
+mod tests;
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct X86Imm
+{
+ // Size in bits
+ pub num_bits: u8,
+
+ // The value of the immediate
+ pub value: i64
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct X86UImm
+{
+ // Size in bits
+ pub num_bits: u8,
+
+ // The value of the immediate
+ pub value: u64
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum RegType
+{
+ GP,
+ //FP,
+ //XMM,
+ IP,
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct X86Reg
+{
+ // Size in bits
+ pub num_bits: u8,
+
+ // Register type
+ pub reg_type: RegType,
+
+ // Register index number
+ pub reg_no: u8,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct X86Mem
+{
+ // Size in bits
+ pub num_bits: u8,
+
+ /// Base register number
+ pub base_reg_no: u8,
+
+ /// Index register number
+ pub idx_reg_no: Option<u8>,
+
+ /// SIB scale exponent value (power of two, two bits)
+ pub scale_exp: u8,
+
+ /// Constant displacement from the base, not scaled
+ pub disp: i32,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub enum X86Opnd
+{
+ // Dummy operand
+ None,
+
+ // Immediate value
+ Imm(X86Imm),
+
+ // Unsigned immediate
+ UImm(X86UImm),
+
+ // General-purpose register
+ Reg(X86Reg),
+
+ // Memory location
+ Mem(X86Mem),
+
+ // IP-relative memory location
+ IPRel(i32)
+}
+
+impl X86Reg {
+ pub fn with_num_bits(&self, num_bits: u8) -> Self {
+ assert!(
+ num_bits == 8 ||
+ num_bits == 16 ||
+ num_bits == 32 ||
+ num_bits == 64
+ );
+ Self {
+ num_bits,
+ reg_type: self.reg_type,
+ reg_no: self.reg_no
+ }
+ }
+}
+
+impl X86Opnd {
+ fn rex_needed(&self) -> bool {
+ match self {
+ X86Opnd::None => false,
+ X86Opnd::Imm(_) => false,
+ X86Opnd::UImm(_) => false,
+ // 8-bit access to SPL/BPL/SIL/DIL (reg_no 4..=7) also needs a REX
+ // prefix; without one those encodings select AH/CH/DH/BH instead.
+ X86Opnd::Reg(reg) => reg.reg_no > 7 || reg.num_bits == 8 && reg.reg_no >= 4,
+ X86Opnd::Mem(mem) => mem.base_reg_no > 7 || (mem.idx_reg_no.unwrap_or(0) > 7),
+ X86Opnd::IPRel(_) => false
+ }
+ }
+
+ // Check if an SIB byte is needed to encode this operand
+ fn sib_needed(&self) -> bool {
+ match self {
+ X86Opnd::Mem(mem) => {
+ mem.idx_reg_no.is_some() ||
+ mem.base_reg_no == RSP_REG_NO ||
+ mem.base_reg_no == R12_REG_NO
+ },
+ _ => false
+ }
+ }
+
+ fn disp_size(&self) -> u32 {
+ match self {
+ X86Opnd::IPRel(_) => 32,
+ X86Opnd::Mem(mem) => {
+ if mem.disp != 0 {
+ // Compute the required displacement size
+ let num_bits = imm_num_bits(mem.disp.into());
+ if num_bits > 32 {
+ panic!("displacement does not fit in 32 bits");
+ }
+
+ // x86 can only encode 8-bit and 32-bit displacements
+ if num_bits == 16 { 32 } else { 8 }
+ } else if mem.base_reg_no == RBP_REG_NO || mem.base_reg_no == R13_REG_NO {
+ // If RBP or R13 is the base, mod=00 would be interpreted as
+ // RIP-relative or disp32 addressing, so a zero 8-bit
+ // displacement must be encoded
+ 8
+ } else {
+ 0
+ }
+ },
+ _ => 0
+ }
+ }
+
+ pub fn num_bits(&self) -> u8 {
+ match self {
+ X86Opnd::Reg(reg) => reg.num_bits,
+ X86Opnd::Imm(imm) => imm.num_bits,
+ X86Opnd::UImm(uimm) => uimm.num_bits,
+ X86Opnd::Mem(mem) => mem.num_bits,
+ _ => unreachable!()
+ }
+ }
+
+ pub fn is_some(&self) -> bool {
+ match self {
+ X86Opnd::None => false,
+ _ => true
+ }
+ }
+
+}
+
+// Instruction pointer
+pub const RIP: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 64, reg_type: RegType::IP, reg_no: 5 });
+
+// 64-bit GP registers
+const RAX_REG_NO: u8 = 0;
+const RSP_REG_NO: u8 = 4;
+const RBP_REG_NO: u8 = 5;
+const R12_REG_NO: u8 = 12;
+const R13_REG_NO: u8 = 13;
+
+pub const RAX_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: RAX_REG_NO };
+pub const RCX_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 1 };
+pub const RDX_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 2 };
+pub const RBX_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 3 };
+pub const RSP_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: RSP_REG_NO };
+pub const RBP_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: RBP_REG_NO };
+pub const RSI_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 6 };
+pub const RDI_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 7 };
+pub const R8_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 8 };
+pub const R9_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 9 };
+pub const R10_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 10 };
+pub const R11_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 11 };
+pub const R12_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: R12_REG_NO };
+pub const R13_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: R13_REG_NO };
+pub const R14_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 14 };
+pub const R15_REG: X86Reg = X86Reg { num_bits: 64, reg_type: RegType::GP, reg_no: 15 };
+
+pub const RAX: X86Opnd = X86Opnd::Reg(RAX_REG);
+pub const RCX: X86Opnd = X86Opnd::Reg(RCX_REG);
+pub const RDX: X86Opnd = X86Opnd::Reg(RDX_REG);
+pub const RBX: X86Opnd = X86Opnd::Reg(RBX_REG);
+pub const RSP: X86Opnd = X86Opnd::Reg(RSP_REG);
+pub const RBP: X86Opnd = X86Opnd::Reg(RBP_REG);
+pub const RSI: X86Opnd = X86Opnd::Reg(RSI_REG);
+pub const RDI: X86Opnd = X86Opnd::Reg(RDI_REG);
+pub const R8: X86Opnd = X86Opnd::Reg(R8_REG);
+pub const R9: X86Opnd = X86Opnd::Reg(R9_REG);
+pub const R10: X86Opnd = X86Opnd::Reg(R10_REG);
+pub const R11: X86Opnd = X86Opnd::Reg(R11_REG);
+pub const R12: X86Opnd = X86Opnd::Reg(R12_REG);
+pub const R13: X86Opnd = X86Opnd::Reg(R13_REG);
+pub const R14: X86Opnd = X86Opnd::Reg(R14_REG);
+pub const R15: X86Opnd = X86Opnd::Reg(R15_REG);
+
+// 32-bit GP registers
+pub const EAX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 0 });
+pub const ECX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 1 });
+pub const EDX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 2 });
+pub const EBX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 3 });
+pub const ESP: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 4 });
+pub const EBP: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 5 });
+pub const ESI: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 6 });
+pub const EDI: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 7 });
+pub const R8D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 8 });
+pub const R9D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 9 });
+pub const R10D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 10 });
+pub const R11D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 11 });
+pub const R12D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 12 });
+pub const R13D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 13 });
+pub const R14D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 14 });
+pub const R15D: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 32, reg_type: RegType::GP, reg_no: 15 });
+
+// 16-bit GP registers
+pub const AX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 0 });
+pub const CX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 1 });
+pub const DX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 2 });
+pub const BX: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 3 });
+//pub const SP: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 4 });
+pub const BP: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 5 });
+pub const SI: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 6 });
+pub const DI: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 7 });
+pub const R8W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 8 });
+pub const R9W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 9 });
+pub const R10W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 10 });
+pub const R11W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 11 });
+pub const R12W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 12 });
+pub const R13W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 13 });
+pub const R14W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 14 });
+pub const R15W: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 16, reg_type: RegType::GP, reg_no: 15 });
+
+// 8-bit GP registers
+pub const AL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 0 });
+pub const CL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 1 });
+pub const DL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 2 });
+pub const BL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 3 });
+pub const SPL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 4 });
+pub const BPL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 5 });
+pub const SIL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 6 });
+pub const DIL: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 7 });
+pub const R8B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 8 });
+pub const R9B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 9 });
+pub const R10B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 10 });
+pub const R11B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 11 });
+pub const R12B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 12 });
+pub const R13B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 13 });
+pub const R14B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 14 });
+pub const R15B: X86Opnd = X86Opnd::Reg(X86Reg { num_bits: 8, reg_type: RegType::GP, reg_no: 15 });
+
+//===========================================================================
+
+/// Shorthand for memory operand with base register and displacement
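+/// When the base register is RIP, the displacement is kept as an
+/// IP-relative operand: mem_opnd(64, RIP, 16) yields X86Opnd::IPRel(16).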
+pub fn mem_opnd(num_bits: u8, base_reg: X86Opnd, disp: i32) -> X86Opnd
+{
+ let base_reg = match base_reg {
+ X86Opnd::Reg(reg) => reg,
+ _ => unreachable!()
+ };
+
+ if base_reg.reg_type == RegType::IP {
+ X86Opnd::IPRel(disp)
+ } else {
+ X86Opnd::Mem(
+ X86Mem {
+ num_bits,
+ base_reg_no: base_reg.reg_no,
+ idx_reg_no: None,
+ scale_exp: 0,
+ disp,
+ }
+ )
+ }
+}
+
+/// Memory operand with SIB (Scale Index Base) indexing
+pub fn mem_opnd_sib(num_bits: u8, base_opnd: X86Opnd, index_opnd: X86Opnd, scale: i32, disp: i32) -> X86Opnd {
+ if let (X86Opnd::Reg(base_reg), X86Opnd::Reg(index_reg)) = (base_opnd, index_opnd) {
+ let scale_exp: u8;
+
+ match scale {
+ 8 => { scale_exp = 3; },
+ 4 => { scale_exp = 2; },
+ 2 => { scale_exp = 1; },
+ 1 => { scale_exp = 0; },
+ _ => unreachable!()
+ };
+
+ X86Opnd::Mem(X86Mem {
+ num_bits,
+ base_reg_no: base_reg.reg_no,
+ idx_reg_no: Some(index_reg.reg_no),
+ scale_exp,
+ disp
+ })
+ } else {
+ unreachable!()
+ }
+}
+
+/*
+// Struct member operand
+#define member_opnd(base_reg, struct_type, member_name) mem_opnd( \
+ 8 * sizeof(((struct_type*)0)->member_name), \
+ base_reg, \
+ offsetof(struct_type, member_name) \
+)
+
+// Struct member operand with an array index
+#define member_opnd_idx(base_reg, struct_type, member_name, idx) mem_opnd( \
+ 8 * sizeof(((struct_type*)0)->member_name[0]), \
+ base_reg, \
+ (offsetof(struct_type, member_name) + \
+ sizeof(((struct_type*)0)->member_name[0]) * idx) \
+)
+*/
+
+/*
+// TODO: this should be a method, X86Opnd.resize() or X86Opnd.subreg()
+static x86opnd_t resize_opnd(x86opnd_t opnd, uint32_t num_bits)
+{
+ assert (num_bits % 8 == 0);
+ x86opnd_t sub = opnd;
+ sub.num_bits = num_bits;
+ return sub;
+}
+*/
+
+pub fn imm_opnd(value: i64) -> X86Opnd
+{
+ X86Opnd::Imm(X86Imm { num_bits: imm_num_bits(value), value })
+}
+
+pub fn uimm_opnd(value: u64) -> X86Opnd
+{
+ X86Opnd::UImm(X86UImm { num_bits: uimm_num_bits(value), value })
+}
+
+pub fn const_ptr_opnd(ptr: *const u8) -> X86Opnd
+{
+ uimm_opnd(ptr as u64)
+}
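+
+// Illustrative sanity check: these helpers pick the smallest immediate
+// width that can represent the value.
+#[cfg(test)]
+#[test]
+fn test_imm_opnd_sizing() {
+ assert!(matches!(imm_opnd(-1), X86Opnd::Imm(X86Imm { num_bits: 8, value: -1 })));
+ assert!(matches!(uimm_opnd(0x1_0000), X86Opnd::UImm(X86UImm { num_bits: 32, value: 0x1_0000 })));
+}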
+
+/// Write the REX byte
+fn write_rex(cb: &mut CodeBlock, w_flag: bool, reg_no: u8, idx_reg_no: u8, rm_reg_no: u8) {
+ // 0 1 0 0 w r x b
+ // w - 64-bit operand size flag
+ // r - MODRM.reg extension
+ // x - SIB.index extension
+ // b - MODRM.rm or SIB.base extension
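+ //
+ // For example, a 64-bit operation whose MODRM.rm field selects R9
+ // needs w=1 and b=1, giving the REX byte 0x49.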
+ let w: u8 = if w_flag { 1 } else { 0 };
+ let r: u8 = if (reg_no & 8) > 0 { 1 } else { 0 };
+ let x: u8 = if (idx_reg_no & 8) > 0 { 1 } else { 0 };
+ let b: u8 = if (rm_reg_no & 8) > 0 { 1 } else { 0 };
+
+ // Encode and write the REX byte
+ cb.write_byte(0x40 + (w << 3) + (r << 2) + (x << 1) + (b));
+}
+
+/// Write an opcode byte with an embedded register operand
+fn write_opcode(cb: &mut CodeBlock, opcode: u8, reg: X86Reg) {
+ let op_byte: u8 = opcode | (reg.reg_no & 7);
+ cb.write_byte(op_byte);
+}
+
+/// Encode an RM instruction
+fn write_rm(cb: &mut CodeBlock, sz_pref: bool, rex_w: bool, r_opnd: X86Opnd, rm_opnd: X86Opnd, op_ext: Option<u8>, bytes: &[u8]) {
+ let op_len = bytes.len();
+ assert!(op_len > 0 && op_len <= 3);
+ assert!(matches!(r_opnd, X86Opnd::Reg(_) | X86Opnd::None), "Can only encode an RM instruction with a register or a none");
+
+ // Flag to indicate the REX prefix is needed
+ let need_rex = rex_w || r_opnd.rex_needed() || rm_opnd.rex_needed();
+
+ // Flag to indicate SIB byte is needed
+ let need_sib = r_opnd.sib_needed() || rm_opnd.sib_needed();
+
+ // Add the operand-size prefix, if needed
+ if sz_pref {
+ cb.write_byte(0x66);
+ }
+
+ // Add the REX prefix, if needed
+ if need_rex {
+ // 0 1 0 0 w r x b
+ // w - 64-bit operand size flag
+ // r - MODRM.reg extension
+ // x - SIB.index extension
+ // b - MODRM.rm or SIB.base extension
+
+ let w = if rex_w { 1 } else { 0 };
+ let r = match r_opnd {
+ X86Opnd::None => 0,
+ X86Opnd::Reg(reg) => if (reg.reg_no & 8) > 0 { 1 } else { 0 },
+ _ => unreachable!()
+ };
+
+ let x = match (need_sib, rm_opnd) {
+ (true, X86Opnd::Mem(mem)) => if (mem.idx_reg_no.unwrap_or(0) & 8) > 0 { 1 } else { 0 },
+ _ => 0
+ };
+
+ let b = match rm_opnd {
+ X86Opnd::Reg(reg) => if (reg.reg_no & 8) > 0 { 1 } else { 0 },
+ X86Opnd::Mem(mem) => if (mem.base_reg_no & 8) > 0 { 1 } else { 0 },
+ _ => 0
+ };
+
+ // Encode and write the REX byte
+ let rex_byte: u8 = 0x40 + (w << 3) + (r << 2) + (x << 1) + (b);
+ cb.write_byte(rex_byte);
+ }
+
+ // Write the opcode bytes to the code block
+ for byte in bytes {
+ cb.write_byte(*byte)
+ }
+
+ // MODRM.mod (2 bits)
+ // MODRM.reg (3 bits)
+ // MODRM.rm (3 bits)
+
+ assert!(
+ !(op_ext.is_some() && r_opnd.is_some()),
+ "opcode extension and register operand present"
+ );
+
+ // Encode the mod field
+ let rm_mod = match rm_opnd {
+ X86Opnd::Reg(_) => 3,
+ X86Opnd::IPRel(_) => 0,
+ X86Opnd::Mem(_mem) => {
+ match rm_opnd.disp_size() {
+ 0 => 0,
+ 8 => 1,
+ 32 => 2,
+ _ => unreachable!()
+ }
+ },
+ _ => unreachable!()
+ };
+
+ // Encode the reg field
+ let reg: u8;
+ if let Some(val) = op_ext {
+ reg = val;
+ } else {
+ reg = match r_opnd {
+ X86Opnd::Reg(reg) => reg.reg_no & 7,
+ _ => 0
+ };
+ }
+
+ // Encode the rm field
+ let rm = match rm_opnd {
+ X86Opnd::Reg(reg) => reg.reg_no & 7,
+ X86Opnd::Mem(mem) => if need_sib { 4 } else { mem.base_reg_no & 7 },
+ X86Opnd::IPRel(_) => 0b101,
+ _ => unreachable!()
+ };
+
+ // Encode and write the ModR/M byte
+ let rm_byte: u8 = (rm_mod << 6) + (reg << 3) + (rm);
+ cb.write_byte(rm_byte);
+
+ // Add the SIB byte, if needed
+ if need_sib {
+ // SIB.scale (2 bits)
+ // SIB.index (3 bits)
+ // SIB.base (3 bits)
+
+ match rm_opnd {
+ X86Opnd::Mem(mem) => {
+ // Encode the scale value
+ let scale = mem.scale_exp;
+
+ // Encode the index value
+ let index = mem.idx_reg_no.map(|no| no & 7).unwrap_or(4);
+
+ // Encode the base register
+ let base = mem.base_reg_no & 7;
+
+ // Encode and write the SIB byte
+ let sib_byte: u8 = (scale << 6) + (index << 3) + (base);
+ cb.write_byte(sib_byte);
+ },
+ _ => panic!("Expected mem operand")
+ }
+ }
+
+ // Add the displacement
+ match rm_opnd {
+ X86Opnd::Mem(mem) => {
+ let disp_size = rm_opnd.disp_size();
+ if disp_size > 0 {
+ cb.write_int(mem.disp as u64, disp_size);
+ }
+ },
+ X86Opnd::IPRel(rel) => {
+ cb.write_int(rel as u64, 32);
+ },
+ _ => ()
+ };
+}
+
+// Encode a mul-like single-operand RM instruction
+fn write_rm_unary(cb: &mut CodeBlock, op_mem_reg_8: u8, op_mem_reg_pref: u8, op_ext: Option<u8>, opnd: X86Opnd) {
+ assert!(matches!(opnd, X86Opnd::Reg(_) | X86Opnd::Mem(_)));
+
+ let opnd_size = opnd.num_bits();
+ assert!(opnd_size == 8 || opnd_size == 16 || opnd_size == 32 || opnd_size == 64);
+
+ if opnd_size == 8 {
+ write_rm(cb, false, false, X86Opnd::None, opnd, op_ext, &[op_mem_reg_8]);
+ } else {
+ let sz_pref = opnd_size == 16;
+ let rex_w = opnd_size == 64;
+ write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd, op_ext, &[op_mem_reg_pref]);
+ }
+}
+
+// Encode an add-like RM instruction with multiple possible encodings
+fn write_rm_multi(cb: &mut CodeBlock, op_mem_reg8: u8, op_mem_reg_pref: u8, op_reg_mem8: u8, op_reg_mem_pref: u8, op_mem_imm8: u8, op_mem_imm_sml: u8, op_mem_imm_lrg: u8, op_ext_imm: Option<u8>, opnd0: X86Opnd, opnd1: X86Opnd) {
+ assert!(matches!(opnd0, X86Opnd::Reg(_) | X86Opnd::Mem(_)));
+
+ // Check the size of opnd0
+ let opnd_size = opnd0.num_bits();
+ assert!(opnd_size == 8 || opnd_size == 16 || opnd_size == 32 || opnd_size == 64);
+
+ // Check the size of opnd1
+ match opnd1 {
+ X86Opnd::Reg(reg) => assert_eq!(reg.num_bits, opnd_size),
+ X86Opnd::Mem(mem) => assert_eq!(mem.num_bits, opnd_size),
+ X86Opnd::Imm(imm) => assert!(imm.num_bits <= opnd_size),
+ X86Opnd::UImm(uimm) => assert!(uimm.num_bits <= opnd_size),
+ _ => ()
+ };
+
+ let sz_pref = opnd_size == 16;
+ let rex_w = opnd_size == 64;
+
+ match (opnd0, opnd1) {
+ // R/M + Reg
+ (X86Opnd::Mem(_), X86Opnd::Reg(_)) | (X86Opnd::Reg(_), X86Opnd::Reg(_)) => {
+ if opnd_size == 8 {
+ write_rm(cb, false, false, opnd1, opnd0, None, &[op_mem_reg8]);
+ } else {
+ write_rm(cb, sz_pref, rex_w, opnd1, opnd0, None, &[op_mem_reg_pref]);
+ }
+ },
+ // Reg + R/M/IPRel
+ (X86Opnd::Reg(_), X86Opnd::Mem(_) | X86Opnd::IPRel(_)) => {
+ if opnd_size == 8 {
+ write_rm(cb, false, false, opnd0, opnd1, None, &[op_reg_mem8]);
+ } else {
+ write_rm(cb, sz_pref, rex_w, opnd0, opnd1, None, &[op_reg_mem_pref]);
+ }
+ },
+ // R/M + Imm
+ (_, X86Opnd::Imm(imm)) => {
+ if imm.num_bits <= 8 {
+ // 8-bit immediate
+
+ if opnd_size == 8 {
+ write_rm(cb, false, false, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm8]);
+ } else {
+ write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm_sml]);
+ }
+
+ cb.write_int(imm.value as u64, 8);
+ } else if imm.num_bits <= 32 {
+ // 32-bit immediate
+
+ assert!(imm.num_bits <= opnd_size);
+ write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm_lrg]);
+ cb.write_int(imm.value as u64, if opnd_size > 32 { 32 } else { opnd_size.into() });
+ } else {
+ panic!("immediate value too large");
+ }
+ },
+ // R/M + UImm
+ (_, X86Opnd::UImm(uimm)) => {
+ // If the size of left hand operand equals the number of bits
+ // required to represent the right hand immediate, then we
+ // don't care about sign extension when calculating the immediate
+ let num_bits = if opnd0.num_bits() == uimm_num_bits(uimm.value) {
+ uimm_num_bits(uimm.value)
+ } else {
+ imm_num_bits(uimm.value.try_into().unwrap())
+ };
+
+ if num_bits <= 8 {
+ // 8-bit immediate
+
+ if opnd_size == 8 {
+ write_rm(cb, false, false, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm8]);
+ } else {
+ write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm_sml]);
+ }
+
+ cb.write_int(uimm.value, 8);
+ } else if num_bits <= 32 {
+ // 32-bit immediate
+
+ assert!(num_bits <= opnd_size);
+ write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm_lrg]);
+ cb.write_int(uimm.value, if opnd_size > 32 { 32 } else { opnd_size.into() });
+ } else {
+ panic!("immediate value too large (num_bits={}, num={uimm:?})", num_bits);
+ }
+ },
+ _ => panic!("unknown encoding combo: {opnd0:?} {opnd1:?}")
+ };
+}
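+
+// For example (mirroring test_add), the immediate width picks the encoding:
+//   add(cb, ECX, imm_opnd(8));   // fits in 8 bits -> 83 c1 08 (op_mem_imm_sml)
+//   add(cb, ECX, imm_opnd(255)); // needs 32 bits  -> 81 c1 ff 00 00 00 (op_mem_imm_lrg)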
+
+/// LOCK - lock prefix for atomic shared memory operations
+pub fn write_lock_prefix(cb: &mut CodeBlock) {
+ cb.write_byte(0xf0);
+}
+
+/// add - Integer addition
+pub fn add(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_rm_multi(
+ cb,
+ 0x00, // opMemReg8
+ 0x01, // opMemRegPref
+ 0x02, // opRegMem8
+ 0x03, // opRegMemPref
+ 0x80, // opMemImm8
+ 0x83, // opMemImmSml
+ 0x81, // opMemImmLrg
+ Some(0x00), // opExtImm
+ opnd0,
+ opnd1
+ );
+}
+
+/// and - Bitwise AND
+pub fn and(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_rm_multi(
+ cb,
+ 0x20, // opMemReg8
+ 0x21, // opMemRegPref
+ 0x22, // opRegMem8
+ 0x23, // opRegMemPref
+ 0x80, // opMemImm8
+ 0x83, // opMemImmSml
+ 0x81, // opMemImmLrg
+ Some(0x04), // opExtImm
+ opnd0,
+ opnd1
+ );
+}
+
+/// call - Call to a pointer with a 32-bit displacement offset
+pub fn call_rel32(cb: &mut CodeBlock, rel32: i32) {
+ // Write the opcode
+ cb.write_byte(0xe8);
+
+ // Write the relative 32-bit jump offset
+ cb.write_bytes(&rel32.to_le_bytes());
+}
+
+/// call - Call a pointer, encode with a 32-bit offset if possible
+pub fn call_ptr(cb: &mut CodeBlock, scratch_opnd: X86Opnd, dst_ptr: *const u8) {
+ if let X86Opnd::Reg(_scratch_reg) = scratch_opnd {
+ use crate::stats::{incr_counter};
+
+ // Pointer to the end of this call instruction
+ let end_ptr = cb.get_ptr(cb.write_pos + 5);
+
+ // Compute the jump offset
+ let rel64: i64 = dst_ptr as i64 - end_ptr.raw_ptr(cb) as i64;
+
+ // If the offset fits in 32-bit
+ if rel64 >= i32::MIN.into() && rel64 <= i32::MAX.into() {
+ incr_counter!(num_send_x86_rel32);
+ call_rel32(cb, rel64.try_into().unwrap());
+ return;
+ }
+
+ // Move the pointer into the scratch register and call
+ incr_counter!(num_send_x86_reg);
+ mov(cb, scratch_opnd, const_ptr_opnd(dst_ptr));
+ call(cb, scratch_opnd);
+ } else {
+ unreachable!();
+ }
+}
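+
+// For example (mirroring test_call_ptr), a call whose target is the current
+// write position has rel64 == -5 (the length of the call itself), which fits
+// in 32 bits and emits e8 fb ff ff ff.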
+
+/// call - Call to label with 32-bit offset
+pub fn call_label(cb: &mut CodeBlock, label_idx: usize) {
+ cb.label_ref(label_idx, 5, |cb, src_addr, dst_addr| {
+ cb.write_byte(0xE8);
+ cb.write_int((dst_addr - src_addr) as u64, 32);
+ });
+}
+
+/// call - Indirect call with an R/M operand
+pub fn call(cb: &mut CodeBlock, opnd: X86Opnd) {
+ write_rm(cb, false, false, X86Opnd::None, opnd, Some(2), &[0xff]);
+}
+
+/// Encode a conditional move instruction
+fn write_cmov(cb: &mut CodeBlock, opcode1: u8, dst: X86Opnd, src: X86Opnd) {
+ if let X86Opnd::Reg(reg) = dst {
+ match src {
+ X86Opnd::Reg(_) => (),
+ X86Opnd::Mem(_) => (),
+ _ => unreachable!()
+ };
+
+ assert!(reg.num_bits >= 16);
+ let sz_pref = reg.num_bits == 16;
+ let rex_w = reg.num_bits == 64;
+
+ write_rm(cb, sz_pref, rex_w, dst, src, None, &[0x0f, opcode1]);
+ } else {
+ unreachable!()
+ }
+}
+
+/// cmovcc - Conditional move
+pub fn cmova(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x47, dst, src); }
+pub fn cmovae(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x43, dst, src); }
+pub fn cmovb(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x42, dst, src); }
+pub fn cmovbe(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x46, dst, src); }
+pub fn cmovc(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x42, dst, src); }
+pub fn cmove(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x44, dst, src); }
+pub fn cmovg(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4f, dst, src); }
+pub fn cmovge(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4d, dst, src); }
+pub fn cmovl(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4c, dst, src); }
+pub fn cmovle(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4e, dst, src); }
+pub fn cmovna(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x46, dst, src); }
+pub fn cmovnae(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x42, dst, src); }
+pub fn cmovnb(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x43, dst, src); }
+pub fn cmovnbe(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x47, dst, src); }
+pub fn cmovnc(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x43, dst, src); }
+pub fn cmovne(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x45, dst, src); }
+pub fn cmovng(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4e, dst, src); }
+pub fn cmovnge(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4c, dst, src); }
+pub fn cmovnl(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4d, dst, src); }
+pub fn cmovnle(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4f, dst, src); }
+pub fn cmovno(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x41, dst, src); }
+pub fn cmovnp(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4b, dst, src); }
+pub fn cmovns(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x49, dst, src); }
+pub fn cmovnz(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x45, dst, src); }
+pub fn cmovo(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x40, dst, src); }
+pub fn cmovp(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4a, dst, src); }
+pub fn cmovpe(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4a, dst, src); }
+pub fn cmovpo(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x4b, dst, src); }
+pub fn cmovs(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x48, dst, src); }
+pub fn cmovz(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { write_cmov(cb, 0x44, dst, src); }
+
+/// cmp - Compare and set flags
+pub fn cmp(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_rm_multi(
+ cb,
+ 0x38, // opMemReg8
+ 0x39, // opMemRegPref
+ 0x3A, // opRegMem8
+ 0x3B, // opRegMemPref
+ 0x80, // opMemImm8
+ 0x83, // opMemImmSml
+ 0x81, // opMemImmLrg
+ Some(0x07), // opExtImm
+ opnd0,
+ opnd1
+ );
+}
+
+/// cdq - Convert doubleword to quadword
+pub fn cdq(cb: &mut CodeBlock) {
+ cb.write_byte(0x99);
+}
+
+/// cqo - Convert quadword to octaword
+pub fn cqo(cb: &mut CodeBlock) {
+ cb.write_bytes(&[0x48, 0x99]);
+}
+
+/// imul - signed integer multiply
+pub fn imul(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ assert!(opnd0.num_bits() == 64);
+ assert!(opnd1.num_bits() == 64);
+ assert!(matches!(opnd0, X86Opnd::Reg(_) | X86Opnd::Mem(_)));
+ assert!(matches!(opnd1, X86Opnd::Reg(_) | X86Opnd::Mem(_)));
+
+ match (opnd0, opnd1) {
+ (X86Opnd::Reg(_), X86Opnd::Reg(_) | X86Opnd::Mem(_)) => {
+            // REX.W + 0F AF /r: IMUL r64, r/m64
+            // Quadword register := Quadword register * r/m64.
+ write_rm(cb, false, true, opnd0, opnd1, None, &[0x0F, 0xAF]);
+ }
+
+        // IMUL only encodes a register destination. Multiplication is
+        // commutative, so flip the operands and reuse the r64, r/m64 form.
+ (X86Opnd::Mem(_), X86Opnd::Reg(_)) => {
+            // REX.W + 0F AF /r: IMUL r64, r/m64
+            // Quadword register := Quadword register * r/m64.
+ write_rm(cb, false, true, opnd1, opnd0, None, &[0x0F, 0xAF]);
+ }
+
+ _ => unreachable!()
+ }
+}
+
+/// Interrupt 3 - trap to debugger
+pub fn int3(cb: &mut CodeBlock) {
+ cb.write_byte(0xcc);
+}
+
+// Encode a conditional relative jump to a label
+// Note: this always encodes a 32-bit offset
+fn write_jcc<const OP: u8>(cb: &mut CodeBlock, label_idx: usize) {
+ cb.label_ref(label_idx, 6, |cb, src_addr, dst_addr| {
+ cb.write_byte(0x0F);
+ cb.write_byte(OP);
+ cb.write_int((dst_addr - src_addr) as u64, 32);
+ });
+}
+
+/// jcc - relative jumps to a label
+pub fn ja_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x87>(cb, label_idx); }
+pub fn jae_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x83>(cb, label_idx); }
+pub fn jb_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x82>(cb, label_idx); }
+pub fn jbe_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x86>(cb, label_idx); }
+pub fn jc_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x82>(cb, label_idx); }
+pub fn je_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x84>(cb, label_idx); }
+pub fn jg_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x8F>(cb, label_idx); }
+pub fn jge_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x8D>(cb, label_idx); }
+pub fn jl_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x8C>(cb, label_idx); }
+pub fn jle_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x8E>(cb, label_idx); }
+pub fn jna_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x86>(cb, label_idx); }
+pub fn jnae_label(cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x82>(cb, label_idx); }
+pub fn jnb_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x83>(cb, label_idx); }
+pub fn jnbe_label(cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x87>(cb, label_idx); }
+pub fn jnc_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x83>(cb, label_idx); }
+pub fn jne_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x85>(cb, label_idx); }
+pub fn jng_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x8E>(cb, label_idx); }
+pub fn jnge_label(cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x8C>(cb, label_idx); }
+pub fn jnl_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x8D>(cb, label_idx); }
+pub fn jnle_label(cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x8F>(cb, label_idx); }
+pub fn jno_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x81>(cb, label_idx); }
+pub fn jnp_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x8b>(cb, label_idx); }
+pub fn jns_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x89>(cb, label_idx); }
+pub fn jnz_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x85>(cb, label_idx); }
+pub fn jo_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x80>(cb, label_idx); }
+pub fn jp_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x8A>(cb, label_idx); }
+pub fn jpe_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x8A>(cb, label_idx); }
+pub fn jpo_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x8B>(cb, label_idx); }
+pub fn js_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x88>(cb, label_idx); }
+pub fn jz_label (cb: &mut CodeBlock, label_idx: usize) { write_jcc::<0x84>(cb, label_idx); }
+
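+/// jmp - Unconditional relative jump to a label (32-bit offset)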
+pub fn jmp_label(cb: &mut CodeBlock, label_idx: usize) {
+ cb.label_ref(label_idx, 5, |cb, src_addr, dst_addr| {
+ cb.write_byte(0xE9);
+ cb.write_int((dst_addr - src_addr) as u64, 32);
+ });
+}
+
+/// Encode a relative jump to a pointer at a 32-bit offset (direct or conditional)
+fn write_jcc_ptr(cb: &mut CodeBlock, op0: u8, op1: u8, dst_ptr: CodePtr) {
+    // Write the opcode. op0 == 0xFF is a sentinel meaning there is no first
+    // opcode byte (an unconditional jmp rather than a 0x0F-prefixed jcc).
+    if op0 != 0xFF {
+        cb.write_byte(op0);
+    }
+
+ cb.write_byte(op1);
+
+ // Pointer to the end of this jump instruction
+ let end_ptr = cb.get_ptr(cb.write_pos + 4);
+
+ // Compute the jump offset
+ let rel64 = dst_ptr.as_offset() - end_ptr.as_offset();
+
+ if rel64 >= i32::MIN.into() && rel64 <= i32::MAX.into() {
+ // Write the relative 32-bit jump offset
+ cb.write_int(rel64 as u64, 32);
+ }
+ else {
+ // Offset doesn't fit in 4 bytes. Report error.
+ cb.dropped_bytes = true;
+ }
+}
+
+/// jcc - relative jumps to a pointer (32-bit offset)
+pub fn ja_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x87, ptr); }
+pub fn jae_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x83, ptr); }
+pub fn jb_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x82, ptr); }
+pub fn jbe_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x86, ptr); }
+pub fn jc_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x82, ptr); }
+pub fn je_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x84, ptr); }
+pub fn jg_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8F, ptr); }
+pub fn jge_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8D, ptr); }
+pub fn jl_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8C, ptr); }
+pub fn jle_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8E, ptr); }
+pub fn jna_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x86, ptr); }
+pub fn jnae_ptr(cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x82, ptr); }
+pub fn jnb_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x83, ptr); }
+pub fn jnbe_ptr(cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x87, ptr); }
+pub fn jnc_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x83, ptr); }
+pub fn jne_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x85, ptr); }
+pub fn jng_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8E, ptr); }
+pub fn jnge_ptr(cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8C, ptr); }
+pub fn jnl_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8D, ptr); }
+pub fn jnle_ptr(cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8F, ptr); }
+pub fn jno_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x81, ptr); }
+pub fn jnp_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8b, ptr); }
+pub fn jns_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x89, ptr); }
+pub fn jnz_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x85, ptr); }
+pub fn jo_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x80, ptr); }
+pub fn jp_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8A, ptr); }
+pub fn jpe_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8A, ptr); }
+pub fn jpo_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x8B, ptr); }
+pub fn js_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x88, ptr); }
+pub fn jz_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0x0F, 0x84, ptr); }
+pub fn jmp_ptr (cb: &mut CodeBlock, ptr: CodePtr) { write_jcc_ptr(cb, 0xFF, 0xE9, ptr); }
+
+/// jmp - Indirect jump near to an R/M operand.
+pub fn jmp_rm(cb: &mut CodeBlock, opnd: X86Opnd) {
+ write_rm(cb, false, false, X86Opnd::None, opnd, Some(4), &[0xff]);
+}
+
+/// jmp - Jump with relative 32-bit offset
+pub fn jmp32(cb: &mut CodeBlock, offset: i32) {
+ cb.write_byte(0xE9);
+ cb.write_int(offset as u64, 32);
+}
+
+/// lea - Load Effective Address
+pub fn lea(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) {
+ if let X86Opnd::Reg(reg) = dst {
+ assert!(reg.num_bits == 64);
+ assert!(matches!(src, X86Opnd::Mem(_) | X86Opnd::IPRel(_)));
+ write_rm(cb, false, true, dst, src, None, &[0x8d]);
+ } else {
+ unreachable!();
+ }
+}
+
+/// mov - Data move operation
+pub fn mov(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) {
+ match (dst, src) {
+ // R + Imm
+ (X86Opnd::Reg(reg), X86Opnd::Imm(imm)) => {
+ assert!(imm.num_bits <= reg.num_bits);
+
+            // If the source immediate can be zero-extended to 64 bits, we can
+            // use the 32-bit operand version of the instruction. For example,
+            // mov(rax, 0x34) can become the equivalent mov(eax, 0x34).
+ if (reg.num_bits == 64) && (imm.value > 0) && (imm.num_bits <= 32) {
+ if dst.rex_needed() {
+ write_rex(cb, false, 0, 0, reg.reg_no);
+ }
+ write_opcode(cb, 0xB8, reg);
+ cb.write_int(imm.value as u64, 32);
+ } else {
+ if reg.num_bits == 16 {
+ cb.write_byte(0x66);
+ }
+
+ if dst.rex_needed() || reg.num_bits == 64 {
+ write_rex(cb, reg.num_bits == 64, 0, 0, reg.reg_no);
+ }
+
+ write_opcode(cb, if reg.num_bits == 8 { 0xb0 } else { 0xb8 }, reg);
+ cb.write_int(imm.value as u64, reg.num_bits.into());
+ }
+ },
+ // R + UImm
+ (X86Opnd::Reg(reg), X86Opnd::UImm(uimm)) => {
+ assert!(uimm.num_bits <= reg.num_bits);
+
+            // If the source immediate can be zero-extended to 64 bits, we can
+            // use the 32-bit operand version of the instruction. For example,
+            // mov(rax, 0x34) can become the equivalent mov(eax, 0x34).
+ if (reg.num_bits == 64) && (uimm.value <= u32::MAX.into()) {
+ if dst.rex_needed() {
+ write_rex(cb, false, 0, 0, reg.reg_no);
+ }
+ write_opcode(cb, 0xB8, reg);
+ cb.write_int(uimm.value, 32);
+ } else {
+ if reg.num_bits == 16 {
+ cb.write_byte(0x66);
+ }
+
+ if dst.rex_needed() || reg.num_bits == 64 {
+ write_rex(cb, reg.num_bits == 64, 0, 0, reg.reg_no);
+ }
+
+ write_opcode(cb, if reg.num_bits == 8 { 0xb0 } else { 0xb8 }, reg);
+ cb.write_int(uimm.value, reg.num_bits.into());
+ }
+ },
+ // M + Imm
+ (X86Opnd::Mem(mem), X86Opnd::Imm(imm)) => {
+ assert!(imm.num_bits <= mem.num_bits);
+
+ if mem.num_bits == 8 {
+ write_rm(cb, false, false, X86Opnd::None, dst, None, &[0xc6]);
+ } else {
+ write_rm(cb, mem.num_bits == 16, mem.num_bits == 64, X86Opnd::None, dst, Some(0), &[0xc7]);
+ }
+
+ let output_num_bits:u32 = if mem.num_bits > 32 { 32 } else { mem.num_bits.into() };
+ assert!(
+ mem.num_bits < 64 || imm_num_bits(imm.value) <= (output_num_bits as u8),
+ "immediate value should be small enough to survive sign extension"
+ );
+ cb.write_int(imm.value as u64, output_num_bits);
+ },
+ // M + UImm
+ (X86Opnd::Mem(mem), X86Opnd::UImm(uimm)) => {
+ assert!(uimm.num_bits <= mem.num_bits);
+
+ if mem.num_bits == 8 {
+ write_rm(cb, false, false, X86Opnd::None, dst, None, &[0xc6]);
+ }
+ else {
+ write_rm(cb, mem.num_bits == 16, mem.num_bits == 64, X86Opnd::None, dst, Some(0), &[0xc7]);
+ }
+
+ let output_num_bits = if mem.num_bits > 32 { 32 } else { mem.num_bits.into() };
+ assert!(
+ mem.num_bits < 64 || imm_num_bits(uimm.value as i64) <= (output_num_bits as u8),
+ "immediate value should be small enough to survive sign extension"
+ );
+ cb.write_int(uimm.value, output_num_bits);
+ },
+ // * + Imm/UImm
+ (_, X86Opnd::Imm(_) | X86Opnd::UImm(_)) => unreachable!(),
+ // * + *
+ (_, _) => {
+ write_rm_multi(
+ cb,
+ 0x88, // opMemReg8
+ 0x89, // opMemRegPref
+ 0x8A, // opRegMem8
+ 0x8B, // opRegMemPref
+ 0xC6, // opMemImm8
+ 0xFF, // opMemImmSml (not available)
+ 0xFF, // opMemImmLrg
+ None, // opExtImm
+ dst,
+ src
+ );
+ }
+ };
+}
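+
+// For example (mirroring test_mov), the 32-bit narrowing above in action:
+//   mov(cb, R8, imm_opnd(0x34)); // zero-extends safely -> 41 b8 34 00 00 00
+//   mov(cb, R8, imm_opnd(-1));   // cannot be narrowed  -> 49 b8 ff ff ff ff ff ff ff ff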
+
+/// A variant of mov that always writes the value as a full 64-bit immediate, used for GC offsets.
+pub fn movabs(cb: &mut CodeBlock, dst: X86Opnd, value: u64) {
+ match dst {
+ X86Opnd::Reg(reg) => {
+ assert_eq!(reg.num_bits, 64);
+ write_rex(cb, true, 0, 0, reg.reg_no);
+
+ write_opcode(cb, 0xb8, reg);
+ cb.write_int(value, 64);
+ },
+ _ => unreachable!()
+ }
+}
+
+/// movsx - Move with sign extension (signed integers)
+pub fn movsx(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) {
+ if let X86Opnd::Reg(_dst_reg) = dst {
+ assert!(matches!(src, X86Opnd::Reg(_) | X86Opnd::Mem(_)));
+
+ let src_num_bits = src.num_bits();
+ let dst_num_bits = dst.num_bits();
+ assert!(src_num_bits < dst_num_bits);
+
+ match src_num_bits {
+ 8 => write_rm(cb, dst_num_bits == 16, dst_num_bits == 64, dst, src, None, &[0x0f, 0xbe]),
+ 16 => write_rm(cb, dst_num_bits == 16, dst_num_bits == 64, dst, src, None, &[0x0f, 0xbf]),
+ 32 => write_rm(cb, false, true, dst, src, None, &[0x63]),
+ _ => unreachable!()
+ };
+ } else {
+ unreachable!();
+ }
+}
+
+/*
+/// movzx - Move with zero extension (unsigned values)
+void movzx(codeblock_t *cb, x86opnd_t dst, x86opnd_t src)
+{
+ cb.writeASM("movzx", dst, src);
+
+ uint32_t dstSize;
+ if (dst.isReg)
+ dstSize = dst.reg.size;
+ else
+ assert (false, "movzx dst must be a register");
+
+ uint32_t srcSize;
+ if (src.isReg)
+ srcSize = src.reg.size;
+ else if (src.isMem)
+ srcSize = src.mem.size;
+ else
+ assert (false);
+
+ assert (
+ srcSize < dstSize,
+ "movzx: srcSize >= dstSize"
+ );
+
+ if (srcSize is 8)
+ {
+ cb.writeRMInstr!('r', 0xFF, 0x0F, 0xB6)(dstSize is 16, dstSize is 64, dst, src);
+ }
+ else if (srcSize is 16)
+ {
+ cb.writeRMInstr!('r', 0xFF, 0x0F, 0xB7)(dstSize is 16, dstSize is 64, dst, src);
+ }
+ else
+ {
+        assert (false, "invalid src operand size for movzx");
+ }
+}
+*/
+
+/// nop - Noop, one or multiple bytes long
+pub fn nop(cb: &mut CodeBlock, length: u32) {
+ match length {
+ 0 => {},
+ 1 => cb.write_byte(0x90),
+ 2 => cb.write_bytes(&[0x66, 0x90]),
+ 3 => cb.write_bytes(&[0x0f, 0x1f, 0x00]),
+ 4 => cb.write_bytes(&[0x0f, 0x1f, 0x40, 0x00]),
+ 5 => cb.write_bytes(&[0x0f, 0x1f, 0x44, 0x00, 0x00]),
+ 6 => cb.write_bytes(&[0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00]),
+ 7 => cb.write_bytes(&[0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00]),
+ 8 => cb.write_bytes(&[0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00]),
+ 9 => cb.write_bytes(&[0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00]),
+ _ => {
+ let mut written: u32 = 0;
+ while written + 9 <= length {
+ nop(cb, 9);
+ written += 9;
+ }
+ nop(cb, length - written);
+ }
+ };
+}
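+
+// For example (mirroring test_nop), nop(cb, 12) is emitted as a 9-byte nop
+// followed by a 3-byte nop: 66 0f 1f 84 00 00 00 00 00 0f 1f 00.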
+
+/// not - Bitwise NOT
+pub fn not(cb: &mut CodeBlock, opnd: X86Opnd) {
+ write_rm_unary(
+ cb,
+ 0xf6, // opMemReg8
+ 0xf7, // opMemRegPref
+ Some(0x02), // opExt
+ opnd
+ );
+}
+
+/// or - Bitwise OR
+pub fn or(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_rm_multi(
+ cb,
+ 0x08, // opMemReg8
+ 0x09, // opMemRegPref
+ 0x0A, // opRegMem8
+ 0x0B, // opRegMemPref
+ 0x80, // opMemImm8
+ 0x83, // opMemImmSml
+ 0x81, // opMemImmLrg
+ Some(0x01), // opExtImm
+ opnd0,
+ opnd1
+ );
+}
+
+/// pop - Pop a register off the stack
+pub fn pop(cb: &mut CodeBlock, opnd: X86Opnd) {
+ match opnd {
+ X86Opnd::Reg(reg) => {
+ assert!(reg.num_bits == 64);
+
+ if opnd.rex_needed() {
+ write_rex(cb, false, 0, 0, reg.reg_no);
+ }
+ write_opcode(cb, 0x58, reg);
+ },
+ X86Opnd::Mem(mem) => {
+ assert!(mem.num_bits == 64);
+
+ write_rm(cb, false, false, X86Opnd::None, opnd, Some(0), &[0x8f]);
+ },
+ _ => unreachable!()
+ };
+}
+
+/// popfq - Pop the flags register (64-bit)
+pub fn popfq(cb: &mut CodeBlock) {
+ // REX.W + 0x9D
+ cb.write_bytes(&[0x48, 0x9d]);
+}
+
+/// push - Push an operand on the stack
+pub fn push(cb: &mut CodeBlock, opnd: X86Opnd) {
+ match opnd {
+ X86Opnd::Reg(reg) => {
+ if opnd.rex_needed() {
+ write_rex(cb, false, 0, 0, reg.reg_no);
+ }
+ write_opcode(cb, 0x50, reg);
+ },
+ X86Opnd::Mem(_mem) => {
+ write_rm(cb, false, false, X86Opnd::None, opnd, Some(6), &[0xff]);
+ },
+ _ => unreachable!()
+ }
+}
+
+/// pushfq - Push the flags register (64-bit)
+pub fn pushfq(cb: &mut CodeBlock) {
+ cb.write_byte(0x9C);
+}
+
+/// ret - Return from call, popping only the return address
+pub fn ret(cb: &mut CodeBlock) {
+ cb.write_byte(0xC3);
+}
+
+// Encode a bitwise shift instruction
+fn write_shift(cb: &mut CodeBlock, op_mem_one_pref: u8, op_mem_cl_pref: u8, op_mem_imm_pref: u8, op_ext: u8, opnd0: X86Opnd, opnd1: X86Opnd) {
+ assert!(matches!(opnd0, X86Opnd::Reg(_) | X86Opnd::Mem(_)));
+
+ // Check the size of opnd0
+ let opnd_size = opnd0.num_bits();
+ assert!(opnd_size == 16 || opnd_size == 32 || opnd_size == 64);
+
+ let sz_pref = opnd_size == 16;
+ let rex_w = opnd_size == 64;
+
+ match opnd1 {
+ X86Opnd::UImm(imm) => {
+ if imm.value == 1 {
+ write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, Some(op_ext), &[op_mem_one_pref]);
+ } else {
+ assert!(imm.num_bits <= 8);
+ write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, Some(op_ext), &[op_mem_imm_pref]);
+ cb.write_byte(imm.value as u8);
+ }
+ }
+
+ X86Opnd::Reg(reg) => {
+ // We can only use CL/RCX as the shift amount
+ assert!(reg.reg_no == RCX_REG.reg_no);
+ write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, Some(op_ext), &[op_mem_cl_pref]);
+ }
+
+ _ => {
+ unreachable!("unsupported operands: {:?}, {:?}", opnd0, opnd1);
+ }
+ }
+}
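+
+// For example (mirroring test_sal), each of the three forms above in action:
+//   sal(cb, ECX, uimm_opnd(1)); // shift-by-one form -> d1 e1
+//   sal(cb, EBP, uimm_opnd(5)); // imm8 form         -> c1 e5 05
+//   sal(cb, RCX, CL);           // CL form           -> 48 d3 e1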
+
+/// sal - Shift arithmetic left
+pub fn sal(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_shift(
+ cb,
+ 0xD1, // opMemOnePref,
+ 0xD3, // opMemClPref,
+ 0xC1, // opMemImmPref,
+ 0x04,
+ opnd0,
+ opnd1
+ );
+}
+
+/// sar - Shift arithmetic right (signed)
+pub fn sar(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_shift(
+ cb,
+ 0xD1, // opMemOnePref,
+ 0xD3, // opMemClPref,
+ 0xC1, // opMemImmPref,
+ 0x07,
+ opnd0,
+ opnd1
+ );
+}
+
+/// shl - Shift logical left
+pub fn shl(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_shift(
+ cb,
+ 0xD1, // opMemOnePref,
+ 0xD3, // opMemClPref,
+ 0xC1, // opMemImmPref,
+ 0x04,
+ opnd0,
+ opnd1
+ );
+}
+
+/// shr - Shift logical right (unsigned)
+pub fn shr(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_shift(
+ cb,
+ 0xD1, // opMemOnePref,
+ 0xD3, // opMemClPref,
+ 0xC1, // opMemImmPref,
+ 0x05,
+ opnd0,
+ opnd1
+ );
+}
+
+/// sub - Integer subtraction
+pub fn sub(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_rm_multi(
+ cb,
+ 0x28, // opMemReg8
+ 0x29, // opMemRegPref
+ 0x2A, // opRegMem8
+ 0x2B, // opRegMemPref
+ 0x80, // opMemImm8
+ 0x83, // opMemImmSml
+ 0x81, // opMemImmLrg
+ Some(0x05), // opExtImm
+ opnd0,
+ opnd1
+ );
+}
+
+fn resize_opnd(opnd: X86Opnd, num_bits: u8) -> X86Opnd {
+ match opnd {
+ X86Opnd::Reg(reg) => {
+ let mut cloned = reg;
+ cloned.num_bits = num_bits;
+ X86Opnd::Reg(cloned)
+ },
+ X86Opnd::Mem(mem) => {
+ let mut cloned = mem;
+ cloned.num_bits = num_bits;
+ X86Opnd::Mem(cloned)
+ },
+ _ => unreachable!()
+ }
+}
+
+/// test - Logical Compare
+pub fn test(cb: &mut CodeBlock, rm_opnd: X86Opnd, test_opnd: X86Opnd) {
+ assert!(matches!(rm_opnd, X86Opnd::Reg(_) | X86Opnd::Mem(_)));
+ let rm_num_bits = rm_opnd.num_bits();
+
+ match test_opnd {
+ X86Opnd::UImm(uimm) => {
+ assert!(uimm.num_bits <= 32);
+ assert!(uimm.num_bits <= rm_num_bits);
+
+ // Use the smallest operand size possible
+ assert!(rm_num_bits % 8 == 0);
+ let rm_resized = resize_opnd(rm_opnd, uimm.num_bits);
+
+ if uimm.num_bits == 8 {
+ write_rm(cb, false, false, X86Opnd::None, rm_resized, Some(0x00), &[0xf6]);
+ cb.write_int(uimm.value, uimm.num_bits.into());
+ } else {
+ write_rm(cb, uimm.num_bits == 16, false, X86Opnd::None, rm_resized, Some(0x00), &[0xf7]);
+ cb.write_int(uimm.value, uimm.num_bits.into());
+ }
+ },
+ X86Opnd::Imm(imm) => {
+ // This mode only applies to 64-bit R/M operands with 32-bit signed immediates
+ assert!(imm.num_bits <= 32);
+ assert!(rm_num_bits == 64);
+
+ write_rm(cb, false, true, X86Opnd::None, rm_opnd, Some(0x00), &[0xf7]);
+ cb.write_int(imm.value as u64, 32);
+ },
+ X86Opnd::Reg(reg) => {
+ assert!(reg.num_bits == rm_num_bits);
+
+ if rm_num_bits == 8 {
+ write_rm(cb, false, false, test_opnd, rm_opnd, None, &[0x84]);
+ } else {
+ write_rm(cb, rm_num_bits == 16, rm_num_bits == 64, test_opnd, rm_opnd, None, &[0x85]);
+ }
+ },
+ _ => unreachable!()
+ };
+}
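+
+// For example (mirroring test_test), the resizing above means that
+// test(cb, RCX, uimm_opnd(8)) narrows RCX down to CL and emits the same
+// bytes as test(cb, CL, uimm_opnd(8)): f6 c1 08.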
+
+/// ud2 - Undefined instruction, guaranteed to raise an invalid opcode exception
+pub fn ud2(cb: &mut CodeBlock) {
+ cb.write_bytes(&[0x0f, 0x0b]);
+}
+
+/// xchg - Exchange Register/Memory with Register
+pub fn xchg(cb: &mut CodeBlock, rm_opnd: X86Opnd, r_opnd: X86Opnd) {
+ if let (X86Opnd::Reg(rm_reg), X86Opnd::Reg(r_reg)) = (rm_opnd, r_opnd) {
+ assert!(rm_reg.num_bits == 64);
+ assert!(r_reg.num_bits == 64);
+
+ // If we're exchanging with RAX
+ if rm_reg.reg_no == RAX_REG_NO {
+ // Write the REX byte
+ write_rex(cb, true, 0, 0, r_reg.reg_no);
+
+ // Write the opcode and register number
+ cb.write_byte(0x90 + (r_reg.reg_no & 7));
+ } else {
+ write_rm(cb, false, true, r_opnd, rm_opnd, None, &[0x87]);
+ }
+ } else {
+ unreachable!();
+ }
+}
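+
+// For example (mirroring test_xchg), xchg(cb, RAX, RCX) takes the short
+// 0x90+r form and emits 48 91, while xchg(cb, RCX, RBX) falls back to the
+// general 0x87 encoding and emits 48 87 d9.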
+
+/// xor - Bitwise exclusive OR
+pub fn xor(cb: &mut CodeBlock, opnd0: X86Opnd, opnd1: X86Opnd) {
+ write_rm_multi(
+ cb,
+ 0x30, // opMemReg8
+ 0x31, // opMemRegPref
+ 0x32, // opRegMem8
+ 0x33, // opRegMemPref
+ 0x80, // opMemImm8
+ 0x83, // opMemImmSml
+ 0x81, // opMemImmLrg
+ Some(0x06), // opExtImm
+ opnd0,
+ opnd1
+ );
+}
diff --git a/yjit/src/asm/x86_64/tests.rs b/yjit/src/asm/x86_64/tests.rs
new file mode 100644
index 0000000000..eefcbfd52e
--- /dev/null
+++ b/yjit/src/asm/x86_64/tests.rs
@@ -0,0 +1,460 @@
+#![cfg(test)]
+
+use crate::asm::x86_64::*;
+
+/// Check that the bytes for an instruction sequence match a hex string
+fn check_bytes<R>(bytes: &str, run: R) where R: FnOnce(&mut super::CodeBlock) {
+ let mut cb = super::CodeBlock::new_dummy(4096);
+ run(&mut cb);
+ assert_eq!(format!("{:x}", cb), bytes);
+}
+
+#[test]
+fn test_add() {
+ check_bytes("80c103", |cb| add(cb, CL, imm_opnd(3)));
+ check_bytes("00d9", |cb| add(cb, CL, BL));
+ check_bytes("4000e1", |cb| add(cb, CL, SPL));
+ check_bytes("6601d9", |cb| add(cb, CX, BX));
+ check_bytes("4801d8", |cb| add(cb, RAX, RBX));
+ check_bytes("01d1", |cb| add(cb, ECX, EDX));
+ check_bytes("4c01f2", |cb| add(cb, RDX, R14));
+ check_bytes("480110", |cb| add(cb, mem_opnd(64, RAX, 0), RDX));
+ check_bytes("480310", |cb| add(cb, RDX, mem_opnd(64, RAX, 0)));
+ check_bytes("48035008", |cb| add(cb, RDX, mem_opnd(64, RAX, 8)));
+ check_bytes("480390ff000000", |cb| add(cb, RDX, mem_opnd(64, RAX, 255)));
+ check_bytes("4881407fff000000", |cb| add(cb, mem_opnd(64, RAX, 127), imm_opnd(255)));
+ check_bytes("0110", |cb| add(cb, mem_opnd(32, RAX, 0), EDX));
+ check_bytes("4883c408", |cb| add(cb, RSP, imm_opnd(8)));
+ check_bytes("83c108", |cb| add(cb, ECX, imm_opnd(8)));
+ check_bytes("81c1ff000000", |cb| add(cb, ECX, imm_opnd(255)));
+}
+
+#[test]
+fn test_add_unsigned() {
+ // ADD r/m8, imm8
+ check_bytes("4180c001", |cb| add(cb, R8B, uimm_opnd(1)));
+ check_bytes("4180c07f", |cb| add(cb, R8B, imm_opnd(i8::MAX.try_into().unwrap())));
+
+ // ADD r/m16, imm16
+ check_bytes("664183c001", |cb| add(cb, R8W, uimm_opnd(1)));
+ check_bytes("664181c0ff7f", |cb| add(cb, R8W, uimm_opnd(i16::MAX.try_into().unwrap())));
+
+ // ADD r/m32, imm32
+ check_bytes("4183c001", |cb| add(cb, R8D, uimm_opnd(1)));
+ check_bytes("4181c0ffffff7f", |cb| add(cb, R8D, uimm_opnd(i32::MAX.try_into().unwrap())));
+
+ // ADD r/m64, imm32
+ check_bytes("4983c001", |cb| add(cb, R8, uimm_opnd(1)));
+ check_bytes("4981c0ffffff7f", |cb| add(cb, R8, uimm_opnd(i32::MAX.try_into().unwrap())));
+}
+
+#[test]
+fn test_and() {
+ check_bytes("4421e5", |cb| and(cb, EBP, R12D));
+ check_bytes("48832008", |cb| and(cb, mem_opnd(64, RAX, 0), imm_opnd(0x08)));
+}
+
+#[test]
+fn test_call_label() {
+ check_bytes("e8fbffffff", |cb| {
+ let label_idx = cb.new_label("fn".to_owned());
+ call_label(cb, label_idx);
+ cb.link_labels();
+ });
+}
+
+#[test]
+fn test_call_ptr() {
+ // calling a lower address
+ check_bytes("e8fbffffff", |cb| {
+ let ptr = cb.get_write_ptr();
+ call_ptr(cb, RAX, ptr.raw_ptr(cb));
+ });
+}
+
+#[test]
+fn test_call_reg() {
+ check_bytes("ffd0", |cb| call(cb, RAX));
+}
+
+#[test]
+fn test_call_mem() {
+ check_bytes("ff542408", |cb| call(cb, mem_opnd(64, RSP, 8)));
+}
+
+#[test]
+fn test_cmovcc() {
+ check_bytes("0f4ff7", |cb| cmovg(cb, ESI, EDI));
+ check_bytes("0f4f750c", |cb| cmovg(cb, ESI, mem_opnd(32, RBP, 12)));
+ check_bytes("0f4cc1", |cb| cmovl(cb, EAX, ECX));
+ check_bytes("480f4cdd", |cb| cmovl(cb, RBX, RBP));
+ check_bytes("0f4e742404", |cb| cmovle(cb, ESI, mem_opnd(32, RSP, 4)));
+}
+
+#[test]
+fn test_cmp() {
+ check_bytes("38d1", |cb| cmp(cb, CL, DL));
+ check_bytes("39f9", |cb| cmp(cb, ECX, EDI));
+ check_bytes("493b1424", |cb| cmp(cb, RDX, mem_opnd(64, R12, 0)));
+ check_bytes("4883f802", |cb| cmp(cb, RAX, imm_opnd(2)));
+ check_bytes("81f900000080", |cb| cmp(cb, ECX, uimm_opnd(0x8000_0000)));
+}
+
+#[test]
+fn test_cqo() {
+ check_bytes("4899", |cb| cqo(cb));
+}
+
+#[test]
+fn test_imul() {
+ check_bytes("480fafc3", |cb| imul(cb, RAX, RBX));
+ check_bytes("480faf10", |cb| imul(cb, RDX, mem_opnd(64, RAX, 0)));
+
+ // Operands flipped for encoding since multiplication is commutative
+ check_bytes("480faf10", |cb| imul(cb, mem_opnd(64, RAX, 0), RDX));
+}
+
+#[test]
+fn test_jge_label() {
+ check_bytes("0f8dfaffffff", |cb| {
+ let label_idx = cb.new_label("loop".to_owned());
+ jge_label(cb, label_idx);
+ cb.link_labels();
+ });
+}
+
+#[test]
+fn test_jmp_label() {
+ // Forward jump
+ check_bytes("e900000000", |cb| {
+ let label_idx = cb.new_label("next".to_owned());
+ jmp_label(cb, label_idx);
+ cb.write_label(label_idx);
+ cb.link_labels();
+ });
+
+ // Backwards jump
+ check_bytes("e9fbffffff", |cb| {
+ let label_idx = cb.new_label("loop".to_owned());
+ cb.write_label(label_idx);
+ jmp_label(cb, label_idx);
+ cb.link_labels();
+ });
+}
+
+#[test]
+fn test_jmp_rm() {
+ check_bytes("41ffe4", |cb| jmp_rm(cb, R12));
+}
+
+#[test]
+fn test_jo_label() {
+ check_bytes("0f80faffffff", |cb| {
+ let label_idx = cb.new_label("loop".to_owned());
+ jo_label(cb, label_idx);
+ cb.link_labels();
+ });
+}
+
+#[test]
+fn test_lea() {
+ check_bytes("488d5108", |cb| lea(cb, RDX, mem_opnd(64, RCX, 8)));
+ check_bytes("488d0500000000", |cb| lea(cb, RAX, mem_opnd(8, RIP, 0)));
+ check_bytes("488d0505000000", |cb| lea(cb, RAX, mem_opnd(8, RIP, 5)));
+ check_bytes("488d3d05000000", |cb| lea(cb, RDI, mem_opnd(8, RIP, 5)));
+}
+
+#[test]
+fn test_mov() {
+ check_bytes("b807000000", |cb| mov(cb, EAX, imm_opnd(7)));
+ check_bytes("b8fdffffff", |cb| mov(cb, EAX, imm_opnd(-3)));
+ check_bytes("41bf03000000", |cb| mov(cb, R15, imm_opnd(3)));
+ check_bytes("89d8", |cb| mov(cb, EAX, EBX));
+ check_bytes("89c8", |cb| mov(cb, EAX, ECX));
+ check_bytes("8b9380000000", |cb| mov(cb, EDX, mem_opnd(32, RBX, 128)));
+ check_bytes("488b442404", |cb| mov(cb, RAX, mem_opnd(64, RSP, 4)));
+
+ // Test `mov rax, 3` => `mov eax, 3` optimization
+ check_bytes("41b834000000", |cb| mov(cb, R8, imm_opnd(0x34)));
+ check_bytes("49b80000008000000000", |cb| mov(cb, R8, imm_opnd(0x80000000)));
+ check_bytes("49b8ffffffffffffffff", |cb| mov(cb, R8, imm_opnd(-1)));
+
+ check_bytes("b834000000", |cb| mov(cb, RAX, imm_opnd(0x34)));
+ check_bytes("48b8020000000000c0ff", |cb| mov(cb, RAX, imm_opnd(-18014398509481982)));
+ check_bytes("48b80000008000000000", |cb| mov(cb, RAX, imm_opnd(0x80000000)));
+ check_bytes("48b8ccffffffffffffff", |cb| mov(cb, RAX, imm_opnd(-52))); // yasm thinks this could use a dword immediate instead of qword
+ check_bytes("48b8ffffffffffffffff", |cb| mov(cb, RAX, imm_opnd(-1))); // yasm thinks this could use a dword immediate instead of qword
+ check_bytes("4488c9", |cb| mov(cb, CL, R9B));
+ check_bytes("4889c3", |cb| mov(cb, RBX, RAX));
+ check_bytes("4889df", |cb| mov(cb, RDI, RBX));
+ check_bytes("40b60b", |cb| mov(cb, SIL, imm_opnd(11)));
+
+ check_bytes("c60424fd", |cb| mov(cb, mem_opnd(8, RSP, 0), imm_opnd(-3)));
+ check_bytes("48c7470801000000", |cb| mov(cb, mem_opnd(64, RDI, 8), imm_opnd(1)));
+ //check_bytes("67c7400411000000", |cb| mov(cb, mem_opnd(32, EAX, 4), imm_opnd(0x34))); // We don't distinguish between EAX and RAX here - that's probably fine?
+ check_bytes("c7400411000000", |cb| mov(cb, mem_opnd(32, RAX, 4), imm_opnd(17)));
+ check_bytes("c7400401000080", |cb| mov(cb, mem_opnd(32, RAX, 4), uimm_opnd(0x80000001)));
+ check_bytes("41895814", |cb| mov(cb, mem_opnd(32, R8, 20), EBX));
+ check_bytes("4d8913", |cb| mov(cb, mem_opnd(64, R11, 0), R10));
+ check_bytes("48c742f8f4ffffff", |cb| mov(cb, mem_opnd(64, RDX, -8), imm_opnd(-12)));
+}
+
+#[test]
+fn test_movabs() {
+ check_bytes("49b83400000000000000", |cb| movabs(cb, R8, 0x34));
+ check_bytes("49b80000008000000000", |cb| movabs(cb, R8, 0x80000000));
+}
+
+#[test]
+fn test_mov_unsigned() {
+ // MOV AL, imm8
+ check_bytes("b001", |cb| mov(cb, AL, uimm_opnd(1)));
+ check_bytes("b0ff", |cb| mov(cb, AL, uimm_opnd(u8::MAX.into())));
+
+ // MOV AX, imm16
+ check_bytes("66b80100", |cb| mov(cb, AX, uimm_opnd(1)));
+ check_bytes("66b8ffff", |cb| mov(cb, AX, uimm_opnd(u16::MAX.into())));
+
+ // MOV EAX, imm32
+ check_bytes("b801000000", |cb| mov(cb, EAX, uimm_opnd(1)));
+ check_bytes("b8ffffffff", |cb| mov(cb, EAX, uimm_opnd(u32::MAX.into())));
+ check_bytes("41b800000000", |cb| mov(cb, R8, uimm_opnd(0)));
+ check_bytes("41b8ffffffff", |cb| mov(cb, R8, uimm_opnd(0xFF_FF_FF_FF)));
+
+    // MOV RAX, imm64: narrowed to the EAX form since the value fits into 32 bits
+ check_bytes("b801000000", |cb| mov(cb, RAX, uimm_opnd(1)));
+ check_bytes("b8ffffffff", |cb| mov(cb, RAX, uimm_opnd(u32::MAX.into())));
+
+    // MOV RAX, imm64: not narrowed since the value does not fit into 32 bits
+ check_bytes("48b80000000001000000", |cb| mov(cb, RAX, uimm_opnd(u32::MAX as u64 + 1)));
+ check_bytes("48b8ffffffffffffffff", |cb| mov(cb, RAX, uimm_opnd(u64::MAX)));
+ check_bytes("49b8ffffffffffffffff", |cb| mov(cb, R8, uimm_opnd(u64::MAX)));
+
+ // MOV r8, imm8
+ check_bytes("41b001", |cb| mov(cb, R8B, uimm_opnd(1)));
+ check_bytes("41b0ff", |cb| mov(cb, R8B, uimm_opnd(u8::MAX.into())));
+
+ // MOV r16, imm16
+ check_bytes("6641b80100", |cb| mov(cb, R8W, uimm_opnd(1)));
+ check_bytes("6641b8ffff", |cb| mov(cb, R8W, uimm_opnd(u16::MAX.into())));
+
+ // MOV r32, imm32
+ check_bytes("41b801000000", |cb| mov(cb, R8D, uimm_opnd(1)));
+ check_bytes("41b8ffffffff", |cb| mov(cb, R8D, uimm_opnd(u32::MAX.into())));
+
+    // MOV r64, imm64: narrowed to the 32-bit form since the value fits into 32 bits
+ check_bytes("41b801000000", |cb| mov(cb, R8, uimm_opnd(1)));
+
+    // MOV r64, imm64: not narrowed since the value does not fit into 32 bits
+ check_bytes("49b8ffffffffffffffff", |cb| mov(cb, R8, uimm_opnd(u64::MAX)));
+}
+
+#[test]
+fn test_mov_iprel() {
+ check_bytes("8b0500000000", |cb| mov(cb, EAX, mem_opnd(32, RIP, 0)));
+ check_bytes("8b0505000000", |cb| mov(cb, EAX, mem_opnd(32, RIP, 5)));
+
+ check_bytes("488b0500000000", |cb| mov(cb, RAX, mem_opnd(64, RIP, 0)));
+ check_bytes("488b0505000000", |cb| mov(cb, RAX, mem_opnd(64, RIP, 5)));
+ check_bytes("488b3d05000000", |cb| mov(cb, RDI, mem_opnd(64, RIP, 5)));
+}
+
+#[test]
+fn test_movsx() {
+ check_bytes("660fbec0", |cb| movsx(cb, AX, AL));
+ check_bytes("0fbed0", |cb| movsx(cb, EDX, AL));
+ check_bytes("480fbec3", |cb| movsx(cb, RAX, BL));
+ check_bytes("0fbfc8", |cb| movsx(cb, ECX, AX));
+ check_bytes("4c0fbed9", |cb| movsx(cb, R11, CL));
+ check_bytes("4c6354240c", |cb| movsx(cb, R10, mem_opnd(32, RSP, 12)));
+ check_bytes("480fbe0424", |cb| movsx(cb, RAX, mem_opnd(8, RSP, 0)));
+ check_bytes("490fbf5504", |cb| movsx(cb, RDX, mem_opnd(16, R13, 4)));
+}
+
+#[test]
+fn test_nop() {
+ check_bytes("90", |cb| nop(cb, 1));
+ check_bytes("6690", |cb| nop(cb, 2));
+ check_bytes("0f1f00", |cb| nop(cb, 3));
+ check_bytes("0f1f4000", |cb| nop(cb, 4));
+ check_bytes("0f1f440000", |cb| nop(cb, 5));
+ check_bytes("660f1f440000", |cb| nop(cb, 6));
+ check_bytes("0f1f8000000000", |cb| nop(cb, 7));
+ check_bytes("0f1f840000000000", |cb| nop(cb, 8));
+ check_bytes("660f1f840000000000", |cb| nop(cb, 9));
+ check_bytes("660f1f84000000000090", |cb| nop(cb, 10));
+ check_bytes("660f1f8400000000006690", |cb| nop(cb, 11));
+ check_bytes("660f1f8400000000000f1f00", |cb| nop(cb, 12));
+}
+
+#[test]
+fn test_not() {
+ check_bytes("66f7d0", |cb| not(cb, AX));
+ check_bytes("f7d0", |cb| not(cb, EAX));
+ check_bytes("49f71424", |cb| not(cb, mem_opnd(64, R12, 0)));
+ check_bytes("f794242d010000", |cb| not(cb, mem_opnd(32, RSP, 301)));
+ check_bytes("f71424", |cb| not(cb, mem_opnd(32, RSP, 0)));
+ check_bytes("f7542403", |cb| not(cb, mem_opnd(32, RSP, 3)));
+ check_bytes("f75500", |cb| not(cb, mem_opnd(32, RBP, 0)));
+ check_bytes("f7550d", |cb| not(cb, mem_opnd(32, RBP, 13)));
+ check_bytes("48f7d0", |cb| not(cb, RAX));
+ check_bytes("49f7d3", |cb| not(cb, R11));
+ check_bytes("f710", |cb| not(cb, mem_opnd(32, RAX, 0)));
+ check_bytes("f716", |cb| not(cb, mem_opnd(32, RSI, 0)));
+ check_bytes("f717", |cb| not(cb, mem_opnd(32, RDI, 0)));
+ check_bytes("f75237", |cb| not(cb, mem_opnd(32, RDX, 55)));
+ check_bytes("f79239050000", |cb| not(cb, mem_opnd(32, RDX, 1337)));
+ check_bytes("f752c9", |cb| not(cb, mem_opnd(32, RDX, -55)));
+ check_bytes("f792d5fdffff", |cb| not(cb, mem_opnd(32, RDX, -555)));
+}
+
+#[test]
+fn test_or() {
+ check_bytes("09f2", |cb| or(cb, EDX, ESI));
+}
+
+#[test]
+fn test_pop() {
+ check_bytes("58", |cb| pop(cb, RAX));
+ check_bytes("5b", |cb| pop(cb, RBX));
+ check_bytes("5c", |cb| pop(cb, RSP));
+ check_bytes("5d", |cb| pop(cb, RBP));
+ check_bytes("415c", |cb| pop(cb, R12));
+ check_bytes("8f00", |cb| pop(cb, mem_opnd(64, RAX, 0)));
+ check_bytes("418f00", |cb| pop(cb, mem_opnd(64, R8, 0)));
+ check_bytes("418f4003", |cb| pop(cb, mem_opnd(64, R8, 3)));
+ check_bytes("8f44c803", |cb| pop(cb, mem_opnd_sib(64, RAX, RCX, 8, 3)));
+ check_bytes("418f44c803", |cb| pop(cb, mem_opnd_sib(64, R8, RCX, 8, 3)));
+}
+
+#[test]
+fn test_push() {
+ check_bytes("50", |cb| push(cb, RAX));
+ check_bytes("53", |cb| push(cb, RBX));
+ check_bytes("4154", |cb| push(cb, R12));
+ check_bytes("ff30", |cb| push(cb, mem_opnd(64, RAX, 0)));
+ check_bytes("41ff30", |cb| push(cb, mem_opnd(64, R8, 0)));
+ check_bytes("41ff7003", |cb| push(cb, mem_opnd(64, R8, 3)));
+ check_bytes("ff74c803", |cb| push(cb, mem_opnd_sib(64, RAX, RCX, 8, 3)));
+ check_bytes("41ff74c803", |cb| push(cb, mem_opnd_sib(64, R8, RCX, 8, 3)));
+}
+
+#[test]
+fn test_ret() {
+ check_bytes("c3", |cb| ret(cb));
+}
+
+#[test]
+fn test_sal() {
+ check_bytes("66d1e1", |cb| sal(cb, CX, uimm_opnd(1)));
+ check_bytes("d1e1", |cb| sal(cb, ECX, uimm_opnd(1)));
+ check_bytes("c1e505", |cb| sal(cb, EBP, uimm_opnd(5)));
+ check_bytes("d1642444", |cb| sal(cb, mem_opnd(32, RSP, 68), uimm_opnd(1)));
+ check_bytes("48d3e1", |cb| sal(cb, RCX, CL));
+}
+
+#[test]
+fn test_sar() {
+ check_bytes("d1fa", |cb| sar(cb, EDX, uimm_opnd(1)));
+}
+
+#[test]
+fn test_shr() {
+ check_bytes("49c1ee07", |cb| shr(cb, R14, uimm_opnd(7)));
+}
+
+#[test]
+fn test_sub() {
+ check_bytes("83e801", |cb| sub(cb, EAX, imm_opnd(1)));
+ check_bytes("4883e802", |cb| sub(cb, RAX, imm_opnd(2)));
+}
+
+#[test]
+#[should_panic]
+fn test_sub_uimm_too_large() {
+ // This immediate becomes a different value after
+ // sign extension, so not safe to encode.
+ check_bytes("ff", |cb| sub(cb, RCX, uimm_opnd(0x8000_0000)));
+}
+
+#[test]
+fn test_test() {
+ check_bytes("84c0", |cb| test(cb, AL, AL));
+ check_bytes("6685c0", |cb| test(cb, AX, AX));
+ check_bytes("f6c108", |cb| test(cb, CL, uimm_opnd(8)));
+ check_bytes("f6c207", |cb| test(cb, DL, uimm_opnd(7)));
+ check_bytes("f6c108", |cb| test(cb, RCX, uimm_opnd(8)));
+ check_bytes("f6420808", |cb| test(cb, mem_opnd(8, RDX, 8), uimm_opnd(8)));
+ check_bytes("f64208ff", |cb| test(cb, mem_opnd(8, RDX, 8), uimm_opnd(255)));
+ check_bytes("66f7c2ffff", |cb| test(cb, DX, uimm_opnd(0xffff)));
+ check_bytes("66f74208ffff", |cb| test(cb, mem_opnd(16, RDX, 8), uimm_opnd(0xffff)));
+ check_bytes("f60601", |cb| test(cb, mem_opnd(8, RSI, 0), uimm_opnd(1)));
+ check_bytes("f6461001", |cb| test(cb, mem_opnd(8, RSI, 16), uimm_opnd(1)));
+ check_bytes("f646f001", |cb| test(cb, mem_opnd(8, RSI, -16), uimm_opnd(1)));
+ check_bytes("854640", |cb| test(cb, mem_opnd(32, RSI, 64), EAX));
+ check_bytes("4885472a", |cb| test(cb, mem_opnd(64, RDI, 42), RAX));
+ check_bytes("4885c0", |cb| test(cb, RAX, RAX));
+ check_bytes("4885f0", |cb| test(cb, RAX, RSI));
+ check_bytes("48f74640f7ffffff", |cb| test(cb, mem_opnd(64, RSI, 64), imm_opnd(!0x08)));
+ check_bytes("48f7464008000000", |cb| test(cb, mem_opnd(64, RSI, 64), imm_opnd(0x08)));
+ check_bytes("48f7c108000000", |cb| test(cb, RCX, imm_opnd(0x08)));
+ //check_bytes("48a9f7ffff0f", |cb| test(cb, RAX, imm_opnd(0x0FFFFFF7)));
+}
+
+#[test]
+fn test_xchg() {
+ check_bytes("4891", |cb| xchg(cb, RAX, RCX));
+ check_bytes("4995", |cb| xchg(cb, RAX, R13));
+ check_bytes("4887d9", |cb| xchg(cb, RCX, RBX));
+ check_bytes("4d87f9", |cb| xchg(cb, R9, R15));
+}
+
+#[test]
+fn test_xor() {
+ check_bytes("31c0", |cb| xor(cb, EAX, EAX));
+}
+
+#[test]
+#[cfg(feature = "disasm")]
+fn basic_capstone_usage() -> std::result::Result<(), capstone::Error> {
+ // Test drive Capstone with simple input
+ use capstone::prelude::*;
+ let cs = Capstone::new()
+ .x86()
+ .mode(arch::x86::ArchMode::Mode64)
+ .syntax(arch::x86::ArchSyntax::Intel)
+ .build()?;
+
+ let insns = cs.disasm_all(&[0xCC], 0x1000)?;
+
+ match insns.as_ref() {
+ [insn] => {
+ assert_eq!(Some("int3"), insn.mnemonic());
+ Ok(())
+ }
+ _ => Err(capstone::Error::CustomError(
+ "expected to disassemble to int3",
+ )),
+ }
+}
+
+#[test]
+#[cfg(feature = "disasm")]
+fn block_comments() {
+ let mut cb = super::CodeBlock::new_dummy(4096);
+
+ let first_write_ptr = cb.get_write_ptr().raw_addr(&cb);
+ cb.add_comment("Beginning");
+ xor(&mut cb, EAX, EAX); // 2 bytes long
+ let second_write_ptr = cb.get_write_ptr().raw_addr(&cb);
+ cb.add_comment("Two bytes in");
+ cb.add_comment("Still two bytes in");
+ cb.add_comment("Still two bytes in"); // Duplicate, should be ignored
+ test(&mut cb, mem_opnd(64, RSI, 64), imm_opnd(!0x08)); // 8 bytes long
+ let third_write_ptr = cb.get_write_ptr().raw_addr(&cb);
+ cb.add_comment("Ten bytes in");
+
+ assert_eq!(&vec!( "Beginning".to_string() ), cb.comments_at(first_write_ptr).unwrap());
+ assert_eq!(&vec!( "Two bytes in".to_string(), "Still two bytes in".to_string() ), cb.comments_at(second_write_ptr).unwrap());
+ assert_eq!(&vec!( "Ten bytes in".to_string() ), cb.comments_at(third_write_ptr).unwrap());
+}
diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
new file mode 100644
index 0000000000..0521e09d0b
--- /dev/null
+++ b/yjit/src/backend/arm64/mod.rs
@@ -0,0 +1,1829 @@
+use std::mem::take;
+
+use crate::asm::{CodeBlock, OutlinedCb};
+use crate::asm::arm64::*;
+use crate::cruby::*;
+use crate::backend::ir::*;
+use crate::virtualmem::CodePtr;
+use crate::utils::*;
+
+// Use the arm64 register type for this platform
+pub type Reg = A64Reg;
+
+// Callee-saved registers
+pub const _CFP: Opnd = Opnd::Reg(X19_REG);
+pub const _EC: Opnd = Opnd::Reg(X20_REG);
+pub const _SP: Opnd = Opnd::Reg(X21_REG);
+
+// C argument registers on this platform
+pub const _C_ARG_OPNDS: [Opnd; 6] = [
+ Opnd::Reg(X0_REG),
+ Opnd::Reg(X1_REG),
+ Opnd::Reg(X2_REG),
+ Opnd::Reg(X3_REG),
+ Opnd::Reg(X4_REG),
+ Opnd::Reg(X5_REG)
+];
+
+// C return value register on this platform
+pub const C_RET_REG: Reg = X0_REG;
+pub const _C_RET_OPND: Opnd = Opnd::Reg(X0_REG);
+
+// These constants define the way we work with Arm64's stack pointer. The stack
+// pointer always needs to be aligned to a 16-byte boundary.
+pub const C_SP_REG: A64Opnd = X31;
+pub const C_SP_STEP: i32 = 16;
+
+impl CodeBlock {
+ // The maximum number of bytes that can be generated by emit_jmp_ptr.
+ pub fn jmp_ptr_bytes(&self) -> usize {
+ // b instruction's offset is encoded as imm26 times 4. It can jump to
+ // +/-128MiB, so this can be used when --yjit-exec-mem-size <= 128.
+ let num_insns = if b_offset_fits_bits(self.virtual_region_size() as i64 / 4) {
+ 1 // b instruction
+ } else {
+ 5 // 4 instructions to load a 64-bit absolute address + br instruction
+ };
+ num_insns * 4
+ }
+
+ // The maximum number of instructions that can be generated by emit_conditional_jump.
+ fn conditional_jump_insns(&self) -> i32 {
+        // The worst case is the instructions for a far jump plus a bcond.
+ self.jmp_ptr_bytes() as i32 / 4 + 1
+ }
+}
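+
+// For example, with the default region size (--yjit-exec-mem-size <= 128)
+// jmp_ptr_bytes() is 4 (a single b instruction), so emit_jmp_ptr below needs
+// no nop padding; for larger regions it is 20 bytes (a 4-instruction load of
+// the absolute address plus br).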
+
+/// Map Opnd to A64Opnd
+impl From<Opnd> for A64Opnd {
+ fn from(opnd: Opnd) -> Self {
+ match opnd {
+ Opnd::UImm(value) => A64Opnd::new_uimm(value),
+ Opnd::Imm(value) => A64Opnd::new_imm(value),
+ Opnd::Reg(reg) => A64Opnd::Reg(reg),
+ Opnd::Mem(Mem { base: MemBase::Reg(reg_no), num_bits, disp }) => {
+ A64Opnd::new_mem(num_bits, A64Opnd::Reg(A64Reg { num_bits, reg_no }), disp)
+ },
+ Opnd::Mem(Mem { base: MemBase::InsnOut(_), .. }) => {
+ panic!("attempted to lower an Opnd::Mem with a MemBase::InsnOut base")
+ },
+ Opnd::CArg(_) => panic!("attempted to lower an Opnd::CArg"),
+ Opnd::InsnOut { .. } => panic!("attempted to lower an Opnd::InsnOut"),
+ Opnd::Value(_) => panic!("attempted to lower an Opnd::Value"),
+ Opnd::Stack { .. } => panic!("attempted to lower an Opnd::Stack"),
+ Opnd::None => panic!(
+ "Attempted to lower an Opnd::None. This often happens when an out operand was not allocated for an instruction because the output of the instruction was not used. Please ensure you are using the output."
+ ),
+
+ }
+ }
+}
+
+/// Also implement going from a reference to an operand for convenience.
+impl From<&Opnd> for A64Opnd {
+ fn from(opnd: &Opnd) -> Self {
+ A64Opnd::from(*opnd)
+ }
+}
+
+/// Call emit_jmp_ptr and immediately invalidate the written range.
+/// This is needed when next_page also moves other_cb that is not invalidated
+/// by compile_with_regs. Doing it here allows you to avoid invalidating a lot
+/// more than necessary when other_cb jumps from a position early in the page.
+/// This invalidates a small range of cb twice, but we accept the small cost.
+fn emit_jmp_ptr_with_invalidation(cb: &mut CodeBlock, dst_ptr: CodePtr) {
+ #[cfg(not(test))]
+ let start = cb.get_write_ptr();
+ emit_jmp_ptr(cb, dst_ptr, true);
+ #[cfg(not(test))]
+ {
+ let end = cb.get_write_ptr();
+ unsafe { rb_jit_icache_invalidate(start.raw_ptr(cb) as _, end.raw_ptr(cb) as _) };
+ }
+}
+
+fn emit_jmp_ptr(cb: &mut CodeBlock, dst_ptr: CodePtr, padding: bool) {
+ let src_addr = cb.get_write_ptr().as_offset();
+ let dst_addr = dst_ptr.as_offset();
+
+ // If the offset is short enough, then we'll use the
+ // branch instruction. Otherwise, we'll move the
+ // destination into a register and use the branch
+ // register instruction.
+ let num_insns = if b_offset_fits_bits((dst_addr - src_addr) / 4) {
+ b(cb, InstructionOffset::from_bytes((dst_addr - src_addr) as i32));
+ 1
+ } else {
+ let num_insns = emit_load_value(cb, Assembler::SCRATCH0, dst_addr as u64);
+ br(cb, Assembler::SCRATCH0);
+ num_insns + 1
+ };
+
+ if padding {
+ // Make sure it's always a consistent number of
+ // instructions in case it gets patched and has to
+ // use the other branch.
+ assert!(num_insns * 4 <= cb.jmp_ptr_bytes());
+ for _ in num_insns..(cb.jmp_ptr_bytes() / 4) {
+ nop(cb);
+ }
+ }
+}
+
+/// Emit the required instructions to load the given value into the
+/// given register. Our goal here is to use as few instructions as
+/// possible to get this value into the register.
+fn emit_load_value(cb: &mut CodeBlock, rd: A64Opnd, value: u64) -> usize {
+ let mut current = value;
+
+ if current <= 0xffff {
+ // If the value fits into a single movz
+ // instruction, then we'll use that.
+ movz(cb, rd, A64Opnd::new_uimm(current), 0);
+ return 1;
+ } else if BitmaskImmediate::try_from(current).is_ok() {
+ // Otherwise, if the immediate can be encoded
+ // with the special bitmask immediate encoding,
+ // we'll use that.
+ mov(cb, rd, A64Opnd::new_uimm(current));
+ return 1;
+ } else {
+        // Finally we'll fall back to encoding the value using movz for the
+        // first 16 bits and movk for each subsequent set of 16 bits, for as
+        // long as they are necessary.
+ movz(cb, rd, A64Opnd::new_uimm(current & 0xffff), 0);
+ let mut num_insns = 1;
+
+        // (This step is always needed, since we checked above
+        // whether the value fits into a single movz.)
+ current >>= 16;
+ movk(cb, rd, A64Opnd::new_uimm(current & 0xffff), 16);
+ num_insns += 1;
+
+ if current > 0xffff {
+ current >>= 16;
+ movk(cb, rd, A64Opnd::new_uimm(current & 0xffff), 32);
+ num_insns += 1;
+ }
+
+ if current > 0xffff {
+ current >>= 16;
+ movk(cb, rd, A64Opnd::new_uimm(current & 0xffff), 48);
+ num_insns += 1;
+ }
+ return num_insns;
+ }
+}
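+
+// A sketch of the fallback path above: 0x11223344 exceeds 16 bits and its set
+// bits are not one contiguous (rotated, repeated) run, so it is not a bitmask
+// immediate; it should take two instructions: movz rd, #0x3344 followed by
+// movk rd, #0x1122, lsl #16.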
+
+/// List of registers that can be used for stack temps.
+/// These are caller-saved registers.
+pub static TEMP_REGS: [Reg; 5] = [X1_REG, X9_REG, X10_REG, X14_REG, X15_REG];
+
+#[derive(Debug, PartialEq)]
+enum EmitError {
+ RetryOnNextPage,
+ OutOfMemory,
+}
+
+impl Assembler
+{
+    // Special scratch registers for intermediate processing.
+    // These registers are caller-saved (so we don't have to save them before using them)
+ pub const SCRATCH_REG: Reg = X16_REG;
+ const SCRATCH0: A64Opnd = A64Opnd::Reg(Assembler::SCRATCH_REG);
+ const SCRATCH1: A64Opnd = A64Opnd::Reg(X17_REG);
+
+ /// Get the list of registers from which we will allocate on this platform
+ /// These are caller-saved registers
+ /// Note: we intentionally exclude C_RET_REG (X0) from this list
+ /// because of the way it's used in gen_leave() and gen_leave_exit()
+ pub fn get_alloc_regs() -> Vec<Reg> {
+ vec![X11_REG, X12_REG, X13_REG]
+ }
+
+ /// Get a list of all of the caller-saved registers
+ pub fn get_caller_save_regs() -> Vec<Reg> {
+ vec![X1_REG, X9_REG, X10_REG, X11_REG, X12_REG, X13_REG, X14_REG, X15_REG]
+ }
+
+ /// Split platform-specific instructions
+ /// The transformations done here are meant to make our lives simpler in later
+ /// stages of the compilation pipeline.
+    /// Here we make sure that all instructions (except load and store)
+    /// have no memory operands.
+ fn arm64_split(mut self) -> Assembler
+ {
+ /// When we're attempting to load a memory address into a register, the
+ /// displacement must fit into the maximum number of bits for an Op::Add
+ /// immediate. If it doesn't, we have to load the displacement into a
+ /// register first.
+ fn split_lea_operand(asm: &mut Assembler, opnd: Opnd) -> Opnd {
+ match opnd {
+ Opnd::Mem(Mem { base, disp, num_bits }) => {
+ if disp >= 0 && ShiftedImmediate::try_from(disp as u64).is_ok() {
+ asm.lea(opnd)
+ } else {
+ let disp = asm.load(Opnd::Imm(disp.into()));
+ let reg = match base {
+ MemBase::Reg(reg_no) => Opnd::Reg(Reg { reg_no, num_bits }),
+ MemBase::InsnOut(idx) => Opnd::InsnOut { idx, num_bits }
+ };
+
+ asm.add(reg, disp)
+ }
+ },
+ _ => unreachable!("Op::Lea only accepts Opnd::Mem operands.")
+ }
+ }
+
+ /// When you're storing a register into a memory location or loading a
+ /// memory location into a register, the displacement from the base
+ /// register of the memory location must fit into 9 bits. If it doesn't,
+ /// then we need to load that memory address into a register first.
+ fn split_memory_address(asm: &mut Assembler, opnd: Opnd) -> Opnd {
+ match opnd {
+ Opnd::Mem(mem) => {
+ if mem_disp_fits_bits(mem.disp) {
+ opnd
+ } else {
+ let base = split_lea_operand(asm, opnd);
+ Opnd::mem(64, base, 0)
+ }
+ },
+ _ => unreachable!("Can only split memory addresses.")
+ }
+ }
+
+ /// Any memory operands you're sending into an Op::Load instruction need
+ /// to be split in case their displacement doesn't fit into 9 bits.
+ fn split_load_operand(asm: &mut Assembler, opnd: Opnd) -> Opnd {
+ match opnd {
+ Opnd::Reg(_) | Opnd::InsnOut { .. } => opnd,
+ Opnd::Mem(_) => {
+ let split_opnd = split_memory_address(asm, opnd);
+ let out_opnd = asm.load(split_opnd);
+ // Many Arm insns support only 32-bit or 64-bit operands. asm.load with fewer
+ // bits zero-extends the value, so it's safe to recognize it as a 32-bit value.
+ if out_opnd.rm_num_bits() < 32 {
+ out_opnd.with_num_bits(32).unwrap()
+ } else {
+ out_opnd
+ }
+ },
+ _ => asm.load(opnd)
+ }
+ }
+
+ /// Operands that take the place of bitmask immediates must follow a
+ /// certain encoding. In this function we ensure that those operands
+ /// do follow that encoding, and if they don't then we load them first.
+ fn split_bitmask_immediate(asm: &mut Assembler, opnd: Opnd, dest_num_bits: u8) -> Opnd {
+ match opnd {
+ Opnd::Reg(_) | Opnd::CArg(_) | Opnd::InsnOut { .. } | Opnd::Stack { .. } => opnd,
+ Opnd::Mem(_) => split_load_operand(asm, opnd),
+ Opnd::Imm(imm) => {
+ if imm == 0 {
+ Opnd::Reg(XZR_REG)
+ } else if (dest_num_bits == 64 &&
+ BitmaskImmediate::try_from(imm as u64).is_ok()) ||
+ (dest_num_bits == 32 &&
+ u32::try_from(imm).is_ok() &&
+ BitmaskImmediate::new_32b_reg(imm as u32).is_ok()) {
+ Opnd::UImm(imm as u64)
+ } else {
+ asm.load(opnd).with_num_bits(dest_num_bits).unwrap()
+ }
+ },
+ Opnd::UImm(uimm) => {
+ if (dest_num_bits == 64 && BitmaskImmediate::try_from(uimm).is_ok()) ||
+ (dest_num_bits == 32 &&
+ u32::try_from(uimm).is_ok() &&
+ BitmaskImmediate::new_32b_reg(uimm as u32).is_ok()) {
+ opnd
+ } else {
+ asm.load(opnd).with_num_bits(dest_num_bits).unwrap()
+ }
+ },
+ Opnd::None | Opnd::Value(_) => unreachable!()
+ }
+ }
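+
+        // For instance, Opnd::UImm(0xff) is a single contiguous run of ones,
+        // so it can be encoded as a bitmask immediate and passes through
+        // unchanged, while something like Opnd::UImm(0x1234) has scattered
+        // set bits and is loaded into a register first.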
+
+ /// Operands that take the place of a shifted immediate must fit within
+ /// a certain size. If they don't then we need to load them first.
+ fn split_shifted_immediate(asm: &mut Assembler, opnd: Opnd) -> Opnd {
+ match opnd {
+ Opnd::Reg(_) | Opnd::CArg(_) | Opnd::InsnOut { .. } => opnd,
+ Opnd::Mem(_) => split_load_operand(asm, opnd),
+ Opnd::Imm(imm) => if ShiftedImmediate::try_from(imm as u64).is_ok() {
+ opnd
+ } else {
+ asm.load(opnd)
+ }
+ Opnd::UImm(uimm) => {
+ if ShiftedImmediate::try_from(uimm).is_ok() {
+ opnd
+ } else {
+ asm.load(opnd)
+ }
+ },
+ Opnd::None | Opnd::Value(_) | Opnd::Stack { .. } => unreachable!()
+ }
+ }
+
+ /// Returns the operands that should be used for a boolean logic
+ /// instruction.
+ fn split_boolean_operands(asm: &mut Assembler, opnd0: Opnd, opnd1: Opnd) -> (Opnd, Opnd) {
+ match (opnd0, opnd1) {
+ (Opnd::Reg(_), Opnd::Reg(_)) => {
+ (opnd0, opnd1)
+ },
+ (reg_opnd @ Opnd::Reg(_), other_opnd) |
+ (other_opnd, reg_opnd @ Opnd::Reg(_)) => {
+ let opnd1 = split_bitmask_immediate(asm, other_opnd, reg_opnd.rm_num_bits());
+ (reg_opnd, opnd1)
+ },
+ _ => {
+ let opnd0 = split_load_operand(asm, opnd0);
+ let opnd1 = split_bitmask_immediate(asm, opnd1, opnd0.rm_num_bits());
+ (opnd0, opnd1)
+ }
+ }
+ }
+
+ /// Returns the operands that should be used for a csel instruction.
+ fn split_csel_operands(asm: &mut Assembler, opnd0: Opnd, opnd1: Opnd) -> (Opnd, Opnd) {
+ let opnd0 = match opnd0 {
+ Opnd::Reg(_) | Opnd::InsnOut { .. } => opnd0,
+ _ => split_load_operand(asm, opnd0)
+ };
+
+ let opnd1 = match opnd1 {
+ Opnd::Reg(_) | Opnd::InsnOut { .. } => opnd1,
+ _ => split_load_operand(asm, opnd1)
+ };
+
+ (opnd0, opnd1)
+ }
+
+ fn split_less_than_32_cmp(asm: &mut Assembler, opnd0: Opnd) -> Opnd {
+ match opnd0 {
+ Opnd::Reg(_) | Opnd::InsnOut { .. } => {
+ match opnd0.rm_num_bits() {
+ 8 => asm.and(opnd0.with_num_bits(64).unwrap(), Opnd::UImm(0xff)),
+ 16 => asm.and(opnd0.with_num_bits(64).unwrap(), Opnd::UImm(0xffff)),
+ 32 | 64 => opnd0,
+ bits => unreachable!("Invalid number of bits. {}", bits)
+ }
+ }
+ _ => opnd0
+ }
+ }
+
+ let live_ranges: Vec<usize> = take(&mut self.live_ranges);
+ let mut asm_local = Assembler::new_with_label_names(take(&mut self.label_names), take(&mut self.side_exits), self.num_locals);
+ let asm = &mut asm_local;
+ let mut iterator = self.into_draining_iter();
+
+ while let Some((index, mut insn)) = iterator.next_mapped() {
+ // Here we're going to map the operands of the instruction to load
+ // any Opnd::Value operands into registers if they are heap objects
+ // such that only the Op::Load instruction needs to handle that
+ // case. If the values aren't heap objects then we'll treat them as
+ // if they were just unsigned integers.
+ let is_load = matches!(insn, Insn::Load { .. } | Insn::LoadInto { .. });
+ let mut opnd_iter = insn.opnd_iter_mut();
+
+ while let Some(opnd) = opnd_iter.next() {
+ match opnd {
+ Opnd::Value(value) => {
+ if value.special_const_p() {
+ *opnd = Opnd::UImm(value.as_u64());
+ } else if !is_load {
+ *opnd = asm.load(*opnd);
+ }
+ },
+ Opnd::Stack { .. } => {
+ *opnd = asm.lower_stack_opnd(opnd);
+ }
+ _ => {}
+ };
+ }
+
+ // We are replacing instructions here so we know they are already
+ // being used. It is okay not to use their output here.
+ #[allow(unused_must_use)]
+ match &mut insn {
+ Insn::Add { left, right, .. } => {
+ match (*left, *right) {
+ (Opnd::Reg(_) | Opnd::InsnOut { .. }, Opnd::Reg(_) | Opnd::InsnOut { .. }) => {
+ asm.add(*left, *right);
+ },
+ (reg_opnd @ (Opnd::Reg(_) | Opnd::InsnOut { .. }), other_opnd) |
+ (other_opnd, reg_opnd @ (Opnd::Reg(_) | Opnd::InsnOut { .. })) => {
+ let opnd1 = split_shifted_immediate(asm, other_opnd);
+ asm.add(reg_opnd, opnd1);
+ },
+ _ => {
+ let opnd0 = split_load_operand(asm, *left);
+ let opnd1 = split_shifted_immediate(asm, *right);
+ asm.add(opnd0, opnd1);
+ }
+ }
+ },
+ Insn::And { left, right, out } |
+ Insn::Or { left, right, out } |
+ Insn::Xor { left, right, out } => {
+ let (opnd0, opnd1) = split_boolean_operands(asm, *left, *right);
+ *left = opnd0;
+ *right = opnd1;
+
+ // Since these instructions are lowered to an instruction that has 2 input
+ // registers and an output register, look to merge with an `Insn::Mov` that
+ // follows which puts the output in another register. For example:
+ // `Add a, b => out` followed by `Mov c, out` becomes `Add a, b => c`.
+ if let (Opnd::Reg(_), Opnd::Reg(_), Some(Insn::Mov { dest, src })) = (left, right, iterator.peek()) {
+ if live_ranges[index] == index + 1 {
+ // Check after potentially lowering a stack operand to a register operand
+ let lowered_dest = if let Opnd::Stack { .. } = dest {
+ asm.lower_stack_opnd(dest)
+ } else {
+ *dest
+ };
+ if out == src && matches!(lowered_dest, Opnd::Reg(_)) {
+ *out = lowered_dest;
+ iterator.map_insn_index(asm);
+ iterator.next_unmapped(); // Pop merged Insn::Mov
+ }
+ }
+ }
+
+ asm.push_insn(insn);
+ }
+ // Lower to Joz and Jonz so we can generate CBZ/CBNZ for compare-with-zero-and-branch.
+ ref insn @ Insn::Cmp { ref left, right: ref right @ (Opnd::UImm(0) | Opnd::Imm(0)) } |
+ ref insn @ Insn::Test { ref left, right: ref right @ (Opnd::InsnOut { .. } | Opnd::Reg(_)) } if {
+ let same_opnd_if_test = if let Insn::Test { .. } = insn {
+ left == right
+ } else {
+ true
+ };
+
+ same_opnd_if_test && if let Some(
+ Insn::Jz(target) | Insn::Je(target) | Insn::Jnz(target) | Insn::Jne(target)
+ ) = iterator.peek() {
+ matches!(target, Target::SideExit { .. })
+ } else {
+ false
+ }
+ } => {
+ let reg = split_load_operand(asm, *left);
+ match iterator.peek() {
+ Some(Insn::Jz(target) | Insn::Je(target)) => asm.push_insn(Insn::Joz(reg, *target)),
+ Some(Insn::Jnz(target) | Insn::Jne(target)) => asm.push_insn(Insn::Jonz(reg, *target)),
+ _ => ()
+ }
+
+ iterator.map_insn_index(asm);
+ iterator.next_unmapped(); // Pop merged jump instruction
+ }
+ Insn::CCall { opnds, fptr, .. } => {
+ assert!(opnds.len() <= C_ARG_OPNDS.len());
+
+ // Load each operand into the corresponding argument
+ // register.
+ // Note: the iteration order is reversed to avoid corrupting x0,
+ // which is both the return value and first argument register
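+ // For example (illustrative): if opnds[1] aliases x0 (say, the return
+ // value of a previous ccall), loading x1 <- x0 before overwriting x0 with
+ // opnds[0] preserves it; iterating forwards would clobber it first.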
+ for (idx, opnd) in opnds.into_iter().enumerate().rev() {
+ // If the value that we're sending is 0, then we can use
+ // the zero register, so we just pass a UImm of 0 as the
+ // argument to the move.
+ let value = match opnd {
+ Opnd::UImm(0) | Opnd::Imm(0) => Opnd::UImm(0),
+ Opnd::Mem(_) => split_memory_address(asm, *opnd),
+ _ => *opnd
+ };
+
+ asm.load_into(Opnd::c_arg(C_ARG_OPNDS[idx]), value);
+ }
+
+ // Now we push the CCall without any arguments so that it
+ // just performs the call.
+ asm.ccall(*fptr, vec![]);
+ },
+ Insn::Cmp { left, right } => {
+ let opnd0 = split_load_operand(asm, *left);
+ let opnd0 = split_less_than_32_cmp(asm, opnd0);
+ let split_right = split_shifted_immediate(asm, *right);
+ let opnd1 = match split_right {
+ Opnd::InsnOut { .. } if opnd0.num_bits() != split_right.num_bits() => {
+ split_right.with_num_bits(opnd0.num_bits().unwrap()).unwrap()
+ },
+ _ => split_right
+ };
+
+ asm.cmp(opnd0, opnd1);
+ },
+ Insn::CRet(opnd) => {
+ match opnd {
+ // If the value is already in the return register, then
+ // we don't need to do anything.
+ Opnd::Reg(C_RET_REG) => {},
+
+ // If the value is a memory address, we need to first
+ // make sure the displacement isn't too large and then
+ // load it into the return register.
+ Opnd::Mem(_) => {
+ let split = split_memory_address(asm, *opnd);
+ asm.load_into(C_RET_OPND, split);
+ },
+
+ // Otherwise we just need to load the value into the
+ // return register.
+ _ => {
+ asm.load_into(C_RET_OPND, *opnd);
+ }
+ }
+ asm.cret(C_RET_OPND);
+ },
+ Insn::CSelZ { truthy, falsy, out } |
+ Insn::CSelNZ { truthy, falsy, out } |
+ Insn::CSelE { truthy, falsy, out } |
+ Insn::CSelNE { truthy, falsy, out } |
+ Insn::CSelL { truthy, falsy, out } |
+ Insn::CSelLE { truthy, falsy, out } |
+ Insn::CSelG { truthy, falsy, out } |
+ Insn::CSelGE { truthy, falsy, out } => {
+ let (opnd0, opnd1) = split_csel_operands(asm, *truthy, *falsy);
+ *truthy = opnd0;
+ *falsy = opnd1;
+ // Merge `csel` and `mov` into a single `csel` when possible
+ match iterator.peek() {
+ Some(Insn::Mov { dest: Opnd::Reg(reg), src })
+ if matches!(out, Opnd::InsnOut { .. }) && *out == *src && live_ranges[index] == index + 1 => {
+ *out = Opnd::Reg(*reg);
+ asm.push_insn(insn);
+ iterator.map_insn_index(asm);
+ iterator.next_unmapped(); // Pop merged Insn::Mov
+ }
+ _ => {
+ asm.push_insn(insn);
+ }
+ }
+ },
+ Insn::IncrCounter { mem, value } => {
+ let counter_addr = match mem {
+ Opnd::Mem(_) => split_lea_operand(asm, *mem),
+ _ => *mem
+ };
+
+ asm.incr_counter(counter_addr, *value);
+ },
+ Insn::JmpOpnd(opnd) => {
+ if let Opnd::Mem(_) = opnd {
+ let opnd0 = split_load_operand(asm, *opnd);
+ asm.jmp_opnd(opnd0);
+ } else {
+ asm.jmp_opnd(*opnd);
+ }
+ },
+ Insn::Load { opnd, .. } |
+ Insn::LoadInto { opnd, .. } => {
+ *opnd = match opnd {
+ Opnd::Mem(_) => split_memory_address(asm, *opnd),
+ _ => *opnd
+ };
+ asm.push_insn(insn);
+ },
+ Insn::LoadSExt { opnd, .. } => {
+ match opnd {
+ // We only want to sign extend if the operand is a
+ // register, instruction output, or memory address that
+ // is 32 bits. Otherwise we'll just load the value
+ // directly since there's no need to sign extend.
+ Opnd::Reg(Reg { num_bits: 32, .. }) |
+ Opnd::InsnOut { num_bits: 32, .. } |
+ Opnd::Mem(Mem { num_bits: 32, .. }) => {
+ asm.load_sext(*opnd);
+ },
+ _ => {
+ asm.load(*opnd);
+ }
+ };
+ },
+ Insn::Mov { dest, src } => {
+ match (&dest, &src) {
+ // If we're attempting to load into a memory operand, then
+ // we'll switch over to the store instruction.
+ (Opnd::Mem(_), _) => {
+ let opnd0 = split_memory_address(asm, *dest);
+ let value = match *src {
+ // If the source operand is zero, then we can just use
+ // the zero register.
+ Opnd::UImm(0) | Opnd::Imm(0) => Opnd::Reg(XZR_REG),
+ // The destination is a memory operand, so this is
+ // going to become a store instruction; an unsigned
+ // immediate source needs to be loaded into a register anyway.
+ Opnd::UImm(_) => asm.load(*src),
+ // The value that is being moved must be either a
+ // register or an immediate that can be encoded as a
+ // bitmask immediate. Otherwise, we'll need to split the
+ // move into multiple instructions.
+ _ => split_bitmask_immediate(asm, *src, dest.rm_num_bits())
+ };
+
+ asm.store(opnd0, value);
+ },
+ // If we're loading a memory operand into a register, then
+ // we'll switch over to the load instruction.
+ (Opnd::Reg(_), Opnd::Mem(_)) => {
+ let value = split_memory_address(asm, *src);
+ asm.load_into(*dest, value);
+ },
+ // Otherwise we'll use the normal mov instruction.
+ (Opnd::Reg(_), _) => {
+ let value = match *src {
+ // Unlike other instructions, we can avoid splitting this case, using movz.
+ Opnd::UImm(uimm) if uimm <= 0xffff => *src,
+ _ => split_bitmask_immediate(asm, *src, dest.rm_num_bits()),
+ };
+ asm.mov(*dest, value);
+ },
+ _ => unreachable!()
+ };
+ },
+ Insn::Not { opnd, .. } => {
+ // The value that is being inverted must be in a register, so
+ // if we get anything else we need to load it first.
+ let opnd0 = match opnd {
+ Opnd::Mem(_) => split_load_operand(asm, *opnd),
+ _ => *opnd
+ };
+
+ asm.not(opnd0);
+ },
+ Insn::LShift { opnd, .. } |
+ Insn::RShift { opnd, .. } |
+ Insn::URShift { opnd, .. } => {
+ // The operand must be in a register, so
+ // if we get anything else we need to load it first.
+ let opnd0 = match opnd {
+ Opnd::Mem(_) => split_load_operand(asm, *opnd),
+ _ => *opnd
+ };
+
+ *opnd = opnd0;
+ asm.push_insn(insn);
+ },
+ Insn::Store { dest, src } => {
+ // The value being stored must be in a register, so if it's
+ // not already one we'll load it first.
+ let opnd1 = match src {
+ // If the value being stored is zero, then we can just use
+ // the zero register.
+ Opnd::UImm(0) | Opnd::Imm(0) => Opnd::Reg(XZR_REG),
+ // Otherwise we'll check if we need to load it first.
+ _ => split_load_operand(asm, *src)
+ };
+
+ match dest {
+ Opnd::Reg(_) => {
+ // Store does not support a register as a dest operand.
+ asm.mov(*dest, opnd1);
+ }
+ _ => {
+ // The displacement for the STUR instruction can't be more
+ // than 9 bits long. If it's longer, we need to load the
+ // memory address into a register first.
+ let opnd0 = split_memory_address(asm, *dest);
+ asm.store(opnd0, opnd1);
+ }
+ }
+ },
+ Insn::Sub { left, right, .. } => {
+ let opnd0 = split_load_operand(asm, *left);
+ let opnd1 = split_shifted_immediate(asm, *right);
+ asm.sub(opnd0, opnd1);
+ },
+ Insn::Mul { left, right, .. } => {
+ let opnd0 = split_load_operand(asm, *left);
+ let opnd1 = split_load_operand(asm, *right);
+ asm.mul(opnd0, opnd1);
+ },
+ Insn::Test { left, right } => {
+ // The value being tested must be in a register, so if it's
+ // not already one we'll load it first.
+ let opnd0 = split_load_operand(asm, *left);
+
+ // The second value must be either a register or an
+ // unsigned immediate that can be encoded as a bitmask
+ // immediate. If it's not one of those, we'll need to load
+ // it first.
+ let opnd1 = split_bitmask_immediate(asm, *right, opnd0.rm_num_bits());
+ asm.test(opnd0, opnd1);
+ },
+ _ => {
+ // If we have an output operand, then we need to replace it
+ // with a new output operand from the new assembler.
+ if insn.out_opnd().is_some() {
+ let out_num_bits = Opnd::match_num_bits_iter(insn.opnd_iter());
+ let out = insn.out_opnd_mut().unwrap();
+ *out = asm.next_opnd_out(out_num_bits);
+ }
+
+ asm.push_insn(insn);
+ }
+ };
+
+ iterator.map_insn_index(asm);
+ }
+
+ asm_local
+ }
+
+ /// Emit platform-specific machine code
+ /// Returns a list of GC offsets. Can return failure to signal caller to retry.
+ fn arm64_emit(&mut self, cb: &mut CodeBlock, ocb: &mut Option<&mut OutlinedCb>) -> Result<Vec<u32>, EmitError> {
+ /// Determine how many instructions it will take to represent moving
+ /// this value into a register. Note that the return value of this
+ /// function must correspond to how many instructions are used to
+ /// represent this load in the emit_load_value function.
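+ /// For example (illustrative): 0xffff is a single MOVZ (1), 0x1_0001 needs
+ /// a MOVZ plus a MOVK (2), and a value populating all four 16-bit chunks
+ /// needs 4; bitmask-encodable values like 0x1_0000 always take 1.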
+ fn emit_load_size(value: u64) -> u8 {
+ if BitmaskImmediate::try_from(value).is_ok() {
+ return 1;
+ }
+
+ if value < (1 << 16) {
+ 1
+ } else if value < (1 << 32) {
+ 2
+ } else if value < (1 << 48) {
+ 3
+ } else {
+ 4
+ }
+ }
+
+ /// Emit a conditional jump instruction to a specific target. This is
+ /// called when lowering any of the conditional jump instructions.
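+ /// Three tiers, cheapest first (illustrative summary): a lone B.cond when
+ /// the target is within its +/-1 MiB immediate range, an inverted B.cond
+ /// hopping over an unconditional B for +/-128 MiB, and otherwise an
+ /// inverted B.cond over a register load of the address followed by BR.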
+ fn emit_conditional_jump<const CONDITION: u8>(cb: &mut CodeBlock, target: Target) {
+ match target {
+ Target::CodePtr(dst_ptr) | Target::SideExitPtr(dst_ptr) => {
+ let dst_addr = dst_ptr.as_offset();
+ let src_addr = cb.get_write_ptr().as_offset();
+
+ let num_insns = if bcond_offset_fits_bits((dst_addr - src_addr) / 4) {
+ // If the jump offset fits into the conditional jump as
+ // an immediate value and it's properly aligned, then we
+ // can use the b.cond instruction directly. We're safe
+ // to use as i32 here since we already checked that it
+ // fits.
+ let bytes = (dst_addr - src_addr) as i32;
+ bcond(cb, CONDITION, InstructionOffset::from_bytes(bytes));
+
+ // Here we're going to return 1 because we've only
+ // written out 1 instruction.
+ 1
+ } else if b_offset_fits_bits((dst_addr - (src_addr + 4)) / 4) { // + 4 for bcond
+ // If the jump offset fits into the unconditional jump as
+ // an immediate value, we can use inverse b.cond + b.
+ //
+ // We're going to write out the inverse condition so
+ // that if it doesn't match it will skip over the
+ // instruction used for branching.
+ bcond(cb, Condition::inverse(CONDITION), 2.into());
+ b(cb, InstructionOffset::from_bytes((dst_addr - (src_addr + 4)) as i32)); // + 4 for bcond
+
+ // We've only written out 2 instructions.
+ 2
+ } else {
+ // Otherwise, we need to load the address into a
+ // register and use the branch register instruction.
+ let dst_addr = (dst_ptr.raw_ptr(cb) as usize).as_u64();
+ let load_insns: i32 = emit_load_size(dst_addr).into();
+
+ // We're going to write out the inverse condition so
+ // that if it doesn't match it will skip over the
+ // instructions used for branching.
+ bcond(cb, Condition::inverse(CONDITION), (load_insns + 2).into());
+ emit_load_value(cb, Assembler::SCRATCH0, dst_addr);
+ br(cb, Assembler::SCRATCH0);
+
+ // Here we'll return the number of instructions that it
+ // took to write out the destination address + 1 for the
+ // b.cond and 1 for the br.
+ load_insns + 2
+ };
+
+ if let Target::CodePtr(_) = target {
+ // We need to make sure we have at least 6 instructions for
+ // every kind of jump for invalidation purposes, so we're
+ // going to write out padding nop instructions here.
+ assert!(num_insns <= cb.conditional_jump_insns());
+ for _ in num_insns..cb.conditional_jump_insns() { nop(cb); }
+ }
+ },
+ Target::Label(label_idx) => {
+ // Here we're going to save enough space for ourselves and
+ // then come back and write the instruction once we know the
+ // offset. We're going to assume we can fit into a single
+ // b.cond instruction. It will panic otherwise.
+ cb.label_ref(label_idx, 4, |cb, src_addr, dst_addr| {
+ let bytes: i32 = (dst_addr - (src_addr - 4)).try_into().unwrap();
+ bcond(cb, CONDITION, InstructionOffset::from_bytes(bytes));
+ });
+ },
+ Target::SideExit { .. } => {
+ unreachable!("Target::SideExit should have been compiled by compile_side_exit")
+ },
+ };
+ }
+
+ /// Emit a CBZ or CBNZ which branches when a register is zero or non-zero
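+ /// Like B.cond, CBZ/CBNZ carry a signed 19-bit instruction offset (about
+ /// +/-1 MiB), so an out-of-range target falls back to the inverted form
+ /// branching over an address load and a BR, mirroring emit_conditional_jump.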
+ fn emit_cmp_zero_jump(cb: &mut CodeBlock, reg: A64Opnd, branch_if_zero: bool, target: Target) {
+ if let Target::SideExitPtr(dst_ptr) = target {
+ let dst_addr = dst_ptr.as_offset();
+ let src_addr = cb.get_write_ptr().as_offset();
+
+ if cmp_branch_offset_fits_bits((dst_addr - src_addr) / 4) {
+ // If the offset fits in one instruction, generate cbz or cbnz
+ let bytes = (dst_addr - src_addr) as i32;
+ if branch_if_zero {
+ cbz(cb, reg, InstructionOffset::from_bytes(bytes));
+ } else {
+ cbnz(cb, reg, InstructionOffset::from_bytes(bytes));
+ }
+ } else {
+ // Otherwise, we load the address into a register and
+ // use the branch register instruction. Note that because
+ // side exits should always be close, this form should be
+ // rare or impossible to see.
+ let dst_addr = dst_ptr.raw_addr(cb) as u64;
+ let load_insns: i32 = emit_load_size(dst_addr).into();
+
+ // Write out the inverse condition so that if
+ // it doesn't match it will skip over the
+ // instructions used for branching.
+ if branch_if_zero {
+ cbnz(cb, reg, InstructionOffset::from_insns(load_insns + 2));
+ } else {
+ cbz(cb, reg, InstructionOffset::from_insns(load_insns + 2));
+ }
+ emit_load_value(cb, Assembler::SCRATCH0, dst_addr);
+ br(cb, Assembler::SCRATCH0);
+
+ }
+ } else {
+ unreachable!("We should only generate Joz/Jonz with side-exit targets");
+ }
+ }
+
+ /// Push a value to the stack by subtracting from the stack pointer then storing,
+ /// leaving an 8-byte gap for alignment.
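+ /// Note (assumption): the AArch64 ABI keeps SP 16-byte aligned, which is
+ /// why C_SP_STEP moves a full 16 bytes even though only 8 are stored.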
+ fn emit_push(cb: &mut CodeBlock, opnd: A64Opnd) {
+ str_pre(cb, opnd, A64Opnd::new_mem(64, C_SP_REG, -C_SP_STEP));
+ }
+
+ /// Pop a value from the stack by loading `[sp]` then adding to the stack pointer.
+ fn emit_pop(cb: &mut CodeBlock, opnd: A64Opnd) {
+ ldr_post(cb, opnd, A64Opnd::new_mem(64, C_SP_REG, C_SP_STEP));
+ }
+
+ /// Compile a side exit if Target::SideExit is given.
+ fn compile_side_exit(
+ target: Target,
+ asm: &mut Assembler,
+ ocb: &mut Option<&mut OutlinedCb>,
+ ) -> Result<Target, EmitError> {
+ if let Target::SideExit { counter, context } = target {
+ let side_exit = asm.get_side_exit(&context.unwrap(), Some(counter), ocb.as_mut().unwrap())
+ .ok_or(EmitError::OutOfMemory)?;
+ Ok(Target::SideExitPtr(side_exit))
+ } else {
+ Ok(target)
+ }
+ }
+
+ // dbg!(&self.insns);
+
+ // List of GC offsets
+ let mut gc_offsets: Vec<u32> = Vec::new();
+
+ // Buffered list of PosMarker callbacks to fire if codegen is successful
+ let mut pos_markers: Vec<(usize, CodePtr)> = vec![];
+
+ // For each instruction
+ let start_write_pos = cb.get_write_pos();
+ let mut insn_idx: usize = 0;
+ while let Some(insn) = self.insns.get(insn_idx) {
+ let src_ptr = cb.get_write_ptr();
+ let had_dropped_bytes = cb.has_dropped_bytes();
+ let old_label_state = cb.get_label_state();
+ let mut insn_gc_offsets: Vec<u32> = Vec::new();
+
+ match insn {
+ Insn::Comment(text) => {
+ cb.add_comment(text);
+ },
+ Insn::Label(target) => {
+ cb.write_label(target.unwrap_label_idx());
+ },
+ // Report back the current position in the generated code
+ Insn::PosMarker(..) => {
+ pos_markers.push((insn_idx, cb.get_write_ptr()))
+ }
+ Insn::BakeString(text) => {
+ for byte in text.as_bytes() {
+ cb.write_byte(*byte);
+ }
+
+ // Add a null-terminator byte for safety (in case we pass
+ // this to C code)
+ cb.write_byte(0);
+
+ // Pad out the string to the next 4-byte boundary so that
+ // it's easy to jump past.
+ for _ in 0..(4 - ((text.len() + 1) % 4)) {
+ cb.write_byte(0);
+ }
+ },
+ Insn::FrameSetup => {
+ stp_pre(cb, X29, X30, A64Opnd::new_mem(128, C_SP_REG, -16));
+
+ // X29 (frame_pointer) = SP
+ mov(cb, X29, C_SP_REG);
+ },
+ Insn::FrameTeardown => {
+ // SP = X29 (frame pointer)
+ mov(cb, C_SP_REG, X29);
+
+ ldp_post(cb, X29, X30, A64Opnd::new_mem(128, C_SP_REG, 16));
+ },
+ Insn::Add { left, right, out } => {
+ adds(cb, out.into(), left.into(), right.into());
+ },
+ Insn::Sub { left, right, out } => {
+ subs(cb, out.into(), left.into(), right.into());
+ },
+ Insn::Mul { left, right, out } => {
+ // If the next instruction is jo (jump on overflow)
+ match (self.insns.get(insn_idx + 1), self.insns.get(insn_idx + 2)) {
+ (Some(Insn::JoMul(_)), _) |
+ (Some(Insn::PosMarker(_)), Some(Insn::JoMul(_))) => {
+ // Compute the high 64 bits
+ smulh(cb, Self::SCRATCH0, left.into(), right.into());
+
+ // Compute the low 64 bits
+ // This may clobber one of the input registers,
+ // so we do it after smulh
+ mul(cb, out.into(), left.into(), right.into());
+
+ // Produce a register that is all zeros or all ones
+ // Based on the sign bit of the 64-bit mul result
+ asr(cb, Self::SCRATCH1, out.into(), A64Opnd::UImm(63));
+
+ // If the high 64-bits are not all zeros or all ones,
+ // matching the sign bit, then we have an overflow
+ cmp(cb, Self::SCRATCH0, Self::SCRATCH1);
+ // Insn::JoMul will emit_conditional_jump::<{Condition::NE}>
+ }
+ _ => {
+ mul(cb, out.into(), left.into(), right.into());
+ }
+ }
+ },
+ Insn::And { left, right, out } => {
+ and(cb, out.into(), left.into(), right.into());
+ },
+ Insn::Or { left, right, out } => {
+ orr(cb, out.into(), left.into(), right.into());
+ },
+ Insn::Xor { left, right, out } => {
+ eor(cb, out.into(), left.into(), right.into());
+ },
+ Insn::Not { opnd, out } => {
+ mvn(cb, out.into(), opnd.into());
+ },
+ Insn::RShift { opnd, shift, out } => {
+ asr(cb, out.into(), opnd.into(), shift.into());
+ },
+ Insn::URShift { opnd, shift, out } => {
+ lsr(cb, out.into(), opnd.into(), shift.into());
+ },
+ Insn::LShift { opnd, shift, out } => {
+ lsl(cb, out.into(), opnd.into(), shift.into());
+ },
+ Insn::Store { dest, src } => {
+ // This order may be surprising but it is correct. The way
+ // the Arm64 assembler works, the register that is going to
+ // be stored is first and the address is second. However in
+ // our IR we have the address first and the register second.
+ match dest.rm_num_bits() {
+ 64 | 32 => stur(cb, src.into(), dest.into()),
+ 16 => sturh(cb, src.into(), dest.into()),
+ num_bits => panic!("unexpected dest num_bits: {} (src: {:#?}, dest: {:#?})", num_bits, src, dest),
+ }
+ },
+ Insn::Load { opnd, out } |
+ Insn::LoadInto { opnd, dest: out } => {
+ match *opnd {
+ Opnd::Reg(_) | Opnd::InsnOut { .. } => {
+ mov(cb, out.into(), opnd.into());
+ },
+ Opnd::UImm(uimm) => {
+ emit_load_value(cb, out.into(), uimm);
+ },
+ Opnd::Imm(imm) => {
+ emit_load_value(cb, out.into(), imm as u64);
+ },
+ Opnd::Mem(_) => {
+ match opnd.rm_num_bits() {
+ 64 | 32 => ldur(cb, out.into(), opnd.into()),
+ 16 => ldurh(cb, out.into(), opnd.into()),
+ 8 => ldurb(cb, out.into(), opnd.into()),
+ num_bits => panic!("unexpected num_bits: {}", num_bits)
+ };
+ },
+ Opnd::Value(value) => {
+ // We don't need to check if it's a special const
+ // here because we only allow these operands to hit
+ // this point if they're not a special const.
+ assert!(!value.special_const_p());
+
+ // This assumes only load instructions can contain
+ // references to GC'd Value operands. If the value
+ // being loaded is a heap object, we'll report that
+ // back out to the gc_offsets list.
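+ // The emitted layout is, roughly:
+ //   LDR (literal) out, #+8 ; load the VALUE embedded 8 bytes ahead
+ //   B #+12                 ; skip over the embedded VALUE
+ //   .quad <VALUE>          ; 8 raw bytes, recorded in insn_gc_offsets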
+ ldr_literal(cb, out.into(), 2.into());
+ b(cb, InstructionOffset::from_bytes(4 + (SIZEOF_VALUE as i32)));
+ cb.write_bytes(&value.as_u64().to_le_bytes());
+
+ let ptr_offset: u32 = (cb.get_write_pos() as u32) - (SIZEOF_VALUE as u32);
+ insn_gc_offsets.push(ptr_offset);
+ },
+ Opnd::CArg { .. } => {
+ unreachable!("C argument operand was not lowered before arm64_emit");
+ }
+ Opnd::Stack { .. } => {
+ unreachable!("Stack operand was not lowered before arm64_emit");
+ }
+ Opnd::None => {
+ unreachable!("Attempted to load from None operand");
+ }
+ };
+ },
+ Insn::LoadSExt { opnd, out } => {
+ match *opnd {
+ Opnd::Reg(Reg { num_bits: 32, .. }) |
+ Opnd::InsnOut { num_bits: 32, .. } => {
+ sxtw(cb, out.into(), opnd.into());
+ },
+ Opnd::Mem(Mem { num_bits: 32, .. }) => {
+ ldursw(cb, out.into(), opnd.into());
+ },
+ _ => unreachable!()
+ };
+ },
+ Insn::Mov { dest, src } => {
+ // This supports the following two kinds of immediates:
+ // * The value fits into a single movz instruction
+ // * It can be encoded with the special bitmask immediate encoding
+ // arm64_split() should have split other immediates that require multiple instructions.
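+ // For example (illustrative): mov(dest, 0xffff) becomes `movz dest, #0xffff`,
+ // while 0x10000 falls through to the bitmask path and becomes
+ // `orr dest, xzr, #0x10000` (see test_not_split_mov below).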
+ match src {
+ Opnd::UImm(uimm) if *uimm <= 0xffff => {
+ movz(cb, dest.into(), A64Opnd::new_uimm(*uimm), 0);
+ },
+ _ => {
+ mov(cb, dest.into(), src.into());
+ }
+ }
+ },
+ Insn::Lea { opnd, out } => {
+ let opnd: A64Opnd = opnd.into();
+
+ match opnd {
+ A64Opnd::Mem(mem) => {
+ add(
+ cb,
+ out.into(),
+ A64Opnd::Reg(A64Reg { reg_no: mem.base_reg_no, num_bits: 64 }),
+ A64Opnd::new_imm(mem.disp.into())
+ );
+ },
+ _ => {
+ panic!("Op::Lea only accepts Opnd::Mem operands.");
+ }
+ };
+ },
+ Insn::LeaJumpTarget { out, target, .. } => {
+ if let Target::Label(label_idx) = target {
+ // Set output to the raw address of the label
+ cb.label_ref(*label_idx, 4, |cb, end_addr, dst_addr| {
+ adr(cb, Self::SCRATCH0, A64Opnd::new_imm(dst_addr - (end_addr - 4)));
+ });
+
+ mov(cb, out.into(), Self::SCRATCH0);
+ } else {
+ // Set output to the jump target's raw address
+ let target_code = target.unwrap_code_ptr();
+ let target_addr = target_code.raw_addr(cb).as_u64();
+ emit_load_value(cb, out.into(), target_addr);
+ }
+ },
+ Insn::CPush(opnd) => {
+ emit_push(cb, opnd.into());
+ },
+ Insn::CPop { out } => {
+ emit_pop(cb, out.into());
+ },
+ Insn::CPopInto(opnd) => {
+ emit_pop(cb, opnd.into());
+ },
+ Insn::CPushAll => {
+ let regs = Assembler::get_caller_save_regs();
+
+ for reg in regs {
+ emit_push(cb, A64Opnd::Reg(reg));
+ }
+
+ // Push the flags/state register
+ mrs(cb, Self::SCRATCH0, SystemRegister::NZCV);
+ emit_push(cb, Self::SCRATCH0);
+ },
+ Insn::CPopAll => {
+ let regs = Assembler::get_caller_save_regs();
+
+ // Pop the state/flags register
+ emit_pop(cb, Self::SCRATCH0);
+ msr(cb, SystemRegister::NZCV, Self::SCRATCH0);
+
+ for reg in regs.into_iter().rev() {
+ emit_pop(cb, A64Opnd::Reg(reg));
+ }
+ },
+ Insn::CCall { fptr, .. } => {
+ // The offset to the call target in bytes
+ let src_addr = cb.get_write_ptr().raw_ptr(cb) as i64;
+ let dst_addr = *fptr as i64;
+
+ // Use BL if the offset is short enough to encode as an immediate.
+ // Otherwise, use BLR with a register.
+ if b_offset_fits_bits((dst_addr - src_addr) / 4) {
+ bl(cb, InstructionOffset::from_bytes((dst_addr - src_addr) as i32));
+ } else {
+ emit_load_value(cb, Self::SCRATCH0, dst_addr as u64);
+ blr(cb, Self::SCRATCH0);
+ }
+ },
+ Insn::CRet { .. } => {
+ ret(cb, A64Opnd::None);
+ },
+ Insn::Cmp { left, right } => {
+ cmp(cb, left.into(), right.into());
+ },
+ Insn::Test { left, right } => {
+ tst(cb, left.into(), right.into());
+ },
+ Insn::JmpOpnd(opnd) => {
+ br(cb, opnd.into());
+ },
+ Insn::Jmp(target) => {
+ match compile_side_exit(*target, self, ocb)? {
+ Target::CodePtr(dst_ptr) => {
+ emit_jmp_ptr(cb, dst_ptr, true);
+ },
+ Target::SideExitPtr(dst_ptr) => {
+ emit_jmp_ptr(cb, dst_ptr, false);
+ },
+ Target::Label(label_idx) => {
+ // Here we're going to save enough space for
+ // ourselves and then come back and write the
+ // instruction once we know the offset. We're going
+ // to assume we can fit into a single b instruction.
+ // It will panic otherwise.
+ cb.label_ref(label_idx, 4, |cb, src_addr, dst_addr| {
+ let bytes: i32 = (dst_addr - (src_addr - 4)).try_into().unwrap();
+ b(cb, InstructionOffset::from_bytes(bytes));
+ });
+ },
+ Target::SideExit { .. } => {
+ unreachable!("Target::SideExit should have been compiled by compile_side_exit")
+ },
+ };
+ },
+ Insn::Je(target) | Insn::Jz(target) => {
+ emit_conditional_jump::<{Condition::EQ}>(cb, compile_side_exit(*target, self, ocb)?);
+ },
+ Insn::Jne(target) | Insn::Jnz(target) | Insn::JoMul(target) => {
+ emit_conditional_jump::<{Condition::NE}>(cb, compile_side_exit(*target, self, ocb)?);
+ },
+ Insn::Jl(target) => {
+ emit_conditional_jump::<{Condition::LT}>(cb, compile_side_exit(*target, self, ocb)?);
+ },
+ Insn::Jg(target) => {
+ emit_conditional_jump::<{Condition::GT}>(cb, compile_side_exit(*target, self, ocb)?);
+ },
+ Insn::Jge(target) => {
+ emit_conditional_jump::<{Condition::GE}>(cb, compile_side_exit(*target, self, ocb)?);
+ },
+ Insn::Jbe(target) => {
+ emit_conditional_jump::<{Condition::LS}>(cb, compile_side_exit(*target, self, ocb)?);
+ },
+ Insn::Jb(target) => {
+ emit_conditional_jump::<{Condition::CC}>(cb, compile_side_exit(*target, self, ocb)?);
+ },
+ Insn::Jo(target) => {
+ emit_conditional_jump::<{Condition::VS}>(cb, compile_side_exit(*target, self, ocb)?);
+ },
+ Insn::Joz(opnd, target) => {
+ emit_cmp_zero_jump(cb, opnd.into(), true, compile_side_exit(*target, self, ocb)?);
+ },
+ Insn::Jonz(opnd, target) => {
+ emit_cmp_zero_jump(cb, opnd.into(), false, compile_side_exit(*target, self, ocb)?);
+ },
+ Insn::IncrCounter { mem, value } => {
+ let label = cb.new_label("incr_counter_loop".to_string());
+ cb.write_label(label);
+
+ ldaxr(cb, Self::SCRATCH0, mem.into());
+ add(cb, Self::SCRATCH0, Self::SCRATCH0, value.into());
+
+ // The status register that gets used to track whether or
+ // not the store was successful must be 32 bits. Since we
+ // store the SCRATCH registers as their 64-bit versions, we
+ // need to rewrap it here.
+ let status = A64Opnd::Reg(Self::SCRATCH1.unwrap_reg().with_num_bits(32));
+ stlxr(cb, status, Self::SCRATCH0, mem.into());
+
+ cmp(cb, Self::SCRATCH1, A64Opnd::new_uimm(0));
+ emit_conditional_jump::<{Condition::NE}>(cb, Target::Label(label));
+ },
+ Insn::Breakpoint => {
+ brk(cb, A64Opnd::None);
+ },
+ Insn::CSelZ { truthy, falsy, out } |
+ Insn::CSelE { truthy, falsy, out } => {
+ csel(cb, out.into(), truthy.into(), falsy.into(), Condition::EQ);
+ },
+ Insn::CSelNZ { truthy, falsy, out } |
+ Insn::CSelNE { truthy, falsy, out } => {
+ csel(cb, out.into(), truthy.into(), falsy.into(), Condition::NE);
+ },
+ Insn::CSelL { truthy, falsy, out } => {
+ csel(cb, out.into(), truthy.into(), falsy.into(), Condition::LT);
+ },
+ Insn::CSelLE { truthy, falsy, out } => {
+ csel(cb, out.into(), truthy.into(), falsy.into(), Condition::LE);
+ },
+ Insn::CSelG { truthy, falsy, out } => {
+ csel(cb, out.into(), truthy.into(), falsy.into(), Condition::GT);
+ },
+ Insn::CSelGE { truthy, falsy, out } => {
+ csel(cb, out.into(), truthy.into(), falsy.into(), Condition::GE);
+ }
+ Insn::LiveReg { .. } => (), // just a reg alloc signal, no code
+ Insn::PadInvalPatch => {
+ while (cb.get_write_pos().saturating_sub(std::cmp::max(start_write_pos, cb.page_start_pos()))) < cb.jmp_ptr_bytes() && !cb.has_dropped_bytes() {
+ nop(cb);
+ }
+ }
+ };
+
+ // On failure, jump to the next page and retry the current insn
+ if !had_dropped_bytes && cb.has_dropped_bytes() && cb.next_page(src_ptr, emit_jmp_ptr_with_invalidation) {
+ // Reset cb states before retrying the current Insn
+ cb.set_label_state(old_label_state);
+
+ // We don't want label references to cross page boundaries. Signal caller for
+ // retry.
+ if !self.label_names.is_empty() {
+ return Err(EmitError::RetryOnNextPage);
+ }
+ } else {
+ insn_idx += 1;
+ gc_offsets.append(&mut insn_gc_offsets);
+ }
+ }
+
+ // Error if we couldn't write out everything
+ if cb.has_dropped_bytes() {
+ return Err(EmitError::OutOfMemory)
+ } else {
+ // No bytes dropped, so the pos markers point to valid code
+ for (insn_idx, pos) in pos_markers {
+ if let Insn::PosMarker(callback) = self.insns.get(insn_idx).unwrap() {
+ callback(pos, &cb);
+ } else {
+ panic!("non-PosMarker in pos_markers insn_idx={insn_idx} {self:?}");
+ }
+ }
+
+ return Ok(gc_offsets)
+ }
+ }
+
+ /// Optimize and compile the stored instructions
+ pub fn compile_with_regs(self, cb: &mut CodeBlock, ocb: Option<&mut OutlinedCb>, regs: Vec<Reg>) -> Option<(CodePtr, Vec<u32>)> {
+ let asm = self.arm64_split();
+ let mut asm = asm.alloc_regs(regs);
+
+ // Create label instances in the code block
+ for (idx, name) in asm.label_names.iter().enumerate() {
+ let label_idx = cb.new_label(name.to_string());
+ assert!(label_idx == idx);
+ }
+
+ let start_ptr = cb.get_write_ptr();
+ let starting_label_state = cb.get_label_state();
+ let mut ocb = ocb; // for &mut
+ let emit_result = match asm.arm64_emit(cb, &mut ocb) {
+ Err(EmitError::RetryOnNextPage) => {
+ // We want to lower jumps to labels to b.cond instructions, which have a 1 MiB
+ // range limit. We can easily exceed the limit in case the jump straddles two pages.
+ // In this case, we retry with a fresh page once.
+ cb.set_label_state(starting_label_state);
+ if cb.next_page(start_ptr, emit_jmp_ptr_with_invalidation) {
+ asm.arm64_emit(cb, &mut ocb)
+ } else {
+ Err(EmitError::OutOfMemory)
+ }
+ }
+ result => result
+ };
+
+ if let (Ok(gc_offsets), false) = (emit_result, cb.has_dropped_bytes()) {
+ cb.link_labels();
+
+ // Invalidate icache for newly written out region so we don't run stale code.
+ // It should invalidate only the code ranges of the current cb because the code
+ // ranges of the other cb might have a memory region that is still PROT_NONE.
+ #[cfg(not(test))]
+ cb.without_page_end_reserve(|cb| {
+ for (start, end) in cb.writable_addrs(start_ptr, cb.get_write_ptr()) {
+ unsafe { rb_jit_icache_invalidate(start as _, end as _) };
+ }
+ });
+
+ Some((start_ptr, gc_offsets))
+ } else {
+ cb.clear_labels();
+
+ None
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::disasm::*;
+
+ fn setup_asm() -> (Assembler, CodeBlock) {
+ (Assembler::new(0), CodeBlock::new_dummy(1024))
+ }
+
+ #[test]
+ fn test_emit_add() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.add(Opnd::Reg(X0_REG), Opnd::Reg(X1_REG));
+ asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), opnd);
+ asm.compile_with_regs(&mut cb, None, vec![X3_REG]);
+
+ // Assert that only 2 instructions were written.
+ assert_eq!(8, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_emit_bake_string() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.bake_string("Hello, world!");
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ // Testing that we pad the string to the nearest 4-byte boundary to make
+ // it easier to jump over.
+ assert_eq!(16, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_emit_cpush_all() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.cpush_all();
+ asm.compile_with_num_regs(&mut cb, 0);
+ }
+
+ #[test]
+ fn test_emit_cpop_all() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.cpop_all(crate::core::RegMapping::default());
+ asm.compile_with_num_regs(&mut cb, 0);
+ }
+
+ #[test]
+ fn test_emit_frame() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.frame_setup();
+ asm.frame_teardown();
+ asm.compile_with_num_regs(&mut cb, 0);
+ }
+
+ #[test]
+ fn test_emit_je_fits_into_bcond() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let target: CodePtr = cb.get_write_ptr().add_bytes(80);
+
+ asm.je(Target::CodePtr(target));
+ asm.compile_with_num_regs(&mut cb, 0);
+ }
+
+ #[test]
+ fn test_emit_je_does_not_fit_into_bcond() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let offset = 1 << 21;
+ let target: CodePtr = cb.get_write_ptr().add_bytes(offset);
+
+ asm.je(Target::CodePtr(target));
+ asm.compile_with_num_regs(&mut cb, 0);
+ }
+
+ #[test]
+ fn test_emit_lea_label() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let label = asm.new_label("label");
+ let opnd = asm.lea_jump_target(label);
+
+ asm.write_label(label);
+ asm.bake_string("Hello, world!");
+ asm.store(Opnd::mem(64, SP, 0), opnd);
+
+ asm.compile_with_num_regs(&mut cb, 1);
+ }
+
+ #[test]
+ fn test_emit_load_mem_disp_fits_into_load() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.load(Opnd::mem(64, SP, 0));
+ asm.store(Opnd::mem(64, SP, 0), opnd);
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ // Assert that two instructions were written: LDUR and STUR.
+ assert_eq!(8, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_emit_load_mem_disp_fits_into_add() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.load(Opnd::mem(64, SP, 1 << 10));
+ asm.store(Opnd::mem(64, SP, 0), opnd);
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ // Assert that three instructions were written: ADD, LDUR, and STUR.
+ assert_eq!(12, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_emit_load_mem_disp_does_not_fit_into_add() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.load(Opnd::mem(64, SP, 1 << 12 | 1));
+ asm.store(Opnd::mem(64, SP, 0), opnd);
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ // Assert that four instructions were written: MOVZ, ADD, LDUR, and STUR.
+ assert_eq!(16, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_emit_load_value_immediate() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.load(Opnd::Value(Qnil));
+ asm.store(Opnd::mem(64, SP, 0), opnd);
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ // Assert that only two instructions were written since the value is an
+ // immediate.
+ assert_eq!(8, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_emit_load_value_non_immediate() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.load(Opnd::Value(VALUE(0xCAFECAFECAFE0000)));
+ asm.store(Opnd::mem(64, SP, 0), opnd);
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ // Assert that five instructions were written since the value is not an
+ // immediate and needs to be loaded into a register.
+ assert_eq!(20, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_emit_test_32b_reg_not_bitmask_imm() {
+ let (mut asm, mut cb) = setup_asm();
+ let w0 = Opnd::Reg(X0_REG).with_num_bits(32).unwrap();
+ asm.test(w0, Opnd::UImm(u32::MAX.into()));
+ // All ones is not encodable with a bitmask immediate,
+ // so this needs one register
+ asm.compile_with_num_regs(&mut cb, 1);
+ }
+
+ #[test]
+ fn test_emit_test_32b_reg_bitmask_imm() {
+ let (mut asm, mut cb) = setup_asm();
+ let w0 = Opnd::Reg(X0_REG).with_num_bits(32).unwrap();
+ asm.test(w0, Opnd::UImm(0x80000001));
+ asm.compile_with_num_regs(&mut cb, 0);
+ }
+
+ #[test]
+ fn test_emit_or() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.or(Opnd::Reg(X0_REG), Opnd::Reg(X1_REG));
+ asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), opnd);
+ asm.compile_with_num_regs(&mut cb, 1);
+ }
+
+ #[test]
+ fn test_emit_lshift() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.lshift(Opnd::Reg(X0_REG), Opnd::UImm(5));
+ asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), opnd);
+ asm.compile_with_num_regs(&mut cb, 1);
+ }
+
+ #[test]
+ fn test_emit_rshift() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.rshift(Opnd::Reg(X0_REG), Opnd::UImm(5));
+ asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), opnd);
+ asm.compile_with_num_regs(&mut cb, 1);
+ }
+
+ #[test]
+ fn test_emit_urshift() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.urshift(Opnd::Reg(X0_REG), Opnd::UImm(5));
+ asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), opnd);
+ asm.compile_with_num_regs(&mut cb, 1);
+ }
+
+ #[test]
+ fn test_emit_test() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.test(Opnd::Reg(X0_REG), Opnd::Reg(X1_REG));
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ // Assert that only one instruction was written.
+ assert_eq!(4, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_emit_test_with_encodable_unsigned_immediate() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.test(Opnd::Reg(X0_REG), Opnd::UImm(7));
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ // Assert that only one instruction was written.
+ assert_eq!(4, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_emit_test_with_unencodable_unsigned_immediate() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.test(Opnd::Reg(X0_REG), Opnd::UImm(5));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ // Assert that a load and a test instruction were written.
+ assert_eq!(8, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_emit_test_with_encodable_signed_immediate() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.test(Opnd::Reg(X0_REG), Opnd::Imm(7));
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ // Assert that only one instruction was written.
+ assert_eq!(4, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_emit_test_with_unencodable_signed_immediate() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.test(Opnd::Reg(X0_REG), Opnd::Imm(5));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ // Assert that a load and a test instruction were written.
+ assert_eq!(8, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_emit_test_with_negative_signed_immediate() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.test(Opnd::Reg(X0_REG), Opnd::Imm(-7));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ // Assert that only a test instruction was written.
+ assert_eq!(4, cb.get_write_pos());
+ }
+
+ #[test]
+ fn test_32_bit_register_with_some_number() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let shape_opnd = Opnd::mem(32, Opnd::Reg(X0_REG), 6);
+ asm.cmp(shape_opnd, Opnd::UImm(4097));
+ asm.compile_with_num_regs(&mut cb, 2);
+ }
+
+ #[test]
+ fn test_16_bit_register_store_some_number() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let shape_opnd = Opnd::mem(16, Opnd::Reg(X0_REG), 0);
+ asm.store(shape_opnd, Opnd::UImm(4097));
+ asm.compile_with_num_regs(&mut cb, 2);
+ }
+
+ #[test]
+ fn test_32_bit_register_store_some_number() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let shape_opnd = Opnd::mem(32, Opnd::Reg(X0_REG), 6);
+ asm.store(shape_opnd, Opnd::UImm(4097));
+ asm.compile_with_num_regs(&mut cb, 2);
+ }
+
+ #[test]
+ fn test_bcond_straddling_code_pages() {
+ const LANDING_PAGE: usize = 65;
+ let mut asm = Assembler::new(0);
+ let mut cb = CodeBlock::new_dummy_with_freed_pages(vec![0, LANDING_PAGE]);
+
+ // Skip to near the end of the page. Room for two instructions.
+ cb.set_pos(cb.page_start_pos() + cb.page_end() - 8);
+
+ let end = asm.new_label("end");
+ // Start with a conditional jump...
+ asm.jz(end);
+
+ // A few instructions, enough to cause a page switch.
+ let sum = asm.add(399.into(), 111.into());
+ let xorred = asm.xor(sum, 859.into());
+ asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), xorred);
+ asm.store(Opnd::mem(64, Opnd::Reg(X0_REG), 0), xorred);
+
+ // The branch target. It should be in the landing page.
+ asm.write_label(end);
+ asm.cret(xorred);
+
+ // [Bug #19385]
+ // This used to panic with "The offset must be 19 bits or less."
+ // due to attempting to lower the `asm.jz` above to a `b.eq` with an offset that's > 1 MiB.
+ let starting_pos = cb.get_write_pos();
+ asm.compile_with_num_regs(&mut cb, 2);
+ let gap = cb.get_write_pos() - starting_pos;
+ assert!(gap > 0b1111111111111111111);
+
+ let instruction_at_starting_pos: [u8; 4] = unsafe {
+ std::slice::from_raw_parts(cb.get_ptr(starting_pos).raw_ptr(&cb), 4)
+ }.try_into().unwrap();
+ assert_eq!(
+ 0b000101 << 26_u32,
+ u32::from_le_bytes(instruction_at_starting_pos) & (0b111111 << 26_u32),
+ "starting instruction should be an unconditional branch to the new page (B)"
+ );
+ }
+
+ #[test]
+ fn test_emit_xor() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let opnd = asm.xor(Opnd::Reg(X0_REG), Opnd::Reg(X1_REG));
+ asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), opnd);
+
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_disasm!(cb, "0b0001ca4b0000f8", "
+ 0x0: eor x11, x0, x1
+ 0x4: stur x11, [x2]
+ ");
+ }
+
+ #[test]
+ #[cfg(feature = "disasm")]
+ fn test_simple_disasm() -> std::result::Result<(), capstone::Error> {
+ // Test drive Capstone with simple input
+ use capstone::prelude::*;
+
+ let cs = Capstone::new()
+ .arm64()
+ .mode(arch::arm64::ArchMode::Arm)
+ .build()?;
+
+ let insns = cs.disasm_all(&[0x60, 0x0f, 0x80, 0xF2], 0x1000)?;
+
+ match insns.as_ref() {
+ [insn] => {
+ assert_eq!(Some("movk"), insn.mnemonic());
+ Ok(())
+ }
+ _ => Err(capstone::Error::CustomError(
+ "expected to disassemble to movk",
+ )),
+ }
+ }
+
+ #[test]
+ fn test_replace_mov_with_ldur() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.mov(Opnd::Reg(TEMP_REGS[0]), Opnd::mem(64, CFP, 8));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_disasm!(cb, "618240f8", {"
+ 0x0: ldur x1, [x19, #8]
+ "});
+ }
+
+ #[test]
+ fn test_not_split_mov() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.mov(Opnd::Reg(TEMP_REGS[0]), Opnd::UImm(0xffff));
+ asm.mov(Opnd::Reg(TEMP_REGS[0]), Opnd::UImm(0x10000));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_disasm!(cb, "e1ff9fd2e10370b2", {"
+ 0x0: mov x1, #0xffff
+ 0x4: orr x1, xzr, #0x10000
+ "});
+ }
+
+ #[test]
+ fn test_merge_csel_mov() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let out = asm.csel_l(Qtrue.into(), Qfalse.into());
+ asm.mov(Opnd::Reg(TEMP_REGS[0]), out);
+ asm.compile_with_num_regs(&mut cb, 2);
+
+ assert_disasm!(cb, "8b0280d20c0080d261b18c9a", {"
+ 0x0: mov x11, #0x14
+ 0x4: mov x12, #0
+ 0x8: csel x1, x11, x12, lt
+ "});
+ }
+
+ #[test]
+ fn test_add_with_immediate() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let out = asm.add(Opnd::Reg(TEMP_REGS[1]), 1.into());
+ let out = asm.add(out, 1_usize.into());
+ asm.mov(Opnd::Reg(TEMP_REGS[0]), out);
+ asm.compile_with_num_regs(&mut cb, 2);
+
+ assert_disasm!(cb, "2b0500b16b0500b1e1030baa", {"
+ 0x0: adds x11, x9, #1
+ 0x4: adds x11, x11, #1
+ 0x8: mov x1, x11
+ "});
+ }
+
+ #[test]
+ fn test_mul_with_immediate() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let out = asm.mul(Opnd::Reg(TEMP_REGS[1]), 3.into());
+ asm.mov(Opnd::Reg(TEMP_REGS[0]), out);
+ asm.compile_with_num_regs(&mut cb, 2);
+
+ assert_disasm!(cb, "6b0080d22b7d0b9be1030baa", {"
+ 0x0: mov x11, #3
+ 0x4: mul x11, x9, x11
+ 0x8: mov x1, x11
+ "});
+ }
+}
diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs
new file mode 100644
index 0000000000..3fb67bc7cc
--- /dev/null
+++ b/yjit/src/backend/ir.rs
@@ -0,0 +1,2154 @@
+use std::collections::HashMap;
+use std::fmt;
+use std::convert::From;
+use std::mem::take;
+use crate::codegen::{gen_counted_exit, gen_outlined_exit};
+use crate::cruby::{vm_stack_canary, SIZEOF_VALUE_I32, VALUE, VM_ENV_DATA_SIZE};
+use crate::virtualmem::CodePtr;
+use crate::asm::{CodeBlock, OutlinedCb};
+use crate::core::{Context, RegMapping, RegOpnd, MAX_CTX_TEMPS};
+use crate::options::*;
+use crate::stats::*;
+
+use crate::backend::current::*;
+
+pub const EC: Opnd = _EC;
+pub const CFP: Opnd = _CFP;
+pub const SP: Opnd = _SP;
+
+pub const C_ARG_OPNDS: [Opnd; 6] = _C_ARG_OPNDS;
+pub const C_RET_OPND: Opnd = _C_RET_OPND;
+pub use crate::backend::current::{Reg, C_RET_REG};
+
+// Memory operand base
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum MemBase
+{
+ Reg(u8),
+ InsnOut(usize),
+}
+
+// Memory location
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub struct Mem
+{
+ // Base register number or instruction index
+ pub(super) base: MemBase,
+
+ // Offset relative to the base pointer
+ pub(super) disp: i32,
+
+ // Size in bits
+ pub(super) num_bits: u8,
+}
+
+impl fmt::Debug for Mem {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "Mem{}[{:?}", self.num_bits, self.base)?;
+ if self.disp != 0 {
+ let sign = if self.disp > 0 { '+' } else { '-' };
+ write!(fmt, " {sign} {}", self.disp)?;
+ }
+
+ write!(fmt, "]")
+ }
+}
+
+/// Operand to an IR instruction
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum Opnd
+{
+ None, // For insns with no output
+
+ // Immediate Ruby value, may be GC'd, movable
+ Value(VALUE),
+
+ /// C argument register. The alloc_regs resolves its register dependencies.
+ CArg(Reg),
+
+ // Output of a preceding instruction in this block
+ InsnOut{ idx: usize, num_bits: u8 },
+
+ /// Pointer to a slot on the VM stack
+ Stack {
+ /// Index from stack top. Used for conversion to StackOpnd.
+ idx: i32,
+ /// Number of bits for Opnd::Reg and Opnd::Mem.
+ num_bits: u8,
+ /// ctx.stack_size when this operand is made. Used with idx for Opnd::Reg.
+ stack_size: u8,
+ /// The number of local variables in the current ISEQ. Used only for locals.
+ num_locals: Option<u32>,
+ /// ctx.sp_offset when this operand is made. Used with idx for Opnd::Mem.
+ sp_offset: i8,
+ /// ctx.reg_mapping when this operand is read. Used for register allocation.
+ reg_mapping: Option<RegMapping>
+ },
+
+ // Low-level operands, for lowering
+ Imm(i64), // Raw signed immediate
+ UImm(u64), // Raw unsigned immediate
+ Mem(Mem), // Memory location
+ Reg(Reg), // Machine register
+}
+
+impl fmt::Debug for Opnd {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ use Opnd::*;
+ match self {
+ Self::None => write!(fmt, "None"),
+ Value(val) => write!(fmt, "Value({val:?})"),
+ CArg(reg) => write!(fmt, "CArg({reg:?})"),
+ Stack { idx, sp_offset, .. } => write!(fmt, "SP[{}]", *sp_offset as i32 - idx - 1),
+ InsnOut { idx, num_bits } => write!(fmt, "Out{num_bits}({idx})"),
+ Imm(signed) => write!(fmt, "{signed:x}_i64"),
+ UImm(unsigned) => write!(fmt, "{unsigned:x}_u64"),
+ // Mem and Reg print their own names in their Debug output, so don't wrap them again
+ Mem(mem) => write!(fmt, "{mem:?}"),
+ Reg(reg) => write!(fmt, "{reg:?}"),
+ }
+ }
+}
+
+impl Opnd
+{
+ /// Convenience constructor for memory operands
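+ /// For example (illustrative): Opnd::mem(64, SP, 8) describes a 64-bit
+ /// access at [sp + 8]. The base must be a 64-bit register or the output
+ /// of a previous instruction.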
+ pub fn mem(num_bits: u8, base: Opnd, disp: i32) -> Self {
+ match base {
+ Opnd::Reg(base_reg) => {
+ assert!(base_reg.num_bits == 64);
+ Opnd::Mem(Mem {
+ base: MemBase::Reg(base_reg.reg_no),
+ disp: disp,
+ num_bits: num_bits,
+ })
+ },
+
+ Opnd::InsnOut{idx, num_bits: out_num_bits } => {
+ assert!(num_bits <= out_num_bits);
+ Opnd::Mem(Mem {
+ base: MemBase::InsnOut(idx),
+ disp: disp,
+ num_bits: num_bits,
+ })
+ },
+
+ _ => unreachable!("memory operand with non-register base")
+ }
+ }
+
+ /// Constructor for constant pointer operand
+ pub fn const_ptr(ptr: *const u8) -> Self {
+ Opnd::UImm(ptr as u64)
+ }
+
+ /// Constructor for a C argument operand
+ pub fn c_arg(reg_opnd: Opnd) -> Self {
+ match reg_opnd {
+ Opnd::Reg(reg) => Opnd::CArg(reg),
+ _ => unreachable!(),
+ }
+ }
+
+ /// Unwrap a register operand
+ pub fn unwrap_reg(&self) -> Reg {
+ match self {
+ Opnd::Reg(reg) => *reg,
+ _ => unreachable!("trying to unwrap {:?} into reg", self)
+ }
+ }
+
+ /// Get the size in bits for this operand if there is one.
+ pub fn num_bits(&self) -> Option<u8> {
+ match *self {
+ Opnd::Reg(Reg { num_bits, .. }) => Some(num_bits),
+ Opnd::Mem(Mem { num_bits, .. }) => Some(num_bits),
+ Opnd::InsnOut { num_bits, .. } => Some(num_bits),
+ _ => None
+ }
+ }
+
+ pub fn with_num_bits(&self, num_bits: u8) -> Option<Opnd> {
+ assert!(num_bits == 8 || num_bits == 16 || num_bits == 32 || num_bits == 64);
+ match *self {
+ Opnd::Reg(reg) => Some(Opnd::Reg(reg.with_num_bits(num_bits))),
+ Opnd::Mem(Mem { base, disp, .. }) => Some(Opnd::Mem(Mem { base, disp, num_bits })),
+ Opnd::InsnOut { idx, .. } => Some(Opnd::InsnOut { idx, num_bits }),
+ Opnd::Stack { idx, stack_size, num_locals, sp_offset, reg_mapping, .. } => Some(Opnd::Stack { idx, num_bits, stack_size, num_locals, sp_offset, reg_mapping }),
+ _ => None,
+ }
+ }
+
+ /// Get the size in bits for register/memory operands.
+ pub fn rm_num_bits(&self) -> u8 {
+ self.num_bits().unwrap()
+ }
+
+ /// Maps the indices from a previous list of instructions to a new list of
+ /// instructions.
+ pub fn map_index(self, indices: &Vec<usize>) -> Opnd {
+ match self {
+ Opnd::InsnOut { idx, num_bits } => {
+ Opnd::InsnOut { idx: indices[idx], num_bits }
+ }
+ Opnd::Mem(Mem { base: MemBase::InsnOut(idx), disp, num_bits }) => {
+ Opnd::Mem(Mem { base: MemBase::InsnOut(indices[idx]), disp, num_bits })
+ },
+ _ => self
+ }
+ }
+
+ /// When there aren't any operands to check against, this is the number of
+ /// bits that should be used for any given output variable.
+ const DEFAULT_NUM_BITS: u8 = 64;
+
+ /// Determine the size in bits from the iterator of operands. If any of them
+ /// are different sizes this will panic.
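+ /// For example (illustrative): a 64-bit register plus an immediate yields
+ /// 64, since immediates carry no inherent size; an empty iterator falls
+ /// back to DEFAULT_NUM_BITS (64).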
+ pub fn match_num_bits_iter<'a>(opnds: impl Iterator<Item = &'a Opnd>) -> u8 {
+ let mut value: Option<u8> = None;
+
+ for opnd in opnds {
+ if let Some(num_bits) = opnd.num_bits() {
+ match value {
+ None => {
+ value = Some(num_bits);
+ },
+ Some(value) => {
+ assert_eq!(value, num_bits, "operands of incompatible sizes");
+ }
+ };
+ }
+ }
+
+ value.unwrap_or(Self::DEFAULT_NUM_BITS)
+ }
+
+ /// Determine the size in bits from the given slice of operands. If any of
+ /// them are different sizes this will panic.
+ pub fn match_num_bits(opnds: &[Opnd]) -> u8 {
+ Self::match_num_bits_iter(opnds.iter())
+ }
+
+ /// Convert Opnd::Stack into RegOpnd
+ pub fn reg_opnd(&self) -> RegOpnd {
+ self.get_reg_opnd().unwrap()
+ }
+
+ /// Convert an operand into RegOpnd if it's Opnd::Stack
+ pub fn get_reg_opnd(&self) -> Option<RegOpnd> {
+ match *self {
+ Opnd::Stack { idx, stack_size, num_locals, .. } => Some(
+ if let Some(num_locals) = num_locals {
+ let last_idx = stack_size as i32 + VM_ENV_DATA_SIZE as i32 - 1;
+ assert!(last_idx <= idx, "Local index {} must be >= last local index {}", idx, last_idx);
+ assert!(idx <= last_idx + num_locals as i32, "Local index {} must be <= last local index {} + local size {}", idx, last_idx, num_locals);
+ RegOpnd::Local((last_idx + num_locals as i32 - idx) as u8)
+ } else {
+ assert!(idx < stack_size as i32);
+ RegOpnd::Stack((stack_size as i32 - idx - 1) as u8)
+ }
+ ),
+ _ => None,
+ }
+ }
+}
+
+impl From<usize> for Opnd {
+ fn from(value: usize) -> Self {
+ Opnd::UImm(value.try_into().unwrap())
+ }
+}
+
+impl From<u64> for Opnd {
+ fn from(value: u64) -> Self {
+ Opnd::UImm(value)
+ }
+}
+
+impl From<i64> for Opnd {
+ fn from(value: i64) -> Self {
+ Opnd::Imm(value)
+ }
+}
+
+impl From<i32> for Opnd {
+ fn from(value: i32) -> Self {
+ Opnd::Imm(value.try_into().unwrap())
+ }
+}
+
+impl From<u32> for Opnd {
+ fn from(value: u32) -> Self {
+ Opnd::UImm(value as u64)
+ }
+}
+
+impl From<VALUE> for Opnd {
+ fn from(value: VALUE) -> Self {
+ Opnd::Value(value)
+ }
+}
+
+/// Branch target (something that we can jump to)
+/// for branch instructions
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum Target
+{
+ /// Pointer to a piece of YJIT-generated code
+ CodePtr(CodePtr),
+ /// Side exit with a counter
+ SideExit { counter: Counter, context: Option<SideExitContext> },
+ /// Pointer to a side exit code
+ SideExitPtr(CodePtr),
+ /// A label within the generated code
+ Label(usize),
+}
+
+impl Target
+{
+ pub fn side_exit(counter: Counter) -> Target {
+ Target::SideExit { counter, context: None }
+ }
+
+ pub fn unwrap_label_idx(&self) -> usize {
+ match self {
+ Target::Label(idx) => *idx,
+ _ => unreachable!("trying to unwrap {:?} into label", self)
+ }
+ }
+
+ pub fn unwrap_code_ptr(&self) -> CodePtr {
+ match self {
+ Target::CodePtr(ptr) => *ptr,
+ Target::SideExitPtr(ptr) => *ptr,
+ _ => unreachable!("trying to unwrap {:?} into code ptr", self)
+ }
+ }
+}
+
+impl From<CodePtr> for Target {
+ fn from(code_ptr: CodePtr) -> Self {
+ Target::CodePtr(code_ptr)
+ }
+}
+
+type PosMarkerFn = Box<dyn Fn(CodePtr, &CodeBlock)>;
+
+/// YJIT IR instruction
+pub enum Insn {
+ /// Add two operands together, and return the result as a new operand.
+ Add { left: Opnd, right: Opnd, out: Opnd },
+
+ /// This is the same as the OP_ADD instruction, except that it performs the
+ /// binary AND operation.
+ And { left: Opnd, right: Opnd, out: Opnd },
+
+ /// Bake a string directly into the instruction stream.
+ BakeString(String),
+
+ // Trigger a debugger breakpoint
+ #[allow(dead_code)]
+ Breakpoint,
+
+ /// Add a comment into the IR at the point that this instruction is added.
+ /// It won't have any impact on the actual compiled code.
+ Comment(String),
+
+ /// Compare two operands
+ Cmp { left: Opnd, right: Opnd },
+
+ /// Pop a register from the C stack
+ CPop { out: Opnd },
+
+ /// Pop all of the caller-save registers and the flags from the C stack
+ CPopAll,
+
+ /// Pop a register from the C stack and store it into another register
+ CPopInto(Opnd),
+
+ /// Push a register onto the C stack
+ CPush(Opnd),
+
+ /// Push all of the caller-save registers and the flags to the C stack
+ CPushAll,
+
+ // C function call with N arguments (variadic)
+ CCall { opnds: Vec<Opnd>, fptr: *const u8, out: Opnd },
+
+ // C function return
+ CRet(Opnd),
+
+ /// Conditionally select if equal
+ CSelE { truthy: Opnd, falsy: Opnd, out: Opnd },
+
+ /// Conditionally select if greater
+ CSelG { truthy: Opnd, falsy: Opnd, out: Opnd },
+
+ /// Conditionally select if greater or equal
+ CSelGE { truthy: Opnd, falsy: Opnd, out: Opnd },
+
+ /// Conditionally select if less
+ CSelL { truthy: Opnd, falsy: Opnd, out: Opnd },
+
+ /// Conditionally select if less or equal
+ CSelLE { truthy: Opnd, falsy: Opnd, out: Opnd },
+
+ /// Conditionally select if not equal
+ CSelNE { truthy: Opnd, falsy: Opnd, out: Opnd },
+
+ /// Conditionally select if not zero
+ CSelNZ { truthy: Opnd, falsy: Opnd, out: Opnd },
+
+ /// Conditionally select if zero
+ CSelZ { truthy: Opnd, falsy: Opnd, out: Opnd },
+
+ /// Set up the frame stack as necessary per the architecture.
+ FrameSetup,
+
+ /// Tear down the frame stack as necessary per the architecture.
+ FrameTeardown,
+
+ // Atomically increment a counter
+ // Input: memory operand, increment value
+ // Produces no output
+ IncrCounter { mem: Opnd, value: Opnd },
+
+ /// Jump if below or equal (unsigned)
+ Jbe(Target),
+
+ /// Jump if below (unsigned)
+ Jb(Target),
+
+ /// Jump if equal
+ Je(Target),
+
+ /// Jump if less (signed)
+ Jl(Target),
+
+ /// Jump if greater
+ Jg(Target),
+
+ /// Jump if greater or equal
+ Jge(Target),
+
+ // Unconditional jump to a branch target
+ Jmp(Target),
+
+ // Unconditional jump which takes a reg/mem address operand
+ JmpOpnd(Opnd),
+
+ /// Jump if not equal
+ Jne(Target),
+
+ /// Jump if not zero
+ Jnz(Target),
+
+ /// Jump if overflow
+ Jo(Target),
+
+ /// Jump if overflow in multiplication
+ JoMul(Target),
+
+ /// Jump if zero
+ Jz(Target),
+
+ /// Jump if operand is zero (only used during lowering at the moment)
+ Joz(Opnd, Target),
+
+ /// Jump if operand is non-zero (only used during lowering at the moment)
+ Jonz(Opnd, Target),
+
+ // Add a label into the IR at the point that this instruction is added.
+ Label(Target),
+
+ /// Get the code address of a jump target
+ LeaJumpTarget { target: Target, out: Opnd },
+
+ // Load effective address
+ Lea { opnd: Opnd, out: Opnd },
+
+ /// Take a specific register. Signal the register allocator to not use it.
+ LiveReg { opnd: Opnd, out: Opnd },
+
+ // A low-level instruction that loads a value into a register.
+ Load { opnd: Opnd, out: Opnd },
+
+ // A low-level instruction that loads a value into a specified register.
+ LoadInto { dest: Opnd, opnd: Opnd },
+
+ // A low-level instruction that loads a value into a register and
+ // sign-extends it to a 64-bit value.
+ LoadSExt { opnd: Opnd, out: Opnd },
+
+ /// Shift a value left by a certain amount.
+ LShift { opnd: Opnd, shift: Opnd, out: Opnd },
+
+ // A low-level mov instruction. It accepts two operands.
+ Mov { dest: Opnd, src: Opnd },
+
+ // Perform the NOT operation on an individual operand, and return the result
+ // as a new operand. This operand can then be used as the operand on another
+ // instruction.
+ Not { opnd: Opnd, out: Opnd },
+
+ /// This is the same as the Add instruction, except that it performs the
+ /// bitwise OR operation.
+ Or { left: Opnd, right: Opnd, out: Opnd },
+
+ /// Pad with nop instructions to accommodate Insn::Jmp in case the block or
+ /// the insn is invalidated.
+ PadInvalPatch,
+
+ // Mark a position in the generated code
+ PosMarker(PosMarkerFn),
+
+ /// Shift a value right by a certain amount (signed).
+ RShift { opnd: Opnd, shift: Opnd, out: Opnd },
+
+ // Low-level instruction to store a value to memory.
+ Store { dest: Opnd, src: Opnd },
+
+ /// This is the same as the Add instruction, except that it performs subtraction.
+ Sub { left: Opnd, right: Opnd, out: Opnd },
+
+ // Integer multiplication
+ Mul { left: Opnd, right: Opnd, out: Opnd },
+
+ // Bitwise AND test instruction
+ Test { left: Opnd, right: Opnd },
+
+ /// Shift a value right by a certain amount (unsigned).
+ URShift { opnd: Opnd, shift: Opnd, out: Opnd },
+
+ /// This is the same as the Add instruction, except that it performs the
+ /// bitwise XOR operation.
+ Xor { left: Opnd, right: Opnd, out: Opnd }
+}
+
+impl Insn {
+ /// Create an iterator that will yield a non-mutable reference to each
+ /// operand in turn for this instruction.
+ pub(super) fn opnd_iter(&self) -> InsnOpndIterator<'_> {
+ InsnOpndIterator::new(self)
+ }
+
+ /// Create an iterator that will yield a mutable reference to each operand
+ /// in turn for this instruction.
+ pub(super) fn opnd_iter_mut(&mut self) -> InsnOpndMutIterator<'_> {
+ InsnOpndMutIterator::new(self)
+ }
+
+ /// Get a mutable reference to a Target if it exists.
+ pub(super) fn target_mut(&mut self) -> Option<&mut Target> {
+ match self {
+ Insn::Jbe(target) |
+ Insn::Jb(target) |
+ Insn::Je(target) |
+ Insn::Jl(target) |
+ Insn::Jg(target) |
+ Insn::Jge(target) |
+ Insn::Jmp(target) |
+ Insn::Jne(target) |
+ Insn::Jnz(target) |
+ Insn::Jo(target) |
+ Insn::Jz(target) |
+ Insn::Label(target) |
+ Insn::JoMul(target) |
+ Insn::Joz(_, target) |
+ Insn::Jonz(_, target) |
+ Insn::LeaJumpTarget { target, .. } => {
+ Some(target)
+ }
+ _ => None,
+ }
+ }
+
+ /// Returns a string that describes which operation this instruction is
+ /// performing. This is used for debugging.
+ fn op(&self) -> &'static str {
+ match self {
+ Insn::Add { .. } => "Add",
+ Insn::And { .. } => "And",
+ Insn::BakeString(_) => "BakeString",
+ Insn::Breakpoint => "Breakpoint",
+ Insn::Comment(_) => "Comment",
+ Insn::Cmp { .. } => "Cmp",
+ Insn::CPop { .. } => "CPop",
+ Insn::CPopAll => "CPopAll",
+ Insn::CPopInto(_) => "CPopInto",
+ Insn::CPush(_) => "CPush",
+ Insn::CPushAll => "CPushAll",
+ Insn::CCall { .. } => "CCall",
+ Insn::CRet(_) => "CRet",
+ Insn::CSelE { .. } => "CSelE",
+ Insn::CSelG { .. } => "CSelG",
+ Insn::CSelGE { .. } => "CSelGE",
+ Insn::CSelL { .. } => "CSelL",
+ Insn::CSelLE { .. } => "CSelLE",
+ Insn::CSelNE { .. } => "CSelNE",
+ Insn::CSelNZ { .. } => "CSelNZ",
+ Insn::CSelZ { .. } => "CSelZ",
+ Insn::FrameSetup => "FrameSetup",
+ Insn::FrameTeardown => "FrameTeardown",
+ Insn::IncrCounter { .. } => "IncrCounter",
+ Insn::Jbe(_) => "Jbe",
+ Insn::Jb(_) => "Jb",
+ Insn::Je(_) => "Je",
+ Insn::Jl(_) => "Jl",
+ Insn::Jg(_) => "Jg",
+ Insn::Jge(_) => "Jge",
+ Insn::Jmp(_) => "Jmp",
+ Insn::JmpOpnd(_) => "JmpOpnd",
+ Insn::Jne(_) => "Jne",
+ Insn::Jnz(_) => "Jnz",
+ Insn::Jo(_) => "Jo",
+ Insn::JoMul(_) => "JoMul",
+ Insn::Jz(_) => "Jz",
+ Insn::Joz(..) => "Joz",
+ Insn::Jonz(..) => "Jonz",
+ Insn::Label(_) => "Label",
+ Insn::LeaJumpTarget { .. } => "LeaJumpTarget",
+ Insn::Lea { .. } => "Lea",
+ Insn::LiveReg { .. } => "LiveReg",
+ Insn::Load { .. } => "Load",
+ Insn::LoadInto { .. } => "LoadInto",
+ Insn::LoadSExt { .. } => "LoadSExt",
+ Insn::LShift { .. } => "LShift",
+ Insn::Mov { .. } => "Mov",
+ Insn::Not { .. } => "Not",
+ Insn::Or { .. } => "Or",
+ Insn::PadInvalPatch => "PadEntryExit",
+ Insn::PosMarker(_) => "PosMarker",
+ Insn::RShift { .. } => "RShift",
+ Insn::Store { .. } => "Store",
+ Insn::Sub { .. } => "Sub",
+ Insn::Mul { .. } => "Mul",
+ Insn::Test { .. } => "Test",
+ Insn::URShift { .. } => "URShift",
+ Insn::Xor { .. } => "Xor"
+ }
+ }
+
+ /// Return a non-mutable reference to the out operand for this instruction
+ /// if it has one.
+ pub fn out_opnd(&self) -> Option<&Opnd> {
+ match self {
+ Insn::Add { out, .. } |
+ Insn::And { out, .. } |
+ Insn::CCall { out, .. } |
+ Insn::CPop { out, .. } |
+ Insn::CSelE { out, .. } |
+ Insn::CSelG { out, .. } |
+ Insn::CSelGE { out, .. } |
+ Insn::CSelL { out, .. } |
+ Insn::CSelLE { out, .. } |
+ Insn::CSelNE { out, .. } |
+ Insn::CSelNZ { out, .. } |
+ Insn::CSelZ { out, .. } |
+ Insn::Lea { out, .. } |
+ Insn::LeaJumpTarget { out, .. } |
+ Insn::LiveReg { out, .. } |
+ Insn::Load { out, .. } |
+ Insn::LoadSExt { out, .. } |
+ Insn::LShift { out, .. } |
+ Insn::Not { out, .. } |
+ Insn::Or { out, .. } |
+ Insn::RShift { out, .. } |
+ Insn::Sub { out, .. } |
+ Insn::Mul { out, .. } |
+ Insn::URShift { out, .. } |
+ Insn::Xor { out, .. } => Some(out),
+ _ => None
+ }
+ }
+
+ /// Return a mutable reference to the out operand for this instruction if it
+ /// has one.
+ pub fn out_opnd_mut(&mut self) -> Option<&mut Opnd> {
+ match self {
+ Insn::Add { out, .. } |
+ Insn::And { out, .. } |
+ Insn::CCall { out, .. } |
+ Insn::CPop { out, .. } |
+ Insn::CSelE { out, .. } |
+ Insn::CSelG { out, .. } |
+ Insn::CSelGE { out, .. } |
+ Insn::CSelL { out, .. } |
+ Insn::CSelLE { out, .. } |
+ Insn::CSelNE { out, .. } |
+ Insn::CSelNZ { out, .. } |
+ Insn::CSelZ { out, .. } |
+ Insn::Lea { out, .. } |
+ Insn::LeaJumpTarget { out, .. } |
+ Insn::LiveReg { out, .. } |
+ Insn::Load { out, .. } |
+ Insn::LoadSExt { out, .. } |
+ Insn::LShift { out, .. } |
+ Insn::Not { out, .. } |
+ Insn::Or { out, .. } |
+ Insn::RShift { out, .. } |
+ Insn::Sub { out, .. } |
+ Insn::Mul { out, .. } |
+ Insn::URShift { out, .. } |
+ Insn::Xor { out, .. } => Some(out),
+ _ => None
+ }
+ }
+
+ /// Returns the target for this instruction if there is one.
+ pub fn target(&self) -> Option<&Target> {
+ match self {
+ Insn::Jbe(target) |
+ Insn::Jb(target) |
+ Insn::Je(target) |
+ Insn::Jl(target) |
+ Insn::Jg(target) |
+ Insn::Jge(target) |
+ Insn::Jmp(target) |
+ Insn::Jne(target) |
+ Insn::Jnz(target) |
+ Insn::Jo(target) |
+ Insn::Jz(target) |
+ Insn::LeaJumpTarget { target, .. } => Some(target),
+ _ => None
+ }
+ }
+
+ /// Returns the text associated with this instruction if there is some.
+ pub fn text(&self) -> Option<&String> {
+ match self {
+ Insn::BakeString(text) |
+ Insn::Comment(text) => Some(text),
+ _ => None
+ }
+ }
+}
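+
+// Accessor sketch: Debug formatting and the register allocator build on the
+// methods above. For a hypothetical instruction:
+//
+//   let insn = Insn::Add { left: Opnd::UImm(1), right: Opnd::UImm(2), out: Opnd::None };
+//   assert_eq!(insn.op(), "Add");            // debug name
+//   assert_eq!(insn.opnd_iter().count(), 2); // yields &left, then &right
+//   assert!(insn.target().is_none());        // Add has no jump target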
+
+/// An iterator that will yield a non-mutable reference to each operand in turn
+/// for the given instruction.
+pub(super) struct InsnOpndIterator<'a> {
+ insn: &'a Insn,
+ idx: usize,
+}
+
+impl<'a> InsnOpndIterator<'a> {
+ fn new(insn: &'a Insn) -> Self {
+ Self { insn, idx: 0 }
+ }
+}
+
+impl<'a> Iterator for InsnOpndIterator<'a> {
+ type Item = &'a Opnd;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ match self.insn {
+ Insn::BakeString(_) |
+ Insn::Breakpoint |
+ Insn::Comment(_) |
+ Insn::CPop { .. } |
+ Insn::CPopAll |
+ Insn::CPushAll |
+ Insn::FrameSetup |
+ Insn::FrameTeardown |
+ Insn::Jbe(_) |
+ Insn::Jb(_) |
+ Insn::Je(_) |
+ Insn::Jl(_) |
+ Insn::Jg(_) |
+ Insn::Jge(_) |
+ Insn::Jmp(_) |
+ Insn::Jne(_) |
+ Insn::Jnz(_) |
+ Insn::Jo(_) |
+ Insn::JoMul(_) |
+ Insn::Jz(_) |
+ Insn::Label(_) |
+ Insn::LeaJumpTarget { .. } |
+ Insn::PadInvalPatch |
+ Insn::PosMarker(_) => None,
+
+ Insn::CPopInto(opnd) |
+ Insn::CPush(opnd) |
+ Insn::CRet(opnd) |
+ Insn::JmpOpnd(opnd) |
+ Insn::Lea { opnd, .. } |
+ Insn::LiveReg { opnd, .. } |
+ Insn::Load { opnd, .. } |
+ Insn::LoadSExt { opnd, .. } |
+ Insn::Joz(opnd, _) |
+ Insn::Jonz(opnd, _) |
+ Insn::Not { opnd, .. } => {
+ match self.idx {
+ 0 => {
+ self.idx += 1;
+ Some(opnd)
+ },
+ _ => None
+ }
+ },
+ Insn::Add { left: opnd0, right: opnd1, .. } |
+ Insn::And { left: opnd0, right: opnd1, .. } |
+ Insn::Cmp { left: opnd0, right: opnd1 } |
+ Insn::CSelE { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelG { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelGE { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelL { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelLE { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelNE { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelNZ { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelZ { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::IncrCounter { mem: opnd0, value: opnd1, .. } |
+ Insn::LoadInto { dest: opnd0, opnd: opnd1 } |
+ Insn::LShift { opnd: opnd0, shift: opnd1, .. } |
+ Insn::Mov { dest: opnd0, src: opnd1 } |
+ Insn::Or { left: opnd0, right: opnd1, .. } |
+ Insn::RShift { opnd: opnd0, shift: opnd1, .. } |
+ Insn::Store { dest: opnd0, src: opnd1 } |
+ Insn::Sub { left: opnd0, right: opnd1, .. } |
+ Insn::Mul { left: opnd0, right: opnd1, .. } |
+ Insn::Test { left: opnd0, right: opnd1 } |
+ Insn::URShift { opnd: opnd0, shift: opnd1, .. } |
+ Insn::Xor { left: opnd0, right: opnd1, .. } => {
+ match self.idx {
+ 0 => {
+ self.idx += 1;
+ Some(opnd0)
+ }
+ 1 => {
+ self.idx += 1;
+ Some(opnd1)
+ }
+ _ => None
+ }
+ },
+ Insn::CCall { opnds, .. } => {
+ if self.idx < opnds.len() {
+ let opnd = &opnds[self.idx];
+ self.idx += 1;
+ Some(opnd)
+ } else {
+ None
+ }
+ }
+ }
+ }
+}
+
+/// An iterator that will yield a mutable reference to each operand in turn
+/// for the given instruction.
+pub(super) struct InsnOpndMutIterator<'a> {
+ insn: &'a mut Insn,
+ idx: usize,
+}
+
+impl<'a> InsnOpndMutIterator<'a> {
+ fn new(insn: &'a mut Insn) -> Self {
+ Self { insn, idx: 0 }
+ }
+
+ pub(super) fn next(&mut self) -> Option<&mut Opnd> {
+ match self.insn {
+ Insn::BakeString(_) |
+ Insn::Breakpoint |
+ Insn::Comment(_) |
+ Insn::CPop { .. } |
+ Insn::CPopAll |
+ Insn::CPushAll |
+ Insn::FrameSetup |
+ Insn::FrameTeardown |
+ Insn::Jbe(_) |
+ Insn::Jb(_) |
+ Insn::Je(_) |
+ Insn::Jl(_) |
+ Insn::Jg(_) |
+ Insn::Jge(_) |
+ Insn::Jmp(_) |
+ Insn::Jne(_) |
+ Insn::Jnz(_) |
+ Insn::Jo(_) |
+ Insn::JoMul(_) |
+ Insn::Jz(_) |
+ Insn::Label(_) |
+ Insn::LeaJumpTarget { .. } |
+ Insn::PadInvalPatch |
+ Insn::PosMarker(_) => None,
+
+ Insn::CPopInto(opnd) |
+ Insn::CPush(opnd) |
+ Insn::CRet(opnd) |
+ Insn::JmpOpnd(opnd) |
+ Insn::Lea { opnd, .. } |
+ Insn::LiveReg { opnd, .. } |
+ Insn::Load { opnd, .. } |
+ Insn::LoadSExt { opnd, .. } |
+ Insn::Joz(opnd, _) |
+ Insn::Jonz(opnd, _) |
+ Insn::Not { opnd, .. } => {
+ match self.idx {
+ 0 => {
+ self.idx += 1;
+ Some(opnd)
+ },
+ _ => None
+ }
+ },
+ Insn::Add { left: opnd0, right: opnd1, .. } |
+ Insn::And { left: opnd0, right: opnd1, .. } |
+ Insn::Cmp { left: opnd0, right: opnd1 } |
+ Insn::CSelE { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelG { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelGE { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelL { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelLE { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelNE { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelNZ { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::CSelZ { truthy: opnd0, falsy: opnd1, .. } |
+ Insn::IncrCounter { mem: opnd0, value: opnd1, .. } |
+ Insn::LoadInto { dest: opnd0, opnd: opnd1 } |
+ Insn::LShift { opnd: opnd0, shift: opnd1, .. } |
+ Insn::Mov { dest: opnd0, src: opnd1 } |
+ Insn::Or { left: opnd0, right: opnd1, .. } |
+ Insn::RShift { opnd: opnd0, shift: opnd1, .. } |
+ Insn::Store { dest: opnd0, src: opnd1 } |
+ Insn::Sub { left: opnd0, right: opnd1, .. } |
+ Insn::Mul { left: opnd0, right: opnd1, .. } |
+ Insn::Test { left: opnd0, right: opnd1 } |
+ Insn::URShift { opnd: opnd0, shift: opnd1, .. } |
+ Insn::Xor { left: opnd0, right: opnd1, .. } => {
+ match self.idx {
+ 0 => {
+ self.idx += 1;
+ Some(opnd0)
+ }
+ 1 => {
+ self.idx += 1;
+ Some(opnd1)
+ }
+ _ => None
+ }
+ },
+ Insn::CCall { opnds, .. } => {
+ if self.idx < opnds.len() {
+ let opnd = &mut opnds[self.idx];
+ self.idx += 1;
+ Some(opnd)
+ } else {
+ None
+ }
+ }
+ }
+ }
+}
+
+impl fmt::Debug for Insn {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "{}(", self.op())?;
+
+ // Print list of operands
+ let mut opnd_iter = self.opnd_iter();
+ if let Some(first_opnd) = opnd_iter.next() {
+ write!(fmt, "{first_opnd:?}")?;
+ }
+ for opnd in opnd_iter {
+ write!(fmt, ", {opnd:?}")?;
+ }
+ write!(fmt, ")")?;
+
+ // Print text, target, and pos if they are present
+ if let Some(text) = self.text() {
+ write!(fmt, " {text:?}")?
+ }
+ if let Some(target) = self.target() {
+ write!(fmt, " target={target:?}")?;
+ }
+
+ write!(fmt, " -> {:?}", self.out_opnd().unwrap_or(&Opnd::None))
+ }
+}
+
+/// Set of variables used for generating side exits
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+pub struct SideExitContext {
+ /// PC of the instruction being compiled
+ pub pc: *mut VALUE,
+
+ /// Context fields used by get_generic_ctx()
+ pub stack_size: u8,
+ pub sp_offset: i8,
+ pub reg_mapping: RegMapping,
+ pub is_return_landing: bool,
+ pub is_deferred: bool,
+}
+
+impl SideExitContext {
+ /// Convert PC and Context into SideExitContext
+ pub fn new(pc: *mut VALUE, ctx: Context) -> Self {
+ let exit_ctx = SideExitContext {
+ pc,
+ stack_size: ctx.get_stack_size(),
+ sp_offset: ctx.get_sp_offset(),
+ reg_mapping: ctx.get_reg_mapping(),
+ is_return_landing: ctx.is_return_landing(),
+ is_deferred: ctx.is_deferred(),
+ };
+ if cfg!(debug_assertions) {
+ // Assert that we're not losing any mandatory metadata
+ assert_eq!(exit_ctx.get_ctx(), ctx.get_generic_ctx());
+ }
+ exit_ctx
+ }
+
+ /// Convert SideExitContext to Context
+ fn get_ctx(&self) -> Context {
+ let mut ctx = Context::default();
+ ctx.set_stack_size(self.stack_size);
+ ctx.set_sp_offset(self.sp_offset);
+ ctx.set_reg_mapping(self.reg_mapping);
+ if self.is_return_landing {
+ ctx.set_as_return_landing();
+ }
+ if self.is_deferred {
+ ctx.mark_as_deferred();
+ }
+ ctx
+ }
+}
+
+/// Initial capacity for asm.insns vector
+const ASSEMBLER_INSNS_CAPACITY: usize = 256;
+
+/// Object into which we assemble instructions to be
+/// optimized and lowered
+pub struct Assembler {
+ pub(super) insns: Vec<Insn>,
+
+ /// Parallel vec with insns
+ /// Index of the last insn using the output of this insn
+ pub(super) live_ranges: Vec<usize>,
+
+ /// Names of labels
+ pub(super) label_names: Vec<String>,
+
+ /// Context for generating the current insn
+ pub ctx: Context,
+
+ /// The current ISEQ's local table size. asm.local_opnd() uses this, and it's
+ /// sometimes hard to thread this value to the call site, e.g. asm.spill_regs()
+ /// in asm.ccall().
+ ///
+ /// `None` means we're not assembling for an ISEQ, or that the local size is
+ /// not relevant.
+ pub(super) num_locals: Option<u32>,
+
+ /// Side exit caches for each SideExitContext
+ pub(super) side_exits: HashMap<SideExitContext, CodePtr>,
+
+ /// PC for Target::SideExit
+ side_exit_pc: Option<*mut VALUE>,
+
+ /// Stack size for Target::SideExit
+ side_exit_stack_size: Option<u8>,
+
+ /// If true, the next ccall() should verify its leafness
+ leaf_ccall: bool,
+}
+
+impl Assembler
+{
+ /// Create an Assembler for ISEQ-specific code.
+ /// It includes all inline code and some outlined code like side exits and stubs.
+ pub fn new(num_locals: u32) -> Self {
+ Self::new_with_label_names(Vec::default(), HashMap::default(), Some(num_locals))
+ }
+
+ /// Create an Assembler for outlined code that is not specific to any ISEQ,
+ /// e.g. trampolines that are shared globally.
+ pub fn new_without_iseq() -> Self {
+ Self::new_with_label_names(Vec::default(), HashMap::default(), None)
+ }
+
+ /// Create an Assembler with parameters that are populated by another Assembler instance.
+ /// This API is used for copying an Assembler for the next compiler pass.
+ pub fn new_with_label_names(
+ label_names: Vec<String>,
+ side_exits: HashMap<SideExitContext, CodePtr>,
+ num_locals: Option<u32>
+ ) -> Self {
+ Self {
+ insns: Vec::with_capacity(ASSEMBLER_INSNS_CAPACITY),
+ live_ranges: Vec::with_capacity(ASSEMBLER_INSNS_CAPACITY),
+ label_names,
+ ctx: Context::default(),
+ num_locals,
+ side_exits,
+ side_exit_pc: None,
+ side_exit_stack_size: None,
+ leaf_ccall: false,
+ }
+ }
+
+ /// Get the list of registers that can be used for stack temps.
+ pub fn get_temp_regs() -> &'static [Reg] {
+ let num_regs = get_option!(num_temp_regs);
+ &TEMP_REGS[0..num_regs]
+ }
+
+ /// Get the number of locals for the ISEQ being compiled
+ pub fn get_num_locals(&self) -> Option<u32> {
+ self.num_locals
+ }
+
+ /// Set a context for generating side exits
+ pub fn set_side_exit_context(&mut self, pc: *mut VALUE, stack_size: u8) {
+ self.side_exit_pc = Some(pc);
+ self.side_exit_stack_size = Some(stack_size);
+ }
+
+ /// Build an Opnd::InsnOut from the current index of the assembler and the
+ /// given number of bits.
+ pub(super) fn next_opnd_out(&self, num_bits: u8) -> Opnd {
+ Opnd::InsnOut { idx: self.insns.len(), num_bits }
+ }
+
+ /// Append an instruction onto the current list of instructions and update
+ /// the live ranges of any instructions whose outputs are being used as
+ /// operands to this instruction.
+ pub fn push_insn(&mut self, mut insn: Insn) {
+ // Index of this instruction
+ let insn_idx = self.insns.len();
+
+ let mut opnd_iter = insn.opnd_iter_mut();
+ while let Some(opnd) = opnd_iter.next() {
+ match *opnd {
+ // If we find any InsnOut from previous instructions, we're going to update
+ // the live range of the previous instruction to point to this one.
+ Opnd::InsnOut { idx, .. } => {
+ assert!(idx < self.insns.len());
+ self.live_ranges[idx] = insn_idx;
+ }
+ Opnd::Mem(Mem { base: MemBase::InsnOut(idx), .. }) => {
+ assert!(idx < self.insns.len());
+ self.live_ranges[idx] = insn_idx;
+ }
+ // Set current ctx.reg_mapping to Opnd::Stack.
+ Opnd::Stack { idx, num_bits, stack_size, num_locals, sp_offset, reg_mapping: None } => {
+ assert_eq!(
+ self.ctx.get_stack_size() as i16 - self.ctx.get_sp_offset() as i16,
+ stack_size as i16 - sp_offset as i16,
+ "Opnd::Stack (stack_size: {}, sp_offset: {}) expects a different SP position from asm.ctx (stack_size: {}, sp_offset: {})",
+ stack_size, sp_offset, self.ctx.get_stack_size(), self.ctx.get_sp_offset(),
+ );
+ *opnd = Opnd::Stack {
+ idx,
+ num_bits,
+ stack_size,
+ num_locals,
+ sp_offset,
+ reg_mapping: Some(self.ctx.get_reg_mapping()),
+ };
+ }
+ _ => {}
+ }
+ }
+
+ // Set a side exit context to Target::SideExit
+ if let Some(Target::SideExit { context, .. }) = insn.target_mut() {
+ // We should skip this when this instruction is being copied from another Assembler.
+ if context.is_none() {
+ *context = Some(SideExitContext::new(
+ self.side_exit_pc.unwrap(),
+ self.ctx.with_stack_size(self.side_exit_stack_size.unwrap()),
+ ));
+ }
+ }
+
+ self.insns.push(insn);
+ self.live_ranges.push(insn_idx);
+ }
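+
+ // Worked example (sketch) of the live-range bookkeeping above:
+ //
+ //   let out = asm.load(Opnd::UImm(1));   // insn 0: live_ranges[0] == 0
+ //   let _ = asm.add(out, Opnd::UImm(2)); // insn 1 uses insn 0's output,
+ //                                        // so live_ranges[0] becomes 1
+ //
+ // alloc_regs() later frees insn 0's register once it passes index 1.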
+
+ /// Get a cached side exit, wrapping a counter if specified
+ pub fn get_side_exit(&mut self, side_exit_context: &SideExitContext, counter: Option<Counter>, ocb: &mut OutlinedCb) -> Option<CodePtr> {
+ // Get a cached side exit
+ let side_exit = match self.side_exits.get(side_exit_context) {
+ None => {
+ let exit_code = gen_outlined_exit(side_exit_context.pc, self.num_locals.unwrap(), &side_exit_context.get_ctx(), ocb)?;
+ self.side_exits.insert(*side_exit_context, exit_code);
+ exit_code
+ }
+ Some(code_ptr) => *code_ptr,
+ };
+
+ // Wrap a counter if needed
+ gen_counted_exit(side_exit_context.pc, side_exit, ocb, counter)
+ }
+
+ /// Create a new label instance that we can jump to
+ pub fn new_label(&mut self, name: &str) -> Target
+ {
+ assert!(!name.contains(' '), "use underscores in label names, not spaces");
+
+ let label_idx = self.label_names.len();
+ self.label_names.push(name.to_string());
+ Target::Label(label_idx)
+ }
+
+ /// Convert Opnd::Stack to Opnd::Mem or Opnd::Reg
+ pub fn lower_stack_opnd(&self, opnd: &Opnd) -> Opnd {
+ // Convert Opnd::Stack to Opnd::Mem
+ fn mem_opnd(opnd: &Opnd) -> Opnd {
+ if let Opnd::Stack { idx, sp_offset, num_bits, .. } = *opnd {
+ incr_counter!(temp_mem_opnd);
+ Opnd::mem(num_bits, SP, (sp_offset as i32 - idx - 1) * SIZEOF_VALUE_I32)
+ } else {
+ unreachable!()
+ }
+ }
+
+ // Convert Opnd::Stack to Opnd::Reg
+ fn reg_opnd(opnd: &Opnd, reg_idx: usize) -> Opnd {
+ let regs = Assembler::get_temp_regs();
+ if let Opnd::Stack { num_bits, .. } = *opnd {
+ incr_counter!(temp_reg_opnd);
+ Opnd::Reg(regs[reg_idx]).with_num_bits(num_bits).unwrap()
+ } else {
+ unreachable!()
+ }
+ }
+
+ match opnd {
+ Opnd::Stack { reg_mapping, .. } => {
+ if let Some(reg_idx) = reg_mapping.unwrap().get_reg(opnd.reg_opnd()) {
+ reg_opnd(opnd, reg_idx)
+ } else {
+ mem_opnd(opnd)
+ }
+ }
+ _ => unreachable!(),
+ }
+ }
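+
+ // Lowering sketch: a stack temp becomes Opnd::Reg when its reg_mapping has a
+ // register allocated for it, and an SP-relative memory operand otherwise.
+ // For example, with sp_offset = 2 and idx = 0 on a 64-bit VALUE build:
+ //
+ //   asm.lower_stack_opnd(&opnd) // => Opnd::mem(64, SP, (2 - 0 - 1) * 8)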
+
+ /// Allocate a register to a stack temp if available.
+ pub fn alloc_reg(&mut self, mapping: RegOpnd) {
+ // Allocate a register if there's no conflict.
+ let mut reg_mapping = self.ctx.get_reg_mapping();
+ if reg_mapping.alloc_reg(mapping) {
+ self.set_reg_mapping(reg_mapping);
+ }
+ }
+
+ /// Erase local variable type information
+ /// e.g. because of a call we can't track
+ pub fn clear_local_types(&mut self) {
+ asm_comment!(self, "clear local variable types");
+ self.ctx.clear_local_types();
+ }
+
+ /// Repurpose stack temp registers to the corresponding locals for arguments
+ pub fn map_temp_regs_to_args(&mut self, callee_ctx: &mut Context, argc: i32) -> Vec<RegOpnd> {
+ let mut callee_reg_mapping = callee_ctx.get_reg_mapping();
+ let mut mapped_temps = vec![];
+
+ for arg_idx in 0..argc {
+ let stack_idx: u8 = (self.ctx.get_stack_size() as i32 - argc + arg_idx).try_into().unwrap();
+ let temp_opnd = RegOpnd::Stack(stack_idx);
+
+ // For each argument, if the stack temp for it has a register,
+ // let the callee use the register for the local variable.
+ if let Some(reg_idx) = self.ctx.get_reg_mapping().get_reg(temp_opnd) {
+ let local_opnd = RegOpnd::Local(arg_idx.try_into().unwrap());
+ callee_reg_mapping.set_reg(local_opnd, reg_idx);
+ mapped_temps.push(temp_opnd);
+ }
+ }
+
+ asm_comment!(self, "local maps: {:?}", callee_reg_mapping);
+ callee_ctx.set_reg_mapping(callee_reg_mapping);
+ mapped_temps
+ }
+
+ /// Spill all live registers to the stack
+ pub fn spill_regs(&mut self) {
+ self.spill_regs_except(&vec![]);
+ }
+
+ /// Spill all live registers except `ignored_temps` to the stack
+ pub fn spill_regs_except(&mut self, ignored_temps: &Vec<RegOpnd>) {
+ // Forget registers above the stack top
+ let mut reg_mapping = self.ctx.get_reg_mapping();
+ for stack_idx in self.ctx.get_stack_size()..MAX_CTX_TEMPS as u8 {
+ reg_mapping.dealloc_reg(RegOpnd::Stack(stack_idx));
+ }
+ self.set_reg_mapping(reg_mapping);
+
+ // If no registers are in use, skip all checks
+ if self.ctx.get_reg_mapping() == RegMapping::default() {
+ return;
+ }
+
+ // Collect stack temps to be spilled
+ let mut spilled_opnds = vec![];
+ for stack_idx in 0..u8::min(MAX_CTX_TEMPS as u8, self.ctx.get_stack_size()) {
+ let reg_opnd = RegOpnd::Stack(stack_idx);
+ if !ignored_temps.contains(&reg_opnd) && reg_mapping.dealloc_reg(reg_opnd) {
+ let idx = self.ctx.get_stack_size() - 1 - stack_idx;
+ let spilled_opnd = self.stack_opnd(idx.into());
+ spilled_opnds.push(spilled_opnd);
+ reg_mapping.dealloc_reg(spilled_opnd.reg_opnd());
+ }
+ }
+
+ // Collect locals to be spilled
+ for local_idx in 0..MAX_CTX_TEMPS as u8 {
+ if reg_mapping.dealloc_reg(RegOpnd::Local(local_idx)) {
+ let first_local_ep_offset = self.num_locals.unwrap() + VM_ENV_DATA_SIZE - 1;
+ let ep_offset = first_local_ep_offset - local_idx as u32;
+ let spilled_opnd = self.local_opnd(ep_offset);
+ spilled_opnds.push(spilled_opnd);
+ reg_mapping.dealloc_reg(spilled_opnd.reg_opnd());
+ }
+ }
+
+ // Spill stack temps and locals
+ if !spilled_opnds.is_empty() {
+ asm_comment!(self, "spill_regs: {:?} -> {:?}", self.ctx.get_reg_mapping(), reg_mapping);
+ for &spilled_opnd in spilled_opnds.iter() {
+ self.spill_reg(spilled_opnd);
+ }
+ self.ctx.set_reg_mapping(reg_mapping);
+ }
+ }
+
+ /// Spill a stack temp from a register to the stack
+ pub fn spill_reg(&mut self, opnd: Opnd) {
+ assert_ne!(self.ctx.get_reg_mapping().get_reg(opnd.reg_opnd()), None);
+
+ // Use different RegMappings for dest and src operands
+ let reg_mapping = self.ctx.get_reg_mapping();
+ let mut mem_mappings = reg_mapping;
+ mem_mappings.dealloc_reg(opnd.reg_opnd());
+
+ // Move the stack operand from a register to memory
+ match opnd {
+ Opnd::Stack { idx, num_bits, stack_size, num_locals, sp_offset, .. } => {
+ self.mov(
+ Opnd::Stack { idx, num_bits, stack_size, num_locals, sp_offset, reg_mapping: Some(mem_mappings) },
+ Opnd::Stack { idx, num_bits, stack_size, num_locals, sp_offset, reg_mapping: Some(reg_mapping) },
+ );
+ }
+ _ => unreachable!(),
+ }
+ incr_counter!(temp_spill);
+ }
+
+ /// Update which stack temps are in a register
+ pub fn set_reg_mapping(&mut self, reg_mapping: RegMapping) {
+ if self.ctx.get_reg_mapping() != reg_mapping {
+ asm_comment!(self, "reg_mapping: {:?} -> {:?}", self.ctx.get_reg_mapping(), reg_mapping);
+ self.ctx.set_reg_mapping(reg_mapping);
+ }
+ }
+
+ // Shuffle register moves, sometimes adding extra moves using SCRATCH_REG,
+ // so that no move overwrites a register that a later move still reads.
+ pub fn reorder_reg_moves(old_moves: &Vec<(Reg, Opnd)>) -> Vec<(Reg, Opnd)> {
+ // Return the index of a move whose destination is not used as a source if any.
+ fn find_safe_move(moves: &Vec<(Reg, Opnd)>) -> Option<usize> {
+ moves.iter().enumerate().find(|(_, &(dest_reg, _))| {
+ moves.iter().all(|&(_, src_opnd)| src_opnd != Opnd::Reg(dest_reg))
+ }).map(|(index, _)| index)
+ }
+
+ // Remove moves whose source and destination are the same
+ let mut old_moves: Vec<(Reg, Opnd)> = old_moves.clone().into_iter()
+ .filter(|&(reg, opnd)| Opnd::Reg(reg) != opnd).collect();
+
+ let mut new_moves = vec![];
+ while old_moves.len() > 0 {
+ // Keep taking safe moves
+ while let Some(index) = find_safe_move(&old_moves) {
+ new_moves.push(old_moves.remove(index));
+ }
+
+ // No safe move. Load the source of one move into SCRATCH_REG, and
+ // then load SCRATCH_REG into the destination when it's safe.
+ if old_moves.len() > 0 {
+ // Make sure it's safe to use SCRATCH_REG
+ assert!(old_moves.iter().all(|&(_, opnd)| opnd != Opnd::Reg(Assembler::SCRATCH_REG)));
+
+ // Move SCRATCH <- opnd, and delay reg <- SCRATCH
+ let (reg, opnd) = old_moves.remove(0);
+ new_moves.push((Assembler::SCRATCH_REG, opnd));
+ old_moves.push((reg, Opnd::Reg(Assembler::SCRATCH_REG)));
+ }
+ }
+ new_moves
+ }
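+
+ // Worked example (sketch): a two-register swap has no safe move, so the loop
+ // above breaks the cycle through SCRATCH_REG. For the moves r0 <- r1 and
+ // r1 <- r0, the reordered result is:
+ //
+ //   SCRATCH <- r1   // stash one source
+ //   r1      <- r0   // now safe: no remaining move reads r1
+ //   r0      <- SCRATCH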
+
+ /// Sets the out field on the various instructions that require allocated
+ /// registers because their output is used as the operand on a subsequent
+ /// instruction. This is our implementation of the linear scan algorithm.
+ pub(super) fn alloc_regs(mut self, regs: Vec<Reg>) -> Assembler
+ {
+ //dbg!(&self);
+
+ // First, create the pool of registers.
+ let mut pool: u32 = 0;
+
+ // Mutate the pool bitmap to indicate that the register at that index
+ // has been allocated and is live.
+ fn alloc_reg(pool: &mut u32, regs: &Vec<Reg>) -> Option<Reg> {
+ for (index, reg) in regs.iter().enumerate() {
+ if (*pool & (1 << index)) == 0 {
+ *pool |= 1 << index;
+ return Some(*reg);
+ }
+ }
+ None
+ }
+
+ // Allocate a specific register
+ fn take_reg(pool: &mut u32, regs: &Vec<Reg>, reg: &Reg) -> Reg {
+ let reg_index = regs.iter().position(|elem| elem.reg_no == reg.reg_no);
+
+ if let Some(reg_index) = reg_index {
+ assert_eq!(*pool & (1 << reg_index), 0, "register already allocated");
+ *pool |= 1 << reg_index;
+ }
+
+ return *reg;
+ }
+
+ // Mutate the pool bitmap to indicate that the given register is being
+ // returned as it is no longer used by the instruction that previously
+ // held it.
+ fn dealloc_reg(pool: &mut u32, regs: &Vec<Reg>, reg: &Reg) {
+ let reg_index = regs.iter().position(|elem| elem.reg_no == reg.reg_no);
+
+ if let Some(reg_index) = reg_index {
+ *pool &= !(1 << reg_index);
+ }
+ }
+
+ // Adjust the number of entries in live_ranges so that it can be indexed by mapped indexes.
+ fn shift_live_ranges(live_ranges: &mut Vec<usize>, start_index: usize, shift_offset: isize) {
+ if shift_offset >= 0 {
+ for index in 0..(shift_offset as usize) {
+ live_ranges.insert(start_index + index, start_index + index);
+ }
+ } else {
+ for _ in 0..-shift_offset {
+ live_ranges.remove(start_index);
+ }
+ }
+ }
+
+ // Dump live registers for register spill debugging.
+ fn dump_live_regs(insns: Vec<Insn>, live_ranges: Vec<usize>, num_regs: usize, spill_index: usize) {
+ // Convert live_ranges to live_regs: the number of live registers at each index
+ let mut live_regs: Vec<usize> = vec![];
+ let mut end_idxs: Vec<usize> = vec![];
+ for (cur_idx, &end_idx) in live_ranges.iter().enumerate() {
+ end_idxs.push(end_idx);
+ while let Some(end_idx) = end_idxs.iter().position(|&end_idx| cur_idx == end_idx) {
+ end_idxs.remove(end_idx);
+ }
+ live_regs.push(end_idxs.len());
+ }
+
+ // Dump insns along with live registers
+ for (insn_idx, insn) in insns.iter().enumerate() {
+ eprint!("{:3} ", if spill_index == insn_idx { "==>" } else { "" });
+ for reg in 0..=num_regs {
+ eprint!("{:1}", if reg < live_regs[insn_idx] { "|" } else { "" });
+ }
+ eprintln!(" [{:3}] {:?}", insn_idx, insn);
+ }
+ }
+
+ // We may need to reorder LoadInto instructions with a C argument operand.
+ // This buffers the operands of such instructions to process them in batches.
+ let mut c_args: Vec<(Reg, Opnd)> = vec![];
+
+ // live_ranges is indexed by original `index` given by the iterator.
+ let live_ranges: Vec<usize> = take(&mut self.live_ranges);
+ // shifted_live_ranges is indexed by mapped indexes in insn operands.
+ let mut shifted_live_ranges: Vec<usize> = live_ranges.clone();
+ let mut asm = Assembler::new_with_label_names(take(&mut self.label_names), take(&mut self.side_exits), self.num_locals);
+ let mut iterator = self.into_draining_iter();
+
+ while let Some((index, mut insn)) = iterator.next_mapped() {
+ // Check if this is the last instruction that uses an operand that
+ // spans more than one instruction. In that case, return the
+ // allocated register to the pool.
+ for opnd in insn.opnd_iter() {
+ match opnd {
+ Opnd::InsnOut { idx, .. } |
+ Opnd::Mem(Mem { base: MemBase::InsnOut(idx), .. }) => {
+ // Since we have an InsnOut, we know it spans more than one
+ // instruction.
+ let start_index = *idx;
+
+ // We're going to check if this is the last instruction that
+ // uses this operand. If it is, we can return the allocated
+ // register to the pool.
+ if shifted_live_ranges[start_index] == index {
+ if let Some(Opnd::Reg(reg)) = asm.insns[start_index].out_opnd() {
+ dealloc_reg(&mut pool, &regs, reg);
+ } else {
+ unreachable!("no register allocated for insn {:?}", insn);
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
+ // Allocatable registers are caller-saved, so no register may live across a C call
+ if matches!(insn, Insn::CCall { .. }) {
+ assert_eq!(pool, 0, "register lives past C function call");
+ }
+
+ // If this instruction is used by another instruction,
+ // we need to allocate a register to it
+ if live_ranges[index] != index {
+ // If we get to this point where the end of the live range is
+ // not equal to the index of the instruction, then it must be
+ // true that we set an output operand for this instruction. If
+ // it's not true, something has gone wrong.
+ assert!(
+ insn.out_opnd().is_some(),
+ "Instruction output reused but no output operand set"
+ );
+
+ // This is going to be the output operand that we will set on
+ // the instruction.
+ let mut out_reg: Option<Reg> = None;
+
+ // C return values need to be mapped to the C return register
+ if matches!(insn, Insn::CCall { .. }) {
+ out_reg = Some(take_reg(&mut pool, &regs, &C_RET_REG));
+ }
+
+ // If this instruction's first operand maps to a register and
+ // this is the last use of the register, reuse the register
+ // We do this to improve register allocation on x86
+ // e.g. out = add(reg0, reg1)
+ // reg0 = add(reg0, reg1)
+ if out_reg.is_none() {
+ let mut opnd_iter = insn.opnd_iter();
+
+ if let Some(Opnd::InsnOut{ idx, .. }) = opnd_iter.next() {
+ if shifted_live_ranges[*idx] == index {
+ if let Some(Opnd::Reg(reg)) = asm.insns[*idx].out_opnd() {
+ out_reg = Some(take_reg(&mut pool, &regs, reg));
+ }
+ }
+ }
+ }
+
+ // Allocate a new register for this instruction if one is not
+ // already allocated.
+ if out_reg.is_none() {
+ out_reg = match &insn {
+ Insn::LiveReg { opnd, .. } => {
+ // Allocate a specific register
+ let reg = opnd.unwrap_reg();
+ Some(take_reg(&mut pool, &regs, &reg))
+ },
+ _ => match alloc_reg(&mut pool, &regs) {
+ Some(reg) => Some(reg),
+ None => {
+ let mut insns = asm.insns;
+ insns.push(insn);
+ for insn in iterator.insns {
+ insns.push(insn);
+ }
+ dump_live_regs(insns, live_ranges, regs.len(), index);
+ unreachable!("Register spill not supported");
+ }
+ }
+ };
+ }
+
+ // Set the output operand on the instruction
+ let out_num_bits = Opnd::match_num_bits_iter(insn.opnd_iter());
+
+ // If we have gotten to this point, then we're sure we have an
+ // output operand on this instruction because the live range
+ // extends beyond the index of the instruction.
+ let out = insn.out_opnd_mut().unwrap();
+ *out = Opnd::Reg(out_reg.unwrap().with_num_bits(out_num_bits));
+ }
+
+ // Replace InsnOut operands by their corresponding register
+ let mut opnd_iter = insn.opnd_iter_mut();
+ while let Some(opnd) = opnd_iter.next() {
+ match *opnd {
+ Opnd::InsnOut { idx, num_bits } => {
+ *opnd = (*asm.insns[idx].out_opnd().unwrap()).with_num_bits(num_bits).unwrap();
+ },
+ Opnd::Mem(Mem { base: MemBase::InsnOut(idx), disp, num_bits }) => {
+ let base = MemBase::Reg(asm.insns[idx].out_opnd().unwrap().unwrap_reg().reg_no);
+ *opnd = Opnd::Mem(Mem { base, disp, num_bits });
+ }
+ _ => {},
+ }
+ }
+
+ // Push instruction(s). Batch and reorder C argument operations if needed.
+ if let Insn::LoadInto { dest: Opnd::CArg(reg), opnd } = insn {
+ // Buffer C arguments
+ c_args.push((reg, opnd));
+ } else {
+ // C arguments are buffered until CCall
+ if c_args.len() > 0 {
+ // Resolve C argument dependencies
+ let c_args_len = c_args.len() as isize;
+ let moves = Self::reorder_reg_moves(&std::mem::take(&mut c_args));
+ shift_live_ranges(&mut shifted_live_ranges, asm.insns.len(), moves.len() as isize - c_args_len);
+
+ // Push batched C arguments
+ for (reg, opnd) in moves {
+ asm.load_into(Opnd::Reg(reg), opnd);
+ }
+ }
+ // Other instructions are pushed as is
+ asm.push_insn(insn);
+ }
+ iterator.map_insn_index(&mut asm);
+ }
+
+ assert_eq!(pool, 0, "Expected all registers to be returned to the pool");
+ asm
+ }
+
+ /// Compile the instructions down to machine code.
+ /// Can fail due to lack of code memory and inopportune code placement, among other reasons.
+ #[must_use]
+ pub fn compile(self, cb: &mut CodeBlock, ocb: Option<&mut OutlinedCb>) -> Option<(CodePtr, Vec<u32>)>
+ {
+ let start_addr = cb.get_write_ptr();
+ let alloc_regs = Self::get_alloc_regs();
+ let ret = self.compile_with_regs(cb, ocb, alloc_regs);
+
+ if let Some(dump_disasm) = get_option_ref!(dump_disasm) {
+ use crate::disasm::dump_disasm_addr_range;
+ let end_addr = cb.get_write_ptr();
+ dump_disasm_addr_range(cb, start_addr, end_addr, dump_disasm)
+ }
+ ret
+ }
+
+ /// Compile with a limited number of registers. Used only for unit tests.
+ #[cfg(test)]
+ pub fn compile_with_num_regs(self, cb: &mut CodeBlock, num_regs: usize) -> (CodePtr, Vec<u32>)
+ {
+ let mut alloc_regs = Self::get_alloc_regs();
+ let alloc_regs = alloc_regs.drain(0..num_regs).collect();
+ self.compile_with_regs(cb, None, alloc_regs).unwrap()
+ }
+
+ /// Consume the assembler by creating a new draining iterator.
+ pub fn into_draining_iter(self) -> AssemblerDrainingIterator {
+ AssemblerDrainingIterator::new(self)
+ }
+
+ /// Return true if the next ccall() is expected to be leaf.
+ pub fn get_leaf_ccall(&mut self) -> bool {
+ self.leaf_ccall
+ }
+
+ /// Assert that the next ccall() is going to be leaf.
+ pub fn expect_leaf_ccall(&mut self) {
+ self.leaf_ccall = true;
+ }
+}
+
+/// A struct that allows iterating through an assembler's instructions and
+/// consuming them as it iterates.
+pub struct AssemblerDrainingIterator {
+ insns: std::iter::Peekable<std::vec::IntoIter<Insn>>,
+ index: usize,
+ indices: Vec<usize>
+}
+
+impl AssemblerDrainingIterator {
+ fn new(asm: Assembler) -> Self {
+ Self {
+ insns: asm.insns.into_iter().peekable(),
+ index: 0,
+ indices: Vec::with_capacity(ASSEMBLER_INSNS_CAPACITY),
+ }
+ }
+
+ /// When you're working with two lists of instructions, you need to make
+ /// sure you do some bookkeeping to align the indices contained within the
+ /// operands of the two lists.
+ ///
+ /// This function accepts the assembler that is being built and tracks the
+ /// end of the current list of instructions in order to maintain that
+ /// alignment.
+ pub fn map_insn_index(&mut self, asm: &mut Assembler) {
+ self.indices.push(asm.insns.len().saturating_sub(1));
+ }
+
+ /// Map an operand by using this iterator's list of mapped indices.
+ #[cfg(target_arch = "x86_64")]
+ pub fn map_opnd(&self, opnd: Opnd) -> Opnd {
+ opnd.map_index(&self.indices)
+ }
+
+ /// Returns the next instruction in the list with the indices corresponding
+ /// to the next list of instructions.
+ pub fn next_mapped(&mut self) -> Option<(usize, Insn)> {
+ self.next_unmapped().map(|(index, mut insn)| {
+ let mut opnd_iter = insn.opnd_iter_mut();
+ while let Some(opnd) = opnd_iter.next() {
+ *opnd = opnd.map_index(&self.indices);
+ }
+
+ (index, insn)
+ })
+ }
+
+ /// Returns the next instruction in the list with the indices corresponding
+ /// to the previous list of instructions.
+ pub fn next_unmapped(&mut self) -> Option<(usize, Insn)> {
+ let index = self.index;
+ self.index += 1;
+ self.insns.next().map(|insn| (index, insn))
+ }
+
+ /// Returns the next instruction without incrementing the iterator's index.
+ pub fn peek(&mut self) -> Option<&Insn> {
+ self.insns.peek()
+ }
+}
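+
+// Usage sketch (hypothetical `old_asm`/`new_asm`): lowering passes drain one
+// assembler into another. Calling map_insn_index() once per iteration keeps
+// InsnOut indices in later operands pointing at the right instructions in the
+// new list:
+//
+//   let mut iter = old_asm.into_draining_iter();
+//   while let Some((_, insn)) = iter.next_mapped() {
+//       new_asm.push_insn(insn); // may expand into several instructions
+//       iter.map_insn_index(&mut new_asm);
+//   }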
+
+impl fmt::Debug for Assembler {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ writeln!(fmt, "Assembler")?;
+
+ for (idx, insn) in self.insns.iter().enumerate() {
+ writeln!(fmt, " {idx:03} {insn:?}")?;
+ }
+
+ Ok(())
+ }
+}
+
+impl Assembler {
+ #[must_use]
+ pub fn add(&mut self, left: Opnd, right: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[left, right]));
+ self.push_insn(Insn::Add { left, right, out });
+ out
+ }
+
+ #[must_use]
+ pub fn and(&mut self, left: Opnd, right: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[left, right]));
+ self.push_insn(Insn::And { left, right, out });
+ out
+ }
+
+ pub fn bake_string(&mut self, text: &str) {
+ self.push_insn(Insn::BakeString(text.to_string()));
+ }
+
+ #[allow(dead_code)]
+ pub fn breakpoint(&mut self) {
+ self.push_insn(Insn::Breakpoint);
+ }
+
+ pub fn ccall(&mut self, fptr: *const u8, opnds: Vec<Opnd>) -> Opnd {
+ // Let vm_check_canary() assert this ccall's leafness if leaf_ccall is set
+ let canary_opnd = self.set_stack_canary(&opnds);
+
+ let old_temps = self.ctx.get_reg_mapping(); // with registers
+ // Spill stack temp registers since they are caller-saved registers.
+ // Note that this doesn't spill stack temps that are already popped
+ // but may still be used in the C arguments.
+ self.spill_regs();
+ let new_temps = self.ctx.get_reg_mapping(); // all spilled
+
+ // Temporarily manipulate RegMappings so that we can use registers
+ // to pass stack operands that are already spilled above.
+ self.ctx.set_reg_mapping(old_temps);
+
+ // Call a C function
+ let out = self.next_opnd_out(Opnd::match_num_bits(&opnds));
+ self.push_insn(Insn::CCall { fptr, opnds, out });
+
+ // Registers in old_temps may be clobbered by the above C call,
+ // so rollback the manipulated RegMappings to a spilled version.
+ self.ctx.set_reg_mapping(new_temps);
+
+ // Clear the canary after use
+ if let Some(canary_opnd) = canary_opnd {
+ self.mov(canary_opnd, 0.into());
+ }
+
+ out
+ }
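+
+ // Call-site sketch (hypothetical function pointer and operand): stack temps
+ // are spilled before the call because the allocatable registers are
+ // caller-saved, yet register-resident operands can still be passed as-is
+ // thanks to the temporary reg_mapping switch above.
+ //
+ //   let ret = asm.ccall(some_c_function as *const u8, vec![recv_opnd]);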
+
+ /// Let vm_check_canary() assert the leafness of this ccall if leaf_ccall is set
+ fn set_stack_canary(&mut self, opnds: &Vec<Opnd>) -> Option<Opnd> {
+ // Use the slot right above the stack top for verifying leafness.
+ let canary_opnd = self.stack_opnd(-1);
+
+ // If the slot is already in use, which is a valid optimization to avoid
+ // spills, skip the verification.
+ let canary_opnd = if cfg!(feature = "runtime_checks") && self.leaf_ccall && opnds.iter().all(|opnd|
+ opnd.get_reg_opnd() != canary_opnd.get_reg_opnd()
+ ) {
+ asm_comment!(self, "set stack canary");
+ self.mov(canary_opnd, vm_stack_canary().into());
+ Some(canary_opnd)
+ } else {
+ None
+ };
+
+ // Avoid carrying the flag to the next instruction whether we verified it or not.
+ self.leaf_ccall = false;
+
+ canary_opnd
+ }
+
+ pub fn cmp(&mut self, left: Opnd, right: Opnd) {
+ self.push_insn(Insn::Cmp { left, right });
+ }
+
+ #[must_use]
+ pub fn cpop(&mut self) -> Opnd {
+ let out = self.next_opnd_out(Opnd::DEFAULT_NUM_BITS);
+ self.push_insn(Insn::CPop { out });
+ out
+ }
+
+ pub fn cpop_all(&mut self, reg_mapping: RegMapping) {
+ self.push_insn(Insn::CPopAll);
+
+ // Re-enable ccall's RegMappings assertion disabled by cpush_all.
+ // cpush_all + cpop_all preserve all stack temp registers, so it's safe.
+ self.set_reg_mapping(reg_mapping);
+ }
+
+ pub fn cpop_into(&mut self, opnd: Opnd) {
+ self.push_insn(Insn::CPopInto(opnd));
+ }
+
+ pub fn cpush(&mut self, opnd: Opnd) {
+ self.push_insn(Insn::CPush(opnd));
+ }
+
+ pub fn cpush_all(&mut self) -> RegMapping {
+ self.push_insn(Insn::CPushAll);
+
+ // Mark all temps as not being in registers.
+ // Temps will be marked back as being in registers by cpop_all.
+ // We assume that cpush_all + cpop_all are used for C functions in utils.rs
+ // that don't require spill_regs for GC.
+ let mapping = self.ctx.get_reg_mapping();
+ self.set_reg_mapping(RegMapping::default());
+ mapping
+ }
+
+ pub fn cret(&mut self, opnd: Opnd) {
+ self.push_insn(Insn::CRet(opnd));
+ }
+
+ #[must_use]
+ pub fn csel_e(&mut self, truthy: Opnd, falsy: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[truthy, falsy]));
+ self.push_insn(Insn::CSelE { truthy, falsy, out });
+ out
+ }
+
+ #[must_use]
+ pub fn csel_g(&mut self, truthy: Opnd, falsy: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[truthy, falsy]));
+ self.push_insn(Insn::CSelG { truthy, falsy, out });
+ out
+ }
+
+ #[must_use]
+ pub fn csel_ge(&mut self, truthy: Opnd, falsy: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[truthy, falsy]));
+ self.push_insn(Insn::CSelGE { truthy, falsy, out });
+ out
+ }
+
+ #[must_use]
+ pub fn csel_l(&mut self, truthy: Opnd, falsy: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[truthy, falsy]));
+ self.push_insn(Insn::CSelL { truthy, falsy, out });
+ out
+ }
+
+ #[must_use]
+ pub fn csel_le(&mut self, truthy: Opnd, falsy: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[truthy, falsy]));
+ self.push_insn(Insn::CSelLE { truthy, falsy, out });
+ out
+ }
+
+ #[must_use]
+ pub fn csel_ne(&mut self, truthy: Opnd, falsy: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[truthy, falsy]));
+ self.push_insn(Insn::CSelNE { truthy, falsy, out });
+ out
+ }
+
+ #[must_use]
+ pub fn csel_nz(&mut self, truthy: Opnd, falsy: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[truthy, falsy]));
+ self.push_insn(Insn::CSelNZ { truthy, falsy, out });
+ out
+ }
+
+ #[must_use]
+ pub fn csel_z(&mut self, truthy: Opnd, falsy: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[truthy, falsy]));
+ self.push_insn(Insn::CSelZ { truthy, falsy, out });
+ out
+ }
+
+ pub fn frame_setup(&mut self) {
+ self.push_insn(Insn::FrameSetup);
+ }
+
+ pub fn frame_teardown(&mut self) {
+ self.push_insn(Insn::FrameTeardown);
+ }
+
+ pub fn incr_counter(&mut self, mem: Opnd, value: Opnd) {
+ self.push_insn(Insn::IncrCounter { mem, value });
+ }
+
+ pub fn jbe(&mut self, target: Target) {
+ self.push_insn(Insn::Jbe(target));
+ }
+
+ pub fn jb(&mut self, target: Target) {
+ self.push_insn(Insn::Jb(target));
+ }
+
+ pub fn je(&mut self, target: Target) {
+ self.push_insn(Insn::Je(target));
+ }
+
+ pub fn jl(&mut self, target: Target) {
+ self.push_insn(Insn::Jl(target));
+ }
+
+ #[allow(dead_code)]
+ pub fn jg(&mut self, target: Target) {
+ self.push_insn(Insn::Jg(target));
+ }
+
+ #[allow(dead_code)]
+ pub fn jge(&mut self, target: Target) {
+ self.push_insn(Insn::Jge(target));
+ }
+
+ pub fn jmp(&mut self, target: Target) {
+ self.push_insn(Insn::Jmp(target));
+ }
+
+ pub fn jmp_opnd(&mut self, opnd: Opnd) {
+ self.push_insn(Insn::JmpOpnd(opnd));
+ }
+
+ pub fn jne(&mut self, target: Target) {
+ self.push_insn(Insn::Jne(target));
+ }
+
+ pub fn jnz(&mut self, target: Target) {
+ self.push_insn(Insn::Jnz(target));
+ }
+
+ pub fn jo(&mut self, target: Target) {
+ self.push_insn(Insn::Jo(target));
+ }
+
+ pub fn jo_mul(&mut self, target: Target) {
+ self.push_insn(Insn::JoMul(target));
+ }
+
+ pub fn jz(&mut self, target: Target) {
+ self.push_insn(Insn::Jz(target));
+ }
+
+ #[must_use]
+ pub fn lea(&mut self, opnd: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[opnd]));
+ self.push_insn(Insn::Lea { opnd, out });
+ out
+ }
+
+ #[must_use]
+ pub fn lea_jump_target(&mut self, target: Target) -> Opnd {
+ let out = self.next_opnd_out(Opnd::DEFAULT_NUM_BITS);
+ self.push_insn(Insn::LeaJumpTarget { target, out });
+ out
+ }
+
+ #[must_use]
+ pub fn live_reg_opnd(&mut self, opnd: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[opnd]));
+ self.push_insn(Insn::LiveReg { opnd, out });
+ out
+ }
+
+ #[must_use]
+ pub fn load(&mut self, opnd: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[opnd]));
+ self.push_insn(Insn::Load { opnd, out });
+ out
+ }
+
+ pub fn load_into(&mut self, dest: Opnd, opnd: Opnd) {
+ match (dest, opnd) {
+ (Opnd::Reg(dest), Opnd::Reg(opnd)) if dest == opnd => {}, // skip if noop
+ _ => self.push_insn(Insn::LoadInto { dest, opnd }),
+ }
+ }
+
+ #[must_use]
+ pub fn load_sext(&mut self, opnd: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[opnd]));
+ self.push_insn(Insn::LoadSExt { opnd, out });
+ out
+ }
+
+ #[must_use]
+ pub fn lshift(&mut self, opnd: Opnd, shift: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[opnd, shift]));
+ self.push_insn(Insn::LShift { opnd, shift, out });
+ out
+ }
+
+ pub fn mov(&mut self, dest: Opnd, src: Opnd) {
+ self.push_insn(Insn::Mov { dest, src });
+ }
+
+ #[must_use]
+ pub fn not(&mut self, opnd: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[opnd]));
+ self.push_insn(Insn::Not { opnd, out });
+ out
+ }
+
+ #[must_use]
+ pub fn or(&mut self, left: Opnd, right: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[left, right]));
+ self.push_insn(Insn::Or { left, right, out });
+ out
+ }
+
+ pub fn pad_inval_patch(&mut self) {
+ self.push_insn(Insn::PadInvalPatch);
+ }
+
+ pub fn pos_marker(&mut self, marker_fn: impl Fn(CodePtr, &CodeBlock) + 'static) {
+ self.push_insn(Insn::PosMarker(Box::new(marker_fn)));
+ }
+
+ #[must_use]
+ pub fn rshift(&mut self, opnd: Opnd, shift: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[opnd, shift]));
+ self.push_insn(Insn::RShift { opnd, shift, out });
+ out
+ }
+
+ pub fn store(&mut self, dest: Opnd, src: Opnd) {
+ self.push_insn(Insn::Store { dest, src });
+ }
+
+ #[must_use]
+ pub fn sub(&mut self, left: Opnd, right: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[left, right]));
+ self.push_insn(Insn::Sub { left, right, out });
+ out
+ }
+
+ #[must_use]
+ pub fn mul(&mut self, left: Opnd, right: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[left, right]));
+ self.push_insn(Insn::Mul { left, right, out });
+ out
+ }
+
+ pub fn test(&mut self, left: Opnd, right: Opnd) {
+ self.push_insn(Insn::Test { left, right });
+ }
+
+ #[must_use]
+ #[allow(dead_code)]
+ pub fn urshift(&mut self, opnd: Opnd, shift: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[opnd, shift]));
+ self.push_insn(Insn::URShift { opnd, shift, out });
+ out
+ }
+
+ /// Verify the leafness of the given block
+ pub fn with_leaf_ccall<F, R>(&mut self, mut block: F) -> R
+ where F: FnMut(&mut Self) -> R {
+ let old_leaf_ccall = self.leaf_ccall;
+ self.leaf_ccall = true;
+ let ret = block(self);
+ self.leaf_ccall = old_leaf_ccall;
+ ret
+ }
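+
+ // Usage sketch (hypothetical `leaf_func`): the ccall() emitted inside the
+ // block writes a stack canary, letting vm_check_canary() assert leafness at
+ // runtime when runtime checks are enabled.
+ //
+ //   let ret = asm.with_leaf_ccall(|asm| {
+ //       asm.ccall(leaf_func as *const u8, vec![])
+ //   });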
+
+ /// Add a label at the current position
+ pub fn write_label(&mut self, target: Target) {
+ assert!(target.unwrap_label_idx() < self.label_names.len());
+ self.push_insn(Insn::Label(target));
+ }
+
+ #[must_use]
+ pub fn xor(&mut self, left: Opnd, right: Opnd) -> Opnd {
+ let out = self.next_opnd_out(Opnd::match_num_bits(&[left, right]));
+ self.push_insn(Insn::Xor { left, right, out });
+ out
+ }
+}
+
+/// Macro that builds an Insn::Comment with format!, skipping the format!
+/// call entirely when disassembly is not being dumped.
+macro_rules! asm_comment {
+ ($asm:expr, $($fmt:tt)*) => {
+ if $crate::options::get_option_ref!(dump_disasm).is_some() {
+ $asm.push_insn(Insn::Comment(format!($($fmt)*)));
+ }
+ };
+}
+pub(crate) use asm_comment;
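+
+// Usage sketch: the format! arguments are only evaluated when --yjit-dump-disasm
+// is enabled, so comments cost nothing in normal runs.
+//
+//   asm_comment!(asm, "side exit for PC {:p}", pc);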
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_opnd_iter() {
+ let insn = Insn::Add { left: Opnd::None, right: Opnd::None, out: Opnd::None };
+
+ let mut opnd_iter = insn.opnd_iter();
+ assert!(matches!(opnd_iter.next(), Some(Opnd::None)));
+ assert!(matches!(opnd_iter.next(), Some(Opnd::None)));
+
+ assert!(matches!(opnd_iter.next(), None));
+ }
+
+ #[test]
+ fn test_opnd_iter_mut() {
+ let mut insn = Insn::Add { left: Opnd::None, right: Opnd::None, out: Opnd::None };
+
+ let mut opnd_iter = insn.opnd_iter_mut();
+ assert!(matches!(opnd_iter.next(), Some(Opnd::None)));
+ assert!(matches!(opnd_iter.next(), Some(Opnd::None)));
+
+ assert!(matches!(opnd_iter.next(), None));
+ }
+}
diff --git a/yjit/src/backend/mod.rs b/yjit/src/backend/mod.rs
new file mode 100644
index 0000000000..6921244c72
--- /dev/null
+++ b/yjit/src/backend/mod.rs
@@ -0,0 +1,14 @@
+#[cfg(target_arch = "x86_64")]
+pub mod x86_64;
+
+#[cfg(target_arch = "aarch64")]
+pub mod arm64;
+
+#[cfg(target_arch = "x86_64")]
+pub use x86_64 as current;
+
+#[cfg(target_arch = "aarch64")]
+pub use arm64 as current;
+
+pub mod ir;
+mod tests;
diff --git a/yjit/src/backend/tests.rs b/yjit/src/backend/tests.rs
new file mode 100644
index 0000000000..bfeea5163a
--- /dev/null
+++ b/yjit/src/backend/tests.rs
@@ -0,0 +1,329 @@
+#![cfg(test)]
+use crate::asm::CodeBlock;
+use crate::backend::ir::*;
+use crate::cruby::*;
+use crate::utils::c_callable;
+
+#[test]
+fn test_add() {
+ let mut asm = Assembler::new(0);
+ let out = asm.add(SP, Opnd::UImm(1));
+ let _ = asm.add(out, Opnd::UImm(2));
+}
+
+#[test]
+fn test_alloc_regs() {
+ let mut asm = Assembler::new(0);
+
+ // Get the first output that we're going to reuse later.
+ let out1 = asm.add(EC, Opnd::UImm(1));
+
+ // Pad some instructions in to make sure it can handle that.
+ let _ = asm.add(EC, Opnd::UImm(2));
+
+ // Get the second output we're going to reuse.
+ let out2 = asm.add(EC, Opnd::UImm(3));
+
+ // Pad another instruction.
+ let _ = asm.add(EC, Opnd::UImm(4));
+
+ // Reuse both the previously captured outputs.
+ let _ = asm.add(out1, out2);
+
+ // Now get a third output to make sure that the pool has registers to
+ // allocate now that the previous ones have been returned.
+ let out3 = asm.add(EC, Opnd::UImm(5));
+ let _ = asm.add(out3, Opnd::UImm(6));
+
+ // Here we're going to allocate the registers.
+ let result = asm.alloc_regs(Assembler::get_alloc_regs());
+
+ // Now we're going to verify that the out field has been appropriately
+ // updated for each of the instructions that needs it.
+ let regs = Assembler::get_alloc_regs();
+ let reg0 = regs[0];
+ let reg1 = regs[1];
+
+ match result.insns[0].out_opnd() {
+ Some(Opnd::Reg(value)) => assert_eq!(value, &reg0),
+ val => panic!("Unexpected register value {:?}", val),
+ }
+
+ match result.insns[2].out_opnd() {
+ Some(Opnd::Reg(value)) => assert_eq!(value, &reg1),
+ val => panic!("Unexpected register value {:?}", val),
+ }
+
+ match result.insns[5].out_opnd() {
+ Some(Opnd::Reg(value)) => assert_eq!(value, &reg0),
+ val => panic!("Unexpected register value {:?}", val),
+ }
+}
+
+fn setup_asm() -> (Assembler, CodeBlock) {
+ (
+ Assembler::new(0),
+ CodeBlock::new_dummy(1024)
+ )
+}
+
+// Test full codegen pipeline
+#[test]
+fn test_compile()
+{
+ let (mut asm, mut cb) = setup_asm();
+ let regs = Assembler::get_alloc_regs();
+
+ let out = asm.add(Opnd::Reg(regs[0]), Opnd::UImm(2));
+ let out2 = asm.add(out, Opnd::UImm(2));
+ asm.store(Opnd::mem(64, SP, 0), out2);
+
+ asm.compile_with_num_regs(&mut cb, 1);
+}
+
+// Test memory-to-memory move
+#[test]
+fn test_mov_mem2mem()
+{
+ let (mut asm, mut cb) = setup_asm();
+
+ asm_comment!(asm, "check that comments work too");
+ asm.mov(Opnd::mem(64, SP, 0), Opnd::mem(64, SP, 8));
+
+ asm.compile_with_num_regs(&mut cb, 1);
+}
+
+// Test load of register into new register
+#[test]
+fn test_load_reg()
+{
+ let (mut asm, mut cb) = setup_asm();
+
+ let out = asm.load(SP);
+ asm.mov(Opnd::mem(64, SP, 0), out);
+
+ asm.compile_with_num_regs(&mut cb, 1);
+}
+
+// Test load of a GC'd value
+#[test]
+fn test_load_value()
+{
+ let (mut asm, mut cb) = setup_asm();
+
+ let gcd_value = VALUE(0xFFFFFFFFFFFF00);
+ assert!(!gcd_value.special_const_p());
+
+ let out = asm.load(Opnd::Value(gcd_value));
+ asm.mov(Opnd::mem(64, SP, 0), out);
+
+ asm.compile_with_num_regs(&mut cb, 1);
+}
+
+// Multiple registers needed and register reuse
+#[test]
+fn test_reuse_reg()
+{
+ let (mut asm, mut cb) = setup_asm();
+
+ let v0 = asm.add(Opnd::mem(64, SP, 0), Opnd::UImm(1));
+ let v1 = asm.add(Opnd::mem(64, SP, 8), Opnd::UImm(1));
+
+ let v2 = asm.add(v1, Opnd::UImm(1)); // Reuse v1 register
+ let v3 = asm.add(v0, v2);
+
+ asm.store(Opnd::mem(64, SP, 0), v2);
+ asm.store(Opnd::mem(64, SP, 8), v3);
+
+ asm.compile_with_num_regs(&mut cb, 2);
+}
+
+// 64-bit values can't be written directly to memory;
+// they need to be split into one or more register movs first
+#[test]
+fn test_store_u64()
+{
+ let (mut asm, mut cb) = setup_asm();
+ asm.store(Opnd::mem(64, SP, 0), u64::MAX.into());
+
+ asm.compile_with_num_regs(&mut cb, 1);
+}
+
+// Use instruction output as base register for memory operand
+#[test]
+fn test_base_insn_out()
+{
+ let (mut asm, mut cb) = setup_asm();
+
+    // Force a register to be reused.
+ // This also causes the insn sequence to change length
+ asm.mov(
+ Opnd::mem(64, SP, 8),
+ Opnd::mem(64, SP, 0)
+ );
+
+ // Load the pointer into a register
+ let ptr_reg = asm.load(Opnd::const_ptr(4351776248 as *const u8));
+ let counter_opnd = Opnd::mem(64, ptr_reg, 0);
+
+ // Increment and store the updated value
+ asm.incr_counter(counter_opnd, 1.into());
+
+ asm.compile_with_num_regs(&mut cb, 2);
+}
+
+#[test]
+fn test_c_call()
+{
+ c_callable! {
+ fn dummy_c_fun(_v0: usize, _v1: usize) {}
+ }
+
+ let (mut asm, mut cb) = setup_asm();
+
+ let ret_val = asm.ccall(
+ dummy_c_fun as *const u8,
+ vec![Opnd::mem(64, SP, 0), Opnd::UImm(1)]
+ );
+
+ // Make sure that the call's return value is usable
+ asm.mov(Opnd::mem(64, SP, 0), ret_val);
+
+ asm.compile_with_num_regs(&mut cb, 1);
+}
+
+#[test]
+fn test_alloc_ccall_regs() {
+ let mut asm = Assembler::new(0);
+ let out1 = asm.ccall(0 as *const u8, vec![]);
+ let out2 = asm.ccall(0 as *const u8, vec![out1]);
+ asm.mov(EC, out2);
+ let mut cb = CodeBlock::new_dummy(1024);
+ asm.compile_with_regs(&mut cb, None, Assembler::get_alloc_regs());
+}
+
+#[test]
+fn test_lea_ret()
+{
+ let (mut asm, mut cb) = setup_asm();
+
+ let addr = asm.lea(Opnd::mem(64, SP, 0));
+ asm.cret(addr);
+
+ asm.compile_with_num_regs(&mut cb, 1);
+}
+
+#[test]
+fn test_jcc_label()
+{
+ let (mut asm, mut cb) = setup_asm();
+
+ let label = asm.new_label("foo");
+ asm.cmp(EC, EC);
+ asm.je(label);
+ asm.write_label(label);
+
+ asm.compile_with_num_regs(&mut cb, 1);
+}
+
+#[test]
+fn test_jcc_ptr()
+{
+ let (mut asm, mut cb) = setup_asm();
+
+ let side_exit = Target::CodePtr(cb.get_write_ptr().add_bytes(4));
+ let not_mask = asm.not(Opnd::mem(32, EC, RUBY_OFFSET_EC_INTERRUPT_MASK as i32));
+ asm.test(
+ Opnd::mem(32, EC, RUBY_OFFSET_EC_INTERRUPT_FLAG as i32),
+ not_mask,
+ );
+ asm.jnz(side_exit);
+
+ asm.compile_with_num_regs(&mut cb, 2);
+}
+
+/// Direct jump to a stub, e.g. for deferred compilation
+#[test]
+fn test_jmp_ptr()
+{
+ let (mut asm, mut cb) = setup_asm();
+
+ let stub = Target::CodePtr(cb.get_write_ptr().add_bytes(4));
+ asm.jmp(stub);
+
+ asm.compile_with_num_regs(&mut cb, 0);
+}
+
+#[test]
+fn test_jo()
+{
+ let (mut asm, mut cb) = setup_asm();
+
+ let side_exit = Target::CodePtr(cb.get_write_ptr().add_bytes(4));
+
+ let arg1 = Opnd::mem(64, SP, 0);
+ let arg0 = Opnd::mem(64, SP, 8);
+
+ let arg0_untag = asm.sub(arg0, Opnd::Imm(1));
+ let out_val = asm.add(arg0_untag, arg1);
+ asm.jo(side_exit);
+
+ asm.mov(Opnd::mem(64, SP, 0), out_val);
+
+ asm.compile_with_num_regs(&mut cb, 2);
+}
+
+#[test]
+fn test_bake_string() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.bake_string("Hello, world!");
+ asm.compile_with_num_regs(&mut cb, 0);
+}
+
+#[test]
+fn test_draining_iterator() {
+ let mut asm = Assembler::new(0);
+
+ let _ = asm.load(Opnd::None);
+ asm.store(Opnd::None, Opnd::None);
+ let _ = asm.add(Opnd::None, Opnd::None);
+
+ let mut iter = asm.into_draining_iter();
+
+ while let Some((index, insn)) = iter.next_unmapped() {
+ match index {
+ 0 => assert!(matches!(insn, Insn::Load { .. })),
+ 1 => assert!(matches!(insn, Insn::Store { .. })),
+ 2 => assert!(matches!(insn, Insn::Add { .. })),
+ _ => panic!("Unexpected instruction index"),
+ };
+ }
+}
+
+#[test]
+fn test_cmp_8_bit() {
+ let (mut asm, mut cb) = setup_asm();
+ let reg = Assembler::get_alloc_regs()[0];
+ asm.cmp(Opnd::Reg(reg).with_num_bits(8).unwrap(), Opnd::UImm(RUBY_SYMBOL_FLAG as u64));
+
+ asm.compile_with_num_regs(&mut cb, 1);
+}
+
+#[test]
+fn test_no_pos_marker_callback_when_compile_fails() {
+ // When compilation fails (e.g. when out of memory), the code written out is malformed.
+ // We don't want to invoke the pos_marker callbacks with positions of malformed code.
+ let mut asm = Assembler::new(0);
+
+ // Markers around code to exhaust memory limit
+ let fail_if_called = |_code_ptr, _cb: &_| panic!("pos_marker callback should not be called");
+ asm.pos_marker(fail_if_called);
+ let zero = asm.load(0.into());
+ let sum = asm.add(zero, 500.into());
+ asm.store(Opnd::mem(64, SP, 8), sum);
+ asm.pos_marker(fail_if_called);
+
+ let cb = &mut CodeBlock::new_dummy(8);
+ assert!(asm.compile(cb, None).is_none(), "should fail due to tiny size limit");
+}
diff --git a/yjit/src/backend/x86_64/mod.rs b/yjit/src/backend/x86_64/mod.rs
new file mode 100644
index 0000000000..ef435bca7e
--- /dev/null
+++ b/yjit/src/backend/x86_64/mod.rs
@@ -0,0 +1,1340 @@
+use std::mem::take;
+
+use crate::asm::*;
+use crate::asm::x86_64::*;
+use crate::codegen::CodePtr;
+use crate::cruby::*;
+use crate::backend::ir::*;
+use crate::options::*;
+use crate::utils::*;
+
+// Use the x86 register type for this platform
+pub type Reg = X86Reg;
+
+// Callee-saved registers
+pub const _CFP: Opnd = Opnd::Reg(R13_REG);
+pub const _EC: Opnd = Opnd::Reg(R12_REG);
+pub const _SP: Opnd = Opnd::Reg(RBX_REG);
+
+// C argument registers on this platform
+pub const _C_ARG_OPNDS: [Opnd; 6] = [
+ Opnd::Reg(RDI_REG),
+ Opnd::Reg(RSI_REG),
+ Opnd::Reg(RDX_REG),
+ Opnd::Reg(RCX_REG),
+ Opnd::Reg(R8_REG),
+ Opnd::Reg(R9_REG)
+];
+
+// C return value register on this platform
+pub const C_RET_REG: Reg = RAX_REG;
+pub const _C_RET_OPND: Opnd = Opnd::Reg(RAX_REG);
+
+impl CodeBlock {
+ // The number of bytes that are generated by jmp_ptr
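+    // (a jmp rel32 instruction: one 0xE9 opcode byte plus a 4-byte displacement)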
+ pub fn jmp_ptr_bytes(&self) -> usize { 5 }
+}
+
+/// Map Opnd to X86Opnd
+impl From<Opnd> for X86Opnd {
+ fn from(opnd: Opnd) -> Self {
+ match opnd {
+ // NOTE: these operand types need to be lowered first
+ //Value(VALUE), // Immediate Ruby value, may be GC'd, movable
+ //InsnOut(usize), // Output of a preceding instruction in this block
+
+ Opnd::InsnOut{..} => panic!("InsnOut operand made it past register allocation"),
+
+ Opnd::UImm(val) => uimm_opnd(val),
+ Opnd::Imm(val) => imm_opnd(val),
+ Opnd::Value(VALUE(uimm)) => uimm_opnd(uimm as u64),
+
+ // General-purpose register
+ Opnd::Reg(reg) => X86Opnd::Reg(reg),
+
+ // Memory operand with displacement
+ Opnd::Mem(Mem{ base: MemBase::Reg(reg_no), num_bits, disp }) => {
+ let reg = X86Reg {
+ reg_no,
+ num_bits: 64,
+ reg_type: RegType::GP
+ };
+
+ mem_opnd(num_bits, X86Opnd::Reg(reg), disp)
+ }
+
+ Opnd::None => panic!(
+ "Attempted to lower an Opnd::None. This often happens when an out operand was not allocated for an instruction because the output of the instruction was not used. Please ensure you are using the output."
+ ),
+
+ _ => panic!("unsupported x86 operand type")
+ }
+ }
+}
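+
+// A minimal sketch of how this lowering is used (operand values are
+// illustrative):
+//
+//     let reg: X86Opnd = Opnd::Reg(RAX_REG).into();                   // register
+//     let mem: X86Opnd = Opnd::mem(64, Opnd::Reg(RAX_REG), 8).into(); // qword ptr [rax + 8]
+//     let imm: X86Opnd = Opnd::UImm(3).into();                        // uimm_opnd(3)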
+
+/// Also implement going from a reference to an operand for convenience.
+impl From<&Opnd> for X86Opnd {
+ fn from(opnd: &Opnd) -> Self {
+ X86Opnd::from(*opnd)
+ }
+}
+
+/// List of registers that can be used for stack temps and locals.
+pub static TEMP_REGS: [Reg; 5] = [RSI_REG, RDI_REG, R8_REG, R9_REG, R10_REG];
+
+impl Assembler
+{
+ // A special scratch register for intermediate processing.
+ // This register is caller-saved (so we don't have to save it before using it)
+ pub const SCRATCH_REG: Reg = R11_REG;
+ const SCRATCH0: X86Opnd = X86Opnd::Reg(Assembler::SCRATCH_REG);
+
+ /// Get the list of registers from which we can allocate on this platform
+ pub fn get_alloc_regs() -> Vec<Reg>
+ {
+ vec![
+ RAX_REG,
+ RCX_REG,
+ RDX_REG,
+ ]
+ }
+
+ /// Get a list of all of the caller-save registers
+ pub fn get_caller_save_regs() -> Vec<Reg> {
+ vec![RAX_REG, RCX_REG, RDX_REG, RSI_REG, RDI_REG, R8_REG, R9_REG, R10_REG, R11_REG]
+ }
+
+    // For reference, the callee-saved registers in the x86-64 SysV ABI are
+    // RBX, RSP, RBP, and R12–R15.
+
+ /// Split IR instructions for the x86 platform
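+    ///
+    /// As a rough sketch (operand names illustrative, not real IR), a
+    /// memory-to-memory add such as `Add [a], [b]` is split into loads
+    /// plus a register add:
+    ///
+    /// ```text
+    /// v0 = Load [a]
+    /// v1 = Load [b]
+    /// v2 = Add v0, v1
+    /// ```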
+ fn x86_split(mut self) -> Assembler
+ {
+ let live_ranges: Vec<usize> = take(&mut self.live_ranges);
+ let mut asm = Assembler::new_with_label_names(take(&mut self.label_names), take(&mut self.side_exits), self.num_locals);
+ let mut iterator = self.into_draining_iter();
+
+ while let Some((index, mut insn)) = iterator.next_unmapped() {
+ // When we're iterating through the instructions with x86_split, we
+ // need to know the previous live ranges in order to tell if a
+ // register lasts beyond the current instruction. So instead of
+ // using next_mapped, we call next_unmapped. When you're using the
+ // next_unmapped API, you need to make sure that you map each
+ // operand that could reference an old index, which means both
+ // Opnd::InsnOut operands and Opnd::Mem operands with a base of
+ // MemBase::InsnOut.
+ //
+            // You also need to ensure that each operand is mapped only _once_;
+            // mapping it twice produces an incorrect index, possibly out of
+            // bounds of the old set of indices.
+ //
+ // We handle all of that mapping here to ensure that it's only
+ // mapped once. We also handle loading Opnd::Value operands into
+ // registers here so that all mapping happens in one place. We load
+ // Opnd::Value operands into registers here because:
+ //
+ // - Most instructions can't be encoded with 64-bit immediates.
+ // - We look for Op::Load specifically when emitting to keep GC'ed
+ // VALUEs alive. This is a sort of canonicalization.
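+            //
+            // As a concrete (hypothetical) example: if old insn 3 is split into
+            // insns 3 and 4 in the new assembler, an Opnd::InsnOut { idx: 3 }
+            // must be remapped exactly once to idx 4; mapping it a second time
+            // would treat 4 as an old index and translate it again.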
+ let mut unmapped_opnds: Vec<Opnd> = vec![];
+
+ let is_load = matches!(insn, Insn::Load { .. } | Insn::LoadInto { .. });
+ let mut opnd_iter = insn.opnd_iter_mut();
+
+ while let Some(opnd) = opnd_iter.next() {
+ if let Opnd::Stack { .. } = opnd {
+ *opnd = asm.lower_stack_opnd(opnd);
+ }
+ unmapped_opnds.push(*opnd);
+
+ *opnd = match opnd {
+ Opnd::Value(value) if !is_load => {
+                        // Since mov(mem64, imm32) sign-extends, as_i64() makes sure
+                        // we split when the sign-extended value differs.
+ if !value.special_const_p() || imm_num_bits(value.as_i64()) > 32 {
+ asm.load(iterator.map_opnd(*opnd))
+ } else {
+ Opnd::UImm(value.as_u64())
+ }
+ }
+ _ => iterator.map_opnd(*opnd),
+ };
+ }
+
+ // We are replacing instructions here so we know they are already
+ // being used. It is okay not to use their output here.
+ #[allow(unused_must_use)]
+ match &mut insn {
+ Insn::Add { left, right, out } |
+ Insn::Sub { left, right, out } |
+ Insn::Mul { left, right, out } |
+ Insn::And { left, right, out } |
+ Insn::Or { left, right, out } |
+ Insn::Xor { left, right, out } => {
+ match (&left, &right, iterator.peek()) {
+ // Merge this insn, e.g. `add REG, right -> out`, and `mov REG, out` if possible
+ (Opnd::Reg(_), Opnd::UImm(value), Some(Insn::Mov { dest, src }))
+ if out == src && left == dest && live_ranges[index] == index + 1 && uimm_num_bits(*value) <= 32 => {
+ *out = *dest;
+ asm.push_insn(insn);
+ iterator.map_insn_index(&mut asm);
+ iterator.next_unmapped(); // Pop merged Insn::Mov
+ }
+ (Opnd::Reg(_), Opnd::Reg(_), Some(Insn::Mov { dest, src }))
+ if out == src && live_ranges[index] == index + 1 && {
+ // We want to do `dest == left`, but `left` has already gone
+ // through lower_stack_opnd() while `dest` has not. So we
+ // lower `dest` before comparing.
+ let lowered_dest = if let Opnd::Stack { .. } = dest {
+ asm.lower_stack_opnd(dest)
+ } else {
+ *dest
+ };
+ lowered_dest == *left
+ } => {
+ *out = *dest;
+ asm.push_insn(insn);
+ iterator.map_insn_index(&mut asm);
+ iterator.next_unmapped(); // Pop merged Insn::Mov
+ }
+ _ => {
+ match (unmapped_opnds[0], unmapped_opnds[1]) {
+ (Opnd::Mem(_), Opnd::Mem(_)) => {
+ *left = asm.load(*left);
+ *right = asm.load(*right);
+ },
+ (Opnd::Mem(_), Opnd::UImm(_) | Opnd::Imm(_)) => {
+ *left = asm.load(*left);
+ },
+ // Instruction output whose live range spans beyond this instruction
+ (Opnd::InsnOut { idx, .. }, _) => {
+ if live_ranges[idx] > index {
+ *left = asm.load(*left);
+ }
+ },
+                                // We have to load memory and register operands to avoid corrupting them
+ (Opnd::Mem(_) | Opnd::Reg(_), _) => {
+ *left = asm.load(*left);
+ },
+ _ => {}
+ };
+
+ *out = asm.next_opnd_out(Opnd::match_num_bits(&[*left, *right]));
+ asm.push_insn(insn);
+ }
+ }
+ },
+ Insn::Cmp { left, right } => {
+                    // Replace `cmp REG, 0` (4 bytes) with `test REG, REG` (3 bytes)
+                    // when the next IR is `je`, `jne`, `csel_e`, or `csel_ne`
+ match (&left, &right, iterator.peek()) {
+ (Opnd::InsnOut { .. },
+ Opnd::UImm(0) | Opnd::Imm(0),
+ Some(Insn::Je(_) | Insn::Jne(_) | Insn::CSelE { .. } | Insn::CSelNE { .. })) => {
+ asm.push_insn(Insn::Test { left: *left, right: *left });
+ }
+ _ => {
+ if let (Opnd::Mem(_), Opnd::Mem(_)) = (&left, &right) {
+ let loaded = asm.load(*right);
+ *right = loaded;
+ }
+ asm.push_insn(insn);
+ }
+ }
+ },
+ Insn::Test { left, right } => {
+ if let (Opnd::Mem(_), Opnd::Mem(_)) = (&left, &right) {
+ let loaded = asm.load(*right);
+ *right = loaded;
+ }
+ asm.push_insn(insn);
+ },
+ // These instructions modify their input operand in-place, so we
+ // may need to load the input value to preserve it
+ Insn::LShift { opnd, shift, out } |
+ Insn::RShift { opnd, shift, out } |
+ Insn::URShift { opnd, shift, out } => {
+ match (&unmapped_opnds[0], &unmapped_opnds[1]) {
+ // Instruction output whose live range spans beyond this instruction
+ (Opnd::InsnOut { idx, .. }, _) => {
+ if live_ranges[*idx] > index {
+ *opnd = asm.load(*opnd);
+ }
+ },
+                        // We have to load memory and register operands to avoid corrupting them
+ (Opnd::Mem(_) | Opnd::Reg(_), _) => {
+ *opnd = asm.load(*opnd);
+ },
+ _ => {}
+ };
+
+ *out = asm.next_opnd_out(Opnd::match_num_bits(&[*opnd, *shift]));
+ asm.push_insn(insn);
+ },
+ Insn::CSelZ { truthy, falsy, out } |
+ Insn::CSelNZ { truthy, falsy, out } |
+ Insn::CSelE { truthy, falsy, out } |
+ Insn::CSelNE { truthy, falsy, out } |
+ Insn::CSelL { truthy, falsy, out } |
+ Insn::CSelLE { truthy, falsy, out } |
+ Insn::CSelG { truthy, falsy, out } |
+ Insn::CSelGE { truthy, falsy, out } => {
+ match unmapped_opnds[0] {
+ // If we have an instruction output whose live range
+ // spans beyond this instruction, we have to load it.
+ Opnd::InsnOut { idx, .. } => {
+ if live_ranges[idx] > index {
+ *truthy = asm.load(*truthy);
+ }
+ },
+ Opnd::UImm(_) | Opnd::Imm(_) => {
+ *truthy = asm.load(*truthy);
+ },
+ // Opnd::Value could have already been split
+ Opnd::Value(_) if !matches!(truthy, Opnd::InsnOut { .. }) => {
+ *truthy = asm.load(*truthy);
+ },
+ _ => {}
+ };
+
+ match falsy {
+ Opnd::UImm(_) | Opnd::Imm(_) => {
+ *falsy = asm.load(*falsy);
+ },
+ _ => {}
+ };
+
+ *out = asm.next_opnd_out(Opnd::match_num_bits(&[*truthy, *falsy]));
+ asm.push_insn(insn);
+ },
+ Insn::Mov { dest, src } | Insn::Store { dest, src } => {
+ match (&dest, &src) {
+ (Opnd::Mem(_), Opnd::Mem(_)) => {
+ // We load opnd1 because for mov, opnd0 is the output
+ let opnd1 = asm.load(*src);
+ asm.mov(*dest, opnd1);
+ },
+ (Opnd::Mem(Mem { num_bits, .. }), Opnd::UImm(value)) => {
+                            // For 64-bit destinations, 32-bit values will be sign-extended
+ if *num_bits == 64 && imm_num_bits(*value as i64) > 32 {
+ let opnd1 = asm.load(*src);
+ asm.mov(*dest, opnd1);
+ } else {
+ asm.mov(*dest, *src);
+ }
+ },
+ (Opnd::Mem(Mem { num_bits, .. }), Opnd::Imm(value)) => {
+                            // For 64-bit destinations, 32-bit values will be sign-extended
+ if *num_bits == 64 && imm_num_bits(*value) > 32 {
+ let opnd1 = asm.load(*src);
+ asm.mov(*dest, opnd1);
+ } else if uimm_num_bits(*value as u64) <= *num_bits {
+ // If the bit string is short enough for the destination, use the unsigned representation.
+ // Note that 64-bit and negative values are ruled out.
+ asm.mov(*dest, Opnd::UImm(*value as u64));
+ } else {
+ asm.mov(*dest, *src);
+ }
+ },
+ _ => {
+ asm.mov(*dest, *src);
+ }
+ }
+ },
+ Insn::Not { opnd, .. } => {
+ let opnd0 = match unmapped_opnds[0] {
+ // If we have an instruction output whose live range
+ // spans beyond this instruction, we have to load it.
+ Opnd::InsnOut { idx, .. } => {
+ if live_ranges[idx] > index {
+ asm.load(*opnd)
+ } else {
+ *opnd
+ }
+ },
+ // We have to load memory and register operands to avoid
+ // corrupting them.
+ Opnd::Mem(_) | Opnd::Reg(_) => {
+ asm.load(*opnd)
+ },
+ // Otherwise we can just reuse the existing operand.
+ _ => *opnd
+ };
+
+ asm.not(opnd0);
+ },
+ Insn::CCall { opnds, fptr, .. } => {
+ assert!(opnds.len() <= C_ARG_OPNDS.len());
+
+ // Load each operand into the corresponding argument
+ // register.
+ for (idx, opnd) in opnds.into_iter().enumerate() {
+ asm.load_into(Opnd::c_arg(C_ARG_OPNDS[idx]), *opnd);
+ }
+
+ // Now we push the CCall without any arguments so that it
+ // just performs the call.
+ asm.ccall(*fptr, vec![]);
+ },
+ Insn::Lea { .. } => {
+ // Merge `lea` and `mov` into a single `lea` when possible
+ match (&insn, iterator.peek()) {
+ (Insn::Lea { opnd, out }, Some(Insn::Mov { dest: Opnd::Reg(reg), src }))
+ if matches!(out, Opnd::InsnOut { .. }) && out == src && live_ranges[index] == index + 1 => {
+ asm.push_insn(Insn::Lea { opnd: *opnd, out: Opnd::Reg(*reg) });
+ iterator.map_insn_index(&mut asm);
+ iterator.next_unmapped(); // Pop merged Insn::Mov
+ }
+ _ => asm.push_insn(insn),
+ }
+ },
+ _ => {
+ if insn.out_opnd().is_some() {
+ let out_num_bits = Opnd::match_num_bits_iter(insn.opnd_iter());
+ let out = insn.out_opnd_mut().unwrap();
+ *out = asm.next_opnd_out(out_num_bits);
+ }
+
+ asm.push_insn(insn);
+ }
+ };
+
+ iterator.map_insn_index(&mut asm);
+ }
+
+ asm
+ }
+
+ /// Emit platform-specific machine code
+ pub fn x86_emit(&mut self, cb: &mut CodeBlock, ocb: &mut Option<&mut OutlinedCb>) -> Option<Vec<u32>>
+ {
+ /// For some instructions, we want to be able to lower a 64-bit operand
+ /// without requiring more registers to be available in the register
+ /// allocator. So we just use the SCRATCH0 register temporarily to hold
+ /// the value before we immediately use it.
+ fn emit_64bit_immediate(cb: &mut CodeBlock, opnd: &Opnd) -> X86Opnd {
+ match opnd {
+ Opnd::Imm(value) => {
+ // 32-bit values will be sign-extended
+ if imm_num_bits(*value) > 32 {
+ mov(cb, Assembler::SCRATCH0, opnd.into());
+ Assembler::SCRATCH0
+ } else {
+ opnd.into()
+ }
+ },
+ Opnd::UImm(value) => {
+ // 32-bit values will be sign-extended
+ if imm_num_bits(*value as i64) > 32 {
+ mov(cb, Assembler::SCRATCH0, opnd.into());
+ Assembler::SCRATCH0
+ } else {
+ opnd.into()
+ }
+ },
+ _ => opnd.into()
+ }
+ }
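+
+        // For example (a sketch; cf. the tests at the bottom of this file),
+        // with a 64-bit immediate, `add rax, 0xFFFF_FFFF_FFFF` is emitted as:
+        //     mov r11, 0xFFFF_FFFF_FFFF   ; r11 is SCRATCH0
+        //     add rax, r11
+        // while a 32-bit immediate like 0xFF is encoded directly into the add.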
+
+ /// Compile a side exit if Target::SideExit is given.
+ fn compile_side_exit(
+ target: Target,
+ asm: &mut Assembler,
+ ocb: &mut Option<&mut OutlinedCb>,
+ ) -> Option<Target> {
+ if let Target::SideExit { counter, context } = target {
+ let side_exit = asm.get_side_exit(&context.unwrap(), Some(counter), ocb.as_mut().unwrap());
+ Some(Target::SideExitPtr(side_exit?))
+ } else {
+ Some(target)
+ }
+ }
+
+ fn emit_csel(
+ cb: &mut CodeBlock,
+ truthy: Opnd,
+ falsy: Opnd,
+ out: Opnd,
+ cmov_fn: fn(&mut CodeBlock, X86Opnd, X86Opnd),
+            cmov_neg: fn(&mut CodeBlock, X86Opnd, X86Opnd),
+        ) {
+
+ // Assert that output is a register
+ out.unwrap_reg();
+
+ // If the truthy value is a memory operand
+ if let Opnd::Mem(_) = truthy {
+ if out != falsy {
+ mov(cb, out.into(), falsy.into());
+ }
+
+ cmov_fn(cb, out.into(), truthy.into());
+ } else {
+ if out != truthy {
+ mov(cb, out.into(), truthy.into());
+ }
+
+ cmov_neg(cb, out.into(), falsy.into());
+ }
+ }
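+
+        // A sketch of the two emission orders (registers illustrative):
+        //   truthy not in memory:       truthy in memory:
+        //     mov      out, truthy        mov     out, falsy
+        //     cmov_neg out, falsy         cmov_fn out, truthy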
+
+ // List of GC offsets
+ let mut gc_offsets: Vec<u32> = Vec::new();
+
+ // Buffered list of PosMarker callbacks to fire if codegen is successful
+ let mut pos_markers: Vec<(usize, CodePtr)> = vec![];
+
+ // For each instruction
+ let start_write_pos = cb.get_write_pos();
+ let mut insn_idx: usize = 0;
+ while let Some(insn) = self.insns.get(insn_idx) {
+ let src_ptr = cb.get_write_ptr();
+ let had_dropped_bytes = cb.has_dropped_bytes();
+ let old_label_state = cb.get_label_state();
+ let mut insn_gc_offsets: Vec<u32> = Vec::new();
+
+ match insn {
+ Insn::Comment(text) => {
+ cb.add_comment(text);
+ },
+
+ // Write the label at the current position
+ Insn::Label(target) => {
+ cb.write_label(target.unwrap_label_idx());
+ },
+
+ // Report back the current position in the generated code
+ Insn::PosMarker(..) => {
+ pos_markers.push((insn_idx, cb.get_write_ptr()));
+ },
+
+ Insn::BakeString(text) => {
+ for byte in text.as_bytes() {
+ cb.write_byte(*byte);
+ }
+
+ // Add a null-terminator byte for safety (in case we pass
+ // this to C code)
+ cb.write_byte(0);
+ },
+
+ // Set up RBP to work with frame pointer unwinding
+ // (e.g. with Linux `perf record --call-graph fp`)
+ Insn::FrameSetup => {
+ if get_option!(frame_pointer) {
+ push(cb, RBP);
+ mov(cb, RBP, RSP);
+ push(cb, RBP);
+ }
+ },
+ Insn::FrameTeardown => {
+ if get_option!(frame_pointer) {
+ pop(cb, RBP);
+ pop(cb, RBP);
+ }
+ },
+
+ Insn::Add { left, right, .. } => {
+ let opnd1 = emit_64bit_immediate(cb, right);
+ add(cb, left.into(), opnd1);
+ },
+
+ Insn::Sub { left, right, .. } => {
+ let opnd1 = emit_64bit_immediate(cb, right);
+ sub(cb, left.into(), opnd1);
+ },
+
+ Insn::Mul { left, right, .. } => {
+ let opnd1 = emit_64bit_immediate(cb, right);
+ imul(cb, left.into(), opnd1);
+ },
+
+ Insn::And { left, right, .. } => {
+ let opnd1 = emit_64bit_immediate(cb, right);
+ and(cb, left.into(), opnd1);
+ },
+
+ Insn::Or { left, right, .. } => {
+ let opnd1 = emit_64bit_immediate(cb, right);
+ or(cb, left.into(), opnd1);
+ },
+
+ Insn::Xor { left, right, .. } => {
+ let opnd1 = emit_64bit_immediate(cb, right);
+ xor(cb, left.into(), opnd1);
+ },
+
+ Insn::Not { opnd, .. } => {
+ not(cb, opnd.into());
+ },
+
+ Insn::LShift { opnd, shift , ..} => {
+ shl(cb, opnd.into(), shift.into())
+ },
+
+ Insn::RShift { opnd, shift , ..} => {
+ sar(cb, opnd.into(), shift.into())
+ },
+
+ Insn::URShift { opnd, shift, .. } => {
+ shr(cb, opnd.into(), shift.into())
+ },
+
+ Insn::Store { dest, src } => {
+ mov(cb, dest.into(), src.into());
+ },
+
+ // This assumes only load instructions can contain references to GC'd Value operands
+ Insn::Load { opnd, out } |
+ Insn::LoadInto { dest: out, opnd } => {
+ match opnd {
+ Opnd::Value(val) if val.heap_object_p() => {
+                            // Using movabs because mov might write the value in only 32 bits
+ movabs(cb, out.into(), val.0 as _);
+                            // The 8-byte pointer immediate is encoded as the last part of
+                            // the movabs written out, so it starts SIZEOF_VALUE bytes
+                            // before the current write position
+ let ptr_offset: u32 = (cb.get_write_pos() as u32) - (SIZEOF_VALUE as u32);
+ insn_gc_offsets.push(ptr_offset);
+ }
+ _ => mov(cb, out.into(), opnd.into())
+ }
+ },
+
+ Insn::LoadSExt { opnd, out } => {
+ movsx(cb, out.into(), opnd.into());
+ },
+
+ Insn::Mov { dest, src } => {
+ mov(cb, dest.into(), src.into());
+ },
+
+ // Load effective address
+ Insn::Lea { opnd, out } => {
+ lea(cb, out.into(), opnd.into());
+ },
+
+ // Load address of jump target
+ Insn::LeaJumpTarget { target, out } => {
+ if let Target::Label(label_idx) = target {
+ // Set output to the raw address of the label
+ cb.label_ref(*label_idx, 7, |cb, src_addr, dst_addr| {
+ let disp = dst_addr - src_addr;
+ lea(cb, Self::SCRATCH0, mem_opnd(8, RIP, disp.try_into().unwrap()));
+ });
+
+ mov(cb, out.into(), Self::SCRATCH0);
+ } else {
+ // Set output to the jump target's raw address
+ let target_code = target.unwrap_code_ptr();
+ let target_addr = target_code.raw_addr(cb).as_u64();
+                        // The constant encoded length is important for patching
+ movabs(cb, out.into(), target_addr);
+ }
+ },
+
+ // Push and pop to/from the C stack
+ Insn::CPush(opnd) => {
+ push(cb, opnd.into());
+ },
+ Insn::CPop { out } => {
+ pop(cb, out.into());
+ },
+ Insn::CPopInto(opnd) => {
+ pop(cb, opnd.into());
+ },
+
+                // Push and pop all caller-save registers and the flags
+                // to/from the C stack
+ Insn::CPushAll => {
+ let regs = Assembler::get_caller_save_regs();
+
+ for reg in regs {
+ push(cb, X86Opnd::Reg(reg));
+ }
+ pushfq(cb);
+ },
+ Insn::CPopAll => {
+ let regs = Assembler::get_caller_save_regs();
+
+ popfq(cb);
+ for reg in regs.into_iter().rev() {
+ pop(cb, X86Opnd::Reg(reg));
+ }
+ },
+
+ // C function call
+ Insn::CCall { fptr, .. } => {
+ call_ptr(cb, RAX, *fptr);
+ },
+
+ Insn::CRet(opnd) => {
+ // TODO: bias allocation towards return register
+ if *opnd != Opnd::Reg(C_RET_REG) {
+ mov(cb, RAX, opnd.into());
+ }
+
+ ret(cb);
+ },
+
+ // Compare
+ Insn::Cmp { left, right } => {
+ let num_bits = match right {
+ Opnd::Imm(value) => Some(imm_num_bits(*value)),
+ Opnd::UImm(value) => Some(uimm_num_bits(*value)),
+ _ => None
+ };
+
+                    // If the immediate is less than 64 bits (i.e. 32, 16, or 8 bits),
+                    // and the operand sizes match, then we can encode it as an
+                    // immediate in the instruction without moving it to a register
+                    // first. In other words, 64-bit immediates must always be moved
+                    // to a register before comparisons, whereas smaller sizes may be
+                    // encoded directly in the instruction.
+ if num_bits.is_some() && left.num_bits() == num_bits && num_bits.unwrap() < 64 {
+ cmp(cb, left.into(), right.into());
+ } else {
+ let emitted = emit_64bit_immediate(cb, right);
+ cmp(cb, left.into(), emitted);
+ }
+ }
+
+ // Test and set flags
+ Insn::Test { left, right } => {
+ let emitted = emit_64bit_immediate(cb, right);
+ test(cb, left.into(), emitted);
+ }
+
+ Insn::JmpOpnd(opnd) => {
+ jmp_rm(cb, opnd.into());
+ }
+
+                // Unconditional jump to a target
+ Insn::Jmp(target) => {
+ match compile_side_exit(*target, self, ocb)? {
+ Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jmp_ptr(cb, code_ptr),
+ Target::Label(label_idx) => jmp_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
+ }
+ }
+
+ Insn::Je(target) => {
+ match compile_side_exit(*target, self, ocb)? {
+ Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => je_ptr(cb, code_ptr),
+ Target::Label(label_idx) => je_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
+ }
+ }
+
+ Insn::Jne(target) => {
+ match compile_side_exit(*target, self, ocb)? {
+ Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jne_ptr(cb, code_ptr),
+ Target::Label(label_idx) => jne_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
+ }
+ }
+
+ Insn::Jl(target) => {
+ match compile_side_exit(*target, self, ocb)? {
+ Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jl_ptr(cb, code_ptr),
+ Target::Label(label_idx) => jl_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
+ }
+ },
+
+ Insn::Jg(target) => {
+ match compile_side_exit(*target, self, ocb)? {
+ Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jg_ptr(cb, code_ptr),
+ Target::Label(label_idx) => jg_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
+ }
+ },
+
+ Insn::Jge(target) => {
+ match compile_side_exit(*target, self, ocb)? {
+ Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jge_ptr(cb, code_ptr),
+ Target::Label(label_idx) => jge_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
+ }
+ },
+
+ Insn::Jbe(target) => {
+ match compile_side_exit(*target, self, ocb)? {
+ Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jbe_ptr(cb, code_ptr),
+ Target::Label(label_idx) => jbe_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
+ }
+ },
+
+ Insn::Jb(target) => {
+ match compile_side_exit(*target, self, ocb)? {
+ Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jb_ptr(cb, code_ptr),
+ Target::Label(label_idx) => jb_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
+ }
+ },
+
+ Insn::Jz(target) => {
+ match compile_side_exit(*target, self, ocb)? {
+ Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jz_ptr(cb, code_ptr),
+ Target::Label(label_idx) => jz_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
+ }
+ }
+
+ Insn::Jnz(target) => {
+ match compile_side_exit(*target, self, ocb)? {
+ Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jnz_ptr(cb, code_ptr),
+ Target::Label(label_idx) => jnz_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
+ }
+ }
+
+ Insn::Jo(target) |
+ Insn::JoMul(target) => {
+ match compile_side_exit(*target, self, ocb)? {
+ Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jo_ptr(cb, code_ptr),
+ Target::Label(label_idx) => jo_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
+ }
+ }
+
+ Insn::Joz(..) | Insn::Jonz(..) => unreachable!("Joz/Jonz should be unused for now"),
+
+ // Atomically increment a counter at a given memory location
+ Insn::IncrCounter { mem, value } => {
+ assert!(matches!(mem, Opnd::Mem(_)));
+ assert!(matches!(value, Opnd::UImm(_) | Opnd::Imm(_) ) );
+ write_lock_prefix(cb);
+ add(cb, mem.into(), value.into());
+ },
+
+ Insn::Breakpoint => int3(cb),
+
+ Insn::CSelZ { truthy, falsy, out } => {
+ emit_csel(cb, *truthy, *falsy, *out, cmovz, cmovnz);
+ },
+ Insn::CSelNZ { truthy, falsy, out } => {
+ emit_csel(cb, *truthy, *falsy, *out, cmovnz, cmovz);
+ },
+ Insn::CSelE { truthy, falsy, out } => {
+ emit_csel(cb, *truthy, *falsy, *out, cmove, cmovne);
+ },
+ Insn::CSelNE { truthy, falsy, out } => {
+ emit_csel(cb, *truthy, *falsy, *out, cmovne, cmove);
+ },
+ Insn::CSelL { truthy, falsy, out } => {
+ emit_csel(cb, *truthy, *falsy, *out, cmovl, cmovge);
+ },
+ Insn::CSelLE { truthy, falsy, out } => {
+ emit_csel(cb, *truthy, *falsy, *out, cmovle, cmovg);
+ },
+ Insn::CSelG { truthy, falsy, out } => {
+ emit_csel(cb, *truthy, *falsy, *out, cmovg, cmovle);
+ },
+ Insn::CSelGE { truthy, falsy, out } => {
+ emit_csel(cb, *truthy, *falsy, *out, cmovge, cmovl);
+ }
+ Insn::LiveReg { .. } => (), // just a reg alloc signal, no code
+ Insn::PadInvalPatch => {
+ let code_size = cb.get_write_pos().saturating_sub(std::cmp::max(start_write_pos, cb.page_start_pos()));
+ if code_size < cb.jmp_ptr_bytes() {
+ nop(cb, (cb.jmp_ptr_bytes() - code_size) as u32);
+ }
+ }
+ };
+
+ // On failure, jump to the next page and retry the current insn
+ if !had_dropped_bytes && cb.has_dropped_bytes() && cb.next_page(src_ptr, jmp_ptr) {
+ // Reset cb states before retrying the current Insn
+ cb.set_label_state(old_label_state);
+ } else {
+ insn_idx += 1;
+ gc_offsets.append(&mut insn_gc_offsets);
+ }
+ }
+
+ // Error if we couldn't write out everything
+ if cb.has_dropped_bytes() {
+ return None
+ } else {
+ // No bytes dropped, so the pos markers point to valid code
+ for (insn_idx, pos) in pos_markers {
+ if let Insn::PosMarker(callback) = self.insns.get(insn_idx).unwrap() {
+ callback(pos, &cb);
+ } else {
+ panic!("non-PosMarker in pos_markers insn_idx={insn_idx} {self:?}");
+ }
+ }
+
+ return Some(gc_offsets)
+ }
+ }
+
+ /// Optimize and compile the stored instructions
+ pub fn compile_with_regs(self, cb: &mut CodeBlock, ocb: Option<&mut OutlinedCb>, regs: Vec<Reg>) -> Option<(CodePtr, Vec<u32>)> {
+ let asm = self.x86_split();
+ let mut asm = asm.alloc_regs(regs);
+
+ // Create label instances in the code block
+ for (idx, name) in asm.label_names.iter().enumerate() {
+ let label_idx = cb.new_label(name.to_string());
+ assert!(label_idx == idx);
+ }
+
+ let mut ocb = ocb; // for &mut
+ let start_ptr = cb.get_write_ptr();
+ let gc_offsets = asm.x86_emit(cb, &mut ocb);
+
+ if let (Some(gc_offsets), false) = (gc_offsets, cb.has_dropped_bytes()) {
+ cb.link_labels();
+
+ Some((start_ptr, gc_offsets))
+ } else {
+ cb.clear_labels();
+
+ None
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::disasm::assert_disasm;
+ #[cfg(feature = "disasm")]
+ use crate::disasm::{unindent, disasm_addr_range};
+
+ use super::*;
+
+ fn setup_asm() -> (Assembler, CodeBlock) {
+ (Assembler::new(0), CodeBlock::new_dummy(1024))
+ }
+
+ #[test]
+ fn test_emit_add_lt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let _ = asm.add(Opnd::Reg(RAX_REG), Opnd::UImm(0xFF));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4889c04881c0ff000000");
+ }
+
+ #[test]
+ fn test_emit_add_gt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let _ = asm.add(Opnd::Reg(RAX_REG), Opnd::UImm(0xFFFF_FFFF_FFFF));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4889c049bbffffffffffff00004c01d8");
+ }
+
+ #[test]
+ fn test_emit_and_lt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let _ = asm.and(Opnd::Reg(RAX_REG), Opnd::UImm(0xFF));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4889c04881e0ff000000");
+ }
+
+ #[test]
+ fn test_emit_and_gt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let _ = asm.and(Opnd::Reg(RAX_REG), Opnd::UImm(0xFFFF_FFFF_FFFF));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4889c049bbffffffffffff00004c21d8");
+ }
+
+ #[test]
+ fn test_emit_cmp_lt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.cmp(Opnd::Reg(RAX_REG), Opnd::UImm(0xFF));
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ assert_eq!(format!("{:x}", cb), "4881f8ff000000");
+ }
+
+ #[test]
+ fn test_emit_cmp_gt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.cmp(Opnd::Reg(RAX_REG), Opnd::UImm(0xFFFF_FFFF_FFFF));
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ assert_eq!(format!("{:x}", cb), "49bbffffffffffff00004c39d8");
+ }
+
+ #[test]
+ fn test_emit_cmp_mem_16_bits_with_imm_16() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let shape_opnd = Opnd::mem(16, Opnd::Reg(RAX_REG), 6);
+
+ asm.cmp(shape_opnd, Opnd::UImm(0xF000));
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ assert_eq!(format!("{:x}", cb), "6681780600f0");
+ }
+
+ #[test]
+ fn test_emit_cmp_mem_32_bits_with_imm_32() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let shape_opnd = Opnd::mem(32, Opnd::Reg(RAX_REG), 4);
+
+ asm.cmp(shape_opnd, Opnd::UImm(0xF000_0000));
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ assert_eq!(format!("{:x}", cb), "817804000000f0");
+ }
+
+ #[test]
+ fn test_emit_or_lt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let _ = asm.or(Opnd::Reg(RAX_REG), Opnd::UImm(0xFF));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4889c04881c8ff000000");
+ }
+
+ #[test]
+ fn test_emit_or_gt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let _ = asm.or(Opnd::Reg(RAX_REG), Opnd::UImm(0xFFFF_FFFF_FFFF));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4889c049bbffffffffffff00004c09d8");
+ }
+
+ #[test]
+ fn test_emit_sub_lt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let _ = asm.sub(Opnd::Reg(RAX_REG), Opnd::UImm(0xFF));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4889c04881e8ff000000");
+ }
+
+ #[test]
+ fn test_emit_sub_gt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let _ = asm.sub(Opnd::Reg(RAX_REG), Opnd::UImm(0xFFFF_FFFF_FFFF));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4889c049bbffffffffffff00004c29d8");
+ }
+
+ #[test]
+ fn test_emit_test_lt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.test(Opnd::Reg(RAX_REG), Opnd::UImm(0xFF));
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ assert_eq!(format!("{:x}", cb), "f6c0ff");
+ }
+
+ #[test]
+ fn test_emit_test_gt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.test(Opnd::Reg(RAX_REG), Opnd::UImm(0xFFFF_FFFF_FFFF));
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ assert_eq!(format!("{:x}", cb), "49bbffffffffffff00004c85d8");
+ }
+
+ #[test]
+ fn test_emit_xor_lt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let _ = asm.xor(Opnd::Reg(RAX_REG), Opnd::UImm(0xFF));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4889c04881f0ff000000");
+ }
+
+ #[test]
+ fn test_emit_xor_gt_32_bits() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let _ = asm.xor(Opnd::Reg(RAX_REG), Opnd::UImm(0xFFFF_FFFF_FFFF));
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4889c049bbffffffffffff00004c31d8");
+ }
+
+ #[test]
+ fn test_merge_lea_reg() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let sp = asm.lea(Opnd::mem(64, SP, 8));
+ asm.mov(SP, sp); // should be merged to lea
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_disasm!(cb, "488d5b08", {"
+ 0x0: lea rbx, [rbx + 8]
+ "});
+ }
+
+ #[test]
+ fn test_merge_lea_mem() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let sp = asm.lea(Opnd::mem(64, SP, 8));
+ asm.mov(Opnd::mem(64, SP, 0), sp); // should NOT be merged to lea
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_disasm!(cb, "488d4308488903", {"
+ 0x0: lea rax, [rbx + 8]
+ 0x4: mov qword ptr [rbx], rax
+ "});
+ }
+
+ #[test]
+ fn test_replace_cmp_0() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let val = asm.load(Opnd::mem(64, SP, 8));
+ asm.cmp(val, 0.into());
+ let result = asm.csel_e(Qtrue.into(), Qfalse.into());
+ asm.mov(Opnd::Reg(RAX_REG), result);
+ asm.compile_with_num_regs(&mut cb, 2);
+
+ assert_eq!(format!("{:x}", cb), "488b43084885c0b814000000b900000000480f45c14889c0");
+ }
+
+ #[test]
+ fn test_merge_add_mov() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let sp = asm.add(CFP, Opnd::UImm(0x40));
+ asm.mov(CFP, sp); // should be merged to add
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4983c540");
+ }
+
+ #[test]
+ fn test_merge_sub_mov() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let sp = asm.sub(CFP, Opnd::UImm(0x40));
+        asm.mov(CFP, sp); // should be merged to sub
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4983ed40");
+ }
+
+ #[test]
+ fn test_merge_and_mov() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let sp = asm.and(CFP, Opnd::UImm(0x40));
+        asm.mov(CFP, sp); // should be merged to and
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4983e540");
+ }
+
+ #[test]
+ fn test_merge_or_mov() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let sp = asm.or(CFP, Opnd::UImm(0x40));
+        asm.mov(CFP, sp); // should be merged to or
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4983cd40");
+ }
+
+ #[test]
+ fn test_merge_xor_mov() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let sp = asm.xor(CFP, Opnd::UImm(0x40));
+        asm.mov(CFP, sp); // should be merged to xor
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_eq!(format!("{:x}", cb), "4983f540");
+ }
+
+ #[test]
+ fn test_reorder_c_args_no_cycle() {
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.ccall(0 as _, vec![
+ C_ARG_OPNDS[0], // mov rdi, rdi (optimized away)
+ C_ARG_OPNDS[1], // mov rsi, rsi (optimized away)
+ ]);
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ assert_disasm!(cb, "b800000000ffd0", {"
+ 0x0: mov eax, 0
+ 0x5: call rax
+ "});
+ }
+
+ #[test]
+ fn test_reorder_c_args_single_cycle() {
+ let (mut asm, mut cb) = setup_asm();
+
+ // rdi and rsi form a cycle
+ asm.ccall(0 as _, vec![
+ C_ARG_OPNDS[1], // mov rdi, rsi
+ C_ARG_OPNDS[0], // mov rsi, rdi
+ C_ARG_OPNDS[2], // mov rdx, rdx (optimized away)
+ ]);
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ assert_disasm!(cb, "4989f34889fe4c89dfb800000000ffd0", {"
+ 0x0: mov r11, rsi
+ 0x3: mov rsi, rdi
+ 0x6: mov rdi, r11
+ 0x9: mov eax, 0
+ 0xe: call rax
+ "});
+ }
+
+ #[test]
+ fn test_reorder_c_args_two_cycles() {
+ let (mut asm, mut cb) = setup_asm();
+
+ // rdi and rsi form a cycle, and rdx and rcx form another cycle
+ asm.ccall(0 as _, vec![
+ C_ARG_OPNDS[1], // mov rdi, rsi
+ C_ARG_OPNDS[0], // mov rsi, rdi
+ C_ARG_OPNDS[3], // mov rdx, rcx
+ C_ARG_OPNDS[2], // mov rcx, rdx
+ ]);
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ assert_disasm!(cb, "4989f34889fe4c89df4989cb4889d14c89dab800000000ffd0", {"
+ 0x0: mov r11, rsi
+ 0x3: mov rsi, rdi
+ 0x6: mov rdi, r11
+ 0x9: mov r11, rcx
+ 0xc: mov rcx, rdx
+ 0xf: mov rdx, r11
+ 0x12: mov eax, 0
+ 0x17: call rax
+ "});
+ }
+
+ #[test]
+ fn test_reorder_c_args_large_cycle() {
+ let (mut asm, mut cb) = setup_asm();
+
+ // rdi, rsi, and rdx form a cycle
+ asm.ccall(0 as _, vec![
+ C_ARG_OPNDS[1], // mov rdi, rsi
+ C_ARG_OPNDS[2], // mov rsi, rdx
+ C_ARG_OPNDS[0], // mov rdx, rdi
+ ]);
+ asm.compile_with_num_regs(&mut cb, 0);
+
+ assert_disasm!(cb, "4989f34889d64889fa4c89dfb800000000ffd0", {"
+ 0x0: mov r11, rsi
+ 0x3: mov rsi, rdx
+ 0x6: mov rdx, rdi
+ 0x9: mov rdi, r11
+ 0xc: mov eax, 0
+ 0x11: call rax
+ "});
+ }
+
+ #[test]
+ fn test_reorder_c_args_with_insn_out() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let rax = asm.load(Opnd::UImm(1));
+ let rcx = asm.load(Opnd::UImm(2));
+ let rdx = asm.load(Opnd::UImm(3));
+ // rcx and rdx form a cycle
+ asm.ccall(0 as _, vec![
+ rax, // mov rdi, rax
+ rcx, // mov rsi, rcx
+ rcx, // mov rdx, rcx
+ rdx, // mov rcx, rdx
+ ]);
+ asm.compile_with_num_regs(&mut cb, 3);
+
+ assert_disasm!(cb, "b801000000b902000000ba030000004889c74889ce4989cb4889d14c89dab800000000ffd0", {"
+ 0x0: mov eax, 1
+ 0x5: mov ecx, 2
+ 0xa: mov edx, 3
+ 0xf: mov rdi, rax
+ 0x12: mov rsi, rcx
+ 0x15: mov r11, rcx
+ 0x18: mov rcx, rdx
+ 0x1b: mov rdx, r11
+ 0x1e: mov eax, 0
+ 0x23: call rax
+ "});
+ }
+
+ #[test]
+ fn test_cmov_mem() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let top = Opnd::mem(64, SP, 0);
+ let ary_opnd = SP;
+ let array_len_opnd = Opnd::mem(64, SP, 16);
+
+ asm.cmp(array_len_opnd, 1.into());
+ let elem_opnd = asm.csel_g(Opnd::mem(64, ary_opnd, 0), Qnil.into());
+ asm.mov(top, elem_opnd);
+
+ asm.compile_with_num_regs(&mut cb, 1);
+
+ assert_disasm!(cb, "48837b1001b804000000480f4f03488903", {"
+ 0x0: cmp qword ptr [rbx + 0x10], 1
+ 0x5: mov eax, 4
+ 0xa: cmovg rax, qword ptr [rbx]
+ 0xe: mov qword ptr [rbx], rax
+ "});
+ }
+
+ #[test]
+ fn test_csel_split() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let stack_top = Opnd::mem(64, SP, 0);
+ let elem_opnd = asm.csel_ne(VALUE(0x7f22c88d1930).into(), Qnil.into());
+ asm.mov(stack_top, elem_opnd);
+
+ asm.compile_with_num_regs(&mut cb, 3);
+
+ assert_disasm!(cb, "48b830198dc8227f0000b904000000480f44c1488903", {"
+ 0x0: movabs rax, 0x7f22c88d1930
+ 0xa: mov ecx, 4
+ 0xf: cmove rax, rcx
+ 0x13: mov qword ptr [rbx], rax
+ "});
+ }
+
+ #[test]
+ fn test_mov_m32_imm32() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let shape_opnd = Opnd::mem(32, C_RET_OPND, 0);
+ asm.mov(shape_opnd, Opnd::UImm(0x8000_0001));
+ asm.mov(shape_opnd, Opnd::Imm(0x8000_0001));
+
+ asm.compile_with_num_regs(&mut cb, 0);
+ assert_disasm!(cb, "c70001000080c70001000080", {"
+ 0x0: mov dword ptr [rax], 0x80000001
+ 0x6: mov dword ptr [rax], 0x80000001
+ "});
+ }
+}
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
new file mode 100644
index 0000000000..0fbca85716
--- /dev/null
+++ b/yjit/src/codegen.rs
@@ -0,0 +1,11433 @@
+// We use the YARV bytecode constants which have a CRuby-style name
+#![allow(non_upper_case_globals)]
+
+use crate::asm::*;
+use crate::backend::ir::*;
+use crate::backend::current::TEMP_REGS;
+use crate::core::*;
+use crate::cruby::*;
+use crate::invariants::*;
+use crate::options::*;
+use crate::stats::*;
+use crate::utils::*;
+use CodegenStatus::*;
+use YARVOpnd::*;
+
+use std::cell::Cell;
+use std::cmp;
+use std::cmp::min;
+use std::collections::HashMap;
+use std::ffi::c_void;
+use std::ffi::CStr;
+use std::mem;
+use std::os::raw::c_int;
+use std::ptr;
+use std::rc::Rc;
+use std::cell::RefCell;
+use std::slice;
+
+pub use crate::virtualmem::CodePtr;
+
+/// Status returned by code generation functions
+#[derive(PartialEq, Debug)]
+enum CodegenStatus {
+ KeepCompiling,
+ EndBlock,
+}
+
+/// Code generation function signature
+type InsnGenFn = fn(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus>;
+
+/// Ephemeral code generation state.
+/// Represents a [crate::core::Block] while we build it.
+pub struct JITState<'a> {
+ /// Instruction sequence for the compiling block
+ pub iseq: IseqPtr,
+
+ /// The iseq index of the first instruction in the block
+ starting_insn_idx: IseqIdx,
+
+ /// The [Context] entering into the first instruction of the block
+ starting_ctx: Context,
+
+ /// The placement for the machine code of the [Block]
+ output_ptr: CodePtr,
+
+ /// Index of the current instruction being compiled
+ insn_idx: IseqIdx,
+
+ /// Opcode for the instruction being compiled
+ opcode: usize,
+
+ /// PC of the instruction being compiled
+ pc: *mut VALUE,
+
+    /// The stack_size when we started compiling the current instruction.
+ stack_size_for_pc: u8,
+
+ /// Execution context when compilation started
+ /// This allows us to peek at run-time values
+ ec: EcPtr,
+
+    /// The code block used for stubs, exits, and other code that is
+    /// not on the hot path.
+ outlined_code_block: &'a mut OutlinedCb,
+
+ /// The outgoing branches the block will have
+ pub pending_outgoing: Vec<PendingBranchRef>,
+
+    // --- Fields for block invalidation and invariants tracking below:
+    // Public mostly so that into_block, defined in the sibling module core,
+    // can partially move out of Self.
+
+ /// Whether we need to record the code address at
+ /// the end of this bytecode instruction for global invalidation
+ pub record_boundary_patch_point: bool,
+
+ /// Code for immediately exiting upon entry to the block.
+ /// Required for invalidation.
+ pub block_entry_exit: Option<CodePtr>,
+
+ /// A list of callable method entries that must be valid for the block to be valid.
+ pub method_lookup_assumptions: Vec<CmePtr>,
+
+    /// A list of basic operators that must not be redefined for the block to be valid.
+ pub bop_assumptions: Vec<(RedefinitionFlag, ruby_basic_operators)>,
+
+    /// A list of constant expression path segments that must not have
+    /// been written to for the block to be valid.
+ pub stable_constant_names_assumption: Option<*const ID>,
+
+ /// A list of classes that are not supposed to have a singleton class.
+ pub no_singleton_class_assumptions: Vec<VALUE>,
+
+    /// When true, the block is valid only when the base pointer is equal to the environment pointer.
+ pub no_ep_escape: bool,
+
+ /// When true, the block is valid only when there is a total of one ractor running
+ pub block_assumes_single_ractor: bool,
+
+ /// Address range for Linux perf's [JIT interface](https://github.com/torvalds/linux/blob/master/tools/perf/Documentation/jit-interface.txt)
+ perf_map: Rc::<RefCell::<Vec<(CodePtr, Option<CodePtr>, String)>>>,
+
+ /// Stack of symbol names for --yjit-perf
+ perf_stack: Vec<String>,
+
+ /// When true, this block is the first block compiled by gen_block_series().
+ first_block: bool,
+
+ /// A killswitch for bailing out of compilation. Used in rare situations where we need to fail
+ /// compilation deep in the stack (e.g. codegen failed for some jump target, but not due to
+    /// OOM). Because these situations are so rare it's not worth it to check and propagate at each
+ /// site. Instead, we check this once at the end.
+ block_abandoned: bool,
+}
+
+impl<'a> JITState<'a> {
+ pub fn new(blockid: BlockId, starting_ctx: Context, output_ptr: CodePtr, ec: EcPtr, ocb: &'a mut OutlinedCb, first_block: bool) -> Self {
+ JITState {
+ iseq: blockid.iseq,
+ starting_insn_idx: blockid.idx,
+ starting_ctx,
+ output_ptr,
+ insn_idx: 0,
+ opcode: 0,
+ pc: ptr::null_mut::<VALUE>(),
+ stack_size_for_pc: starting_ctx.get_stack_size(),
+ pending_outgoing: vec![],
+ ec,
+ outlined_code_block: ocb,
+ record_boundary_patch_point: false,
+ block_entry_exit: None,
+ method_lookup_assumptions: vec![],
+ bop_assumptions: vec![],
+ stable_constant_names_assumption: None,
+ no_singleton_class_assumptions: vec![],
+ no_ep_escape: false,
+ block_assumes_single_ractor: false,
+ perf_map: Rc::default(),
+ perf_stack: vec![],
+ first_block,
+ block_abandoned: false,
+ }
+ }
+
+ pub fn get_insn_idx(&self) -> IseqIdx {
+ self.insn_idx
+ }
+
+ pub fn get_iseq(&self) -> IseqPtr {
+ self.iseq
+ }
+
+ pub fn get_opcode(&self) -> usize {
+ self.opcode
+ }
+
+ pub fn get_pc(&self) -> *mut VALUE {
+ self.pc
+ }
+
+ pub fn get_starting_insn_idx(&self) -> IseqIdx {
+ self.starting_insn_idx
+ }
+
+ pub fn get_block_entry_exit(&self) -> Option<CodePtr> {
+ self.block_entry_exit
+ }
+
+ pub fn get_starting_ctx(&self) -> Context {
+ self.starting_ctx
+ }
+
+ pub fn get_arg(&self, arg_idx: isize) -> VALUE {
+        // insn_len requires a non-test config
+ #[cfg(not(test))]
+ assert!(insn_len(self.get_opcode()) > (arg_idx + 1).try_into().unwrap());
+ unsafe { *(self.pc.offset(arg_idx + 1)) }
+ }
+
+ /// Get [Self::outlined_code_block]
+ pub fn get_ocb(&mut self) -> &mut OutlinedCb {
+ self.outlined_code_block
+ }
+
+    /// Leave a code stub to re-enter the compiler at runtime when the program point being
+    /// compiled is reached. Should always be used in tail position like `return jit.defer_compilation(asm);`.
+ #[must_use]
+ fn defer_compilation(&mut self, asm: &mut Assembler) -> Option<CodegenStatus> {
+ if crate::core::defer_compilation(self, asm).is_err() {
+ // If we can't leave a stub, the block isn't usable and we have to bail.
+ self.block_abandoned = true;
+ }
+ Some(EndBlock)
+ }
+
+ /// Generate a branch with either end possibly stubbed out
+ fn gen_branch(
+ &mut self,
+ asm: &mut Assembler,
+ target0: BlockId,
+ ctx0: &Context,
+ target1: Option<BlockId>,
+ ctx1: Option<&Context>,
+ gen_fn: BranchGenFn,
+ ) {
+ if crate::core::gen_branch(self, asm, target0, ctx0, target1, ctx1, gen_fn).is_none() {
+ // If we can't meet the request for a branch, the code is
+ // essentially corrupt and we have to discard the block.
+ self.block_abandoned = true;
+ }
+ }
+
+ /// Wrapper for [self::gen_outlined_exit] with error handling.
+ fn gen_outlined_exit(&mut self, exit_pc: *mut VALUE, ctx: &Context) -> Option<CodePtr> {
+ let result = gen_outlined_exit(exit_pc, self.num_locals(), ctx, self.get_ocb());
+ if result.is_none() {
+ // When we can't have the exits, the code is incomplete and we have to bail.
+ self.block_abandoned = true;
+ }
+
+ result
+ }
+
+ /// Return true if the current ISEQ could escape an environment.
+ ///
+ /// As of vm_push_frame(), EP is always equal to BP. However, after pushing
+ /// a frame, some ISEQ setups call vm_bind_update_env(), which redirects EP.
+ /// Also, some method calls escape the environment to the heap.
+ fn escapes_ep(&self) -> bool {
+ match unsafe { get_iseq_body_type(self.iseq) } {
+            // <main> frame is always associated with TOPLEVEL_BINDING.
+ ISEQ_TYPE_MAIN |
+ // Kernel#eval uses a heap EP when a Binding argument is not nil.
+ ISEQ_TYPE_EVAL => true,
+ // If this ISEQ has previously escaped EP, give up the optimization.
+ _ if iseq_escapes_ep(self.iseq) => true,
+ _ => false,
+ }
+ }
+
+ // Get the index of the next instruction
+ fn next_insn_idx(&self) -> u16 {
+ self.insn_idx + insn_len(self.get_opcode()) as u16
+ }
+
+    /// Get the index of the instruction that follows the next instruction
+ fn next_next_insn_idx(&self) -> u16 {
+ let next_pc = unsafe { rb_iseq_pc_at_idx(self.iseq, self.next_insn_idx().into()) };
+ let next_opcode: usize = unsafe { rb_iseq_opcode_at_pc(self.iseq, next_pc) }.try_into().unwrap();
+ self.next_insn_idx() + insn_len(next_opcode) as u16
+ }
+
+    // Check if we are compiling the instruction at the stub PC with the target Context,
+    // meaning we are compiling the instruction that is next to execute
+ pub fn at_compile_target(&self) -> bool {
+ // If this is not the first block compiled by gen_block_series(),
+ // it might be compiling the same block again with a different Context.
+ // In that case, it should defer_compilation() and inspect the stack there.
+ if !self.first_block {
+ return false;
+ }
+
+ let ec_pc: *mut VALUE = unsafe { get_cfp_pc(self.get_cfp()) };
+ ec_pc == self.pc
+ }
+
+ // Peek at the nth topmost value on the Ruby stack.
+ // Returns the topmost value when n == 0.
+ pub fn peek_at_stack(&self, ctx: &Context, n: isize) -> VALUE {
+ assert!(self.at_compile_target());
+ assert!(n < ctx.get_stack_size() as isize);
+
+ // Note: this does not account for ctx->sp_offset because
+ // this is only available when hitting a stub, and while
+ // hitting a stub, cfp->sp needs to be up to date in case
+ // codegen functions trigger GC. See :stub-sp-flush:.
+ return unsafe {
+ let sp: *mut VALUE = get_cfp_sp(self.get_cfp());
+
+ *(sp.offset(-1 - n))
+ };
+ }
+
+ fn peek_at_self(&self) -> VALUE {
+ unsafe { get_cfp_self(self.get_cfp()) }
+ }
+
+ fn peek_at_local(&self, n: i32) -> VALUE {
+ assert!(self.at_compile_target());
+
+ let local_table_size: isize = unsafe { get_iseq_body_local_table_size(self.iseq) }
+ .try_into()
+ .unwrap();
+ assert!(n < local_table_size.try_into().unwrap());
+
+ unsafe {
+ let ep = get_cfp_ep(self.get_cfp());
+ let n_isize: isize = n.try_into().unwrap();
+ let offs: isize = -(VM_ENV_DATA_SIZE as isize) - local_table_size + n_isize + 1;
+ *ep.offset(offs)
+ }
+ }
+
+ fn peek_at_block_handler(&self, level: u32) -> VALUE {
+ assert!(self.at_compile_target());
+
+ unsafe {
+ let ep = get_cfp_ep_level(self.get_cfp(), level);
+ *ep.offset(VM_ENV_DATA_INDEX_SPECVAL as isize)
+ }
+ }
+
+ pub fn assume_expected_cfunc(
+ &mut self,
+ asm: &mut Assembler,
+ class: VALUE,
+ method: ID,
+ cfunc: *mut c_void,
+ ) -> bool {
+ let cme = unsafe { rb_callable_method_entry(class, method) };
+
+ if cme.is_null() {
+ return false;
+ }
+
+ let def_type = unsafe { get_cme_def_type(cme) };
+ if def_type != VM_METHOD_TYPE_CFUNC {
+ return false;
+ }
+ if unsafe { get_mct_func(get_cme_def_body_cfunc(cme)) } != cfunc {
+ return false;
+ }
+
+ self.assume_method_lookup_stable(asm, cme);
+
+ true
+ }
+
+ pub fn assume_method_lookup_stable(&mut self, asm: &mut Assembler, cme: CmePtr) -> Option<()> {
+ jit_ensure_block_entry_exit(self, asm)?;
+ self.method_lookup_assumptions.push(cme);
+
+ Some(())
+ }
+
+ /// Assume that objects of a given class will have no singleton class.
+ /// Return true if there has been no such singleton class since boot
+ /// and we can safely invalidate it.
+ pub fn assume_no_singleton_class(&mut self, asm: &mut Assembler, klass: VALUE) -> bool {
+ if jit_ensure_block_entry_exit(self, asm).is_none() {
+ return false; // out of space, give up
+ }
+ if has_singleton_class_of(klass) {
+ return false; // we've seen a singleton class. disable the optimization to avoid an invalidation loop.
+ }
+ self.no_singleton_class_assumptions.push(klass);
+ true
+ }
+
+ /// Assume that base pointer is equal to environment pointer in the current ISEQ.
+ /// Return true if it's safe to assume so.
+ fn assume_no_ep_escape(&mut self, asm: &mut Assembler) -> bool {
+ if jit_ensure_block_entry_exit(self, asm).is_none() {
+ return false; // out of space, give up
+ }
+ if self.escapes_ep() {
+ return false; // EP has been escaped in this ISEQ. disable the optimization to avoid an invalidation loop.
+ }
+ self.no_ep_escape = true;
+ true
+ }
+
+ fn get_cfp(&self) -> *mut rb_control_frame_struct {
+ unsafe { get_ec_cfp(self.ec) }
+ }
+
+ pub fn assume_stable_constant_names(&mut self, asm: &mut Assembler, id: *const ID) -> Option<()> {
+ jit_ensure_block_entry_exit(self, asm)?;
+ self.stable_constant_names_assumption = Some(id);
+
+ Some(())
+ }
+
+ pub fn queue_outgoing_branch(&mut self, branch: PendingBranchRef) {
+ self.pending_outgoing.push(branch)
+ }
+
+ /// Push a symbol for --yjit-perf
+ fn perf_symbol_push(&mut self, asm: &mut Assembler, symbol_name: &str) {
+ if !self.perf_stack.is_empty() {
+ self.perf_symbol_range_end(asm);
+ }
+ self.perf_stack.push(symbol_name.to_string());
+ self.perf_symbol_range_start(asm, symbol_name);
+ }
+
+ /// Pop the stack-top symbol for --yjit-perf
+ fn perf_symbol_pop(&mut self, asm: &mut Assembler) {
+ self.perf_symbol_range_end(asm);
+ self.perf_stack.pop();
+ if let Some(symbol_name) = self.perf_stack.get(0) {
+ self.perf_symbol_range_start(asm, symbol_name);
+ }
+ }
+
+ /// Mark the start address of a symbol to be reported to perf
+ fn perf_symbol_range_start(&self, asm: &mut Assembler, symbol_name: &str) {
+ let symbol_name = format!("[JIT] {}", symbol_name);
+ let syms = self.perf_map.clone();
+ asm.pos_marker(move |start, _| syms.borrow_mut().push((start, None, symbol_name.clone())));
+ }
+
+ /// Mark the end address of a symbol to be reported to perf
+ fn perf_symbol_range_end(&self, asm: &mut Assembler) {
+ let syms = self.perf_map.clone();
+ asm.pos_marker(move |end, _| {
+ if let Some((_, ref mut end_store, _)) = syms.borrow_mut().last_mut() {
+ assert_eq!(None, *end_store);
+ *end_store = Some(end);
+ }
+ });
+ }
+
+ /// Flush addresses and symbols to /tmp/perf-{pid}.map
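+    /// Each line has the form `<start-addr-hex> <size-hex> <name>`, e.g.
+    /// (address and size illustrative): `7f3a2c001000 2a [JIT] gen_example`.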
+ fn flush_perf_symbols(&self, cb: &CodeBlock) {
+ assert_eq!(0, self.perf_stack.len());
+ let path = format!("/tmp/perf-{}.map", std::process::id());
+ let mut f = std::io::BufWriter::new(std::fs::File::options().create(true).append(true).open(path).unwrap());
+ for sym in self.perf_map.borrow().iter() {
+ if let (start, Some(end), name) = sym {
+                // The code may straddle pages; emit one map line per contiguous range of the symbol.
+ for (inline_start, inline_end) in cb.writable_addrs(*start, *end) {
+ use std::io::Write;
+ let code_size = inline_end - inline_start;
+ writeln!(f, "{inline_start:x} {code_size:x} {name}").unwrap();
+ }
+ }
+ }
+ }
+
+ /// Return true if we're compiling a send-like instruction, not an opt_* instruction.
+ pub fn is_sendish(&self) -> bool {
+ match unsafe { rb_iseq_opcode_at_pc(self.iseq, self.pc) } as u32 {
+ YARVINSN_send |
+ YARVINSN_opt_send_without_block |
+ YARVINSN_invokesuper => true,
+ _ => false,
+ }
+ }
+
+ /// Return the number of locals in the current ISEQ
+ pub fn num_locals(&self) -> u32 {
+ unsafe { get_iseq_body_local_table_size(self.iseq) }
+ }
+}
+
+/// Macro to call jit.perf_symbol_push() without evaluating arguments when
+/// the option is turned off, which is useful for avoiding string allocation.
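+/// Example: jit_perf_symbol_push!(jit, &mut asm, &insn_name(opcode), PerfMap::Codegen);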
+macro_rules! jit_perf_symbol_push {
+ ($jit:expr, $asm:expr, $symbol_name:expr, $perf_map:expr) => {
+ if get_option!(perf_map) == Some($perf_map) {
+ $jit.perf_symbol_push($asm, $symbol_name);
+ }
+ };
+}
+
+/// Macro to call jit.perf_symbol_pop(), for consistency with jit_perf_symbol_push!().
+macro_rules! jit_perf_symbol_pop {
+ ($jit:expr, $asm:expr, $perf_map:expr) => {
+ if get_option!(perf_map) == Some($perf_map) {
+ $jit.perf_symbol_pop($asm);
+ }
+ };
+}
+
+/// Macro to push and pop a perf symbol around a function call.
+macro_rules! perf_call {
+ // perf_call!("prefix: ", func(...)) uses "prefix: func" as a symbol.
+ ($prefix:expr, $func_name:ident($jit:expr, $asm:expr$(, $arg:expr)*$(,)?) ) => {
+ {
+ jit_perf_symbol_push!($jit, $asm, &format!("{}{}", $prefix, stringify!($func_name)), PerfMap::Codegen);
+ let ret = $func_name($jit, $asm, $($arg),*);
+ jit_perf_symbol_pop!($jit, $asm, PerfMap::Codegen);
+ ret
+ }
+ };
+ // perf_call! { func(...) } uses "func" as a symbol.
+ { $func_name:ident($jit:expr, $asm:expr$(, $arg:expr)*$(,)?) } => {
+ perf_call!("", $func_name($jit, $asm, $($arg),*))
+ };
+}
+
+use crate::codegen::JCCKinds::*;
+use crate::log::Log;
+
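+/// Jump condition kinds, named after x86 jump mnemonics (e.g. JCC_JNE = jump if
+/// not equal), used to pick which conditional jump a guard emits.
+/// JCC_JO_MUL is a multiplication-overflow check.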
+#[allow(non_camel_case_types, unused)]
+pub enum JCCKinds {
+ JCC_JNE,
+ JCC_JNZ,
+ JCC_JZ,
+ JCC_JE,
+ JCC_JB,
+ JCC_JBE,
+ JCC_JNA,
+ JCC_JNAE,
+ JCC_JO_MUL,
+}
+
+/// Generate code to increment a given counter. With --yjit-trace-exits=counter,
+/// the counter is traced when it's incremented by this function.
+#[inline(always)]
+fn gen_counter_incr(jit: &JITState, asm: &mut Assembler, counter: Counter) {
+ gen_counter_incr_with_pc(asm, counter, jit.pc);
+}
+
+/// Same as gen_counter_incr(), but takes a PC instead of a JITState.
+#[inline(always)]
+fn gen_counter_incr_with_pc(asm: &mut Assembler, counter: Counter, pc: *mut VALUE) {
+ gen_counter_incr_without_pc(asm, counter);
+
+ // Trace a counter if --yjit-trace-exits=counter is given.
+ // TraceExits::All is handled by gen_exit().
+ if get_option!(trace_exits) == Some(TraceExits::Counter(counter)) {
+ with_caller_saved_temp_regs(asm, |asm| {
+ asm.ccall(rb_yjit_record_exit_stack as *const u8, vec![Opnd::const_ptr(pc as *const u8)]);
+ });
+ }
+}
+
+/// Generate code to increment a given counter. Not traced by --yjit-trace-exits=counter
+/// unlike gen_counter_incr() or gen_counter_incr_with_pc().
+#[inline(always)]
+fn gen_counter_incr_without_pc(asm: &mut Assembler, counter: Counter) {
+ // Assert that default counters are not incremented by generated code as this would impact performance
+ assert!(!DEFAULT_COUNTERS.contains(&counter), "gen_counter_incr incremented {:?}", counter);
+
+ if get_option!(gen_stats) {
+ asm_comment!(asm, "increment counter {}", counter.get_name());
+ let ptr = get_counter_ptr(&counter.get_name());
+ let ptr_reg = asm.load(Opnd::const_ptr(ptr as *const u8));
+ let counter_opnd = Opnd::mem(64, ptr_reg, 0);
+
+ // Increment and store the updated value
+ asm.incr_counter(counter_opnd, Opnd::UImm(1));
+ }
+}
+
+// Save the incremented PC on the CFP
+// This is necessary when callees can raise or allocate
+fn jit_save_pc(jit: &JITState, asm: &mut Assembler) {
+ let pc: *mut VALUE = jit.get_pc();
+ let ptr: *mut VALUE = unsafe {
+ let cur_insn_len = insn_len(jit.get_opcode()) as isize;
+ pc.offset(cur_insn_len)
+ };
+
+ asm_comment!(asm, "save PC to CFP");
+ asm.mov(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_PC), Opnd::const_ptr(ptr as *const u8));
+}
+
+/// Save the current SP on the CFP
+/// This realigns the interpreter SP with the JIT SP
+/// Note: this will change the current value of REG_SP,
+/// which could invalidate memory operands
+fn gen_save_sp(asm: &mut Assembler) {
+ gen_save_sp_with_offset(asm, 0);
+}
+
+/// Save the current SP + offset on the CFP
+fn gen_save_sp_with_offset(asm: &mut Assembler, offset: i8) {
+ if asm.ctx.get_sp_offset() != -offset {
+ asm_comment!(asm, "save SP to CFP");
+ let stack_pointer = asm.ctx.sp_opnd(offset as i32);
+ let sp_addr = asm.lea(stack_pointer);
+ asm.mov(SP, sp_addr);
+ let cfp_sp_opnd = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP);
+ asm.mov(cfp_sp_opnd, SP);
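+        // REG_SP was just moved, so rebase the context: the same stack slots
+        // are now reached with sp_offset == -offset.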
+ asm.ctx.set_sp_offset(-offset);
+ }
+}
+
+/// Basically jit_prepare_non_leaf_call(), but this registers the current PC
+/// to lazily push a C method frame when it's necessary.
+fn jit_prepare_lazy_frame_call(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ cme: *const rb_callable_method_entry_t,
+ recv_opnd: YARVOpnd,
+) -> bool {
+    // We can use this only when the receiver is on the stack.
+ let recv_idx = match recv_opnd {
+ StackOpnd(recv_idx) => recv_idx,
+ _ => unreachable!("recv_opnd must be on stack, but got: {:?}", recv_opnd),
+ };
+
+ // Get the next PC. jit_save_pc() saves that PC.
+ let pc: *mut VALUE = unsafe {
+ let cur_insn_len = insn_len(jit.get_opcode()) as isize;
+ jit.get_pc().offset(cur_insn_len)
+ };
+
+ let pc_to_cfunc = CodegenGlobals::get_pc_to_cfunc();
+ match pc_to_cfunc.get(&pc) {
+ Some(&(other_cme, _)) if other_cme != cme => {
+ // Bail out if it's not the only cme on this callsite.
+ incr_counter!(lazy_frame_failure);
+ return false;
+ }
+ _ => {
+ // Let rb_yjit_lazy_push_frame() lazily push a C frame on this PC.
+ incr_counter!(lazy_frame_count);
+ pc_to_cfunc.insert(pc, (cme, recv_idx));
+ }
+ }
+
+ // Save the PC to trigger a lazy frame push, and save the SP to get the receiver.
+ // The C func may call a method that doesn't raise, so prepare for invalidation too.
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Make sure we're ready for calling rb_vm_push_cfunc_frame().
+ let cfunc_argc = unsafe { get_mct_argc(get_cme_def_body_cfunc(cme)) };
+ if cfunc_argc != -1 {
+ assert_eq!(recv_idx as i32, cfunc_argc); // verify the receiver index if possible
+ }
+ assert!(asm.get_leaf_ccall()); // It checks the stack canary we set for known_cfunc_codegen.
+
+ true
+}
+
+/// jit_save_pc() + gen_save_sp(). Should be used before calling a routine that could:
+/// - Perform GC allocation
+/// - Take the VM lock through RB_VM_LOCK_ENTER()
+/// - Perform Ruby method call
+///
+/// If the routine doesn't call arbitrary methods, use jit_prepare_call_with_gc() instead.
+fn jit_prepare_non_leaf_call(
+ jit: &mut JITState,
+ asm: &mut Assembler
+) {
+ // Prepare for GC. Setting PC also prepares for showing a backtrace.
+ jit.record_boundary_patch_point = true; // VM lock could trigger invalidation
+ jit_save_pc(jit, asm); // for allocation tracing
+ gen_save_sp(asm); // protect objects from GC
+
+ // In case the routine calls Ruby methods, it can set local variables
+ // through Kernel#binding, rb_debug_inspector API, and other means.
+ asm.clear_local_types();
+}
+
+/// jit_save_pc() + gen_save_sp(). Should be used before calling a routine that could:
+/// - Perform GC allocation
+/// - Take the VM lock through RB_VM_LOCK_ENTER()
+fn jit_prepare_call_with_gc(
+ jit: &mut JITState,
+ asm: &mut Assembler
+) {
+ jit.record_boundary_patch_point = true; // VM lock could trigger invalidation
+ jit_save_pc(jit, asm); // for allocation tracing
+ gen_save_sp(asm); // protect objects from GC
+
+ // Expect a leaf ccall(). You should use jit_prepare_non_leaf_call() if otherwise.
+ asm.expect_leaf_ccall();
+}
+
+/// Record the current codeblock write position for rewriting into a jump into
+/// the outlined block later. Used to implement global code invalidation.
+fn record_global_inval_patch(asm: &mut Assembler, outline_block_target_pos: CodePtr) {
+ // We add a padding before pos_marker so that the previous patch will not overlap this.
+ // jump_to_next_insn() puts a patch point at the end of the block in fallthrough cases.
+ // In the fallthrough case, the next block should start with the same Context, so the
+ // patch is fine, but it should not overlap another patch.
+ asm.pad_inval_patch();
+ asm.pos_marker(move |code_ptr, cb| {
+ CodegenGlobals::push_global_inval_patch(code_ptr, outline_block_target_pos, cb);
+ });
+}
+
+/// Verify the ctx's types and mappings against the compile-time stack, self,
+/// and locals.
+fn verify_ctx(jit: &JITState, ctx: &Context) {
+ fn obj_info_str<'a>(val: VALUE) -> &'a str {
+ unsafe { CStr::from_ptr(rb_obj_info(val)).to_str().unwrap() }
+ }
+
+    // Some types such as CString only assert the class field of the object
+    // when there has never been a singleton class created for objects of that class.
+    // Once a singleton class has been created, they become their weaker
+    // `T*` variants, and more objects should pass the verification.
+ fn relax_type_with_singleton_class_assumption(ty: Type) -> Type {
+ if let Type::CString | Type::CArray | Type::CHash = ty {
+ if has_singleton_class_of(ty.known_class().unwrap()) {
+ match ty {
+ Type::CString => return Type::TString,
+ Type::CArray => return Type::TArray,
+ Type::CHash => return Type::THash,
+ _ => (),
+ }
+ }
+ }
+
+ ty
+ }
+
+ // Only able to check types when at current insn
+ assert!(jit.at_compile_target());
+
+ let self_val = jit.peek_at_self();
+ let self_val_type = Type::from(self_val);
+ let learned_self_type = ctx.get_opnd_type(SelfOpnd);
+ let learned_self_type = relax_type_with_singleton_class_assumption(learned_self_type);
+
+ // Verify self operand type
+ if self_val_type.diff(learned_self_type) == TypeDiff::Incompatible {
+ panic!(
+ "verify_ctx: ctx self type ({:?}) incompatible with actual value of self {}",
+ ctx.get_opnd_type(SelfOpnd),
+ obj_info_str(self_val)
+ );
+ }
+
+ // Verify stack operand types
+ let top_idx = cmp::min(ctx.get_stack_size(), MAX_CTX_TEMPS as u8);
+ for i in 0..top_idx {
+ let learned_mapping = ctx.get_opnd_mapping(StackOpnd(i));
+ let learned_type = ctx.get_opnd_type(StackOpnd(i));
+ let learned_type = relax_type_with_singleton_class_assumption(learned_type);
+
+ let stack_val = jit.peek_at_stack(ctx, i as isize);
+ let val_type = Type::from(stack_val);
+
+ match learned_mapping {
+ TempMapping::MapToSelf => {
+ if self_val != stack_val {
+ panic!(
+ "verify_ctx: stack value was mapped to self, but values did not match!\n stack: {}\n self: {}",
+ obj_info_str(stack_val),
+ obj_info_str(self_val)
+ );
+ }
+ }
+ TempMapping::MapToLocal(local_idx) => {
+ let local_val = jit.peek_at_local(local_idx.into());
+ if local_val != stack_val {
+ panic!(
+ "verify_ctx: stack value was mapped to local, but values did not match\n stack: {}\n local {}: {}",
+ obj_info_str(stack_val),
+ local_idx,
+ obj_info_str(local_val)
+ );
+ }
+ }
+ TempMapping::MapToStack(_) => {}
+ }
+
+ // If the actual type differs from the learned type
+ if val_type.diff(learned_type) == TypeDiff::Incompatible {
+ panic!(
+ "verify_ctx: ctx type ({:?}) incompatible with actual value on stack: {} ({:?})",
+ learned_type,
+ obj_info_str(stack_val),
+ val_type,
+ );
+ }
+ }
+
+ // Verify local variable types
+ let local_table_size = unsafe { get_iseq_body_local_table_size(jit.iseq) };
+ let top_idx: usize = cmp::min(local_table_size as usize, MAX_CTX_TEMPS);
+ for i in 0..top_idx {
+ let learned_type = ctx.get_local_type(i);
+ let learned_type = relax_type_with_singleton_class_assumption(learned_type);
+ let local_val = jit.peek_at_local(i as i32);
+ let local_type = Type::from(local_val);
+
+ if local_type.diff(learned_type) == TypeDiff::Incompatible {
+ panic!(
+ "verify_ctx: ctx type ({:?}) incompatible with actual value of local: {} (type {:?})",
+ learned_type,
+ obj_info_str(local_val),
+ local_type
+ );
+ }
+ }
+}
+
+// Fill code_for_exit_from_stub. This is used by branch_stub_hit() to exit
+// to the interpreter when it cannot service a stub by generating new code.
+// Before coming here, branch_stub_hit() takes care of fully reconstructing
+// interpreter state.
+fn gen_stub_exit(ocb: &mut OutlinedCb) -> Option<CodePtr> {
+ let ocb = ocb.unwrap();
+ let mut asm = Assembler::new_without_iseq();
+
+ gen_counter_incr_without_pc(&mut asm, Counter::exit_from_branch_stub);
+
+ asm_comment!(asm, "exit from branch stub");
+ asm.cpop_into(SP);
+ asm.cpop_into(EC);
+ asm.cpop_into(CFP);
+
+ asm.frame_teardown();
+
+ asm.cret(Qundef.into());
+
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
+}
+
+/// Generate an exit to return to the interpreter
+fn gen_exit(exit_pc: *mut VALUE, asm: &mut Assembler) {
+ #[cfg(not(test))]
+ asm_comment!(asm, "exit to interpreter on {}", {
+ let opcode = unsafe { rb_vm_insn_addr2opcode((*exit_pc).as_ptr()) };
+ insn_name(opcode as usize)
+ });
+
+ if asm.ctx.is_return_landing() {
+ asm.mov(SP, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP));
+ let top = asm.stack_push(Type::Unknown);
+ asm.mov(top, C_RET_OPND);
+ }
+
+ // Spill stack temps before returning to the interpreter
+ asm.spill_regs();
+
+    // Generate the code to exit to the interpreter
+ // Write the adjusted SP back into the CFP
+ if asm.ctx.get_sp_offset() != 0 {
+ let sp_opnd = asm.lea(asm.ctx.sp_opnd(0));
+ asm.mov(
+ Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP),
+ sp_opnd
+ );
+ }
+
+ // Update CFP->PC
+ asm.mov(
+ Opnd::mem(64, CFP, RUBY_OFFSET_CFP_PC),
+ Opnd::const_ptr(exit_pc as *const u8)
+ );
+
+ // Accumulate stats about interpreter exits
+ if get_option!(gen_stats) {
+ asm.ccall(
+ rb_yjit_count_side_exit_op as *const u8,
+ vec![Opnd::const_ptr(exit_pc as *const u8)]
+ );
+
+ // If --yjit-trace-exits is enabled, record the exit stack while recording
+ // the side exits. TraceExits::Counter is handled by gen_counted_exit().
+ if get_option!(trace_exits) == Some(TraceExits::All) {
+ asm.ccall(
+ rb_yjit_record_exit_stack as *const u8,
+ vec![Opnd::const_ptr(exit_pc as *const u8)]
+ );
+ }
+ }
+
+ asm.cpop_into(SP);
+ asm.cpop_into(EC);
+ asm.cpop_into(CFP);
+
+ asm.frame_teardown();
+
+ asm.cret(Qundef.into());
+}
+
+/// :side-exit:
+/// Get an exit for the current instruction in the outlined block. The code
+/// for each instruction often begins with several guards before proceeding
+/// to do work. When guards fail, an option we have is to exit to the
+/// interpreter at an instruction boundary. The piece of code that takes
+/// care of reconstructing interpreter state and exiting out of generated
+/// code is called the side exit.
+///
+/// No guards change the logic for reconstructing interpreter state at the
+/// moment, so there is one unique side exit for each context. Note that
+/// it's incorrect to jump to the side exit after any ctx stack push operations
+/// since they change the logic required for reconstructing interpreter state.
+///
+/// If you're in [the codegen module][self], use [JITState::gen_outlined_exit]
+/// instead of calling this directly.
+#[must_use]
+pub fn gen_outlined_exit(exit_pc: *mut VALUE, num_locals: u32, ctx: &Context, ocb: &mut OutlinedCb) -> Option<CodePtr> {
+ let mut cb = ocb.unwrap();
+ let mut asm = Assembler::new(num_locals);
+ asm.ctx = *ctx;
+ asm.set_reg_mapping(ctx.get_reg_mapping());
+
+ gen_exit(exit_pc, &mut asm);
+
+ asm.compile(&mut cb, None).map(|(code_ptr, _)| code_ptr)
+}
+
+/// Get a side exit. Increment a counter in it if --yjit-stats is enabled.
+pub fn gen_counted_exit(exit_pc: *mut VALUE, side_exit: CodePtr, ocb: &mut OutlinedCb, counter: Option<Counter>) -> Option<CodePtr> {
+ // The counter is only incremented when stats are enabled
+ if !get_option!(gen_stats) {
+ return Some(side_exit);
+ }
+ let counter = match counter {
+ Some(counter) => counter,
+ None => return Some(side_exit),
+ };
+
+ let mut asm = Assembler::new_without_iseq();
+
+ // Increment a counter
+ gen_counter_incr_with_pc(&mut asm, counter, exit_pc);
+
+ // Jump to the existing side exit
+ asm.jmp(Target::CodePtr(side_exit));
+
+ let ocb = ocb.unwrap();
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
+}
+
+/// Preserve caller-saved stack temp registers during the call of a given block
+fn with_caller_saved_temp_regs<F, R>(asm: &mut Assembler, block: F) -> R where F: FnOnce(&mut Assembler) -> R {
+ for &reg in caller_saved_temp_regs() {
+ asm.cpush(Opnd::Reg(reg)); // save stack temps
+ }
+ let ret = block(asm);
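+    // Pop in reverse push order so each register is restored with its own saved value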
+ for &reg in caller_saved_temp_regs().rev() {
+ asm.cpop_into(Opnd::Reg(reg)); // restore stack temps
+ }
+ ret
+}
+
+// Ensure that there is an exit for the start of the block being compiled.
+// Block invalidation uses this exit.
+#[must_use]
+pub fn jit_ensure_block_entry_exit(jit: &mut JITState, asm: &mut Assembler) -> Option<()> {
+ if jit.block_entry_exit.is_some() {
+ return Some(());
+ }
+
+ let block_starting_context = &jit.get_starting_ctx();
+
+ // If we're compiling the first instruction in the block.
+ if jit.insn_idx == jit.starting_insn_idx {
+ // Generate the exit with the cache in Assembler.
+ let side_exit_context = SideExitContext::new(jit.pc, *block_starting_context);
+ let entry_exit = asm.get_side_exit(&side_exit_context, None, jit.get_ocb());
+ jit.block_entry_exit = Some(entry_exit?);
+ } else {
+ let block_entry_pc = unsafe { rb_iseq_pc_at_idx(jit.iseq, jit.starting_insn_idx.into()) };
+ jit.block_entry_exit = Some(jit.gen_outlined_exit(block_entry_pc, block_starting_context)?);
+ }
+
+ Some(())
+}
+
+// Landing code for when c_return tracing is enabled. See full_cfunc_return().
+fn gen_full_cfunc_return(ocb: &mut OutlinedCb) -> Option<CodePtr> {
+ let ocb = ocb.unwrap();
+ let mut asm = Assembler::new_without_iseq();
+
+ // This chunk of code expects REG_EC to be filled properly and
+ // RAX to contain the return value of the C method.
+
+ asm_comment!(asm, "full cfunc return");
+ asm.ccall(
+ rb_full_cfunc_return as *const u8,
+ vec![EC, C_RET_OPND]
+ );
+
+ // Count the exit
+ gen_counter_incr_without_pc(&mut asm, Counter::traced_cfunc_return);
+
+ // Return to the interpreter
+ asm.cpop_into(SP);
+ asm.cpop_into(EC);
+ asm.cpop_into(CFP);
+
+ asm.frame_teardown();
+
+ asm.cret(Qundef.into());
+
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
+}
+
+/// Generate a continuation for leave that exits to the interpreter at REG_CFP->pc.
+/// This is used by gen_leave() and gen_entry_prologue()
+fn gen_leave_exit(ocb: &mut OutlinedCb) -> Option<CodePtr> {
+ let ocb = ocb.unwrap();
+ let mut asm = Assembler::new_without_iseq();
+
+ // gen_leave() fully reconstructs interpreter state and leaves the
+ // return value in C_RET_OPND before coming here.
+ let ret_opnd = asm.live_reg_opnd(C_RET_OPND);
+
+ // Every exit to the interpreter should be counted
+ gen_counter_incr_without_pc(&mut asm, Counter::leave_interp_return);
+
+ asm_comment!(asm, "exit from leave");
+ asm.cpop_into(SP);
+ asm.cpop_into(EC);
+ asm.cpop_into(CFP);
+
+ asm.frame_teardown();
+
+ asm.cret(ret_opnd);
+
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
+}
+
+// Increment SP and transfer execution to the interpreter after jit_exec_exception().
+// On jit_exec_exception(), we need to return Qundef so that the interpreter keeps
+// executing the caller's non-FINISH frames. We also need to increment SP to push the
+// return value onto the caller's stack, which is different from gen_stub_exit().
+fn gen_leave_exception(ocb: &mut OutlinedCb) -> Option<CodePtr> {
+ let ocb = ocb.unwrap();
+ let mut asm = Assembler::new_without_iseq();
+
+ // gen_leave() leaves the return value in C_RET_OPND before coming here.
+ let ruby_ret_val = asm.live_reg_opnd(C_RET_OPND);
+
+ // Every exit to the interpreter should be counted
+ gen_counter_incr_without_pc(&mut asm, Counter::leave_interp_return);
+
+ asm_comment!(asm, "push return value through cfp->sp");
+ let cfp_sp = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP);
+ let sp = asm.load(cfp_sp);
+ asm.mov(Opnd::mem(64, sp, 0), ruby_ret_val);
+ let new_sp = asm.add(sp, SIZEOF_VALUE.into());
+ asm.mov(cfp_sp, new_sp);
+
+ asm_comment!(asm, "exit from exception");
+ asm.cpop_into(SP);
+ asm.cpop_into(EC);
+ asm.cpop_into(CFP);
+
+ asm.frame_teardown();
+
+ // Execute vm_exec_core
+ asm.cret(Qundef.into());
+
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
+}
+
+// Generate a runtime guard that ensures the PC is at the expected
+// instruction index in the iseq, and otherwise takes an entry stub
+// that generates another check and entry.
+// This handles methods with optional parameters: when such a method
+// is called, its entry PC isn't necessarily 0.
+pub fn gen_entry_chain_guard(
+ asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
+ blockid: BlockId,
+) -> Option<PendingEntryRef> {
+ let entry = new_pending_entry();
+ let stub_addr = gen_entry_stub(entry.uninit_entry.as_ptr() as usize, ocb)?;
+
+ let pc_opnd = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_PC);
+ let expected_pc = unsafe { rb_iseq_pc_at_idx(blockid.iseq, blockid.idx.into()) };
+ let expected_pc_opnd = Opnd::const_ptr(expected_pc as *const u8);
+
+ asm_comment!(asm, "guard expected PC");
+ asm.cmp(pc_opnd, expected_pc_opnd);
+
+ asm.mark_entry_start(&entry);
+ asm.jne(stub_addr.into());
+ asm.mark_entry_end(&entry);
+ return Some(entry);
+}
+
+/// Compile an interpreter entry block to be inserted into an iseq
+/// Returns None if compilation fails.
+/// If jit_exception is true, compile JIT code for handling exceptions.
+/// See jit_compile_exception() for details.
+pub fn gen_entry_prologue(
+ cb: &mut CodeBlock,
+ ocb: &mut OutlinedCb,
+ blockid: BlockId,
+ stack_size: u8,
+ jit_exception: bool,
+) -> Option<(CodePtr, RegMapping)> {
+ let iseq = blockid.iseq;
+ let code_ptr = cb.get_write_ptr();
+
+ let mut asm = Assembler::new(unsafe { get_iseq_body_local_table_size(iseq) });
+ asm_comment!(asm, "YJIT entry point: {}", iseq_get_location(iseq, 0));
+
+ asm.frame_setup();
+
+ // Save the CFP, EC, SP registers to the C stack
+ asm.cpush(CFP);
+ asm.cpush(EC);
+ asm.cpush(SP);
+
+ // We are passed EC and CFP as arguments
+ asm.mov(EC, C_ARG_OPNDS[0]);
+ asm.mov(CFP, C_ARG_OPNDS[1]);
+
+ // Load the current SP from the CFP into REG_SP
+ asm.mov(SP, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP));
+
+ // Setup cfp->jit_return
+ // If this is an exception handler entry point
+ if jit_exception {
+ // On jit_exec_exception(), it's NOT safe to return a non-Qundef value
+ // from a non-FINISH frame. This function fixes that problem.
+ // See [jit_compile_exception] for details.
+ asm.ccall(
+ rb_yjit_set_exception_return as *mut u8,
+ vec![
+ CFP,
+ Opnd::const_ptr(CodegenGlobals::get_leave_exit_code().raw_ptr(cb)),
+ Opnd::const_ptr(CodegenGlobals::get_leave_exception_code().raw_ptr(cb)),
+ ],
+ );
+ } else {
+ // On jit_exec() or JIT_EXEC(), it's safe to return a non-Qundef value
+ // on the entry frame. See [jit_compile] for details.
+ asm.mov(
+ Opnd::mem(64, CFP, RUBY_OFFSET_CFP_JIT_RETURN),
+ Opnd::const_ptr(CodegenGlobals::get_leave_exit_code().raw_ptr(cb)),
+ );
+ }
+
+ // We're compiling iseqs that we *expect* to start at `insn_idx`.
+ // But in the case of optional parameters or when handling exceptions,
+ // the interpreter can set the pc to a different location. For
+ // such scenarios, we'll add a runtime check that the PC we've
+ // compiled for is the same PC that the interpreter wants us to run with.
+ // If they don't match, then we'll jump to an entry stub and generate
+ // another PC check and entry there.
+ let pending_entry = if unsafe { get_iseq_flags_has_opt(iseq) } || jit_exception {
+ Some(gen_entry_chain_guard(&mut asm, ocb, blockid)?)
+ } else {
+ None
+ };
+ let reg_mapping = gen_entry_reg_mapping(&mut asm, blockid, stack_size);
+
+ asm.compile(cb, Some(ocb))?;
+
+ if cb.has_dropped_bytes() {
+ None
+ } else {
+ // Mark code pages for code GC
+ let iseq_payload = get_or_create_iseq_payload(iseq);
+ for page in cb.addrs_to_pages(code_ptr, cb.get_write_ptr()) {
+ iseq_payload.pages.insert(page);
+ }
+ // Write an entry to the heap and push it to the ISEQ
+ if let Some(pending_entry) = pending_entry {
+ let pending_entry = Rc::try_unwrap(pending_entry)
+ .ok().expect("PendingEntry should be unique");
+ iseq_payload.entries.push(pending_entry.into_entry());
+ }
+ Some((code_ptr, reg_mapping))
+ }
+}
+
+/// Generate code to load registers for a JIT entry. When the entry block is compiled for
+/// the first time, it loads no register. When it has been already compiled as a callee
+/// block, it loads some registers to reuse the block.
+pub fn gen_entry_reg_mapping(asm: &mut Assembler, blockid: BlockId, stack_size: u8) -> RegMapping {
+ // Find an existing callee block. If it's not found or uses no register, skip loading registers.
+ let mut ctx = Context::default();
+ ctx.set_stack_size(stack_size);
+ let reg_mapping = find_most_compatible_reg_mapping(blockid, &ctx).unwrap_or(RegMapping::default());
+ if reg_mapping == RegMapping::default() {
+ return reg_mapping;
+ }
+
+ // If found, load the same registers to reuse the block.
+ asm_comment!(asm, "reuse maps: {:?}", reg_mapping);
+ let local_table_size: u32 = unsafe { get_iseq_body_local_table_size(blockid.iseq) }.try_into().unwrap();
+ for &reg_opnd in reg_mapping.get_reg_opnds().iter() {
+ match reg_opnd {
+ RegOpnd::Local(local_idx) => {
+ let loaded_reg = TEMP_REGS[reg_mapping.get_reg(reg_opnd).unwrap()];
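+                // Compute the local's EP-relative slot from its index; this mirrors
+                // (inverts) ep_offset_to_local_idx() defined later in this file.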
+ let loaded_temp = asm.local_opnd(local_table_size - local_idx as u32 + VM_ENV_DATA_SIZE - 1);
+ asm.load_into(Opnd::Reg(loaded_reg), loaded_temp);
+ }
+ RegOpnd::Stack(_) => unreachable!("find_most_compatible_reg_mapping should not leave {:?}", reg_opnd),
+ }
+ }
+
+ reg_mapping
+}
+
+// Generate code to check for interrupts and take a side-exit.
+// Warning: this function clobbers REG0
+fn gen_check_ints(
+ asm: &mut Assembler,
+ counter: Counter,
+) {
+ // Check for interrupts
+ // see RUBY_VM_CHECK_INTS(ec) macro
+ asm_comment!(asm, "RUBY_VM_CHECK_INTS(ec)");
+
+ // Not checking interrupt_mask since it's zero outside finalize_deferred_heap_pages,
+ // signal_exec, or rb_postponed_job_flush.
+ let interrupt_flag = asm.load(Opnd::mem(32, EC, RUBY_OFFSET_EC_INTERRUPT_FLAG as i32));
+ asm.test(interrupt_flag, interrupt_flag);
+
+ asm.jnz(Target::side_exit(counter));
+}
+
+// Generate a stubbed unconditional jump to the next bytecode instruction.
+// Blocks that are part of a guard chain can use this to share the same successor.
+fn jump_to_next_insn(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ end_block_with_jump(jit, asm, jit.next_insn_idx())
+}
+
+fn end_block_with_jump(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ continuation_insn_idx: u16,
+) -> Option<CodegenStatus> {
+ // Reset the depth since in current usages we only ever jump to
+ // chain_depth > 0 from the same instruction.
+ let mut reset_depth = asm.ctx;
+ reset_depth.reset_chain_depth_and_defer();
+
+ let jump_block = BlockId {
+ iseq: jit.iseq,
+ idx: continuation_insn_idx,
+ };
+
+ // We are at the end of the current instruction. Record the boundary.
+ if jit.record_boundary_patch_point {
+ jit.record_boundary_patch_point = false;
+ let exit_pc = unsafe { rb_iseq_pc_at_idx(jit.iseq, continuation_insn_idx.into())};
+ let exit_pos = jit.gen_outlined_exit(exit_pc, &reset_depth);
+ record_global_inval_patch(asm, exit_pos?);
+ }
+
+ // Generate the jump instruction
+ gen_direct_jump(jit, &reset_depth, jump_block, asm);
+ Some(EndBlock)
+}
+
+// Compile a sequence of bytecode instructions for a given basic block version.
+// Part of gen_block_version().
+// Note: this function will mutate its context while generating code,
+// but the input start_ctx argument should remain immutable.
+pub fn gen_single_block(
+ blockid: BlockId,
+ start_ctx: &Context,
+ ec: EcPtr,
+ cb: &mut CodeBlock,
+ ocb: &mut OutlinedCb,
+ first_block: bool,
+) -> Result<BlockRef, ()> {
+ // Limit the number of specialized versions for this block
+ let ctx = limit_block_versions(blockid, start_ctx);
+
+ verify_blockid(blockid);
+ assert!(!(blockid.idx == 0 && ctx.get_stack_size() > 0));
+
+ // Save machine code placement of the block. `cb` might page switch when we
+ // generate code in `ocb`.
+ let block_start_addr = cb.get_write_ptr();
+
+ // Instruction sequence to compile
+ let iseq = blockid.iseq;
+ let iseq_size = unsafe { get_iseq_encoded_size(iseq) };
+ let iseq_size: IseqIdx = if let Ok(size) = iseq_size.try_into() {
+ size
+ } else {
+ // ISeq too large to compile
+ return Err(());
+ };
+ let mut insn_idx: IseqIdx = blockid.idx;
+
+ // Initialize a JIT state object
+ let mut jit = JITState::new(blockid, ctx, cb.get_write_ptr(), ec, ocb, first_block);
+ jit.iseq = blockid.iseq;
+
+ // Create a backend assembler instance
+ let mut asm = Assembler::new(jit.num_locals());
+ asm.ctx = ctx;
+
+ if get_option_ref!(dump_disasm).is_some() {
+ let blockid_idx = blockid.idx;
+ let chain_depth = if asm.ctx.get_chain_depth() > 0 { format!("(chain_depth: {})", asm.ctx.get_chain_depth()) } else { "".to_string() };
+ asm_comment!(asm, "Block: {} {}", iseq_get_location(blockid.iseq, blockid_idx), chain_depth);
+ asm_comment!(asm, "reg_mapping: {:?}", asm.ctx.get_reg_mapping());
+ }
+
+ Log::add_block_with_chain_depth(blockid, asm.ctx.get_chain_depth());
+
+ // Mark the start of an ISEQ for --yjit-perf
+ jit_perf_symbol_push!(jit, &mut asm, &get_iseq_name(iseq), PerfMap::ISEQ);
+
+ if asm.ctx.is_return_landing() {
+ // Continuation of the end of gen_leave().
+ // Reload REG_SP for the current frame and transfer the return value
+ // to the stack top.
+ asm.mov(SP, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP));
+
+ let top = asm.stack_push(Type::Unknown);
+ asm.mov(top, C_RET_OPND);
+
+ asm.ctx.clear_return_landing();
+ }
+
+ // For each instruction to compile
+ // NOTE: could rewrite this loop with a std::iter::Iterator
+ while insn_idx < iseq_size {
+ // Get the current pc and opcode
+ let pc = unsafe { rb_iseq_pc_at_idx(iseq, insn_idx.into()) };
+ // try_into() call below is unfortunate. Maybe pick i32 instead of usize for opcodes.
+ let opcode: usize = unsafe { rb_iseq_opcode_at_pc(iseq, pc) }
+ .try_into()
+ .unwrap();
+
+ // We need opt_getconstant_path to be in a block all on its own. Cut the block short
+ // if we run into it. This is necessary because we want to invalidate based on the
+ // instruction's index.
+ if opcode == YARVINSN_opt_getconstant_path.as_usize() && insn_idx > jit.starting_insn_idx {
+ jump_to_next_insn(&mut jit, &mut asm);
+ break;
+ }
+
+ // Set the current instruction
+ jit.insn_idx = insn_idx;
+ jit.opcode = opcode;
+ jit.pc = pc;
+ jit.stack_size_for_pc = asm.ctx.get_stack_size();
+ asm.set_side_exit_context(pc, asm.ctx.get_stack_size());
+
+ // stack_pop doesn't immediately deallocate a register for stack temps,
+ // but it's safe to do so at this instruction boundary.
+ for stack_idx in asm.ctx.get_stack_size()..MAX_CTX_TEMPS as u8 {
+ asm.ctx.dealloc_reg(RegOpnd::Stack(stack_idx));
+ }
+
+ // If previous instruction requested to record the boundary
+ if jit.record_boundary_patch_point {
+ // Generate an exit to this instruction and record it
+ let exit_pos = jit.gen_outlined_exit(jit.pc, &asm.ctx).ok_or(())?;
+ record_global_inval_patch(&mut asm, exit_pos);
+ jit.record_boundary_patch_point = false;
+ }
+
+ // In debug mode, verify our existing assumption
+ if cfg!(debug_assertions) && get_option!(verify_ctx) && jit.at_compile_target() {
+ verify_ctx(&jit, &asm.ctx);
+ }
+
+ // :count-placement:
+ // Count bytecode instructions that execute in generated code.
+ // Note that the increment happens even when the output takes side exit.
+ gen_counter_incr(&jit, &mut asm, Counter::yjit_insns_count);
+
+ // Lookup the codegen function for this instruction
+ let mut status = None;
+ if let Some(gen_fn) = get_gen_fn(VALUE(opcode)) {
+ // Add a comment for the name of the YARV instruction
+ asm_comment!(asm, "Insn: {:04} {} (stack_size: {})", insn_idx, insn_name(opcode), asm.ctx.get_stack_size());
+
+ // If requested, dump instructions for debugging
+ if get_option!(dump_insns) {
+ println!("compiling {}", insn_name(opcode));
+ print_str(&mut asm, &format!("executing {}", insn_name(opcode)));
+ }
+
+ // Call the code generation function
+ jit_perf_symbol_push!(jit, &mut asm, &insn_name(opcode), PerfMap::Codegen);
+ status = gen_fn(&mut jit, &mut asm);
+ jit_perf_symbol_pop!(jit, &mut asm, PerfMap::Codegen);
+
+ #[cfg(debug_assertions)]
+ assert!(!asm.get_leaf_ccall(), "ccall() wasn't used after leaf_ccall was set in {}", insn_name(opcode));
+ }
+
+ // If we can't compile this instruction
+ // exit to the interpreter and stop compiling
+ if status == None {
+ if get_option!(dump_insns) {
+ println!("can't compile {}", insn_name(opcode));
+ }
+
+ // Rewind stack_size using ctx.with_stack_size to allow stack_size changes
+ // before you return None.
+ asm.ctx = asm.ctx.with_stack_size(jit.stack_size_for_pc);
+ gen_exit(jit.pc, &mut asm);
+
+ // If this is the first instruction in the block, then
+ // the entry address is the address for block_entry_exit
+ if insn_idx == jit.starting_insn_idx {
+ jit.block_entry_exit = Some(jit.output_ptr);
+ }
+
+ break;
+ }
+
+ // For now, reset the chain depth after each instruction as only the
+ // first instruction in the block can concern itself with the depth.
+ asm.ctx.reset_chain_depth_and_defer();
+
+ // Move to the next instruction to compile
+ insn_idx += insn_len(opcode) as u16;
+
+ // If the instruction terminates this block
+ if status == Some(EndBlock) {
+ break;
+ }
+ }
+ let end_insn_idx = insn_idx;
+
+ // We currently can't handle cases where the request is for a block that
+ // doesn't go to the next instruction in the same iseq.
+ assert!(!jit.record_boundary_patch_point);
+
+ // Bail when requested to.
+ if jit.block_abandoned {
+ incr_counter!(abandoned_block_count);
+ return Err(());
+ }
+
+ // Pad the block if it has the potential to be invalidated
+ if jit.block_entry_exit.is_some() {
+ asm.pad_inval_patch();
+ }
+
+ // Mark the end of an ISEQ for --yjit-perf
+ jit_perf_symbol_pop!(jit, &mut asm, PerfMap::ISEQ);
+
+ // Compile code into the code block
+ let (_, gc_offsets) = asm.compile(cb, Some(jit.get_ocb())).ok_or(())?;
+ let end_addr = cb.get_write_ptr();
+
+ // Flush perf symbols after asm.compile() writes addresses
+ if get_option!(perf_map).is_some() {
+ jit.flush_perf_symbols(cb);
+ }
+
+ // If code for the block doesn't fit, fail
+ if cb.has_dropped_bytes() || jit.get_ocb().unwrap().has_dropped_bytes() {
+ return Err(());
+ }
+
+ // Block compiled successfully
+ Ok(jit.into_block(end_insn_idx, block_start_addr, end_addr, gc_offsets))
+}
+
+fn gen_nop(
+ _jit: &mut JITState,
+ _asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Do nothing
+ Some(KeepCompiling)
+}
+
+fn gen_pop(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Decrement SP
+ asm.stack_pop(1);
+ Some(KeepCompiling)
+}
+
+fn gen_dup(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let dup_val = asm.stack_opnd(0);
+ let mapping = asm.ctx.get_opnd_mapping(dup_val.into());
+
+ let loc0 = asm.stack_push_mapping(mapping);
+ asm.mov(loc0, dup_val);
+
+ Some(KeepCompiling)
+}
+
+// duplicate stack top n elements
+fn gen_dupn(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let n = jit.get_arg(0).as_usize();
+
+ // In practice, seems to be only used for n==2
+ if n != 2 {
+ return None;
+ }
+
+ let opnd1: Opnd = asm.stack_opnd(1);
+ let opnd0: Opnd = asm.stack_opnd(0);
+
+ let mapping1 = asm.ctx.get_opnd_mapping(opnd1.into());
+ let mapping0 = asm.ctx.get_opnd_mapping(opnd0.into());
+
+ let dst1: Opnd = asm.stack_push_mapping(mapping1);
+ asm.mov(dst1, opnd1);
+
+ let dst0: Opnd = asm.stack_push_mapping(mapping0);
+ asm.mov(dst0, opnd0);
+
+ Some(KeepCompiling)
+}
+
+// Reverse top X stack entries
+fn gen_opt_reverse(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let count = jit.get_arg(0).as_i32();
+ for n in 0..(count/2) {
+ stack_swap(asm, n, count - 1 - n);
+ }
+ Some(KeepCompiling)
+}
+
+// Swap top 2 stack entries
+fn gen_swap(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ stack_swap(asm, 0, 1);
+ Some(KeepCompiling)
+}
+
+fn stack_swap(
+ asm: &mut Assembler,
+ offset0: i32,
+ offset1: i32,
+) {
+ let stack0_mem = asm.stack_opnd(offset0);
+ let stack1_mem = asm.stack_opnd(offset1);
+
+ let mapping0 = asm.ctx.get_opnd_mapping(stack0_mem.into());
+ let mapping1 = asm.ctx.get_opnd_mapping(stack1_mem.into());
+
+ let stack0_reg = asm.load(stack0_mem);
+ let stack1_reg = asm.load(stack1_mem);
+ asm.mov(stack0_mem, stack1_reg);
+ asm.mov(stack1_mem, stack0_reg);
+
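+    // Swap the tracked operand mappings too, so the context matches the swapped values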
+ asm.ctx.set_opnd_mapping(stack0_mem.into(), mapping1);
+ asm.ctx.set_opnd_mapping(stack1_mem.into(), mapping0);
+}
+
+fn gen_putnil(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ jit_putobject(asm, Qnil);
+ Some(KeepCompiling)
+}
+
+fn jit_putobject(asm: &mut Assembler, arg: VALUE) {
+ let val_type: Type = Type::from(arg);
+ let stack_top = asm.stack_push(val_type);
+ asm.mov(stack_top, arg.into());
+}
+
+fn gen_putobject_int2fix(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let opcode = jit.opcode;
+ let cst_val: usize = if opcode == YARVINSN_putobject_INT2FIX_0_.as_usize() {
+ 0
+ } else {
+ 1
+ };
+ let cst_val = VALUE::fixnum_from_usize(cst_val);
+
+ if let Some(result) = fuse_putobject_opt_ltlt(jit, asm, cst_val) {
+ return Some(result);
+ }
+
+ jit_putobject(asm, cst_val);
+ Some(KeepCompiling)
+}
+
+fn gen_putobject(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let arg: VALUE = jit.get_arg(0);
+
+ if let Some(result) = fuse_putobject_opt_ltlt(jit, asm, arg) {
+ return Some(result);
+ }
+
+ jit_putobject(asm, arg);
+ Some(KeepCompiling)
+}
+
+/// Combine `putobject` and `opt_ltlt` together if profitable, for example when
+/// left shifting an integer by a constant amount.
+fn fuse_putobject_opt_ltlt(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ constant_object: VALUE,
+) -> Option<CodegenStatus> {
+ let next_opcode = unsafe { rb_vm_insn_addr2opcode(jit.pc.add(insn_len(jit.opcode).as_usize()).read().as_ptr()) };
+ if next_opcode == YARVINSN_opt_ltlt as i32 && constant_object.fixnum_p() {
+ // Untag the fixnum shift amount
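+        // (CRuby tags a Fixnum n as the VALUE (n << 1) | 1, so >> 1 recovers n)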
+ let shift_amt = constant_object.as_isize() >> 1;
+ if shift_amt > 63 || shift_amt < 0 {
+ return None;
+ }
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let lhs = jit.peek_at_stack(&asm.ctx, 0);
+ if !lhs.fixnum_p() {
+ return None;
+ }
+
+ if !assume_bop_not_redefined(jit, asm, INTEGER_REDEFINED_OP_FLAG, BOP_LTLT) {
+ return None;
+ }
+
+ asm_comment!(asm, "integer left shift with rhs={shift_amt}");
+ let lhs = asm.stack_opnd(0);
+
+ // Guard that lhs is a fixnum if necessary
+ let lhs_type = asm.ctx.get_opnd_type(lhs.into());
+ if lhs_type != Type::Fixnum {
+ asm_comment!(asm, "guard arg0 fixnum");
+ asm.test(lhs, Opnd::UImm(RUBY_FIXNUM_FLAG as u64));
+
+ jit_chain_guard(
+ JCC_JZ,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_not_fixnums,
+ );
+ }
+
+ asm.stack_pop(1);
+ fixnum_left_shift_body(asm, lhs, shift_amt as u64);
+ return end_block_with_jump(jit, asm, jit.next_next_insn_idx());
+ }
+ return None;
+}
+
+fn gen_putself(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Write it on the stack
+ let stack_top = asm.stack_push_self();
+ asm.mov(
+ stack_top,
+ Opnd::mem(VALUE_BITS, CFP, RUBY_OFFSET_CFP_SELF)
+ );
+
+ Some(KeepCompiling)
+}
+
+fn gen_putspecialobject(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let object_type = jit.get_arg(0).as_usize();
+
+ if object_type == VM_SPECIAL_OBJECT_VMCORE.as_usize() {
+ let stack_top = asm.stack_push(Type::UnknownHeap);
+ let frozen_core = unsafe { rb_mRubyVMFrozenCore };
+ asm.mov(stack_top, frozen_core.into());
+ Some(KeepCompiling)
+ } else {
+ // TODO: implement for VM_SPECIAL_OBJECT_CBASE and
+ // VM_SPECIAL_OBJECT_CONST_BASE
+ None
+ }
+}
+
+// set Nth stack entry to stack top
+fn gen_setn(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let n = jit.get_arg(0).as_usize();
+
+ let top_val = asm.stack_opnd(0);
+ let dst_opnd = asm.stack_opnd(n.try_into().unwrap());
+ asm.mov(
+ dst_opnd,
+ top_val
+ );
+
+ let mapping = asm.ctx.get_opnd_mapping(top_val.into());
+ asm.ctx.set_opnd_mapping(dst_opnd.into(), mapping);
+
+ Some(KeepCompiling)
+}
+
+// get nth stack value, then push it
+fn gen_topn(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let n = jit.get_arg(0).as_usize();
+
+ let top_n_val = asm.stack_opnd(n.try_into().unwrap());
+ let mapping = asm.ctx.get_opnd_mapping(top_n_val.into());
+ let loc0 = asm.stack_push_mapping(mapping);
+ asm.mov(loc0, top_n_val);
+
+ Some(KeepCompiling)
+}
+
+// Pop n values off the stack
+fn gen_adjuststack(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let n = jit.get_arg(0).as_usize();
+ asm.stack_pop(n);
+ Some(KeepCompiling)
+}
+
+fn gen_opt_plus(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
+ Some(two_fixnums) => two_fixnums,
+ None => {
+ return jit.defer_compilation(asm);
+ }
+ };
+
+ if two_fixnums {
+ if !assume_bop_not_redefined(jit, asm, INTEGER_REDEFINED_OP_FLAG, BOP_PLUS) {
+ return None;
+ }
+
+ // Check that both operands are fixnums
+ guard_two_fixnums(jit, asm);
+
+ // Get the operands from the stack
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
+
+ // Add arg0 + arg1 and test for overflow
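+        // With fixnums tagged as 2n+1: (2a+1 - 1) + (2b+1) == 2(a+b) + 1, which is
+        // already the tagged sum, so untagging one operand (the sub below) suffices.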
+ let arg0_untag = asm.sub(arg0, Opnd::Imm(1));
+ let out_val = asm.add(arg0_untag, arg1);
+ asm.jo(Target::side_exit(Counter::opt_plus_overflow));
+
+ // Push the output on the stack
+ let dst = asm.stack_push(Type::Fixnum);
+ asm.mov(dst, out_val);
+
+ Some(KeepCompiling)
+ } else {
+ gen_opt_send_without_block(jit, asm)
+ }
+}
+
+// new array initialized from top N values
+fn gen_newarray(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let n = jit.get_arg(0).as_u32();
+
+ // Save the PC and SP because we are allocating
+ jit_prepare_call_with_gc(jit, asm);
+
+ // If n is 0, then elts is never going to be read, so we can just pass null
+ let values_ptr = if n == 0 {
+ Opnd::UImm(0)
+ } else {
+ asm_comment!(asm, "load pointer to array elements");
+ let values_opnd = asm.ctx.sp_opnd(-(n as i32));
+ asm.lea(values_opnd)
+ };
+
+ // call rb_ec_ary_new_from_values(struct rb_execution_context_struct *ec, long n, const VALUE *elts);
+ let new_ary = asm.ccall(
+ rb_ec_ary_new_from_values as *const u8,
+ vec![
+ EC,
+ Opnd::UImm(n.into()),
+ values_ptr
+ ]
+ );
+
+ asm.stack_pop(n.as_usize());
+ let stack_ret = asm.stack_push(Type::CArray);
+ asm.mov(stack_ret, new_ary);
+
+ Some(KeepCompiling)
+}
+
+// dup array
+fn gen_duparray(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let ary = jit.get_arg(0);
+
+ // Save the PC and SP because we are allocating
+ jit_prepare_call_with_gc(jit, asm);
+
+ // call rb_ary_resurrect(VALUE ary);
+ let new_ary = asm.ccall(
+ rb_ary_resurrect as *const u8,
+ vec![ary.into()],
+ );
+
+ let stack_ret = asm.stack_push(Type::CArray);
+ asm.mov(stack_ret, new_ary);
+
+ Some(KeepCompiling)
+}
+
+// dup hash
+fn gen_duphash(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let hash = jit.get_arg(0);
+
+ // Save the PC and SP because we are allocating
+ jit_prepare_call_with_gc(jit, asm);
+
+ // call rb_hash_resurrect(VALUE hash);
+ let hash = asm.ccall(rb_hash_resurrect as *const u8, vec![hash.into()]);
+
+ let stack_ret = asm.stack_push(Type::CHash);
+ asm.mov(stack_ret, hash);
+
+ Some(KeepCompiling)
+}
+
+// call to_a on the array on the stack
+fn gen_splatarray(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let flag = jit.get_arg(0).as_usize();
+
+ // Save the PC and SP because the callee may call #to_a
+ // Note that this modifies REG_SP, which is why we do it first
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Get the operands from the stack
+ let ary_opnd = asm.stack_opnd(0);
+
+ // Call rb_vm_splat_array(flag, ary)
+ let ary = asm.ccall(rb_vm_splat_array as *const u8, vec![flag.into(), ary_opnd]);
+ asm.stack_pop(1); // Keep it on stack during ccall for GC
+
+ let stack_ret = asm.stack_push(Type::TArray);
+ asm.mov(stack_ret, ary);
+
+ Some(KeepCompiling)
+}
+
+// call to_hash on hash to keyword splat before converting block
+// e.g. foo(**object, &block)
+fn gen_splatkw(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Defer compilation so we can specialize on a runtime hash operand
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let comptime_hash = jit.peek_at_stack(&asm.ctx, 1);
+ if comptime_hash.hash_p() {
+ // If a compile-time hash operand is T_HASH, just guard that it's T_HASH.
+ let hash_opnd = asm.stack_opnd(1);
+ guard_object_is_hash(asm, hash_opnd, hash_opnd.into(), Counter::splatkw_not_hash);
+ } else if comptime_hash.nil_p() {
+ // Speculate we'll see nil if compile-time hash operand is nil
+ let hash_opnd = asm.stack_opnd(1);
+ let hash_opnd_type = asm.ctx.get_opnd_type(hash_opnd.into());
+
+ if hash_opnd_type != Type::Nil {
+ asm.cmp(hash_opnd, Qnil.into());
+ asm.jne(Target::side_exit(Counter::splatkw_not_nil));
+
+ if Type::Nil.diff(hash_opnd_type) != TypeDiff::Incompatible {
+ asm.ctx.upgrade_opnd_type(hash_opnd.into(), Type::Nil);
+ }
+ }
+ } else {
+ // Otherwise, call #to_hash on the operand if it's not nil.
+
+ // Save the PC and SP because the callee may call #to_hash
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Get the operands from the stack
+ let block_opnd = asm.stack_opnd(0);
+ let block_type = asm.ctx.get_opnd_type(block_opnd.into());
+ let hash_opnd = asm.stack_opnd(1);
+
+ c_callable! {
+ fn to_hash_if_not_nil(mut obj: VALUE) -> VALUE {
+ if obj != Qnil {
+ obj = unsafe { rb_to_hash_type(obj) };
+ }
+ obj
+ }
+ }
+
+ let hash = asm.ccall(to_hash_if_not_nil as _, vec![hash_opnd]);
+ asm.stack_pop(2); // Keep it on stack during ccall for GC
+
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, hash);
+ asm.stack_push(block_type);
+ // Leave block_opnd spilled by ccall as is
+ asm.ctx.dealloc_reg(RegOpnd::Stack(asm.ctx.get_stack_size() - 1));
+ }
+
+ Some(KeepCompiling)
+}
+
+// concat two arrays
+fn gen_concatarray(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Save the PC and SP because the callee may call #to_a
+ // Note that this modifies REG_SP, which is why we do it first
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Get the operands from the stack
+ let ary2st_opnd = asm.stack_opnd(0);
+ let ary1_opnd = asm.stack_opnd(1);
+
+ // Call rb_vm_concat_array(ary1, ary2st)
+ let ary = asm.ccall(rb_vm_concat_array as *const u8, vec![ary1_opnd, ary2st_opnd]);
+ asm.stack_pop(2); // Keep them on stack during ccall for GC
+
+ let stack_ret = asm.stack_push(Type::TArray);
+ asm.mov(stack_ret, ary);
+
+ Some(KeepCompiling)
+}
+
+// concat second array to first array.
+// first argument must already be an array.
+// attempts to convert second object to array using to_a.
+fn gen_concattoarray(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Save the PC and SP because the callee may call #to_a
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Get the operands from the stack
+ let ary2_opnd = asm.stack_opnd(0);
+ let ary1_opnd = asm.stack_opnd(1);
+
+ let ary = asm.ccall(rb_vm_concat_to_array as *const u8, vec![ary1_opnd, ary2_opnd]);
+ asm.stack_pop(2); // Keep them on stack during ccall for GC
+
+ let stack_ret = asm.stack_push(Type::TArray);
+ asm.mov(stack_ret, ary);
+
+ Some(KeepCompiling)
+}
+
+// Push a given number of objects onto the array that sits just below them on the stack.
+fn gen_pushtoarray(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let num = jit.get_arg(0).as_u64();
+
+ // Save the PC and SP because the callee may allocate
+ jit_prepare_call_with_gc(jit, asm);
+
+ // Get the operands from the stack
+ let ary_opnd = asm.stack_opnd(num as i32);
+ let objp_opnd = asm.lea(asm.ctx.sp_opnd(-(num as i32)));
+
+ let ary = asm.ccall(rb_ary_cat as *const u8, vec![ary_opnd, objp_opnd, num.into()]);
+ asm.stack_pop(num as usize + 1); // Keep it on stack during ccall for GC
+
+ let stack_ret = asm.stack_push(Type::TArray);
+ asm.mov(stack_ret, ary);
+
+ Some(KeepCompiling)
+}
+
+// new range initialized from top 2 values
+fn gen_newrange(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let flag = jit.get_arg(0).as_usize();
+
+ // rb_range_new() allocates and can raise
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // val = rb_range_new(low, high, (int)flag);
+ let range_opnd = asm.ccall(
+ rb_range_new as *const u8,
+ vec![
+ asm.stack_opnd(1),
+ asm.stack_opnd(0),
+ flag.into()
+ ]
+ );
+
+ asm.stack_pop(2);
+ let stack_ret = asm.stack_push(Type::UnknownHeap);
+ asm.mov(stack_ret, range_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn guard_object_is_heap(
+ asm: &mut Assembler,
+ object: Opnd,
+ object_opnd: YARVOpnd,
+ counter: Counter,
+) {
+ let object_type = asm.ctx.get_opnd_type(object_opnd);
+ if object_type.is_heap() {
+ return;
+ }
+
+ asm_comment!(asm, "guard object is heap");
+
+ // Test that the object is not an immediate
+ asm.test(object, (RUBY_IMMEDIATE_MASK as u64).into());
+ asm.jnz(Target::side_exit(counter));
+
+ // Test that the object is not false
+ asm.cmp(object, Qfalse.into());
+ asm.je(Target::side_exit(counter));
+
+ if Type::UnknownHeap.diff(object_type) != TypeDiff::Incompatible {
+ asm.ctx.upgrade_opnd_type(object_opnd, Type::UnknownHeap);
+ }
+}
+
+fn guard_object_is_array(
+ asm: &mut Assembler,
+ object: Opnd,
+ object_opnd: YARVOpnd,
+ counter: Counter,
+) {
+ let object_type = asm.ctx.get_opnd_type(object_opnd);
+ if object_type.is_array() {
+ return;
+ }
+
+ let object_reg = match object {
+ Opnd::InsnOut { .. } => object,
+ _ => asm.load(object),
+ };
+ guard_object_is_heap(asm, object_reg, object_opnd, counter);
+
+ asm_comment!(asm, "guard object is array");
+
+ // Pull out the type mask
+ let flags_opnd = Opnd::mem(VALUE_BITS, object_reg, RUBY_OFFSET_RBASIC_FLAGS);
+ let flags_opnd = asm.and(flags_opnd, (RUBY_T_MASK as u64).into());
+
+ // Compare the result with T_ARRAY
+ asm.cmp(flags_opnd, (RUBY_T_ARRAY as u64).into());
+ asm.jne(Target::side_exit(counter));
+
+ if Type::TArray.diff(object_type) != TypeDiff::Incompatible {
+ asm.ctx.upgrade_opnd_type(object_opnd, Type::TArray);
+ }
+}
+
+fn guard_object_is_hash(
+ asm: &mut Assembler,
+ object: Opnd,
+ object_opnd: YARVOpnd,
+ counter: Counter,
+) {
+ let object_type = asm.ctx.get_opnd_type(object_opnd);
+ if object_type.is_hash() {
+ return;
+ }
+
+ let object_reg = match object {
+ Opnd::InsnOut { .. } => object,
+ _ => asm.load(object),
+ };
+ guard_object_is_heap(asm, object_reg, object_opnd, counter);
+
+ asm_comment!(asm, "guard object is hash");
+
+ // Pull out the type mask
+ let flags_opnd = Opnd::mem(VALUE_BITS, object_reg, RUBY_OFFSET_RBASIC_FLAGS);
+ let flags_opnd = asm.and(flags_opnd, (RUBY_T_MASK as u64).into());
+
+ // Compare the result with T_HASH
+ asm.cmp(flags_opnd, (RUBY_T_HASH as u64).into());
+ asm.jne(Target::side_exit(counter));
+
+ if Type::THash.diff(object_type) != TypeDiff::Incompatible {
+ asm.ctx.upgrade_opnd_type(object_opnd, Type::THash);
+ }
+}
+
+fn guard_object_is_fixnum(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ object: Opnd,
+ object_opnd: YARVOpnd
+) {
+ let object_type = asm.ctx.get_opnd_type(object_opnd);
+ if object_type.is_heap() {
+ asm_comment!(asm, "arg is heap object");
+ asm.jmp(Target::side_exit(Counter::guard_send_not_fixnum));
+ return;
+ }
+
+ if object_type != Type::Fixnum && object_type.is_specific() {
+ asm_comment!(asm, "arg is not fixnum");
+ asm.jmp(Target::side_exit(Counter::guard_send_not_fixnum));
+ return;
+ }
+
+ assert!(!object_type.is_heap());
+ assert!(object_type == Type::Fixnum || object_type.is_unknown());
+
+ // If not fixnums at run-time, fall back
+ if object_type != Type::Fixnum {
+ asm_comment!(asm, "guard object fixnum");
+ asm.test(object, Opnd::UImm(RUBY_FIXNUM_FLAG as u64));
+
+ jit_chain_guard(
+ JCC_JZ,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_not_fixnum,
+ );
+ }
+
+ // Set the stack type in the context.
+ asm.ctx.upgrade_opnd_type(object.into(), Type::Fixnum);
+}
+
+fn guard_object_is_string(
+ asm: &mut Assembler,
+ object: Opnd,
+ object_opnd: YARVOpnd,
+ counter: Counter,
+) {
+ let object_type = asm.ctx.get_opnd_type(object_opnd);
+ if object_type.is_string() {
+ return;
+ }
+
+ let object_reg = match object {
+ Opnd::InsnOut { .. } => object,
+ _ => asm.load(object),
+ };
+ guard_object_is_heap(asm, object_reg, object_opnd, counter);
+
+ asm_comment!(asm, "guard object is string");
+
+ // Pull out the type mask
+ let flags_reg = asm.load(Opnd::mem(VALUE_BITS, object_reg, RUBY_OFFSET_RBASIC_FLAGS));
+ let flags_reg = asm.and(flags_reg, Opnd::UImm(RUBY_T_MASK as u64));
+
+ // Compare the result with T_STRING
+ asm.cmp(flags_reg, Opnd::UImm(RUBY_T_STRING as u64));
+ asm.jne(Target::side_exit(counter));
+
+ if Type::TString.diff(object_type) != TypeDiff::Incompatible {
+ asm.ctx.upgrade_opnd_type(object_opnd, Type::TString);
+ }
+}
+
+/// This guards that a special flag is not set on a hash.
+/// By passing a hash with this flag set as the last argument
+/// in a splat call, you can change the way keywords are handled
+/// to behave like Ruby 2. We don't currently support this.
+fn guard_object_is_not_ruby2_keyword_hash(
+ asm: &mut Assembler,
+ object_opnd: Opnd,
+ counter: Counter,
+) {
+ asm_comment!(asm, "guard object is not ruby2 keyword hash");
+
+ let not_ruby2_keyword = asm.new_label("not_ruby2_keyword");
+ asm.test(object_opnd, (RUBY_IMMEDIATE_MASK as u64).into());
+ asm.jnz(not_ruby2_keyword);
+
+ asm.cmp(object_opnd, Qfalse.into());
+ asm.je(not_ruby2_keyword);
+
+ let flags_opnd = asm.load(Opnd::mem(
+ VALUE_BITS,
+ object_opnd,
+ RUBY_OFFSET_RBASIC_FLAGS,
+ ));
+ let type_opnd = asm.and(flags_opnd, (RUBY_T_MASK as u64).into());
+
+ asm.cmp(type_opnd, (RUBY_T_HASH as u64).into());
+ asm.jne(not_ruby2_keyword);
+
+ asm.test(flags_opnd, (RHASH_PASS_AS_KEYWORDS as u64).into());
+ asm.jnz(Target::side_exit(counter));
+
+ asm.write_label(not_ruby2_keyword);
+}
+
+/// This instruction pops a single value off the stack, converts it to an
+/// array if it isn't already one using the #to_ary method, and then pushes
+/// the values from the array back onto the stack.
+fn gen_expandarray(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Both arguments are rb_num_t which is unsigned
+ let num = jit.get_arg(0).as_u32();
+ let flag = jit.get_arg(1).as_usize();
+
+ // If this instruction has the splat flag, then bail out.
+ if flag & 0x01 != 0 {
+ gen_counter_incr(jit, asm, Counter::expandarray_splat);
+ return None;
+ }
+
+ // If this instruction has the postarg flag, then bail out.
+ if flag & 0x02 != 0 {
+ gen_counter_incr(jit, asm, Counter::expandarray_postarg);
+ return None;
+ }
+
+ let array_opnd = asm.stack_opnd(0);
+
+ // Defer compilation so we can specialize on a runtime `self`
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let comptime_recv = jit.peek_at_stack(&asm.ctx, 0);
+
+    // If the comptime receiver is not an array, speculate that the `rb_check_array_type()`
+    // conversion returns nil without side effects (e.g. arbitrary method calls).
+ if !unsafe { RB_TYPE_P(comptime_recv, RUBY_T_ARRAY) } {
+ // at compile time, ensure to_ary is not defined
+ let target_cme = unsafe { rb_callable_method_entry_or_negative(comptime_recv.class_of(), ID!(to_ary)) };
+ let cme_def_type = unsafe { get_cme_def_type(target_cme) };
+
+        // if to_ary is defined, we can't compile; bail so to_ary can be called at runtime
+ if cme_def_type != VM_METHOD_TYPE_UNDEF {
+ gen_counter_incr(jit, asm, Counter::expandarray_to_ary);
+ return None;
+ }
+
+ // Bail when method_missing is defined to avoid generating code to call it.
+ // Also, for simplicity, bail when BasicObject#method_missing has been removed.
+ if !assume_method_basic_definition(jit, asm, comptime_recv.class_of(), ID!(method_missing)) {
+ gen_counter_incr(jit, asm, Counter::expandarray_method_missing);
+ return None;
+ }
+
+ // invalidate the compiled block if to_ary is later defined
+ jit.assume_method_lookup_stable(asm, target_cme);
+
+ jit_guard_known_klass(
+ jit,
+ asm,
+ array_opnd,
+ array_opnd.into(),
+ comptime_recv,
+ SEND_MAX_DEPTH,
+ Counter::expandarray_not_array,
+ );
+
+ let opnd = asm.stack_pop(1); // pop after using the type info
+
+ // If we don't actually want any values, then just keep going
+ if num == 0 {
+ return Some(KeepCompiling);
+ }
+
+ // load opnd to avoid a race because we are also pushing onto the stack
+ let opnd = asm.load(opnd);
+
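+ // A non-array expands like a one-element array: push num - 1 nils first,
+ // then the value itself, so the value ends up on top as element 0.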
+ for _ in 1..num {
+ let push_opnd = asm.stack_push(Type::Nil);
+ asm.mov(push_opnd, Qnil.into());
+ }
+
+ let push_opnd = asm.stack_push(Type::Unknown);
+ asm.mov(push_opnd, opnd);
+
+ return Some(KeepCompiling);
+ }
+
+ // Get the compile-time array length
+ let comptime_len = unsafe { rb_jit_array_len(comptime_recv) as u32 };
+
+ // Move the array from the stack and check that it's an array.
+ guard_object_is_array(
+ asm,
+ array_opnd,
+ array_opnd.into(),
+ Counter::expandarray_not_array,
+ );
+
+ // If we don't actually want any values, then just return.
+ if num == 0 {
+ asm.stack_pop(1); // pop the array
+ return Some(KeepCompiling);
+ }
+
+ let array_opnd = asm.stack_opnd(0);
+ let array_reg = asm.load(array_opnd);
+ let array_len_opnd = get_array_len(asm, array_reg);
+
+ // Guard on the comptime/expected array length
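+ // If the runtime array is at least as long as `num`, extra elements are simply
+ // ignored, so a `>= num` guard suffices; otherwise the number of nil fills is
+ // baked into the generated code, so we must pin the exact length.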
+ if comptime_len >= num {
+ asm_comment!(asm, "guard array length >= {}", num);
+ asm.cmp(array_len_opnd, num.into());
+ jit_chain_guard(
+ JCC_JB,
+ jit,
+ asm,
+ EXPANDARRAY_MAX_CHAIN_DEPTH,
+ Counter::expandarray_chain_max_depth,
+ );
+ } else {
+ asm_comment!(asm, "guard array length == {}", comptime_len);
+ asm.cmp(array_len_opnd, comptime_len.into());
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ EXPANDARRAY_MAX_CHAIN_DEPTH,
+ Counter::expandarray_chain_max_depth,
+ );
+ }
+
+ let array_opnd = asm.stack_pop(1); // pop after using the type info
+
+ // Load the pointer to the embedded or heap array
+ let ary_opnd = if comptime_len > 0 {
+ let array_reg = asm.load(array_opnd);
+ Some(get_array_ptr(asm, array_reg))
+ } else {
+ None
+ };
+
+ // Loop backward through the array and push each element onto the stack.
+ for i in (0..num).rev() {
+ let top = asm.stack_push(if i < comptime_len { Type::Unknown } else { Type::Nil });
+ let offset = i32::try_from(i * (SIZEOF_VALUE as u32)).unwrap();
+
+ // Missing elements are Qnil
+ asm_comment!(asm, "load array[{}]", i);
+ let elem_opnd = if i < comptime_len { Opnd::mem(64, ary_opnd.unwrap(), offset) } else { Qnil.into() };
+ asm.mov(top, elem_opnd);
+ }
+
+ Some(KeepCompiling)
+}
+
+// Compute the index of a local variable from its slot index
+fn ep_offset_to_local_idx(iseq: IseqPtr, ep_offset: u32) -> u32 {
+ // Layout illustration
+ // This is an array of VALUE
+ // | VM_ENV_DATA_SIZE |
+ // v v
+ // low addr <+-------+-------+-------+-------+------------------+
+ // |local 0|local 1| ... |local n| .... |
+ // +-------+-------+-------+-------+------------------+
+ // ^ ^ ^ ^
+ // +-------+---local_table_size----+ cfp->ep--+
+ // | |
+ // +------------------ep_offset---------------+
+ //
+ // See usages of local_var_name() from iseq.c for similar calculation.
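+ //
+ // Worked example (illustrative, assuming VM_ENV_DATA_SIZE == 3): with
+ // local_table_size == 3 and ep_offset == 5, op = 5 - 3 = 2 and
+ // local_idx = 3 - 2 - 1 = 0, i.e. "local 0" in the diagram above.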
+
+ // Equivalent of iseq->body->local_table_size
+ let local_table_size: i32 = unsafe { get_iseq_body_local_table_size(iseq) }
+ .try_into()
+ .unwrap();
+ let op = (ep_offset - VM_ENV_DATA_SIZE) as i32;
+ let local_idx = local_table_size - op - 1;
+ assert!(local_idx >= 0 && local_idx < local_table_size);
+ local_idx.try_into().unwrap()
+}
+
+// Get EP at level from CFP
+fn gen_get_ep(asm: &mut Assembler, level: u32) -> Opnd {
+ // Load environment pointer EP from CFP into a register
+ let ep_opnd = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_EP);
+ let mut ep_opnd = asm.load(ep_opnd);
+
+ for _ in 0..level {
+ // Get the previous EP from the current EP
+ // See GET_PREV_EP(ep) macro
+ // VALUE *prev_ep = ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03))
+ let offs = SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL;
+ ep_opnd = asm.load(Opnd::mem(64, ep_opnd, offs));
+ ep_opnd = asm.and(ep_opnd, Opnd::Imm(!0x03));
+ }
+
+ ep_opnd
+}
+
+// Gets the EP of the ISeq of the containing method, or "local level".
+// Equivalent of GET_LEP() macro.
+fn gen_get_lep(jit: &JITState, asm: &mut Assembler) -> Opnd {
+ // Equivalent of get_lvar_level() in compile.c
+ fn get_lvar_level(iseq: IseqPtr) -> u32 {
+ if iseq == unsafe { rb_get_iseq_body_local_iseq(iseq) } {
+ 0
+ } else {
+ 1 + get_lvar_level(unsafe { rb_get_iseq_body_parent_iseq(iseq) })
+ }
+ }
+
+ let level = get_lvar_level(jit.get_iseq());
+ gen_get_ep(asm, level)
+}
+
+fn gen_getlocal_generic(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ep_offset: u32,
+ level: u32,
+) -> Option<CodegenStatus> {
+ // Split the block if we need to invalidate this instruction when EP escapes
+ if level == 0 && !jit.escapes_ep() && !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let local_opnd = if level == 0 && jit.assume_no_ep_escape(asm) {
+ // Load the local using SP register
+ asm.local_opnd(ep_offset)
+ } else {
+ // Load environment pointer EP (level 0) from CFP
+ let ep_opnd = gen_get_ep(asm, level);
+
+ // Load the local from the block
+ // val = *(vm_get_ep(GET_EP(), level) - idx);
+ let offs = -(SIZEOF_VALUE_I32 * ep_offset as i32);
+ let local_opnd = Opnd::mem(64, ep_opnd, offs);
+
+ // Write back an argument register to the stack. If the local variable
+ // is an argument, it might have an allocated register, but if this ISEQ
+ // is known to escape EP, the register shouldn't be used after this getlocal.
+ if level == 0 && asm.ctx.get_reg_mapping().get_reg(asm.local_opnd(ep_offset).reg_opnd()).is_some() {
+ asm.mov(local_opnd, asm.local_opnd(ep_offset));
+ }
+
+ local_opnd
+ };
+
+ // Write the local at SP
+ let stack_top = if level == 0 {
+ let local_idx = ep_offset_to_local_idx(jit.get_iseq(), ep_offset);
+ asm.stack_push_local(local_idx.as_usize())
+ } else {
+ asm.stack_push(Type::Unknown)
+ };
+
+ asm.mov(stack_top, local_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn gen_getlocal(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let idx = jit.get_arg(0).as_u32();
+ let level = jit.get_arg(1).as_u32();
+ gen_getlocal_generic(jit, asm, idx, level)
+}
+
+fn gen_getlocal_wc0(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let idx = jit.get_arg(0).as_u32();
+ gen_getlocal_generic(jit, asm, idx, 0)
+}
+
+fn gen_getlocal_wc1(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let idx = jit.get_arg(0).as_u32();
+ gen_getlocal_generic(jit, asm, idx, 1)
+}
+
+fn gen_setlocal_generic(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ep_offset: u32,
+ level: u32,
+) -> Option<CodegenStatus> {
+ // Post condition: the type of the set local is updated in the Context.
+ let value_type = asm.ctx.get_opnd_type(StackOpnd(0));
+
+ // Fallback because of write barrier
+ if asm.ctx.get_chain_depth() > 0 {
+ // Load environment pointer EP at level
+ let ep_opnd = gen_get_ep(asm, level);
+
+ // This function should not yield to the GC.
+ // void rb_vm_env_write(const VALUE *ep, int index, VALUE v)
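+ // Locals live below EP, so the slot is addressed with a negative index
+ // relative to EP.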
+ let index = -(ep_offset as i64);
+ let value_opnd = asm.stack_opnd(0);
+ asm.ccall(
+ rb_vm_env_write as *const u8,
+ vec![
+ ep_opnd,
+ index.into(),
+ value_opnd,
+ ]
+ );
+ asm.stack_pop(1);
+
+ // Set local type in the context
+ if level == 0 {
+ let local_idx = ep_offset_to_local_idx(jit.get_iseq(), ep_offset).as_usize();
+ asm.ctx.set_local_type(local_idx, value_type);
+ }
+ return Some(KeepCompiling);
+ }
+
+ // Split the block if we need to invalidate this instruction when EP escapes
+ if level == 0 && !jit.escapes_ep() && !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let (flags_opnd, local_opnd) = if level == 0 && jit.assume_no_ep_escape(asm) {
+ // Load flags and the local using SP register
+ let flags_opnd = asm.ctx.ep_opnd(VM_ENV_DATA_INDEX_FLAGS as i32);
+ let local_opnd = asm.local_opnd(ep_offset);
+
+ // Allocate a register to the new local operand
+ asm.alloc_reg(local_opnd.reg_opnd());
+ (flags_opnd, local_opnd)
+ } else {
+ // Make sure getlocal doesn't read a stale register. If the local variable
+ // is an argument, it might have an allocated register, but if this ISEQ
+ // is known to escape EP, the register shouldn't be used after this setlocal.
+ if level == 0 {
+ asm.ctx.dealloc_reg(asm.local_opnd(ep_offset).reg_opnd());
+ }
+
+ // Load flags and the local for the level
+ let ep_opnd = gen_get_ep(asm, level);
+ let flags_opnd = Opnd::mem(
+ 64,
+ ep_opnd,
+ SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_FLAGS as i32,
+ );
+ (flags_opnd, Opnd::mem(64, ep_opnd, -SIZEOF_VALUE_I32 * ep_offset as i32))
+ };
+
+ // Write barriers may be required when VM_ENV_FLAG_WB_REQUIRED is set; however, write
+ // barriers only affect heap objects being written. If we know an immediate value is
+ // being written, we can skip this check.
+ if !value_type.is_imm() {
+ // flags & VM_ENV_FLAG_WB_REQUIRED
+ asm.test(flags_opnd, VM_ENV_FLAG_WB_REQUIRED.into());
+
+ // if (flags & VM_ENV_FLAG_WB_REQUIRED) != 0
+ assert!(asm.ctx.get_chain_depth() == 0);
+ jit_chain_guard(
+ JCC_JNZ,
+ jit,
+ asm,
+ 1,
+ Counter::setlocal_wb_required,
+ );
+ }
+
+ // Set local type in the context
+ if level == 0 {
+ let local_idx = ep_offset_to_local_idx(jit.get_iseq(), ep_offset).as_usize();
+ asm.ctx.set_local_type(local_idx, value_type);
+ }
+
+ // Pop the value to write from the stack
+ let stack_top = asm.stack_pop(1);
+
+ // Write the value at the environment pointer
+ asm.mov(local_opnd, stack_top);
+
+ Some(KeepCompiling)
+}
+
+fn gen_setlocal(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let idx = jit.get_arg(0).as_u32();
+ let level = jit.get_arg(1).as_u32();
+ gen_setlocal_generic(jit, asm, idx, level)
+}
+
+fn gen_setlocal_wc0(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let idx = jit.get_arg(0).as_u32();
+ gen_setlocal_generic(jit, asm, idx, 0)
+}
+
+fn gen_setlocal_wc1(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let idx = jit.get_arg(0).as_u32();
+ gen_setlocal_generic(jit, asm, idx, 1)
+}
+
+// new hash initialized from top N values
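+// (e.g. `{ k1 => v1, k2 => v2 }` with dynamic keys pushes 4 values and runs newhash 4)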
+fn gen_newhash(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let num: u64 = jit.get_arg(0).as_u64();
+
+ // Save the PC and SP because we are allocating
+ jit_prepare_call_with_gc(jit, asm);
+
+ if num != 0 {
+ // val = rb_hash_new_with_size(num / 2);
+ let new_hash = asm.ccall(
+ rb_hash_new_with_size as *const u8,
+ vec![Opnd::UImm(num / 2)]
+ );
+
+ // Save the allocated hash as we want to push it after insertion
+ asm.cpush(new_hash);
+ asm.cpush(new_hash); // x86 alignment
+
+ // Get a pointer to the values to insert into the hash
+ let stack_addr_from_top = asm.lea(asm.stack_opnd((num - 1) as i32));
+
+ // rb_hash_bulk_insert(num, STACK_ADDR_FROM_TOP(num), val);
+ asm.ccall(
+ rb_hash_bulk_insert as *const u8,
+ vec![
+ Opnd::UImm(num),
+ stack_addr_from_top,
+ new_hash
+ ]
+ );
+
+ let new_hash = asm.cpop();
+ asm.cpop_into(new_hash); // x86 alignment
+
+ asm.stack_pop(num.try_into().unwrap());
+ let stack_ret = asm.stack_push(Type::CHash);
+ asm.mov(stack_ret, new_hash);
+ } else {
+ // val = rb_hash_new();
+ let new_hash = asm.ccall(rb_hash_new as *const u8, vec![]);
+ let stack_ret = asm.stack_push(Type::CHash);
+ asm.mov(stack_ret, new_hash);
+ }
+
+ Some(KeepCompiling)
+}
+
+fn gen_putstring(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let put_val = jit.get_arg(0);
+
+ // Save the PC and SP because the callee will allocate
+ jit_prepare_call_with_gc(jit, asm);
+
+ let str_opnd = asm.ccall(
+ rb_ec_str_resurrect as *const u8,
+ vec![EC, put_val.into(), 0.into()]
+ );
+
+ let stack_top = asm.stack_push(Type::CString);
+ asm.mov(stack_top, str_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn gen_putchilledstring(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let put_val = jit.get_arg(0);
+
+ // Save the PC and SP because the callee will allocate
+ jit_prepare_call_with_gc(jit, asm);
+
+ let str_opnd = asm.ccall(
+ rb_ec_str_resurrect as *const u8,
+ vec![EC, put_val.into(), 1.into()]
+ );
+
+ let stack_top = asm.stack_push(Type::CString);
+ asm.mov(stack_top, str_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn gen_checkmatch(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let flag = jit.get_arg(0).as_u32();
+
+ // rb_vm_check_match is not leaf unless flag is VM_CHECKMATCH_TYPE_WHEN.
+ // See also: leafness_of_checkmatch() and check_match()
+ if flag != VM_CHECKMATCH_TYPE_WHEN {
+ jit_prepare_non_leaf_call(jit, asm);
+ }
+
+ let pattern = asm.stack_opnd(0);
+ let target = asm.stack_opnd(1);
+
+ extern "C" {
+ fn rb_vm_check_match(ec: EcPtr, target: VALUE, pattern: VALUE, num: u32) -> VALUE;
+ }
+ let result = asm.ccall(rb_vm_check_match as *const u8, vec![EC, target, pattern, flag.into()]);
+ asm.stack_pop(2); // Keep them on stack during ccall for GC
+
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, result);
+
+ Some(KeepCompiling)
+}
+
+// Push Qtrue or Qfalse depending on whether the given keyword was supplied by
+// the caller
+fn gen_checkkeyword(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // When a keyword is unspecified past index 32, a hash will be used
+ // instead. This can only happen in iseqs taking more than 32 keywords.
+ if unsafe { (*get_iseq_body_param_keyword(jit.iseq)).num >= VM_KW_SPECIFIED_BITS_MAX.try_into().unwrap() } {
+ return None;
+ }
+
+ // The EP offset to the undefined bits local
+ let bits_offset = jit.get_arg(0).as_i32();
+
+ // The index of the keyword we want to check
+ let index: i64 = jit.get_arg(1).as_i64();
+
+ // `unspecified_bits` is part of the local table. Therefore, we may allocate a register for
+ // that "local" when passing it as an argument. We must use that register, if allocated, to
+ // avoid loading stale bits from the stack. We assume that EP is not escaped as of entering
+ // a method with keyword arguments.
+ let bits_opnd = asm.local_opnd(bits_offset as u32);
+
+ // unsigned int b = (unsigned int)FIX2ULONG(kw_bits);
+ // if ((b & (0x01 << idx))) {
+ //
+ // We can skip the FIX2ULONG conversion by shifting the bit we test
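+ // (A fixnum encodes b as ((b << 1) | 1), so bit `index` of b lives at
+ // bit `index + 1` of the tagged value, and testing there is equivalent.)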
+ let bit_test: i64 = 0x01 << (index + 1);
+ asm.test(bits_opnd, Opnd::Imm(bit_test));
+ let ret_opnd = asm.csel_z(Qtrue.into(), Qfalse.into());
+
+ let stack_ret = asm.stack_push(Type::UnknownImm);
+ asm.mov(stack_ret, ret_opnd);
+
+ Some(KeepCompiling)
+}
+
+// Generate a jump to a stub that recompiles the current YARV instruction on failure.
+// When depth_limit is exceeded, generate a jump to a side exit.
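+// For example, each time a shape guard fails, a new version of the block is
+// compiled with chain depth + 1, until depth_limit versions exist.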
+fn jit_chain_guard(
+ jcc: JCCKinds,
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ depth_limit: u8,
+ counter: Counter,
+) {
+ let target0_gen_fn = match jcc {
+ JCC_JNE | JCC_JNZ => BranchGenFn::JNZToTarget0,
+ JCC_JZ | JCC_JE => BranchGenFn::JZToTarget0,
+ JCC_JBE | JCC_JNA => BranchGenFn::JBEToTarget0,
+ JCC_JB | JCC_JNAE => BranchGenFn::JBToTarget0,
+ JCC_JO_MUL => BranchGenFn::JOMulToTarget0,
+ };
+
+ if asm.ctx.get_chain_depth() < depth_limit {
+ // Rewind Context to use the stack_size at the beginning of this instruction.
+ let mut deeper = asm.ctx.with_stack_size(jit.stack_size_for_pc);
+ deeper.increment_chain_depth();
+ let bid = BlockId {
+ iseq: jit.iseq,
+ idx: jit.insn_idx,
+ };
+
+ jit.gen_branch(asm, bid, &deeper, None, None, target0_gen_fn);
+ } else {
+ target0_gen_fn.call(asm, Target::side_exit(counter), None);
+ }
+}
+
+// up to 8 different shapes for each getivar call site
+pub const GET_IVAR_MAX_DEPTH: u8 = 8;
+
+// up to 8 different shapes for each setivar call site
+pub const SET_IVAR_MAX_DEPTH: u8 = 8;
+
+// hashes and arrays
+pub const OPT_AREF_MAX_CHAIN_DEPTH: u8 = 2;
+
+// expandarray
+pub const EXPANDARRAY_MAX_CHAIN_DEPTH: u8 = 4;
+
+// up to 5 different methods for send
+pub const SEND_MAX_DEPTH: u8 = 5;
+
+// up to 20 different offsets for case-when
+pub const CASE_WHEN_MAX_DEPTH: u8 = 20;
+
+pub const MAX_SPLAT_LENGTH: i32 = 127;
+
+// Codegen for getting an instance variable.
+// Preconditions:
+// - receiver has the same class as CLASS_OF(comptime_receiver)
+// - no stack pushes or pops on ctx since entering codegen for the instruction being compiled
+fn gen_get_ivar(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ max_chain_depth: u8,
+ comptime_receiver: VALUE,
+ ivar_name: ID,
+ recv: Opnd,
+ recv_opnd: YARVOpnd,
+) -> Option<CodegenStatus> {
+ // If recv isn't already a register, load it.
+ let recv = match recv {
+ Opnd::InsnOut { .. } => recv,
+ _ => asm.load(recv),
+ };
+
+ // Check if the comptime receiver is a T_OBJECT
+ let receiver_t_object = unsafe { RB_TYPE_P(comptime_receiver, RUBY_T_OBJECT) };
+ // Use a general C call at the end of the guard chain to avoid exits on megamorphic shapes
+ let megamorphic = asm.ctx.get_chain_depth() >= max_chain_depth;
+ if megamorphic {
+ gen_counter_incr(jit, asm, Counter::num_getivar_megamorphic);
+ }
+
+ // NOTE: This assumes T_OBJECT can't ever have the same shape_id as any other type.
+ // too-complex shapes can't use index access, so we use rb_ivar_get for them too.
+ if !comptime_receiver.heap_object_p() || comptime_receiver.shape_too_complex() || megamorphic {
+ // General case. Call rb_ivar_get().
+ // VALUE rb_ivar_get(VALUE obj, ID id)
+ asm_comment!(asm, "call rb_ivar_get()");
+
+ // The function could raise RactorIsolationError.
+ jit_prepare_non_leaf_call(jit, asm);
+
+ let ivar_val = asm.ccall(rb_ivar_get as *const u8, vec![recv, Opnd::UImm(ivar_name)]);
+
+ if recv_opnd != SelfOpnd {
+ asm.stack_pop(1);
+ }
+
+ // Push the ivar on the stack
+ let out_opnd = asm.stack_push(Type::Unknown);
+ asm.mov(out_opnd, ivar_val);
+
+ // Jump to next instruction. This allows guard chains to share the same successor.
+ jump_to_next_insn(jit, asm);
+ return Some(EndBlock);
+ }
+
+ let ivar_index = unsafe {
+ let shape_id = comptime_receiver.shape_id_of();
+ let mut ivar_index: u16 = 0;
+ if rb_shape_get_iv_index(shape_id, ivar_name, &mut ivar_index) {
+ Some(ivar_index as usize)
+ } else {
+ None
+ }
+ };
+
+ // Guard heap object (recv_opnd must be used before stack_pop)
+ guard_object_is_heap(asm, recv, recv_opnd, Counter::getivar_not_heap);
+
+ let expected_shape = unsafe { rb_obj_shape_id(comptime_receiver) };
+ let shape_id_offset = unsafe { rb_shape_id_offset() };
+ let shape_opnd = Opnd::mem(SHAPE_ID_NUM_BITS as u8, recv, shape_id_offset);
+
+ asm_comment!(asm, "guard shape");
+ asm.cmp(shape_opnd, Opnd::UImm(expected_shape as u64));
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ max_chain_depth,
+ Counter::getivar_megamorphic,
+ );
+
+ // Pop receiver if it's on the temp stack
+ if recv_opnd != SelfOpnd {
+ asm.stack_pop(1);
+ }
+
+ match ivar_index {
+ // If there is no IVAR index, then the ivar was undefined
+ // when we entered the compiler. That means we can just return
+ // nil for this shape + iv name
+ None => {
+ let out_opnd = asm.stack_push(Type::Nil);
+ asm.mov(out_opnd, Qnil.into());
+ }
+ Some(ivar_index) => {
+ let ivar_opnd = if receiver_t_object {
+ if comptime_receiver.embedded_p() {
+ // See ROBJECT_FIELDS() from include/ruby/internal/core/robject.h
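+ // (Embedded objects store their ivars inline in the object slot;
+ // larger objects point to a separately allocated fields table.)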
+
+ // Load the variable
+ let offs = ROBJECT_OFFSET_AS_ARY as i32 + (ivar_index * SIZEOF_VALUE) as i32;
+ Opnd::mem(64, recv, offs)
+ } else {
+ // Compile time value is *not* embedded.
+
+ // Get a pointer to the extended table
+ let tbl_opnd = asm.load(Opnd::mem(64, recv, ROBJECT_OFFSET_AS_HEAP_FIELDS as i32));
+
+ // Read the ivar from the extended table
+ Opnd::mem(64, tbl_opnd, (SIZEOF_VALUE * ivar_index) as i32)
+ }
+ } else {
+ asm_comment!(asm, "call rb_ivar_get_at()");
+
+ if assume_single_ractor_mode(jit, asm) {
+ asm.ccall(rb_ivar_get_at_no_ractor_check as *const u8, vec![recv, Opnd::UImm((ivar_index as u32).into())])
+ } else {
+ // The function could raise RactorIsolationError.
+ jit_prepare_non_leaf_call(jit, asm);
+ asm.ccall(rb_ivar_get_at as *const u8, vec![recv, Opnd::UImm((ivar_index as u32).into()), Opnd::UImm(ivar_name)])
+ }
+ };
+
+ // Push the ivar on the stack
+ let out_opnd = asm.stack_push(Type::Unknown);
+ asm.mov(out_opnd, ivar_opnd);
+ }
+ }
+
+ // Jump to next instruction. This allows guard chains to share the same successor.
+ jump_to_next_insn(jit, asm);
+ Some(EndBlock)
+}
+
+fn gen_getinstancevariable(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Defer compilation so we can specialize on a runtime `self`
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let ivar_name = jit.get_arg(0).as_u64();
+
+ let comptime_val = jit.peek_at_self();
+
+ // Guard that the receiver has the same class as the one from compile time.
+ let self_asm_opnd = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF);
+
+ gen_get_ivar(
+ jit,
+ asm,
+ GET_IVAR_MAX_DEPTH,
+ comptime_val,
+ ivar_name,
+ self_asm_opnd,
+ SelfOpnd,
+ )
+}
+
+// Generate an IV write.
+// This function doesn't deal with writing the shape, or expanding an object
+// to use an IV buffer if necessary. That is the caller's responsibility.
+fn gen_write_iv(
+ asm: &mut Assembler,
+ comptime_receiver: VALUE,
+ recv: Opnd,
+ ivar_index: usize,
+ set_value: Opnd,
+ extension_needed: bool)
+{
+ // Compile time self is embedded and the ivar index lands within the object
+ let embed_test_result = comptime_receiver.embedded_p() && !extension_needed;
+
+ if embed_test_result {
+ // Find the IV offset
+ let offs = ROBJECT_OFFSET_AS_ARY as i32 + (ivar_index * SIZEOF_VALUE) as i32;
+ let ivar_opnd = Opnd::mem(64, recv, offs);
+
+ // Write the IV
+ asm_comment!(asm, "write IV");
+ asm.mov(ivar_opnd, set_value);
+ } else {
+ // Compile time value is *not* embedded.
+
+ // Get a pointer to the extended table
+ let tbl_opnd = asm.load(Opnd::mem(64, recv, ROBJECT_OFFSET_AS_HEAP_FIELDS as i32));
+
+ // Write the ivar into the extended table
+ let ivar_opnd = Opnd::mem(64, tbl_opnd, (SIZEOF_VALUE * ivar_index) as i32);
+
+ asm_comment!(asm, "write IV");
+ asm.mov(ivar_opnd, set_value);
+ }
+}
+
+fn gen_setinstancevariable(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Defer compilation so we can specialize on a runtime `self`
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let ivar_name = jit.get_arg(0).as_u64();
+ let ic = jit.get_arg(1).as_ptr();
+ let comptime_receiver = jit.peek_at_self();
+ gen_set_ivar(
+ jit,
+ asm,
+ comptime_receiver,
+ ivar_name,
+ SelfOpnd,
+ Some(ic),
+ )
+}
+
+/// Set an instance variable on setinstancevariable or attr_writer.
+/// It switches the behavior based on what recv_opnd is given.
+/// * SelfOpnd: setinstancevariable, which doesn't push a result onto the stack.
+/// * StackOpnd: attr_writer, which pushes a result onto the stack.
+fn gen_set_ivar(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ comptime_receiver: VALUE,
+ ivar_name: ID,
+ recv_opnd: YARVOpnd,
+ ic: Option<*const iseq_inline_iv_cache_entry>,
+) -> Option<CodegenStatus> {
+ // If the comptime receiver is frozen, writing an IV will raise an exception
+ // and we don't want to JIT code to deal with that situation.
+ if comptime_receiver.is_frozen() {
+ gen_counter_incr(jit, asm, Counter::setivar_frozen);
+ return None;
+ }
+
+ let stack_type = asm.ctx.get_opnd_type(StackOpnd(0));
+
+ // Check if the comptime receiver is a T_OBJECT
+ let receiver_t_object = unsafe { RB_TYPE_P(comptime_receiver, RUBY_T_OBJECT) };
+ // Use a general C call at the end of the guard chain to avoid exits on megamorphic shapes
+ let megamorphic = asm.ctx.get_chain_depth() >= SET_IVAR_MAX_DEPTH;
+ if megamorphic {
+ gen_counter_incr(jit, asm, Counter::num_setivar_megamorphic);
+ }
+
+ // Get the iv index
+ let shape_too_complex = comptime_receiver.shape_too_complex();
+ let ivar_index = if !comptime_receiver.special_const_p() && !shape_too_complex {
+ let shape_id = comptime_receiver.shape_id_of();
+ let mut ivar_index: u16 = 0;
+ if unsafe { rb_shape_get_iv_index(shape_id, ivar_name, &mut ivar_index) } {
+ Some(ivar_index as usize)
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+
+ // The current shape doesn't contain this iv, we need to transition to another shape.
+ let mut new_shape_too_complex = false;
+ let new_shape = if !shape_too_complex && receiver_t_object && ivar_index.is_none() {
+ let current_shape_id = comptime_receiver.shape_id_of();
+ // We don't need to check about imemo_fields here because we're definitely looking at a T_OBJECT.
+ let klass = unsafe { rb_obj_class(comptime_receiver) };
+ let next_shape_id = unsafe { rb_shape_transition_add_ivar_no_warnings(klass, current_shape_id, ivar_name) };
+
+ // If the VM ran out of shapes, or this class generated too many leaf shapes,
+ // it may be de-optimized into OBJ_TOO_COMPLEX_SHAPE (hash-table).
+ new_shape_too_complex = unsafe { rb_jit_shape_too_complex_p(next_shape_id) };
+ if new_shape_too_complex {
+ Some((next_shape_id, None, 0_usize))
+ } else {
+ let current_capacity = unsafe { rb_yjit_shape_capacity(current_shape_id) };
+ let next_capacity = unsafe { rb_yjit_shape_capacity(next_shape_id) };
+
+ // If the new shape has a different capacity, we'll have to reallocate
+ // the object's ivar buffer to extend it.
+ let needs_extension = next_capacity != current_capacity;
+
+ // We can write to the object, but we need to transition the shape
+ let ivar_index = unsafe { rb_yjit_shape_index(next_shape_id) } as usize;
+
+ let needs_extension = if needs_extension {
+ Some((current_capacity, next_capacity))
+ } else {
+ None
+ };
+ Some((next_shape_id, needs_extension, ivar_index))
+ }
+ } else {
+ None
+ };
+
+ // If the receiver isn't a T_OBJECT, then just write out the IV write as a function call.
+ // too-complex shapes can't use index access, so we fall back to a C call for them too.
+ if !receiver_t_object || shape_too_complex || new_shape_too_complex || megamorphic {
+ // The function could raise FrozenError.
+ // Note that this modifies REG_SP, which is why we do it first
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Get the operands from the stack
+ let val_opnd = asm.stack_opnd(0);
+
+ if let StackOpnd(index) = recv_opnd { // attr_writer
+ let recv = asm.stack_opnd(index as i32);
+ asm_comment!(asm, "call rb_vm_set_ivar_id()");
+ asm.ccall(
+ rb_vm_set_ivar_id as *const u8,
+ vec![
+ recv,
+ Opnd::UImm(ivar_name),
+ val_opnd,
+ ],
+ );
+ } else { // setinstancevariable
+ asm_comment!(asm, "call rb_vm_setinstancevariable()");
+ asm.ccall(
+ rb_vm_setinstancevariable as *const u8,
+ vec![
+ VALUE(jit.iseq as usize).into(),
+ Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF),
+ ivar_name.into(),
+ val_opnd,
+ Opnd::const_ptr(ic.unwrap() as *const u8),
+ ],
+ );
+ }
+ } else {
+ // Get the receiver
+ let mut recv = asm.load(if let StackOpnd(index) = recv_opnd {
+ asm.stack_opnd(index as i32)
+ } else {
+ Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF)
+ });
+
+ // Upgrade type
+ guard_object_is_heap(asm, recv, recv_opnd, Counter::setivar_not_heap);
+
+ let expected_shape = unsafe { rb_obj_shape_id(comptime_receiver) };
+ let shape_id_offset = unsafe { rb_shape_id_offset() };
+ let shape_opnd = Opnd::mem(SHAPE_ID_NUM_BITS as u8, recv, shape_id_offset);
+
+ asm_comment!(asm, "guard shape");
+ asm.cmp(shape_opnd, Opnd::UImm(expected_shape as u64));
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ SET_IVAR_MAX_DEPTH,
+ Counter::setivar_megamorphic,
+ );
+
+ let write_val;
+
+ match ivar_index {
+ // If we don't have an instance variable index, then we need to
+ // transition out of the current shape.
+ None => {
+ let (new_shape_id, needs_extension, ivar_index) = new_shape.unwrap();
+ if let Some((current_capacity, new_capacity)) = needs_extension {
+ // Generate the C call so that runtime code will increase
+ // the capacity and set the buffer.
+ asm_comment!(asm, "call rb_ensure_iv_list_size");
+
+ // It allocates, so it can trigger GC, which takes the VM lock
+ // and could therefore yield to a different ractor.
+ jit_prepare_call_with_gc(jit, asm);
+ asm.ccall(rb_ensure_iv_list_size as *const u8,
+ vec![
+ recv,
+ Opnd::UImm(current_capacity.into()),
+ Opnd::UImm(new_capacity.into())
+ ]
+ );
+
+ // Load the receiver again after the function call
+ recv = asm.load(if let StackOpnd(index) = recv_opnd {
+ asm.stack_opnd(index as i32)
+ } else {
+ Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF)
+ });
+ }
+
+ write_val = asm.stack_opnd(0);
+ gen_write_iv(asm, comptime_receiver, recv, ivar_index, write_val, needs_extension.is_some());
+
+ asm_comment!(asm, "write shape");
+
+ let shape_id_offset = unsafe { rb_shape_id_offset() };
+ let shape_opnd = Opnd::mem(SHAPE_ID_NUM_BITS as u8, recv, shape_id_offset);
+
+ // Store the new shape
+ asm.store(shape_opnd, Opnd::UImm(new_shape_id as u64));
+ },
+
+ Some(ivar_index) => {
+ // If the iv index already exists, then we don't need to
+ // transition to a new shape. The reason is because we find
+ // the iv index by searching up the shape tree. If we've
+ // made the transition already, then there's no reason to
+ // update the shape on the object. Just set the IV.
+ write_val = asm.stack_opnd(0);
+ gen_write_iv(asm, comptime_receiver, recv, ivar_index, write_val, false);
+ },
+ }
+
+ // If we know the stack value is an immediate, there's no need to
+ // generate WB code.
+ if !stack_type.is_imm() {
+ asm.spill_regs(); // for ccall (unconditionally spill them for RegMappings consistency)
+ let skip_wb = asm.new_label("skip_wb");
+ // If the value we're writing is an immediate, we don't need to WB
+ asm.test(write_val, (RUBY_IMMEDIATE_MASK as u64).into());
+ asm.jnz(skip_wb);
+
+ // If the value we're writing is nil or false, we don't need to WB
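+ // (Qfalse and Qnil are the lowest VALUE constants, below any heap pointer,
+ // so one unsigned `<= Qnil` compare covers both.)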
+ asm.cmp(write_val, Qnil.into());
+ asm.jbe(skip_wb);
+
+ asm_comment!(asm, "write barrier");
+ asm.ccall(
+ rb_gc_writebarrier as *const u8,
+ vec![
+ recv,
+ write_val,
+ ]
+ );
+
+ asm.write_label(skip_wb);
+ }
+ }
+ let write_val = asm.stack_pop(1); // Keep write_val on stack during ccall for GC
+
+ // If it's attr_writer, i.e. recv_opnd is StackOpnd, we need to pop
+ // the receiver and push the written value onto the stack.
+ if let StackOpnd(_) = recv_opnd {
+ asm.stack_pop(1); // Pop receiver
+
+ let out_opnd = asm.stack_push(Type::Unknown); // Push a return value
+ asm.mov(out_opnd, write_val);
+ }
+
+ Some(KeepCompiling)
+}
+
+fn gen_defined(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let op_type = jit.get_arg(0).as_u64();
+ let obj = jit.get_arg(1);
+ let pushval = jit.get_arg(2);
+
+ match op_type as u32 {
+ DEFINED_YIELD => {
+ asm.stack_pop(1); // v operand is not used
+ let out_opnd = asm.stack_push(Type::Unknown); // nil or "yield"
+
+ gen_block_given(jit, asm, out_opnd, pushval.into(), Qnil.into());
+ }
+ _ => {
+ // Save the PC and SP because the callee may allocate or call #respond_to?
+ // Note that this modifies REG_SP, which is why we do it first
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Get the operands from the stack
+ let v_opnd = asm.stack_opnd(0);
+
+ // Call vm_defined(ec, reg_cfp, op_type, obj, v)
+ let def_result = asm.ccall(rb_vm_defined as *const u8, vec![EC, CFP, op_type.into(), obj.into(), v_opnd]);
+ asm.stack_pop(1); // Keep it on stack during ccall for GC
+
+ // if (vm_defined(ec, GET_CFP(), op_type, obj, v)) {
+ // val = pushval;
+ // }
+ asm.test(def_result, Opnd::UImm(255));
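+ // (the result is a C bool, so only the low byte is significant; hence the 255 mask)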
+ let out_value = asm.csel_nz(pushval.into(), Qnil.into());
+
+ // Push the return value onto the stack
+ let out_type = if pushval.special_const_p() {
+ Type::UnknownImm
+ } else {
+ Type::Unknown
+ };
+ let stack_ret = asm.stack_push(out_type);
+ asm.mov(stack_ret, out_value);
+ }
+ }
+
+ Some(KeepCompiling)
+}
+
+fn gen_definedivar(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Defer compilation so we can specialize based on a runtime receiver
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let ivar_name = jit.get_arg(0).as_u64();
+ // Value that will be pushed on the stack if the ivar is defined. In practice this is always the
+ // string "instance-variable". If the ivar is not defined, nil will be pushed instead.
+ let pushval = jit.get_arg(2);
+
+ // Get the receiver
+ let recv = asm.load(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF));
+
+ // Specialize based on compile-time values
+ let comptime_receiver = jit.peek_at_self();
+
+ if comptime_receiver.special_const_p() || comptime_receiver.shape_too_complex() || asm.ctx.get_chain_depth() >= GET_IVAR_MAX_DEPTH {
+ // Fall back to calling rb_ivar_defined
+
+ // Save the PC and SP because the callee may allocate
+ // Note that this modifies REG_SP, which is why we do it first
+ jit_prepare_call_with_gc(jit, asm);
+
+ // Call rb_ivar_defined(recv, ivar_name)
+ let def_result = asm.ccall(rb_ivar_defined as *const u8, vec![recv, ivar_name.into()]);
+
+ // if (rb_ivar_defined(recv, ivar_name)) {
+ // val = pushval;
+ // }
+ asm.test(def_result, Opnd::UImm(255));
+ let out_value = asm.csel_nz(pushval.into(), Qnil.into());
+
+ // Push the return value onto the stack
+ let out_type = if pushval.special_const_p() { Type::UnknownImm } else { Type::Unknown };
+ let stack_ret = asm.stack_push(out_type);
+ asm.mov(stack_ret, out_value);
+
+ return Some(KeepCompiling)
+ }
+
+ let shape_id = comptime_receiver.shape_id_of();
+ let ivar_exists = unsafe {
+ let mut ivar_index: u16 = 0;
+ rb_shape_get_iv_index(shape_id, ivar_name, &mut ivar_index)
+ };
+
+ // Guard heap object (recv_opnd must be used before stack_pop)
+ guard_object_is_heap(asm, recv, SelfOpnd, Counter::definedivar_not_heap);
+
+ let shape_id_offset = unsafe { rb_shape_id_offset() };
+ let shape_opnd = Opnd::mem(SHAPE_ID_NUM_BITS as u8, recv, shape_id_offset);
+
+ asm_comment!(asm, "guard shape");
+ asm.cmp(shape_opnd, Opnd::UImm(shape_id as u64));
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ GET_IVAR_MAX_DEPTH,
+ Counter::definedivar_megamorphic,
+ );
+
+ let result = if ivar_exists { pushval } else { Qnil };
+ jit_putobject(asm, result);
+
+ // Jump to next instruction. This allows guard chains to share the same successor.
+ return jump_to_next_insn(jit, asm);
+}
+
+fn gen_checktype(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let type_val = jit.get_arg(0).as_u32();
+
+ // Only three types are emitted by compile.c at the moment
+ if let RUBY_T_STRING | RUBY_T_ARRAY | RUBY_T_HASH = type_val {
+ let val_type = asm.ctx.get_opnd_type(StackOpnd(0));
+ let val = asm.stack_pop(1);
+
+ // Check if we know from type information
+ match val_type.known_value_type() {
+ Some(value_type) => {
+ if value_type == type_val {
+ jit_putobject(asm, Qtrue);
+ return Some(KeepCompiling);
+ } else {
+ jit_putobject(asm, Qfalse);
+ return Some(KeepCompiling);
+ }
+ },
+ _ => (),
+ }
+
+ let ret = asm.new_label("ret");
+
+ let val = asm.load(val);
+ if !val_type.is_heap() {
+ // if (SPECIAL_CONST_P(val)) {
+ // Return Qfalse via REG1 if not on heap
+ asm.test(val, (RUBY_IMMEDIATE_MASK as u64).into());
+ asm.jnz(ret);
+ asm.cmp(val, Qfalse.into());
+ asm.je(ret);
+ }
+
+ // Check type on object
+ let object_type = asm.and(
+ Opnd::mem(64, val, RUBY_OFFSET_RBASIC_FLAGS),
+ Opnd::UImm(RUBY_T_MASK.into()));
+ asm.cmp(object_type, Opnd::UImm(type_val.into()));
+ let ret_opnd = asm.csel_e(Qtrue.into(), Qfalse.into());
+
+ asm.write_label(ret);
+ let stack_ret = asm.stack_push(Type::UnknownImm);
+ asm.mov(stack_ret, ret_opnd);
+
+ Some(KeepCompiling)
+ } else {
+ None
+ }
+}
+
+fn gen_concatstrings(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let n = jit.get_arg(0).as_usize();
+
+ // rb_str_concat_literals may raise Encoding::CompatibilityError
+ jit_prepare_non_leaf_call(jit, asm);
+
+ let values_ptr = asm.lea(asm.ctx.sp_opnd(-(n as i32)));
+
+ // call rb_str_concat_literals(size_t n, const VALUE *strings);
+ let return_value = asm.ccall(
+ rb_str_concat_literals as *const u8,
+ vec![n.into(), values_ptr]
+ );
+
+ asm.stack_pop(n);
+ let stack_ret = asm.stack_push(Type::TString);
+ asm.mov(stack_ret, return_value);
+
+ Some(KeepCompiling)
+}
+
+fn guard_two_fixnums(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) {
+ let counter = Counter::guard_send_not_fixnums;
+
+ // Get stack operands without popping them
+ let arg1 = asm.stack_opnd(0);
+ let arg0 = asm.stack_opnd(1);
+
+ // Get the stack operand types
+ let arg1_type = asm.ctx.get_opnd_type(arg1.into());
+ let arg0_type = asm.ctx.get_opnd_type(arg0.into());
+
+ if arg0_type.is_heap() || arg1_type.is_heap() {
+ asm_comment!(asm, "arg is heap object");
+ asm.jmp(Target::side_exit(counter));
+ return;
+ }
+
+ if arg0_type != Type::Fixnum && arg0_type.is_specific() {
+ asm_comment!(asm, "arg0 not fixnum");
+ asm.jmp(Target::side_exit(counter));
+ return;
+ }
+
+ if arg1_type != Type::Fixnum && arg1_type.is_specific() {
+ asm_comment!(asm, "arg1 not fixnum");
+ asm.jmp(Target::side_exit(counter));
+ return;
+ }
+
+ assert!(!arg0_type.is_heap());
+ assert!(!arg1_type.is_heap());
+ assert!(arg0_type == Type::Fixnum || arg0_type.is_unknown());
+ assert!(arg1_type == Type::Fixnum || arg1_type.is_unknown());
+
+ // If not fixnums at run-time, fall back
+ if arg0_type != Type::Fixnum {
+ asm_comment!(asm, "guard arg0 fixnum");
+ asm.test(arg0, Opnd::UImm(RUBY_FIXNUM_FLAG as u64));
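+ // (Fixnums have bit 0 set, so a zero test result means not a fixnum)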
+
+ jit_chain_guard(
+ JCC_JZ,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ counter,
+ );
+ }
+ if arg1_type != Type::Fixnum {
+ asm_comment!(asm, "guard arg1 fixnum");
+ asm.test(arg1, Opnd::UImm(RUBY_FIXNUM_FLAG as u64));
+
+ jit_chain_guard(
+ JCC_JZ,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ counter,
+ );
+ }
+
+ // Set stack types in context
+ asm.ctx.upgrade_opnd_type(arg1.into(), Type::Fixnum);
+ asm.ctx.upgrade_opnd_type(arg0.into(), Type::Fixnum);
+}
+
+// Conditional move operation used by comparison operators
+type CmovFn = fn(cb: &mut Assembler, opnd0: Opnd, opnd1: Opnd) -> Opnd;
+
+fn gen_fixnum_cmp(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ cmov_op: CmovFn,
+ bop: ruby_basic_operators,
+) -> Option<CodegenStatus> {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
+ Some(two_fixnums) => two_fixnums,
+ None => {
+ // Defer compilation so we can specialize based on a runtime receiver
+ return jit.defer_compilation(asm);
+ }
+ };
+
+ if two_fixnums {
+ if !assume_bop_not_redefined(jit, asm, INTEGER_REDEFINED_OP_FLAG, bop) {
+ return None;
+ }
+
+ // Check that both operands are fixnums
+ guard_two_fixnums(jit, asm);
+
+ // Get the operands from the stack
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
+
+ // Compare the arguments
+ asm.cmp(arg0, arg1);
+ let bool_opnd = cmov_op(asm, Qtrue.into(), Qfalse.into());
+
+ // Push the output on the stack
+ let dst = asm.stack_push(Type::UnknownImm);
+ asm.mov(dst, bool_opnd);
+
+ Some(KeepCompiling)
+ } else {
+ gen_opt_send_without_block(jit, asm)
+ }
+}
+
+fn gen_opt_lt(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ gen_fixnum_cmp(jit, asm, Assembler::csel_l, BOP_LT)
+}
+
+fn gen_opt_le(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ gen_fixnum_cmp(jit, asm, Assembler::csel_le, BOP_LE)
+}
+
+fn gen_opt_ge(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ gen_fixnum_cmp(jit, asm, Assembler::csel_ge, BOP_GE)
+}
+
+fn gen_opt_gt(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ gen_fixnum_cmp(jit, asm, Assembler::csel_g, BOP_GT)
+}
+
+// Implements specialized equality for either two fixnums or two strings.
+// Returns None if not enough type information is available, Some(true)
+// if code was generated, otherwise Some(false).
+fn gen_equality_specialized(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ gen_eq: bool,
+) -> Option<bool> {
+ let a_opnd = asm.stack_opnd(1);
+ let b_opnd = asm.stack_opnd(0);
+
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
+ Some(two_fixnums) => two_fixnums,
+ None => return None,
+ };
+
+ if two_fixnums {
+ if !assume_bop_not_redefined(jit, asm, INTEGER_REDEFINED_OP_FLAG, BOP_EQ) {
+ // if overridden, emit the generic version
+ return Some(false);
+ }
+
+ guard_two_fixnums(jit, asm);
+
+ asm.cmp(a_opnd, b_opnd);
+ let val = if gen_eq {
+ asm.csel_e(Qtrue.into(), Qfalse.into())
+ } else {
+ asm.csel_ne(Qtrue.into(), Qfalse.into())
+ };
+
+ // Push the output on the stack
+ asm.stack_pop(2);
+ let dst = asm.stack_push(Type::UnknownImm);
+ asm.mov(dst, val);
+
+ return Some(true);
+ }
+
+ if !jit.at_compile_target() {
+ return None;
+ }
+ let comptime_a = jit.peek_at_stack(&asm.ctx, 1);
+ let comptime_b = jit.peek_at_stack(&asm.ctx, 0);
+
+ if unsafe { comptime_a.class_of() == rb_cString && comptime_b.class_of() == rb_cString } {
+ if !assume_bop_not_redefined(jit, asm, STRING_REDEFINED_OP_FLAG, BOP_EQ) {
+ // if overridden, emit the generic version
+ return Some(false);
+ }
+
+ // Guard that a is a String
+ jit_guard_known_klass(
+ jit,
+ asm,
+ a_opnd,
+ a_opnd.into(),
+ comptime_a,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_not_string,
+ );
+
+ let equal = asm.new_label("equal");
+ let ret = asm.new_label("ret");
+
+ // Spill for ccall. For safety, unconditionally spill temps before branching.
+ asm.spill_regs();
+
+ // If they are equal by identity, return true
+ asm.cmp(a_opnd, b_opnd);
+ asm.je(equal);
+
+ // Otherwise guard that b is a T_STRING (from type info) or String (from runtime guard)
+ let btype = asm.ctx.get_opnd_type(b_opnd.into());
+ if btype.known_value_type() != Some(RUBY_T_STRING) {
+ // Note: any T_STRING is valid here, but we check for a ::String for simplicity
+ // To pass a mutable static variable (rb_cString) requires an unsafe block
+ jit_guard_known_klass(
+ jit,
+ asm,
+ b_opnd,
+ b_opnd.into(),
+ comptime_b,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_not_string,
+ );
+ }
+
+ // Call rb_str_eql_internal(a, b)
+ let val = asm.ccall(
+ if gen_eq { rb_str_eql_internal } else { rb_str_neq_internal } as *const u8,
+ vec![a_opnd, b_opnd],
+ );
+
+ // Push the output on the stack
+ asm.stack_pop(2);
+ let dst = asm.stack_push(Type::UnknownImm);
+ asm.mov(dst, val);
+ asm.jmp(ret);
+
+ asm.write_label(equal);
+ asm.mov(dst, if gen_eq { Qtrue } else { Qfalse }.into());
+
+ asm.write_label(ret);
+
+ Some(true)
+ } else {
+ Some(false)
+ }
+}
+
+fn gen_opt_eq(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let specialized = match gen_equality_specialized(jit, asm, true) {
+ Some(specialized) => specialized,
+ None => {
+ // Defer compilation so we can specialize based on a runtime receiver
+ return jit.defer_compilation(asm);
+ }
+ };
+
+ if specialized {
+ jump_to_next_insn(jit, asm)
+ } else {
+ gen_opt_send_without_block(jit, asm)
+ }
+}
+
+fn gen_opt_neq(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // opt_neq is passed two rb_call_data as arguments:
+ // first for ==, second for !=
+ let cd = jit.get_arg(1).as_ptr();
+ perf_call! { gen_send_general(jit, asm, cd, None) }
+}
+
+fn gen_opt_aref(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let cd: *const rb_call_data = jit.get_arg(0).as_ptr();
+ let argc = unsafe { vm_ci_argc((*cd).ci) };
+
+ // Only JIT one arg calls like `ary[6]`
+ if argc != 1 {
+ gen_counter_incr(jit, asm, Counter::opt_aref_argc_not_one);
+ return None;
+ }
+
+ // Defer compilation so we can specialize based on a runtime receiver
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ // Specialize based on compile-time values
+ let comptime_idx = jit.peek_at_stack(&asm.ctx, 0);
+ let comptime_recv = jit.peek_at_stack(&asm.ctx, 1);
+
+ if comptime_recv.class_of() == unsafe { rb_cArray } && comptime_idx.fixnum_p() {
+ if !assume_bop_not_redefined(jit, asm, ARRAY_REDEFINED_OP_FLAG, BOP_AREF) {
+ return None;
+ }
+
+ // Get the stack operands
+ let idx_opnd = asm.stack_opnd(0);
+ let recv_opnd = asm.stack_opnd(1);
+
+ // Guard that the receiver is an ::Array
+ // BOP_AREF check above is only good for ::Array.
+ jit_guard_known_klass(
+ jit,
+ asm,
+ recv_opnd,
+ recv_opnd.into(),
+ comptime_recv,
+ OPT_AREF_MAX_CHAIN_DEPTH,
+ Counter::opt_aref_not_array,
+ );
+
+ // Bail if idx is not a FIXNUM
+ let idx_reg = asm.load(idx_opnd);
+ asm.test(idx_reg, (RUBY_FIXNUM_FLAG as u64).into());
+ asm.jz(Target::side_exit(Counter::opt_aref_arg_not_fixnum));
+
+ // Call VALUE rb_ary_entry_internal(VALUE ary, long offset).
+ // It never raises or allocates, so we don't need to write to cfp->pc.
+ {
+ // Pop the argument and the receiver
+ asm.stack_pop(2);
+
+ let idx_reg = asm.rshift(idx_reg, Opnd::UImm(1)); // Convert fixnum to int
+ let val = asm.ccall(rb_ary_entry_internal as *const u8, vec![recv_opnd, idx_reg]);
+
+ // Push the return value onto the stack
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val);
+ }
+
+ // Jump to next instruction. This allows guard chains to share the same successor.
+ return jump_to_next_insn(jit, asm);
+ } else if comptime_recv.class_of() == unsafe { rb_cHash } {
+ if !assume_bop_not_redefined(jit, asm, HASH_REDEFINED_OP_FLAG, BOP_AREF) {
+ return None;
+ }
+
+ let recv_opnd = asm.stack_opnd(1);
+
+ // Guard that the receiver is a hash
+ jit_guard_known_klass(
+ jit,
+ asm,
+ recv_opnd,
+ recv_opnd.into(),
+ comptime_recv,
+ OPT_AREF_MAX_CHAIN_DEPTH,
+ Counter::opt_aref_not_hash,
+ );
+
+ // Prepare to call rb_hash_aref(). It might call #hash on the key.
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Call rb_hash_aref
+ let key_opnd = asm.stack_opnd(0);
+ let recv_opnd = asm.stack_opnd(1);
+ let val = asm.ccall(rb_hash_aref as *const u8, vec![recv_opnd, key_opnd]);
+
+ // Pop the key and the receiver
+ asm.stack_pop(2);
+
+ // Push the return value onto the stack
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val);
+
+ // Jump to next instruction. This allows guard chains to share the same successor.
+ jump_to_next_insn(jit, asm)
+ } else {
+ // General case. Call the [] method.
+ gen_opt_send_without_block(jit, asm)
+ }
+}
+
+fn gen_opt_aset(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Defer compilation so we can specialize on the runtime receiver and key
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let comptime_recv = jit.peek_at_stack(&asm.ctx, 2);
+ let comptime_key = jit.peek_at_stack(&asm.ctx, 1);
+
+ // Get the operands from the stack
+ let recv = asm.stack_opnd(2);
+ let key = asm.stack_opnd(1);
+ let _val = asm.stack_opnd(0);
+
+ if comptime_recv.class_of() == unsafe { rb_cArray } && comptime_key.fixnum_p() {
+ // Guard receiver is an Array
+ jit_guard_known_klass(
+ jit,
+ asm,
+ recv,
+ recv.into(),
+ comptime_recv,
+ SEND_MAX_DEPTH,
+ Counter::opt_aset_not_array,
+ );
+
+ // Guard key is a fixnum
+ jit_guard_known_klass(
+ jit,
+ asm,
+ key,
+ key.into(),
+ comptime_key,
+ SEND_MAX_DEPTH,
+ Counter::opt_aset_not_fixnum,
+ );
+
+ // We might allocate or raise
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Call rb_ary_store
+ let recv = asm.stack_opnd(2);
+ let key = asm.load(asm.stack_opnd(1));
+ let key = asm.rshift(key, Opnd::UImm(1)); // FIX2LONG(key)
+ let val = asm.stack_opnd(0);
+ asm.ccall(rb_ary_store as *const u8, vec![recv, key, val]);
+
+ // rb_ary_store returns void
+ // stored value should still be on stack
+ let val = asm.load(asm.stack_opnd(0));
+
+ // Push the return value onto the stack
+ asm.stack_pop(3);
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val);
+
+ return jump_to_next_insn(jit, asm)
+ } else if comptime_recv.class_of() == unsafe { rb_cHash } {
+ // Guard receiver is a Hash
+ jit_guard_known_klass(
+ jit,
+ asm,
+ recv,
+ recv.into(),
+ comptime_recv,
+ SEND_MAX_DEPTH,
+ Counter::opt_aset_not_hash,
+ );
+
+ // We might allocate or raise
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Call rb_hash_aset
+ let recv = asm.stack_opnd(2);
+ let key = asm.stack_opnd(1);
+ let val = asm.stack_opnd(0);
+ let ret = asm.ccall(rb_hash_aset as *const u8, vec![recv, key, val]);
+
+ // Push the return value onto the stack
+ asm.stack_pop(3);
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, ret);
+
+ jump_to_next_insn(jit, asm)
+ } else {
+ gen_opt_send_without_block(jit, asm)
+ }
+}
+
+fn gen_opt_and(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
+ Some(two_fixnums) => two_fixnums,
+ None => {
+ // Defer compilation so we can specialize on the runtime operands
+ return jit.defer_compilation(asm);
+ }
+ };
+
+ if two_fixnums {
+ if !assume_bop_not_redefined(jit, asm, INTEGER_REDEFINED_OP_FLAG, BOP_AND) {
+ return None;
+ }
+
+ // Check that both operands are fixnums
+ guard_two_fixnums(jit, asm);
+
+ // Get the operands and destination from the stack
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
+
+ // Do the bitwise and arg0 & arg1
+ let val = asm.and(arg0, arg1);
+
+ // Push the output on the stack
+ let dst = asm.stack_push(Type::Fixnum);
+ asm.mov(dst, val);
+
+ Some(KeepCompiling)
+ } else {
+ // Delegate to send, call the method on the recv
+ gen_opt_send_without_block(jit, asm)
+ }
+}
+
+fn gen_opt_or(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
+ Some(two_fixnums) => two_fixnums,
+ None => {
+ // Defer compilation so we can specialize on the runtime operands
+ return jit.defer_compilation(asm);
+ }
+ };
+
+ if two_fixnums {
+ if !assume_bop_not_redefined(jit, asm, INTEGER_REDEFINED_OP_FLAG, BOP_OR) {
+ return None;
+ }
+
+ // Check that both operands are fixnums
+ guard_two_fixnums(jit, asm);
+
+ // Get the operands and destination from the stack
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
+
+ // Do the bitwise or arg0 | arg1
+ let val = asm.or(arg0, arg1);
+
+ // Push the output on the stack
+ let dst = asm.stack_push(Type::Fixnum);
+ asm.mov(dst, val);
+
+ Some(KeepCompiling)
+ } else {
+ // Delegate to send, call the method on the recv
+ gen_opt_send_without_block(jit, asm)
+ }
+}
+
+fn gen_opt_minus(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
+ Some(two_fixnums) => two_fixnums,
+ None => {
+ // Defer compilation so we can specialize on the runtime operands
+ return jit.defer_compilation(asm);
+ }
+ };
+
+ if two_fixnums {
+ if !assume_bop_not_redefined(jit, asm, INTEGER_REDEFINED_OP_FLAG, BOP_MINUS) {
+ return None;
+ }
+
+ // Check that both operands are fixnums
+ guard_two_fixnums(jit, asm);
+
+ // Get the operands and destination from the stack
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
+
+ // Subtract arg0 - arg1 and test for overflow
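+ // (Tag arithmetic: with fixnums encoded as 2n+1, (2a+1) - (2b+1) == 2(a-b),
+ // so adding 1 back retags the difference as a fixnum.)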
+ let val_untag = asm.sub(arg0, arg1);
+ asm.jo(Target::side_exit(Counter::opt_minus_overflow));
+ let val = asm.add(val_untag, Opnd::Imm(1));
+
+ // Push the output on the stack
+ let dst = asm.stack_push(Type::Fixnum);
+ asm.mov(dst, val);
+
+ Some(KeepCompiling)
+ } else {
+ // Delegate to send, call the method on the recv
+ gen_opt_send_without_block(jit, asm)
+ }
+}
+
+fn gen_opt_mult(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
+ Some(two_fixnums) => two_fixnums,
+ None => {
+ return jit.defer_compilation(asm);
+ }
+ };
+
+ // Fallback to a method call if it overflows
+ if two_fixnums && asm.ctx.get_chain_depth() == 0 {
+ if !assume_bop_not_redefined(jit, asm, INTEGER_REDEFINED_OP_FLAG, BOP_MULT) {
+ return None;
+ }
+
+ // Check that both operands are fixnums
+ guard_two_fixnums(jit, asm);
+
+ // Get the operands from the stack
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
+
+ // Do some bitwise gymnastics to handle tag bits
+ // x * y is translated to (x >> 1) * (y - 1) + 1
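+ // (With X = 2x+1 and Y = 2y+1: (X >> 1) * (Y - 1) + 1 == x * 2y + 1
+ // == 2(x*y) + 1, which is exactly the fixnum encoding of x * y.)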
+ let arg0_untag = asm.rshift(arg0, Opnd::UImm(1));
+ let arg1_untag = asm.sub(arg1, Opnd::UImm(1));
+ let out_val = asm.mul(arg0_untag, arg1_untag);
+ jit_chain_guard(JCC_JO_MUL, jit, asm, 1, Counter::opt_mult_overflow);
+ let out_val = asm.add(out_val, Opnd::UImm(1));
+
+ // Push the output on the stack
+ let dst = asm.stack_push(Type::Fixnum);
+ asm.mov(dst, out_val);
+
+ Some(KeepCompiling)
+ } else {
+ gen_opt_send_without_block(jit, asm)
+ }
+}
+
+fn gen_opt_div(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Delegate to send, call the method on the recv
+ gen_opt_send_without_block(jit, asm)
+}
+
+fn gen_opt_mod(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
+ Some(two_fixnums) => two_fixnums,
+ None => {
+ // Defer compilation so we can specialize on the runtime operands
+ return jit.defer_compilation(asm);
+ }
+ };
+
+ if two_fixnums {
+ if !assume_bop_not_redefined(jit, asm, INTEGER_REDEFINED_OP_FLAG, BOP_MOD) {
+ return None;
+ }
+
+ // Check that both operands are fixnums
+ guard_two_fixnums(jit, asm);
+
+ // Get the operands and destination from the stack
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
+
+ // Check for arg0 % 0
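+ // (fixnum zero is the tagged VALUE 1, hence the conversion below)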
+ asm.cmp(arg1, Opnd::Imm(VALUE::fixnum_from_usize(0).as_i64()));
+ asm.je(Target::side_exit(Counter::opt_mod_zero));
+
+ // Call rb_fix_mod_fix(VALUE recv, VALUE obj)
+ let ret = asm.ccall(rb_fix_mod_fix as *const u8, vec![arg0, arg1]);
+
+ // Push the return value onto the stack
+ // When the two arguments are fixnums, the modulo output is always a fixnum
+ let stack_ret = asm.stack_push(Type::Fixnum);
+ asm.mov(stack_ret, ret);
+
+ Some(KeepCompiling)
+ } else {
+ // Delegate to send, call the method on the recv
+ gen_opt_send_without_block(jit, asm)
+ }
+}
+
+fn gen_opt_ltlt(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Delegate to send, call the method on the recv
+ gen_opt_send_without_block(jit, asm)
+}
+
+fn gen_opt_nil_p(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Delegate to send, call the method on the recv
+ gen_opt_send_without_block(jit, asm)
+}
+
+fn gen_opt_empty_p(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Delegate to send, call the method on the recv
+ gen_opt_send_without_block(jit, asm)
+}
+
+fn gen_opt_succ(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Delegate to send, call the method on the recv
+ gen_opt_send_without_block(jit, asm)
+}
+
+fn gen_opt_str_freeze(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ if !assume_bop_not_redefined(jit, asm, STRING_REDEFINED_OP_FLAG, BOP_FREEZE) {
+ return None;
+ }
+
+ let str = jit.get_arg(0);
+
+ // Push the return value onto the stack
+ let stack_ret = asm.stack_push(Type::CString);
+ asm.mov(stack_ret, str.into());
+
+ Some(KeepCompiling)
+}
+
+fn gen_opt_ary_freeze(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ if !assume_bop_not_redefined(jit, asm, ARRAY_REDEFINED_OP_FLAG, BOP_FREEZE) {
+ return None;
+ }
+
+ let ary = jit.get_arg(0);
+
+ // Push the return value onto the stack
+ let stack_ret = asm.stack_push(Type::CArray);
+ asm.mov(stack_ret, ary.into());
+
+ Some(KeepCompiling)
+}
+
+fn gen_opt_hash_freeze(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ if !assume_bop_not_redefined(jit, asm, HASH_REDEFINED_OP_FLAG, BOP_FREEZE) {
+ return None;
+ }
+
+ let hash = jit.get_arg(0);
+
+ // Push the return value onto the stack
+ let stack_ret = asm.stack_push(Type::CHash);
+ asm.mov(stack_ret, hash.into());
+
+ Some(KeepCompiling)
+}
+
+fn gen_opt_str_uminus(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ if !assume_bop_not_redefined(jit, asm, STRING_REDEFINED_OP_FLAG, BOP_UMINUS) {
+ return None;
+ }
+
+ let str = jit.get_arg(0);
+
+ // Push the return value onto the stack
+ let stack_ret = asm.stack_push(Type::CString);
+ asm.mov(stack_ret, str.into());
+
+ Some(KeepCompiling)
+}
+
+fn gen_opt_newarray_max(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let num = jit.get_arg(0).as_u32();
+
+ // Save the PC and SP because we may call #max
+ jit_prepare_non_leaf_call(jit, asm);
+
+ extern "C" {
+ fn rb_vm_opt_newarray_max(ec: EcPtr, num: u32, elts: *const VALUE) -> VALUE;
+ }
+
+ let values_opnd = asm.ctx.sp_opnd(-(num as i32));
+ let values_ptr = asm.lea(values_opnd);
+
+ let val_opnd = asm.ccall(
+ rb_vm_opt_newarray_max as *const u8,
+ vec![
+ EC,
+ num.into(),
+ values_ptr
+ ],
+ );
+
+ asm.stack_pop(num.as_usize());
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn gen_opt_duparray_send(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let method = jit.get_arg(1).as_u64();
+
+ if method == ID!(include_p) {
+ gen_opt_duparray_send_include_p(jit, asm)
+ } else {
+ None
+ }
+}
+
+fn gen_opt_duparray_send_include_p(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ asm_comment!(asm, "opt_duparray_send include_p");
+
+ let ary = jit.get_arg(0);
+ let argc = jit.get_arg(2).as_usize();
+
+ // Save the PC and SP because we may call #include?
+ jit_prepare_non_leaf_call(jit, asm);
+
+ extern "C" {
+ fn rb_vm_opt_duparray_include_p(ec: EcPtr, ary: VALUE, target: VALUE) -> VALUE;
+ }
+
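+ // The include? target is the value on top of the stack (sp - 1). It stays
+ // on the stack across the ccall so the GC can still mark it.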
+ let target = asm.ctx.sp_opnd(-1);
+
+ let val_opnd = asm.ccall(
+ rb_vm_opt_duparray_include_p as *const u8,
+ vec![
+ EC,
+ ary.into(),
+ target,
+ ],
+ );
+
+ asm.stack_pop(argc);
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn gen_opt_newarray_send(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let method = jit.get_arg(1).as_u32();
+
+ if method == VM_OPT_NEWARRAY_SEND_MIN {
+ gen_opt_newarray_min(jit, asm)
+ } else if method == VM_OPT_NEWARRAY_SEND_MAX {
+ gen_opt_newarray_max(jit, asm)
+ } else if method == VM_OPT_NEWARRAY_SEND_HASH {
+ gen_opt_newarray_hash(jit, asm)
+ } else if method == VM_OPT_NEWARRAY_SEND_INCLUDE_P {
+ gen_opt_newarray_include_p(jit, asm)
+ } else if method == VM_OPT_NEWARRAY_SEND_PACK {
+ gen_opt_newarray_pack_buffer(jit, asm, 1, None)
+ } else if method == VM_OPT_NEWARRAY_SEND_PACK_BUFFER {
+ gen_opt_newarray_pack_buffer(jit, asm, 2, Some(1))
+ } else {
+ None
+ }
+}
+
+fn gen_opt_newarray_pack_buffer(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ fmt_offset: u32,
+ buffer: Option<u32>,
+) -> Option<CodegenStatus> {
+ asm_comment!(asm, "opt_newarray_send pack");
+
+ let num = jit.get_arg(0).as_u32();
+
+ // Save the PC and SP because we may call #pack
+ jit_prepare_non_leaf_call(jit, asm);
+
+ extern "C" {
+ fn rb_vm_opt_newarray_pack_buffer(ec: EcPtr, num: u32, elts: *const VALUE, fmt: VALUE, buffer: VALUE) -> VALUE;
+ }
+
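+ // Stack layout (top last): the array elements, then the format string at
+ // sp - fmt_offset, and for the buffer form the buffer object at sp - 1.
+ // num counts every slot, so the element count handed to the C helper below
+ // is num - fmt_offset.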
+ let values_opnd = asm.ctx.sp_opnd(-(num as i32));
+ let values_ptr = asm.lea(values_opnd);
+
+ let fmt_string = asm.ctx.sp_opnd(-(fmt_offset as i32));
+
+ let val_opnd = asm.ccall(
+ rb_vm_opt_newarray_pack_buffer as *const u8,
+ vec![
+ EC,
+ (num - fmt_offset).into(),
+ values_ptr,
+ fmt_string,
+ match buffer {
+ None => Qundef.into(),
+ Some(i) => asm.ctx.sp_opnd(-(i as i32)),
+ },
+ ],
+ );
+
+ asm.stack_pop(num.as_usize());
+ let stack_ret = asm.stack_push(Type::CString);
+ asm.mov(stack_ret, val_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn gen_opt_newarray_hash(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let num = jit.get_arg(0).as_u32();
+
+ // Save the PC and SP because we may call #hash
+ jit_prepare_non_leaf_call(jit, asm);
+
+ extern "C" {
+ fn rb_vm_opt_newarray_hash(ec: EcPtr, num: u32, elts: *const VALUE) -> VALUE;
+ }
+
+ let values_opnd = asm.ctx.sp_opnd(-(num as i32));
+ let values_ptr = asm.lea(values_opnd);
+
+ let val_opnd = asm.ccall(
+ rb_vm_opt_newarray_hash as *const u8,
+ vec![
+ EC,
+ num.into(),
+ values_ptr
+ ],
+ );
+
+ asm.stack_pop(num.as_usize());
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn gen_opt_newarray_include_p(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ asm_comment!(asm, "opt_newarray_send include?");
+
+ let num = jit.get_arg(0).as_u32();
+
+ // Save the PC and SP because we may call customized methods.
+ jit_prepare_non_leaf_call(jit, asm);
+
+ extern "C" {
+ fn rb_vm_opt_newarray_include_p(ec: EcPtr, num: u32, elts: *const VALUE, target: VALUE) -> VALUE;
+ }
+
+ let values_opnd = asm.ctx.sp_opnd(-(num as i32));
+ let values_ptr = asm.lea(values_opnd);
+ let target = asm.ctx.sp_opnd(-1);
+
+ let val_opnd = asm.ccall(
+ rb_vm_opt_newarray_include_p as *const u8,
+ vec![
+ EC,
+ (num - 1).into(),
+ values_ptr,
+ target
+ ],
+ );
+
+ asm.stack_pop(num.as_usize());
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn gen_opt_newarray_min(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let num = jit.get_arg(0).as_u32();
+
+ // Save the PC and SP because we may call #min
+ jit_prepare_non_leaf_call(jit, asm);
+
+ extern "C" {
+ fn rb_vm_opt_newarray_min(ec: EcPtr, num: u32, elts: *const VALUE) -> VALUE;
+ }
+
+ let values_opnd = asm.ctx.sp_opnd(-(num as i32));
+ let values_ptr = asm.lea(values_opnd);
+
+ let val_opnd = asm.ccall(
+ rb_vm_opt_newarray_min as *const u8,
+ vec![
+ EC,
+ num.into(),
+ values_ptr
+ ],
+ );
+
+ asm.stack_pop(num.as_usize());
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn gen_opt_not(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ gen_opt_send_without_block(jit, asm)
+}
+
+fn gen_opt_size(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ gen_opt_send_without_block(jit, asm)
+}
+
+fn gen_opt_length(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ gen_opt_send_without_block(jit, asm)
+}
+
+fn gen_opt_regexpmatch2(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ gen_opt_send_without_block(jit, asm)
+}
+
+fn gen_opt_case_dispatch(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Normally this instruction would look up the key in a hash and jump to an
+ // offset based on that.
+ // Instead we can take the fallback case and continue with the next
+ // instruction.
+ // We hope that our jitted code will be sufficiently fast without the hash
+ // lookup, at least for small hashes, but it's worth revisiting this
+ // assumption in the future.
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let case_hash = jit.get_arg(0);
+ let else_offset = jit.get_arg(1).as_u32();
+
+ // Try to reorder case/else branches so that ones that are actually used come first.
+ // Supporting only Fixnum for now so that the implementation can be an equality check.
+ let key_opnd = asm.stack_opnd(0);
+ let comptime_key = jit.peek_at_stack(&asm.ctx, 0);
+
+ // Check that all cases are fixnums to avoid having to register BOP assumptions on
+ // all the types that case hashes support. This spends compile time to save memory.
+ fn case_hash_all_fixnum_p(hash: VALUE) -> bool {
+ let mut all_fixnum = true;
+ unsafe {
+ unsafe extern "C" fn per_case(key: st_data_t, _value: st_data_t, data: st_data_t) -> c_int {
+ (if VALUE(key as usize).fixnum_p() {
+ ST_CONTINUE
+ } else {
+ (data as *mut bool).write(false);
+ ST_STOP
+ }) as c_int
+ }
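+ // rb_hash_stlike_foreach stops iterating as soon as the callback
+ // returns ST_STOP, so the first non-fixnum key ends the scan early.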
+ rb_hash_stlike_foreach(hash, Some(per_case), (&mut all_fixnum) as *mut _ as st_data_t);
+ }
+
+ all_fixnum
+ }
+
+ // If megamorphic, fallback to compiling branch instructions after opt_case_dispatch
+ let megamorphic = asm.ctx.get_chain_depth() >= CASE_WHEN_MAX_DEPTH;
+ if megamorphic {
+ gen_counter_incr(jit, asm, Counter::num_opt_case_dispatch_megamorphic);
+ }
+
+ if comptime_key.fixnum_p() && comptime_key.0 <= u32::MAX.as_usize() && case_hash_all_fixnum_p(case_hash) && !megamorphic {
+ if !assume_bop_not_redefined(jit, asm, INTEGER_REDEFINED_OP_FLAG, BOP_EQQ) {
+ return None;
+ }
+
+ // Check if the key is the same value
+ asm.cmp(key_opnd, comptime_key.into());
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ CASE_WHEN_MAX_DEPTH,
+ Counter::opt_case_dispatch_megamorphic,
+ );
+ asm.stack_pop(1); // Pop key_opnd
+
+ // Get the offset for the compile-time key
+ let mut offset = 0;
+ unsafe { rb_hash_stlike_lookup(case_hash, comptime_key.0 as _, &mut offset) };
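+ // The case hash stores tagged fixnum offsets (always odd), so offset == 0
+ // reliably means the key was absent and we fall back to the else offset.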
+ let jump_offset = if offset == 0 {
+ // NOTE: If we hit the else branch with many different values, performance could suffer.
+ else_offset
+ } else {
+ (offset as u32) >> 1 // FIX2LONG
+ };
+
+ // Jump to the offset of case or else
+ let jump_idx = jit.next_insn_idx() as u32 + jump_offset;
+ let jump_block = BlockId { iseq: jit.iseq, idx: jump_idx.try_into().unwrap() };
+ gen_direct_jump(jit, &asm.ctx.clone(), jump_block, asm);
+ Some(EndBlock)
+ } else {
+ asm.stack_pop(1); // Pop key_opnd
+ Some(KeepCompiling) // continue with === branches
+ }
+}
+
+fn gen_branchif(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let jump_offset = jit.get_arg(0).as_i32();
+
+ // Check for interrupts, but only on backward branches that may create loops
+ if jump_offset < 0 {
+ gen_check_ints(asm, Counter::branchif_interrupted);
+ }
+
+ // Get the branch target instruction offsets
+ let next_idx = jit.next_insn_idx();
+ let jump_idx = (next_idx as i32) + jump_offset;
+ let next_block = BlockId {
+ iseq: jit.iseq,
+ idx: next_idx,
+ };
+ let jump_block = BlockId {
+ iseq: jit.iseq,
+ idx: jump_idx.try_into().unwrap(),
+ };
+
+ // Test if any bit (outside of the Qnil bit) is on
+ // See RB_TEST()
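+ // With CRuby's immediate encoding (Qfalse == 0x00 and Qnil == 0x04 since
+ // Ruby 3.0), val & !Qnil is zero exactly for Qfalse and Qnil, so one test
+ // separates truthy from falsy values.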
+ let val_type = asm.ctx.get_opnd_type(StackOpnd(0));
+ let val_opnd = asm.stack_pop(1);
+
+ incr_counter!(branch_insn_count);
+
+ if let Some(result) = val_type.known_truthy() {
+ let target = if result { jump_block } else { next_block };
+ gen_direct_jump(jit, &asm.ctx.clone(), target, asm);
+ incr_counter!(branch_known_count);
+ } else {
+ asm.test(val_opnd, Opnd::Imm(!Qnil.as_i64()));
+
+ // Generate the branch instructions
+ let ctx = asm.ctx;
+ jit.gen_branch(
+ asm,
+ jump_block,
+ &ctx,
+ Some(next_block),
+ Some(&ctx),
+ BranchGenFn::BranchIf(Cell::new(BranchShape::Default)),
+ );
+ }
+
+ Some(EndBlock)
+}
+
+fn gen_branchunless(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let jump_offset = jit.get_arg(0).as_i32();
+
+ // Check for interrupts, but only on backward branches that may create loops
+ if jump_offset < 0 {
+ gen_check_ints(asm, Counter::branchunless_interrupted);
+ }
+
+ // Get the branch target instruction offsets
+ let next_idx = jit.next_insn_idx() as i32;
+ let jump_idx = next_idx + jump_offset;
+ let next_block = BlockId {
+ iseq: jit.iseq,
+ idx: next_idx.try_into().unwrap(),
+ };
+ let jump_block = BlockId {
+ iseq: jit.iseq,
+ idx: jump_idx.try_into().unwrap(),
+ };
+
+ let val_type = asm.ctx.get_opnd_type(StackOpnd(0));
+ let val_opnd = asm.stack_pop(1);
+
+ incr_counter!(branch_insn_count);
+
+ if let Some(result) = val_type.known_truthy() {
+ let target = if result { next_block } else { jump_block };
+ gen_direct_jump(jit, &asm.ctx.clone(), target, asm);
+ incr_counter!(branch_known_count);
+ } else {
+ // Test if any bit (outside of the Qnil bit) is on
+ // See RB_TEST()
+ let not_qnil = !Qnil.as_i64();
+ asm.test(val_opnd, not_qnil.into());
+
+ // Generate the branch instructions
+ let ctx = asm.ctx;
+ jit.gen_branch(
+ asm,
+ jump_block,
+ &ctx,
+ Some(next_block),
+ Some(&ctx),
+ BranchGenFn::BranchUnless(Cell::new(BranchShape::Default)),
+ );
+ }
+
+ Some(EndBlock)
+}
+
+fn gen_branchnil(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let jump_offset = jit.get_arg(0).as_i32();
+
+ // Check for interrupts, but only on backward branches that may create loops
+ if jump_offset < 0 {
+ gen_check_ints(asm, Counter::branchnil_interrupted);
+ }
+
+ // Get the branch target instruction offsets
+ let next_idx = jit.next_insn_idx() as i32;
+ let jump_idx = next_idx + jump_offset;
+ let next_block = BlockId {
+ iseq: jit.iseq,
+ idx: next_idx.try_into().unwrap(),
+ };
+ let jump_block = BlockId {
+ iseq: jit.iseq,
+ idx: jump_idx.try_into().unwrap(),
+ };
+
+ let val_type = asm.ctx.get_opnd_type(StackOpnd(0));
+ let val_opnd = asm.stack_pop(1);
+
+ incr_counter!(branch_insn_count);
+
+ if let Some(result) = val_type.known_nil() {
+ let target = if result { jump_block } else { next_block };
+ gen_direct_jump(jit, &asm.ctx.clone(), target, asm);
+ incr_counter!(branch_known_count);
+ } else {
+ // Test if the value is Qnil
+ asm.cmp(val_opnd, Opnd::UImm(Qnil.into()));
+ // Generate the branch instructions
+ let ctx = asm.ctx;
+ jit.gen_branch(
+ asm,
+ jump_block,
+ &ctx,
+ Some(next_block),
+ Some(&ctx),
+ BranchGenFn::BranchNil(Cell::new(BranchShape::Default)),
+ );
+ }
+
+ Some(EndBlock)
+}
+
+fn gen_throw(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let throw_state = jit.get_arg(0).as_u64();
+ let throwobj = asm.stack_pop(1);
+ let throwobj = asm.load(throwobj);
+
+ // Gather some statistics about throw
+ gen_counter_incr(jit, asm, Counter::num_throw);
+ match (throw_state & VM_THROW_STATE_MASK as u64) as u32 {
+ RUBY_TAG_BREAK => gen_counter_incr(jit, asm, Counter::num_throw_break),
+ RUBY_TAG_RETRY => gen_counter_incr(jit, asm, Counter::num_throw_retry),
+ RUBY_TAG_RETURN => gen_counter_incr(jit, asm, Counter::num_throw_return),
+ _ => {},
+ }
+
+ // THROW_DATA_NEW allocates. Save SP for GC and PC for allocation tracing as
+ // well as handling the catch table. However, we don't use
+ // jit_prepare_call_with_gc since this implementation doesn't need a patch point.
+ jit_save_pc(jit, asm);
+ gen_save_sp(asm);
+
+ // rb_vm_throw verifies it's a valid throw, sets ec->tag->state, and returns throw
+ // data, which is throwobj or a vm_throw_data wrapping it. When ec->tag->state is
+ // set, JIT code callers will handle the throw with vm_exec_handle_exception.
+ extern "C" {
+ fn rb_vm_throw(ec: EcPtr, reg_cfp: CfpPtr, throw_state: u32, throwobj: VALUE) -> VALUE;
+ }
+ let val = asm.ccall(rb_vm_throw as *mut u8, vec![EC, CFP, throw_state.into(), throwobj]);
+
+ asm_comment!(asm, "exit from throw");
+ asm.cpop_into(SP);
+ asm.cpop_into(EC);
+ asm.cpop_into(CFP);
+
+ asm.frame_teardown();
+
+ asm.cret(val);
+ Some(EndBlock)
+}
+
+fn gen_opt_new(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let cd = jit.get_arg(0).as_ptr();
+ let jump_offset = jit.get_arg(1).as_i32();
+
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let ci = unsafe { get_call_data_ci(cd) }; // info about the call site
+ let mid = unsafe { vm_ci_mid(ci) };
+ let argc: i32 = unsafe { vm_ci_argc(ci) }.try_into().unwrap();
+
+ let recv_idx = argc;
+ let comptime_recv = jit.peek_at_stack(&asm.ctx, recv_idx as isize);
+
+ // The receiver's class; note that this may be a singleton class
+ let comptime_recv_klass = comptime_recv.class_of();
+
+ let recv = asm.stack_opnd(recv_idx);
+
+ perf_call!("opt_new: ", jit_guard_known_klass(
+ jit,
+ asm,
+ recv,
+ recv.into(),
+ comptime_recv,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_klass_megamorphic,
+ ));
+
+ // We now know that it's always comptime_recv_klass
+ if jit.assume_expected_cfunc(asm, comptime_recv_klass, mid, rb_class_new_instance_pass_kw as _) {
+ // Fast path
+ // call rb_obj_alloc to actually allocate
+ jit_prepare_non_leaf_call(jit, asm);
+ let obj = asm.ccall(rb_obj_alloc as _, vec![comptime_recv.into()]);
+
+ // Get a reference to the stack location where we need to save the
+ // return instance.
+ let result = asm.stack_opnd(recv_idx + 1);
+ let recv = asm.stack_opnd(recv_idx);
+
+ // Replace the receiver for the upcoming initialize call
+ asm.ctx.set_opnd_mapping(recv.into(), TempMapping::MapToStack(Type::UnknownHeap));
+ asm.mov(recv, obj);
+
+ // Save the allocated object for return
+ asm.ctx.set_opnd_mapping(result.into(), TempMapping::MapToStack(Type::UnknownHeap));
+ asm.mov(result, obj);
+
+ jump_to_next_insn(jit, asm)
+ } else {
+ // General case: fall back to the branch that performs the regular Class#new send
+
+ // Get the branch target instruction offsets
+ let jump_idx = jit.next_insn_idx() as i32 + jump_offset;
+ end_block_with_jump(jit, asm, jump_idx as u16)
+ }
+}
+
+fn gen_jump(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let jump_offset = jit.get_arg(0).as_i32();
+
+ // Check for interrupts, but only on backward branches that may create loops
+ if jump_offset < 0 {
+ gen_check_ints(asm, Counter::jump_interrupted);
+ }
+
+ // Get the branch target instruction offsets
+ let jump_idx = jit.next_insn_idx() as i32 + jump_offset;
+ let jump_block = BlockId {
+ iseq: jit.iseq,
+ idx: jump_idx.try_into().unwrap(),
+ };
+
+ // Generate the jump instruction
+ gen_direct_jump(jit, &asm.ctx.clone(), jump_block, asm);
+
+ Some(EndBlock)
+}
+
+/// Guard that self or a stack operand has the same class as `known_klass`, using
+/// `sample_instance` to speculate about the shape of the runtime value.
+/// FIXNUM and on-heap integers are treated as if they have distinct classes, and
+/// the guard generated for one will fail for the other.
+///
+/// Recompile as a contingency if possible, or take a side exit as a last resort.
+fn jit_guard_known_klass(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ obj_opnd: Opnd,
+ insn_opnd: YARVOpnd,
+ sample_instance: VALUE,
+ max_chain_depth: u8,
+ counter: Counter,
+) {
+ let known_klass = sample_instance.class_of();
+ let val_type = asm.ctx.get_opnd_type(insn_opnd);
+
+ if val_type.known_class() == Some(known_klass) {
+ // Unless frozen, Array, Hash, and String objects may change their RBASIC_CLASS
+ // when they get a singleton class. Those types need invalidations.
+ if unsafe { [rb_cArray, rb_cHash, rb_cString].contains(&known_klass) } {
+ if jit.assume_no_singleton_class(asm, known_klass) {
+ // Speculate that this object will not have a singleton class,
+ // and invalidate the block in case it does.
+ return;
+ }
+ } else {
+ // We already know from type information that this is a match
+ return;
+ }
+ }
+
+ if unsafe { known_klass == rb_cNilClass } {
+ assert!(!val_type.is_heap());
+ assert!(val_type.is_unknown());
+
+ asm_comment!(asm, "guard object is nil");
+ asm.cmp(obj_opnd, Qnil.into());
+ jit_chain_guard(JCC_JNE, jit, asm, max_chain_depth, counter);
+
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::Nil);
+ } else if unsafe { known_klass == rb_cTrueClass } {
+ assert!(!val_type.is_heap());
+ assert!(val_type.is_unknown());
+
+ asm_comment!(asm, "guard object is true");
+ asm.cmp(obj_opnd, Qtrue.into());
+ jit_chain_guard(JCC_JNE, jit, asm, max_chain_depth, counter);
+
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::True);
+ } else if unsafe { known_klass == rb_cFalseClass } {
+ assert!(!val_type.is_heap());
+ assert!(val_type.is_unknown());
+
+ asm_comment!(asm, "guard object is false");
+ assert!(Qfalse.as_i32() == 0);
+ asm.test(obj_opnd, obj_opnd);
+ jit_chain_guard(JCC_JNZ, jit, asm, max_chain_depth, counter);
+
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::False);
+ } else if unsafe { known_klass == rb_cInteger } && sample_instance.fixnum_p() {
+ // We will guard fixnum and bignum as though they were separate classes
+ // BIGNUM can be handled by the general else case below
+ assert!(val_type.is_unknown());
+
+ asm_comment!(asm, "guard object is fixnum");
+ asm.test(obj_opnd, Opnd::Imm(RUBY_FIXNUM_FLAG as i64));
+ jit_chain_guard(JCC_JZ, jit, asm, max_chain_depth, counter);
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::Fixnum);
+ } else if unsafe { known_klass == rb_cSymbol } && sample_instance.static_sym_p() {
+ assert!(!val_type.is_heap());
+ // We will guard STATIC vs DYNAMIC as though they were separate classes
+ // DYNAMIC symbols can be handled by the general else case below
+ if val_type != Type::ImmSymbol || !val_type.is_imm() {
+ assert!(val_type.is_unknown());
+
+ asm_comment!(asm, "guard object is static symbol");
+ assert!(RUBY_SPECIAL_SHIFT == 8);
+ asm.cmp(obj_opnd.with_num_bits(8).unwrap(), Opnd::UImm(RUBY_SYMBOL_FLAG as u64));
+ jit_chain_guard(JCC_JNE, jit, asm, max_chain_depth, counter);
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::ImmSymbol);
+ }
+ } else if unsafe { known_klass == rb_cFloat } && sample_instance.flonum_p() {
+ assert!(!val_type.is_heap());
+ if val_type != Type::Flonum || !val_type.is_imm() {
+ assert!(val_type.is_unknown());
+
+ // We will guard flonum vs heap float as though they were separate classes
+ asm_comment!(asm, "guard object is flonum");
+ let flag_bits = asm.and(obj_opnd, Opnd::UImm(RUBY_FLONUM_MASK as u64));
+ asm.cmp(flag_bits, Opnd::UImm(RUBY_FLONUM_FLAG as u64));
+ jit_chain_guard(JCC_JNE, jit, asm, max_chain_depth, counter);
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::Flonum);
+ }
+ } else if unsafe {
+ FL_TEST(known_klass, VALUE(RUBY_FL_SINGLETON as usize)) != VALUE(0)
+ && sample_instance == rb_class_attached_object(known_klass)
+ && !rb_obj_is_kind_of(sample_instance, rb_cIO).test()
+ } {
+ // Singleton classes are attached to one specific object, so we can
+ // avoid one memory access (and potentially the is_heap check) by
+ // looking for the expected object directly.
+ // Note that in case the sample instance has a singleton class that
+ // doesn't attach to the sample instance, it means the sample instance
+ // has an empty singleton class that hasn't been materialized yet. In
+ // this case, comparing against the sample instance doesn't guarantee
+ // that its singleton class is empty, so we can't avoid the memory
+ // access. As an example, `Object.new.singleton_class` is an object in
+ // this situation.
+ // Also, guarding by identity is incorrect for IO objects because
+ // IO#reopen can be used to change the class and singleton class of IO objects!
+ asm_comment!(asm, "guard known object with singleton class");
+ asm.cmp(obj_opnd, sample_instance.into());
+ jit_chain_guard(JCC_JNE, jit, asm, max_chain_depth, counter);
+ } else if val_type == Type::CString && unsafe { known_klass == rb_cString } {
+ // guard elided because the context says we've already checked
+ unsafe {
+ assert_eq!(sample_instance.class_of(), rb_cString, "context says class is exactly ::String")
+ };
+ } else {
+ assert!(!val_type.is_imm(), "{insn_opnd:?} should be a heap object, but was {val_type:?} for {sample_instance:?}");
+
+ // Check that the receiver is a heap object
+ // Note: if we get here, the class doesn't have immediate instances.
+ if !val_type.is_heap() {
+ asm_comment!(asm, "guard not immediate");
+ asm.test(obj_opnd, (RUBY_IMMEDIATE_MASK as u64).into());
+ jit_chain_guard(JCC_JNZ, jit, asm, max_chain_depth, counter);
+ asm.cmp(obj_opnd, Qfalse.into());
+ jit_chain_guard(JCC_JE, jit, asm, max_chain_depth, counter);
+
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::UnknownHeap);
+ }
+
+ // If obj_opnd isn't already a register, load it.
+ let obj_opnd = match obj_opnd {
+ Opnd::InsnOut { .. } => obj_opnd,
+ _ => asm.load(obj_opnd),
+ };
+ let klass_opnd = Opnd::mem(64, obj_opnd, RUBY_OFFSET_RBASIC_KLASS);
+
+ // Bail if receiver class is different from known_klass
+ // TODO: jit_mov_gc_ptr keeps a strong reference, which leaks the class.
+ asm_comment!(asm, "guard known class");
+ asm.cmp(klass_opnd, known_klass.into());
+ jit_chain_guard(JCC_JNE, jit, asm, max_chain_depth, counter);
+
+ if known_klass == unsafe { rb_cString } {
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::CString);
+ } else if known_klass == unsafe { rb_cArray } {
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::CArray);
+ } else if known_klass == unsafe { rb_cHash } {
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::CHash);
+ }
+ }
+}
+
+// Generate ancestry guard for protected callee.
+// Calls to protected callees only go through when self.is_a?(klass_that_defines_the_callee).
+fn jit_protected_callee_ancestry_guard(
+ asm: &mut Assembler,
+ cme: *const rb_callable_method_entry_t,
+) {
+ // See vm_call_method().
+ let def_class = unsafe { (*cme).defined_class };
+ // Note: PC isn't written to the current control frame as rb_obj_is_kind_of() shouldn't raise.
+ // VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass);
+
+ let val = asm.ccall(
+ rb_obj_is_kind_of as *mut u8,
+ vec![
+ Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF),
+ def_class.into(),
+ ],
+ );
+ asm.test(val, val);
+ asm.jz(Target::side_exit(Counter::guard_send_se_protected_check_failed))
+}
+
+// Codegen for rb_obj_not().
+// Note, caller is responsible for generating all the right guards, including
+// arity guards.
+fn jit_rb_obj_not(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ let recv_type = asm.ctx.get_opnd_type(StackOpnd(0));
+
+ match recv_type.known_truthy() {
+ Some(false) => {
+ asm_comment!(asm, "rb_obj_not(nil_or_false)");
+ asm.stack_pop(1);
+ let out_opnd = asm.stack_push(Type::True);
+ asm.mov(out_opnd, Qtrue.into());
+ },
+ Some(true) => {
+ // Note: recv_opnd != Type::Nil && recv_opnd != Type::False.
+ asm_comment!(asm, "rb_obj_not(truthy)");
+ asm.stack_pop(1);
+ let out_opnd = asm.stack_push(Type::False);
+ asm.mov(out_opnd, Qfalse.into());
+ },
+ _ => {
+ return false;
+ },
+ }
+
+ true
+}
+
+// Codegen for rb_true()
+fn jit_rb_true(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ asm_comment!(asm, "nil? == true");
+ asm.stack_pop(1);
+ let stack_ret = asm.stack_push(Type::True);
+ asm.mov(stack_ret, Qtrue.into());
+ true
+}
+
+// Codegen for rb_false()
+fn jit_rb_false(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ asm_comment!(asm, "nil? == false");
+ asm.stack_pop(1);
+ let stack_ret = asm.stack_push(Type::False);
+ asm.mov(stack_ret, Qfalse.into());
+ true
+}
+
+/// Codegen for Kernel#is_a?
+fn jit_rb_kernel_is_a(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ argc: i32,
+ known_recv_class: Option<VALUE>,
+) -> bool {
+ if argc != 1 {
+ return false;
+ }
+
+ // If this is a super call we might not know the class
+ if known_recv_class.is_none() {
+ return false;
+ }
+
+ // Important note: The output code will simply `return true/false`.
+ // Correctness follows from:
+ // - `known_recv_class` implies there is a guard scheduled before here
+ // for a particular `CLASS_OF(lhs)`.
+ // - We guard that rhs is identical to the compile-time sample
+ // - In general, for any two Class instances A, B, `A < B` does not change at runtime.
+ // Class#superclass is stable.
+
+ let sample_rhs = jit.peek_at_stack(&asm.ctx, 0);
+ let sample_lhs = jit.peek_at_stack(&asm.ctx, 1);
+
+ // We are not allowing module here because the module hierarchy can change at runtime.
+ if !unsafe { RB_TYPE_P(sample_rhs, RUBY_T_CLASS) } {
+ return false;
+ }
+ let sample_is_a = unsafe { rb_obj_is_kind_of(sample_lhs, sample_rhs) == Qtrue };
+
+ asm_comment!(asm, "Kernel#is_a?");
+ asm.cmp(asm.stack_opnd(0), sample_rhs.into());
+ asm.jne(Target::side_exit(Counter::guard_send_is_a_class_mismatch));
+
+ asm.stack_pop(2);
+
+ if sample_is_a {
+ let stack_ret = asm.stack_push(Type::True);
+ asm.mov(stack_ret, Qtrue.into());
+ } else {
+ let stack_ret = asm.stack_push(Type::False);
+ asm.mov(stack_ret, Qfalse.into());
+ }
+ true
+}
+
+/// Codegen for Kernel#instance_of?
+fn jit_rb_kernel_instance_of(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ argc: i32,
+ known_recv_class: Option<VALUE>,
+) -> bool {
+ if argc != 1 {
+ return false;
+ }
+
+ // If this is a super call we might not know the class
+ if known_recv_class.is_none() {
+ return false;
+ }
+
+ // Important note: The output code will simply `return true/false`.
+ // Correctness follows from:
+ // - `known_recv_class` implies there is a guard scheduled before here
+ // for a particular `CLASS_OF(lhs)`.
+ // - We guard that rhs is identical to the compile-time sample
+ // - For a particular `CLASS_OF(lhs)`, `rb_obj_class(lhs)` does not change.
+ // (because for any singleton class `s`, `s.superclass.equal?(s.attached_object.class)`)
+
+ let sample_rhs = jit.peek_at_stack(&asm.ctx, 0);
+ let sample_lhs = jit.peek_at_stack(&asm.ctx, 1);
+
+ // Filters out cases where the C implementation raises
+ if unsafe { !(RB_TYPE_P(sample_rhs, RUBY_T_CLASS) || RB_TYPE_P(sample_rhs, RUBY_T_MODULE)) } {
+ return false;
+ }
+
+ // We need to grab the class here to deal with singleton classes.
+ // Instance of grabs the "real class" of the object rather than the
+ // singleton class.
+ let sample_lhs_real_class = unsafe { rb_obj_class(sample_lhs) };
+
+ let sample_instance_of = sample_lhs_real_class == sample_rhs;
+
+ asm_comment!(asm, "Kernel#instance_of?");
+ asm.cmp(asm.stack_opnd(0), sample_rhs.into());
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_instance_of_class_mismatch,
+ );
+
+ asm.stack_pop(2);
+
+ if sample_instance_of {
+ let stack_ret = asm.stack_push(Type::True);
+ asm.mov(stack_ret, Qtrue.into());
+ } else {
+ let stack_ret = asm.stack_push(Type::False);
+ asm.mov(stack_ret, Qfalse.into());
+ }
+ true
+}
+
+fn jit_rb_mod_eqq(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ if argc != 1 {
+ return false;
+ }
+
+ asm_comment!(asm, "Module#===");
+ // By being here, we know that the receiver is a T_MODULE or a T_CLASS, because Module#=== can
+ // only live on these objects. With that, we can call rb_obj_is_kind_of() without
+ // jit_prepare_non_leaf_call() or a control frame push because it can't raise, allocate, or call
+ // Ruby methods with these inputs.
+ // Note the difference in approach from Kernel#is_a? because we don't get a free guard for the
+ // right hand side.
+ let rhs = asm.stack_pop(1);
+ let lhs = asm.stack_pop(1); // the module
+ let ret = asm.ccall(rb_obj_is_kind_of as *const u8, vec![rhs, lhs]);
+
+ // Return the result
+ let stack_ret = asm.stack_push(Type::UnknownImm);
+ asm.mov(stack_ret, ret);
+
+ true
+}
+
+// Substitution for rb_mod_name(). Returns the name of a module/class.
+fn jit_rb_mod_name(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ if argc != 0 {
+ return false;
+ }
+
+ asm_comment!(asm, "Module#name");
+
+ // rb_mod_name() never allocates, so no preparation needed.
+ let name = asm.ccall(rb_mod_name as _, vec![asm.stack_opnd(0)]);
+
+ let _ = asm.stack_pop(1); // pop self
+ // call-seq: mod.name -> string or nil
+ let ret = asm.stack_push(Type::Unknown);
+ asm.mov(ret, name);
+
+ true
+}
+
+// Codegen for rb_obj_equal()
+// object identity comparison
+fn jit_rb_obj_equal(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ asm_comment!(asm, "equal?");
+ let obj1 = asm.stack_pop(1);
+ let obj2 = asm.stack_pop(1);
+
+ asm.cmp(obj1, obj2);
+ let ret_opnd = asm.csel_e(Qtrue.into(), Qfalse.into());
+
+ let stack_ret = asm.stack_push(Type::UnknownImm);
+ asm.mov(stack_ret, ret_opnd);
+ true
+}
+
+// Codegen for rb_obj_not_equal()
+// object identity comparison
+fn jit_rb_obj_not_equal(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ gen_equality_specialized(jit, asm, false) == Some(true)
+}
+
+// Codegen for rb_int_equal()
+fn jit_rb_int_equal(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ // Check that both operands are fixnums
+ guard_two_fixnums(jit, asm);
+
+ // Compare the arguments
+ asm_comment!(asm, "rb_int_equal");
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
+ asm.cmp(arg0, arg1);
+ let ret_opnd = asm.csel_e(Qtrue.into(), Qfalse.into());
+
+ let stack_ret = asm.stack_push(Type::UnknownImm);
+ asm.mov(stack_ret, ret_opnd);
+ true
+}
+
+fn jit_rb_int_succ(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ // Guard the receiver is fixnum
+ let recv_type = asm.ctx.get_opnd_type(StackOpnd(0));
+ let recv = asm.stack_pop(1);
+ if recv_type != Type::Fixnum {
+ asm_comment!(asm, "guard object is fixnum");
+ asm.test(recv, Opnd::Imm(RUBY_FIXNUM_FLAG as i64));
+ asm.jz(Target::side_exit(Counter::opt_succ_not_fixnum));
+ }
+
+ asm_comment!(asm, "Integer#succ");
+ let out_val = asm.add(recv, Opnd::Imm(2)); // 2 is untagged Fixnum 1
+ asm.jo(Target::side_exit(Counter::opt_succ_overflow));
+
+ // Push the output onto the stack
+ let dst = asm.stack_push(Type::Fixnum);
+ asm.mov(dst, out_val);
+
+ true
+}
+
+fn jit_rb_int_pred(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ // Guard the receiver is fixnum
+ let recv_type = asm.ctx.get_opnd_type(StackOpnd(0));
+ let recv = asm.stack_pop(1);
+ if recv_type != Type::Fixnum {
+ asm_comment!(asm, "guard object is fixnum");
+ asm.test(recv, Opnd::Imm(RUBY_FIXNUM_FLAG as i64));
+ asm.jz(Target::side_exit(Counter::send_pred_not_fixnum));
+ }
+
+ asm_comment!(asm, "Integer#pred");
+ let out_val = asm.sub(recv, Opnd::Imm(2)); // 2 is untagged Fixnum 1
+ asm.jo(Target::side_exit(Counter::send_pred_underflow));
+
+ // Push the output onto the stack
+ let dst = asm.stack_push(Type::Fixnum);
+ asm.mov(dst, out_val);
+
+ true
+}
+
+fn jit_rb_int_div(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ if asm.ctx.two_fixnums_on_stack(jit) != Some(true) {
+ return false;
+ }
+ guard_two_fixnums(jit, asm);
+
+ // rb_fix_div_fix may GC-allocate for Bignum
+ jit_prepare_call_with_gc(jit, asm);
+
+ asm_comment!(asm, "Integer#/");
+ let obj = asm.stack_opnd(0);
+ let recv = asm.stack_opnd(1);
+
+ // Check for division by zero
+ asm.cmp(obj, VALUE::fixnum_from_usize(0).as_i64().into());
+ asm.je(Target::side_exit(Counter::opt_div_zero));
+
+ let ret = asm.ccall(rb_fix_div_fix as *const u8, vec![recv, obj]);
+ asm.stack_pop(2); // Keep them during ccall for GC
+
+ let ret_opnd = asm.stack_push(Type::Unknown);
+ asm.mov(ret_opnd, ret);
+ true
+}
+
+fn jit_rb_int_lshift(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ if asm.ctx.two_fixnums_on_stack(jit) != Some(true) {
+ return false;
+ }
+ guard_two_fixnums(jit, asm);
+
+ let comptime_shift = jit.peek_at_stack(&asm.ctx, 0);
+
+ if !comptime_shift.fixnum_p() {
+ return false;
+ }
+
+ // Untag the fixnum shift amount
+ let shift_amt = comptime_shift.as_isize() >> 1;
+ if shift_amt > 63 || shift_amt < 0 {
+ return false;
+ }
+
+ // Fallback to a C call if the shift amount varies
+ // This check is needed because the chain guard will side-exit
+ // if its max depth is reached
+ if asm.ctx.get_chain_depth() > 0 {
+ return false;
+ }
+
+ let rhs = asm.stack_pop(1);
+ let lhs = asm.stack_pop(1);
+
+ // Guard on the shift amount we speculated on
+ asm.cmp(rhs, comptime_shift.into());
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ 1,
+ Counter::lshift_amount_changed,
+ );
+
+ fixnum_left_shift_body(asm, lhs, shift_amt as u64);
+ true
+}
+
+fn fixnum_left_shift_body(asm: &mut Assembler, lhs: Opnd, shift_amt: u64) {
+ let in_val = asm.sub(lhs, 1.into());
+ let shift_opnd = Opnd::UImm(shift_amt);
+ let out_val = asm.lshift(in_val, shift_opnd);
+ let unshifted = asm.rshift(out_val, shift_opnd);
+
+ // Guard that we did not overflow
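+ // Shifting back down restores the input only if no bits (including the
+ // sign) were shifted out, so comparing the round trip against the input
+ // detects overflow.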
+ asm.cmp(unshifted, in_val);
+ asm.jne(Target::side_exit(Counter::lshift_overflow));
+
+ // Re-tag the output value
+ let out_val = asm.add(out_val, 1.into());
+
+ let ret_opnd = asm.stack_push(Type::Fixnum);
+ asm.mov(ret_opnd, out_val);
+}
+
+fn jit_rb_int_rshift(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ if asm.ctx.two_fixnums_on_stack(jit) != Some(true) {
+ return false;
+ }
+ guard_two_fixnums(jit, asm);
+
+ let comptime_shift = jit.peek_at_stack(&asm.ctx, 0);
+
+ // Untag the fixnum shift amount
+ let shift_amt = comptime_shift.as_isize() >> 1;
+ if shift_amt > 63 || shift_amt < 0 {
+ return false;
+ }
+
+ // Fallback to a C call if the shift amount varies
+ // This check is needed because the chain guard will side-exit
+ // if its max depth is reached
+ if asm.ctx.get_chain_depth() > 0 {
+ return false;
+ }
+
+ let rhs = asm.stack_pop(1);
+ let lhs = asm.stack_pop(1);
+
+ // Guard on the shift amount we speculated on
+ asm.cmp(rhs, comptime_shift.into());
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ 1,
+ Counter::rshift_amount_changed,
+ );
+
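+ // For a tagged fixnum (2x + 1), an arithmetic shift right by s yields
+ // 2 * (x >> s) with the low bit possibly set; OR-ing the tag bit back in
+ // produces the tagged value of x >> s for any s in 0..=63.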
+ let shift_opnd = Opnd::UImm(shift_amt as u64);
+ let out_val = asm.rshift(lhs, shift_opnd);
+ let out_val = asm.or(out_val, 1.into());
+
+ let ret_opnd = asm.stack_push(Type::Fixnum);
+ asm.mov(ret_opnd, out_val);
+ true
+}
+
+fn jit_rb_int_xor(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ if asm.ctx.two_fixnums_on_stack(jit) != Some(true) {
+ return false;
+ }
+ guard_two_fixnums(jit, asm);
+
+ let rhs = asm.stack_pop(1);
+ let lhs = asm.stack_pop(1);
+
+ // XOR and then re-tag the resulting fixnum
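+ // (2a + 1) ^ (2b + 1) cancels both tag bits and gives 2 * (a ^ b), so a
+ // single OR re-tags the result; XOR cannot overflow a fixnum.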
+ let out_val = asm.xor(lhs, rhs);
+ let out_val = asm.or(out_val, 1.into());
+
+ let ret_opnd = asm.stack_push(Type::Fixnum);
+ asm.mov(ret_opnd, out_val);
+ true
+}
+
+fn jit_rb_int_aref(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ if argc != 1 {
+ return false;
+ }
+ if asm.ctx.two_fixnums_on_stack(jit) != Some(true) {
+ return false;
+ }
+ guard_two_fixnums(jit, asm);
+
+ asm_comment!(asm, "Integer#[]");
+ let obj = asm.stack_pop(1);
+ let recv = asm.stack_pop(1);
+
+ let ret = asm.ccall(rb_fix_aref as *const u8, vec![recv, obj]);
+
+ let ret_opnd = asm.stack_push(Type::Fixnum);
+ asm.mov(ret_opnd, ret);
+ true
+}
+
+fn jit_rb_float_plus(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ // Guard obj is Fixnum or Flonum to avoid rb_funcall on rb_num_coerce_bin
+ let comptime_obj = jit.peek_at_stack(&asm.ctx, 0);
+ if comptime_obj.fixnum_p() || comptime_obj.flonum_p() {
+ let obj = asm.stack_opnd(0);
+ jit_guard_known_klass(
+ jit,
+ asm,
+ obj,
+ obj.into(),
+ comptime_obj,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_not_fixnum_or_flonum,
+ );
+ } else {
+ return false;
+ }
+
+ // Save the PC and SP because the callee may allocate Float on heap
+ jit_prepare_call_with_gc(jit, asm);
+
+ asm_comment!(asm, "Float#+");
+ let obj = asm.stack_opnd(0);
+ let recv = asm.stack_opnd(1);
+
+ let ret = asm.ccall(rb_float_plus as *const u8, vec![recv, obj]);
+ asm.stack_pop(2); // Keep recv during ccall for GC
+
+ let ret_opnd = asm.stack_push(Type::Unknown); // Flonum or heap Float
+ asm.mov(ret_opnd, ret);
+ true
+}
+
+fn jit_rb_float_minus(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ // Guard obj is Fixnum or Flonum to avoid rb_funcall on rb_num_coerce_bin
+ let comptime_obj = jit.peek_at_stack(&asm.ctx, 0);
+ if comptime_obj.fixnum_p() || comptime_obj.flonum_p() {
+ let obj = asm.stack_opnd(0);
+ jit_guard_known_klass(
+ jit,
+ asm,
+ obj,
+ obj.into(),
+ comptime_obj,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_not_fixnum_or_flonum,
+ );
+ } else {
+ return false;
+ }
+
+ // Save the PC and SP because the callee may allocate Float on heap
+ jit_prepare_call_with_gc(jit, asm);
+
+ asm_comment!(asm, "Float#-");
+ let obj = asm.stack_opnd(0);
+ let recv = asm.stack_opnd(1);
+
+ let ret = asm.ccall(rb_float_minus as *const u8, vec![recv, obj]);
+ asm.stack_pop(2); // Keep recv during ccall for GC
+
+ let ret_opnd = asm.stack_push(Type::Unknown); // Flonum or heap Float
+ asm.mov(ret_opnd, ret);
+ true
+}
+
+fn jit_rb_float_mul(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ // Guard obj is Fixnum or Flonum to avoid rb_funcall on rb_num_coerce_bin
+ let comptime_obj = jit.peek_at_stack(&asm.ctx, 0);
+ if comptime_obj.fixnum_p() || comptime_obj.flonum_p() {
+ let obj = asm.stack_opnd(0);
+ jit_guard_known_klass(
+ jit,
+ asm,
+ obj,
+ obj.into(),
+ comptime_obj,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_not_fixnum_or_flonum,
+ );
+ } else {
+ return false;
+ }
+
+ // Save the PC and SP because the callee may allocate Float on heap
+ jit_prepare_call_with_gc(jit, asm);
+
+ asm_comment!(asm, "Float#*");
+ let obj = asm.stack_opnd(0);
+ let recv = asm.stack_opnd(1);
+
+ let ret = asm.ccall(rb_float_mul as *const u8, vec![recv, obj]);
+ asm.stack_pop(2); // Keep recv during ccall for GC
+
+ let ret_opnd = asm.stack_push(Type::Unknown); // Flonum or heap Float
+ asm.mov(ret_opnd, ret);
+ true
+}
+
+fn jit_rb_float_div(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ // Guard obj is Fixnum or Flonum to avoid rb_funcall on rb_num_coerce_bin
+ let comptime_obj = jit.peek_at_stack(&asm.ctx, 0);
+ if comptime_obj.fixnum_p() || comptime_obj.flonum_p() {
+ let obj = asm.stack_opnd(0);
+ jit_guard_known_klass(
+ jit,
+ asm,
+ obj,
+ obj.into(),
+ comptime_obj,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_not_fixnum_or_flonum,
+ );
+ } else {
+ return false;
+ }
+
+ // Save the PC and SP because the callee may allocate Float on heap
+ jit_prepare_call_with_gc(jit, asm);
+
+ asm_comment!(asm, "Float#/");
+ let obj = asm.stack_opnd(0);
+ let recv = asm.stack_opnd(1);
+
+ let ret = asm.ccall(rb_float_div as *const u8, vec![recv, obj]);
+ asm.stack_pop(2); // Keep recv during ccall for GC
+
+ let ret_opnd = asm.stack_push(Type::Unknown); // Flonum or heap Float
+ asm.mov(ret_opnd, ret);
+ true
+}
+
+/// If string is frozen, duplicate it to get a non-frozen string. Otherwise, return it.
+fn jit_rb_str_uplus(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool
+{
+ if argc != 0 {
+ return false;
+ }
+
+ // We allocate when we dup the string
+ jit_prepare_call_with_gc(jit, asm);
+ asm.spill_regs(); // For ccall. Unconditionally spill them for RegMappings consistency.
+
+ asm_comment!(asm, "Unary plus on string");
+ let recv_opnd = asm.stack_pop(1);
+ let recv_opnd = asm.load(recv_opnd);
+ let flags_opnd = asm.load(Opnd::mem(64, recv_opnd, RUBY_OFFSET_RBASIC_FLAGS));
+ asm.test(flags_opnd, Opnd::Imm(RUBY_FL_FREEZE as i64 | RSTRING_CHILLED as i64));
+
+ let ret_label = asm.new_label("stack_ret");
+
+ // String#+@ can only exist on T_STRING
+ let stack_ret = asm.stack_push(Type::TString);
+
+ // If the string isn't frozen, we just return it.
+ asm.mov(stack_ret, recv_opnd);
+ asm.jz(ret_label);
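+ // Note: the mov above must not disturb the flags set by the freeze test.
+ // Plain moves leave condition flags untouched on both x86_64 and arm64,
+ // so the jz still observes the test result.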
+
+ // Str is frozen - duplicate it
+ asm.spill_regs(); // for ccall
+ let ret_opnd = asm.ccall(rb_str_dup as *const u8, vec![recv_opnd]);
+ asm.mov(stack_ret, ret_opnd);
+
+ asm.write_label(ret_label);
+
+ true
+}
+
+fn jit_rb_str_length(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ asm_comment!(asm, "String#length");
+ extern "C" {
+ fn rb_str_length(str: VALUE) -> VALUE;
+ }
+
+ // This function cannot allocate or raise exceptions
+ let recv = asm.stack_opnd(0);
+ let ret_opnd = asm.ccall(rb_str_length as *const u8, vec![recv]);
+ asm.stack_pop(1); // Keep recv on stack during ccall for GC
+
+ // Should be guaranteed to be a fixnum on 64-bit systems
+ let out_opnd = asm.stack_push(Type::Fixnum);
+ asm.mov(out_opnd, ret_opnd);
+
+ true
+}
+
+fn jit_rb_str_bytesize(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ asm_comment!(asm, "String#bytesize");
+
+ let recv = asm.stack_pop(1);
+
+ asm_comment!(asm, "get string length");
+ let str_len_opnd = Opnd::mem(
+ std::os::raw::c_long::BITS as u8,
+ asm.load(recv),
+ RUBY_OFFSET_RSTRING_LEN as i32,
+ );
+
+ let len = asm.load(str_len_opnd);
+ let shifted_val = asm.lshift(len, Opnd::UImm(1));
+ let out_val = asm.or(shifted_val, Opnd::UImm(RUBY_FIXNUM_FLAG as u64));
+
+ let out_opnd = asm.stack_push(Type::Fixnum);
+
+ asm.mov(out_opnd, out_val);
+
+ true
+}
+
+fn jit_rb_str_byteslice(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ if argc != 2 {
+ return false
+ }
+
+ // rb_str_byte_substr should be leaf if indexes are fixnums
+ match (asm.ctx.get_opnd_type(StackOpnd(0)), asm.ctx.get_opnd_type(StackOpnd(1))) {
+ (Type::Fixnum, Type::Fixnum) => {},
+ // Raises when non-integers are passed in, which requires the method frame
+ // to be pushed for the backtrace
+ _ => if !jit_prepare_lazy_frame_call(jit, asm, cme, StackOpnd(2)) {
+ return false;
+ }
+ }
+ asm_comment!(asm, "String#byteslice");
+
+ // rb_str_byte_substr allocates a substring
+ jit_prepare_call_with_gc(jit, asm);
+
+ // Get stack operands after potential SP change
+ let len = asm.stack_opnd(0);
+ let beg = asm.stack_opnd(1);
+ let recv = asm.stack_opnd(2);
+
+ let ret_opnd = asm.ccall(rb_str_byte_substr as *const u8, vec![recv, beg, len]);
+ asm.stack_pop(3);
+
+ let out_opnd = asm.stack_push(Type::Unknown);
+ asm.mov(out_opnd, ret_opnd);
+
+ true
+}
+
+fn jit_rb_str_aref_m(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ // In yjit-bench the most common usages by far are a single fixnum or two fixnums.
+ // rb_str_substr should be leaf if indexes are fixnums
+ if argc == 2 {
+ match (asm.ctx.get_opnd_type(StackOpnd(0)), asm.ctx.get_opnd_type(StackOpnd(1))) {
+ (Type::Fixnum, Type::Fixnum) => {},
+ // There is a two-argument form of (Regexp, Fixnum) which needs a different C func.
+ // Other types will raise.
+ _ => { return false },
+ }
+ } else if argc == 1 {
+ match asm.ctx.get_opnd_type(StackOpnd(0)) {
+ Type::Fixnum => {},
+ // Besides Fixnum this could also be a Range or a Regexp, which are handled by separate C funcs.
+ // Other types will raise.
+ _ => {
+ // If the context doesn't have the type info we try a little harder.
+ let comptime_arg = jit.peek_at_stack(&asm.ctx, 0);
+ let arg0 = asm.stack_opnd(0);
+ if comptime_arg.fixnum_p() {
+ asm.test(arg0, Opnd::UImm(RUBY_FIXNUM_FLAG as u64));
+
+ jit_chain_guard(
+ JCC_JZ,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_str_aref_not_fixnum,
+ );
+ } else {
+ return false
+ }
+ },
+ }
+ } else {
+ return false
+ }
+
+ asm_comment!(asm, "String#[]");
+
+ // rb_str_substr allocates a substring
+ jit_prepare_call_with_gc(jit, asm);
+
+ // Get stack operands after potential SP change
+
+ // The "empty" arg distinguishes between the normal "one arg" behavior
+ // and the "two arg" special case that returns an empty string
+ // when the begin index is the length of the string.
+ // See the usages of rb_str_substr in string.c for more information.
+ let (beg_idx, empty, len) = if argc == 2 {
+ (1, Opnd::Imm(1), asm.stack_opnd(0))
+ } else {
+ // If there is only one arg, the length will be 1.
+ (0, Opnd::Imm(0), VALUE::fixnum_from_usize(1).into())
+ };
+
+ let beg = asm.stack_opnd(beg_idx);
+ let recv = asm.stack_opnd(beg_idx + 1);
+
+ let ret_opnd = asm.ccall(rb_str_substr_two_fixnums as *const u8, vec![recv, beg, len, empty]);
+ asm.stack_pop(beg_idx as usize + 2);
+
+ let out_opnd = asm.stack_push(Type::Unknown);
+ asm.mov(out_opnd, ret_opnd);
+
+ true
+}
+
+fn jit_rb_str_getbyte(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ asm_comment!(asm, "String#getbyte");
+
+ // Don't pop since we may bail
+ let idx = asm.stack_opnd(0);
+ let recv = asm.stack_opnd(1);
+
+ let comptime_idx = jit.peek_at_stack(&asm.ctx, 0);
+ if comptime_idx.fixnum_p() {
+ jit_guard_known_klass(
+ jit,
+ asm,
+ idx,
+ idx.into(),
+ comptime_idx,
+ SEND_MAX_DEPTH,
+ Counter::getbyte_idx_not_fixnum,
+ );
+ } else {
+ return false;
+ }
+
+ // Untag the index
+ let idx = asm.rshift(idx, Opnd::UImm(1));
+
+ // If index is negative, exit
+ asm.cmp(idx, Opnd::UImm(0));
+ asm.jl(Target::side_exit(Counter::getbyte_idx_negative));
+
+ asm_comment!(asm, "get string length");
+ let recv = asm.load(recv);
+ let str_len_opnd = Opnd::mem(
+ std::os::raw::c_long::BITS as u8,
+ recv,
+ RUBY_OFFSET_RSTRING_LEN as i32,
+ );
+
+ // Exit if the index is out of bounds
+ asm.cmp(idx, str_len_opnd);
+ asm.jge(Target::side_exit(Counter::getbyte_idx_out_of_bounds));
+
+ let str_ptr = get_string_ptr(asm, recv);
+ // FIXME: could use SIB indexing here with proper support in backend
+ let str_ptr = asm.add(str_ptr, idx);
+ let byte = asm.load(Opnd::mem(8, str_ptr, 0));
+
+ // Zero-extend the byte to 64 bits
+ let byte = byte.with_num_bits(64).unwrap();
+ let byte = asm.and(byte, 0xFF.into());
+
+ // Tag the byte
+ let byte = asm.lshift(byte, Opnd::UImm(1));
+ let byte = asm.or(byte, Opnd::UImm(1));
+
+ asm.stack_pop(2); // Pop only after the last side exit so exits still see the operands on the stack
+ let out_opnd = asm.stack_push(Type::Fixnum);
+ asm.mov(out_opnd, byte);
+
+ true
+}
+
+fn jit_rb_str_setbyte(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ // Raises when index is out of range. Lazily push a frame in that case.
+ if !jit_prepare_lazy_frame_call(jit, asm, cme, StackOpnd(2)) {
+ return false;
+ }
+ asm_comment!(asm, "String#setbyte");
+
+ let value = asm.stack_opnd(0);
+ let index = asm.stack_opnd(1);
+ let recv = asm.stack_opnd(2);
+
+ let ret_opnd = asm.ccall(rb_str_setbyte as *const u8, vec![recv, index, value]);
+ asm.stack_pop(3); // Keep them on stack during ccall for GC
+
+ let out_opnd = asm.stack_push(Type::UnknownImm);
+ asm.mov(out_opnd, ret_opnd);
+
+ true
+}
+
+// Codegen for rb_str_to_s()
+// When String#to_s is called on a String instance, the method returns self and
+// most of the overhead comes from setting up the method call. We observed that
+// this situation happens a lot in some workloads.
+fn jit_rb_str_to_s(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ known_recv_class: Option<VALUE>,
+) -> bool {
+ if unsafe { known_recv_class == Some(rb_cString) } {
+ asm_comment!(asm, "to_s on plain string");
+ // The method returns the receiver, which is already on the stack.
+ // No stack movement.
+ return true;
+ }
+ false
+}
+
+fn jit_rb_str_dup(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ known_recv_class: Option<VALUE>,
+) -> bool {
+ // We specialize only the BARE_STRING_P case. Otherwise it's not leaf.
+ if unsafe { known_recv_class != Some(rb_cString) } {
+ return false;
+ }
+ asm_comment!(asm, "String#dup");
+
+ jit_prepare_call_with_gc(jit, asm);
+
+ let recv_opnd = asm.stack_opnd(0);
+ let recv_opnd = asm.load(recv_opnd);
+
+ let shape_id_offset = unsafe { rb_shape_id_offset() };
+ let shape_opnd = Opnd::mem(64, recv_opnd, shape_id_offset);
+ asm.test(shape_opnd, Opnd::UImm(SHAPE_ID_HAS_IVAR_MASK as u64));
+ asm.jnz(Target::side_exit(Counter::send_str_dup_exivar));
+
+ // Call rb_str_dup
+ let ret_opnd = asm.ccall(rb_str_dup as *const u8, vec![recv_opnd]);
+
+ asm.stack_pop(1);
+ let stack_ret = asm.stack_push(Type::CString);
+ asm.mov(stack_ret, ret_opnd);
+
+ true
+}
+
+// Codegen for rb_str_empty_p()
+fn jit_rb_str_empty_p(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ let recv_opnd = asm.stack_pop(1);
+
+ asm_comment!(asm, "get string length");
+ let str_len_opnd = Opnd::mem(
+ std::os::raw::c_long::BITS as u8,
+ asm.load(recv_opnd),
+ RUBY_OFFSET_RSTRING_LEN as i32,
+ );
+
+ asm.cmp(str_len_opnd, Opnd::UImm(0));
+ let string_empty = asm.csel_e(Qtrue.into(), Qfalse.into());
+ let out_opnd = asm.stack_push(Type::UnknownImm);
+ asm.mov(out_opnd, string_empty);
+
+ true
+}
+
+// Codegen for rb_str_concat() with an integer argument -- *not* String#concat
+// Using strings as a byte buffer often includes appending byte values to the end of the string.
+fn jit_rb_str_concat_codepoint(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ asm_comment!(asm, "String#<< with codepoint argument");
+
+ // Either of the string concatenation functions we call will reallocate the string to grow its
+ // capacity if necessary. In extremely rare cases (i.e., string exceeds `LONG_MAX` bytes),
+ // either of the called functions will raise an exception.
+ jit_prepare_non_leaf_call(jit, asm);
+
+ let codepoint = asm.stack_opnd(0);
+ let recv = asm.stack_opnd(1);
+
+ guard_object_is_fixnum(jit, asm, codepoint, StackOpnd(0));
+
+ asm.ccall(rb_jit_str_concat_codepoint as *const u8, vec![recv, codepoint]);
+
+ // The receiver is the return value, so we only need to pop the codepoint argument off the stack.
+ // We can reuse the receiver slot in the stack as the return value.
+ asm.stack_pop(1);
+
+ true
+}
+
+// Codegen for rb_str_concat() -- *not* String#concat
+// Frequently strings are concatenated using "out_str << next_str".
+// This is common in Erb and similar templating languages.
+fn jit_rb_str_concat(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ci: *const rb_callinfo,
+ cme: *const rb_callable_method_entry_t,
+ block: Option<BlockHandler>,
+ argc: i32,
+ known_recv_class: Option<VALUE>,
+) -> bool {
+ // The << operator can accept integer codepoints for characters
+ // as the argument. We only specially optimise string arguments.
+ // If the peeked-at compile time argument is something other than
+ // a string, assume it won't be a string later either.
+ let comptime_arg = jit.peek_at_stack(&asm.ctx, 0);
+ if unsafe { RB_TYPE_P(comptime_arg, RUBY_T_FIXNUM) } {
+ return jit_rb_str_concat_codepoint(jit, asm, ci, cme, block, argc, known_recv_class);
+ }
+
+ if !unsafe { RB_TYPE_P(comptime_arg, RUBY_T_STRING) } {
+ return false;
+ }
+
+ // Guard that the concat argument is a string
+ guard_object_is_string(asm, asm.stack_opnd(0), StackOpnd(0), Counter::guard_send_not_string);
+
+ // Guard buffers from GC since rb_str_buf_append may allocate.
+ // rb_str_buf_append may raise Encoding::CompatibilityError, but we accept compromised
+ // backtraces on this method since the interpreter does the same thing on opt_ltlt.
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Explicitly spill temps before making any C calls. `ccall` will spill temps, but it does a
+ // check to only spill if it thinks it's necessary. That logic can't see through the runtime
+ // branching occurring in the code generated for this function. Consequently, the branch for
+ // the first `ccall` will spill registers but the second one will not. At run time, we may
+ // jump over that spill code when executing the second branch, leading to situations that are
+ // quite hard to debug. If we spill up front we avoid diverging behavior.
+ asm.spill_regs();
+
+ let concat_arg = asm.stack_pop(1);
+ let recv = asm.stack_pop(1);
+
+ // Test if string encodings differ. If different, use rb_str_append. If the same,
+ // use rb_yjit_str_simple_append, which calls rb_str_cat.
+ asm_comment!(asm, "<< on strings");
+
+ // Take receiver's object flags XOR arg's flags. If any
+ // string-encoding flags are different between the two,
+ // the encodings don't match.
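+ // Roughly, in C terms:
+ //   ((RBASIC(recv)->flags ^ RBASIC(arg)->flags) & RUBY_ENCODING_MASK) != 0
+ // implies a mismatch, since the encoding index is stored in those flag bits.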
+ let recv_reg = asm.load(recv);
+ let concat_arg_reg = asm.load(concat_arg);
+ let flags_xor = asm.xor(
+ Opnd::mem(64, recv_reg, RUBY_OFFSET_RBASIC_FLAGS),
+ Opnd::mem(64, concat_arg_reg, RUBY_OFFSET_RBASIC_FLAGS)
+ );
+ asm.test(flags_xor, Opnd::UImm(RUBY_ENCODING_MASK as u64));
+
+ let enc_mismatch = asm.new_label("enc_mismatch");
+ asm.jnz(enc_mismatch);
+
+ // If encodings match, call the simple append function and jump to return
+ let ret_opnd = asm.ccall(rb_yjit_str_simple_append as *const u8, vec![recv, concat_arg]);
+ let ret_label = asm.new_label("func_return");
+ let stack_ret = asm.stack_push(Type::TString);
+ asm.mov(stack_ret, ret_opnd);
+ asm.stack_pop(1); // forget stack_ret to re-push after ccall
+ asm.jmp(ret_label);
+
+ // If encodings are different, use a slower encoding-aware concatenate
+ asm.write_label(enc_mismatch);
+ asm.spill_regs(); // Ignore the register for the other local branch
+ let ret_opnd = asm.ccall(rb_str_buf_append as *const u8, vec![recv, concat_arg]);
+ let stack_ret = asm.stack_push(Type::TString);
+ asm.mov(stack_ret, ret_opnd);
+ // Drop through to return
+
+ asm.write_label(ret_label);
+
+ true
+}
+
+// Codegen for rb_ary_empty_p()
+fn jit_rb_ary_empty_p(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ let array_opnd = asm.stack_pop(1);
+ let array_reg = asm.load(array_opnd);
+ let len_opnd = get_array_len(asm, array_reg);
+
+ asm.test(len_opnd, len_opnd);
+ let bool_val = asm.csel_z(Qtrue.into(), Qfalse.into());
+
+ let out_opnd = asm.stack_push(Type::UnknownImm);
+ asm.store(out_opnd, bool_val);
+
+ true
+}
+
+// Codegen for rb_ary_length()
+fn jit_rb_ary_length(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ let array_opnd = asm.stack_pop(1);
+ let array_reg = asm.load(array_opnd);
+ let len_opnd = get_array_len(asm, array_reg);
+
+ // Convert the length to a fixnum
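+ // Fixnum tagging: fixnum = (len << 1) | RUBY_FIXNUM_FLAG, e.g. a length of 3 becomes 0b111.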
+ let shifted_val = asm.lshift(len_opnd, Opnd::UImm(1));
+ let out_val = asm.or(shifted_val, Opnd::UImm(RUBY_FIXNUM_FLAG as u64));
+
+ let out_opnd = asm.stack_push(Type::Fixnum);
+ asm.store(out_opnd, out_val);
+
+ true
+}
+
+fn jit_rb_ary_push(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ asm_comment!(asm, "Array#<<");
+
+ // rb_ary_push allocates memory for buffer extension and can raise FrozenError
+ // Not using a lazy frame here since the interpreter also has a truncated
+ // stack trace from opt_ltlt.
+ jit_prepare_non_leaf_call(jit, asm);
+
+ let item_opnd = asm.stack_opnd(0);
+ let ary_opnd = asm.stack_opnd(1);
+ let ret = asm.ccall(rb_ary_push as *const u8, vec![ary_opnd, item_opnd]);
+ asm.stack_pop(2); // Keep them on stack during ccall for GC
+
+ let ret_opnd = asm.stack_push(Type::TArray);
+ asm.mov(ret_opnd, ret);
+ true
+}
+
+// Just a leaf method, but not using `Primitive.attr! :leaf` since BOP methods can't use it.
+fn jit_rb_hash_empty_p(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ asm_comment!(asm, "Hash#empty?");
+
+ let hash_opnd = asm.stack_pop(1);
+ let ret = asm.ccall(rb_hash_empty_p as *const u8, vec![hash_opnd]);
+
+ let ret_opnd = asm.stack_push(Type::UnknownImm);
+ asm.mov(ret_opnd, ret);
+ true
+}
+
+fn jit_obj_respond_to(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ argc: i32,
+ known_recv_class: Option<VALUE>,
+) -> bool {
+ // respond_to(:sym) or respond_to(:sym, true)
+ if argc != 1 && argc != 2 {
+ return false;
+ }
+
+ let recv_class = match known_recv_class {
+ Some(class) => class,
+ None => return false,
+ };
+
+ // Get the method_id from compile time. We will later add a guard against it.
+ let mid_sym = jit.peek_at_stack(&asm.ctx, (argc - 1) as isize);
+ if !mid_sym.static_sym_p() {
+ return false
+ }
+ let mid = unsafe { rb_sym2id(mid_sym) };
+
+ // Option<bool> representing the value of the "include_all" argument and whether it's known
+ let allow_priv = if argc == 1 {
+ // Default is false
+ Some(false)
+ } else {
+ // Get value from type information (may or may not be known)
+ asm.ctx.get_opnd_type(StackOpnd(0)).known_truthy()
+ };
+
+ let target_cme = unsafe { rb_callable_method_entry_or_negative(recv_class, mid) };
+
+ // Should never be null: when the method doesn't exist, we are returned a "negative CME" instead
+ assert!(!target_cme.is_null());
+
+ let cme_def_type = unsafe { get_cme_def_type(target_cme) };
+
+ if cme_def_type == VM_METHOD_TYPE_REFINED {
+ return false;
+ }
+
+ let visibility = if cme_def_type == VM_METHOD_TYPE_UNDEF {
+ METHOD_VISI_UNDEF
+ } else {
+ unsafe { METHOD_ENTRY_VISI(target_cme) }
+ };
+
+ let result = match (visibility, allow_priv) {
+ (METHOD_VISI_UNDEF, _) => {
+ // No method, we can return false given respond_to_missing? hasn't been overridden.
+ // In the future, we might want to jit the call to respond_to_missing?
+ if !assume_method_basic_definition(jit, asm, recv_class, ID!(respond_to_missing)) {
+ return false;
+ }
+ Qfalse
+ }
+ (METHOD_VISI_PUBLIC, _) | // Public method => fine regardless of include_all
+ (_, Some(true)) => { // include_all => all visibility are acceptable
+ // Method exists and has acceptable visibility
+ if cme_def_type == VM_METHOD_TYPE_NOTIMPLEMENTED {
+ // C method with rb_f_notimplement(). `respond_to?` returns false
+ // without consulting `respond_to_missing?`. See also: rb_add_method_cfunc()
+ Qfalse
+ } else {
+ Qtrue
+ }
+ }
+ (_, _) => return false // not public and include_all not known, can't compile
+ };
+
+ // Invalidate this block if method lookup changes for the method being queried. This works
+ // both when the method exists and when it doesn't, since in the latter case we asked for a
+ // "negative CME" earlier.
+ jit.assume_method_lookup_stable(asm, target_cme);
+
+ if argc == 2 {
+ // pop include_all argument (we only use its type info)
+ asm.stack_pop(1);
+ }
+
+ let sym_opnd = asm.stack_pop(1);
+ let _recv_opnd = asm.stack_pop(1);
+
+ // This is necessary because we have no guarantee that sym_opnd is a constant
+ asm_comment!(asm, "guard known mid");
+ asm.cmp(sym_opnd, mid_sym.into());
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_respond_to_mid_mismatch,
+ );
+
+ jit_putobject(asm, result);
+
+ true
+}
+
+fn jit_rb_f_block_given_p(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ asm.stack_pop(1);
+ let out_opnd = asm.stack_push(Type::UnknownImm);
+
+ gen_block_given(jit, asm, out_opnd, Qtrue.into(), Qfalse.into());
+
+ true
+}
+
+/// Codegen for `block_given?` and `defined?(yield)`
+fn gen_block_given(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ out_opnd: Opnd,
+ true_opnd: Opnd,
+ false_opnd: Opnd,
+) {
+ asm_comment!(asm, "block_given?");
+
+ // `yield` goes to the block handler stowed in the "local" iseq which is
+ // the current iseq or a parent. Only the "method" iseq type can be passed a
+ // block handler. (e.g. `yield` in the top level script is a syntax error.)
+ let local_iseq = unsafe { rb_get_iseq_body_local_iseq(jit.iseq) };
+ if unsafe { rb_get_iseq_body_type(local_iseq) } == ISEQ_TYPE_METHOD {
+ // Same as rb_vm_frame_block_handler
+ let ep_opnd = gen_get_lep(jit, asm);
+ let block_handler = asm.load(
+ Opnd::mem(64, ep_opnd, SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL)
+ );
+
+ // Return `block_handler != VM_BLOCK_HANDLER_NONE`
+ asm.cmp(block_handler, VM_BLOCK_HANDLER_NONE.into());
+ let block_given = asm.csel_ne(true_opnd, false_opnd);
+ asm.mov(out_opnd, block_given);
+ } else {
+ asm.mov(out_opnd, false_opnd);
+ }
+}
+
+// Codegen for rb_class_superclass()
+fn jit_rb_class_superclass(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ cme: *const rb_callable_method_entry_t,
+ _block: Option<crate::codegen::BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ extern "C" {
+ fn rb_class_superclass(klass: VALUE) -> VALUE;
+ }
+
+ // It may raise "uninitialized class"
+ if !jit_prepare_lazy_frame_call(jit, asm, cme, StackOpnd(0)) {
+ return false;
+ }
+
+ asm_comment!(asm, "Class#superclass");
+ let recv_opnd = asm.stack_opnd(0);
+ let ret = asm.ccall(rb_class_superclass as *const u8, vec![recv_opnd]);
+
+ asm.stack_pop(1);
+ let ret_opnd = asm.stack_push(Type::Unknown);
+ asm.mov(ret_opnd, ret);
+
+ true
+}
+
+fn jit_rb_case_equal(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ known_recv_class: Option<VALUE>,
+) -> bool {
+ if !jit.assume_expected_cfunc(asm, known_recv_class.unwrap(), ID!(eq), rb_obj_equal as _) {
+ return false;
+ }
+
+ asm_comment!(asm, "case_equal: {}#===", get_class_name(known_recv_class));
+
+ // Compare the arguments
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
+ asm.cmp(arg0, arg1);
+ let ret_opnd = asm.csel_e(Qtrue.into(), Qfalse.into());
+
+ let stack_ret = asm.stack_push(Type::UnknownImm);
+ asm.mov(stack_ret, ret_opnd);
+
+ true
+}
+
+fn jit_thread_s_current(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ asm_comment!(asm, "Thread.current");
+ asm.stack_pop(1);
+
+ // ec->thread_ptr
+ let ec_thread_opnd = asm.load(Opnd::mem(64, EC, RUBY_OFFSET_EC_THREAD_PTR as i32));
+
+ // thread->self
+ let thread_self = Opnd::mem(64, ec_thread_opnd, RUBY_OFFSET_THREAD_SELF);
+
+ let stack_ret = asm.stack_push(Type::UnknownHeap);
+ asm.mov(stack_ret, thread_self);
+ true
+}
+
+/// Specialization for rb_obj_dup() (Kernel#dup)
+fn jit_rb_obj_dup(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+ _ci: *const rb_callinfo,
+ _cme: *const rb_callable_method_entry_t,
+ _block: Option<BlockHandler>,
+ _argc: i32,
+ _known_recv_class: Option<VALUE>,
+) -> bool {
+ // Kernel#dup has arity=0, and caller already did argument count check.
+ let self_type = asm.ctx.get_opnd_type(StackOpnd(0));
+
+ // The method is a no-op when the receiver is an immediate value,
+ // so we can leave the receiver on the stack as the return value.
+ self_type.is_imm()
+}
+
+/// Check if we know how to codegen for a particular cfunc method
+/// See also: [reg_method_codegen].
+fn lookup_cfunc_codegen(def: *const rb_method_definition_t) -> Option<MethodGenFn> {
+ let method_serial = unsafe { get_def_method_serial(def) };
+ let table = unsafe { METHOD_CODEGEN_TABLE.as_ref().unwrap() };
+
+ // Copy the function pointer out of the table entry, if present
+ table.get(&method_serial).copied()
+}
+
+// Is anyone listening for :c_call and :c_return events currently?
+fn c_method_tracing_currently_enabled(jit: &JITState) -> bool {
+ // Defer to C implementation in yjit.c
+ unsafe {
+ rb_c_method_tracing_currently_enabled(jit.ec)
+ }
+}
+
+// Similar to args_kw_argv_to_hash. It is called at runtime from within the
+// generated assembly to build a Ruby hash of the passed keyword arguments. The
+// keys are the Symbol objects associated with the keywords and the values are
+// the actual values. In the representation, both keys and values are VALUEs.
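+// As a sketch, a call like foo(a: 1, b: 2) reaches here with kw_len == 2,
+// sp[-2] == 1 and sp[-1] == 2, while the Symbol keys :a and :b come from the callinfo.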
+unsafe extern "C" fn build_kwhash(ci: *const rb_callinfo, sp: *const VALUE) -> VALUE {
+ let kw_arg = vm_ci_kwarg(ci);
+ let kw_len: usize = get_cikw_keyword_len(kw_arg).try_into().unwrap();
+ let hash = rb_hash_new_with_size(kw_len as u64);
+
+ for kwarg_idx in 0..kw_len {
+ let key = get_cikw_keywords_idx(kw_arg, kwarg_idx.try_into().unwrap());
+ let val = sp.sub(kw_len).add(kwarg_idx).read();
+ rb_hash_aset(hash, key, val);
+ }
+ hash
+}
+
+// SpecVal is a single value in an iseq invocation's environment on the stack,
+// at sp[-2]. Depending on the frame type, it can serve different purposes,
+// which are covered here by enum variants.
+enum SpecVal {
+ BlockHandler(Option<BlockHandler>),
+ PrevEP(*const VALUE),
+ PrevEPOpnd(Opnd),
+}
+
+// Each variant represents a branch in vm_caller_setup_arg_block.
+#[derive(Clone, Copy)]
+pub enum BlockHandler {
+ // send, invokesuper: blockiseq operand
+ BlockISeq(IseqPtr),
+ // invokesuper: GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])
+ LEPSpecVal,
+ // part of the allocate-free block forwarding scheme
+ BlockParamProxy,
+ // To avoid holding the block arg (e.g. proc and symbol) across C calls,
+ // we might need to set the block handler early in the call sequence
+ AlreadySet,
+}
+
+struct ControlFrame {
+ recv: Opnd,
+ sp: Opnd,
+ iseq: Option<IseqPtr>,
+ pc: Option<u64>,
+ frame_type: u32,
+ specval: SpecVal,
+ cme: *const rb_callable_method_entry_t,
+}
+
+// Codegen performing a similar (but not identical) function to vm_push_frame
+//
+// This will generate the code to:
+// * initialize locals to Qnil
+// * push the environment (cme, block handler, frame type)
+// * push a new CFP
+// * save the new CFP to ec->cfp
+//
+// Notes:
+// * Provided sp should point to the new frame's sp, immediately following locals and the environment
+// * At entry, CFP points to the caller (not callee) frame
+// * At exit, ec->cfp is updated to the pushed CFP
+// * SP register is updated only if frame.iseq is set
+// * Stack overflow is not checked (should be done by the caller)
+// * Interrupts are not checked (should be done by the caller)
+fn gen_push_frame(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ frame: ControlFrame,
+) {
+ let sp = frame.sp;
+
+ asm_comment!(asm, "push cme, specval, frame type");
+
+ // Write method entry at sp[-3]
+ // sp[-3] = me;
+ // Use compile time cme. It's assumed to be valid because we are notified when
+ // any cme we depend on becomes outdated. See yjit_method_lookup_change().
+ asm.store(Opnd::mem(64, sp, SIZEOF_VALUE_I32 * -3), VALUE::from(frame.cme).into());
+
+ // Write special value at sp[-2]. It's either a block handler or a pointer to
+ // the outer environment depending on the frame type.
+ // sp[-2] = specval;
+ let specval: Opnd = match frame.specval {
+ SpecVal::BlockHandler(None) => VM_BLOCK_HANDLER_NONE.into(),
+ SpecVal::BlockHandler(Some(block_handler)) => {
+ match block_handler {
+ BlockHandler::BlockISeq(block_iseq) => {
+ // Change cfp->block_code in the current frame. See vm_caller_setup_arg_block().
+ // VM_CFP_TO_CAPTURED_BLOCK takes &cfp->self, and rb_captured_block->code.iseq
+ // aliases cfp->block_code.
+ asm.store(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_BLOCK_CODE), VALUE::from(block_iseq).into());
+
+ let cfp_self = asm.lea(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF));
+ asm.or(cfp_self, Opnd::Imm(1))
+ }
+ BlockHandler::LEPSpecVal => {
+ let lep_opnd = gen_get_lep(jit, asm);
+ asm.load(Opnd::mem(64, lep_opnd, SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL))
+ }
+ BlockHandler::BlockParamProxy => {
+ let ep_opnd = gen_get_lep(jit, asm);
+ let block_handler = asm.load(
+ Opnd::mem(64, ep_opnd, SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL)
+ );
+ block_handler
+ }
+ BlockHandler::AlreadySet => 0.into(), // unused
+ }
+ }
+ SpecVal::PrevEP(prev_ep) => {
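+ // Tag the low bit of the EP pointer (as VM_GUARDED_PREV_EP() does in the
+ // interpreter) so the VM can tell sp[-2] holds a previous EP, not a block handler.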
+ let tagged_prev_ep = (prev_ep as usize) | 1;
+ VALUE(tagged_prev_ep).into()
+ }
+ SpecVal::PrevEPOpnd(ep_opnd) => {
+ asm.or(ep_opnd, 1.into())
+ }
+ };
+ if let SpecVal::BlockHandler(Some(BlockHandler::AlreadySet)) = frame.specval {
+ asm_comment!(asm, "specval should have been set");
+ } else {
+ asm.store(Opnd::mem(64, sp, SIZEOF_VALUE_I32 * -2), specval);
+ }
+
+ // Write env flags at sp[-1]
+ // sp[-1] = frame_type;
+ asm.store(Opnd::mem(64, sp, SIZEOF_VALUE_I32 * -1), frame.frame_type.into());
+
+ // Allocate a new CFP (ec->cfp--)
+ fn cfp_opnd(offset: i32) -> Opnd {
+ Opnd::mem(64, CFP, offset - (RUBY_SIZEOF_CONTROL_FRAME as i32))
+ }
+
+ // Setup the new frame
+ // *cfp = (const struct rb_control_frame_struct) {
+ // .pc = <unset for iseq, 0 for cfunc>,
+ // .sp = sp,
+ // .iseq = <iseq for iseq, 0 for cfunc>,
+ // .self = recv,
+ // .ep = <sp - 1>,
+ // .block_code = 0,
+ // };
+ asm_comment!(asm, "push callee control frame");
+
+ // For an iseq call PC may be None, in which case we will not set PC and will allow jitted code
+ // to set it as necessary.
+ if let Some(pc) = frame.pc {
+ asm.mov(cfp_opnd(RUBY_OFFSET_CFP_PC), pc.into());
+ };
+ asm.mov(cfp_opnd(RUBY_OFFSET_CFP_SP), sp);
+ let iseq: Opnd = if let Some(iseq) = frame.iseq {
+ VALUE::from(iseq).into()
+ } else {
+ 0.into()
+ };
+ asm.mov(cfp_opnd(RUBY_OFFSET_CFP_ISEQ), iseq);
+ asm.mov(cfp_opnd(RUBY_OFFSET_CFP_SELF), frame.recv);
+ asm.mov(cfp_opnd(RUBY_OFFSET_CFP_BLOCK_CODE), 0.into());
+
+ let ep = asm.sub(sp, SIZEOF_VALUE.into());
+ asm.mov(cfp_opnd(RUBY_OFFSET_CFP_EP), ep);
+}
+
+fn gen_send_cfunc(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ci: *const rb_callinfo,
+ cme: *const rb_callable_method_entry_t,
+ block: Option<BlockHandler>,
+ recv_known_class: Option<VALUE>,
+ flags: u32,
+ argc: i32,
+) -> Option<CodegenStatus> {
+ let cfunc = unsafe { get_cme_def_body_cfunc(cme) };
+ let cfunc_argc = unsafe { get_mct_argc(cfunc) };
+ let mut argc = argc;
+
+ // Splat call to a C method that takes `VALUE *` and `len`
+ let variable_splat = flags & VM_CALL_ARGS_SPLAT != 0 && cfunc_argc == -1;
+ let block_arg = flags & VM_CALL_ARGS_BLOCKARG != 0;
+
+ // If it's a splat and the method expects a Ruby array of arguments
+ if cfunc_argc == -2 && flags & VM_CALL_ARGS_SPLAT != 0 {
+ gen_counter_incr(jit, asm, Counter::send_cfunc_splat_neg2);
+ return None;
+ }
+
+ exit_if_kwsplat_non_nil(jit, asm, flags, Counter::send_cfunc_kw_splat_non_nil)?;
+ let kw_splat = flags & VM_CALL_KW_SPLAT != 0;
+
+ let kw_arg = unsafe { vm_ci_kwarg(ci) };
+ let kw_arg_num = if kw_arg.is_null() {
+ 0
+ } else {
+ unsafe { get_cikw_keyword_len(kw_arg) }
+ };
+
+ if kw_arg_num != 0 && flags & VM_CALL_ARGS_SPLAT != 0 {
+ gen_counter_incr(jit, asm, Counter::send_cfunc_splat_with_kw);
+ return None;
+ }
+
+ if c_method_tracing_currently_enabled(jit) {
+ // Don't JIT if tracing c_call or c_return
+ gen_counter_incr(jit, asm, Counter::send_cfunc_tracing);
+ return None;
+ }
+
+ // Increment total cfunc send count
+ gen_counter_incr(jit, asm, Counter::num_send_cfunc);
+
+ // Delegate to codegen for C methods if we have it and the callsite is simple enough.
+ if kw_arg.is_null() &&
+ !kw_splat &&
+ flags & VM_CALL_OPT_SEND == 0 &&
+ flags & VM_CALL_ARGS_SPLAT == 0 &&
+ flags & VM_CALL_ARGS_BLOCKARG == 0 &&
+ (cfunc_argc == -1 || argc == cfunc_argc) {
+ let expected_stack_after = asm.ctx.get_stack_size() as i32 - argc;
+ if let Some(known_cfunc_codegen) = lookup_cfunc_codegen(unsafe { (*cme).def }) {
+ // We don't push a frame for specialized cfunc codegen, so the generated code must be leaf.
+ // However, the interpreter doesn't push a frame on opt_* instruction either, so we allow
+ // non-sendish instructions to break this rule as an exception.
+ let cfunc_codegen = if jit.is_sendish() {
+ asm.with_leaf_ccall(|asm|
+ perf_call!("gen_send_cfunc: ", known_cfunc_codegen(jit, asm, ci, cme, block, argc, recv_known_class))
+ )
+ } else {
+ perf_call!("gen_send_cfunc: ", known_cfunc_codegen(jit, asm, ci, cme, block, argc, recv_known_class))
+ };
+
+ if cfunc_codegen {
+ assert_eq!(expected_stack_after, asm.ctx.get_stack_size() as i32);
+ gen_counter_incr(jit, asm, Counter::num_send_cfunc_inline);
+ // The cfunc codegen generated code. Terminate the block so
+ // there aren't multiple calls in the same block.
+ return jump_to_next_insn(jit, asm);
+ }
+ }
+ }
+
+ // Check for interrupts
+ gen_check_ints(asm, Counter::guard_send_interrupted);
+
+ // Stack overflow check
+ // #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin)
+ // REG_CFP <= REG_SP + 4 * SIZEOF_VALUE + sizeof(rb_control_frame_t)
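+ // The `2 *` below mirrors the interpreter checking against an already-decremented
+ // cfp (see the same check in gen_send_iseq), on top of a 4-VALUE margin.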
+ asm_comment!(asm, "stack overflow check");
+ const _: () = assert!(RUBY_SIZEOF_CONTROL_FRAME % SIZEOF_VALUE == 0, "sizeof(rb_control_frame_t) is a multiple of sizeof(VALUE)");
+ let stack_limit = asm.lea(asm.ctx.sp_opnd((4 + 2 * (RUBY_SIZEOF_CONTROL_FRAME / SIZEOF_VALUE)) as i32));
+ asm.cmp(CFP, stack_limit);
+ asm.jbe(Target::side_exit(Counter::guard_send_se_cf_overflow));
+
+ // Guard for variable length splat call before any modifications to the stack
+ if variable_splat {
+ let splat_array_idx = i32::from(kw_splat) + i32::from(block_arg);
+ let comptime_splat_array = jit.peek_at_stack(&asm.ctx, splat_array_idx as isize);
+ if unsafe { rb_yjit_ruby2_keywords_splat_p(comptime_splat_array) } != 0 {
+ gen_counter_incr(jit, asm, Counter::send_cfunc_splat_varg_ruby2_keywords);
+ return None;
+ }
+
+ let splat_array = asm.stack_opnd(splat_array_idx);
+ guard_object_is_array(asm, splat_array, splat_array.into(), Counter::guard_send_splat_not_array);
+
+ asm_comment!(asm, "guard variable length splat call servicable");
+ let sp = asm.ctx.sp_opnd(0);
+ let proceed = asm.ccall(rb_yjit_splat_varg_checks as _, vec![sp, splat_array, CFP]);
+ asm.cmp(proceed, Qfalse.into());
+ asm.je(Target::side_exit(Counter::guard_send_cfunc_bad_splat_vargs));
+ }
+
+ // Number of args which will be passed through to the callee
+ // This is adjusted by the kwargs being combined into a hash.
+ let mut passed_argc = if kw_arg.is_null() {
+ argc
+ } else {
+ argc - kw_arg_num + 1
+ };
+
+ // Exclude the kw_splat hash from arity check
+ if kw_splat {
+ passed_argc -= 1;
+ }
+
+ // If the argument count doesn't match
+ if cfunc_argc >= 0 && cfunc_argc != passed_argc && flags & VM_CALL_ARGS_SPLAT == 0 {
+ gen_counter_incr(jit, asm, Counter::send_cfunc_argc_mismatch);
+ return None;
+ }
+
+ // Don't JIT functions that need C stack arguments for now
+ if cfunc_argc >= 0 && passed_argc + 1 > (C_ARG_OPNDS.len() as i32) {
+ gen_counter_incr(jit, asm, Counter::send_cfunc_toomany_args);
+ return None;
+ }
+
+ let mut block_arg_type = if block_arg {
+ Some(asm.ctx.get_opnd_type(StackOpnd(0)))
+ } else {
+ None
+ };
+
+ match block_arg_type {
+ Some(Type::Nil | Type::BlockParamProxy) => {
+ // We don't need the actual stack value for these
+ asm.stack_pop(1);
+ }
+ Some(Type::Unknown | Type::UnknownImm) if jit.peek_at_stack(&asm.ctx, 0).nil_p() => {
+ // The sample blockarg is nil, so speculate that's the case.
+ asm.cmp(asm.stack_opnd(0), Qnil.into());
+ asm.jne(Target::side_exit(Counter::guard_send_cfunc_block_not_nil));
+ block_arg_type = Some(Type::Nil);
+ asm.stack_pop(1);
+ }
+ None => {
+ // Nothing to do
+ }
+ _ => {
+ gen_counter_incr(jit, asm, Counter::send_cfunc_block_arg);
+ return None;
+ }
+ }
+ let block_arg_type = block_arg_type; // drop `mut`
+
+ // Pop the empty kw_splat hash
+ if kw_splat {
+ // Only `**nil` is supported right now. Checked in exit_if_kwsplat_non_nil()
+ assert_eq!(Type::Nil, asm.ctx.get_opnd_type(StackOpnd(0)));
+ asm.stack_pop(1);
+ argc -= 1;
+ }
+
+ // Splat handling when C method takes a static number of arguments.
+ // push_splat_args() does stack manipulation so we can no longer side exit
+ if flags & VM_CALL_ARGS_SPLAT != 0 && cfunc_argc >= 0 {
+ let required_args: u32 = (cfunc_argc as u32).saturating_sub(argc as u32 - 1);
+ // + 1 because we pass self
+ if required_args + 1 >= C_ARG_OPNDS.len() as u32 {
+ gen_counter_incr(jit, asm, Counter::send_cfunc_toomany_args);
+ return None;
+ }
+
+ // We are going to assume that the splat fills
+ // all the remaining arguments, so the total number of args
+ // should equal the number of args the cfunc takes.
+ // The generated code tests this and side exits
+ // if it doesn't hold.
+ argc = cfunc_argc;
+ passed_argc = argc;
+ push_splat_args(required_args, asm)
+ }
+
+ // This is a .send call and we need to adjust the stack
+ if flags & VM_CALL_OPT_SEND != 0 {
+ handle_opt_send_shift_stack(asm, argc);
+ }
+
+ // Push a dynamic number of items from the splat array to the stack when calling a vargs method
+ let dynamic_splat_size = if variable_splat {
+ asm_comment!(asm, "variable length splat");
+ let stack_splat_array = asm.lea(asm.stack_opnd(0));
+ Some(asm.ccall(rb_yjit_splat_varg_cfunc as _, vec![stack_splat_array]))
+ } else {
+ None
+ };
+
+ // Points to the receiver operand on the stack
+ let recv = asm.stack_opnd(argc);
+
+ // Store incremented PC into current control frame in case callee raises.
+ jit_save_pc(jit, asm);
+
+ // Find callee's SP with space for metadata.
+ // Usually sp+3.
+ let sp = if let Some(splat_size) = dynamic_splat_size {
+ // Compute the callee's SP at runtime in case we accept a variable size for the splat array
+ const _: () = assert!(SIZEOF_VALUE == 8, "opting for a shift since mul on A64 takes no immediates");
+ let splat_size_bytes = asm.lshift(splat_size, 3usize.into());
+ // 3 items for method metadata, minus one to remove the splat array
+ let static_stack_top = asm.lea(asm.ctx.sp_opnd(2));
+ asm.add(static_stack_top, splat_size_bytes)
+ } else {
+ asm.lea(asm.ctx.sp_opnd(3))
+ };
+
+ let specval = if block_arg_type == Some(Type::BlockParamProxy) {
+ SpecVal::BlockHandler(Some(BlockHandler::BlockParamProxy))
+ } else {
+ SpecVal::BlockHandler(block)
+ };
+
+ let mut frame_type = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;
+ if !kw_arg.is_null() {
+ frame_type |= VM_FRAME_FLAG_CFRAME_KW
+ }
+
+ perf_call!("gen_send_cfunc: ", gen_push_frame(jit, asm, ControlFrame {
+ frame_type,
+ specval,
+ cme,
+ recv,
+ sp,
+ pc: if cfg!(feature = "runtime_checks") {
+ Some(!0) // Poison value. Helps to fail fast.
+ } else {
+ None // Leave PC uninitialized as cfuncs shouldn't read it
+ },
+ iseq: None,
+ }));
+
+ asm_comment!(asm, "set ec->cfp");
+ let new_cfp = asm.lea(Opnd::mem(64, CFP, -(RUBY_SIZEOF_CONTROL_FRAME as i32)));
+ asm.store(Opnd::mem(64, EC, RUBY_OFFSET_EC_CFP as i32), new_cfp);
+
+ if !kw_arg.is_null() {
+ // Build a hash from all kwargs passed
+ asm_comment!(asm, "build_kwhash");
+ let imemo_ci = VALUE(ci as usize);
+ assert_ne!(0, unsafe { rb_IMEMO_TYPE_P(imemo_ci, imemo_callinfo) },
+ "we assume all callinfos with kwargs are on the GC heap");
+ let sp = asm.lea(asm.ctx.sp_opnd(0));
+ let kwargs = asm.ccall(build_kwhash as *const u8, vec![imemo_ci.into(), sp]);
+
+ // Replace the stack location at the start of kwargs with the new hash
+ let stack_opnd = asm.stack_opnd(argc - passed_argc);
+ asm.mov(stack_opnd, kwargs);
+ }
+
+ // Write interpreter SP into CFP.
+ // We don't pop arguments yet to use registers for passing them, but we
+ // have to set cfp->sp below them for full_cfunc_return() invalidation.
+ gen_save_sp_with_offset(asm, -(argc + 1) as i8);
+
+ // Non-variadic method
+ let args = if cfunc_argc >= 0 {
+ // Copy the arguments from the stack to the C argument registers
+ // self is the 0th argument and is at index argc from the stack top
+ (0..=passed_argc).map(|i|
+ asm.stack_opnd(argc - i)
+ ).collect()
+ }
+ // Variadic method
+ else if cfunc_argc == -1 {
+ // The method gets a pointer to the first argument
+ // rb_f_puts(int argc, VALUE *argv, VALUE recv)
+
+ let passed_argc_opnd = if let Some(splat_size) = dynamic_splat_size {
+ // The final argc is the size of the splat, minus one for the splat array itself
+ asm.add(splat_size, (passed_argc - 1).into())
+ } else {
+ // Without a splat, passed_argc is static
+ Opnd::Imm(passed_argc.into())
+ };
+
+ vec![
+ passed_argc_opnd,
+ asm.lea(asm.ctx.sp_opnd(-argc)),
+ asm.stack_opnd(argc),
+ ]
+ }
+ // Variadic method taking a Ruby array
+ else if cfunc_argc == -2 {
+ // Slurp up all the arguments into an array
+ let stack_args = asm.lea(asm.ctx.sp_opnd(-argc));
+ let args_array = asm.ccall(
+ rb_ec_ary_new_from_values as _,
+ vec![EC, passed_argc.into(), stack_args]
+ );
+
+ // Example signature:
+ // VALUE neg2_method(VALUE self, VALUE argv)
+ vec![asm.stack_opnd(argc), args_array]
+ } else {
+ panic!("unexpected cfunc_argc: {}", cfunc_argc)
+ };
+
+ // Call the C function
+ // VALUE ret = (cfunc->func)(recv, argv[0], argv[1]);
+ // cfunc comes from compile-time cme->def, which we assume to be stable.
+ // Invalidation logic is in yjit_method_lookup_change()
+ asm_comment!(asm, "call C function");
+ let ret = asm.ccall(unsafe { get_mct_func(cfunc) }.cast(), args);
+ asm.stack_pop((argc + 1).try_into().unwrap()); // Pop arguments after ccall to use registers for passing them.
+
+ // Record code position for TracePoint patching. See full_cfunc_return().
+ record_global_inval_patch(asm, CodegenGlobals::get_outline_full_cfunc_return_pos());
+
+ // Push the return value on the Ruby stack
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, ret);
+
+ // Log the name of the method we're calling. We intentionally don't do this for inlined cfuncs.
+ // We also do this after the C call to minimize the impact of spill_temps() on asm.ccall().
+ if get_option!(gen_stats) {
+ // Assemble the method name string
+ let mid = unsafe { rb_get_def_original_id((*cme).def) };
+ let name_str = get_method_name(Some(unsafe { (*cme).owner }), mid);
+
+ // Get an index for this cfunc name
+ let cfunc_idx = get_cfunc_idx(&name_str);
+
+ // Increment the counter for this cfunc
+ asm.ccall(incr_cfunc_counter as *const u8, vec![cfunc_idx.into()]);
+ }
+
+ // Pop the stack frame (ec->cfp++)
+ // Instead of recalculating, we can reuse the previous CFP, which is stored in a callee-saved
+ // register
+ let ec_cfp_opnd = Opnd::mem(64, EC, RUBY_OFFSET_EC_CFP as i32);
+ asm.store(ec_cfp_opnd, CFP);
+
+ // cfunc calls may corrupt types
+ asm.clear_local_types();
+
+ // Note: the return block of gen_send_iseq() has ctx->sp_offset == 1
+ // which allows for sharing the same successor.
+
+ // Jump (fall through) to the call continuation block
+ // We do this to end the current block after the call
+ jump_to_next_insn(jit, asm)
+}
+
+// Generate RARRAY_LEN. For array_opnd, use Opnd::Reg to reduce memory access,
+// and use Opnd::Mem to save registers.
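+// As a sketch of the C logic this mirrors:
+//   len = FL_TEST_RAW(ary, RARRAY_EMBED_FLAG)
+//       ? (RBASIC(ary)->flags & RARRAY_EMBED_LEN_MASK) >> RARRAY_EMBED_LEN_SHIFT
+//       : RARRAY(ary)->as.heap.len;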
+fn get_array_len(asm: &mut Assembler, array_opnd: Opnd) -> Opnd {
+ asm_comment!(asm, "get array length for embedded or heap");
+
+ // Pull out the embed flag to check if it's an embedded array.
+ let array_reg = match array_opnd {
+ Opnd::InsnOut { .. } => array_opnd,
+ _ => asm.load(array_opnd),
+ };
+ let flags_opnd = Opnd::mem(VALUE_BITS, array_reg, RUBY_OFFSET_RBASIC_FLAGS);
+
+ // Get the length of the array
+ let emb_len_opnd = asm.and(flags_opnd, (RARRAY_EMBED_LEN_MASK as u64).into());
+ let emb_len_opnd = asm.rshift(emb_len_opnd, (RARRAY_EMBED_LEN_SHIFT as u64).into());
+
+ // Conditionally move the length of the heap array
+ let flags_opnd = Opnd::mem(VALUE_BITS, array_reg, RUBY_OFFSET_RBASIC_FLAGS);
+ asm.test(flags_opnd, (RARRAY_EMBED_FLAG as u64).into());
+
+ let array_reg = match array_opnd {
+ Opnd::InsnOut { .. } => array_opnd,
+ _ => asm.load(array_opnd),
+ };
+ let array_len_opnd = Opnd::mem(
+ std::os::raw::c_long::BITS as u8,
+ array_reg,
+ RUBY_OFFSET_RARRAY_AS_HEAP_LEN,
+ );
+
+ // Select the array length value
+ asm.csel_nz(emb_len_opnd, array_len_opnd)
+}
+
+// Generate RARRAY_CONST_PTR (part of RARRAY_AREF)
+fn get_array_ptr(asm: &mut Assembler, array_reg: Opnd) -> Opnd {
+ asm_comment!(asm, "get array pointer for embedded or heap");
+
+ let flags_opnd = Opnd::mem(VALUE_BITS, array_reg, RUBY_OFFSET_RBASIC_FLAGS);
+ asm.test(flags_opnd, (RARRAY_EMBED_FLAG as u64).into());
+ let heap_ptr_opnd = Opnd::mem(
+ usize::BITS as u8,
+ array_reg,
+ RUBY_OFFSET_RARRAY_AS_HEAP_PTR,
+ );
+
+ // Load the address of the embedded array
+ // (struct RArray *)(obj)->as.ary
+ let ary_opnd = asm.lea(Opnd::mem(VALUE_BITS, array_reg, RUBY_OFFSET_RARRAY_AS_ARY));
+ asm.csel_nz(ary_opnd, heap_ptr_opnd)
+}
+
+// Generate RSTRING_PTR
+fn get_string_ptr(asm: &mut Assembler, string_reg: Opnd) -> Opnd {
+ asm_comment!(asm, "get string pointer for embedded or heap");
+
+ let flags_opnd = Opnd::mem(VALUE_BITS, string_reg, RUBY_OFFSET_RBASIC_FLAGS);
+ asm.test(flags_opnd, (RSTRING_NOEMBED as u64).into());
+ let heap_ptr_opnd = asm.load(Opnd::mem(
+ usize::BITS as u8,
+ string_reg,
+ RUBY_OFFSET_RSTRING_AS_HEAP_PTR,
+ ));
+
+ // Load the address of the embedded string buffer
+ // (struct RString *)(obj)->as.ary
+ let ary_opnd = asm.lea(Opnd::mem(VALUE_BITS, string_reg, RUBY_OFFSET_RSTRING_AS_ARY));
+ asm.csel_nz(heap_ptr_opnd, ary_opnd)
+}
+
+/// Pushes arguments from an array to the stack. Differs from push_splat_args because
+/// the array can have items left over. Array is assumed to be T_ARRAY without guards.
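+/// For example, with `def f(a, *rest)` called as `f(*[1, 2, 3])`, only the one
+/// leading item is copied to the stack here; the remaining items are left in
+/// the array for the rest parameter.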
+fn copy_splat_args_for_rest_callee(array: Opnd, num_args: u32, asm: &mut Assembler) {
+ asm_comment!(asm, "copy_splat_args_for_rest_callee");
+
+ // Unused operands cause the backend to panic
+ if num_args == 0 {
+ return;
+ }
+
+ asm_comment!(asm, "Push arguments from array");
+
+ let array_reg = asm.load(array);
+ let ary_opnd = get_array_ptr(asm, array_reg);
+ for i in 0..num_args {
+ let top = asm.stack_push(Type::Unknown);
+ asm.mov(top, Opnd::mem(64, ary_opnd, i as i32 * SIZEOF_VALUE_I32));
+ }
+}
+
+/// Pushes arguments from an array to the stack that are passed with a splat (i.e. *args)
+/// It optimistically compiles to a static size that is the exact number of arguments
+/// needed for the function.
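+/// For example, calling `def f(x, y)` as `f(*args)` compiles to a guard that
+/// `args.length == 2` (side exiting otherwise) followed by two stack pushes.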
+fn push_splat_args(required_args: u32, asm: &mut Assembler) {
+ asm_comment!(asm, "push_splat_args");
+
+ let array_opnd = asm.stack_opnd(0);
+ guard_object_is_array(
+ asm,
+ array_opnd,
+ array_opnd.into(),
+ Counter::guard_send_splat_not_array,
+ );
+
+ let array_len_opnd = get_array_len(asm, array_opnd);
+
+ asm_comment!(asm, "Guard for expected splat length");
+ asm.cmp(array_len_opnd, required_args.into());
+ asm.jne(Target::side_exit(Counter::guard_send_splatarray_length_not_equal));
+
+ // Check last element of array if present
+ if required_args > 0 {
+ asm_comment!(asm, "Check last argument is not ruby2keyword hash");
+
+ // Need to repeat this here to deal with register allocation
+ let array_reg = asm.load(asm.stack_opnd(0));
+ let ary_opnd = get_array_ptr(asm, array_reg);
+ let last_array_value = asm.load(Opnd::mem(64, ary_opnd, (required_args as i32 - 1) * (SIZEOF_VALUE as i32)));
+ guard_object_is_not_ruby2_keyword_hash(
+ asm,
+ last_array_value,
+ Counter::guard_send_splatarray_last_ruby2_keywords,
+ );
+ }
+
+ asm_comment!(asm, "Push arguments from array");
+ let array_opnd = asm.stack_pop(1);
+
+ if required_args > 0 {
+ let array_reg = asm.load(array_opnd);
+ let ary_opnd = get_array_ptr(asm, array_reg);
+
+ for i in 0..required_args {
+ let top = asm.stack_push(Type::Unknown);
+ asm.mov(top, Opnd::mem(64, ary_opnd, i as i32 * SIZEOF_VALUE_I32));
+ }
+
+ asm_comment!(asm, "end push_each");
+ }
+}
+
+fn gen_send_bmethod(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ci: *const rb_callinfo,
+ cme: *const rb_callable_method_entry_t,
+ block: Option<BlockHandler>,
+ flags: u32,
+ argc: i32,
+) -> Option<CodegenStatus> {
+ let procv = unsafe { rb_get_def_bmethod_proc((*cme).def) };
+
+ let proc = unsafe { rb_jit_get_proc_ptr(procv) };
+ let proc_block = unsafe { &(*proc).block };
+
+ if proc_block.type_ != block_type_iseq {
+ return None;
+ }
+
+ let capture = unsafe { proc_block.as_.captured.as_ref() };
+ let iseq = unsafe { *capture.code.iseq.as_ref() };
+
+ if !procv.shareable_p() {
+ let ractor_serial = unsafe { rb_yjit_cme_ractor_serial(cme) };
+ asm_comment!(asm, "guard current ractor == {}", ractor_serial);
+ let current_ractor_serial = asm.load(Opnd::mem(64, EC, RUBY_OFFSET_EC_RACTOR_ID as i32));
+ asm.cmp(current_ractor_serial, ractor_serial.into());
+ asm.jne(Target::side_exit(Counter::send_bmethod_ractor));
+ }
+
+ // Passing a block to a block needs logic different from passing
+ // a block to a method and sometimes requires allocation. Bail for now.
+ if block.is_some() {
+ gen_counter_incr(jit, asm, Counter::send_bmethod_block_arg);
+ return None;
+ }
+
+ let frame_type = VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA;
+ perf_call! { gen_send_iseq(jit, asm, iseq, ci, frame_type, Some(capture.ep), cme, block, flags, argc, None) }
+}
+
+/// The kind of a value an ISEQ returns
+enum IseqReturn {
+ Value(VALUE),
+ LocalVariable(u32),
+ Receiver,
+}
+
+extern "C" {
+ fn rb_simple_iseq_p(iseq: IseqPtr) -> bool;
+ fn rb_iseq_only_kwparam_p(iseq: IseqPtr) -> bool;
+}
+
+/// Return the ISEQ's return value if it consists of one simple instruction and leave.
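+/// For example, `def foo = nil` compiles to `putnil; leave` (a known Qnil return),
+/// and `def bar(x) = x` compiles to `getlocal_WC_0; leave` (returns a parameter).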
+fn iseq_get_return_value(iseq: IseqPtr, captured_opnd: Option<Opnd>, block: Option<BlockHandler>, ci_flags: u32) -> Option<IseqReturn> {
+ // Expect only two instructions and one possible operand
+ // NOTE: If an ISEQ has an optional keyword parameter with a default value that requires
+ // computation, the ISEQ will always have more than two instructions and won't be inlined.
+ let iseq_size = unsafe { get_iseq_encoded_size(iseq) };
+ if !(2..=3).contains(&iseq_size) {
+ return None;
+ }
+
+ // Get the first two instructions
+ let first_insn = iseq_opcode_at_idx(iseq, 0);
+ let second_insn = iseq_opcode_at_idx(iseq, insn_len(first_insn as usize));
+
+ // Extract the return value if known
+ if second_insn != YARVINSN_leave {
+ return None;
+ }
+ match first_insn {
+ YARVINSN_getlocal_WC_0 => {
+ // Accept only cases where only positional arguments are used by both the callee and the caller.
+ // Keyword arguments may be specified by the callee or the caller but not used.
+ // Reject block ISEQs to avoid autosplat and other block parameter complications.
+ if captured_opnd.is_some()
+ // Reject if block ISEQ is present
+ || block.is_some()
+ // Equivalent to `VM_CALL_ARGS_SIMPLE - VM_CALL_KWARG - has_block_iseq`
+ || ci_flags & (
+ VM_CALL_ARGS_SPLAT
+ | VM_CALL_KW_SPLAT
+ | VM_CALL_ARGS_BLOCKARG
+ | VM_CALL_FORWARDING
+ ) != 0
+ {
+ return None;
+ }
+
+ let ep_offset = unsafe { *rb_iseq_pc_at_idx(iseq, 1) }.as_u32();
+ let local_idx = ep_offset_to_local_idx(iseq, ep_offset);
+
+ // Only inline getlocal on a parameter. DCE in the ISEQ builder can
+ // make a two-instruction ISEQ that does not return a parameter.
+ if local_idx >= unsafe { get_iseq_body_param_size(iseq) } {
+ return None;
+ }
+
+ if unsafe { rb_simple_iseq_p(iseq) } {
+ return Some(IseqReturn::LocalVariable(local_idx));
+ } else if unsafe { rb_iseq_only_kwparam_p(iseq) } {
+ // Inline if only positional parameters are used
+ if let Ok(i) = i32::try_from(local_idx) {
+ if i < unsafe { rb_get_iseq_body_param_lead_num(iseq) } {
+ return Some(IseqReturn::LocalVariable(local_idx));
+ }
+ }
+ }
+
+ return None;
+ }
+ YARVINSN_putnil => Some(IseqReturn::Value(Qnil)),
+ YARVINSN_putobject => Some(IseqReturn::Value(unsafe { *rb_iseq_pc_at_idx(iseq, 1) })),
+ YARVINSN_putobject_INT2FIX_0_ => Some(IseqReturn::Value(VALUE::fixnum_from_usize(0))),
+ YARVINSN_putobject_INT2FIX_1_ => Some(IseqReturn::Value(VALUE::fixnum_from_usize(1))),
+ // We don't support invokeblock for now. Such ISEQs are likely not used by blocks anyway.
+ YARVINSN_putself if captured_opnd.is_none() => Some(IseqReturn::Receiver),
+ _ => None,
+ }
+}
+
+fn gen_send_iseq(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ iseq: *const rb_iseq_t,
+ ci: *const rb_callinfo,
+ frame_type: u32,
+ prev_ep: Option<*const VALUE>,
+ cme: *const rb_callable_method_entry_t,
+ block: Option<BlockHandler>,
+ flags: u32,
+ argc: i32,
+ captured_opnd: Option<Opnd>,
+) -> Option<CodegenStatus> {
+ // Argument count. We will change this as we gather values from
+ // sources to satisfy the callee's parameters. To help make sense
+ // of changes, note that:
+ // - Parameters syntactically on the left have lower addresses.
+ // For example, all the lead (required) and optional parameters
+ // have lower addresses than the rest parameter array.
+ // - The larger the index one passes to Assembler::stack_opnd(),
+ // the *lower* the address.
+ let mut argc = argc;
+
+ // Iseqs with keyword parameters have a hidden, unnamed parameter local
+ // that the callee could use to know which keywords are unspecified
+ // (see the `checkkeyword` instruction and check `ruby --dump=insn -e 'def foo(k:itself)=k'`).
+ // We always need to set up this local if the call goes through.
+ let has_kwrest = unsafe { get_iseq_flags_has_kwrest(iseq) };
+ let doing_kw_call = unsafe { get_iseq_flags_has_kw(iseq) } || has_kwrest;
+ let supplying_kws = unsafe { vm_ci_flag(ci) & VM_CALL_KWARG } != 0;
+ let iseq_has_rest = unsafe { get_iseq_flags_has_rest(iseq) };
+ let iseq_has_block_param = unsafe { get_iseq_flags_has_block(iseq) };
+ let arg_setup_block = captured_opnd.is_some(); // arg_setup_type: arg_setup_block (invokeblock)
+
+ // Is this iseq tagged as "forwardable"? Iseqs that take `...` as a
+ // parameter are tagged as forwardable (e.g. `def foo(...); end`)
+ let forwarding = unsafe { rb_get_iseq_flags_forwardable(iseq) };
+
+ // If a "forwardable" iseq has been called with a splat, then we _do not_
+ // want to expand the splat to the stack. So we'll only consider this
+ // a splat call if the callee iseq is not forwardable. For example,
+ // we do not want to handle the following code:
+ //
+ // `def foo(...); end; foo(*blah)`
+ let splat_call = (flags & VM_CALL_ARGS_SPLAT != 0) && !forwarding;
+ let kw_splat = (flags & VM_CALL_KW_SPLAT != 0) && !forwarding;
+
+ // For computing offsets to callee locals
+ let num_params = unsafe { get_iseq_body_param_size(iseq) as i32 };
+ let num_locals = unsafe { get_iseq_body_local_table_size(iseq) as i32 };
+
+ let mut start_pc_offset: u16 = 0;
+ let required_num = unsafe { get_iseq_body_param_lead_num(iseq) };
+
+ // This struct represents the metadata about the caller-specified
+ // keyword arguments.
+ let kw_arg = unsafe { vm_ci_kwarg(ci) };
+ let kw_arg_num = if kw_arg.is_null() {
+ 0
+ } else {
+ unsafe { get_cikw_keyword_len(kw_arg) }
+ };
+
+ // Arity handling and optional parameter setup for positional arguments.
+ // Splats are handled later.
+ let mut opts_filled = argc - required_num - kw_arg_num - i32::from(kw_splat) - i32::from(splat_call);
+ let opt_num = unsafe { get_iseq_body_param_opt_num(iseq) };
+ // With a rest parameter or a yield to a block,
+ // callers can pass more than required + optional.
+ // So we cap opts_filled at opt_num.
+ if iseq_has_rest || arg_setup_block {
+ opts_filled = min(opts_filled, opt_num);
+ }
+ let mut opts_missing: i32 = opt_num - opts_filled;
+
+ let block_arg = flags & VM_CALL_ARGS_BLOCKARG != 0;
+ // Stack index of the splat array
+ let splat_pos = i32::from(block_arg) + i32::from(kw_splat) + kw_arg_num;
+
+ exit_if_stack_too_large(iseq)?;
+ exit_if_tail_call(jit, asm, ci)?;
+ exit_if_has_post(jit, asm, iseq)?;
+ exit_if_kwsplat_non_nil(jit, asm, flags, Counter::send_iseq_kw_splat_non_nil)?;
+ exit_if_has_rest_and_captured(jit, asm, iseq_has_rest, captured_opnd)?;
+ exit_if_has_kwrest_and_captured(jit, asm, has_kwrest, captured_opnd)?;
+ exit_if_has_rest_and_supplying_kws(jit, asm, iseq_has_rest, supplying_kws)?;
+ exit_if_supplying_kw_and_has_no_kw(jit, asm, supplying_kws, doing_kw_call)?;
+ exit_if_supplying_kws_and_accept_no_kwargs(jit, asm, supplying_kws, iseq)?;
+ exit_if_doing_kw_and_splat(jit, asm, doing_kw_call, flags)?;
+ if !forwarding {
+ exit_if_wrong_number_arguments(jit, asm, arg_setup_block, opts_filled, flags, opt_num, iseq_has_rest)?;
+ }
+ exit_if_doing_kw_and_opts_missing(jit, asm, doing_kw_call, opts_missing)?;
+ exit_if_has_rest_and_optional_and_block(jit, asm, iseq_has_rest, opt_num, iseq, block_arg)?;
+ if forwarding && flags & VM_CALL_OPT_SEND != 0 {
+ gen_counter_incr(jit, asm, Counter::send_iseq_send_forwarding);
+ return None;
+ }
+ let block_arg_type = exit_if_unsupported_block_arg_type(jit, asm, block_arg)?;
+
+ // Bail if we can't drop extra arguments for a yield by just popping them
+ if supplying_kws && arg_setup_block && argc > (kw_arg_num + required_num + opt_num) {
+ gen_counter_incr(jit, asm, Counter::send_iseq_complex_discard_extras);
+ return None;
+ }
+
+ // Block parameter handling. This mirrors setup_parameters_complex().
+ if iseq_has_block_param {
+ if unsafe { get_iseq_body_local_iseq(iseq) == iseq } {
+ // Do nothing
+ } else {
+ // In this case (param.flags.has_block && local_iseq != iseq),
+ // the block argument is set up as a local variable and requires
+ // materialization (allocation). Bail.
+ gen_counter_incr(jit, asm, Counter::send_iseq_materialized_block);
+ return None;
+ }
+ }
+
+ // Check that required keyword arguments are supplied and find any extras
+ // that should go into the keyword rest parameter (**kw_rest).
+ if doing_kw_call {
+ gen_iseq_kw_call_checks(jit, asm, iseq, kw_arg, has_kwrest, kw_arg_num)?;
+ }
+
+ let splat_array_length = if splat_call {
+ let array = jit.peek_at_stack(&asm.ctx, splat_pos as isize);
+ let array_length = if array == Qnil {
+ 0
+ } else if unsafe { !RB_TYPE_P(array, RUBY_T_ARRAY) } {
+ gen_counter_incr(jit, asm, Counter::send_iseq_splat_not_array);
+ return None;
+ } else {
+ unsafe { rb_jit_array_len(array) as u32 }
+ };
+
+ // Arity check accounting for size of the splat. When callee has rest parameters, we insert
+ // runtime guards later in copy_splat_args_for_rest_callee()
+ if !iseq_has_rest {
+ let supplying = argc - 1 - i32::from(kw_splat) + array_length as i32;
+ if !(required_num..=required_num + opt_num).contains(&supplying) {
+ gen_counter_incr(jit, asm, Counter::send_iseq_splat_arity_error);
+ return None;
+ }
+ }
+
+ if iseq_has_rest && opt_num > 0 {
+ // If we have rest and optional parameters,
+ // we set the pc_offset for where to jump in the called method
+ // based on the number of arguments. If the number of args changed,
+ // that offset would need to change too, and we don't adjust it
+ // dynamically, so we side exit. On a normal splat without rest
+ // and optional args this is handled elsewhere depending on the case.
+ asm_comment!(asm, "Side exit if length doesn't equal compile time length");
+ let array_len_opnd = get_array_len(asm, asm.stack_opnd(splat_pos));
+ asm.cmp(array_len_opnd, array_length.into());
+ asm.jne(Target::side_exit(Counter::guard_send_splatarray_length_not_equal));
+ }
+
+ Some(array_length)
+ } else {
+ None
+ };
+
+ // Check if we need the arg0 splat handling of vm_callee_setup_block_arg()
+ // Also known as "autosplat" inside setup_parameters_complex().
+ // Autosplat checks argc == 1 after splat and kwsplat processing, so make
+ // sure to amend this if we start supporting kw_splat.
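+ // For example, `[[1, 2]].each { |x, y| }` passes a single array argument
+ // that gets auto-splatted across the two block parameters.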
+ let block_arg0_splat = arg_setup_block
+ && (argc == 1 || (argc == 2 && splat_array_length == Some(0)))
+ && !supplying_kws && !doing_kw_call
+ && unsafe {
+ (get_iseq_flags_has_lead(iseq) || opt_num > 1)
+ && !get_iseq_flags_ambiguous_param0(iseq)
+ };
+ if block_arg0_splat {
+ // If block_arg0_splat, we still need side exits after splat, but
+ // the splat modifies the stack which breaks side exits. So bail out.
+ if splat_call {
+ gen_counter_incr(jit, asm, Counter::invokeblock_iseq_arg0_args_splat);
+ return None;
+ }
+ // The block_arg0_splat implementation cannot deal with optional parameters.
+ // This is a setup_parameters_complex() situation and interacts with the
+ // starting position of the callee.
+ if opt_num > 1 {
+ gen_counter_incr(jit, asm, Counter::invokeblock_iseq_arg0_optional);
+ return None;
+ }
+ }
+
+ // Adjust `opts_filled` and `opts_missing` taking
+ // into account the size of the splat expansion.
+ if let Some(len) = splat_array_length {
+ assert_eq!(kw_arg_num, 0); // Due to exit_if_doing_kw_and_splat().
+ // Simplifies calculation below.
+ let num_args = argc - 1 - i32::from(kw_splat) + len as i32;
+
+ opts_filled = if num_args >= required_num {
+ min(num_args - required_num, opt_num)
+ } else {
+ 0
+ };
+ opts_missing = opt_num - opts_filled;
+ }
+
+ assert_eq!(opts_missing + opts_filled, opt_num);
+ assert!(opts_filled >= 0);
+
+ // ISeqs with optional parameters start at different
+ // locations depending on the number of optionals given.
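+ // For example, with `def f(a, b = some_expr)`, a two-argument call starts past
+ // the code that computes `some_expr`, while a one-argument call starts at it.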
+ if opt_num > 0 {
+ assert!(opts_filled >= 0);
+ unsafe {
+ let opt_table = get_iseq_body_param_opt_table(iseq);
+ start_pc_offset = opt_table.offset(opts_filled as isize).read().try_into().unwrap();
+ }
+ }
+
+ // Increment total ISEQ send count
+ gen_counter_incr(jit, asm, Counter::num_send_iseq);
+
+ // Shortcut for special `Primitive.attr! :leaf` builtins
+ let builtin_attrs = unsafe { rb_jit_iseq_builtin_attrs(iseq) };
+ let builtin_func_raw = unsafe { rb_yjit_builtin_function(iseq) };
+ let builtin_func = if builtin_func_raw.is_null() { None } else { Some(builtin_func_raw) };
+ let opt_send_call = flags & VM_CALL_OPT_SEND != 0; // .send call is not currently supported for builtins
+ if let (None, Some(builtin_info), true, false, None | Some(0)) =
+ (block, builtin_func, builtin_attrs & BUILTIN_ATTR_LEAF != 0, opt_send_call, splat_array_length) {
+ let builtin_argc = unsafe { (*builtin_info).argc };
+ if builtin_argc + 1 < (C_ARG_OPNDS.len() as i32) {
+ // We pop the block arg without using it because:
+ // - the builtin is leaf, so it promises to not `yield`.
+ // - no leaf builtins have block param at the time of writing, and
+ // adding one requires interpreter changes to support.
+ if block_arg_type.is_some() {
+ if iseq_has_block_param {
+ gen_counter_incr(jit, asm, Counter::send_iseq_leaf_builtin_block_arg_block_param);
+ return None;
+ }
+ asm.stack_pop(1);
+ }
+
+ // Pop empty kw_splat hash which passes nothing (exit_if_kwsplat_non_nil())
+ if kw_splat {
+ asm.stack_pop(1);
+ }
+
+ // Pop empty splat array which passes nothing
+ if let Some(0) = splat_array_length {
+ asm.stack_pop(1);
+ }
+
+ asm_comment!(asm, "inlined leaf builtin");
+ gen_counter_incr(jit, asm, Counter::num_send_iseq_leaf);
+
+ // The callee may allocate, e.g. Integer#abs on a Bignum.
+ // Save SP for GC, save PC for allocation tracing, and prepare
+ // for global invalidation after GC's VM lock contention.
+ jit_prepare_call_with_gc(jit, asm);
+
+ // Call the builtin func (ec, recv, arg1, arg2, ...)
+ let mut args = vec![EC];
+
+ // Copy self and arguments
+ for i in 0..=builtin_argc {
+ let stack_opnd = asm.stack_opnd(builtin_argc - i);
+ args.push(stack_opnd);
+ }
+ let val = asm.ccall(unsafe { (*builtin_info).func_ptr as *const u8 }, args);
+ asm.stack_pop((builtin_argc + 1).try_into().unwrap()); // Keep them on stack during ccall for GC
+
+ // Push the return value
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val);
+
+ // Note: assuming that the leaf builtin doesn't change local variables here.
+ // Seems like a safe assumption.
+
+ // Let guard chains share the same successor
+ return jump_to_next_insn(jit, asm);
+ }
+ }
+
+ // Inline simple ISEQs that just return a known value, a parameter, or the receiver
+ if let (Some(value), None, false) = (iseq_get_return_value(iseq, captured_opnd, block, flags), block_arg_type, opt_send_call) {
+ asm_comment!(asm, "inlined simple ISEQ");
+ gen_counter_incr(jit, asm, Counter::num_send_iseq_inline);
+
+ match value {
+ IseqReturn::LocalVariable(local_idx) => {
+ // Put the local variable at the return slot
+ let stack_local = asm.stack_opnd(argc - 1 - local_idx as i32);
+ let stack_return = asm.stack_opnd(argc);
+ asm.mov(stack_return, stack_local);
+
+ // Update the mapping for the return value
+ let mapping = asm.ctx.get_opnd_mapping(stack_local.into());
+ asm.ctx.set_opnd_mapping(stack_return.into(), mapping);
+
+ // Pop everything but the return value
+ asm.stack_pop(argc as usize);
+ }
+ IseqReturn::Value(value) => {
+ // Pop receiver and arguments
+ asm.stack_pop(argc as usize + if captured_opnd.is_some() { 0 } else { 1 });
+
+ // Push the return value
+ let stack_ret = asm.stack_push(Type::from(value));
+ asm.mov(stack_ret, value.into());
+ },
+ IseqReturn::Receiver => {
+ // Just pop arguments and leave the receiver on stack
+ asm.stack_pop(argc as usize);
+ }
+ }
+
+ // Let guard chains share the same successor
+ return jump_to_next_insn(jit, asm);
+ }
+
+ // Stack overflow check
+ // Note that vm_push_frame checks it against a decremented cfp, hence the multiply by 2.
+ // #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin)
+ asm_comment!(asm, "stack overflow check");
+ const _: () = assert!(RUBY_SIZEOF_CONTROL_FRAME % SIZEOF_VALUE == 0, "sizeof(rb_control_frame_t) is a multiple of sizeof(VALUE)");
+ let stack_max: i32 = unsafe { get_iseq_body_stack_max(iseq) }.try_into().unwrap();
+ let locals_offs = (num_locals + stack_max) + 2 * (RUBY_SIZEOF_CONTROL_FRAME / SIZEOF_VALUE) as i32;
+ let stack_limit = asm.lea(asm.ctx.sp_opnd(locals_offs));
+ asm.cmp(CFP, stack_limit);
+ asm.jbe(Target::side_exit(Counter::guard_send_se_cf_overflow));
+
+ if iseq_has_rest && splat_call {
+ // Insert length guard for a call to copy_splat_args_for_rest_callee()
+ // that will come later. We will have made changes to
+ // the stack by spilling or handling __send__ shifting
+ // by the time we get to that code, so we need the
+ // guard here where we can still side exit.
+ let non_rest_arg_count = argc - i32::from(kw_splat) - 1;
+ if non_rest_arg_count < required_num + opt_num {
+ let take_count: u32 = (required_num - non_rest_arg_count + opts_filled)
+ .try_into().unwrap();
+
+ if take_count > 0 {
+ asm_comment!(asm, "guard splat_array_length >= {take_count}");
+
+ let splat_array = asm.stack_opnd(splat_pos);
+ let array_len_opnd = get_array_len(asm, splat_array);
+ asm.cmp(array_len_opnd, take_count.into());
+ asm.jl(Target::side_exit(Counter::guard_send_iseq_has_rest_and_splat_too_few));
+ }
+ }
+
+ // All splats need to guard for ruby2_keywords hash. Check with a function call when
+ // splatting into a rest param since the index for the last item in the array is dynamic.
+ asm_comment!(asm, "guard no ruby2_keywords hash in splat");
+ let bad_splat = asm.ccall(rb_yjit_ruby2_keywords_splat_p as _, vec![asm.stack_opnd(splat_pos)]);
+ asm.cmp(bad_splat, 0.into());
+ asm.jnz(Target::side_exit(Counter::guard_send_splatarray_last_ruby2_keywords));
+ }
+
+ match block_arg_type {
+ Some(BlockArg::Nil) => {
+ // We have a nil block arg, so let's pop it off the args
+ asm.stack_pop(1);
+ }
+ Some(BlockArg::BlockParamProxy) => {
+ // We don't need the actual stack value
+ asm.stack_pop(1);
+ }
+ Some(BlockArg::TProc) => {
+ // Place the proc as the block handler. We do this early because
+ // the block arg being at the top of the stack gets in the way of
+ // rest param handling later. Also, since there are C calls that
+ // come later, we can't hold this value in a register and place it
+ // near the end when we push a new control frame.
+ asm_comment!(asm, "guard block arg is a proc");
+ // Simple predicate, no need for jit_prepare_non_leaf_call().
+ let is_proc = asm.ccall(rb_obj_is_proc as _, vec![asm.stack_opnd(0)]);
+ asm.cmp(is_proc, Qfalse.into());
+ jit_chain_guard(
+ JCC_JE,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_block_arg_type,
+ );
+
+ // If this is a forwardable iseq, adjust the stack size accordingly
+ let callee_ep = if forwarding {
+ -1 + num_locals + VM_ENV_DATA_SIZE as i32
+ } else {
+ -argc + num_locals + VM_ENV_DATA_SIZE as i32 - 1
+ };
+ let callee_specval = callee_ep + VM_ENV_DATA_INDEX_SPECVAL;
+ if callee_specval < 0 {
+ // Can't write to sp[-n] since that's where the arguments are
+ gen_counter_incr(jit, asm, Counter::send_iseq_clobbering_block_arg);
+ return None;
+ }
+ if iseq_has_rest || has_kwrest {
+ // The proc would be stored above the current stack top, where GC can't see it
+ gen_counter_incr(jit, asm, Counter::send_iseq_block_arg_gc_unsafe);
+ return None;
+ }
+ let proc = asm.stack_pop(1); // Pop first, as argc doesn't account for the block arg
+ let callee_specval = asm.ctx.sp_opnd(callee_specval);
+ asm.store(callee_specval, proc);
+ }
+ None => {
+ // Nothing to do
+ }
+ }
+
+ if kw_splat {
+ // Only `**nil` is supported right now. Checked in exit_if_kwsplat_non_nil()
+ assert_eq!(Type::Nil, asm.ctx.get_opnd_type(StackOpnd(0)));
+ asm.stack_pop(1);
+ argc -= 1;
+ }
+
+ // push_splat_args does stack manipulation so we can no longer side exit
+ if let Some(array_length) = splat_array_length {
+ if !iseq_has_rest {
+ // Speculate that future splats will be done with
+ // an array that has the same length. We will insert guards.
+ argc = argc - 1 + array_length as i32;
+ if argc + asm.ctx.get_stack_size() as i32 > MAX_SPLAT_LENGTH {
+ gen_counter_incr(jit, asm, Counter::send_splat_too_long);
+ return None;
+ }
+ push_splat_args(array_length, asm);
+ }
+ }
+
+ // This is a .send call and we need to adjust the stack
+ // TODO: This can be more efficient if we do it before
+ // extracting from the splat array above.
+ if flags & VM_CALL_OPT_SEND != 0 {
+ handle_opt_send_shift_stack(asm, argc);
+ }
+
+ if iseq_has_rest {
+ // We are going to allocate, so save the PC and SP.
+ jit_save_pc(jit, asm);
+ gen_save_sp(asm);
+
+ let rest_param_array = if splat_call {
+ let non_rest_arg_count = argc - 1;
+ // We start by dupping the array because someone else might have
+ // a reference to it. This also normalizes to an ::Array instance.
+ let array = asm.stack_opnd(0);
+ let array = asm.ccall(
+ rb_ary_dup as *const u8,
+ vec![array],
+ );
+ asm.stack_pop(1); // Pop array after ccall to use a register for passing it.
+
+ // Every `non_rest_arg_count` case below ends with this stack state
+ argc = required_num + opts_filled;
+
+ if non_rest_arg_count > required_num + opt_num {
+ // If we have more arguments than required, we need to prepend
+ // the items from the stack onto the array.
+ let diff: u32 = (non_rest_arg_count - (required_num + opt_num))
+ .try_into().unwrap();
+
+ // diff is >0 so no need to worry about null pointer
+ asm_comment!(asm, "load pointer to array elements");
+ let values_opnd = asm.ctx.sp_opnd(-(diff as i32));
+ let values_ptr = asm.lea(values_opnd);
+
+ asm_comment!(asm, "prepend stack values to rest array");
+ let array = asm.ccall(
+ rb_ary_unshift_m as *const u8,
+ vec![Opnd::UImm(diff as u64), values_ptr, array],
+ );
+ asm.stack_pop(diff as usize);
+
+ array
+ } else if non_rest_arg_count < required_num + opt_num {
+ // If we have fewer arguments than required, we need to take some
+ // from the array and move them to the stack.
+ asm_comment!(asm, "take items from splat array");
+
+ let take_count: u32 = (required_num - non_rest_arg_count + opts_filled)
+ .try_into().unwrap();
+
+ // Copy required arguments to the stack without modifying the array
+ copy_splat_args_for_rest_callee(array, take_count, asm);
+
+ // We will now slice the array to give us a new array of the correct size
+ let sliced = asm.ccall(rb_yjit_rb_ary_subseq_length as *const u8, vec![array, Opnd::UImm(take_count.into())]);
+
+ sliced
+ } else {
+ // The arguments are equal so we can just push to the stack
+ asm_comment!(asm, "same length for splat array and rest param");
+ assert!(non_rest_arg_count == required_num + opt_num);
+
+ array
+ }
+ } else {
+ asm_comment!(asm, "rest parameter without splat");
+
+ assert!(argc >= required_num);
+ let n = (argc - required_num - opts_filled) as u32;
+ argc = required_num + opts_filled;
+ // If n is 0, then elts is never going to be read, so we can just pass null
+ let values_ptr = if n == 0 {
+ Opnd::UImm(0)
+ } else {
+ asm_comment!(asm, "load pointer to array elements");
+ let values_opnd = asm.ctx.sp_opnd(-(n as i32));
+ asm.lea(values_opnd)
+ };
+
+ let new_ary = asm.ccall(
+ rb_ec_ary_new_from_values as *const u8,
+ vec![
+ EC,
+ Opnd::UImm(n.into()),
+ values_ptr
+ ]
+ );
+ asm.stack_pop(n.as_usize());
+
+ new_ary
+ };
+
+ // Find where to put the rest parameter array
+ let rest_param = if opts_missing == 0 {
+ // All optionals are filled, the rest param goes at the top of the stack
+ argc += 1;
+ asm.stack_push(Type::TArray)
+ } else {
+ // The top of the stack will be a missing optional, but the rest
+ // parameter needs to be placed after all the missing optionals.
+ // Place it using a stack operand with a negative stack index.
+ // (Negative stack indexes of higher magnitude have higher addresses.)
+ assert!(opts_missing > 0);
+ // The argument deepest in the stack will be the 0th local in the callee.
+ let callee_locals_base = argc - 1;
+ let rest_param_stack_idx = callee_locals_base - required_num - opt_num;
+ assert!(rest_param_stack_idx < 0);
+ asm.stack_opnd(rest_param_stack_idx)
+ };
+ // Store rest param to memory to avoid register shuffle as
+ // we won't be reading it for the remainder of the block.
+ asm.ctx.dealloc_reg(rest_param.reg_opnd());
+ asm.store(rest_param, rest_param_array);
+ }
+
+ // Pop surplus positional arguments when yielding
+ if arg_setup_block {
+ let extras = argc - required_num - opt_num - kw_arg_num;
+ if extras > 0 {
+ // Checked earlier. If there are keyword args, then
+ // the positional arguments are not at the stack top.
+ assert_eq!(0, kw_arg_num);
+
+ asm.stack_pop(extras as usize);
+ argc = required_num + opt_num + kw_arg_num;
+ }
+ }
+
+ // Keyword argument passing
+ if doing_kw_call {
+ argc = gen_iseq_kw_call(jit, asm, kw_arg, iseq, argc, has_kwrest);
+ }
+
+ // Same as vm_callee_setup_block_arg_arg0_check and vm_callee_setup_block_arg_arg0_splat
+ // on vm_callee_setup_block_arg for arg_setup_block. This is done after CALLER_SETUP_ARG
+ // and CALLER_REMOVE_EMPTY_KW_SPLAT, so this implementation is put here. This may need
+ // side exits, so you still need to allow side exits here if block_arg0_splat is true.
+ // Note that you can't have side exits after this arg0 splat.
+ if block_arg0_splat {
+ let arg0_opnd = asm.stack_opnd(0);
+
+ // Only handle the case that you don't need to_ary conversion
+ let not_array_counter = Counter::invokeblock_iseq_arg0_not_array;
+ guard_object_is_array(asm, arg0_opnd, arg0_opnd.into(), not_array_counter);
+
+ // Only handle the case where the array length == ISEQ's lead_num (most common)
+ let arg0_len_opnd = get_array_len(asm, arg0_opnd);
+ let lead_num = unsafe { rb_get_iseq_body_param_lead_num(iseq) };
+ asm.cmp(arg0_len_opnd, lead_num.into());
+ asm.jne(Target::side_exit(Counter::invokeblock_iseq_arg0_wrong_len));
+
+ let arg0_reg = asm.load(arg0_opnd);
+ let array_opnd = get_array_ptr(asm, arg0_reg);
+ asm_comment!(asm, "push splat arg0 onto the stack");
+ asm.stack_pop(argc.try_into().unwrap());
+ for i in 0..lead_num {
+ let stack_opnd = asm.stack_push(Type::Unknown);
+ asm.mov(stack_opnd, Opnd::mem(64, array_opnd, SIZEOF_VALUE_I32 * i));
+ }
+ argc = lead_num;
+ }
+
+ fn nil_fill(comment: &'static str, fill_range: std::ops::Range<i32>, asm: &mut Assembler) {
+ if fill_range.is_empty() {
+ return;
+ }
+
+ asm_comment!(asm, "{}", comment);
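+ // fill_range is in sp_opnd() units: for example, -2..0 writes Qnil into sp[-2] and sp[-1].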
+ for i in fill_range {
+ let value_slot = asm.ctx.sp_opnd(i);
+ asm.store(value_slot, Qnil.into());
+ }
+ }
+
+ if !forwarding {
+ // Nil-initialize missing optional parameters
+ nil_fill(
+ "nil-initialize missing optionals",
+ {
+ let begin = -argc + required_num + opts_filled;
+ let end = -argc + required_num + opt_num;
+
+ begin..end
+ },
+ asm
+ );
+ // Nil-initialize the block parameter. It's the last parameter local
+ if iseq_has_block_param {
+ let block_param = asm.ctx.sp_opnd(-argc + num_params - 1);
+ asm.store(block_param, Qnil.into());
+ }
+ // Nil-initialize non-parameter locals
+ nil_fill(
+ "nil-initialize locals",
+ {
+ let begin = -argc + num_params;
+ let end = -argc + num_locals;
+
+ begin..end
+ },
+ asm
+ );
+ }
+
+ if forwarding {
+ assert_eq!(1, num_params);
+ // Write the CI onto the stack and ensure that it actually gets
+ // flushed to memory
+ asm_comment!(asm, "put call info for forwarding");
+ let ci_opnd = asm.stack_opnd(-1);
+ asm.ctx.dealloc_reg(ci_opnd.reg_opnd());
+ asm.mov(ci_opnd, VALUE(ci as usize).into());
+
+ // Nil-initialize other locals which are above the CI
+ nil_fill("nil-initialize locals", 1..num_locals, asm);
+ }
+
+ // Points to the receiver operand on the stack unless a captured environment is used
+ let recv = match captured_opnd {
+ Some(captured_opnd) => asm.load(Opnd::mem(64, captured_opnd, 0)), // captured->self
+ _ => asm.stack_opnd(argc),
+ };
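+ // With a captured environment the receiver never lived on the caller's stack,
+ // so only the arguments count toward the SP adjustment.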
+ let captured_self = captured_opnd.is_some();
+ let sp_offset = argc + if captured_self { 0 } else { 1 };
+
+ // Store the updated SP on the current frame (pop arguments and receiver)
+ asm_comment!(asm, "store caller sp");
+ let caller_sp = asm.lea(asm.ctx.sp_opnd(-sp_offset));
+ asm.store(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP), caller_sp);
+
+ // Store the next PC in the current frame
+ jit_save_pc(jit, asm);
+
+ // Adjust the callee's stack pointer
+ let callee_sp = if forwarding {
+ let offs = num_locals + VM_ENV_DATA_SIZE as i32;
+ asm.lea(asm.ctx.sp_opnd(offs))
+ } else {
+ let offs = -argc + num_locals + VM_ENV_DATA_SIZE as i32;
+ asm.lea(asm.ctx.sp_opnd(offs))
+ };
+
+ let specval = if let Some(prev_ep) = prev_ep {
+ // We've already side-exited if the callee expects a block, so we
+ // ignore any supplied block here
+ SpecVal::PrevEP(prev_ep)
+ } else if let Some(captured_opnd) = captured_opnd {
+ let ep_opnd = asm.load(Opnd::mem(64, captured_opnd, SIZEOF_VALUE_I32)); // captured->ep
+ SpecVal::PrevEPOpnd(ep_opnd)
+ } else if let Some(BlockArg::TProc) = block_arg_type {
+ SpecVal::BlockHandler(Some(BlockHandler::AlreadySet))
+ } else if let Some(BlockArg::BlockParamProxy) = block_arg_type {
+ SpecVal::BlockHandler(Some(BlockHandler::BlockParamProxy))
+ } else {
+ SpecVal::BlockHandler(block)
+ };
+
+ // Setup the new frame
+ perf_call!("gen_send_iseq: ", gen_push_frame(jit, asm, ControlFrame {
+ frame_type,
+ specval,
+ cme,
+ recv,
+ sp: callee_sp,
+ iseq: Some(iseq),
+ pc: None, // We are calling into jitted code, which will set the PC as necessary
+ }));
+
+ // No need to set cfp->pc since the callee sets it whenever calling into routines
+ // that could look at it through jit_save_pc().
+ // mov(cb, REG0, const_ptr_opnd(start_pc));
+ // mov(cb, member_opnd(REG_CFP, rb_control_frame_t, pc), REG0);
+
+ // Create a blockid for the callee
+ let callee_blockid = BlockId { iseq, idx: start_pc_offset };
+
+ // Create a context for the callee
+ let mut callee_ctx = Context::default();
+
+ // If the callee has :inline_block annotation and the callsite has a block ISEQ,
+ // duplicate a callee block for each block ISEQ to make its `yield` monomorphic.
+ if let (Some(BlockHandler::BlockISeq(iseq)), true) = (block, builtin_attrs & BUILTIN_ATTR_INLINE_BLOCK != 0) {
+ callee_ctx.set_inline_block(iseq);
+ }
+
+ // Set the argument types in the callee's context
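+ // (The first argument is deepest in the stack, so its offset from the top is argc - 1.)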
+ for arg_idx in 0..argc {
+ let stack_offs: u8 = (argc - arg_idx - 1).try_into().unwrap();
+ let arg_type = asm.ctx.get_opnd_type(StackOpnd(stack_offs));
+ callee_ctx.set_local_type(arg_idx.try_into().unwrap(), arg_type);
+ }
+
+ // If we're in a forwarding callee, there will be one unknown type
+ // written into the local table (the caller's CI object)
+ if forwarding {
+ callee_ctx.set_local_type(0, Type::Unknown)
+ }
+
+ // Set the receiver type in the callee's context
+ let recv_type = if captured_self {
+ Type::Unknown // we don't track the type information of captured->self for now
+ } else {
+ asm.ctx.get_opnd_type(StackOpnd(argc.try_into().unwrap()))
+ };
+ callee_ctx.upgrade_opnd_type(SelfOpnd, recv_type);
+
+ // Spill or preserve argument registers
+ if forwarding {
+ // When forwarding, the callee's local table has only a callinfo,
+ // so we can't map the actual arguments to the callee's locals.
+ asm.spill_regs();
+ } else {
+ // Discover stack temp registers that can be used as the callee's locals
+ let mapped_temps = asm.map_temp_regs_to_args(&mut callee_ctx, argc);
+
+ // Spill stack temps and locals that are not used by the callee.
+ // This must be done before changing the SP register.
+ asm.spill_regs_except(&mapped_temps);
+
+ // If the callee block has been compiled before, spill/move registers to reuse the existing block
+ // for minimizing the number of blocks we need to compile.
+ if let Some(existing_reg_mapping) = find_most_compatible_reg_mapping(callee_blockid, &callee_ctx) {
+ asm_comment!(asm, "reuse maps: {:?} -> {:?}", callee_ctx.get_reg_mapping(), existing_reg_mapping);
+
+ // Spill the registers that are not used in the existing block.
+ // When the same ISEQ is compiled as an entry block, it starts with no registers allocated.
+ for &reg_opnd in callee_ctx.get_reg_mapping().get_reg_opnds().iter() {
+ if existing_reg_mapping.get_reg(reg_opnd).is_none() {
+ match reg_opnd {
+ RegOpnd::Local(local_idx) => {
+ let spilled_temp = asm.stack_opnd(argc - local_idx as i32 - 1);
+ asm.spill_reg(spilled_temp);
+ callee_ctx.dealloc_reg(reg_opnd);
+ }
+ RegOpnd::Stack(_) => unreachable!("callee {:?} should have been spilled", reg_opnd),
+ }
+ }
+ }
+ assert!(callee_ctx.get_reg_mapping().get_reg_opnds().len() <= existing_reg_mapping.get_reg_opnds().len());
+
+ // Load the registers that are spilled in this block but used in the existing block.
+ // When there are multiple callsites, some registers spilled in this block may be used at other callsites.
+ for &reg_opnd in existing_reg_mapping.get_reg_opnds().iter() {
+ if callee_ctx.get_reg_mapping().get_reg(reg_opnd).is_none() {
+ match reg_opnd {
+ RegOpnd::Local(local_idx) => {
+ callee_ctx.alloc_reg(reg_opnd);
+ let loaded_reg = TEMP_REGS[callee_ctx.get_reg_mapping().get_reg(reg_opnd).unwrap()];
+ let loaded_temp = asm.stack_opnd(argc - local_idx as i32 - 1);
+ asm.load_into(Opnd::Reg(loaded_reg), loaded_temp);
+ }
+ RegOpnd::Stack(_) => unreachable!("find_most_compatible_reg_mapping should not leave {:?}", reg_opnd),
+ }
+ }
+ }
+ assert_eq!(callee_ctx.get_reg_mapping().get_reg_opnds().len(), existing_reg_mapping.get_reg_opnds().len());
+
+ // Shuffle registers to make the register mappings compatible
+ let mut moves = vec![];
+ for &reg_opnd in callee_ctx.get_reg_mapping().get_reg_opnds().iter() {
+ let old_reg = TEMP_REGS[callee_ctx.get_reg_mapping().get_reg(reg_opnd).unwrap()];
+ let new_reg = TEMP_REGS[existing_reg_mapping.get_reg(reg_opnd).unwrap()];
+ moves.push((new_reg, Opnd::Reg(old_reg)));
+ }
+ for (reg, opnd) in Assembler::reorder_reg_moves(&moves) {
+ asm.load_into(Opnd::Reg(reg), opnd);
+ }
+ callee_ctx.set_reg_mapping(existing_reg_mapping);
+ }
+ }
+
+ // Update SP register for the callee. This must be done after referencing frame.recv,
+ // which may be SP-relative.
+ asm.mov(SP, callee_sp);
+
+ // Log the name of the method we're calling. We intentionally don't do this for inlined ISEQs.
+ // We also do this after spill_regs() to avoid doubly spilling the same thing on asm.ccall().
+ if get_option!(gen_stats) {
+ // Protect caller-saved registers in case they're used for arguments
+ let mapping = asm.cpush_all();
+
+ // Assemble the ISEQ name string
+ let name_str = get_iseq_name(iseq);
+
+ // Get an index for this ISEQ name
+ let iseq_idx = get_iseq_idx(&name_str);
+
+ // Increment the counter for this ISEQ
+ asm.ccall(incr_iseq_counter as *const u8, vec![iseq_idx.into()]);
+ asm.cpop_all(mapping);
+ }
+
+ // The callee might change locals through Kernel#binding and other means.
+ asm.clear_local_types();
+
+ // Pop arguments and receiver in return context and
+ // mark it as a continuation of gen_leave()
+ let mut return_asm = Assembler::new(jit.num_locals());
+ return_asm.ctx = asm.ctx;
+ return_asm.stack_pop(sp_offset.try_into().unwrap());
+ return_asm.ctx.set_sp_offset(0); // We set SP on the caller's frame above
+ return_asm.ctx.reset_chain_depth_and_defer();
+ return_asm.ctx.set_as_return_landing();
+
+ // Stub so we can return to JITted code
+ let return_block = BlockId {
+ iseq: jit.iseq,
+ idx: jit.next_insn_idx(),
+ };
+
+ // Write the JIT return address on the callee frame
+ jit.gen_branch(
+ asm,
+ return_block,
+ &return_asm.ctx,
+ None,
+ None,
+ BranchGenFn::JITReturn,
+ );
+
+ // ec->cfp is updated after cfp->jit_return for rb_profile_frames() safety
+ asm_comment!(asm, "switch to new CFP");
+ let new_cfp = asm.sub(CFP, RUBY_SIZEOF_CONTROL_FRAME.into());
+ asm.mov(CFP, new_cfp);
+ asm.store(Opnd::mem(64, EC, RUBY_OFFSET_EC_CFP as i32), CFP);
+
+ // Directly jump to the entry point of the callee
+ gen_direct_jump(
+ jit,
+ &callee_ctx,
+ callee_blockid,
+ asm,
+ );
+
+ Some(EndBlock)
+}
+
+// Check if we can handle a keyword call
+fn gen_iseq_kw_call_checks(
+ jit: &JITState,
+ asm: &mut Assembler,
+ iseq: *const rb_iseq_t,
+ kw_arg: *const rb_callinfo_kwarg,
+ has_kwrest: bool,
+ caller_kw_num: i32
+) -> Option<()> {
+ // This struct represents the metadata about the callee-specified
+ // keyword parameters.
+ let keyword = unsafe { get_iseq_body_param_keyword(iseq) };
+ let keyword_num: usize = unsafe { (*keyword).num }.try_into().unwrap();
+ let keyword_required_num: usize = unsafe { (*keyword).required_num }.try_into().unwrap();
+
+ let mut required_kwargs_filled = 0;
+
+ if keyword_num > 30 || caller_kw_num > 64 {
+ // We have so many keywords that (1 << num) encoded as a FIXNUM
+ // (which shifts it left one more) no longer fits inside a 32-bit
+ // immediate. Similarly, the kwrest bit mask is a u64, limiting callers to 64 keywords.
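+ // With 30 keywords the unspecified-bits mask is at most (1 << 30) - 1, whose
+ // fixnum encoding is exactly i32::MAX; a 31st keyword would overflow.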
+ gen_counter_incr(jit, asm, Counter::send_iseq_too_many_kwargs);
+ return None;
+ }
+
+ // Check that the kwargs being passed are valid
+ if caller_kw_num > 0 {
+ // This is the list of keyword arguments that the callee specified
+ // in its initial declaration.
+ // SAFETY: see compile.c for sizing of this slice.
+ let callee_kwargs = if keyword_num == 0 {
+ &[]
+ } else {
+ unsafe { slice::from_raw_parts((*keyword).table, keyword_num) }
+ };
+
+ // Here we're going to build up a list of the IDs that correspond to
+ // the caller-specified keyword arguments. If they're not in the
+ // same order as the order specified in the callee declaration, then
+ // we're going to need to generate some code to swap values around
+ // on the stack.
+ let kw_arg_keyword_len = caller_kw_num as usize;
+ let mut caller_kwargs: Vec<ID> = vec![0; kw_arg_keyword_len];
+ for kwarg_idx in 0..kw_arg_keyword_len {
+ let sym = unsafe { get_cikw_keywords_idx(kw_arg, kwarg_idx.try_into().unwrap()) };
+ caller_kwargs[kwarg_idx] = unsafe { rb_sym2id(sym) };
+ }
+
+ // First, we're going to be sure that the names of every
+ // caller-specified keyword argument correspond to a name in the
+ // list of callee-specified keyword parameters.
+ for caller_kwarg in caller_kwargs {
+ let search_result = callee_kwargs
+ .iter()
+ .enumerate() // inject element index
+ .find(|(_, &kwarg)| kwarg == caller_kwarg);
+
+ match search_result {
+ None if !has_kwrest => {
+ // If the keyword was never found, then we know we have a
+ // mismatch in the names of the keyword arguments, so we need to
+ // bail.
+ gen_counter_incr(jit, asm, Counter::send_iseq_kwargs_mismatch);
+ return None;
+ }
+ Some((callee_idx, _)) if callee_idx < keyword_required_num => {
+ // Keep a count to ensure all required kwargs are specified
+ required_kwargs_filled += 1;
+ }
+ _ => (),
+ }
+ }
+ }
+ assert!(required_kwargs_filled <= keyword_required_num);
+ if required_kwargs_filled != keyword_required_num {
+ gen_counter_incr(jit, asm, Counter::send_iseq_kwargs_mismatch);
+ return None;
+ }
+
+ Some(())
+}
+
+// Codegen for keyword argument handling. Essentially private to gen_send_iseq() since
+// there are a lot of preconditions to check before reaching this code.
+fn gen_iseq_kw_call(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ci_kwarg: *const rb_callinfo_kwarg,
+ iseq: *const rb_iseq_t,
+ mut argc: i32,
+ has_kwrest: bool,
+) -> i32 {
+ let caller_keyword_len_i32: i32 = if ci_kwarg.is_null() {
+ 0
+ } else {
+ unsafe { get_cikw_keyword_len(ci_kwarg) }
+ };
+ let caller_keyword_len: usize = caller_keyword_len_i32.try_into().unwrap();
+ let anon_kwrest = unsafe { rb_get_iseq_flags_anon_kwrest(iseq) && !get_iseq_flags_has_kw(iseq) };
+
+ // This struct represents the metadata about the callee-specified
+ // keyword parameters.
+ let keyword = unsafe { get_iseq_body_param_keyword(iseq) };
+
+ asm_comment!(asm, "keyword args");
+
+ // This is the list of keyword arguments that the callee specified
+ // in its initial declaration.
+ let callee_kwargs = unsafe { (*keyword).table };
+ let callee_kw_count_i32: i32 = unsafe { (*keyword).num };
+ let callee_kw_count: usize = callee_kw_count_i32.try_into().unwrap();
+ let keyword_required_num: usize = unsafe { (*keyword).required_num }.try_into().unwrap();
+
+ // Here we're going to build up a list of the IDs that correspond to
+ // the caller-specified keyword arguments. If they're not in the
+ // same order as the order specified in the callee declaration, then
+ // we're going to need to generate some code to swap values around
+ // on the stack.
+ let mut kwargs_order: Vec<ID> = vec![0; cmp::max(caller_keyword_len, callee_kw_count)];
+ for kwarg_idx in 0..caller_keyword_len {
+ let sym = unsafe { get_cikw_keywords_idx(ci_kwarg, kwarg_idx.try_into().unwrap()) };
+ kwargs_order[kwarg_idx] = unsafe { rb_sym2id(sym) };
+ }
+
+ let mut unspecified_bits = 0;
+
+ // The stack_opnd() index to the 0th keyword argument.
+ let kwargs_stack_base = caller_keyword_len_i32 - 1;
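+ // (stack_opnd(0) is the most recently pushed keyword, so the 0th keyword sits
+ // caller_keyword_len - 1 slots from the top.)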
+
+ // Build the keyword rest parameter hash before we make any changes to the order of
+ // the supplied keyword arguments
+ let kwrest_type = if has_kwrest {
+ c_callable! {
+ fn build_kw_rest(rest_mask: u64, stack_kwargs: *const VALUE, keywords: *const rb_callinfo_kwarg) -> VALUE {
+ if keywords.is_null() {
+ return unsafe { rb_hash_new() };
+ }
+
+ // Use the total number of supplied keywords as a size upper bound
+ let keyword_len = unsafe { (*keywords).keyword_len } as usize;
+ let hash = unsafe { rb_hash_new_with_size(keyword_len as u64) };
+
+ // Put pairs into the kwrest hash as the mask describes
+ for kwarg_idx in 0..keyword_len {
+ if (rest_mask & (1 << kwarg_idx)) != 0 {
+ unsafe {
+ let keyword_symbol = (*keywords).keywords.as_ptr().add(kwarg_idx).read();
+ let keyword_value = stack_kwargs.add(kwarg_idx).read();
+ rb_hash_aset(hash, keyword_symbol, keyword_value);
+ }
+ }
+ }
+ return hash;
+ }
+ }
+
+ asm_comment!(asm, "build kwrest hash");
+
+ // Make a bit mask describing which keywords should go into kwrest.
+ let mut rest_mask: u64 = 0;
+ // Index for one argument that will go into kwrest.
+ let mut rest_collected_idx = None;
+ for (supplied_kw_idx, &supplied_kw) in kwargs_order.iter().take(caller_keyword_len).enumerate() {
+ let mut found = false;
+ for callee_idx in 0..callee_kw_count {
+ let callee_kw = unsafe { callee_kwargs.add(callee_idx).read() };
+ if callee_kw == supplied_kw {
+ found = true;
+ break;
+ }
+ }
+ if !found {
+ rest_mask |= 1 << supplied_kw_idx;
+ if rest_collected_idx.is_none() {
+ rest_collected_idx = Some(supplied_kw_idx as i32);
+ }
+ }
+ }
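+ // For example, with callee `def foo(a:, **rest)` and caller `foo(a: 1, x: 2)`,
+ // only the bit for `x` is set, so `x: 2` ends up in the kwrest hash.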
+
+ let (kwrest, kwrest_type) = if rest_mask == 0 && anon_kwrest {
+ // In case the kwrest hash should be empty and is anonymous in the callee,
+ // we can pass nil instead of allocating. Anonymous kwrest can only be
+ // delegated, and nil is the same as an empty hash when delegating.
+ (Qnil.into(), Type::Nil)
+ } else {
+ // Save PC and SP before allocating
+ jit_save_pc(jit, asm);
+ gen_save_sp(asm);
+
+ // Build the kwrest hash. `struct rb_callinfo_kwarg` is malloc'd, so no GC concerns.
+ let kwargs_start = asm.lea(asm.ctx.sp_opnd(-caller_keyword_len_i32));
+ let hash = asm.ccall(
+ build_kw_rest as _,
+ vec![rest_mask.into(), kwargs_start, Opnd::const_ptr(ci_kwarg.cast())]
+ );
+ (hash, Type::THash)
+ };
+
+ // The kwrest parameter sits after `unspecified_bits` if the callee specifies any
+ // keywords.
+ let stack_kwrest_idx = kwargs_stack_base - callee_kw_count_i32 - i32::from(callee_kw_count > 0);
+ let stack_kwrest = asm.stack_opnd(stack_kwrest_idx);
+ // If `stack_kwrest` already has another argument there, we need to stow it elsewhere
+ // first before putting kwrest there. Use `rest_collected_idx` because that value went
+ // into kwrest so the slot is now free.
+ let kwrest_idx = callee_kw_count + usize::from(callee_kw_count > 0);
+ if let (Some(rest_collected_idx), true) = (rest_collected_idx, kwrest_idx < caller_keyword_len) {
+ let rest_collected = asm.stack_opnd(kwargs_stack_base - rest_collected_idx);
+ let mapping = asm.ctx.get_opnd_mapping(stack_kwrest.into());
+ asm.mov(rest_collected, stack_kwrest);
+ asm.ctx.set_opnd_mapping(rest_collected.into(), mapping);
+ // Update our bookkeeping to inform the reordering step later.
+ kwargs_order[rest_collected_idx as usize] = kwargs_order[kwrest_idx];
+ kwargs_order[kwrest_idx] = 0;
+ }
+ // Put kwrest straight into memory, since we might pop it later
+ asm.ctx.dealloc_reg(stack_kwrest.reg_opnd());
+ asm.mov(stack_kwrest, kwrest);
+ if stack_kwrest_idx >= 0 {
+ asm.ctx.set_opnd_mapping(stack_kwrest.into(), TempMapping::MapToStack(kwrest_type));
+ }
+
+ Some(kwrest_type)
+ } else {
+ None
+ };
+
+ // Ensure the stack is large enough for the callee
+ for _ in caller_keyword_len..callee_kw_count {
+ argc += 1;
+ asm.stack_push(Type::Unknown);
+ }
+ // Now this is the stack_opnd() index to the 0th keyword argument.
+ let kwargs_stack_base = kwargs_order.len() as i32 - 1;
+
+ // Next, we're going to loop through every keyword that was
+ // specified by the caller and make sure that it's in the correct
+ // place. If it's not we're going to swap it around with another one.
+ for kwarg_idx in 0..callee_kw_count {
+ let callee_kwarg = unsafe { callee_kwargs.add(kwarg_idx).read() };
+
+ // If the argument is already in the right order, then we don't
+ // need to generate any code since the expected value is already
+ // in the right place on the stack.
+ if callee_kwarg == kwargs_order[kwarg_idx] {
+ continue;
+ }
+
+ // In this case the argument is not in the right place, so we
+ // need to find its position where it _should_ be and swap with
+ // that location.
+ for swap_idx in 0..kwargs_order.len() {
+ if callee_kwarg == kwargs_order[swap_idx] {
+ // First we're going to generate the code that is going
+ // to perform the actual swapping at runtime.
+ let swap_idx_i32: i32 = swap_idx.try_into().unwrap();
+ let kwarg_idx_i32: i32 = kwarg_idx.try_into().unwrap();
+ let offset0 = kwargs_stack_base - swap_idx_i32;
+ let offset1 = kwargs_stack_base - kwarg_idx_i32;
+ stack_swap(asm, offset0, offset1);
+
+ // Next we're going to do some bookkeeping on our end so
+ // that we know the order that the arguments are
+ // actually in now.
+ kwargs_order.swap(kwarg_idx, swap_idx);
+
+ break;
+ }
+ }
+ }
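+ // For example, `foo(b: 1, a: 2)` calling `def foo(a:, b:)` emits one runtime
+ // swap so the values match the callee's declaration order.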
+
+ // Now that every caller-specified kwarg is in the right place, filling
+ // in unspecified default parameters won't overwrite anything.
+ for kwarg_idx in keyword_required_num..callee_kw_count {
+ if kwargs_order[kwarg_idx] != unsafe { callee_kwargs.add(kwarg_idx).read() } {
+ let default_param_idx = kwarg_idx - keyword_required_num;
+ let mut default_value = unsafe { (*keyword).default_values.add(default_param_idx).read() };
+
+ if default_value == Qundef {
+ // Qundef means that this value is not constant and must be
+ // recalculated at runtime, so we record it in unspecified_bits
+ // (Qnil is then used as a placeholder instead of Qundef).
+ unspecified_bits |= 0x01 << default_param_idx;
+ default_value = Qnil;
+ }
+
+ let default_param = asm.stack_opnd(kwargs_stack_base - kwarg_idx as i32);
+ let param_type = Type::from(default_value);
+ asm.mov(default_param, default_value.into());
+ asm.ctx.set_opnd_mapping(default_param.into(), TempMapping::MapToStack(param_type));
+ }
+ }
+
+ // Pop extra arguments that went into kwrest now that they're at stack top
+ if has_kwrest && caller_keyword_len > callee_kw_count {
+ let extra_kwarg_count = caller_keyword_len - callee_kw_count;
+ asm.stack_pop(extra_kwarg_count);
+ argc = argc - extra_kwarg_count as i32;
+ }
+
+ // Keyword arguments cause a special extra local variable to be
+ // pushed onto the stack that represents the parameters that weren't
+ // explicitly given a value and have a non-constant default.
+ if callee_kw_count > 0 {
+ let unspec_opnd = VALUE::fixnum_from_usize(unspecified_bits).as_u64();
+ let top = asm.stack_push(Type::Fixnum);
+ asm.mov(top, unspec_opnd.into());
+ argc += 1;
+ }
+
+ // The kwrest parameter sits after `unspecified_bits`
+ if let Some(kwrest_type) = kwrest_type {
+ let kwrest = asm.stack_push(kwrest_type);
+ // We put the kwrest parameter in memory earlier
+ asm.ctx.dealloc_reg(kwrest.reg_opnd());
+ argc += 1;
+ }
+
+ argc
+}
+
+/// This is a helper function to allow us to exit early
+/// during code generation if a predicate is true.
+/// We return Option<()> here because we will be able to
+/// short-circuit using the ? operator if we return None.
+/// It would be great if Rust let you implement ? for your
+/// own types, but as of right now it doesn't.
+fn exit_if(jit: &JITState, asm: &mut Assembler, pred: bool, counter: Counter) -> Option<()> {
+ if pred {
+ gen_counter_incr(jit, asm, counter);
+ return None
+ }
+ Some(())
+}
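+
+// For example, callers chain these helpers with the ? operator:
+//   exit_if_tail_call(jit, asm, ci)?;
+//   exit_if_has_post(jit, asm, iseq)?;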
+
+#[must_use]
+fn exit_if_tail_call(jit: &JITState, asm: &mut Assembler, ci: *const rb_callinfo) -> Option<()> {
+ exit_if(jit, asm, unsafe { vm_ci_flag(ci) } & VM_CALL_TAILCALL != 0, Counter::send_iseq_tailcall)
+}
+
+#[must_use]
+fn exit_if_has_post(jit: &JITState, asm: &mut Assembler, iseq: *const rb_iseq_t) -> Option<()> {
+ exit_if(jit, asm, unsafe { get_iseq_flags_has_post(iseq) }, Counter::send_iseq_has_post)
+}
+
+#[must_use]
+fn exit_if_kwsplat_non_nil(jit: &JITState, asm: &mut Assembler, flags: u32, counter: Counter) -> Option<()> {
+ let kw_splat = flags & VM_CALL_KW_SPLAT != 0;
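+ // The kw_splat hash is at the stack top unless a block arg sits above it.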
+ let kw_splat_stack = StackOpnd((flags & VM_CALL_ARGS_BLOCKARG != 0).into());
+ exit_if(jit, asm, kw_splat && asm.ctx.get_opnd_type(kw_splat_stack) != Type::Nil, counter)
+}
+
+#[must_use]
+fn exit_if_has_rest_and_captured(jit: &JITState, asm: &mut Assembler, iseq_has_rest: bool, captured_opnd: Option<Opnd>) -> Option<()> {
+ exit_if(jit, asm, iseq_has_rest && captured_opnd.is_some(), Counter::send_iseq_has_rest_and_captured)
+}
+
+#[must_use]
+fn exit_if_has_kwrest_and_captured(jit: &JITState, asm: &mut Assembler, iseq_has_kwrest: bool, captured_opnd: Option<Opnd>) -> Option<()> {
+ // We need to call a C function to allocate the kwrest hash, but also need to hold the captured
+ // block across the call, which we can't do.
+ exit_if(jit, asm, iseq_has_kwrest && captured_opnd.is_some(), Counter::send_iseq_has_kwrest_and_captured)
+}
+
+#[must_use]
+fn exit_if_has_rest_and_supplying_kws(jit: &JITState, asm: &mut Assembler, iseq_has_rest: bool, supplying_kws: bool) -> Option<()> {
+ // There can be a gap between the rest parameter array and the supplied keywords, or
+ // no space to put the rest array (e.g. in `def foo(*arr, k:) = arr; foo(k: 1)`, the 1 is
+ // sitting where the rest array should be).
+ exit_if(
+ jit,
+ asm,
+ iseq_has_rest && supplying_kws,
+ Counter::send_iseq_has_rest_and_kw_supplied,
+ )
+}
+
+#[must_use]
+fn exit_if_supplying_kw_and_has_no_kw(jit: &JITState, asm: &mut Assembler, supplying_kws: bool, callee_kws: bool) -> Option<()> {
+ // Passing keyword arguments to a callee that doesn't accept them means
+ // allocating a hash and treating it as a positional argument. Bail for now.
+ exit_if(
+ jit,
+ asm,
+ supplying_kws && !callee_kws,
+ Counter::send_iseq_has_no_kw,
+ )
+}
+
+#[must_use]
+fn exit_if_supplying_kws_and_accept_no_kwargs(jit: &JITState, asm: &mut Assembler, supplying_kws: bool, iseq: *const rb_iseq_t) -> Option<()> {
+ // If we have a method accepting no kwargs (**nil), exit if we have passed
+ // it any kwargs.
+ exit_if(
+ jit,
+ asm,
+ supplying_kws && unsafe { get_iseq_flags_accepts_no_kwarg(iseq) },
+ Counter::send_iseq_accepts_no_kwarg
+ )
+}
+
+#[must_use]
+fn exit_if_doing_kw_and_splat(jit: &JITState, asm: &mut Assembler, doing_kw_call: bool, flags: u32) -> Option<()> {
+ exit_if(jit, asm, doing_kw_call && flags & VM_CALL_ARGS_SPLAT != 0, Counter::send_iseq_splat_with_kw)
+}
+
+#[must_use]
+fn exit_if_wrong_number_arguments(
+ jit: &JITState,
+ asm: &mut Assembler,
+ args_setup_block: bool,
+ opts_filled: i32,
+ flags: u32,
+ opt_num: i32,
+ iseq_has_rest: bool,
+) -> Option<()> {
+ // Too few arguments and no splat to make up for it
+ let too_few = opts_filled < 0 && flags & VM_CALL_ARGS_SPLAT == 0;
+ // Too many arguments and no sink to take them
+ let too_many = opts_filled > opt_num && !(iseq_has_rest || args_setup_block);
+
+ exit_if(jit, asm, too_few || too_many, Counter::send_iseq_arity_error)
+}
+
+#[must_use]
+fn exit_if_doing_kw_and_opts_missing(jit: &JITState, asm: &mut Assembler, doing_kw_call: bool, opts_missing: i32) -> Option<()> {
+ // If we have unfilled optional arguments and keyword arguments then we
+ // would need to adjust the arguments location to account for that.
+ // For now we aren't handling this case.
+ exit_if(jit, asm, doing_kw_call && opts_missing > 0, Counter::send_iseq_missing_optional_kw)
+}
+
+#[must_use]
+fn exit_if_has_rest_and_optional_and_block(jit: &JITState, asm: &mut Assembler, iseq_has_rest: bool, opt_num: i32, iseq: *const rb_iseq_t, block_arg: bool) -> Option<()> {
+ exit_if(
+ jit,
+ asm,
+ iseq_has_rest && opt_num != 0 && (unsafe { get_iseq_flags_has_block(iseq) } || block_arg),
+ Counter::send_iseq_has_rest_opt_and_block
+ )
+}
+
+#[derive(Clone, Copy)]
+enum BlockArg {
+ Nil,
+ /// A special sentinel value indicating the block parameter should be read from
+ /// the current surrounding cfp
+ BlockParamProxy,
+ /// A proc object. Could be an instance of a subclass of ::rb_cProc
+ TProc,
+}
+
+#[must_use]
+fn exit_if_unsupported_block_arg_type(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ supplying_block_arg: bool
+) -> Option<Option<BlockArg>> {
+ let block_arg_type = if supplying_block_arg {
+ asm.ctx.get_opnd_type(StackOpnd(0))
+ } else {
+ // Passing no block argument
+ return Some(None);
+ };
+
+ match block_arg_type {
+ // We'll handle Nil and BlockParamProxy later
+ Type::Nil => Some(Some(BlockArg::Nil)),
+ Type::BlockParamProxy => Some(Some(BlockArg::BlockParamProxy)),
+ _ if {
+ let sample_block_arg = jit.peek_at_stack(&asm.ctx, 0);
+ unsafe { rb_obj_is_proc(sample_block_arg) }.test()
+ } => {
+ // Speculate that we'll have a proc as the block arg
+ Some(Some(BlockArg::TProc))
+ }
+ _ => {
+ gen_counter_incr(jit, asm, Counter::send_iseq_block_arg_type);
+ None
+ }
+ }
+}
+
+#[must_use]
+fn exit_if_stack_too_large(iseq: *const rb_iseq_t) -> Option<()> {
+ let stack_max = unsafe { rb_get_iseq_body_stack_max(iseq) };
+ // Reject ISEQs with very large temp stacks;
+ // this allows us to use u8/i8 values to track stack_size and sp_offset.
+ if stack_max >= i8::MAX as u32 {
+ incr_counter!(iseq_stack_too_large);
+ return None;
+ }
+ Some(())
+}
+
+fn gen_struct_aref(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ci: *const rb_callinfo,
+ cme: *const rb_callable_method_entry_t,
+ comptime_recv: VALUE,
+ flags: u32,
+ argc: i32,
+) -> Option<CodegenStatus> {
+
+ if unsafe { vm_ci_argc(ci) } != 0 {
+ return None;
+ }
+
+ let off: i32 = unsafe { get_cme_def_body_optimized_index(cme) }
+ .try_into()
+ .unwrap();
+
+ // Confidence checks
+ assert!(unsafe { RB_TYPE_P(comptime_recv, RUBY_T_STRUCT) });
+ assert!((off as i64) < unsafe { RSTRUCT_LEN(comptime_recv) });
+
+ // We are going to use an encoding that takes a 4-byte immediate which
+ // limits the offset to INT32_MAX.
+ {
+ let native_off = (off as i64) * (SIZEOF_VALUE as i64);
+ if native_off > (i32::MAX as i64) {
+ return None;
+ }
+ }
+
+ if c_method_tracing_currently_enabled(jit) {
+ // Struct accesses need to fire c_call and c_return events, which we can't support
+ // See :attr-tracing:
+ gen_counter_incr(jit, asm, Counter::send_cfunc_tracing);
+ return None;
+ }
+
+ // This is a .send call and we need to adjust the stack
+ if flags & VM_CALL_OPT_SEND != 0 {
+ handle_opt_send_shift_stack(asm, argc);
+ }
+
+ // All structs from the same Struct class should have the same
+ // length. So if our comptime_recv is embedded, all runtime
+ // structs of the same class should be as well, and the same is
+ // true of the converse.
+ let embedded = unsafe { FL_TEST_RAW(comptime_recv, VALUE(RSTRUCT_EMBED_LEN_MASK)) };
+
+ asm_comment!(asm, "struct aref");
+
+ let recv = asm.stack_pop(1);
+ let recv = asm.load(recv);
+
+ let val = if embedded != VALUE(0) {
+ Opnd::mem(64, recv, RUBY_OFFSET_RSTRUCT_AS_ARY + (SIZEOF_VALUE_I32 * off))
+ } else {
+ let rstruct_ptr = asm.load(Opnd::mem(64, recv, RUBY_OFFSET_RSTRUCT_AS_HEAP_PTR));
+ Opnd::mem(64, rstruct_ptr, SIZEOF_VALUE_I32 * off)
+ };
+
+ let ret = asm.stack_push(Type::Unknown);
+ asm.mov(ret, val);
+
+ jump_to_next_insn(jit, asm)
+}
+
+fn gen_struct_aset(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ci: *const rb_callinfo,
+ cme: *const rb_callable_method_entry_t,
+ comptime_recv: VALUE,
+ flags: u32,
+ argc: i32,
+) -> Option<CodegenStatus> {
+ if unsafe { vm_ci_argc(ci) } != 1 {
+ return None;
+ }
+
+ // If the comptime receiver is frozen, writing a struct member will raise an exception
+ // and we don't want to JIT code to deal with that situation.
+ if comptime_recv.is_frozen() {
+ return None;
+ }
+
+ if c_method_tracing_currently_enabled(jit) {
+ // Struct accesses need to fire c_call and c_return events, which we can't support
+ // See :attr-tracing:
+ gen_counter_incr(jit, asm, Counter::send_cfunc_tracing);
+ return None;
+ }
+
+ // This is a .send call and we need to adjust the stack
+ if flags & VM_CALL_OPT_SEND != 0 {
+ handle_opt_send_shift_stack(asm, argc);
+ }
+
+ let off: i32 = unsafe { get_cme_def_body_optimized_index(cme) }
+ .try_into()
+ .unwrap();
+
+ // Confidence checks
+ assert!(unsafe { RB_TYPE_P(comptime_recv, RUBY_T_STRUCT) });
+ assert!((off as i64) < unsafe { RSTRUCT_LEN(comptime_recv) });
+
+ // Even if the comptime recv was not frozen, future recv may be. So we need to emit a guard
+ // that the recv is not frozen.
+ // We know all structs are heap objects, so we can check the flag directly.
+ let recv = asm.stack_opnd(1);
+ let recv = asm.load(recv);
+ let flags = asm.load(Opnd::mem(VALUE_BITS, recv, RUBY_OFFSET_RBASIC_FLAGS));
+ asm.test(flags, (RUBY_FL_FREEZE as u64).into());
+ asm.jnz(Target::side_exit(Counter::opt_aset_frozen));
+
+ // Not frozen, so we can proceed.
+
+ asm_comment!(asm, "struct aset");
+
+ let val = asm.stack_pop(1);
+ let recv = asm.stack_pop(1);
+
+ let val = asm.ccall(RSTRUCT_SET as *const u8, vec![recv, (off as i64).into(), val]);
+
+ let ret = asm.stack_push(Type::Unknown);
+ asm.mov(ret, val);
+
+ jump_to_next_insn(jit, asm)
+}
+
+// Generate code that calls a method with dynamic dispatch
+fn gen_send_dynamic<F: Fn(&mut Assembler) -> Opnd>(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ cd: *const rb_call_data,
+ sp_pops: usize,
+ vm_sendish: F,
+) -> Option<CodegenStatus> {
+ // Our frame handling is not compatible with tailcall
+ if unsafe { vm_ci_flag((*cd).ci) } & VM_CALL_TAILCALL != 0 {
+ return None;
+ }
+ jit_perf_symbol_push!(jit, asm, "gen_send_dynamic", PerfMap::Codegen);
+
+ // Rewind stack_size using ctx.with_stack_size to allow stack_size changes
+ // before you return None.
+ asm.ctx = asm.ctx.with_stack_size(jit.stack_size_for_pc);
+
+ // Save PC and SP to prepare for dynamic dispatch
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Dispatch a method
+ let ret = vm_sendish(asm);
+
+ // Pop arguments and a receiver
+ asm.stack_pop(sp_pops);
+
+ // Push the return value
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, ret);
+
+ // Fix the interpreter SP deviated by vm_sendish
+ asm.mov(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP), SP);
+
+ gen_counter_incr(jit, asm, Counter::num_send_dynamic);
+
+ jit_perf_symbol_pop!(jit, asm, PerfMap::Codegen);
+
+ // End the current block for invalidating and sharing the same successor
+ jump_to_next_insn(jit, asm)
+}
+
+fn gen_send_general(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ cd: *const rb_call_data,
+ block: Option<BlockHandler>,
+) -> Option<CodegenStatus> {
+ // Relevant definitions:
+ // rb_execution_context_t : vm_core.h
+ // invoker, cfunc logic : method.h, vm_method.c
+ // rb_callinfo : vm_callinfo.h
+ // rb_callable_method_entry_t : method.h
+ // vm_call_cfunc_with_frame : vm_insnhelper.c
+ //
+ // For a general overview for how the interpreter calls methods,
+ // see vm_call_method().
+
+ let ci = unsafe { get_call_data_ci(cd) }; // info about the call site
+ let mut argc: i32 = unsafe { vm_ci_argc(ci) }.try_into().unwrap();
+ let mut mid = unsafe { vm_ci_mid(ci) };
+ let mut flags = unsafe { vm_ci_flag(ci) };
+
+ // Defer compilation so we can specialize on class of receiver
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let ci_flags = unsafe { vm_ci_flag(ci) };
+
+ // Dynamic stack layout. No good way to support without inlining.
+ if ci_flags & VM_CALL_FORWARDING != 0 {
+ gen_counter_incr(jit, asm, Counter::send_forwarding);
+ return None;
+ }
+
+ let recv_idx = argc + if flags & VM_CALL_ARGS_BLOCKARG != 0 { 1 } else { 0 };
+ let comptime_recv = jit.peek_at_stack(&asm.ctx, recv_idx as isize);
+ let comptime_recv_klass = comptime_recv.class_of();
+ assert_eq!(RUBY_T_CLASS, comptime_recv_klass.builtin_type(),
+ "objects visible to ruby code should have a T_CLASS in their klass field");
+
+ // Don't compile calls through singleton classes to avoid retaining the receiver.
+ // Make an exception for class methods since classes tend to be retained anyways.
+ // Also compile calls on top_self to help tests.
+ if VALUE(0) != unsafe { FL_TEST(comptime_recv_klass, VALUE(RUBY_FL_SINGLETON as usize)) }
+ && comptime_recv != unsafe { rb_vm_top_self() }
+ && !unsafe { RB_TYPE_P(comptime_recv, RUBY_T_CLASS) }
+ && !unsafe { RB_TYPE_P(comptime_recv, RUBY_T_MODULE) } {
+ gen_counter_incr(jit, asm, Counter::send_singleton_class);
+ return None;
+ }
+
+ // Points to the receiver operand on the stack
+ let recv = asm.stack_opnd(recv_idx);
+ let recv_opnd: YARVOpnd = recv.into();
+
+ // Log the name of the method we're calling
+ asm_comment!(asm, "call to {}", get_method_name(Some(comptime_recv_klass), mid));
+
+ // Gather some statistics about sends
+ gen_counter_incr(jit, asm, Counter::num_send);
+ if let Some(_known_klass) = asm.ctx.get_opnd_type(recv_opnd).known_class() {
+ gen_counter_incr(jit, asm, Counter::num_send_known_class);
+ }
+ if asm.ctx.get_chain_depth() > 1 {
+ gen_counter_incr(jit, asm, Counter::num_send_polymorphic);
+ }
+ // If megamorphic, let the caller fall back to dynamic dispatch
+ if asm.ctx.get_chain_depth() >= SEND_MAX_DEPTH {
+ gen_counter_incr(jit, asm, Counter::send_megamorphic);
+ return None;
+ }
+
+ perf_call!("gen_send_general: ", jit_guard_known_klass(
+ jit,
+ asm,
+ recv,
+ recv_opnd,
+ comptime_recv,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_klass_megamorphic,
+ ));
+
+ // Do method lookup
+ let mut cme = unsafe { rb_callable_method_entry(comptime_recv_klass, mid) };
+ if cme.is_null() {
+ gen_counter_incr(jit, asm, Counter::send_cme_not_found);
+ return None;
+ }
+
+ // Load an overloaded cme if applicable. See vm_search_cc().
+ // It allows you to use a faster ISEQ if possible.
+ cme = unsafe { rb_check_overloaded_cme(cme, ci) };
+
+ let visi = unsafe { METHOD_ENTRY_VISI(cme) };
+ match visi {
+ METHOD_VISI_PUBLIC => {
+ // Can always call public methods
+ }
+ METHOD_VISI_PRIVATE => {
+ if flags & VM_CALL_FCALL == 0 {
+ // Can only call private methods with FCALL callsites.
+ // (at the moment they are callsites without a receiver or an explicit `self` receiver)
+ gen_counter_incr(jit, asm, Counter::send_private_not_fcall);
+ return None;
+ }
+ }
+ METHOD_VISI_PROTECTED => {
+ // If the method call is an FCALL, it is always valid
+ if flags & VM_CALL_FCALL == 0 {
+ // otherwise we need an ancestry check to ensure the receiver is valid to be called
+ // as protected
+ jit_protected_callee_ancestry_guard(asm, cme);
+ }
+ }
+ _ => {
+ panic!("cmes should always have a visibility!");
+ }
+ }
+
+ // Register block for invalidation
+ //assert!(cme->called_id == mid);
+ jit.assume_method_lookup_stable(asm, cme);
+
+ // To handle the aliased method case (VM_METHOD_TYPE_ALIAS)
+ loop {
+ let def_type = unsafe { get_cme_def_type(cme) };
+
+ match def_type {
+ VM_METHOD_TYPE_ISEQ => {
+ let iseq = unsafe { get_def_iseq_ptr((*cme).def) };
+ let frame_type = VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL;
+ return perf_call! { gen_send_iseq(jit, asm, iseq, ci, frame_type, None, cme, block, flags, argc, None) };
+ }
+ VM_METHOD_TYPE_CFUNC => {
+ return perf_call! { gen_send_cfunc(
+ jit,
+ asm,
+ ci,
+ cme,
+ block,
+ Some(comptime_recv_klass),
+ flags,
+ argc,
+ ) };
+ }
+ VM_METHOD_TYPE_IVAR => {
+ // This is a .send call, which isn't supported for attr_reader right now
+ if flags & VM_CALL_OPT_SEND != 0 {
+ gen_counter_incr(jit, asm, Counter::send_send_attr_reader);
+ return None;
+ }
+
+ if flags & VM_CALL_ARGS_BLOCKARG != 0 {
+ match asm.ctx.get_opnd_type(StackOpnd(0)) {
+ Type::Nil | Type::BlockParamProxy => {
+ // Getters ignore the block arg, and these types of block args can be
+ // passed without side-effect (never any `to_proc` call).
+ asm.stack_pop(1);
+ }
+ _ => {
+ gen_counter_incr(jit, asm, Counter::send_getter_block_arg);
+ return None;
+ }
+ }
+ }
+
+ if argc != 0 {
+ // Guard for simple splat of empty array
+ if VM_CALL_ARGS_SPLAT == flags & (VM_CALL_ARGS_SPLAT | VM_CALL_KWARG | VM_CALL_KW_SPLAT)
+ && argc == 1 {
+ // Not using chain guards since on failure these likely end up just raising
+ // ArgumentError
+ let splat = asm.stack_opnd(0);
+ guard_object_is_array(asm, splat, splat.into(), Counter::guard_send_getter_splat_non_empty);
+ let splat_len = get_array_len(asm, splat);
+ asm.cmp(splat_len, 0.into());
+ asm.jne(Target::side_exit(Counter::guard_send_getter_splat_non_empty));
+ asm.stack_pop(1);
+ } else {
+ // Argument count mismatch. Getters take no arguments.
+ gen_counter_incr(jit, asm, Counter::send_getter_arity);
+ return None;
+ }
+ }
+
+ if c_method_tracing_currently_enabled(jit) {
+ // Can't generate code for firing c_call and c_return events
+ // :attr-tracing:
+ // Handling the C method tracing events for attr_accessor
+ // methods is easier than for regular C methods as we know the
+ // "method" we are calling into never enables those tracing
+ // events. We are never inside the code that needs to be
+ // invalidated when invalidation happens.
+ gen_counter_incr(jit, asm, Counter::send_cfunc_tracing);
+ return None;
+ }
+
+ let recv = asm.stack_opnd(0); // the receiver should now be the stack top
+ let ivar_name = unsafe { get_cme_def_body_attr_id(cme) };
+
+ return gen_get_ivar(
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ comptime_recv,
+ ivar_name,
+ recv,
+ recv.into(),
+ );
+ }
+ VM_METHOD_TYPE_ATTRSET => {
+ // This is a .send call, which isn't supported for attr_writer right now
+ if flags & VM_CALL_OPT_SEND != 0 {
+ gen_counter_incr(jit, asm, Counter::send_send_attr_writer);
+ return None;
+ }
+ if flags & VM_CALL_ARGS_SPLAT != 0 {
+ gen_counter_incr(jit, asm, Counter::send_args_splat_attrset);
+ return None;
+ }
+ if flags & VM_CALL_KWARG != 0 {
+ gen_counter_incr(jit, asm, Counter::send_attrset_kwargs);
+ return None;
+ } else if argc != 1 || unsafe { !RB_TYPE_P(comptime_recv, RUBY_T_OBJECT) } {
+ gen_counter_incr(jit, asm, Counter::send_ivar_set_method);
+ return None;
+ } else if c_method_tracing_currently_enabled(jit) {
+ // Can't generate code for firing c_call and c_return events
+ // See :attr-tracing:
+ gen_counter_incr(jit, asm, Counter::send_cfunc_tracing);
+ return None;
+ } else if flags & VM_CALL_ARGS_BLOCKARG != 0 {
+ gen_counter_incr(jit, asm, Counter::send_attrset_block_arg);
+ return None;
+ } else {
+ let ivar_name = unsafe { get_cme_def_body_attr_id(cme) };
+ return gen_set_ivar(jit, asm, comptime_recv, ivar_name, StackOpnd(1), None);
+ }
+ }
+ // Block method, e.g. define_method(:foo) { :my_block }
+ VM_METHOD_TYPE_BMETHOD => {
+ if flags & VM_CALL_ARGS_SPLAT != 0 {
+ gen_counter_incr(jit, asm, Counter::send_args_splat_bmethod);
+ return None;
+ }
+ return gen_send_bmethod(jit, asm, ci, cme, block, flags, argc);
+ }
+ VM_METHOD_TYPE_ALIAS => {
+ // Retrieve the aliased method and re-enter the switch
+ cme = unsafe { rb_aliased_callable_method_entry(cme) };
+ continue;
+ }
+ // Send family of methods, e.g. call/apply
+ VM_METHOD_TYPE_OPTIMIZED => {
+ if flags & VM_CALL_ARGS_BLOCKARG != 0 {
+ gen_counter_incr(jit, asm, Counter::send_optimized_block_arg);
+ return None;
+ }
+
+ let opt_type = unsafe { get_cme_def_body_optimized_type(cme) };
+ match opt_type {
+ OPTIMIZED_METHOD_TYPE_SEND => {
+ // This is for method calls like `foo.send(:bar)`
+ // The `send` method does not get its own stack frame.
+ // Instead we look up the method and call it,
+ // doing some stack shifting based on the VM_CALL_OPT_SEND flag.
+
+ // Reject nested cases such as `send(:send, :alias_for_send, :foo)`.
+ // We would need to do some stack manipulation here or keep track of how
+ // many levels deep we need to stack manipulate. Because of how exits
+ // currently work, we can't do stack manipulation until we know we will no longer
+ // side exit.
+ if flags & VM_CALL_OPT_SEND != 0 {
+ gen_counter_incr(jit, asm, Counter::send_send_nested);
+ return None;
+ }
+
+ if argc == 0 {
+ gen_counter_incr(jit, asm, Counter::send_send_wrong_args);
+ return None;
+ }
+
+ argc -= 1;
+
+ let compile_time_name = jit.peek_at_stack(&asm.ctx, argc as isize);
+
+ mid = unsafe { rb_get_symbol_id(compile_time_name) };
+ if mid == 0 {
+ // This also rejects method names that need conversion
+ gen_counter_incr(jit, asm, Counter::send_send_null_mid);
+ return None;
+ }
+
+ cme = unsafe { rb_callable_method_entry(comptime_recv_klass, mid) };
+ if cme.is_null() {
+ gen_counter_incr(jit, asm, Counter::send_send_null_cme);
+ return None;
+ }
+
+ flags |= VM_CALL_FCALL | VM_CALL_OPT_SEND;
+
+ jit.assume_method_lookup_stable(asm, cme);
+
+ asm_comment!(
+ asm,
+ "guard sending method name \'{}\'",
+ unsafe { cstr_to_rust_string(rb_id2name(mid)) }.unwrap_or_else(|| "<unknown>".to_owned()),
+ );
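+ // Re-derive the method ID from the name operand at runtime and chain-guard
+ // that it matches the one we specialized on.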
+
+ let name_opnd = asm.stack_opnd(argc);
+ let symbol_id_opnd = asm.ccall(rb_get_symbol_id as *const u8, vec![name_opnd]);
+
+ asm.cmp(symbol_id_opnd, mid.into());
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::guard_send_send_name_chain,
+ );
+
+ // We have changed the argc, flags, mid, and cme, so we need to re-enter the match
+ // and compile whatever method we found from send.
+ continue;
+
+ }
+ OPTIMIZED_METHOD_TYPE_CALL => {
+ if block.is_some() {
+ gen_counter_incr(jit, asm, Counter::send_call_block);
+ return None;
+ }
+
+ if flags & VM_CALL_KWARG != 0 {
+ gen_counter_incr(jit, asm, Counter::send_call_kwarg);
+ return None;
+ }
+
+ if flags & VM_CALL_ARGS_SPLAT != 0 {
+ gen_counter_incr(jit, asm, Counter::send_args_splat_opt_call);
+ return None;
+ }
+
+ // If this is a .send call we need to adjust the stack
+ if flags & VM_CALL_OPT_SEND != 0 {
+ handle_opt_send_shift_stack(asm, argc);
+ }
+
+ // About to reset the SP, need to load this here
+ let recv_load = asm.load(recv);
+
+ let sp = asm.lea(asm.ctx.sp_opnd(0));
+
+ // Save the PC and SP because the callee can make Ruby calls
+ jit_prepare_non_leaf_call(jit, asm);
+
+ let kw_splat = flags & VM_CALL_KW_SPLAT;
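+ // `sp` was captured before jit_prepare_non_leaf_call(), so the argc arguments
+ // still sit in the slots immediately below it.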
+ let stack_argument_pointer = asm.lea(Opnd::mem(64, sp, -(argc) * SIZEOF_VALUE_I32));
+
+ let ret = asm.ccall(rb_optimized_call as *const u8, vec![
+ recv_load,
+ EC,
+ argc.into(),
+ stack_argument_pointer,
+ kw_splat.into(),
+ VM_BLOCK_HANDLER_NONE.into(),
+ ]);
+
+ asm.stack_pop(argc as usize + 1);
+
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, ret);
+
+ // End the block to allow invalidating the next instruction
+ return jump_to_next_insn(jit, asm);
+ }
+ OPTIMIZED_METHOD_TYPE_BLOCK_CALL => {
+ gen_counter_incr(jit, asm, Counter::send_optimized_method_block_call);
+ return None;
+ }
+ OPTIMIZED_METHOD_TYPE_STRUCT_AREF => {
+ if flags & VM_CALL_ARGS_SPLAT != 0 {
+ gen_counter_incr(jit, asm, Counter::send_args_splat_aref);
+ return None;
+ }
+ return gen_struct_aref(
+ jit,
+ asm,
+ ci,
+ cme,
+ comptime_recv,
+ flags,
+ argc,
+ );
+ }
+ OPTIMIZED_METHOD_TYPE_STRUCT_ASET => {
+ if flags & VM_CALL_ARGS_SPLAT != 0 {
+ gen_counter_incr(jit, asm, Counter::send_args_splat_aset);
+ return None;
+ }
+ return gen_struct_aset(
+ jit,
+ asm,
+ ci,
+ cme,
+ comptime_recv,
+ flags,
+ argc,
+ );
+ }
+ _ => {
+ panic!("unknown optimized method type!")
+ }
+ }
+ }
+ VM_METHOD_TYPE_ZSUPER => {
+ gen_counter_incr(jit, asm, Counter::send_zsuper_method);
+ return None;
+ }
+ VM_METHOD_TYPE_UNDEF => {
+ gen_counter_incr(jit, asm, Counter::send_undef_method);
+ return None;
+ }
+ VM_METHOD_TYPE_NOTIMPLEMENTED => {
+ gen_counter_incr(jit, asm, Counter::send_not_implemented_method);
+ return None;
+ }
+ VM_METHOD_TYPE_MISSING => {
+ gen_counter_incr(jit, asm, Counter::send_missing_method);
+ return None;
+ }
+ VM_METHOD_TYPE_REFINED => {
+ gen_counter_incr(jit, asm, Counter::send_refined_method);
+ return None;
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+}
+
+/// Get class name from a class pointer.
+fn get_class_name(class: Option<VALUE>) -> String {
+ class.filter(|&class| {
+ // type checks for rb_class2name()
+ unsafe { RB_TYPE_P(class, RUBY_T_MODULE) || RB_TYPE_P(class, RUBY_T_CLASS) }
+ }).and_then(|class| unsafe {
+ cstr_to_rust_string(rb_class2name(class))
+ }).unwrap_or_else(|| "Unknown".to_string())
+}
+
+/// Assemble "{class_name}#{method_name}" from a class pointer and a method ID
+fn get_method_name(class: Option<VALUE>, mid: u64) -> String {
+ let class_name = get_class_name(class);
+ let method_name = if mid != 0 {
+ unsafe { cstr_to_rust_string(rb_id2name(mid)) }
+ } else {
+ None
+ }.unwrap_or_else(|| "Unknown".to_string());
+ format!("{}#{}", class_name, method_name)
+}
+
+/// Assemble "{label}@{iseq_path}:{lineno}" (iseq_inspect() format) from an ISEQ
+fn get_iseq_name(iseq: IseqPtr) -> String {
+ let c_string = unsafe { rb_yjit_iseq_inspect(iseq) };
+ let string = unsafe { CStr::from_ptr(c_string) }.to_str()
+ .unwrap_or_else(|_| "not UTF-8").to_string();
+ unsafe { ruby_xfree(c_string as *mut c_void); }
+ string
+}
+
+/// Shifts the stack for send in order to remove the name of the method
+/// The comment below is borrowed from vm_call_opt_send in vm_insnhelper.c.
+/// E.g. when argc == 2
+/// | | | | TOPN
+/// +------+ | |
+/// | arg1 | ---+ | | 0
+/// +------+ | +------+
+/// | arg0 | -+ +-> | arg1 | 1
+/// +------+ | +------+
+/// | sym | +---> | arg0 | 2
+/// +------+ +------+
+/// | recv | | recv | 3
+///--+------+--------+------+------
+///
+/// We do this for our compile-time context and the actual stack.
+fn handle_opt_send_shift_stack(asm: &mut Assembler, argc: i32) {
+ asm_comment!(asm, "shift_stack");
+ for j in (0..argc).rev() {
+ let opnd = asm.stack_opnd(j);
+ let opnd2 = asm.stack_opnd(j + 1);
+ asm.mov(opnd2, opnd);
+ }
+ asm.shift_stack(argc as usize);
+}
+
+fn gen_opt_send_without_block(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Generate specialized code if possible
+ let cd = jit.get_arg(0).as_ptr();
+ if let Some(status) = perf_call! { gen_send_general(jit, asm, cd, None) } {
+ return Some(status);
+ }
+
+ // Otherwise, fall back to dynamic dispatch using the interpreter's implementation of send
+ gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
+ extern "C" {
+ fn rb_vm_opt_send_without_block(ec: EcPtr, cfp: CfpPtr, cd: VALUE) -> VALUE;
+ }
+ asm.ccall(
+ rb_vm_opt_send_without_block as *const u8,
+ vec![EC, CFP, (cd as usize).into()],
+ )
+ })
+}
+
+fn gen_send(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Generate specialized code if possible
+ let cd = jit.get_arg(0).as_ptr();
+ let block = jit.get_arg(1).as_optional_ptr().map(|iseq| BlockHandler::BlockISeq(iseq));
+ if let Some(status) = perf_call! { gen_send_general(jit, asm, cd, block) } {
+ return Some(status);
+ }
+
+ // Otherwise, fall back to dynamic dispatch using the interpreter's implementation of send
+ let blockiseq = jit.get_arg(1).as_iseq();
+ gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
+ extern "C" {
+ fn rb_vm_send(ec: EcPtr, cfp: CfpPtr, cd: VALUE, blockiseq: IseqPtr) -> VALUE;
+ }
+ asm.ccall(
+ rb_vm_send as *const u8,
+ vec![EC, CFP, (cd as usize).into(), VALUE(blockiseq as usize).into()],
+ )
+ })
+}
+
+fn gen_sendforward(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Generate specialized code if possible
+ let cd = jit.get_arg(0).as_ptr();
+ let block = jit.get_arg(1).as_optional_ptr().map(|iseq| BlockHandler::BlockISeq(iseq));
+ if let Some(status) = perf_call! { gen_send_general(jit, asm, cd, block) } {
+ return Some(status);
+ }
+
+ // Otherwise, fall back to dynamic dispatch using the interpreter's implementation of sendforward
+ let blockiseq = jit.get_arg(1).as_iseq();
+ gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
+ extern "C" {
+ fn rb_vm_sendforward(ec: EcPtr, cfp: CfpPtr, cd: VALUE, blockiseq: IseqPtr) -> VALUE;
+ }
+ asm.ccall(
+ rb_vm_sendforward as *const u8,
+ vec![EC, CFP, (cd as usize).into(), VALUE(blockiseq as usize).into()],
+ )
+ })
+}
+
+fn gen_invokeblock(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Generate specialized code if possible
+ let cd = jit.get_arg(0).as_ptr();
+ if let Some(status) = gen_invokeblock_specialized(jit, asm, cd) {
+ return Some(status);
+ }
+
+    // Otherwise, fall back to dynamic dispatch using the interpreter's implementation of invokeblock
+ gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_invokeblock_sp_pops((*cd).ci) }, |asm| {
+ extern "C" {
+ fn rb_vm_invokeblock(ec: EcPtr, cfp: CfpPtr, cd: VALUE) -> VALUE;
+ }
+ asm.ccall(
+ rb_vm_invokeblock as *const u8,
+ vec![EC, CFP, (cd as usize).into()],
+ )
+ })
+}
+
+fn gen_invokeblock_specialized(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ cd: *const rb_call_data,
+) -> Option<CodegenStatus> {
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+    // Fall back to dynamic dispatch if this callsite is megamorphic
+ if asm.ctx.get_chain_depth() >= SEND_MAX_DEPTH {
+ gen_counter_incr(jit, asm, Counter::invokeblock_megamorphic);
+ return None;
+ }
+
+ // Get call info
+ let ci = unsafe { get_call_data_ci(cd) };
+ let argc: i32 = unsafe { vm_ci_argc(ci) }.try_into().unwrap();
+ let flags = unsafe { vm_ci_flag(ci) };
+
+ // Get block_handler
+ let cfp = jit.get_cfp();
+ let lep = unsafe { rb_vm_ep_local_ep(get_cfp_ep(cfp)) };
+ let comptime_handler = unsafe { *lep.offset(VM_ENV_DATA_INDEX_SPECVAL.try_into().unwrap()) };
+
+ // Handle each block_handler type
+ if comptime_handler.0 == VM_BLOCK_HANDLER_NONE as usize { // no block given
+ gen_counter_incr(jit, asm, Counter::invokeblock_none);
+ None
+ } else if comptime_handler.0 & 0x3 == 0x1 { // VM_BH_ISEQ_BLOCK_P
+ asm_comment!(asm, "get local EP");
+ let ep_opnd = gen_get_lep(jit, asm);
+ let block_handler_opnd = asm.load(
+ Opnd::mem(64, ep_opnd, SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL)
+ );
+
+ asm_comment!(asm, "guard block_handler type");
+ let tag_opnd = asm.and(block_handler_opnd, 0x3.into()); // block_handler is a tagged pointer
+ asm.cmp(tag_opnd, 0x1.into()); // VM_BH_ISEQ_BLOCK_P
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::guard_invokeblock_tag_changed,
+ );
+
+ // If the current ISEQ is annotated to be inlined but it's not being inlined here,
+ // generate a dynamic dispatch to avoid making this yield megamorphic.
+ if unsafe { rb_jit_iseq_builtin_attrs(jit.iseq) } & BUILTIN_ATTR_INLINE_BLOCK != 0 && !asm.ctx.inline() {
+ gen_counter_incr(jit, asm, Counter::invokeblock_iseq_not_inlined);
+ return None;
+ }
+
+ let comptime_captured = unsafe { ((comptime_handler.0 & !0x3) as *const rb_captured_block).as_ref().unwrap() };
+ let comptime_iseq = unsafe { *comptime_captured.code.iseq.as_ref() };
+
+ asm_comment!(asm, "guard known ISEQ");
+ let captured_opnd = asm.and(block_handler_opnd, Opnd::Imm(!0x3));
+ let iseq_opnd = asm.load(Opnd::mem(64, captured_opnd, SIZEOF_VALUE_I32 * 2));
+ asm.cmp(iseq_opnd, VALUE::from(comptime_iseq).into());
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::guard_invokeblock_iseq_block_changed,
+ );
+
+ perf_call! { gen_send_iseq(jit, asm, comptime_iseq, ci, VM_FRAME_MAGIC_BLOCK, None, 0 as _, None, flags, argc, Some(captured_opnd)) }
+ } else if comptime_handler.0 & 0x3 == 0x3 { // VM_BH_IFUNC_P
+ // We aren't handling CALLER_SETUP_ARG and CALLER_REMOVE_EMPTY_KW_SPLAT yet.
+ if flags & VM_CALL_ARGS_SPLAT != 0 {
+ gen_counter_incr(jit, asm, Counter::invokeblock_ifunc_args_splat);
+ return None;
+ }
+ if flags & VM_CALL_KW_SPLAT != 0 {
+ gen_counter_incr(jit, asm, Counter::invokeblock_ifunc_kw_splat);
+ return None;
+ }
+
+ asm_comment!(asm, "get local EP");
+ let ep_opnd = gen_get_lep(jit, asm);
+ let block_handler_opnd = asm.load(
+ Opnd::mem(64, ep_opnd, SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL)
+ );
+
+ asm_comment!(asm, "guard block_handler type");
+ let tag_opnd = asm.and(block_handler_opnd, 0x3.into()); // block_handler is a tagged pointer
+ asm.cmp(tag_opnd, 0x3.into()); // VM_BH_IFUNC_P
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::guard_invokeblock_tag_changed,
+ );
+
+ // The cfunc may not be leaf
+ jit_prepare_non_leaf_call(jit, asm);
+
+ extern "C" {
+ fn rb_vm_yield_with_cfunc(ec: EcPtr, captured: *const rb_captured_block, argc: c_int, argv: *const VALUE) -> VALUE;
+ }
+ asm_comment!(asm, "call ifunc");
+ let captured_opnd = asm.and(block_handler_opnd, Opnd::Imm(!0x3));
+ let argv = asm.lea(asm.ctx.sp_opnd(-argc));
+ let ret = asm.ccall(
+ rb_vm_yield_with_cfunc as *const u8,
+ vec![EC, captured_opnd, argc.into(), argv],
+ );
+
+ asm.stack_pop(argc.try_into().unwrap());
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, ret);
+
+ // cfunc calls may corrupt types
+ asm.clear_local_types();
+
+ // Share the successor with other chains
+ jump_to_next_insn(jit, asm)
+ } else if comptime_handler.symbol_p() {
+ gen_counter_incr(jit, asm, Counter::invokeblock_symbol);
+ None
+ } else { // Proc
+ gen_counter_incr(jit, asm, Counter::invokeblock_proc);
+ None
+ }
+}
+
+fn gen_invokesuper(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Generate specialized code if possible
+ let cd = jit.get_arg(0).as_ptr();
+ if let Some(status) = gen_invokesuper_specialized(jit, asm, cd) {
+ return Some(status);
+ }
+
+    // Otherwise, fall back to dynamic dispatch using the interpreter's implementation of invokesuper
+ let blockiseq = jit.get_arg(1).as_iseq();
+ gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
+ extern "C" {
+ fn rb_vm_invokesuper(ec: EcPtr, cfp: CfpPtr, cd: VALUE, blockiseq: IseqPtr) -> VALUE;
+ }
+ asm.ccall(
+ rb_vm_invokesuper as *const u8,
+ vec![EC, CFP, (cd as usize).into(), VALUE(blockiseq as usize).into()],
+ )
+ })
+}
+
+fn gen_invokesuperforward(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Generate specialized code if possible
+ let cd = jit.get_arg(0).as_ptr();
+ if let Some(status) = gen_invokesuper_specialized(jit, asm, cd) {
+ return Some(status);
+ }
+
+    // Otherwise, fall back to dynamic dispatch using the interpreter's implementation of invokesuperforward
+ let blockiseq = jit.get_arg(1).as_iseq();
+ gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
+ extern "C" {
+ fn rb_vm_invokesuperforward(ec: EcPtr, cfp: CfpPtr, cd: VALUE, blockiseq: IseqPtr) -> VALUE;
+ }
+ asm.ccall(
+ rb_vm_invokesuperforward as *const u8,
+ vec![EC, CFP, (cd as usize).into(), VALUE(blockiseq as usize).into()],
+ )
+ })
+}
+
+fn gen_invokesuper_specialized(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ cd: *const rb_call_data,
+) -> Option<CodegenStatus> {
+ // Defer compilation so we can specialize on class of receiver
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ // Handle the last two branches of vm_caller_setup_arg_block
+ let block = if let Some(iseq) = jit.get_arg(1).as_optional_ptr() {
+ BlockHandler::BlockISeq(iseq)
+ } else {
+ BlockHandler::LEPSpecVal
+ };
+
+    // Fall back to dynamic dispatch if this callsite is megamorphic
+ if asm.ctx.get_chain_depth() >= SEND_MAX_DEPTH {
+ gen_counter_incr(jit, asm, Counter::invokesuper_megamorphic);
+ return None;
+ }
+
+ let me = unsafe { rb_vm_frame_method_entry(jit.get_cfp()) };
+ if me.is_null() {
+ gen_counter_incr(jit, asm, Counter::invokesuper_no_me);
+ return None;
+ }
+
+ // FIXME: We should track and invalidate this block when this cme is invalidated
+ let current_defined_class = unsafe { (*me).defined_class };
+ let mid = unsafe { get_def_original_id((*me).def) };
+
+ // vm_search_normal_superclass
+ let rbasic_ptr: *const RBasic = current_defined_class.as_ptr();
+ if current_defined_class.builtin_type() == RUBY_T_ICLASS
+ && unsafe { RB_TYPE_P((*rbasic_ptr).klass, RUBY_T_MODULE) && FL_TEST_RAW((*rbasic_ptr).klass, VALUE(RMODULE_IS_REFINEMENT.as_usize())) != VALUE(0) }
+ {
+ gen_counter_incr(jit, asm, Counter::invokesuper_refinement);
+ return None;
+ }
+ let comptime_superclass =
+ unsafe { rb_class_get_superclass(RCLASS_ORIGIN(current_defined_class)) };
+
+ let ci = unsafe { get_call_data_ci(cd) };
+ let argc: i32 = unsafe { vm_ci_argc(ci) }.try_into().unwrap();
+
+ let ci_flags = unsafe { vm_ci_flag(ci) };
+
+ // Don't JIT calls that aren't simple
+    // Note: we're not using VM_CALL_ARGS_SIMPLE because sometimes we pass a block.
+
+ if ci_flags & VM_CALL_KWARG != 0 {
+ gen_counter_incr(jit, asm, Counter::invokesuper_kwarg);
+ return None;
+ }
+ if ci_flags & VM_CALL_KW_SPLAT != 0 {
+ gen_counter_incr(jit, asm, Counter::invokesuper_kw_splat);
+ return None;
+ }
+ if ci_flags & VM_CALL_FORWARDING != 0 {
+ gen_counter_incr(jit, asm, Counter::invokesuper_forwarding);
+ return None;
+ }
+
+ // Ensure we haven't rebound this method onto an incompatible class.
+ // In the interpreter we try to avoid making this check by performing some
+ // cheaper calculations first, but since we specialize on the method entry
+    // and so only have to do this once at compile time, it's fine to always
+    // check and side-exit.
+ let comptime_recv = jit.peek_at_stack(&asm.ctx, argc as isize);
+ if unsafe { rb_obj_is_kind_of(comptime_recv, current_defined_class) } == VALUE(0) {
+ gen_counter_incr(jit, asm, Counter::invokesuper_defined_class_mismatch);
+ return None;
+ }
+
+ // Don't compile `super` on objects with singleton class to avoid retaining the receiver.
+ if VALUE(0) != unsafe { FL_TEST(comptime_recv.class_of(), VALUE(RUBY_FL_SINGLETON as usize)) } {
+ gen_counter_incr(jit, asm, Counter::invokesuper_singleton_class);
+ return None;
+ }
+
+ // Do method lookup
+ let cme = unsafe { rb_callable_method_entry(comptime_superclass, mid) };
+ if cme.is_null() {
+ gen_counter_incr(jit, asm, Counter::invokesuper_no_cme);
+ return None;
+ }
+
+ // Check that we'll be able to write this method dispatch before generating checks
+ let cme_def_type = unsafe { get_cme_def_type(cme) };
+ if cme_def_type != VM_METHOD_TYPE_ISEQ && cme_def_type != VM_METHOD_TYPE_CFUNC {
+ // others unimplemented
+ gen_counter_incr(jit, asm, Counter::invokesuper_not_iseq_or_cfunc);
+ return None;
+ }
+
+ asm_comment!(asm, "guard known me");
+ let lep_opnd = gen_get_lep(jit, asm);
+ let ep_me_opnd = Opnd::mem(
+ 64,
+ lep_opnd,
+ SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_ME_CREF,
+ );
+
+ let me_as_value = VALUE(me as usize);
+ asm.cmp(ep_me_opnd, me_as_value.into());
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::guard_invokesuper_me_changed,
+ );
+
+ // We need to assume that both our current method entry and the super
+ // method entry we invoke remain stable
+ jit.assume_method_lookup_stable(asm, me);
+ jit.assume_method_lookup_stable(asm, cme);
+
+ // Method calls may corrupt types
+ asm.clear_local_types();
+
+ match cme_def_type {
+ VM_METHOD_TYPE_ISEQ => {
+ let iseq = unsafe { get_def_iseq_ptr((*cme).def) };
+ let frame_type = VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL;
+ perf_call! { gen_send_iseq(jit, asm, iseq, ci, frame_type, None, cme, Some(block), ci_flags, argc, None) }
+ }
+ VM_METHOD_TYPE_CFUNC => {
+ perf_call! { gen_send_cfunc(jit, asm, ci, cme, Some(block), None, ci_flags, argc) }
+ }
+ _ => unreachable!(),
+ }
+}
+
+fn gen_leave(
+ _jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Only the return value should be on the stack
+ assert_eq!(1, asm.ctx.get_stack_size(), "leave instruction expects stack size 1, but was: {}", asm.ctx.get_stack_size());
+
+ // Check for interrupts
+ gen_check_ints(asm, Counter::leave_se_interrupt);
+
+ // Pop the current frame (ec->cfp++)
+ // Note: the return PC is already in the previous CFP
+ asm_comment!(asm, "pop stack frame");
+ let incr_cfp = asm.add(CFP, RUBY_SIZEOF_CONTROL_FRAME.into());
+ asm.mov(CFP, incr_cfp);
+ asm.mov(Opnd::mem(64, EC, RUBY_OFFSET_EC_CFP as i32), CFP);
+
+ // Load the return value
+ let retval_opnd = asm.stack_pop(1);
+
+ // Move the return value into the C return register
+ asm.mov(C_RET_OPND, retval_opnd);
+
+ // Jump to the JIT return address on the frame that was just popped.
+ // There are a few possible jump targets:
+ // - gen_leave_exit() and gen_leave_exception(), for C callers
+ // - Return context set up by gen_send_iseq()
+    // We don't write the return value to stack memory here like the interpreter does;
+    // each jump target does it as necessary.
+ let offset_to_jit_return =
+ -(RUBY_SIZEOF_CONTROL_FRAME as i32) + RUBY_OFFSET_CFP_JIT_RETURN;
+ asm.jmp_opnd(Opnd::mem(64, CFP, offset_to_jit_return));
+
+ Some(EndBlock)
+}
+
+fn gen_getglobal(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let gid = jit.get_arg(0).as_usize();
+
+ // Save the PC and SP because we might make a Ruby call for warning
+ jit_prepare_non_leaf_call(jit, asm);
+
+ let val_opnd = asm.ccall(
+ rb_gvar_get as *const u8,
+ vec![ gid.into() ]
+ );
+
+ let top = asm.stack_push(Type::Unknown);
+ asm.mov(top, val_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn gen_setglobal(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let gid = jit.get_arg(0).as_usize();
+
+ // Save the PC and SP because we might make a Ruby call for
+ // Kernel#set_trace_var
+ jit_prepare_non_leaf_call(jit, asm);
+
+ let val = asm.stack_opnd(0);
+ asm.ccall(
+ rb_gvar_set as *const u8,
+ vec![
+ gid.into(),
+ val,
+ ],
+ );
+ asm.stack_pop(1); // Keep it during ccall for GC
+
+ Some(KeepCompiling)
+}
+
+fn gen_anytostring(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Save the PC and SP since we might call #to_s
+ jit_prepare_non_leaf_call(jit, asm);
+
+ let str = asm.stack_opnd(0);
+ let val = asm.stack_opnd(1);
+
+ let val = asm.ccall(rb_obj_as_string_result as *const u8, vec![str, val]);
+ asm.stack_pop(2); // Keep them during ccall for GC
+
+ // Push the return value
+ let stack_ret = asm.stack_push(Type::TString);
+ asm.mov(stack_ret, val);
+
+ Some(KeepCompiling)
+}
+
+fn gen_objtostring(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ let recv = asm.stack_opnd(0);
+ let comptime_recv = jit.peek_at_stack(&asm.ctx, 0);
+
+ if unsafe { RB_TYPE_P(comptime_recv, RUBY_T_STRING) } {
+ jit_guard_known_klass(
+ jit,
+ asm,
+ recv,
+ recv.into(),
+ comptime_recv,
+ SEND_MAX_DEPTH,
+ Counter::objtostring_not_string,
+ );
+
+ // No work needed. The string value is already on the top of the stack.
+ Some(KeepCompiling)
+ } else if unsafe { RB_TYPE_P(comptime_recv, RUBY_T_SYMBOL) } && assume_method_basic_definition(jit, asm, comptime_recv.class_of(), ID!(to_s)) {
+ jit_guard_known_klass(
+ jit,
+ asm,
+ recv,
+ recv.into(),
+ comptime_recv,
+ SEND_MAX_DEPTH,
+ Counter::objtostring_not_string,
+ );
+
+ extern "C" {
+ fn rb_sym2str(sym: VALUE) -> VALUE;
+ }
+
+        // Same optimization done in the interpreter: rb_sym_to_s() allocates a
+        // mutable string, but since we are only going to use this string for
+        // interpolation, it's fine to use the frozen string.
+ // rb_sym2str does not allocate.
+ let sym = recv;
+ let str = asm.ccall(rb_sym2str as *const u8, vec![sym]);
+ asm.stack_pop(1);
+
+ // Push the return value
+ let stack_ret = asm.stack_push(Type::TString);
+ asm.mov(stack_ret, str);
+
+ Some(KeepCompiling)
+ } else {
+ let cd = jit.get_arg(0).as_ptr();
+ perf_call! { gen_send_general(jit, asm, cd, None) }
+ }
+}
+
+fn gen_intern(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // Save the PC and SP because we might allocate
+ jit_prepare_call_with_gc(jit, asm);
+
+ let str = asm.stack_opnd(0);
+ let sym = asm.ccall(rb_str_intern as *const u8, vec![str]);
+ asm.stack_pop(1); // Keep it during ccall for GC
+
+ // Push the return value
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, sym);
+
+ Some(KeepCompiling)
+}
+
+fn gen_toregexp(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let opt = jit.get_arg(0).as_i64();
+ let cnt = jit.get_arg(1).as_usize();
+
+ // Save the PC and SP because this allocates an object and could
+ // raise an exception.
+ jit_prepare_non_leaf_call(jit, asm);
+
+ let values_ptr = asm.lea(asm.ctx.sp_opnd(-(cnt as i32)));
+
+ let ary = asm.ccall(
+ rb_ary_tmp_new_from_values as *const u8,
+ vec![
+ Opnd::Imm(0),
+ cnt.into(),
+ values_ptr,
+ ]
+ );
+ asm.stack_pop(cnt); // Let ccall spill them
+
+ // Save the array so we can clear it later
+ asm.cpush(ary);
+ asm.cpush(ary); // Alignment
+
+ let val = asm.ccall(
+ rb_reg_new_ary as *const u8,
+ vec![
+ ary,
+ Opnd::Imm(opt),
+ ]
+ );
+
+ // The actual regex is in RAX now. Pop the temp array from
+ // rb_ary_tmp_new_from_values into C arg regs so we can clear it
+ let ary = asm.cpop(); // Alignment
+ asm.cpop_into(ary);
+
+ // The value we want to push on the stack is in RAX right now
+ let stack_ret = asm.stack_push(Type::UnknownHeap);
+ asm.mov(stack_ret, val);
+
+ // Clear the temp array.
+ asm.ccall(rb_ary_clear as *const u8, vec![ary]);
+
+ Some(KeepCompiling)
+}
+
+fn gen_getspecial(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // This takes two arguments, key and type
+ // key is only used when type == 0
+ // A non-zero type determines which type of backref to fetch
+ //rb_num_t key = jit.jit_get_arg(0);
+ let rtype = jit.get_arg(1).as_u64();
+
+ if rtype == 0 {
+ // not yet implemented
+ return None;
+ } else if rtype & 0x01 != 0 {
+ // Fetch a "special" backref based on a char encoded by shifting by 1
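+        // e.g. $& is encoded as ('&' << 1) | 1 == 0x4d (illustrative arithmetic)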
+
+ // Can raise if matchdata uninitialized
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // call rb_backref_get()
+ asm_comment!(asm, "rb_backref_get");
+ let backref = asm.ccall(rb_backref_get as *const u8, vec![]);
+
+ let rt_u8: u8 = (rtype >> 1).try_into().unwrap();
+ let val = match rt_u8.into() {
+ '&' => {
+ asm_comment!(asm, "rb_reg_last_match");
+ asm.ccall(rb_reg_last_match as *const u8, vec![backref])
+ }
+ '`' => {
+ asm_comment!(asm, "rb_reg_match_pre");
+ asm.ccall(rb_reg_match_pre as *const u8, vec![backref])
+ }
+ '\'' => {
+ asm_comment!(asm, "rb_reg_match_post");
+ asm.ccall(rb_reg_match_post as *const u8, vec![backref])
+ }
+ '+' => {
+ asm_comment!(asm, "rb_reg_match_last");
+ asm.ccall(rb_reg_match_last as *const u8, vec![backref])
+ }
+ _ => panic!("invalid back-ref"),
+ };
+
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val);
+
+ Some(KeepCompiling)
+ } else {
+ // Fetch the N-th match from the last backref based on type shifted by 1
+
+ // Can raise if matchdata uninitialized
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // call rb_backref_get()
+ asm_comment!(asm, "rb_backref_get");
+ let backref = asm.ccall(rb_backref_get as *const u8, vec![]);
+
+ // rb_reg_nth_match((int)(type >> 1), backref);
+ asm_comment!(asm, "rb_reg_nth_match");
+ let val = asm.ccall(
+ rb_reg_nth_match as *const u8,
+ vec![
+ Opnd::Imm((rtype >> 1).try_into().unwrap()),
+ backref,
+ ]
+ );
+
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val);
+
+ Some(KeepCompiling)
+ }
+}
+
+fn gen_getclassvariable(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // rb_vm_getclassvariable can raise exceptions.
+ jit_prepare_non_leaf_call(jit, asm);
+
+ let val_opnd = asm.ccall(
+ rb_vm_getclassvariable as *const u8,
+ vec![
+ VALUE(jit.iseq as usize).into(),
+ CFP,
+ Opnd::UImm(jit.get_arg(0).as_u64()),
+ Opnd::UImm(jit.get_arg(1).as_u64()),
+ ],
+ );
+
+ let top = asm.stack_push(Type::Unknown);
+ asm.mov(top, val_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn gen_setclassvariable(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // rb_vm_setclassvariable can raise exceptions.
+ jit_prepare_non_leaf_call(jit, asm);
+
+ let val = asm.stack_opnd(0);
+ asm.ccall(
+ rb_vm_setclassvariable as *const u8,
+ vec![
+ VALUE(jit.iseq as usize).into(),
+ CFP,
+ Opnd::UImm(jit.get_arg(0).as_u64()),
+ val,
+ Opnd::UImm(jit.get_arg(1).as_u64()),
+ ],
+ );
+ asm.stack_pop(1); // Keep it during ccall for GC
+
+ Some(KeepCompiling)
+}
+
+fn gen_getconstant(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+
+ let id = jit.get_arg(0).as_usize();
+
+ // vm_get_ev_const can raise exceptions.
+ jit_prepare_non_leaf_call(jit, asm);
+
+ let allow_nil_opnd = asm.stack_opnd(0);
+ let klass_opnd = asm.stack_opnd(1);
+
+ extern "C" {
+ fn rb_vm_get_ev_const(ec: EcPtr, klass: VALUE, id: ID, allow_nil: VALUE) -> VALUE;
+ }
+
+ let val_opnd = asm.ccall(
+ rb_vm_get_ev_const as *const u8,
+ vec![
+ EC,
+ klass_opnd,
+ id.into(),
+ allow_nil_opnd
+ ],
+ );
+ asm.stack_pop(2); // Keep them during ccall for GC
+
+ let top = asm.stack_push(Type::Unknown);
+ asm.mov(top, val_opnd);
+
+ Some(KeepCompiling)
+}
+
+fn gen_opt_getconstant_path(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let const_cache_as_value = jit.get_arg(0);
+ let ic: *const iseq_inline_constant_cache = const_cache_as_value.as_ptr();
+ let idlist: *const ID = unsafe { (*ic).segments };
+
+ // Make sure there is an exit for this block as the interpreter might want
+ // to invalidate this block from yjit_constant_ic_update().
+ jit_ensure_block_entry_exit(jit, asm)?;
+
+ // See vm_ic_hit_p(). The same conditions are checked in yjit_constant_ic_update().
+    // If a cache is not filled, fall back to the general C call.
+ let ice = unsafe { (*ic).entry };
+ if ice.is_null() {
+ // Prepare for const_missing
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // If this does not trigger const_missing, vm_ic_update will invalidate this block.
+ extern "C" {
+ fn rb_vm_opt_getconstant_path(ec: EcPtr, cfp: CfpPtr, ic: *const u8) -> VALUE;
+ }
+ let val = asm.ccall(
+ rb_vm_opt_getconstant_path as *const u8,
+ vec![EC, CFP, Opnd::const_ptr(ic as *const u8)],
+ );
+
+ let stack_top = asm.stack_push(Type::Unknown);
+ asm.store(stack_top, val);
+
+ return jump_to_next_insn(jit, asm);
+ }
+
+ let cref_sensitive = !unsafe { (*ice).ic_cref }.is_null();
+ let is_shareable = unsafe { rb_yjit_constcache_shareable(ice) };
+ let needs_checks = cref_sensitive || (!is_shareable && !assume_single_ractor_mode(jit, asm));
+
+ if needs_checks {
+ // Cache is keyed on a certain lexical scope. Use the interpreter's cache.
+ let inline_cache = asm.load(Opnd::const_ptr(ic as *const u8));
+
+ // Call function to verify the cache. It doesn't allocate or call methods.
+ // This includes a check for Ractor safety
+ let ret_val = asm.ccall(
+ rb_vm_ic_hit_p as *const u8,
+ vec![inline_cache, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_EP)]
+ );
+
+ // Check the result. SysV only specifies one byte for _Bool return values,
+ // so it's important we only check one bit to ignore the higher bits in the register.
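+        // e.g. a callee may return true by setting only the low byte of the
+        // return register while leaving stale data in the upper bits
+        // (illustrative of the hazard a full-width compare would hit).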
+ asm.test(ret_val, 1.into());
+ asm.jz(Target::side_exit(Counter::opt_getconstant_path_ic_miss));
+
+ let inline_cache = asm.load(Opnd::const_ptr(ic as *const u8));
+
+ let ic_entry = asm.load(Opnd::mem(
+ 64,
+ inline_cache,
+ RUBY_OFFSET_IC_ENTRY
+ ));
+
+ let ic_entry_val = asm.load(Opnd::mem(
+ 64,
+ ic_entry,
+ RUBY_OFFSET_ICE_VALUE
+ ));
+
+ // Push ic->entry->value
+ let stack_top = asm.stack_push(Type::Unknown);
+ asm.store(stack_top, ic_entry_val);
+ } else {
+ // Invalidate output code on any constant writes associated with
+ // constants referenced within the current block.
+ jit.assume_stable_constant_names(asm, idlist);
+
+ jit_putobject(asm, unsafe { (*ice).value });
+ }
+
+ jump_to_next_insn(jit, asm)
+}
+
+// Push the explicit block parameter onto the temporary stack. Part of the
+// interpreter's scheme for avoiding Proc allocations when delegating
+// explicit block parameters.
+fn gen_getblockparamproxy(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ if !jit.at_compile_target() {
+ return jit.defer_compilation(asm);
+ }
+
+ // EP level
+ let level = jit.get_arg(1).as_u32();
+
+ // Peek at the block handler so we can check whether it's nil
+ let comptime_handler = jit.peek_at_block_handler(level);
+
+ // Filter for the 4 cases we currently handle
+ if !(comptime_handler.as_u64() == 0 || // no block given
+ comptime_handler.as_u64() & 0x3 == 0x1 || // iseq block (no associated GC managed object)
+ comptime_handler.as_u64() & 0x3 == 0x3 || // ifunc block (no associated GC managed object)
+ unsafe { rb_obj_is_proc(comptime_handler) }.test() // block is a Proc
+ ) {
+ // Missing the symbol case, where we basically need to call Symbol#to_proc at runtime
+ gen_counter_incr(jit, asm, Counter::gbpp_unsupported_type);
+ return None;
+ }
+
+ // Load environment pointer EP from CFP
+ let ep_opnd = gen_get_ep(asm, level);
+
+ // Bail when VM_ENV_FLAGS(ep, VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM) is non zero
+ let flag_check = Opnd::mem(
+ 64,
+ ep_opnd,
+ SIZEOF_VALUE_I32 * (VM_ENV_DATA_INDEX_FLAGS as i32),
+ );
+ asm.test(flag_check, VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM.into());
+ asm.jnz(Target::side_exit(Counter::gbpp_block_param_modified));
+
+ // Load the block handler for the current frame
+ // note, VM_ASSERT(VM_ENV_LOCAL_P(ep))
+ let block_handler = asm.load(
+ Opnd::mem(64, ep_opnd, SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL)
+ );
+
+ // Use block handler sample to guide specialization...
+ // NOTE: we use jit_chain_guard() in this decision tree, and since
+    // there are only a few cases, it should never reach the depth limit or
+    // use the exit counter we pass to it.
+ //
+ // No block given
+ if comptime_handler.as_u64() == 0 {
+ // Bail if there is a block handler
+ asm.cmp(block_handler, Opnd::UImm(0));
+
+ jit_chain_guard(
+ JCC_JNZ,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::gbpp_block_handler_not_none,
+ );
+
+ jit_putobject(asm, Qnil);
+ } else if comptime_handler.as_u64() & 0x1 == 0x1 {
+ // This handles two cases which are nearly identical
+ // Block handler is a tagged pointer. Look at the tag.
+ // VM_BH_ISEQ_BLOCK_P(): block_handler & 0x03 == 0x01
+ // VM_BH_IFUNC_P(): block_handler & 0x03 == 0x03
+ // So to check for either of those cases we can use: val & 0x1 == 0x1
+ const _: () = assert!(RUBY_SYMBOL_FLAG & 1 == 0, "guard below rejects symbol block handlers");
+ // Procs are aligned heap pointers so testing the bit rejects them too.
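+        //
+        // Worked example (illustrative values): an iseq handler such as
+        // 0x...5 has low bits 0b01 and an ifunc handler such as 0x...7 has
+        // 0b11; both set bit 0, while a Proc, an aligned heap pointer with
+        // low bits 0b00, does not.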
+
+ asm.test(block_handler, 0x1.into());
+ jit_chain_guard(
+ JCC_JZ,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::gbpp_block_handler_not_iseq,
+ );
+
+ // Push rb_block_param_proxy. It's a root, so no need to use jit_mov_gc_ptr.
+ assert!(!unsafe { rb_block_param_proxy }.special_const_p());
+
+ let top = asm.stack_push(Type::BlockParamProxy);
+ asm.mov(top, Opnd::const_ptr(unsafe { rb_block_param_proxy }.as_ptr()));
+ } else if unsafe { rb_obj_is_proc(comptime_handler) }.test() {
+ // The block parameter is a Proc
+ c_callable! {
+ // We can't hold values across C calls due to a backend limitation,
+ // so we'll use this thin wrapper around rb_obj_is_proc().
+ fn is_proc(object: VALUE) -> VALUE {
+ if unsafe { rb_obj_is_proc(object) }.test() {
+                    // VM_BH_TO_PROC() is the identity function.
+ object
+ } else {
+ Qfalse
+ }
+ }
+ }
+
+ // Simple predicate, no need to jit_prepare_non_leaf_call()
+ let proc_or_false = asm.ccall(is_proc as _, vec![block_handler]);
+
+ // Guard for proc
+ asm.cmp(proc_or_false, Qfalse.into());
+ jit_chain_guard(
+ JCC_JE,
+ jit,
+ asm,
+ SEND_MAX_DEPTH,
+ Counter::gbpp_block_handler_not_proc,
+ );
+
+ let top = asm.stack_push(Type::Unknown);
+ asm.mov(top, proc_or_false);
+ } else {
+ unreachable!("absurd given initial filtering");
+ }
+
+ jump_to_next_insn(jit, asm)
+}
+
+fn gen_getblockparam(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ // EP level
+ let level = jit.get_arg(1).as_u32();
+
+ // Save the PC and SP because we might allocate
+ jit_prepare_call_with_gc(jit, asm);
+ asm.spill_regs(); // For ccall. Unconditionally spill them for RegMappings consistency.
+
+ // A mirror of the interpreter code. Checking for the case
+ // where it's pushing rb_block_param_proxy.
+
+ // Load environment pointer EP from CFP
+ let ep_opnd = gen_get_ep(asm, level);
+
+ // Bail when VM_ENV_FLAGS(ep, VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM) is non zero
+ let flag_check = Opnd::mem(64, ep_opnd, SIZEOF_VALUE_I32 * (VM_ENV_DATA_INDEX_FLAGS as i32));
+ // FIXME: This is testing bits in the same place that the WB check is testing.
+ // We should combine these at some point
+ asm.test(flag_check, VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM.into());
+
+ // If the frame flag has been modified, then the actual proc value is
+ // already in the EP and we should just use the value.
+ let frame_flag_modified = asm.new_label("frame_flag_modified");
+ asm.jnz(frame_flag_modified);
+
+ // This instruction writes the block handler to the EP. If we need to
+ // fire a write barrier for the write, then exit (we'll let the
+ // interpreter handle it so it can fire the write barrier).
+ // flags & VM_ENV_FLAG_WB_REQUIRED
+ let flags_opnd = Opnd::mem(
+ 64,
+ ep_opnd,
+ SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_FLAGS as i32,
+ );
+ asm.test(flags_opnd, VM_ENV_FLAG_WB_REQUIRED.into());
+
+ // if (flags & VM_ENV_FLAG_WB_REQUIRED) != 0
+ asm.jnz(Target::side_exit(Counter::gbp_wb_required));
+
+ // Convert the block handler in to a proc
+ // call rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
+ let proc = asm.ccall(
+ rb_vm_bh_to_procval as *const u8,
+ vec![
+ EC,
+ // The block handler for the current frame
+ // note, VM_ASSERT(VM_ENV_LOCAL_P(ep))
+ Opnd::mem(
+ 64,
+ ep_opnd,
+ SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL,
+ ),
+ ]
+ );
+
+ // Load environment pointer EP from CFP (again)
+ let ep_opnd = gen_get_ep(asm, level);
+
+ // Write the value at the environment pointer
+ let idx = jit.get_arg(0).as_i32();
+ let offs = -(SIZEOF_VALUE_I32 * idx);
+ asm.mov(Opnd::mem(64, ep_opnd, offs), proc);
+
+ // Set the frame modified flag
+ let flag_check = Opnd::mem(64, ep_opnd, SIZEOF_VALUE_I32 * (VM_ENV_DATA_INDEX_FLAGS as i32));
+ let modified_flag = asm.or(flag_check, VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM.into());
+ asm.store(flag_check, modified_flag);
+
+ asm.write_label(frame_flag_modified);
+
+ // Push the proc on the stack
+ let stack_ret = asm.stack_push(Type::Unknown);
+ let ep_opnd = gen_get_ep(asm, level);
+ asm.mov(stack_ret, Opnd::mem(64, ep_opnd, offs));
+
+ Some(KeepCompiling)
+}
+
+fn gen_invokebuiltin(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let bf: *const rb_builtin_function = jit.get_arg(0).as_ptr();
+ let bf_argc: usize = unsafe { (*bf).argc }.try_into().expect("non negative argc");
+
+ // ec, self, and arguments
+ if bf_argc + 2 > C_ARG_OPNDS.len() {
+ incr_counter!(invokebuiltin_too_many_args);
+ return None;
+ }
+
+    // If the calls don't allocate, do they need an up-to-date PC and SP?
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Call the builtin func (ec, recv, arg1, arg2, ...)
+ let mut args = vec![EC, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF)];
+
+ // Copy arguments from locals
+ for i in 0..bf_argc {
+ let stack_opnd = asm.stack_opnd((bf_argc - i - 1) as i32);
+ args.push(stack_opnd);
+ }
+
+ let val = asm.ccall(unsafe { (*bf).func_ptr } as *const u8, args);
+
+ // Push the return value
+ asm.stack_pop(bf_argc);
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val);
+
+ Some(KeepCompiling)
+}
+
+// opt_invokebuiltin_delegate calls a builtin function, like
+// invokebuiltin does, but instead of taking arguments from the top of the
+// stack uses the argument locals (and self) from the current method.
+fn gen_opt_invokebuiltin_delegate(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+) -> Option<CodegenStatus> {
+ let bf: *const rb_builtin_function = jit.get_arg(0).as_ptr();
+ let bf_argc = unsafe { (*bf).argc };
+ let start_index = jit.get_arg(1).as_i32();
+
+ // ec, self, and arguments
+ if bf_argc + 2 > (C_ARG_OPNDS.len() as i32) {
+ incr_counter!(invokebuiltin_too_many_args);
+ return None;
+ }
+
+    // If the calls don't allocate, do they need an up-to-date PC and SP?
+ jit_prepare_non_leaf_call(jit, asm);
+
+ // Call the builtin func (ec, recv, arg1, arg2, ...)
+ let mut args = vec![EC, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF)];
+
+ // Copy arguments from locals
+ if bf_argc > 0 {
+ // Load environment pointer EP from CFP
+ let ep = asm.load(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_EP));
+
+ for i in 0..bf_argc {
+ let table_size = unsafe { get_iseq_body_local_table_size(jit.iseq) };
+ let offs: i32 = -(table_size as i32) - (VM_ENV_DATA_SIZE as i32) + 1 + start_index + i;
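+            // e.g. (illustrative) with table_size == 2, VM_ENV_DATA_SIZE == 3,
+            // start_index == 0, and i == 0, offs is -2 - 3 + 1 = -4, i.e. the
+            // first argument local lives at ep[-4].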
+ let local_opnd = Opnd::mem(64, ep, offs * SIZEOF_VALUE_I32);
+ args.push(local_opnd);
+ }
+ }
+ let val = asm.ccall(unsafe { (*bf).func_ptr } as *const u8, args);
+
+ // Push the return value
+ let stack_ret = asm.stack_push(Type::Unknown);
+ asm.mov(stack_ret, val);
+
+ Some(KeepCompiling)
+}
+
+/// Maps a YARV opcode to a code generation function (if supported)
+fn get_gen_fn(opcode: VALUE) -> Option<InsnGenFn> {
+ let VALUE(opcode) = opcode;
+ let opcode = opcode as ruby_vminsn_type;
+ assert!(opcode < VM_INSTRUCTION_SIZE);
+
+ match opcode {
+ YARVINSN_nop => Some(gen_nop),
+ YARVINSN_pop => Some(gen_pop),
+ YARVINSN_dup => Some(gen_dup),
+ YARVINSN_dupn => Some(gen_dupn),
+ YARVINSN_swap => Some(gen_swap),
+ YARVINSN_opt_reverse => Some(gen_opt_reverse),
+ YARVINSN_putnil => Some(gen_putnil),
+ YARVINSN_putobject => Some(gen_putobject),
+ YARVINSN_putobject_INT2FIX_0_ => Some(gen_putobject_int2fix),
+ YARVINSN_putobject_INT2FIX_1_ => Some(gen_putobject_int2fix),
+ YARVINSN_putself => Some(gen_putself),
+ YARVINSN_putspecialobject => Some(gen_putspecialobject),
+ YARVINSN_setn => Some(gen_setn),
+ YARVINSN_topn => Some(gen_topn),
+ YARVINSN_adjuststack => Some(gen_adjuststack),
+
+ YARVINSN_getlocal => Some(gen_getlocal),
+ YARVINSN_getlocal_WC_0 => Some(gen_getlocal_wc0),
+ YARVINSN_getlocal_WC_1 => Some(gen_getlocal_wc1),
+ YARVINSN_setlocal => Some(gen_setlocal),
+ YARVINSN_setlocal_WC_0 => Some(gen_setlocal_wc0),
+ YARVINSN_setlocal_WC_1 => Some(gen_setlocal_wc1),
+ YARVINSN_opt_plus => Some(gen_opt_plus),
+ YARVINSN_opt_minus => Some(gen_opt_minus),
+ YARVINSN_opt_and => Some(gen_opt_and),
+ YARVINSN_opt_or => Some(gen_opt_or),
+ YARVINSN_newhash => Some(gen_newhash),
+ YARVINSN_duphash => Some(gen_duphash),
+ YARVINSN_newarray => Some(gen_newarray),
+ YARVINSN_duparray => Some(gen_duparray),
+ YARVINSN_checktype => Some(gen_checktype),
+ YARVINSN_opt_lt => Some(gen_opt_lt),
+ YARVINSN_opt_le => Some(gen_opt_le),
+ YARVINSN_opt_gt => Some(gen_opt_gt),
+ YARVINSN_opt_ge => Some(gen_opt_ge),
+ YARVINSN_opt_mod => Some(gen_opt_mod),
+ YARVINSN_opt_ary_freeze => Some(gen_opt_ary_freeze),
+ YARVINSN_opt_hash_freeze => Some(gen_opt_hash_freeze),
+ YARVINSN_opt_str_freeze => Some(gen_opt_str_freeze),
+ YARVINSN_opt_str_uminus => Some(gen_opt_str_uminus),
+ YARVINSN_opt_duparray_send => Some(gen_opt_duparray_send),
+ YARVINSN_opt_newarray_send => Some(gen_opt_newarray_send),
+ YARVINSN_splatarray => Some(gen_splatarray),
+ YARVINSN_splatkw => Some(gen_splatkw),
+ YARVINSN_concatarray => Some(gen_concatarray),
+ YARVINSN_concattoarray => Some(gen_concattoarray),
+ YARVINSN_pushtoarray => Some(gen_pushtoarray),
+ YARVINSN_newrange => Some(gen_newrange),
+ YARVINSN_putstring => Some(gen_putstring),
+ YARVINSN_putchilledstring => Some(gen_putchilledstring),
+ YARVINSN_expandarray => Some(gen_expandarray),
+ YARVINSN_defined => Some(gen_defined),
+ YARVINSN_definedivar => Some(gen_definedivar),
+ YARVINSN_checkmatch => Some(gen_checkmatch),
+ YARVINSN_checkkeyword => Some(gen_checkkeyword),
+ YARVINSN_concatstrings => Some(gen_concatstrings),
+ YARVINSN_getinstancevariable => Some(gen_getinstancevariable),
+ YARVINSN_setinstancevariable => Some(gen_setinstancevariable),
+
+ YARVINSN_opt_eq => Some(gen_opt_eq),
+ YARVINSN_opt_neq => Some(gen_opt_neq),
+ YARVINSN_opt_aref => Some(gen_opt_aref),
+ YARVINSN_opt_aset => Some(gen_opt_aset),
+ YARVINSN_opt_mult => Some(gen_opt_mult),
+ YARVINSN_opt_div => Some(gen_opt_div),
+ YARVINSN_opt_ltlt => Some(gen_opt_ltlt),
+ YARVINSN_opt_nil_p => Some(gen_opt_nil_p),
+ YARVINSN_opt_empty_p => Some(gen_opt_empty_p),
+ YARVINSN_opt_succ => Some(gen_opt_succ),
+ YARVINSN_opt_not => Some(gen_opt_not),
+ YARVINSN_opt_size => Some(gen_opt_size),
+ YARVINSN_opt_length => Some(gen_opt_length),
+ YARVINSN_opt_regexpmatch2 => Some(gen_opt_regexpmatch2),
+ YARVINSN_getconstant => Some(gen_getconstant),
+ YARVINSN_opt_getconstant_path => Some(gen_opt_getconstant_path),
+ YARVINSN_invokebuiltin => Some(gen_invokebuiltin),
+ YARVINSN_opt_invokebuiltin_delegate => Some(gen_opt_invokebuiltin_delegate),
+ YARVINSN_opt_invokebuiltin_delegate_leave => Some(gen_opt_invokebuiltin_delegate),
+ YARVINSN_opt_case_dispatch => Some(gen_opt_case_dispatch),
+ YARVINSN_branchif => Some(gen_branchif),
+ YARVINSN_branchunless => Some(gen_branchunless),
+ YARVINSN_branchnil => Some(gen_branchnil),
+ YARVINSN_throw => Some(gen_throw),
+ YARVINSN_jump => Some(gen_jump),
+ YARVINSN_opt_new => Some(gen_opt_new),
+
+ YARVINSN_getblockparamproxy => Some(gen_getblockparamproxy),
+ YARVINSN_getblockparam => Some(gen_getblockparam),
+ YARVINSN_opt_send_without_block => Some(gen_opt_send_without_block),
+ YARVINSN_send => Some(gen_send),
+ YARVINSN_sendforward => Some(gen_sendforward),
+ YARVINSN_invokeblock => Some(gen_invokeblock),
+ YARVINSN_invokesuper => Some(gen_invokesuper),
+ YARVINSN_invokesuperforward => Some(gen_invokesuperforward),
+ YARVINSN_leave => Some(gen_leave),
+
+ YARVINSN_getglobal => Some(gen_getglobal),
+ YARVINSN_setglobal => Some(gen_setglobal),
+ YARVINSN_anytostring => Some(gen_anytostring),
+ YARVINSN_objtostring => Some(gen_objtostring),
+ YARVINSN_intern => Some(gen_intern),
+ YARVINSN_toregexp => Some(gen_toregexp),
+ YARVINSN_getspecial => Some(gen_getspecial),
+ YARVINSN_getclassvariable => Some(gen_getclassvariable),
+ YARVINSN_setclassvariable => Some(gen_setclassvariable),
+
+ // Unimplemented opcode, YJIT won't generate code for this yet
+ _ => None,
+ }
+}
+
+/// Return true when the codegen function generates code.
+/// known_recv_class has Some value when the caller has used jit_guard_known_klass().
+/// See [reg_method_codegen]
+type MethodGenFn = fn(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ ci: *const rb_callinfo,
+ cme: *const rb_callable_method_entry_t,
+ block: Option<BlockHandler>,
+ argc: i32,
+ known_recv_class: Option<VALUE>,
+) -> bool;
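+
+// A minimal sketch of a MethodGenFn, for illustration only: a hypothetical
+// specialization of a zero-argument method that always returns true, the same
+// shape as the jit_rb_true helper. It is not registered in the table below.
+#[allow(dead_code)]
+fn jit_example_always_true(
+    _jit: &mut JITState,
+    asm: &mut Assembler,
+    _ci: *const rb_callinfo,
+    _cme: *const rb_callable_method_entry_t,
+    _block: Option<BlockHandler>,
+    argc: i32,
+    _known_recv_class: Option<VALUE>,
+) -> bool {
+    if argc != 0 {
+        return false; // Decline; the caller falls back to the generic send path
+    }
+    asm.stack_pop(1); // Pop the receiver
+    let out = asm.stack_push(Type::True);
+    asm.mov(out, Qtrue.into());
+    true // Code was generated; the original cfunc is fully substituted
+}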
+
+/// Methods for generating code for hardcoded (usually C) methods
+static mut METHOD_CODEGEN_TABLE: Option<HashMap<usize, MethodGenFn>> = None;
+
+/// Register codegen functions for some Ruby core methods
+pub fn yjit_reg_method_codegen_fns() {
+ unsafe {
+ assert!(METHOD_CODEGEN_TABLE.is_none());
+ METHOD_CODEGEN_TABLE = Some(HashMap::default());
+
+ // Specialization for C methods. See the function's docs for details.
+ reg_method_codegen(rb_cBasicObject, "!", jit_rb_obj_not);
+
+ reg_method_codegen(rb_cNilClass, "nil?", jit_rb_true);
+ reg_method_codegen(rb_mKernel, "nil?", jit_rb_false);
+ reg_method_codegen(rb_mKernel, "is_a?", jit_rb_kernel_is_a);
+ reg_method_codegen(rb_mKernel, "kind_of?", jit_rb_kernel_is_a);
+ reg_method_codegen(rb_mKernel, "instance_of?", jit_rb_kernel_instance_of);
+
+ reg_method_codegen(rb_cBasicObject, "==", jit_rb_obj_equal);
+ reg_method_codegen(rb_cBasicObject, "equal?", jit_rb_obj_equal);
+ reg_method_codegen(rb_cBasicObject, "!=", jit_rb_obj_not_equal);
+ reg_method_codegen(rb_mKernel, "eql?", jit_rb_obj_equal);
+ reg_method_codegen(rb_cModule, "==", jit_rb_obj_equal);
+ reg_method_codegen(rb_cModule, "===", jit_rb_mod_eqq);
+ reg_method_codegen(rb_cModule, "name", jit_rb_mod_name);
+ reg_method_codegen(rb_cSymbol, "==", jit_rb_obj_equal);
+ reg_method_codegen(rb_cSymbol, "===", jit_rb_obj_equal);
+ reg_method_codegen(rb_cInteger, "==", jit_rb_int_equal);
+ reg_method_codegen(rb_cInteger, "===", jit_rb_int_equal);
+
+ reg_method_codegen(rb_cInteger, "succ", jit_rb_int_succ);
+ reg_method_codegen(rb_cInteger, "pred", jit_rb_int_pred);
+ reg_method_codegen(rb_cInteger, "/", jit_rb_int_div);
+ reg_method_codegen(rb_cInteger, "<<", jit_rb_int_lshift);
+ reg_method_codegen(rb_cInteger, ">>", jit_rb_int_rshift);
+ reg_method_codegen(rb_cInteger, "^", jit_rb_int_xor);
+ reg_method_codegen(rb_cInteger, "[]", jit_rb_int_aref);
+
+ reg_method_codegen(rb_cFloat, "+", jit_rb_float_plus);
+ reg_method_codegen(rb_cFloat, "-", jit_rb_float_minus);
+ reg_method_codegen(rb_cFloat, "*", jit_rb_float_mul);
+ reg_method_codegen(rb_cFloat, "/", jit_rb_float_div);
+
+ reg_method_codegen(rb_cString, "dup", jit_rb_str_dup);
+ reg_method_codegen(rb_cString, "empty?", jit_rb_str_empty_p);
+ reg_method_codegen(rb_cString, "to_s", jit_rb_str_to_s);
+ reg_method_codegen(rb_cString, "to_str", jit_rb_str_to_s);
+ reg_method_codegen(rb_cString, "length", jit_rb_str_length);
+ reg_method_codegen(rb_cString, "size", jit_rb_str_length);
+ reg_method_codegen(rb_cString, "bytesize", jit_rb_str_bytesize);
+ reg_method_codegen(rb_cString, "getbyte", jit_rb_str_getbyte);
+ reg_method_codegen(rb_cString, "setbyte", jit_rb_str_setbyte);
+ reg_method_codegen(rb_cString, "byteslice", jit_rb_str_byteslice);
+ reg_method_codegen(rb_cString, "[]", jit_rb_str_aref_m);
+ reg_method_codegen(rb_cString, "slice", jit_rb_str_aref_m);
+ reg_method_codegen(rb_cString, "<<", jit_rb_str_concat);
+ reg_method_codegen(rb_cString, "+@", jit_rb_str_uplus);
+
+ reg_method_codegen(rb_cNilClass, "===", jit_rb_case_equal);
+ reg_method_codegen(rb_cTrueClass, "===", jit_rb_case_equal);
+ reg_method_codegen(rb_cFalseClass, "===", jit_rb_case_equal);
+
+ reg_method_codegen(rb_cArray, "empty?", jit_rb_ary_empty_p);
+ reg_method_codegen(rb_cArray, "length", jit_rb_ary_length);
+ reg_method_codegen(rb_cArray, "size", jit_rb_ary_length);
+ reg_method_codegen(rb_cArray, "<<", jit_rb_ary_push);
+
+ reg_method_codegen(rb_cHash, "empty?", jit_rb_hash_empty_p);
+
+ reg_method_codegen(rb_mKernel, "respond_to?", jit_obj_respond_to);
+ reg_method_codegen(rb_mKernel, "block_given?", jit_rb_f_block_given_p);
+ reg_method_codegen(rb_mKernel, "dup", jit_rb_obj_dup);
+
+ reg_method_codegen(rb_cClass, "superclass", jit_rb_class_superclass);
+
+ reg_method_codegen(rb_singleton_class(rb_cThread), "current", jit_thread_s_current);
+ }
+}
+
+/// Register a specialized codegen function for a particular method. Note that
+/// if the function returns true, the code it generates runs without a
+/// control frame and without interrupt checks, completely substituting the
+/// original implementation of the method. To avoid creating observable
+/// behavior changes, prefer targeting simple code paths that do not allocate
+/// and do not make method calls.
+///
+/// See also: [lookup_cfunc_codegen].
+fn reg_method_codegen(klass: VALUE, method_name: &str, gen_fn: MethodGenFn) {
+ let mid = unsafe { rb_intern2(method_name.as_ptr().cast(), method_name.len().try_into().unwrap()) };
+ let me = unsafe { rb_method_entry_at(klass, mid) };
+
+ if me.is_null() {
+ panic!("undefined optimized method!: {method_name}");
+ }
+
+    // For now, only cfuncs are supported (the me->cme cast is fine since we only read me->def->type).
+ debug_assert_eq!(VM_METHOD_TYPE_CFUNC, unsafe { get_cme_def_type(me.cast()) });
+
+ let method_serial = unsafe {
+ let def = (*me).def;
+ get_def_method_serial(def)
+ };
+
+ unsafe { METHOD_CODEGEN_TABLE.as_mut().unwrap().insert(method_serial, gen_fn); }
+}
+
+pub fn yjit_shutdown_free_codegen_table() {
+ unsafe { METHOD_CODEGEN_TABLE = None; };
+}
+
+/// Global state needed for code generation
+pub struct CodegenGlobals {
+ /// Flat vector of bits to store compressed context data
+ context_data: BitVector,
+
+ /// Inline code block (fast path)
+ inline_cb: CodeBlock,
+
+ /// Outlined code block (slow path)
+ outlined_cb: OutlinedCb,
+
+ /// Code for exiting back to the interpreter from the leave instruction
+ leave_exit_code: CodePtr,
+
+ /// Code for exiting back to the interpreter after handling an exception
+ leave_exception_code: CodePtr,
+
+    /// For exiting from a YJIT frame from branch_stub_hit().
+    /// Filled by gen_stub_exit().
+ stub_exit_code: CodePtr,
+
+    /// For servicing branch stubs
+ branch_stub_hit_trampoline: CodePtr,
+
+    /// For servicing entry stubs
+ entry_stub_hit_trampoline: CodePtr,
+
+    /// Code for the full logic of returning from a C method and exiting to the interpreter
+ outline_full_cfunc_return_pos: CodePtr,
+
+ /// For implementing global code invalidation
+ global_inval_patches: Vec<CodepagePatch>,
+
+    /// Page indexes for outlined code that are not associated with any ISEQ.
+ ocb_pages: Vec<usize>,
+
+ /// Map of cfunc YARV PCs to CMEs and receiver indexes, used to lazily push
+ /// a frame when rb_yjit_lazy_push_frame() is called with a PC in this HashMap.
+ pc_to_cfunc: HashMap<*mut VALUE, (*const rb_callable_method_entry_t, u8)>,
+}
+
+/// For implementing global code invalidation. A position in the inline
+/// codeblock to patch into a JMP rel32 which jumps into some code in
+/// the outlined codeblock to exit to the interpreter.
+pub struct CodepagePatch {
+ pub inline_patch_pos: CodePtr,
+ pub outlined_target_pos: CodePtr,
+}
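+
+// Sketch of how these patches are consumed on invalidation (assumed shape for
+// illustration; the real routine lives with the invalidation code and uses the
+// backend's patching primitives):
+//
+//     for patch in CodegenGlobals::take_global_inval_patches() {
+//         // Overwrite the inline position with a jump to the outlined target
+//         write_jmp(patch.inline_patch_pos, patch.outlined_target_pos);
+//     }
+//
+// where `write_jmp` is a hypothetical stand-in for the backend patch helper.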
+
+/// Private singleton instance of the codegen globals
+static mut CODEGEN_GLOBALS: Option<CodegenGlobals> = None;
+
+impl CodegenGlobals {
+ /// Initialize the codegen globals
+ pub fn init() {
+ // Executable memory and code page size in bytes
+ let exec_mem_size = get_option!(exec_mem_size).unwrap_or(get_option!(mem_size));
+
+ #[cfg(not(test))]
+ let (mut cb, mut ocb) = {
+ let virt_block: *mut u8 = unsafe { rb_jit_reserve_addr_space(exec_mem_size as u32) };
+
+ // Memory protection syscalls need page-aligned addresses, so check it here. Assuming
+ // `virt_block` is page-aligned, `second_half` should be page-aligned as long as the
+            // page size in bytes is a power of two 2¹⁹ or smaller. This is because the user
+            // requested size is half of mem_option × 2²⁰ as it's in MiB.
+ //
+ // Basically, we don't support x86-64 2MiB and 1GiB pages. ARMv8 can do up to 64KiB
+            // (2¹⁶ bytes) pages, which should be fine. 4KiB pages seem to be the most popular though.
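+            //
+            // Worked example: a 128 MiB request is 128 × 2²⁰ bytes, so its
+            // half is 128 × 2¹⁹ = 2²⁶ bytes, a multiple of any power-of-two
+            // page size up to 2¹⁹.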
+ let page_size = unsafe { rb_jit_get_page_size() };
+ assert_eq!(
+ virt_block as usize % page_size.as_usize(), 0,
+ "Start of virtual address block should be page-aligned",
+ );
+
+ use crate::virtualmem::*;
+ use std::ptr::NonNull;
+
+ let mem_block = VirtualMem::new(
+ SystemAllocator {},
+ page_size,
+ NonNull::new(virt_block).unwrap(),
+ exec_mem_size,
+ get_option!(mem_size),
+ );
+ let mem_block = Rc::new(mem_block);
+
+ let freed_pages = Rc::new(None);
+
+ let asm_comments = get_option_ref!(dump_disasm).is_some();
+ let cb = CodeBlock::new(mem_block.clone(), false, freed_pages.clone(), asm_comments);
+ let ocb = OutlinedCb::wrap(CodeBlock::new(mem_block, true, freed_pages, asm_comments));
+
+ (cb, ocb)
+ };
+
+ // In test mode we're not linking with the C code
+ // so we don't allocate executable memory
+ #[cfg(test)]
+ let mut cb = CodeBlock::new_dummy(exec_mem_size / 2);
+ #[cfg(test)]
+ let mut ocb = OutlinedCb::wrap(CodeBlock::new_dummy(exec_mem_size / 2));
+
+ let ocb_start_addr = ocb.unwrap().get_write_ptr();
+ let leave_exit_code = gen_leave_exit(&mut ocb).unwrap();
+ let leave_exception_code = gen_leave_exception(&mut ocb).unwrap();
+
+ let stub_exit_code = gen_stub_exit(&mut ocb).unwrap();
+
+ let branch_stub_hit_trampoline = gen_branch_stub_hit_trampoline(&mut ocb).unwrap();
+ let entry_stub_hit_trampoline = gen_entry_stub_hit_trampoline(&mut ocb).unwrap();
+
+ // Generate full exit code for C func
+ let cfunc_exit_code = gen_full_cfunc_return(&mut ocb).unwrap();
+
+ let ocb_end_addr = ocb.unwrap().get_write_ptr();
+ let ocb_pages = ocb.unwrap().addrs_to_pages(ocb_start_addr, ocb_end_addr).collect();
+
+ // Mark all code memory as executable
+ cb.mark_all_executable();
+
+ let codegen_globals = CodegenGlobals {
+ context_data: BitVector::new(),
+ inline_cb: cb,
+ outlined_cb: ocb,
+ ocb_pages,
+ leave_exit_code,
+ leave_exception_code,
+ stub_exit_code,
+ outline_full_cfunc_return_pos: cfunc_exit_code,
+ branch_stub_hit_trampoline,
+ entry_stub_hit_trampoline,
+ global_inval_patches: Vec::new(),
+ pc_to_cfunc: HashMap::new(),
+ };
+
+ // Initialize the codegen globals instance
+ unsafe {
+ CODEGEN_GLOBALS = Some(codegen_globals);
+ }
+ }
+
+ /// Get a mutable reference to the codegen globals instance
+ pub fn get_instance() -> &'static mut CodegenGlobals {
+ unsafe { CODEGEN_GLOBALS.as_mut().unwrap() }
+ }
+
+ pub fn has_instance() -> bool {
+ unsafe { CODEGEN_GLOBALS.as_mut().is_some() }
+ }
+
+ /// Get a mutable reference to the context data
+ pub fn get_context_data() -> &'static mut BitVector {
+ &mut CodegenGlobals::get_instance().context_data
+ }
+
+ /// Get a mutable reference to the inline code block
+ pub fn get_inline_cb() -> &'static mut CodeBlock {
+ &mut CodegenGlobals::get_instance().inline_cb
+ }
+
+ /// Get a mutable reference to the outlined code block
+ pub fn get_outlined_cb() -> &'static mut OutlinedCb {
+ &mut CodegenGlobals::get_instance().outlined_cb
+ }
+
+ pub fn get_leave_exit_code() -> CodePtr {
+ CodegenGlobals::get_instance().leave_exit_code
+ }
+
+ pub fn get_leave_exception_code() -> CodePtr {
+ CodegenGlobals::get_instance().leave_exception_code
+ }
+
+ pub fn get_stub_exit_code() -> CodePtr {
+ CodegenGlobals::get_instance().stub_exit_code
+ }
+
+ pub fn push_global_inval_patch(inline_pos: CodePtr, outlined_pos: CodePtr, cb: &CodeBlock) {
+ if let Some(last_patch) = CodegenGlobals::get_instance().global_inval_patches.last() {
+ let patch_offset = inline_pos.as_offset() - last_patch.inline_patch_pos.as_offset();
+ assert!(
+ patch_offset < 0 || cb.jmp_ptr_bytes() as i64 <= patch_offset,
+ "patches should not overlap (patch_offset: {patch_offset})",
+ );
+ }
+
+ let patch = CodepagePatch {
+ inline_patch_pos: inline_pos,
+ outlined_target_pos: outlined_pos,
+ };
+ CodegenGlobals::get_instance()
+ .global_inval_patches
+ .push(patch);
+ }
+
+    /// Drain the list of patches and return it
+ pub fn take_global_inval_patches() -> Vec<CodepagePatch> {
+ let globals = CodegenGlobals::get_instance();
+ mem::take(&mut globals.global_inval_patches)
+ }
+
+ pub fn get_outline_full_cfunc_return_pos() -> CodePtr {
+ CodegenGlobals::get_instance().outline_full_cfunc_return_pos
+ }
+
+ pub fn get_branch_stub_hit_trampoline() -> CodePtr {
+ CodegenGlobals::get_instance().branch_stub_hit_trampoline
+ }
+
+ pub fn get_entry_stub_hit_trampoline() -> CodePtr {
+ CodegenGlobals::get_instance().entry_stub_hit_trampoline
+ }
+
+ pub fn get_ocb_pages() -> &'static Vec<usize> {
+ &CodegenGlobals::get_instance().ocb_pages
+ }
+
+ pub fn get_pc_to_cfunc() -> &'static mut HashMap<*mut VALUE, (*const rb_callable_method_entry_t, u8)> {
+ &mut CodegenGlobals::get_instance().pc_to_cfunc
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ fn setup_codegen() -> (Context, Assembler, CodeBlock, OutlinedCb) {
+ let cb = CodeBlock::new_dummy(256 * 1024);
+
+ return (
+ Context::default(),
+ Assembler::new(0),
+ cb,
+ OutlinedCb::wrap(CodeBlock::new_dummy(256 * 1024)),
+ );
+ }
+
+ fn dummy_jit_state<'a>(cb: &mut CodeBlock, ocb: &'a mut OutlinedCb) -> JITState<'a> {
+ JITState::new(
+ BlockId { iseq: std::ptr::null(), idx: 0 },
+ Context::default(),
+ cb.get_write_ptr(),
+ ptr::null(), // No execution context in tests. No peeking!
+ ocb,
+ true,
+ )
+ }
+
+ #[test]
+ fn test_gen_leave_exit() {
+ let mut ocb = OutlinedCb::wrap(CodeBlock::new_dummy(256 * 1024));
+ gen_leave_exit(&mut ocb);
+ assert!(ocb.unwrap().get_write_pos() > 0);
+ }
+
+ #[test]
+ fn test_gen_exit() {
+ let (_ctx, mut asm, mut cb, _) = setup_codegen();
+ gen_exit(0 as *mut VALUE, &mut asm);
+ asm.compile(&mut cb, None).unwrap();
+ assert!(cb.get_write_pos() > 0);
+ }
+
+ #[test]
+ fn test_get_side_exit() {
+ let (ctx, mut asm, _, mut ocb) = setup_codegen();
+ let side_exit_context = SideExitContext::new(0 as _, ctx);
+ asm.get_side_exit(&side_exit_context, None, &mut ocb);
+ assert!(ocb.unwrap().get_write_pos() > 0);
+ }
+
+ #[test]
+ fn test_gen_check_ints() {
+ let (_ctx, mut asm, _cb, _ocb) = setup_codegen();
+ asm.set_side_exit_context(0 as _, 0);
+ gen_check_ints(&mut asm, Counter::guard_send_interrupted);
+ }
+
+ #[test]
+ fn test_gen_nop() {
+ let (context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let mut jit = dummy_jit_state(&mut cb, &mut ocb);
+ let status = gen_nop(&mut jit, &mut asm);
+ asm.compile(&mut cb, None).unwrap();
+
+ assert_eq!(status, Some(KeepCompiling));
+ assert_eq!(context.diff(&Context::default()), TypeDiff::Compatible(0));
+ assert_eq!(cb.get_write_pos(), 0);
+ }
+
+ #[test]
+ fn test_gen_pop() {
+ let (_, mut asm, mut cb, mut ocb) = setup_codegen();
+ let mut jit = dummy_jit_state(&mut cb, &mut ocb);
+ let context = Context::default();
+ asm.stack_push(Type::Fixnum);
+ let status = gen_pop(&mut jit, &mut asm);
+
+ assert_eq!(status, Some(KeepCompiling));
+ let mut default = Context::default();
+ default.set_reg_mapping(context.get_reg_mapping());
+ assert_eq!(context.diff(&default), TypeDiff::Compatible(0));
+ }
+
+ #[test]
+ fn test_gen_dup() {
+ let (_context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let mut jit = dummy_jit_state(&mut cb, &mut ocb);
+ asm.stack_push(Type::Fixnum);
+ let status = gen_dup(&mut jit, &mut asm);
+
+ assert_eq!(status, Some(KeepCompiling));
+
+ // Did we duplicate the type information for the Fixnum type?
+ assert_eq!(Type::Fixnum, asm.ctx.get_opnd_type(StackOpnd(0)));
+ assert_eq!(Type::Fixnum, asm.ctx.get_opnd_type(StackOpnd(1)));
+
+ asm.compile(&mut cb, None).unwrap();
+ assert!(cb.get_write_pos() > 0); // Write some movs
+ }
+
+ #[test]
+ fn test_gen_dupn() {
+ let (_context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let mut jit = dummy_jit_state(&mut cb, &mut ocb);
+ asm.stack_push(Type::Fixnum);
+ asm.stack_push(Type::Flonum);
+
+ let mut value_array: [u64; 2] = [0, 2]; // We only compile for n == 2
+ let pc: *mut VALUE = &mut value_array as *mut u64 as *mut VALUE;
+ jit.pc = pc;
+
+ let status = gen_dupn(&mut jit, &mut asm);
+
+ assert_eq!(status, Some(KeepCompiling));
+
+ assert_eq!(Type::Fixnum, asm.ctx.get_opnd_type(StackOpnd(3)));
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(2)));
+ assert_eq!(Type::Fixnum, asm.ctx.get_opnd_type(StackOpnd(1)));
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(0)));
+
+ // TODO: this is writing zero bytes on x86. Why?
+ asm.compile(&mut cb, None).unwrap();
+ assert!(cb.get_write_pos() > 0); // Write some movs
+ }
+
+ #[test]
+ fn test_gen_opt_reverse() {
+ let (_context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let mut jit = dummy_jit_state(&mut cb, &mut ocb);
+
+ // Odd number of elements
+ asm.stack_push(Type::Fixnum);
+ asm.stack_push(Type::Flonum);
+ asm.stack_push(Type::CString);
+
+ let mut value_array: [u64; 2] = [0, 3];
+ let pc: *mut VALUE = &mut value_array as *mut u64 as *mut VALUE;
+ jit.pc = pc;
+
+ let mut status = gen_opt_reverse(&mut jit, &mut asm);
+
+ assert_eq!(status, Some(KeepCompiling));
+
+ assert_eq!(Type::CString, asm.ctx.get_opnd_type(StackOpnd(2)));
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(1)));
+ assert_eq!(Type::Fixnum, asm.ctx.get_opnd_type(StackOpnd(0)));
+
+ // Try again with an even number of elements.
+ asm.stack_push(Type::Nil);
+ value_array[1] = 4;
+ status = gen_opt_reverse(&mut jit, &mut asm);
+
+ assert_eq!(status, Some(KeepCompiling));
+
+ assert_eq!(Type::Nil, asm.ctx.get_opnd_type(StackOpnd(3)));
+ assert_eq!(Type::Fixnum, asm.ctx.get_opnd_type(StackOpnd(2)));
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(1)));
+ assert_eq!(Type::CString, asm.ctx.get_opnd_type(StackOpnd(0)));
+ }
+
+ #[test]
+ fn test_gen_swap() {
+ let (_context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let mut jit = dummy_jit_state(&mut cb, &mut ocb);
+ asm.stack_push(Type::Fixnum);
+ asm.stack_push(Type::Flonum);
+
+ let status = gen_swap(&mut jit, &mut asm);
+
+ let tmp_type_top = asm.ctx.get_opnd_type(StackOpnd(0));
+ let tmp_type_next = asm.ctx.get_opnd_type(StackOpnd(1));
+
+ assert_eq!(status, Some(KeepCompiling));
+ assert_eq!(tmp_type_top, Type::Fixnum);
+ assert_eq!(tmp_type_next, Type::Flonum);
+ }
+
+ #[test]
+ fn test_putnil() {
+ let (_context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let mut jit = dummy_jit_state(&mut cb, &mut ocb);
+ let status = gen_putnil(&mut jit, &mut asm);
+
+ let tmp_type_top = asm.ctx.get_opnd_type(StackOpnd(0));
+
+ assert_eq!(status, Some(KeepCompiling));
+ assert_eq!(tmp_type_top, Type::Nil);
+ asm.compile(&mut cb, None).unwrap();
+ assert!(cb.get_write_pos() > 0);
+ }
+
+
+ #[test]
+ fn test_putself() {
+ let (_context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let mut jit = dummy_jit_state(&mut cb, &mut ocb);
+ let status = gen_putself(&mut jit, &mut asm);
+
+ assert_eq!(status, Some(KeepCompiling));
+ asm.compile(&mut cb, None).unwrap();
+ assert!(cb.get_write_pos() > 0);
+ }
+
+ #[test]
+ fn test_gen_setn() {
+ let (_context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let mut jit = dummy_jit_state(&mut cb, &mut ocb);
+ asm.stack_push(Type::Fixnum);
+ asm.stack_push(Type::Flonum);
+ asm.stack_push(Type::CString);
+
+ let mut value_array: [u64; 2] = [0, 2];
+ let pc: *mut VALUE = &mut value_array as *mut u64 as *mut VALUE;
+ jit.pc = pc;
+
+ let status = gen_setn(&mut jit, &mut asm);
+
+ assert_eq!(status, Some(KeepCompiling));
+
+ assert_eq!(Type::CString, asm.ctx.get_opnd_type(StackOpnd(2)));
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(1)));
+ assert_eq!(Type::CString, asm.ctx.get_opnd_type(StackOpnd(0)));
+
+ asm.compile(&mut cb, None).unwrap();
+ assert!(cb.get_write_pos() > 0);
+ }
+
+ #[test]
+ fn test_gen_topn() {
+ let (_context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let mut jit = dummy_jit_state(&mut cb, &mut ocb);
+ asm.stack_push(Type::Flonum);
+ asm.stack_push(Type::CString);
+
+ let mut value_array: [u64; 2] = [0, 1];
+ let pc: *mut VALUE = &mut value_array as *mut u64 as *mut VALUE;
+ jit.pc = pc;
+
+ let status = gen_topn(&mut jit, &mut asm);
+
+ assert_eq!(status, Some(KeepCompiling));
+
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(2)));
+ assert_eq!(Type::CString, asm.ctx.get_opnd_type(StackOpnd(1)));
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(0)));
+
+ asm.compile(&mut cb, None).unwrap();
+ assert!(cb.get_write_pos() > 0); // Write some movs
+ }
+
+ #[test]
+ fn test_gen_adjuststack() {
+ let (_context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let mut jit = dummy_jit_state(&mut cb, &mut ocb);
+ asm.stack_push(Type::Flonum);
+ asm.stack_push(Type::CString);
+ asm.stack_push(Type::Fixnum);
+
+ let mut value_array: [u64; 3] = [0, 2, 0];
+ let pc: *mut VALUE = &mut value_array as *mut u64 as *mut VALUE;
+ jit.pc = pc;
+
+ let status = gen_adjuststack(&mut jit, &mut asm);
+
+ assert_eq!(status, Some(KeepCompiling));
+
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(0)));
+
+ asm.compile(&mut cb, None).unwrap();
+ assert!(cb.get_write_pos() == 0); // No instructions written
+ }
+
+ #[test]
+ fn test_gen_leave() {
+ let (_context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let mut jit = dummy_jit_state(&mut cb, &mut ocb);
+ // Push return value
+ asm.stack_push(Type::Fixnum);
+ asm.set_side_exit_context(0 as _, 0);
+ gen_leave(&mut jit, &mut asm);
+ }
+}
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
new file mode 100644
index 0000000000..0590135392
--- /dev/null
+++ b/yjit/src/core.rs
@@ -0,0 +1,4603 @@
+//! Code versioning, retained live control flow graph mutations, type tracking, etc.
+
+// So we can comment on individual uses of `unsafe` in `unsafe` functions
+#![warn(unsafe_op_in_unsafe_fn)]
+
+use crate::asm::*;
+use crate::backend::ir::*;
+use crate::codegen::*;
+use crate::virtualmem::CodePtr;
+use crate::cruby::*;
+use crate::options::*;
+use crate::stats::*;
+use crate::utils::*;
+#[cfg(feature="disasm")]
+use crate::disasm::*;
+use core::ffi::c_void;
+use std::cell::*;
+use std::fmt;
+use std::mem;
+use std::mem::transmute;
+use std::ops::Range;
+use std::rc::Rc;
+use std::collections::HashSet;
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+use mem::MaybeUninit;
+use std::ptr;
+use ptr::NonNull;
+use YARVOpnd::*;
+use TempMapping::*;
+use crate::invariants::*;
+
+// Maximum number of temp value types or registers we keep track of
+pub const MAX_CTX_TEMPS: usize = 8;
+
+// Maximum number of local variable types or registers we keep track of
+const MAX_CTX_LOCALS: usize = 8;
+
+/// An index into `ISEQ_BODY(iseq)->iseq_encoded`. Points
+/// to a YARV instruction or an instruction operand.
+pub type IseqIdx = u16;
+
+// Represent the type of a value (local/stack/self) in YJIT
+#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
+#[repr(u8)]
+pub enum Type {
+ Unknown = 0,
+ UnknownImm,
+ UnknownHeap,
+ Nil,
+ True,
+ False,
+ Fixnum,
+ Flonum,
+ ImmSymbol,
+
+ TString, // An object with the T_STRING flag set, possibly an rb_cString
+ CString, // An object that at one point had its class field equal rb_cString (creating a singleton class changes it)
+ TArray, // An object with the T_ARRAY flag set, possibly an rb_cArray
+ CArray, // An object that at one point had its class field equal rb_cArray (creating a singleton class changes it)
+ THash, // An object with the T_HASH flag set, possibly an rb_cHash
+ CHash, // An object that at one point had its class field equal rb_cHash (creating a singleton class changes it)
+
+ BlockParamProxy, // A special sentinel value indicating the block parameter should be read from
+ // the current surrounding cfp
+
+ // The context currently relies on types taking at most 4 bits (max value 15)
+ // to encode, so if we add any more, we will need to refactor the context.
+}
+
+// Default initialization
+impl Default for Type {
+ fn default() -> Self {
+ Type::Unknown
+ }
+}
+
+impl Type {
+ /// This returns an appropriate Type based on a known value
+ pub fn from(val: VALUE) -> Type {
+ if val.special_const_p() {
+ if val.fixnum_p() {
+ Type::Fixnum
+ } else if val.nil_p() {
+ Type::Nil
+ } else if val == Qtrue {
+ Type::True
+ } else if val == Qfalse {
+ Type::False
+ } else if val.static_sym_p() {
+ Type::ImmSymbol
+ } else if val.flonum_p() {
+ Type::Flonum
+ } else {
+ unreachable!("Illegal value: {:?}", val)
+ }
+ } else {
+            // Core.rs can't reference rb_cString directly because this file is also
+            // linked into Rust-only tests that don't link against CRuby. But CString vs
+            // TString is only an optimisation and shouldn't affect correctness.
+ #[cfg(not(test))]
+ match val.class_of() {
+ class if class == unsafe { rb_cArray } => return Type::CArray,
+ class if class == unsafe { rb_cHash } => return Type::CHash,
+ class if class == unsafe { rb_cString } => return Type::CString,
+ _ => {}
+ }
+ // We likewise can't reference rb_block_param_proxy, but it's again an optimisation;
+ // we can just treat it as a normal Object.
+ #[cfg(not(test))]
+ if val == unsafe { rb_block_param_proxy } {
+ return Type::BlockParamProxy;
+ }
+ match val.builtin_type() {
+ RUBY_T_ARRAY => Type::TArray,
+ RUBY_T_HASH => Type::THash,
+ RUBY_T_STRING => Type::TString,
+ _ => Type::UnknownHeap,
+ }
+ }
+ }
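+
+    // Illustrative results (a sketch, not exhaustive): Qnil maps to Type::Nil and
+    // a fixnum VALUE to Type::Fixnum. Outside of Rust-only tests, a String
+    // instance maps to Type::CString, while an instance of a String subclass
+    // falls through the class_of() checks to builtin_type() and gets Type::TString.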
+
+ /// Check if the type is an immediate
+ pub fn is_imm(&self) -> bool {
+ match self {
+ Type::UnknownImm => true,
+ Type::Nil => true,
+ Type::True => true,
+ Type::False => true,
+ Type::Fixnum => true,
+ Type::Flonum => true,
+ Type::ImmSymbol => true,
+ _ => false,
+ }
+ }
+
+ /// Returns true when the type is not specific.
+ pub fn is_unknown(&self) -> bool {
+ match self {
+ Type::Unknown | Type::UnknownImm | Type::UnknownHeap => true,
+ _ => false,
+ }
+ }
+
+ /// Returns true when we know the VALUE is a specific handle type,
+ /// such as a static symbol ([Type::ImmSymbol], i.e. true from RB_STATIC_SYM_P()).
+ /// Opposite of [Self::is_unknown].
+ pub fn is_specific(&self) -> bool {
+ !self.is_unknown()
+ }
+
+ /// Check if the type is a heap object
+ pub fn is_heap(&self) -> bool {
+ match self {
+ Type::UnknownHeap => true,
+ Type::TArray => true,
+ Type::CArray => true,
+ Type::THash => true,
+ Type::CHash => true,
+ Type::TString => true,
+ Type::CString => true,
+ Type::BlockParamProxy => true,
+ _ => false,
+ }
+ }
+
+ /// Check if it's a T_ARRAY object (both TArray and CArray are T_ARRAY)
+ pub fn is_array(&self) -> bool {
+ matches!(self, Type::TArray | Type::CArray)
+ }
+
+ /// Check if it's a T_HASH object (both THash and CHash are T_HASH)
+ pub fn is_hash(&self) -> bool {
+ matches!(self, Type::THash | Type::CHash)
+ }
+
+ /// Check if it's a T_STRING object (both TString and CString are T_STRING)
+ pub fn is_string(&self) -> bool {
+ matches!(self, Type::TString | Type::CString)
+ }
+
+ /// Returns an Option with the T_ value type if it is known, otherwise None
+ pub fn known_value_type(&self) -> Option<ruby_value_type> {
+ match self {
+ Type::Nil => Some(RUBY_T_NIL),
+ Type::True => Some(RUBY_T_TRUE),
+ Type::False => Some(RUBY_T_FALSE),
+ Type::Fixnum => Some(RUBY_T_FIXNUM),
+ Type::Flonum => Some(RUBY_T_FLOAT),
+ Type::TArray | Type::CArray => Some(RUBY_T_ARRAY),
+ Type::THash | Type::CHash => Some(RUBY_T_HASH),
+ Type::ImmSymbol => Some(RUBY_T_SYMBOL),
+ Type::TString | Type::CString => Some(RUBY_T_STRING),
+ Type::Unknown | Type::UnknownImm | Type::UnknownHeap => None,
+ Type::BlockParamProxy => None,
+ }
+ }
+
+ /// Returns an Option with the class if it is known, otherwise None
+ pub fn known_class(&self) -> Option<VALUE> {
+ unsafe {
+ match self {
+ Type::Nil => Some(rb_cNilClass),
+ Type::True => Some(rb_cTrueClass),
+ Type::False => Some(rb_cFalseClass),
+ Type::Fixnum => Some(rb_cInteger),
+ Type::Flonum => Some(rb_cFloat),
+ Type::ImmSymbol => Some(rb_cSymbol),
+ Type::CArray => Some(rb_cArray),
+ Type::CHash => Some(rb_cHash),
+ Type::CString => Some(rb_cString),
+ _ => None,
+ }
+ }
+ }
+
+ /// Returns an Option with the exact value if it is known, otherwise None
+ #[allow(unused)] // not yet used
+ pub fn known_exact_value(&self) -> Option<VALUE> {
+ match self {
+ Type::Nil => Some(Qnil),
+ Type::True => Some(Qtrue),
+ Type::False => Some(Qfalse),
+ _ => None,
+ }
+ }
+
+ /// Returns an Option boolean representing whether the value is truthy if known, otherwise None
+ pub fn known_truthy(&self) -> Option<bool> {
+ match self {
+ Type::Nil => Some(false),
+ Type::False => Some(false),
+ Type::UnknownHeap => Some(true),
+ Type::Unknown | Type::UnknownImm => None,
+ _ => Some(true)
+ }
+ }
+
+ /// Returns an Option boolean representing whether the value is equal to nil if known, otherwise None
+ pub fn known_nil(&self) -> Option<bool> {
+ match (self, self.known_truthy()) {
+ (Type::Nil, _) => Some(true),
+ (Type::False, _) => Some(false), // Qfalse is not nil
+ (_, Some(true)) => Some(false), // if truthy, can't be nil
+ (_, _) => None // otherwise unknown
+ }
+ }
+
+ /// Compute a difference between two value types
+ pub fn diff(self, dst: Self) -> TypeDiff {
+ // Perfect match, difference is zero
+ if self == dst {
+ return TypeDiff::Compatible(0);
+ }
+
+ // Any type can flow into an unknown type
+ if dst == Type::Unknown {
+ return TypeDiff::Compatible(1);
+ }
+
+ // A CArray is also a TArray.
+ if self == Type::CArray && dst == Type::TArray {
+ return TypeDiff::Compatible(1);
+ }
+
+ // A CHash is also a THash.
+ if self == Type::CHash && dst == Type::THash {
+ return TypeDiff::Compatible(1);
+ }
+
+ // A CString is also a TString.
+ if self == Type::CString && dst == Type::TString {
+ return TypeDiff::Compatible(1);
+ }
+
+ // Specific heap type into unknown heap type is imperfect but valid
+ if self.is_heap() && dst == Type::UnknownHeap {
+ return TypeDiff::Compatible(1);
+ }
+
+ // Specific immediate type into unknown immediate type is imperfect but valid
+ if self.is_imm() && dst == Type::UnknownImm {
+ return TypeDiff::Compatible(1);
+ }
+
+ // Incompatible types
+ return TypeDiff::Incompatible;
+ }
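+
+    // A few concrete results of the rules above, for illustration:
+    //   Fixnum.diff(Fixnum)     == TypeDiff::Compatible(0)  (exact match)
+    //   CArray.diff(TArray)     == TypeDiff::Compatible(1)  (drops specificity)
+    //   Fixnum.diff(UnknownImm) == TypeDiff::Compatible(1)
+    //   Fixnum.diff(TString)    == TypeDiff::Incompatible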
+
+ /// Upgrade this type into a more specific compatible type
+ /// The new type must be compatible and at least as specific as the previously known type.
+ fn upgrade(&mut self, new_type: Self) {
+ // We can only upgrade to a type that is more specific
+ assert!(new_type.diff(*self) != TypeDiff::Incompatible);
+ *self = new_type;
+ }
+}
+
+#[derive(Debug, Eq, PartialEq)]
+pub enum TypeDiff {
+ // usize == 0: Same type
+ // usize >= 1: Different but compatible. The smaller, the more compatible.
+ Compatible(usize),
+ Incompatible,
+}
+
+#[derive(Copy, Clone, Eq, Hash, PartialEq, Debug)]
+pub enum TempMapping {
+ MapToStack(Type),
+ MapToSelf,
+ MapToLocal(u8),
+}
+
+impl Default for TempMapping {
+ fn default() -> Self {
+ TempMapping::MapToStack(Type::default())
+ }
+}
+
+impl TempMapping {
+ /// Return TempMapping without type information in MapToStack
+ pub fn without_type(&self) -> TempMapping {
+ match self {
+ MapToStack(_) => TempMapping::MapToStack(Type::default()),
+ _ => *self,
+ }
+ }
+}
+
+// Operand to a YARV bytecode instruction
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum YARVOpnd {
+ // The value is self
+ SelfOpnd,
+
+ // Temporary stack operand with stack index
+ StackOpnd(u8),
+}
+
+impl From<Opnd> for YARVOpnd {
+ fn from(value: Opnd) -> Self {
+ match value {
+ Opnd::Stack { idx, .. } => StackOpnd(idx.try_into().unwrap()),
+ _ => unreachable!("{:?} cannot be converted to YARVOpnd", value)
+ }
+ }
+}
+
+/// Number of registers that can be used for stack temps or locals
+pub const MAX_MAPPED_REGS: usize = 5;
+
+/// A stack slot or a local variable. The u8 is its index (less than 8, so it fits in 3 bits).
+#[derive(Copy, Clone, Eq, Hash, PartialEq, Debug)]
+pub enum RegOpnd {
+ Stack(u8),
+ Local(u8),
+}
+
+/// RegMapping manages a set of registers used for stack temps and locals.
+/// Each element of the array corresponds to one register.
+/// If an element is Some, that stack temp or local lives in the register.
+///
+/// Note that Opnd::InsnOut uses a separate set of registers at the moment.
+#[derive(Copy, Clone, Default, Eq, Hash, PartialEq)]
+pub struct RegMapping([Option<RegOpnd>; MAX_MAPPED_REGS]);
+
+impl RegMapping {
+ /// Return the index of the register for a given operand if allocated.
+ pub fn get_reg(&self, opnd: RegOpnd) -> Option<usize> {
+ self.0.iter().enumerate()
+ .find(|(_, &reg_opnd)| reg_opnd == Some(opnd))
+ .map(|(reg_idx, _)| reg_idx)
+ }
+
+ /// Set a given operand to the register at a given index.
+ pub fn set_reg(&mut self, opnd: RegOpnd, reg_idx: usize) {
+ assert!(self.0[reg_idx].is_none());
+ self.0[reg_idx] = Some(opnd);
+ }
+
+ /// Allocate a register for a given operand if available.
+ /// Return true if self is updated.
+ pub fn alloc_reg(&mut self, opnd: RegOpnd) -> bool {
+ // If a given opnd already has a register, skip allocation.
+ if self.get_reg(opnd).is_some() {
+ return false;
+ }
+
+        // If the index is too large to encode with 3 bits, give up.
+ match opnd {
+ RegOpnd::Stack(stack_idx) => if stack_idx >= MAX_CTX_TEMPS as u8 {
+ return false;
+ }
+ RegOpnd::Local(local_idx) => if local_idx >= MAX_CTX_LOCALS as u8 {
+ return false;
+ }
+ };
+
+ // Allocate a register if available.
+ if let Some(reg_idx) = self.find_unused_reg(opnd) {
+ self.0[reg_idx] = Some(opnd);
+ return true;
+ }
+ false
+ }
+
+ /// Deallocate a register for a given operand if in use.
+ /// Return true if self is updated.
+ pub fn dealloc_reg(&mut self, opnd: RegOpnd) -> bool {
+ for reg_opnd in self.0.iter_mut() {
+ if *reg_opnd == Some(opnd) {
+ *reg_opnd = None;
+ return true;
+ }
+ }
+ false
+ }
+
+ /// Find an available register and return the index of it.
+ fn find_unused_reg(&self, opnd: RegOpnd) -> Option<usize> {
+ let num_regs = get_option!(num_temp_regs);
+ if num_regs == 0 {
+ return None;
+ }
+ assert!(num_regs <= MAX_MAPPED_REGS);
+
+ // If the default index for the operand is available, use that to minimize
+ // discrepancies among Contexts.
+ let default_idx = match opnd {
+ RegOpnd::Stack(stack_idx) => stack_idx.as_usize() % num_regs,
+ RegOpnd::Local(local_idx) => num_regs - (local_idx.as_usize() % num_regs) - 1,
+ };
+ if self.0[default_idx].is_none() {
+ return Some(default_idx);
+ }
+
+ // If not, pick any other available register. Like default indexes, prefer
+ // lower indexes for Stack, and higher indexes for Local.
+ let mut index_temps = self.0.iter().enumerate();
+ match opnd {
+ RegOpnd::Stack(_) => index_temps.find(|(_, reg_opnd)| reg_opnd.is_none()),
+ RegOpnd::Local(_) => index_temps.rev().find(|(_, reg_opnd)| reg_opnd.is_none()),
+ }.map(|(index, _)| index)
+ }
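+
+    // Worked example, assuming num_temp_regs == 5: Stack(7) prefers index
+    // 7 % 5 == 2 while Local(0) prefers index 5 - (0 % 5) - 1 == 4, so stack
+    // temps gravitate toward low register indexes and locals toward high ones.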
+
+ /// Return a vector of RegOpnds that have an allocated register
+ pub fn get_reg_opnds(&self) -> Vec<RegOpnd> {
+ self.0.iter().filter_map(|&reg_opnd| reg_opnd).collect()
+ }
+
+ /// Count the number of registers that store a different operand from `dst`.
+ pub fn diff(&self, dst: RegMapping) -> usize {
+ self.0.iter().enumerate().filter(|&(reg_idx, &reg)| reg != dst.0[reg_idx]).count()
+ }
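+
+    // e.g. a mapping holding only Some(Stack(0)) diffs against an empty mapping
+    // as 1: a single register disagrees and would need moving on transition.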
+}
+
+impl fmt::Debug for RegMapping {
+    /// Print `[None, ...]` instead of the default `RegMapping([None, ...])`
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "{:?}", self.0)
+ }
+}
+
+/// Maximum value of the chain depth (should fit in 5 bits)
+const CHAIN_DEPTH_MAX: u8 = 0b11111; // 31
+
+/// Code generation context
+/// Contains information we can use to specialize/optimize code
+#[derive(Copy, Clone, Default, Eq, Hash, PartialEq, Debug)]
+pub struct Context {
+ // Number of values currently on the temporary stack
+ stack_size: u8,
+
+ // Offset of the JIT SP relative to the interpreter SP
+ // This represents how far the JIT's SP is from the "real" SP
+ sp_offset: i8,
+
+ /// Which stack temps or locals are in a register
+ reg_mapping: RegMapping,
+
+ // Depth of this block in the sidechain (eg: inline-cache chain)
+    // 5 bits, max 31 (see CHAIN_DEPTH_MAX)
+ chain_depth: u8,
+
+ // Whether this code is the target of a JIT-to-JIT Ruby return ([Self::is_return_landing])
+ is_return_landing: bool,
+
+ // Whether the compilation of this code has been deferred ([Self::is_deferred])
+ is_deferred: bool,
+
+ // Type we track for self
+ self_type: Type,
+
+ // Local variable types we keep track of
+ local_types: [Type; MAX_CTX_LOCALS],
+
+ // Temp mapping type/local_idx we track
+ temp_mapping: [TempMapping; MAX_CTX_TEMPS],
+
+    /// A pointer to a block ISEQ supplied by the caller. None if not inlined.
+ inline_block: Option<IseqPtr>,
+}
+
+#[derive(Clone)]
+pub struct BitVector {
+ // Flat vector of bytes to write into
+ bytes: Vec<u8>,
+
+    // Number of bits written so far into the allocated bytes
+ num_bits: usize,
+}
+
+impl BitVector {
+ pub fn new() -> Self {
+ Self {
+ bytes: Vec::with_capacity(4096),
+ num_bits: 0,
+ }
+ }
+
+ #[allow(unused)]
+ pub fn num_bits(&self) -> usize {
+ self.num_bits
+ }
+
+ // Total number of bytes taken
+ #[allow(unused)]
+ pub fn num_bytes(&self) -> usize {
+ (self.num_bits / 8) + if (self.num_bits % 8) != 0 { 1 } else { 0 }
+ }
+
+ // Write/append an unsigned integer value
+ fn push_uint(&mut self, mut val: u64, mut num_bits: usize) {
+ assert!(num_bits <= 64);
+
+        // Check that no bits above the requested number of bits are set
+ let mut val_bits = val;
+ if num_bits < 64 {
+ val_bits &= (1 << num_bits) - 1;
+ assert!(val == val_bits);
+ }
+
+ // Number of bits encoded in the last byte
+ let rem_bits = self.num_bits % 8;
+
+ // Encode as many bits as we can in this last byte
+ if rem_bits != 0 {
+ let num_enc = std::cmp::min(num_bits, 8 - rem_bits);
+ let bit_mask = (1 << num_enc) - 1;
+ let frac_bits = (val & bit_mask) << rem_bits;
+ let frac_bits: u8 = frac_bits.try_into().unwrap();
+ let last_byte_idx = self.bytes.len() - 1;
+ self.bytes[last_byte_idx] |= frac_bits;
+
+ self.num_bits += num_enc;
+ num_bits -= num_enc;
+ val >>= num_enc;
+ }
+
+ // While we have bits left to encode
+ while num_bits > 0 {
+ // Grow with a 1.2x growth factor instead of 2x
+ assert!(self.num_bits % 8 == 0);
+ let num_bytes = self.num_bits / 8;
+ if num_bytes == self.bytes.capacity() {
+ self.bytes.reserve_exact(self.bytes.len() / 5);
+ }
+
+ let bits = val & 0xFF;
+ let bits: u8 = bits.try_into().unwrap();
+ self.bytes.push(bits);
+
+ let bits_to_encode = std::cmp::min(num_bits, 8);
+ self.num_bits += bits_to_encode;
+ num_bits -= bits_to_encode;
+ val >>= bits_to_encode;
+ }
+ }
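+
+    // Worked example: after push_uint(0b101, 3), byte 0 holds 0b00000101. A
+    // following push_uint(0xFF, 8) ORs five bits into the top of byte 0
+    // (yielding 0b11111101) and appends the remaining three bits as byte 1,
+    // because values are packed LSB-first within each byte.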
+
+ fn push_u8(&mut self, val: u8) {
+ self.push_uint(val as u64, 8);
+ }
+
+ fn push_u5(&mut self, val: u8) {
+ assert!(val <= 0b11111);
+ self.push_uint(val as u64, 5);
+ }
+
+ fn push_u4(&mut self, val: u8) {
+ assert!(val <= 0b1111);
+ self.push_uint(val as u64, 4);
+ }
+
+ fn push_u3(&mut self, val: u8) {
+ assert!(val <= 0b111);
+ self.push_uint(val as u64, 3);
+ }
+
+ fn push_u2(&mut self, val: u8) {
+ assert!(val <= 0b11);
+ self.push_uint(val as u64, 2);
+ }
+
+ fn push_u1(&mut self, val: u8) {
+ assert!(val <= 0b1);
+ self.push_uint(val as u64, 1);
+ }
+
+ fn push_bool(&mut self, val: bool) {
+ self.push_u1(if val { 1 } else { 0 });
+ }
+
+ // Push a context encoding opcode
+ fn push_op(&mut self, op: CtxOp) {
+ self.push_u4(op as u8);
+ }
+
+ // Read a uint value at a given bit index
+ // The bit index is incremented after the value is read
+ fn read_uint(&self, bit_idx: &mut usize, mut num_bits: usize) -> u64 {
+ let start_bit_idx = *bit_idx;
+ let mut cur_idx = *bit_idx;
+
+ // Read the bits in the first byte
+ let bit_mod = cur_idx % 8;
+ let bits_in_byte = self.bytes[cur_idx / 8] >> bit_mod;
+
+ let num_bits_in_byte = std::cmp::min(num_bits, 8 - bit_mod);
+ cur_idx += num_bits_in_byte;
+ num_bits -= num_bits_in_byte;
+
+ let mut out_bits = (bits_in_byte as u64) & ((1 << num_bits_in_byte) - 1);
+
+ // While we have bits left to read
+ while num_bits > 0 {
+ let num_bits_in_byte = std::cmp::min(num_bits, 8);
+ assert!(cur_idx % 8 == 0);
+ let byte = self.bytes[cur_idx / 8] as u64;
+
+ let bits_in_byte = byte & ((1 << num_bits) - 1);
+ out_bits |= bits_in_byte << (cur_idx - start_bit_idx);
+
+ // Move to the next byte/offset
+ cur_idx += num_bits_in_byte;
+ num_bits -= num_bits_in_byte;
+ }
+
+ // Update the read index
+ *bit_idx = cur_idx;
+
+ out_bits
+ }
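+
+    // Mirror of the push_uint example above: with bytes == [0b11111101, 0b00000111],
+    // read_uint(&mut 3, 8) recombines the top five bits of byte 0 with the low
+    // three bits of byte 1 to recover 0xFF.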
+
+ fn read_u8(&self, bit_idx: &mut usize) -> u8 {
+ self.read_uint(bit_idx, 8) as u8
+ }
+
+ fn read_u5(&self, bit_idx: &mut usize) -> u8 {
+ self.read_uint(bit_idx, 5) as u8
+ }
+
+ fn read_u4(&self, bit_idx: &mut usize) -> u8 {
+ self.read_uint(bit_idx, 4) as u8
+ }
+
+ fn read_u3(&self, bit_idx: &mut usize) -> u8 {
+ self.read_uint(bit_idx, 3) as u8
+ }
+
+ fn read_u2(&self, bit_idx: &mut usize) -> u8 {
+ self.read_uint(bit_idx, 2) as u8
+ }
+
+ fn read_u1(&self, bit_idx: &mut usize) -> u8 {
+ self.read_uint(bit_idx, 1) as u8
+ }
+
+ fn read_bool(&self, bit_idx: &mut usize) -> bool {
+ self.read_u1(bit_idx) != 0
+ }
+
+ fn read_op(&self, bit_idx: &mut usize) -> CtxOp {
+ unsafe { std::mem::transmute(self.read_u4(bit_idx)) }
+ }
+}
+
+impl fmt::Debug for BitVector {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // We print the higher bytes first
+ for (idx, byte) in self.bytes.iter().enumerate().rev() {
+ write!(f, "{:08b}", byte)?;
+
+ // Insert a separator between each byte
+ if idx > 0 {
+ write!(f, "|")?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod bitvector_tests {
+ use super::*;
+
+ #[test]
+ fn write_3() {
+ let mut arr = BitVector::new();
+ arr.push_uint(3, 2);
+ assert!(arr.read_uint(&mut 0, 2) == 3);
+ }
+
+ #[test]
+ fn write_11() {
+ let mut arr = BitVector::new();
+ arr.push_uint(1, 1);
+ arr.push_uint(1, 1);
+ assert!(arr.read_uint(&mut 0, 2) == 3);
+ }
+
+ #[test]
+ fn write_11_overlap() {
+ let mut arr = BitVector::new();
+ arr.push_uint(0, 7);
+ arr.push_uint(3, 2);
+ arr.push_uint(1, 1);
+
+ assert!(arr.read_uint(&mut 7, 2) == 3);
+ }
+
+ #[test]
+ fn write_ff_0() {
+ let mut arr = BitVector::new();
+ arr.push_uint(0xFF, 8);
+ assert!(arr.read_uint(&mut 0, 8) == 0xFF);
+ }
+
+ #[test]
+ fn write_ff_3() {
+ // Write 0xFF at bit index 3
+ let mut arr = BitVector::new();
+ arr.push_uint(0, 3);
+ arr.push_uint(0xFF, 8);
+ assert!(arr.read_uint(&mut 3, 8) == 0xFF);
+ }
+
+ #[test]
+ fn write_ff_sandwich() {
+ // Write 0xFF sandwiched between zeros
+ let mut arr = BitVector::new();
+ arr.push_uint(0, 3);
+ arr.push_u8(0xFF);
+ arr.push_uint(0, 3);
+ assert!(arr.read_uint(&mut 3, 8) == 0xFF);
+ }
+
+ #[test]
+ fn write_read_u32_max() {
+ let mut arr = BitVector::new();
+ arr.push_uint(0xFF_FF_FF_FF, 32);
+ assert!(arr.read_uint(&mut 0, 32) == 0xFF_FF_FF_FF);
+ }
+
+ #[test]
+ fn write_read_u32_max_64b() {
+ let mut arr = BitVector::new();
+ arr.push_uint(0xFF_FF_FF_FF, 64);
+ assert!(arr.read_uint(&mut 0, 64) == 0xFF_FF_FF_FF);
+ }
+
+ #[test]
+ fn write_read_u64_max() {
+ let mut arr = BitVector::new();
+ arr.push_uint(u64::MAX, 64);
+ assert!(arr.read_uint(&mut 0, 64) == u64::MAX);
+ }
+
+ #[test]
+ fn encode_default() {
+ let mut bits = BitVector::new();
+ let ctx = Context::default();
+ let start_idx = ctx.encode_into(&mut bits);
+ assert!(start_idx == 0);
+ assert!(bits.num_bits() > 0);
+ assert!(bits.num_bytes() > 0);
+
+ // Make sure that the round trip matches the input
+ let ctx2 = Context::decode_from(&bits, 0);
+ assert!(ctx2 == ctx);
+ }
+
+ #[test]
+ fn encode_default_2x() {
+ let mut bits = BitVector::new();
+
+ let ctx0 = Context::default();
+ let idx0 = ctx0.encode_into(&mut bits);
+
+ let mut ctx1 = Context::default();
+ ctx1.reg_mapping = RegMapping([Some(RegOpnd::Stack(0)), None, None, None, None]);
+ let idx1 = ctx1.encode_into(&mut bits);
+
+ // Make sure that we can encode two contexts successively
+ let ctx0_dec = Context::decode_from(&bits, idx0);
+ let ctx1_dec = Context::decode_from(&bits, idx1);
+ assert!(ctx0_dec == ctx0);
+ assert!(ctx1_dec == ctx1);
+ }
+
+ #[test]
+ fn regress_reg_mapping() {
+ let mut bits = BitVector::new();
+ let mut ctx = Context::default();
+ ctx.reg_mapping = RegMapping([Some(RegOpnd::Stack(0)), None, None, None, None]);
+ ctx.encode_into(&mut bits);
+
+ let b0 = bits.read_u1(&mut 0);
+ assert!(b0 == 1);
+
+ // Make sure that the round trip matches the input
+ let ctx2 = Context::decode_from(&bits, 0);
+ assert!(ctx2 == ctx);
+ }
+}
+
+// Context encoding opcodes (4 bits)
+#[derive(Debug, Copy, Clone)]
+#[repr(u8)]
+enum CtxOp {
+ // Self type (4 bits)
+ SetSelfType = 0,
+
+    // Local idx (3 bits), local type (4 bits)
+ SetLocalType,
+
+    // Map a stack temp to the stack with a known type
+ // Temp idx (3 bits), known type (4 bits)
+ SetTempType,
+
+ // Map stack temp to a local variable
+ // Temp idx (3 bits), local idx (3 bits)
+ MapTempLocal,
+
+ // Map a stack temp to self
+ // Temp idx (3 bits)
+ MapTempSelf,
+
+ // Set inline block pointer (8 bytes)
+ SetInlineBlock,
+
+ // End of encoding
+ EndOfCode,
+}
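+
+// Illustrative encoding: a context whose only non-default state is
+// self_type == Fixnum plus a known-Fixnum temp at stack index 0 emits,
+// after the header bits (stack size, register mapping, flags, chain depth):
+//   SetSelfType, Fixnum, SetTempType, 0, Fixnum, EndOfCode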
+
+// Number of entries in the context cache
+const CTX_ENCODE_CACHE_SIZE: usize = 1024;
+const CTX_DECODE_CACHE_SIZE: usize = 1024;
+
+// Cache of the last contexts encoded/decoded
+// Empirically this saves a few percent of memory and speeds up compilation
+// We can experiment with varying the size of this cache
+pub type CtxEncodeCache = [(Context, u32); CTX_ENCODE_CACHE_SIZE];
+static mut CTX_ENCODE_CACHE: Option<Box<CtxEncodeCache>> = None;
+
+// Cache of the last contexts encoded/decoded
+// This speeds up compilation
+pub type CtxDecodeCache = [(Context, u32); CTX_DECODE_CACHE_SIZE];
+static mut CTX_DECODE_CACHE: Option<Box<CtxDecodeCache>> = None;
+
+// Size of the context cache in bytes
+pub const CTX_ENCODE_CACHE_BYTES: usize = std::mem::size_of::<CtxEncodeCache>();
+pub const CTX_DECODE_CACHE_BYTES: usize = std::mem::size_of::<CtxDecodeCache>();
+
+impl Context {
+ // Encode a context into the global context data, or return
+ // a cached previously encoded offset if one is found
+ pub fn encode(&self) -> u32 {
+ incr_counter!(num_contexts_encoded);
+
+ if *self == Context::default() {
+ incr_counter!(context_cache_hits);
+ return 0;
+ }
+
+ if let Some(idx) = Self::encode_cache_get(self) {
+ incr_counter!(context_cache_hits);
+ debug_assert!(Self::decode(idx) == *self);
+ return idx;
+ }
+
+ let context_data = CodegenGlobals::get_context_data();
+
+ // Make sure we don't use offset 0 because
+        // it's reserved for the default context
+ if context_data.num_bits() == 0 {
+ context_data.push_u1(0);
+ }
+
+ let idx = self.encode_into(context_data);
+ let idx: u32 = idx.try_into().unwrap();
+
+ // Save this offset into the cache
+ Self::encode_cache_set(self, idx);
+ Self::decode_cache_set(self, idx);
+
+ // In debug mode, check that the round-trip decoding always matches
+ debug_assert!(Self::decode(idx) == *self);
+
+ idx
+ }
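+
+    // The expected usage is a simple round trip, which the debug_assert
+    // above also exercises:
+    //   let idx = ctx.encode();
+    //   debug_assert_eq!(Context::decode(idx), ctx);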
+
+ pub fn decode(start_idx: u32) -> Context {
+ if start_idx == 0 {
+ return Context::default();
+ };
+
+ if let Some(ctx) = Self::decode_cache_get(start_idx) {
+ return ctx;
+ }
+
+ let context_data = CodegenGlobals::get_context_data();
+ let ctx = Self::decode_from(context_data, start_idx as usize);
+
+ Self::encode_cache_set(&ctx, start_idx);
+ Self::decode_cache_set(&ctx, start_idx);
+
+ ctx
+ }
+
+ // Store an entry in a cache of recently encoded/decoded contexts for encoding
+ fn encode_cache_set(ctx: &Context, idx: u32)
+ {
+ // Compute the hash for this context
+ let mut hasher = DefaultHasher::new();
+ ctx.hash(&mut hasher);
+ let ctx_hash = hasher.finish() as usize;
+
+ unsafe {
+ // Lazily initialize the context cache
+ if CTX_ENCODE_CACHE == None {
+ // Here we use the vec syntax to avoid allocating the large table on the stack,
+ // as this can cause a stack overflow
+ let tbl = vec![(Context::default(), 0); CTX_ENCODE_CACHE_SIZE].into_boxed_slice().try_into().unwrap();
+ CTX_ENCODE_CACHE = Some(tbl);
+ }
+
+ // Write a cache entry for this context
+ let cache = CTX_ENCODE_CACHE.as_mut().unwrap();
+ cache[ctx_hash % CTX_ENCODE_CACHE_SIZE] = (*ctx, idx);
+ }
+ }
+
+ // Store an entry in a cache of recently encoded/decoded contexts for decoding
+ fn decode_cache_set(ctx: &Context, idx: u32) {
+ unsafe {
+ // Lazily initialize the context cache
+ if CTX_DECODE_CACHE == None {
+ // Here we use the vec syntax to avoid allocating the large table on the stack,
+ // as this can cause a stack overflow
+ let tbl = vec![(Context::default(), 0); CTX_DECODE_CACHE_SIZE].into_boxed_slice().try_into().unwrap();
+ CTX_DECODE_CACHE = Some(tbl);
+ }
+
+ // Write a cache entry for this context
+ let cache = CTX_DECODE_CACHE.as_mut().unwrap();
+ cache[idx as usize % CTX_DECODE_CACHE_SIZE] = (*ctx, idx);
+ }
+ }
+
+ // Lookup the context in a cache of recently encoded/decoded contexts for encoding
+ fn encode_cache_get(ctx: &Context) -> Option<u32>
+ {
+ // Compute the hash for this context
+ let mut hasher = DefaultHasher::new();
+ ctx.hash(&mut hasher);
+ let ctx_hash = hasher.finish() as usize;
+
+ unsafe {
+ if CTX_ENCODE_CACHE == None {
+ return None;
+ }
+
+ let cache = CTX_ENCODE_CACHE.as_mut().unwrap();
+
+ // Check that the context for this cache entry matches
+ let cache_entry = &cache[ctx_hash % CTX_ENCODE_CACHE_SIZE];
+ if cache_entry.0 == *ctx {
+ debug_assert!(cache_entry.1 != 0);
+ return Some(cache_entry.1);
+ }
+
+ return None;
+ }
+ }
+
+ // Lookup the context in a cache of recently encoded/decoded contexts for decoding
+ fn decode_cache_get(start_idx: u32) -> Option<Context> {
+ unsafe {
+ if CTX_DECODE_CACHE == None {
+ return None;
+ }
+
+ let cache = CTX_DECODE_CACHE.as_mut().unwrap();
+
+ // Check that the start_idx for this cache entry matches
+ let cache_entry = &cache[start_idx as usize % CTX_DECODE_CACHE_SIZE];
+ if cache_entry.1 == start_idx {
+ return Some(cache_entry.0);
+ }
+
+ return None;
+ }
+ }
+
+ // Encode into a compressed context representation in a bit vector
+ fn encode_into(&self, bits: &mut BitVector) -> usize {
+ let start_idx = bits.num_bits();
+
+ // Most of the time, the stack size is small and sp offset has the same value
+ if (self.stack_size as i64) == (self.sp_offset as i64) && self.stack_size < 4 {
+ // One single bit to signify a compact stack_size/sp_offset encoding
+ debug_assert!(self.sp_offset >= 0);
+ bits.push_u1(1);
+ bits.push_u2(self.stack_size);
+ } else {
+ // Full stack size encoding
+ bits.push_u1(0);
+
+ // Number of values currently on the temporary stack
+ bits.push_u8(self.stack_size);
+
+ // sp_offset: i8,
+ bits.push_u8(self.sp_offset as u8);
+ }
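+        // The compact form above costs 3 bits (marker + u2) versus 17 bits
+        // (marker + two u8s) for the full form.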
+
+ // Which stack temps or locals are in a register
+ for &temp in self.reg_mapping.0.iter() {
+ if let Some(temp) = temp {
+ bits.push_u1(1); // Some
+ match temp {
+ RegOpnd::Stack(stack_idx) => {
+ bits.push_u1(0); // Stack
+ bits.push_u3(stack_idx);
+ }
+ RegOpnd::Local(local_idx) => {
+ bits.push_u1(1); // Local
+ bits.push_u3(local_idx);
+ }
+ }
+ } else {
+ bits.push_u1(0); // None
+ }
+ }
+
+ bits.push_bool(self.is_deferred);
+ bits.push_bool(self.is_return_landing);
+
+ // The chain depth is most often 0 or 1
+ if self.chain_depth < 2 {
+ bits.push_u1(0);
+ bits.push_u1(self.chain_depth);
+ } else {
+ bits.push_u1(1);
+ bits.push_u5(self.chain_depth);
+ }
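+        // e.g. chain_depth == 1 costs 2 bits while chain_depth == 5 costs 6 bits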
+
+ // Encode the self type if known
+ if self.self_type != Type::Unknown {
+ bits.push_op(CtxOp::SetSelfType);
+ bits.push_u4(self.self_type as u8);
+ }
+
+ // Encode the local types if known
+ for local_idx in 0..MAX_CTX_LOCALS {
+ let t = self.get_local_type(local_idx);
+ if t != Type::Unknown {
+ bits.push_op(CtxOp::SetLocalType);
+ bits.push_u3(local_idx as u8);
+ bits.push_u4(t as u8);
+ }
+ }
+
+ // Encode stack temps
+ for stack_idx in 0..MAX_CTX_TEMPS {
+ let mapping = self.get_temp_mapping(stack_idx);
+
+ match mapping {
+ MapToStack(temp_type) => {
+ if temp_type != Type::Unknown {
+ // Temp idx (3 bits), known type (4 bits)
+ bits.push_op(CtxOp::SetTempType);
+ bits.push_u3(stack_idx as u8);
+ bits.push_u4(temp_type as u8);
+ }
+ }
+
+ MapToLocal(local_idx) => {
+ bits.push_op(CtxOp::MapTempLocal);
+ bits.push_u3(stack_idx as u8);
+ bits.push_u3(local_idx);
+ }
+
+ MapToSelf => {
+ // Temp idx (3 bits)
+ bits.push_op(CtxOp::MapTempSelf);
+ bits.push_u3(stack_idx as u8);
+ }
+ }
+ }
+
+ // Inline block pointer
+ if let Some(iseq) = self.inline_block {
+ bits.push_op(CtxOp::SetInlineBlock);
+ bits.push_uint(iseq as u64, 64);
+ }
+
+ // TODO: should we add an op for end-of-encoding,
+ // or store num ops at the beginning?
+ bits.push_op(CtxOp::EndOfCode);
+
+ start_idx
+ }
+
+ // Decode a compressed context representation from a bit vector
+ fn decode_from(bits: &BitVector, start_idx: usize) -> Context {
+ let mut ctx = Context::default();
+
+ let mut idx = start_idx;
+
+ // Small vs large stack size encoding
+ if bits.read_u1(&mut idx) == 1 {
+ ctx.stack_size = bits.read_u2(&mut idx);
+ ctx.sp_offset = ctx.stack_size as i8;
+ } else {
+ ctx.stack_size = bits.read_u8(&mut idx);
+ let sp_offset_bits = bits.read_u8(&mut idx);
+ ctx.sp_offset = sp_offset_bits as i8;
+
+ // If the top bit is set, then the sp offset must be negative
+ debug_assert!(!( (sp_offset_bits & 0x80) != 0 && ctx.sp_offset > 0 ));
+ }
+
+ // Which stack temps or locals are in a register
+ for index in 0..MAX_MAPPED_REGS {
+ if bits.read_u1(&mut idx) == 1 { // Some
+                let temp = if bits.read_u1(&mut idx) == 0 { // RegOpnd::Stack
+ RegOpnd::Stack(bits.read_u3(&mut idx))
+ } else {
+ RegOpnd::Local(bits.read_u3(&mut idx))
+ };
+ ctx.reg_mapping.0[index] = Some(temp);
+ }
+ }
+
+ ctx.is_deferred = bits.read_bool(&mut idx);
+ ctx.is_return_landing = bits.read_bool(&mut idx);
+
+ if bits.read_u1(&mut idx) == 0 {
+ ctx.chain_depth = bits.read_u1(&mut idx)
+ } else {
+ ctx.chain_depth = bits.read_u5(&mut idx)
+ }
+
+ loop {
+            let op = bits.read_op(&mut idx);
+
+ match op {
+ CtxOp::SetSelfType => {
+ ctx.self_type = unsafe { transmute(bits.read_u4(&mut idx)) };
+ }
+
+ CtxOp::SetLocalType => {
+ let local_idx = bits.read_u3(&mut idx) as usize;
+ let t = unsafe { transmute(bits.read_u4(&mut idx)) };
+ ctx.set_local_type(local_idx, t);
+ }
+
+ // Map temp to stack (known type)
+ CtxOp::SetTempType => {
+ let temp_idx = bits.read_u3(&mut idx) as usize;
+ let temp_type = unsafe { transmute(bits.read_u4(&mut idx)) };
+ ctx.set_temp_mapping(temp_idx, TempMapping::MapToStack(temp_type));
+ }
+
+ // Map temp to local
+ CtxOp::MapTempLocal => {
+ let temp_idx = bits.read_u3(&mut idx) as usize;
+ let local_idx = bits.read_u3(&mut idx);
+ ctx.set_temp_mapping(temp_idx, TempMapping::MapToLocal(local_idx));
+ }
+
+ // Map temp to self
+ CtxOp::MapTempSelf => {
+ let temp_idx = bits.read_u3(&mut idx) as usize;
+ ctx.set_temp_mapping(temp_idx, TempMapping::MapToSelf);
+ }
+
+ // Inline block pointer
+ CtxOp::SetInlineBlock => {
+ ctx.inline_block = Some(bits.read_uint(&mut idx, 64) as IseqPtr);
+ }
+
+ CtxOp::EndOfCode => break,
+ }
+ }
+
+ ctx
+ }
+}
+
+/// Tuple of (iseq, idx) used to identify basic blocks
+/// There are a lot of blockid objects so we try to keep the size small.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[repr(packed)]
+pub struct BlockId {
+ /// Instruction sequence
+ pub iseq: IseqPtr,
+
+ /// Index in the iseq where the block starts
+ pub idx: u16,
+}
+
+/// Branch code shape enumeration
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum BranchShape {
+ Next0, // Target 0 is next
+ Next1, // Target 1 is next
+ Default, // Neither target is next
+}
+
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum BranchGenFn {
+ BranchIf(Cell<BranchShape>),
+ BranchNil(Cell<BranchShape>),
+ BranchUnless(Cell<BranchShape>),
+ JumpToTarget0(Cell<BranchShape>),
+ JNZToTarget0,
+ JZToTarget0,
+ JBEToTarget0,
+ JBToTarget0,
+ JOMulToTarget0,
+ JITReturn,
+}
+
+impl BranchGenFn {
+ pub fn call(&self, asm: &mut Assembler, target0: Target, target1: Option<Target>) {
+ match self {
+ BranchGenFn::BranchIf(shape) => {
+ match shape.get() {
+ BranchShape::Next0 => asm.jz(target1.unwrap()),
+ BranchShape::Next1 => asm.jnz(target0),
+ BranchShape::Default => {
+ asm.jnz(target0);
+ asm.jmp(target1.unwrap());
+ }
+ }
+ }
+ BranchGenFn::BranchNil(shape) => {
+ match shape.get() {
+ BranchShape::Next0 => asm.jne(target1.unwrap()),
+ BranchShape::Next1 => asm.je(target0),
+ BranchShape::Default => {
+ asm.je(target0);
+ asm.jmp(target1.unwrap());
+ }
+ }
+ }
+ BranchGenFn::BranchUnless(shape) => {
+ match shape.get() {
+ BranchShape::Next0 => asm.jnz(target1.unwrap()),
+ BranchShape::Next1 => asm.jz(target0),
+ BranchShape::Default => {
+ asm.jz(target0);
+ asm.jmp(target1.unwrap());
+ }
+ }
+ }
+ BranchGenFn::JumpToTarget0(shape) => {
+ if shape.get() == BranchShape::Next1 {
+ panic!("Branch shape Next1 not allowed in JumpToTarget0!");
+ }
+ if shape.get() == BranchShape::Default {
+ asm.jmp(target0);
+ }
+ }
+ BranchGenFn::JNZToTarget0 => {
+ asm.jnz(target0)
+ }
+ BranchGenFn::JZToTarget0 => {
+ asm.jz(target0)
+ }
+ BranchGenFn::JBEToTarget0 => {
+ asm.jbe(target0)
+ }
+ BranchGenFn::JBToTarget0 => {
+ asm.jb(target0)
+ }
+ BranchGenFn::JOMulToTarget0 => {
+ asm.jo_mul(target0)
+ }
+ BranchGenFn::JITReturn => {
+ asm_comment!(asm, "update cfp->jit_return");
+ let jit_return = RUBY_OFFSET_CFP_JIT_RETURN - RUBY_SIZEOF_CONTROL_FRAME as i32;
+ let raw_ptr = asm.lea_jump_target(target0);
+ asm.mov(Opnd::mem(64, CFP, jit_return), raw_ptr);
+ }
+ }
+ }
+
+ pub fn get_shape(&self) -> BranchShape {
+ match self {
+ BranchGenFn::BranchIf(shape) |
+ BranchGenFn::BranchNil(shape) |
+ BranchGenFn::BranchUnless(shape) |
+ BranchGenFn::JumpToTarget0(shape) => shape.get(),
+ BranchGenFn::JNZToTarget0 |
+ BranchGenFn::JZToTarget0 |
+ BranchGenFn::JBEToTarget0 |
+ BranchGenFn::JBToTarget0 |
+ BranchGenFn::JOMulToTarget0 |
+ BranchGenFn::JITReturn => BranchShape::Default,
+ }
+ }
+
+ pub fn set_shape(&self, new_shape: BranchShape) {
+ match self {
+ BranchGenFn::BranchIf(shape) |
+ BranchGenFn::BranchNil(shape) |
+ BranchGenFn::BranchUnless(shape) => {
+ shape.set(new_shape);
+ }
+ BranchGenFn::JumpToTarget0(shape) => {
+ if new_shape == BranchShape::Next1 {
+ panic!("Branch shape Next1 not allowed in JumpToTarget0!");
+ }
+ shape.set(new_shape);
+ }
+ BranchGenFn::JNZToTarget0 |
+ BranchGenFn::JZToTarget0 |
+ BranchGenFn::JBEToTarget0 |
+ BranchGenFn::JBToTarget0 |
+ BranchGenFn::JOMulToTarget0 |
+ BranchGenFn::JITReturn => {
+ assert_eq!(new_shape, BranchShape::Default);
+ }
+ }
+ }
+}
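+
+// For example, BranchGenFn::BranchIf with shape Next0 emits only a single
+// `jz` to target1: target0's code is laid out immediately after the branch,
+// so the truthy path falls through and only the falsy path needs a jump.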
+
+/// A place that a branch could jump to
+#[derive(Debug, Clone)]
+enum BranchTarget {
+ Stub(Box<BranchStub>), // Not compiled yet
+ Block(BlockRef), // Already compiled
+}
+
+impl BranchTarget {
+ fn get_address(&self) -> Option<CodePtr> {
+ match self {
+ BranchTarget::Stub(stub) => stub.address,
+ BranchTarget::Block(blockref) => Some(unsafe { blockref.as_ref() }.start_addr),
+ }
+ }
+
+ fn get_blockid(&self) -> BlockId {
+ match self {
+ BranchTarget::Stub(stub) => BlockId { iseq: stub.iseq.get(), idx: stub.iseq_idx },
+ BranchTarget::Block(blockref) => unsafe { blockref.as_ref() }.get_blockid(),
+ }
+ }
+
+ fn get_ctx(&self) -> u32 {
+ match self {
+ BranchTarget::Stub(stub) => stub.ctx,
+ BranchTarget::Block(blockref) => unsafe { blockref.as_ref() }.ctx,
+ }
+ }
+
+ fn get_block(&self) -> Option<BlockRef> {
+ match self {
+ BranchTarget::Stub(_) => None,
+ BranchTarget::Block(blockref) => Some(*blockref),
+ }
+ }
+
+ fn set_iseq(&self, iseq: IseqPtr) {
+ match self {
+ BranchTarget::Stub(stub) => stub.iseq.set(iseq),
+ BranchTarget::Block(blockref) => unsafe { blockref.as_ref() }.iseq.set(iseq),
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+struct BranchStub {
+ address: Option<CodePtr>,
+ iseq: Cell<IseqPtr>,
+ iseq_idx: IseqIdx,
+ ctx: u32,
+}
+
+/// Store info about an outgoing branch in a code segment
+/// Note: care must be taken to minimize the size of branch objects
+pub struct Branch {
+ // Block this is attached to
+ block: Cell<BlockRef>,
+
+ // Positions where the generated code starts and ends
+ start_addr: CodePtr,
+ end_addr: Cell<CodePtr>, // exclusive
+
+ // Branch target blocks and their contexts
+ targets: [Cell<Option<Box<BranchTarget>>>; 2],
+
+ // Branch code generation function
+ gen_fn: BranchGenFn,
+}
+
+/// A [Branch] for a [Block] that is under construction.
+/// Fields correspond, but may be `None` during construction.
+pub struct PendingBranch {
+    /// Allocation holder for the address of the constructed branch;
+    /// on error paths, the Box deallocates it.
+ uninit_branch: Box<MaybeUninit<Branch>>,
+
+ /// Branch code generation function
+ gen_fn: BranchGenFn,
+
+ /// Positions where the generated code starts and ends
+ start_addr: Cell<Option<CodePtr>>,
+ end_addr: Cell<Option<CodePtr>>, // exclusive
+
+ /// Branch target blocks and their contexts
+ targets: [Cell<Option<Box<BranchTarget>>>; 2],
+}
+
+impl Branch {
+ // Compute the size of the branch code
+ fn code_size(&self) -> usize {
+ (self.end_addr.get().as_offset() - self.start_addr.as_offset()) as usize
+ }
+
+ /// Get the address of one of the branch destination
+ fn get_target_address(&self, target_idx: usize) -> Option<CodePtr> {
+ unsafe {
+ self.targets[target_idx]
+ .ref_unchecked()
+ .as_ref()
+ .and_then(|target| target.get_address())
+ }
+ }
+
+ fn get_stub_count(&self) -> usize {
+ let mut count = 0;
+ for target in self.targets.iter() {
+ if unsafe {
+ // SAFETY: no mutation
+ matches!(
+ target.ref_unchecked().as_ref().map(Box::as_ref),
+ Some(BranchTarget::Stub(_))
+ )
+ } {
+ count += 1;
+ }
+ }
+ count
+ }
+
+ fn assert_layout(&self) {
+ let shape = self.gen_fn.get_shape();
+ assert!(
+ !(shape == BranchShape::Default && 0 == self.code_size()),
+ "zero-size branches are incorrect when code for neither targets are adjacent"
+ // One needs to issue some instruction to steer to the branch target
+ // when falling through isn't an option.
+ );
+ }
+}
+
+impl std::fmt::Debug for Branch {
+ // Can't derive this because `targets: !Copy` due to Cell.
+ fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let targets = unsafe {
+ // SAFETY:
+ // While the references are live for the result of this function,
+ // no mutation happens because we are only calling derived fmt::Debug functions.
+ [self.targets[0].as_ptr().as_ref().unwrap(), self.targets[1].as_ptr().as_ref().unwrap()]
+ };
+
+ formatter
+ .debug_struct("Branch")
+ .field("block", &self.block)
+ .field("start", &self.start_addr)
+ .field("end", &self.end_addr)
+ .field("targets", &targets)
+ .field("gen_fn", &self.gen_fn)
+ .finish()
+ }
+}
+
+impl PendingBranch {
+ /// Set up a branch target at `target_idx`. Find an existing block to branch to
+ /// or generate a stub for one.
+ #[must_use]
+ fn set_target(
+ &self,
+ target_idx: u32,
+ target: BlockId,
+ ctx: &Context,
+ jit: &mut JITState,
+ ) -> Option<CodePtr> {
+ // If the block already exists
+ if let Some(blockref) = find_block_version(target, ctx) {
+ let block = unsafe { blockref.as_ref() };
+
+ // Fill out the target with this block
+ self.targets[target_idx.as_usize()]
+ .set(Some(Box::new(BranchTarget::Block(blockref))));
+ return Some(block.start_addr);
+ }
+
+ // Compress/encode the context
+ let ctx = Context::encode(ctx);
+
+        // The branch struct is uninitialized right now but has a stable address.
+ // We make sure the stub runs after the branch is initialized.
+ let branch_struct_addr = self.uninit_branch.as_ptr() as usize;
+ let stub_addr = gen_branch_stub(ctx, jit.iseq, jit.get_ocb(), branch_struct_addr, target_idx);
+
+ if let Some(stub_addr) = stub_addr {
+ // Fill the branch target with a stub
+ self.targets[target_idx.as_usize()].set(Some(Box::new(BranchTarget::Stub(Box::new(BranchStub {
+ address: Some(stub_addr),
+ iseq: Cell::new(target.iseq),
+ iseq_idx: target.idx,
+ ctx,
+ })))));
+ }
+
+ stub_addr
+ }
+
+    // Construct the branch and wire it up in the graph
+ fn into_branch(mut self, uninit_block: BlockRef) -> BranchRef {
+ // Make the branch
+ let branch = Branch {
+ block: Cell::new(uninit_block),
+ start_addr: self.start_addr.get().unwrap(),
+ end_addr: Cell::new(self.end_addr.get().unwrap()),
+ targets: self.targets,
+ gen_fn: self.gen_fn,
+ };
+ // Move it to the designated place on
+ // the heap and unwrap MaybeUninit.
+ self.uninit_branch.write(branch);
+ let raw_branch: *mut MaybeUninit<Branch> = Box::into_raw(self.uninit_branch);
+ let branchref = NonNull::new(raw_branch as *mut Branch).expect("no null from Box");
+
+ // SAFETY: just allocated it
+ let branch = unsafe { branchref.as_ref() };
+ // For block branch targets, put the new branch in the
+ // appropriate incoming list.
+ for target in branch.targets.iter() {
+ // SAFETY: no mutation
+ let out_block: Option<BlockRef> = unsafe {
+ target.ref_unchecked().as_ref().and_then(|target| target.get_block())
+ };
+
+ if let Some(out_block) = out_block {
+ // SAFETY: These blockrefs come from set_target() which only puts blocks from
+ // ISeqs, which are all initialized. Note that uninit_block isn't in any ISeq
+ // payload yet.
+ unsafe { out_block.as_ref() }.incoming.push(branchref);
+ }
+ }
+
+ branch.assert_layout();
+ incr_counter!(compiled_branch_count);
+
+ branchref
+ }
+}
+
+// Store info about code used on YJIT entry
+pub struct Entry {
+ // Positions where the generated code starts and ends
+ start_addr: CodePtr,
+ end_addr: CodePtr, // exclusive
+}
+
+/// A [Branch] for a [Block] that is under construction.
+pub struct PendingEntry {
+ pub uninit_entry: Box<MaybeUninit<Entry>>,
+ start_addr: Cell<Option<CodePtr>>,
+ end_addr: Cell<Option<CodePtr>>, // exclusive
+}
+
+impl PendingEntry {
+ // Construct the entry in the heap
+ pub fn into_entry(mut self) -> EntryRef {
+ // Make the entry
+ let entry = Entry {
+ start_addr: self.start_addr.get().unwrap(),
+ end_addr: self.end_addr.get().unwrap(),
+ };
+ // Move it to the designated place on the heap and unwrap MaybeUninit.
+ self.uninit_entry.write(entry);
+ let raw_entry: *mut MaybeUninit<Entry> = Box::into_raw(self.uninit_entry);
+ NonNull::new(raw_entry as *mut Entry).expect("no null from Box")
+ }
+}
+
+// In case a block is invalidated, this helps to remove all pointers to the block.
+pub type CmePtr = *const rb_callable_method_entry_t;
+
+/// Basic block version
+/// Represents a portion of an iseq compiled with a given context
+/// Note: care must be taken to minimize the size of block_t objects
+#[derive(Debug)]
+pub struct Block {
+ // The byte code instruction sequence this is a version of.
+ // Can change due to moving GC.
+ iseq: Cell<IseqPtr>,
+
+ // Index range covered by this version in `ISEQ_BODY(iseq)->iseq_encoded`.
+ iseq_range: Range<IseqIdx>,
+
+ // Context at the start of the block
+ // This should never be mutated
+ ctx: u32,
+
+ // Positions where the generated code starts and ends
+ start_addr: CodePtr,
+ end_addr: Cell<CodePtr>,
+
+ // List of incoming branches (from predecessors)
+ incoming: MutableBranchList,
+
+ // List of outgoing branches (to successors)
+ // Infrequently mutated for control flow graph edits for saving memory.
+ outgoing: MutableBranchList,
+
+ // FIXME: should these be code pointers instead?
+ // Offsets for GC managed objects in the mainline code block
+ gc_obj_offsets: Box<[u32]>,
+
+ // CME dependencies of this block, to help to remove all pointers to this
+ // block in the system.
+ cme_dependencies: Box<[Cell<CmePtr>]>,
+
+ // Code address of an exit for `ctx` and `blockid`.
+ // Used for block invalidation.
+ entry_exit: Option<CodePtr>,
+}
+
+/// Pointer to a [Block].
+///
+/// # Safety
+///
+/// _Never_ derive a `&mut Block` from this and always use
+/// [std::ptr::NonNull::as_ref] to get a `&Block`. `&'a mut`
+/// in Rust asserts that there are no other references live
+/// over the lifetime `'a`. This uniqueness assertion does
+/// not hold in many situations for us, even when you ignore
+/// the fact that our control flow graph can have cycles.
+/// Here are just two examples where we have overlapping references:
+/// - Yielding to a different OS thread within the same
+/// ractor during compilation
+/// - The GC calling [rb_yjit_iseq_mark] during compilation
+///
+/// Technically, for soundness, we also need to ensure that
+/// we hold the VM lock while the result of `as_ref()`
+/// is live, so that no deallocation happens while the
+/// shared reference is live. The vast majority of our code runs while
+/// holding the VM lock, though.
+pub type BlockRef = NonNull<Block>;
+
+/// Pointer to a [Branch]. See [BlockRef] for notes about
+/// proper usage.
+pub type BranchRef = NonNull<Branch>;
+
+/// Pointer to an entry that is already added to an ISEQ
+pub type EntryRef = NonNull<Entry>;
+
+/// List of block versions for a given blockid
+type VersionList = Vec<BlockRef>;
+
+/// Map from iseq indices to lists of versions for that given blockid
+/// An instance of this is stored on each iseq
+type VersionMap = Vec<VersionList>;
+
+/// [Interior mutability][1] wrapper for a list of branches.
+/// O(n) insertion, but space efficient. We generally expect
+/// blocks to have only a few branches.
+///
+/// [1]: https://doc.rust-lang.org/std/cell/struct.UnsafeCell.html
+#[repr(transparent)]
+struct MutableBranchList(Cell<Box<[BranchRef]>>);
+
+impl MutableBranchList {
+ fn push(&self, branch: BranchRef) {
+        // Temporarily move the boxed slice out of self.
+ // oom=abort is load bearing here...
+ let mut current_list = self.0.take().into_vec();
+ current_list.push(branch);
+ self.0.set(current_list.into_boxed_slice());
+ }
+
+ /// Iterate through branches in the list by moving out of the cell
+ /// and then putting it back when done. Modifications to this cell
+ /// during iteration will be discarded.
+ ///
+ /// Assumes panic=abort since panic=unwind during iteration would
+ /// leave the cell empty.
+ fn for_each(&self, mut f: impl FnMut(BranchRef)) {
+ let list = self.0.take();
+ for branch in list.iter() {
+ f(*branch);
+ }
+ self.0.set(list);
+ }
+
+ /// Length of the list.
+ fn len(&self) -> usize {
+ // SAFETY: No cell mutation inside unsafe.
+ unsafe { self.0.ref_unchecked().len() }
+ }
+}
+
+impl fmt::Debug for MutableBranchList {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // SAFETY: the derived Clone for boxed slices does not mutate this Cell
+ let branches = unsafe { self.0.ref_unchecked().clone() };
+
+ formatter.debug_list().entries(branches.iter()).finish()
+ }
+}
+
+/// This is all the data YJIT stores on an iseq
+/// This will be dynamically allocated by C code
+/// C code should pass an &mut IseqPayload to us
+/// when calling into YJIT
+#[derive(Default)]
+pub struct IseqPayload {
+ // Basic block versions
+ pub version_map: VersionMap,
+
+ // Indexes of code pages used by this ISEQ
+ pub pages: HashSet<usize>,
+
+ // List of ISEQ entry codes
+ pub entries: Vec<EntryRef>,
+
+ // Blocks that are invalidated but are not yet deallocated.
+ // The code GC will free them later.
+ pub dead_blocks: Vec<BlockRef>,
+}
+
+impl IseqPayload {
+ /// Remove all block versions from the payload and then return them as an iterator
+ pub fn take_all_blocks(&mut self) -> impl Iterator<Item = BlockRef> {
+ // Empty the blocks
+ let version_map = mem::take(&mut self.version_map);
+
+ // Turn it into an iterator that owns the blocks and return
+ version_map.into_iter().flatten()
+ }
+}
+
+/// Get the payload for an iseq. For safety it's up to the caller to ensure the returned `&mut`
+/// upholds aliasing rules and that the argument is a valid iseq.
+pub fn get_iseq_payload(iseq: IseqPtr) -> Option<&'static mut IseqPayload> {
+ let payload = unsafe { rb_iseq_get_yjit_payload(iseq) };
+ let payload: *mut IseqPayload = payload.cast();
+ unsafe { payload.as_mut() }
+}
+
+/// Get the payload object associated with an iseq. Create one if none exists.
+pub fn get_or_create_iseq_payload(iseq: IseqPtr) -> &'static mut IseqPayload {
+ type VoidPtr = *mut c_void;
+
+ let payload_non_null = unsafe {
+ let payload = rb_iseq_get_yjit_payload(iseq);
+ if payload.is_null() {
+ // Increment the compiled iseq count
+ incr_counter!(compiled_iseq_count);
+
+ // Allocate a new payload with Box and transfer ownership to the GC.
+ // We drop the payload with Box::from_raw when the GC frees the iseq and calls us.
+ // NOTE(alan): Sometimes we read from an iseq without ever writing to it.
+ // We allocate in those cases anyways.
+ let new_payload = IseqPayload::default();
+ let new_payload = Box::into_raw(Box::new(new_payload));
+ rb_iseq_set_yjit_payload(iseq, new_payload as VoidPtr);
+
+ new_payload
+ } else {
+ payload as *mut IseqPayload
+ }
+ };
+
+ // SAFETY: we should have the VM lock and all other Ruby threads should be asleep. So we have
+ // exclusive mutable access.
+ // Hmm, nothing seems to stop calling this on the same
+ // iseq twice, though, which violates aliasing rules.
+ unsafe { payload_non_null.as_mut() }.unwrap()
+}
+
+/// Iterate over all existing ISEQs
+pub fn for_each_iseq<F: FnMut(IseqPtr)>(mut callback: F) {
+ unsafe extern "C" fn callback_wrapper(iseq: IseqPtr, data: *mut c_void) {
+ // SAFETY: points to the local below
+ let callback: &mut &mut dyn FnMut(IseqPtr) -> bool = unsafe { std::mem::transmute(&mut *data) };
+ callback(iseq);
+ }
+ let mut data: &mut dyn FnMut(IseqPtr) = &mut callback;
+ unsafe { rb_jit_for_each_iseq(Some(callback_wrapper), (&mut data) as *mut _ as *mut c_void) };
+}
+
+/// Iterate over all on-stack ISEQs
+pub fn for_each_on_stack_iseq<F: FnMut(IseqPtr)>(mut callback: F) {
+ unsafe extern "C" fn callback_wrapper(iseq: IseqPtr, data: *mut c_void) {
+ // SAFETY: points to the local below
+ let callback: &mut &mut dyn FnMut(IseqPtr) -> bool = unsafe { std::mem::transmute(&mut *data) };
+ callback(iseq);
+ }
+ let mut data: &mut dyn FnMut(IseqPtr) = &mut callback;
+ unsafe { rb_jit_cont_each_iseq(Some(callback_wrapper), (&mut data) as *mut _ as *mut c_void) };
+}
+
+/// Iterate over all on-stack ISEQ payloads
+pub fn for_each_on_stack_iseq_payload<F: FnMut(&IseqPayload)>(mut callback: F) {
+ for_each_on_stack_iseq(|iseq| {
+ if let Some(iseq_payload) = get_iseq_payload(iseq) {
+ callback(iseq_payload);
+ }
+ });
+}
+
+/// Iterate over all NOT on-stack ISEQ payloads
+pub fn for_each_off_stack_iseq_payload<F: FnMut(&mut IseqPayload)>(mut callback: F) {
+ // Get all ISEQs on the heap. Note that rb_objspace_each_objects() runs GC first,
+ // which could move ISEQ pointers when GC.auto_compact = true.
+ // So for_each_on_stack_iseq() must be called after this, which doesn't run GC.
+ let mut iseqs: Vec<IseqPtr> = vec![];
+ for_each_iseq(|iseq| iseqs.push(iseq));
+
+ // Get all ISEQs that are on a CFP of existing ECs.
+ let mut on_stack_iseqs: HashSet<IseqPtr> = HashSet::new();
+ for_each_on_stack_iseq(|iseq| { on_stack_iseqs.insert(iseq); });
+
+ // Invoke the callback for iseqs - on_stack_iseqs
+ for iseq in iseqs {
+ if !on_stack_iseqs.contains(&iseq) {
+ if let Some(iseq_payload) = get_iseq_payload(iseq) {
+ callback(iseq_payload);
+ }
+ }
+ }
+}
+
+/// Free the per-iseq payload
+#[no_mangle]
+pub extern "C" fn rb_yjit_iseq_free(iseq: IseqPtr) {
+ // Free invariants for the ISEQ
+ iseq_free_invariants(iseq);
+
+ let payload = {
+ let payload = unsafe { rb_iseq_get_yjit_payload(iseq) };
+ if payload.is_null() {
+ // Nothing to free.
+ return;
+ } else {
+ payload as *mut IseqPayload
+ }
+ };
+
+ // Take ownership of the payload with Box::from_raw().
+ // It drops right before this function returns.
+ // SAFETY: We got the pointer from Box::into_raw().
+ let payload = unsafe { Box::from_raw(payload) };
+
+ // Free all blocks in version_map. The GC doesn't free running iseqs.
+ for versions in &payload.version_map {
+ for block in versions {
+ // SAFETY: blocks in the version_map are always well connected
+ unsafe { free_block(*block, true) };
+ }
+ }
+
+ // Free dead blocks
+ for block in payload.dead_blocks {
+ unsafe { free_block(block, false) };
+ }
+
+ // Free all entries
+ for entryref in payload.entries.iter() {
+ let entry = unsafe { Box::from_raw(entryref.as_ptr()) };
+ mem::drop(entry);
+ }
+
+ // Increment the freed iseq count
+ incr_counter!(freed_iseq_count);
+}
+
+/// GC callback for marking GC objects in the per-iseq payload.
+#[no_mangle]
+pub extern "C" fn rb_yjit_iseq_mark(payload: *mut c_void) {
+ let payload = if payload.is_null() {
+ // Nothing to mark.
+ return;
+ } else {
+ // SAFETY: The GC takes the VM lock while marking, which
+ // we assert, so we should be synchronized and data race free.
+ //
+ // For aliasing, having the VM lock hopefully also implies that no one
+ // else has an overlapping &mut IseqPayload.
+ unsafe {
+ rb_assert_holding_vm_lock();
+ &*(payload as *const IseqPayload)
+ }
+ };
+
+ // For marking VALUEs written into the inline code block.
+ // We don't write VALUEs in the outlined block.
+ let cb: &CodeBlock = CodegenGlobals::get_inline_cb();
+
+ for versions in &payload.version_map {
+ for block in versions {
+ // SAFETY: all blocks inside version_map are initialized.
+ let block = unsafe { block.as_ref() };
+ mark_block(block, cb, false);
+ }
+ }
+ // Mark dead blocks, since there could be stubs pointing at them
+ for blockref in &payload.dead_blocks {
+ // SAFETY: dead blocks come from version_map, which only have initialized blocks
+ let block = unsafe { blockref.as_ref() };
+ mark_block(block, cb, true);
+ }
+
+ return;
+
+ fn mark_block(block: &Block, cb: &CodeBlock, dead: bool) {
+ unsafe { rb_gc_mark_movable(block.iseq.get().into()) };
+
+ // Mark method entry dependencies
+ for cme_dep in block.cme_dependencies.iter() {
+ unsafe { rb_gc_mark_movable(cme_dep.get().into()) };
+ }
+
+ // Mark outgoing branch entries
+ block.outgoing.for_each(|branch| {
+ let branch = unsafe { branch.as_ref() };
+ for target in branch.targets.iter() {
+ // SAFETY: no mutation inside unsafe
+ let target_iseq = unsafe {
+ target.ref_unchecked().as_ref().and_then(|target| {
+ // Avoid get_blockid() on blockref. Can be dangling on dead blocks,
+ // and the iseq housing the block already naturally handles it.
+ if target.get_block().is_some() {
+ None
+ } else {
+ Some(target.get_blockid().iseq)
+ }
+ })
+ };
+
+ if let Some(target_iseq) = target_iseq {
+ unsafe { rb_gc_mark_movable(target_iseq.into()) };
+ }
+ }
+ });
+
+ // Mark references to objects in generated code.
+ // Skip for dead blocks since they shouldn't run.
+ if !dead {
+ for offset in block.gc_obj_offsets.iter() {
+ let value_address: *const u8 = cb.get_ptr(offset.as_usize()).raw_ptr(cb);
+ // Creating an unaligned pointer is well defined unlike in C.
+ let value_address = value_address as *const VALUE;
+
+ // SAFETY: these point to YJIT's code buffer
+ unsafe {
+ let object = value_address.read_unaligned();
+ rb_gc_mark_movable(object);
+ };
+ }
+ }
+ }
+}
+
+/// GC callback for updating GC objects in the per-iseq payload.
+/// This is a mirror of [rb_yjit_iseq_mark].
+#[no_mangle]
+pub extern "C" fn rb_yjit_iseq_update_references(iseq: IseqPtr) {
+ let payload = unsafe { rb_iseq_get_yjit_payload(iseq) };
+ let payload = if payload.is_null() {
+ // Nothing to update.
+ return;
+ } else {
+ // SAFETY: The GC takes the VM lock while marking, which
+ // we assert, so we should be synchronized and data race free.
+ //
+ // For aliasing, having the VM lock hopefully also implies that no one
+ // else has an overlapping &mut IseqPayload.
+ unsafe {
+ rb_assert_holding_vm_lock();
+ &*(payload as *const IseqPayload)
+ }
+ };
+
+ // Evict other threads from generated code since we are about to patch them.
+ // Also acts as an assert that we hold the VM lock.
+ unsafe { rb_vm_barrier() };
+
+ // For updating VALUEs written into the inline code block.
+ let cb = CodegenGlobals::get_inline_cb();
+
+ for versions in &payload.version_map {
+ for version in versions {
+ // SAFETY: all blocks inside version_map are initialized
+ let block = unsafe { version.as_ref() };
+ block_update_references(block, cb, false);
+ }
+ }
+ // Update dead blocks, since there could be stubs pointing at them
+ for blockref in &payload.dead_blocks {
+ // SAFETY: dead blocks come from version_map, which only have initialized blocks
+ let block = unsafe { blockref.as_ref() };
+ block_update_references(block, cb, true);
+ }
+
+ return;
+
+ fn block_update_references(block: &Block, cb: &mut CodeBlock, dead: bool) {
+ block.iseq.set(unsafe { rb_gc_location(block.iseq.get().into()) }.as_iseq());
+
+ // Update method entry dependencies
+ for cme_dep in block.cme_dependencies.iter() {
+ let cur_cme: VALUE = cme_dep.get().into();
+ let new_cme = unsafe { rb_gc_location(cur_cme) }.as_cme();
+ cme_dep.set(new_cme);
+ }
+
+ // Update outgoing branch entries
+ block.outgoing.for_each(|branch| {
+ let branch = unsafe { branch.as_ref() };
+ for target in branch.targets.iter() {
+ // SAFETY: no mutation inside unsafe
+ let current_iseq = unsafe {
+ target.ref_unchecked().as_ref().and_then(|target| {
+ // Avoid get_blockid() on blockref. Can be dangling on dead blocks,
+ // and the iseq housing the block already naturally handles it.
+ if target.get_block().is_some() {
+ None
+ } else {
+ Some(target.get_blockid().iseq)
+ }
+ })
+ };
+
+ if let Some(current_iseq) = current_iseq {
+ let updated_iseq = unsafe { rb_gc_location(current_iseq.into()) }
+ .as_iseq();
+ // SAFETY: the Cell::set is not on the reference given out
+ // by ref_unchecked.
+ unsafe { target.ref_unchecked().as_ref().unwrap().set_iseq(updated_iseq) };
+ }
+ }
+ });
+
+ // Update references to objects in generated code.
+ // Skip for dead blocks since they shouldn't run and
+ // so there is no potential of writing over invalidation jumps
+ if !dead {
+ for offset in block.gc_obj_offsets.iter() {
+ let offset_to_value = offset.as_usize();
+ let value_code_ptr = cb.get_ptr(offset_to_value);
+ let value_ptr: *const u8 = value_code_ptr.raw_ptr(cb);
+ // Creating an unaligned pointer is well defined unlike in C.
+ let value_ptr = value_ptr as *mut VALUE;
+
+ // SAFETY: these point to YJIT's code buffer
+ let object = unsafe { value_ptr.read_unaligned() };
+ let new_addr = unsafe { rb_gc_location(object) };
+
+ // Only write when the VALUE moves, to be copy-on-write friendly.
+ if new_addr != object {
+ // SAFETY: Since we already set code memory writable before the compacting phase,
+ // we can use raw memory accesses directly.
+ unsafe { value_ptr.write_unaligned(new_addr); }
+ }
+ }
+ }
+
+ }
+}
+
+/// Mark all code memory as writable.
+/// This function is useful for garbage collectors that update references in JIT-compiled code in
+/// bulk.
+#[no_mangle]
+pub extern "C" fn rb_yjit_mark_all_writeable() {
+ if CodegenGlobals::has_instance() {
+ CodegenGlobals::get_inline_cb().mark_all_writeable();
+
+ CodegenGlobals::get_outlined_cb()
+ .unwrap()
+ .mark_all_writeable();
+ }
+}
+
+/// Mark all code memory as executable.
+/// This function is useful for garbage collectors that update references in JIT-compiled code in
+/// bulk.
+#[no_mangle]
+pub extern "C" fn rb_yjit_mark_all_executable() {
+ if CodegenGlobals::has_instance() {
+ CodegenGlobals::get_inline_cb().mark_all_executable();
+
+ CodegenGlobals::get_outlined_cb()
+ .unwrap()
+ .mark_all_executable();
+ }
+}
+
+/// Get all blocks for a particular place in an iseq.
+fn get_version_list(blockid: BlockId) -> Option<&'static mut VersionList> {
+ let insn_idx = blockid.idx.as_usize();
+ match get_iseq_payload(blockid.iseq) {
+ Some(payload) if insn_idx < payload.version_map.len() => {
+ Some(payload.version_map.get_mut(insn_idx).unwrap())
+ },
+ _ => None
+ }
+}
+
+/// Get or create all blocks for a particular place in an iseq.
+fn get_or_create_version_list(blockid: BlockId) -> &'static mut VersionList {
+ let payload = get_or_create_iseq_payload(blockid.iseq);
+ let insn_idx = blockid.idx.as_usize();
+
+ // Expand the version map as necessary
+ if insn_idx >= payload.version_map.len() {
+ payload
+ .version_map
+ .resize(insn_idx + 1, VersionList::default());
+ }
+
+ return payload.version_map.get_mut(insn_idx).unwrap();
+}
+
+/// Take all of the blocks for a particular place in an iseq
+pub fn take_version_list(blockid: BlockId) -> VersionList {
+ let insn_idx = blockid.idx.as_usize();
+ match get_iseq_payload(blockid.iseq) {
+ Some(payload) if insn_idx < payload.version_map.len() => {
+ mem::take(&mut payload.version_map[insn_idx])
+ },
+ _ => VersionList::default(),
+ }
+}
+
+/// Count the number of block versions that match a given BlockId and part of a Context
+fn get_num_versions(blockid: BlockId, ctx: &Context) -> usize {
+ let insn_idx = blockid.idx.as_usize();
+ match get_iseq_payload(blockid.iseq) {
+
+ // FIXME: this counting logic is going to be expensive.
+ // We should avoid it if possible
+
+ Some(payload) => {
+ payload
+ .version_map
+ .get(insn_idx)
+ .map(|versions| {
+ versions.iter().filter(|&&version| {
+ let version_ctx = Context::decode(unsafe { version.as_ref() }.ctx);
+ // Inline versions are counted separately towards MAX_INLINE_VERSIONS.
+ version_ctx.inline() == ctx.inline() &&
+ // find_block_versions() finds only blocks with compatible reg_mapping,
+ // so count only versions with compatible reg_mapping.
+ version_ctx.reg_mapping == ctx.reg_mapping
+ }).count()
+ })
+ .unwrap_or(0)
+ }
+ None => 0,
+ }
+}
+
+/// Get or create a list of block versions generated for an iseq
+/// This is used for disassembly (see disasm.rs)
+pub fn get_or_create_iseq_block_list(iseq: IseqPtr) -> Vec<BlockRef> {
+ let payload = get_or_create_iseq_payload(iseq);
+
+ let mut blocks = Vec::<BlockRef>::new();
+
+ // For each instruction index
+ for insn_idx in 0..payload.version_map.len() {
+ let version_list = &payload.version_map[insn_idx];
+
+ // For each version at this instruction index
+ for version in version_list {
+ // Clone the block ref and add it to the list
+ blocks.push(*version);
+ }
+ }
+
+ return blocks;
+}
+
+/// Retrieve a basic block version for an (iseq, idx) tuple
+/// This will return None if no version is found
+fn find_block_version(blockid: BlockId, ctx: &Context) -> Option<BlockRef> {
+ let versions = get_version_list(blockid)?;
+
+ // Best match found
+ let mut best_version: Option<BlockRef> = None;
+ let mut best_diff = usize::MAX;
+
+ // For each version matching the blockid
+ for blockref in versions.iter() {
+ let block = unsafe { blockref.as_ref() };
+ let block_ctx = Context::decode(block.ctx);
+
+ // Note that we always prefer the first matching
+ // version found because of inline-cache chains
+ match ctx.diff(&block_ctx) {
+ TypeDiff::Compatible(diff) if diff < best_diff => {
+ best_version = Some(*blockref);
+ best_diff = diff;
+ }
+ _ => {}
+ }
+ }
+
+ return best_version;
+}
+
+/// Find the closest RegMapping among ones that have already been compiled.
+pub fn find_most_compatible_reg_mapping(blockid: BlockId, ctx: &Context) -> Option<RegMapping> {
+ let versions = get_version_list(blockid)?;
+
+ // Best match found
+ let mut best_mapping: Option<RegMapping> = None;
+ let mut best_diff = usize::MAX;
+
+ // For each version matching the blockid
+ for blockref in versions.iter() {
+ let block = unsafe { blockref.as_ref() };
+ let block_ctx = Context::decode(block.ctx);
+
+ // Discover the best block that is compatible if we load/spill registers
+ match ctx.diff_allowing_reg_mismatch(&block_ctx) {
+ TypeDiff::Compatible(diff) if diff < best_diff => {
+ best_mapping = Some(block_ctx.get_reg_mapping());
+ best_diff = diff;
+ }
+ _ => {}
+ }
+ }
+
+ best_mapping
+}
+
+/// Allow inlining a Block up to MAX_INLINE_VERSIONS times.
+const MAX_INLINE_VERSIONS: usize = 1000;
+
+/// Produce a generic context when the block version limit is hit for a blockid
+pub fn limit_block_versions(blockid: BlockId, ctx: &Context) -> Context {
+ // Guard chains implement limits separately, do nothing
+ if ctx.get_chain_depth() > 0 {
+ return *ctx;
+ }
+
+ let next_versions = get_num_versions(blockid, ctx) + 1;
+ let max_versions = if ctx.inline() {
+ MAX_INLINE_VERSIONS
+ } else {
+ get_option!(max_versions)
+ };
+
+    // If the block version we're about to add would hit the version limit
+ if next_versions >= max_versions {
+ // Produce a generic context that stores no type information,
+ // but still respects the stack_size and sp_offset constraints.
+ // This new context will then match all future requests.
+ let generic_ctx = ctx.get_generic_ctx();
+
+ if cfg!(debug_assertions) {
+ let mut ctx = ctx.clone();
+ if ctx.inline() {
+ // Suppress TypeDiff::Incompatible from ctx.diff(). We return TypeDiff::Incompatible
+ // to keep inlining blocks until we hit the limit, but it's safe to give up inlining.
+ ctx.inline_block = None;
+ assert!(generic_ctx.inline_block == None);
+ }
+
+ assert_ne!(
+ TypeDiff::Incompatible,
+ ctx.diff(&generic_ctx),
+ "should substitute a compatible context",
+ );
+ }
+
+ return generic_ctx;
+ }
+ if ctx.inline() {
+ incr_counter_to!(max_inline_versions, next_versions);
+ }
+
+ return *ctx;
+}
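+
+// Worked sketch with illustrative numbers: suppose get_option!(max_versions)
+// is 4 and three typed versions already exist at `blockid`. The next request
+// gets the generic context instead:
+//
+//     let used_ctx = limit_block_versions(blockid, &ctx);
+//     // used_ctx == ctx.get_generic_ctx(): same stack_size, sp_offset, and
+//     // reg_mapping, but no type info, so it matches all future requests.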
+
+/// Install a block version into its [IseqPayload], letting the GC track its
+/// lifetime, and allowing it to be considered for use for other
+/// blocks we might generate. Uses `cb` for running write barriers.
+///
+/// # Safety
+///
+/// The block must be fully initialized. Its incoming and outgoing edges,
+/// if there are any, must point to initialized blocks, too.
+///
+/// Note that the block might gain edges after this function returns,
+/// as can happen during [gen_block_series]. Initialized here doesn't mean
+/// ready to be consumed or that the machine code tracked by the block is
+/// ready to be run.
+///
+/// Due to this transient state where a block is tracked by the GC by
+/// being inside an [IseqPayload] but not ready to be executed, it's
+/// generally unsound to call any Ruby methods during codegen. That has
+/// the potential to run blocks which are not ready.
+unsafe fn add_block_version(blockref: BlockRef, cb: &CodeBlock) {
+ // SAFETY: caller ensures initialization
+ let block = unsafe { blockref.as_ref() };
+
+ // Function entry blocks must have stack size 0
+ debug_assert!(!(block.iseq_range.start == 0 && Context::decode(block.ctx).stack_size > 0));
+
+ let version_list = get_or_create_version_list(block.get_blockid());
+
+    // If this is the first block being compiled with this block id
+ if version_list.len() == 0 {
+ incr_counter!(compiled_blockid_count);
+ }
+
+ version_list.push(blockref);
+ version_list.shrink_to_fit();
+
+ // By writing the new block to the iseq, the iseq now
+ // contains new references to Ruby objects. Run write barriers.
+ let iseq: VALUE = block.iseq.get().into();
+ for dep in block.iter_cme_deps() {
+ obj_written!(iseq, dep.into());
+ }
+
+ // Run write barriers for all objects in generated code.
+ for offset in block.gc_obj_offsets.iter() {
+ let value_address: *const u8 = cb.get_ptr(offset.as_usize()).raw_ptr(cb);
+ // Creating an unaligned pointer is well defined unlike in C.
+ let value_address: *const VALUE = value_address.cast();
+
+ let object = unsafe { value_address.read_unaligned() };
+ obj_written!(iseq, object);
+ }
+
+ incr_counter!(compiled_block_count);
+ if Context::decode(block.ctx).inline() {
+ incr_counter!(inline_block_count);
+ }
+
+ // Mark code pages for code GC
+ let iseq_payload = get_iseq_payload(block.iseq.get()).unwrap();
+ for page in cb.addrs_to_pages(block.start_addr, block.end_addr.get()) {
+ iseq_payload.pages.insert(page);
+ }
+}
+
+/// Remove a block version from the version map of its parent ISEQ
+fn remove_block_version(blockref: &BlockRef) {
+ let block = unsafe { blockref.as_ref() };
+ let version_list = match get_version_list(block.get_blockid()) {
+ Some(version_list) => version_list,
+ None => return,
+ };
+
+ // Retain the versions that are not this one
+ version_list.retain(|other| blockref != other);
+}
+
+impl<'a> JITState<'a> {
+    // Finish compiling and turn a jit state into a block.
+    // Note that the block is not yet tracked in its ISEQ's version map.
+ pub fn into_block(self, end_insn_idx: IseqIdx, start_addr: CodePtr, end_addr: CodePtr, gc_obj_offsets: Vec<u32>) -> BlockRef {
+ // Allocate the block and get its pointer
+ let blockref: *mut MaybeUninit<Block> = Box::into_raw(Box::new(MaybeUninit::uninit()));
+
+ incr_counter_by!(num_gc_obj_refs, gc_obj_offsets.len());
+
+ let ctx = Context::encode(&self.get_starting_ctx());
+
+ // Make the new block
+ let block = MaybeUninit::new(Block {
+ start_addr,
+ iseq: Cell::new(self.get_iseq()),
+ iseq_range: self.get_starting_insn_idx()..end_insn_idx,
+ ctx,
+ end_addr: Cell::new(end_addr),
+ incoming: MutableBranchList(Cell::default()),
+ gc_obj_offsets: gc_obj_offsets.into_boxed_slice(),
+ entry_exit: self.get_block_entry_exit(),
+ cme_dependencies: self.method_lookup_assumptions.into_iter().map(Cell::new).collect(),
+ // Pending branches => actual branches
+ outgoing: MutableBranchList(Cell::new(self.pending_outgoing.into_iter().map(|pending_out| {
+ let pending_out = Rc::try_unwrap(pending_out)
+ .unwrap_or_else(|rc| panic!(
+ "PendingBranchRef should be unique when ready to construct a Block. \
+ strong={} weak={}", Rc::strong_count(&rc), Rc::weak_count(&rc)));
+ pending_out.into_branch(NonNull::new(blockref as *mut Block).expect("no null from Box"))
+ }).collect()))
+ });
+ // Initialize it on the heap
+ // SAFETY: allocated with Box above
+ unsafe { ptr::write(blockref, block) };
+
+ // Block is initialized now. Note that MaybeUninit<T> has the same layout as T.
+ let blockref = NonNull::new(blockref as *mut Block).expect("no null from Box");
+
+ // Track all the assumptions the block makes as invariants
+ if self.block_assumes_single_ractor {
+ track_single_ractor_assumption(blockref);
+ }
+ for bop in self.bop_assumptions {
+ track_bop_assumption(blockref, bop);
+ }
+ // SAFETY: just allocated it above
+ for cme in unsafe { blockref.as_ref() }.cme_dependencies.iter() {
+ track_method_lookup_stability_assumption(blockref, cme.get());
+ }
+ if let Some(idlist) = self.stable_constant_names_assumption {
+ track_stable_constant_names_assumption(blockref, idlist);
+ }
+ for klass in self.no_singleton_class_assumptions {
+ track_no_singleton_class_assumption(blockref, klass);
+ }
+ if self.no_ep_escape {
+ track_no_ep_escape_assumption(blockref, self.iseq);
+ }
+
+ blockref
+ }
+}
+
+impl Block {
+ pub fn get_blockid(&self) -> BlockId {
+ BlockId { iseq: self.iseq.get(), idx: self.iseq_range.start }
+ }
+
+ pub fn get_end_idx(&self) -> IseqIdx {
+ self.iseq_range.end
+ }
+
+ pub fn get_ctx_count(&self) -> usize {
+ let mut count = 1; // block.ctx
+ self.outgoing.for_each(|branch| {
+ // SAFETY: &self implies it's initialized
+ count += unsafe { branch.as_ref() }.get_stub_count();
+ });
+ count
+ }
+
+ #[allow(unused)]
+ pub fn get_start_addr(&self) -> CodePtr {
+ self.start_addr
+ }
+
+ #[allow(unused)]
+ pub fn get_end_addr(&self) -> CodePtr {
+ self.end_addr.get()
+ }
+
+ /// Get an immutable iterator over cme dependencies
+ pub fn iter_cme_deps(&self) -> impl Iterator<Item = CmePtr> + '_ {
+ self.cme_dependencies.iter().map(Cell::get)
+ }
+
+ // Push an incoming branch ref and shrink the vector
+ fn push_incoming(&self, branch: BranchRef) {
+ self.incoming.push(branch);
+ }
+
+ // Compute the size of the block code
+ pub fn code_size(&self) -> usize {
+ (self.end_addr.get().as_offset() - self.start_addr.as_offset()).try_into().unwrap()
+ }
+}
+
+impl Context {
+ pub fn get_stack_size(&self) -> u8 {
+ self.stack_size
+ }
+
+ pub fn set_stack_size(&mut self, stack_size: u8) {
+ self.stack_size = stack_size;
+ }
+
+ /// Create a new Context that is compatible with self but doesn't have type information.
+ pub fn get_generic_ctx(&self) -> Context {
+ let mut generic_ctx = Context::default();
+ generic_ctx.stack_size = self.stack_size;
+ generic_ctx.sp_offset = self.sp_offset;
+ generic_ctx.reg_mapping = self.reg_mapping;
+ if self.is_return_landing() {
+ generic_ctx.set_as_return_landing();
+ }
+ if self.is_deferred() {
+ generic_ctx.mark_as_deferred();
+ }
+ generic_ctx
+ }
+
+ /// Create a new Context instance with a given stack_size and sp_offset adjusted
+    /// accordingly. This is useful when you want to virtually rewind the stack_size for
+    /// generating a side exit while accounting for sp_offset changes made by gen_save_sp.
+ pub fn with_stack_size(&self, stack_size: u8) -> Context {
+ let mut ctx = *self;
+ ctx.sp_offset -= (ctx.get_stack_size() as isize - stack_size as isize) as i8;
+ ctx.stack_size = stack_size;
+ ctx
+ }
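+
+    // Worked example (illustrative numbers): rewinding stack_size from 5 to 3
+    // rewinds sp_offset by the same 2 slots, keeping SP-relative addressing
+    // consistent:
+    //
+    //     // ctx.stack_size == 5, ctx.sp_offset == 5
+    //     let rewound = ctx.with_stack_size(3);
+    //     // rewound.stack_size == 3, rewound.sp_offset == 3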
+
+ pub fn get_sp_offset(&self) -> i8 {
+ self.sp_offset
+ }
+
+ pub fn set_sp_offset(&mut self, offset: i8) {
+ self.sp_offset = offset;
+ }
+
+ pub fn get_reg_mapping(&self) -> RegMapping {
+ self.reg_mapping
+ }
+
+ pub fn set_reg_mapping(&mut self, reg_mapping: RegMapping) {
+ self.reg_mapping = reg_mapping;
+ }
+
+ pub fn get_chain_depth(&self) -> u8 {
+ self.chain_depth
+ }
+
+ pub fn reset_chain_depth_and_defer(&mut self) {
+ self.chain_depth = 0;
+ self.is_deferred = false;
+ }
+
+ pub fn increment_chain_depth(&mut self) {
+ if self.get_chain_depth() == CHAIN_DEPTH_MAX {
+ panic!("max block version chain depth reached!");
+ }
+ self.chain_depth += 1;
+ }
+
+ pub fn set_as_return_landing(&mut self) {
+ self.is_return_landing = true;
+ }
+
+ pub fn clear_return_landing(&mut self) {
+ self.is_return_landing = false;
+ }
+
+ pub fn is_return_landing(&self) -> bool {
+ self.is_return_landing
+ }
+
+ pub fn mark_as_deferred(&mut self) {
+ self.is_deferred = true;
+ }
+
+ pub fn is_deferred(&self) -> bool {
+ self.is_deferred
+ }
+
+ /// Get an operand for the adjusted stack pointer address
+ pub fn sp_opnd(&self, offset: i32) -> Opnd {
+ let offset = (self.sp_offset as i32 + offset) * SIZEOF_VALUE_I32;
+ return Opnd::mem(64, SP, offset);
+ }
+
+ /// Get an operand for the adjusted environment pointer address using SP register.
+ /// This is valid only when a Binding object hasn't been created for the frame.
+ pub fn ep_opnd(&self, offset: i32) -> Opnd {
+ let ep_offset = self.get_stack_size() as i32 + 1;
+ self.sp_opnd(-ep_offset + offset)
+ }
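+
+    // Worked example (illustrative numbers) for the two helpers above, with
+    // sp_offset == 2, stack_size == 2, and SIZEOF_VALUE_I32 == 8:
+    //
+    //     ctx.sp_opnd(-1); // mem64[SP + (2 - 1) * 8] == mem64[SP + 8]
+    //     ctx.ep_opnd(0);  // mem64[SP + (2 - 3) * 8] == mem64[SP - 8]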
+
+ /// Start using a register for a given stack temp or a local.
+ pub fn alloc_reg(&mut self, opnd: RegOpnd) {
+ let mut reg_mapping = self.get_reg_mapping();
+ if reg_mapping.alloc_reg(opnd) {
+ self.set_reg_mapping(reg_mapping);
+ }
+ }
+
+ /// Stop using a register for a given stack temp or a local.
+ /// This allows us to reuse the register for a value that we know is dead
+ /// and will no longer be used (e.g. popped stack temp).
+ pub fn dealloc_reg(&mut self, opnd: RegOpnd) {
+ let mut reg_mapping = self.get_reg_mapping();
+ if reg_mapping.dealloc_reg(opnd) {
+ self.set_reg_mapping(reg_mapping);
+ }
+ }
+
+ /// Get the type of an instruction operand
+ pub fn get_opnd_type(&self, opnd: YARVOpnd) -> Type {
+ match opnd {
+ SelfOpnd => self.self_type,
+ StackOpnd(idx) => {
+ assert!(idx < self.stack_size);
+ let stack_idx: usize = (self.stack_size - 1 - idx).into();
+
+                // If outside of the tracked range, the type is unknown
+ if stack_idx >= MAX_CTX_TEMPS {
+ return Type::Unknown;
+ }
+
+ let mapping = self.get_temp_mapping(stack_idx);
+
+ match mapping {
+ MapToSelf => self.self_type,
+ MapToStack(temp_type) => temp_type,
+ MapToLocal(local_idx) => {
+ assert!((local_idx as usize) < MAX_CTX_LOCALS);
+ return self.get_local_type(local_idx.into());
+ }
+ }
+ }
+ }
+ }
+
+ /// Get the currently tracked type for a local variable
+ pub fn get_local_type(&self, local_idx: usize) -> Type {
+ if local_idx >= MAX_CTX_LOCALS {
+ Type::Unknown
+ } else {
+ self.local_types[local_idx]
+ }
+ }
+
+ /// Get the current temp mapping for a given stack slot
+ fn get_temp_mapping(&self, temp_idx: usize) -> TempMapping {
+ assert!(temp_idx < MAX_CTX_TEMPS);
+ self.temp_mapping[temp_idx]
+ }
+
+ /// Set the current temp mapping for a given stack slot
+ fn set_temp_mapping(&mut self, temp_idx: usize, mapping: TempMapping) {
+ assert!(temp_idx < MAX_CTX_TEMPS);
+ self.temp_mapping[temp_idx] = mapping;
+ }
+
+ /// Upgrade (or "learn") the type of an instruction operand
+ /// This value must be compatible and at least as specific as the previously known type.
+ /// If this value originated from self, or an lvar, the learned type will be
+ /// propagated back to its source.
+ pub fn upgrade_opnd_type(&mut self, opnd: YARVOpnd, opnd_type: Type) {
+ // If type propagation is disabled, store no types
+ if get_option!(no_type_prop) {
+ return;
+ }
+
+ match opnd {
+ SelfOpnd => self.self_type.upgrade(opnd_type),
+ StackOpnd(idx) => {
+ assert!(idx < self.stack_size);
+ let stack_idx = (self.stack_size - 1 - idx) as usize;
+
+ // If outside of tracked range, do nothing
+ if stack_idx >= MAX_CTX_TEMPS {
+ return;
+ }
+
+ let mapping = self.get_temp_mapping(stack_idx);
+
+ match mapping {
+ MapToSelf => self.self_type.upgrade(opnd_type),
+ MapToStack(mut temp_type) => {
+ temp_type.upgrade(opnd_type);
+ self.set_temp_mapping(stack_idx, TempMapping::MapToStack(temp_type));
+ }
+ MapToLocal(local_idx) => {
+ let idx = local_idx as usize;
+ assert!(idx < MAX_CTX_LOCALS);
+ let mut new_type = self.get_local_type(idx);
+ new_type.upgrade(opnd_type);
+ self.set_local_type(idx, new_type);
+ // Re-attach MapToLocal for this StackOpnd(idx). set_local_type() detaches
+ // all MapToLocal mappings, including the one we're upgrading here.
+ self.set_opnd_mapping(opnd, mapping);
+ }
+ }
+ }
+ }
+ }
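+
+    // Propagation sketch (illustrative): learning a type for a stack slot that
+    // maps to a local writes the type back to the local, then re-attaches the
+    // mapping that set_local_type() detached:
+    //
+    //     // StackOpnd(0) is MapToLocal(1); local 1 is Type::Unknown
+    //     ctx.upgrade_opnd_type(StackOpnd(0), Type::Fixnum);
+    //     // get_local_type(1) == Type::Fixnum; the slot still maps to local 1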
+
+    /// Get both the type and mapping (where the value originates) of an operand.
+    /// This can be used with stack_push_mapping or set_opnd_mapping to copy
+    /// a stack value's type while maintaining the mapping.
+ pub fn get_opnd_mapping(&self, opnd: YARVOpnd) -> TempMapping {
+ let opnd_type = self.get_opnd_type(opnd);
+
+ match opnd {
+ SelfOpnd => TempMapping::MapToSelf,
+ StackOpnd(idx) => {
+ assert!(idx < self.stack_size);
+ let stack_idx = (self.stack_size - 1 - idx) as usize;
+
+ if stack_idx < MAX_CTX_TEMPS {
+ self.get_temp_mapping(stack_idx)
+ } else {
+                    // We can't know the source of this stack operand, so we assume it is
+                    // a stack-only temporary; its type will be Type::Unknown
+ assert!(opnd_type == Type::Unknown);
+ TempMapping::MapToStack(opnd_type)
+ }
+ }
+ }
+ }
+
+ /// Overwrite both the type and mapping of a stack operand.
+ pub fn set_opnd_mapping(&mut self, opnd: YARVOpnd, mapping: TempMapping) {
+ match opnd {
+ SelfOpnd => unreachable!("self always maps to self"),
+ StackOpnd(idx) => {
+ assert!(idx < self.stack_size);
+ let stack_idx = (self.stack_size - 1 - idx) as usize;
+
+ // If type propagation is disabled, store no types
+ if get_option!(no_type_prop) {
+ return;
+ }
+
+ // If outside of tracked range, do nothing
+ if stack_idx >= MAX_CTX_TEMPS {
+ return;
+ }
+
+ self.set_temp_mapping(stack_idx, mapping);
+ }
+ }
+ }
+
+ /// Set the type of a local variable
+ pub fn set_local_type(&mut self, local_idx: usize, local_type: Type) {
+ // If type propagation is disabled, store no types
+ if get_option!(no_type_prop) {
+ return;
+ }
+
+ if local_idx >= MAX_CTX_LOCALS {
+ return
+ }
+
+ // If any values on the stack map to this local we must detach them
+ for mapping_idx in 0..MAX_CTX_TEMPS {
+ let mapping = self.get_temp_mapping(mapping_idx);
+ let tm = match mapping {
+ MapToStack(_) => mapping,
+ MapToSelf => mapping,
+ MapToLocal(idx) => {
+ if idx as usize == local_idx {
+ let local_type = self.get_local_type(local_idx);
+ TempMapping::MapToStack(local_type)
+ } else {
+ TempMapping::MapToLocal(idx)
+ }
+ }
+ };
+ self.set_temp_mapping(mapping_idx, tm);
+ }
+
+ // Update the type
+ self.local_types[local_idx] = local_type;
+ }
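+
+    // Detach sketch (illustrative): stack slots keep the value they copied even
+    // after the local is retyped, so MapToLocal becomes MapToStack with the
+    // local's old type:
+    //
+    //     // temp 0 is MapToLocal(2); local 2 is Type::Fixnum
+    //     ctx.set_local_type(2, Type::Unknown);
+    //     // temp 0 is now MapToStack(Type::Fixnum); local 2 is Type::Unknown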
+
+ /// Erase local variable type information
+ /// eg: because of a call we can't track
+ pub fn clear_local_types(&mut self) {
+ // When clearing local types we must detach any stack mappings to those
+ // locals. Even if local values may have changed, stack values will not.
+
+ for mapping_idx in 0..MAX_CTX_TEMPS {
+ let mapping = self.get_temp_mapping(mapping_idx);
+ if let MapToLocal(local_idx) = mapping {
+ let local_idx = local_idx as usize;
+ self.set_temp_mapping(mapping_idx, TempMapping::MapToStack(self.get_local_type(local_idx)));
+ }
+ }
+
+ // Clear the local types
+ self.local_types = [Type::default(); MAX_CTX_LOCALS];
+ }
+
+ /// Return true if the code is inlined by the caller
+ pub fn inline(&self) -> bool {
+ self.inline_block.is_some()
+ }
+
+ /// Set a block ISEQ given to the Block of this Context
+ pub fn set_inline_block(&mut self, iseq: IseqPtr) {
+ self.inline_block = Some(iseq);
+ }
+
+ /// Compute a difference score for two context objects
+ pub fn diff(&self, dst: &Context) -> TypeDiff {
+ // Self is the source context (at the end of the predecessor)
+ let src = self;
+
+ // Can only lookup the first version in the chain
+ if dst.get_chain_depth() != 0 {
+ return TypeDiff::Incompatible;
+ }
+
+ // Blocks with depth > 0 always produce new versions
+ // Sidechains cannot overlap
+ if src.get_chain_depth() != 0 {
+ return TypeDiff::Incompatible;
+ }
+
+ if src.is_return_landing() != dst.is_return_landing() {
+ return TypeDiff::Incompatible;
+ }
+
+ if src.is_deferred() != dst.is_deferred() {
+ return TypeDiff::Incompatible;
+ }
+
+ if dst.stack_size != src.stack_size {
+ return TypeDiff::Incompatible;
+ }
+
+ if dst.sp_offset != src.sp_offset {
+ return TypeDiff::Incompatible;
+ }
+
+ if dst.reg_mapping != src.reg_mapping {
+ return TypeDiff::Incompatible;
+ }
+
+ // Difference sum
+ let mut diff = 0;
+
+ // Check the type of self
+ diff += match src.self_type.diff(dst.self_type) {
+ TypeDiff::Compatible(diff) => diff,
+ TypeDiff::Incompatible => return TypeDiff::Incompatible,
+ };
+
+ // Check the block to inline
+ if src.inline_block != dst.inline_block {
+ // find_block_version should not find existing blocks with different
+ // inline_block so that their yield will not be megamorphic.
+ return TypeDiff::Incompatible;
+ }
+
+ // For each local type we track
+        for i in 0..MAX_CTX_LOCALS {
+ let t_src = src.get_local_type(i);
+ let t_dst = dst.get_local_type(i);
+ diff += match t_src.diff(t_dst) {
+ TypeDiff::Compatible(diff) => diff,
+ TypeDiff::Incompatible => return TypeDiff::Incompatible,
+ };
+ }
+
+ // For each value on the temp stack
+ for i in 0..src.stack_size {
+ let src_mapping = src.get_opnd_mapping(StackOpnd(i));
+ let dst_mapping = dst.get_opnd_mapping(StackOpnd(i));
+
+ // If the two mappings aren't the same
+ if src_mapping != dst_mapping {
+ if matches!(dst_mapping, MapToStack(_)) {
+ // We can safely drop information about the source of the temp
+ // stack operand.
+ diff += 1;
+ } else {
+ return TypeDiff::Incompatible;
+ }
+ }
+
+ let src_type = src.get_opnd_type(StackOpnd(i));
+ let dst_type = dst.get_opnd_type(StackOpnd(i));
+
+ diff += match src_type.diff(dst_type) {
+ TypeDiff::Compatible(diff) => diff,
+ TypeDiff::Incompatible => return TypeDiff::Incompatible,
+ };
+ }
+
+ return TypeDiff::Compatible(diff);
+ }
+
+ /// Basically diff() but allows RegMapping incompatibility that could be fixed by
+ /// spilling, loading, or shuffling registers.
+ pub fn diff_allowing_reg_mismatch(&self, dst: &Context) -> TypeDiff {
+ // We shuffle only RegOpnd::Local and spill any other RegOpnd::Stack.
+ // If dst has RegOpnd::Stack, we can't reuse the block as a callee.
+ for reg_opnd in dst.get_reg_mapping().get_reg_opnds() {
+ if matches!(reg_opnd, RegOpnd::Stack(_)) {
+ return TypeDiff::Incompatible;
+ }
+ }
+
+ // Prepare a Context with the same registers
+ let mut dst_with_same_regs = dst.clone();
+ dst_with_same_regs.set_reg_mapping(self.get_reg_mapping());
+
+ // Diff registers and other stuff separately, and merge them
+ if let TypeDiff::Compatible(ctx_diff) = self.diff(&dst_with_same_regs) {
+ TypeDiff::Compatible(ctx_diff + self.get_reg_mapping().diff(dst.get_reg_mapping()))
+ } else {
+ TypeDiff::Incompatible
+ }
+ }
+
+ pub fn two_fixnums_on_stack(&self, jit: &mut JITState) -> Option<bool> {
+ if jit.at_compile_target() {
+ let comptime_recv = jit.peek_at_stack(self, 1);
+ let comptime_arg = jit.peek_at_stack(self, 0);
+ return Some(comptime_recv.fixnum_p() && comptime_arg.fixnum_p());
+ }
+
+ let recv_type = self.get_opnd_type(StackOpnd(1));
+ let arg_type = self.get_opnd_type(StackOpnd(0));
+ match (recv_type, arg_type) {
+ (Type::Fixnum, Type::Fixnum) => Some(true),
+ (Type::Unknown | Type::UnknownImm, Type::Unknown | Type::UnknownImm) => None,
+ _ => Some(false),
+ }
+ }
+}
+
+impl Assembler {
+ /// Push one new value on the temp stack with an explicit mapping
+ /// Return a pointer to the new stack top
+ pub fn stack_push_mapping(&mut self, mapping: TempMapping) -> Opnd {
+        // If type propagation is disabled, store no types. Recurse only when
+        // stripping the type changes the mapping, to avoid infinite recursion.
+        if get_option!(no_type_prop) && mapping != mapping.without_type() {
+ return self.stack_push_mapping(mapping.without_type());
+ }
+
+ let stack_size: usize = self.ctx.stack_size.into();
+
+ // Keep track of the type and mapping of the value
+ if stack_size < MAX_CTX_TEMPS {
+ self.ctx.set_temp_mapping(stack_size, mapping);
+
+ if let MapToLocal(local_idx) = mapping {
+ assert!((local_idx as usize) < MAX_CTX_LOCALS);
+ }
+ }
+
+ self.ctx.stack_size += 1;
+ self.ctx.sp_offset += 1;
+
+ // Allocate a register to the new stack operand
+ let stack_opnd = self.stack_opnd(0);
+ self.alloc_reg(stack_opnd.reg_opnd());
+
+ stack_opnd
+ }
+
+ /// Push one new value on the temp stack
+ /// Return a pointer to the new stack top
+ pub fn stack_push(&mut self, val_type: Type) -> Opnd {
+ return self.stack_push_mapping(TempMapping::MapToStack(val_type));
+ }
+
+ /// Push the self value on the stack
+ pub fn stack_push_self(&mut self) -> Opnd {
+ return self.stack_push_mapping(TempMapping::MapToSelf);
+ }
+
+ /// Push a local variable on the stack
+ pub fn stack_push_local(&mut self, local_idx: usize) -> Opnd {
+ if local_idx >= MAX_CTX_LOCALS {
+ return self.stack_push(Type::Unknown);
+ }
+
+ return self.stack_push_mapping(TempMapping::MapToLocal(local_idx as u8));
+ }
+
+ // Pop N values off the stack
+ // Return a pointer to the stack top before the pop operation
+ pub fn stack_pop(&mut self, n: usize) -> Opnd {
+ assert!(n <= self.ctx.stack_size.into());
+
+ let top = self.stack_opnd(0);
+
+ // Clear the types of the popped values
+ for i in 0..n {
+ let idx: usize = (self.ctx.stack_size as usize) - i - 1;
+
+ if idx < MAX_CTX_TEMPS {
+ self.ctx.set_temp_mapping(idx, TempMapping::MapToStack(Type::Unknown));
+ }
+ }
+
+ self.ctx.stack_size -= n as u8;
+ self.ctx.sp_offset -= n as i8;
+
+ return top;
+ }
+
+ /// Shift stack temps to remove a Symbol for #send.
+ pub fn shift_stack(&mut self, argc: usize) {
+ assert!(argc < self.ctx.stack_size.into());
+
+ let method_name_index = (self.ctx.stack_size as usize) - argc - 1;
+
+ for i in method_name_index..(self.ctx.stack_size - 1) as usize {
+ if i < MAX_CTX_TEMPS {
+ let next_arg_mapping = if i + 1 < MAX_CTX_TEMPS {
+ self.ctx.get_temp_mapping(i + 1)
+ } else {
+ TempMapping::MapToStack(Type::Unknown)
+ };
+ self.ctx.set_temp_mapping(i, next_arg_mapping);
+ }
+ }
+ self.stack_pop(1);
+ }
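+
+    // Shift sketch (illustrative): for recv.send(:foo, arg) with argc == 1,
+    // the Symbol slot is overwritten by the args above it and one slot is
+    // popped:
+    //
+    //     // stack: [recv, :foo, arg]
+    //     asm.shift_stack(1);
+    //     // stack: [recv, arg]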
+
+ /// Get an operand pointing to a slot on the temp stack
+ pub fn stack_opnd(&self, idx: i32) -> Opnd {
+ Opnd::Stack {
+ idx,
+ num_bits: 64,
+ stack_size: self.ctx.stack_size,
+ num_locals: None, // not needed for stack temps
+ sp_offset: self.ctx.sp_offset,
+ reg_mapping: None, // push_insn will set this
+ }
+ }
+
+ /// Get an operand pointing to a local variable
+ pub fn local_opnd(&self, ep_offset: u32) -> Opnd {
+ let idx = self.ctx.stack_size as i32 + ep_offset as i32;
+ Opnd::Stack {
+ idx,
+ num_bits: 64,
+ stack_size: self.ctx.stack_size,
+ num_locals: Some(self.get_num_locals().unwrap()), // this must exist for locals
+ sp_offset: self.ctx.sp_offset,
+ reg_mapping: None, // push_insn will set this
+ }
+ }
+}
+
+impl BlockId {
+ /// Print Ruby source location for debugging
+ #[cfg(debug_assertions)]
+ #[allow(dead_code)]
+ pub fn dump_src_loc(&self) {
+ unsafe { rb_yjit_dump_iseq_loc(self.iseq, self.idx as u32) }
+ }
+}
+
+/// See [gen_block_series_body]. This simply counts compilation failures.
+fn gen_block_series(
+ blockid: BlockId,
+ start_ctx: &Context,
+ ec: EcPtr,
+ cb: &mut CodeBlock,
+ ocb: &mut OutlinedCb,
+) -> Option<BlockRef> {
+ let result = gen_block_series_body(blockid, start_ctx, ec, cb, ocb);
+ if result.is_none() {
+ incr_counter!(compilation_failure);
+ }
+
+ result
+}
+
+/// Immediately compile a series of block versions at a starting point and
+/// return the starting block.
+fn gen_block_series_body(
+ blockid: BlockId,
+ start_ctx: &Context,
+ ec: EcPtr,
+ cb: &mut CodeBlock,
+ ocb: &mut OutlinedCb,
+) -> Option<BlockRef> {
+ // Keep track of all blocks compiled in this batch
+ const EXPECTED_BATCH_SIZE: usize = 4;
+ let mut batch = Vec::with_capacity(EXPECTED_BATCH_SIZE);
+
+ // Generate code for the first block
+ let first_block = gen_single_block(blockid, start_ctx, ec, cb, ocb, true).ok()?;
+ batch.push(first_block); // Keep track of this block version
+
+ // Add the block version to the VersionMap for this ISEQ
+ unsafe { add_block_version(first_block, cb) };
+
+ // Loop variable
+ let mut last_blockref = first_block;
+ loop {
+ // Get the last outgoing branch from the previous block.
+ // SAFETY: No cell mutation inside unsafe. Copying out a BranchRef.
+ let last_branchref: BranchRef = unsafe {
+ let last_block = last_blockref.as_ref();
+ match last_block.outgoing.0.ref_unchecked().last() {
+ Some(branch) => *branch,
+ None => {
+ break;
+ } // If last block has no branches, stop.
+ }
+ };
+ let last_branch = unsafe { last_branchref.as_ref() };
+
+ incr_counter!(block_next_count);
+
+ // gen_direct_jump() can request a block to be placed immediately after by
+ // leaving a single target that has a `None` address.
+ // SAFETY: no mutation inside the unsafe block
+ let (requested_blockid, requested_ctx) = unsafe {
+ match (last_branch.targets[0].ref_unchecked(), last_branch.targets[1].ref_unchecked()) {
+ (Some(last_target), None) if last_target.get_address().is_none() => {
+ (last_target.get_blockid(), last_target.get_ctx())
+ }
+ _ => {
+ // We're done when no fallthrough block is requested
+ break;
+ }
+ }
+ };
+
+ // Generate new block using context from the last branch.
+ let requested_ctx = Context::decode(requested_ctx);
+ let result = gen_single_block(requested_blockid, &requested_ctx, ec, cb, ocb, false);
+
+ // If the block failed to compile
+ if result.is_err() {
+ // Remove previously compiled block
+ // versions from the version map
+ for blockref in batch {
+ remove_block_version(&blockref);
+ // SAFETY: block was well connected because it was in a version_map
+ unsafe { free_block(blockref, false) };
+ }
+
+ // Stop compiling
+ return None;
+ }
+
+ let new_blockref = result.unwrap();
+
+ // Add the block version to the VersionMap for this ISEQ
+ unsafe { add_block_version(new_blockref, cb) };
+
+ // Connect the last branch and the new block
+ last_branch.targets[0].set(Some(Box::new(BranchTarget::Block(new_blockref))));
+ unsafe { new_blockref.as_ref().incoming.push(last_branchref) };
+
+ // Track the block
+ batch.push(new_blockref);
+
+ // Repeat with newest block
+ last_blockref = new_blockref;
+ }
+
+ #[cfg(feature = "disasm")]
+ {
+ // If dump_iseq_disasm is active, see if this iseq's location matches the given substring.
+ // If so, we print the new blocks to the console.
+ if let Some(substr) = get_option_ref!(dump_iseq_disasm).as_ref() {
+ let iseq_location = iseq_get_location(blockid.iseq, blockid.idx);
+ if iseq_location.contains(substr) {
+ let last_block = unsafe { last_blockref.as_ref() };
+ let iseq_range = &last_block.iseq_range;
+ println!("Compiling {} block(s) for {}, ISEQ offsets [{}, {})", batch.len(), iseq_location, iseq_range.start, iseq_range.end);
+ print!("{}", disasm_iseq_insn_range(blockid.iseq, iseq_range.start, iseq_range.end));
+ }
+ }
+ }
+
+ Some(first_block)
+}
+
+/// Generate a block version that is an entry point inserted into an iseq
+/// NOTE: this function assumes that the VM lock has been taken
+/// If jit_exception is true, compile JIT code for handling exceptions.
+/// See jit_compile_exception() for details.
+pub fn gen_entry_point(iseq: IseqPtr, ec: EcPtr, jit_exception: bool) -> Option<*const u8> {
+ // Compute the current instruction index based on the current PC
+ let cfp = unsafe { get_ec_cfp(ec) };
+ let insn_idx: u16 = unsafe {
+ let ec_pc = get_cfp_pc(cfp);
+ iseq_pc_to_insn_idx(iseq, ec_pc)?
+ };
+ let stack_size: u8 = unsafe {
+ u8::try_from(get_cfp_sp(cfp).offset_from(get_cfp_bp(cfp))).ok()?
+ };
+
+ // The entry context makes no assumptions about types
+ let blockid = BlockId {
+ iseq,
+ idx: insn_idx,
+ };
+
+ // Get the inline and outlined code blocks
+ let cb = CodegenGlobals::get_inline_cb();
+ let ocb = CodegenGlobals::get_outlined_cb();
+
+ let code_ptr = gen_entry_point_body(blockid, stack_size, ec, jit_exception, cb, ocb);
+
+ cb.mark_all_executable();
+ ocb.unwrap().mark_all_executable();
+
+ code_ptr
+}
+
+fn gen_entry_point_body(blockid: BlockId, stack_size: u8, ec: EcPtr, jit_exception: bool, cb: &mut CodeBlock, ocb: &mut OutlinedCb) -> Option<*const u8> {
+    // Write the interpreter entry prologue. Might fail (return None) when out of memory.
+ let (code_ptr, reg_mapping) = gen_entry_prologue(cb, ocb, blockid, stack_size, jit_exception)?;
+
+ // Find or compile a block version
+ let mut ctx = Context::default();
+ ctx.stack_size = stack_size;
+ ctx.reg_mapping = reg_mapping;
+ let block = match find_block_version(blockid, &ctx) {
+ // If an existing block is found, generate a jump to the block.
+ Some(blockref) => {
+ let mut asm = Assembler::new_without_iseq();
+ asm.jmp(unsafe { blockref.as_ref() }.start_addr.into());
+ asm.compile(cb, Some(ocb))?;
+ Some(blockref)
+ }
+ // If this block hasn't yet been compiled, generate blocks after the entry guard.
+ None => gen_block_series(blockid, &ctx, ec, cb, ocb),
+ };
+
+ match block {
+ // Compilation failed
+ None => {
+ // Trigger code GC. This entry point will be recompiled later.
+ if get_option!(code_gc) {
+ cb.code_gc(ocb);
+ }
+ return None;
+ }
+
+ // If the block contains no Ruby instructions
+ Some(block) => {
+ let block = unsafe { block.as_ref() };
+ if block.iseq_range.is_empty() {
+ return None;
+ }
+ }
+ }
+
+ // Count the number of entry points we compile
+ incr_counter!(compiled_iseq_entry);
+
+ // Compilation successful and block not empty
+ Some(code_ptr.raw_ptr(cb))
+}
+
+// Change the entry's jump target from an entry stub to a next entry
+pub fn regenerate_entry(cb: &mut CodeBlock, entryref: &EntryRef, next_entry: CodePtr) {
+ let mut asm = Assembler::new_without_iseq();
+ asm_comment!(asm, "regenerate_entry");
+
+ // gen_entry_guard generates cmp + jne. We're rewriting only jne.
+ asm.jne(next_entry.into());
+
+ // Move write_pos to rewrite the entry
+ let old_write_pos = cb.get_write_pos();
+ let old_dropped_bytes = cb.has_dropped_bytes();
+ cb.set_write_ptr(unsafe { entryref.as_ref() }.start_addr);
+ cb.set_dropped_bytes(false);
+ asm.compile(cb, None).expect("can rewrite existing code");
+
+ // Rewind write_pos to the original one
+ assert_eq!(cb.get_write_ptr(), unsafe { entryref.as_ref() }.end_addr);
+ cb.set_pos(old_write_pos);
+ cb.set_dropped_bytes(old_dropped_bytes);
+}
+
+pub type PendingEntryRef = Rc<PendingEntry>;
+
+/// Create a new entry reference for an ISEQ
+pub fn new_pending_entry() -> PendingEntryRef {
+ let entry = PendingEntry {
+ uninit_entry: Box::new(MaybeUninit::uninit()),
+ start_addr: Cell::new(None),
+ end_addr: Cell::new(None),
+ };
+ return Rc::new(entry);
+}
+
+c_callable! {
+ /// Generated code calls this function with the SysV calling convention.
+ /// See [gen_entry_stub].
+ fn entry_stub_hit(entry_ptr: *const c_void, ec: EcPtr) -> *const u8 {
+ with_compile_time(|| {
+ with_vm_lock(src_loc!(), || {
+ let cb = CodegenGlobals::get_inline_cb();
+ let ocb = CodegenGlobals::get_outlined_cb();
+
+ let addr = entry_stub_hit_body(entry_ptr, ec, cb, ocb)
+ .unwrap_or_else(|| {
+ // Trigger code GC (e.g. no space).
+ // This entry point will be recompiled later.
+ if get_option!(code_gc) {
+ cb.code_gc(ocb);
+ }
+ CodegenGlobals::get_stub_exit_code().raw_ptr(cb)
+ });
+
+ cb.mark_all_executable();
+ ocb.unwrap().mark_all_executable();
+
+ addr
+ })
+ })
+ }
+}
+
+/// Called by the generated code when an entry stub is executed
+fn entry_stub_hit_body(
+ entry_ptr: *const c_void,
+ ec: EcPtr,
+ cb: &mut CodeBlock,
+ ocb: &mut OutlinedCb
+) -> Option<*const u8> {
+ // Get ISEQ and insn_idx from the current ec->cfp
+ let cfp = unsafe { get_ec_cfp(ec) };
+ let iseq = unsafe { get_cfp_iseq(cfp) };
+ let insn_idx = iseq_pc_to_insn_idx(iseq, unsafe { get_cfp_pc(cfp) })?;
+ let blockid = BlockId { iseq, idx: insn_idx };
+ let stack_size: u8 = unsafe {
+ u8::try_from(get_cfp_sp(cfp).offset_from(get_cfp_bp(cfp))).ok()?
+ };
+
+ // Compile a new entry guard as a next entry
+ let next_entry = cb.get_write_ptr();
+ let mut asm = Assembler::new(unsafe { get_iseq_body_local_table_size(iseq) });
+ let pending_entry = gen_entry_chain_guard(&mut asm, ocb, blockid)?;
+ let reg_mapping = gen_entry_reg_mapping(&mut asm, blockid, stack_size);
+ asm.compile(cb, Some(ocb))?;
+
+ // Find or compile a block version
+ let mut ctx = Context::default();
+ ctx.stack_size = stack_size;
+ ctx.reg_mapping = reg_mapping;
+ let blockref = match find_block_version(blockid, &ctx) {
+ // If an existing block is found, generate a jump to the block.
+ Some(blockref) => {
+ let mut asm = Assembler::new_without_iseq();
+ asm.jmp(unsafe { blockref.as_ref() }.start_addr.into());
+ asm.compile(cb, Some(ocb))?;
+ Some(blockref)
+ }
+ // If this block hasn't yet been compiled, generate blocks after the entry guard.
+ None => gen_block_series(blockid, &ctx, ec, cb, ocb),
+ };
+
+ // Commit or retry the entry
+ if blockref.is_some() {
+ // Regenerate the previous entry
+ let entryref = NonNull::<Entry>::new(entry_ptr as *mut Entry).expect("Entry should not be null");
+ regenerate_entry(cb, &entryref, next_entry);
+
+ // Write an entry to the heap and push it to the ISEQ
+ let pending_entry = Rc::try_unwrap(pending_entry).ok().expect("PendingEntry should be unique");
+ get_or_create_iseq_payload(iseq).entries.push(pending_entry.into_entry());
+ }
+
+ // Return a code pointer if the block is successfully compiled. The entry stub needs
+ // to jump to the entry preceding the block to load the registers in reg_mapping.
+ blockref.map(|_block| next_entry.raw_ptr(cb))
+}
+
+/// Generate a stub that calls entry_stub_hit
+pub fn gen_entry_stub(entry_address: usize, ocb: &mut OutlinedCb) -> Option<CodePtr> {
+ let ocb = ocb.unwrap();
+
+ let mut asm = Assembler::new_without_iseq();
+ asm_comment!(asm, "entry stub hit");
+
+ asm.mov(C_ARG_OPNDS[0], entry_address.into());
+
+ // Jump to trampoline to call entry_stub_hit()
+ // Not really a side exit, just don't need a padded jump here.
+ asm.jmp(CodegenGlobals::get_entry_stub_hit_trampoline().as_side_exit());
+
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
+}
+
+/// A trampoline used by gen_entry_stub. entry_stub_hit may issue Code GC, so
+/// it's useful for Code GC to call entry_stub_hit from globally shared code.
+pub fn gen_entry_stub_hit_trampoline(ocb: &mut OutlinedCb) -> Option<CodePtr> {
+ let ocb = ocb.unwrap();
+ let mut asm = Assembler::new_without_iseq();
+
+ // See gen_entry_guard for how it's used.
+ asm_comment!(asm, "entry_stub_hit() trampoline");
+ let jump_addr = asm.ccall(entry_stub_hit as *mut u8, vec![C_ARG_OPNDS[0], EC]);
+
+ // Jump to the address returned by the entry_stub_hit() call
+ asm.jmp_opnd(jump_addr);
+
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
+}
+
+/// Generate code for a branch, possibly rewriting and changing the size of it
+fn regenerate_branch(cb: &mut CodeBlock, branch: &Branch) {
+ // Remove old comments
+ cb.remove_comments(branch.start_addr, branch.end_addr.get());
+
+ // SAFETY: having a &Branch implies branch.block is initialized.
+ let block = unsafe { branch.block.get().as_ref() };
+
+ let branch_terminates_block = branch.end_addr.get() == block.get_end_addr();
+
+ // Generate the branch
+ let mut asm = Assembler::new_without_iseq();
+ asm_comment!(asm, "regenerate_branch");
+ branch.gen_fn.call(
+ &mut asm,
+ Target::CodePtr(branch.get_target_address(0).unwrap()),
+ branch.get_target_address(1).map(|addr| Target::CodePtr(addr)),
+ );
+
+ // If the entire block is the branch and the block could be invalidated,
+ // we need to pad to ensure there is room for invalidation patching.
+ if branch.start_addr == block.start_addr && branch_terminates_block && block.entry_exit.is_some() {
+ asm.pad_inval_patch();
+ }
+
+ // Rewrite the branch
+ let old_write_pos = cb.get_write_pos();
+ let old_dropped_bytes = cb.has_dropped_bytes();
+ cb.set_write_ptr(branch.start_addr);
+ cb.set_dropped_bytes(false);
+ asm.compile(cb, None).expect("can rewrite existing code");
+ let new_end_addr = cb.get_write_ptr();
+
+ branch.end_addr.set(new_end_addr);
+
+ // The block may have shrunk after the branch is rewritten
+ if branch_terminates_block {
+ // Adjust block size
+ block.end_addr.set(new_end_addr);
+ }
+
+    // cb.write_pos is both a write cursor and a marker for the end of
+    // everything written out so far. Leave cb.write_pos at the end of the
+    // block before returning. This function only ever bumps or retains the
+    // end-of-block marker since that's what the majority of callers want. When
+    // the branch sits at the very end of the codeblock and it shrinks after
+    // regeneration, it's up to the caller to drop bytes off the end to
+    // avoid leaving a gap and to implement branch.shape.
+ if old_write_pos > cb.get_write_pos() {
+ // We rewound cb->write_pos to generate the branch, now restore it.
+ cb.set_pos(old_write_pos);
+ cb.set_dropped_bytes(old_dropped_bytes);
+ } else {
+ // The branch sits at the end of cb and consumed some memory.
+ // Keep cb.write_pos.
+ }
+
+ branch.assert_layout();
+}
+
+pub type PendingBranchRef = Rc<PendingBranch>;
+
+/// Create a new outgoing branch entry for a block
+fn new_pending_branch(jit: &mut JITState, gen_fn: BranchGenFn) -> PendingBranchRef {
+ let branch = Rc::new(PendingBranch {
+ uninit_branch: Box::new(MaybeUninit::uninit()),
+ gen_fn,
+ start_addr: Cell::new(None),
+ end_addr: Cell::new(None),
+ targets: [Cell::new(None), Cell::new(None)],
+ });
+
+ // Add to the list of outgoing branches for the block
+ jit.queue_outgoing_branch(branch.clone());
+
+ branch
+}
+
+c_callable! {
+ /// Generated code calls this function with the SysV calling convention.
+ /// See [gen_branch_stub].
+ fn branch_stub_hit(
+ branch_ptr: *const c_void,
+ target_idx: u32,
+ ec: EcPtr,
+ ) -> *const u8 {
+ with_vm_lock(src_loc!(), || {
+ with_compile_time(|| { branch_stub_hit_body(branch_ptr, target_idx, ec) })
+ })
+ }
+}
+
+/// Called by the generated code when a branch stub is executed
+/// Triggers compilation of branches and code patching
+fn branch_stub_hit_body(branch_ptr: *const c_void, target_idx: u32, ec: EcPtr) -> *const u8 {
+ if get_option!(dump_insns) {
+ println!("branch_stub_hit");
+ }
+
+ let branch_ref = NonNull::<Branch>::new(branch_ptr as *mut Branch)
+ .expect("Branches should not be null");
+
+ // SAFETY: We have the VM lock, and the branch is initialized by the time generated
+ // code calls this function.
+ //
+ // Careful, don't make a `&Block` from `branch.block` here because we might
+ // delete it later in delete_empty_defer_block().
+ let branch = unsafe { branch_ref.as_ref() };
+ let branch_size_on_entry = branch.code_size();
+
+ let target_idx: usize = target_idx.as_usize();
+ let target_branch_shape = match target_idx {
+ 0 => BranchShape::Next0,
+ 1 => BranchShape::Next1,
+ _ => unreachable!("target_idx < 2 must always hold"),
+ };
+
+ let cb = CodegenGlobals::get_inline_cb();
+ let ocb = CodegenGlobals::get_outlined_cb();
+
+ let (target_blockid, target_ctx): (BlockId, Context) = unsafe {
+ // SAFETY: no mutation of the target's Cell. Just reading out data.
+ let target = branch.targets[target_idx].ref_unchecked().as_ref().unwrap();
+
+ // If this branch has already been patched, return the dst address
+ // Note: recursion can cause the same stub to be hit multiple times
+ if let BranchTarget::Block(_) = target.as_ref() {
+ return target.get_address().unwrap().raw_ptr(cb);
+ }
+
+ let target_ctx = Context::decode(target.get_ctx());
+ (target.get_blockid(), target_ctx)
+ };
+
+ let (cfp, original_interp_sp) = unsafe {
+ let cfp = get_ec_cfp(ec);
+ let original_interp_sp = get_cfp_sp(cfp);
+
+ let running_iseq = get_cfp_iseq(cfp);
+ assert_eq!(running_iseq, target_blockid.iseq as _, "each stub expects a particular iseq");
+
+ let reconned_pc = rb_iseq_pc_at_idx(running_iseq, target_blockid.idx.into());
+ let reconned_sp = original_interp_sp.offset(target_ctx.sp_offset.into());
+ // Unlike in the interpreter, our `leave` doesn't write to the caller's
+ // SP -- we do it in the returned-to code. Account for this difference.
+ let reconned_sp = reconned_sp.add(target_ctx.is_return_landing().into());
+
+ // Update the PC in the current CFP, because it may be out of sync in JITted code
+ rb_set_cfp_pc(cfp, reconned_pc);
+
+ // :stub-sp-flush:
+            // Generated code does stack operations without modifying cfp->sp, while the
+ // cfp->sp tells the GC what values on the stack to root. Generated code
+ // generally takes care of updating cfp->sp when it calls runtime routines that
+ // could trigger GC, but it's inconvenient to do it before calling this function.
+ // So we do it here instead.
+ rb_set_cfp_sp(cfp, reconned_sp);
+
+            // Bail if code GC is disabled and we've already run out of space.
+ if !get_option!(code_gc) && (cb.has_dropped_bytes() || ocb.unwrap().has_dropped_bytes()) {
+ return CodegenGlobals::get_stub_exit_code().raw_ptr(cb);
+ }
+
+ // Bail if we're about to run out of native stack space.
+ // We've just reconstructed interpreter state.
+ if rb_ec_stack_check(ec as _) != 0 {
+ return CodegenGlobals::get_stub_exit_code().raw_ptr(cb);
+ }
+
+ // Bail if this branch is housed in an invalidated (dead) block.
+ // This only happens in rare invalidation scenarios and we need
+ // to avoid linking a dead block to a live block with a branch.
+ if branch.block.get().as_ref().iseq.get().is_null() {
+ return CodegenGlobals::get_stub_exit_code().raw_ptr(cb);
+ }
+
+ (cfp, original_interp_sp)
+ };
+
+ // Try to find an existing compiled version of this block
+ let mut block = find_block_version(target_blockid, &target_ctx);
+ let mut branch_modified = false;
+ // If this block hasn't yet been compiled
+ if block.is_none() {
+ let branch_old_shape = branch.gen_fn.get_shape();
+
+ // If the new block can be generated right after the branch (at cb->write_pos)
+ if cb.get_write_ptr() == branch.end_addr.get() {
+ // This branch should be terminating its block
+ assert!(branch.end_addr == unsafe { branch.block.get().as_ref() }.end_addr);
+
+ // Change the branch shape to indicate the target block will be placed next
+ branch.gen_fn.set_shape(target_branch_shape);
+
+ // Rewrite the branch with the new, potentially more compact shape
+ regenerate_branch(cb, branch);
+ branch_modified = true;
+
+ // Ensure that the branch terminates the codeblock just like
+ // before entering this if block. This drops bytes off the end
+ // in case we shrank the branch when regenerating.
+ cb.set_write_ptr(branch.end_addr.get());
+ }
+
+ // Compile the new block version
+ block = gen_block_series(target_blockid, &target_ctx, ec, cb, ocb);
+
+ if block.is_none() && branch_modified {
+ // We couldn't generate a new block for the branch, but we modified the branch.
+ // Restore the branch by regenerating it.
+ branch.gen_fn.set_shape(branch_old_shape);
+ regenerate_branch(cb, branch);
+ }
+ }
+
+ // Finish building the new block
+ let dst_addr = match block {
+ Some(new_block) => {
+ let new_block = unsafe { new_block.as_ref() };
+
+ // Branch shape should reflect layout
+ assert!(!(branch.gen_fn.get_shape() == target_branch_shape && new_block.start_addr != branch.end_addr.get()));
+
+ // When block housing this branch is empty, try to free it
+ delete_empty_defer_block(branch, new_block, target_ctx, target_blockid);
+
+ // Add this branch to the list of incoming branches for the target
+ new_block.push_incoming(branch_ref);
+
+ // Update the branch target address
+ branch.targets[target_idx].set(Some(Box::new(BranchTarget::Block(new_block.into()))));
+
+ // Rewrite the branch with the new jump target address
+ regenerate_branch(cb, branch);
+
+ // Restore interpreter sp, since the code hitting the stub expects the original.
+ unsafe { rb_set_cfp_sp(cfp, original_interp_sp) };
+
+ new_block.start_addr
+ }
+ None => {
+ // Trigger code GC. The whole ISEQ will be recompiled later.
+ // We shouldn't trigger it in the middle of compilation in branch_stub_hit
+ // because incomplete code could be used when cb.dropped_bytes is flipped
+ // by code GC. So this place, after all compilation, is the safest place
+ // to hook code GC on branch_stub_hit.
+ if get_option!(code_gc) {
+ cb.code_gc(ocb);
+ }
+
+ // Failed to service the stub by generating a new block so now we
+ // need to exit to the interpreter at the stubbed location. We are
+ // intentionally *not* restoring original_interp_sp. At the time of
+ // writing, reconstructing interpreter state only involves setting
+ // cfp->sp and cfp->pc. We set both before trying to generate the
+ // block. All there is left to do to exit is to pop the native
+ // frame. We do that in code_for_exit_from_stub.
+ CodegenGlobals::get_stub_exit_code()
+ }
+ };
+
+ ocb.unwrap().mark_all_executable();
+ cb.mark_all_executable();
+
+ let new_branch_size = branch.code_size();
+ assert!(
+ new_branch_size <= branch_size_on_entry,
+ "branch stubs should never enlarge branches (start_addr: {:?}, old_size: {}, new_size: {})",
+ branch.start_addr.raw_ptr(cb), branch_size_on_entry, new_branch_size,
+ );
+
+ // Return a pointer to the compiled block version
+ dst_addr.raw_ptr(cb)
+}
+
+/// Part of branch_stub_hit().
+/// If we've hit a deferred branch, and the housing block consists solely of the branch, rewire
+/// incoming branches to the new block and delete the housing block.
+fn delete_empty_defer_block(branch: &Branch, new_block: &Block, target_ctx: Context, target_blockid: BlockId)
+{
+ // This &Block should be unique, relying on the VM lock
+ let housing_block: &Block = unsafe { branch.block.get().as_ref() };
+ if target_ctx.is_deferred() &&
+ target_blockid == housing_block.get_blockid() &&
+ housing_block.outgoing.len() == 1 &&
+ {
+ // The block is empty when iseq_range is one instruction long.
+ let range = &housing_block.iseq_range;
+ let iseq = housing_block.iseq.get();
+ let start_opcode = iseq_opcode_at_idx(iseq, range.start.into()) as usize;
+ let empty_end = range.start + insn_len(start_opcode) as IseqIdx;
+ range.end == empty_end
+ }
+ {
+ // Divert incoming branches of housing_block to the new block
+ housing_block.incoming.for_each(|incoming| {
+ let incoming = unsafe { incoming.as_ref() };
+ for target in 0..incoming.targets.len() {
+ // SAFETY: No cell mutation; copying out a BlockRef.
+ if Some(BlockRef::from(housing_block)) == unsafe {
+ incoming.targets[target]
+ .ref_unchecked()
+ .as_ref()
+ .and_then(|target| target.get_block())
+ } {
+ incoming.targets[target].set(Some(Box::new(BranchTarget::Block(new_block.into()))));
+ }
+ }
+ new_block.push_incoming(incoming.into());
+ });
+
+ // Transplant the branch we've just hit to the new block
+ mem::drop(housing_block.outgoing.0.take());
+ new_block.outgoing.push(branch.into());
+ let housing_block: BlockRef = branch.block.replace(new_block.into());
+ // Free the old housing block; there should now be no live &Block.
+ remove_block_version(&housing_block);
+ unsafe { free_block(housing_block, false) };
+
+ incr_counter!(deleted_defer_block_count);
+ }
+}
+
+/// Generate a "stub", a piece of code that calls the compiler back when run.
+/// A piece of code that redeems for more code; a thunk for code.
+fn gen_branch_stub(
+ ctx: u32,
+ iseq: IseqPtr,
+ ocb: &mut OutlinedCb,
+ branch_struct_address: usize,
+ target_idx: u32,
+) -> Option<CodePtr> {
+ let ocb = ocb.unwrap();
+
+ let mut asm = Assembler::new(unsafe { get_iseq_body_local_table_size(iseq) });
+ asm.ctx = Context::decode(ctx);
+ asm.set_reg_mapping(asm.ctx.reg_mapping);
+ asm_comment!(asm, "branch stub hit");
+
+ if asm.ctx.is_return_landing() {
+ asm.mov(SP, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP));
+ let top = asm.stack_push(Type::Unknown);
+ asm.mov(top, C_RET_OPND);
+ }
+
+ // Save caller-saved registers before C_ARG_OPNDS get clobbered.
+ // Spill all registers for consistency with the trampoline.
+ for &reg in caller_saved_temp_regs() {
+ asm.cpush(Opnd::Reg(reg));
+ }
+
+ // Spill temps to the VM stack as well for jit.peek_at_stack()
+ asm.spill_regs();
+
+ // Set up the arguments unique to this stub for:
+ //
+ // branch_stub_hit(branch_ptr, target_idx, ec)
+ //
+ // Bake pointer to Branch into output code.
+ // We make sure the block housing the branch is still alive when branch_stub_hit() is running.
+ asm.mov(C_ARG_OPNDS[0], branch_struct_address.into());
+ asm.mov(C_ARG_OPNDS[1], target_idx.into());
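+ // The third argument, ec, is supplied by the shared trampoline below.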
+
+ // Jump to trampoline to call branch_stub_hit()
+ // Not really a side exit; we just don't need a padded jump here.
+ asm.jmp(CodegenGlobals::get_branch_stub_hit_trampoline().as_side_exit());
+
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
+}
+
+pub fn gen_branch_stub_hit_trampoline(ocb: &mut OutlinedCb) -> Option<CodePtr> {
+ let ocb = ocb.unwrap();
+ let mut asm = Assembler::new_without_iseq();
+
+ // For `branch_stub_hit(branch_ptr, target_idx, ec)`,
+ // `branch_ptr` and `target_idx` are different for each stub,
+ // but the call and what's after is the same. This trampoline
+ // is the unchanging part.
+ // Since this trampoline is static, it allows code GC inside
+ // branch_stub_hit() to free stubs without problems.
+ asm_comment!(asm, "branch_stub_hit() trampoline");
+ let stub_hit_ret = asm.ccall(
+ branch_stub_hit as *mut u8,
+ vec![
+ C_ARG_OPNDS[0],
+ C_ARG_OPNDS[1],
+ EC,
+ ]
+ );
+ let jump_addr = asm.load(stub_hit_ret);
+
+ // Restore caller-saved registers for stack temps
+ for &reg in caller_saved_temp_regs().rev() {
+ asm.cpop_into(Opnd::Reg(reg));
+ }
+
+ // Jump to the address returned by the branch_stub_hit() call
+ asm.jmp_opnd(jump_addr);
+
+ // HACK: popping into C_RET_REG clobbers the return value of branch_stub_hit() that we need to
+ // jump to, so we need a scratch register to preserve it. This extends the live range of the C
+ // return register so we get something else for the return value.
+ let _ = asm.live_reg_opnd(stub_hit_ret);
+
+ asm.compile(ocb, None).map(|(code_ptr, _)| code_ptr)
+}
+
+/// Return registers to be pushed and popped on branch_stub_hit.
+pub fn caller_saved_temp_regs() -> impl Iterator<Item = &'static Reg> + DoubleEndedIterator {
+ let temp_regs = Assembler::get_temp_regs().iter();
+ let len = temp_regs.len();
+ // The return value gen_leave() leaves in C_RET_REG
+ // needs to survive the branch_stub_hit() call.
+ let regs = temp_regs.chain(std::iter::once(&C_RET_REG));
+
+ // On x86_64, maintain 16-byte stack alignment
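+ // (Each cpush is 8 bytes. Callers push the `len` temps plus C_RET_REG; when
+ // `len` is even that totals an odd number of pushes, so push C_RET_REG one
+ // extra time to keep the push count even and the stack 16-byte aligned.)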
+ if cfg!(target_arch = "x86_64") && len % 2 == 0 {
+ static ONE_MORE: [Reg; 1] = [C_RET_REG];
+ regs.chain(ONE_MORE.iter())
+ } else {
+ regs.chain(&[])
+ }
+}
+
+impl Assembler
+{
+ /// Mark the start position of a patchable entry point in the machine code
+ pub fn mark_entry_start(&mut self, entryref: &PendingEntryRef) {
+ // We need to create our own entry rc object
+ // so that we can move the closure below
+ let entryref = entryref.clone();
+
+ self.pos_marker(move |code_ptr, _| {
+ entryref.start_addr.set(Some(code_ptr));
+ });
+ }
+
+ /// Mark the end position of a patchable entry point in the machine code
+ pub fn mark_entry_end(&mut self, entryref: &PendingEntryRef) {
+ // We need to create our own entry rc object
+ // so that we can move the closure below
+ let entryref = entryref.clone();
+
+ self.pos_marker(move |code_ptr, _| {
+ entryref.end_addr.set(Some(code_ptr));
+ });
+ }
+
+ // Mark the start position of a patchable branch in the machine code
+ fn mark_branch_start(&mut self, branchref: &PendingBranchRef)
+ {
+ // We need to create our own branch rc object
+ // so that we can move the closure below
+ let branchref = branchref.clone();
+
+ self.pos_marker(move |code_ptr, _| {
+ branchref.start_addr.set(Some(code_ptr));
+ });
+ }
+
+ // Mark the end position of a patchable branch in the machine code
+ fn mark_branch_end(&mut self, branchref: &PendingBranchRef)
+ {
+ // We need to create our own branch rc object
+ // so that we can move the closure below
+ let branchref = branchref.clone();
+
+ self.pos_marker(move |code_ptr, _| {
+ branchref.end_addr.set(Some(code_ptr));
+ });
+ }
+}
+
+#[must_use]
+pub fn gen_branch(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ target0: BlockId,
+ ctx0: &Context,
+ target1: Option<BlockId>,
+ ctx1: Option<&Context>,
+ gen_fn: BranchGenFn,
+) -> Option<()> {
+ let branch = new_pending_branch(jit, gen_fn);
+
+ // Get the branch targets or stubs
+ let target0_addr = branch.set_target(0, target0, ctx0, jit)?;
+ let target1_addr = if let Some(ctx) = ctx1 {
+ let addr = branch.set_target(1, target1.unwrap(), ctx, jit);
+ if addr.is_none() {
+ // target1 requested but we're out of memory.
+ // Avoid unwrap() in gen_fn()
+ return None;
+ }
+
+ addr
+ } else { None };
+
+ // Call the branch generation function
+ asm.mark_branch_start(&branch);
+ branch.gen_fn.call(asm, Target::CodePtr(target0_addr), target1_addr.map(|addr| Target::CodePtr(addr)));
+ asm.mark_branch_end(&branch);
+
+ Some(())
+}
+
+pub fn gen_direct_jump(jit: &mut JITState, ctx: &Context, target0: BlockId, asm: &mut Assembler) {
+ let branch = new_pending_branch(jit, BranchGenFn::JumpToTarget0(Cell::new(BranchShape::Default)));
+ let maybe_block = find_block_version(target0, ctx);
+
+ // If the block already exists
+ let new_target = if let Some(blockref) = maybe_block {
+ let block = unsafe { blockref.as_ref() };
+ let block_addr = block.start_addr;
+
+ // Call the branch generation function
+ asm_comment!(asm, "gen_direct_jmp: existing block");
+ asm.mark_branch_start(&branch);
+ branch.gen_fn.call(asm, Target::CodePtr(block_addr), None);
+ asm.mark_branch_end(&branch);
+
+ BranchTarget::Block(blockref)
+ } else {
+ // The branch is effectively empty (a noop)
+ asm_comment!(asm, "gen_direct_jmp: fallthrough");
+ asm.mark_branch_start(&branch);
+ asm.mark_branch_end(&branch);
+ branch.gen_fn.set_shape(BranchShape::Next0);
+
+ // `None` in new_target.address signals gen_block_series() to
+ // compile the target block right after this one (fallthrough).
+ BranchTarget::Stub(Box::new(BranchStub {
+ address: None,
+ ctx: Context::encode(ctx),
+ iseq: Cell::new(target0.iseq),
+ iseq_idx: target0.idx,
+ }))
+ };
+
+ branch.targets[0].set(Some(Box::new(new_target)));
+}
+
+/// Create a stub to force the code up to this point to be executed
+pub fn defer_compilation(jit: &mut JITState, asm: &mut Assembler) -> Result<(), ()> {
+ if asm.ctx.is_deferred() {
+ panic!("Double defer!");
+ }
+
+ let mut next_ctx = asm.ctx;
+
+ next_ctx.mark_as_deferred();
+
+ let branch = new_pending_branch(jit, BranchGenFn::JumpToTarget0(Cell::new(BranchShape::Default)));
+
+ let blockid = BlockId {
+ iseq: jit.get_iseq(),
+ idx: jit.get_insn_idx(),
+ };
+
+ // Likely a stub since the context is marked as deferred().
+ let dst_addr = branch.set_target(0, blockid, &next_ctx, jit).ok_or(())?;
+
+ // Pad the block if it has the potential to be invalidated. This must be
+ // done before gen_fn() in case the jump is overwritten by a fallthrough.
+ if jit.block_entry_exit.is_some() {
+ asm.pad_inval_patch();
+ }
+
+ // Call the branch generation function
+ asm_comment!(asm, "defer_compilation");
+ asm.mark_branch_start(&branch);
+ branch.gen_fn.call(asm, Target::CodePtr(dst_addr), None);
+ asm.mark_branch_end(&branch);
+
+ // If the block we're deferring from is empty
+ if jit.get_starting_insn_idx() == jit.get_insn_idx() {
+ incr_counter!(defer_empty_count);
+ }
+
+ incr_counter!(defer_count);
+
+ Ok(())
+}
+
+/// Remove a block from the live control flow graph.
+/// Block must be initialized and incoming/outgoing edges
+/// must also point to initialized blocks.
+unsafe fn remove_from_graph(blockref: BlockRef) {
+ let block = unsafe { blockref.as_ref() };
+
+ // Remove this block from the predecessor's targets
+ for pred_branchref in block.incoming.0.take().iter() {
+ // Branch from the predecessor to us
+ let pred_branch = unsafe { pred_branchref.as_ref() };
+
+ // If this is us, nullify the target block
+ for target_idx in 0..pred_branch.targets.len() {
+ // SAFETY: no mutation inside unsafe
+ let target_is_us = unsafe {
+ pred_branch.targets[target_idx]
+ .ref_unchecked()
+ .as_ref()
+ .and_then(|target| target.get_block())
+ .and_then(|target_block| (target_block == blockref).then(|| ()))
+ .is_some()
+ };
+
+ if target_is_us {
+ pred_branch.targets[target_idx].set(None);
+ }
+ }
+ }
+
+ // For each outgoing branch
+ block.outgoing.for_each(|out_branchref| {
+ let out_branch = unsafe { out_branchref.as_ref() };
+ // For each successor block
+ for out_target in out_branch.targets.iter() {
+ // SAFETY: copying out an Option<BlockRef>. No mutation.
+ let succ_block: Option<BlockRef> = unsafe {
+ out_target.ref_unchecked().as_ref().and_then(|target| target.get_block())
+ };
+
+ if let Some(succ_block) = succ_block {
+ // Remove outgoing branch from the successor's incoming list
+ // SAFETY: caller promises the block has valid outgoing edges.
+ let succ_block = unsafe { succ_block.as_ref() };
+ // Temporarily move out of succ_block.incoming.
+ let succ_incoming = succ_block.incoming.0.take();
+ let mut succ_incoming = succ_incoming.into_vec();
+ succ_incoming.retain(|branch| *branch != out_branchref);
+ succ_block.incoming.0.set(succ_incoming.into_boxed_slice()); // allocs. Rely on oom=abort
+ }
+ }
+ });
+}
+
+/// Tear down a block and deallocate it.
+/// Caller has to ensure that the code tracked by the block is not
+/// running, as running code may hit [branch_stub_hit], which expects
+/// [Branch] to be live.
+///
+/// We currently ensure this through the `jit_cont` system in cont.c
+/// and sometimes through the GC calling [rb_yjit_iseq_free]. The GC
+/// has proven that an ISeq is not running if it calls us to free it.
+///
+/// For delayed deallocation, since dead blocks don't keep the
+/// blocks they refer to alive, by the time we get here their outgoing
+/// edges may be dangling. Pass `graph_intact=false` in such cases.
+pub unsafe fn free_block(blockref: BlockRef, graph_intact: bool) {
+ // Careful with order here.
+ // First, remove all pointers to the referent block
+ unsafe {
+ block_assumptions_free(blockref);
+
+ if graph_intact {
+ remove_from_graph(blockref);
+ }
+ }
+
+ // SAFETY: we should now have a unique pointer to the block
+ unsafe { dealloc_block(blockref) }
+}
+
+/// Deallocate a block and its outgoing branches. Blocks own their outgoing branches.
+/// Caller must ensure that we have unique ownership for the referent block
+unsafe fn dealloc_block(blockref: BlockRef) {
+ unsafe {
+ for outgoing in blockref.as_ref().outgoing.0.take().iter() {
+ // this Box::from_raw matches the Box::into_raw from PendingBranch::into_branch
+ mem::drop(Box::from_raw(outgoing.as_ptr()));
+ }
+ }
+
+ // Deallocate the referent Block
+ unsafe {
+ // this Box::from_raw matches the Box::into_raw from JITState::into_block
+ mem::drop(Box::from_raw(blockref.as_ptr()));
+ }
+}
+
+// Some runtime checks for integrity of a program location
+pub fn verify_blockid(blockid: BlockId) {
+ unsafe {
+ assert!(rb_IMEMO_TYPE_P(blockid.iseq.into(), imemo_iseq) != 0);
+ assert!(u32::from(blockid.idx) < get_iseq_encoded_size(blockid.iseq));
+ }
+}
+
+// Invalidate one specific block version
+pub fn invalidate_block_version(blockref: &BlockRef) {
+ //ASSERT_vm_locking();
+
+ // TODO: want to assert that all other ractors are stopped here. Can't patch
+ // machine code that some other thread is running.
+
+ let block = unsafe { (*blockref).as_ref() };
+ let id_being_invalidated = block.get_blockid();
+ let mut cb = CodegenGlobals::get_inline_cb();
+ let ocb = CodegenGlobals::get_outlined_cb();
+
+ verify_blockid(id_being_invalidated);
+
+ #[cfg(feature = "disasm")]
+ {
+ // If dump_iseq_disasm is specified, print to console that blocks for matching ISEQ names were invalidated.
+ if let Some(substr) = get_option_ref!(dump_iseq_disasm).as_ref() {
+ let iseq_range = &block.iseq_range;
+ let iseq_location = iseq_get_location(block.iseq.get(), iseq_range.start);
+ if iseq_location.contains(substr) {
+ println!("Invalidating block from {}, ISEQ offsets [{}, {})", iseq_location, iseq_range.start, iseq_range.end);
+ }
+ }
+ }
+
+ // Remove this block from the version array
+ remove_block_version(blockref);
+
+ // Get a pointer to the generated code for this block
+ let block_start = block.start_addr;
+
+ // Make the start of the block do an exit. This handles OOM situations
+ // and some cases where we can't efficiently patch incoming branches.
+ // Do this first, since if there is a fallthrough branch into this
+ // block, the patching loop below can overwrite the start of the block.
+ // In those situations, there should be no jumps to the start of the block
+ // after patching, as the start of the block would land in the middle of
+ // something generated by Branch::gen_fn.
+ let block_entry_exit = block
+ .entry_exit
+ .expect("invalidation needs the entry_exit field");
+ {
+ let block_end = block.get_end_addr();
+
+ if block_start == block_entry_exit {
+ // Some blocks exit on entry. Patching a jump to the entry at the
+ // entry makes an infinite loop.
+ } else {
+ // Patch in a jump to block.entry_exit.
+
+ let cur_pos = cb.get_write_ptr();
+ let cur_dropped_bytes = cb.has_dropped_bytes();
+ cb.set_write_ptr(block_start);
+
+ let mut asm = Assembler::new_without_iseq();
+ asm.jmp(block_entry_exit.as_side_exit());
+ cb.set_dropped_bytes(false);
+ asm.compile(&mut cb, Some(ocb)).expect("can rewrite existing code");
+
+ assert!(
+ cb.get_write_ptr() <= block_end,
+ "invalidation wrote past end of block (code_size: {:?}, new_size: {}, start_addr: {:?})",
+ block.code_size(),
+ cb.get_write_ptr().as_offset() - block_start.as_offset(),
+ block.start_addr.raw_ptr(cb),
+ );
+ cb.set_write_ptr(cur_pos);
+ cb.set_dropped_bytes(cur_dropped_bytes);
+ }
+ }
+
+ // For each incoming branch
+ let mut incoming_branches = block.incoming.0.take();
+
+ // An adjacent branch will write into the start of the block being invalidated, possibly
+ // overwriting the block's exit. If we run out of memory after doing this, any subsequent
+ // incoming branches we rewrite won't be able to use the block's exit as a fallback when they
+ // are unable to generate a stub. To avoid this, if there's an incoming branch that's
+ // adjacent to the invalidated block, make sure we process it last.
+ let adjacent_branch_idx = incoming_branches.iter().position(|branchref| {
+ let branch = unsafe { branchref.as_ref() };
+ let target_next = block.start_addr == branch.end_addr.get();
+ target_next
+ });
+ if let Some(adjacent_branch_idx) = adjacent_branch_idx {
+ incoming_branches.swap(adjacent_branch_idx, incoming_branches.len() - 1)
+ }
+
+ for (i, branchref) in incoming_branches.iter().enumerate() {
+ let branch = unsafe { branchref.as_ref() };
+ let target_idx = if branch.get_target_address(0) == Some(block_start) {
+ 0
+ } else {
+ 1
+ };
+
+ // Assert that the incoming branch indeed points to the block being invalidated
+ // SAFETY: no mutation.
+ unsafe {
+ let incoming_target = branch.targets[target_idx].ref_unchecked().as_ref().unwrap();
+ assert_eq!(Some(block_start), incoming_target.get_address());
+ if let Some(incoming_block) = &incoming_target.get_block() {
+ assert_eq!(blockref, incoming_block);
+ }
+ }
+
+ // Create a stub for this branch target
+ let stub_addr = gen_branch_stub(block.ctx, block.iseq.get(), ocb, branchref.as_ptr() as usize, target_idx as u32);
+
+ // In case we were unable to generate a stub (e.g. OOM), use the block's
+ // exit instead of a stub for the block. It's important that we
+ // still patch the branch in this situation so stubs are unique
+ // to branches. Think about what could go wrong if we run out of
+ // memory in the middle of this loop.
+ let stub_addr = stub_addr.unwrap_or(block_entry_exit);
+
+ // Fill the branch target with a stub
+ branch.targets[target_idx].set(Some(Box::new(BranchTarget::Stub(Box::new(BranchStub {
+ address: Some(stub_addr),
+ iseq: block.iseq.clone(),
+ iseq_idx: block.iseq_range.start,
+ ctx: block.ctx,
+ })))));
+
+ // Check if the invalidated block immediately follows
+ let target_next = block.start_addr == branch.end_addr.get();
+
+ if target_next {
+ if stub_addr != block.start_addr {
+ // The new block will no longer be adjacent.
+ // Note that we could be enlarging the branch and writing into the
+ // start of the block being invalidated.
+ branch.gen_fn.set_shape(BranchShape::Default);
+ } else {
+ // The branch target is still adjacent, so the branch must remain
+ // a fallthrough so we don't overwrite the target with a jump.
+ //
+ // This can happen if we're unable to generate a stub and the
+ // target block also exits on entry (block_start == block_entry_exit).
+ }
+ }
+
+ // Rewrite the branch with the new jump target address
+ let old_branch_size = branch.code_size();
+ regenerate_branch(cb, branch);
+
+ if target_next && branch.end_addr > block.end_addr {
+ panic!("yjit invalidate rewrote branch past end of invalidated block: {:?} (code_size: {})", branch, block.code_size());
+ }
+ let is_last_incoming_branch = i == incoming_branches.len() - 1;
+ if target_next && branch.end_addr.get() > block_entry_exit && !is_last_incoming_branch {
+ // We might still need to jump to this exit if we run out of memory when rewriting another incoming branch.
+ panic!("yjit invalidate rewrote branch over exit of invalidated block: {:?}", branch);
+ }
+ if !target_next && branch.code_size() > old_branch_size {
+ panic!(
+ "invalidated branch grew in size (start_addr: {:?}, old_size: {}, new_size: {})",
+ branch.start_addr.raw_ptr(cb), old_branch_size, branch.code_size()
+ );
+ }
+ }
+
+ // Clear out the JIT func so that we can recompile later and so the
+ // interpreter will run the iseq.
+ //
+ // Only clear the jit_func when we're invalidating the JIT entry block.
+ // We only support compiling iseqs from index 0 right now. So entry
+ // points will always have an instruction index of 0. We'll need to
+ // change this in the future when we support optional parameters because
+ // they enter the function with a non-zero PC
+ if block.iseq_range.start == 0 {
+ // TODO:
+ // We could reset the exec counter to zero in rb_iseq_reset_jit_func()
+ // so that we eventually compile a new entry point when useful
+ unsafe { rb_iseq_reset_jit_func(block.iseq.get()) };
+ }
+
+ // FIXME:
+ // Call continuation addresses on the stack can also be atomically replaced by jumps going to the stub.
+
+ // SAFETY: This block was in a version_map earlier
+ // in this function before we removed it, so it's well connected.
+ unsafe { remove_from_graph(*blockref) };
+
+ delayed_deallocation(*blockref);
+
+ ocb.unwrap().mark_all_executable();
+ cb.mark_all_executable();
+
+ incr_counter!(invalidation_count);
+}
+
+// We cannot deallocate blocks immediately after invalidation since patching the code for setting
+// up return addresses does not affect outstanding return addresses that are already on the stack and will use
+// invalidated branch pointers when hit. Example:
+// def foo(n)
+// if n == 2
+// # 1.times.each to create a cfunc frame to preserve the JIT frame
+// # which will return to a stub housed in an invalidated block
+// return 1.times.each { Object.define_method(:foo) {} }
+// end
+//
+// foo(n + 1) # The block for this call houses the return branch stub
+// end
+// p foo(1)
+pub fn delayed_deallocation(blockref: BlockRef) {
+ block_assumptions_free(blockref);
+
+ let block = unsafe { blockref.as_ref() };
+ // Set null ISEQ on the block to signal that it's dead.
+ let iseq = block.iseq.replace(ptr::null());
+ let payload = get_iseq_payload(iseq).unwrap();
+ payload.dead_blocks.push(blockref);
+}
+
+trait RefUnchecked {
+ type Contained;
+ unsafe fn ref_unchecked(&self) -> &Self::Contained;
+}
+
+impl<T> RefUnchecked for Cell<T> {
+ type Contained = T;
+
+ /// Gives a reference to the contents of a [Cell].
+ /// Dangerous; please include a SAFETY note.
+ ///
+ /// An easy way to use this without triggering Undefined Behavior is to
+ /// 1. ensure there is transitively no Cell/UnsafeCell mutation in the `unsafe` block
+ /// 2. ensure the `unsafe` block does not return any references, so our
+ /// analysis is lexically confined. This is trivially true if the block
+ /// returns a `bool`, for example. Aggregates that store references have
+ /// explicit lifetime parameters that look like `<'a>`.
+ ///
+ /// There are other subtler situations that don't follow these rules yet
+ /// are still sound.
+ /// See `test_miri_ref_unchecked()` for examples. You can play with it
+ /// with `cargo +nightly miri test miri`.
+ unsafe fn ref_unchecked(&self) -> &Self::Contained {
+ // SAFETY: pointer is dereferenceable because it's from a &Cell.
+ // It's up to the caller to follow aliasing rules with the output
+ // reference.
+ unsafe { self.as_ptr().as_ref().unwrap() }
+ }
+}
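+
+// Illustrative pattern (not a prescribed API): copy plain data out inside the
+// `unsafe` block so no reference escapes it, e.g.
+//
+// let addr: Option<CodePtr> = unsafe {
+// branch.targets[0].ref_unchecked().as_ref().and_then(|t| t.get_address())
+// };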
+
+#[cfg(test)]
+mod tests {
+ use crate::core::*;
+
+ #[test]
+ fn type_size() {
+ // Check that we can store types in 4 bits,
+ // and all local types in 32 bits
+ assert_eq!(mem::size_of::<Type>(), 1);
+ assert!(Type::BlockParamProxy as usize <= 0b1111);
+ assert!(MAX_CTX_LOCALS * 4 <= 32);
+ }
+
+ #[test]
+ fn local_types() {
+ let mut ctx = Context::default();
+
+ for i in 0..MAX_CTX_LOCALS {
+ ctx.set_local_type(i, Type::Fixnum);
+ assert_eq!(ctx.get_local_type(i), Type::Fixnum);
+ ctx.set_local_type(i, Type::BlockParamProxy);
+ assert_eq!(ctx.get_local_type(i), Type::BlockParamProxy);
+ }
+
+ ctx.set_local_type(0, Type::Fixnum);
+ ctx.clear_local_types();
+ assert!(ctx.get_local_type(0) == Type::Unknown);
+
+ // Make sure we don't accidentally set bits incorrectly
+ let mut ctx = Context::default();
+ ctx.set_local_type(0, Type::Fixnum);
+ assert_eq!(ctx.get_local_type(0), Type::Fixnum);
+ ctx.set_local_type(2, Type::Fixnum);
+ ctx.set_local_type(1, Type::BlockParamProxy);
+ assert_eq!(ctx.get_local_type(0), Type::Fixnum);
+ assert_eq!(ctx.get_local_type(2), Type::Fixnum);
+ }
+
+ #[test]
+ fn types() {
+ // Valid src => dst
+ assert_eq!(Type::Unknown.diff(Type::Unknown), TypeDiff::Compatible(0));
+ assert_eq!(Type::UnknownImm.diff(Type::UnknownImm), TypeDiff::Compatible(0));
+ assert_ne!(Type::UnknownImm.diff(Type::Unknown), TypeDiff::Incompatible);
+ assert_ne!(Type::Fixnum.diff(Type::Unknown), TypeDiff::Incompatible);
+ assert_ne!(Type::Fixnum.diff(Type::UnknownImm), TypeDiff::Incompatible);
+
+ // Invalid src => dst
+ assert_eq!(Type::Unknown.diff(Type::UnknownImm), TypeDiff::Incompatible);
+ assert_eq!(Type::Unknown.diff(Type::Fixnum), TypeDiff::Incompatible);
+ assert_eq!(Type::Fixnum.diff(Type::UnknownHeap), TypeDiff::Incompatible);
+ }
+
+ #[test]
+ fn reg_mapping() {
+ let mut reg_mapping = RegMapping([None, None, None, None, None]);
+
+ // With an empty mapping, no stack slot maps to a register
+ for stack_idx in 0..MAX_CTX_TEMPS as u8 {
+ assert_eq!(reg_mapping.get_reg(RegOpnd::Stack(stack_idx)), None);
+ }
+
+ // Set 0, 2, 6 (alloc and dealloc 3 in between); final RegMapping: [Some(Stack(0)), Some(Stack(6)), Some(Stack(2)), None, None]
+ reg_mapping.alloc_reg(RegOpnd::Stack(0));
+ reg_mapping.alloc_reg(RegOpnd::Stack(2));
+ reg_mapping.alloc_reg(RegOpnd::Stack(3));
+ reg_mapping.dealloc_reg(RegOpnd::Stack(3));
+ reg_mapping.alloc_reg(RegOpnd::Stack(6));
+
+ // Get 0..8
+ assert_eq!(reg_mapping.get_reg(RegOpnd::Stack(0)), Some(0));
+ assert_eq!(reg_mapping.get_reg(RegOpnd::Stack(1)), None);
+ assert_eq!(reg_mapping.get_reg(RegOpnd::Stack(2)), Some(2));
+ assert_eq!(reg_mapping.get_reg(RegOpnd::Stack(3)), None);
+ assert_eq!(reg_mapping.get_reg(RegOpnd::Stack(4)), None);
+ assert_eq!(reg_mapping.get_reg(RegOpnd::Stack(5)), None);
+ assert_eq!(reg_mapping.get_reg(RegOpnd::Stack(6)), Some(1));
+ assert_eq!(reg_mapping.get_reg(RegOpnd::Stack(7)), None);
+ }
+
+ #[test]
+ fn context() {
+ // Valid src => dst
+ assert_eq!(Context::default().diff(&Context::default()), TypeDiff::Compatible(0));
+
+ // Try pushing an operand and getting its type
+ let mut asm = Assembler::new(0);
+ asm.stack_push(Type::Fixnum);
+ let top_type = asm.ctx.get_opnd_type(StackOpnd(0));
+ assert!(top_type == Type::Fixnum);
+
+ // TODO: write more tests for Context type diff
+ }
+
+ #[test]
+ fn context_upgrade_local() {
+ let mut asm = Assembler::new(0);
+ asm.stack_push_local(0);
+ asm.ctx.upgrade_opnd_type(StackOpnd(0), Type::Nil);
+ assert_eq!(Type::Nil, asm.ctx.get_opnd_type(StackOpnd(0)));
+ }
+
+ #[test]
+ fn context_chain_depth() {
+ let mut ctx = Context::default();
+ assert_eq!(ctx.get_chain_depth(), 0);
+ assert_eq!(ctx.is_return_landing(), false);
+ assert_eq!(ctx.is_deferred(), false);
+
+ for _ in 0..5 {
+ ctx.increment_chain_depth();
+ }
+ assert_eq!(ctx.get_chain_depth(), 5);
+
+ ctx.set_as_return_landing();
+ assert_eq!(ctx.is_return_landing(), true);
+
+ ctx.clear_return_landing();
+ assert_eq!(ctx.is_return_landing(), false);
+
+ ctx.mark_as_deferred();
+ assert_eq!(ctx.is_deferred(), true);
+
+ ctx.reset_chain_depth_and_defer();
+ assert_eq!(ctx.get_chain_depth(), 0);
+ assert_eq!(ctx.is_deferred(), false);
+ }
+
+ #[test]
+ fn shift_stack_for_send() {
+ let mut asm = Assembler::new(0);
+
+ // Push values to simulate send(:name, arg) with 6 items already on-stack
+ for _ in 0..6 {
+ asm.stack_push(Type::Fixnum);
+ }
+ asm.stack_push(Type::Unknown);
+ asm.stack_push(Type::ImmSymbol);
+ asm.stack_push(Type::Unknown);
+
+ // This method takes argc of the sendee, not argc of send
+ asm.shift_stack(1);
+
+ // The symbol should be gone
+ assert_eq!(Type::Unknown, asm.ctx.get_opnd_type(StackOpnd(0)));
+ assert_eq!(Type::Unknown, asm.ctx.get_opnd_type(StackOpnd(1)));
+ }
+
+ #[test]
+ fn test_miri_ref_unchecked() {
+ let blockid = BlockId {
+ iseq: ptr::null(),
+ idx: 0,
+ };
+ let cb = CodeBlock::new_dummy(1024);
+ let mut ocb = OutlinedCb::wrap(CodeBlock::new_dummy(1024));
+ let dumm_addr = cb.get_write_ptr();
+ let block = JITState::new(blockid, Context::default(), dumm_addr, ptr::null(), &mut ocb, true)
+ .into_block(0, dumm_addr, dumm_addr, vec![]);
+ let _dropper = BlockDropper(block);
+
+ // Outside of brief moments during construction,
+ // we're always working with &Branch (a shared reference to a Branch).
+ let branch: &Branch = &Branch {
+ gen_fn: BranchGenFn::JZToTarget0,
+ block: Cell::new(block),
+ start_addr: dumm_addr,
+ end_addr: Cell::new(dumm_addr),
+ targets: [Cell::new(None), Cell::new(Some(Box::new(BranchTarget::Stub(Box::new(BranchStub {
+ iseq: Cell::new(ptr::null()),
+ iseq_idx: 0,
+ address: None,
+ ctx: 0,
+ })))))]
+ };
+ // For easier soundness reasoning, make sure the reference returned does not outlive the
+ // `unsafe` block! It's tempting to let it, but doing so leads to non-local issues.
+ // Here is an example where it goes wrong:
+ if false {
+ for target in branch.targets.iter().as_ref() {
+ if let Some(btarget) = unsafe { target.ref_unchecked() } {
+ // btarget is derived from the unsafe block!
+ target.set(None); // This drops the contents of the cell...
+ assert!(btarget.get_address().is_none()); // but `btarget` is still live! UB.
+ }
+ }
+ }
+
+ // Do something like this instead. It's not pretty, but it's easier to vet for UB this way.
+ for target in branch.targets.iter().as_ref() {
+ // SAFETY: no mutation within unsafe
+ if unsafe { target.ref_unchecked().is_none() } {
+ continue;
+ }
+ // SAFETY: no mutation within unsafe
+ assert!(unsafe { target.ref_unchecked().as_ref().unwrap().get_address().is_none() });
+ target.set(None);
+ }
+
+ // A more subtle situation where we do Cell/UnsafeCell mutation over the
+ // lifetime of the reference released by ref_unchecked().
+ branch.targets[0].set(Some(Box::new(BranchTarget::Stub(Box::new(BranchStub {
+ iseq: Cell::new(ptr::null()),
+ iseq_idx: 0,
+ address: None,
+ ctx: 0,
+ })))));
+ // Invalid ISeq; we never dereference it.
+ let secret_iseq = NonNull::<rb_iseq_t>::dangling().as_ptr();
+ unsafe {
+ if let Some(branch_target) = branch.targets[0].ref_unchecked().as_ref() {
+ if let BranchTarget::Stub(stub) = branch_target.as_ref() {
+ // SAFETY:
+ // This is a Cell mutation, but it mutates the contents
+ // of a Cell<IseqPtr>, which is a different type
+ // from the type of Cell found in `Branch::targets`, so
+ // there is no chance of mutating the Cell that we called
+ // ref_unchecked() on above.
+ Cell::set(&stub.iseq, secret_iseq);
+ }
+ }
+ };
+ // Check that we indeed changed the iseq of the stub
+ // Cell::take moves out of the cell.
+ assert_eq!(
+ secret_iseq as usize,
+ branch.targets[0].take().unwrap().get_blockid().iseq as usize
+ );
+
+ struct BlockDropper(BlockRef);
+ impl Drop for BlockDropper {
+ fn drop(&mut self) {
+ // SAFETY: we have ownership because the test doesn't stash
+ // the block away in any global structure.
+ // Note that the test being self-contained is also why we
+ // use dealloc_block() over free_block(), as free_block() touches
+ // the global invariants tables unavailable in tests.
+ unsafe { dealloc_block(self.0) };
+ }
+ }
+ }
+}
diff --git a/yjit/src/cruby.rs b/yjit/src/cruby.rs
new file mode 100644
index 0000000000..d34b049a45
--- /dev/null
+++ b/yjit/src/cruby.rs
@@ -0,0 +1,831 @@
+//! This module deals with making relevant C functions available to Rust YJIT.
+//! Some C functions we use we maintain, some are public C extension APIs,
+//! some are internal CRuby APIs.
+//!
+//! ## General notes about linking
+//!
+//! The YJIT crate compiles to a native static library, which for our purposes
+//! we can understand as a collection of object files. On ELF platforms at least,
+//! object files can refer to "external symbols", which, taking some liberty,
+//! we can understand as assembly labels that refer to code defined in other
+//! object files and resolved at link time. When we link, say to produce miniruby,
+//! the linker resolves each usage of a C function in the Rust static library
+//! and fills in a concrete address for it.
+//!
+//! By declaring external functions and using them, we are asserting the symbols
+//! we use have a definition in one of the object files we pass to the linker. Declaring
+//! a function here that has no definition anywhere causes a linking error.
+//!
+//! There are more things going on during linking and this section makes a lot of
+//! simplifications but hopefully this gives a good enough working mental model.
+//!
+//! ## Difference from example in the Rustonomicon
+//!
+//! You might be wondering about why this is different from the [FFI example]
+//! in the Nomicon, an official book about Unsafe Rust.
+//!
+//! There is no `#[link]` attribute because we are not linking against an external
+//! library, but rather implicitly asserting that we'll supply a concrete definition
+//! for all C functions we call, similar to how pure C projects put functions
+//! across different compilation units and link them together.
+//!
+//! TODO(alan): is the model different enough on Windows that this setup is unworkable?
+//! Seems prudent to at least learn more about Windows binary tooling before
+//! committing to a design.
+//!
+//! Alan recommends reading the Nomicon cover to cover as he thinks the book is
+//! not very long in general and especially for something that can save hours of
+//! debugging Undefined Behavior (UB) down the road.
+//!
+//! UBs can cause Safe Rust to crash, at which point it's hard to tell which
+//! usage of `unsafe` in the codebase invokes UB. Providing safe Rust interface
+//! wrapping `unsafe` Rust is a good technique, but requires practice and knowledge
+//! about what's well defined and what's undefined.
+//!
+//! For an extremely advanced example of building safe primitives using Unsafe Rust,
+//! see the [GhostCell] paper. Some parts of the paper assume less background knowledge
+//! than other parts, so there should be learning opportunities in it for all experience
+//! levels.
+//!
+//! ## Binding generation
+//!
+//! For the moment, declarations on the Rust side are hand-written. The code is boilerplate
+//! and could be generated automatically with custom tooling that depends on
+//! rust-lang/rust-bindgen. The output Rust code could be checked in to version control
+//! and verified on CI like `make update-deps`.
+//!
+//! Upsides for this design:
+//! - the YJIT static lib that links with miniruby and friends will not need bindgen
+//! as a dependency at all. This is an important property so Ruby end users can
+//! build a YJIT enabled Ruby with no internet connection using a release tarball
+//! - Less hand-typed boilerplate
+//! - Helps reduce the risk of C definitions and Rust declarations going out of sync, since
+//!   CI verifies that they match
+//!
+//! Downsides and known unknowns:
+//! - Using rust-bindgen this way seems unusual. We might be depending on parts
+//! that the project is not committed to maintaining
+//! - This setup assumes rust-bindgen gives deterministic output, which can't be taken
+//! for granted
+//! - YJIT contributors will need to install libclang on their system to get rust-bindgen
+//! to work if they want to run the generation tool locally
+//!
+//! The elephant in the room is that we'll still need to use Unsafe Rust to call C functions,
+//! and the binding generation can't magically save us from learning Unsafe Rust.
+//!
+//!
+//! [FFI example]: https://doc.rust-lang.org/nomicon/ffi.html
+//! [GhostCell]: http://plv.mpi-sws.org/rustbelt/ghostcell/
+
+// CRuby types use snake_case. Allow them so we use one name across languages.
+#![allow(non_camel_case_types)]
+// A lot of imported CRuby globals aren't all-caps
+#![allow(non_upper_case_globals)]
+
+use std::convert::From;
+use std::ffi::{CString, CStr};
+use std::fmt::{Debug, Formatter};
+use std::os::raw::{c_char, c_int, c_uint};
+use std::panic::{catch_unwind, UnwindSafe};
+
+// We check that we can do this with the configure script and a couple of
+// static asserts. u64 and not usize to play nice with lowering to x86.
+pub type size_t = u64;
+
+/// A type alias for the redefinition flags coming from CRuby. These are just
+/// shifted 1s but not explicitly an enum.
+pub type RedefinitionFlag = u32;
+
+#[allow(dead_code)]
+#[allow(clippy::all)]
+mod autogened {
+ use super::*;
+ // Textually include output from rust-bindgen as suggested by its user guide.
+ include!("cruby_bindings.inc.rs");
+}
+pub use autogened::*;
+
+// TODO: For #defines that affect memory layout, we need to check for them
+// on build and fail if they're wrong. e.g. USE_FLONUM *must* be true.
+
+// These are functions we expose from C files, not in any header.
+// Parsing it would result in a lot of duplicate definitions.
+// Use bindgen for functions that are defined in headers or in yjit.c.
+#[cfg_attr(test, allow(unused))] // We don't link against C code when testing
+extern "C" {
+ pub fn rb_check_overloaded_cme(
+ me: *const rb_callable_method_entry_t,
+ ci: *const rb_callinfo,
+ ) -> *const rb_callable_method_entry_t;
+
+ // Floats within range will be encoded without creating objects in the heap.
+ // (Range is 0x3000000000000001 to 0x4fffffffffffffff (1.7272337110188893E-77 to 2.3158417847463237E+77).
+ pub fn rb_float_new(d: f64) -> VALUE;
+
+ pub fn rb_hash_empty_p(hash: VALUE) -> VALUE;
+ pub fn rb_str_setbyte(str: VALUE, index: VALUE, value: VALUE) -> VALUE;
+ pub fn rb_vm_splat_array(flag: VALUE, ary: VALUE) -> VALUE;
+ pub fn rb_vm_concat_array(ary1: VALUE, ary2st: VALUE) -> VALUE;
+ pub fn rb_vm_concat_to_array(ary1: VALUE, ary2st: VALUE) -> VALUE;
+ pub fn rb_vm_defined(
+ ec: EcPtr,
+ reg_cfp: CfpPtr,
+ op_type: rb_num_t,
+ obj: VALUE,
+ v: VALUE,
+ ) -> bool;
+ pub fn rb_vm_set_ivar_id(obj: VALUE, idx: u32, val: VALUE) -> VALUE;
+ pub fn rb_vm_setinstancevariable(iseq: IseqPtr, obj: VALUE, id: ID, val: VALUE, ic: IVC);
+ pub fn rb_aliased_callable_method_entry(
+ me: *const rb_callable_method_entry_t,
+ ) -> *const rb_callable_method_entry_t;
+ pub fn rb_vm_getclassvariable(iseq: IseqPtr, cfp: CfpPtr, id: ID, ic: ICVARC) -> VALUE;
+ pub fn rb_vm_setclassvariable(
+ iseq: IseqPtr,
+ cfp: CfpPtr,
+ id: ID,
+ val: VALUE,
+ ic: ICVARC,
+ ) -> VALUE;
+ pub fn rb_vm_ic_hit_p(ic: IC, reg_ep: *const VALUE) -> bool;
+ pub fn rb_vm_stack_canary() -> VALUE;
+ pub fn rb_vm_push_cfunc_frame(cme: *const rb_callable_method_entry_t, recv_idx: c_int);
+}
+
+// Renames
+pub use rb_insn_name as raw_insn_name;
+pub use rb_get_ec_cfp as get_ec_cfp;
+pub use rb_get_cfp_iseq as get_cfp_iseq;
+pub use rb_get_cfp_pc as get_cfp_pc;
+pub use rb_get_cfp_sp as get_cfp_sp;
+pub use rb_get_cfp_self as get_cfp_self;
+pub use rb_get_cfp_ep as get_cfp_ep;
+pub use rb_get_cfp_ep_level as get_cfp_ep_level;
+pub use rb_vm_base_ptr as get_cfp_bp;
+pub use rb_get_cme_def_type as get_cme_def_type;
+pub use rb_get_cme_def_body_attr_id as get_cme_def_body_attr_id;
+pub use rb_get_cme_def_body_optimized_type as get_cme_def_body_optimized_type;
+pub use rb_get_cme_def_body_optimized_index as get_cme_def_body_optimized_index;
+pub use rb_get_cme_def_body_cfunc as get_cme_def_body_cfunc;
+pub use rb_get_def_method_serial as get_def_method_serial;
+pub use rb_get_def_original_id as get_def_original_id;
+pub use rb_get_mct_argc as get_mct_argc;
+pub use rb_get_mct_func as get_mct_func;
+pub use rb_get_def_iseq_ptr as get_def_iseq_ptr;
+pub use rb_iseq_encoded_size as get_iseq_encoded_size;
+pub use rb_get_iseq_body_local_iseq as get_iseq_body_local_iseq;
+pub use rb_get_iseq_body_iseq_encoded as get_iseq_body_iseq_encoded;
+pub use rb_get_iseq_body_stack_max as get_iseq_body_stack_max;
+pub use rb_get_iseq_body_type as get_iseq_body_type;
+pub use rb_get_iseq_flags_has_lead as get_iseq_flags_has_lead;
+pub use rb_get_iseq_flags_has_opt as get_iseq_flags_has_opt;
+pub use rb_get_iseq_flags_has_kw as get_iseq_flags_has_kw;
+pub use rb_get_iseq_flags_has_rest as get_iseq_flags_has_rest;
+pub use rb_get_iseq_flags_has_post as get_iseq_flags_has_post;
+pub use rb_get_iseq_flags_has_kwrest as get_iseq_flags_has_kwrest;
+pub use rb_get_iseq_flags_has_block as get_iseq_flags_has_block;
+pub use rb_get_iseq_flags_ambiguous_param0 as get_iseq_flags_ambiguous_param0;
+pub use rb_get_iseq_flags_accepts_no_kwarg as get_iseq_flags_accepts_no_kwarg;
+pub use rb_get_iseq_body_local_table_size as get_iseq_body_local_table_size;
+pub use rb_get_iseq_body_param_keyword as get_iseq_body_param_keyword;
+pub use rb_get_iseq_body_param_size as get_iseq_body_param_size;
+pub use rb_get_iseq_body_param_lead_num as get_iseq_body_param_lead_num;
+pub use rb_get_iseq_body_param_opt_num as get_iseq_body_param_opt_num;
+pub use rb_get_iseq_body_param_opt_table as get_iseq_body_param_opt_table;
+pub use rb_get_cikw_keyword_len as get_cikw_keyword_len;
+pub use rb_get_cikw_keywords_idx as get_cikw_keywords_idx;
+pub use rb_get_call_data_ci as get_call_data_ci;
+pub use rb_yarv_str_eql_internal as rb_str_eql_internal;
+pub use rb_yarv_ary_entry_internal as rb_ary_entry_internal;
+pub use rb_jit_fix_div_fix as rb_fix_div_fix;
+pub use rb_jit_fix_mod_fix as rb_fix_mod_fix;
+pub use rb_FL_TEST as FL_TEST;
+pub use rb_FL_TEST_RAW as FL_TEST_RAW;
+pub use rb_RB_TYPE_P as RB_TYPE_P;
+pub use rb_BASIC_OP_UNREDEFINED_P as BASIC_OP_UNREDEFINED_P;
+pub use rb_RSTRUCT_LEN as RSTRUCT_LEN;
+pub use rb_RSTRUCT_SET as RSTRUCT_SET;
+pub use rb_vm_ci_argc as vm_ci_argc;
+pub use rb_vm_ci_mid as vm_ci_mid;
+pub use rb_vm_ci_flag as vm_ci_flag;
+pub use rb_vm_ci_kwarg as vm_ci_kwarg;
+pub use rb_METHOD_ENTRY_VISI as METHOD_ENTRY_VISI;
+pub use rb_RCLASS_ORIGIN as RCLASS_ORIGIN;
+
+/// Helper so we can get a Rust string for insn_name()
+pub fn insn_name(opcode: usize) -> String {
+ unsafe {
+ // Look up Ruby's NULL-terminated insn name string
+ let op_name = raw_insn_name(VALUE(opcode));
+
+ // Convert the op name C string to a Rust string and concat
+ let op_name = CStr::from_ptr(op_name).to_str().unwrap();
+
+ // Convert into an owned string
+ op_name.to_string()
+ }
+}
+
+#[allow(unused_variables)]
+pub fn insn_len(opcode: usize) -> u32 {
+ #[cfg(test)]
+ panic!("insn_len is a CRuby function, and we don't link against CRuby for Rust testing!");
+
+ #[cfg(not(test))]
+ unsafe {
+ rb_insn_len(VALUE(opcode)).try_into().unwrap()
+ }
+}
+
+/// Opaque iseq type for opaque iseq pointers from vm_core.h
+/// See: <https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs>
+#[repr(C)]
+pub struct rb_iseq_t {
+ _data: [u8; 0],
+ _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
+}
+
+/// An object handle similar to VALUE in the C code. Our methods assume
+/// that this is a handle. Sometimes the C code briefly uses VALUE as
+/// an unsigned integer type and doesn't necessarily store valid handles, but
+/// thankfully those cases are rare and don't cross the FFI boundary.
+#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
+#[repr(transparent)] // same size and alignment as simply `usize`
+pub struct VALUE(pub usize);
+
+/// Pointer to an ISEQ
+pub type IseqPtr = *const rb_iseq_t;
+
+/// Given an ISEQ pointer, convert a PC to an insn_idx.
+pub fn iseq_pc_to_insn_idx(iseq: IseqPtr, pc: *mut VALUE) -> Option<u16> {
+ let pc_zero = unsafe { rb_iseq_pc_at_idx(iseq, 0) };
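+ // The index is the element distance from the start of the encoded iseq;
+ // try_into() yields None for negative offsets and offsets that don't fit in u16.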
+ unsafe { pc.offset_from(pc_zero) }.try_into().ok()
+}
+
+/// Given an ISEQ pointer and an instruction index, return an opcode.
+pub fn iseq_opcode_at_idx(iseq: IseqPtr, insn_idx: u32) -> u32 {
+ let pc = unsafe { rb_iseq_pc_at_idx(iseq, insn_idx) };
+ unsafe { rb_iseq_opcode_at_pc(iseq, pc) as u32 }
+}
+
+/// Return a poison value to be set above the stack top to verify leafness.
+#[cfg(not(test))]
+pub fn vm_stack_canary() -> u64 {
+ unsafe { rb_vm_stack_canary() }.as_u64()
+}
+
+/// Avoid linking the C function in `cargo test`
+#[cfg(test)]
+pub fn vm_stack_canary() -> u64 {
+ 0
+}
+
+/// Opaque execution-context type from vm_core.h
+#[repr(C)]
+pub struct rb_execution_context_struct {
+ _data: [u8; 0],
+ _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
+}
+/// Alias for rb_execution_context_struct used by CRuby sometimes
+pub type rb_execution_context_t = rb_execution_context_struct;
+
+/// Pointer to an execution context (rb_execution_context_struct)
+pub type EcPtr = *const rb_execution_context_struct;
+
+// From method.h
+#[repr(C)]
+pub struct rb_method_definition_t {
+ _data: [u8; 0],
+ _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
+}
+type rb_method_definition_struct = rb_method_definition_t;
+
+/// Opaque cfunc type from method.h
+#[repr(C)]
+pub struct rb_method_cfunc_t {
+ _data: [u8; 0],
+ _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
+}
+
+/// Opaque call-cache type from vm_callinfo.h
+#[repr(C)]
+pub struct rb_callcache {
+ _data: [u8; 0],
+ _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
+}
+
+/// Opaque control_frame (CFP) struct from vm_core.h
+#[repr(C)]
+pub struct rb_control_frame_struct {
+ _data: [u8; 0],
+ _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
+}
+
+/// Pointer to a control frame pointer (CFP)
+pub type CfpPtr = *mut rb_control_frame_struct;
+
+/// Opaque struct from vm_core.h
+#[repr(C)]
+pub struct rb_cref_t {
+ _data: [u8; 0],
+ _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
+}
+
+impl VALUE {
+ /// Dump info about the value to the console similarly to rp(VALUE)
+ pub fn dump_info(self) {
+ unsafe { rb_obj_info_dump(self) }
+ }
+
+ /// Return whether the value is truthy or falsy in Ruby -- only nil and false are falsy.
+ pub fn test(self) -> bool {
+ let VALUE(cval) = self;
+ let VALUE(qnilval) = Qnil;
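+ // Qfalse is 0, so clearing Qnil's tag bit maps both Qfalse and Qnil to zero,
+ // while every other VALUE keeps at least one bit set (assuming the usual
+ // USE_FLONUM layout, where Qnil is a single bit).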
+ (cval & !qnilval) != 0
+ }
+
+ /// Return true if the number is an immediate integer, flonum or static symbol
+ fn immediate_p(self) -> bool {
+ let VALUE(cval) = self;
+ let mask = RUBY_IMMEDIATE_MASK as usize;
+ (cval & mask) != 0
+ }
+
+ /// Return true if the value is a Ruby immediate integer, flonum, static symbol, nil or false
+ pub fn special_const_p(self) -> bool {
+ self.immediate_p() || !self.test()
+ }
+
+ /// Return true if the value is a heap object
+ pub fn heap_object_p(self) -> bool {
+ !self.special_const_p()
+ }
+
+ /// Shareability between ractors. `RB_OBJ_SHAREABLE_P()`.
+ pub fn shareable_p(self) -> bool {
+ (self.builtin_flags() & RUBY_FL_SHAREABLE as usize) != 0
+ }
+
+ /// Return true if the value is a Ruby Fixnum (immediate-size integer)
+ pub fn fixnum_p(self) -> bool {
+ let VALUE(cval) = self;
+ let flag = RUBY_FIXNUM_FLAG as usize;
+ (cval & flag) == flag
+ }
+
+ /// Return true if the value is an immediate Ruby floating-point number (flonum)
+ pub fn flonum_p(self) -> bool {
+ let VALUE(cval) = self;
+ let mask = RUBY_FLONUM_MASK as usize;
+ let flag = RUBY_FLONUM_FLAG as usize;
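+ // Flonums tag the low two bits with 0b10 (mask 0x03, flag 0x02) under USE_FLONUM.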
+ (cval & mask) == flag
+ }
+
+ /// Return true if the value is a Ruby symbol (RB_SYMBOL_P)
+ pub fn symbol_p(self) -> bool {
+ self.static_sym_p() || self.dynamic_sym_p()
+ }
+
+ /// Return true for a static (non-heap) Ruby symbol (RB_STATIC_SYM_P)
+ pub fn static_sym_p(self) -> bool {
+ let VALUE(cval) = self;
+ let flag = RUBY_SYMBOL_FLAG as usize;
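+ // Static symbols keep the ID in the upper bits and tag the low byte
+ // with RUBY_SYMBOL_FLAG (0x0c).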
+ (cval & 0xff) == flag
+ }
+
+ /// Return true for a dynamic Ruby symbol (RB_DYNAMIC_SYM_P)
+ fn dynamic_sym_p(self) -> bool {
+ return if self.special_const_p() {
+ false
+ } else {
+ self.builtin_type() == RUBY_T_SYMBOL
+ }
+ }
+
+ /// Returns true if the value is T_HASH
+ pub fn hash_p(self) -> bool {
+ !self.special_const_p() && self.builtin_type() == RUBY_T_HASH
+ }
+
+ /// Returns true or false depending on whether the value is nil
+ pub fn nil_p(self) -> bool {
+ self == Qnil
+ }
+
+ pub fn string_p(self) -> bool {
+ self.class_of() == unsafe { rb_cString }
+ }
+
+ /// Read the flags bits from the RBasic object, then return a Ruby type enum (e.g. RUBY_T_ARRAY)
+ pub fn builtin_type(self) -> ruby_value_type {
+ (self.builtin_flags() & (RUBY_T_MASK as usize)) as ruby_value_type
+ }
+
+ pub fn builtin_flags(self) -> usize {
+ assert!(!self.special_const_p());
+
+ let VALUE(cval) = self;
+ let rbasic_ptr = cval as *const RBasic;
+ let flags_bits: usize = unsafe { (*rbasic_ptr).flags }.as_usize();
+ return flags_bits;
+ }
+
+ pub fn class_of(self) -> VALUE {
+ if !self.special_const_p() {
+ let builtin_type = self.builtin_type();
+ assert_ne!(builtin_type, RUBY_T_NONE, "YJIT should only see live objects");
+ assert_ne!(builtin_type, RUBY_T_MOVED, "YJIT should only see live objects");
+ }
+
+ unsafe { rb_yarv_class_of(self) }
+ }
+
+ pub fn is_frozen(self) -> bool {
+ unsafe { rb_obj_frozen_p(self) != VALUE(0) }
+ }
+
+ pub fn shape_too_complex(self) -> bool {
+ unsafe { rb_yjit_shape_obj_too_complex_p(self) }
+ }
+
+ pub fn shape_id_of(self) -> u32 {
+ unsafe { rb_obj_shape_id(self) }
+ }
+
+ pub fn embedded_p(self) -> bool {
+ unsafe {
+ FL_TEST_RAW(self, VALUE(ROBJECT_HEAP as usize)) == VALUE(0)
+ }
+ }
+
+ pub fn as_isize(self) -> isize {
+ let VALUE(is) = self;
+ is as isize
+ }
+
+ pub fn as_i32(self) -> i32 {
+ self.as_i64().try_into().unwrap()
+ }
+
+ pub fn as_u32(self) -> u32 {
+ let VALUE(i) = self;
+ i.try_into().unwrap()
+ }
+
+ pub fn as_i64(self) -> i64 {
+ let VALUE(i) = self;
+ i as i64
+ }
+
+ pub fn as_u64(self) -> u64 {
+ let VALUE(i) = self;
+ i.try_into().unwrap()
+ }
+
+ pub fn as_usize(self) -> usize {
+ let VALUE(us) = self;
+ us
+ }
+
+ pub fn as_ptr<T>(self) -> *const T {
+ let VALUE(us) = self;
+ us as *const T
+ }
+
+ pub fn as_mut_ptr<T>(self) -> *mut T {
+ let VALUE(us) = self;
+ us as *mut T
+ }
+
+ /// For working with opaque pointers while encoding a null check.
+ /// Similar to [std::ptr::NonNull], but for `*const T`. `NonNull<T>`
+ /// is for `*mut T` while our C functions are set up to use `*const T`.
+ /// Casting from `NonNull<T>` to `*const T` is too noisy.
+ pub fn as_optional_ptr<T>(self) -> Option<*const T> {
+ let ptr: *const T = self.as_ptr();
+
+ if ptr.is_null() {
+ None
+ } else {
+ Some(ptr)
+ }
+ }
+
+ /// Assert that `self` is an iseq in debug builds
+ pub fn as_iseq(self) -> IseqPtr {
+ let ptr: IseqPtr = self.as_ptr();
+
+ #[cfg(debug_assertions)]
+ if !ptr.is_null() {
+ unsafe { rb_assert_iseq_handle(self) }
+ }
+
+ ptr
+ }
+
+ /// Assert that `self` is a method entry in debug builds
+ pub fn as_cme(self) -> *const rb_callable_method_entry_t {
+ let ptr: *const rb_callable_method_entry_t = self.as_ptr();
+
+ #[cfg(debug_assertions)]
+ if !ptr.is_null() {
+ unsafe { rb_assert_cme_handle(self) }
+ }
+
+ ptr
+ }
+
+ pub fn fixnum_from_usize(item: usize) -> Self {
+ assert!(item <= (RUBY_FIXNUM_MAX as usize)); // An unsigned will always be greater than RUBY_FIXNUM_MIN
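+ // Fixnums are encoded as (n << 1) | 1; the wrapping form below computes
+ // 2 * item + 1 without risking overflow UB. E.g. fixnum_from_usize(3) == VALUE(7).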
+ let k: usize = item.wrapping_add(item.wrapping_add(1));
+ VALUE(k)
+ }
+}
+
+impl From<IseqPtr> for VALUE {
+ /// For `.into()` convenience
+ fn from(iseq: IseqPtr) -> Self {
+ VALUE(iseq as usize)
+ }
+}
+
+impl From<*const rb_callable_method_entry_t> for VALUE {
+ /// For `.into()` convenience
+ fn from(cme: *const rb_callable_method_entry_t) -> Self {
+ VALUE(cme as usize)
+ }
+}
+
+impl From<&str> for VALUE {
+ fn from(value: &str) -> Self {
+ rust_str_to_ruby(value)
+ }
+}
+
+impl From<String> for VALUE {
+ fn from(value: String) -> Self {
+ rust_str_to_ruby(&value)
+ }
+}
+
+impl From<VALUE> for u64 {
+ fn from(value: VALUE) -> Self {
+ let VALUE(uimm) = value;
+ uimm as u64
+ }
+}
+
+impl From<VALUE> for i64 {
+ fn from(value: VALUE) -> Self {
+ let VALUE(uimm) = value;
+ assert!(uimm <= (i64::MAX as usize));
+ uimm as i64
+ }
+}
+
+impl From<VALUE> for i32 {
+ fn from(value: VALUE) -> Self {
+ let VALUE(uimm) = value;
+ assert!(uimm <= (i32::MAX as usize));
+ uimm.try_into().unwrap()
+ }
+}
+
+impl From<VALUE> for u16 {
+ fn from(value: VALUE) -> Self {
+ let VALUE(uimm) = value;
+ uimm.try_into().unwrap()
+ }
+}
+
+/// Produce a Ruby string from a Rust string slice
+pub fn rust_str_to_ruby(str: &str) -> VALUE {
+ unsafe { rb_utf8_str_new(str.as_ptr() as *const _, str.len() as i64) }
+}
+
+/// Produce a Ruby symbol from a Rust string slice
+pub fn rust_str_to_sym(str: &str) -> VALUE {
+ let id = rust_str_to_id(str);
+ unsafe { rb_id2sym(id) }
+}
+
+/// Produce an ID from a Rust string slice
+pub fn rust_str_to_id(str: &str) -> ID {
+ let c_str = CString::new(str).unwrap();
+ let c_ptr: *const c_char = c_str.as_ptr();
+ unsafe { rb_intern(c_ptr) }
+}
+
+/// Produce an owned Rust String from a C char pointer
+pub fn cstr_to_rust_string(c_char_ptr: *const c_char) -> Option<String> {
+ assert!(c_char_ptr != std::ptr::null());
+
+ let c_str: &CStr = unsafe { CStr::from_ptr(c_char_ptr) };
+
+ match c_str.to_str() {
+ Ok(rust_str) => Some(rust_str.to_string()),
+ Err(_) => None
+ }
+}
+
+/// A location in Rust code for integrating with debugging facilities defined in C.
+/// Use the [src_loc!] macro to create an instance.
+pub struct SourceLocation {
+ pub file: &'static CStr,
+ pub line: c_int,
+}
+
+impl Debug for SourceLocation {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ f.write_fmt(format_args!("{}:{}", self.file.to_string_lossy(), self.line))
+ }
+}
+
+/// Make a [SourceLocation] at the current spot.
+macro_rules! src_loc {
+ () => {
+ {
+ // Nul-terminated string with static lifetime, make a CStr out of it safely.
+ let file: &'static str = concat!(file!(), '\0');
+ $crate::cruby::SourceLocation {
+ file: unsafe { std::ffi::CStr::from_ptr(file.as_ptr().cast()) },
+ line: line!().try_into().unwrap(),
+ }
+ }
+ };
+}
+
+pub(crate) use src_loc;
+
+/// Run GC write barrier. Required after making a new edge in the object reference
+/// graph from `old` to `young`.
+macro_rules! obj_written {
+ ($old: expr, $young: expr) => {
+ let (old, young): (VALUE, VALUE) = ($old, $young);
+ let src_loc = $crate::cruby::src_loc!();
+ unsafe { rb_yjit_obj_written(old, young, src_loc.file.as_ptr(), src_loc.line) };
+ };
+}
+pub(crate) use obj_written;
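+
+// Hypothetical usage sketch: after writing a young object (e.g. a cme) into a
+// field of an old object (e.g. an iseq):
+//
+// obj_written!(iseq.into(), cme.into());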
+
+/// Acquire the VM lock, make sure all other Ruby threads are asleep then run
+/// some code while holding the lock. Returns whatever `func` returns.
+/// Use with [src_loc!].
+///
+/// Required for code patching in the presence of ractors.
+pub fn with_vm_lock<F, R>(loc: SourceLocation, func: F) -> R
+where
+ F: FnOnce() -> R + UnwindSafe,
+{
+ let file = loc.file.as_ptr();
+ let line = loc.line;
+ let mut recursive_lock_level: c_uint = 0;
+
+ unsafe { rb_jit_vm_lock_then_barrier(&mut recursive_lock_level, file, line) };
+
+ let ret = match catch_unwind(func) {
+ Ok(result) => result,
+ Err(_) => {
+ // Theoretically we can recover from some of these panics,
+ // but it's too late if the unwind reaches here.
+
+ let _ = catch_unwind(|| {
+ // IO functions can panic too.
+ eprintln!(
+ "YJIT panicked while holding VM lock acquired at {}:{}. Aborting...",
+ loc.file.to_string_lossy(),
+ line,
+ );
+ });
+ std::process::abort();
+ }
+ };
+
+ unsafe { rb_jit_vm_unlock(&mut recursive_lock_level, file, line) };
+
+ ret
+}
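+
+// Usage sketch (illustrative, not from the original source):
+//
+// let result = with_vm_lock(src_loc!(), || {
+// // ... patch machine code while other Ruby threads are stopped ...
+// 42
+// });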
+
+// Non-idiomatic capitalization for consistency with CRuby code
+#[allow(non_upper_case_globals)]
+pub const Qfalse: VALUE = VALUE(RUBY_Qfalse as usize);
+#[allow(non_upper_case_globals)]
+pub const Qnil: VALUE = VALUE(RUBY_Qnil as usize);
+#[allow(non_upper_case_globals)]
+pub const Qtrue: VALUE = VALUE(RUBY_Qtrue as usize);
+#[allow(non_upper_case_globals)]
+pub const Qundef: VALUE = VALUE(RUBY_Qundef as usize);
+
+#[allow(unused)]
+mod manual_defs {
+ use super::*;
+
+ pub const SIZEOF_VALUE: usize = 8;
+ pub const SIZEOF_VALUE_I32: i32 = SIZEOF_VALUE as i32;
+ pub const VALUE_BITS: u8 = 8 * SIZEOF_VALUE as u8;
+
+ pub const RUBY_LONG_MIN: isize = std::os::raw::c_long::MIN as isize;
+ pub const RUBY_LONG_MAX: isize = std::os::raw::c_long::MAX as isize;
+
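+ // Fixnums are tagged immediates: the low bit is the fixnum flag (see
+ // RUBY_FIXNUM_FLAG in the generated bindings), so the payload carries one
+ // bit fewer than a C long, hence the division by two.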
+ pub const RUBY_FIXNUM_MIN: isize = RUBY_LONG_MIN / 2;
+ pub const RUBY_FIXNUM_MAX: isize = RUBY_LONG_MAX / 2;
+
+ // From vm_callinfo.h - uses a calculation that seems to confuse bindgen
+ pub const VM_CALL_ARGS_SIMPLE: u32 = 1 << VM_CALL_ARGS_SIMPLE_bit;
+ pub const VM_CALL_ARGS_SPLAT: u32 = 1 << VM_CALL_ARGS_SPLAT_bit;
+ pub const VM_CALL_ARGS_BLOCKARG: u32 = 1 << VM_CALL_ARGS_BLOCKARG_bit;
+ pub const VM_CALL_FORWARDING: u32 = 1 << VM_CALL_FORWARDING_bit;
+ pub const VM_CALL_FCALL: u32 = 1 << VM_CALL_FCALL_bit;
+ pub const VM_CALL_KWARG: u32 = 1 << VM_CALL_KWARG_bit;
+ pub const VM_CALL_KW_SPLAT: u32 = 1 << VM_CALL_KW_SPLAT_bit;
+ pub const VM_CALL_TAILCALL: u32 = 1 << VM_CALL_TAILCALL_bit;
+ pub const VM_CALL_ZSUPER: u32 = 1 << VM_CALL_ZSUPER_bit;
+ pub const VM_CALL_OPT_SEND: u32 = 1 << VM_CALL_OPT_SEND_bit;
+
+ // From internal/struct.h - in anonymous enum, so we can't easily import it
+ pub const RSTRUCT_EMBED_LEN_MASK: usize = (RUBY_FL_USER7 | RUBY_FL_USER6 | RUBY_FL_USER5 | RUBY_FL_USER4 | RUBY_FL_USER3 | RUBY_FL_USER2 | RUBY_FL_USER1) as usize;
+
+ // From iseq.h - via a different constant, which seems to confuse bindgen
+ pub const ISEQ_TRANSLATED: usize = RUBY_FL_USER7 as usize;
+
+ // We'll need to encode a lot of Ruby struct/field offsets as constants unless we want to
+ // redeclare all the Ruby C structs and write our own offsetof macro. For now, we use constants.
+ pub const RUBY_OFFSET_RBASIC_FLAGS: i32 = 0; // struct RBasic, field "flags"
+ pub const RUBY_OFFSET_RBASIC_KLASS: i32 = 8; // struct RBasic, field "klass"
+ pub const RUBY_OFFSET_RARRAY_AS_HEAP_LEN: i32 = 16; // struct RArray, subfield "as.heap.len"
+ pub const RUBY_OFFSET_RARRAY_AS_HEAP_PTR: i32 = 32; // struct RArray, subfield "as.heap.ptr"
+ pub const RUBY_OFFSET_RARRAY_AS_ARY: i32 = 16; // struct RArray, subfield "as.ary"
+
+ pub const RUBY_OFFSET_RSTRUCT_AS_HEAP_PTR: i32 = 24; // struct RStruct, subfield "as.heap.ptr"
+ pub const RUBY_OFFSET_RSTRUCT_AS_ARY: i32 = 16; // struct RStruct, subfield "as.ary"
+
+ pub const RUBY_OFFSET_RSTRING_AS_HEAP_PTR: i32 = 24; // struct RString, subfield "as.heap.ptr"
+ pub const RUBY_OFFSET_RSTRING_AS_ARY: i32 = 24; // struct RString, subfield "as.embed.ary"
+
+ // Constants from rb_control_frame_t in vm_core.h
+ pub const RUBY_OFFSET_CFP_PC: i32 = 0;
+ pub const RUBY_OFFSET_CFP_SP: i32 = 8;
+ pub const RUBY_OFFSET_CFP_ISEQ: i32 = 16;
+ pub const RUBY_OFFSET_CFP_SELF: i32 = 24;
+ pub const RUBY_OFFSET_CFP_EP: i32 = 32;
+ pub const RUBY_OFFSET_CFP_BLOCK_CODE: i32 = 40;
+ pub const RUBY_OFFSET_CFP_JIT_RETURN: i32 = 48;
+ pub const RUBY_SIZEOF_CONTROL_FRAME: usize = 56;
+
+ // Constants from rb_thread_t in vm_core.h
+ pub const RUBY_OFFSET_THREAD_SELF: i32 = 16;
+
+ // Constants from iseq_inline_constant_cache (IC) and iseq_inline_constant_cache_entry (ICE) in vm_core.h
+ pub const RUBY_OFFSET_IC_ENTRY: i32 = 0;
+ pub const RUBY_OFFSET_ICE_VALUE: i32 = 8;
+}
+pub use manual_defs::*;
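+
+// Sketch of how these offsets are typically consumed (`cfp` is a hypothetical
+// `*mut rb_control_frame_struct`; real call sites live in codegen):
+//
+//     let sp_addr = (cfp as *const u8).wrapping_add(RUBY_OFFSET_CFP_SP as usize);
+//     let sp = unsafe { *(sp_addr as *const *mut VALUE) };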
+
+/// Interned ID values for Ruby symbols and method names.
+/// See [type@crate::cruby::ID] and its usages outside of YJIT.
+pub(crate) mod ids {
+ use std::sync::atomic::AtomicU64;
+ /// Globals for caching IDs on boot. Atomics are used with relaxed ordering
+ /// so reads can happen without `unsafe`; synchronization is done through
+ /// the VM lock.
+ macro_rules! def_ids {
+ ($(name: $ident:ident content: $str:literal)*) => {
+ $(
+ #[doc = concat!("[type@crate::cruby::ID] for `", stringify!($str), "`")]
+ pub static $ident: AtomicU64 = AtomicU64::new(0);
+ )*
+
+ pub(crate) fn init() {
+ $(
+ let content = &$str;
+ let ptr: *const u8 = content.as_ptr();
+
+ // Lookup and cache each ID
+ $ident.store(
+ unsafe { $crate::cruby::rb_intern2(ptr.cast(), content.len() as _) },
+ std::sync::atomic::Ordering::Relaxed
+ );
+ )*
+
+ }
+ }
+ }
+
+ def_ids! {
+ name: NULL content: b""
+ name: respond_to_missing content: b"respond_to_missing?"
+ name: method_missing content: b"method_missing"
+ name: to_ary content: b"to_ary"
+ name: to_s content: b"to_s"
+ name: eq content: b"=="
+ name: include_p content: b"include?"
+ }
+}
+
+/// Get a CRuby `ID` for an interned string, e.g. a particular method name.
+macro_rules! ID {
+ ($id_name:ident) => {
+ $crate::cruby::ids::$id_name.load(std::sync::atomic::Ordering::Relaxed)
+ }
+}
+pub(crate) use ID;
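+
+// Hedged usage sketch: this reads the cached value, so `ids::init()` is
+// assumed to have run during boot (under the VM lock) beforehand:
+//
+//     let id = ID!(to_s);      // relaxed atomic load of the cached rb_intern("to_s")
+//     debug_assert_ne!(id, 0); // 0 would mean init() has not run yet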
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
new file mode 100644
index 0000000000..56994388a3
--- /dev/null
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -0,0 +1,1322 @@
+/* automatically generated by rust-bindgen 0.70.1 */
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct __BindgenBitfieldUnit<Storage> {
+ storage: Storage,
+}
+impl<Storage> __BindgenBitfieldUnit<Storage> {
+ #[inline]
+ pub const fn new(storage: Storage) -> Self {
+ Self { storage }
+ }
+}
+impl<Storage> __BindgenBitfieldUnit<Storage>
+where
+ Storage: AsRef<[u8]> + AsMut<[u8]>,
+{
+ #[inline]
+ pub fn get_bit(&self, index: usize) -> bool {
+ debug_assert!(index / 8 < self.storage.as_ref().len());
+ let byte_index = index / 8;
+ let byte = self.storage.as_ref()[byte_index];
+ let bit_index = if cfg!(target_endian = "big") {
+ 7 - (index % 8)
+ } else {
+ index % 8
+ };
+ let mask = 1 << bit_index;
+ byte & mask == mask
+ }
+ #[inline]
+ pub fn set_bit(&mut self, index: usize, val: bool) {
+ debug_assert!(index / 8 < self.storage.as_ref().len());
+ let byte_index = index / 8;
+ let byte = &mut self.storage.as_mut()[byte_index];
+ let bit_index = if cfg!(target_endian = "big") {
+ 7 - (index % 8)
+ } else {
+ index % 8
+ };
+ let mask = 1 << bit_index;
+ if val {
+ *byte |= mask;
+ } else {
+ *byte &= !mask;
+ }
+ }
+ #[inline]
+ pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
+ debug_assert!(bit_width <= 64);
+ debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+ debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+ let mut val = 0;
+ for i in 0..(bit_width as usize) {
+ if self.get_bit(i + bit_offset) {
+ let index = if cfg!(target_endian = "big") {
+ bit_width as usize - 1 - i
+ } else {
+ i
+ };
+ val |= 1 << index;
+ }
+ }
+ val
+ }
+ #[inline]
+ pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
+ debug_assert!(bit_width <= 64);
+ debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+ debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+ for i in 0..(bit_width as usize) {
+ let mask = 1 << i;
+ let val_bit_is_set = val & mask == mask;
+ let index = if cfg!(target_endian = "big") {
+ bit_width as usize - 1 - i
+ } else {
+ i
+ };
+ self.set_bit(index + bit_offset, val_bit_is_set);
+ }
+ }
+}
+#[repr(C)]
+#[derive(Default)]
+pub struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>, [T; 0]);
+impl<T> __IncompleteArrayField<T> {
+ #[inline]
+ pub const fn new() -> Self {
+ __IncompleteArrayField(::std::marker::PhantomData, [])
+ }
+ #[inline]
+ pub fn as_ptr(&self) -> *const T {
+ self as *const _ as *const T
+ }
+ #[inline]
+ pub fn as_mut_ptr(&mut self) -> *mut T {
+ self as *mut _ as *mut T
+ }
+ #[inline]
+ pub unsafe fn as_slice(&self, len: usize) -> &[T] {
+ ::std::slice::from_raw_parts(self.as_ptr(), len)
+ }
+ #[inline]
+ pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
+ ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
+ }
+}
+impl<T> ::std::fmt::Debug for __IncompleteArrayField<T> {
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ fmt.write_str("__IncompleteArrayField")
+ }
+}
+#[repr(C)]
+pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
+impl<T> __BindgenUnionField<T> {
+ #[inline]
+ pub const fn new() -> Self {
+ __BindgenUnionField(::std::marker::PhantomData)
+ }
+ #[inline]
+ pub unsafe fn as_ref(&self) -> &T {
+ ::std::mem::transmute(self)
+ }
+ #[inline]
+ pub unsafe fn as_mut(&mut self) -> &mut T {
+ ::std::mem::transmute(self)
+ }
+}
+impl<T> ::std::default::Default for __BindgenUnionField<T> {
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+impl<T> ::std::clone::Clone for __BindgenUnionField<T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+impl<T> ::std::marker::Copy for __BindgenUnionField<T> {}
+impl<T> ::std::fmt::Debug for __BindgenUnionField<T> {
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ fmt.write_str("__BindgenUnionField")
+ }
+}
+impl<T> ::std::hash::Hash for __BindgenUnionField<T> {
+ fn hash<H: ::std::hash::Hasher>(&self, _state: &mut H) {}
+}
+impl<T> ::std::cmp::PartialEq for __BindgenUnionField<T> {
+ fn eq(&self, _other: &__BindgenUnionField<T>) -> bool {
+ true
+ }
+}
+impl<T> ::std::cmp::Eq for __BindgenUnionField<T> {}
+pub const INTEGER_REDEFINED_OP_FLAG: u32 = 1;
+pub const FLOAT_REDEFINED_OP_FLAG: u32 = 2;
+pub const STRING_REDEFINED_OP_FLAG: u32 = 4;
+pub const ARRAY_REDEFINED_OP_FLAG: u32 = 8;
+pub const HASH_REDEFINED_OP_FLAG: u32 = 16;
+pub const SYMBOL_REDEFINED_OP_FLAG: u32 = 64;
+pub const TIME_REDEFINED_OP_FLAG: u32 = 128;
+pub const REGEXP_REDEFINED_OP_FLAG: u32 = 256;
+pub const NIL_REDEFINED_OP_FLAG: u32 = 512;
+pub const TRUE_REDEFINED_OP_FLAG: u32 = 1024;
+pub const FALSE_REDEFINED_OP_FLAG: u32 = 2048;
+pub const PROC_REDEFINED_OP_FLAG: u32 = 4096;
+pub const VM_KW_SPECIFIED_BITS_MAX: u32 = 31;
+pub const VM_ENV_DATA_SIZE: u32 = 3;
+pub const VM_ENV_DATA_INDEX_ME_CREF: i32 = -2;
+pub const VM_ENV_DATA_INDEX_SPECVAL: i32 = -1;
+pub const VM_ENV_DATA_INDEX_FLAGS: u32 = 0;
+pub const VM_BLOCK_HANDLER_NONE: u32 = 0;
+pub const SHAPE_ID_NUM_BITS: u32 = 32;
+pub type ID = ::std::os::raw::c_ulong;
+pub type rb_alloc_func_t = ::std::option::Option<unsafe extern "C" fn(klass: VALUE) -> VALUE>;
+pub const RUBY_Qfalse: ruby_special_consts = 0;
+pub const RUBY_Qnil: ruby_special_consts = 4;
+pub const RUBY_Qtrue: ruby_special_consts = 20;
+pub const RUBY_Qundef: ruby_special_consts = 36;
+pub const RUBY_IMMEDIATE_MASK: ruby_special_consts = 7;
+pub const RUBY_FIXNUM_FLAG: ruby_special_consts = 1;
+pub const RUBY_FLONUM_MASK: ruby_special_consts = 3;
+pub const RUBY_FLONUM_FLAG: ruby_special_consts = 2;
+pub const RUBY_SYMBOL_FLAG: ruby_special_consts = 12;
+pub const RUBY_SPECIAL_SHIFT: ruby_special_consts = 8;
+pub type ruby_special_consts = u32;
+#[repr(C)]
+pub struct RBasic {
+ pub flags: VALUE,
+ pub klass: VALUE,
+}
+pub const RUBY_T_NONE: ruby_value_type = 0;
+pub const RUBY_T_OBJECT: ruby_value_type = 1;
+pub const RUBY_T_CLASS: ruby_value_type = 2;
+pub const RUBY_T_MODULE: ruby_value_type = 3;
+pub const RUBY_T_FLOAT: ruby_value_type = 4;
+pub const RUBY_T_STRING: ruby_value_type = 5;
+pub const RUBY_T_REGEXP: ruby_value_type = 6;
+pub const RUBY_T_ARRAY: ruby_value_type = 7;
+pub const RUBY_T_HASH: ruby_value_type = 8;
+pub const RUBY_T_STRUCT: ruby_value_type = 9;
+pub const RUBY_T_BIGNUM: ruby_value_type = 10;
+pub const RUBY_T_FILE: ruby_value_type = 11;
+pub const RUBY_T_DATA: ruby_value_type = 12;
+pub const RUBY_T_MATCH: ruby_value_type = 13;
+pub const RUBY_T_COMPLEX: ruby_value_type = 14;
+pub const RUBY_T_RATIONAL: ruby_value_type = 15;
+pub const RUBY_T_NIL: ruby_value_type = 17;
+pub const RUBY_T_TRUE: ruby_value_type = 18;
+pub const RUBY_T_FALSE: ruby_value_type = 19;
+pub const RUBY_T_SYMBOL: ruby_value_type = 20;
+pub const RUBY_T_FIXNUM: ruby_value_type = 21;
+pub const RUBY_T_UNDEF: ruby_value_type = 22;
+pub const RUBY_T_IMEMO: ruby_value_type = 26;
+pub const RUBY_T_NODE: ruby_value_type = 27;
+pub const RUBY_T_ICLASS: ruby_value_type = 28;
+pub const RUBY_T_ZOMBIE: ruby_value_type = 29;
+pub const RUBY_T_MOVED: ruby_value_type = 30;
+pub const RUBY_T_MASK: ruby_value_type = 31;
+pub type ruby_value_type = u32;
+pub const RUBY_FL_USHIFT: ruby_fl_ushift = 12;
+pub type ruby_fl_ushift = u32;
+pub const RUBY_FL_WB_PROTECTED: ruby_fl_type = 32;
+pub const RUBY_FL_PROMOTED: ruby_fl_type = 32;
+pub const RUBY_FL_USERPRIV0: ruby_fl_type = 64;
+pub const RUBY_FL_FINALIZE: ruby_fl_type = 128;
+pub const RUBY_FL_EXIVAR: ruby_fl_type = 0;
+pub const RUBY_FL_SHAREABLE: ruby_fl_type = 256;
+pub const RUBY_FL_WEAK_REFERENCE: ruby_fl_type = 512;
+pub const RUBY_FL_UNUSED10: ruby_fl_type = 1024;
+pub const RUBY_FL_FREEZE: ruby_fl_type = 2048;
+pub const RUBY_FL_USER0: ruby_fl_type = 4096;
+pub const RUBY_FL_USER1: ruby_fl_type = 8192;
+pub const RUBY_FL_USER2: ruby_fl_type = 16384;
+pub const RUBY_FL_USER3: ruby_fl_type = 32768;
+pub const RUBY_FL_USER4: ruby_fl_type = 65536;
+pub const RUBY_FL_USER5: ruby_fl_type = 131072;
+pub const RUBY_FL_USER6: ruby_fl_type = 262144;
+pub const RUBY_FL_USER7: ruby_fl_type = 524288;
+pub const RUBY_FL_USER8: ruby_fl_type = 1048576;
+pub const RUBY_FL_USER9: ruby_fl_type = 2097152;
+pub const RUBY_FL_USER10: ruby_fl_type = 4194304;
+pub const RUBY_FL_USER11: ruby_fl_type = 8388608;
+pub const RUBY_FL_USER12: ruby_fl_type = 16777216;
+pub const RUBY_FL_USER13: ruby_fl_type = 33554432;
+pub const RUBY_FL_USER14: ruby_fl_type = 67108864;
+pub const RUBY_FL_USER15: ruby_fl_type = 134217728;
+pub const RUBY_FL_USER16: ruby_fl_type = 268435456;
+pub const RUBY_FL_USER17: ruby_fl_type = 536870912;
+pub const RUBY_FL_USER18: ruby_fl_type = 1073741824;
+pub const RUBY_FL_USER19: ruby_fl_type = -2147483648;
+pub const RUBY_ELTS_SHARED: ruby_fl_type = 4096;
+pub const RUBY_FL_SINGLETON: ruby_fl_type = 8192;
+pub type ruby_fl_type = i32;
+pub const RSTRING_NOEMBED: ruby_rstring_flags = 8192;
+pub const RSTRING_FSTR: ruby_rstring_flags = 536870912;
+pub type ruby_rstring_flags = u32;
+pub type st_data_t = ::std::os::raw::c_ulong;
+pub type st_index_t = st_data_t;
+pub const ST_CONTINUE: st_retval = 0;
+pub const ST_STOP: st_retval = 1;
+pub const ST_DELETE: st_retval = 2;
+pub const ST_CHECK: st_retval = 3;
+pub const ST_REPLACE: st_retval = 4;
+pub type st_retval = u32;
+pub type st_foreach_callback_func = ::std::option::Option<
+ unsafe extern "C" fn(
+ arg1: st_data_t,
+ arg2: st_data_t,
+ arg3: st_data_t,
+ ) -> ::std::os::raw::c_int,
+>;
+pub const RARRAY_EMBED_FLAG: ruby_rarray_flags = 8192;
+pub const RARRAY_EMBED_LEN_MASK: ruby_rarray_flags = 4161536;
+pub type ruby_rarray_flags = u32;
+pub const RARRAY_EMBED_LEN_SHIFT: ruby_rarray_consts = 15;
+pub type ruby_rarray_consts = u32;
+pub const RMODULE_IS_REFINEMENT: ruby_rmodule_flags = 8192;
+pub type ruby_rmodule_flags = u32;
+pub const ROBJECT_HEAP: ruby_robject_flags = 65536;
+pub type ruby_robject_flags = u32;
+pub type rb_block_call_func = ::std::option::Option<
+ unsafe extern "C" fn(
+ yielded_arg: VALUE,
+ callback_arg: VALUE,
+ argc: ::std::os::raw::c_int,
+ argv: *const VALUE,
+ blockarg: VALUE,
+ ) -> VALUE,
+>;
+pub type rb_block_call_func_t = rb_block_call_func;
+pub const RUBY_ENCODING_INLINE_MAX: ruby_encoding_consts = 127;
+pub const RUBY_ENCODING_SHIFT: ruby_encoding_consts = 22;
+pub const RUBY_ENCODING_MASK: ruby_encoding_consts = 532676608;
+pub const RUBY_ENCODING_MAXNAMELEN: ruby_encoding_consts = 42;
+pub type ruby_encoding_consts = u32;
+pub const RUBY_ENCINDEX_ASCII_8BIT: ruby_preserved_encindex = 0;
+pub const RUBY_ENCINDEX_UTF_8: ruby_preserved_encindex = 1;
+pub const RUBY_ENCINDEX_US_ASCII: ruby_preserved_encindex = 2;
+pub const RUBY_ENCINDEX_UTF_16BE: ruby_preserved_encindex = 3;
+pub const RUBY_ENCINDEX_UTF_16LE: ruby_preserved_encindex = 4;
+pub const RUBY_ENCINDEX_UTF_32BE: ruby_preserved_encindex = 5;
+pub const RUBY_ENCINDEX_UTF_32LE: ruby_preserved_encindex = 6;
+pub const RUBY_ENCINDEX_UTF_16: ruby_preserved_encindex = 7;
+pub const RUBY_ENCINDEX_UTF_32: ruby_preserved_encindex = 8;
+pub const RUBY_ENCINDEX_UTF8_MAC: ruby_preserved_encindex = 9;
+pub const RUBY_ENCINDEX_EUC_JP: ruby_preserved_encindex = 10;
+pub const RUBY_ENCINDEX_Windows_31J: ruby_preserved_encindex = 11;
+pub const RUBY_ENCINDEX_BUILTIN_MAX: ruby_preserved_encindex = 12;
+pub type ruby_preserved_encindex = u32;
+pub const BOP_PLUS: ruby_basic_operators = 0;
+pub const BOP_MINUS: ruby_basic_operators = 1;
+pub const BOP_MULT: ruby_basic_operators = 2;
+pub const BOP_DIV: ruby_basic_operators = 3;
+pub const BOP_MOD: ruby_basic_operators = 4;
+pub const BOP_EQ: ruby_basic_operators = 5;
+pub const BOP_EQQ: ruby_basic_operators = 6;
+pub const BOP_LT: ruby_basic_operators = 7;
+pub const BOP_LE: ruby_basic_operators = 8;
+pub const BOP_LTLT: ruby_basic_operators = 9;
+pub const BOP_AREF: ruby_basic_operators = 10;
+pub const BOP_ASET: ruby_basic_operators = 11;
+pub const BOP_LENGTH: ruby_basic_operators = 12;
+pub const BOP_SIZE: ruby_basic_operators = 13;
+pub const BOP_EMPTY_P: ruby_basic_operators = 14;
+pub const BOP_NIL_P: ruby_basic_operators = 15;
+pub const BOP_SUCC: ruby_basic_operators = 16;
+pub const BOP_GT: ruby_basic_operators = 17;
+pub const BOP_GE: ruby_basic_operators = 18;
+pub const BOP_GTGT: ruby_basic_operators = 19;
+pub const BOP_NOT: ruby_basic_operators = 20;
+pub const BOP_NEQ: ruby_basic_operators = 21;
+pub const BOP_MATCH: ruby_basic_operators = 22;
+pub const BOP_FREEZE: ruby_basic_operators = 23;
+pub const BOP_UMINUS: ruby_basic_operators = 24;
+pub const BOP_MAX: ruby_basic_operators = 25;
+pub const BOP_MIN: ruby_basic_operators = 26;
+pub const BOP_HASH: ruby_basic_operators = 27;
+pub const BOP_CALL: ruby_basic_operators = 28;
+pub const BOP_AND: ruby_basic_operators = 29;
+pub const BOP_OR: ruby_basic_operators = 30;
+pub const BOP_CMP: ruby_basic_operators = 31;
+pub const BOP_DEFAULT: ruby_basic_operators = 32;
+pub const BOP_PACK: ruby_basic_operators = 33;
+pub const BOP_INCLUDE_P: ruby_basic_operators = 34;
+pub const BOP_LAST_: ruby_basic_operators = 35;
+pub type ruby_basic_operators = u32;
+pub type rb_serial_t = ::std::os::raw::c_ulonglong;
+pub const imemo_env: imemo_type = 0;
+pub const imemo_cref: imemo_type = 1;
+pub const imemo_svar: imemo_type = 2;
+pub const imemo_throw_data: imemo_type = 3;
+pub const imemo_ifunc: imemo_type = 4;
+pub const imemo_memo: imemo_type = 5;
+pub const imemo_ment: imemo_type = 6;
+pub const imemo_iseq: imemo_type = 7;
+pub const imemo_tmpbuf: imemo_type = 8;
+pub const imemo_callinfo: imemo_type = 10;
+pub const imemo_callcache: imemo_type = 11;
+pub const imemo_constcache: imemo_type = 12;
+pub const imemo_fields: imemo_type = 13;
+pub type imemo_type = u32;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct vm_ifunc_argc {
+ pub min: ::std::os::raw::c_int,
+ pub max: ::std::os::raw::c_int,
+}
+#[repr(C)]
+pub struct vm_ifunc {
+ pub flags: VALUE,
+ pub svar_lep: *mut VALUE,
+ pub func: rb_block_call_func_t,
+ pub data: *const ::std::os::raw::c_void,
+ pub argc: vm_ifunc_argc,
+}
+pub const METHOD_VISI_UNDEF: rb_method_visibility_t = 0;
+pub const METHOD_VISI_PUBLIC: rb_method_visibility_t = 1;
+pub const METHOD_VISI_PRIVATE: rb_method_visibility_t = 2;
+pub const METHOD_VISI_PROTECTED: rb_method_visibility_t = 3;
+pub const METHOD_VISI_MASK: rb_method_visibility_t = 3;
+pub type rb_method_visibility_t = u32;
+#[repr(C)]
+pub struct rb_method_entry_struct {
+ pub flags: VALUE,
+ pub defined_class: VALUE,
+ pub def: *mut rb_method_definition_struct,
+ pub called_id: ID,
+ pub owner: VALUE,
+}
+pub type rb_method_entry_t = rb_method_entry_struct;
+#[repr(C)]
+pub struct rb_callable_method_entry_struct {
+ pub flags: VALUE,
+ pub defined_class: VALUE,
+ pub def: *mut rb_method_definition_struct,
+ pub called_id: ID,
+ pub owner: VALUE,
+}
+pub type rb_callable_method_entry_t = rb_callable_method_entry_struct;
+pub const VM_METHOD_TYPE_ISEQ: rb_method_type_t = 0;
+pub const VM_METHOD_TYPE_CFUNC: rb_method_type_t = 1;
+pub const VM_METHOD_TYPE_ATTRSET: rb_method_type_t = 2;
+pub const VM_METHOD_TYPE_IVAR: rb_method_type_t = 3;
+pub const VM_METHOD_TYPE_BMETHOD: rb_method_type_t = 4;
+pub const VM_METHOD_TYPE_ZSUPER: rb_method_type_t = 5;
+pub const VM_METHOD_TYPE_ALIAS: rb_method_type_t = 6;
+pub const VM_METHOD_TYPE_UNDEF: rb_method_type_t = 7;
+pub const VM_METHOD_TYPE_NOTIMPLEMENTED: rb_method_type_t = 8;
+pub const VM_METHOD_TYPE_OPTIMIZED: rb_method_type_t = 9;
+pub const VM_METHOD_TYPE_MISSING: rb_method_type_t = 10;
+pub const VM_METHOD_TYPE_REFINED: rb_method_type_t = 11;
+pub type rb_method_type_t = u32;
+pub type rb_cfunc_t = ::std::option::Option<unsafe extern "C" fn() -> VALUE>;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct rb_method_cfunc_struct {
+ pub func: rb_cfunc_t,
+ pub invoker: ::std::option::Option<
+ unsafe extern "C" fn(
+ recv: VALUE,
+ argc: ::std::os::raw::c_int,
+ argv: *const VALUE,
+ func: ::std::option::Option<unsafe extern "C" fn() -> VALUE>,
+ ) -> VALUE,
+ >,
+ pub argc: ::std::os::raw::c_int,
+}
+pub const OPTIMIZED_METHOD_TYPE_SEND: method_optimized_type = 0;
+pub const OPTIMIZED_METHOD_TYPE_CALL: method_optimized_type = 1;
+pub const OPTIMIZED_METHOD_TYPE_BLOCK_CALL: method_optimized_type = 2;
+pub const OPTIMIZED_METHOD_TYPE_STRUCT_AREF: method_optimized_type = 3;
+pub const OPTIMIZED_METHOD_TYPE_STRUCT_ASET: method_optimized_type = 4;
+pub const OPTIMIZED_METHOD_TYPE__MAX: method_optimized_type = 5;
+pub type method_optimized_type = u32;
+pub type rb_num_t = ::std::os::raw::c_ulong;
+pub const RUBY_TAG_NONE: ruby_tag_type = 0;
+pub const RUBY_TAG_RETURN: ruby_tag_type = 1;
+pub const RUBY_TAG_BREAK: ruby_tag_type = 2;
+pub const RUBY_TAG_NEXT: ruby_tag_type = 3;
+pub const RUBY_TAG_RETRY: ruby_tag_type = 4;
+pub const RUBY_TAG_REDO: ruby_tag_type = 5;
+pub const RUBY_TAG_RAISE: ruby_tag_type = 6;
+pub const RUBY_TAG_THROW: ruby_tag_type = 7;
+pub const RUBY_TAG_FATAL: ruby_tag_type = 8;
+pub const RUBY_TAG_MASK: ruby_tag_type = 15;
+pub type ruby_tag_type = u32;
+pub const VM_THROW_NO_ESCAPE_FLAG: ruby_vm_throw_flags = 32768;
+pub const VM_THROW_STATE_MASK: ruby_vm_throw_flags = 255;
+pub type ruby_vm_throw_flags = u32;
+#[repr(C)]
+pub struct iseq_inline_constant_cache_entry {
+ pub flags: VALUE,
+ pub value: VALUE,
+ pub ic_cref: *const rb_cref_t,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct iseq_inline_constant_cache {
+ pub entry: *mut iseq_inline_constant_cache_entry,
+ pub segments: *const ID,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct iseq_inline_iv_cache_entry {
+ pub value: u64,
+ pub iv_set_name: ID,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct iseq_inline_cvar_cache_entry {
+ pub entry: *mut rb_cvar_class_tbl_entry,
+}
+pub const ISEQ_TYPE_TOP: rb_iseq_type = 0;
+pub const ISEQ_TYPE_METHOD: rb_iseq_type = 1;
+pub const ISEQ_TYPE_BLOCK: rb_iseq_type = 2;
+pub const ISEQ_TYPE_CLASS: rb_iseq_type = 3;
+pub const ISEQ_TYPE_RESCUE: rb_iseq_type = 4;
+pub const ISEQ_TYPE_ENSURE: rb_iseq_type = 5;
+pub const ISEQ_TYPE_EVAL: rb_iseq_type = 6;
+pub const ISEQ_TYPE_MAIN: rb_iseq_type = 7;
+pub const ISEQ_TYPE_PLAIN: rb_iseq_type = 8;
+pub type rb_iseq_type = u32;
+pub const BUILTIN_ATTR_LEAF: rb_builtin_attr = 1;
+pub const BUILTIN_ATTR_SINGLE_NOARG_LEAF: rb_builtin_attr = 2;
+pub const BUILTIN_ATTR_INLINE_BLOCK: rb_builtin_attr = 4;
+pub const BUILTIN_ATTR_C_TRACE: rb_builtin_attr = 8;
+pub type rb_builtin_attr = u32;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct rb_iseq_constant_body_rb_iseq_parameters_rb_iseq_param_keyword {
+ pub num: ::std::os::raw::c_int,
+ pub required_num: ::std::os::raw::c_int,
+ pub bits_start: ::std::os::raw::c_int,
+ pub rest_start: ::std::os::raw::c_int,
+ pub table: *const ID,
+ pub default_values: *mut VALUE,
+}
+#[repr(C)]
+pub struct rb_captured_block {
+ pub self_: VALUE,
+ pub ep: *const VALUE,
+ pub code: rb_captured_block__bindgen_ty_1,
+}
+#[repr(C)]
+pub struct rb_captured_block__bindgen_ty_1 {
+ pub iseq: __BindgenUnionField<*const rb_iseq_t>,
+ pub ifunc: __BindgenUnionField<*const vm_ifunc>,
+ pub val: __BindgenUnionField<VALUE>,
+ pub bindgen_union_field: u64,
+}
+pub const block_type_iseq: rb_block_type = 0;
+pub const block_type_ifunc: rb_block_type = 1;
+pub const block_type_symbol: rb_block_type = 2;
+pub const block_type_proc: rb_block_type = 3;
+pub type rb_block_type = u32;
+#[repr(C)]
+pub struct rb_block {
+ pub as_: rb_block__bindgen_ty_1,
+ pub type_: rb_block_type,
+}
+#[repr(C)]
+pub struct rb_block__bindgen_ty_1 {
+ pub captured: __BindgenUnionField<rb_captured_block>,
+ pub symbol: __BindgenUnionField<VALUE>,
+ pub proc_: __BindgenUnionField<VALUE>,
+ pub bindgen_union_field: [u64; 3usize],
+}
+pub type rb_control_frame_t = rb_control_frame_struct;
+#[repr(C)]
+pub struct rb_proc_t {
+ pub block: rb_block,
+ pub _bitfield_align_1: [u8; 0],
+ pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>,
+ pub __bindgen_padding_0: [u8; 7usize],
+}
+impl rb_proc_t {
+ #[inline]
+ pub fn is_from_method(&self) -> ::std::os::raw::c_uint {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) }
+ }
+ #[inline]
+ pub fn set_is_from_method(&mut self, val: ::std::os::raw::c_uint) {
+ unsafe {
+ let val: u32 = ::std::mem::transmute(val);
+ self._bitfield_1.set(0usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn is_lambda(&self) -> ::std::os::raw::c_uint {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) }
+ }
+ #[inline]
+ pub fn set_is_lambda(&mut self, val: ::std::os::raw::c_uint) {
+ unsafe {
+ let val: u32 = ::std::mem::transmute(val);
+ self._bitfield_1.set(1usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn is_isolated(&self) -> ::std::os::raw::c_uint {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u32) }
+ }
+ #[inline]
+ pub fn set_is_isolated(&mut self, val: ::std::os::raw::c_uint) {
+ unsafe {
+ let val: u32 = ::std::mem::transmute(val);
+ self._bitfield_1.set(2usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn new_bitfield_1(
+ is_from_method: ::std::os::raw::c_uint,
+ is_lambda: ::std::os::raw::c_uint,
+ is_isolated: ::std::os::raw::c_uint,
+ ) -> __BindgenBitfieldUnit<[u8; 1usize]> {
+ let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
+ __bindgen_bitfield_unit.set(0usize, 1u8, {
+ let is_from_method: u32 = unsafe { ::std::mem::transmute(is_from_method) };
+ is_from_method as u64
+ });
+ __bindgen_bitfield_unit.set(1usize, 1u8, {
+ let is_lambda: u32 = unsafe { ::std::mem::transmute(is_lambda) };
+ is_lambda as u64
+ });
+ __bindgen_bitfield_unit.set(2usize, 1u8, {
+ let is_isolated: u32 = unsafe { ::std::mem::transmute(is_isolated) };
+ is_isolated as u64
+ });
+ __bindgen_bitfield_unit
+ }
+}
+pub const VM_CHECKMATCH_TYPE_WHEN: vm_check_match_type = 1;
+pub const VM_CHECKMATCH_TYPE_CASE: vm_check_match_type = 2;
+pub const VM_CHECKMATCH_TYPE_RESCUE: vm_check_match_type = 3;
+pub type vm_check_match_type = u32;
+pub const VM_OPT_NEWARRAY_SEND_MAX: vm_opt_newarray_send_type = 1;
+pub const VM_OPT_NEWARRAY_SEND_MIN: vm_opt_newarray_send_type = 2;
+pub const VM_OPT_NEWARRAY_SEND_HASH: vm_opt_newarray_send_type = 3;
+pub const VM_OPT_NEWARRAY_SEND_PACK: vm_opt_newarray_send_type = 4;
+pub const VM_OPT_NEWARRAY_SEND_PACK_BUFFER: vm_opt_newarray_send_type = 5;
+pub const VM_OPT_NEWARRAY_SEND_INCLUDE_P: vm_opt_newarray_send_type = 6;
+pub type vm_opt_newarray_send_type = u32;
+pub const VM_SPECIAL_OBJECT_VMCORE: vm_special_object_type = 1;
+pub const VM_SPECIAL_OBJECT_CBASE: vm_special_object_type = 2;
+pub const VM_SPECIAL_OBJECT_CONST_BASE: vm_special_object_type = 3;
+pub type vm_special_object_type = u32;
+pub type IC = *mut iseq_inline_constant_cache;
+pub type IVC = *mut iseq_inline_iv_cache_entry;
+pub type ICVARC = *mut iseq_inline_cvar_cache_entry;
+pub const VM_FRAME_MAGIC_METHOD: vm_frame_env_flags = 286326785;
+pub const VM_FRAME_MAGIC_BLOCK: vm_frame_env_flags = 572653569;
+pub const VM_FRAME_MAGIC_CLASS: vm_frame_env_flags = 858980353;
+pub const VM_FRAME_MAGIC_TOP: vm_frame_env_flags = 1145307137;
+pub const VM_FRAME_MAGIC_CFUNC: vm_frame_env_flags = 1431633921;
+pub const VM_FRAME_MAGIC_IFUNC: vm_frame_env_flags = 1717960705;
+pub const VM_FRAME_MAGIC_EVAL: vm_frame_env_flags = 2004287489;
+pub const VM_FRAME_MAGIC_RESCUE: vm_frame_env_flags = 2022178817;
+pub const VM_FRAME_MAGIC_DUMMY: vm_frame_env_flags = 2040070145;
+pub const VM_FRAME_MAGIC_MASK: vm_frame_env_flags = 2147418113;
+pub const VM_FRAME_FLAG_FINISH: vm_frame_env_flags = 32;
+pub const VM_FRAME_FLAG_BMETHOD: vm_frame_env_flags = 64;
+pub const VM_FRAME_FLAG_CFRAME: vm_frame_env_flags = 128;
+pub const VM_FRAME_FLAG_LAMBDA: vm_frame_env_flags = 256;
+pub const VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM: vm_frame_env_flags = 512;
+pub const VM_FRAME_FLAG_CFRAME_KW: vm_frame_env_flags = 1024;
+pub const VM_FRAME_FLAG_PASSED: vm_frame_env_flags = 2048;
+pub const VM_FRAME_FLAG_BOX_REQUIRE: vm_frame_env_flags = 4096;
+pub const VM_ENV_FLAG_LOCAL: vm_frame_env_flags = 2;
+pub const VM_ENV_FLAG_ESCAPED: vm_frame_env_flags = 4;
+pub const VM_ENV_FLAG_WB_REQUIRED: vm_frame_env_flags = 8;
+pub const VM_ENV_FLAG_ISOLATED: vm_frame_env_flags = 16;
+pub type vm_frame_env_flags = u32;
+pub type attr_index_t = u16;
+pub type shape_id_t = u32;
+pub const SHAPE_ID_HAS_IVAR_MASK: shape_id_mask = 134742014;
+pub type shape_id_mask = u32;
+#[repr(C)]
+pub struct rb_cvar_class_tbl_entry {
+ pub index: u32,
+ pub global_cvar_state: rb_serial_t,
+ pub cref: *const rb_cref_t,
+ pub class_value: VALUE,
+}
+pub const VM_CALL_ARGS_SPLAT_bit: vm_call_flag_bits = 0;
+pub const VM_CALL_ARGS_BLOCKARG_bit: vm_call_flag_bits = 1;
+pub const VM_CALL_FCALL_bit: vm_call_flag_bits = 2;
+pub const VM_CALL_VCALL_bit: vm_call_flag_bits = 3;
+pub const VM_CALL_ARGS_SIMPLE_bit: vm_call_flag_bits = 4;
+pub const VM_CALL_KWARG_bit: vm_call_flag_bits = 5;
+pub const VM_CALL_KW_SPLAT_bit: vm_call_flag_bits = 6;
+pub const VM_CALL_TAILCALL_bit: vm_call_flag_bits = 7;
+pub const VM_CALL_SUPER_bit: vm_call_flag_bits = 8;
+pub const VM_CALL_ZSUPER_bit: vm_call_flag_bits = 9;
+pub const VM_CALL_OPT_SEND_bit: vm_call_flag_bits = 10;
+pub const VM_CALL_KW_SPLAT_MUT_bit: vm_call_flag_bits = 11;
+pub const VM_CALL_ARGS_SPLAT_MUT_bit: vm_call_flag_bits = 12;
+pub const VM_CALL_FORWARDING_bit: vm_call_flag_bits = 13;
+pub const VM_CALL__END: vm_call_flag_bits = 14;
+pub type vm_call_flag_bits = u32;
+#[repr(C)]
+pub struct rb_callinfo_kwarg {
+ pub keyword_len: ::std::os::raw::c_int,
+ pub references: ::std::os::raw::c_int,
+ pub keywords: __IncompleteArrayField<VALUE>,
+}
+#[repr(C)]
+pub struct rb_callinfo {
+ pub flags: VALUE,
+ pub kwarg: *const rb_callinfo_kwarg,
+ pub mid: VALUE,
+ pub flag: VALUE,
+ pub argc: VALUE,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct rb_call_data {
+ pub ci: *const rb_callinfo,
+ pub cc: *const rb_callcache,
+}
+pub const RSTRING_CHILLED: ruby_rstring_private_flags = 49152;
+pub type ruby_rstring_private_flags = u32;
+pub const RHASH_PASS_AS_KEYWORDS: ruby_rhash_flags = 8192;
+pub const RHASH_PROC_DEFAULT: ruby_rhash_flags = 16384;
+pub const RHASH_ST_TABLE_FLAG: ruby_rhash_flags = 32768;
+pub const RHASH_AR_TABLE_SIZE_MASK: ruby_rhash_flags = 983040;
+pub const RHASH_AR_TABLE_SIZE_SHIFT: ruby_rhash_flags = 16;
+pub const RHASH_AR_TABLE_BOUND_MASK: ruby_rhash_flags = 15728640;
+pub const RHASH_AR_TABLE_BOUND_SHIFT: ruby_rhash_flags = 20;
+pub const RHASH_LEV_SHIFT: ruby_rhash_flags = 25;
+pub const RHASH_LEV_MAX: ruby_rhash_flags = 127;
+pub type ruby_rhash_flags = u32;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct rb_builtin_function {
+ pub func_ptr: *const ::std::os::raw::c_void,
+ pub argc: ::std::os::raw::c_int,
+ pub index: ::std::os::raw::c_int,
+ pub name: *const ::std::os::raw::c_char,
+}
+pub const YARVINSN_nop: ruby_vminsn_type = 0;
+pub const YARVINSN_getlocal: ruby_vminsn_type = 1;
+pub const YARVINSN_setlocal: ruby_vminsn_type = 2;
+pub const YARVINSN_getblockparam: ruby_vminsn_type = 3;
+pub const YARVINSN_setblockparam: ruby_vminsn_type = 4;
+pub const YARVINSN_getblockparamproxy: ruby_vminsn_type = 5;
+pub const YARVINSN_getspecial: ruby_vminsn_type = 6;
+pub const YARVINSN_setspecial: ruby_vminsn_type = 7;
+pub const YARVINSN_getinstancevariable: ruby_vminsn_type = 8;
+pub const YARVINSN_setinstancevariable: ruby_vminsn_type = 9;
+pub const YARVINSN_getclassvariable: ruby_vminsn_type = 10;
+pub const YARVINSN_setclassvariable: ruby_vminsn_type = 11;
+pub const YARVINSN_opt_getconstant_path: ruby_vminsn_type = 12;
+pub const YARVINSN_getconstant: ruby_vminsn_type = 13;
+pub const YARVINSN_setconstant: ruby_vminsn_type = 14;
+pub const YARVINSN_getglobal: ruby_vminsn_type = 15;
+pub const YARVINSN_setglobal: ruby_vminsn_type = 16;
+pub const YARVINSN_putnil: ruby_vminsn_type = 17;
+pub const YARVINSN_putself: ruby_vminsn_type = 18;
+pub const YARVINSN_putobject: ruby_vminsn_type = 19;
+pub const YARVINSN_putspecialobject: ruby_vminsn_type = 20;
+pub const YARVINSN_putstring: ruby_vminsn_type = 21;
+pub const YARVINSN_putchilledstring: ruby_vminsn_type = 22;
+pub const YARVINSN_concatstrings: ruby_vminsn_type = 23;
+pub const YARVINSN_anytostring: ruby_vminsn_type = 24;
+pub const YARVINSN_toregexp: ruby_vminsn_type = 25;
+pub const YARVINSN_intern: ruby_vminsn_type = 26;
+pub const YARVINSN_newarray: ruby_vminsn_type = 27;
+pub const YARVINSN_pushtoarraykwsplat: ruby_vminsn_type = 28;
+pub const YARVINSN_duparray: ruby_vminsn_type = 29;
+pub const YARVINSN_duphash: ruby_vminsn_type = 30;
+pub const YARVINSN_expandarray: ruby_vminsn_type = 31;
+pub const YARVINSN_concatarray: ruby_vminsn_type = 32;
+pub const YARVINSN_concattoarray: ruby_vminsn_type = 33;
+pub const YARVINSN_pushtoarray: ruby_vminsn_type = 34;
+pub const YARVINSN_splatarray: ruby_vminsn_type = 35;
+pub const YARVINSN_splatkw: ruby_vminsn_type = 36;
+pub const YARVINSN_newhash: ruby_vminsn_type = 37;
+pub const YARVINSN_newrange: ruby_vminsn_type = 38;
+pub const YARVINSN_pop: ruby_vminsn_type = 39;
+pub const YARVINSN_dup: ruby_vminsn_type = 40;
+pub const YARVINSN_dupn: ruby_vminsn_type = 41;
+pub const YARVINSN_swap: ruby_vminsn_type = 42;
+pub const YARVINSN_opt_reverse: ruby_vminsn_type = 43;
+pub const YARVINSN_topn: ruby_vminsn_type = 44;
+pub const YARVINSN_setn: ruby_vminsn_type = 45;
+pub const YARVINSN_adjuststack: ruby_vminsn_type = 46;
+pub const YARVINSN_defined: ruby_vminsn_type = 47;
+pub const YARVINSN_definedivar: ruby_vminsn_type = 48;
+pub const YARVINSN_checkmatch: ruby_vminsn_type = 49;
+pub const YARVINSN_checkkeyword: ruby_vminsn_type = 50;
+pub const YARVINSN_checktype: ruby_vminsn_type = 51;
+pub const YARVINSN_defineclass: ruby_vminsn_type = 52;
+pub const YARVINSN_definemethod: ruby_vminsn_type = 53;
+pub const YARVINSN_definesmethod: ruby_vminsn_type = 54;
+pub const YARVINSN_send: ruby_vminsn_type = 55;
+pub const YARVINSN_sendforward: ruby_vminsn_type = 56;
+pub const YARVINSN_opt_send_without_block: ruby_vminsn_type = 57;
+pub const YARVINSN_opt_new: ruby_vminsn_type = 58;
+pub const YARVINSN_objtostring: ruby_vminsn_type = 59;
+pub const YARVINSN_opt_ary_freeze: ruby_vminsn_type = 60;
+pub const YARVINSN_opt_hash_freeze: ruby_vminsn_type = 61;
+pub const YARVINSN_opt_str_freeze: ruby_vminsn_type = 62;
+pub const YARVINSN_opt_nil_p: ruby_vminsn_type = 63;
+pub const YARVINSN_opt_str_uminus: ruby_vminsn_type = 64;
+pub const YARVINSN_opt_duparray_send: ruby_vminsn_type = 65;
+pub const YARVINSN_opt_newarray_send: ruby_vminsn_type = 66;
+pub const YARVINSN_invokesuper: ruby_vminsn_type = 67;
+pub const YARVINSN_invokesuperforward: ruby_vminsn_type = 68;
+pub const YARVINSN_invokeblock: ruby_vminsn_type = 69;
+pub const YARVINSN_leave: ruby_vminsn_type = 70;
+pub const YARVINSN_throw: ruby_vminsn_type = 71;
+pub const YARVINSN_jump: ruby_vminsn_type = 72;
+pub const YARVINSN_branchif: ruby_vminsn_type = 73;
+pub const YARVINSN_branchunless: ruby_vminsn_type = 74;
+pub const YARVINSN_branchnil: ruby_vminsn_type = 75;
+pub const YARVINSN_once: ruby_vminsn_type = 76;
+pub const YARVINSN_opt_case_dispatch: ruby_vminsn_type = 77;
+pub const YARVINSN_opt_plus: ruby_vminsn_type = 78;
+pub const YARVINSN_opt_minus: ruby_vminsn_type = 79;
+pub const YARVINSN_opt_mult: ruby_vminsn_type = 80;
+pub const YARVINSN_opt_div: ruby_vminsn_type = 81;
+pub const YARVINSN_opt_mod: ruby_vminsn_type = 82;
+pub const YARVINSN_opt_eq: ruby_vminsn_type = 83;
+pub const YARVINSN_opt_neq: ruby_vminsn_type = 84;
+pub const YARVINSN_opt_lt: ruby_vminsn_type = 85;
+pub const YARVINSN_opt_le: ruby_vminsn_type = 86;
+pub const YARVINSN_opt_gt: ruby_vminsn_type = 87;
+pub const YARVINSN_opt_ge: ruby_vminsn_type = 88;
+pub const YARVINSN_opt_ltlt: ruby_vminsn_type = 89;
+pub const YARVINSN_opt_and: ruby_vminsn_type = 90;
+pub const YARVINSN_opt_or: ruby_vminsn_type = 91;
+pub const YARVINSN_opt_aref: ruby_vminsn_type = 92;
+pub const YARVINSN_opt_aset: ruby_vminsn_type = 93;
+pub const YARVINSN_opt_length: ruby_vminsn_type = 94;
+pub const YARVINSN_opt_size: ruby_vminsn_type = 95;
+pub const YARVINSN_opt_empty_p: ruby_vminsn_type = 96;
+pub const YARVINSN_opt_succ: ruby_vminsn_type = 97;
+pub const YARVINSN_opt_not: ruby_vminsn_type = 98;
+pub const YARVINSN_opt_regexpmatch2: ruby_vminsn_type = 99;
+pub const YARVINSN_invokebuiltin: ruby_vminsn_type = 100;
+pub const YARVINSN_opt_invokebuiltin_delegate: ruby_vminsn_type = 101;
+pub const YARVINSN_opt_invokebuiltin_delegate_leave: ruby_vminsn_type = 102;
+pub const YARVINSN_getlocal_WC_0: ruby_vminsn_type = 103;
+pub const YARVINSN_getlocal_WC_1: ruby_vminsn_type = 104;
+pub const YARVINSN_setlocal_WC_0: ruby_vminsn_type = 105;
+pub const YARVINSN_setlocal_WC_1: ruby_vminsn_type = 106;
+pub const YARVINSN_putobject_INT2FIX_0_: ruby_vminsn_type = 107;
+pub const YARVINSN_putobject_INT2FIX_1_: ruby_vminsn_type = 108;
+pub const YARVINSN_trace_nop: ruby_vminsn_type = 109;
+pub const YARVINSN_trace_getlocal: ruby_vminsn_type = 110;
+pub const YARVINSN_trace_setlocal: ruby_vminsn_type = 111;
+pub const YARVINSN_trace_getblockparam: ruby_vminsn_type = 112;
+pub const YARVINSN_trace_setblockparam: ruby_vminsn_type = 113;
+pub const YARVINSN_trace_getblockparamproxy: ruby_vminsn_type = 114;
+pub const YARVINSN_trace_getspecial: ruby_vminsn_type = 115;
+pub const YARVINSN_trace_setspecial: ruby_vminsn_type = 116;
+pub const YARVINSN_trace_getinstancevariable: ruby_vminsn_type = 117;
+pub const YARVINSN_trace_setinstancevariable: ruby_vminsn_type = 118;
+pub const YARVINSN_trace_getclassvariable: ruby_vminsn_type = 119;
+pub const YARVINSN_trace_setclassvariable: ruby_vminsn_type = 120;
+pub const YARVINSN_trace_opt_getconstant_path: ruby_vminsn_type = 121;
+pub const YARVINSN_trace_getconstant: ruby_vminsn_type = 122;
+pub const YARVINSN_trace_setconstant: ruby_vminsn_type = 123;
+pub const YARVINSN_trace_getglobal: ruby_vminsn_type = 124;
+pub const YARVINSN_trace_setglobal: ruby_vminsn_type = 125;
+pub const YARVINSN_trace_putnil: ruby_vminsn_type = 126;
+pub const YARVINSN_trace_putself: ruby_vminsn_type = 127;
+pub const YARVINSN_trace_putobject: ruby_vminsn_type = 128;
+pub const YARVINSN_trace_putspecialobject: ruby_vminsn_type = 129;
+pub const YARVINSN_trace_putstring: ruby_vminsn_type = 130;
+pub const YARVINSN_trace_putchilledstring: ruby_vminsn_type = 131;
+pub const YARVINSN_trace_concatstrings: ruby_vminsn_type = 132;
+pub const YARVINSN_trace_anytostring: ruby_vminsn_type = 133;
+pub const YARVINSN_trace_toregexp: ruby_vminsn_type = 134;
+pub const YARVINSN_trace_intern: ruby_vminsn_type = 135;
+pub const YARVINSN_trace_newarray: ruby_vminsn_type = 136;
+pub const YARVINSN_trace_pushtoarraykwsplat: ruby_vminsn_type = 137;
+pub const YARVINSN_trace_duparray: ruby_vminsn_type = 138;
+pub const YARVINSN_trace_duphash: ruby_vminsn_type = 139;
+pub const YARVINSN_trace_expandarray: ruby_vminsn_type = 140;
+pub const YARVINSN_trace_concatarray: ruby_vminsn_type = 141;
+pub const YARVINSN_trace_concattoarray: ruby_vminsn_type = 142;
+pub const YARVINSN_trace_pushtoarray: ruby_vminsn_type = 143;
+pub const YARVINSN_trace_splatarray: ruby_vminsn_type = 144;
+pub const YARVINSN_trace_splatkw: ruby_vminsn_type = 145;
+pub const YARVINSN_trace_newhash: ruby_vminsn_type = 146;
+pub const YARVINSN_trace_newrange: ruby_vminsn_type = 147;
+pub const YARVINSN_trace_pop: ruby_vminsn_type = 148;
+pub const YARVINSN_trace_dup: ruby_vminsn_type = 149;
+pub const YARVINSN_trace_dupn: ruby_vminsn_type = 150;
+pub const YARVINSN_trace_swap: ruby_vminsn_type = 151;
+pub const YARVINSN_trace_opt_reverse: ruby_vminsn_type = 152;
+pub const YARVINSN_trace_topn: ruby_vminsn_type = 153;
+pub const YARVINSN_trace_setn: ruby_vminsn_type = 154;
+pub const YARVINSN_trace_adjuststack: ruby_vminsn_type = 155;
+pub const YARVINSN_trace_defined: ruby_vminsn_type = 156;
+pub const YARVINSN_trace_definedivar: ruby_vminsn_type = 157;
+pub const YARVINSN_trace_checkmatch: ruby_vminsn_type = 158;
+pub const YARVINSN_trace_checkkeyword: ruby_vminsn_type = 159;
+pub const YARVINSN_trace_checktype: ruby_vminsn_type = 160;
+pub const YARVINSN_trace_defineclass: ruby_vminsn_type = 161;
+pub const YARVINSN_trace_definemethod: ruby_vminsn_type = 162;
+pub const YARVINSN_trace_definesmethod: ruby_vminsn_type = 163;
+pub const YARVINSN_trace_send: ruby_vminsn_type = 164;
+pub const YARVINSN_trace_sendforward: ruby_vminsn_type = 165;
+pub const YARVINSN_trace_opt_send_without_block: ruby_vminsn_type = 166;
+pub const YARVINSN_trace_opt_new: ruby_vminsn_type = 167;
+pub const YARVINSN_trace_objtostring: ruby_vminsn_type = 168;
+pub const YARVINSN_trace_opt_ary_freeze: ruby_vminsn_type = 169;
+pub const YARVINSN_trace_opt_hash_freeze: ruby_vminsn_type = 170;
+pub const YARVINSN_trace_opt_str_freeze: ruby_vminsn_type = 171;
+pub const YARVINSN_trace_opt_nil_p: ruby_vminsn_type = 172;
+pub const YARVINSN_trace_opt_str_uminus: ruby_vminsn_type = 173;
+pub const YARVINSN_trace_opt_duparray_send: ruby_vminsn_type = 174;
+pub const YARVINSN_trace_opt_newarray_send: ruby_vminsn_type = 175;
+pub const YARVINSN_trace_invokesuper: ruby_vminsn_type = 176;
+pub const YARVINSN_trace_invokesuperforward: ruby_vminsn_type = 177;
+pub const YARVINSN_trace_invokeblock: ruby_vminsn_type = 178;
+pub const YARVINSN_trace_leave: ruby_vminsn_type = 179;
+pub const YARVINSN_trace_throw: ruby_vminsn_type = 180;
+pub const YARVINSN_trace_jump: ruby_vminsn_type = 181;
+pub const YARVINSN_trace_branchif: ruby_vminsn_type = 182;
+pub const YARVINSN_trace_branchunless: ruby_vminsn_type = 183;
+pub const YARVINSN_trace_branchnil: ruby_vminsn_type = 184;
+pub const YARVINSN_trace_once: ruby_vminsn_type = 185;
+pub const YARVINSN_trace_opt_case_dispatch: ruby_vminsn_type = 186;
+pub const YARVINSN_trace_opt_plus: ruby_vminsn_type = 187;
+pub const YARVINSN_trace_opt_minus: ruby_vminsn_type = 188;
+pub const YARVINSN_trace_opt_mult: ruby_vminsn_type = 189;
+pub const YARVINSN_trace_opt_div: ruby_vminsn_type = 190;
+pub const YARVINSN_trace_opt_mod: ruby_vminsn_type = 191;
+pub const YARVINSN_trace_opt_eq: ruby_vminsn_type = 192;
+pub const YARVINSN_trace_opt_neq: ruby_vminsn_type = 193;
+pub const YARVINSN_trace_opt_lt: ruby_vminsn_type = 194;
+pub const YARVINSN_trace_opt_le: ruby_vminsn_type = 195;
+pub const YARVINSN_trace_opt_gt: ruby_vminsn_type = 196;
+pub const YARVINSN_trace_opt_ge: ruby_vminsn_type = 197;
+pub const YARVINSN_trace_opt_ltlt: ruby_vminsn_type = 198;
+pub const YARVINSN_trace_opt_and: ruby_vminsn_type = 199;
+pub const YARVINSN_trace_opt_or: ruby_vminsn_type = 200;
+pub const YARVINSN_trace_opt_aref: ruby_vminsn_type = 201;
+pub const YARVINSN_trace_opt_aset: ruby_vminsn_type = 202;
+pub const YARVINSN_trace_opt_length: ruby_vminsn_type = 203;
+pub const YARVINSN_trace_opt_size: ruby_vminsn_type = 204;
+pub const YARVINSN_trace_opt_empty_p: ruby_vminsn_type = 205;
+pub const YARVINSN_trace_opt_succ: ruby_vminsn_type = 206;
+pub const YARVINSN_trace_opt_not: ruby_vminsn_type = 207;
+pub const YARVINSN_trace_opt_regexpmatch2: ruby_vminsn_type = 208;
+pub const YARVINSN_trace_invokebuiltin: ruby_vminsn_type = 209;
+pub const YARVINSN_trace_opt_invokebuiltin_delegate: ruby_vminsn_type = 210;
+pub const YARVINSN_trace_opt_invokebuiltin_delegate_leave: ruby_vminsn_type = 211;
+pub const YARVINSN_trace_getlocal_WC_0: ruby_vminsn_type = 212;
+pub const YARVINSN_trace_getlocal_WC_1: ruby_vminsn_type = 213;
+pub const YARVINSN_trace_setlocal_WC_0: ruby_vminsn_type = 214;
+pub const YARVINSN_trace_setlocal_WC_1: ruby_vminsn_type = 215;
+pub const YARVINSN_trace_putobject_INT2FIX_0_: ruby_vminsn_type = 216;
+pub const YARVINSN_trace_putobject_INT2FIX_1_: ruby_vminsn_type = 217;
+pub const YARVINSN_zjit_getinstancevariable: ruby_vminsn_type = 218;
+pub const YARVINSN_zjit_setinstancevariable: ruby_vminsn_type = 219;
+pub const YARVINSN_zjit_definedivar: ruby_vminsn_type = 220;
+pub const YARVINSN_zjit_send: ruby_vminsn_type = 221;
+pub const YARVINSN_zjit_opt_send_without_block: ruby_vminsn_type = 222;
+pub const YARVINSN_zjit_objtostring: ruby_vminsn_type = 223;
+pub const YARVINSN_zjit_opt_nil_p: ruby_vminsn_type = 224;
+pub const YARVINSN_zjit_invokesuper: ruby_vminsn_type = 225;
+pub const YARVINSN_zjit_invokeblock: ruby_vminsn_type = 226;
+pub const YARVINSN_zjit_opt_plus: ruby_vminsn_type = 227;
+pub const YARVINSN_zjit_opt_minus: ruby_vminsn_type = 228;
+pub const YARVINSN_zjit_opt_mult: ruby_vminsn_type = 229;
+pub const YARVINSN_zjit_opt_div: ruby_vminsn_type = 230;
+pub const YARVINSN_zjit_opt_mod: ruby_vminsn_type = 231;
+pub const YARVINSN_zjit_opt_eq: ruby_vminsn_type = 232;
+pub const YARVINSN_zjit_opt_neq: ruby_vminsn_type = 233;
+pub const YARVINSN_zjit_opt_lt: ruby_vminsn_type = 234;
+pub const YARVINSN_zjit_opt_le: ruby_vminsn_type = 235;
+pub const YARVINSN_zjit_opt_gt: ruby_vminsn_type = 236;
+pub const YARVINSN_zjit_opt_ge: ruby_vminsn_type = 237;
+pub const YARVINSN_zjit_opt_ltlt: ruby_vminsn_type = 238;
+pub const YARVINSN_zjit_opt_and: ruby_vminsn_type = 239;
+pub const YARVINSN_zjit_opt_or: ruby_vminsn_type = 240;
+pub const YARVINSN_zjit_opt_aref: ruby_vminsn_type = 241;
+pub const YARVINSN_zjit_opt_aset: ruby_vminsn_type = 242;
+pub const YARVINSN_zjit_opt_length: ruby_vminsn_type = 243;
+pub const YARVINSN_zjit_opt_size: ruby_vminsn_type = 244;
+pub const YARVINSN_zjit_opt_empty_p: ruby_vminsn_type = 245;
+pub const YARVINSN_zjit_opt_succ: ruby_vminsn_type = 246;
+pub const YARVINSN_zjit_opt_not: ruby_vminsn_type = 247;
+pub const YARVINSN_zjit_opt_regexpmatch2: ruby_vminsn_type = 248;
+pub const VM_INSTRUCTION_SIZE: ruby_vminsn_type = 249;
+pub type ruby_vminsn_type = u32;
+pub type rb_iseq_callback = ::std::option::Option<
+ unsafe extern "C" fn(arg1: *const rb_iseq_t, arg2: *mut ::std::os::raw::c_void),
+>;
+pub const DEFINED_NOT_DEFINED: defined_type = 0;
+pub const DEFINED_NIL: defined_type = 1;
+pub const DEFINED_IVAR: defined_type = 2;
+pub const DEFINED_LVAR: defined_type = 3;
+pub const DEFINED_GVAR: defined_type = 4;
+pub const DEFINED_CVAR: defined_type = 5;
+pub const DEFINED_CONST: defined_type = 6;
+pub const DEFINED_METHOD: defined_type = 7;
+pub const DEFINED_YIELD: defined_type = 8;
+pub const DEFINED_ZSUPER: defined_type = 9;
+pub const DEFINED_SELF: defined_type = 10;
+pub const DEFINED_TRUE: defined_type = 11;
+pub const DEFINED_FALSE: defined_type = 12;
+pub const DEFINED_ASGN: defined_type = 13;
+pub const DEFINED_EXPR: defined_type = 14;
+pub const DEFINED_REF: defined_type = 15;
+pub const DEFINED_FUNC: defined_type = 16;
+pub const DEFINED_CONST_FROM: defined_type = 17;
+pub type defined_type = u32;
+pub type rb_seq_param_keyword_struct =
+ rb_iseq_constant_body_rb_iseq_parameters_rb_iseq_param_keyword;
+pub const ROBJECT_OFFSET_AS_HEAP_FIELDS: jit_bindgen_constants = 16;
+pub const ROBJECT_OFFSET_AS_ARY: jit_bindgen_constants = 16;
+pub const RUBY_OFFSET_RSTRING_LEN: jit_bindgen_constants = 16;
+pub const RUBY_OFFSET_EC_CFP: jit_bindgen_constants = 16;
+pub const RUBY_OFFSET_EC_INTERRUPT_FLAG: jit_bindgen_constants = 32;
+pub const RUBY_OFFSET_EC_INTERRUPT_MASK: jit_bindgen_constants = 36;
+pub const RUBY_OFFSET_EC_THREAD_PTR: jit_bindgen_constants = 48;
+pub const RUBY_OFFSET_EC_RACTOR_ID: jit_bindgen_constants = 64;
+pub type jit_bindgen_constants = u32;
+pub type rb_iseq_param_keyword_struct =
+ rb_iseq_constant_body_rb_iseq_parameters_rb_iseq_param_keyword;
+extern "C" {
+ pub fn ruby_xfree(ptr: *mut ::std::os::raw::c_void);
+ pub fn rb_class_attached_object(klass: VALUE) -> VALUE;
+ pub fn rb_singleton_class(obj: VALUE) -> VALUE;
+ pub fn rb_get_alloc_func(klass: VALUE) -> rb_alloc_func_t;
+ pub fn rb_method_basic_definition_p(klass: VALUE, mid: ID) -> ::std::os::raw::c_int;
+ pub fn rb_bug(fmt: *const ::std::os::raw::c_char, ...) -> !;
+ pub fn rb_float_new(d: f64) -> VALUE;
+ pub fn rb_gc_mark(obj: VALUE);
+ pub fn rb_gc_mark_movable(obj: VALUE);
+ pub fn rb_gc_location(obj: VALUE) -> VALUE;
+ pub fn rb_gc_writebarrier(old: VALUE, young: VALUE);
+ pub fn rb_class_get_superclass(klass: VALUE) -> VALUE;
+ pub fn rb_funcall(recv: VALUE, mid: ID, n: ::std::os::raw::c_int, ...) -> VALUE;
+ pub static mut rb_mKernel: VALUE;
+ pub static mut rb_cBasicObject: VALUE;
+ pub static mut rb_cArray: VALUE;
+ pub static mut rb_cClass: VALUE;
+ pub static mut rb_cFalseClass: VALUE;
+ pub static mut rb_cFloat: VALUE;
+ pub static mut rb_cHash: VALUE;
+ pub static mut rb_cIO: VALUE;
+ pub static mut rb_cInteger: VALUE;
+ pub static mut rb_cModule: VALUE;
+ pub static mut rb_cNilClass: VALUE;
+ pub static mut rb_cNumeric: VALUE;
+ pub static mut rb_cString: VALUE;
+ pub static mut rb_cSymbol: VALUE;
+ pub static mut rb_cThread: VALUE;
+ pub static mut rb_cTrueClass: VALUE;
+ pub fn rb_obj_class(obj: VALUE) -> VALUE;
+ pub fn rb_ary_new_capa(capa: ::std::os::raw::c_long) -> VALUE;
+ pub fn rb_ary_store(ary: VALUE, key: ::std::os::raw::c_long, val: VALUE);
+ pub fn rb_ary_dup(ary: VALUE) -> VALUE;
+ pub fn rb_ary_resurrect(ary: VALUE) -> VALUE;
+ pub fn rb_ary_cat(ary: VALUE, train: *const VALUE, len: ::std::os::raw::c_long) -> VALUE;
+ pub fn rb_ary_push(ary: VALUE, elem: VALUE) -> VALUE;
+ pub fn rb_ary_clear(ary: VALUE) -> VALUE;
+ pub fn rb_hash_new() -> VALUE;
+ pub fn rb_hash_aref(hash: VALUE, key: VALUE) -> VALUE;
+ pub fn rb_hash_aset(hash: VALUE, key: VALUE, val: VALUE) -> VALUE;
+ pub fn rb_hash_bulk_insert(argc: ::std::os::raw::c_long, argv: *const VALUE, hash: VALUE);
+ pub fn rb_obj_is_proc(recv: VALUE) -> VALUE;
+ pub fn rb_sym2id(obj: VALUE) -> ID;
+ pub fn rb_id2sym(id: ID) -> VALUE;
+ pub fn rb_intern(name: *const ::std::os::raw::c_char) -> ID;
+ pub fn rb_intern2(name: *const ::std::os::raw::c_char, len: ::std::os::raw::c_long) -> ID;
+ pub fn rb_id2name(id: ID) -> *const ::std::os::raw::c_char;
+ pub fn rb_class2name(klass: VALUE) -> *const ::std::os::raw::c_char;
+ pub fn rb_class_new_instance_pass_kw(
+ argc: ::std::os::raw::c_int,
+ argv: *const VALUE,
+ klass: VALUE,
+ ) -> VALUE;
+ pub fn rb_obj_is_kind_of(obj: VALUE, klass: VALUE) -> VALUE;
+ pub fn rb_obj_alloc(klass: VALUE) -> VALUE;
+ pub fn rb_obj_frozen_p(obj: VALUE) -> VALUE;
+ pub fn rb_backref_get() -> VALUE;
+ pub fn rb_range_new(beg: VALUE, end: VALUE, excl: ::std::os::raw::c_int) -> VALUE;
+ pub fn rb_reg_nth_match(n: ::std::os::raw::c_int, md: VALUE) -> VALUE;
+ pub fn rb_reg_last_match(md: VALUE) -> VALUE;
+ pub fn rb_reg_match_pre(md: VALUE) -> VALUE;
+ pub fn rb_reg_match_post(md: VALUE) -> VALUE;
+ pub fn rb_reg_match_last(md: VALUE) -> VALUE;
+ pub fn rb_utf8_str_new(
+ ptr: *const ::std::os::raw::c_char,
+ len: ::std::os::raw::c_long,
+ ) -> VALUE;
+ pub fn rb_str_buf_append(dst: VALUE, src: VALUE) -> VALUE;
+ pub fn rb_str_dup(str_: VALUE) -> VALUE;
+ pub fn rb_str_intern(str_: VALUE) -> VALUE;
+ pub fn rb_mod_name(mod_: VALUE) -> VALUE;
+ pub fn rb_ivar_get(obj: VALUE, name: ID) -> VALUE;
+ pub fn rb_ivar_defined(obj: VALUE, name: ID) -> VALUE;
+ pub fn rb_attr_get(obj: VALUE, name: ID) -> VALUE;
+ pub fn rb_const_get(space: VALUE, name: ID) -> VALUE;
+ pub fn rb_obj_info_dump(obj: VALUE);
+ pub fn rb_class_allocate_instance(klass: VALUE) -> VALUE;
+ pub fn rb_obj_equal(obj1: VALUE, obj2: VALUE) -> VALUE;
+ pub fn rb_reg_new_ary(ary: VALUE, options: ::std::os::raw::c_int) -> VALUE;
+ pub fn rb_ary_tmp_new_from_values(
+ arg1: VALUE,
+ arg2: ::std::os::raw::c_long,
+ arg3: *const VALUE,
+ ) -> VALUE;
+ pub fn rb_ec_ary_new_from_values(
+ ec: *mut rb_execution_context_struct,
+ n: ::std::os::raw::c_long,
+ elts: *const VALUE,
+ ) -> VALUE;
+ pub fn rb_vm_top_self() -> VALUE;
+ pub static mut rb_vm_insn_count: u64;
+ pub fn rb_method_entry_at(obj: VALUE, id: ID) -> *const rb_method_entry_t;
+ pub fn rb_callable_method_entry(klass: VALUE, id: ID) -> *const rb_callable_method_entry_t;
+ pub fn rb_callable_method_entry_or_negative(
+ klass: VALUE,
+ id: ID,
+ ) -> *const rb_callable_method_entry_t;
+ pub static mut rb_cRubyVM: VALUE;
+ pub static mut rb_mRubyVMFrozenCore: VALUE;
+ pub static mut rb_block_param_proxy: VALUE;
+ pub fn rb_vm_ep_local_ep(ep: *const VALUE) -> *const VALUE;
+ pub fn rb_iseq_path(iseq: *const rb_iseq_t) -> VALUE;
+ pub fn rb_vm_env_write(ep: *const VALUE, index: ::std::os::raw::c_int, v: VALUE);
+ pub fn rb_vm_bh_to_procval(ec: *const rb_execution_context_t, block_handler: VALUE) -> VALUE;
+ pub fn rb_vm_frame_method_entry(
+ cfp: *const rb_control_frame_t,
+ ) -> *const rb_callable_method_entry_t;
+ pub fn rb_obj_info(obj: VALUE) -> *const ::std::os::raw::c_char;
+ pub fn rb_ec_stack_check(ec: *mut rb_execution_context_struct) -> ::std::os::raw::c_int;
+ pub fn rb_shape_id_offset() -> i32;
+ pub fn rb_obj_shape_id(obj: VALUE) -> shape_id_t;
+ pub fn rb_shape_get_iv_index(shape_id: shape_id_t, id: ID, value: *mut attr_index_t) -> bool;
+ pub fn rb_shape_transition_add_ivar_no_warnings(
+ klass: VALUE,
+ original_shape_id: shape_id_t,
+ id: ID,
+ ) -> shape_id_t;
+ pub fn rb_ivar_get_at(obj: VALUE, index: attr_index_t, id: ID) -> VALUE;
+ pub fn rb_ivar_get_at_no_ractor_check(obj: VALUE, index: attr_index_t) -> VALUE;
+ pub fn rb_gvar_get(arg1: ID) -> VALUE;
+ pub fn rb_gvar_set(arg1: ID, arg2: VALUE) -> VALUE;
+ pub fn rb_ensure_iv_list_size(obj: VALUE, current_len: u32, newsize: u32);
+ pub fn rb_vm_barrier();
+ pub fn rb_str_byte_substr(str_: VALUE, beg: VALUE, len: VALUE) -> VALUE;
+ pub fn rb_str_substr_two_fixnums(
+ str_: VALUE,
+ beg: VALUE,
+ len: VALUE,
+ empty: ::std::os::raw::c_int,
+ ) -> VALUE;
+ pub fn rb_obj_as_string_result(str_: VALUE, obj: VALUE) -> VALUE;
+ pub fn rb_str_concat_literals(num: usize, strary: *const VALUE) -> VALUE;
+ pub fn rb_ec_str_resurrect(
+ ec: *mut rb_execution_context_struct,
+ str_: VALUE,
+ chilled: bool,
+ ) -> VALUE;
+ pub fn rb_to_hash_type(obj: VALUE) -> VALUE;
+ pub fn rb_hash_stlike_foreach(
+ hash: VALUE,
+ func: st_foreach_callback_func,
+ arg: st_data_t,
+ ) -> ::std::os::raw::c_int;
+ pub fn rb_hash_new_with_size(size: st_index_t) -> VALUE;
+ pub fn rb_hash_resurrect(hash: VALUE) -> VALUE;
+ pub fn rb_hash_stlike_lookup(
+ hash: VALUE,
+ key: st_data_t,
+ pval: *mut st_data_t,
+ ) -> ::std::os::raw::c_int;
+ pub fn rb_insn_len(insn: VALUE) -> ::std::os::raw::c_int;
+ pub fn rb_vm_insn_decode(encoded: VALUE) -> ::std::os::raw::c_int;
+ pub fn rb_float_plus(x: VALUE, y: VALUE) -> VALUE;
+ pub fn rb_float_minus(x: VALUE, y: VALUE) -> VALUE;
+ pub fn rb_float_mul(x: VALUE, y: VALUE) -> VALUE;
+ pub fn rb_float_div(x: VALUE, y: VALUE) -> VALUE;
+ pub fn rb_fix_aref(fix: VALUE, idx: VALUE) -> VALUE;
+ pub fn rb_vm_insn_addr2opcode(addr: *const ::std::os::raw::c_void) -> ::std::os::raw::c_int;
+ pub fn rb_iseq_line_no(iseq: *const rb_iseq_t, pos: usize) -> ::std::os::raw::c_uint;
+ pub fn rb_iseqw_to_iseq(iseqw: VALUE) -> *const rb_iseq_t;
+ pub fn rb_iseq_label(iseq: *const rb_iseq_t) -> VALUE;
+ pub fn rb_profile_frames(
+ start: ::std::os::raw::c_int,
+ limit: ::std::os::raw::c_int,
+ buff: *mut VALUE,
+ lines: *mut ::std::os::raw::c_int,
+ ) -> ::std::os::raw::c_int;
+ pub fn rb_jit_cont_each_iseq(callback: rb_iseq_callback, data: *mut ::std::os::raw::c_void);
+ pub fn rb_yjit_exit_locations_dict(
+ yjit_raw_samples: *mut VALUE,
+ yjit_line_samples: *mut ::std::os::raw::c_int,
+ samples_len: ::std::os::raw::c_int,
+ ) -> VALUE;
+ pub fn rb_c_method_tracing_currently_enabled(ec: *const rb_execution_context_t) -> bool;
+ pub fn rb_full_cfunc_return(ec: *mut rb_execution_context_t, return_value: VALUE);
+ pub fn rb_iseq_get_yjit_payload(iseq: *const rb_iseq_t) -> *mut ::std::os::raw::c_void;
+ pub fn rb_iseq_set_yjit_payload(iseq: *const rb_iseq_t, payload: *mut ::std::os::raw::c_void);
+ pub fn rb_get_symbol_id(namep: VALUE) -> ID;
+ pub fn rb_yjit_builtin_function(iseq: *const rb_iseq_t) -> *const rb_builtin_function;
+ pub fn rb_yjit_str_simple_append(str1: VALUE, str2: VALUE) -> VALUE;
+ pub fn rb_vm_base_ptr(cfp: *mut rb_control_frame_struct) -> *mut VALUE;
+ pub fn rb_str_neq_internal(str1: VALUE, str2: VALUE) -> VALUE;
+ pub fn rb_ary_unshift_m(argc: ::std::os::raw::c_int, argv: *mut VALUE, ary: VALUE) -> VALUE;
+ pub fn rb_yjit_rb_ary_subseq_length(ary: VALUE, beg: ::std::os::raw::c_long) -> VALUE;
+ pub fn rb_yjit_ruby2_keywords_splat_p(obj: VALUE) -> usize;
+ pub fn rb_yjit_splat_varg_checks(
+ sp: *mut VALUE,
+ splat_array: VALUE,
+ cfp: *mut rb_control_frame_t,
+ ) -> VALUE;
+ pub fn rb_yjit_splat_varg_cfunc(stack_splat_array: *mut VALUE) -> ::std::os::raw::c_int;
+ pub fn rb_yjit_dump_iseq_loc(iseq: *const rb_iseq_t, insn_idx: u32);
+ pub fn rb_yjit_iseq_inspect(iseq: *const rb_iseq_t) -> *mut ::std::os::raw::c_char;
+ pub fn rb_RSTRUCT_SET(st: VALUE, k: ::std::os::raw::c_int, v: VALUE);
+ pub fn rb_ENCODING_GET(obj: VALUE) -> ::std::os::raw::c_int;
+ pub fn rb_yjit_constcache_shareable(ice: *const iseq_inline_constant_cache_entry) -> bool;
+ pub fn rb_yjit_obj_written(
+ old: VALUE,
+ young: VALUE,
+ file: *const ::std::os::raw::c_char,
+ line: ::std::os::raw::c_int,
+ );
+ pub fn rb_object_shape_count() -> VALUE;
+ pub fn rb_yjit_shape_obj_too_complex_p(obj: VALUE) -> bool;
+ pub fn rb_yjit_shape_capacity(shape_id: shape_id_t) -> attr_index_t;
+ pub fn rb_yjit_shape_index(shape_id: shape_id_t) -> attr_index_t;
+ pub fn rb_yjit_sendish_sp_pops(ci: *const rb_callinfo) -> usize;
+ pub fn rb_yjit_invokeblock_sp_pops(ci: *const rb_callinfo) -> usize;
+ pub fn rb_yjit_cme_ractor_serial(cme: *const rb_callable_method_entry_t) -> rb_serial_t;
+ pub fn rb_yjit_set_exception_return(
+ cfp: *mut rb_control_frame_t,
+ leave_exit: *mut ::std::os::raw::c_void,
+ leave_exception: *mut ::std::os::raw::c_void,
+ );
+ pub fn rb_vm_instruction_size() -> u32;
+ pub fn rb_iseq_encoded_size(iseq: *const rb_iseq_t) -> ::std::os::raw::c_uint;
+ pub fn rb_iseq_pc_at_idx(iseq: *const rb_iseq_t, insn_idx: u32) -> *mut VALUE;
+ pub fn rb_iseq_opcode_at_pc(iseq: *const rb_iseq_t, pc: *const VALUE) -> ::std::os::raw::c_int;
+ pub fn rb_RSTRING_LEN(str_: VALUE) -> ::std::os::raw::c_ulong;
+ pub fn rb_RSTRING_PTR(str_: VALUE) -> *mut ::std::os::raw::c_char;
+ pub fn rb_insn_name(insn: VALUE) -> *const ::std::os::raw::c_char;
+ pub fn rb_vm_ci_argc(ci: *const rb_callinfo) -> ::std::os::raw::c_uint;
+ pub fn rb_vm_ci_mid(ci: *const rb_callinfo) -> ID;
+ pub fn rb_vm_ci_flag(ci: *const rb_callinfo) -> ::std::os::raw::c_uint;
+ pub fn rb_vm_ci_kwarg(ci: *const rb_callinfo) -> *const rb_callinfo_kwarg;
+ pub fn rb_get_cikw_keyword_len(cikw: *const rb_callinfo_kwarg) -> ::std::os::raw::c_int;
+ pub fn rb_get_cikw_keywords_idx(
+ cikw: *const rb_callinfo_kwarg,
+ idx: ::std::os::raw::c_int,
+ ) -> VALUE;
+ pub fn rb_METHOD_ENTRY_VISI(me: *const rb_callable_method_entry_t) -> rb_method_visibility_t;
+ pub fn rb_get_cme_def_type(cme: *const rb_callable_method_entry_t) -> rb_method_type_t;
+ pub fn rb_get_cme_def_body_attr_id(cme: *const rb_callable_method_entry_t) -> ID;
+ pub fn rb_get_cme_def_body_optimized_type(
+ cme: *const rb_callable_method_entry_t,
+ ) -> method_optimized_type;
+ pub fn rb_get_cme_def_body_optimized_index(
+ cme: *const rb_callable_method_entry_t,
+ ) -> ::std::os::raw::c_uint;
+ pub fn rb_get_cme_def_body_cfunc(
+ cme: *const rb_callable_method_entry_t,
+ ) -> *mut rb_method_cfunc_t;
+ pub fn rb_get_def_method_serial(def: *const rb_method_definition_t) -> usize;
+ pub fn rb_get_def_original_id(def: *const rb_method_definition_t) -> ID;
+ pub fn rb_get_def_bmethod_proc(def: *mut rb_method_definition_t) -> VALUE;
+ pub fn rb_jit_get_proc_ptr(procv: VALUE) -> *mut rb_proc_t;
+ pub fn rb_optimized_call(
+ recv: *mut VALUE,
+ ec: *mut rb_execution_context_t,
+ argc: ::std::os::raw::c_int,
+ argv: *mut VALUE,
+ kw_splat: ::std::os::raw::c_int,
+ block_handler: VALUE,
+ ) -> VALUE;
+ pub fn rb_jit_iseq_builtin_attrs(iseq: *const rb_iseq_t) -> ::std::os::raw::c_uint;
+ pub fn rb_get_mct_argc(mct: *const rb_method_cfunc_t) -> ::std::os::raw::c_int;
+ pub fn rb_get_mct_func(mct: *const rb_method_cfunc_t) -> *mut ::std::os::raw::c_void;
+ pub fn rb_get_def_iseq_ptr(def: *mut rb_method_definition_t) -> *const rb_iseq_t;
+ pub fn rb_get_iseq_body_local_iseq(iseq: *const rb_iseq_t) -> *const rb_iseq_t;
+ pub fn rb_get_iseq_body_parent_iseq(iseq: *const rb_iseq_t) -> *const rb_iseq_t;
+ pub fn rb_get_iseq_body_local_table_size(iseq: *const rb_iseq_t) -> ::std::os::raw::c_uint;
+ pub fn rb_get_iseq_body_iseq_encoded(iseq: *const rb_iseq_t) -> *mut VALUE;
+ pub fn rb_get_iseq_body_stack_max(iseq: *const rb_iseq_t) -> ::std::os::raw::c_uint;
+ pub fn rb_get_iseq_body_type(iseq: *const rb_iseq_t) -> rb_iseq_type;
+ pub fn rb_get_iseq_flags_has_lead(iseq: *const rb_iseq_t) -> bool;
+ pub fn rb_get_iseq_flags_has_opt(iseq: *const rb_iseq_t) -> bool;
+ pub fn rb_get_iseq_flags_has_kw(iseq: *const rb_iseq_t) -> bool;
+ pub fn rb_get_iseq_flags_has_post(iseq: *const rb_iseq_t) -> bool;
+ pub fn rb_get_iseq_flags_has_kwrest(iseq: *const rb_iseq_t) -> bool;
+ pub fn rb_get_iseq_flags_anon_kwrest(iseq: *const rb_iseq_t) -> bool;
+ pub fn rb_get_iseq_flags_has_rest(iseq: *const rb_iseq_t) -> bool;
+ pub fn rb_get_iseq_flags_ruby2_keywords(iseq: *const rb_iseq_t) -> bool;
+ pub fn rb_get_iseq_flags_has_block(iseq: *const rb_iseq_t) -> bool;
+ pub fn rb_get_iseq_flags_ambiguous_param0(iseq: *const rb_iseq_t) -> bool;
+ pub fn rb_get_iseq_flags_accepts_no_kwarg(iseq: *const rb_iseq_t) -> bool;
+ pub fn rb_get_iseq_flags_forwardable(iseq: *const rb_iseq_t) -> bool;
+ pub fn rb_get_iseq_body_param_keyword(
+ iseq: *const rb_iseq_t,
+ ) -> *const rb_iseq_param_keyword_struct;
+ pub fn rb_get_iseq_body_param_size(iseq: *const rb_iseq_t) -> ::std::os::raw::c_uint;
+ pub fn rb_get_iseq_body_param_lead_num(iseq: *const rb_iseq_t) -> ::std::os::raw::c_int;
+ pub fn rb_get_iseq_body_param_opt_num(iseq: *const rb_iseq_t) -> ::std::os::raw::c_int;
+ pub fn rb_get_iseq_body_param_opt_table(iseq: *const rb_iseq_t) -> *const VALUE;
+ pub fn rb_get_ec_cfp(ec: *const rb_execution_context_t) -> *mut rb_control_frame_struct;
+ pub fn rb_get_cfp_iseq(cfp: *mut rb_control_frame_struct) -> *const rb_iseq_t;
+ pub fn rb_get_cfp_pc(cfp: *mut rb_control_frame_struct) -> *mut VALUE;
+ pub fn rb_get_cfp_sp(cfp: *mut rb_control_frame_struct) -> *mut VALUE;
+ pub fn rb_get_cfp_self(cfp: *mut rb_control_frame_struct) -> VALUE;
+ pub fn rb_get_cfp_ep(cfp: *mut rb_control_frame_struct) -> *mut VALUE;
+ pub fn rb_get_cfp_ep_level(cfp: *mut rb_control_frame_struct, lv: u32) -> *const VALUE;
+ pub fn rb_yarv_class_of(obj: VALUE) -> VALUE;
+ pub fn rb_FL_TEST(obj: VALUE, flags: VALUE) -> VALUE;
+ pub fn rb_FL_TEST_RAW(obj: VALUE, flags: VALUE) -> VALUE;
+ pub fn rb_RB_TYPE_P(obj: VALUE, t: ruby_value_type) -> bool;
+ pub fn rb_RSTRUCT_LEN(st: VALUE) -> ::std::os::raw::c_long;
+ pub fn rb_get_call_data_ci(cd: *const rb_call_data) -> *const rb_callinfo;
+ pub fn rb_BASIC_OP_UNREDEFINED_P(bop: ruby_basic_operators, klass: u32) -> bool;
+ pub fn rb_RCLASS_ORIGIN(c: VALUE) -> VALUE;
+ pub fn rb_assert_iseq_handle(handle: VALUE);
+ pub fn rb_assert_holding_vm_lock();
+ pub fn rb_IMEMO_TYPE_P(imemo: VALUE, imemo_type: imemo_type) -> ::std::os::raw::c_int;
+ pub fn rb_assert_cme_handle(handle: VALUE);
+ pub fn rb_yarv_ary_entry_internal(ary: VALUE, offset: ::std::os::raw::c_long) -> VALUE;
+ pub fn rb_jit_array_len(a: VALUE) -> ::std::os::raw::c_long;
+ pub fn rb_set_cfp_pc(cfp: *mut rb_control_frame_struct, pc: *const VALUE);
+ pub fn rb_set_cfp_sp(cfp: *mut rb_control_frame_struct, sp: *mut VALUE);
+ pub fn rb_jit_shape_too_complex_p(shape_id: shape_id_t) -> bool;
+ pub fn rb_jit_multi_ractor_p() -> bool;
+ pub fn rb_jit_vm_lock_then_barrier(
+ recursive_lock_level: *mut ::std::os::raw::c_uint,
+ file: *const ::std::os::raw::c_char,
+ line: ::std::os::raw::c_int,
+ );
+ pub fn rb_jit_vm_unlock(
+ recursive_lock_level: *mut ::std::os::raw::c_uint,
+ file: *const ::std::os::raw::c_char,
+ line: ::std::os::raw::c_int,
+ );
+ pub fn rb_iseq_reset_jit_func(iseq: *const rb_iseq_t);
+ pub fn rb_jit_get_page_size() -> u32;
+ pub fn rb_jit_reserve_addr_space(mem_size: u32) -> *mut u8;
+ pub fn rb_jit_for_each_iseq(callback: rb_iseq_callback, data: *mut ::std::os::raw::c_void);
+ pub fn rb_jit_mark_writable(mem_block: *mut ::std::os::raw::c_void, mem_size: u32) -> bool;
+ pub fn rb_jit_mark_executable(mem_block: *mut ::std::os::raw::c_void, mem_size: u32);
+ pub fn rb_jit_mark_unused(mem_block: *mut ::std::os::raw::c_void, mem_size: u32) -> bool;
+ pub fn rb_jit_icache_invalidate(
+ start: *mut ::std::os::raw::c_void,
+ end: *mut ::std::os::raw::c_void,
+ );
+ pub fn rb_jit_fix_mod_fix(recv: VALUE, obj: VALUE) -> VALUE;
+ pub fn rb_jit_fix_div_fix(recv: VALUE, obj: VALUE) -> VALUE;
+ pub fn rb_yarv_str_eql_internal(str1: VALUE, str2: VALUE) -> VALUE;
+ pub fn rb_jit_str_concat_codepoint(str_: VALUE, codepoint: VALUE);
+}
diff --git a/yjit/src/disasm.rs b/yjit/src/disasm.rs
new file mode 100644
index 0000000000..4f85937ee9
--- /dev/null
+++ b/yjit/src/disasm.rs
@@ -0,0 +1,400 @@
+use crate::core::*;
+use crate::cruby::*;
+use crate::yjit::yjit_enabled_p;
+use crate::asm::CodeBlock;
+use crate::codegen::CodePtr;
+use crate::options::DumpDisasm;
+
+use std::fmt::Write;
+
+#[cfg_attr(not(feature = "disasm"), allow(dead_code))]
+#[derive(Copy, Clone, Debug)]
+pub struct TerminalColor {
+ pub blue_begin: &'static str,
+ pub blue_end: &'static str,
+ pub bold_begin: &'static str,
+ pub bold_end: &'static str,
+}
+
+pub static TTY_TERMINAL_COLOR: TerminalColor = TerminalColor {
+ blue_begin: "\x1b[34m",
+ blue_end: "\x1b[0m",
+ bold_begin: "\x1b[1m",
+ bold_end: "\x1b[22m",
+};
+
+pub static NON_TTY_TERMINAL_COLOR: TerminalColor = TerminalColor {
+ blue_begin: "",
+ blue_end: "",
+ bold_begin: "",
+ bold_end: "",
+};
+
+/// Terminal escape codes for colors, font weight, etc. Only enabled if stdout is a TTY.
+pub fn get_colors() -> &'static TerminalColor {
+ if crate::utils::stdout_supports_colors() {
+ &TTY_TERMINAL_COLOR
+ } else {
+ &NON_TTY_TERMINAL_COLOR
+ }
+}
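+
+// For example, disasm_addr_range() below emits comment lines as
+// format!(" {}# {comment}{}", colors.bold_begin, colors.bold_end), which
+// degrades to plain text when stdout is not a TTY since both strings are empty.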
+
+/// Primitive called in yjit.rb
+/// Produce a string representing the disassembly for an ISEQ
+#[no_mangle]
+pub extern "C" fn rb_yjit_disasm_iseq(_ec: EcPtr, _ruby_self: VALUE, iseqw: VALUE) -> VALUE {
+ #[cfg(not(feature = "disasm"))]
+ {
+ let _ = iseqw;
+ return Qnil;
+ }
+
+ #[cfg(feature = "disasm")]
+ {
+ if !yjit_enabled_p() {
+ return Qnil;
+ }
+
+ // Get the iseq pointer from the wrapper
+ let iseq = unsafe { rb_iseqw_to_iseq(iseqw) };
+
+ // This will truncate disassembly of methods with 10k+ bytecodes.
+ // That's a good thing - this prints to console.
+ let out_string = with_vm_lock(src_loc!(), || disasm_iseq_insn_range(iseq, 0, 9999));
+
+ return rust_str_to_ruby(&out_string);
+ }
+}
+
+/// Only call while holding the VM lock.
+#[cfg(feature = "disasm")]
+pub fn disasm_iseq_insn_range(iseq: IseqPtr, start_idx: u16, end_idx: u16) -> String {
+ let mut out = String::from("");
+
+ // Get a list of block versions generated for this iseq
+ let block_list = get_or_create_iseq_block_list(iseq);
+ let mut block_list: Vec<&Block> = block_list.into_iter().map(|blockref| {
+ // SAFETY: We have the VM lock here and all the blocks on iseqs are valid.
+ unsafe { blockref.as_ref() }
+ }).collect();
+
+ // Get a list of codeblocks relevant to this iseq
+ let global_cb = crate::codegen::CodegenGlobals::get_inline_cb();
+
+ // Sort the blocks by increasing start addresses
+ block_list.sort_by_key(|block| block.get_start_addr().as_offset());
+
+ // Compute total code size in bytes for all blocks in the function
+ let mut total_code_size = 0;
+ for blockref in &block_list {
+ total_code_size += blockref.code_size();
+ }
+
+ writeln!(out, "NUM BLOCK VERSIONS: {}", block_list.len()).unwrap();
+ writeln!(out, "TOTAL INLINE CODE SIZE: {} bytes", total_code_size).unwrap();
+
+ // For each block, sorted by increasing start address
+ for (block_idx, block) in block_list.iter().enumerate() {
+ let blockid = block.get_blockid();
+ if blockid.idx >= start_idx && blockid.idx < end_idx {
+ let end_idx = block.get_end_idx();
+ let start_addr = block.get_start_addr();
+ let end_addr = block.get_end_addr();
+ let code_size = block.code_size();
+
+ // Write some info about the current block
+ let blockid_idx = blockid.idx;
+ let block_ident = format!(
+ "BLOCK {}/{}, ISEQ RANGE [{},{}), {} bytes ",
+ block_idx + 1,
+ block_list.len(),
+ blockid_idx,
+ end_idx,
+ code_size
+ );
+ writeln!(out, "== {:=<60}", block_ident).unwrap();
+
+ // Disassemble the instructions
+ for (start_addr, end_addr) in global_cb.writable_addrs(start_addr, end_addr) {
+ out.push_str(&disasm_addr_range(global_cb, start_addr, end_addr));
+ writeln!(out).unwrap();
+ }
+
+ // If this is not the last block
+ if block_idx < block_list.len() - 1 {
+ // Compute the size of the gap between this block and the next
+ let next_block = block_list[block_idx + 1];
+ let next_start_addr = next_block.get_start_addr();
+ let gap_size = next_start_addr.as_offset() - end_addr.as_offset();
+
+ // Log the size of the gap between the blocks if nonzero
+ if gap_size > 0 {
+ writeln!(out, "... {} byte gap ...", gap_size).unwrap();
+ }
+ }
+ }
+ }
+
+ return out;
+}
+
+/// Dump disassembly for a range in a [CodeBlock]. VM lock required.
+pub fn dump_disasm_addr_range(cb: &CodeBlock, start_addr: CodePtr, end_addr: CodePtr, dump_disasm: &DumpDisasm) {
+ for (start_addr, end_addr) in cb.writable_addrs(start_addr, end_addr) {
+ let disasm = disasm_addr_range(cb, start_addr, end_addr);
+ if disasm.len() > 0 {
+ match dump_disasm {
+ DumpDisasm::Stdout => println!("{disasm}"),
+ DumpDisasm::File(fd) => {
+ use std::os::unix::io::{FromRawFd, IntoRawFd};
+ use std::io::Write;
+
+ // Write with the fd opened during boot
+ let mut file = unsafe { std::fs::File::from_raw_fd(*fd) };
+ file.write_all(disasm.as_bytes()).unwrap();
+ let _ = file.into_raw_fd(); // keep the fd open
+ }
+ };
+ }
+ }
+}
+
+#[cfg(feature = "disasm")]
+pub fn disasm_addr_range(cb: &CodeBlock, start_addr: usize, end_addr: usize) -> String {
+ let mut out = String::from("");
+
+ // Initialize capstone
+ use capstone::prelude::*;
+
+ #[cfg(target_arch = "x86_64")]
+ let mut cs = Capstone::new()
+ .x86()
+ .mode(arch::x86::ArchMode::Mode64)
+ .syntax(arch::x86::ArchSyntax::Intel)
+ .build()
+ .unwrap();
+
+ #[cfg(target_arch = "aarch64")]
+ let mut cs = Capstone::new()
+ .arm64()
+ .mode(arch::arm64::ArchMode::Arm)
+ .detail(true)
+ .build()
+ .unwrap();
+ cs.set_skipdata(true).unwrap();
+
+ // Disassemble the instructions
+ let code_size = end_addr - start_addr;
+ let code_slice = unsafe { std::slice::from_raw_parts(start_addr as _, code_size) };
+ // Stabilize output for cargo test
+ #[cfg(test)]
+ let start_addr = 0;
+ let insns = cs.disasm_all(code_slice, start_addr as u64).unwrap();
+ let colors = get_colors();
+
+ // For each instruction in this block
+ for insn in insns.as_ref() {
+ // Comments for this block
+ if let Some(comment_list) = cb.comments_at(insn.address() as usize) {
+ for comment in comment_list {
+ if cb.outlined {
+ write!(&mut out, "{}", colors.blue_begin).unwrap(); // Make outlined code blue
+ }
+ writeln!(&mut out, " {}# {comment}{}", colors.bold_begin, colors.bold_end).unwrap(); // Make comments bold
+ }
+ }
+ if cb.outlined {
+ write!(&mut out, "{}", colors.blue_begin).unwrap(); // Make outlined code blue
+ }
+ writeln!(&mut out, " {insn}").unwrap();
+ if cb.outlined {
+ write!(&mut out, "{}", colors.blue_end).unwrap(); // Disable blue
+ }
+ }
+
+ return out;
+}
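+
+// The output interleaves block comments with instructions. A hypothetical
+// x86_64 fragment (bytes, addresses, and comment text invented for
+// illustration):
+//
+// # exit to interpreter
+// 0x0: jmp 0x20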
+
+/// Fallback version, used when built without a disassembler dependency, that prints just bytes and comments.
+#[cfg(not(feature = "disasm"))]
+pub fn disasm_addr_range(cb: &CodeBlock, start_addr: usize, end_addr: usize) -> String {
+ let mut out = String::new();
+ let mut line_byte_idx = 0;
+ const MAX_BYTES_PER_LINE: usize = 16;
+ let colors = get_colors();
+
+ for addr in start_addr..end_addr {
+ if let Some(comment_list) = cb.comments_at(addr) {
+ // Start a new line if we're in the middle of one
+ if line_byte_idx != 0 {
+ writeln!(&mut out).unwrap();
+ line_byte_idx = 0;
+ }
+ for comment in comment_list {
+ writeln!(&mut out, " {}# {comment}{}", colors.bold_begin, colors.bold_end).unwrap(); // Make comments bold
+ }
+ }
+ if line_byte_idx == 0 {
+ write!(&mut out, " 0x{addr:x}: ").unwrap();
+ } else {
+ write!(&mut out, " ").unwrap();
+ }
+ let byte = unsafe { (addr as *const u8).read() };
+ write!(&mut out, "{byte:02x}").unwrap();
+ line_byte_idx += 1;
+ if line_byte_idx == MAX_BYTES_PER_LINE {
+ writeln!(&mut out).unwrap();
+ line_byte_idx = 0;
+ }
+ }
+
+ if !out.is_empty() {
+ writeln!(&mut out).unwrap();
+ }
+
+ out
+}
+
+/// Assert that CodeBlock has the code specified with hex. In addition, if tested with
+/// `cargo test --all-features`, it also checks it generates the specified disasm.
+#[cfg(test)]
+macro_rules! assert_disasm {
+ ($cb:expr, $hex:expr, $disasm:expr) => {
+ #[cfg(feature = "disasm")]
+ {
+ let disasm = disasm_addr_range(
+ &$cb,
+ $cb.get_ptr(0).raw_addr(&$cb),
+ $cb.get_write_ptr().raw_addr(&$cb),
+ );
+ assert_eq!(unindent(&disasm, false), unindent(&$disasm, true));
+ }
+ assert_eq!(format!("{:x}", $cb), $hex);
+ };
+}
+#[cfg(test)]
+pub(crate) use assert_disasm;
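+
+// A hypothetical usage sketch (encoding and operands invented for
+// illustration): after assembling `mov rax, [rbp - 8]` into `cb`, a test can
+// check both the raw bytes and, under `--all-features`, the capstone output:
+//
+// assert_disasm!(cb, "488b45f8", {"
+//     0x0: mov rax, qword ptr [rbp - 8]
+// "});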
+
+/// Remove the minimum indent from every line, dropping the first and last
+/// lines if `trim_lines` (as when trimming raw string literals).
+#[cfg(all(feature = "disasm", test))]
+pub fn unindent(string: &str, trim_lines: bool) -> String {
+ fn split_lines(string: &str) -> Vec<String> {
+ let mut result: Vec<String> = vec![];
+ let mut buf: Vec<u8> = vec![];
+ for byte in string.as_bytes().iter() {
+ buf.push(*byte);
+ if *byte == b'\n' {
+ result.push(String::from_utf8(buf).unwrap());
+ buf = vec![];
+ }
+ }
+ if !buf.is_empty() {
+ result.push(String::from_utf8(buf).unwrap());
+ }
+ result
+ }
+
+ // Break up a string into multiple lines
+ let mut lines = split_lines(string);
+ if trim_lines { // raw string literals come with extra lines
+ lines.remove(0);
+ lines.remove(lines.len() - 1);
+ }
+
+ // Count the minimum number of spaces
+ let spaces = lines.iter().filter_map(|line| {
+ for (i, ch) in line.as_bytes().iter().enumerate() {
+ if *ch != b' ' {
+ return Some(i);
+ }
+ }
+ None
+ }).min().unwrap_or(0);
+
+ // Join lines, removing spaces
+ let mut unindented: Vec<u8> = vec![];
+ for line in lines.iter() {
+ if line.len() > spaces {
+ unindented.extend_from_slice(&line.as_bytes()[spaces..]);
+ } else {
+ unindented.extend_from_slice(&line.as_bytes());
+ }
+ }
+ String::from_utf8(unindented).unwrap()
+}
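+
+// A minimal test sketch of the behavior above with `trim_lines` false: the
+// common four-space indent is stripped and the relative indent is preserved.
+#[cfg(all(feature = "disasm", test))]
+mod unindent_tests {
+ use super::unindent;
+
+ #[test]
+ fn removes_common_indent() {
+ assert_eq!(unindent("    a\n      b\n", false), "a\n  b\n");
+ }
+}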
+
+/// Primitive called in yjit.rb
+/// Produce a list of instructions compiled for an ISEQ
+#[no_mangle]
+pub extern "C" fn rb_yjit_insns_compiled(_ec: EcPtr, _ruby_self: VALUE, iseqw: VALUE) -> VALUE {
+ if !yjit_enabled_p() {
+ return Qnil;
+ }
+
+ // Get the iseq pointer from the wrapper
+ let iseq = unsafe { rb_iseqw_to_iseq(iseqw) };
+
+ // Get the list of instructions compiled
+ let insn_vec = insns_compiled(iseq);
+
+ unsafe {
+ let insn_ary = rb_ary_new_capa((insn_vec.len() * 2) as i64);
+
+ // For each instruction compiled
+ for idx in 0..insn_vec.len() {
+ let op_name = &insn_vec[idx].0;
+ let insn_idx = insn_vec[idx].1;
+
+ let op_sym = rust_str_to_sym(&op_name);
+
+ // Store the instruction index and opcode symbol
+ rb_ary_store(
+ insn_ary,
+ (2 * idx + 0) as i64,
+ VALUE::fixnum_from_usize(insn_idx as usize),
+ );
+ rb_ary_store(insn_ary, (2 * idx + 1) as i64, op_sym);
+ }
+
+ insn_ary
+ }
+}
+
+fn insns_compiled(iseq: IseqPtr) -> Vec<(String, u16)> {
+ let mut insn_vec = Vec::new();
+
+ // Get a list of block versions generated for this iseq
+ let block_list = get_or_create_iseq_block_list(iseq);
+
+ // For each block associated with this iseq
+ for blockref in &block_list {
+ // SAFETY: Called as part of a Ruby method, which ensures the graph is
+ // well connected for the given iseq.
+ let block = unsafe { blockref.as_ref() };
+ let start_idx = block.get_blockid().idx;
+ let end_idx = block.get_end_idx();
+ assert!(u32::from(end_idx) <= unsafe { get_iseq_encoded_size(iseq) });
+
+ // For each YARV instruction in the block
+ let mut insn_idx = start_idx;
+ while insn_idx < end_idx {
+ // Get the current pc and opcode
+ let pc = unsafe { rb_iseq_pc_at_idx(iseq, insn_idx.into()) };
+ // try_into() call below is unfortunate. Maybe pick i32 instead of usize for opcodes.
+ let opcode: usize = unsafe { rb_iseq_opcode_at_pc(iseq, pc) }
+ .try_into()
+ .unwrap();
+
+ // Get the mnemonic for this opcode
+ let op_name = insn_name(opcode);
+
+ // Add the instruction to the list
+ insn_vec.push((op_name, insn_idx));
+
+ // Move to the next instruction
+ insn_idx += insn_len(opcode) as u16;
+ }
+ }
+
+ return insn_vec;
+}
diff --git a/yjit/src/invariants.rs b/yjit/src/invariants.rs
new file mode 100644
index 0000000000..0f22fba6b8
--- /dev/null
+++ b/yjit/src/invariants.rs
@@ -0,0 +1,709 @@
+//! Code to track assumptions made during code generation and invalidate
+//! generated code if and when these assumptions are invalidated.
+
+use crate::backend::ir::Assembler;
+use crate::codegen::*;
+use crate::core::*;
+use crate::cruby::*;
+use crate::stats::*;
+use crate::utils::IntoUsize;
+use crate::yjit::yjit_enabled_p;
+
+use std::collections::{HashMap, HashSet};
+use std::os::raw::c_void;
+use std::mem;
+
+// Invariants to track:
+// assume_bop_not_redefined(jit, INTEGER_REDEFINED_OP_FLAG, BOP_PLUS)
+// assume_method_lookup_stable(comptime_recv_klass, cme, jit);
+// assume_single_ractor_mode()
+// track_stable_constant_names_assumption()
+
+/// Used to track all of the various block references that contain assumptions
+/// about the state of the virtual machine.
+pub struct Invariants {
+ /// Tracks block assumptions about callable method entry validity.
+ cme_validity: HashMap<*const rb_callable_method_entry_t, HashSet<BlockRef>>,
+
+ /// A map from a class and its associated basic operator to a set of blocks
+ /// that are assuming that that operator is not redefined. This is used for
+ /// quick access to all of the blocks that are making this assumption when
+ /// the operator is redefined.
+ basic_operator_blocks: HashMap<(RedefinitionFlag, ruby_basic_operators), HashSet<BlockRef>>,
+ /// A map from a block to a set of classes and their associated basic
+ /// operators that the block is assuming are not redefined. This is used for
+ /// quick access to all of the assumptions that a block is making when it
+ /// needs to be invalidated.
+ block_basic_operators: HashMap<BlockRef, HashSet<(RedefinitionFlag, ruby_basic_operators)>>,
+
+ /// Tracks the set of blocks that are assuming the interpreter is running
+ /// with only one ractor. This is important for things like accessing
+ /// constants which can have different semantics when multiple ractors are
+ /// running.
+ single_ractor: HashSet<BlockRef>,
+
+ /// A map from an ID to the set of blocks that are assuming a constant with
+ /// that ID as part of its name has not been redefined. For example, if
+ /// a constant `A::B` is redefined, then all blocks that are assuming that
+ /// `A` and `B` have not been redefined must be invalidated.
+ constant_state_blocks: HashMap<ID, HashSet<BlockRef>>,
+ /// A map from a block to a set of IDs that it is assuming have not been
+ /// redefined.
+ block_constant_states: HashMap<BlockRef, HashSet<ID>>,
+
+ /// A map from a class to a set of blocks that assume objects of the class
+ /// will have no singleton class. When the set is empty, it means that a
+ /// singleton class has been created for the class since boot, so blocks can
+ /// no longer assume the absence of a singleton class.
+ /// For now, the key can be only Array, Hash, or String. Consider making
+ /// an inverted HashMap if we start using this for user-defined classes
+ /// to maintain the performance of block_assumptions_free().
+ no_singleton_classes: HashMap<VALUE, HashSet<BlockRef>>,
+
+ /// A map from an ISEQ to a set of blocks that assume base pointer is equal
+ /// to environment pointer. When the set is empty, it means the EP has
+ /// escaped in the ISEQ.
+ no_ep_escape_iseqs: HashMap<IseqPtr, HashSet<BlockRef>>,
+}
+
+/// Private singleton instance of the invariants global struct.
+static mut INVARIANTS: Option<Invariants> = None;
+
+impl Invariants {
+ pub fn init() {
+ // Wrapping this in unsafe to assign directly to a global.
+ unsafe {
+ INVARIANTS = Some(Invariants {
+ cme_validity: HashMap::new(),
+ basic_operator_blocks: HashMap::new(),
+ block_basic_operators: HashMap::new(),
+ single_ractor: HashSet::new(),
+ constant_state_blocks: HashMap::new(),
+ block_constant_states: HashMap::new(),
+ no_singleton_classes: HashMap::new(),
+ no_ep_escape_iseqs: HashMap::new(),
+ });
+ }
+ }
+
+ /// Get a mutable reference to the codegen globals instance
+ pub fn get_instance() -> &'static mut Invariants {
+ unsafe { INVARIANTS.as_mut().unwrap() }
+ }
+}
+
+/// Mark the pending block as assuming that certain basic operators (e.g. Integer#==)
+/// have not been redefined.
+#[must_use]
+pub fn assume_bop_not_redefined(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ klass: RedefinitionFlag,
+ bop: ruby_basic_operators,
+) -> bool {
+ if unsafe { BASIC_OP_UNREDEFINED_P(bop, klass) } {
+ if jit_ensure_block_entry_exit(jit, asm).is_none() {
+ return false;
+ }
+ jit.bop_assumptions.push((klass, bop));
+
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/// Track that a block is only valid when a certain basic operator has not been redefined
+/// since the block's inception.
+pub fn track_bop_assumption(uninit_block: BlockRef, bop: (RedefinitionFlag, ruby_basic_operators)) {
+ let invariants = Invariants::get_instance();
+ invariants
+ .basic_operator_blocks
+ .entry(bop)
+ .or_default()
+ .insert(uninit_block);
+ invariants
+ .block_basic_operators
+ .entry(uninit_block)
+ .or_default()
+ .insert(bop);
+}
+
+/// Track that a block will assume that `cme` is valid (false == METHOD_ENTRY_INVALIDATED(cme)).
+/// [rb_yjit_cme_invalidate] invalidates the block when `cme` is invalidated.
+pub fn track_method_lookup_stability_assumption(
+ uninit_block: BlockRef,
+ callee_cme: *const rb_callable_method_entry_t,
+) {
+ Invariants::get_instance()
+ .cme_validity
+ .entry(callee_cme)
+ .or_default()
+ .insert(uninit_block);
+}
+
+/// Track that a block will assume that `klass` objects will have no singleton class.
+pub fn track_no_singleton_class_assumption(uninit_block: BlockRef, klass: VALUE) {
+ Invariants::get_instance()
+ .no_singleton_classes
+ .entry(klass)
+ .or_default()
+ .insert(uninit_block);
+}
+
+/// Returns true if we've seen a singleton class of a given class since boot.
+pub fn has_singleton_class_of(klass: VALUE) -> bool {
+ Invariants::get_instance()
+ .no_singleton_classes
+ .get(&klass)
+ .map_or(false, |blocks| blocks.is_empty())
+}
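+
+// Note the encoding shared by `no_singleton_classes` and `no_ep_escape_iseqs`:
+// a missing key means the class (or ISEQ) was never tracked, a non-empty set
+// means blocks still rely on the assumption, and an empty set means the
+// assumption has been violated once and can never be made again.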
+
+/// Track that a block will assume that the base pointer is equal to the environment pointer.
+pub fn track_no_ep_escape_assumption(uninit_block: BlockRef, iseq: IseqPtr) {
+ Invariants::get_instance()
+ .no_ep_escape_iseqs
+ .entry(iseq)
+ .or_default()
+ .insert(uninit_block);
+}
+
+/// Returns true if a given ISEQ has previously escaped an environment.
+pub fn iseq_escapes_ep(iseq: IseqPtr) -> bool {
+ Invariants::get_instance()
+ .no_ep_escape_iseqs
+ .get(&iseq)
+ .map_or(false, |blocks| blocks.is_empty())
+}
+
+/// Forget an ISEQ remembered in invariants
+pub fn iseq_free_invariants(iseq: IseqPtr) {
+ if unsafe { INVARIANTS.is_none() } {
+ return;
+ }
+ Invariants::get_instance().no_ep_escape_iseqs.remove(&iseq);
+}
+
+// Checks rb_method_basic_definition_p and registers the current block for invalidation if method
+// lookup changes.
+// A "basic method" is one defined during VM boot, so we can use this to check assumptions based on
+// default behavior.
+pub fn assume_method_basic_definition(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ klass: VALUE,
+ mid: ID
+) -> bool {
+ if unsafe { rb_method_basic_definition_p(klass, mid) } != 0 {
+ let cme = unsafe { rb_callable_method_entry(klass, mid) };
+ jit.assume_method_lookup_stable(asm, cme);
+ true
+ } else {
+ false
+ }
+}
+
+/// Tracks that a block is assuming it is operating in single-ractor mode.
+#[must_use]
+pub fn assume_single_ractor_mode(jit: &mut JITState, asm: &mut Assembler) -> bool {
+ if unsafe { rb_jit_multi_ractor_p() } {
+ false
+ } else {
+ if jit_ensure_block_entry_exit(jit, asm).is_none() {
+ return false;
+ }
+ jit.block_assumes_single_ractor = true;
+
+ true
+ }
+}
+
+/// Track that the block will assume single ractor mode.
+pub fn track_single_ractor_assumption(uninit_block: BlockRef) {
+ Invariants::get_instance()
+ .single_ractor
+ .insert(uninit_block);
+}
+
+/// Track that a block will assume that the name components of a constant path expression
+/// have not changed since the block's full initialization.
+pub fn track_stable_constant_names_assumption(uninit_block: BlockRef, idlist: *const ID) {
+ fn assume_stable_constant_name(
+ uninit_block: BlockRef,
+ id: ID,
+ ) {
+ if id == ID!(NULL) {
+ // Used for :: prefix
+ return;
+ }
+
+ let invariants = Invariants::get_instance();
+ invariants
+ .constant_state_blocks
+ .entry(id)
+ .or_default()
+ .insert(uninit_block);
+ invariants
+ .block_constant_states
+ .entry(uninit_block)
+ .or_default()
+ .insert(id);
+ }
+
+ for i in 0.. {
+ match unsafe { *idlist.offset(i) } {
+ 0 => break, // End of NULL terminated list
+ id => assume_stable_constant_name(uninit_block, id),
+ }
+ }
+}
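+
+// For example, for `A::B` the NULL-terminated `idlist` holds the IDs of `A`
+// and `B`; a later change to either name reaches the block through
+// rb_yjit_constant_state_changed() and invalidates it.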
+
+/// Called when a basic operator is redefined. Note that all the blocks assuming
+/// the stability of different operators are invalidated together and we don't
+/// do fine-grained tracking.
+#[no_mangle]
+pub extern "C" fn rb_yjit_bop_redefined(klass: RedefinitionFlag, bop: ruby_basic_operators) {
+ // If YJIT isn't enabled, do nothing
+ if !yjit_enabled_p() {
+ return;
+ }
+
+ with_vm_lock(src_loc!(), || {
+ // Loop through the blocks that are associated with this class and basic
+ // operator and invalidate them.
+ if let Some(blocks) = Invariants::get_instance()
+ .basic_operator_blocks
+ .remove(&(klass, bop))
+ {
+ for block in blocks.iter() {
+ invalidate_block_version(block);
+ incr_counter!(invalidate_bop_redefined);
+ }
+ }
+ });
+}
+
+/// Callback for when a cme becomes invalid. Invalidate all blocks that depend
+/// on the given cme being valid.
+#[no_mangle]
+pub extern "C" fn rb_yjit_cme_invalidate(callee_cme: *const rb_callable_method_entry_t) {
+ // If YJIT isn't enabled, do nothing
+ if !yjit_enabled_p() {
+ return;
+ }
+
+ with_vm_lock(src_loc!(), || {
+ if let Some(blocks) = Invariants::get_instance().cme_validity.remove(&callee_cme) {
+ for block in blocks.iter() {
+ invalidate_block_version(block);
+ incr_counter!(invalidate_method_lookup);
+ }
+ }
+ });
+}
+
+/// Callback for when Ruby is about to spawn a ractor. In that case we need to
+/// invalidate every block that is assuming single ractor mode.
+#[no_mangle]
+pub extern "C" fn rb_yjit_before_ractor_spawn() {
+ // If YJIT isn't enabled, do nothing
+ if !yjit_enabled_p() {
+ return;
+ }
+
+ with_vm_lock(src_loc!(), || {
+ // Clear the set of blocks inside Invariants
+ let blocks = mem::take(&mut Invariants::get_instance().single_ractor);
+
+ // Invalidate the blocks
+ for block in &blocks {
+ invalidate_block_version(block);
+ incr_counter!(invalidate_ractor_spawn);
+ }
+ });
+}
+
+/// Callback for when the global constant state changes.
+#[no_mangle]
+pub extern "C" fn rb_yjit_constant_state_changed(id: ID) {
+ // If YJIT isn't enabled, do nothing
+ if !yjit_enabled_p() {
+ return;
+ }
+
+ with_vm_lock(src_loc!(), || {
+ // Invalidate the blocks that are associated with the given ID.
+ if let Some(blocks) = Invariants::get_instance().constant_state_blocks.remove(&id) {
+ for block in &blocks {
+ invalidate_block_version(block);
+ incr_counter!(invalidate_constant_state_bump);
+ }
+ }
+ });
+}
+
+/// Callback for marking GC objects inside [Invariants].
+/// See `struct yjit_root_struct` in C.
+#[no_mangle]
+pub extern "C" fn rb_yjit_root_mark() {
+ // Call rb_gc_mark on exit location's raw_samples to
+ // wrap frames in a GC allocated object. This needs to be called
+ // at the same time as root mark.
+ YjitExitLocations::gc_mark_raw_samples();
+
+ // Comment from C YJIT:
+ //
+ // Why not let the GC move the cme keys in this table?
+ // Because this is basically a compare_by_identity Hash.
+ // If a key moves, we would need to reinsert it into the table so it is rehashed.
+ // That is tricky to do, especially as it could trigger allocation which could
+ // trigger GC. Not sure if it is okay to trigger GC while the GC is updating
+ // references.
+ //
+ // NOTE(alan): since we are using Rust data structures that don't interact
+ // with the Ruby GC now, it might be feasible to allow movement.
+
+ let invariants = Invariants::get_instance();
+
+ // Mark CME imemos
+ for cme in invariants.cme_validity.keys() {
+ let cme: VALUE = (*cme).into();
+
+ unsafe { rb_gc_mark(cme) };
+ }
+}
+
+#[no_mangle]
+pub extern "C" fn rb_yjit_root_update_references() {
+ if unsafe { INVARIANTS.is_none() } {
+ return;
+ }
+ let no_ep_escape_iseqs = &mut Invariants::get_instance().no_ep_escape_iseqs;
+
+ // Make a copy of the table with updated ISEQ keys. Keys can't be updated
+ // in place: moving an ISEQ changes its address, and therefore its hash.
+ let mut updated_copy = HashMap::with_capacity(no_ep_escape_iseqs.len());
+ for (iseq, blocks) in mem::take(no_ep_escape_iseqs) {
+ let new_iseq = unsafe { rb_gc_location(iseq.into()) }.as_iseq();
+ updated_copy.insert(new_iseq, blocks);
+ }
+
+ *no_ep_escape_iseqs = updated_copy;
+}
+
+/// Remove all invariant assumptions made by the block by removing the block as
+/// a key in all of the relevant tables.
+/// For safety, the block has to be initialized and the VM lock must be held.
+/// However, outgoing/incoming references to the block do _not_ need to be valid.
+pub fn block_assumptions_free(blockref: BlockRef) {
+ let invariants = Invariants::get_instance();
+
+ {
+ // SAFETY: caller ensures that this reference is valid
+ let block = unsafe { blockref.as_ref() };
+
+ // For each method lookup dependency
+ for dep in block.iter_cme_deps() {
+ // Remove tracking for cme validity
+ if let Some(blockset) = invariants.cme_validity.get_mut(&dep) {
+ blockset.remove(&blockref);
+ if blockset.is_empty() {
+ invariants.cme_validity.remove(&dep);
+ }
+ }
+ }
+ if invariants.cme_validity.is_empty() {
+ invariants.cme_validity.shrink_to_fit();
+ }
+ }
+
+ // Remove tracking for basic operators that the given block assumes have
+ // not been redefined.
+ if let Some(bops) = invariants.block_basic_operators.remove(&blockref) {
+ // Remove tracking for the given block from the list of blocks associated
+ // with the given basic operator.
+ for key in &bops {
+ if let Some(blocks) = invariants.basic_operator_blocks.get_mut(key) {
+ blocks.remove(&blockref);
+ if blocks.is_empty() {
+ invariants.basic_operator_blocks.remove(key);
+ }
+ }
+ }
+ }
+ if invariants.block_basic_operators.is_empty() {
+ invariants.block_basic_operators.shrink_to_fit();
+ }
+ if invariants.basic_operator_blocks.is_empty() {
+ invariants.basic_operator_blocks.shrink_to_fit();
+ }
+
+ // Remove tracking for blocks assuming single ractor mode
+ invariants.single_ractor.remove(&blockref);
+ if invariants.single_ractor.is_empty() {
+ invariants.single_ractor.shrink_to_fit();
+ }
+
+ // Remove tracking for constant state for a given ID.
+ if let Some(ids) = invariants.block_constant_states.remove(&blockref) {
+ for id in ids {
+ if let Some(blocks) = invariants.constant_state_blocks.get_mut(&id) {
+ blocks.remove(&blockref);
+ if blocks.is_empty() {
+ invariants.constant_state_blocks.remove(&id);
+ }
+ }
+ }
+ }
+ if invariants.block_constant_states.is_empty() {
+ invariants.block_constant_states.shrink_to_fit();
+ }
+ if invariants.constant_state_blocks.is_empty() {
+ invariants.constant_state_blocks.shrink_to_fit();
+ }
+
+ // Remove tracking for blocks assuming no singleton class
+ // NOTE: no_singleton_class has up to 3 keys (Array, Hash, or String) for now.
+ // This is effectively an O(1) access unless we start using it for more classes.
+ for (_, blocks) in invariants.no_singleton_classes.iter_mut() {
+ blocks.remove(&blockref);
+ }
+
+ // Remove tracking for blocks assuming EP doesn't escape
+ let iseq = unsafe { blockref.as_ref() }.get_blockid().iseq;
+ if let Some(blocks) = invariants.no_ep_escape_iseqs.get_mut(&iseq) {
+ blocks.remove(&blockref);
+ }
+}
+
+/// Callback from the interpreter when a constant inline cache is filled.
+/// Invalidate the blocks at the matching opt_getconstant_path instruction so
+/// they can regenerate code using the new value in the constant cache.
+#[no_mangle]
+pub extern "C" fn rb_yjit_constant_ic_update(iseq: *const rb_iseq_t, ic: IC, insn_idx: std::os::raw::c_uint) {
+ // If YJIT isn't enabled, do nothing
+ if !yjit_enabled_p() {
+ return;
+ }
+
+ // Try to downcast the iseq index
+ let insn_idx: IseqIdx = if let Ok(idx) = insn_idx.try_into() {
+ idx
+ } else {
+ // The index is too large, YJIT can't possibly have code for it,
+ // so there is nothing to invalidate.
+ return;
+ };
+
+ if !unsafe { (*(*ic).entry).ic_cref }.is_null() || unsafe { rb_jit_multi_ractor_p() } {
+ // We can't generate code in these situations, so no need to invalidate.
+ // See gen_opt_getinlinecache.
+ return;
+ }
+
+ with_vm_lock(src_loc!(), || {
+ let code = unsafe { get_iseq_body_iseq_encoded(iseq) };
+
+ // This should come from a running iseq, so direct threading translation
+ // should have been done
+ assert!(unsafe { FL_TEST(iseq.into(), VALUE(ISEQ_TRANSLATED)) } != VALUE(0));
+ assert!(u32::from(insn_idx) < unsafe { get_iseq_encoded_size(iseq) });
+
+ // Ensure that the instruction the insn_idx is pointing to is in
+ // fact a opt_getconstant_path instruction.
+ assert_eq!(
+ unsafe {
+ let opcode_pc = code.add(insn_idx.as_usize());
+ let translated_opcode: VALUE = opcode_pc.read();
+ rb_vm_insn_decode(translated_opcode)
+ },
+ YARVINSN_opt_getconstant_path.try_into().unwrap()
+ );
+
+ // Find the matching opt_getconstant_path and invalidate all the blocks there
+ // RUBY_ASSERT(insn_op_type(BIN(opt_getinlinecache), 1) == TS_IC);
+
+ let ic_pc = unsafe { code.add(insn_idx.as_usize() + 1) };
+ let ic_operand: IC = unsafe { ic_pc.read() }.as_mut_ptr();
+
+ if ic == ic_operand {
+ for block in take_version_list(BlockId {
+ iseq,
+ idx: insn_idx,
+ }) {
+ invalidate_block_version(&block);
+ incr_counter!(invalidate_constant_ic_fill);
+ }
+ } else {
+ panic!("ic->get_insn_index not set properly");
+ }
+ });
+}
+
+/// Invalidate blocks that assume objects of a given class will have no singleton class.
+#[no_mangle]
+pub extern "C" fn rb_yjit_invalidate_no_singleton_class(klass: VALUE) {
+ // Skip tracking singleton classes during boot. Such objects already have a singleton class
+ // before entering JIT code, so they get rejected when they're checked for the first time.
+ if unsafe { INVARIANTS.is_none() } {
+ return;
+ }
+
+ // We apply this optimization only to Array, Hash, and String for now.
+ if unsafe { [rb_cArray, rb_cHash, rb_cString].contains(&klass) } {
+ with_vm_lock(src_loc!(), || {
+ let no_singleton_classes = &mut Invariants::get_instance().no_singleton_classes;
+ match no_singleton_classes.get_mut(&klass) {
+ Some(blocks) => {
+ // Invalidate existing blocks and let has_singleton_class_of()
+ // return true when they are compiled again
+ for block in mem::take(blocks) {
+ invalidate_block_version(&block);
+ incr_counter!(invalidate_no_singleton_class);
+ }
+ }
+ None => {
+ // Let has_singleton_class_of() return true for this class
+ no_singleton_classes.insert(klass, HashSet::new());
+ }
+ }
+ });
+ }
+}
+
+/// Invalidate blocks for a given ISEQ that assume the environment pointer is
+/// equal to the base pointer.
+#[no_mangle]
+pub extern "C" fn rb_yjit_invalidate_ep_is_bp(iseq: IseqPtr) {
+ // Skip tracking EP escapes on boot. We don't need to invalidate anything during boot.
+ if unsafe { INVARIANTS.is_none() } {
+ return;
+ }
+
+ with_vm_lock(src_loc!(), || {
+ // If an EP escape for this ISEQ is detected for the first time, invalidate all blocks
+ // associated to the ISEQ.
+ let no_ep_escape_iseqs = &mut Invariants::get_instance().no_ep_escape_iseqs;
+ match no_ep_escape_iseqs.get_mut(&iseq) {
+ Some(blocks) => {
+ // Invalidate existing blocks and make jit.ep_is_bp() return false
+ for block in mem::take(blocks) {
+ invalidate_block_version(&block);
+ incr_counter!(invalidate_ep_escape);
+ }
+ }
+ None => {
+ // Let jit.ep_is_bp() return false for this ISEQ
+ no_ep_escape_iseqs.insert(iseq, HashSet::new());
+ }
+ }
+ });
+}
+
+// Invalidate all generated code and patch C method return code to contain
+// logic for firing the c_return TracePoint event. Once rb_vm_barrier()
+// returns, all other ractors are pausing inside RB_VM_LOCK_ENTER(), which
+// means they are inside a C routine. If any generated code is on-stack, it is
+// waiting for a return from a C routine. For every routine call, we patch in
+// an exit after the body of the containing VM instruction. This makes it so
+// all the invalidated code exits as soon as execution logically reaches
+// the next VM instruction. The interpreter takes care of firing the tracing
+// event if it so happens that the next VM instruction has one attached.
+//
+// The c_return event needs special handling as our codegen never outputs code
+// that contains tracing logic. If we let the normal output code run until the
+// start of the next VM instruction by relying on the patching scheme above, we
+// would fail to fire the c_return event. The interpreter doesn't fire the
+// event at an instruction boundary, so simply exiting to the interpreter isn't
+// enough. To handle it, we patch in the full logic at the return address. See
+// full_cfunc_return().
+//
+// In addition to patching, we prevent future entries into invalidated code by
+// removing all live blocks from their iseq.
+#[no_mangle]
+pub extern "C" fn rb_yjit_tracing_invalidate_all() {
+ if !yjit_enabled_p() {
+ return;
+ }
+
+ incr_counter!(invalidate_everything);
+
+ // Stop other ractors since we are going to patch machine code.
+ with_vm_lock(src_loc!(), || {
+ // Make it so all live block versions are no longer valid branch targets
+ let mut on_stack_iseqs = HashSet::new();
+ for_each_on_stack_iseq(|iseq| {
+ on_stack_iseqs.insert(iseq);
+ });
+ for_each_iseq(|iseq| {
+ if let Some(payload) = get_iseq_payload(iseq) {
+ let blocks = payload.take_all_blocks();
+
+ if on_stack_iseqs.contains(&iseq) {
+ // This ISEQ is running, so we can't free blocks immediately
+ for block in blocks {
+ delayed_deallocation(block);
+ }
+ payload.dead_blocks.shrink_to_fit();
+ } else {
+ // Safe to free dead blocks since the ISEQ isn't running
+ // Since we're freeing _all_ blocks, we don't need to keep the graph well formed
+ for block in blocks {
+ unsafe { free_block(block, false) };
+ }
+ mem::take(&mut payload.dead_blocks)
+ .into_iter()
+ .for_each(|block| unsafe { free_block(block, false) });
+ }
+ }
+
+ // Reset output code entry point
+ unsafe { rb_iseq_reset_jit_func(iseq) };
+ });
+
+ let cb = CodegenGlobals::get_inline_cb();
+
+ // Prevent on-stack frames from jumping to the caller on jit_exec_exception
+ extern "C" {
+ fn rb_yjit_cancel_jit_return(leave_exit: *mut c_void, leave_exception: *mut c_void) -> VALUE;
+ }
+ unsafe {
+ rb_yjit_cancel_jit_return(
+ CodegenGlobals::get_leave_exit_code().raw_ptr(cb) as _,
+ CodegenGlobals::get_leave_exception_code().raw_ptr(cb) as _,
+ );
+ }
+
+ // Apply patches
+ let old_pos = cb.get_write_pos();
+ let old_dropped_bytes = cb.has_dropped_bytes();
+ let mut patches = CodegenGlobals::take_global_inval_patches();
+ patches.sort_by_cached_key(|patch| patch.inline_patch_pos.raw_ptr(cb));
+ let mut last_patch_end = std::ptr::null();
+ for patch in &patches {
+ let patch_pos = patch.inline_patch_pos.raw_ptr(cb);
+ assert!(
+ last_patch_end <= patch_pos,
+ "patches should not overlap (last_patch_end: {last_patch_end:?}, patch_pos: {patch_pos:?})",
+ );
+
+ cb.set_write_ptr(patch.inline_patch_pos);
+ cb.set_dropped_bytes(false);
+ cb.without_page_end_reserve(|cb| {
+ let mut asm = crate::backend::ir::Assembler::new_without_iseq();
+ asm.jmp(patch.outlined_target_pos.as_side_exit());
+ if asm.compile(cb, None).is_none() {
+ panic!("Failed to apply patch at {:?}", patch.inline_patch_pos);
+ }
+ });
+ last_patch_end = cb.get_write_ptr().raw_ptr(cb);
+ }
+ cb.set_pos(old_pos);
+ cb.set_dropped_bytes(old_dropped_bytes);
+
+ CodegenGlobals::get_outlined_cb()
+ .unwrap()
+ .mark_all_executable();
+ cb.mark_all_executable();
+ });
+}
diff --git a/yjit/src/lib.rs b/yjit/src/lib.rs
new file mode 100644
index 0000000000..f3247fbf1a
--- /dev/null
+++ b/yjit/src/lib.rs
@@ -0,0 +1,31 @@
+// Clippy disagreements
+#![allow(clippy::style)] // We are laid back about style
+#![allow(clippy::too_many_arguments)] // :shrug:
+#![allow(clippy::identity_op)] // Sometimes we do it for style
+
+// TODO(alan): This lint is right -- the way we use `static mut` is UB happy. We have many globals
+// and take `&mut` frequently, sometimes with a method that easily allows calling it twice.
+//
+// All of our globals rely on us running single threaded, which outside of boot-time relies on the
+// VM lock (which signals and waits for all other threads to pause). To fix this properly, we should
+// gather up all the globals into a struct to centralize the safety reasoning. That way we can also
+// check for re-entrance in one place.
+//
+// We're too close to release to do that, though, so disable the lint for now.
+#![allow(unknown_lints)]
+#![allow(static_mut_refs)]
+#![warn(unknown_lints)]
+
+pub mod asm;
+mod backend;
+mod codegen;
+mod core;
+mod cruby;
+mod disasm;
+mod invariants;
+mod options;
+mod stats;
+mod utils;
+mod yjit;
+mod virtualmem;
+mod log;
diff --git a/yjit/src/log.rs b/yjit/src/log.rs
new file mode 100644
index 0000000000..c5a724f7e1
--- /dev/null
+++ b/yjit/src/log.rs
@@ -0,0 +1,179 @@
+use crate::core::BlockId;
+use crate::cruby::*;
+use crate::options::*;
+use crate::yjit::yjit_enabled_p;
+
+use std::fmt::{Display, Formatter};
+use std::os::raw::c_long;
+use crate::utils::iseq_get_location;
+
+type Timestamp = f64;
+
+#[derive(Clone, Debug)]
+pub struct LogEntry {
+ /// The time when the block was compiled.
+ pub timestamp: Timestamp,
+
+ /// The log message.
+ pub message: String,
+}
+
+impl Display for LogEntry {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{:15.6}: {}", self.timestamp, self.message)
+ }
+}
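+
+// For example, an entry stamped 1700000000.123456 renders as
+// "1700000000.123456: <message>"; the `{:15.6}` format keeps microsecond
+// precision and pads narrower timestamps with leading spaces.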
+
+pub type Log = CircularBuffer<LogEntry, 1024>;
+static mut LOG: Option<Log> = None;
+
+impl Log {
+ pub fn init() {
+ unsafe {
+ LOG = Some(Log::new());
+ }
+ }
+
+ pub fn get_instance() -> &'static mut Log {
+ unsafe {
+ LOG.as_mut().unwrap()
+ }
+ }
+
+ pub fn has_instance() -> bool {
+ unsafe {
+ LOG.as_mut().is_some()
+ }
+ }
+
+ pub fn add_block_with_chain_depth(block_id: BlockId, chain_depth: u8) {
+ if !Self::has_instance() {
+ return;
+ }
+
+ let print_log = get_option!(log);
+ let timestamp = std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs_f64();
+
+ let location = iseq_get_location(block_id.iseq, block_id.idx);
+ let index = block_id.idx;
+ let message = if chain_depth > 0 {
+ format!("{} (index: {}, chain_depth: {})", location, index, chain_depth)
+ } else {
+ format!("{} (index: {})", location, index)
+ };
+
+ let entry = LogEntry {
+ timestamp,
+ message
+ };
+
+ if let Some(output) = print_log {
+ match output {
+ LogOutput::Stderr => {
+ eprintln!("{}", entry);
+ }
+
+ LogOutput::File(fd) => {
+ use std::os::unix::io::{FromRawFd, IntoRawFd};
+ use std::io::Write;
+
+ // Write with the fd opened during boot
+ let mut file = unsafe { std::fs::File::from_raw_fd(fd) };
+ writeln!(file, "{}", entry).unwrap();
+ file.flush().unwrap();
+ let _ = file.into_raw_fd(); // keep the fd open
+ }
+
+ LogOutput::MemoryOnly => () // Don't print or write anything
+ }
+ }
+
+ Self::get_instance().push(entry);
+ }
+}
+
+pub struct CircularBuffer<T, const N: usize> {
+ buffer: Vec<Option<T>>,
+ head: usize,
+ tail: usize,
+ size: usize
+}
+
+impl<T: Clone, const N: usize> CircularBuffer<T, N> {
+ pub fn new() -> Self {
+ Self {
+ buffer: vec![None; N],
+ head: 0,
+ tail: 0,
+ size: 0
+ }
+ }
+
+ pub fn push(&mut self, value: T) {
+ self.buffer[self.head] = Some(value);
+ self.head = (self.head + 1) % N;
+ if self.size == N {
+ self.tail = (self.tail + 1) % N;
+ } else {
+ self.size += 1;
+ }
+ }
+
+ pub fn pop(&mut self) -> Option<T> {
+ if self.size == 0 {
+ return None;
+ }
+
+ let value = self.buffer[self.tail].take();
+ self.tail = (self.tail + 1) % N;
+ self.size -= 1;
+ value
+ }
+
+ pub fn len(&self) -> usize {
+ self.size
+ }
+}
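+
+// A minimal test sketch of the wrap-around semantics above: once the buffer
+// holds N entries, a push overwrites the oldest entry and advances `tail`.
+#[cfg(test)]
+mod circular_buffer_tests {
+ use super::CircularBuffer;
+
+ #[test]
+ fn push_past_capacity_drops_oldest() {
+ let mut buf: CircularBuffer<u32, 4> = CircularBuffer::new();
+ for i in 1..=5 {
+ buf.push(i);
+ }
+ assert_eq!(buf.len(), 4);
+ // 1 was overwritten by 5; pops return the remaining entries oldest-first
+ assert_eq!(buf.pop(), Some(2));
+ assert_eq!(buf.pop(), Some(3));
+ assert_eq!(buf.pop(), Some(4));
+ assert_eq!(buf.pop(), Some(5));
+ assert_eq!(buf.pop(), None);
+ }
+}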
+
+
+//===========================================================================
+
+/// Primitive called in yjit.rb
+/// Check if log generation is enabled
+#[no_mangle]
+pub extern "C" fn rb_yjit_log_enabled_p(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
+ if get_option!(log).is_some() {
+ return Qtrue;
+ } else {
+ return Qfalse;
+ }
+}
+
+/// Primitive called in yjit.rb.
+/// Export all YJIT log entries as a Ruby array.
+#[no_mangle]
+pub extern "C" fn rb_yjit_get_log(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
+ with_vm_lock(src_loc!(), || rb_yjit_get_log_array())
+}
+
+fn rb_yjit_get_log_array() -> VALUE {
+ if !yjit_enabled_p() || get_option!(log).is_none() {
+ return Qnil;
+ }
+
+ let log = Log::get_instance();
+ let array = unsafe { rb_ary_new_capa(log.len() as c_long) };
+
+ while log.len() > 0 {
+ let entry = log.pop().unwrap();
+
+ unsafe {
+ let entry_array = rb_ary_new_capa(2);
+ rb_ary_push(entry_array, rb_float_new(entry.timestamp));
+ rb_ary_push(entry_array, entry.message.into());
+ rb_ary_push(array, entry_array);
+ }
+ }
+
+ return array;
+}
diff --git a/yjit/src/options.rs b/yjit/src/options.rs
new file mode 100644
index 0000000000..c87a436091
--- /dev/null
+++ b/yjit/src/options.rs
@@ -0,0 +1,432 @@
+use std::{ffi::{CStr, CString}, ptr::null, fs::File};
+use crate::{backend::current::TEMP_REGS, cruby::*, stats::Counter};
+use std::os::raw::{c_char, c_int, c_uint};
+
+// Call threshold for small deployments and command-line apps
+pub static SMALL_CALL_THRESHOLD: u64 = 30;
+
+// Call threshold for larger deployments and production-sized applications
+pub static LARGE_CALL_THRESHOLD: u64 = 120;
+
+// Number of live ISEQs after which we consider an app to be large
+pub static LARGE_ISEQ_COUNT: u64 = 40_000;
+
+// This option is exposed to the C side in a global variable for performance, see vm.c
+// Number of method calls after which to start generating code
+// Threshold==1 means compile on first execution
+#[no_mangle]
+pub static mut rb_yjit_call_threshold: u64 = SMALL_CALL_THRESHOLD;
+
+// This option is exposed to the C side in a global variable for performance, see vm.c
+// Number of execution requests after which a method is no longer
+// considered hot. Raising this results in more generated code.
+#[no_mangle]
+pub static mut rb_yjit_cold_threshold: u64 = 200_000;
+
+// Command-line options
+#[derive(Debug)]
+#[repr(C)]
+pub struct Options {
+ /// Soft limit of all memory used by YJIT in bytes
+ /// VirtualMem avoids allocating new pages if code_region_size + yjit_alloc_size
+ /// is larger than this threshold. Rust may still allocate memory beyond this limit.
+ pub mem_size: usize,
+
+ /// Hard limit of the executable memory block to allocate in bytes
+ /// Note that the command line argument is expressed in MiB and not bytes
+ pub exec_mem_size: Option<usize>,
+
+ // Disable the propagation of type information
+ pub no_type_prop: bool,
+
+ // Maximum number of versions per block
+ // 1 means always create generic versions
+ pub max_versions: usize,
+
+ // The number of registers allocated for stack temps
+ pub num_temp_regs: usize,
+
+ // Disable Ruby builtin methods defined by `with_jit` hooks, e.g. Array#each in Ruby
+ pub c_builtin: bool,
+
+ // Capture stats
+ pub gen_stats: bool,
+
+ // Print stats on exit (when gen_stats is also true)
+ pub print_stats: bool,
+
+ // Trace locations of exits
+ pub trace_exits: Option<TraceExits>,
+
+ // How often to sample exit trace data
+ pub trace_exits_sample_rate: usize,
+
+ // Whether to enable YJIT at boot. This option prevents other
+ // YJIT tuning options from enabling YJIT at boot.
+ pub disable: bool,
+
+ /// Dump compiled and executed instructions for debugging
+ pub dump_insns: bool,
+
+ /// Dump all compiled instructions of target cbs.
+ pub dump_disasm: Option<DumpDisasm>,
+
+ /// Print when specific ISEQ items are compiled or invalidated
+ pub dump_iseq_disasm: Option<String>,
+
+ /// Verify context objects (debug mode only)
+ pub verify_ctx: bool,
+
+ /// Enable generating frame pointers (x86 only; arm64 always generates them)
+ pub frame_pointer: bool,
+
+ /// Run code GC when exec_mem_size is reached.
+ pub code_gc: bool,
+
+ /// Enable writing /tmp/perf-{pid}.map for Linux perf
+ pub perf_map: Option<PerfMap>,
+
+ // Where to store the log. `None` disables the log.
+ pub log: Option<LogOutput>,
+}
+
+// Initialize the options to default values
+pub static mut OPTIONS: Options = Options {
+ mem_size: 128 * 1024 * 1024,
+ exec_mem_size: None,
+ no_type_prop: false,
+ max_versions: 4,
+ num_temp_regs: 5,
+ c_builtin: false,
+ gen_stats: false,
+ trace_exits: None,
+ print_stats: true,
+ trace_exits_sample_rate: 0,
+ disable: false,
+ dump_insns: false,
+ dump_disasm: None,
+ verify_ctx: false,
+ dump_iseq_disasm: None,
+ frame_pointer: false,
+ code_gc: false,
+ perf_map: None,
+ log: None,
+};
+
+/// YJIT option descriptions for `ruby --help`.
+/// Note that --help allows only 80 characters per line, including indentation. 80-character limit --> |
+pub const YJIT_OPTIONS: &'static [(&str, &str)] = &[
+ ("--yjit-mem-size=num", "Soft limit on YJIT memory usage in MiB (default: 128)."),
+ ("--yjit-exec-mem-size=num", "Hard limit on executable memory block in MiB."),
+ ("--yjit-call-threshold=num", "Number of calls to trigger JIT."),
+ ("--yjit-cold-threshold=num", "Global calls after which ISEQs not compiled (default: 200K)."),
+ ("--yjit-stats", "Enable collecting YJIT statistics."),
+ ("--yjit-log[=file|dir]", "Enable logging of YJIT's compilation activity."),
+ ("--yjit-disable", "Disable YJIT for lazily enabling it with RubyVM::YJIT.enable."),
+ ("--yjit-code-gc", "Run code GC when the code size reaches the limit."),
+ ("--yjit-perf", "Enable frame pointers and perf profiling."),
+ ("--yjit-trace-exits", "Record Ruby source location when exiting from generated code."),
+ ("--yjit-trace-exits-sample-rate=num", "Trace exit locations only every Nth occurrence."),
+];
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum TraceExits {
+ // Trace all exits
+ All,
+ // Trace a specific counter
+ Counter(Counter),
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum LogOutput {
+ // Dump to the log file as events occur.
+ File(std::os::unix::io::RawFd),
+ // Keep the log in memory only
+ MemoryOnly,
+ // Dump to stderr when the process exits
+ Stderr
+}
+
+#[derive(Debug)]
+pub enum DumpDisasm {
+ // Dump to stdout
+ Stdout,
+ // Dump to "yjit_{pid}.log" file under the specified directory
+ File(std::os::unix::io::RawFd),
+}
+
+/// Type of symbols to dump into /tmp/perf-{pid}.map
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum PerfMap {
+ // Dump ISEQ symbols
+ ISEQ,
+ // Dump YJIT codegen symbols
+ Codegen,
+}
+
+/// Macro to get an option value by name
+macro_rules! get_option {
+ // Unsafe is ok here because options are initialized
+ // once before any Ruby code executes
+ ($option_name:ident) => {
+ {
+ // Make this a statement since attributes on expressions are experimental
+ #[allow(unused_unsafe)]
+ let ret = unsafe { crate::options::OPTIONS.$option_name };
+ ret
+ }
+ };
+}
+pub(crate) use get_option;
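+
+// For example, callers read options like:
+//
+// let max_versions = get_option!(max_versions);
+// if get_option!(gen_stats) { /* collect counters */ }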
+
+/// Macro to reference an option value by name; we assume it's a cloneable type like String, or an Option of one.
+macro_rules! get_option_ref {
+ // Unsafe is ok here because options are initialized
+ // once before any Ruby code executes
+ ($option_name:ident) => {
+ unsafe { &($crate::options::OPTIONS.$option_name) }
+ };
+}
+pub(crate) use get_option_ref;
+use crate::log::Log;
+
+/// Expected to receive what comes after the third dash in "--yjit-*".
+/// An empty string means the user passed only "--yjit". The C side rejects
+/// the exact string "--yjit-".
+pub fn parse_option(str_ptr: *const std::os::raw::c_char) -> Option<()> {
+ let c_str: &CStr = unsafe { CStr::from_ptr(str_ptr) };
+ let opt_str: &str = c_str.to_str().ok()?;
+ //println!("{}", opt_str);
+
+ // Split the option name and value strings
+ // Note that some options do not contain an assignment
+ let parts = opt_str.split_once('=');
+ let (opt_name, opt_val) = match parts {
+ Some((before_eq, after_eq)) => (before_eq, after_eq),
+ None => (opt_str, ""),
+ };
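+
+ // For example, "mem-size=64" splits into ("mem-size", "64"), while a bare
+ // flag such as "code-gc" yields ("code-gc", "").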
+
+ // Match on the option name and value strings
+ match (opt_name, opt_val) {
+ ("", "") => (), // Simply --yjit
+
+ ("mem-size", _) => match opt_val.parse::<usize>() {
+ Ok(n) => {
+ if n == 0 || n > 2 * 1024 * 1024 {
+ return None
+ }
+
+ // Convert from MiB to bytes internally for convenience
+ unsafe { OPTIONS.mem_size = n * 1024 * 1024 }
+ }
+ Err(_) => {
+ return None;
+ }
+ },
+
+ ("exec-mem-size", _) => match opt_val.parse::<usize>() {
+ Ok(n) => {
+ if n == 0 || n > 2 * 1024 * 1024 {
+ return None
+ }
+
+ // Convert from MiB to bytes internally for convenience
+ unsafe { OPTIONS.exec_mem_size = Some(n * 1024 * 1024) }
+ }
+ Err(_) => {
+ return None;
+ }
+ },
+
+ ("call-threshold", _) => match opt_val.parse() {
+ Ok(n) => unsafe { rb_yjit_call_threshold = n },
+ Err(_) => {
+ return None;
+ }
+ },
+
+ ("cold-threshold", _) => match opt_val.parse() {
+ Ok(n) => unsafe { rb_yjit_cold_threshold = n },
+ Err(_) => {
+ return None;
+ }
+ },
+
+ ("max-versions", _) => match opt_val.parse() {
+ Ok(n) => unsafe { OPTIONS.max_versions = n },
+ Err(_) => {
+ return None;
+ }
+ },
+
+ ("disable", "") => unsafe {
+ OPTIONS.disable = true;
+ },
+
+ ("temp-regs", _) => match opt_val.parse() {
+ Ok(n) => {
+ assert!(n <= TEMP_REGS.len(), "--yjit-temp-regs must be <= {}", TEMP_REGS.len());
+ unsafe { OPTIONS.num_temp_regs = n }
+ }
+ Err(_) => {
+ return None;
+ }
+ },
+
+ ("c-builtin", _) => unsafe {
+ OPTIONS.c_builtin = true;
+ },
+
+ ("code-gc", _) => unsafe {
+ OPTIONS.code_gc = true;
+ },
+
+ ("perf", _) => match opt_val {
+ "" => unsafe {
+ OPTIONS.frame_pointer = true;
+ OPTIONS.perf_map = Some(PerfMap::ISEQ);
+ },
+ "fp" => unsafe { OPTIONS.frame_pointer = true },
+ "iseq" => unsafe { OPTIONS.perf_map = Some(PerfMap::ISEQ) },
+ // Accept --yjit-perf=map for backward compatibility
+ "codegen" | "map" => unsafe { OPTIONS.perf_map = Some(PerfMap::Codegen) },
+ _ => return None,
+ },
+
+ ("dump-disasm", _) => {
+ if !cfg!(feature = "disasm") {
+ eprintln!("WARNING: the {} option works best when YJIT is built in dev mode, i.e. ./configure --enable-yjit=dev", opt_name);
+ }
+
+ match opt_val {
+ "" => unsafe { OPTIONS.dump_disasm = Some(DumpDisasm::Stdout) },
+ directory => {
+ let path = format!("{directory}/yjit_{}.log", std::process::id());
+ match File::options().create(true).append(true).open(&path) {
+ Ok(file) => {
+ use std::os::unix::io::IntoRawFd;
+ eprintln!("YJIT disasm dump: {path}");
+ unsafe { OPTIONS.dump_disasm = Some(DumpDisasm::File(file.into_raw_fd())) }
+ }
+ Err(err) => eprintln!("Failed to create {path}: {err}"),
+ }
+ }
+ }
+ },
+
+ ("dump-iseq-disasm", _) => unsafe {
+ if !cfg!(feature = "disasm") {
+ eprintln!("WARNING: the {} option is only available when YJIT is built in dev mode, i.e. ./configure --enable-yjit=dev", opt_name);
+ }
+
+ OPTIONS.dump_iseq_disasm = Some(opt_val.to_string());
+ },
+
+ ("no-type-prop", "") => unsafe { OPTIONS.no_type_prop = true },
+ ("stats", _) => match opt_val {
+ "" => unsafe { OPTIONS.gen_stats = true },
+ "quiet" => unsafe {
+ OPTIONS.gen_stats = true;
+ OPTIONS.print_stats = false;
+ },
+ _ => {
+ return None;
+ }
+ },
+ ("log", _) => match opt_val {
+ "" => unsafe {
+ OPTIONS.log = Some(LogOutput::Stderr);
+ Log::init();
+ },
+ "quiet" => unsafe {
+ OPTIONS.log = Some(LogOutput::MemoryOnly);
+ Log::init();
+ },
+ arg_value => {
+ let log_file_path = if std::path::Path::new(arg_value).is_dir() {
+ format!("{arg_value}/yjit_{}.log", std::process::id())
+ } else {
+ arg_value.to_string()
+ };
+
+ match File::options().create(true).write(true).truncate(true).open(&log_file_path) {
+ Ok(file) => {
+ use std::os::unix::io::IntoRawFd;
+ eprintln!("YJIT log: {log_file_path}");
+
+ unsafe { OPTIONS.log = Some(LogOutput::File(file.into_raw_fd())) }
+ Log::init()
+ }
+ Err(err) => panic!("Failed to create {log_file_path}: {err}"),
+ }
+ }
+ },
+ ("trace-exits", _) => unsafe {
+ OPTIONS.gen_stats = true;
+ OPTIONS.trace_exits = match opt_val {
+ "" => Some(TraceExits::All),
+ name => match Counter::get(name) {
+ Some(counter) => Some(TraceExits::Counter(counter)),
+ None => return None,
+ },
+ };
+ },
+ ("trace-exits-sample-rate", sample_rate) => unsafe {
+ OPTIONS.gen_stats = true;
+ if OPTIONS.trace_exits.is_none() {
+ OPTIONS.trace_exits = Some(TraceExits::All);
+ }
+ OPTIONS.trace_exits_sample_rate = sample_rate.parse().unwrap();
+ },
+ ("dump-insns", "") => unsafe { OPTIONS.dump_insns = true },
+ ("verify-ctx", "") => unsafe { OPTIONS.verify_ctx = true },
+
+ // Option name not recognized
+ _ => {
+ return None;
+ }
+ }
+
+    // Before we return, warn when the sample rate is a composite number;
+    // non-prime rates can produce less accurate sampling data.
+ let trace_sample_rate = unsafe { OPTIONS.trace_exits_sample_rate };
+ if trace_sample_rate > 1 {
+ let mut i = 2;
+ while i*i <= trace_sample_rate {
+ if trace_sample_rate % i == 0 {
+ println!("Warning: using a non-prime number as your sampling rate can result in less accurate sampling data");
+ return Some(());
+ }
+ i += 1;
+ }
+ }
+
+ // dbg!(unsafe {OPTIONS});
+
+ // Option successfully parsed
+ return Some(());
+}
+
+/// Print YJIT options for `ruby --help`. `width` is width of option parts, and
+/// `columns` is indent width of descriptions.
+#[no_mangle]
+pub extern "C" fn rb_yjit_show_usage(help: c_int, highlight: c_int, width: c_uint, columns: c_int) {
+ for &(name, description) in YJIT_OPTIONS.iter() {
+ extern "C" {
+ fn ruby_show_usage_line(name: *const c_char, secondary: *const c_char, description: *const c_char,
+ help: c_int, highlight: c_int, width: c_uint, columns: c_int);
+ }
+ let name = CString::new(name).unwrap();
+ let description = CString::new(description).unwrap();
+ unsafe { ruby_show_usage_line(name.as_ptr(), null(), description.as_ptr(), help, highlight, width, columns) }
+ }
+}
+
+/// Return true if --yjit-c-builtin is given
+#[no_mangle]
+pub extern "C" fn rb_yjit_c_builtin_p(_ec: EcPtr, _self: VALUE) -> VALUE {
+ if get_option!(c_builtin) {
+ Qtrue
+ } else {
+ Qfalse
+ }
+}
diff --git a/yjit/src/stats.rs b/yjit/src/stats.rs
new file mode 100644
index 0000000000..105def2fff
--- /dev/null
+++ b/yjit/src/stats.rs
@@ -0,0 +1,1064 @@
+//! Everything related to the collection of runtime stats in YJIT
+//! See the --yjit-stats command-line option
+
+use std::ptr::addr_of_mut;
+use std::sync::atomic::Ordering;
+use std::time::Instant;
+use std::collections::HashMap;
+
+use crate::codegen::CodegenGlobals;
+use crate::cruby::*;
+use crate::options::*;
+use crate::yjit::{yjit_enabled_p, YJIT_INIT_TIME};
+
+#[cfg(feature = "stats_allocator")]
+#[path = "../../jit/src/lib.rs"]
+mod jit;
+
+/// Running total of how many ISeqs are in the system.
+#[no_mangle]
+pub static mut rb_yjit_live_iseq_count: u64 = 0;
+
+/// Monotonically increasing total of how many ISEQs were allocated
+#[no_mangle]
+pub static mut rb_yjit_iseq_alloc_count: u64 = 0;
+
+/// The number of bytes YJIT has allocated on the Rust heap.
+pub fn yjit_alloc_size() -> usize {
+ jit::GLOBAL_ALLOCATOR.alloc_size.load(Ordering::SeqCst)
+}
+
+/// Mapping of C function / ISEQ name to integer indices
+/// This is accessed at compilation time only (protected by a lock)
+static mut CFUNC_NAME_TO_IDX: Option<HashMap<String, usize>> = None;
+static mut ISEQ_NAME_TO_IDX: Option<HashMap<String, usize>> = None;
+
+/// Vector of call counts for each C function / ISEQ index
+/// This is modified (but not resized) by JITted code
+static mut CFUNC_CALL_COUNT: Option<Vec<u64>> = None;
+static mut ISEQ_CALL_COUNT: Option<Vec<u64>> = None;
+
+/// Assign an index to a given cfunc name string
+pub fn get_cfunc_idx(name: &str) -> usize {
+ // SAFETY: We acquire a VM lock and don't create multiple &mut references to these static mut variables.
+ unsafe { get_method_idx(name, &mut *addr_of_mut!(CFUNC_NAME_TO_IDX), &mut *addr_of_mut!(CFUNC_CALL_COUNT)) }
+}
+
+/// Assign an index to a given ISEQ name string
+pub fn get_iseq_idx(name: &str) -> usize {
+ // SAFETY: We acquire a VM lock and don't create multiple &mut references to these static mut variables.
+ unsafe { get_method_idx(name, &mut *addr_of_mut!(ISEQ_NAME_TO_IDX), &mut *addr_of_mut!(ISEQ_CALL_COUNT)) }
+}
+
+fn get_method_idx(
+ name: &str,
+ method_name_to_idx: &mut Option<HashMap<String, usize>>,
+ method_call_count: &mut Option<Vec<u64>>,
+) -> usize {
+ //println!("{}", name);
+
+ let name_to_idx = method_name_to_idx.get_or_insert_with(HashMap::default);
+ let call_count = method_call_count.get_or_insert_with(Vec::default);
+
+ match name_to_idx.get(name) {
+ Some(idx) => *idx,
+ None => {
+ let idx = name_to_idx.len();
+ name_to_idx.insert(name.to_string(), idx);
+
+ // Resize the call count vector
+ if idx >= call_count.len() {
+ call_count.resize(idx + 1, 0);
+ }
+
+ idx
+ }
+ }
+}
+
+// Increment the counter for a C function
+pub extern "C" fn incr_cfunc_counter(idx: usize) {
+ let cfunc_call_count = unsafe { CFUNC_CALL_COUNT.as_mut().unwrap() };
+ assert!(idx < cfunc_call_count.len());
+ cfunc_call_count[idx] += 1;
+}
+
+// Increment the counter for an ISEQ
+pub extern "C" fn incr_iseq_counter(idx: usize) {
+ let iseq_call_count = unsafe { ISEQ_CALL_COUNT.as_mut().unwrap() };
+ assert!(idx < iseq_call_count.len());
+ iseq_call_count[idx] += 1;
+}
+
+/// YJIT exit counts for each instruction type.
+/// Note that `VM_INSTRUCTION_SIZE` is an upper bound and the actual number
+/// of VM opcodes may be different in the build. See [`rb_vm_instruction_size()`].
+const VM_INSTRUCTION_SIZE_USIZE: usize = VM_INSTRUCTION_SIZE as usize;
+static mut EXIT_OP_COUNT: [u64; VM_INSTRUCTION_SIZE_USIZE] = [0; VM_INSTRUCTION_SIZE_USIZE];
+
+/// Global state needed for collecting backtraces of exits
+pub struct YjitExitLocations {
+ /// Vec to hold raw_samples which represent the control frames
+ /// of method entries.
+ raw_samples: Vec<VALUE>,
+ /// Vec to hold line_samples which represent line numbers of
+ /// the iseq caller.
+ line_samples: Vec<i32>,
+ /// Number of samples skipped when sampling
+ skipped_samples: usize
+}
+
+/// Private singleton instance of yjit exit locations
+static mut YJIT_EXIT_LOCATIONS: Option<YjitExitLocations> = None;
+
+impl YjitExitLocations {
+ /// Initialize the yjit exit locations
+ pub fn init() {
+ // Return if --yjit-trace-exits isn't enabled
+ if get_option!(trace_exits).is_none() {
+ return;
+ }
+
+ let yjit_exit_locations = YjitExitLocations {
+ raw_samples: Vec::new(),
+ line_samples: Vec::new(),
+ skipped_samples: 0
+ };
+
+ // Initialize the yjit exit locations instance
+ unsafe {
+ YJIT_EXIT_LOCATIONS = Some(yjit_exit_locations);
+ }
+ }
+
+ /// Get a mutable reference to the yjit exit locations globals instance
+ pub fn get_instance() -> &'static mut YjitExitLocations {
+ unsafe { YJIT_EXIT_LOCATIONS.as_mut().unwrap() }
+ }
+
+ /// Get a mutable reference to the yjit raw samples Vec
+ pub fn get_raw_samples() -> &'static mut Vec<VALUE> {
+ &mut YjitExitLocations::get_instance().raw_samples
+ }
+
+    /// Get a mutable reference to the yjit line samples Vec.
+ pub fn get_line_samples() -> &'static mut Vec<i32> {
+ &mut YjitExitLocations::get_instance().line_samples
+ }
+
+ /// Get the number of samples skipped
+ pub fn get_skipped_samples() -> &'static mut usize {
+ &mut YjitExitLocations::get_instance().skipped_samples
+ }
+
+    /// Mark the data stored in YjitExitLocations::get_raw_samples that needs to be used by
+    /// rb_yjit_add_frame. YjitExitLocations::get_raw_samples is an array of
+    /// VALUE pointers, the exit instruction, and the number of times we've seen
+    /// this stack row, as collected by rb_yjit_record_exit_stack.
+ ///
+ /// These need to have rb_gc_mark called so they can be used by rb_yjit_add_frame.
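+    ///
+    /// As a sketch, each recorded row is laid out as
+    /// `[stack_length, frame_0, ..., frame_{n-1}, exit_insn, seen_count]`;
+    /// only the frame entries need to be GC-marked.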
+ pub fn gc_mark_raw_samples() {
+ // Return if YJIT is not enabled
+ if !yjit_enabled_p() {
+ return;
+ }
+
+ // Return if --yjit-trace-exits isn't enabled
+ if get_option!(trace_exits).is_none() {
+ return;
+ }
+
+ let mut idx: size_t = 0;
+ let yjit_raw_samples = YjitExitLocations::get_raw_samples();
+
+ while idx < yjit_raw_samples.len() as size_t {
+ let num = yjit_raw_samples[idx as usize];
+ let mut i = 0;
+ idx += 1;
+
+ // Mark the yjit_raw_samples at the given index. These represent
+ // the data that needs to be GC'd which are the current frames.
+ while i < i32::from(num) {
+ unsafe { rb_gc_mark(yjit_raw_samples[idx as usize]); }
+ i += 1;
+ idx += 1;
+ }
+
+ // Increase index for exit instruction.
+ idx += 1;
+ // Increase index for bookkeeping value (number of times we've seen this
+ // row in a stack).
+ idx += 1;
+ }
+ }
+}
+
+// Macro to declare the stat counters
+macro_rules! make_counters {
+ ($($counter_name:ident,)+) => {
+ /// Struct containing the counter values
+ #[derive(Default, Debug)]
+ pub struct Counters { $(pub $counter_name: u64),+ }
+
+ /// Enum to represent a counter
+ #[allow(non_camel_case_types)]
+ #[derive(Clone, Copy, PartialEq, Eq, Debug)]
+ pub enum Counter { $($counter_name),+ }
+
+ impl Counter {
+ /// Map a counter name string to a counter enum
+ pub fn get(name: &str) -> Option<Counter> {
+ match name {
+ $( stringify!($counter_name) => { Some(Counter::$counter_name) } ),+
+ _ => None,
+ }
+ }
+
+ /// Get a counter name string
+ pub fn get_name(&self) -> String {
+ match self {
+ $( Counter::$counter_name => stringify!($counter_name).to_string() ),+
+ }
+ }
+ }
+
+ /// Global counters instance, initialized to zero
+ pub static mut COUNTERS: Counters = Counters { $($counter_name: 0),+ };
+
+ /// Counter names constant
+ const COUNTER_NAMES: &'static [&'static str] = &[ $(stringify!($counter_name)),+ ];
+
+ /// Map a counter name string to a counter pointer
+ pub fn get_counter_ptr(name: &str) -> *mut u64 {
+ match name {
+ $( stringify!($counter_name) => { ptr_to_counter!($counter_name) } ),+
+ _ => panic!()
+ }
+ }
+ }
+}
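+
+// Expansion sketch: `make_counters! { foo, }` defines a `Counters` struct with a
+// `foo: u64` field, a `Counter::foo` variant, a `"foo"` entry in `COUNTER_NAMES`,
+// and a `get_counter_ptr("foo")` arm that points at `COUNTERS.foo`.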
+
+/// The list of counters that are available without --yjit-stats.
+/// They are incremented only by `incr_counter!` and don't use `gen_counter_incr`.
+pub const DEFAULT_COUNTERS: &'static [Counter] = &[
+ Counter::code_gc_count,
+ Counter::compiled_iseq_entry,
+ Counter::cold_iseq_entry,
+ Counter::compiled_iseq_count,
+ Counter::compiled_blockid_count,
+ Counter::compiled_block_count,
+ Counter::deleted_defer_block_count,
+ Counter::compiled_branch_count,
+ Counter::compile_time_ns,
+ Counter::compilation_failure,
+ Counter::max_inline_versions,
+ Counter::inline_block_count,
+ Counter::num_contexts_encoded,
+ Counter::context_cache_hits,
+
+ Counter::invalidation_count,
+ Counter::invalidate_method_lookup,
+ Counter::invalidate_bop_redefined,
+ Counter::invalidate_ractor_spawn,
+ Counter::invalidate_constant_state_bump,
+ Counter::invalidate_constant_ic_fill,
+ Counter::invalidate_no_singleton_class,
+ Counter::invalidate_ep_escape,
+ Counter::invalidate_everything,
+];
+
+/// Macro to increase a counter by name and count
+macro_rules! incr_counter_by {
+ // Unsafe is ok here because options are initialized
+ // once before any Ruby code executes
+ ($counter_name:ident, $count:expr) => {
+ #[allow(unused_unsafe)]
+ {
+ unsafe { $crate::stats::COUNTERS.$counter_name += $count as u64 }
+ }
+ };
+}
+pub(crate) use incr_counter_by;
+
+/// Macro to increase a counter if the given value is larger
+macro_rules! incr_counter_to {
+ // Unsafe is ok here because options are initialized
+ // once before any Ruby code executes
+ ($counter_name:ident, $count:expr) => {
+ #[allow(unused_unsafe)]
+ {
+ unsafe {
+ $crate::stats::COUNTERS.$counter_name = u64::max(
+ $crate::stats::COUNTERS.$counter_name,
+ $count as u64,
+ )
+ }
+ }
+ };
+}
+pub(crate) use incr_counter_to;
+
+/// Macro to increment a counter by name
+macro_rules! incr_counter {
+ // Unsafe is ok here because options are initialized
+ // once before any Ruby code executes
+ ($counter_name:ident) => {
+ #[allow(unused_unsafe)]
+ {
+ unsafe { $crate::stats::COUNTERS.$counter_name += 1 }
+ }
+ };
+}
+pub(crate) use incr_counter;
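+// Usage sketch, as seen at call sites later in this file:
+// `incr_counter!(binding_allocations);` bumps `COUNTERS.binding_allocations` by one.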
+
+/// Macro to get a raw pointer to a given counter
+macro_rules! ptr_to_counter {
+ ($counter_name:ident) => {
+ unsafe {
+ let ctr_ptr = std::ptr::addr_of_mut!(COUNTERS.$counter_name);
+ ctr_ptr
+ }
+ };
+}
+
+// Declare all the counters we track
+make_counters! {
+ yjit_insns_count,
+
+ // Method calls that fallback to dynamic dispatch
+ send_singleton_class,
+ send_forwarding,
+ send_ivar_set_method,
+ send_zsuper_method,
+ send_undef_method,
+ send_optimized_method_block_call,
+ send_call_block,
+ send_call_kwarg,
+ send_call_multi_ractor,
+ send_cme_not_found,
+ send_megamorphic,
+ send_missing_method,
+ send_refined_method,
+ send_private_not_fcall,
+ send_cfunc_kw_splat_non_nil,
+ send_cfunc_splat_neg2,
+ send_cfunc_argc_mismatch,
+ send_cfunc_block_arg,
+ send_cfunc_toomany_args,
+ send_cfunc_tracing,
+ send_cfunc_splat_with_kw,
+ send_cfunc_splat_varg_ruby2_keywords,
+ send_attrset_kwargs,
+ send_attrset_block_arg,
+ send_iseq_tailcall,
+ send_iseq_arity_error,
+ send_iseq_block_arg_type,
+ send_iseq_clobbering_block_arg,
+ send_iseq_block_arg_gc_unsafe,
+ send_iseq_complex_discard_extras,
+ send_iseq_leaf_builtin_block_arg_block_param,
+ send_iseq_kw_splat_non_nil,
+ send_iseq_kwargs_mismatch,
+ send_iseq_has_post,
+ send_iseq_has_no_kw,
+ send_iseq_accepts_no_kwarg,
+ send_iseq_materialized_block,
+ send_iseq_send_forwarding,
+ send_iseq_splat_not_array,
+ send_iseq_splat_with_kw,
+ send_iseq_missing_optional_kw,
+ send_iseq_too_many_kwargs,
+ send_not_implemented_method,
+ send_getter_arity,
+ send_getter_block_arg,
+ send_args_splat_attrset,
+ send_args_splat_bmethod,
+ send_args_splat_aref,
+ send_args_splat_aset,
+ send_args_splat_opt_call,
+ send_iseq_splat_arity_error,
+ send_splat_too_long,
+ send_send_wrong_args,
+ send_send_null_mid,
+ send_send_null_cme,
+ send_send_nested,
+ send_send_attr_reader,
+ send_send_attr_writer,
+ send_iseq_has_rest_and_captured,
+ send_iseq_has_kwrest_and_captured,
+ send_iseq_has_rest_and_kw_supplied,
+ send_iseq_has_rest_opt_and_block,
+ send_bmethod_ractor,
+ send_bmethod_block_arg,
+ send_optimized_block_arg,
+ send_pred_not_fixnum,
+ send_pred_underflow,
+ send_str_dup_exivar,
+
+ invokesuper_defined_class_mismatch,
+ invokesuper_forwarding,
+ invokesuper_kw_splat,
+ invokesuper_kwarg,
+ invokesuper_megamorphic,
+ invokesuper_no_cme,
+ invokesuper_no_me,
+ invokesuper_not_iseq_or_cfunc,
+ invokesuper_refinement,
+ invokesuper_singleton_class,
+
+ invokeblock_megamorphic,
+ invokeblock_none,
+ invokeblock_iseq_arg0_optional,
+ invokeblock_iseq_arg0_args_splat,
+ invokeblock_iseq_arg0_not_array,
+ invokeblock_iseq_arg0_wrong_len,
+ invokeblock_iseq_not_inlined,
+ invokeblock_ifunc_args_splat,
+ invokeblock_ifunc_kw_splat,
+ invokeblock_proc,
+ invokeblock_symbol,
+
+ // Method calls that exit to the interpreter
+ guard_send_block_arg_type,
+ guard_send_getter_splat_non_empty,
+ guard_send_klass_megamorphic,
+ guard_send_se_cf_overflow,
+ guard_send_se_protected_check_failed,
+ guard_send_splatarray_length_not_equal,
+ guard_send_splatarray_last_ruby2_keywords,
+ guard_send_splat_not_array,
+ guard_send_send_name_chain,
+ guard_send_iseq_has_rest_and_splat_too_few,
+ guard_send_is_a_class_mismatch,
+ guard_send_instance_of_class_mismatch,
+ guard_send_interrupted,
+ guard_send_not_fixnums,
+ guard_send_not_fixnum,
+ guard_send_not_fixnum_or_flonum,
+ guard_send_not_string,
+ guard_send_respond_to_mid_mismatch,
+ guard_send_str_aref_not_fixnum,
+
+ guard_send_cfunc_bad_splat_vargs,
+ guard_send_cfunc_block_not_nil,
+
+ guard_invokesuper_me_changed,
+
+ guard_invokeblock_tag_changed,
+ guard_invokeblock_iseq_block_changed,
+
+ traced_cfunc_return,
+
+ leave_se_interrupt,
+ leave_interp_return,
+
+ getivar_megamorphic,
+ getivar_not_heap,
+
+ setivar_not_heap,
+ setivar_frozen,
+ setivar_megamorphic,
+
+ definedivar_not_heap,
+ definedivar_megamorphic,
+
+ setlocal_wb_required,
+
+ invokebuiltin_too_many_args,
+
+ opt_plus_overflow,
+ opt_minus_overflow,
+ opt_mult_overflow,
+
+ opt_succ_not_fixnum,
+ opt_succ_overflow,
+
+ opt_mod_zero,
+ opt_div_zero,
+
+ lshift_amount_changed,
+ lshift_overflow,
+
+ rshift_amount_changed,
+
+ opt_aref_argc_not_one,
+ opt_aref_arg_not_fixnum,
+ opt_aref_not_array,
+ opt_aref_not_hash,
+
+ opt_aset_not_array,
+ opt_aset_not_fixnum,
+ opt_aset_not_hash,
+ opt_aset_frozen,
+
+ opt_case_dispatch_megamorphic,
+
+ opt_getconstant_path_ic_miss,
+ opt_getconstant_path_multi_ractor,
+
+ expandarray_splat,
+ expandarray_postarg,
+ expandarray_not_array,
+ expandarray_to_ary,
+ expandarray_method_missing,
+ expandarray_chain_max_depth,
+
+ // getblockparam
+ gbp_wb_required,
+
+ // getblockparamproxy
+ gbpp_unsupported_type,
+ gbpp_block_param_modified,
+ gbpp_block_handler_not_none,
+ gbpp_block_handler_not_iseq,
+ gbpp_block_handler_not_proc,
+
+ branchif_interrupted,
+ branchunless_interrupted,
+ branchnil_interrupted,
+ jump_interrupted,
+
+ objtostring_not_string,
+
+ getbyte_idx_not_fixnum,
+ getbyte_idx_negative,
+ getbyte_idx_out_of_bounds,
+
+ splatkw_not_hash,
+ splatkw_not_nil,
+
+ binding_allocations,
+ binding_set,
+
+ compiled_iseq_entry,
+ cold_iseq_entry,
+ compiled_iseq_count,
+ compiled_blockid_count,
+ compiled_block_count,
+ compiled_branch_count,
+ compile_time_ns,
+ compilation_failure,
+ abandoned_block_count,
+ block_next_count,
+ defer_count,
+ defer_empty_count,
+ deleted_defer_block_count,
+ branch_insn_count,
+ branch_known_count,
+ max_inline_versions,
+ inline_block_count,
+ num_contexts_encoded,
+
+ freed_iseq_count,
+
+ exit_from_branch_stub,
+
+ invalidation_count,
+ invalidate_method_lookup,
+ invalidate_bop_redefined,
+ invalidate_ractor_spawn,
+ invalidate_constant_state_bump,
+ invalidate_constant_ic_fill,
+ invalidate_no_singleton_class,
+ invalidate_ep_escape,
+ invalidate_everything,
+
+ // Currently, it's out of the ordinary (might be impossible) for YJIT to leave gaps in
+ // executable memory, so this should be 0.
+ exec_mem_non_bump_alloc,
+
+ code_gc_count,
+
+ num_gc_obj_refs,
+
+ num_send,
+ num_send_known_class,
+ num_send_polymorphic,
+ num_send_x86_rel32,
+ num_send_x86_reg,
+ num_send_dynamic,
+ num_send_cfunc,
+ num_send_cfunc_inline,
+ num_send_iseq,
+ num_send_iseq_leaf,
+ num_send_iseq_inline,
+
+ num_getivar_megamorphic,
+ num_setivar_megamorphic,
+ num_opt_case_dispatch_megamorphic,
+
+ num_throw,
+ num_throw_break,
+ num_throw_retry,
+ num_throw_return,
+
+ num_lazy_frame_check,
+ num_lazy_frame_push,
+ lazy_frame_count,
+ lazy_frame_failure,
+
+ iseq_stack_too_large,
+ iseq_too_long,
+
+ temp_reg_opnd,
+ temp_mem_opnd,
+ temp_spill,
+
+ context_cache_hits,
+}
+
+//===========================================================================
+
+/// Primitive called in yjit.rb
+/// Check if stats generation is enabled
+#[no_mangle]
+pub extern "C" fn rb_yjit_stats_enabled_p(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
+ if get_option!(gen_stats) {
+ return Qtrue;
+ } else {
+ return Qfalse;
+ }
+}
+
+/// Primitive called in yjit.rb
+/// Check if stats generation should print at exit
+#[no_mangle]
+pub extern "C" fn rb_yjit_print_stats_p(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
+ if yjit_enabled_p() && get_option!(print_stats) {
+ return Qtrue;
+ } else {
+ return Qfalse;
+ }
+}
+
+/// Primitive called in yjit.rb.
+/// Export all YJIT statistics as a Ruby hash.
+#[no_mangle]
+pub extern "C" fn rb_yjit_get_stats(_ec: EcPtr, _ruby_self: VALUE, key: VALUE) -> VALUE {
+ with_vm_lock(src_loc!(), || rb_yjit_gen_stats_dict(key))
+}
+
+/// Primitive called in yjit.rb
+///
+/// Check if trace_exits generation is enabled.
+#[no_mangle]
+pub extern "C" fn rb_yjit_trace_exit_locations_enabled_p(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
+ if get_option!(trace_exits).is_some() {
+ return Qtrue;
+ }
+
+ return Qfalse;
+}
+
+/// Call the C function to parse the raw_samples and line_samples
+/// into raw, lines, and frames hash for RubyVM::YJIT.exit_locations.
+#[no_mangle]
+pub extern "C" fn rb_yjit_get_exit_locations(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
+ // Return if YJIT is not enabled
+ if !yjit_enabled_p() {
+ return Qnil;
+ }
+
+ // Return if --yjit-trace-exits isn't enabled
+ if get_option!(trace_exits).is_none() {
+ return Qnil;
+ }
+
+ // Pass yjit_raw_samples and yjit_line_samples
+ // to the C function called rb_yjit_exit_locations_dict for parsing.
+ let yjit_raw_samples = YjitExitLocations::get_raw_samples();
+ let yjit_line_samples = YjitExitLocations::get_line_samples();
+
+ // Assert that the two Vec's are the same length. If they aren't
+ // equal something went wrong.
+ assert_eq!(yjit_raw_samples.len(), yjit_line_samples.len());
+
+ // yjit_raw_samples and yjit_line_samples are the same length so
+ // pass only one of the lengths in the C function.
+ let samples_len = yjit_raw_samples.len() as i32;
+
+ unsafe {
+ rb_yjit_exit_locations_dict(yjit_raw_samples.as_mut_ptr(), yjit_line_samples.as_mut_ptr(), samples_len)
+ }
+}
+
+/// Increment a counter by name from the CRuby side
+/// Warning: this is not fast because it requires a hash lookup, so don't use in tight loops
+#[no_mangle]
+pub extern "C" fn rb_yjit_incr_counter(counter_name: *const std::os::raw::c_char) {
+ use std::ffi::CStr;
+ let counter_name = unsafe { CStr::from_ptr(counter_name).to_str().unwrap() };
+ let counter_ptr = get_counter_ptr(counter_name);
+ unsafe { *counter_ptr += 1 };
+}
+
+/// Export YJIT statistics as a Ruby hash, or return just the stat named by `key` when it is not Qnil.
+fn rb_yjit_gen_stats_dict(key: VALUE) -> VALUE {
+ // If YJIT is not enabled, return Qnil
+ if !yjit_enabled_p() {
+ return Qnil;
+ }
+
+ let hash = if key == Qnil {
+ unsafe { rb_hash_new() }
+ } else {
+ Qnil
+ };
+
+ macro_rules! set_stat {
+ ($hash:ident, $name:expr, $value:expr) => {
+ let rb_key = rust_str_to_sym($name);
+ if key == rb_key {
+ return $value;
+ } else if hash != Qnil {
+ rb_hash_aset($hash, rb_key, $value);
+ }
+ }
+ }
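+    // Behavior sketch: when the requested `key` matches `$name`, that single stat
+    // is returned from the enclosing function immediately; otherwise, when building
+    // the full hash, the stat is inserted into it.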
+
+ macro_rules! set_stat_usize {
+ ($hash:ident, $name:expr, $value:expr) => {
+ set_stat!($hash, $name, VALUE::fixnum_from_usize($value));
+ }
+ }
+
+ macro_rules! set_stat_double {
+ ($hash:ident, $name:expr, $value:expr) => {
+ set_stat!($hash, $name, rb_float_new($value));
+ }
+ }
+
+ unsafe {
+ // Get the inline and outlined code blocks
+ let cb = CodegenGlobals::get_inline_cb();
+ let ocb = CodegenGlobals::get_outlined_cb();
+
+ // Inline code size
+ set_stat_usize!(hash, "inline_code_size", cb.code_size());
+
+ // Outlined code size
+ set_stat_usize!(hash, "outlined_code_size", ocb.unwrap().code_size());
+
+ // GCed pages
+ let freed_page_count = cb.num_freed_pages();
+ set_stat_usize!(hash, "freed_page_count", freed_page_count);
+
+ // GCed code size
+ set_stat_usize!(hash, "freed_code_size", freed_page_count * cb.page_size());
+
+ // Live pages
+ set_stat_usize!(hash, "live_page_count", cb.num_mapped_pages() - freed_page_count);
+
+ // Size of memory region allocated for JIT code
+ set_stat_usize!(hash, "code_region_size", cb.mapped_region_size());
+
+ // Rust global allocations in bytes
+ set_stat_usize!(hash, "yjit_alloc_size", yjit_alloc_size());
+
+ // How many bytes we are using to store context data
+ let context_data = CodegenGlobals::get_context_data();
+ set_stat_usize!(hash, "context_data_bytes", context_data.num_bytes());
+ set_stat_usize!(hash, "context_cache_bytes", crate::core::CTX_ENCODE_CACHE_BYTES + crate::core::CTX_DECODE_CACHE_BYTES);
+
+ // VM instructions count
+ if rb_vm_insn_count > 0 {
+ set_stat_usize!(hash, "vm_insns_count", rb_vm_insn_count as usize);
+ }
+
+ set_stat_usize!(hash, "live_iseq_count", rb_yjit_live_iseq_count as usize);
+ set_stat_usize!(hash, "iseq_alloc_count", rb_yjit_iseq_alloc_count as usize);
+
+ set_stat!(hash, "object_shape_count", rb_object_shape_count());
+
+ // Time since YJIT init in nanoseconds
+ let time_nanos = Instant::now().duration_since(YJIT_INIT_TIME.unwrap()).as_nanos();
+ set_stat_usize!(hash, "yjit_active_ns", time_nanos as usize);
+ }
+
+ // If we're not generating stats, put only default counters
+ if !get_option!(gen_stats) {
+ for counter in DEFAULT_COUNTERS {
+ // Get the counter value
+ let counter_ptr = get_counter_ptr(&counter.get_name());
+ let counter_val = unsafe { *counter_ptr };
+
+ // Put counter into hash
+ let key = &counter.get_name();
+ let value = VALUE::fixnum_from_usize(counter_val as usize);
+ unsafe { set_stat!(hash, key, value); }
+ }
+
+ return hash;
+ }
+
+ unsafe {
+ // Indicate that the complete set of stats is available
+ set_stat!(hash, "all_stats", Qtrue);
+
+ // For each counter we track
+ for counter_name in COUNTER_NAMES {
+ // Get the counter value
+ let counter_ptr = get_counter_ptr(counter_name);
+ let counter_val = *counter_ptr;
+ set_stat_usize!(hash, counter_name, counter_val as usize);
+ }
+
+ let mut side_exits = 0;
+
+ // For each entry in exit_op_count, add a stats entry with key "exit_INSTRUCTION_NAME"
+ // and the value is the count of side exits for that instruction.
+ use crate::utils::IntoUsize;
+ for op_idx in 0..rb_vm_instruction_size().as_usize() {
+ let op_name = insn_name(op_idx);
+ let key_string = "exit_".to_owned() + &op_name;
+ let count = EXIT_OP_COUNT[op_idx];
+ side_exits += count;
+ set_stat_usize!(hash, &key_string, count as usize);
+ }
+
+ set_stat_usize!(hash, "side_exit_count", side_exits as usize);
+
+ let total_exits = side_exits + *get_counter_ptr(&Counter::leave_interp_return.get_name());
+ set_stat_usize!(hash, "total_exit_count", total_exits as usize);
+
+ // Number of instructions that finish executing in YJIT.
+ // See :count-placement: about the subtraction.
+ let retired_in_yjit = *get_counter_ptr(&Counter::yjit_insns_count.get_name()) - side_exits;
+
+ // Average length of instruction sequences executed by YJIT
+ let avg_len_in_yjit: f64 = if total_exits > 0 {
+ retired_in_yjit as f64 / total_exits as f64
+ } else {
+ 0_f64
+ };
+ set_stat_double!(hash, "avg_len_in_yjit", avg_len_in_yjit);
+
+ // Proportion of instructions that retire in YJIT
+ if rb_vm_insn_count > 0 {
+ let total_insns_count = retired_in_yjit + rb_vm_insn_count;
+ set_stat_usize!(hash, "total_insns_count", total_insns_count as usize);
+
+ let ratio_in_yjit: f64 = 100.0 * retired_in_yjit as f64 / total_insns_count as f64;
+ set_stat_double!(hash, "ratio_in_yjit", ratio_in_yjit);
+ }
+
+ // Set method call counts in a Ruby dict
+ fn set_call_counts(
+ calls_hash: VALUE,
+ method_name_to_idx: &mut Option<HashMap<String, usize>>,
+ method_call_count: &mut Option<Vec<u64>>,
+ ) {
+ if let (Some(name_to_idx), Some(call_counts)) = (method_name_to_idx, method_call_count) {
+ // Create a list of (name, call_count) pairs
+ let mut pairs = Vec::new();
+ for (name, idx) in name_to_idx {
+ let count = call_counts[*idx];
+ pairs.push((name, count));
+ }
+
+ // Sort the vectors by decreasing call counts
+ pairs.sort_by_key(|e| -(e.1 as i64));
+
+ // Cap the number of counts reported to avoid
+ // bloating log files, etc.
+ pairs.truncate(20);
+
+ // Add the pairs to the dict
+ for (name, call_count) in pairs {
+ let key = rust_str_to_sym(name);
+ let value = VALUE::fixnum_from_usize(call_count as usize);
+ unsafe { rb_hash_aset(calls_hash, key, value); }
+ }
+ }
+ }
+
+ // Create a hash for the cfunc call counts
+ set_stat!(hash, "cfunc_calls", {
+ let cfunc_calls = rb_hash_new();
+ set_call_counts(cfunc_calls, &mut *addr_of_mut!(CFUNC_NAME_TO_IDX), &mut *addr_of_mut!(CFUNC_CALL_COUNT));
+ cfunc_calls
+ });
+
+ // Create a hash for the ISEQ call counts
+ set_stat!(hash, "iseq_calls", {
+ let iseq_calls = rb_hash_new();
+ set_call_counts(iseq_calls, &mut *addr_of_mut!(ISEQ_NAME_TO_IDX), &mut *addr_of_mut!(ISEQ_CALL_COUNT));
+ iseq_calls
+ });
+ }
+
+ hash
+}
+
+/// Record the backtrace when a YJIT exit occurs. This functionality requires
+/// the --yjit-trace-exits option.
+///
+/// This function will fill two Vec's in YjitExitLocations to record the raw samples
+/// and line samples. Their lengths should be the same; however, the data stored in
+/// them is different.
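+///
+/// Sketch of one recorded row, derived from the pushes below:
+///   raw_samples:  [stack_length, frame_0, ..., frame_{n-1}, exit_insn, seen_count]
+///   line_samples: [stack_length, line_0, ..., line_{n-1}, 0, seen_count]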
+#[no_mangle]
+pub extern "C" fn rb_yjit_record_exit_stack(exit_pc: *const VALUE)
+{
+ // Return if YJIT is not enabled
+ if !yjit_enabled_p() {
+ return;
+ }
+
+ // Return if --yjit-trace-exits isn't enabled
+ if get_option!(trace_exits).is_none() {
+ return;
+ }
+
+ if get_option!(trace_exits_sample_rate) > 0 {
+ if get_option!(trace_exits_sample_rate) <= *YjitExitLocations::get_skipped_samples() {
+ YjitExitLocations::get_instance().skipped_samples = 0;
+ } else {
+ YjitExitLocations::get_instance().skipped_samples += 1;
+ return;
+ }
+ }
+
+    // rb_vm_insn_addr2opcode won't work in `cargo test --all-features`
+    // because it's a C function. Without that call this function is useless,
+    // so wrap the whole thing in a not(test) check.
+ let _ = exit_pc;
+ #[cfg(not(test))]
+ {
+ // Get the opcode from the encoded insn handler at this PC
+ let insn = unsafe { rb_vm_insn_addr2opcode((*exit_pc).as_ptr()) };
+
+ // Use the same buffer size as Stackprof.
+ const BUFF_LEN: usize = 2048;
+
+ // Create 2 array buffers to be used to collect frames and lines.
+ let mut frames_buffer = [VALUE(0_usize); BUFF_LEN];
+ let mut lines_buffer = [0; BUFF_LEN];
+
+ // Records call frame and line information for each method entry into two
+ // temporary buffers. Returns the number of times we added to the buffer (ie
+ // the length of the stack).
+ //
+ // Call frame info is stored in the frames_buffer, line number information
+ // in the lines_buffer. The first argument is the start point and the second
+ // argument is the buffer limit, set at 2048.
+ let stack_length = unsafe { rb_profile_frames(0, BUFF_LEN as i32, frames_buffer.as_mut_ptr(), lines_buffer.as_mut_ptr()) };
+ let samples_length = (stack_length as usize) + 3;
+
+ let yjit_raw_samples = YjitExitLocations::get_raw_samples();
+ let yjit_line_samples = YjitExitLocations::get_line_samples();
+
+        // If yjit_raw_samples already holds at least samples_length entries,
+        // we might have seen this stack trace previously.
+ if yjit_raw_samples.len() >= samples_length {
+ let prev_stack_len_index = yjit_raw_samples.len() - samples_length;
+ let prev_stack_len = i64::from(yjit_raw_samples[prev_stack_len_index]);
+ let mut idx = stack_length - 1;
+ let mut prev_frame_idx = 0;
+ let mut seen_already = true;
+
+ // If the previous stack length and current stack length are equal,
+ // loop and compare the current frame to the previous frame. If they are
+ // not equal, set seen_already to false and break out of the loop.
+ if prev_stack_len == stack_length as i64 {
+ while idx >= 0 {
+ let current_frame = frames_buffer[idx as usize];
+ let prev_frame = yjit_raw_samples[prev_stack_len_index + prev_frame_idx + 1];
+
+ // If the current frame and previous frame are not equal, set
+ // seen_already to false and break out of the loop.
+ if current_frame != prev_frame {
+ seen_already = false;
+ break;
+ }
+
+ idx -= 1;
+ prev_frame_idx += 1;
+ }
+
+ // If we know we've seen this stack before, increment the counter by 1.
+ if seen_already {
+ let prev_idx = yjit_raw_samples.len() - 1;
+ let prev_count = i64::from(yjit_raw_samples[prev_idx]);
+ let new_count = prev_count + 1;
+
+ yjit_raw_samples[prev_idx] = VALUE(new_count as usize);
+ yjit_line_samples[prev_idx] = new_count as i32;
+
+ return;
+ }
+ }
+ }
+
+ yjit_raw_samples.push(VALUE(stack_length as usize));
+ yjit_line_samples.push(stack_length);
+
+ let mut idx = stack_length - 1;
+
+ while idx >= 0 {
+ let frame = frames_buffer[idx as usize];
+ let line = lines_buffer[idx as usize];
+
+ yjit_raw_samples.push(frame);
+ yjit_line_samples.push(line);
+
+ idx -= 1;
+ }
+
+ // Push the insn value into the yjit_raw_samples Vec.
+ yjit_raw_samples.push(VALUE(insn as usize));
+
+ // We don't know the line
+ yjit_line_samples.push(0);
+
+ // Push number of times seen onto the stack, which is 1
+ // because it's the first time we've seen it.
+ yjit_raw_samples.push(VALUE(1_usize));
+ yjit_line_samples.push(1);
+ }
+}
+
+/// Primitive called in yjit.rb. Zero out all the counters.
+#[no_mangle]
+pub extern "C" fn rb_yjit_reset_stats_bang(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
+ unsafe {
+ EXIT_OP_COUNT = [0; VM_INSTRUCTION_SIZE_USIZE];
+ COUNTERS = Counters::default();
+ }
+
+ return Qnil;
+}
+
+#[no_mangle]
+pub extern "C" fn rb_yjit_collect_binding_alloc() {
+ incr_counter!(binding_allocations);
+}
+
+#[no_mangle]
+pub extern "C" fn rb_yjit_collect_binding_set() {
+ incr_counter!(binding_set);
+}
+
+#[no_mangle]
+pub extern "C" fn rb_yjit_count_side_exit_op(exit_pc: *const VALUE) -> *const VALUE {
+ #[cfg(not(test))]
+ unsafe {
+ // Get the opcode from the encoded insn handler at this PC
+ let opcode = rb_vm_insn_addr2opcode((*exit_pc).as_ptr());
+
+ // Increment the exit op count for this opcode
+ EXIT_OP_COUNT[opcode as usize] += 1;
+ };
+
+ // This function must return exit_pc!
+ return exit_pc;
+}
+
+/// Measure the time taken by func() and add that to yjit_compile_time.
+pub fn with_compile_time<F, R>(func: F) -> R where F: FnOnce() -> R {
+ let start = Instant::now();
+ let ret = func();
+ let nanos = Instant::now().duration_since(start).as_nanos();
+ incr_counter_by!(compile_time_ns, nanos);
+ ret
+}
diff --git a/yjit/src/utils.rs b/yjit/src/utils.rs
new file mode 100644
index 0000000000..251628fabf
--- /dev/null
+++ b/yjit/src/utils.rs
@@ -0,0 +1,287 @@
+#![allow(dead_code)] // Some functions for print debugging in here
+
+use crate::backend::ir::*;
+use crate::cruby::*;
+use std::slice;
+use std::os::raw::c_int;
+
+/// Trait for casting to [usize] that allows you to say `.as_usize()`.
+/// Implementation conditional on the cast preserving the numeric value on
+/// all inputs and being inexpensive.
+///
+/// [usize] is only guaranteed to be at least 16 bits wide, so we can't use
+/// `.into()` to cast a `u32` or a `u64` to a `usize` even though on all
+/// the platforms YJIT supports these two casts are pretty much no-ops.
+/// We could say `as usize` or `.try_into().unwrap()` everywhere
+/// for those casts but they both have undesirable consequences if and when
+/// we decide to support 32-bit platforms. Unfortunately we can't implement
+/// [::core::convert::From] for [usize] since both the trait and the type are
+/// external. Naming the method `into()` also runs into naming conflicts.
+pub(crate) trait IntoUsize {
+ /// Convert to usize. Implementation conditional on width of [usize].
+ fn as_usize(self) -> usize;
+}
+
+#[cfg(target_pointer_width = "64")]
+impl IntoUsize for u64 {
+ fn as_usize(self) -> usize {
+ self as usize
+ }
+}
+
+#[cfg(target_pointer_width = "64")]
+impl IntoUsize for u32 {
+ fn as_usize(self) -> usize {
+ self as usize
+ }
+}
+
+impl IntoUsize for u16 {
+    /// Alias for `.into()`. For convenience, so you can use the trait for
+    /// all unsigned types.
+ fn as_usize(self) -> usize {
+ self.into()
+ }
+}
+
+impl IntoUsize for u8 {
+    /// Alias for `.into()`. For convenience, so you can use the trait for
+    /// all unsigned types.
+ fn as_usize(self) -> usize {
+ self.into()
+ }
+}
+
+/// The `Into<u64>` Rust does not provide.
+/// Convert to u64 with assurance that the value is preserved.
+/// Currently, `usize::BITS == 64` holds for all platforms we support.
+pub(crate) trait IntoU64 {
+ fn as_u64(self) -> u64;
+}
+
+#[cfg(target_pointer_width = "64")]
+impl IntoU64 for usize {
+ fn as_u64(self) -> u64 {
+ self as u64
+ }
+}
+
+/// Compute an offset in bytes of a given struct field
+#[allow(unused)]
+macro_rules! offset_of {
+ ($struct_type:ty, $field_name:tt) => {{
+ // This is basically the exact example for
+ // "creating a pointer to uninitialized data" from `std::ptr::addr_of_mut`.
+ // We make a dummy local that hopefully is optimized away because we never
+ // read or write its contents. Doing this dance to avoid UB.
+ let mut instance = std::mem::MaybeUninit::<$struct_type>::uninit();
+
+ let base_ptr = instance.as_mut_ptr();
+ let field_ptr = unsafe { std::ptr::addr_of_mut!((*base_ptr).$field_name) };
+
+ (field_ptr as usize) - (base_ptr as usize)
+ }};
+}
+#[allow(unused)]
+pub(crate) use offset_of;
+
+// Convert a CRuby UTF-8-encoded RSTRING into a Rust string.
+// This should work fine on ASCII strings and anything else
+// that is considered legal UTF-8, including embedded nulls.
+pub fn ruby_str_to_rust(v: VALUE) -> String {
+ let str_ptr = unsafe { rb_RSTRING_PTR(v) } as *mut u8;
+ let str_len: usize = unsafe { rb_RSTRING_LEN(v) }.try_into().unwrap();
+ let str_slice: &[u8] = unsafe { slice::from_raw_parts(str_ptr, str_len) };
+ String::from_utf8(str_slice.to_vec()).unwrap_or_default()
+}
+
+// Location is the iseq label, "@", the path of the file defining the method,
+// ":", and the line number. Filenames are sometimes internal strings supplied
+// to eval, so be careful with them.
+pub fn iseq_get_location(iseq: IseqPtr, pos: u16) -> String {
+ let iseq_label = unsafe { rb_iseq_label(iseq) };
+ let iseq_path = unsafe { rb_iseq_path(iseq) };
+ let iseq_lineno = unsafe { rb_iseq_line_no(iseq, pos as usize) };
+
+ let mut s = if iseq_label == Qnil {
+ "None".to_string()
+ } else {
+ ruby_str_to_rust(iseq_label)
+ };
+ s.push_str("@");
+ if iseq_path == Qnil {
+ s.push_str("None");
+ } else {
+ s.push_str(&ruby_str_to_rust(iseq_path));
+ }
+ s.push_str(":");
+ s.push_str(&iseq_lineno.to_string());
+ s
+}
+
+// TODO: we may want to move this function into yjit.c, maybe add a convenient Rust-side wrapper
+/*
+// For debugging. Print the bytecode for an iseq.
+RBIMPL_ATTR_MAYBE_UNUSED()
+static void
+yjit_print_iseq(const rb_iseq_t *iseq)
+{
+ char *ptr;
+ long len;
+ VALUE disassembly = rb_iseq_disasm(iseq);
+ RSTRING_GETMEM(disassembly, ptr, len);
+ fprintf(stderr, "%.*s\n", (int)len, ptr);
+}
+*/
+
+#[cfg(target_arch = "aarch64")]
+macro_rules! c_callable {
+ ($(#[$outer:meta])*
+ fn $f:ident $args:tt $(-> $ret:ty)? $body:block) => {
+ $(#[$outer])*
+ extern "C" fn $f $args $(-> $ret)? $body
+ };
+}
+
+#[cfg(target_arch = "x86_64")]
+macro_rules! c_callable {
+ ($(#[$outer:meta])*
+ fn $f:ident $args:tt $(-> $ret:ty)? $body:block) => {
+ $(#[$outer])*
+ extern "sysv64" fn $f $args $(-> $ret)? $body
+ };
+}
+pub(crate) use c_callable;
+
+pub fn print_int(asm: &mut Assembler, opnd: Opnd) {
+ c_callable!{
+ fn print_int_fn(val: i64) {
+ println!("{}", val);
+ }
+ }
+
+ let argument = match opnd {
+ Opnd::Mem(_) | Opnd::Reg(_) | Opnd::InsnOut { .. } => {
+ // Sign-extend the value if necessary
+ if opnd.rm_num_bits() < 64 {
+ asm.load_sext(opnd)
+ } else {
+ opnd
+ }
+ },
+ Opnd::Imm(_) | Opnd::UImm(_) => opnd,
+ _ => unreachable!(),
+ };
+
+ asm.ccall(print_int_fn as *const u8, vec![argument]);
+}
+
+/// Generate code to print a pointer
+pub fn print_ptr(asm: &mut Assembler, opnd: Opnd) {
+ c_callable!{
+ fn print_ptr_fn(ptr: *const u8) {
+ println!("{:p}", ptr);
+ }
+ }
+
+ assert!(opnd.rm_num_bits() == 64);
+
+ asm.ccall(print_ptr_fn as *const u8, vec![opnd]);
+}
+
+/// Generate code to print a value
+pub fn print_value(asm: &mut Assembler, opnd: Opnd) {
+ c_callable!{
+ fn print_value_fn(val: VALUE) {
+ unsafe { rb_obj_info_dump(val) }
+ }
+ }
+
+ assert!(matches!(opnd, Opnd::Value(_)));
+
+ asm.ccall(print_value_fn as *const u8, vec![opnd]);
+}
+
+/// Generate code to print constant string to stdout
+pub fn print_str(asm: &mut Assembler, str: &str) {
+ c_callable!{
+ fn print_str_cfun(ptr: *const u8, num_bytes: usize) {
+ unsafe {
+ let slice = slice::from_raw_parts(ptr, num_bytes);
+ let str = std::str::from_utf8(slice).unwrap();
+ println!("{}", str);
+ }
+ }
+ }
+
+ let string_data = asm.new_label("string_data");
+ let after_string = asm.new_label("after_string");
+
+ asm.jmp(after_string);
+ asm.write_label(string_data);
+ asm.bake_string(str);
+ asm.write_label(after_string);
+
+ let opnd = asm.lea_jump_target(string_data);
+ asm.ccall(print_str_cfun as *const u8, vec![opnd, Opnd::UImm(str.len() as u64)]);
+}
+
+pub fn stdout_supports_colors() -> bool {
+ // TODO(max): Use std::io::IsTerminal after upgrading Rust to 1.70
+ extern "C" { fn isatty(fd: c_int) -> c_int; }
+ let stdout = 1;
+ let is_terminal = unsafe { isatty(stdout) } == 1;
+ is_terminal
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::asm::CodeBlock;
+
+ #[test]
+ fn min_max_preserved_after_cast_to_usize() {
+ use crate::utils::IntoUsize;
+
+ let min: usize = u64::MIN.as_usize();
+ assert_eq!(min, u64::MIN.try_into().unwrap());
+ let max: usize = u64::MAX.as_usize();
+ assert_eq!(max, u64::MAX.try_into().unwrap());
+
+ let min: usize = u32::MIN.as_usize();
+ assert_eq!(min, u32::MIN.try_into().unwrap());
+ let max: usize = u32::MAX.as_usize();
+ assert_eq!(max, u32::MAX.try_into().unwrap());
+ }
+
+ #[test]
+ fn test_offset_of() {
+ #[repr(C)]
+ struct Foo {
+ a: u8,
+ b: u64,
+ }
+
+ assert_eq!(0, offset_of!(Foo, a), "C99 6.7.2.1p13 says no padding at the front");
+ assert_eq!(8, offset_of!(Foo, b), "ABI dependent, but should hold");
+ }
+
+ #[test]
+ fn test_print_int() {
+ let mut asm = Assembler::new_without_iseq();
+ let mut cb = CodeBlock::new_dummy(1024);
+
+ print_int(&mut asm, Opnd::Imm(42));
+ asm.compile(&mut cb, None).unwrap();
+ }
+
+ #[test]
+ fn test_print_str() {
+ let mut asm = Assembler::new_without_iseq();
+ let mut cb = CodeBlock::new_dummy(1024);
+
+ print_str(&mut asm, "Hello, world!");
+ asm.compile(&mut cb, None).unwrap();
+ }
+}
diff --git a/yjit/src/virtualmem.rs b/yjit/src/virtualmem.rs
new file mode 100644
index 0000000000..9126cf300e
--- /dev/null
+++ b/yjit/src/virtualmem.rs
@@ -0,0 +1,488 @@
+//! Memory management stuff for YJIT's code storage. Deals with virtual memory.
+// I'm aware that there is an experiment in Rust Nightly right now to see if banning
+// usize->pointer casts is viable. It seems like a lot of work for us to participate in for not
+// much benefit.
+
+use std::{cell::RefCell, ptr::NonNull};
+
+use crate::{backend::ir::Target, stats::yjit_alloc_size, utils::IntoUsize};
+
+#[cfg(test)]
+use crate::options::get_option;
+
+#[cfg(not(test))]
+pub type VirtualMem = VirtualMemory<sys::SystemAllocator>;
+
+#[cfg(test)]
+pub type VirtualMem = VirtualMemory<tests::TestingAllocator>;
+
+/// Memory for generated executable machine code. When not testing, we reserve address space for
+/// the entire region upfront and map physical memory into the reserved address space as needed. On
+/// Linux, this is basically done using an `mmap` with `PROT_NONE` upfront and gradually using
+/// `mprotect` with `PROT_READ|PROT_WRITE` as needed. The WIN32 equivalent seems to be
+/// `VirtualAlloc` with `MEM_RESERVE` then later with `MEM_COMMIT`.
+///
+/// This handles ["W^X"](https://en.wikipedia.org/wiki/W%5EX) semi-automatically. Writes
+/// are always accepted and once writes are done a call to [Self::mark_all_executable] makes
+/// the code in the region executable.
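+///
+/// A hedged usage sketch: write code bytes with [Self::write_byte] (each page is
+/// made writable on first touch), then call [Self::mark_all_executable] once the
+/// write session is over.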
+pub struct VirtualMemory<A: Allocator> {
+ /// Location of the virtual memory region.
+ region_start: NonNull<u8>,
+
+ /// Size of this virtual memory region in bytes.
+ region_size_bytes: usize,
+
+ /// mapped_region_bytes + yjit_alloc_size may not increase beyond this limit.
+ memory_limit_bytes: usize,
+
+ /// Number of bytes per "page", memory protection permission can only be controlled at this
+ /// granularity.
+ page_size_bytes: usize,
+
+ /// Mutable parts.
+ mutable: RefCell<VirtualMemoryMut<A>>,
+}
+
+/// Mutable parts of [`VirtualMemory`].
+pub struct VirtualMemoryMut<A: Allocator> {
+    /// Number of bytes that we have allocated physical memory for, starting at
+    /// [VirtualMemory::region_start].
+ mapped_region_bytes: usize,
+
+ /// Keep track of the address of the last written to page.
+ /// Used for changing protection to implement W^X.
+ current_write_page: Option<usize>,
+
+ /// Zero size member for making syscalls to get physical memory during normal operation.
+ /// When testing this owns some memory.
+ allocator: A,
+}
+
+/// Groups together the two syscalls to get new physical memory and to change
+/// memory protection. See [VirtualMemory] for details.
+pub trait Allocator {
+ #[must_use]
+ fn mark_writable(&mut self, ptr: *const u8, size: u32) -> bool;
+
+ fn mark_executable(&mut self, ptr: *const u8, size: u32);
+
+ fn mark_unused(&mut self, ptr: *const u8, size: u32) -> bool;
+}
+
+/// Pointer into a [VirtualMemory] represented as an offset from the base.
+/// Note: there is no NULL constant for [CodePtr]. You should use `Option<CodePtr>` instead.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Debug)]
+#[repr(C, packed)]
+pub struct CodePtr(u32);
+
+impl CodePtr {
+ /// Advance the CodePtr. Can return a dangling pointer.
+ pub fn add_bytes(self, bytes: usize) -> Self {
+ let CodePtr(raw) = self;
+ let bytes: u32 = bytes.try_into().unwrap();
+ CodePtr(raw + bytes)
+ }
+
+    /// Note that the raw pointer might be dangling if there haven't
+    /// been any writes to it through the [VirtualMemory] yet.
+ pub fn raw_ptr(self, base: &impl CodePtrBase) -> *const u8 {
+ let CodePtr(offset) = self;
+ return base.base_ptr().as_ptr().wrapping_add(offset.as_usize())
+ }
+
+ /// Get the address of the code pointer.
+ pub fn raw_addr(self, base: &impl CodePtrBase) -> usize {
+ self.raw_ptr(base) as usize
+ }
+
+    /// Get the offset component for the code pointer. Useful for finding the distance
+    /// between two code pointers that share the same [VirtualMem].
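+    /// As a sketch, `b.as_offset() - a.as_offset()` is the signed byte distance.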
+ pub fn as_offset(self) -> i64 {
+ let CodePtr(offset) = self;
+ offset.into()
+ }
+
+ pub fn as_side_exit(self) -> Target {
+ Target::SideExitPtr(self)
+ }
+}
+
+/// Errors that can happen when writing to [VirtualMemory]
+#[derive(Debug, PartialEq)]
+pub enum WriteError {
+ OutOfBounds,
+ FailedPageMapping,
+}
+
+use WriteError::*;
+
+impl<A: Allocator> VirtualMemory<A> {
+ /// Bring a part of the address space under management.
+ pub fn new(
+ allocator: A,
+ page_size: u32,
+ virt_region_start: NonNull<u8>,
+ region_size_bytes: usize,
+ memory_limit_bytes: usize,
+ ) -> Self {
+ assert_ne!(0, page_size);
+ let page_size_bytes = page_size.as_usize();
+
+ Self {
+ region_start: virt_region_start,
+ region_size_bytes,
+ memory_limit_bytes,
+ page_size_bytes,
+ mutable: RefCell::new(VirtualMemoryMut {
+ mapped_region_bytes: 0,
+ current_write_page: None,
+ allocator,
+ }),
+ }
+ }
+
+    /// Return the start of the region as a [CodePtr]. Note that pointers derived
+    /// from it could be dangling, so be careful dereferencing them.
+ pub fn start_ptr(&self) -> CodePtr {
+ CodePtr(0)
+ }
+
+ pub fn mapped_end_ptr(&self) -> CodePtr {
+ self.start_ptr().add_bytes(self.mutable.borrow().mapped_region_bytes)
+ }
+
+ pub fn virtual_end_ptr(&self) -> CodePtr {
+ self.start_ptr().add_bytes(self.region_size_bytes)
+ }
+
+ /// Size of the region in bytes that we have allocated physical memory for.
+ pub fn mapped_region_size(&self) -> usize {
+ self.mutable.borrow().mapped_region_bytes
+ }
+
+ /// Size of the region in bytes where writes could be attempted.
+ pub fn virtual_region_size(&self) -> usize {
+ self.region_size_bytes
+ }
+
+ /// The granularity at which we can control memory permission.
+ /// On Linux, this is the page size that mmap(2) talks about.
+ pub fn system_page_size(&self) -> usize {
+ self.page_size_bytes
+ }
+
+    /// Write a single byte. The first write to a page makes it writable.
+ pub fn write_byte(&self, write_ptr: CodePtr, byte: u8) -> Result<(), WriteError> {
+ let mut mutable = self.mutable.borrow_mut();
+
+ let page_size = self.page_size_bytes;
+ let raw: *mut u8 = write_ptr.raw_ptr(self) as *mut u8;
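+        // Round the raw write address down to its enclosing page boundary.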
+ let page_addr = (raw as usize / page_size) * page_size;
+
+ if mutable.current_write_page == Some(page_addr) {
+ // Writing within the last written to page, nothing to do
+ } else {
+ // Switching to a different and potentially new page
+ let start = self.region_start.as_ptr();
+ let mapped_region_end = start.wrapping_add(mutable.mapped_region_bytes);
+ let whole_region_end = start.wrapping_add(self.region_size_bytes);
+ let alloc = &mut mutable.allocator;
+
+ assert!((start..=whole_region_end).contains(&mapped_region_end));
+
+ if (start..mapped_region_end).contains(&raw) {
+ // Writing to a previously written to page.
+ // Need to make page writable, but no need to fill.
+ let page_size: u32 = page_size.try_into().unwrap();
+ if !alloc.mark_writable(page_addr as *const _, page_size) {
+ return Err(FailedPageMapping);
+ }
+
+ mutable.current_write_page = Some(page_addr);
+ } else if (start..whole_region_end).contains(&raw) &&
+ (page_addr + page_size - start as usize) + yjit_alloc_size() < self.memory_limit_bytes {
+ // Writing to a brand new page
+ let mapped_region_end_addr = mapped_region_end as usize;
+ let alloc_size = page_addr - mapped_region_end_addr + page_size;
+
+ assert_eq!(0, alloc_size % page_size, "allocation size should be page aligned");
+ assert_eq!(0, mapped_region_end_addr % page_size, "pointer should be page aligned");
+
+ if alloc_size > page_size {
+ // This is unusual for the current setup, so keep track of it.
+ crate::stats::incr_counter!(exec_mem_non_bump_alloc);
+ }
+
+ // Allocate new chunk
+ let alloc_size_u32: u32 = alloc_size.try_into().unwrap();
+ unsafe {
+ if !alloc.mark_writable(mapped_region_end.cast(), alloc_size_u32) {
+ return Err(FailedPageMapping);
+ }
+ if cfg!(target_arch = "x86_64") {
+                    // Fill new memory with PUSH DS (0x1E) so that executing uninitialized memory
+                    // faults with #UD in 64-bit mode. On Linux that becomes SIGILL and engages the
+                    // usual Ruby crash reporter.
+ std::slice::from_raw_parts_mut(mapped_region_end, alloc_size).fill(0x1E);
+ } else if cfg!(target_arch = "aarch64") {
+ // In aarch64, all zeros encodes UDF, so it's already what we want.
+ } else {
+ unreachable!("unknown arch");
+ }
+ }
+ mutable.mapped_region_bytes = mutable.mapped_region_bytes + alloc_size;
+
+ mutable.current_write_page = Some(page_addr);
+ } else {
+ return Err(OutOfBounds);
+ }
+ }
+
+ // We have permission to write if we get here
+ unsafe { raw.write(byte) };
+
+ Ok(())
+ }
+
+ /// Make all the code in the region writeable.
+ /// Call this during GC before the phase of updating reference fields.
+ pub fn mark_all_writeable(&self) {
+ let mut mutable = self.mutable.borrow_mut();
+
+ mutable.current_write_page = None;
+
+ let region_start = self.region_start;
+ let mapped_region_bytes: u32 = mutable.mapped_region_bytes.try_into().unwrap();
+
+        // Make mapped region writable
+ if !mutable.allocator.mark_writable(region_start.as_ptr(), mapped_region_bytes) {
+ panic!("Cannot make memory region writable: {:?}-{:?}",
+ region_start.as_ptr(),
+ unsafe { region_start.as_ptr().add(mapped_region_bytes as usize)}
+ );
+ }
+ }
+
+ /// Make all the code in the region executable. Call this at the end of a write session.
+ /// See [Self] for usual usage flow.
+ pub fn mark_all_executable(&self) {
+ let mut mutable = self.mutable.borrow_mut();
+
+ mutable.current_write_page = None;
+
+ let region_start = self.region_start;
+ let mapped_region_bytes: u32 = mutable.mapped_region_bytes.try_into().unwrap();
+
+ // Make mapped region executable
+ mutable.allocator.mark_executable(region_start.as_ptr(), mapped_region_bytes);
+ }
+
+ /// Free a range of bytes. start_ptr must be memory page-aligned.
+ pub fn free_bytes(&self, start_ptr: CodePtr, size: u32) {
+ assert_eq!(start_ptr.raw_ptr(self) as usize % self.page_size_bytes, 0);
+
+ // Bounds check the request. We should only free memory we manage.
+ let mapped_region = self.start_ptr().raw_ptr(self)..self.mapped_end_ptr().raw_ptr(self);
+ let virtual_region = self.start_ptr().raw_ptr(self)..self.virtual_end_ptr().raw_ptr(self);
+ let last_byte_to_free = start_ptr.add_bytes(size.saturating_sub(1).as_usize()).raw_ptr(self);
+ assert!(mapped_region.contains(&start_ptr.raw_ptr(self)));
+ // On platforms where code page size != memory page size (e.g. Linux), we often need
+ // to free code pages that contain unmapped memory pages. When it happens on the last
+ // code page, it's more appropriate to check the last byte against the virtual region.
+ assert!(virtual_region.contains(&last_byte_to_free));
+
+ let mut mutable = self.mutable.borrow_mut();
+ mutable.allocator.mark_unused(start_ptr.raw_ptr(self), size);
+ }
+}
+
+/// Something that could provide a base pointer to compute a raw pointer from a [CodePtr].
+pub trait CodePtrBase {
+ fn base_ptr(&self) -> NonNull<u8>;
+}
+
+impl<A: Allocator> CodePtrBase for VirtualMemory<A> {
+ fn base_ptr(&self) -> NonNull<u8> {
+ self.region_start
+ }
+}
+
+/// Requires linking with CRuby to work
+#[cfg(not(test))]
+mod sys {
+ use crate::cruby::*;
+
+ /// Zero size! This just groups together syscalls that require linking with CRuby.
+ pub struct SystemAllocator;
+
+ type VoidPtr = *mut std::os::raw::c_void;
+
+ impl super::Allocator for SystemAllocator {
+ fn mark_writable(&mut self, ptr: *const u8, size: u32) -> bool {
+ unsafe { rb_jit_mark_writable(ptr as VoidPtr, size) }
+ }
+
+ fn mark_executable(&mut self, ptr: *const u8, size: u32) {
+ unsafe { rb_jit_mark_executable(ptr as VoidPtr, size) }
+ }
+
+ fn mark_unused(&mut self, ptr: *const u8, size: u32) -> bool {
+ unsafe { rb_jit_mark_unused(ptr as VoidPtr, size) }
+ }
+ }
+}
+
+#[cfg(not(test))]
+pub(crate) use sys::*;
+
+
+#[cfg(test)]
+pub mod tests {
+ use crate::utils::IntoUsize;
+ use super::*;
+
+ // Track allocation requests and owns some fixed size backing memory for requests.
+ // While testing we don't execute generated code.
+ pub struct TestingAllocator {
+ requests: Vec<AllocRequest>,
+ memory: Vec<u8>,
+ }
+
+ #[derive(Debug)]
+ enum AllocRequest {
+ MarkWritable{ start_idx: usize, length: usize },
+ MarkExecutable{ start_idx: usize, length: usize },
+ MarkUnused,
+ }
+ use AllocRequest::*;
+
+ impl TestingAllocator {
+ pub fn new(mem_size: usize) -> Self {
+ Self { requests: Vec::default(), memory: vec![0; mem_size] }
+ }
+
+ pub fn mem_start(&self) -> *const u8 {
+ self.memory.as_ptr()
+ }
+
+        // Verify that requests stay in bounds (i.e. that write_byte() bounds checks). Return `ptr` as an index into the backing memory.
+ fn bounds_check_request(&self, ptr: *const u8, size: u32) -> usize {
+ let mem_start = self.memory.as_ptr() as usize;
+ let index = ptr as usize - mem_start;
+
+ assert!(index < self.memory.len());
+ assert!(index + size.as_usize() <= self.memory.len());
+
+ index
+ }
+ }
+
+ // Bounds check and then record the request
+ impl super::Allocator for TestingAllocator {
+ fn mark_writable(&mut self, ptr: *const u8, length: u32) -> bool {
+ let index = self.bounds_check_request(ptr, length);
+ self.requests.push(MarkWritable { start_idx: index, length: length.as_usize() });
+
+ true
+ }
+
+ fn mark_executable(&mut self, ptr: *const u8, length: u32) {
+ let index = self.bounds_check_request(ptr, length);
+ self.requests.push(MarkExecutable { start_idx: index, length: length.as_usize() });
+
+            // We don't try to execute generated code in cfg(test),
+            // so there's no need to actually request executable memory.
+ }
+
+ fn mark_unused(&mut self, ptr: *const u8, length: u32) -> bool {
+ self.bounds_check_request(ptr, length);
+ self.requests.push(MarkUnused);
+
+ true
+ }
+ }
+
+ // Fictional architecture where each page is 4 bytes long
+ const PAGE_SIZE: usize = 4;
+ fn new_dummy_virt_mem() -> VirtualMemory<TestingAllocator> {
+ let mem_size = PAGE_SIZE * 10;
+ let alloc = TestingAllocator::new(mem_size);
+ let mem_start: *const u8 = alloc.mem_start();
+
+ VirtualMemory::new(
+ alloc,
+ PAGE_SIZE.try_into().unwrap(),
+ NonNull::new(mem_start as *mut u8).unwrap(),
+ mem_size,
+ get_option!(mem_size),
+ )
+ }
+
+ #[test]
+ #[cfg(target_arch = "x86_64")]
+ fn new_memory_is_initialized() {
+ let virt = new_dummy_virt_mem();
+
+ virt.write_byte(virt.start_ptr(), 1).unwrap();
+ assert!(
+ virt.mutable.borrow().allocator.memory[..PAGE_SIZE].iter().all(|&byte| byte != 0),
+ "Entire page should be initialized",
+ );
+
+        // Skip a few pages
+ let three_pages = 3 * PAGE_SIZE;
+ virt.write_byte(virt.start_ptr().add_bytes(three_pages), 1).unwrap();
+ assert!(
+ virt.mutable.borrow().allocator.memory[..three_pages].iter().all(|&byte| byte != 0),
+ "Gaps between write requests should be filled",
+ );
+ }
+
+ #[test]
+ fn no_redundant_syscalls_when_writing_to_the_same_page() {
+ let virt = new_dummy_virt_mem();
+
+ virt.write_byte(virt.start_ptr(), 1).unwrap();
+ virt.write_byte(virt.start_ptr(), 0).unwrap();
+
+ assert!(
+ matches!(
+ virt.mutable.borrow().allocator.requests[..],
+ [MarkWritable { start_idx: 0, length: PAGE_SIZE }],
+ )
+ );
+ }
+
+ #[test]
+ fn bounds_checking() {
+ use super::WriteError::*;
+ let virt = new_dummy_virt_mem();
+
+ let one_past_end = virt.start_ptr().add_bytes(virt.virtual_region_size());
+ assert_eq!(Err(OutOfBounds), virt.write_byte(one_past_end, 0));
+
+ let end_of_addr_space = CodePtr(u32::MAX);
+ assert_eq!(Err(OutOfBounds), virt.write_byte(end_of_addr_space, 0));
+ }
+
+ #[test]
+ fn only_written_to_regions_become_executable() {
+ // ... so we catch attempts to read/write/execute never-written-to regions
+ const THREE_PAGES: usize = PAGE_SIZE * 3;
+ let virt = new_dummy_virt_mem();
+ let page_two_start = virt.start_ptr().add_bytes(PAGE_SIZE * 2);
+ virt.write_byte(page_two_start, 1).unwrap();
+ virt.mark_all_executable();
+
+ assert!(virt.virtual_region_size() > THREE_PAGES);
+ assert!(
+ matches!(
+ virt.mutable.borrow().allocator.requests[..],
+ [
+ MarkWritable { start_idx: 0, length: THREE_PAGES },
+ MarkExecutable { start_idx: 0, length: THREE_PAGES },
+ ]
+ ),
+ );
+ }
+}
diff --git a/yjit/src/yjit.rs b/yjit/src/yjit.rs
new file mode 100644
index 0000000000..517a0daae5
--- /dev/null
+++ b/yjit/src/yjit.rs
@@ -0,0 +1,277 @@
+use crate::codegen::*;
+use crate::core::*;
+use crate::cruby::*;
+use crate::invariants::*;
+use crate::options::*;
+use crate::stats::YjitExitLocations;
+use crate::stats::incr_counter;
+use crate::stats::with_compile_time;
+
+use std::os::raw::{c_char, c_int};
+use std::time::Instant;
+use crate::log::Log;
+
+/// Is YJIT on? The interpreter uses this variable to decide whether to trigger
+/// compilation. See jit_exec() and jit_compile().
+#[allow(non_upper_case_globals)]
+#[no_mangle]
+pub static mut rb_yjit_enabled_p: bool = false;
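+
+// A minimal sketch of the interpreter-side check this variable enables; the
+// call counter name `total_calls` is hypothetical, used only for illustration:
+//
+//     if unsafe { rb_yjit_enabled_p } && total_calls >= unsafe { rb_yjit_call_threshold } {
+//         // request compilation via rb_yjit_iseq_gen_entry_point()
+//     }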
+
+// Time when YJIT was initialized (see yjit_init)
+pub static mut YJIT_INIT_TIME: Option<Instant> = None;
+
+/// Parse one command-line option.
+/// This is called from ruby.c
+#[no_mangle]
+pub extern "C" fn rb_yjit_parse_option(str_ptr: *const c_char) -> bool {
+ return parse_option(str_ptr).is_some();
+}
+
+#[no_mangle]
+pub extern "C" fn rb_yjit_option_disable() -> bool {
+ return get_option!(disable);
+}
+
+/// Like rb_yjit_enabled_p, but for Rust code.
+pub fn yjit_enabled_p() -> bool {
+ unsafe { rb_yjit_enabled_p }
+}
+
+/// This function is called from C code
+#[no_mangle]
+pub extern "C" fn rb_yjit_init(yjit_enabled: bool) {
+ // Register the method codegen functions. This must be done at boot.
+ yjit_reg_method_codegen_fns();
+
+ // If --yjit-disable, yjit_init() will not be called until RubyVM::YJIT.enable.
+ if yjit_enabled {
+ yjit_init();
+ }
+}
+
+/// Initialize and enable YJIT. You should call this at boot or while holding the GVL.
+fn yjit_init() {
+ // TODO: need to make sure that command-line options have been
+ // initialized by CRuby
+
+ // Call YJIT hooks before enabling YJIT to avoid compiling the hooks themselves
+ unsafe {
+ let yjit = rb_const_get(rb_cRubyVM, rust_str_to_id("YJIT"));
+ rb_funcall(yjit, rust_str_to_id("call_jit_hooks"), 0);
+ }
+
+ // Catch panics to avoid UB for unwinding into C frames.
+ // See https://doc.rust-lang.org/nomicon/exception-safety.html
+ let result = std::panic::catch_unwind(|| {
+ Invariants::init();
+ CodegenGlobals::init();
+ YjitExitLocations::init();
+ ids::init();
+
+ rb_bug_panic_hook();
+
+ // YJIT enabled and initialized successfully
+ assert!(unsafe{ !rb_yjit_enabled_p });
+ unsafe { rb_yjit_enabled_p = true; }
+ });
+
+ if let Err(_) = result {
+ println!("YJIT: yjit_init() panicked. Aborting.");
+ std::process::abort();
+ }
+
+ // Make sure --yjit-perf doesn't append symbols to an old file
+ if get_option!(perf_map).is_some() {
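+        // By convention, Linux perf(1) resolves JIT symbols from /tmp/perf-<pid>.map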
+ let perf_map = format!("/tmp/perf-{}.map", std::process::id());
+ let _ = std::fs::remove_file(&perf_map);
+ println!("YJIT perf map: {perf_map}");
+ }
+
+ // Note the time when YJIT was initialized
+ unsafe {
+ YJIT_INIT_TIME = Some(Instant::now());
+ }
+}
+
+#[no_mangle]
+pub extern "C" fn rb_yjit_free_at_exit() {
+ yjit_shutdown_free_codegen_table();
+}
+
+/// At the moment, we abort in all cases when we panic.
+/// To aid with getting diagnostics in the wild without requiring
+/// people to set RUST_BACKTRACE=1, register a panic hook that crashes using rb_bug().
+/// rb_bug() might not be as good at printing a call trace as Rust's stdlib, but
+/// it dumps some other info that might be relevant.
+///
+/// In case we want to start doing fancier exception handling with panic=unwind,
+/// we can revisit this later. For now, this helps to get us good bug reports.
+fn rb_bug_panic_hook() {
+ use std::env;
+ use std::panic;
+ use std::io::{stderr, Write};
+
+ // Probably the default hook. We do this very early during process boot.
+ let previous_hook = panic::take_hook();
+
+ panic::set_hook(Box::new(move |panic_info| {
+ // Not using `eprintln` to avoid double panic.
+ let _ = stderr().write_all(b"ruby: YJIT has panicked. More info to follow...\n");
+
+ // Always show a Rust backtrace.
+ env::set_var("RUST_BACKTRACE", "1");
+ previous_hook(panic_info);
+
+ // Abort with rb_bug(). It has a length limit on the message.
+ let panic_message = &format!("{}", panic_info)[..];
+ let len = std::cmp::min(0x100, panic_message.len()) as c_int;
+    unsafe { rb_bug(b"YJIT: %.*s\0".as_ref().as_ptr() as *const c_char, len, panic_message.as_ptr()); }
+ }));
+}
+
+/// Called from C code to begin compiling a function
+/// NOTE: this should be wrapped in RB_VM_LOCK_ENTER(), rb_vm_barrier() on the C side
+/// If jit_exception is true, compile JIT code for handling exceptions.
+/// See jit_compile_exception() for details.
+#[no_mangle]
+pub extern "C" fn rb_yjit_iseq_gen_entry_point(iseq: IseqPtr, ec: EcPtr, jit_exception: bool) -> *const u8 {
+ // Don't compile when there is insufficient native stack space
+ if unsafe { rb_ec_stack_check(ec as _) } != 0 {
+ return std::ptr::null();
+ }
+
+    // Reject ISEQs with very large temp stacks;
+    // this allows us to use u8/i8 values to track stack_size and sp_offset.
+ let stack_max = unsafe { rb_get_iseq_body_stack_max(iseq) };
+ if stack_max >= i8::MAX as u32 {
+ incr_counter!(iseq_stack_too_large);
+ return std::ptr::null();
+ }
+
+    // Reject ISEQs that are too long;
+    // this allows us to use u16 for instruction indices if we want to.
+    // Very long ISEQs are also much more likely to be initialization code.
+ let iseq_size = unsafe { get_iseq_encoded_size(iseq) };
+ if iseq_size >= u16::MAX as u32 {
+ incr_counter!(iseq_too_long);
+ return std::ptr::null();
+ }
+
+ // If a custom call threshold was not specified on the command-line and
+ // this is a large application (has very many ISEQs), switch to
+ // using the call threshold for large applications after this entry point
+ use crate::stats::rb_yjit_live_iseq_count;
+ if unsafe { rb_yjit_call_threshold } == SMALL_CALL_THRESHOLD && unsafe { rb_yjit_live_iseq_count } > LARGE_ISEQ_COUNT {
+ unsafe { rb_yjit_call_threshold = LARGE_CALL_THRESHOLD; };
+ }
+
+ let maybe_code_ptr = with_compile_time(|| { gen_entry_point(iseq, ec, jit_exception) });
+
+ match maybe_code_ptr {
+ Some(ptr) => ptr,
+ None => std::ptr::null(),
+ }
+}
+
+/// Free and recompile all existing JIT code
+#[no_mangle]
+pub extern "C" fn rb_yjit_code_gc(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
+ if !yjit_enabled_p() {
+ return Qnil;
+ }
+
+ with_vm_lock(src_loc!(), || {
+ let cb = CodegenGlobals::get_inline_cb();
+ let ocb = CodegenGlobals::get_outlined_cb();
+ cb.code_gc(ocb);
+ });
+
+ Qnil
+}
+
+/// Enable YJIT compilation, returning true if YJIT was previously disabled
+#[no_mangle]
+pub extern "C" fn rb_yjit_enable(_ec: EcPtr, _ruby_self: VALUE, gen_stats: VALUE, print_stats: VALUE, gen_log: VALUE, print_log: VALUE, mem_size: VALUE, call_threshold: VALUE) -> VALUE {
+ with_vm_lock(src_loc!(), || {
+
+ if !mem_size.nil_p() {
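+            // mem_size is a Fixnum VALUE; shifting out the tag bit recovers the integer (a size in MiB)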
+ let mem_size_mb = mem_size.as_isize() >> 1;
+ let mem_size_bytes = mem_size_mb * 1024 * 1024;
+ unsafe {
+ OPTIONS.mem_size = mem_size_bytes as usize;
+ }
+ }
+
+ if !call_threshold.nil_p() {
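+            // Likewise, shift out the Fixnum tag bit to get the raw threshold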
+ let threshold = call_threshold.as_isize() >> 1;
+ unsafe {
+ rb_yjit_call_threshold = threshold as u64;
+ }
+ }
+
+ // Initialize and enable YJIT
+ if gen_stats.test() {
+ unsafe {
+ OPTIONS.gen_stats = gen_stats.test();
+ OPTIONS.print_stats = print_stats.test();
+ }
+ }
+
+ if gen_log.test() {
+ unsafe {
+ if print_log.test() {
+ OPTIONS.log = Some(LogOutput::Stderr);
+ } else {
+ OPTIONS.log = Some(LogOutput::MemoryOnly);
+ }
+
+ Log::init();
+ }
+ }
+
+ yjit_init();
+
+ // Add "+YJIT" to RUBY_DESCRIPTION
+ extern "C" {
+ fn ruby_set_yjit_description();
+ }
+ unsafe { ruby_set_yjit_description(); }
+
+ Qtrue
+ })
+}
+
+/// Simulate a situation where we are out of executable memory
+#[no_mangle]
+pub extern "C" fn rb_yjit_simulate_oom_bang(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
+ // If YJIT is not enabled, do nothing
+ if !yjit_enabled_p() {
+ return Qnil;
+ }
+
+ // Enabled in debug mode only for security
+ if cfg!(debug_assertions) {
+ let cb = CodegenGlobals::get_inline_cb();
+ let ocb = CodegenGlobals::get_outlined_cb().unwrap();
+ cb.set_pos(cb.get_mem_size());
+ ocb.set_pos(ocb.get_mem_size());
+ }
+
+ return Qnil;
+}
+
+/// Push a C method frame if the given PC is supposed to lazily push one.
+/// This is called from rb_raise() (at rb_exc_new_str()) and other functions
+/// that may make a method call (e.g. rb_to_int()).
+#[no_mangle]
+pub extern "C" fn rb_yjit_lazy_push_frame(pc: *mut VALUE) {
+ if !yjit_enabled_p() {
+ return;
+ }
+
+ incr_counter!(num_lazy_frame_check);
+ if let Some(&(cme, recv_idx)) = CodegenGlobals::get_pc_to_cfunc().get(&pc) {
+ incr_counter!(num_lazy_frame_push);
+ unsafe { rb_vm_push_cfunc_frame(cme, recv_idx as i32) }
+ }
+}
-rw-r--r--test/rubygems/test_gem_commands_generate_index_command.rb17
-rw-r--r--test/rubygems/test_gem_commands_help_command.rb51
-rw-r--r--test/rubygems/test_gem_commands_info_command.rb31
-rw-r--r--test/rubygems/test_gem_commands_install_command.rb613
-rw-r--r--test/rubygems/test_gem_commands_list_command.rb13
-rw-r--r--test/rubygems/test_gem_commands_lock_command.rb25
-rw-r--r--test/rubygems/test_gem_commands_mirror.rb7
-rw-r--r--test/rubygems/test_gem_commands_open_command.rb30
-rw-r--r--test/rubygems/test_gem_commands_outdated_command.rb31
-rw-r--r--test/rubygems/test_gem_commands_owner_command.rb325
-rw-r--r--test/rubygems/test_gem_commands_pristine_command.rb246
-rw-r--r--test/rubygems/test_gem_commands_push_command.rb328
-rw-r--r--test/rubygems/test_gem_commands_query_command.rb171
-rw-r--r--test/rubygems/test_gem_commands_search_command.rb7
-rw-r--r--test/rubygems/test_gem_commands_server_command.rb57
-rw-r--r--test/rubygems/test_gem_commands_setup_command.rb346
-rw-r--r--test/rubygems/test_gem_commands_signin_command.rb214
-rw-r--r--test/rubygems/test_gem_commands_signout_command.rb14
-rw-r--r--test/rubygems/test_gem_commands_sources_command.rb172
-rw-r--r--test/rubygems/test_gem_commands_specification_command.rb101
-rw-r--r--test/rubygems/test_gem_commands_stale_command.rb11
-rw-r--r--test/rubygems/test_gem_commands_uninstall_command.rb188
-rw-r--r--test/rubygems/test_gem_commands_unpack_command.rb67
-rw-r--r--test/rubygems/test_gem_commands_update_command.rb398
-rw-r--r--test/rubygems/test_gem_commands_which_command.rb21
-rw-r--r--test/rubygems/test_gem_commands_yank_command.rb231
-rw-r--r--test/rubygems/test_gem_config_file.rb199
-rw-r--r--test/rubygems/test_gem_dependency.rb175
-rw-r--r--test/rubygems/test_gem_dependency_installer.rb576
-rw-r--r--test/rubygems/test_gem_dependency_list.rb113
-rw-r--r--test/rubygems/test_gem_dependency_resolution_error.rb11
-rw-r--r--test/rubygems/test_gem_doctor.rb99
-rw-r--r--test/rubygems/test_gem_ext_builder.rb206
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder.rb167
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder/custom_name/.gitignore1
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder/custom_name/custom_name.gemspec10
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder/custom_name/ext/custom_name_lib/Cargo.lock233
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder/custom_name/ext/custom_name_lib/Cargo.toml10
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder/custom_name/ext/custom_name_lib/src/lib.rs27
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder/custom_name/lib/custom_name.rb3
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder/rust_ruby_example/.gitignore1
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder/rust_ruby_example/Cargo.lock247
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder/rust_ruby_example/Cargo.toml10
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder/rust_ruby_example/rust_ruby_example.gemspec10
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder/rust_ruby_example/src/lib.rs51
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder_link_flag_converter.rb34
-rw-r--r--test/rubygems/test_gem_ext_cargo_builder_unit.rb60
-rw-r--r--test/rubygems/test_gem_ext_cmake_builder.rb59
-rw-r--r--test/rubygems/test_gem_ext_configure_builder.rb53
-rw-r--r--test/rubygems/test_gem_ext_ext_conf_builder.rb148
-rw-r--r--test/rubygems/test_gem_ext_rake_builder.rb64
-rw-r--r--test/rubygems/test_gem_gem_runner.rb37
-rw-r--r--test/rubygems/test_gem_gemcutter_utilities.rb278
-rw-r--r--test/rubygems/test_gem_impossible_dependencies_error.rb11
-rw-r--r--test/rubygems/test_gem_indexer.rb160
-rw-r--r--test/rubygems/test_gem_install_update_options.rb53
-rw-r--r--test/rubygems/test_gem_installer.rb1266
-rw-r--r--test/rubygems/test_gem_local_remote_options.rb25
-rw-r--r--test/rubygems/test_gem_name_tuple.rb11
-rw-r--r--test/rubygems/test_gem_package.rb629
-rw-r--r--test/rubygems/test_gem_package_old.rb39
-rw-r--r--test/rubygems/test_gem_package_tar_header.rb107
-rw-r--r--test/rubygems/test_gem_package_tar_reader.rb64
-rw-r--r--test/rubygems/test_gem_package_tar_reader_entry.rb194
-rw-r--r--test/rubygems/test_gem_package_tar_writer.rb197
-rw-r--r--test/rubygems/test_gem_package_task.rb78
-rw-r--r--test/rubygems/test_gem_path_support.rb37
-rw-r--r--test/rubygems/test_gem_platform.rb590
-rw-r--r--test/rubygems/test_gem_rdoc.rb41
-rw-r--r--test/rubygems/test_gem_remote_fetcher.rb429
-rw-r--r--test/rubygems/test_gem_request.rb188
-rw-r--r--test/rubygems/test_gem_request_connection_pools.rb65
-rw-r--r--test/rubygems/test_gem_request_set.rb240
-rw-r--r--test/rubygems/test_gem_request_set_gem_dependency_api.rb457
-rw-r--r--test/rubygems/test_gem_request_set_lockfile.rb189
-rw-r--r--test/rubygems/test_gem_request_set_lockfile_parser.rb141
-rw-r--r--test/rubygems/test_gem_request_set_lockfile_tokenizer.rb135
-rw-r--r--test/rubygems/test_gem_requirement.rb188
-rw-r--r--test/rubygems/test_gem_resolver.rb298
-rw-r--r--test/rubygems/test_gem_resolver_activation_request.rb15
-rw-r--r--test/rubygems/test_gem_resolver_api_set.rb156
-rw-r--r--test/rubygems/test_gem_resolver_api_specification.rb97
-rw-r--r--test/rubygems/test_gem_resolver_best_set.rb66
-rw-r--r--test/rubygems/test_gem_resolver_composed_set.rb5
-rw-r--r--test/rubygems/test_gem_resolver_conflict.rb27
-rw-r--r--test/rubygems/test_gem_resolver_dependency_request.rb33
-rw-r--r--test/rubygems/test_gem_resolver_git_set.rb47
-rw-r--r--test/rubygems/test_gem_resolver_git_specification.rb46
-rw-r--r--test/rubygems/test_gem_resolver_index_set.rb31
-rw-r--r--test/rubygems/test_gem_resolver_index_specification.rb35
-rw-r--r--test/rubygems/test_gem_resolver_installed_specification.rb13
-rw-r--r--test/rubygems/test_gem_resolver_installer_set.rb149
-rw-r--r--test/rubygems/test_gem_resolver_local_specification.rb17
-rw-r--r--test/rubygems/test_gem_resolver_lock_set.rb33
-rw-r--r--test/rubygems/test_gem_resolver_lock_specification.rb37
-rw-r--r--test/rubygems/test_gem_resolver_requirement_list.rb5
-rw-r--r--test/rubygems/test_gem_resolver_specification.rb21
-rw-r--r--test/rubygems/test_gem_resolver_vendor_set.rb21
-rw-r--r--test/rubygems/test_gem_resolver_vendor_specification.rb23
-rw-r--r--test/rubygems/test_gem_security.rb185
-rw-r--r--test/rubygems/test_gem_security_policy.rb209
-rw-r--r--test/rubygems/test_gem_security_signer.rb103
-rw-r--r--test/rubygems/test_gem_security_trust_dir.rb31
-rw-r--r--test/rubygems/test_gem_server.rb612
-rw-r--r--test/rubygems/test_gem_silent_ui.rb89
-rw-r--r--test/rubygems/test_gem_source.rb106
-rw-r--r--test/rubygems/test_gem_source_fetch_problem.rb25
-rw-r--r--test/rubygems/test_gem_source_git.rb151
-rw-r--r--test/rubygems/test_gem_source_installed.rb35
-rw-r--r--test/rubygems/test_gem_source_list.rb12
-rw-r--r--test/rubygems/test_gem_source_local.rb33
-rw-r--r--test/rubygems/test_gem_source_lock.rb65
-rw-r--r--test/rubygems/test_gem_source_specific_file.rb39
-rw-r--r--test/rubygems/test_gem_source_subpath_problem.rb50
-rw-r--r--test/rubygems/test_gem_source_vendor.rb29
-rw-r--r--test/rubygems/test_gem_spec_fetcher.rb149
-rw-r--r--test/rubygems/test_gem_specification.rb1922
-rw-r--r--test/rubygems/test_gem_stream_ui.rb77
-rw-r--r--test/rubygems/test_gem_stub_specification.rb97
-rw-r--r--test/rubygems/test_gem_text.rb11
-rw-r--r--test/rubygems/test_gem_uninstaller.rb292
-rw-r--r--test/rubygems/test_gem_unsatisfiable_dependency_error.rb9
-rw-r--r--test/rubygems/test_gem_update_suggestion.rb209
-rw-r--r--test/rubygems/test_gem_uri.rb41
-rw-r--r--test/rubygems/test_gem_uri_formatter.rb31
-rw-r--r--test/rubygems/test_gem_util.rb73
-rw-r--r--test/rubygems/test_gem_validator.rb22
-rw-r--r--test/rubygems/test_gem_version.rb63
-rw-r--r--test/rubygems/test_gem_version_option.rb35
-rw-r--r--test/rubygems/test_kernel.rb111
-rw-r--r--test/rubygems/test_project_sanity.rb40
-rw-r--r--test/rubygems/test_remote_fetch_error.rb17
-rw-r--r--test/rubygems/test_require.rb428
-rw-r--r--test/rubygems/test_rubygems.rb76
-rw-r--r--test/rubygems/test_webauthn_listener.rb143
-rw-r--r--test/rubygems/test_webauthn_listener_response.rb93
-rw-r--r--test/rubygems/test_webauthn_poller.rb124
-rw-r--r--test/rubygems/utilities.rb435
-rw-r--r--test/rubygems/wrong_key_cert.pem30
-rw-r--r--test/rubygems/wrong_key_cert_32.pem30
-rw-r--r--test/runner.rb15
-rw-r--r--test/sdbm/test_sdbm.rb544
-rw-r--r--test/socket/test_addrinfo.rb18
-rw-r--r--test/socket/test_basicsocket.rb4
-rw-r--r--test/socket/test_nonblock.rb12
-rw-r--r--test/socket/test_socket.rb23
-rw-r--r--test/socket/test_sockopt.rb2
-rw-r--r--test/socket/test_tcp.rb45
-rw-r--r--test/socket/test_udp.rb2
-rw-r--r--test/socket/test_unix.rb157
-rw-r--r--test/stringio/test_ractor.rb23
-rw-r--r--test/stringio/test_stringio.rb146
-rw-r--r--test/strscan/test_ractor.rb28
-rw-r--r--test/strscan/test_stringscanner.rb80
-rw-r--r--test/test_delegate.rb34
-rw-r--r--test/test_extlibs.rb4
-rw-r--r--test/test_find.rb16
-rw-r--r--test/test_getoptlong.rb163
-rw-r--r--test/test_ipaddr.rb86
-rw-r--r--test/test_mutex_m.rb32
-rw-r--r--test/test_open3.rb24
-rw-r--r--test/test_pp.rb21
-rw-r--r--test/test_prime.rb288
-rw-r--r--test/test_pstore.rb36
-rw-r--r--test/test_pty.rb30
-rw-r--r--test/test_rbconfig.rb9
-rw-r--r--test/test_securerandom.rb121
-rw-r--r--test/test_set.rb186
-rw-r--r--test/test_sorted_set.rb45
-rw-r--r--test/test_time.rb18
-rw-r--r--test/test_timeout.rb65
-rw-r--r--test/test_tmpdir.rb70
-rw-r--r--test/test_tracer.rb234
-rw-r--r--test/test_trick.rb214
-rw-r--r--test/test_win32api.rb27
-rw-r--r--test/uri/test_common.rb119
-rw-r--r--test/uri/test_generic.rb38
-rw-r--r--test/uri/test_http.rb31
-rw-r--r--test/uri/test_ldap.rb6
-rw-r--r--test/uri/test_parser.rb43
-rw-r--r--test/uri/test_wss.rb71
-rw-r--r--test/webrick/test_cgi.rb170
-rw-r--r--test/webrick/test_filehandler.rb355
-rw-r--r--test/webrick/test_httpproxy.rb466
-rw-r--r--test/webrick/test_httprequest.rb476
-rw-r--r--test/webrick/test_httpresponse.rb282
-rw-r--r--test/webrick/test_httpserver.rb543
-rw-r--r--test/webrick/test_server.rb163
-rw-r--r--test/webrick/utils.rb82
-rw-r--r--test/win32ole/test_err_in_callback.rb2
-rw-r--r--test/win32ole/test_win32ole.rb2
-rw-r--r--test/win32ole/test_win32ole_event.rb5
-rw-r--r--test/win32ole/test_win32ole_method_event.rb2
-rw-r--r--test/win32ole/test_win32ole_param_event.rb2
-rw-r--r--test/win32ole/test_win32ole_record.rb2
-rw-r--r--test/win32ole/test_win32ole_type_event.rb2
-rw-r--r--test/win32ole/test_win32ole_variable.rb4
-rw-r--r--test/win32ole/test_win32ole_variant.rb2
-rw-r--r--test/win32ole/test_win32ole_variant_outarg.rb2
-rw-r--r--test/win32ole/test_word.rb2
-rw-r--r--test/yaml/test_store.rb6
-rw-r--r--test/zlib/test_zlib.rb235
-rw-r--r--thread.c2951
-rw-r--r--thread_none.c284
-rw-r--r--thread_none.h20
-rw-r--r--thread_pthread.c1126
-rw-r--r--thread_pthread.h91
-rw-r--r--thread_sync.c923
-rw-r--r--thread_sync.rb68
-rw-r--r--thread_win32.c502
-rw-r--r--thread_win32.h40
-rw-r--r--time.c3072
-rw-r--r--timev.h11
-rw-r--r--timev.rb404
-rwxr-xr-xtool/actions-commit-info.sh17
-rw-r--r--tool/annocheck/Dockerfile4
-rw-r--r--tool/annocheck/Dockerfile-copy7
-rwxr-xr-xtool/bisect.sh17
-rw-r--r--tool/bundler/dev_gems.rb19
-rw-r--r--tool/bundler/dev_gems.rb.lock57
-rw-r--r--tool/bundler/rubocop_gems.rb12
-rw-r--r--tool/bundler/rubocop_gems.rb.lock73
-rw-r--r--tool/bundler/standard_gems.rb12
-rw-r--r--tool/bundler/standard_gems.rb.lock81
-rw-r--r--tool/bundler/test_gems.rb7
-rw-r--r--tool/bundler/test_gems.rb.lock27
-rwxr-xr-xtool/checksum.rb4
-rw-r--r--tool/ci_functions.sh29
-rwxr-xr-xtool/disable_ipv6.sh9
-rw-r--r--tool/downloader.rb119
-rwxr-xr-xtool/enc-case-folding.rb416
-rw-r--r--tool/enc-emoji-citrus-gen.rb4
-rwxr-xr-xtool/enc-unicode.rb35
-rwxr-xr-xtool/expand-config.rb14
-rwxr-xr-xtool/extlibs.rb178
-rw-r--r--tool/fake.rb13
-rwxr-xr-xtool/fetch-bundled_gems.rb18
-rwxr-xr-xtool/file2lastrev.rb91
-rwxr-xr-xtool/format-release39
-rw-r--r--tool/gem-unpack.rb18
-rwxr-xr-xtool/gen-mailmap.rb4
-rw-r--r--tool/generic_erb.rb44
-rw-r--r--tool/gperf.sed1
-rwxr-xr-xtool/id2token.rb11
-rwxr-xr-xtool/ifchange29
-rwxr-xr-xtool/intern_ids.rb35
-rwxr-xr-xtool/leaked-globals31
-rw-r--r--tool/lib/-test-/integer.rb4
-rw-r--r--tool/lib/bundled_gem.rb68
-rw-r--r--tool/lib/colorize.rb16
-rw-r--r--tool/lib/core_assertions.rb821
-rw-r--r--tool/lib/envutil.rb54
-rw-r--r--tool/lib/gc_checker.rb36
-rw-r--r--tool/lib/gc_compact_checker.rb10
-rw-r--r--tool/lib/leakchecker.rb36
-rw-r--r--tool/lib/memory_status.rb6
-rw-r--r--tool/lib/minitest/README.txt457
-rw-r--r--tool/lib/minitest/autorun.rb14
-rw-r--r--tool/lib/minitest/benchmark.rb418
-rw-r--r--tool/lib/minitest/mock.rb196
-rw-r--r--tool/lib/minitest/unit.rb1463
-rw-r--r--tool/lib/output.rb57
-rw-r--r--tool/lib/profile_test_all.rb2
-rw-r--r--tool/lib/test/unit.rb824
-rw-r--r--tool/lib/test/unit/assertions.rb679
-rw-r--r--tool/lib/test/unit/core_assertions.rb597
-rw-r--r--tool/lib/test/unit/parallel.rb48
-rw-r--r--tool/lib/test/unit/testcase.rb282
-rw-r--r--tool/lib/vcs.rb354
-rw-r--r--tool/lib/vpath.rb7
-rw-r--r--tool/lib/webrick.rb232
-rw-r--r--tool/lib/webrick/.document (renamed from lib/webrick/.document)0
-rw-r--r--tool/lib/webrick/accesslog.rb (renamed from lib/webrick/accesslog.rb)0
-rw-r--r--tool/lib/webrick/cgi.rb (renamed from lib/webrick/cgi.rb)0
-rw-r--r--tool/lib/webrick/compat.rb (renamed from lib/webrick/compat.rb)0
-rw-r--r--tool/lib/webrick/config.rb (renamed from lib/webrick/config.rb)0
-rw-r--r--tool/lib/webrick/cookie.rb (renamed from lib/webrick/cookie.rb)0
-rw-r--r--tool/lib/webrick/htmlutils.rb (renamed from lib/webrick/htmlutils.rb)0
-rw-r--r--tool/lib/webrick/httpauth.rb (renamed from lib/webrick/httpauth.rb)0
-rw-r--r--tool/lib/webrick/httpauth/authenticator.rb117
-rw-r--r--tool/lib/webrick/httpauth/basicauth.rb (renamed from lib/webrick/httpauth/basicauth.rb)0
-rw-r--r--tool/lib/webrick/httpauth/digestauth.rb (renamed from lib/webrick/httpauth/digestauth.rb)0
-rw-r--r--tool/lib/webrick/httpauth/htdigest.rb (renamed from lib/webrick/httpauth/htdigest.rb)0
-rw-r--r--tool/lib/webrick/httpauth/htgroup.rb (renamed from lib/webrick/httpauth/htgroup.rb)0
-rw-r--r--tool/lib/webrick/httpauth/htpasswd.rb (renamed from lib/webrick/httpauth/htpasswd.rb)0
-rw-r--r--tool/lib/webrick/httpauth/userdb.rb (renamed from lib/webrick/httpauth/userdb.rb)0
-rw-r--r--tool/lib/webrick/httpproxy.rb354
-rw-r--r--tool/lib/webrick/httprequest.rb636
-rw-r--r--tool/lib/webrick/httpresponse.rb (renamed from lib/webrick/httpresponse.rb)0
-rw-r--r--tool/lib/webrick/https.rb (renamed from lib/webrick/https.rb)0
-rw-r--r--tool/lib/webrick/httpserver.rb293
-rw-r--r--tool/lib/webrick/httpservlet.rb (renamed from lib/webrick/httpservlet.rb)0
-rw-r--r--tool/lib/webrick/httpservlet/abstract.rb (renamed from lib/webrick/httpservlet/abstract.rb)0
-rw-r--r--tool/lib/webrick/httpservlet/cgi_runner.rb (renamed from lib/webrick/httpservlet/cgi_runner.rb)0
-rw-r--r--tool/lib/webrick/httpservlet/cgihandler.rb (renamed from lib/webrick/httpservlet/cgihandler.rb)0
-rw-r--r--tool/lib/webrick/httpservlet/erbhandler.rb (renamed from lib/webrick/httpservlet/erbhandler.rb)0
-rw-r--r--tool/lib/webrick/httpservlet/filehandler.rb552
-rw-r--r--tool/lib/webrick/httpservlet/prochandler.rb (renamed from lib/webrick/httpservlet/prochandler.rb)0
-rw-r--r--tool/lib/webrick/httpstatus.rb (renamed from lib/webrick/httpstatus.rb)0
-rw-r--r--tool/lib/webrick/httputils.rb512
-rw-r--r--tool/lib/webrick/httpversion.rb (renamed from lib/webrick/httpversion.rb)0
-rw-r--r--tool/lib/webrick/log.rb (renamed from lib/webrick/log.rb)0
-rw-r--r--tool/lib/webrick/server.rb381
-rw-r--r--tool/lib/webrick/ssl.rb215
-rw-r--r--tool/lib/webrick/utils.rb265
-rw-r--r--tool/lib/webrick/version.rb18
-rwxr-xr-xtool/ln_sr.rb131
-rw-r--r--tool/m4/_colorize_result_prepare.m43
-rw-r--r--tool/m4/ac_msg_result.m42
-rw-r--r--tool/m4/colorize_result.m42
-rw-r--r--tool/m4/ruby_append_option.m42
-rw-r--r--tool/m4/ruby_append_options.m42
-rw-r--r--tool/m4/ruby_check_builtin_func.m42
-rw-r--r--tool/m4/ruby_check_builtin_setjmp.m410
-rw-r--r--tool/m4/ruby_check_printf_prefix.m411
-rw-r--r--tool/m4/ruby_check_setjmp.m410
-rw-r--r--tool/m4/ruby_check_signedness.m42
-rw-r--r--tool/m4/ruby_check_sizeof.m42
-rw-r--r--tool/m4/ruby_check_sysconf.m48
-rw-r--r--tool/m4/ruby_cppoutfile.m46
-rw-r--r--tool/m4/ruby_decl_attribute.m46
-rw-r--r--tool/m4/ruby_default_arch.m47
-rw-r--r--tool/m4/ruby_define_if.m412
-rw-r--r--tool/m4/ruby_defint.m42
-rw-r--r--tool/m4/ruby_dtrace_available.m44
-rw-r--r--tool/m4/ruby_dtrace_postprocess.m44
-rw-r--r--tool/m4/ruby_func_attribute.m42
-rw-r--r--tool/m4/ruby_mingw32.m46
-rw-r--r--tool/m4/ruby_prepend_option.m42
-rw-r--r--tool/m4/ruby_prog_gnu_ld.m42
-rw-r--r--tool/m4/ruby_prog_makedirs.m49
-rw-r--r--tool/m4/ruby_replace_funcs.m412
-rw-r--r--tool/m4/ruby_replace_type.m414
-rw-r--r--tool/m4/ruby_require_funcs.m413
-rw-r--r--tool/m4/ruby_rm_recursive.m46
-rw-r--r--tool/m4/ruby_setjmp_type.m42
-rw-r--r--tool/m4/ruby_stack_grow_direction.m46
-rw-r--r--tool/m4/ruby_thread.m480
-rw-r--r--tool/m4/ruby_try_cflags.m411
-rw-r--r--tool/m4/ruby_try_cxxflags.m44
-rw-r--r--tool/m4/ruby_try_ldflags.m44
-rw-r--r--tool/m4/ruby_type_attribute.m42
-rw-r--r--tool/m4/ruby_universal_arch.m439
-rw-r--r--tool/m4/ruby_wasm_tools.m424
-rw-r--r--tool/m4/ruby_werror_flag.m42
-rwxr-xr-xtool/make-snapshot68
-rw-r--r--tool/make_hgraph.rb7
-rwxr-xr-xtool/merger.rb192
-rwxr-xr-xtool/mjit/bindgen.rb435
-rw-r--r--tool/mjit_tabs.rb8
-rw-r--r--tool/mk_builtin_loader.rb257
-rwxr-xr-xtool/mkconfig.rb33
-rwxr-xr-xtool/mkrunnable.rb28
-rwxr-xr-xtool/outdate-bundled-gems.rb135
-rwxr-xr-xtool/pure_parser.rb24
-rwxr-xr-xtool/rbinstall.rb459
-rw-r--r--tool/rbs_skip_tests11
-rwxr-xr-xtool/redmine-backporter.rb171
-rwxr-xr-xtool/releng/gen-mail.rb55
-rwxr-xr-xtool/releng/gen-release-note.rb36
-rwxr-xr-xtool/releng/update-www-meta.rb213
-rw-r--r--tool/ruby_vm/controllers/application_controller.rb5
-rw-r--r--tool/ruby_vm/helpers/c_escape.rb6
-rw-r--r--tool/ruby_vm/helpers/dumper.rb7
-rw-r--r--tool/ruby_vm/loaders/insns_def.rb8
-rwxr-xr-xtool/ruby_vm/models/bare_instructions.rb4
-rw-r--r--tool/ruby_vm/models/typemap.rb2
-rw-r--r--tool/ruby_vm/scripts/insns2vm.rb12
-rw-r--r--tool/ruby_vm/views/_insn_entry.erb6
-rw-r--r--tool/ruby_vm/views/_insn_len_info.erb23
-rw-r--r--tool/ruby_vm/views/_insn_name_info.erb30
-rw-r--r--tool/ruby_vm/views/_insn_operand_info.erb29
-rw-r--r--tool/ruby_vm/views/_insn_type_chars.erb19
-rw-r--r--tool/ruby_vm/views/_leaf_helpers.erb62
-rw-r--r--tool/ruby_vm/views/_mjit_compile_insn.erb91
-rw-r--r--tool/ruby_vm/views/_mjit_compile_insn_body.erb115
-rw-r--r--tool/ruby_vm/views/_mjit_compile_ivar.erb81
-rw-r--r--tool/ruby_vm/views/_mjit_compile_pc_and_sp.erb37
-rw-r--r--tool/ruby_vm/views/_mjit_compile_send.erb112
-rw-r--r--tool/ruby_vm/views/_trace_instruction.erb9
-rw-r--r--tool/ruby_vm/views/lib/ruby_vm/mjit/instruction.rb.erb40
-rw-r--r--tool/ruby_vm/views/mjit_compile.inc.erb101
-rw-r--r--tool/ruby_vm/views/mjit_sp_inc.inc.erb17
-rw-r--r--tool/ruby_vm/views/optinsn.inc.erb4
-rwxr-xr-xtool/runruby.rb25
-rw-r--r--tool/search-cgvars.rb55
-rwxr-xr-xtool/strip-rdoc.rb30
-rwxr-xr-x[-rw-r--r--]tool/sync_default_gems.rb1144
-rwxr-xr-xtool/test-annocheck.sh33
-rw-r--r--tool/test-bundled-gems.rb82
-rw-r--r--tool/test-coverage.rb2
-rw-r--r--tool/test/minitest/metametameta.rb71
-rw-r--r--tool/test/minitest/test_minitest_benchmark.rb131
-rw-r--r--tool/test/minitest/test_minitest_mock.rb404
-rw-r--r--tool/test/minitest/test_minitest_unit.rb1793
-rw-r--r--tool/test/runner.rb2
-rwxr-xr-xtool/test/test_sync_default_gems.rb76
-rw-r--r--tool/test/testunit/metametameta.rb70
-rw-r--r--tool/test/testunit/test4test_hideskip.rb8
-rw-r--r--tool/test/testunit/test4test_sorting.rb2
-rw-r--r--tool/test/testunit/test_assertion.rb26
-rw-r--r--tool/test/testunit/test_hideskip.rb6
-rw-r--r--tool/test/testunit/test_minitest_unit.rb1488
-rw-r--r--tool/test/testunit/test_parallel.rb33
-rw-r--r--tool/test/testunit/test_redefinition.rb13
-rw-r--r--tool/test/testunit/test_sorting.rb57
-rw-r--r--tool/test/testunit/tests_for_parallel/test4test_hungup.rb15
-rw-r--r--tool/test/webrick/.htaccess (renamed from test/webrick/.htaccess)0
-rw-r--r--tool/test/webrick/test_cgi.rb170
-rw-r--r--tool/test/webrick/test_config.rb (renamed from test/webrick/test_config.rb)0
-rw-r--r--tool/test/webrick/test_cookie.rb (renamed from test/webrick/test_cookie.rb)0
-rw-r--r--tool/test/webrick/test_do_not_reverse_lookup.rb (renamed from test/webrick/test_do_not_reverse_lookup.rb)0
-rw-r--r--tool/test/webrick/test_filehandler.rb403
-rw-r--r--tool/test/webrick/test_htgroup.rb (renamed from test/webrick/test_htgroup.rb)0
-rw-r--r--tool/test/webrick/test_htmlutils.rb (renamed from test/webrick/test_htmlutils.rb)0
-rw-r--r--tool/test/webrick/test_httpauth.rb (renamed from test/webrick/test_httpauth.rb)0
-rw-r--r--tool/test/webrick/test_httpproxy.rb467
-rw-r--r--tool/test/webrick/test_httprequest.rb488
-rw-r--r--tool/test/webrick/test_httpresponse.rb282
-rw-r--r--tool/test/webrick/test_https.rb (renamed from test/webrick/test_https.rb)0
-rw-r--r--tool/test/webrick/test_httpserver.rb543
-rw-r--r--tool/test/webrick/test_httpstatus.rb (renamed from test/webrick/test_httpstatus.rb)0
-rw-r--r--tool/test/webrick/test_httputils.rb (renamed from test/webrick/test_httputils.rb)0
-rw-r--r--tool/test/webrick/test_httpversion.rb (renamed from test/webrick/test_httpversion.rb)0
-rw-r--r--tool/test/webrick/test_server.rb191
-rw-r--r--tool/test/webrick/test_ssl_server.rb (renamed from test/webrick/test_ssl_server.rb)0
-rw-r--r--tool/test/webrick/test_utils.rb (renamed from test/webrick/test_utils.rb)0
-rw-r--r--tool/test/webrick/utils.rb84
-rw-r--r--tool/test/webrick/webrick.cgi (renamed from test/webrick/webrick.cgi)0
-rw-r--r--tool/test/webrick/webrick.rhtml (renamed from test/webrick/webrick.rhtml)0
-rw-r--r--tool/test/webrick/webrick_long_filename.cgi (renamed from test/webrick/webrick_long_filename.cgi)0
-rw-r--r--tool/transcode-tblgen.rb24
-rw-r--r--tool/transform_mjit_header.rb22
-rwxr-xr-xtool/travis_wait.sh18
-rw-r--r--tool/update-NEWS-refs.rb37
-rwxr-xr-xtool/update-bundled_gems.rb23
-rwxr-xr-xtool/update-deps24
-rwxr-xr-xtool/wasm-clangw9
-rw-r--r--trace_point.rb142
-rw-r--r--transcode.c803
-rw-r--r--transcode_data.h32
-rw-r--r--transient_heap.c169
-rw-r--r--transient_heap.h4
-rw-r--r--util.c196
-rw-r--r--variable.c3023
-rw-r--r--variable.h12
-rw-r--r--version.c67
-rw-r--r--version.h64
-rw-r--r--vm.c2466
-rw-r--r--vm_args.c645
-rw-r--r--vm_backtrace.c1150
-rw-r--r--vm_callinfo.h243
-rw-r--r--vm_core.h834
-rw-r--r--vm_debug.h93
-rw-r--r--vm_dump.c1090
-rw-r--r--vm_eval.c1386
-rw-r--r--vm_exec.c50
-rw-r--r--vm_exec.h28
-rw-r--r--vm_insnhelper.c4947
-rw-r--r--vm_insnhelper.h54
-rw-r--r--vm_method.c1691
-rw-r--r--vm_opts.h4
-rw-r--r--vm_sync.c301
-rw-r--r--vm_sync.h137
-rw-r--r--vm_trace.c708
-rw-r--r--vsnprintf.c23
-rw-r--r--warning.rb14
-rw-r--r--wasm/GNUmakefile.in32
-rw-r--r--wasm/README.md70
-rw-r--r--wasm/asyncify.h23
-rw-r--r--wasm/fiber.c83
-rw-r--r--wasm/fiber.h43
-rw-r--r--wasm/machine.c62
-rw-r--r--wasm/machine.h25
-rw-r--r--wasm/machine_core.S25
-rw-r--r--wasm/missing.c199
-rw-r--r--wasm/runtime.c47
-rw-r--r--wasm/setjmp.c204
-rw-r--r--wasm/setjmp.h95
-rw-r--r--wasm/setjmp_core.S27
-rw-r--r--wasm/tests/fiber_test.c66
-rw-r--r--wasm/tests/machine_test.c115
-rw-r--r--wasm/tests/setjmp_test.c108
-rwxr-xr-xwasm/wasm-opt36
-rw-r--r--win32/Makefile.sub283
-rwxr-xr-xwin32/configure.bat51
-rw-r--r--win32/dir.h8
-rw-r--r--win32/file.c621
-rw-r--r--win32/file.h38
-rwxr-xr-xwin32/ifchange.bat31
-rwxr-xr-xwin32/mkexports.rb16
-rwxr-xr-xwin32/resource.rb4
-rw-r--r--win32/ruby.manifest8
-rw-r--r--win32/setup.mak107
-rw-r--r--win32/win32.c6147
-rw-r--r--win32/winmain.c4
-rw-r--r--yjit.c1109
-rw-r--r--yjit.h70
-rw-r--r--yjit.rb356
-rw-r--r--yjit/.gitignore2
-rw-r--r--yjit/Cargo.lock49
-rw-r--r--yjit/Cargo.toml47
-rw-r--r--yjit/bindgen/Cargo.lock311
-rw-r--r--yjit/bindgen/Cargo.toml10
-rw-r--r--yjit/bindgen/src/main.rs430
-rw-r--r--yjit/not_gmake.mk14
-rw-r--r--yjit/src/asm/arm64/README.md16
-rw-r--r--yjit/src/asm/arm64/arg/bitmask_imm.rs255
-rw-r--r--yjit/src/asm/arm64/arg/condition.rs52
-rw-r--r--yjit/src/asm/arm64/arg/inst_offset.rs47
-rw-r--r--yjit/src/asm/arm64/arg/mod.rs18
-rw-r--r--yjit/src/asm/arm64/arg/sf.rs19
-rw-r--r--yjit/src/asm/arm64/arg/shifted_imm.rs81
-rw-r--r--yjit/src/asm/arm64/arg/sys_reg.rs6
-rw-r--r--yjit/src/asm/arm64/arg/truncate.rs66
-rw-r--r--yjit/src/asm/arm64/inst/atomic.rs86
-rw-r--r--yjit/src/asm/arm64/inst/branch.rs100
-rw-r--r--yjit/src/asm/arm64/inst/branch_cond.rs78
-rw-r--r--yjit/src/asm/arm64/inst/breakpoint.rs55
-rw-r--r--yjit/src/asm/arm64/inst/call.rs104
-rw-r--r--yjit/src/asm/arm64/inst/conditional.rs73
-rw-r--r--yjit/src/asm/arm64/inst/data_imm.rs143
-rw-r--r--yjit/src/asm/arm64/inst/data_reg.rs192
-rw-r--r--yjit/src/asm/arm64/inst/halfword_imm.rs179
-rw-r--r--yjit/src/asm/arm64/inst/load_literal.rs89
-rw-r--r--yjit/src/asm/arm64/inst/load_register.rs108
-rw-r--r--yjit/src/asm/arm64/inst/load_store.rs249
-rw-r--r--yjit/src/asm/arm64/inst/load_store_exclusive.rs109
-rw-r--r--yjit/src/asm/arm64/inst/logical_imm.rs154
-rw-r--r--yjit/src/asm/arm64/inst/logical_reg.rs207
-rw-r--r--yjit/src/asm/arm64/inst/mod.rs50
-rw-r--r--yjit/src/asm/arm64/inst/mov.rs155
-rw-r--r--yjit/src/asm/arm64/inst/nop.rs44
-rw-r--r--yjit/src/asm/arm64/inst/pc_rel.rs107
-rw-r--r--yjit/src/asm/arm64/inst/reg_pair.rs212
-rw-r--r--yjit/src/asm/arm64/inst/sbfm.rs103
-rw-r--r--yjit/src/asm/arm64/inst/shift_imm.rs147
-rw-r--r--yjit/src/asm/arm64/inst/sys_reg.rs86
-rw-r--r--yjit/src/asm/arm64/inst/test_bit.rs133
-rw-r--r--yjit/src/asm/arm64/mod.rs1580
-rw-r--r--yjit/src/asm/arm64/opnd.rs195
-rw-r--r--yjit/src/asm/mod.rs792
-rw-r--r--yjit/src/asm/x86_64/mod.rs1415
-rw-r--r--yjit/src/asm/x86_64/tests.rs449
-rw-r--r--yjit/src/backend/arm64/mod.rs1491
-rw-r--r--yjit/src/backend/ir.rs1576
-rw-r--r--yjit/src/backend/mod.rs8
-rw-r--r--yjit/src/backend/tests.rs331
-rw-r--r--yjit/src/backend/x86_64/mod.rs895
-rw-r--r--yjit/src/codegen.rs7721
-rw-r--r--yjit/src/core.rs2400
-rw-r--r--yjit/src/cruby.rs715
-rw-r--r--yjit/src/cruby_bindings.inc.rs1310
-rw-r--r--yjit/src/disasm.rs269
-rw-r--r--yjit/src/invariants.rs567
-rw-r--r--yjit/src/lib.rs18
-rw-r--r--yjit/src/options.rs174
-rw-r--r--yjit/src/stats.rs640
-rw-r--r--yjit/src/utils.rs274
-rw-r--r--yjit/src/virtualmem.rs443
-rw-r--r--yjit/src/yjit.rs136
-rw-r--r--yjit/yjit.mk69
5513 files changed, 906225 insertions, 296133 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
new file mode 100644
index 0000000000..05ff204541
--- /dev/null
+++ b/.appveyor.yml
@@ -0,0 +1,134 @@
+---
+version: '{build}'
+init:
+ - git config --global user.name git
+ - git config --global user.email svn-admin@ruby-lang.org
+ - git config --global core.autocrlf false
+ - git config --global core.eol lf
+ - git config --global advice.detachedHead 0
+shallow_clone: true
+clone_depth: 10
+platform:
+ - x64
+skip_commits:
+ message: /\[DOC\]/
+ files:
+ - doc/*
+ - '**/*.md'
+ - '**/*.rdoc'
+ - '**/.document'
+ - '**/*.[1-8]'
+ - '**/*.ronn'
+environment:
+ ruby_version: "24-%Platform%"
+ matrix:
+ # Test only the oldest supported version because AppVeyor is unstable, its concurrency
+ # is limited, and compatibility issues that happen only in newer versions are rare.
+  # Test other configurations on GitHub Actions instead.
+ - build: vs
+ vs: 120 # Visual Studio 2013
+ ssl: OpenSSL-v111
+ # The worker image name. This is NOT the Visual Studio version we're using here.
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+ GEMS_FOR_TEST: ""
+ RELINE_TEST_ENCODING: "UTF-8"
+cache:
+ - c:\Tools\vcpkg\installed\
+for:
+-
+ matrix:
+ only:
+ - build: vs
+ install:
+ - ver
+ - chcp
+ - SET BITS=%Platform:x86=32%
+ - SET BITS=%BITS:x=%
+ - SET OPENSSL_DIR=C:\%ssl%-Win%BITS%
+ - cd C:\Tools\vcpkg
+ - git pull -q
+ - .\bootstrap-vcpkg.bat
+ - ps: Start-FileDownload 'https://github.com/microsoft/vcpkg-tool/releases/download/2023-08-09/vcpkg.exe' -FileName 'C:\Tools\vcpkg\vcpkg.exe'
+ - cd %APPVEYOR_BUILD_FOLDER%
+ - vcpkg --triplet %Platform%-windows install --x-use-aria2 libffi libyaml readline zlib
+ - CALL SET vcvars=%%^VS%VS%COMNTOOLS^%%..\..\VC\vcvarsall.bat
+ - SET vcvars
+ - '"%vcvars%" %Platform:x64=amd64%'
+ - SET ruby_path=C:\Ruby%ruby_version:-x86=%
+ - SET PATH=\usr\local\bin;%ruby_path%\bin;%PATH%;C:\msys64\mingw64\bin;C:\msys64\usr\bin
+ - ruby --version
+ - 'cl'
+ - echo> Makefile srcdir=.
+ - echo>> Makefile MSC_VER=0
+ - echo>> Makefile RT=none
+ - echo>> Makefile RT_VER=0
+ - echo>> Makefile BUILTIN_ENCOBJS=nul
+ - type win32\Makefile.sub >> Makefile
+ - nmake %mflags% up VCSUP="echo Update OK"
+ - nmake %mflags% extract-extlibs
+ - del Makefile
+ - mkdir \usr\local\bin
+ - mkdir \usr\local\include
+ - mkdir \usr\local\lib
+ - for %%I in (%OPENSSL_DIR%\*.dll) do mklink /h \usr\local\bin\%%~nxI %%I
+ - for %%I in (c:\Tools\vcpkg\installed\%Platform%-windows\bin\*.dll) do (
+ if not %%~nI == readline mklink \usr\local\bin\%%~nxI %%I
+ )
+ - attrib +r /s /d
+ - mkdir %Platform%-mswin_%vs%
+ build_script:
+ - set HAVE_GIT=no
+ - cd %APPVEYOR_BUILD_FOLDER%
+ - cd %Platform%-mswin_%vs%
+ - >-
+ ..\win32\configure.bat
+ --with-opt-dir="/usr/local;c:/Tools/vcpkg/installed/%Platform%-windows"
+ --with-openssl-dir=%OPENSSL_DIR:\=/%
+ - nmake -l
+ - nmake install-nodoc
+ - \usr\bin\ruby -v -e "p :locale => Encoding.find('locale'), :filesystem => Encoding.find('filesystem')"
+ - if not "%GEMS_FOR_TEST%" == "" \usr\bin\gem install --no-document %GEMS_FOR_TEST%
+ - \usr\bin\ruby -ropenssl -e "puts 'Build ' + OpenSSL::OPENSSL_VERSION, 'Runtime ' + OpenSSL::OPENSSL_LIBRARY_VERSION"
+ test_script:
+ - set /a JOBS=%NUMBER_OF_PROCESSORS%
+ - nmake -l "TESTOPTS=-v -q" btest
+ - nmake -l "TESTOPTS=-v -q" test-basic
+ - >-
+ nmake -l "TESTOPTS=--timeout-scale=3.0
+ --excludes=../test/excludes/_appveyor -j%JOBS%
+ --exclude win32ole
+ --exclude test_bignum
+ --exclude test_syntax
+ --exclude test_open-uri
+ --exclude test_bundled_ca
+ " test-all
+  # Separately execute, without -j, the tests that may crash the worker when run with -j.
+ - >-
+ nmake -l
+ "TESTOPTS=--timeout-scale=3.0 --excludes=../test/excludes/_appveyor"
+ TESTS="
+ ../test/win32ole
+ ../test/ruby/test_bignum.rb
+ ../test/ruby/test_syntax.rb
+ ../test/open-uri/test_open-uri.rb
+ ../test/rubygems/test_bundled_ca.rb
+ " test-all
+ - nmake -l test-spec MSPECOPT=-fs # not using `-j` because sometimes `mspec -j` silently dies on Windows
+notifications:
+ - provider: Webhook
+ method: POST
+ url:
+ secure: CcFlJNDJ/a6to7u3Z4Fnz6dScEPNx7hTha2GkSRlV+1U6dqmxY/7uBcLXYb9gR3jfQk6w+2o/HrjNAyXMNGU/JOka3s2WRI4VKitzM+lQ08owvJIh0R7LxrGH0J2e81U # ruby-lang slack: ruby/simpler-alerts-bot
+ body: >-
+ {{^isPullRequest}}
+ {
+ "ci": "AppVeyor CI",
+ "env": "Visual Studio 2013",
+ "url": "{{buildUrl}}",
+ "commit": "{{commitId}}",
+ "branch": "{{branch}}"
+ }
+ {{/isPullRequest}}
+ on_build_success: false
+ on_build_failure: true
+ on_build_status_changed: false
diff --git a/.document b/.document
index b18ca80971..3a6b0c238c 100644
--- a/.document
+++ b/.document
@@ -10,15 +10,26 @@
# prelude
prelude.rb
rbconfig.rb
+
array.rb
ast.rb
dir.rb
gc.rb
io.rb
kernel.rb
+marshal.rb
+mjit.rb
+numeric.rb
+nilclass.rb
pack.rb
+ractor.rb
+string.rb
+symbol.rb
+timev.rb
+thread_sync.rb
trace_point.rb
warning.rb
+yjit.rb
# the lib/ directory (which has its own .document file)
lib
@@ -34,7 +45,6 @@ README.ja.md
COPYING
COPYING.ja
-CONTRIBUTING.md
LEGAL
diff --git a/.gdbinit b/.gdbinit
index f73c036299..34d044caf6 100644
--- a/.gdbinit
+++ b/.gdbinit
@@ -136,7 +136,7 @@ define rp
printf "%sT_ARRAY%s: len=%ld ", $color_type, $color_end, $len
if ($flags & RUBY_FL_USER2)
printf "(shared) shared="
- output/x ((struct RArray*)($arg0))->as.heap.aux.shared
+ output/x ((struct RArray*)($arg0))->as.heap.aux.shared_root
printf " "
else
printf "(ownership) capa=%ld ", ((struct RArray*)($arg0))->as.heap.aux.capa
@@ -156,12 +156,12 @@ define rp
else
if ($flags & RUBY_T_MASK) == RUBY_T_HASH
printf "%sT_HASH%s: ", $color_type, $color_end,
- if (((struct RHash *)($arg0))->basic->flags & RHASH_ST_TABLE_FLAG)
+ if (((struct RHash *)($arg0))->basic.flags & RHASH_ST_TABLE_FLAG)
printf "st len=%ld ", ((struct RHash *)($arg0))->as.st->num_entries
else
printf "li len=%ld bound=%ld ", \
- ((((struct RHash *)($arg0))->basic->flags & RHASH_AR_TABLE_SIZE_MASK) >> RHASH_AR_TABLE_SIZE_SHIFT), \
- ((((struct RHash *)($arg0))->basic->flags & RHASH_AR_TABLE_BOUND_MASK) >> RHASH_AR_TABLE_BOUND_SHIFT)
+ ((((struct RHash *)($arg0))->basic.flags & RHASH_AR_TABLE_SIZE_MASK) >> RHASH_AR_TABLE_SIZE_SHIFT), \
+ ((((struct RHash *)($arg0))->basic.flags & RHASH_AR_TABLE_BOUND_MASK) >> RHASH_AR_TABLE_BOUND_SHIFT)
end
print (struct RHash *)($arg0)
else
@@ -265,6 +265,10 @@ define rp
printf "%sT_ZOMBIE%s: ", $color_type, $color_end
print (struct RData *)($arg0)
else
+ if ($flags & RUBY_T_MASK) == RUBY_T_MOVED
+ printf "%sT_MOVED%s: ", $color_type, $color_end
+ print *(struct RMoved *)$arg0
+ else
printf "%sunknown%s: ", $color_type, $color_end
print (struct RBasic *)($arg0)
end
@@ -300,6 +304,7 @@ define rp
end
end
end
+ end
end
document rp
Print a Ruby's VALUE.
@@ -539,13 +544,13 @@ end
define rp_class
printf "(struct RClass *) %p", (void*)$arg0
- if ((struct RClass *)($arg0))->ptr.origin_ != $arg0
- printf " -> %p", ((struct RClass *)($arg0))->ptr.origin_
+ if RCLASS_ORIGIN((struct RClass *)($arg0)) != $arg0
+ printf " -> %p", RCLASS_ORIGIN((struct RClass *)($arg0))
end
printf "\n"
rb_classname $arg0
print/x *(struct RClass *)($arg0)
- print *((struct RClass *)($arg0))->ptr
+ print *RCLASS_EXT((struct RClass *)($arg0))
end
document rp_class
Print the content of a Class/Module.
@@ -974,8 +979,8 @@ end
define rb_ps_vm
print $ps_vm = (rb_vm_t*)$arg0
- set $ps_thread_ln = $ps_vm->living_threads.n.next
- set $ps_thread_ln_last = $ps_vm->living_threads.n.prev
+ set $ps_thread_ln = $ps_vm->ractor.main_ractor.threads.set.n.next
+ set $ps_thread_ln_last = $ps_vm->ractor.main_ractor.threads.set.n.prev
while 1
set $ps_thread_th = (rb_thread_t *)$ps_thread_ln
set $ps_thread = (VALUE)($ps_thread_th->self)
@@ -1097,11 +1102,11 @@ define print_id
set $arylen = $ary->as.heap.len
end
set $result = $aryptr[($serial % ID_ENTRY_UNIT) * ID_ENTRY_SIZE + $t]
- if $result != RUBY_Qnil
+ if $result != RUBY_Qnil
print_string $result
- else
- echo undef
- end
+ else
+ echo undef
+ end
end
end
end
@@ -1319,8 +1324,7 @@ define print_flags
printf "RUBY_FL_PROMOTED0 : %s\n", ((struct RBasic*)($arg0))->flags & RUBY_FL_PROMOTED0 ? "1" : "0"
printf "RUBY_FL_PROMOTED1 : %s\n", ((struct RBasic*)($arg0))->flags & RUBY_FL_PROMOTED1 ? "1" : "0"
printf "RUBY_FL_FINALIZE : %s\n", ((struct RBasic*)($arg0))->flags & RUBY_FL_FINALIZE ? "1" : "0"
- printf "RUBY_FL_TAINT : %s\n", ((struct RBasic*)($arg0))->flags & RUBY_FL_TAINT ? "1" : "0"
- printf "RUBY_FL_UNTRUSTED : %s\n", ((struct RBasic*)($arg0))->flags & RUBY_FL_UNTRUSTED ? "1" : "0"
+ printf "RUBY_FL_SHAREABLE : %s\n", ((struct RBasic*)($arg0))->flags & RUBY_FL_SHAREABLE ? "1" : "0"
printf "RUBY_FL_EXIVAR : %s\n", ((struct RBasic*)($arg0))->flags & RUBY_FL_EXIVAR ? "1" : "0"
printf "RUBY_FL_FREEZE : %s\n", ((struct RBasic*)($arg0))->flags & RUBY_FL_FREEZE ? "1" : "0"
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000000..6c5eac5a0f
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,23 @@
+# This file is used by GitHub to ignore the following commits in `git blame`.
+#
+# You can also do the same thing in your local repository with:
+# $ git config --local blame.ignoreRevsFile .git-blame-ignore-revs
+
+# Expand tabs
+5b21e94bebed90180d8ff63dad03b8b948361089
+
+# Enable Style/StringLiterals cop for RubyGems/Bundler
+d7ffd3fea402239b16833cc434404a7af82d44f3
+
+# [ruby/digest] Revert tab-expansion in external files
+48b09aae7ec5632209229dcc294dd0d75a93a17f
+8a65cf3b61c60e4cb886f59a73ff6db44364bfa9
+39dc9f9093901d40d2998653948d5da38b18ee2c
+
+# [ruby/io-nonblock] Revert tab expansion
+f28287d34c03f472ffe90ea262bdde9affd4b965
+0d842fecb4f75ab3b1d4097ebdb8e88f51558041
+4ba2c66761d6a293abdfba409241d31063cefd62
+
+# Make benchmark indentation consistent
+fc4acf8cae82e5196186d3278d831f2438479d91
diff --git a/.github/codeql/codeql-config.yml b/.github/codeql/codeql-config.yml
new file mode 100644
index 0000000000..91f82b842b
--- /dev/null
+++ b/.github/codeql/codeql-config.yml
@@ -0,0 +1,3 @@
+name: "CodeQL config for the Ruby language"
+
+languages: cpp
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000..bc63aca35b
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,6 @@
+version: 2
+updates:
+ - package-ecosystem: 'github-actions'
+ directory: '/'
+ schedule:
+ interval: 'monthly'
diff --git a/.github/workflows/baseruby.yml b/.github/workflows/baseruby.yml
new file mode 100644
index 0000000000..ebaafe3bf0
--- /dev/null
+++ b/.github/workflows/baseruby.yml
@@ -0,0 +1,80 @@
+name: BASERUBY Check
+
+on:
+ push:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ pull_request:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ merge_group:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+permissions:
+ contents: read
+
+jobs:
+ baseruby:
+ name: BASERUBY
+ runs-on: ubuntu-22.04
+ if: ${{ !contains(github.event.head_commit.message, '[DOC]') && !contains(github.event.pull_request.labels.*.name, 'Documentation') }}
+ strategy:
+ matrix:
+ ruby:
+ - ruby-2.2
+# - ruby-2.3
+# - ruby-2.4
+# - ruby-2.5
+# - ruby-2.6
+# - ruby-2.7
+ - ruby-3.0
+ - ruby-3.1
+
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ path: .downloaded-cache
+ key: downloaded-cache
+ - uses: ruby/setup-ruby@13e7a03dc3ac6c3798f4570bfead2aed4d96abfb # v1.244.0
+ with:
+ ruby-version: ${{ matrix.ruby }}
+ bundler: none
+ - run: echo "GNUMAKEFLAGS=-j$((1 + $(nproc --all)))" >> $GITHUB_ENV
+ - run: sudo apt-get install build-essential autoconf bison libyaml-dev
+ - run: ./autogen.sh
+ - run: ./configure --disable-install-doc
+ - run: make common-srcs
+ - run: make incs
+ - run: make all
+ - run: make test
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
+ with:
+ payload: |
+ {
+ "ci": "GitHub Actions",
+ "env": "${{ github.workflow }} / BASERUBY @ ${{ matrix.ruby }}",
+ "url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
+ "commit": "${{ github.sha }}",
+ "branch": "${{ github.ref_name }}"
+ }
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
+ if: ${{ failure() && github.event_name == 'push' }}
diff --git a/.github/workflows/bundled_gems.yml b/.github/workflows/bundled_gems.yml
new file mode 100644
index 0000000000..070c0fa1dd
--- /dev/null
+++ b/.github/workflows/bundled_gems.yml
@@ -0,0 +1,166 @@
+name: bundled_gems
+
+on:
+ push:
+ branches: [ "master" ]
+ paths:
+ - '.github/workflows/bundled_gems.yml'
+ - 'gems/bundled_gems'
+ pull_request:
+ branches: [ "master" ]
+ paths:
+ - '.github/workflows/bundled_gems.yml'
+ - 'gems/bundled_gems'
+ merge_group:
+ branches: [ "master" ]
+ paths:
+ - '.github/workflows/bundled_gems.yml'
+ - 'gems/bundled_gems'
+ schedule:
+ - cron: '45 6 * * *'
+ workflow_dispatch:
+
+permissions: # added using https://github.com/step-security/secure-workflows
+ contents: read
+
+jobs:
+ update:
+ permissions:
+ contents: write # for Git to git push
+ if: ${{ github.event_name != 'schedule' || github.repository == 'ruby/ruby' }}
+ name: update ${{ github.workflow }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: git config
+ run: |
+ git config --global advice.detachedHead 0
+ git config --global init.defaultBranch garbage
+
+ - name: Set ENV
+ run: |
+ echo "GNUMAKEFLAGS=-j$((1 + $(nproc --all)))" >> $GITHUB_ENV
+ echo "TODAY=$(date +%F)" >> $GITHUB_ENV
+
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ path: .downloaded-cache
+ key: downloaded-cache-${{ github.sha }}
+ restore-keys: |
+ downloaded-cache
+
+ - name: Download previous gems list
+ run: |
+ data=bundled_gems.json
+ mkdir -p .downloaded-cache
+ ln -s .downloaded-cache/$data .
+ curl -O -R -z ./$data https://stdgems.org/$data
+
+ - name: Update bundled gems list
+ run: |
+ ruby -i~ tool/update-bundled_gems.rb gems/bundled_gems
+
+ - name: Maintain updated gems list in NEWS
+ run: |
+ #!ruby
+ require 'json'
+ news = File.read("NEWS.md")
+ prev = news[/since the \*+(\d+\.\d+\.\d+)\*+/, 1]
+ prevs = [prev, prev.sub(/\.\d+\z/, '')]
+ %W[bundled].each do |type|
+ last = JSON.parse(File.read("#{type}_gems.json"))['gems'].filter_map do |g|
+ v = g['versions'].values_at(*prevs).compact.first
+ g = g['gem']
+ g = 'RubyGems' if g == 'rubygems'
+ [g, v] if v
+ end.to_h
+ changed = File.foreach("gems/#{type}_gems").filter_map do |l|
+ next if l.start_with?("#")
+ g, v = l.split(" ", 3)
+ [g, v] unless last[g] == v
+ end
+ changed, added = changed.partition {|g, _| last[g]}
+ news.sub!(/^\*( +)The following #{type} gems? are updated\.\n+\K(?: \1\*( +).*\n)*/) do
+ mark = "#{$1} *#{$2}"
+ changed.map {|g, v|"#{mark}#{g} #{v}\n"}.join("")
+ end or next
+ news.sub!(/^\*( +)The following default gems are now bundled gems\.\n+\K(?: \1\*( +).*\n)*/) do
+ mark = "#{$1} *#{$2}"
+ added.map {|g, v|"#{mark}#{g} #{v}\n"}.join("")
+ end or next unless added.empty?
+ File.write("NEWS.md", news)
+ end
+ shell: ruby {0}
+
+ - name: Check diffs
+ id: diff
+ run: |
+ git add -- NEWS.md
+ git diff --no-ext-diff --ignore-submodules --quiet -- gems/bundled_gems
+ continue-on-error: true
+
+ - name: Install libraries
+ run: |
+ set -x
+ sudo apt-get update -q || :
+ sudo apt-get install --no-install-recommends -q -y build-essential libssl-dev libyaml-dev libreadline6-dev zlib1g-dev libncurses5-dev libffi-dev bison autoconf ruby
+ if: ${{ steps.diff.outcome == 'failure' }}
+
+ - name: Build
+ run: |
+ ./autogen.sh
+ ./configure -C --disable-install-doc
+ make
+ if: ${{ steps.diff.outcome == 'failure' }}
+
+ - name: Prepare bundled gems
+ run: |
+ make -s prepare-gems
+ if: ${{ steps.diff.outcome == 'failure' }}
+
+ - name: Test bundled gems
+ run: |
+ make -s test-bundled-gems
+ git add -- gems/bundled_gems
+ timeout-minutes: 30
+ env:
+ RUBY_TESTOPTS: "-q --tty=no"
+ TEST_BUNDLED_GEMS_ALLOW_FAILURES: ""
+ if: ${{ steps.diff.outcome == 'failure' }}
+
+ - name: Show diffs
+ id: show
+ run: |
+ git diff --cached --color --no-ext-diff --ignore-submodules --exit-code --
+ continue-on-error: true
+
+ - name: Commit
+ run: |
+ git pull --ff-only origin ${GITHUB_REF#refs/heads/}
+ message="Update bundled gems list at "
+ if [ ${{ steps.diff.outcome }} = success ]; then
+ git commit --message="${message}${GITHUB_SHA:0:30} [ci skip]"
+ else
+ git commit --message="${message}${TODAY}"
+ fi
+ git push origin ${GITHUB_REF#refs/heads/}
+ env:
+ EMAIL: svn-admin@ruby-lang.org
+ GIT_AUTHOR_NAME: git
+ GIT_COMMITTER_NAME: git
+ if: ${{ github.repository == 'ruby/ruby' && !startsWith(github.event_name, 'pull') && steps.show.outcome == 'failure' }}
+
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
+ with:
+ payload: |
+ {
+ "ci": "GitHub Actions",
+ "env": "${{ github.workflow }} / update",
+ "url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
+ "commit": "${{ github.sha }}",
+ "branch": "${{ github.ref_name }}"
+ }
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
+ if: ${{ failure() && github.event_name == 'push' }}
diff --git a/.github/workflows/check_branch.yml b/.github/workflows/check_branch.yml
deleted file mode 100644
index 37cf3a9a8f..0000000000
--- a/.github/workflows/check_branch.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-# We bidirectionally synchronize github.com/ruby/ruby.git's master branch and
-# git.ruby-lang.org/ruby.git's master branch.
-# We can use a pull request's merge button only on the master branch.
-#
-# Therefore, we require to pass this "check_branch" on all protected branches
-# to prevent us from accidentally pushing commits to GitHub directly.
-#
-# Details: https://bugs.ruby-lang.org/issues/16094
-name: Pull Request
-on: [pull_request]
-jobs:
- check_branch:
- runs-on: ubuntu-latest
- steps:
- - name: Check if branch is master
- run: |
- if [ "$BASE_REF" != master ]; then
- echo "Only master branch accepts a pull request, but it's '$BASE_REF'."
- exit 1
- fi
- env:
- BASE_REF: ${{ github.base_ref }}
diff --git a/.github/workflows/check_dependencies.yml b/.github/workflows/check_dependencies.yml
new file mode 100644
index 0000000000..79b2916feb
--- /dev/null
+++ b/.github/workflows/check_dependencies.yml
@@ -0,0 +1,78 @@
+name: Check Dependencies
+on:
+ push:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ pull_request:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ merge_group:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+permissions:
+ contents: read
+
+jobs:
+ update-deps:
+ strategy:
+ matrix:
+ os: [ubuntu-22.04]
+ fail-fast: true
+ runs-on: ${{ matrix.os }}
+ if: ${{ !contains(github.event.head_commit.message, '[DOC]') && !contains(github.event.pull_request.labels.*.name, 'Documentation') }}
+ steps:
+ - name: Install libraries
+ run: |
+ set -x
+ sudo apt-get update -q || :
+ sudo apt-get install --no-install-recommends -q -y build-essential libssl-dev libyaml-dev libreadline6-dev zlib1g-dev libncurses5-dev libffi-dev bison autoconf ruby
+ if: ${{ contains(matrix.os, 'ubuntu') }}
+ - name: Install libraries
+ run: |
+ brew install gmp libffi openssl@1.1 zlib autoconf automake libtool readline
+ if: ${{ contains(matrix.os, 'macos') }}
+ - name: git config
+ run: |
+ git config --global advice.detachedHead 0
+ git config --global init.defaultBranch garbage
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ path: .downloaded-cache
+ key: downloaded-cache
+ - run: ./autogen.sh
+ - name: Run configure
+ run: ./configure -C --disable-install-doc --disable-rubygems --with-gcc 'optflags=-O0' 'debugflags=-save-temps=obj -g'
+ - run: make all golf
+ - run: ruby tool/update-deps --fix
+ - run: git diff --no-ext-diff --ignore-submodules --exit-code
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
+ with:
+ payload: |
+ {
+ "ci": "GitHub Actions",
+ "env": "${{ matrix.os }} / Dependencies need to update",
+ "url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
+ "commit": "${{ github.sha }}",
+ "branch": "${{ github.ref_name }}"
+ }
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
+ if: ${{ failure() && github.event_name == 'push' }}
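
Every workflow in this patch gains the same `concurrency` stanza, which reads as a ternary built from `&&`/`||`: pull-request-like events key the group on the branch name (so a superseded run is cancelled by the next push to the PR), while plain pushes key on the commit SHA (so nothing is ever cancelled). Annotated for clarity:

    concurrency:
      # pull_request / pull_request_target: one group per branch, older runs cancelled
      # push: one group per commit SHA, never cancelled
      group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
      cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
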
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 0000000000..8dba76fbe2
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,75 @@
+name: "Code scanning - action"
+
+on:
+ # push:
+ # paths-ignore:
+ # - 'doc/**'
+ # - '**/man'
+ # - '**.md'
+ # - '**.rdoc'
+ # - '**/.document'
+ # pull_request:
+ # paths-ignore:
+ # - 'doc/**'
+ # - '**/man'
+ # - '**.md'
+ # - '**.rdoc'
+ # - '**/.document'
+ schedule:
+ - cron: '0 12 * * *'
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+permissions: # added using https://github.com/step-security/secure-workflows
+ contents: read
+
+jobs:
+ CodeQL-Build:
+
+ # CodeQL runs on ubuntu-latest and windows-latest
+ permissions:
+ actions: read # for github/codeql-action/init to get workflow details
+ contents: read # for actions/checkout to fetch code
+ security-events: write # for github/codeql-action/autobuild to send a status report
+ runs-on: ubuntu-latest
+ # CodeQL fails to run pull requests from dependabot due to missing write access to upload results.
+ if: ${{ !contains(github.event.head_commit.message, '[DOC]') && !contains(github.event.pull_request.labels.*.name, 'Documentation') && github.event.head_commit.pusher.name != 'dependabot[bot]' }}
+
+ env:
+ enable_install_doc: no
+
+ steps:
+ - name: Install libraries
+ run: |
+ set -x
+ sudo apt-get update -q || :
+ sudo apt-get install --no-install-recommends -q -y build-essential libssl-dev libyaml-dev libreadline6-dev zlib1g-dev libncurses5-dev libffi-dev bison autoconf ruby
+
+ - name: Checkout repository
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ path: .downloaded-cache
+ key: downloaded-cache
+
+ - name: Remove an obsolete rubygems vendored file
+ run: sudo rm /usr/lib/ruby/vendor_ruby/rubygems/defaults/operating_system.rb
+
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@959cbb7472c4d4ad70cdfe6f4976053fe48ab394 # v2.1.37
+ with:
+ config-file: ./.github/codeql/codeql-config.yml
+ trap-caching: false
+
+ - name: Set ENV
+ run: echo "GNUMAKEFLAGS=-j$((1 + $(nproc --all)))" >> $GITHUB_ENV
+
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@959cbb7472c4d4ad70cdfe6f4976053fe48ab394 # v2.1.37
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@959cbb7472c4d4ad70cdfe6f4976053fe48ab394 # v2.1.37
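
The `Set ENV` step above is the supported replacement for the deprecated `::set-env` workflow command: appending `KEY=value` lines to the file named by `$GITHUB_ENV` exports them to every later step in the same job. A minimal sketch:

    - name: Set ENV
      run: echo "GNUMAKEFLAGS=-j$((1 + $(nproc --all)))" >> $GITHUB_ENV
    - run: make        # GNU make reads GNUMAKEFLAGS, so this runs as make -j<N+1>
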
diff --git a/.github/workflows/compilers.yml b/.github/workflows/compilers.yml
index ab79032a39..caf12cc0f4 100644
--- a/.github/workflows/compilers.yml
+++ b/.github/workflows/compilers.yml
@@ -1,19 +1,42 @@
name: Compilations
-on: [push, pull_request]
+on:
+ push:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ pull_request:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.rdoc'
+ - '**/.document'
+ merge_group:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.rdoc'
+ - '**/.document'
-# Github actions does not support YAML anchors. This creative use of
-# environment variables (plus the "echo ::set-env" hack) is to reroute that
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+# GitHub Actions does not support YAML anchors. This creative use of
+# environment variables (plus the "echo $GITHUB_ENV" hack) is to route around that
# restriction.
env:
- default_cc: clang-11
+ default_cc: clang-15
append_cc: ''
   # -O1 is faster than -O3 in our tests... The majority of the time is consumed trying
- # to optimize binaries. Also Github Actions runs on a relatively modern CPU
+ # to optimize binaries. Also GitHub Actions run on relatively modern CPUs
# compared to, say, GCC 4 or Clang 3. We don't specify `-march=native`
   # because compilers tend not to understand what the CPU is.
- optflags: '-O1 -march=x86-64 -mtune=generic'
+ optflags: '-O1'
   # -g0 disables backtraces on SEGV. Do not set that.
debugflags: '-ggdb3'
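
Since anchors are unavailable, the values above act as repo-wide defaults that individual matrix entries shadow: the job below (see the hunk that follows) sets `env: ${{ matrix.entry.env || matrix.env }}`, and job-level env wins over workflow-level env. A minimal sketch of the pattern, trimmed to one entry:

    env:
      optflags: '-O1'                  # workflow-wide default
    jobs:
      compile:
        strategy:
          matrix:
            env: [{}]                  # fallback when an entry carries no env
            entry:
              - { name: '-O0', env: { optflags: '-O0 -march=x86-64 -mtune=generic' } }
        env: ${{ matrix.entry.env || matrix.env }}   # entry env overrides the default
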
@@ -33,170 +56,226 @@ env:
UNICODE_AUXILIARY_FILES=.
UNICODE_EMOJI_FILES=.
CONFIGURE_TTY: never
+ GITPULLOPTIONS: --no-tags origin ${{github.ref}}
RUBY_DEBUG: ci rgengc
RUBY_TESTOPTS: >-
-q
--color=always
--tty=no
+permissions:
+ contents: read
+
jobs:
compile:
strategy:
fail-fast: false
matrix:
+ env:
+ - {}
entry:
- - { key: default_cc, name: gcc-10, value: gcc-10 }
- - { key: default_cc, name: gcc-9, value: gcc-9 }
- - { key: default_cc, name: gcc-8, value: gcc-8 }
- - { key: default_cc, name: gcc-7, value: gcc-7 }
- - { key: default_cc, name: gcc-6, value: gcc-6 }
- - { key: default_cc, name: gcc-5, value: gcc-5 }
- - { key: default_cc, name: gcc-4.8, value: gcc-4.8 }
- - { key: default_cc, name: clang-11, value: clang-11 }
- - { key: default_cc, name: clang-10, value: clang-10 }
- - { key: default_cc, name: clang-9, value: clang-9 }
- - { key: default_cc, name: clang-8, value: clang-8 }
- - { key: default_cc, name: clang-7, value: clang-7 }
- - { key: default_cc, name: clang-6.0, value: clang-6.0 }
- - { key: default_cc, name: clang-5.0, value: clang-5.0 }
- - { key: default_cc, name: clang-4.0, value: clang-4.0 }
- - { key: default_cc, name: clang-3.9, value: clang-3.9 }
-
- - { key: append_cc, name: c99, value: '-std=c99 -Werror=pedantic -pedantic-errors' }
- - { key: append_cc, name: c11, value: '-std=c11 -Werror=pedantic -pedantic-errors' }
- - { key: append_cc, name: c17, value: '-std=c17 -Werror=pedantic -pedantic-errors' }
- - { key: append_cc, name: c2x, value: '-std=c2x -Werror=pedantic -pedantic-errors' }
- - { key: CXXFLAGS, name: c++98, value: '-std=c++98 -Werror=pedantic -pedantic-errors -Wno-c++11-long-long' }
- - { key: CXXFLAGS, name: c++11, value: '-std=c++11 -Werror=pedantic -pedantic-errors -Wno-c++11-long-long' }
- - { key: CXXFLAGS, name: c++14, value: '-std=c++14 -Werror=pedantic -pedantic-errors -Wno-c++11-long-long' }
- - { key: CXXFLAGS, name: c++17, value: '-std=c++17 -Werror=pedantic -pedantic-errors -Wno-c++11-long-long' }
- - { key: CXXFLAGS, name: c++2a, value: '-std=c++2a -Werror=pedantic -pedantic-errors -Wno-c++11-long-long' }
-
- - { key: optflags, name: '-O0', value: '-O0 -march=x86-64 -mtune=generic' }
- - { key: optflags, name: '-O3', value: '-O3 -march=x86-64 -mtune=generic' }
-
- - { key: append_configure, name: gmp, value: '--with-gmp' }
- - { key: append_configure, name: jemalloc, value: '--with-jemalloc' }
- - { key: append_configure, name: valgrind, value: '--with-valgrind' }
- - { key: append_configure, name: 'coroutine=ucontext', value: '--with-coroutine=ucontext' }
- - { key: append_configure, name: 'coroutine=copy', value: '--with-coroutine=copy' }
- - { key: append_configure, name: disable-mathn, value: '--disable-mathn' }
- - { key: append_configure, name: disable-jit-support, value: '--disable-jit-support' }
- - { key: append_configure, name: disable-dln, value: '--disable-dln' }
- - { key: append_configure, name: disable-rubygems, value: '--disable-rubygems' }
-
- - { key: cppflags, name: OPT_THREADED_CODE=1, value: '-DOPT_THREADED_CODE=1' }
- - { key: cppflags, name: OPT_THREADED_CODE=2, value: '-DOPT_THREADED_CODE=2' }
- - { key: cppflags, name: OPT_THREADED_CODE=3, value: '-DOPT_THREADED_CODE=3' }
-
- - { key: cppflags, name: NDEBUG, value: '-DNDEBUG' }
- - { key: cppflags, name: RUBY_DEBUG, value: '-DRUBY_DEBUG' }
- - { key: cppflags, name: ARRAY_DEBUG, value: '-DARRAY_DEBUG' }
- - { key: cppflags, name: BUGNUM_DEBUG, value: '-DBUGNUM_DEBUG' }
- - { key: cppflags, name: CCAN_LIST_DEBUG, value: '-DCCAN_LIST_DEBUG' }
- - { key: cppflags, name: CPDEBUG=-1, value: '-DCPDEBUG=-1' }
- - { key: cppflags, name: ENC_DEBUG, value: '-DENC_DEBUG' }
- - { key: cppflags, name: GC_DEBUG, value: '-DGC_DEBUG' }
- - { key: cppflags, name: HASH_DEBUG, value: '-DHASH_DEBUG' }
- - { key: cppflags, name: ID_TABLE_DEBUG, value: '-DID_TABLE_DEBUG' }
- - { key: cppflags, name: RGENGC_DEBUG=-1, value: '-DRGENGC_DEBUG=-1' }
- - { key: cppflags, name: SYMBOL_DEBUG, value: '-DSYMBOL_DEBUG' }
- - { key: cppflags, name: THREAD_DEBUG=-1, value: '-DTHREAD_DEBUG=-1' }
-
- - { key: cppflags, name: RGENGC_CHECK_MODE, value: '-DRGENGC_CHECK_MODE' }
- - { key: cppflags, name: TRANSIENT_HEAP_CHECK_MODE, value: '-DTRANSIENT_HEAP_CHECK_MODE' }
- - { key: cppflags, name: VM_CHECK_MODE, value: '-DVM_CHECK_MODE' }
-
- - { key: cppflags, name: USE_EMBED_CI=0, value: '-DUSE_EMBED_CI=0' }
- - { key: cppflags, name: USE_FLONUM=0, value: '-DUSE_FLONUM=0' }
-# - { key: cppflags, name: USE_GC_MALLOC_OBJ_INFO_DETAILS, value: '-DUSE_GC_MALLOC_OBJ_INFO_DETAILS' }
- - { key: cppflags, name: USE_LAZY_LOAD, value: '-DUSE_LAZY_LOAD' }
- - { key: cppflags, name: USE_RINCGC=0, value: '-DUSE_RINCGC=0' }
- - { key: cppflags, name: USE_SYMBOL_GC=0, value: '-DUSE_SYMBOL_GC=0' }
- - { key: cppflags, name: USE_THREAD_CACHE=0, value: '-DUSE_THREAD_CACHE=0' }
- - { key: cppflags, name: USE_TRANSIENT_HEAP=0, value: '-DUSE_TRANSIENT_HEAP=0' }
-
- - { key: cppflags, name: DEBUG_FIND_TIME_NUMGESS, value: '-DDEBUG_FIND_TIME_NUMGESS' }
- - { key: cppflags, name: DEBUG_INTEGER_PACK, value: '-DDEBUG_INTEGER_PACK' }
- - { key: cppflags, name: ENABLE_PATH_CHECK, value: '-DENABLE_PATH_CHECK' }
-
- - { key: cppflags, name: GC_DEBUG_STRESS_TO_CLASS, value: '-DGC_DEBUG_STRESS_TO_CLASS' }
- - { key: cppflags, name: GC_ENABLE_LAZY_SWEEP=0, value: '-DGC_ENABLE_LAZY_SWEEP=0' }
- - { key: cppflags, name: GC_PROFILE_DETAIL_MEMOTY, value: '-DGC_PROFILE_DETAIL_MEMOTY' }
- - { key: cppflags, name: GC_PROFILE_MORE_DETAIL, value: '-DGC_PROFILE_MORE_DETAIL' }
-
- - { key: cppflags, name: CALC_EXACT_MALLOC_SIZE, value: '-DCALC_EXACT_MALLOC_SIZE' }
- - { key: cppflags, name: MALLOC_ALLOCATED_SIZE_CHECK, value: '-DMALLOC_ALLOCATED_SIZE_CHECK' }
-
- - { key: cppflags, name: IBF_ISEQ_ENABLE_LOCAL_BUFFER, value: '-DIBF_ISEQ_ENABLE_LOCAL_BUFFER' }
-
- - { key: cppflags, name: RGENGC_ESTIMATE_OLDMALLOC, value: '-DRGENGC_ESTIMATE_OLDMALLOC' }
- - { key: cppflags, name: RGENGC_FORCE_MAJOR_GC, value: '-DRGENGC_FORCE_MAJOR_GC' }
- - { key: cppflags, name: RGENGC_OBJ_INFO, value: '-DRGENGC_OBJ_INFO' }
- - { key: cppflags, name: RGENGC_OLD_NEWOBJ_CHECK, value: '-DRGENGC_OLD_NEWOBJ_CHECK' }
- - { key: cppflags, name: RGENGC_PROFILE, value: '-DRGENGC_PROFILE' }
-
- - { key: cppflags, name: VM_DEBUG_BP_CHECK, value: '-DVM_DEBUG_BP_CHECK' }
- - { key: cppflags, name: VM_DEBUG_VERIFY_METHOD_CACHE, value: '-DVM_DEBUG_VERIFY_METHOD_CACHE' }
+ - { name: gcc-12, env: { default_cc: gcc-12 } }
+ - { name: gcc-11, env: { default_cc: gcc-11 } }
+ - { name: gcc-10, env: { default_cc: gcc-10 } }
+ - { name: gcc-9, env: { default_cc: gcc-9 } }
+ - { name: gcc-8, env: { default_cc: gcc-8 } }
+ - { name: gcc-7, env: { default_cc: gcc-7 } }
+ - name: 'gcc-13 LTO'
+ container: gcc-13
+ env:
+ default_cc: 'gcc-13 -flto=auto -ffat-lto-objects -Werror=lto-type-mismatch'
+ optflags: '-O2'
+ shared: disable
+ # check: true
+ - { name: clang-16, env: { default_cc: clang-16 } }
+ - { name: clang-15, env: { default_cc: clang-15 } }
+ - { name: clang-14, env: { default_cc: clang-14 } }
+ - { name: clang-13, env: { default_cc: clang-13 } }
+ - { name: clang-12, env: { default_cc: clang-12 } }
+ - { name: clang-11, env: { default_cc: clang-11 } }
+ - { name: clang-10, env: { default_cc: clang-10 } }
+ # llvm-objcopy<=9 doesn't have --wildcard. It compiles, but leaves Rust symbols in libyjit.o.
+ - { name: clang-9, env: { default_cc: clang-9, append_configure: '--disable-yjit' } }
+ - { name: clang-8, env: { default_cc: clang-8, append_configure: '--disable-yjit' } }
+ - { name: clang-7, env: { default_cc: clang-7, append_configure: '--disable-yjit' } }
+ - { name: clang-6.0, env: { default_cc: clang-6.0, append_configure: '--disable-yjit' } }
+ - name: 'clang-16 LTO'
+ container: clang-16
+ env:
+ default_cc: 'clang-16 -flto=auto'
+ optflags: '-O2'
+ shared: disable
+ # check: true
+
+# - { name: aarch64-linux-gnu, crosshost: aarch64-linux-gnu, container: crossbuild-essential-arm64 }
+# - { name: arm-linux-gnueabi, crosshost: arm-linux-gnueabi }
+# - { name: arm-linux-gnueabihf, crosshost: arm-linux-gnueabihf }
+# - { name: i686-w64-mingw32, crosshost: i686-w64-mingw32 }
+# - { name: powerpc-linux-gnu, crosshost: powerpc-linux-gnu }
+# - { name: powerpc64le-linux-gnu, crosshost: powerpc64le-linux-gnu, container: crossbuild-essential-ppc64el }
+# - { name: s390x-linux-gnu, crosshost: s390x-linux-gnu, container: crossbuild-essential-s390x }
+# - { name: x86_64-w64-mingw32, crosshost: x86_64-w64-mingw32, container: mingw-w64 }
+
+ # -Wno-strict-prototypes is necessary with current clang-15 since
+      # older autoconf generates functions without prototypes, and -pedantic
+ # now implies strict-prototypes. Disabling the error but leaving the
+ # warning generates a lot of noise from use of ANYARGS in
+ # rb_define_method() and friends.
+ # See: https://github.com/llvm/llvm-project/commit/11da1b53d8cd3507959022cd790d5a7ad4573d94
+ - { name: c99, env: { append_cc: '-std=c99 -Werror=pedantic -pedantic-errors -Wno-strict-prototypes' } }
+# - { name: c11, env: { append_cc: '-std=c11 -Werror=pedantic -pedantic-errors -Wno-strict-prototypes' } }
+# - { name: c17, env: { append_cc: '-std=c17 -Werror=pedantic -pedantic-errors -Wno-strict-prototypes' } }
+ - { name: c2x, env: { append_cc: '-std=c2x -Werror=pedantic -pedantic-errors -Wno-strict-prototypes' } }
+ - { name: c++98, env: { CXXFLAGS: '-std=c++98 -Werror=pedantic -pedantic-errors -Wno-c++11-long-long' } }
+# - { name: c++11, env: { CXXFLAGS: '-std=c++11 -Werror=pedantic -pedantic-errors -Wno-c++11-long-long' } }
+# - { name: c++14, env: { CXXFLAGS: '-std=c++14 -Werror=pedantic -pedantic-errors -Wno-c++11-long-long' } }
+# - { name: c++17, env: { CXXFLAGS: '-std=c++17 -Werror=pedantic -pedantic-errors -Wno-c++11-long-long' } }
+ - { name: c++2a, env: { CXXFLAGS: '-std=c++2a -Werror=pedantic -pedantic-errors -Wno-c++11-long-long' } }
+
+ - { name: '-O0', env: { optflags: '-O0 -march=x86-64 -mtune=generic' } }
+# - { name: '-O3', env: { optflags: '-O3 -march=x86-64 -mtune=generic' }, check: true }
+
+ - { name: gmp, env: { append_configure: '--with-gmp' } }
+ - { name: jemalloc, env: { append_configure: '--with-jemalloc' } }
+ - { name: valgrind, env: { append_configure: '--with-valgrind' } }
+ - { name: 'coroutine=ucontext', env: { append_configure: '--with-coroutine=ucontext' } }
+ - { name: 'coroutine=pthread', env: { append_configure: '--with-coroutine=pthread' } }
+ - { name: disable-jit-support, env: { append_configure: '--disable-jit-support' } }
+ - { name: disable-dln, env: { append_configure: '--disable-dln' } }
+ - { name: enable-mkmf-verbose, env: { append_configure: '--enable-mkmf-verbose' } }
+ - { name: disable-rubygems, env: { append_configure: '--disable-rubygems' } }
+ - { name: RUBY_DEVEL, env: { append_configure: '--enable-devel' } }
+
+ - { name: OPT_THREADED_CODE=1, env: { cppflags: '-DOPT_THREADED_CODE=1' } }
+ - { name: OPT_THREADED_CODE=2, env: { cppflags: '-DOPT_THREADED_CODE=2' } }
+ - { name: OPT_THREADED_CODE=3, env: { cppflags: '-DOPT_THREADED_CODE=3' } }
+
+ - { name: NDEBUG, env: { cppflags: '-DNDEBUG' } }
+ - { name: RUBY_DEBUG, env: { cppflags: '-DRUBY_DEBUG' } }
+# - { name: ARRAY_DEBUG, env: { cppflags: '-DARRAY_DEBUG' } }
+# - { name: BIGNUM_DEBUG, env: { cppflags: '-DBIGNUM_DEBUG' } }
+# - { name: CCAN_LIST_DEBUG, env: { cppflags: '-DCCAN_LIST_DEBUG' } }
+# - { name: CPDEBUG=-1, env: { cppflags: '-DCPDEBUG=-1' } }
+# - { name: ENC_DEBUG, env: { cppflags: '-DENC_DEBUG' } }
+# - { name: GC_DEBUG, env: { cppflags: '-DGC_DEBUG' } }
+# - { name: HASH_DEBUG, env: { cppflags: '-DHASH_DEBUG' } }
+# - { name: ID_TABLE_DEBUG, env: { cppflags: '-DID_TABLE_DEBUG' } }
+# - { name: RGENGC_DEBUG=-1, env: { cppflags: '-DRGENGC_DEBUG=-1' } }
+# - { name: SYMBOL_DEBUG, env: { cppflags: '-DSYMBOL_DEBUG' } }
+
+# - { name: RGENGC_CHECK_MODE, env: { cppflags: '-DRGENGC_CHECK_MODE' } }
+# - { name: TRANSIENT_HEAP_CHECK_MODE, env: { cppflags: '-DTRANSIENT_HEAP_CHECK_MODE' } }
+# - { name: VM_CHECK_MODE, env: { cppflags: '-DVM_CHECK_MODE' } }
+
+ - { name: USE_EMBED_CI=0, env: { cppflags: '-DUSE_EMBED_CI=0' } }
+      - name: USE_FLONUM=0
+ env:
+ cppflags: '-DUSE_FLONUM=0'
+ # yjit requires FLONUM for the pointer tagging scheme
+ append_configure: '--disable-yjit'
+# - { name: USE_GC_MALLOC_OBJ_INFO_DETAILS, env: { cppflags: '-DUSE_GC_MALLOC_OBJ_INFO_DETAILS' } }
+ - { name: USE_LAZY_LOAD, env: { cppflags: '-DUSE_LAZY_LOAD' } }
+# - { name: USE_RINCGC=0, env: { cppflags: '-DUSE_RINCGC=0' } }
+# - { name: USE_SYMBOL_GC=0, env: { cppflags: '-DUSE_SYMBOL_GC=0' } }
+# - { name: USE_THREAD_CACHE=0, env: { cppflags: '-DUSE_THREAD_CACHE=0' } }
+# - { name: USE_TRANSIENT_HEAP=0, env: { cppflags: '-DUSE_TRANSIENT_HEAP=0' } }
+# - { name: USE_RUBY_DEBUG_LOG=1, env: { cppflags: '-DUSE_RUBY_DEBUG_LOG=1' } }
+ - { name: USE_RVARGC=0, env: { cppflags: '-DUSE_RVARGC=0' } }
+# - { name: USE_RVARGC=1, env: { cppflags: '-DUSE_RVARGC=1' } }
+# - { name: USE_DEBUG_COUNTER, env: { cppflags: '-DUSE_DEBUG_COUNTER=1', RUBY_DEBUG_COUNTER_DISABLE: '1' } }
+
+ - { name: DEBUG_FIND_TIME_NUMGUESS, env: { cppflags: '-DDEBUG_FIND_TIME_NUMGUESS' } }
+ - { name: DEBUG_INTEGER_PACK, env: { cppflags: '-DDEBUG_INTEGER_PACK' } }
+# - { name: ENABLE_PATH_CHECK, env: { cppflags: '-DENABLE_PATH_CHECK' } }
+
+ - { name: GC_DEBUG_STRESS_TO_CLASS, env: { cppflags: '-DGC_DEBUG_STRESS_TO_CLASS' } }
+# - { name: GC_ENABLE_LAZY_SWEEP=0, env: { cppflags: '-DGC_ENABLE_LAZY_SWEEP=0' } }
+# - { name: GC_PROFILE_DETAIL_MEMOTY, env: { cppflags: '-DGC_PROFILE_DETAIL_MEMOTY' } }
+# - { name: GC_PROFILE_MORE_DETAIL, env: { cppflags: '-DGC_PROFILE_MORE_DETAIL' } }
+
+# - { name: CALC_EXACT_MALLOC_SIZE, env: { cppflags: '-DCALC_EXACT_MALLOC_SIZE' } }
+# - { name: MALLOC_ALLOCATED_SIZE_CHECK, env: { cppflags: '-DMALLOC_ALLOCATED_SIZE_CHECK' } }
+
+# - { name: IBF_ISEQ_ENABLE_LOCAL_BUFFER, env: { cppflags: '-DIBF_ISEQ_ENABLE_LOCAL_BUFFER' } }
+
+# - { name: RGENGC_ESTIMATE_OLDMALLOC, env: { cppflags: '-DRGENGC_ESTIMATE_OLDMALLOC' } }
+# - { name: RGENGC_FORCE_MAJOR_GC, env: { cppflags: '-DRGENGC_FORCE_MAJOR_GC' } }
+# - { name: RGENGC_OBJ_INFO, env: { cppflags: '-DRGENGC_OBJ_INFO' } }
+# - { name: RGENGC_OLD_NEWOBJ_CHECK, env: { cppflags: '-DRGENGC_OLD_NEWOBJ_CHECK' } }
+# - { name: RGENGC_PROFILE, env: { cppflags: '-DRGENGC_PROFILE' } }
+
+# - { name: VM_DEBUG_BP_CHECK, env: { cppflags: '-DVM_DEBUG_BP_CHECK' } }
+# - { name: VM_DEBUG_VERIFY_METHOD_CACHE, env: { cppflags: '-DVM_DEBUG_VERIFY_METHOD_CACHE' } }
+
+ - { name: MJIT_FORCE_ENABLE, env: { cppflags: '-DMJIT_FORCE_ENABLE' } }
+ - { name: YJIT_FORCE_ENABLE, env: { cppflags: '-DYJIT_FORCE_ENABLE' } }
name: ${{ matrix.entry.name }}
runs-on: ubuntu-latest
- container: shyouhei/c-compilers:latest
- if: "!contains(github.event.head_commit.message, '[ci skip]')"
+ container:
+ image: ghcr.io/ruby/ruby-ci-image:${{ matrix.entry.container || matrix.entry.env.default_cc || 'clang-15' }}
+ options: --user root
+ if: ${{ !contains(github.event.head_commit.message, '[DOC]') && !contains(github.event.pull_request.labels.*.name, 'Documentation') }}
+ env: ${{ matrix.entry.env || matrix.env }}
steps:
+ - run: id
+ working-directory:
+ - run: mkdir build
+ working-directory:
- name: setenv
run: |
- echo ::set-env name=${{ matrix.entry.key }}::${{ matrix.entry.value }}
- echo ::set-env name=make::make -sj$((1 + $(nproc --all)))
- - run: mkdir build
- - uses: actions/checkout@v2
+ echo "GNUMAKEFLAGS=-sj$((1 + $(nproc --all)))" >> $GITHUB_ENV
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
- fetch-depth: 128
path: src
- - run: autoconf
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ path: src/.downloaded-cache
+ key: downloaded-cache
+ - run: ./autogen.sh
working-directory: src
- name: Run configure
- working-directory: build
- run: |
- ../src/configure -C \
- ${default_configure} \
- ${append_configure} \
- --with-gcc="${default_cc} ${append_cc}"
- - run: $make incs
- working-directory: build
- - run: $make
- working-directory: build
- - run: $make test
- working-directory: build
- - run: $make $UPDATE_UNICODE up
- working-directory: build
- if: "matrix.entry.name == '-O3'"
- - run: $make install
- working-directory: build
- if: "matrix.entry.name == '-O3'"
- - run: /usr/local/bin/gem install --no-doc timezone tzinfo
- working-directory: build
- if: "matrix.entry.name == '-O3'"
- - run: $make test-tool
- working-directory: build
- if: "matrix.entry.name == '-O3'"
- - run: $make test-all TESTS='-- ruby -ext-'
- working-directory: build
- if: "matrix.entry.name == '-O3'"
- - run: $make test-spec
- working-directory: build
- if: "matrix.entry.name == '-O3'"
-
- - uses: k0kubun/action-slack@v2.0.0
+ run: >
+ ../src/configure -C ${default_configure} ${append_configure}
+ --${{
+ matrix.entry.crosshost && 'host' || 'with-gcc'
+ }}=${{
+ matrix.entry.crosshost || '"${default_cc}${append_cc:+ $append_cc}"'
+ }}
+ --${{ matrix.entry.shared || 'enable' }}-shared
+ - run: make extract-extlibs
+ - run: make incs
+ - run: make showflags
+ - run: make
+ - run: make leaked-globals
+ - run: make test
+ - run: make install
+ if: ${{ matrix.entry.check }}
+ - run: make test-tool
+ if: ${{ matrix.entry.check }}
+ - run: make test-all TESTS='-- ruby -ext-'
+ if: ${{ matrix.entry.check }}
+ - run: make test-spec
+ env:
+ CHECK_LEAKS: true
+ if: ${{ matrix.entry.check }}
+ - run: make test-annocheck
+ if: ${{ matrix.entry.check && endsWith(matrix.entry.name, 'annocheck') }}
+
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
with:
payload: |
{
"ci": "GitHub Actions",
- "env": "${{ matrix.entry.name }}",
+ "env": "${{ github.workflow }} / ${{ matrix.entry.name }}",
"url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
"commit": "${{ github.sha }}",
- "branch": "${{ github.ref }}".split('/').reverse()[0]
+ "branch": "${{ github.ref_name }}"
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
- if: failure() && github.event_name == 'push'
+ if: ${{ failure() && github.event_name == 'push' }}
+
+defaults:
+ run:
+ working-directory: build
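
A pattern repeated in several of the converted workflows closes the file: a `defaults` block routes every `run` step into `build/`, and the few steps that must execute at the workspace root (like `mkdir build` itself) reset it with an empty `working-directory:`. In sketch form (the job name is illustrative only):

    defaults:
      run:
        working-directory: build   # every run step starts here...
    jobs:
      demo:                        # hypothetical job, for illustration
        runs-on: ubuntu-latest
        steps:
          - run: mkdir build       # ...except this one, which must run before
            working-directory:     #    build/ exists, so it clears the default
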
diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml
index b16e0c2c7d..d8dc58b119 100644
--- a/.github/workflows/macos.yml
+++ b/.github/workflows/macos.yml
@@ -1,60 +1,113 @@
name: macOS
-on: [push, pull_request]
+on:
+ push:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ pull_request:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ merge_group:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+permissions:
+ contents: read
+
jobs:
make:
- runs-on: macos-latest
strategy:
matrix:
- test_task: [ "check", "test-bundler", "test-bundled-gems", "leaked-globals" ]
+ test_task: ["check"] # "test-bundler-parallel", "test-bundled-gems"
+ os:
+ - macos-13
+ - macos-14
+ - macos-15
fail-fast: false
- if: "!contains(github.event.head_commit.message, '[ci skip]')"
+ env:
+ GITPULLOPTIONS: --no-tags origin ${{github.ref}}
+ runs-on: ${{ matrix.os }}
+ if: ${{ !contains(github.event.head_commit.message, '[DOC]') && !contains(github.event.pull_request.labels.*.name, 'Documentation') }}
steps:
- - name: Disable Firewall
+ - run: mkdir build
+ working-directory:
+ - name: git config
run: |
- sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate off
- sudo /usr/libexec/ApplicationFirewall/socketfilterfw --getglobalstate
- - uses: actions/checkout@v2
+ git config --global advice.detachedHead 0
+ git config --global init.defaultBranch garbage
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
- fetch-depth: 128
path: src
- - run: ./src/tool/actions-commit-info.sh
- id: commit_info
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ path: src/.downloaded-cache
+ key: downloaded-cache
- name: Install libraries
run: |
- export WAITS='5 60'
- tool/travis_retry.sh brew upgrade
- tool/travis_retry.sh brew install gdbm gmp libffi openssl@1.1 zlib autoconf automake libtool readline
+ brew install gmp libffi openssl@1.1 zlib autoconf automake libtool readline bison
working-directory: src
- name: Set ENV
run: |
- echo '::set-env name=JOBS::'-j$((1 + $(sysctl -n hw.activecpu)))
- - run: autoconf
+ echo "MAKEFLAGS=-j$((1 + $(sysctl -n hw.activecpu)))" >> $GITHUB_ENV
+          echo "PATH=/usr/local/opt/bison/bin:/opt/homebrew/opt/bison/bin:$PATH" >> $GITHUB_ENV
+ - run: ./autogen.sh
working-directory: src
- - run: mkdir build
- name: Run configure
run: ../src/configure -C --disable-install-doc --with-openssl-dir=$(brew --prefix openssl@1.1) --with-readline-dir=$(brew --prefix readline)
- working-directory: build
- - run: make $JOBS
- working-directory: build
+ - run: make incs
- run: make prepare-gems
- working-directory: build
- if: matrix.test_task == 'check'
- - run: make $JOBS -s ${{ matrix.test_task }}
- working-directory: build
+ if: ${{ matrix.test_task == 'test-bundled-gems' }}
+ - run: make
+ - run: make leaked-globals
+ if: ${{ matrix.test_task == 'check' }}
+ - name: make ${{ matrix.test_task }}
+ run: |
+ make -s ${{ matrix.test_task }} ${TESTS:+TESTS=`echo "$TESTS" | sed 's| |$$/ -n!/|g;s|^|-n!/|;s|$|$$/|'`}
+ timeout-minutes: 40
env:
RUBY_TESTOPTS: "-q --tty=no"
- # Remove minitest from TEST_BUNDLED_GEMS_ALLOW_FAILURES if https://github.com/seattlerb/minitest/pull/798 is resolved
- TEST_BUNDLED_GEMS_ALLOW_FAILURES: "minitest,xmlrpc,rexml"
- - uses: k0kubun/action-slack@v2.0.0
+ TESTS: ${{ matrix.test_task == 'check' && matrix.skipped_tests || '' }}
+ TEST_BUNDLED_GEMS_ALLOW_FAILURES: ""
+ PRECHECK_BUNDLED_GEMS: "no"
+ - name: make skipped tests
+ run: |
+ make -s test-all TESTS=`echo "$TESTS" | sed 's| |$$/ -n/|g;s|^|-n/|;s|$|$$/|'`
+ env:
+ GNUMAKEFLAGS: ""
+ RUBY_TESTOPTS: "-v --tty=no"
+ TESTS: ${{ matrix.skipped_tests }}
+ PRECHECK_BUNDLED_GEMS: "no"
+ if: ${{ matrix.test_task == 'check' && matrix.skipped_tests != '' }}
+ continue-on-error: ${{ matrix.continue-on-skipped_tests || false }}
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
with:
payload: |
{
"ci": "GitHub Actions",
- "env": "${{ github.workflow }} / ${{ matrix.test_task }}",
+ "env": "${{ matrix.os }} / ${{ matrix.test_task }}${{ matrix.configure }}",
"url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
"commit": "${{ github.sha }}",
- "branch": "${{ github.ref }}".split('/').reverse()[0]
+ "branch": "${{ github.ref_name }}"
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
- if: failure() && github.event_name == 'push'
+ if: ${{ failure() && github.event_name == 'push' }}
+
+defaults:
+ run:
+ working-directory: build
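
The `make ${{ matrix.test_task }}` step above turns the space-separated `$TESTS` skip list into negative name filters for the test runner. A hypothetical step tracing the sed pipeline:

    - name: show skip filters      # hypothetical step, for illustration
      shell: bash
      run: |
        TESTS='TestFoo TestBar'
        echo "$TESTS" | sed 's| |$$/ -n!/|g;s|^|-n!/|;s|$|$$/|'
        # prints: -n!/TestFoo$$/ -n!/TestBar$$/
        # make then halves each $$, so test-all receives -n!/TestFoo$/ -n!/TestBar$/,
        # i.e. run everything except tests whose names match those patterns

The companion `make skipped tests` step uses the same pipeline without the `!` to re-run only the skipped tests.
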
diff --git a/.github/workflows/mingw.yml b/.github/workflows/mingw.yml
index b522386271..0df917d3d8 100644
--- a/.github/workflows/mingw.yml
+++ b/.github/workflows/mingw.yml
@@ -1,138 +1,179 @@
name: MinGW
-on: [push, pull_request]
+on:
+ push:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ pull_request:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ merge_group:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+permissions:
+ contents: read
# Notes:
# Actions console encoding causes issues, see test-all & test-spec steps
#
jobs:
make:
- runs-on: windows-2019
+ runs-on: windows-2022
+ name: ${{ github.workflow }} (${{ matrix.msystem }})
env:
- MSYSTEM: MINGW64
- MSYSTEM_PREFIX: /mingw64
+ MSYSTEM: ${{ matrix.msystem }}
MSYS2_ARCH: x86_64
CHOST: "x86_64-w64-mingw32"
- CFLAGS: "-march=x86-64 -mtune=generic -O3 -pipe -fstack-protector-strong"
+ CFLAGS: "-march=x86-64 -mtune=generic -O3 -pipe"
CXXFLAGS: "-march=x86-64 -mtune=generic -O3 -pipe"
CPPFLAGS: "-D_FORTIFY_SOURCE=2 -D__USE_MINGW_ANSI_STDIO=1 -DFD_SETSIZE=2048"
- LDFLAGS: "-pipe -fstack-protector-strong"
+ LDFLAGS: "-pipe"
UPDATE_UNICODE: "UNICODE_FILES=. UNICODE_PROPERTY_FILES=. UNICODE_AUXILIARY_FILES=. UNICODE_EMOJI_FILES=."
+ GITPULLOPTIONS: --no-tags origin ${{github.ref}}
strategy:
matrix:
- test_task: [ "check" ] # to make job names consistent
+ include:
+          # To mitigate the flakiness of MinGW CI, we test only the one runtime that newer MSYS2 uses.
+ - msystem: "UCRT64"
+ base_ruby: head
+ test_task: "check"
+ test-all-opts: "--name=!/TestObjSpace#test_reachable_objects_during_iteration/"
fail-fast: false
- if: "!contains(github.event.head_commit.message, '[ci skip]')"
+ if: ${{ !contains(github.event.head_commit.message, '[DOC]') && !contains(github.event.pull_request.labels.*.name, 'Documentation') }}
steps:
+ - run: mkdir build
+ working-directory:
- name: git config
run: |
- git config --system core.autocrlf false
- git config --system core.eol lf
- - uses: actions/checkout@v2
+ git config --global core.autocrlf false
+ git config --global core.eol lf
+ git config --global advice.detachedHead 0
+ git config --global init.defaultBranch garbage
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
- fetch-depth: 128
path: src
- - run: ./src/tool/actions-commit-info.sh
- shell: bash
- id: commit_info
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ path: src/.downloaded-cache
+ key: downloaded-cache
- name: Set up Ruby & MSYS2
- uses: MSP-Greg/setup-ruby-pkgs@v1
+ uses: ruby/setup-ruby@13e7a03dc3ac6c3798f4570bfead2aed4d96abfb # v1.244.0
with:
- ruby-version: 2.6
- mingw: _upgrade_ gdbm gmp libffi libyaml openssl ragel readline
- msys2: automake1.16 bison
+ ruby-version: ${{ matrix.base_ruby }}
+ - name: set env
+ run: |
+ echo "GNUMAKEFLAGS=-j$((2 * NUMBER_OF_PROCESSORS))" >> $GITHUB_ENV
+
- name: where check
run: |
# show where
- Write-Host
- $where = 'gcc.exe', 'ragel.exe', 'make.exe', 'bison.exe', 'libcrypto-1_1-x64.dll', 'libssl-1_1-x64.dll'
- foreach ($e in $where) {
- $rslt = where.exe $e 2>&1 | Out-String
- if ($rslt.contains($e)) { Write-Host $rslt }
- else { Write-Host "`nCan't find $e" }
- }
- - name: misc setup, autoreconf
+ mv /c/Windows/System32/libcrypto-1_1-x64.dll /c/Windows/System32/libcrypto-1_1-x64.dll_
+ mv /c/Windows/System32/libssl-1_1-x64.dll /c/Windows/System32/libssl-1_1-x64.dll_
+ result=true
+ for e in gcc.exe ragel.exe make.exe bison.exe libcrypto-1_1-x64.dll libssl-1_1-x64.dll; do
+ echo '##['group']'$'\033[93m'$e$'\033[m'
+ where $e || result=false
+ echo '##['endgroup']'
+ done
+ $result
+
+ - name: version check
+ run: |
+ # show version
+ result=true
+ for e in gcc ragel make bison "openssl version"; do
+ case "$e" in *" "*) ;; *) e="$e --version";; esac
+ echo '##['group']'$'\033[93m'$e$'\033[m'
+ $e || result=false
+ echo '##['endgroup']'
+ done
+ $result
+
+ - name: autogen
run: |
- mkdir build
- mkdir install
- mkdir temp
- cd src
- sh -c "autoreconf -fi"
+ ./autogen.sh
+ working-directory: src
- name: configure
- working-directory: build
+ run: >
+ ../src/configure --disable-install-doc --prefix=/.
+ --build=$CHOST --host=$CHOST --target=$CHOST
+
+ - name: update
run: |
- # Actions uses UTF8, causes test failures, similar to normal OS setup
- $PSDefaultParameterValues['*:Encoding'] = 'utf8'
- [Console]::OutputEncoding = [System.Text.Encoding]::GetEncoding("IBM437")
- [Console]::InputEncoding = [System.Text.Encoding]::GetEncoding("IBM437")
- $config_args = "--build=$env:CHOST --host=$env:CHOST --target=$env:CHOST"
- Write-Host $config_args
- sh -c "../src/configure --disable-install-doc --prefix=/install $config_args"
- # Write-Host "-------------------------------------- config.log"
- # Get-Content ./config.log | foreach {Write-Output $_}
-
- - name: download unicode, gems, etc
- working-directory: build
+ make incs
+
+ - name: download gems
run: |
- $jobs = [int]$env:NUMBER_OF_PROCESSORS + 1
- make -j $jobs update-unicode
- make -j $jobs update-gems
+ make update-gems
- name: make all
- timeout-minutes: 40
- working-directory: build
+ timeout-minutes: 30
run: |
- $jobs = [int]$env:NUMBER_OF_PROCESSORS + 1
- make -j $jobs
+ make
+
+ - run: make leaked-globals
- name: make install
- working-directory: build
run: |
- # Actions uses UTF8, causes test failures, similar to normal OS setup
- $PSDefaultParameterValues['*:Encoding'] = 'utf8'
- [Console]::OutputEncoding = [System.Text.Encoding]::GetEncoding("IBM437")
- [Console]::InputEncoding = [System.Text.Encoding]::GetEncoding("IBM437")
- make DESTDIR=.. install-nodoc
+ make DESTDIR=../install install-nodoc
- name: test
timeout-minutes: 5
- working-directory: build
run: |
make test
+ if: ${{matrix.test_task == 'check' || matrix.test_task == 'test'}}
- name: test-all
- timeout-minutes: 50
- working-directory: build
+ timeout-minutes: 45
run: |
       # Actions uses UTF-8, which causes test failures; switch to the codepage of a normal OS setup
- $PSDefaultParameterValues['*:Encoding'] = 'utf8'
- [Console]::OutputEncoding = [System.Text.Encoding]::GetEncoding("IBM437")
- [Console]::InputEncoding = [System.Text.Encoding]::GetEncoding("IBM437")
- $jobs = [int]$env:NUMBER_OF_PROCESSORS
- make test-all TESTOPTS="-j $jobs --retry --job-status=normal --show-skip --timeout-scale=1.5"
+ chcp.com 437
+ make ${{ StartsWith(matrix.test_task, 'test/') && matrix.test_task || 'test-all' }}
+ env:
+ RUBY_TESTOPTS: >-
+ --retry --job-status=normal --show-skip --timeout-scale=1.5
+ ${{ matrix.test-all-opts }}
+ BUNDLER_VERSION:
+ if: ${{matrix.test_task == 'check' || matrix.test_task == 'test-all' || StartsWith(matrix.test_task, 'test/')}}
- name: test-spec
timeout-minutes: 10
- working-directory: src/spec/ruby
run: |
- $env:Path = "$pwd/../../../install/bin;$env:Path"
- # Actions uses UTF8, causes test failures, similar to normal OS setup
- $PSDefaultParameterValues['*:Encoding'] = 'utf8'
- [Console]::OutputEncoding = [System.Text.Encoding]::GetEncoding("IBM437")
- [Console]::InputEncoding = [System.Text.Encoding]::GetEncoding("IBM437")
- ruby -v
- ruby ../mspec/bin/mspec -j
+ make ${{ StartsWith(matrix.test_task, 'spec/') && matrix.test_task || 'test-spec' }}
+ if: ${{matrix.test_task == 'check' || matrix.test_task == 'test-spec' || StartsWith(matrix.test_task, 'spec/')}}
- - uses: k0kubun/action-slack@v2.0.0
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
with:
payload: |
{
"ci": "GitHub Actions",
- "env": "${{ github.workflow }} / ${{ matrix.test_task }}",
+ "env": "${{ github.workflow }} ${{ matrix.msystem }} / ${{ matrix.test_task }}",
"url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
"commit": "${{ github.sha }}",
- "branch": "${{ github.ref }}".split('/').reverse()[0]
+ "branch": "${{ github.ref_name }}"
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
- if: failure() && github.event_name == 'push'
+ if: ${{ failure() && github.event_name == 'push' }}
+
+defaults:
+ run:
+ working-directory: build
+ shell: sh
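
The where/version checks above splice `##[group]`/`##[endgroup]` logging commands together from quoted fragments so the Actions log folds each probe into a collapsible section (the splicing presumably keeps the literal command text out of the workflow source). A minimal sketch, relying on the same bash-style `$'...'` quoting the steps above use:

    - name: grouped output         # hypothetical step, for illustration
      shell: sh
      run: |
        e=gcc
        echo '##['group']'$'\033[93m'$e$'\033[m'   # opens a log group, tool name in yellow
        $e --version
        echo '##['endgroup']'                      # closes the collapsible group
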
diff --git a/.github/workflows/mjit-bindgen.yml b/.github/workflows/mjit-bindgen.yml
new file mode 100644
index 0000000000..26f8a1b2aa
--- /dev/null
+++ b/.github/workflows/mjit-bindgen.yml
@@ -0,0 +1,104 @@
+name: MJIT bindgen
+on:
+ push:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ pull_request:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ merge_group:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+permissions:
+ contents: read
+
+jobs:
+ make:
+ strategy:
+ matrix:
+ include:
+ - task: mjit-bindgen
+ fail-fast: false
+ runs-on: ubuntu-22.04
+ if: ${{ !contains(github.event.head_commit.message, '[DOC]') && !contains(github.event.pull_request.labels.*.name, 'Documentation') }}
+ steps:
+ - run: mkdir build
+ working-directory:
+ - name: Set ENV
+ run: |
+ echo "GNUMAKEFLAGS=-j$((1 + $(nproc --all)))" >> $GITHUB_ENV
+ - name: Install libraries
+ run: |
+ set -x
+ sudo apt-get update -q || :
+ sudo apt-get install --no-install-recommends -q -y \
+ build-essential \
+ libssl-dev libyaml-dev libreadline6-dev \
+ zlib1g-dev libncurses5-dev libffi-dev \
+ libclang1-14 \
+ bison autoconf
+ sudo apt-get install -q -y pkg-config || :
+ - name: Set up Ruby
+ uses: ruby/setup-ruby@13e7a03dc3ac6c3798f4570bfead2aed4d96abfb # v1.244.0
+ with:
+ ruby-version: '3.1'
+ - name: git config
+ run: |
+ git config --global advice.detachedHead 0
+ git config --global init.defaultBranch garbage
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ path: src
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ path: src/.downloaded-cache
+ key: downloaded-cache
+ - name: Fixed world writable dirs
+ run: |
+ chmod -v go-w $HOME $HOME/.config
+ sudo chmod -R go-w /usr/share
+ sudo bash -c 'IFS=:; for d in '"$PATH"'; do chmod -v go-w $d; done' || :
+ - run: ./autogen.sh
+ working-directory: src
+ - name: Run configure
+ run: ../src/configure -C --disable-install-doc --prefix=$(pwd)/install --enable-yjit=dev_nodebug
+ - run: make incs
+ - run: make
+ - run: make install
+ - run: make ${{ matrix.task }}
+ - run: git diff --exit-code
+ working-directory: src
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
+ with:
+ payload: |
+ {
+ "ci": "GitHub Actions",
+          "env": "${{ github.workflow }} / ${{ matrix.task }}",
+ "url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
+ "commit": "${{ github.sha }}",
+ "branch": "${{ github.ref_name }}"
+ }
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
+ if: ${{ failure() && github.event_name == 'push' }}
+
+defaults:
+ run:
+ working-directory: build
diff --git a/.github/workflows/mjit.yml b/.github/workflows/mjit.yml
index c07d7e6d41..6f7181489a 100644
--- a/.github/workflows/mjit.yml
+++ b/.github/workflows/mjit.yml
@@ -1,29 +1,69 @@
name: MJIT
-on: [push, pull_request]
+on:
+ push:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ pull_request:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ - '**.[1-8]'
+ - '**.ronn'
+ merge_group:
+ paths-ignore:
+ - 'doc/**'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ - '**.[1-8]'
+ - '**.ronn'
+
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+permissions:
+ contents: read
+
jobs:
make:
strategy:
matrix:
- test_task: [ "check" ] # to make job names consistent
- jit_opts: [ "--jit", "--jit-wait" ]
+ test_task: [check] # to make job names consistent
+ mjit_opts: [--mjit-wait]
fail-fast: false
runs-on: ubuntu-latest
- if: "!contains(github.event.head_commit.message, '[ci skip]')"
+ if: ${{ !contains(github.event.head_commit.message, '[DOC]') && !contains(github.event.pull_request.labels.*.name, 'Documentation') }}
env:
TESTOPTS: '-q --tty=no'
- RUN_OPTS: '--disable-gems --jit-warnings ${{ matrix.jit_opts }}'
+ RUN_OPTS: '--disable-gems ${{ matrix.mjit_opts }} --mjit-debug=-ggdb3'
+ GITPULLOPTIONS: --no-tags origin ${{github.ref}}
steps:
+ - run: mkdir build
+ working-directory:
- name: Install libraries
run: |
set -x
sudo apt-get update -q || :
- sudo apt-get install --no-install-recommends -q -y build-essential libssl-dev libyaml-dev libreadline6-dev zlib1g-dev libncurses5-dev libffi-dev libgdbm-dev bison autoconf ruby
- - uses: actions/checkout@v2
+ sudo apt-get install --no-install-recommends -q -y build-essential libssl-dev libyaml-dev libreadline6-dev zlib1g-dev libncurses5-dev libffi-dev bison autoconf ruby
+ - name: git config
+ run: |
+ git config --global advice.detachedHead 0
+ git config --global init.defaultBranch garbage
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
- fetch-depth: 128
path: src
- - run: ./src/tool/actions-commit-info.sh
- id: commit_info
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ path: src/.downloaded-cache
+ key: downloaded-cache
- name: Fixed world writable dirs
run: |
chmod -v go-w $HOME $HOME/.config
@@ -31,33 +71,43 @@ jobs:
sudo bash -c 'IFS=:; for d in '"$PATH"'; do chmod -v go-w $d; done' || :
- name: Set ENV
run: |
- echo '::set-env name=JOBS::'-j$((1 + $(nproc --all)))
- - run: autoconf
+ echo "GNUMAKEFLAGS=-j$((1 + $(nproc --all)))" >> $GITHUB_ENV
+ - run: ./autogen.sh
working-directory: src
- - run: mkdir build
- name: Run configure
- run: ../src/configure -C --disable-install-doc
- working-directory: build
- - run: make $JOBS
- working-directory: build
- - run: sudo make $JOBS -s install
- working-directory: build
- - run: make $JOBS -s test
- working-directory: build
- - run: make $JOBS -s test-all
- working-directory: build
- - run: make $JOBS -s test-spec
- working-directory: build
- - uses: k0kubun/action-slack@v2.0.0
+ run: ../src/configure -C --disable-install-doc cppflags=-DVM_CHECK_MODE
+ - run: make incs
+ - run: make
+ - run: sudo make -s install
+ - name: Run test
+ run: |
+ unset GNUMAKEFLAGS
+ make -s test RUN_OPTS="$RUN_OPTS"
+ timeout-minutes: 60
+ # - name: Run test-all
+ # run: |
+ # ulimit -c unlimited
+ # make -s test-all RUN_OPTS="$RUN_OPTS"
+ # timeout-minutes: 60
+ - name: Run test-spec
+ run: |
+ unset GNUMAKEFLAGS
+ make -s test-spec RUN_OPTS="$RUN_OPTS"
+ timeout-minutes: 60
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
with:
payload: |
{
"ci": "GitHub Actions",
- "env": "${{ github.workflow }} / ${{ matrix.test_task }} ${{ matrix.jit_opts }}",
+ "env": "${{ github.workflow }} / ${{ matrix.test_task }} ${{ matrix.mjit_opts }}",
"url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
"commit": "${{ github.sha }}",
- "branch": "${{ github.ref }}".split('/').reverse()[0]
+ "branch": "${{ github.ref_name }}"
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
- if: failure() && github.event_name == 'push'
+ if: ${{ failure() && github.event_name == 'push' }}
+
+defaults:
+ run:
+ working-directory: build
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
new file mode 100644
index 0000000000..5d4474d978
--- /dev/null
+++ b/.github/workflows/publish.yml
@@ -0,0 +1,18 @@
+name: Start release workflow
+on:
+ push:
+ tags:
+ - '*'
+
+jobs:
+ notify:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Build release package
+ run: |
+ curl -L -X POST \
+ -H "Authorization: Bearer ${{ secrets.MATZBOT_GITHUB_WORKFLOW_TOKEN }}" \
+ -H "Accept: application/vnd.github+json" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ https://api.github.com/repos/ruby/actions/dispatches \
+ -d '{"event_type": "${{ github.ref }}"}'
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
new file mode 100644
index 0000000000..c12a95362d
--- /dev/null
+++ b/.github/workflows/scorecards.yml
@@ -0,0 +1,72 @@
+# This workflow uses actions that are not certified by GitHub. They are provided
+# by a third-party and are governed by separate terms of service, privacy
+# policy, and support documentation.
+
+name: Scorecards supply-chain security
+on:
+ # For Branch-Protection check. Only the default branch is supported. See
+ # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
+ branch_protection_rule:
+ # To guarantee Maintained check is occasionally updated. See
+ # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
+ schedule:
+ - cron: '22 4 * * 2'
+ push:
+ branches: [ "master" ]
+
+# Declare default permissions as read only.
+permissions: read-all
+
+jobs:
+ analysis:
+ name: Scorecards analysis
+ runs-on: ubuntu-latest
+ permissions:
+ # Needed to upload the results to code-scanning dashboard.
+ security-events: write
+ # Needed to publish results and get a badge (see publish_results below).
+ id-token: write
+ # Uncomment the permissions below if installing in a private repository.
+ # contents: read
+ # actions: read
+
+ steps:
+ - name: "Checkout code"
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ persist-credentials: false
+
+ - name: "Run analysis"
+ uses: ossf/scorecard-action@ea651e62978af7915d09fe2e282747c798bf2dab # v2.4.1
+ with:
+ results_file: results.sarif
+ results_format: sarif
+ # (Optional) Read-only PAT token. Uncomment the `repo_token` line below if:
+ # - you want to enable the Branch-Protection check on a *public* repository, or
+ # - you are installing Scorecards on a *private* repository
+ # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat.
+ repo_token: ${{ secrets.SCORECARD_READ_TOKEN }}
+
+ # Public repositories:
+ # - Publish results to OpenSSF REST API for easy access by consumers
+ # - Allows the repository to include the Scorecard badge.
+ # - See https://github.com/ossf/scorecard-action#publishing-results.
+ # For private repositories:
+ # - `publish_results` will always be set to `false`, regardless
+ # of the value entered here.
+ publish_results: true
+
+ # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
+ # format to the repository Actions tab.
+ - name: "Upload artifact"
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ with:
+ name: SARIF file
+ path: results.sarif
+ retention-days: 5
+
+ # Upload the results to GitHub's code scanning dashboard.
+ - name: "Upload to code-scanning"
+        uses: github/codeql-action/upload-sarif@959cbb7472c4d4ad70cdfe6f4976053fe48ab394 # v2.1.37
+ with:
+ sarif_file: results.sarif
diff --git a/.github/workflows/spec_guards.yml b/.github/workflows/spec_guards.yml
new file mode 100644
index 0000000000..4521195a2b
--- /dev/null
+++ b/.github/workflows/spec_guards.yml
@@ -0,0 +1,71 @@
+name: Rubyspec Version Guards Check
+
+on:
+ push:
+ paths:
+ - 'spec/**'
+ - '!spec/*.md'
+ pull_request:
+ paths:
+ - 'spec/**'
+ - '!spec/*.md'
+ merge_group:
+
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+permissions:
+ contents: read
+
+jobs:
+ rubyspec:
+ name: Rubyspec
+
+ runs-on: ubuntu-22.04
+
+ if: >-
+ ${{!(false
+ || contains(github.event.head_commit.message, '[DOC]')
+ || contains(github.event.head_commit.message, 'Document')
+ || contains(github.event.pull_request.title, '[DOC]')
+ || contains(github.event.pull_request.title, 'Document')
+ || contains(github.event.pull_request.labels.*.name, 'Document')
+ || (github.event_name == 'push' && github.actor == 'dependabot[bot]')
+ )}}
+
+ strategy:
+ matrix:
+ # Specs from ruby/spec should still run on all supported Ruby versions.
+ # This also ensures the needed ruby_version_is guards are there, see spec/README.md.
+ ruby:
+ - ruby-3.1
+ - ruby-3.2
+
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+ - uses: ruby/setup-ruby@13e7a03dc3ac6c3798f4570bfead2aed4d96abfb # v1.244.0
+ with:
+ ruby-version: ${{ matrix.ruby }}
+ bundler: none
+
+ - run: gem install webrick
+
+ - run: ruby ../mspec/bin/mspec
+ working-directory: spec/ruby
+ env:
+ CHECK_LEAKS: true
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
+ with:
+ payload: |
+ {
+ "ci": "GitHub Actions",
+ "env": "${{ github.workflow }} / rubyspec @ ${{ matrix.ruby }}",
+ "url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
+ "commit": "${{ github.sha }}",
+ "branch": "${{ github.ref_name }}"
+ }
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
+ if: ${{ failure() }}
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index a98c8e5f01..4fbca1170e 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -1,82 +1,146 @@
name: Ubuntu
-on: [push, pull_request]
+on:
+ push:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ pull_request:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ merge_group:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+permissions:
+ contents: read
+
jobs:
make:
strategy:
matrix:
- test_task: [ "check", "test-bundler", "test-bundled-gems", "test-all TESTS=--repeat-count=2", "leaked-globals" ]
- os: [ubuntu-latest, ubuntu-16.04]
- debug: ["", "-DRUBY_DEBUG"]
- exclude:
- - test_task: test-bundler
- os: ubuntu-16.04
+ # main variables included in the job name
+ test_task: [check]
+ configure: [cppflags=-DRUBY_DEBUG] # default to use more assertions
+ arch: ['']
+ # specify all jobs with `include` to avoid testing duplicated things
+ include:
+ - test_task: check
+ - test_task: check
+ arch: i686
+ configure: '' # test without -DRUBY_DEBUG as well
+ - test_task: check
+ configure: "--enable-shared --enable-load-relative"
+ - test_task: test-all TESTS=--repeat-count=2
+ - test_task: test-bundler-parallel
- test_task: test-bundled-gems
- os: ubuntu-16.04
- - test_task: "test-all TESTS=--repeat-count=2"
- os: ubuntu-16.04
- - test_task: leaked-globals
- os: ubuntu-16.04
- - os: ubuntu-16.04
- debug: -DRUBY_DEBUG
- - test_task: "test-all TESTS=--repeat-count=2"
- debug: -DRUBY_DEBUG
- - test_task: leaked-globals
- debug: -DRUBY_DEBUG
fail-fast: false
- runs-on: ${{ matrix.os }}
- if: "!contains(github.event.head_commit.message, '[ci skip]')"
+ env:
+ GITPULLOPTIONS: --no-tags origin ${{github.ref}}
+ RUBY_DEBUG: ci
+ SETARCH: ${{ matrix.arch && format('setarch {0}', matrix.arch) }}
+ runs-on: ubuntu-22.04
+ if: ${{ !contains(github.event.head_commit.message, '[DOC]') && !contains(github.event.pull_request.labels.*.name, 'Documentation') }}
steps:
+ - run: mkdir build
+ working-directory:
+ - name: Set ENV
+ run: |
+ echo "GNUMAKEFLAGS=-j$((1 + $(nproc --all)))" >> $GITHUB_ENV
- name: Install libraries
+ env:
+ arch: ${{matrix.arch}}
run: |
set -x
+ arch=${arch:+:${arch/i[3-6]86/i386}}
+ ${arch:+sudo dpkg --add-architecture ${arch#:}}
sudo apt-get update -q || :
- sudo apt-get install --no-install-recommends -q -y build-essential libssl-dev libyaml-dev libreadline6-dev zlib1g-dev libncurses5-dev libffi-dev libgdbm-dev bison autoconf ruby
- - uses: actions/checkout@v2
+ sudo apt-get install --no-install-recommends -q -y \
+ ${arch:+cross}build-essential${arch/:/-} \
+ libssl-dev${arch} libyaml-dev${arch} libreadline6-dev${arch} \
+ zlib1g-dev${arch} libncurses5-dev${arch} libffi-dev${arch} \
+ bison autoconf ruby
+ sudo apt-get install -q -y pkg-config${arch} || :
+ - name: git config
+ run: |
+ git config --global advice.detachedHead 0
+ git config --global init.defaultBranch garbage
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
- fetch-depth: 128
path: src
- - run: ./src/tool/actions-commit-info.sh
- id: commit_info
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ path: src/.downloaded-cache
+ key: downloaded-cache
- name: Fixed world writable dirs
run: |
chmod -v go-w $HOME $HOME/.config
sudo chmod -R go-w /usr/share
sudo bash -c 'IFS=:; for d in '"$PATH"'; do chmod -v go-w $d; done' || :
- - name: Set ENV
- run: |
- echo '::set-env name=JOBS::'-j$((1 + $(nproc --all)))
- - run: autoconf
+ - run: ./autogen.sh
working-directory: src
- - run: mkdir build
- name: Run configure
- run: ../src/configure -C --disable-install-doc cppflags=${{ matrix.debug }}
- working-directory: build
- - run: make $JOBS
- working-directory: build
- - run: make prepare-gems
- working-directory: build
- if: matrix.test_task == 'check'
+ env:
+ arch: ${{matrix.arch}}
+ run: >-
+ $SETARCH ../src/configure -C --disable-install-doc ${{ matrix.configure }}
+ ${arch:+--target=$arch-$OSTYPE --host=$arch-$OSTYPE}
+ - run: $SETARCH make incs
+ - run: $SETARCH make prepare-gems
+ if: ${{ matrix.test_task == 'test-bundled-gems' }}
+ - run: $SETARCH make
+ - run: $SETARCH make leaked-globals
+ if: ${{ matrix.test_task == 'check' }}
- name: Create dummy files in build dir
run: |
- ./miniruby -e '(("a".."z").to_a+("A".."Z").to_a+("0".."9").to_a+%w[foo bar test zzz]).each{|basename|File.write("#{basename}.rb", "raise %(do not load #{basename}.rb)")}'
- working-directory: build
- if: matrix.test_task == 'check'
- - run: make $JOBS -s ${{ matrix.test_task }}
- working-directory: build
+ $SETARCH ./miniruby -e '(("a".."z").to_a+("A".."Z").to_a+("0".."9").to_a+%w[foo bar test zzz]).each{|basename|File.write("#{basename}.rb", "raise %(do not load #{basename}.rb)")}'
+ if: ${{ matrix.test_task == 'check' }}
+ - name: make ${{ matrix.test_task }}
+ run: |
+ $SETARCH make -s ${{ matrix.test_task }} ${TESTS:+TESTS=`echo "$TESTS" | sed 's| |$$/ -n!/|g;s|^|-n!/|;s|$|$$/|'`}
+ timeout-minutes: 40
env:
RUBY_TESTOPTS: "-q --tty=no"
- # Remove minitest from TEST_BUNDLED_GEMS_ALLOW_FAILURES if https://github.com/seattlerb/minitest/pull/798 is resolved
- TEST_BUNDLED_GEMS_ALLOW_FAILURES: "minitest,xmlrpc"
- - uses: k0kubun/action-slack@v2.0.0
+ TESTS: ${{ matrix.test_task == 'check' && matrix.skipped_tests || '' }}
+ TEST_BUNDLED_GEMS_ALLOW_FAILURES: ""
+ PRECHECK_BUNDLED_GEMS: "no"
+ - name: make skipped tests
+ run: |
+ $SETARCH make -s test-all TESTS=`echo "$TESTS" | sed 's| |$$/ -n/|g;s|^|-n/|;s|$|$$/|'`
+ env:
+ GNUMAKEFLAGS: ""
+ RUBY_TESTOPTS: "-v --tty=no"
+ TESTS: ${{ matrix.skipped_tests }}
+ if: ${{ matrix.test_task == 'check' && matrix.skipped_tests != '' }}
+ continue-on-error: ${{ matrix.continue-on-skipped_tests || false }}
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
with:
payload: |
{
"ci": "GitHub Actions",
- "env": "${{ matrix.os }} / ${{ matrix.test_task }}${{ matrix.debug }}",
+ "env": "${{ github.workflow }} / ${{ matrix.test_task }} ${{ matrix.configure }}${{ matrix.arch }}",
"url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
"commit": "${{ github.sha }}",
- "branch": "${{ github.ref }}".split('/').reverse()[0]
+ "branch": "${{ github.ref_name }}"
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
- if: failure() && github.event_name == 'push'
+ if: ${{ failure() && github.event_name == 'push' }}
+
+defaults:
+ run:
+ working-directory: build
diff --git a/.github/workflows/wasm.yml b/.github/workflows/wasm.yml
new file mode 100644
index 0000000000..27920b5821
--- /dev/null
+++ b/.github/workflows/wasm.yml
@@ -0,0 +1,146 @@
+name: WebAssembly
+on:
+ push:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ pull_request:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ merge_group:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+permissions: # added using https://github.com/step-security/secure-workflows
+ contents: read
+
+jobs:
+ make:
+ strategy:
+ matrix:
+ entry:
+# # wasmtime can't compile a non-optimized Asyncified binary due to locals explosion
+# - { name: O0-debuginfo, optflags: "-O0", debugflags: "-g", wasmoptflags: "-O1" }
+# - { name: O1, optflags: "-O1", debugflags: "" , wasmoptflags: "-O1" }
+ - { name: O2, optflags: "-O2", debugflags: "" , wasmoptflags: "-O2" }
+# - { name: O3, optflags: "-O3", debugflags: "" , wasmoptflags: "-O3" }
+# # -O4 is equivalent to -O3 in clang, but it's different in wasm-opt
+# - { name: O4, optflags: "-O3", debugflags: "" , wasmoptflags: "-O4" }
+# - { name: Oz, optflags: "-Oz", debugflags: "" , wasmoptflags: "-Oz" }
+ fail-fast: false
+ env:
+ RUBY_TESTOPTS: '-q --tty=no'
+ GITPULLOPTIONS: --no-tags origin ${{github.ref}}
+ WASI_SDK_VERSION_MAJOR: 14
+ WASI_SDK_VERSION_MINOR: 0
+ BINARYEN_VERSION: 109
+ WASMTIME_VERSION: v0.33.0
+ runs-on: ubuntu-22.04
+ if: ${{ !contains(github.event.head_commit.message, '[DOC]') && !contains(github.event.pull_request.labels.*.name, 'Documentation') }}
+ steps:
+ - run: mkdir build
+ working-directory:
+ - name: git config
+ run: |
+ git config --global advice.detachedHead 0
+ git config --global init.defaultBranch garbage
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ path: src
+ - name: Install libraries
+ run: |
+ set -ex
+ sudo apt-get update -q || :
+ sudo apt-get install --no-install-recommends -q -y ruby bison make autoconf git wget
+
+ wasi_sdk_deb="wasi-sdk_${WASI_SDK_VERSION_MAJOR}.${WASI_SDK_VERSION_MINOR}_amd64.deb"
+ wget "https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-${WASI_SDK_VERSION_MAJOR}/${wasi_sdk_deb}"
+ sudo dpkg -i "$wasi_sdk_deb"
+ rm -f "$wasi_sdk_deb"
+
+ mkdir build-sdk
+ pushd build-sdk
+
+ wasmtime_url="https://github.com/bytecodealliance/wasmtime/releases/download/${WASMTIME_VERSION}/wasmtime-${WASMTIME_VERSION}-x86_64-linux.tar.xz"
+ wget -O - "$wasmtime_url" | tar xJf -
+ sudo ln -fs "$PWD/wasmtime-${WASMTIME_VERSION}-x86_64-linux/wasmtime" /usr/local/bin/wasmtime
+
+ binaryen_tarball="binaryen-version_${BINARYEN_VERSION}-x86_64-linux.tar.gz"
+ binaryen_url="https://github.com/WebAssembly/binaryen/releases/download/version_${BINARYEN_VERSION}/${binaryen_tarball}"
+ wget -O - "$binaryen_url" | tar xfz -
+ sudo ln -fs "$PWD/binaryen-version_${BINARYEN_VERSION}/bin/wasm-opt" /usr/local/bin/wasm-opt
+ working-directory: src
+ - name: Set ENV
+ run: |
+ echo "MAKEFLAGS=-j$((1 + $(sysctl -n hw.activecpu)))" >> $GITHUB_ENV
+ echo "WASI_SDK_PATH=/opt/wasi-sdk" >> $GITHUB_ENV
+ - run: ./autogen.sh
+ working-directory: src
+
+ - uses: ruby/setup-ruby@13e7a03dc3ac6c3798f4570bfead2aed4d96abfb # v1.244.0
+ with:
+ ruby-version: '3.0'
+ bundler: none
+
+ - name: Download config.guess with wasi version
+ run: |
+ rm tool/config.guess tool/config.sub
+ ruby tool/downloader.rb -d tool -e gnu config.guess config.sub
+ working-directory: src
+
+ - name: Run configure
+ run: |
+ ../src/configure \
+ --host wasm32-unknown-wasi \
+ --with-static-linked-ext \
+ LDFLAGS=" \
+ -Xlinker --stack-first \
+ -Xlinker -z -Xlinker stack-size=16777216 \
+ " \
+ optflags="${{ matrix.entry.optflags }}" \
+ debugflags="${{ matrix.entry.debugflags }}" \
+ wasmoptflags="${{ matrix.entry.wasmoptflags }} ${{ matrix.entry.debugflags }}"
+
+      # miniruby may not be built when cross-compiling
+ - run: make mini ruby
+ - name: Run basictest
+ run: wasmtime run ./../build/miniruby --mapdir /::./ -- basictest/test.rb
+ working-directory: src
+ - name: Run bootstraptest (no thread)
+ run: |
+ NO_THREAD_TESTS="$(grep -L Thread -R ./bootstraptest | awk -F/ '{ print $NF }' | uniq | sed -n 's/test_\(.*\).rb/\1/p' | paste -s -d, -)"
+ ruby ./bootstraptest/runner.rb --ruby="$(which wasmtime) run $PWD/../build/ruby --mapdir /::./ -- " --verbose "--sets=$NO_THREAD_TESTS"
+ working-directory: src
+
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
+ with:
+ payload: |
+ {
+ "ci": "GitHub Actions",
+ "env": "${{ github.workflow }} / ${{ matrix.name }}",
+ "url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
+ "commit": "${{ github.sha }}",
+ "branch": "${{ github.ref_name }}"
+ }
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
+ if: ${{ failure() && github.event_name == 'push' }}
+
+defaults:
+ run:
+ working-directory: build
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index 05bea72d1d..c2bd4881c2 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -1,77 +1,149 @@
name: Windows
-on: [push, pull_request]
+on:
+ push:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ pull_request:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ merge_group:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+permissions:
+ contents: read
+
jobs:
make:
strategy:
matrix:
- test_task: [test]
- os: [windows-2019]
- vs: [2019]
+ include:
+ - vs: 2022
+ vcvers: -vcvars_ver=14.2
fail-fast: false
- runs-on: ${{ matrix.os }}
- if: "!contains(github.event.head_commit.message, '[ci skip]')"
+ runs-on: windows-${{ matrix.vs }}
+ if: ${{ !contains(github.event.head_commit.message, '[DOC]') && !contains(github.event.pull_request.labels.*.name, 'Documentation') }}
+ name: VisualStudio ${{ matrix.vs }}
+ env:
+ GITPULLOPTIONS: --no-tags origin ${{github.ref}}
+ PATCH: C:\msys64\usr\bin\patch.exe
+ OS_VER: windows-${{ matrix.vs }}
steps:
- - uses: actions/cache@v1
+ - run: md build
+ working-directory:
+ - uses: msys2/setup-msys2@61f9e5e925871ba6c9e3e8da24ede83ea27fa91f # v2.27.0
+ id: setup-msys2
+ with:
+ update: true
+ install: bison patch
+ - name: patch path
+ shell: msys2 {0}
+ run: echo PATCH=$(cygpath -wa $(command -v patch)) >> $GITHUB_ENV
+ if: ${{ steps.setup-msys2.outcome == 'success' }}
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
- path: C:\vcpkg\downloads
- key: ${{ runner.os }}-vcpkg-download-${{ matrix.os }}-${{ github.sha }}
+ path: C:\vcpkg\installed
+ key: ${{ runner.os }}-vcpkg-installed-windows-${{ matrix.vs }}-${{ github.sha }}
restore-keys: |
- ${{ runner.os }}-vcpkg-download-${{ matrix.os }}-
- ${{ runner.os }}-vcpkg-download-
+ ${{ runner.os }}-vcpkg-installed-windows-${{ matrix.vs }}-
+ ${{ runner.os }}-vcpkg-installed-windows-
- name: Install libraries with vcpkg
run: |
- vcpkg --triplet x64-windows install readline zlib
- - uses: actions/cache@v1
- with:
- path: C:\Users\runneradmin\AppData\Local\Temp\chocolatey
- key: ${{ runner.os }}-chocolatey-${{ matrix.os }}-${{ github.sha }}
- restore-keys: |
- ${{ runner.os }}-chocolatey-${{ matrix.os }}-
- ${{ runner.os }}-chocolatey-
- - name: Install libraries with chocolatey
+ iex "& {$(irm get.scoop.sh)} -RunAsAdmin"
+ Join-Path (Resolve-Path ~).Path "scoop\shims" >> $Env:GITHUB_PATH
+ scoop install cmake@3.31.6
+ vcpkg --triplet x64-windows install libffi libyaml openssl readline zlib
+ shell:
+ pwsh
+ - name: git config
run: |
- choco install --no-progress openssl winflexbison3
- - uses: actions/checkout@v2
+ git config --global core.autocrlf false
+ git config --global core.eol lf
+ git config --global advice.detachedHead 0
+ git config --global init.defaultBranch garbage
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
- fetch-depth: 128
path: src
- - run: ./src/tool/actions-commit-info.sh
- shell: bash
- id: commit_info
- - run: md build
- shell: cmd
- - name: Configure
- run: |
- call "C:\Program Files (x86)\Microsoft Visual Studio\${{ matrix.vs }}\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
- ../src/win32/configure.bat --disable-install-doc --without-ext=+,dbm,gdbm --enable-bundled-libffi --with-opt-dir=C:/vcpkg/installed/x64-windows --with-openssl-dir="C:/Program Files/OpenSSL-Win64"
- working-directory: build
- shell: cmd
- - name: nmake
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ path: src/.downloaded-cache
+ key: downloaded-cache
+ - name: setup env
+      # %TEMP% is inconsistent with %TMP% and test-all expects them to be consistent.
+ # https://github.com/actions/virtual-environments/issues/712#issuecomment-613004302
+ # msys2/setup-msys2 installs MSYS2 to D:/a/_temp/msys64/usr/bin
run: |
- call "C:\Program Files (x86)\Microsoft Visual Studio\${{ matrix.vs }}\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
- set YACC=win_bison
- echo on
- nmake up
- nmake extract-gems
- nmake
- working-directory: build
- shell: cmd
- - name: nmake test
+ set Path=D:/a/_temp/msys64/usr/bin;%Path%
+ if not "%VCVARS%" == "" goto :vcset
+ set VCVARS="C:\Program Files (x86)\Microsoft Visual Studio\${{ matrix.vs }}\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
+ if not exist %VCVARS% set VCVARS="C:\Program Files\Microsoft Visual Studio\${{ matrix.vs }}\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
+ :vcset
+ set | C:\msys64\usr\bin\sort > old.env
+ call %VCVARS% ${{ matrix.vcvers || ''}}
+ set TMP=%USERPROFILE%\AppData\Local\Temp
+ set TEMP=%USERPROFILE%\AppData\Local\Temp
+ set /a TEST_JOBS=(15 * %NUMBER_OF_PROCESSORS% / 10) > nul
+ set | C:\msys64\usr\bin\sort > new.env
+ C:\msys64\usr\bin\comm -13 old.env new.env >> %GITHUB_ENV%
+ del *.env
+ - name: compiler version
+ run: cl
+ - name: link libraries
run: |
- call "C:\Program Files (x86)\Microsoft Visual Studio\${{ matrix.vs }}\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
- nmake ${{ matrix.test_task }}
- working-directory: build
- shell: cmd
- - uses: k0kubun/action-slack@v2.0.0
+ for %%I in (C:\vcpkg\installed\x64-windows\bin\*.dll) do (
+ if not %%~nI == readline mklink %%~nxI %%I
+ )
+ for %%I in (libcrypto-1_1-x64 libssl-1_1-x64) do (
+ ren c:\Windows\System32\%%I.dll %%I.dll_
+ )
+ - name: Configure
+ run: >-
+ ../src/win32/configure.bat --disable-install-doc
+ --with-opt-dir=C:/vcpkg/installed/x64-windows
+ - run: nmake incs
+ - run: nmake extract-extlibs
+ - run: nmake
+ env:
+ YACC: bison.exe
+ - run: nmake test
+ timeout-minutes: 5
+ - run: nmake test-spec
+ timeout-minutes: 10
+ - run: nmake test-all
+ env:
+ RUBY_TESTOPTS: -j${{env.TEST_JOBS}} --job-status=normal
+ timeout-minutes: 60
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
with:
payload: |
{
"ci": "GitHub Actions",
- "env": "${{ matrix.os }} / ${{ matrix.test_task }}",
+ "env": "VS${{ matrix.vs }} / ${{ matrix.test_task || 'check' }}",
"url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
"commit": "${{ github.sha }}",
- "branch": "${{ github.ref }}".split('/').reverse()[0]
+ "branch": "${{ github.ref_name }}"
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
- if: failure() && github.event_name == 'push'
+ if: ${{ failure() && github.event_name == 'push' }}
+
+defaults:
+ run:
+ working-directory: build
+ shell: cmd
diff --git a/.github/workflows/yjit-ubuntu.yml b/.github/workflows/yjit-ubuntu.yml
new file mode 100644
index 0000000000..0b7b9046e9
--- /dev/null
+++ b/.github/workflows/yjit-ubuntu.yml
@@ -0,0 +1,170 @@
+name: YJIT Ubuntu
+on:
+ push:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ pull_request:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+ merge_group:
+ paths-ignore:
+ - 'doc/**'
+ - '**/man'
+ - '**.md'
+ - '**.rdoc'
+ - '**/.document'
+
+concurrency:
+ group: ${{ github.workflow }} / ${{ startsWith(github.event_name, 'pull') && github.ref_name || github.sha }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull') }}
+
+permissions:
+ contents: read
+
+jobs:
+ cargo:
+ name: Rust cargo test
+    # The GitHub Actions image already seems to contain Rust 1.58.0.
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ # For now we can't run cargo test --offline because it complains about the
+ # capstone dependency, even though the dependency is optional
+ #- run: cargo test --offline
+ - run: RUST_BACKTRACE=1 cargo test
+ working-directory: yjit
+ # Also compile and test with all features enabled
+ - run: RUST_BACKTRACE=1 cargo test --all-features
+ working-directory: yjit
+ # Check that we can build in release mode too
+ - run: cargo build --release
+ working-directory: yjit
+ make:
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - test_task: 'yjit-bindgen'
+ hint: 'To fix: use patch in logs'
+ configure: '--with-gcc=clang-14 --enable-yjit=dev'
+ libclang_path: '/usr/lib/llvm-14/lib/libclang.so.1'
+
+ - test_task: "check"
+ # YJIT should be automatically built in release mode on x86-64 Linux with rustc present
+ #configure: "--enable-yjit RUSTC='rustc +1.58.0'"
+ configure: "RUSTC='rustc +1.58.0'"
+ rust_version: "1.58.0"
+
+ - test_task: "check"
+ configure: "--enable-yjit=dev"
+
+ - test_task: "check"
+ configure: "--enable-yjit=dev"
+ yjit_opts: "--yjit-call-threshold=1 --yjit-verify-ctx"
+
+ - test_task: "test-all TESTS=--repeat-count=2"
+ configure: "--enable-yjit=dev"
+
+ - test_task: "test-bundled-gems"
+ configure: "--enable-yjit=dev"
+
+ - test_task: "yjit-bench"
+ configure: "--enable-yjit=dev"
+ yjit_bench_opts: "--yjit-stats"
+ env:
+ GITPULLOPTIONS: --no-tags origin ${{github.ref}}
+ RUN_OPTS: ${{ matrix.yjit_opts }}
+ YJIT_BENCH_OPTS: ${{ matrix.yjit_bench_opts }}
+ RUBY_DEBUG: ci
+ BUNDLE_JOBS: 8 # for yjit-bench
+ runs-on: ubuntu-22.04
+ if: ${{ !contains(github.event.head_commit.message, '[DOC]') && !contains(github.event.pull_request.labels.*.name, 'Documentation') }}
+ steps:
+ - run: mkdir build
+ working-directory:
+ - name: Install libraries
+ run: |
+ set -x
+ sudo apt-get update -q || :
+ sudo apt-get install --no-install-recommends -q -y build-essential libssl-dev libyaml-dev libreadline6-dev zlib1g-dev libncurses5-dev libffi-dev bison autoconf ruby
+ - name: Install Rust
+ if: ${{ matrix.rust_version }}
+ run: rustup install ${{ matrix.rust_version }} --profile minimal
+ - name: git config
+ run: |
+ git config --global advice.detachedHead 0
+ git config --global init.defaultBranch garbage
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ path: src
+ - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ path: src/.downloaded-cache
+ key: downloaded-cache
+ - name: Fixed world writable dirs
+ run: |
+ chmod -v go-w $HOME $HOME/.config
+ sudo chmod -R go-w /usr/share
+ sudo bash -c 'IFS=:; for d in '"$PATH"'; do chmod -v go-w $d; done' || :
+ - name: Set ENV
+ run: |
+ echo "GNUMAKEFLAGS=-j$((1 + $(nproc --all)))" >> $GITHUB_ENV
+ - run: ./autogen.sh
+ working-directory: src
+ - name: Run configure
+ run: ../src/configure -C --disable-install-doc --prefix=$(pwd)/install ${{ matrix.configure }}
+ - run: make incs
+ - run: make prepare-gems
+ if: ${{ matrix.test_task == 'test-bundled-gems' }}
+ - run: make -j
+ - run: make leaked-globals
+ if: ${{ matrix.test_task == 'check' }}
+ - name: Create dummy files in build dir
+ run: |
+ ./miniruby -e '(("a".."z").to_a+("A".."Z").to_a+("0".."9").to_a+%w[foo bar test zzz]).each{|basename|File.write("#{basename}.rb", "raise %(do not load #{basename}.rb)")}'
+ if: ${{ matrix.test_task == 'check' }}
+ - name: Enable YJIT through ENV
+ run: echo "RUBY_YJIT_ENABLE=1" >> $GITHUB_ENV
+ # Check that the binary was built with YJIT
+ - name: Check YJIT enabled
+ run: ./miniruby --yjit -v | grep "+YJIT"
+ - name: make ${{ matrix.test_task }}
+ run: make -s -j ${{ matrix.test_task }} RUN_OPTS="$RUN_OPTS" YJIT_BENCH_OPTS="$YJIT_BENCH_OPTS"
+ timeout-minutes: 60
+ env:
+ RUBY_TESTOPTS: "-q --tty=no"
+ TEST_BUNDLED_GEMS_ALLOW_FAILURES: ""
+ PRECHECK_BUNDLED_GEMS: "no"
+ LIBCLANG_PATH: ${{ matrix.libclang_path }}
+ continue-on-error: ${{ matrix.test_task == 'yjit-bench' }}
+ - name: Show ${{ github.event.pull_request.base.ref }} GitHub URL for yjit-bench comparison
+ run: echo "https://github.com/${BASE_REPO}/commit/${BASE_SHA}"
+ env:
+ BASE_REPO: ${{ github.event.pull_request.base.repo.full_name }}
+ BASE_SHA: ${{ github.event.pull_request.base.sha }}
+ if: ${{ matrix.test_task == 'yjit-bench' && startsWith(github.event_name, 'pull') }}
+ - uses: ruby/action-slack@0bd85c72233cdbb6a0fe01d37aaeff1d21b5fce1 # v3.2.1
+ with:
+ payload: |
+ {
+ "ci": "GitHub Actions",
+ "env": "${{ github.workflow }} / ${{ matrix.test_task }} ${{ matrix.configure }}",
+ "url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
+ "commit": "${{ github.sha }}",
+ "branch": "${{ github.ref_name }}"
+ }
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.SIMPLER_ALERTS_URL }} # ruby-lang slack: ruby/simpler-alerts-bot
+ if: ${{ failure() && github.event_name == 'push' }}
+
+defaults:
+ run:
+ working-directory: build
diff --git a/.gitignore b/.gitignore
index 916c7aaf9e..99d32a1825 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,9 +10,11 @@
*.dylib
*.elc
*.i
+*.ii
*.inc
*.log
*.o
+*.o.tmp
*.obj
*.old
*.orig
@@ -24,10 +26,10 @@
*.sav
*.sl
*.so
+*.so.*
*.swp
*.yarb
*~
-.*-*
.*.list
.*.time
.DS_Store
@@ -38,6 +40,7 @@
.ppack
.svn
.time
+.ruby-version
Makefile
cygruby*.def
extconf.h
@@ -57,6 +60,8 @@ lcov*.info
/*.pc
/*.rc
/*_prelude.c
+/.downloaded-cache
+/.top-enc.mk
/build*/
/COPYING.LIB
/ChangeLog
@@ -124,8 +129,10 @@ lcov*.info
/ruby-runner
/ruby-runner.h
/ruby-man.rd.gz
+/rubyspec_temp
/run.gdb
/sizes.c
+/static-ruby
/test.rb
/test-coverage.dat
/tmp
@@ -140,6 +147,8 @@ lcov*.info
/bin/*.exe
/bin/*.dll
+/bin/goruby
+/bin/ruby
# /benchmark/
/benchmark/bm_require.data
@@ -179,6 +188,9 @@ lcov*.info
/ext/-test-/win32/dln/dlntest.exp
/ext/-test-/win32/dln/dlntest.lib
+# /ext/-test-/gems
+/ext/-test-/gems
+
# /ext/etc/
/ext/etc/constdefs.h
@@ -191,6 +203,7 @@ lcov*.info
# /ext/ripper/
/ext/ripper/eventids1.c
+/ext/ripper/.eventids2-check
/ext/ripper/eventids2table.c
/ext/ripper/ripper.*
/ext/ripper/ids1
@@ -210,6 +223,9 @@ lcov*.info
/lib/ruby/[1-9]*.*
/lib/ruby/vendor_ruby
+# /misc/
+/misc/**/__pycache__
+
# /spec/bundler
/.rspec_status
@@ -221,6 +237,14 @@ lcov*.info
/win32/*.ico
# MJIT
-/rb_mjit_header.h
-/mjit_config.h
/include/ruby-*/*/rb_mjit_min_header-*.h
+/lib/ruby_vm/mjit/instruction.rb
+/mjit_config.h
+/rb_mjit_header.h
+
+# YJIT
+/yjit-bench
+/yjit_exit_locations.dump
+
+# /wasm/
+/wasm/tests/*.wasm
diff --git a/.indent.pro b/.indent.pro
new file mode 100644
index 0000000000..1d61cbcad1
--- /dev/null
+++ b/.indent.pro
@@ -0,0 +1,32 @@
+-bap
+-nbbb
+-nbc
+-br
+-brs
+-nbs
+-ncdb
+-nce
+-cdw
+-cli2
+-cbi2
+-ndj
+-ncs
+-nfc1
+-i4
+-l120
+-lp
+-npcs
+-psl
+-sc
+-sob
+-sbi4
+-nut
+-par
+
+-TID
+-TVALUE
+-Tst_data_t
+-Tst_index_t
+-Tst_table
+-Trb_data_type_t
+-TFILE
diff --git a/.rdoc_options b/.rdoc_options
new file mode 100644
index 0000000000..760507c7a2
--- /dev/null
+++ b/.rdoc_options
@@ -0,0 +1,4 @@
+---
+page_dir: doc
+main_page: README.md
+title: Documentation for Ruby development version
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 20a70a2fc1..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,541 +0,0 @@
-# -*- YAML -*-
-# Copyright (C) 2011 Urabe, Shyouhei. All rights reserved.
-#
-# This file is a part of the programming language Ruby. Permission is hereby
-# granted, to either redistribute or modify this file, provided that the
-# conditions mentioned in the file COPYING are met. Consult the file for
-# details.
-
-# This is a Travis-CI build configuration file. The list of configurations
-# available is located in
-#
-# http://about.travis-ci.org/docs/user/build-configuration/
-#
-# and as Ruby itself is a project written in C language,
-#
-# http://about.travis-ci.org/docs/user/languages/c/
-#
-# is also a good place to look at.
-
-language: c
-
-os: linux
-
-dist: xenial
-
-git:
- quiet: true
-
-cache:
- ccache: true
- directories:
- - $HOME/config_2nd
- - $HOME/.downloaded-cache
-
-env:
- global:
- # Reset timestamps early
- - _=$(touch NEWS && find . -type f -exec touch -r NEWS {} +)
- - CONFIGURE_TTY=no
- - CCACHE_COMPILERCHECK=none
- - CCACHE_NOCOMPRESS=1
- - CCACHE_MAXSIZE=512Mi
- - NPROC="`nproc`"
- # JOBS and SETARCH are overridden when necessary; see below.
- - JOBS=-j$((1+${NPROC}))
- - SETARCH=
- - RUBY_PREFIX=/tmp/ruby-prefix
- - GEMS_FOR_TEST='timezone tzinfo'
- - UPDATE_UNICODE="UNICODE_FILES=. UNICODE_PROPERTY_FILES=. UNICODE_AUXILIARY_FILES=. UNICODE_EMOJI_FILES=."
- - BEFORE_INSTALL=true
- # https://github.com/travis-ci/travis-build/blob/e411371dda21430a60f61b8f3f57943d2fe4d344/lib/travis/build/bash/travis_apt_get_options.bash#L7
- - travis_apt_get_options='--allow-downgrades --allow-remove-essential --allow-change-held-packages'
- - travis_apt_get_options="-yq --no-install-suggests --no-install-recommends $travis_apt_get_options"
-
-.org.ruby-lang.ci.matrix-definitions:
-
- - &cron-only
- if: (type = cron) AND (branch = master) AND (fork = false)
-
- - &make-test-only
- script:
- - $SETARCH make -s test TESTOPTS="${TESTOPTS=$JOBS -q --tty=no}"
-
- - &gcc-8
- compiler: gcc-8
- # # Not using addon to control retries
- # addons:
- # apt:
- # sources:
- # - ubuntu-toolchain-r-test
- before_install:
- - bash -cx "${BEFORE_INSTALL}"
- - tool/travis_retry.sh sudo -E apt-add-repository -y "ppa:ubuntu-toolchain-r/test"
- - tool/travis_retry.sh sudo bash -c "rm -rf '${TRAVIS_ROOT}/var/lib/apt/lists/'* && exec apt-get update -yq"
- - >-
- tool/travis_retry.sh sudo -E apt-get $travis_apt_get_options install
- ccache
- gcc-8
- g++-8
- libffi-dev
- libgdbm-dev
- libgmp-dev
- libjemalloc-dev
- libncurses5-dev
- libncursesw5-dev
- libreadline6-dev
- libssl-dev
- libyaml-dev
- openssl
- valgrind
- zlib1g-dev
-
- - &clang-8
- compiler: clang-8
- addons:
- apt:
- # Not doing this manually unlike other sources, because it has been stable.
- sources:
- - llvm-toolchain-xenial-8
- config:
- retries: true
- before_install:
- - tool/travis_retry.sh sudo bash -c "rm -rf '${TRAVIS_ROOT}/var/lib/apt/lists/'* && exec apt-get update -yq"
- - >-
- tool/travis_retry.sh sudo -E apt-get $travis_apt_get_options install
- clang-8
- llvm-8-tools
- libffi-dev
- libgdbm-dev
- libgmp-dev
- libjemalloc-dev
- libncurses5-dev
- libncursesw5-dev
- libreadline6-dev
- libssl-dev
- libyaml-dev
- openssl
- valgrind
- zlib1g-dev
-
- # --------
-
- - &x86_64-linux
- name: x86_64-linux
- <<: *gcc-8
-
- - &arm64-linux
- name: arm64-linux
- arch: arm64
- <<: *gcc-8
-
- - &s390x-linux
- name: s390x-linux
- arch: s390x
- <<: *gcc-8
-
- - &jemalloc
- name: --with-jemalloc
- <<: *gcc-8
- <<: *cron-only
- env:
- - CONFIG_FLAG='--with-gmp --with-jemalloc --with-valgrind'
-
- - &assertions
- name: RUBY_DEBUG=1
- <<: *gcc-8
- #<<: *cron-only
- <<: *make-test-only
- env:
- - GEMS_FOR_TEST=
- - cppflags='-DRUBY_DEBUG -DVM_CHECK_MODE=1 -DTRANSIENT_HEAP_CHECK_MODE -DRGENGC_CHECK_MODE -DENC_DEBUG'
-
- - &VM_CHECK_MODE
- name: VM_CHECK_MODE=3
- <<: *gcc-8
- <<: *cron-only
- <<: *make-test-only
- env:
- - GEMS_FOR_TEST=
- - cppflags=-DVM_CHECK_MODE=0x0003
-
- - &SUPPORT_JOKE
- name: SUPPORT_JOKE
- <<: *gcc-8
- <<: *cron-only
- <<: *make-test-only
- env:
- - BEFORE_INSTALL="sed vm_opts.h -e 's/OPT_SUPPORT_JOKE *0/OPT_SUPPORT_JOKE 1/' -i"
-
- - &CPDEBUG
- name: CPDEBUG
- <<: *gcc-8
- <<: *cron-only
- <<: *make-test-only
- env:
- - cppflags=-DCPDEBUG
-
- - &WITH_COROUTINE_UCONTEXT
- name: COROUTINE=ucontext
- <<: *gcc-8
- <<: *cron-only
- env:
- - CONFIG_FLAG='--with-coroutine=ucontext'
-
- - &WITH_COROUTINE_COPY
- name: COROUTINE=copy
- <<: *gcc-8
- <<: *cron-only
- env:
- - CONFIG_FLAG='--with-coroutine=copy'
-
- - &TOKEN_THREADED_CODE
- name: TOKEN_THREADED_CODE
- <<: *gcc-8
- <<: *cron-only
- <<: *make-test-only
- env:
- - GEMS_FOR_TEST=
- - cppflags=-DOPT_THREADED_CODE=1
-
- - &CALL_THREADED_CODE
- name: CALL_THREADED_CODE
- <<: *gcc-8
- <<: *cron-only
- <<: *make-test-only
- env:
- - GEMS_FOR_TEST=
- - cppflags=-DOPT_THREADED_CODE=2
-
- - &NO_THREADED_CODE
- name: NO_THREADED_CODE
- <<: *gcc-8
- <<: *cron-only
- <<: *make-test-only
- env:
- - GEMS_FOR_TEST=
- - cppflags=-DOPT_THREADED_CODE=3
-
- - &ASAN
- name: -fsanitize=address
- <<: *clang-8
- #<<: *cron-only
- <<: *make-test-only
- env:
- - GEMS_FOR_TEST=
- - ASAN_OPTIONS=detect_leaks=0
- - cflags='-U_FORTIFY_SOURCE -march=native -fsanitize=address -fno-omit-frame-pointer -fPIC'
- - debugflags=-ggdb3
- - optflags=-O1
- - LD=clang-8
- - LDFLAGS='-fsanitize=address -fPIC'
- - CONFIG_FLAG='--with-out-ext=openssl --without-gmp --without-jemalloc --without-valgrind'
-
- - &MSAN
- name: -fsanitize=memory
- <<: *clang-8
- #<<: *cron-only
- <<: *make-test-only
- env:
- - GEMS_FOR_TEST=
- - cflags='-U_FORTIFY_SOURCE -fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -fPIC'
- - optflags=-O1
- - LD=clang-8
- - LDFLAGS='-fsanitize=memory -fPIC'
- - CONFIG_FLAG='--with-out-ext=openssl --without-gmp --without-jemalloc --without-valgrind'
-
- - &UBSAN
- name: -fsanitize=undefined
- <<: *clang-8
- #<<: *cron-only
- <<: *make-test-only
- env:
- - GEMS_FOR_TEST=
- - cflags='-U_FORTIFY_SOURCE -fsanitize=undefined,integer,nullability -fno-sanitize=implicit-integer-sign-change,unsigned-integer-overflow'
- - cppflags=-DUNALIGNED_WORD_ACCESS=0
- - debugflags=-ggdb3
- - optflags='-O1 -march=native'
- - LD=clang-8
- - LDFLAGS='-fsanitize=undefined,integer,nullability -fno-sanitize=implicit-integer-sign-change,unsigned-integer-overflow'
-
- - &i686-linux
- name: i686-linux
- compiler: gcc-8
- env:
- - GCC_FLAGS=-m32
- - CXX='g++-8 -m32'
- - debugflags=-g0
- - SETARCH='setarch i686 --verbose --3gb'
- # # Not using addon to control retries
- # addons:
- # apt:
- # sources:
- # - ubuntu-toolchain-r-test
- before_install:
- - tool/travis_retry.sh sudo -E apt-add-repository -y "ppa:ubuntu-toolchain-r/test"
- - tool/travis_retry.sh sudo bash -c "rm -rf '${TRAVIS_ROOT}/var/lib/apt/lists/'* && exec apt-get update -yq"
- - >-
- tool/travis_retry.sh sudo -E apt-get $travis_apt_get_options install
- gcc-8-multilib
- g++-8
- g++-8-multilib
- libstdc++-8-dev:i386
- libffi-dev:i386
- libffi6:i386
- libgdbm-dev:i386
- libgdbm3:i386
- libncurses5-dev:i386
- libncurses5:i386
- libncursesw5-dev:i386
- libreadline6-dev:i386
- libreadline6:i386
- libssl-dev:i386
- libssl1.0.0:i386
- linux-libc-dev:i386
- zlib1g-dev:i386
- zlib1g:i386
-
- - &arm32-linux
- name: arm32-linux
- arch: arm64
- # https://packages.ubuntu.com/xenial/crossbuild-essential-armhf
- compiler: arm-linux-gnueabihf-gcc
- env:
- - debugflags=-g0
- - SETARCH='setarch linux32 --verbose --32bit'
- before_install:
- - sudo dpkg --add-architecture armhf
- - tool/travis_retry.sh sudo bash -c "rm -rf '${TRAVIS_ROOT}/var/lib/apt/lists/'* && exec apt-get update -yq"
- - >-
- tool/travis_retry.sh sudo -E apt-get $travis_apt_get_options install
- ccache
- crossbuild-essential-armhf
- libc6:armhf
- libstdc++-5-dev:armhf
- libffi-dev:armhf
- libffi6:armhf
- libgdbm-dev:armhf
- libgdbm3:armhf
- libncurses5-dev:armhf
- libncurses5:armhf
- libncursesw5-dev:armhf
- libreadline6-dev:armhf
- libreadline6:armhf
- libssl-dev:armhf
- libssl1.0.0:armhf
- linux-libc-dev:armhf
- zlib1g-dev:armhf
- zlib1g:armhf
-
- - &pedanticism
- name: -std=c99 -pedantic
- compiler: clang
- <<: *make-test-only
- env:
- - GEMS_FOR_TEST=
- - GCC_FLAGS='-std=c99 -Werror=pedantic -pedantic-errors'
- - CONFIG_FLAG=
- - JOBS=
- - >-
- warnflags='
- -Wall
- -Wextra
- -Werror=deprecated-declarations
- -Werror=division-by-zero
- -Werror=extra-tokens
- -Werror=implicit-function-declaration
- -Werror=implicit-int
- -Werror=pointer-arith
- -Werror=shorten-64-to-32
- -Werror=write-strings
- -Wmissing-noreturn
- -Wno-constant-logical-operand
- -Wno-missing-field-initializers
- -Wno-overlength-strings
- -Wno-parentheses-equality
- -Wno-self-assign
- -Wno-tautological-compare
- -Wno-unused-local-typedef
- -Wno-unused-parameter
- -Wunused-variable'
- - LDFLAGS=-Wno-unused-command-line-argument
-
- - &spec-on-old-ruby
- language: ruby
- before_install:
- install:
- before_script: chmod -R u+w spec/ruby
- # -j randomly hangs.
- script: ruby -C spec/ruby ../mspec/bin/mspec .
-
- - &rubyspec25
- name: Check ruby/spec version guards on Ruby 2.5
- rvm: 2.5.7
- <<: *spec-on-old-ruby
- after_failure:
- - echo "ruby/spec failed on Ruby 2.5. This is likely because of a missing ruby_version_is guard, please add it. See spec/README.md."
-
- - &rubyspec27
- name: Check ruby/spec version guards on Ruby 2.7
- rvm: 2.7.0
- <<: *spec-on-old-ruby
- after_failure:
- - echo "ruby/spec failed on Ruby 2.7. This is likely because of a missing ruby_version_is guard, please add it. See spec/README.md."
-
- - &baseruby
- name: "BASERUBY: Ruby 2.2"
- <<: *gcc-8
- <<: *make-test-only
- language: ruby
- rvm: 2.2
-
- - &dependency
- name: Check dependencies in makefiles
- language: ruby
- before_install:
- install:
- before_script:
- - |-
- ruby -e 'new = []
- Dir.glob("ext/**/extconf.rb") {|ex|
- unless File.exist?(dep = File.dirname(ex)+"/depend")
- puts "Adding "+dep
- File.copy_stream("template/depend.tmpl", dep)
- new << dep
- end
- }
- exec("git", "add", *new) unless new.empty?'
- - git diff --cached
- - "> config.status"
- - "> .rbconfig.time"
- - sed -f tool/prereq.status template/Makefile.in common.mk > Makefile
- - make touch-unicode-files
- - make -s $JOBS $UPDATE_UNICODE up
- - make -s $JOBS srcs
- - rm -f config.status Makefile rbconfig.rb .rbconfig.time
- - $SETARCH ./configure -C --disable-install-doc --prefix=$RUBY_PREFIX --disable-rubygems 'optflags=-O0' 'debugflags=-save-temps=obj -g'
- - ruby tool/update-deps --fix
- script:
- - git diff --no-ext-diff --ignore-submodules --exit-code
- after_failure:
- - echo "Dependencies need to update"
- env:
- - CONFIG_FLAG=
-
-matrix:
- include:
- # Build every commit:
- - <<: *x86_64-linux
- - <<: *i686-linux
- - <<: *pedanticism
- - <<: *assertions
- - <<: *baseruby
- - <<: *rubyspec25
- - <<: *rubyspec27
- - <<: *dependency
- # Build every commit (Allowed Failures):
- - <<: *arm32-linux
- - <<: *arm64-linux
- - <<: *s390x-linux
- - <<: *ASAN
- - <<: *MSAN
- - <<: *UBSAN
- # Cron only:
- - <<: *jemalloc
- - <<: *VM_CHECK_MODE
- - <<: *SUPPORT_JOKE
- - <<: *CPDEBUG
- - <<: *WITH_COROUTINE_UCONTEXT
- - <<: *WITH_COROUTINE_COPY
- - <<: *TOKEN_THREADED_CODE
- - <<: *CALL_THREADED_CODE
- - <<: *NO_THREADED_CODE
- allow_failures:
- - name: arm32-linux
- - name: arm64-linux
- - name: s390x-linux
- - name: -fsanitize=address
- - name: -fsanitize=memory
- fast_finish: true
-
-before_script:
- - rm -fr .ext autom4te.cache
- - |-
- [ -d ~/.downloaded-cache ] ||
- mkdir ~/.downloaded-cache
- - ln -s ~/.downloaded-cache
- - "> config.status"
- - "> .rbconfig.time"
- - sed -f tool/prereq.status template/Makefile.in common.mk > Makefile
- - make touch-unicode-files
- - make -s $JOBS $UPDATE_UNICODE up
- - make -s $JOBS srcs
- - rm -f config.status Makefile rbconfig.rb .rbconfig.time
- - |-
- if [ -d ~/config_2nd ]; then
- cp -pr ~/config_2nd build
- else
- mkdir build
- fi
- - mkdir config_1st config_2nd
- - chmod -R a-w .
- - chmod -R u+w build config_1st config_2nd
- - cd build
- - |-
- case "$CC" in
- gcc*) CC="ccache $CC${GCC_FLAGS:+ }$GCC_FLAGS -fno-diagnostics-color";;
- clang*) CC="ccache $CC${GCC_FLAGS:+ }$GCC_FLAGS -fno-color-diagnostics";;
- esac
- - |-
- [ ! -f config.cache ] ||
- [ "$CC" = "`sed -n s/^ac_cv_prog_CC=//p config.cache`" ] ||
- (set -x; exec rm config.cache)
- - $SETARCH ../configure -C --disable-install-doc --prefix=$RUBY_PREFIX $CONFIG_FLAG
- - cp -pr config.cache config.status .ext/include ../config_1st
- - $SETARCH make reconfig
- - cp -pr config.cache config.status .ext/include ../config_2nd
- - (cd .. && exec diff -ru config_1st config_2nd)
- - chmod u+w ..
- - rm -rf ~/config_2nd
- - mv ../config_2nd ~
- - chmod u-w ..
- - $SETARCH make -s $JOBS
- - make -s install
- - |-
- [ -z "${GEMS_FOR_TEST}" ] ||
- $RUBY_PREFIX/bin/gem install --no-document $GEMS_FOR_TEST
- - echo "raise 'do not load ~/.irbrc in test'" > ~/.irbrc
-
-script:
- - $SETARCH make -s test -o showflags TESTOPTS="${TESTOPTS=$JOBS -q --tty=no}"
- - travis_wait 50 $SETARCH make -s test-all -o exts TESTOPTS="${TESTOPTS} ${TEST_ALL_OPTS}" RUBYOPT="-w"
- - $SETARCH make -s test-spec MSPECOPT=-ff # not using `-j` because sometimes `mspec -j` silently dies
- - $SETARCH make -s -o showflags leaked-globals
-
-# Branch matrix. Not all branches are Travis-ready so we limit branches here.
-branches:
- only:
- - master
- - ruby_2_4
- - ruby_2_5
- - ruby_2_6
- - ruby_2_7
-
-# We want to be notified when something happens.
-notifications:
- irc:
- channels:
- - "chat.freenode.net#ruby-core"
- on_success: change # [always|never|change] # default: always
- on_failure: always # [always|never|change] # default: always
- template:
- - "%{message} by @%{author}: See %{build_url}"
-
- webhooks:
- urls:
- - secure: mRsoS/UbqDkKkW5p3AEqM27d4SZnV6Gsylo3bm8T/deltQzTsGzZwrm7OIBXZv0UFZdE68XmPlyHfZFLSP2V9QZ7apXMf9/vw0GtcSe1gchtnjpAPF6lYBn7nMCbVPPx9cS0dwL927fjdRM1vj7IKZ2bk4F0lAJ25R25S6teqdk= # ruby-lang slack: ruby/simpler-alerts-bot (travis)
- on_success: never
- on_failure: always
-
- email:
- - ko1c-failure@atdot.net
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ffdf2dd4b8..13df6087ca 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,4 +1 @@
-Please see the [official issue tracker] and wiki [HowToContribute].
-
-[official issue tracker]: https://bugs.ruby-lang.org
-[HowToContribute]: https://bugs.ruby-lang.org/projects/ruby/wiki/HowToContribute
+See ["Contributing to Ruby"](https://docs.ruby-lang.org/en/master/contributing_md.html), which includes setup and build instructions.
diff --git a/LEGAL b/LEGAL
index 2f9ad10ca8..0423d57ac9 100644
--- a/LEGAL
+++ b/LEGAL
@@ -60,6 +60,11 @@ mentioned below.
This file is licensed under the {MIT License}[rdoc-label:label-MIT+License].
+[coroutine]
+
+ Unless otherwise specified, these files are licensed under the
+ {MIT License}[rdoc-label:label-MIT+License].
+
[include/ruby/onigmo.h]
[include/ruby/oniguruma.h]
[regcomp.c]
@@ -309,17 +314,6 @@ mentioned below.
This configure script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it.
-[aclocal.m4]
-
- This file is free software.
-
- >>>
- Copyright (C) 1996-2020:: Free Software Foundation, Inc.
-
- This file is free software; the Free Software Foundation
- gives unlimited permission to copy and/or distribute it,
- with or without modifications, as long as this notice is preserved.
-
[tool/config.guess]
[tool/config.sub]
@@ -349,6 +343,34 @@ mentioned below.
program. This Exception is an additional permission under section 7
of the GNU General Public License, version 3 ("GPLv3").
+[tool/lib/test/*]
+[tool/lib/core_assertions.rb]
+
+ Some of methods on these files are based on MiniTest 4. MiniTest 4 is
+ distributed under the MIT License.
+
+ >>>
+ Copyright (c) Ryan Davis, seattle.rb
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ 'Software'), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
[parse.c]
[parse.h]
@@ -446,7 +468,8 @@ mentioned below.
>>>
A C-program for MT19937, with initialization improved 2002/2/10.::
- Coded by Takuji Nishimura and Makoto Matsumoto.
+ Coded by Takuji Nishimura and Makoto Matsumoto.
+
This is a faster version by taking Shawn Cokus's optimization,
Matthe Bellew's simplification, Isaku Wada's real version.
@@ -537,12 +560,8 @@ mentioned below.
[include/ruby/st.h]
[missing/acosh.c]
[missing/alloca.c]
-[missing/dup2.c]
[missing/erf.c]
-[missing/finite.c]
[missing/hypot.c]
-[missing/isinf.c]
-[missing/isnan.c]
[missing/lgamma_r.c]
[missing/memcmp.c]
[missing/memmove.c]
@@ -553,8 +572,6 @@ mentioned below.
[ext/date/date_strftime.c]
[ext/digest/sha1/sha1.c]
[ext/digest/sha1/sha1.h]
-[ext/sdbm/_sdbm.c]
-[ext/sdbm/sdbm.h]
These files are all under public domain.
@@ -942,9 +959,7 @@ mentioned below.
[lib/bundler]
[lib/bundler.rb]
-[lib/bundler.gemspec]
[spec/bundler]
-[man/bundle-*,gemfile.*]
Bundler is under the following license.
@@ -954,6 +969,73 @@ mentioned below.
{MIT License}[rdoc-label:label-MIT+License]
+[lib/bundler/vendor/thor]
+
+ Thor is under the following license.
+
+ >>>
+ Copyright (c) 2008 Yehuda Katz, Eric Hodel, et al.
+
+ {MIT License}[rdoc-label:label-MIT+License]
+
+[lib/rubygems/resolver/molinillo]
+
+ molinillo is under the following license.
+
+ >>>
+ Copyright (c) 2014 Samuel E. Giddins segiddins@segiddins.me
+
+ {MIT License}[rdoc-label:label-MIT+License]
+
+[lib/bundler/vendor/pub_grub]
+
+ pub_grub is under the following license.
+
+ >>>
+ Copyright (c) 2018 John Hawthorn
+
+ {MIT License}[rdoc-label:label-MIT+License]
+
+[lib/bundler/vendor/connection_pool]
+
+ connection_pool is under the following license.
+
+ >>>
+ Copyright (c) 2011 Mike Perham
+
+ {MIT License}[rdoc-label:label-MIT+License]
+
+[lib/bundler/vendor/net-http-persistent]
+
+ net-http-persistent is under the following license.
+
+ >>>
+ Copyright (c) Eric Hodel, Aaron Patterson
+
+ {MIT License}[rdoc-label:label-MIT+License]
+
+[lib/did_you_mean]
+[lib/did_you_mean.rb]
+[test/did_you_mean]
+
+ did_you_mean is under the following license.
+
+ >>>
+ Copyright (c) 2014-2016 Yuki Nishijima
+
+ {MIT License}[rdoc-label:label-MIT+License]
+
+[lib/error_highlight]
+[lib/error_highlight.rb]
+[test/error_highlight]
+
+ error_highlight is under the following license.
+
+ >>>
+ Copyright (c) 2021 Yusuke Endoh
+
+ {MIT License}[rdoc-label:label-MIT+License]
+
[benchmark/so_ackermann.rb]
[benchmark/so_array.rb]
[benchmark/so_binary_trees.rb]
diff --git a/NEWS.md b/NEWS.md
index 6856a29517..f6c3c6fc97 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,217 +1,820 @@
-# NEWS for Ruby 2.8.0 (tentative; to be 3.0.0)
+# NEWS for Ruby 3.2.0
-This document is a list of user visible feature changes
-since the **2.7.0** release, except for bug fixes.
+This document is a list of user-visible feature changes
+since the **3.1.0** release, except for bug fixes.
-Note that each entry is kept so brief that no reason behind or reference
-information is supplied with. For a full list of changes with all
-sufficient information, see the ChangeLog file or Redmine
-(e.g. `https://bugs.ruby-lang.org/issues/$FEATURE_OR_BUG_NUMBER`).
+Note that each entry is kept to a minimum; see the links for details.
## Language changes
-* Keyword arguments are now separated from positional arguments.
- Code that resulted in deprecation warnings in Ruby 2.7 will now
- result in ArgumentError or different behavior. [[Feature #14183]]
-
-* Procs accepting a single rest argument and keywords are no longer
- subject to autosplatting. This now matches the behavior of Procs
- accepting a single rest argument and no keywords.
- [[Feature #16166]]
+* Anonymous rest and keyword rest arguments can now be passed as
+  arguments, instead of just being used in method parameters.
+ [[Feature #18351]]
```ruby
- pr = proc{|*a, **kw| [a, kw]}
+ def foo(*)
+ bar(*)
+ end
+ def baz(**)
+ quux(**)
+ end
+ ```
- pr.call([1])
- # 2.7 => [[1], {}]
- # 3.0 => [[[1]], {}]
+* A proc that accepts a single positional argument and keywords will
+ no longer autosplat. [[Bug #18633]]
- pr.call([1, {a: 1}])
- # 2.7 => [[1], {:a=>1}] # and deprecation warning
- # 3.0 => [[[1, {:a=>1}]], {}]
+ ```ruby
+ proc{|a, **k| a}.call([1, 2])
+ # Ruby 3.1 and before
+ # => 1
+ # Ruby 3.2 and after
+ # => [1, 2]
```
-* $SAFE is now a normal global variable with no special behavior.
- [[Feature #16131]]
+* Constant assignment evaluation order for constants set on explicit
+ objects has been made consistent with single attribute assignment
+ evaluation order. With this code:
-* yield in singleton class definitions in methods is now a SyntaxError
- instead of a warning. yield in a class definition outside of a method
- is now a SyntaxError instead of a LocalJumpError. [[Feature #15575]]
+ ```ruby
+ foo::BAR = baz
+ ```
-* Rightward assignment statement is added. [EXPERIMENTAL]
- [[Feature #15921]]
+ `foo` is now called before `baz`. Similarly, for multiple assignments
+ to constants, left-to-right evaluation order is used. With this
+ code:
```ruby
- fib(10) => x
+ foo1::BAR1, foo2::BAR2 = baz1, baz2
```
-* Endless method definition is added. [EXPERIMENTAL]
- [[Feature #16746]]
+ The following evaluation order is now used:
+
+ 1. `foo1`
+ 2. `foo2`
+ 3. `baz1`
+ 4. `baz2`
+
+ [[Bug #15928]]
+
+* "Find pattern" is no longer experimental.
+ [[Feature #18585]]
+
+* Methods taking a rest parameter (like `*args`) and wishing to delegate keyword
+  arguments through `foo(*args)` must now be marked with `ruby2_keywords`
+  (if not already the case). In other words, all methods wishing to delegate
+  keyword arguments through `*args` must now be marked with `ruby2_keywords`,
+  without exception. This will make it easier to transition to other ways of
+  delegation once a library can require Ruby 3+. Previously, the `ruby2_keywords`
+  flag was kept if the receiving method took `*args`, but this was a bug and an
+  inconsistency. A good technique for finding a potentially missing
+  `ruby2_keywords` is to run the test suite and, where it fails, find the last
+  method which must receive keyword arguments, add `puts nil, caller, nil`
+  there, and check that each method/block on the call chain which must delegate
+  keywords is correctly marked as `ruby2_keywords`. [[Bug #18625]] [[Bug #16466]]
```ruby
- def square(x) = x * x
- ```
+ def target(**kw)
+ end
-## Command line options
+ # Accidentally worked without ruby2_keywords in Ruby 2.7-3.1, ruby2_keywords
+ # needed in 3.2+. Just like (*args, **kwargs) or (...) would be needed on
+ # both #foo and #bar when migrating away from ruby2_keywords.
+ ruby2_keywords def bar(*args)
+ target(*args)
+ end
-### `--help` option
+ ruby2_keywords def foo(*args)
+ bar(*args)
+ end
-When the environment variable `RUBY_PAGER` or `PAGER` is present and has
-non-empty value, and the standard input and output are tty, `--help`
-option shows the help message via the pager designated by the value.
-[[Feature #16754]]
+ foo(k: 1)
+ ```
## Core classes updates
-Outstanding ones only.
+Note: We're only listing outstanding class updates.
+
+* Fiber
+
+ * Introduce Fiber.[] and Fiber.[]= for inheritable fiber storage.
+ Introduce Fiber#storage and Fiber#storage= (experimental) for
+ getting and resetting the current storage. Introduce
+ `Fiber.new(storage:)` for setting the storage when creating a
+ fiber. [[Feature #19078]]
+
+    Existing Thread- and Fiber-local variables can be tricky to use.
+    Thread-local variables are shared between all fibers, making them
+    hard to isolate, while Fiber-local variables can be hard to
+    share. It is often desirable to define a unit of execution
+ ("execution context") such that some state is shared between all
+ fibers and threads created in that context. This is what Fiber
+ storage provides.
+
+ ```ruby
+ def log(message)
+ puts "#{Fiber[:request_id]}: #{message}"
+ end
+
+ def handle_requests
+ while request = read_request
+ Fiber.schedule do
+ Fiber[:request_id] = SecureRandom.uuid
+
+ request.messages.each do |message|
+ Fiber.schedule do
+ log("Handling #{message}") # Log includes inherited request_id.
+ end
+ end
+ end
+ end
+ end
+ ```
+
+ You should generally consider Fiber storage for any state which
+ you want to be shared implicitly between all fibers and threads
+ created in a given context, e.g. a connection pool, a request
+ id, a logger level, environment variables, configuration, etc.
+
+* Fiber::Scheduler
+
+ * Introduce `Fiber::Scheduler#io_select` for non-blocking IO.select.
+ [[Feature #19060]]
+
+* IO
+
+ * Introduce IO#timeout= and IO#timeout which can cause
+ IO::TimeoutError to be raised if a blocking operation exceeds the
+ specified timeout. [[Feature #18630]]
+
+ ```ruby
+ STDIN.timeout = 1
+ STDIN.read # => Blocking operation timed out! (IO::TimeoutError)
+ ```
+
+ * Introduce `IO.new(..., path:)` and promote `File#path` to `IO#path`.
+ [[Feature #19036]]
+
+* Class
+
+  * Class#attached_object has been added. It returns the object for
+    which the receiver is the singleton class, and raises TypeError
+    if the receiver is not a singleton class.
+ [[Feature #12084]]
-* Dir
+ ```ruby
+ class Foo; end
- * Modified method
+ Foo.singleton_class.attached_object #=> Foo
+ Foo.new.singleton_class.attached_object #=> #<Foo:0x000000010491a370>
+ Foo.attached_object #=> TypeError: `Foo' is not a singleton class
+ nil.singleton_class.attached_object #=> TypeError: `NilClass' is not a singleton class
+ ```
- * Dir.glob and Dir.[] now sort the results by default, and
- accept `sort:` keyword option. [[Feature #8709]]
+* Data
+
+  * A new core class to represent simple immutable value objects. The class
+    is similar to Struct and partially shares an implementation, but has a
+    leaner and stricter API. [[Feature #16122]]
+
+ ```ruby
+ Measure = Data.define(:amount, :unit)
+ distance = Measure.new(100, 'km') #=> #<data Measure amount=100, unit="km">
+ weight = Measure.new(amount: 50, unit: 'kg') #=> #<data Measure amount=50, unit="kg">
+ weight.with(amount: 40) #=> #<data Measure amount=40, unit="kg">
+ weight.amount #=> 50
+ weight.amount = 40 #=> NoMethodError: undefined method `amount='
+ ```
+
+* Encoding
+
+ * Encoding#replicate has been deprecated and will be removed in 3.3. [[Feature #18949]]
+ * The dummy `Encoding::UTF_16` and `Encoding::UTF_32` encodings no longer
+    try to dynamically guess the endianness based on a byte order mark.
+ Use `Encoding::UTF_16BE`/`UTF_16LE` and `Encoding::UTF_32BE`/`UTF_32LE` instead.
+ This change speeds up getting the encoding of a String. [[Feature #18949]]
+  * The maximum encoding set size is now limited to 256.
+    If the limit is exceeded, `EncodingError` will be raised. [[Feature #18949]]
+
+* Enumerator
+
+ * Enumerator.product has been added. Enumerator::Product is the implementation. [[Feature #18685]]
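+
+    A one-line illustration (results shown as comments):
+
+    ```ruby
+    Enumerator.product([1, 2], [3, 4]).to_a
+    # => [[1, 3], [1, 4], [2, 3], [2, 4]]
+    ```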
+
+* Exception
+
+ * Exception#detailed_message has been added.
+ The default error printer calls this method on the Exception object
+ instead of #message. [[Feature #18564]]
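+
+    A minimal sketch, assuming the default formatting (which appends
+    the class name):
+
+    ```ruby
+    e = RuntimeError.new("boom")
+    e.message           # => "boom"
+    e.detailed_message  # => "boom (RuntimeError)"
+    ```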
* Hash
- * Modified method
+ * Hash#shift now always returns nil if the hash is
+ empty, instead of returning the default value or
+ calling the default proc. [[Bug #16908]]
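+
+    An illustrative example of the new behavior:
+
+    ```ruby
+    h = Hash.new(:default)
+    h.shift  # Ruby 3.1: => :default; Ruby 3.2: => nil
+    ```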
+
+* Integer
- * Hash#transform_keys now accepts a hash that maps keys to new
- keys. [[Feature #16274]]
+ * Integer#ceildiv has been added. [[Feature #18809]]
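+
+    A minimal illustration (integer division rounding up):
+
+    ```ruby
+    7.ceildiv(3)  # => 3
+    6.ceildiv(3)  # => 2
+    ```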
* Kernel
- * Modified method
+ * Kernel#binding raises RuntimeError if called from a non-Ruby frame
+ (such as a method defined in C). [[Bug #18487]]
- * Kernel#clone when called with `freeze: false` keyword will call
- #initialize_clone with the `freeze: false` keyword.
- [[Bug #14266]]
+* MatchData
- * Kernel#eval when called with two arguments will use "(eval)"
- for `__FILE__` and 1 for `__LINE__` in the evaluated code.
- [[Bug #4352]]
+ * MatchData#byteoffset has been added. [[Feature #13110]]
+ * MatchData#deconstruct has been added. [[Feature #18821]]
+ * MatchData#deconstruct_keys has been added. [[Feature #18821]]
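+
+    An illustrative sketch of the three methods:
+
+    ```ruby
+    m = /(?<word>l+)(?<last>o)/.match("hello")
+    m.byteoffset(1)              # => [2, 4] (byte offsets of capture 1)
+    m.deconstruct                # => ["ll", "o"]
+    m.deconstruct_keys([:word])  # => {word: "ll"}
+    ```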
* Module
- * Modified method
+ * Module.used_refinements has been added. [[Feature #14332]]
+ * Module#refinements has been added. [[Feature #12737]]
+ * Module#const_added has been added. [[Feature #17881]]
+ * Module#undefined_instance_methods has been added. [[Feature #12655]]
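+
+    For instance, an illustrative sketch of the `const_added` hook:
+
+    ```ruby
+    class Foo
+      def self.const_added(name)
+        super
+        puts "defined #{name}"
+      end
+      BAR = 1  # prints "defined BAR"
+    end
+    ```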
+
+* Proc
+
+ * Proc#dup returns an instance of subclass. [[Bug #17545]]
+ * Proc#parameters now accepts lambda keyword. [[Feature #15357]]
+
+* Process
+
+  * Added the `RLIMIT_NPTS` constant on the FreeBSD platform.
+
+* Regexp
+
+  * A cache-based optimization has been introduced.
+    Many (but not all) Regexp matches now run in linear time, which
+    helps prevent regular expression denial of service (ReDoS)
+    vulnerabilities. [[Feature #19104]]
+
+ * Regexp.linear_time? is introduced. [[Feature #19194]]
+
+ * Regexp.new now supports passing the regexp flags not only as an Integer,
+ but also as a String. Unknown flags raise ArgumentError.
+    Otherwise, anything other than `true`, `false`, `nil` or an Integer will trigger a warning.
+ [[Feature #18788]]
+
+  * Regexp.timeout= has been added. Also, Regexp.new now supports a `timeout` keyword.
+ See [[Feature #17837]]
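+
+    An illustrative sketch of the new APIs:
+
+    ```ruby
+    Regexp.linear_time?(/a+b/)    # => true
+    Regexp.linear_time?(/(a)\1/)  # => false (back-references are not linear)
+
+    Regexp.timeout = 1.0                  # global timeout, in seconds
+    re = Regexp.new("a*b", timeout: 0.5)  # per-Regexp timeout
+    ```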
+
+* Refinement
+
+ * Refinement#refined_class has been added. [[Feature #12737]]
+
+* RubyVM::AbstractSyntaxTree
+
+ * Add `error_tolerant` option for `parse`, `parse_file` and `of`. [[Feature #19013]]
+    With this option:
+
+    1. SyntaxError is suppressed
+    2. An AST is returned even for invalid input
+    3. Missing `end` keywords are supplied when the parser reaches the end of input
+    4. `end` is treated as a keyword based on indentation
+
+ ```ruby
+ # Without error_tolerant option
+ root = RubyVM::AbstractSyntaxTree.parse(<<~RUBY)
+ def m
+ a = 10
+ if
+ end
+ RUBY
+ # => <internal:ast>:33:in `parse': syntax error, unexpected `end' (SyntaxError)
+
+ # With error_tolerant option
+ root = RubyVM::AbstractSyntaxTree.parse(<<~RUBY, error_tolerant: true)
+ def m
+ a = 10
+ if
+ end
+ RUBY
+ p root # => #<RubyVM::AbstractSyntaxTree::Node:SCOPE@1:0-4:3>
+
+ # `end` is treated as keyword based on indent
+ root = RubyVM::AbstractSyntaxTree.parse(<<~RUBY, error_tolerant: true)
+ module Z
+ class Foo
+ foo.
+ end
+
+ def bar
+ end
+ end
+ RUBY
+ p root.children[-1].children[-1].children[-1].children[-2..-1]
+ # => [#<RubyVM::AbstractSyntaxTree::Node:CLASS@2:2-4:5>, #<RubyVM::AbstractSyntaxTree::Node:DEFN@6:2-7:5>]
+ ```
+
+ * Add `keep_tokens` option for `parse`, `parse_file` and `of`. Add `#tokens` and `#all_tokens`
+    for RubyVM::AbstractSyntaxTree::Node. [[Feature #19070]]
+
+ ```ruby
+ root = RubyVM::AbstractSyntaxTree.parse("x = 1 + 2", keep_tokens: true)
+ root.tokens # => [[0, :tIDENTIFIER, "x", [1, 0, 1, 1]], [1, :tSP, " ", [1, 1, 1, 2]], ...]
+ root.tokens.map{_1[2]}.join # => "x = 1 + 2"
+ ```
- * Module#include now includes the arguments in modules and
- classes that have already included or prepended the receiver,
- mirroring the behavior if the arguments were included in the
- receiver before the other modules and classes included or
- prepended the receiver. [[Feature #9573]]
+* Set
-* Symbol
+ * Set is now available as a built-in class without the need for `require "set"`. [[Feature #16989]]
+ It is currently autoloaded via the Set constant or a call to Enumerable#to_set.
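+
+    An illustrative example:
+
+    ```ruby
+    Set[1, 2, 2, 3]  # => #<Set: {1, 2, 3}>, no `require "set"` needed
+    ```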
- * Modified method
+* String
- * Symbol#to_proc now returns a lambda Proc.
- [[Feature #16260]]
+ * String#byteindex and String#byterindex have been added. [[Feature #13110]]
+ * Update Unicode to Version 15.0.0 and Emoji Version 15.0. [[Feature #18639]]
+ (also applies to Regexp)
+ * String#bytesplice has been added. [[Feature #18598]]
+ * String#dedup has been added as an alias to String#-@. [[Feature #18595]]
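+
+    A short illustration of the byte-oriented methods:
+
+    ```ruby
+    "Résumé".byteindex("s")  # => 3 ("é" is two bytes in UTF-8)
+    s = +"hello"
+    s.bytesplice(0, 2, "HE") # replaces the first two bytes
+    s                        # => "HEllo"
+    ```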
+
+* Struct
+
+ * A Struct class can also be initialized with keyword arguments
+    without `keyword_init: true` on Struct.new. [[Feature #16806]]
+
+ ```ruby
+ Post = Struct.new(:id, :name)
+ Post.new(1, "hello") #=> #<struct Post id=1, name="hello">
+ # From Ruby 3.2, the following code also works without keyword_init: true.
+ Post.new(id: 1, name: "hello") #=> #<struct Post id=1, name="hello">
+ ```
+
+* Thread
+
+ * Thread.each_caller_location is added. [[Feature #16663]]
+
+* Thread::Queue
+
+ * Thread::Queue#pop(timeout: sec) is added. [[Feature #18774]]
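+
+    An illustrative sketch (returns nil when the timeout expires):
+
+    ```ruby
+    q = Thread::Queue.new
+    q.pop(timeout: 0.1)  # => nil after waiting up to 0.1 seconds
+    ```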
+
+* Thread::SizedQueue
+
+ * Thread::SizedQueue#pop(timeout: sec) is added. [[Feature #18774]]
+ * Thread::SizedQueue#push(timeout: sec) is added. [[Feature #18944]]
+
+* Time
+
+  * Time#deconstruct_keys is added, allowing Time instances to be used
+    in pattern-matching expressions. [[Feature #19071]]
+
+  * Time.new can now parse a string like the one generated by
+    Time#inspect and return a Time instance based on the given
+    argument. [[Feature #18033]]
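+
+    An illustrative sketch of both additions:
+
+    ```ruby
+    case Time.now
+    in {year: Integer => y, month: 1..12}
+      y  # matched via Time#deconstruct_keys
+    end
+
+    Time.new("2022-12-25 00:00:00 +09:00")
+    # => 2022-12-25 00:00:00 +0900
+    ```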
+
+* SyntaxError
+
+  * SyntaxError#path has been added. [[Feature #19138]]
+
+* TracePoint
+
+ * TracePoint#binding now returns `nil` for `c_call`/`c_return` TracePoints.
+ [[Bug #18487]]
+ * TracePoint#enable `target_thread` keyword argument now defaults to the
+ current thread if a block is given and `target` and `target_line` keyword
+ arguments are not passed. [[Bug #16889]]
+
+* UnboundMethod
+
+  * `UnboundMethod#==` returns `true` if the actual method is the same. For example,
+ `String.instance_method(:object_id) == Array.instance_method(:object_id)`
+ returns `true`. [[Feature #18798]]
+
+ * `UnboundMethod#inspect` does not show the receiver of `instance_method`.
+ For example `String.instance_method(:object_id).inspect` returns
+ `"#<UnboundMethod: Kernel#object_id()>"`
+ (was `"#<UnboundMethod: String(Kernel)#object_id()>"`).
+
+* GC
+
+ * Expose `need_major_gc` via `GC.latest_gc_info`. [GH-6791]
+
+* ObjectSpace
+
+  * `ObjectSpace.dump_all` dumps shapes as well. [GH-6868]
## Stdlib updates
-Outstanding ones only.
+* Bundler
+
+  * Bundler now uses the [PubGrub] resolver instead of [Molinillo] for better performance.
+ * Add --ext=rust support to bundle gem for creating simple gems with Rust extensions.
+ [[GH-rubygems-6149]]
+ * Make cloning git repos faster [[GH-rubygems-4475]]
* RubyGems
- * Update to RubyGems 3.2.0.pre1
+ * Add mswin support for cargo builder. [[GH-rubygems-6167]]
-* Bundler
+* CGI
- * Update to Bundler 2.2.0.dev
+ * `CGI.escapeURIComponent` and `CGI.unescapeURIComponent` are added.
+ [[Feature #18822]]
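+
+  A minimal sketch (results assumed):
+
+  ```ruby
+  require "cgi"
+  CGI.escapeURIComponent("a b+c")       # => "a%20b%2Bc" (space is %20, not "+")
+  CGI.unescapeURIComponent("a%20b%2Bc") # => "a b+c"
+  ```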
+
+* Coverage
+
+  * `Coverage.setup` now accepts `eval: true`. With it, `eval` and related methods
+    can generate code coverage. [[Feature #19008]]
+
+  * `Coverage.supported?(mode)` enables detection of which coverage modes are
+    supported. [[Feature #19026]]
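+
+  A hypothetical end-to-end sketch; the mode names and result shape are assumed:
+
+  ```ruby
+  require "coverage"
+  Coverage.supported?(:eval)              # => true on builds with this mode
+  Coverage.start(lines: true, eval: true)
+  eval("x = 1\nx + 1", binding, "virtual.rb", 1)
+  Coverage.result["virtual.rb"]           # line execution counts for the eval'd code
+  ```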
+
+* Date
+
+  * Added `Date#deconstruct_keys` and `DateTime#deconstruct_keys`, same as [[Feature #19071]].
+
+* ERB
+
+ * `ERB::Util.html_escape` is made faster than `CGI.escapeHTML`.
+ * It no longer allocates a String object when no character needs to be escaped.
+    * It skips calling the `#to_s` method when the argument is already a String.
+ * `ERB::Escape.html_escape` is added as an alias to `ERB::Util.html_escape`,
+ which has not been monkey-patched by Rails.
+ * `ERB::Util.url_encode` is made faster using `CGI.escapeURIComponent`.
+ * `-S` option is removed from `erb` command.
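+
+  For instance (results assumed):
+
+  ```ruby
+  require "erb"
+  ERB::Util.html_escape("1 < 2")  # => "1 &lt; 2"
+  ERB::Escape.html_escape("safe") # the alias that Rails does not monkey-patch
+  ```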
+
+* FileUtils
+
+ * Add FileUtils.ln_sr method and `relative:` option to FileUtils.ln_s.
+ [[Feature #18925]]
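+
+  A sketch of the two assumed-equivalent forms:
+
+  ```ruby
+  require "fileutils"
+  FileUtils.ln_sr("src/file", "dest/link")                 # relative symlink
+  FileUtils.ln_s("src/file", "dest/link2", relative: true) # same behavior
+  ```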
+
+* IRB
+
+ * debug.gem integration commands have been added: `debug`, `break`, `catch`,
+ `next`, `delete`, `step`, `continue`, `finish`, `backtrace`, `info`
+ * They work even if you don't have `gem "debug"` in your Gemfile.
+ * See also: [What's new in Ruby 3.2's IRB?](https://st0012.dev/whats-new-in-ruby-3-2-irb)
+ * More Pry-like commands and features have been added.
+ * `edit` and `show_cmds` (like Pry's `help`) are added.
+ * `ls` takes `-g` or `-G` option to filter out outputs.
+ * `show_source` is aliased from `$` and accepts unquoted inputs.
+ * `whereami` is aliased from `@`.
+
+* Net::Protocol
+
+ * Improve `Net::BufferedIO` performance. [[GH-net-protocol-14]]
+
+* Pathname
+
+ * Added `Pathname#lutime`. [[GH-pathname-20]]
+
+* Socket
+
+ * Added the following constants for supported platforms.
+ * `SO_INCOMING_CPU`
+ * `SO_INCOMING_NAPI_ID`
+ * `SO_RTABLE`
+ * `SO_SETFIB`
+ * `SO_USER_COOKIE`
+ * `TCP_KEEPALIVE`
+ * `TCP_CONNECTION_INFO`
+
+* SyntaxSuggest
+
+  * The `syntax_suggest` feature, formerly known as `dead_end`, is integrated into Ruby.
+    [[Feature #18159]]
+
+* UNIXSocket
+
+ * Add support for UNIXSocket on Windows. Emulate anonymous sockets. Add
+ support for File.socket? and File::Stat#socket? where possible.
+ [[Feature #19135]]
+
+* The following default gems are updated.
+
+ * RubyGems 3.4.1
+ * abbrev 0.1.1
+ * benchmark 0.2.1
+ * bigdecimal 3.1.3
+ * bundler 2.4.1
+ * cgi 0.3.6
+ * csv 3.2.6
+ * date 3.3.3
+ * delegate 0.3.0
+ * did_you_mean 1.6.3
+ * digest 3.1.1
+ * drb 2.1.1
+ * english 0.7.2
+ * erb 4.0.2
+ * error_highlight 0.5.1
+ * etc 1.4.2
+ * fcntl 1.0.2
+ * fiddle 1.1.1
+ * fileutils 1.7.0
+ * forwardable 1.3.3
+ * getoptlong 0.2.0
+ * io-console 0.6.0
+ * io-nonblock 0.2.0
+ * io-wait 0.3.0
+ * ipaddr 1.2.5
+ * irb 1.6.2
+ * json 2.6.3
+ * logger 1.5.3
+ * mutex_m 0.1.2
+ * net-http 0.4.0
+ * net-protocol 0.2.1
+ * nkf 0.1.2
+ * open-uri 0.3.0
+ * open3 0.1.2
+ * openssl 3.1.0
+ * optparse 0.3.1
+ * ostruct 0.5.5
+ * pathname 0.2.1
+ * pp 0.4.0
+ * pstore 0.1.2
+ * psych 5.0.1
+ * racc 1.6.2
+ * rdoc 6.5.0
+ * readline-ext 0.1.5
+ * reline 0.3.2
+ * resolv 0.2.2
+ * resolv-replace 0.1.1
+ * securerandom 0.2.2
+ * set 1.0.3
+ * stringio 3.0.4
+ * strscan 3.0.5
+ * syntax_suggest 1.0.2
+ * syslog 0.1.1
+ * tempfile 0.1.3
+ * time 0.2.1
+ * timeout 0.3.1
+ * tmpdir 0.1.3
+ * tsort 0.1.1
+ * un 0.2.1
+ * uri 0.12.0
+ * weakref 0.1.2
+ * win32ole 1.8.9
+ * yaml 0.2.1
+ * zlib 3.0.0
+
+* The following bundled gems are updated.
+
+ * minitest 5.16.3
+ * power_assert 2.0.3
+ * test-unit 3.5.7
+ * net-ftp 0.2.0
+ * net-imap 0.3.4
+ * net-pop 0.1.2
+ * net-smtp 0.3.3
+ * rbs 2.8.2
+ * typeprof 0.21.3
+ * debug 1.7.1
+
+See GitHub releases like [GitHub Releases of Logger](https://github.com/ruby/logger/releases) or the changelogs for details of the default gems and bundled gems.
+
+## Supported platforms
+
+* WebAssembly/WASI support is added. See [wasm/README.md] and [ruby.wasm] for more details. [[Feature #18462]]
-* Net::HTTP
+## Compatibility issues
- * New method
+* `String#to_c` currently treats a sequence of underscores as the end of the
+  Complex string. [[Bug #19087]]
- * Add Net::HTTP#verify_hostname= and Net::HTTP#verify_hostname
- to skip hostname verification. [[Feature #16555]]
+* `ENV.clone` now raises `TypeError`, as `ENV.dup` already does. [[Bug #17767]]
- * Modified method
+### Removed constants
- * Net::HTTP.get, Net::HTTP.get_response, and Net::HTTP.get_print can
- take request headers as a Hash in the second argument when the first
- argument is a URI.
+The following deprecated constants are removed.
-## Compatibility issues
+* `Fixnum` and `Bignum` [[Feature #12005]]
+* `Random::DEFAULT` [[Feature #17351]]
+* `Struct::Group`
+* `Struct::Passwd`
-Excluding feature bug fixes.
+### Removed methods
-* Regexp literals are frozen [[Feature #8948]] [[Feature #16377]]
+The following deprecated methods are removed.
- ```ruby
- /foo/.frozen? #=> true
- ```
+* `Dir.exists?` [[Feature #17391]]
+* `File.exists?` [[Feature #17391]]
+* `Kernel#=~` [[Feature #15231]]
+* `Kernel#taint`, `Kernel#untaint`, `Kernel#tainted?`
+ [[Feature #16131]]
+* `Kernel#trust`, `Kernel#untrust`, `Kernel#untrusted?`
+ [[Feature #16131]]
+* `Method#public?`, `Method#private?`, `Method#protected?`,
+ `UnboundMethod#public?`, `UnboundMethod#private?`, `UnboundMethod#protected?`
+ [[Bug #18729]] [[Bug #18751]] [[Bug #18435]]
-* Bundled gems
+### Source code incompatibility of extension libraries
- * net-telnet and xmlrpc have been removed from the bundled gems.
- If you are interested in maintaining them, please comment on
- your plan to https://github.com/ruby/xmlrpc
- or https://github.com/ruby/net-telnet.
+* Extension libraries that provide a PRNG as a subclass of Random need updates.
+  See [PRNG update] below for more information. [[Bug #19100]]
-* EXPERIMENTAL: Hash#each consistently yields a 2-element array [[Bug #12706]]
+### Error printer
- * Now `{ a: 1 }.each(&->(k, v) { })` raises an ArgumentError
- due to lambda's arity check.
- * This is experimental; if it brings a big incompatibility issue,
- it may be reverted until 2.8/3.0 release.
+* Ruby no longer escapes control characters and backslashes in an
+ error message. [[Feature #18367]]
-* When writing to STDOUT redirected to a closed pipe, no broken pipe
- error message will be shown now. [[Feature #14413]]
+### Constant lookup when defining a class/module
-* `TRUE`/`FALSE`/`NIL` constants are no longer defined.
+* When defining a class or module directly under Object with a class/module
+  statement, if a class or module with the same name is already visible via
+  `Module#include`, Ruby 3.1 and earlier treated the statement as "open class".
+  Since Ruby 3.2, a new class is defined instead. [[Feature #18832]]
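+
+  A sketch of the behavioral change (comparison result assumed):
+
+  ```ruby
+  module M
+    class C; end
+  end
+  include M     # makes M::C visible as ::C
+
+  class C; end  # Ruby 3.1: reopened M::C; Ruby 3.2: defines a new class
+  C == M::C     # => false on Ruby 3.2
+  ```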
## Stdlib compatibility issues
-Excluding feature bug fixes.
+* Psych no longer bundles libyaml sources. Likewise, Fiddle no longer bundles
+  libffi sources. Users need to install the libyaml/libffi libraries themselves
+  via a package manager like apt, yum, or brew.
+
+  Psych and Fiddle still support static builds against specific versions of the
+  libyaml and libffi sources. You can build Psych with libyaml-0.2.5 like this:
+
+ ```bash
+ $ ./configure --with-libyaml-source-dir=/path/to/libyaml-0.2.5
+ ```
+
+  Similarly, you can build Fiddle with libffi-3.4.4:
+
+ ```bash
+ $ ./configure --with-libffi-source-dir=/path/to/libffi-3.4.4
+ ```
+
+ [[Feature #18571]]
+
+* Check cookie name/path/domain characters in `CGI::Cookie`. [[CVE-2021-33621]]
+
+* `URI.parse` now returns an empty string for the host instead of nil. [[sec-156615]]
## C API updates
-* C API functions related to $SAFE have been removed.
- [[Feature #16131]]
+### Updated C APIs
+
+The following APIs are updated.
+
+* PRNG update
+
+  `rb_random_interface_t` in ruby/random.h has been updated and versioned.
+  Extension libraries that use this interface and were built for older
+  versions need to be rebuilt with an added `init_int32` function.
+
+### Added C APIs
+
+* `VALUE rb_hash_new_capa(long capa)` was added to create hashes with the desired capacity.
+* `rb_internal_thread_add_event_hook` and `rb_internal_thread_remove_event_hook` were added to instrument thread scheduling.
+ The following events are available:
+ * `RUBY_INTERNAL_THREAD_EVENT_STARTED`
+ * `RUBY_INTERNAL_THREAD_EVENT_READY`
+ * `RUBY_INTERNAL_THREAD_EVENT_RESUMED`
+ * `RUBY_INTERNAL_THREAD_EVENT_SUSPENDED`
+ * `RUBY_INTERNAL_THREAD_EVENT_EXITED`
+* `rb_debug_inspector_current_depth` and `rb_debug_inspector_frame_depth` are added for debuggers.
+
+### Removed C APIs
+
+The following deprecated APIs are removed.
-* C API header file `ruby/ruby.h` was split. [[GH-2991]] Should have no impact
- on extension libraries, but users might experience slow compilations.
+* `rb_cData` variable.
+* "taintedness" and "trustedness" functions. [[Feature #16131]]
## Implementation improvements
-* The number of hashes allocated when using a keyword splat in
- a method call has been reduced to a maximum of 1, and passing
- a keyword splat to a method that accepts specific keywords
- does not allocate a hash.
-
-## Miscellaneous changes
-
-* Methods using `ruby2_keywords` will no longer keep empty keyword
- splats, those are now removed just as they are for methods not
- using `ruby2_keywords`.
-
-* Taint deprecation warnings are now issued in regular mode in
- addition to verbose warning mode. [[Feature #16131]]
-
-* When an exception is caught in the default handler, the error
- message and backtrace are printed in order from the innermost.
- [[Feature #8661]]
-
-
-[Bug #4352]: https://bugs.ruby-lang.org/issues/4352
-[Feature #8661]: https://bugs.ruby-lang.org/issues/8661
-[Feature #8709]: https://bugs.ruby-lang.org/issues/8709
-[Feature #8948]: https://bugs.ruby-lang.org/issues/8948
-[Feature #9573]: https://bugs.ruby-lang.org/issues/9573
-[Feature #14183]: https://bugs.ruby-lang.org/issues/14183
-[Bug #14266]: https://bugs.ruby-lang.org/issues/14266
-[Feature #14413]: https://bugs.ruby-lang.org/issues/14413
-[Feature #15575]: https://bugs.ruby-lang.org/issues/15575
-[Feature #16131]: https://bugs.ruby-lang.org/issues/16131
-[Feature #16166]: https://bugs.ruby-lang.org/issues/16166
-[Feature #16260]: https://bugs.ruby-lang.org/issues/16260
-[Feature #16274]: https://bugs.ruby-lang.org/issues/16274
-[Feature #16377]: https://bugs.ruby-lang.org/issues/16377
-[Bug #12706]: https://bugs.ruby-lang.org/issues/12706
-[Feature #15921]: https://bugs.ruby-lang.org/issues/15921
-[Feature #16555]: https://bugs.ruby-lang.org/issues/16555
-[Feature #16746]: https://bugs.ruby-lang.org/issues/16746
-[Feature #16754]: https://bugs.ruby-lang.org/issues/16754
-[GH-2991]: https://github.com/ruby/ruby/pull/2991
+* Fixed several race conditions in Kernel#autoload. [[Bug #18782]]
+* Cache invalidation for expressions referencing constants is now
+ more fine-grained. `RubyVM.stat(:global_constant_state)` was
+ removed because it was closely tied to the previous caching scheme
+ where setting any constant invalidates all caches in the system.
+ New keys, `:constant_cache_invalidations` and `:constant_cache_misses`,
+ were introduced to help with use cases for `:global_constant_state`.
+ [[Feature #18589]]
+* A cache-based optimization for Regexp matching is introduced.
+ [[Feature #19104]]
+* [Variable Width Allocation](https://shopify.engineering/ruby-variable-width-allocation)
+ is now enabled by default. [[Feature #18239]]
+* Added a new instance variable caching mechanism, called object shapes, which
+ improves inline cache hits for most objects and allows us to generate very
+ efficient JIT code. Objects whose instance variables are defined in a
+ consistent order will see the most performance benefits.
+ [[Feature #18776]]
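+
+  A hypothetical illustration of the "consistent order" guidance:
+
+  ```ruby
+  class Point
+    def initialize(x, y)
+      @x = x # instance variables assigned in the same order on every instance,
+      @y = y # so all Points share one shape and inline caches stay hot
+    end
+  end
+  ```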
+* Speed up marking instruction sequences by using a bitmap to find "markable"
+ objects. This change results in faster major collections.
+ [[Feature #18875]]
+
+## JIT
+
+### YJIT
+
+* YJIT is no longer experimental.
+  * It has been tested on production workloads for over a year and proven to be quite stable.
+* YJIT now supports both x86-64 and arm64/aarch64 CPUs on Linux, macOS, BSD, and other UNIX platforms.
+ * This release brings support for Mac M1/M2, AWS Graviton and Raspberry Pi 4.
+* Building YJIT now requires Rust 1.58.0+. [[Feature #18481]]
+  * To ensure that CRuby is built with YJIT, install `rustc` >= 1.58.0
+    before running `./configure`.
+ * Please reach out to the YJIT team should you run into any issues.
+* Physical memory for JIT code is lazily allocated. Unlike Ruby 3.1,
+ the RSS of a Ruby process is minimized because virtual memory pages
+ allocated by `--yjit-exec-mem-size` will not be mapped to physical
+ memory pages until actually utilized by JIT code.
+* Introduce Code GC that frees all code pages when the memory consumption
+ by JIT code reaches `--yjit-exec-mem-size`.
+ * `RubyVM::YJIT.runtime_stats` returns Code GC metrics in addition to
+ existing `inline_code_size` and `outlined_code_size` keys:
+ `code_gc_count`, `live_page_count`, `freed_page_count`, and `freed_code_size`.
+* Most of the statistics produced by `RubyVM::YJIT.runtime_stats` are now available in release builds.
+ * Simply run ruby with `--yjit-stats` to compute and dump stats (incurs some run-time overhead).
+* YJIT is now optimized to take advantage of object shapes. [[Feature #18776]]
+* Take advantage of finer-grained constant invalidation to invalidate less code when defining new constants. [[Feature #18589]]
+* The default `--yjit-exec-mem-size` is changed to 64 (MiB).
+* The default `--yjit-call-threshold` is changed to 30.
+
+### MJIT
+
+* The MJIT compiler is re-implemented in Ruby as `ruby_vm/mjit/compiler`.
+* The MJIT compiler now runs in a forked Ruby process instead of in a native
+  thread called the MJIT worker. [[Feature #18968]]
+ * As a result, Microsoft Visual Studio (MSWIN) is no longer supported.
+* MinGW is no longer supported. [[Feature #18824]]
+* Rename `--mjit-min-calls` to `--mjit-call-threshold`.
+* Change default `--mjit-max-cache` back from 10000 to 100.
+
+[Feature #12005]: https://bugs.ruby-lang.org/issues/12005
+[Feature #12084]: https://bugs.ruby-lang.org/issues/12084
+[Feature #12655]: https://bugs.ruby-lang.org/issues/12655
+[Feature #12737]: https://bugs.ruby-lang.org/issues/12737
+[Feature #13110]: https://bugs.ruby-lang.org/issues/13110
+[Feature #14332]: https://bugs.ruby-lang.org/issues/14332
+[Feature #15231]: https://bugs.ruby-lang.org/issues/15231
+[Feature #15357]: https://bugs.ruby-lang.org/issues/15357
+[Bug #15928]: https://bugs.ruby-lang.org/issues/15928
+[Feature #16122]: https://bugs.ruby-lang.org/issues/16122
+[Feature #16131]: https://bugs.ruby-lang.org/issues/16131
+[Bug #16466]: https://bugs.ruby-lang.org/issues/16466
+[Feature #16663]: https://bugs.ruby-lang.org/issues/16663
+[Feature #16806]: https://bugs.ruby-lang.org/issues/16806
+[Bug #16889]: https://bugs.ruby-lang.org/issues/16889
+[Bug #16908]: https://bugs.ruby-lang.org/issues/16908
+[Feature #16989]: https://bugs.ruby-lang.org/issues/16989
+[Feature #17351]: https://bugs.ruby-lang.org/issues/17351
+[Feature #17391]: https://bugs.ruby-lang.org/issues/17391
+[Bug #17545]: https://bugs.ruby-lang.org/issues/17545
+[Bug #17767]: https://bugs.ruby-lang.org/issues/17767
+[Feature #17837]: https://bugs.ruby-lang.org/issues/17837
+[Feature #17881]: https://bugs.ruby-lang.org/issues/17881
+[Feature #18033]: https://bugs.ruby-lang.org/issues/18033
+[Feature #18159]: https://bugs.ruby-lang.org/issues/18159
+[Feature #18239]: https://bugs.ruby-lang.org/issues/18239#note-17
+[Feature #18351]: https://bugs.ruby-lang.org/issues/18351
+[Feature #18367]: https://bugs.ruby-lang.org/issues/18367
+[Bug #18435]: https://bugs.ruby-lang.org/issues/18435
+[Feature #18462]: https://bugs.ruby-lang.org/issues/18462
+[Feature #18481]: https://bugs.ruby-lang.org/issues/18481
+[Bug #18487]: https://bugs.ruby-lang.org/issues/18487
+[Feature #18564]: https://bugs.ruby-lang.org/issues/18564
+[Feature #18571]: https://bugs.ruby-lang.org/issues/18571
+[Feature #18585]: https://bugs.ruby-lang.org/issues/18585
+[Feature #18589]: https://bugs.ruby-lang.org/issues/18589
+[Feature #18595]: https://bugs.ruby-lang.org/issues/18595
+[Feature #18598]: https://bugs.ruby-lang.org/issues/18598
+[Bug #18625]: https://bugs.ruby-lang.org/issues/18625
+[Feature #18630]: https://bugs.ruby-lang.org/issues/18630
+[Bug #18633]: https://bugs.ruby-lang.org/issues/18633
+[Feature #18639]: https://bugs.ruby-lang.org/issues/18639
+[Feature #18685]: https://bugs.ruby-lang.org/issues/18685
+[Bug #18729]: https://bugs.ruby-lang.org/issues/18729
+[Bug #18751]: https://bugs.ruby-lang.org/issues/18751
+[Feature #18774]: https://bugs.ruby-lang.org/issues/18774
+[Feature #18776]: https://bugs.ruby-lang.org/issues/18776
+[Bug #18782]: https://bugs.ruby-lang.org/issues/18782
+[Feature #18788]: https://bugs.ruby-lang.org/issues/18788
+[Feature #18798]: https://bugs.ruby-lang.org/issues/18798
+[Feature #18809]: https://bugs.ruby-lang.org/issues/18809
+[Feature #18821]: https://bugs.ruby-lang.org/issues/18821
+[Feature #18822]: https://bugs.ruby-lang.org/issues/18822
+[Feature #18824]: https://bugs.ruby-lang.org/issues/18824
+[Feature #18832]: https://bugs.ruby-lang.org/issues/18832
+[Feature #18875]: https://bugs.ruby-lang.org/issues/18875
+[Feature #18925]: https://bugs.ruby-lang.org/issues/18925
+[Feature #18944]: https://bugs.ruby-lang.org/issues/18944
+[Feature #18949]: https://bugs.ruby-lang.org/issues/18949
+[Feature #18968]: https://bugs.ruby-lang.org/issues/18968
+[Feature #19008]: https://bugs.ruby-lang.org/issues/19008
+[Feature #19013]: https://bugs.ruby-lang.org/issues/19013
+[Feature #19026]: https://bugs.ruby-lang.org/issues/19026
+[Feature #19036]: https://bugs.ruby-lang.org/issues/19036
+[Feature #19060]: https://bugs.ruby-lang.org/issues/19060
+[Feature #19070]: https://bugs.ruby-lang.org/issues/19070
+[Feature #19071]: https://bugs.ruby-lang.org/issues/19071
+[Feature #19078]: https://bugs.ruby-lang.org/issues/19078
+[Bug #19087]: https://bugs.ruby-lang.org/issues/19087
+[Bug #19100]: https://bugs.ruby-lang.org/issues/19100
+[Feature #19104]: https://bugs.ruby-lang.org/issues/19104
+[Feature #19135]: https://bugs.ruby-lang.org/issues/19135
+[Feature #19138]: https://bugs.ruby-lang.org/issues/19138
+[Feature #19194]: https://bugs.ruby-lang.org/issues/19194
+[Molinillo]: https://github.com/CocoaPods/Molinillo
+[PubGrub]: https://github.com/jhawthorn/pub_grub
+[GH-net-protocol-14]: https://github.com/ruby/net-protocol/pull/14
+[GH-pathname-20]: https://github.com/ruby/pathname/pull/20
+[GH-6791]: https://github.com/ruby/ruby/pull/6791
+[GH-6868]: https://github.com/ruby/ruby/pull/6868
+[GH-rubygems-4475]: https://github.com/rubygems/rubygems/pull/4475
+[GH-rubygems-6149]: https://github.com/rubygems/rubygems/pull/6149
+[GH-rubygems-6167]: https://github.com/rubygems/rubygems/pull/6167
+[sec-156615]: https://hackerone.com/reports/156615
+[CVE-2021-33621]: https://www.ruby-lang.org/en/news/2022/11/22/http-response-splitting-in-cgi-cve-2021-33621/
+[wasm/README.md]: https://github.com/ruby/ruby/blob/master/wasm/README.md
+[ruby.wasm]: https://github.com/ruby/ruby.wasm
diff --git a/README.ja.md b/README.ja.md
index bee6433c62..93c0131690 100644
--- a/README.ja.md
+++ b/README.ja.md
@@ -1,10 +1,9 @@
-[![Build Status](https://travis-ci.org/ruby/ruby.svg?branch=master)](https://travis-ci.org/ruby/ruby)
-[![Build status](https://ci.appveyor.com/api/projects/status/0sy8rrxut4o0k960/branch/master?svg=true)](https://ci.appveyor.com/project/ruby/ruby/branch/master)
-[![Actions Status](https://github.com/ruby/ruby/workflows/macOS/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"macOS")
-[![Actions Status](https://github.com/ruby/ruby/workflows/MinGW/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"MinGW")
-[![Actions Status](https://github.com/ruby/ruby/workflows/MJIT/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"MJIT")
-[![Actions Status](https://github.com/ruby/ruby/workflows/Ubuntu/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"Ubuntu")
-[![Actions Status](https://github.com/ruby/ruby/workflows/Windows/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"Windows")
+[![Actions Status: MinGW](https://github.com/ruby/ruby/workflows/MinGW/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"MinGW")
+[![Actions Status: MJIT](https://github.com/ruby/ruby/workflows/MJIT/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"MJIT")
+[![Actions Status: Ubuntu](https://github.com/ruby/ruby/workflows/Ubuntu/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"Ubuntu")
+[![Actions Status: Windows](https://github.com/ruby/ruby/workflows/Windows/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"Windows")
+[![AppVeyor status](https://ci.appveyor.com/api/projects/status/0sy8rrxut4o0k960/branch/master?svg=true)](https://ci.appveyor.com/project/ruby/ruby/branch/master)
+[![Travis Status](https://app.travis-ci.com/ruby/ruby.svg?branch=master)](https://app.travis-ci.com/ruby/ruby)
# What is Ruby
@@ -52,11 +51,11 @@ The actual master of the Ruby repository is at https://git.ruby-lang.org/ruby.git
### Subversion
-Source code for older versions of Ruby can be obtained with the following command.
+Source code for older versions of Ruby can also be obtained with the following command.
$ svn co https://svn.ruby-lang.org/repos/ruby/branches/ruby_2_6/ ruby
-A list of other branches under development can be seen with the following command.
+A list of other branches can be seen with the following command.
$ svn ls https://svn.ruby-lang.org/repos/ruby/branches/
@@ -71,31 +70,26 @@ https://www.ruby-lang.org/
## Mailing lists
-There is a Ruby mailing list. To subscribe, send a mail to
-
-mailto:ruby-list-request@ruby-lang.org
-
-with the word
+There is a Ruby mailing list. To subscribe, send a mail to [ruby-list-request@ruby-lang.org] with the word
subscribe
written in the mail body.
-There is also a mailing list for Ruby developers, where ruby bugs and implementation issues such as future specification extensions are discussed. To subscribe, mail
-
-mailto:ruby-dev-request@ruby-lang.org
-
-in the same way as for ruby-list.
+There is also a mailing list for Ruby developers, where ruby bugs and implementation issues such as future specification extensions are discussed.
+To subscribe, mail [ruby-dev-request@ruby-lang.org] in the same way as for ruby-list.
There are also the ruby-ext mailing list for discussing Ruby extension modules, the ruby-math mailing list for mathematical topics,
and the ruby-talk mailing list for discussing ruby in English. The way to subscribe is the same for all of them.
+[ruby-list-request@ruby-lang.org]: mailto:ruby-list-request@ruby-lang.org?subject=Join%20Ruby%20Mailing%20List&body=subscribe
+[ruby-dev-request@ruby-lang.org]: mailto:ruby-dev-request@ruby-lang.org?subject=Join%20Ruby%20Mailing%20List&body=subscribe
+
## Compiling and installing
Follow the steps below.
-1. If the `configure` file is missing or older than `configure.ac`, run `autoconf`
-   to generate a new `configure`
+1. (When building source obtained from the Git repository) run `./autogen.sh` to generate a new `configure`
2. Run `configure` to generate `Makefile` and other files
@@ -172,11 +166,14 @@ On UNIX, `configure` should absorb most of the differences
## Feedback
-Post questions about Ruby to Ruby-Talk (English), Ruby-List (Japanese) (https://www.ruby-lang.org/ja/community/mailing-lists), or
-websites like stackoverflow (https://ja.stackoverflow.com/).
+Post questions about Ruby to [Ruby-Talk] (English), [Ruby-List] (Japanese), or
+websites like [stackoverflow].
Bug reports are accepted at https://bugs.ruby-lang.org.
+[Ruby-Talk]: https://www.ruby-lang.org/en/community/mailing-lists
+[Ruby-List]: https://www.ruby-lang.org/ja/community/mailing-lists
+[stackoverflow]: https://ja.stackoverflow.com/
## The Author
diff --git a/README.md b/README.md
index f06bf1d3ea..c445448c71 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,11 @@
-[![Build Status](https://travis-ci.org/ruby/ruby.svg?branch=master)](https://travis-ci.org/ruby/ruby)
-[![Build status](https://ci.appveyor.com/api/projects/status/0sy8rrxut4o0k960/branch/master?svg=true)](https://ci.appveyor.com/project/ruby/ruby/branch/master)
-[![Actions Status](https://github.com/ruby/ruby/workflows/macOS/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"macOS")
-[![Actions Status](https://github.com/ruby/ruby/workflows/MinGW/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"MinGW")
-[![Actions Status](https://github.com/ruby/ruby/workflows/MJIT/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"MJIT")
-[![Actions Status](https://github.com/ruby/ruby/workflows/Ubuntu/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"Ubuntu")
-[![Actions Status](https://github.com/ruby/ruby/workflows/Windows/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"Windows")
+[![Actions Status: MinGW](https://github.com/ruby/ruby/workflows/MinGW/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"MinGW")
+[![Actions Status: MJIT](https://github.com/ruby/ruby/workflows/MJIT/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"MJIT")
+[![Actions Status: Ubuntu](https://github.com/ruby/ruby/workflows/Ubuntu/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"Ubuntu")
+[![Actions Status: Windows](https://github.com/ruby/ruby/workflows/Windows/badge.svg)](https://github.com/ruby/ruby/actions?query=workflow%3A"Windows")
+[![AppVeyor status](https://ci.appveyor.com/api/projects/status/0sy8rrxut4o0k960/branch/master?svg=true)](https://ci.appveyor.com/project/ruby/ruby/branch/master)
+[![Travis Status](https://app.travis-ci.com/ruby/ruby.svg?branch=master)](https://app.travis-ci.com/ruby/ruby)
-# What's Ruby
+# What is Ruby?
Ruby is an interpreted object-oriented programming language often
used for web development. It also offers many scripting features
@@ -15,28 +14,25 @@ It is simple, straightforward, and extensible.
## Features of Ruby
-* Simple Syntax
-* **Normal** Object-oriented Features (e.g. class, method calls)
-* **Advanced** Object-oriented Features (e.g. mix-in, singleton-method)
-* Operator Overloading
-* Exception Handling
-* Iterators and Closures
-* Garbage Collection
-* Dynamic Loading of Object Files (on some architectures)
-* Highly Portable (works on many Unix-like/POSIX compatible platforms as
- well as Windows, macOS, Haiku, etc.) cf.
- https://github.com/ruby/ruby/blob/master/doc/contributing.rdoc#platform-maintainers
+* Simple Syntax
+* **Normal** Object-oriented Features (e.g. class, method calls)
+* **Advanced** Object-oriented Features (e.g. mix-in, singleton-method)
+* Operator Overloading
+* Exception Handling
+* Iterators and Closures
+* Garbage Collection
+* Dynamic Loading of Object Files (on some architectures)
+* Highly Portable (works on many Unix-like/POSIX compatible platforms as
+ well as Windows, macOS, etc.) cf.
+ https://github.com/ruby/ruby/blob/master/doc/maintainers.rdoc#label-Platform+Maintainers
-
-## How to get Ruby
+## How to get Ruby with Git
For a complete list of ways to install Ruby, including using third-party tools
like rvm, see:
https://www.ruby-lang.org/en/downloads/
-### Git
-
The mirror of the Ruby source tree can be checked out with the following command:
$ git clone https://github.com/ruby/ruby.git
@@ -49,21 +45,19 @@ to see the list of branches:
You may also want to use https://git.ruby-lang.org/ruby.git (actual master of Ruby source)
if you are a committer.
-### Subversion
-
-Stable branches for older Ruby versions can be checked out with the following command:
-
- $ svn co https://svn.ruby-lang.org/repos/ruby/branches/ruby_2_6/ ruby
-
-Try the following command to see the list of branches:
-
- $ svn ls https://svn.ruby-lang.org/repos/ruby/branches/
+## How to build
+See [Building Ruby](doc/contributing/building_ruby.md).
## Ruby home page
https://www.ruby-lang.org/
+## Documentation
+
+- [English](https://docs.ruby-lang.org/en/master/index.html)
+- [Japanese](https://docs.ruby-lang.org/ja/master/index.html)
+
## Mailing list
There is a mailing list to discuss Ruby. To subscribe to this list, please
@@ -71,98 +65,24 @@ send the following phrase:
subscribe
-in the mail body (not subject) to the address
-[ruby-talk-request@ruby-lang.org](mailto:ruby-talk-request@ruby-lang.org?subject=Join%20Ruby%20Mailing%20List&body=subscribe).
-
-## How to compile and install
-
-1. If you want to use Microsoft Visual C++ to compile Ruby, read
- [win32/README.win32](win32/README.win32) instead of this document.
-
-2. If `./configure` does not exist or is older than `configure.ac`, run
- `autoconf` to (re)generate configure.
-
-3. Run `./configure`, which will generate `config.h` and `Makefile`.
-
- Some C compiler flags may be added by default depending on your
- environment. Specify `optflags=..` and `warnflags=..` as necessary to
- override them.
-
-4. Edit `include/ruby/defines.h` if you need. Usually this step will not be needed.
-
-5. Remove comment mark(`#`) before the module names from `ext/Setup` (or add
- module names if not present), if you want to link modules statically.
+in the mail body (not subject) to the address [ruby-talk-request@ruby-lang.org].
- If you don't want to compile non static extension modules (probably on
- architectures which do not allow dynamic loading), remove comment mark
- from the line "`#option nodynamic`" in `ext/Setup`.
-
- Usually this step will not be needed.
-
-6. Run `make`.
-
- * On Mac, set RUBY\_CODESIGN environment variable with a signing identity.
- It uses the identity to sign `ruby` binary. See also codesign(1).
-
-7. Optionally, run '`make check`' to check whether the compiled Ruby
- interpreter works well. If you see the message "`check succeeded`", your
- Ruby works as it should (hopefully).
-
-8. Run '`make install`'.
-
- This command will create the following directories and install files into
- them.
-
- * `${DESTDIR}${prefix}/bin`
- * `${DESTDIR}${prefix}/include/ruby-${MAJOR}.${MINOR}.${TEENY}`
- * `${DESTDIR}${prefix}/include/ruby-${MAJOR}.${MINOR}.${TEENY}/${PLATFORM}`
- * `${DESTDIR}${prefix}/lib`
- * `${DESTDIR}${prefix}/lib/ruby`
- * `${DESTDIR}${prefix}/lib/ruby/${MAJOR}.${MINOR}.${TEENY}`
- * `${DESTDIR}${prefix}/lib/ruby/${MAJOR}.${MINOR}.${TEENY}/${PLATFORM}`
- * `${DESTDIR}${prefix}/lib/ruby/site_ruby`
- * `${DESTDIR}${prefix}/lib/ruby/site_ruby/${MAJOR}.${MINOR}.${TEENY}`
- * `${DESTDIR}${prefix}/lib/ruby/site_ruby/${MAJOR}.${MINOR}.${TEENY}/${PLATFORM}`
- * `${DESTDIR}${prefix}/lib/ruby/vendor_ruby`
- * `${DESTDIR}${prefix}/lib/ruby/vendor_ruby/${MAJOR}.${MINOR}.${TEENY}`
- * `${DESTDIR}${prefix}/lib/ruby/vendor_ruby/${MAJOR}.${MINOR}.${TEENY}/${PLATFORM}`
- * `${DESTDIR}${prefix}/lib/ruby/gems/${MAJOR}.${MINOR}.${TEENY}`
- * `${DESTDIR}${prefix}/share/man/man1`
- * `${DESTDIR}${prefix}/share/ri/${MAJOR}.${MINOR}.${TEENY}/system`
-
-
- If Ruby's API version is '*x.y.z*', the `${MAJOR}` is '*x*', the
- `${MINOR}` is '*y*', and the `${TEENY}` is '*z*'.
-
- **NOTE**: teeny of the API version may be different from one of Ruby's
- program version
-
- You may have to be a super user to install Ruby.
-
-If you fail to compile Ruby, please send the detailed error report with the
-error log and machine/OS type, to help others.
-
-Some extension libraries may not get compiled because of lack of necessary
-external libraries and/or headers, then you will need to run '`make distclean-ext`'
-to remove old configuration after installing them in such case.
+[ruby-talk-request@ruby-lang.org]: mailto:ruby-talk-request@ruby-lang.org?subject=Join%20Ruby%20Mailing%20List&body=subscribe
## Copying
-See the file [COPYING](COPYING).
+See the file [COPYING](rdoc-ref:COPYING).
## Feedback
-Questions about the Ruby language can be asked on the Ruby-Talk mailing list
-(https://www.ruby-lang.org/en/community/mailing-lists) or on websites like
-(https://stackoverflow.com).
-
-Bugs should be reported at https://bugs.ruby-lang.org. Read [HowToReport] for more information.
+Questions about the Ruby language can be asked on the [Ruby-Talk](https://www.ruby-lang.org/en/community/mailing-lists) mailing list
+or on websites like https://stackoverflow.com.
-[HowToReport]: https://bugs.ruby-lang.org/projects/ruby/wiki/HowToReport
+Bugs should be reported at https://bugs.ruby-lang.org. Read ["Reporting Issues"](https://docs.ruby-lang.org/en/master/contributing/reporting_issues_md.html) for more information.
## Contributing
-See the file [CONTRIBUTING.md](CONTRIBUTING.md)
+See ["Contributing to Ruby"](https://docs.ruby-lang.org/en/master/contributing_md.html), which includes setup and build instructions.
## The Author
diff --git a/aclocal.m4 b/aclocal.m4
index 940d91e83f..e69de29bb2 100644
--- a/aclocal.m4
+++ b/aclocal.m4
@@ -1,48 +0,0 @@
-# generated automatically by aclocal 1.16.2 -*- Autoconf -*-
-
-# Copyright (C) 1996-2020 Free Software Foundation, Inc.
-
-# This file is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])])
-m4_include([tool/m4/_colorize_result_prepare.m4])
-m4_include([tool/m4/ac_msg_result.m4])
-m4_include([tool/m4/colorize_result.m4])
-m4_include([tool/m4/ruby_append_option.m4])
-m4_include([tool/m4/ruby_append_options.m4])
-m4_include([tool/m4/ruby_check_builtin_func.m4])
-m4_include([tool/m4/ruby_check_builtin_setjmp.m4])
-m4_include([tool/m4/ruby_check_printf_prefix.m4])
-m4_include([tool/m4/ruby_check_setjmp.m4])
-m4_include([tool/m4/ruby_check_signedness.m4])
-m4_include([tool/m4/ruby_check_sizeof.m4])
-m4_include([tool/m4/ruby_check_sysconf.m4])
-m4_include([tool/m4/ruby_cppoutfile.m4])
-m4_include([tool/m4/ruby_decl_attribute.m4])
-m4_include([tool/m4/ruby_default_arch.m4])
-m4_include([tool/m4/ruby_define_if.m4])
-m4_include([tool/m4/ruby_defint.m4])
-m4_include([tool/m4/ruby_dtrace_available.m4])
-m4_include([tool/m4/ruby_dtrace_postprocess.m4])
-m4_include([tool/m4/ruby_func_attribute.m4])
-m4_include([tool/m4/ruby_mingw32.m4])
-m4_include([tool/m4/ruby_prepend_option.m4])
-m4_include([tool/m4/ruby_prog_gnu_ld.m4])
-m4_include([tool/m4/ruby_replace_funcs.m4])
-m4_include([tool/m4/ruby_replace_type.m4])
-m4_include([tool/m4/ruby_rm_recursive.m4])
-m4_include([tool/m4/ruby_setjmp_type.m4])
-m4_include([tool/m4/ruby_stack_grow_direction.m4])
-m4_include([tool/m4/ruby_try_cflags.m4])
-m4_include([tool/m4/ruby_try_cxxflags.m4])
-m4_include([tool/m4/ruby_try_ldflags.m4])
-m4_include([tool/m4/ruby_type_attribute.m4])
-m4_include([tool/m4/ruby_universal_arch.m4])
-m4_include([tool/m4/ruby_werror_flag.m4])
diff --git a/addr2line.c b/addr2line.c
index bf6bc6dc24..e5f25293e2 100644
--- a/addr2line.c
+++ b/addr2line.c
@@ -159,11 +159,15 @@ typedef struct obj_info {
struct dwarf_section debug_info;
struct dwarf_section debug_line;
struct dwarf_section debug_ranges;
+ struct dwarf_section debug_str_offsets;
+ struct dwarf_section debug_addr;
+ struct dwarf_section debug_rnglists;
struct dwarf_section debug_str;
+ struct dwarf_section debug_line_str;
struct obj_info *next;
} obj_info_t;
-#define DWARF_SECTION_COUNT 5
+#define DWARF_SECTION_COUNT 9
static struct dwarf_section *
obj_dwarf_section_at(obj_info_t *obj, int n)
@@ -173,7 +177,11 @@ obj_dwarf_section_at(obj_info_t *obj, int n)
&obj->debug_info,
&obj->debug_line,
&obj->debug_ranges,
- &obj->debug_str
+ &obj->debug_str_offsets,
+ &obj->debug_addr,
+ &obj->debug_rnglists,
+ &obj->debug_str,
+ &obj->debug_line_str
};
if (n < 0 || DWARF_SECTION_COUNT <= n) {
abort();
@@ -190,12 +198,12 @@ struct debug_section_definition {
static char binary_filename[PATH_MAX + 1];
static unsigned long
-uleb128(char **p)
+uleb128(const char **p)
{
unsigned long r = 0;
int s = 0;
for (;;) {
- unsigned char b = *(unsigned char *)(*p)++;
+ unsigned char b = (unsigned char)*(*p)++;
if (b < 0x80) {
r += (unsigned long)b << s;
break;
@@ -207,12 +215,12 @@ uleb128(char **p)
}
static long
-sleb128(char **p)
+sleb128(const char **p)
{
long r = 0;
int s = 0;
for (;;) {
- unsigned char b = *(unsigned char *)(*p)++;
+ unsigned char b = (unsigned char)*(*p)++;
if (b < 0x80) {
if (b & 0x40) {
r -= (0x80 - b) << s;
@@ -229,7 +237,7 @@ sleb128(char **p)
}
static const char *
-get_nth_dirname(unsigned long dir, char *p)
+get_nth_dirname(unsigned long dir, const char *p)
{
if (!dir--) {
return "";
@@ -246,39 +254,51 @@ get_nth_dirname(unsigned long dir, char *p)
return p;
}
+static const char *parse_ver5_debug_line_header(const char *p, int idx, uint8_t format, obj_info_t *obj, const char **out_path, uint64_t *out_directory_index);
+
static void
-fill_filename(int file, char *include_directories, char *filenames, line_info_t *line, obj_info_t *obj)
+fill_filename(int file, uint8_t format, uint16_t version, const char *include_directories, const char *filenames, line_info_t *line, obj_info_t *obj)
{
int i;
- char *p = filenames;
- char *filename;
+ const char *p = filenames;
+ const char *filename;
unsigned long dir;
- for (i = 1; i <= file; i++) {
- filename = p;
- if (!*p) {
- /* Need to output binary file name? */
- kprintf("Unexpected file number %d in %s at %tx\n",
- file, binary_filename, filenames - obj->mapped);
- return;
- }
- while (*p) p++;
- p++;
- dir = uleb128(&p);
- /* last modified. */
- uleb128(&p);
- /* size of the file. */
- uleb128(&p);
-
- if (i == file) {
- line->filename = filename;
- line->dirname = get_nth_dirname(dir, include_directories);
- }
+ if (version >= 5) {
+ const char *path;
+ uint64_t directory_index = -1;
+ parse_ver5_debug_line_header(filenames, file, format, obj, &path, &directory_index);
+ line->filename = path;
+ parse_ver5_debug_line_header(include_directories, (int)directory_index, format, obj, &path, NULL);
+ line->dirname = path;
+ }
+ else {
+ for (i = 1; i <= file; i++) {
+ filename = p;
+ if (!*p) {
+ /* Need to output binary file name? */
+ kprintf("Unexpected file number %d in %s at %tx\n",
+ file, binary_filename, filenames - obj->mapped);
+ return;
+ }
+ while (*p) p++;
+ p++;
+ dir = uleb128(&p);
+ /* last modified. */
+ uleb128(&p);
+ /* size of the file. */
+ uleb128(&p);
+
+ if (i == file) {
+ line->filename = filename;
+ line->dirname = get_nth_dirname(dir, include_directories);
+ }
+ }
}
}
static void
fill_line(int num_traces, void **traces, uintptr_t addr, int file, int line,
- char *include_directories, char *filenames,
+ uint8_t format, uint16_t version, const char *include_directories, const char *filenames,
obj_info_t *obj, line_info_t *lines, int offset)
{
int i;
@@ -288,7 +308,7 @@ fill_line(int num_traces, void **traces, uintptr_t addr, int file, int line,
/* We assume one line code doesn't result >100 bytes of native code.
We may want more reliable way eventually... */
if (addr < a && a < addr + 100) {
- fill_filename(file, include_directories, filenames, &lines[i], obj);
+ fill_filename(file, format, version, include_directories, filenames, &lines[i], obj);
lines[i].line = line;
}
}
@@ -313,7 +333,7 @@ struct LineNumberProgramHeader {
};
static int
-parse_debug_line_header(const char **pp, struct LineNumberProgramHeader *header)
+parse_debug_line_header(obj_info_t *obj, const char **pp, struct LineNumberProgramHeader *header)
{
const char *p = *pp;
header->unit_length = *(uint32_t *)p;
@@ -330,7 +350,13 @@ parse_debug_line_header(const char **pp, struct LineNumberProgramHeader *header)
header->version = *(uint16_t *)p;
p += sizeof(uint16_t);
- if (header->version > 4) return -1;
+ if (header->version > 5) return -1;
+
+ if (header->version >= 5) {
+ /* address_size = *(uint8_t *)p++; */
+ /* segment_selector_size = *(uint8_t *)p++; */
+ p += 2;
+ }
header->header_length = header->format == 4 ? *(uint32_t *)p : *(uint64_t *)p;
p += header->format;
@@ -351,20 +377,27 @@ parse_debug_line_header(const char **pp, struct LineNumberProgramHeader *header)
/* header->standard_opcode_lengths = (uint8_t *)p - 1; */
p += header->opcode_base - 1;
- header->include_directories = p;
+ if (header->version >= 5) {
+ header->include_directories = p;
+ p = parse_ver5_debug_line_header(p, -1, header->format, obj, NULL, NULL);
+ header->filenames = p;
+ }
+ else {
+ header->include_directories = p;
- /* temporary measure for compress-debug-sections */
- if (p >= header->cu_end) return -1;
+ /* temporary measure for compress-debug-sections */
+ if (p >= header->cu_end) return -1;
- /* skip include directories */
- while (*p) {
- p = memchr(p, '\0', header->cu_end - p);
- if (!p) return -1;
- p++;
- }
- p++;
+ /* skip include directories */
+ while (*p) {
+ p = memchr(p, '\0', header->cu_end - p);
+ if (!p) return -1;
+ p++;
+ }
+ p++;
- header->filenames = p;
+ header->filenames = p;
+ }
*pp = header->cu_start;
@@ -372,7 +405,7 @@ parse_debug_line_header(const char **pp, struct LineNumberProgramHeader *header)
}
static int
-parse_debug_line_cu(int num_traces, void **traces, char **debug_line,
+parse_debug_line_cu(int num_traces, void **traces, const char **debug_line,
obj_info_t *obj, line_info_t *lines, int offset)
{
const char *p = (const char *)*debug_line;
@@ -390,15 +423,17 @@ parse_debug_line_cu(int num_traces, void **traces, char **debug_line,
/* int epilogue_begin = 0; */
/* unsigned int isa = 0; */
- if (parse_debug_line_header(&p, &header))
+ if (parse_debug_line_header(obj, &p, &header))
return -1;
is_stmt = header.default_is_stmt;
#define FILL_LINE() \
do { \
fill_line(num_traces, traces, addr, file, line, \
- (char *)header.include_directories, \
- (char *)header.filenames, \
+ header.format, \
+ header.version, \
+ header.include_directories, \
+ header.filenames, \
obj, lines, offset); \
/*basic_block = prologue_end = epilogue_begin = 0;*/ \
} while (0)
@@ -411,19 +446,19 @@ parse_debug_line_cu(int num_traces, void **traces, char **debug_line,
FILL_LINE();
break;
case DW_LNS_advance_pc:
- a = uleb128((char **)&p);
+ a = uleb128(&p) * header.minimum_instruction_length;
addr += a;
break;
case DW_LNS_advance_line: {
- long a = sleb128((char **)&p);
+ long a = sleb128(&p);
line += a;
break;
}
case DW_LNS_set_file:
- file = (unsigned int)uleb128((char **)&p);
+ file = (unsigned int)uleb128(&p);
break;
case DW_LNS_set_column:
- /*column = (unsigned int)*/(void)uleb128((char **)&p);
+ /*column = (unsigned int)*/(void)uleb128(&p);
break;
case DW_LNS_negate_stmt:
is_stmt = !is_stmt;
@@ -437,7 +472,8 @@ parse_debug_line_cu(int num_traces, void **traces, char **debug_line,
addr += a;
break;
case DW_LNS_fixed_advance_pc:
- a = *(unsigned char *)p++;
+ a = *(uint16_t *)p;
+ p += sizeof(uint16_t);
addr += a;
break;
case DW_LNS_set_prologue_end:
@@ -447,10 +483,10 @@ parse_debug_line_cu(int num_traces, void **traces, char **debug_line,
/* epilogue_begin = 1; */
break;
case DW_LNS_set_isa:
- /* isa = (unsigned int)*/(void)uleb128((char **)&p);
+ /* isa = (unsigned int)*/(void)uleb128(&p);
break;
case 0:
- a = *(unsigned char *)p++;
+ a = uleb128(&p);
op = *p++;
switch (op) {
case DW_LNE_end_sequence:
@@ -474,7 +510,7 @@ parse_debug_line_cu(int num_traces, void **traces, char **debug_line,
break;
case DW_LNE_set_discriminator:
/* TODO:currently ignore */
- uleb128((char **)&p);
+ uleb128(&p);
break;
default:
kprintf("Unknown extended opcode: %d in %s\n",
@@ -497,10 +533,10 @@ parse_debug_line_cu(int num_traces, void **traces, char **debug_line,
static int
parse_debug_line(int num_traces, void **traces,
- char *debug_line, unsigned long size,
+ const char *debug_line, unsigned long size,
obj_info_t *obj, line_info_t *lines, int offset)
{
- char *debug_line_end = debug_line + size;
+ const char *debug_line_end = debug_line + size;
while (debug_line < debug_line_end) {
if (parse_debug_line_cu(num_traces, traces, &debug_line, obj, lines, offset))
return -1;
@@ -526,13 +562,25 @@ append_obj(obj_info_t **objp)
}
#ifdef USE_ELF
+/* Ideally we should check 4 paths to follow gnu_debuglink:
+ *
+ * - /usr/lib/debug/.build-id/ab/cdef1234.debug
+ * - /usr/bin/ruby.debug
+ * - /usr/bin/.debug/ruby.debug
+ * - /usr/lib/debug/usr/bin/ruby.debug.
+ *
+ * but we handle only two cases for now, as these two formats are
+ * used by some Linux distributions.
+ *
+ * See GDB's documentation for details:
+ * https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html
+ */
+
+// check the path pattern of "/usr/lib/debug/usr/bin/ruby.debug"
static void
follow_debuglink(const char *debuglink, int num_traces, void **traces,
obj_info_t **objp, line_info_t *lines, int offset)
{
- /* Ideally we should check 4 paths to follow gnu_debuglink,
- but we handle only one case for now as this format is used
- by some linux distributions. See GDB's info for detail. */
static const char global_debug_dir[] = "/usr/lib/debug";
const size_t global_debug_dir_len = sizeof(global_debug_dir) - 1;
char *p;
@@ -559,6 +607,37 @@ follow_debuglink(const char *debuglink, int num_traces, void **traces,
o2->path = o1->path;
fill_lines(num_traces, traces, 0, objp, lines, offset);
}
+
+// check the path pattern of "/usr/lib/debug/.build-id/ab/cdef1234.debug"
+static void
+follow_debuglink_build_id(const char *build_id, size_t build_id_size, int num_traces, void **traces,
+ obj_info_t **objp, line_info_t *lines, int offset)
+{
+ static const char global_debug_dir[] = "/usr/lib/debug/.build-id/";
+ const size_t global_debug_dir_len = sizeof(global_debug_dir) - 1;
+ char *p;
+ obj_info_t *o1 = *objp, *o2;
+ size_t i;
+
+ if (PATH_MAX < global_debug_dir_len + 1 + build_id_size * 2 + 6) return;
+
+ memcpy(binary_filename, global_debug_dir, global_debug_dir_len);
+ p = binary_filename + global_debug_dir_len;
+ for (i = 0; i < build_id_size; i++) {
+ static const char tbl[] = "0123456789abcdef";
+ unsigned char n = build_id[i];
+ *p++ = tbl[n / 16];
+ *p++ = tbl[n % 16];
+ if (i == 0) *p++ = '/';
+ }
+ strcpy(p, ".debug");
+
+ append_obj(objp);
+ o2 = *objp;
+ o2->base_addr = o1->base_addr;
+ o2->path = o1->path;
+ fill_lines(num_traces, traces, 0, objp, lines, offset);
+}
#endif
enum
@@ -764,32 +843,51 @@ enum
DW_FORM_addrx4 = 0x2c
};
+/* Range list entry encodings */
+enum {
+ DW_RLE_end_of_list = 0x00,
+ DW_RLE_base_addressx = 0x01,
+ DW_RLE_startx_endx = 0x02,
+ DW_RLE_startx_length = 0x03,
+ DW_RLE_offset_pair = 0x04,
+ DW_RLE_base_address = 0x05,
+ DW_RLE_start_end = 0x06,
+ DW_RLE_start_length = 0x07
+};
+
enum {
VAL_none = 0,
VAL_cstr = 1,
VAL_data = 2,
VAL_uint = 3,
- VAL_int = 4
+ VAL_int = 4,
+ VAL_addr = 5
};
# define ABBREV_TABLE_SIZE 256
typedef struct {
obj_info_t *obj;
- char *file;
- char *current_cu;
+ const char *file;
+ uint8_t current_version;
+ const char *current_cu;
uint64_t current_low_pc;
- char *debug_line_cu_end;
- char *debug_line_files;
- char *debug_line_directories;
- char *p;
- char *cu_end;
- char *pend;
- char *q0;
- char *q;
+ uint64_t current_str_offsets_base;
+ uint64_t current_addr_base;
+ uint64_t current_rnglists_base;
+ const char *debug_line_cu_end;
+ uint8_t debug_line_format;
+ uint16_t debug_line_version;
+ const char *debug_line_files;
+ const char *debug_line_directories;
+ const char *p;
+ const char *cu_end;
+ const char *pend;
+ const char *q0;
+ const char *q;
int format; // 4 or 8
uint8_t address_size;
int level;
- char *abbrev_table[ABBREV_TABLE_SIZE];
+ const char *abbrev_table[ABBREV_TABLE_SIZE];
} DebugInfoReader;
typedef struct {
@@ -800,9 +898,10 @@ typedef struct {
typedef struct {
union {
- char *ptr;
+ const char *ptr;
uint64_t uint64;
int64_t int64;
+ uint64_t addr_idx;
} as;
uint64_t off;
uint64_t at;
@@ -811,8 +910,11 @@ typedef struct {
int type;
} DebugInfoValue;
-/* TODO: Big Endian */
+#if defined(WORDS_BIGENDIAN)
+#define MERGE_2INTS(a,b,sz) (((uint64_t)(a)<<sz)|(b))
+#else
#define MERGE_2INTS(a,b,sz) (((uint64_t)(b)<<sz)|(a))
+#endif
static uint16_t
get_uint16(const uint8_t *p)
@@ -833,39 +935,39 @@ get_uint64(const uint8_t *p)
}
static uint8_t
-read_uint8(char **ptr)
+read_uint8(const char **ptr)
{
- const unsigned char *p = (const unsigned char *)*ptr;
- *ptr = (char *)(p + 1);
- return *p;
+ const char *p = *ptr;
+ *ptr = (p + 1);
+ return (uint8_t)*p;
}
static uint16_t
-read_uint16(char **ptr)
+read_uint16(const char **ptr)
{
- const unsigned char *p = (const unsigned char *)*ptr;
- *ptr = (char *)(p + 2);
- return get_uint16(p);
+ const char *p = *ptr;
+ *ptr = (p + 2);
+ return get_uint16((const uint8_t *)p);
}
static uint32_t
-read_uint24(char **ptr)
+read_uint24(const char **ptr)
{
- const unsigned char *p = (const unsigned char *)*ptr;
- *ptr = (char *)(p + 3);
- return (*p << 16) | get_uint16(p+1);
+ const char *p = *ptr;
+ *ptr = (p + 3);
+ return ((uint8_t)*p << 16) | get_uint16((const uint8_t *)p+1);
}
static uint32_t
-read_uint32(char **ptr)
+read_uint32(const char **ptr)
{
- const unsigned char *p = (const unsigned char *)*ptr;
- *ptr = (char *)(p + 4);
- return get_uint32(p);
+ const char *p = *ptr;
+ *ptr = (p + 4);
+ return get_uint32((const uint8_t *)p);
}
static uint64_t
-read_uint64(char **ptr)
+read_uint64(const char **ptr)
{
const unsigned char *p = (const unsigned char *)*ptr;
*ptr = (char *)(p + 8);
@@ -873,7 +975,7 @@ read_uint64(char **ptr)
}
static uintptr_t
-read_uintptr(char **ptr)
+read_uintptr(const char **ptr)
{
const unsigned char *p = (const unsigned char *)*ptr;
*ptr = (char *)(p + SIZEOF_VOIDP);
@@ -914,13 +1016,34 @@ debug_info_reader_init(DebugInfoReader *reader, obj_info_t *obj)
reader->p = obj->debug_info.ptr;
reader->pend = obj->debug_info.ptr + obj->debug_info.size;
reader->debug_line_cu_end = obj->debug_line.ptr;
+ reader->current_low_pc = 0;
+ reader->current_str_offsets_base = 0;
+ reader->current_addr_base = 0;
+ reader->current_rnglists_base = 0;
+}
+
+static void
+di_skip_die_attributes(const char **p)
+{
+ for (;;) {
+ uint64_t at = uleb128(p);
+ uint64_t form = uleb128(p);
+ if (!at && !form) break;
+ switch (form) {
+ default:
+ break;
+ case DW_FORM_implicit_const:
+ sleb128(p);
+ break;
+ }
+ }
}
static void
di_read_debug_abbrev_cu(DebugInfoReader *reader)
{
uint64_t prev = 0;
- char *p = reader->q0;
+ const char *p = reader->q0;
for (;;) {
uint64_t abbrev_number = uleb128(&p);
if (abbrev_number <= prev) break;
@@ -930,12 +1053,7 @@ di_read_debug_abbrev_cu(DebugInfoReader *reader)
prev = abbrev_number;
uleb128(&p); /* tag */
p++; /* has_children */
- /* skip content */
- for (;;) {
- uint64_t at = uleb128(&p);
- uint64_t form = uleb128(&p);
- if (!at && !form) break;
- }
+ di_skip_die_attributes(&p);
}
}
@@ -946,10 +1064,12 @@ di_read_debug_line_cu(DebugInfoReader *reader)
struct LineNumberProgramHeader header;
p = (const char *)reader->debug_line_cu_end;
- if (parse_debug_line_header(&p, &header))
+ if (parse_debug_line_header(reader->obj, &p, &header))
return -1;
reader->debug_line_cu_end = (char *)header.cu_end;
+ reader->debug_line_format = header.format;
+ reader->debug_line_version = header.version;
reader->debug_line_directories = (char *)header.include_directories;
reader->debug_line_files = (char *)header.filenames;
@@ -957,6 +1077,13 @@ di_read_debug_line_cu(DebugInfoReader *reader)
}
static void
+set_addr_idx_value(DebugInfoValue *v, uint64_t n)
+{
+ v->as.addr_idx = n;
+ v->type = VAL_addr;
+}
+
+static void
set_uint_value(DebugInfoValue *v, uint64_t n)
{
v->as.uint64 = n;
@@ -971,7 +1098,7 @@ set_int_value(DebugInfoValue *v, int64_t n)
}
static void
-set_cstr_value(DebugInfoValue *v, char *s)
+set_cstr_value(DebugInfoValue *v, const char *s)
{
v->as.ptr = s;
v->off = 0;
@@ -979,7 +1106,7 @@ set_cstr_value(DebugInfoValue *v, char *s)
}
static void
-set_cstrp_value(DebugInfoValue *v, char *s, uint64_t off)
+set_cstrp_value(DebugInfoValue *v, const char *s, uint64_t off)
{
v->as.ptr = s;
v->off = off;
@@ -987,7 +1114,7 @@ set_cstrp_value(DebugInfoValue *v, char *s, uint64_t off)
}
static void
-set_data_value(DebugInfoValue *v, char *s)
+set_data_value(DebugInfoValue *v, const char *s)
{
v->as.ptr = s;
v->type = VAL_data;
@@ -1003,19 +1130,39 @@ get_cstr_value(DebugInfoValue *v)
}
}
+static const char *
+resolve_strx(DebugInfoReader *reader, uint64_t idx)
+{
+ const char *p = reader->obj->debug_str_offsets.ptr + reader->current_str_offsets_base;
+ uint64_t off;
+ if (reader->format == 4) {
+ off = ((uint32_t *)p)[idx];
+ }
+ else {
+ off = ((uint64_t *)p)[idx];
+ }
+ return reader->obj->debug_str.ptr + off;
+}
+
+static void
+debug_info_reader_read_addr_value(DebugInfoReader *reader, DebugInfoValue *v)
+{
+ if (reader->address_size == 4) {
+ set_uint_value(v, read_uint32(&reader->p));
+ } else if (reader->address_size == 8) {
+ set_uint_value(v, read_uint64(&reader->p));
+ } else {
+ fprintf(stderr,"unknown address_size:%d", reader->address_size);
+ abort();
+ }
+}
+
static void
debug_info_reader_read_value(DebugInfoReader *reader, uint64_t form, DebugInfoValue *v)
{
switch (form) {
case DW_FORM_addr:
- if (reader->address_size == 4) {
- set_uint_value(v, read_uint32(&reader->p));
- } else if (reader->address_size == 8) {
- set_uint_value(v, read_uint64(&reader->p));
- } else {
- fprintf(stderr,"unknown address_size:%d", reader->address_size);
- abort();
- }
+ debug_info_reader_read_addr_value(reader, v);
break;
case DW_FORM_block2:
v->size = read_uint16(&reader->p);
@@ -1067,13 +1214,19 @@ debug_info_reader_read_value(DebugInfoReader *reader, uint64_t form, DebugInfoVa
set_uint_value(v, read_uleb128(reader));
break;
case DW_FORM_ref_addr:
- if (reader->address_size == 4) {
- set_uint_value(v, read_uint32(&reader->p));
- } else if (reader->address_size == 8) {
- set_uint_value(v, read_uint64(&reader->p));
+ if (reader->current_version <= 2) {
+ // DWARF Version 2 specifies that references have
+ // the same size as an address on the target system
+ debug_info_reader_read_addr_value(reader, v);
} else {
- fprintf(stderr,"unknown address_size:%d", reader->address_size);
- abort();
+ if (reader->format == 4) {
+ set_uint_value(v, read_uint32(&reader->p));
+ } else if (reader->format == 8) {
+ set_uint_value(v, read_uint64(&reader->p));
+ } else {
+ fprintf(stderr,"unknown format:%d", reader->format);
+ abort();
+ }
}
break;
case DW_FORM_ref1:
@@ -1115,11 +1268,10 @@ debug_info_reader_read_value(DebugInfoReader *reader, uint64_t form, DebugInfoVa
set_uint_value(v, 1);
break;
case DW_FORM_strx:
- set_uint_value(v, uleb128(&reader->p));
+ set_cstr_value(v, resolve_strx(reader, uleb128(&reader->p)));
break;
case DW_FORM_addrx:
- /* TODO: read .debug_addr */
- set_uint_value(v, uleb128(&reader->p));
+ set_addr_idx_value(v, uleb128(&reader->p));
break;
case DW_FORM_ref_sup4:
set_uint_value(v, read_uint32(&reader->p));
@@ -1134,8 +1286,7 @@ debug_info_reader_read_value(DebugInfoReader *reader, uint64_t form, DebugInfoVa
reader->p += v->size;
break;
case DW_FORM_line_strp:
- set_uint_value(v, read_uint(reader));
- /* *p = reader->file + reader->line->sh_offset + ret; */
+ set_cstrp_value(v, reader->obj->debug_line_str.ptr, read_uint(reader));
break;
case DW_FORM_ref_sig8:
set_uint_value(v, read_uint64(&reader->p));
@@ -1153,28 +1304,28 @@ debug_info_reader_read_value(DebugInfoReader *reader, uint64_t form, DebugInfoVa
set_uint_value(v, read_uint64(&reader->p));
break;
case DW_FORM_strx1:
- set_uint_value(v, read_uint8(&reader->p));
+ set_cstr_value(v, resolve_strx(reader, read_uint8(&reader->p)));
break;
case DW_FORM_strx2:
- set_uint_value(v, read_uint16(&reader->p));
+ set_cstr_value(v, resolve_strx(reader, read_uint16(&reader->p)));
break;
case DW_FORM_strx3:
- set_uint_value(v, read_uint24(&reader->p));
+ set_cstr_value(v, resolve_strx(reader, read_uint24(&reader->p)));
break;
case DW_FORM_strx4:
- set_uint_value(v, read_uint32(&reader->p));
+ set_cstr_value(v, resolve_strx(reader, read_uint32(&reader->p)));
break;
case DW_FORM_addrx1:
- set_uint_value(v, read_uint8(&reader->p));
+ set_addr_idx_value(v, read_uint8(&reader->p));
break;
case DW_FORM_addrx2:
- set_uint_value(v, read_uint16(&reader->p));
+ set_addr_idx_value(v, read_uint16(&reader->p));
break;
case DW_FORM_addrx3:
- set_uint_value(v, read_uint24(&reader->p));
+ set_addr_idx_value(v, read_uint24(&reader->p));
break;
case DW_FORM_addrx4:
- set_uint_value(v, read_uint32(&reader->p));
+ set_addr_idx_value(v, read_uint32(&reader->p));
break;
case 0:
goto fail;
@@ -1188,10 +1339,10 @@ debug_info_reader_read_value(DebugInfoReader *reader, uint64_t form, DebugInfoVa
}
/* find abbrev in current compilation unit */
-static char *
+static const char *
di_find_abbrev(DebugInfoReader *reader, uint64_t abbrev_number)
{
- char *p;
+ const char *p;
if (abbrev_number < ABBREV_TABLE_SIZE) {
return reader->abbrev_table[abbrev_number];
}
@@ -1199,12 +1350,7 @@ di_find_abbrev(DebugInfoReader *reader, uint64_t abbrev_number)
/* skip 255th record */
uleb128(&p); /* tag */
p++; /* has_children */
- /* skip content */
- for (;;) {
- uint64_t at = uleb128(&p);
- uint64_t form = uleb128(&p);
- if (!at && !form) break;
- }
+ di_skip_die_attributes(&p);
for (uint64_t n = uleb128(&p); abbrev_number != n; n = uleb128(&p)) {
if (n == 0) {
fprintf(stderr,"%d: Abbrev Number %"PRId64" not found\n",__LINE__, abbrev_number);
@@ -1212,12 +1358,7 @@ di_find_abbrev(DebugInfoReader *reader, uint64_t abbrev_number)
}
uleb128(&p); /* tag */
p++; /* has_children */
- /* skip content */
- for (;;) {
- uint64_t at = uleb128(&p);
- uint64_t form = uleb128(&p);
- if (!at && !form) break;
- }
+ di_skip_die_attributes(&p);
}
return p;
}
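Both call sites above replace the same inline loop with `di_skip_die_attributes`, so its behavior can be inferred directly from the code it supersedes: consume (attribute, form) ULEB128 pairs until the terminating (0, 0) pair.

    /* Inferred shape of di_skip_die_attributes, based on the inline loops
     * it replaces (not the actual definition). */
    static void
    di_skip_die_attributes_sketch(const char **p)
    {
        for (;;) {
            uint64_t at = uleb128(p);    /* attribute code */
            uint64_t form = uleb128(p);  /* form code */
            if (!at && !form) break;     /* (0, 0) ends the list */
        }
    }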
@@ -1231,7 +1372,7 @@ hexdump0(const unsigned char *p, size_t n)
for (i=0; i < n; i++){
switch (i & 15) {
case 0:
- fprintf(stderr, "%02zd: %02X ", i/16, p[i]);
+ fprintf(stderr, "%02" PRIdSIZE ": %02X ", i/16, p[i]);
break;
case 15:
fprintf(stderr, "%02X\n", p[i]);
@@ -1252,16 +1393,16 @@ div_inspect(DebugInfoValue *v)
{
switch (v->type) {
case VAL_uint:
- fprintf(stderr,"%d: type:%d size:%zx v:%lx\n",__LINE__,v->type,v->size,v->as.uint64);
+ fprintf(stderr,"%d: type:%d size:%" PRIxSIZE " v:%"PRIx64"\n",__LINE__,v->type,v->size,v->as.uint64);
break;
case VAL_int:
- fprintf(stderr,"%d: type:%d size:%zx v:%ld\n",__LINE__,v->type,v->size,(int64_t)v->as.uint64);
+ fprintf(stderr,"%d: type:%d size:%" PRIxSIZE " v:%"PRId64"\n",__LINE__,v->type,v->size,(int64_t)v->as.uint64);
break;
case VAL_cstr:
- fprintf(stderr,"%d: type:%d size:%zx v:'%s'\n",__LINE__,v->type,v->size,v->as.ptr);
+ fprintf(stderr,"%d: type:%d size:%" PRIxSIZE " v:'%s'\n",__LINE__,v->type,v->size,v->as.ptr);
break;
case VAL_data:
- fprintf(stderr,"%d: type:%d size:%zx v:\n",__LINE__,v->type,v->size);
+ fprintf(stderr,"%d: type:%d size:%" PRIxSIZE " v:\n",__LINE__,v->type,v->size);
hexdump(v->as.ptr, 16);
break;
}
@@ -1312,6 +1453,76 @@ di_skip_records(DebugInfoReader *reader)
}
}
+typedef struct addr_header {
+ const char *ptr;
+ uint64_t unit_length;
+ uint8_t format;
+ uint8_t address_size;
+ /* uint8_t segment_selector_size; */
+} addr_header_t;
+
+static void
+addr_header_init(obj_info_t *obj, addr_header_t *header) {
+ const char *p = obj->debug_addr.ptr;
+
+ header->ptr = p;
+
+ if (!p) return;
+
+ header->unit_length = *(uint32_t *)p;
+ p += sizeof(uint32_t);
+
+ header->format = 4;
+ if (header->unit_length == 0xffffffff) {
+ header->unit_length = *(uint64_t *)p;
+ p += sizeof(uint64_t);
+ header->format = 8;
+ }
+
+ p += 2; /* version */
+ header->address_size = *p++;
+ p++; /* segment_selector_size */
+}
+
+static uint64_t
+read_addr(addr_header_t *header, uint64_t addr_base, uint64_t idx) {
+ if (header->address_size == 4) {
+ return ((uint32_t*)(header->ptr + addr_base))[idx];
+ }
+ else {
+ return ((uint64_t*)(header->ptr + addr_base))[idx];
+ }
+}
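The `DW_FORM_addrx*` forms store only an index; the actual address lives in `.debug_addr`, at the offset given by the unit's `DW_AT_addr_base`. A hedged usage sketch of the two helpers just defined:

    /* Illustrative only: resolve an addrx-style index to an address.
     * addr_base is the unit's DW_AT_addr_base offset into .debug_addr. */
    static uint64_t
    resolve_addrx(obj_info_t *obj, uint64_t addr_base, uint64_t idx)
    {
        addr_header_t header;
        addr_header_init(obj, &header);            /* parse the section header */
        return read_addr(&header, addr_base, idx); /* fetch the idx-th entry */
    }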
+
+typedef struct rnglists_header {
+ uint64_t unit_length;
+ uint8_t format;
+ uint8_t address_size;
+ uint32_t offset_entry_count;
+} rnglists_header_t;
+
+static void
+rnglists_header_init(obj_info_t *obj, rnglists_header_t *header) {
+ const char *p = obj->debug_rnglists.ptr;
+
+ if (!p) return;
+
+ header->unit_length = *(uint32_t *)p;
+ p += sizeof(uint32_t);
+
+ header->format = 4;
+ if (header->unit_length == 0xffffffff) {
+ header->unit_length = *(uint64_t *)p;
+ p += sizeof(uint64_t);
+ header->format = 8;
+ }
+
+ p += 2; /* version */
+ header->address_size = *p++;
+ p++; /* segment_selector_size */
+ header->offset_entry_count = *(uint32_t *)p;
+}
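Both header parsers above use the DWARF "initial length" convention: a 32-bit length of 0xffffffff escapes to 64-bit DWARF, where the real length follows as a uint64 (DWARF 5, section 7.4). Factored out, the idiom looks like this sketch (the memcpy avoids the unaligned loads the casts above technically perform):

    /* Initial-length sketch: detects 32- vs 64-bit DWARF. */
    #include <stdint.h>
    #include <string.h>

    static uint64_t
    read_initial_length(const char **p, uint8_t *format)
    {
        uint32_t len32;
        memcpy(&len32, *p, sizeof(len32)); *p += sizeof(len32);
        if (len32 != 0xffffffff) {
            *format = 4;                   /* 32-bit DWARF */
            return len32;
        }
        uint64_t len64;
        memcpy(&len64, *p, sizeof(len64)); *p += sizeof(len64);
        *format = 8;                       /* 64-bit DWARF */
        return len64;
    }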
+
typedef struct {
uint64_t low_pc;
uint64_t high_pc;
@@ -1322,31 +1533,53 @@ typedef struct {
} ranges_t;
static void
-ranges_set(ranges_t *ptr, DebugInfoValue *v)
+ranges_set(ranges_t *ptr, DebugInfoValue *v, addr_header_t *addr_header, uint64_t addr_base)
{
+ uint64_t n = 0;
+ if (v->type == VAL_uint) {
+ n = v->as.uint64;
+ }
+ else if (v->type == VAL_addr) {
+ n = read_addr(addr_header, addr_base, v->as.addr_idx);
+ }
switch (v->at) {
case DW_AT_low_pc:
- ptr->low_pc = v->as.uint64;
+ ptr->low_pc = n;
ptr->low_pc_set = true;
break;
case DW_AT_high_pc:
if (v->form == DW_FORM_addr) {
- ptr->high_pc = v->as.uint64;
+ ptr->high_pc = n;
}
else {
- ptr->high_pc = ptr->low_pc + v->as.uint64;
+ ptr->high_pc = ptr->low_pc + n;
}
ptr->high_pc_set = true;
break;
case DW_AT_ranges:
- ptr->ranges = v->as.uint64;
+ ptr->ranges = n;
ptr->ranges_set = true;
break;
}
}
+static uint64_t
+read_dw_form_addr(DebugInfoReader *reader, const char **ptr)
+{
+ const char *p = *ptr;
+ *ptr = p + reader->address_size;
+ if (reader->address_size == 4) {
+ return read_uint32(&p);
+ } else if (reader->address_size == 8) {
+ return read_uint64(&p);
+ } else {
+ fprintf(stderr,"unknown address_size:%d", reader->address_size);
+ abort();
+ }
+}
+
static uintptr_t
-ranges_include(DebugInfoReader *reader, ranges_t *ptr, uint64_t addr)
+ranges_include(DebugInfoReader *reader, ranges_t *ptr, uint64_t addr, rnglists_header_t *rnglists_header)
{
if (ptr->high_pc_set) {
if (ptr->ranges_set || !ptr->low_pc_set) {
@@ -1358,8 +1591,66 @@ ranges_include(DebugInfoReader *reader, ranges_t *ptr, uint64_t addr)
}
else if (ptr->ranges_set) {
/* TODO: support base address selection entry */
- char *p = reader->obj->debug_ranges.ptr + ptr->ranges;
+ const char *p;
uint64_t base = ptr->low_pc_set ? ptr->low_pc : reader->current_low_pc;
+ bool base_valid = true;
+ if (reader->current_version >= 5) {
+ if (rnglists_header->offset_entry_count == 0) {
+ // DW_FORM_sec_offset
+ p = reader->obj->debug_rnglists.ptr + ptr->ranges + reader->current_rnglists_base;
+ }
+ else {
+ // DW_FORM_rnglistx
+ const char *offset_array = reader->obj->debug_rnglists.ptr + reader->current_rnglists_base;
+ if (rnglists_header->format == 4) {
+ p = offset_array + ((uint32_t *)offset_array)[ptr->ranges];
+ }
+ else {
+ p = offset_array + ((uint64_t *)offset_array)[ptr->ranges];
+ }
+ }
+ for (;;) {
+ uint8_t rle = read_uint8(&p);
+ uintptr_t from = 0, to = 0;
+ if (rle == DW_RLE_end_of_list) break;
+ switch (rle) {
+ case DW_RLE_base_addressx:
+ uleb128(&p);
+ base_valid = false; /* not supported yet */
+ break;
+ case DW_RLE_startx_endx:
+ uleb128(&p);
+ uleb128(&p);
+ break;
+ case DW_RLE_startx_length:
+ uleb128(&p);
+ uleb128(&p);
+ break;
+ case DW_RLE_offset_pair:
+ if (!base_valid) break;
+ from = (uintptr_t)base + uleb128(&p);
+ to = (uintptr_t)base + uleb128(&p);
+ break;
+ case DW_RLE_base_address:
+ base = read_dw_form_addr(reader, &p);
+ base_valid = true;
+ break;
+ case DW_RLE_start_end:
+ from = (uintptr_t)read_dw_form_addr(reader, &p);
+ to = (uintptr_t)read_dw_form_addr(reader, &p);
+ break;
+ case DW_RLE_start_length:
+ from = (uintptr_t)read_dw_form_addr(reader, &p);
+ to = from + uleb128(&p);
+ break;
+ }
+ if (from <= addr && addr < to) {
+ return from;
+ }
+ }
+ return false;
+ }
+ p = reader->obj->debug_ranges.ptr + ptr->ranges;
for (;;) {
uintptr_t from = read_uintptr(&p);
uintptr_t to = read_uintptr(&p);
@@ -1369,7 +1660,7 @@ ranges_include(DebugInfoReader *reader, ranges_t *ptr, uint64_t addr)
base = to;
}
else if (base + from <= addr && addr < base + to) {
- return from;
+ return (uintptr_t)base + from;
}
}
}
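The entry kinds the new `.debug_rnglists` loop switches on are, per the DWARF 5 specification (section 7.25), the following small integers; the actual constants are presumably defined in a header elsewhere in the tree:

    enum {
        DW_RLE_end_of_list   = 0x00,
        DW_RLE_base_addressx = 0x01,
        DW_RLE_startx_endx   = 0x02,
        DW_RLE_startx_length = 0x03,
        DW_RLE_offset_pair   = 0x04,
        DW_RLE_base_address  = 0x05,
        DW_RLE_start_end     = 0x06,
        DW_RLE_start_length  = 0x07,
    };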
@@ -1427,6 +1718,7 @@ di_read_cu(DebugInfoReader *reader)
}
reader->cu_end = reader->p + unit_length;
version = read_uint16(&reader->p);
+ reader->current_version = version;
if (version > 5) {
return -1;
}
@@ -1459,30 +1751,76 @@ di_read_cu(DebugInfoReader *reader)
break;
}
+ reader->current_str_offsets_base = 0;
+ reader->current_addr_base = 0;
+ reader->current_rnglists_base = 0;
+
+ DebugInfoValue low_pc = {{}};
/* enumerate abbrev */
for (;;) {
DebugInfoValue v = {{}};
if (!di_read_record(reader, &v)) break;
switch (v.at) {
case DW_AT_low_pc:
- reader->current_low_pc = v.as.uint64;
+ // clang may output DW_AT_addr_base after DW_AT_low_pc.
+ // We need to resolve the DW_FORM_addr* after DW_AT_addr_base is parsed.
+ low_pc = v;
+ break;
+ case DW_AT_str_offsets_base:
+ reader->current_str_offsets_base = v.as.uint64;
+ break;
+ case DW_AT_addr_base:
+ reader->current_addr_base = v.as.uint64;
+ break;
+ case DW_AT_rnglists_base:
+ reader->current_rnglists_base = v.as.uint64;
break;
}
}
+ // Resolve the DW_FORM_addr of DW_AT_low_pc
+ switch (low_pc.type) {
+ case VAL_uint:
+ reader->current_low_pc = low_pc.as.uint64;
+ break;
+ case VAL_addr:
+ {
+ addr_header_t header;
+ addr_header_init(reader->obj, &header);
+ reader->current_low_pc = read_addr(&header, reader->current_addr_base, low_pc.as.addr_idx);
+ }
+ break;
+ }
} while (0);
#endif
return 0;
}
static void
-read_abstract_origin(DebugInfoReader *reader, uint64_t abstract_origin, line_info_t *line)
+read_abstract_origin(DebugInfoReader *reader, uint64_t form, uint64_t abstract_origin, line_info_t *line)
{
- char *p = reader->p;
- char *q = reader->q;
+ const char *p = reader->p;
+ const char *q = reader->q;
int level = reader->level;
DIE die;
- reader->p = reader->current_cu + abstract_origin;
+ switch (form) {
+ case DW_FORM_ref1:
+ case DW_FORM_ref2:
+ case DW_FORM_ref4:
+ case DW_FORM_ref8:
+ case DW_FORM_ref_udata:
+ reader->p = reader->current_cu + abstract_origin;
+ break;
+ case DW_FORM_ref_addr:
+ goto finish; /* not supported yet */
+ case DW_FORM_ref_sig8:
+ goto finish; /* not supported yet */
+ case DW_FORM_ref_sup4:
+ case DW_FORM_ref_sup8:
+ goto finish; /* not supported yet */
+ default:
+ goto finish;
+ }
if (!di_read_die(reader, &die)) goto finish;
/* enumerate abbrev */
@@ -1505,6 +1843,13 @@ read_abstract_origin(DebugInfoReader *reader, uint64_t abstract_origin, line_inf
static void
debug_info_read(DebugInfoReader *reader, int num_traces, void **traces,
line_info_t *lines, int offset) {
+
+ addr_header_t addr_header = {};
+ addr_header_init(reader->obj, &addr_header);
+
+ rnglists_header_t rnglists_header = {};
+ rnglists_header_init(reader->obj, &rnglists_header);
+
while (reader->p < reader->cu_end) {
DIE die;
ranges_t ranges = {};
@@ -1531,7 +1876,7 @@ debug_info_read(DebugInfoReader *reader, int num_traces, void **traces,
line.sname = get_cstr_value(&v);
break;
case DW_AT_call_file:
- fill_filename((int)v.as.uint64, reader->debug_line_directories, reader->debug_line_files, &line, reader->obj);
+ fill_filename((int)v.as.uint64, reader->debug_line_format, reader->debug_line_version, reader->debug_line_directories, reader->debug_line_files, &line, reader->obj);
break;
case DW_AT_call_line:
line.line = (int)v.as.uint64;
@@ -1539,7 +1884,7 @@ debug_info_read(DebugInfoReader *reader, int num_traces, void **traces,
case DW_AT_low_pc:
case DW_AT_high_pc:
case DW_AT_ranges:
- ranges_set(&ranges, &v);
+ ranges_set(&ranges, &v, &addr_header, reader->current_addr_base);
break;
case DW_AT_declaration:
goto skip_die;
@@ -1547,7 +1892,7 @@ debug_info_read(DebugInfoReader *reader, int num_traces, void **traces,
/* 1 or 3 */
break; /* goto skip_die; */
case DW_AT_abstract_origin:
- read_abstract_origin(reader, v.as.uint64, &line);
+ read_abstract_origin(reader, v.form, v.as.uint64, &line);
break; /* goto skip_die; */
}
}
@@ -1556,9 +1901,9 @@ debug_info_read(DebugInfoReader *reader, int num_traces, void **traces,
for (int i=offset; i < num_traces; i++) {
uintptr_t addr = (uintptr_t)traces[i];
uintptr_t offset = addr - reader->obj->base_addr + reader->obj->vmaddr;
- uintptr_t saddr = ranges_include(reader, &ranges, offset);
+ uintptr_t saddr = ranges_include(reader, &ranges, offset, &rnglists_header);
if (saddr) {
- /* fprintf(stderr, "%d:%tx: %d %lx->%lx %x %s: %s/%s %d %s %s %s\n",__LINE__,die.pos, i,addr,offset, die.tag,line.sname,line.dirname,line.filename,line.line,reader->obj->path,line.sname,lines[i].sname); */
+ /* fprintf(stdout, "%d:%tx: %d %lx->%lx %x %s: %s/%s %d %s %s %s\n",__LINE__,die.pos, i,addr,offset, die.tag,line.sname,line.dirname,line.filename,line.line,reader->obj->path,line.sname,lines[i].sname); */
if (lines[i].sname) {
line_info_t *lp = malloc(sizeof(line_info_t));
memcpy(lp, &lines[i], sizeof(line_info_t));
@@ -1577,10 +1922,59 @@ debug_info_read(DebugInfoReader *reader, int num_traces, void **traces,
}
}
+// This function parses the following attributes of Line Number Program Header in DWARF 5:
+//
+// * directory_entry_format_count
+// * directory_entry_format
+// * directories_count
+// * directories
+//
+// or
+//
+// * file_name_entry_format_count
+// * file_name_entry_format
+// * file_names_count
+// * file_names
+//
+// It records the DW_LNCT_path and DW_LNCT_directory_index of the entry at index "idx".
+static const char *
+parse_ver5_debug_line_header(const char *p, int idx, uint8_t format, obj_info_t *obj, const char **out_path, uint64_t *out_directory_index) {
+ int i, j;
+ int entry_format_count = *(uint8_t *)p++;
+ const char *entry_format = p;
+
+    /* skip over the entry_format (content type, form) pairs */
+ for (i = 0; i < entry_format_count * 2; i++) uleb128(&p);
+
+ int entry_count = (int)uleb128(&p);
+
+ DebugInfoReader reader;
+ debug_info_reader_init(&reader, obj);
+ reader.format = format;
+ reader.p = p;
+ for (j = 0; j < entry_count; j++) {
+ const char *format = entry_format;
+ for (i = 0; i < entry_format_count; i++) {
+ DebugInfoValue v = {{}};
+ unsigned long dw_lnct = uleb128(&format);
+ unsigned long dw_form = uleb128(&format);
+ debug_info_reader_read_value(&reader, dw_form, &v);
+ if (dw_lnct == 1 /* DW_LNCT_path */ && v.type == VAL_cstr && out_path)
+ *out_path = v.as.ptr + v.off;
+ if (dw_lnct == 2 /* DW_LNCT_directory_index */ && v.type == VAL_uint && out_directory_index)
+ *out_directory_index = v.as.uint64;
+ }
+ if (j == idx) return 0;
+ }
+
+ return reader.p;
+}
+
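The literal `1` and `2` matched above are line-number content types from DWARF 5 (section 6.2.4.1). For reference, the full standard set is:

    enum {
        DW_LNCT_path            = 0x1, /* matched above */
        DW_LNCT_directory_index = 0x2, /* matched above */
        DW_LNCT_timestamp       = 0x3,
        DW_LNCT_size            = 0x4,
        DW_LNCT_MD5             = 0x5,
    };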
#ifdef USE_ELF
static unsigned long
uncompress_debug_section(ElfW(Shdr) *shdr, char *file, char **ptr)
{
+ *ptr = NULL;
#ifdef SUPPORT_COMPRESSED_DEBUG_LINE
ElfW(Chdr) *chdr = (ElfW(Chdr) *)(file + shdr->sh_offset);
unsigned long destsize = chdr->ch_size;
@@ -1601,6 +1995,7 @@ uncompress_debug_section(ElfW(Shdr) *shdr, char *file, char **ptr)
fail:
free(*ptr);
+ *ptr = NULL;
#endif
return 0;
}
@@ -1615,6 +2010,7 @@ fill_lines(int num_traces, void **traces, int check_debuglink,
ElfW(Ehdr) *ehdr;
ElfW(Shdr) *shdr, *shstr_shdr;
ElfW(Shdr) *gnu_debuglink_shdr = NULL;
+ ElfW(Shdr) *note_gnu_build_id = NULL;
int fd;
off_t filesize;
char *file;
@@ -1687,6 +2083,11 @@ fill_lines(int num_traces, void **traces, int check_debuglink,
/* if (!strcmp(section_name, ".dynsym")) */
dynsym_shdr = shdr + i;
break;
+ case SHT_NOTE:
+ if (!strcmp(section_name, ".note.gnu.build-id")) {
+ note_gnu_build_id = shdr + i;
+ }
+ break;
case SHT_PROGBITS:
if (!strcmp(section_name, ".gnu_debuglink")) {
gnu_debuglink_shdr = shdr + i;
@@ -1697,7 +2098,11 @@ fill_lines(int num_traces, void **traces, int check_debuglink,
".debug_info",
".debug_line",
".debug_ranges",
- ".debug_str"
+ ".debug_str_offsets",
+ ".debug_addr",
+ ".debug_rnglists",
+ ".debug_str",
+ ".debug_line_str"
};
for (j=0; j < DWARF_SECTION_COUNT; j++) {
@@ -1802,6 +2207,13 @@ use_symtab:
num_traces, traces,
objp, lines, offset);
}
+ if (note_gnu_build_id && check_debuglink) {
+ ElfW(Nhdr) *nhdr = (ElfW(Nhdr)*) (file + note_gnu_build_id->sh_offset);
+ const char *build_id = (char *)(nhdr + 1) + nhdr->n_namesz;
+ follow_debuglink_build_id(build_id, nhdr->n_descsz,
+ num_traces, traces,
+ objp, lines, offset);
+ }
goto finish;
}
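The note payload here follows the Nhdr directly: n_namesz bytes of name, then n_descsz bytes of build id. Skipping only n_namesz works for .note.gnu.build-id because its name is "GNU\0" (4 bytes, already 4-byte aligned). By the usual GDB convention, the id maps to a hex path under /usr/lib/debug/.build-id/; a hypothetical formatter (not necessarily what follow_debuglink_build_id does):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative: format a build id the way debug-file lookups expect,
     * e.g. .build-id/<first byte>/<remaining bytes>.debug */
    static void
    print_build_id_path(const unsigned char *desc, uint32_t descsz)
    {
        for (uint32_t i = 0; i < descsz; i++) {
            printf("%02x", desc[i]);
            if (i == 0) putchar('/'); /* first byte names the directory */
        }
        puts(".debug");
    }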
@@ -1946,7 +2358,11 @@ found_mach_header:
"__debug_info",
"__debug_line",
"__debug_ranges",
- "__debug_str"
+ "__debug_str_offsets",
+ "__debug_addr",
+ "__debug_rnglists",
+ "__debug_str",
+ "__debug_line_str",
};
struct LP(segment_command) *scmd = (struct LP(segment_command) *)lcmd;
if (strcmp(scmd->segname, "__TEXT") == 0) {
@@ -1983,7 +2399,7 @@ found_mach_header:
char *strtab = file + cmd->stroff, *sname = 0;
uint32_t j;
uintptr_t saddr = 0;
- /* kprintf("[%2d]: %x/symtab %p\n", i, cmd->cmd, p); */
+ /* kprintf("[%2d]: %x/symtab %p\n", i, cmd->cmd, (void *)p); */
for (j = 0; j < cmd->nsyms; j++) {
uintptr_t symsize, d;
struct LP(nlist) *e = &nl[j];
@@ -2035,7 +2451,7 @@ fail:
#endif
#define HAVE_MAIN_EXE_PATH
-#if defined(__FreeBSD__)
+#if defined(__FreeBSD__) || defined(__DragonFly__)
# include <sys/sysctl.h>
#endif
/* ssize_t main_exe_path(void)
@@ -2044,17 +2460,21 @@ fail:
* and returns strlen(binary_filename).
* it is NUL terminated.
*/
-#if defined(__linux__)
+#if defined(__linux__) || defined(__NetBSD__)
static ssize_t
main_exe_path(void)
{
-# define PROC_SELF_EXE "/proc/self/exe"
+# if defined(__linux__)
+# define PROC_SELF_EXE "/proc/self/exe"
+# elif defined(__NetBSD__)
+# define PROC_SELF_EXE "/proc/curproc/exe"
+# endif
ssize_t len = readlink(PROC_SELF_EXE, binary_filename, PATH_MAX);
if (len < 0) return 0;
binary_filename[len] = 0;
return len;
}
-#elif defined(__FreeBSD__)
+#elif defined(__FreeBSD__) || defined(__DragonFly__)
static ssize_t
main_exe_path(void)
{
@@ -2098,9 +2518,12 @@ print_line0(line_info_t *line, void *address)
else if (!line->path) {
kprintf("[0x%"PRIxPTR"]\n", addr);
}
- else if (!line->saddr || !line->sname) {
+ else if (!line->sname) {
kprintf("%s(0x%"PRIxPTR") [0x%"PRIxPTR"]\n", line->path, addr-line->base_addr, addr);
}
+ else if (!line->saddr) {
+ kprintf("%s(%s) [0x%"PRIxPTR"]\n", line->path, line->sname, addr);
+ }
else if (line->line <= 0) {
kprintf("%s(%s+0x%"PRIxPTR") [0x%"PRIxPTR"]\n", line->path, line->sname,
d, addr);
@@ -2137,6 +2560,7 @@ rb_dump_backtrace_with_lines(int num_traces, void **traces)
obj_info_t *obj = NULL;
/* 2 is NULL + main executable */
void **dladdr_fbases = (void **)calloc(num_traces+2, sizeof(void *));
+
#ifdef HAVE_MAIN_EXE_PATH
char *main_path = NULL; /* used on printing backtrace */
ssize_t len;
@@ -2167,8 +2591,8 @@ rb_dump_backtrace_with_lines(int num_traces, void **traces)
     /* if the binary is stripped, this may take effect */
for (p=dladdr_fbases; *p; p++) {
if (*p == info.dli_fbase) {
- lines[i].path = info.dli_fname;
- lines[i].sname = info.dli_sname;
+ if (info.dli_fname) lines[i].path = info.dli_fname;
+ if (info.dli_sname) lines[i].sname = info.dli_sname;
goto next_line;
}
}
@@ -2178,9 +2602,11 @@ rb_dump_backtrace_with_lines(int num_traces, void **traces)
obj->base_addr = (uintptr_t)info.dli_fbase;
path = info.dli_fname;
obj->path = path;
- lines[i].path = path;
- lines[i].sname = info.dli_sname;
- lines[i].saddr = (uintptr_t)info.dli_saddr;
+ if (path) lines[i].path = path;
+ if (info.dli_sname) {
+ lines[i].sname = info.dli_sname;
+ lines[i].saddr = (uintptr_t)info.dli_saddr;
+ }
strlcpy(binary_filename, path, PATH_MAX);
if (fill_lines(num_traces, traces, 1, &obj, lines, i) == (uintptr_t)-1)
break;
diff --git a/appveyor.yml b/appveyor.yml
deleted file mode 100644
index 71dd19338a..0000000000
--- a/appveyor.yml
+++ /dev/null
@@ -1,94 +0,0 @@
----
-version: '{build}'
-init:
- - git config --global user.name git
- - git config --global user.email svn-admin@ruby-lang.org
-clone_depth: 10
-platform:
- - x64
-environment:
- ruby_version: "24-%Platform%"
- zlib_version: "1.2.11"
- matrix:
- - build: vs
- vs: 120
- ssl: OpenSSL
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
- GEMS_FOR_TEST: ""
- - build: vs
- vs: 140
- ssl: OpenSSL-v111
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
- GEMS_FOR_TEST: ""
- RELINE_TEST_ENCODING: "Windows-31J"
- UPDATE_UNICODE: "UNICODE_FILES=. UNICODE_PROPERTY_FILES=. UNICODE_AUXILIARY_FILES=. UNICODE_EMOJI_FILES=."
-for:
--
- matrix:
- only:
- - build: vs
- install:
- - ver
- - chcp
- - SET BITS=%Platform:x86=32%
- - SET BITS=%BITS:x=%
- - SET OPENSSL_DIR=C:\%ssl%-Win%BITS%
- - CALL SET vcvars=%%^VS%VS%COMNTOOLS^%%..\..\VC\vcvarsall.bat
- - SET vcvars
- - '"%vcvars%" %Platform:x64=amd64%'
- - SET ruby_path=C:\Ruby%ruby_version:-x86=%
- - SET PATH=\usr\local\bin;%ruby_path%\bin;%PATH%;C:\msys64\mingw64\bin;C:\msys64\usr\bin
- - ruby --version
- - 'cl'
- - echo> Makefile srcdir=.
- - echo>> Makefile MSC_VER=0
- - echo>> Makefile RT=none
- - echo>> Makefile RT_VER=0
- - echo>> Makefile BUILTIN_ENCOBJS=nul
- - type win32\Makefile.sub >> Makefile
- - nmake %mflags% touch-unicode-files
- - nmake %mflags% %UPDATE_UNICODE% up incs
- - del Makefile
- - mkdir \usr\local\bin
- - mkdir \usr\local\include
- - mkdir \usr\local\lib
- - curl -fsSL -o zlib%zlib_version:.=%.zip --retry 10 https://zlib.net/zlib%zlib_version:.=%.zip
- - 7z x -o%APPVEYOR_BUILD_FOLDER%\ext\zlib zlib%zlib_version:.=%.zip
- - for %%I in (%OPENSSL_DIR%\*.dll) do mklink /h \usr\local\bin\%%~nxI %%I
- - attrib +r /s /d
- - mkdir %Platform%-mswin_%vs%
- build_script:
- - cd %APPVEYOR_BUILD_FOLDER%
- - cd %Platform%-mswin_%vs%
- - ..\win32\configure.bat --without-ext=+,dbm,gdbm,readline --with-opt-dir=/usr/local --with-openssl-dir=%OPENSSL_DIR:\=/%
- - nmake -l
- - nmake install-nodoc
- - \usr\bin\ruby -v -e "p :locale => Encoding.find('locale'), :filesystem => Encoding.find('filesystem')"
- - if not "%GEMS_FOR_TEST%" == "" \usr\bin\gem install --no-document %GEMS_FOR_TEST%
- - \usr\bin\ruby -ropenssl -e "puts 'Build ' + OpenSSL::OPENSSL_VERSION, 'Runtime ' + OpenSSL::OPENSSL_LIBRARY_VERSION"
- test_script:
- - set /a JOBS=%NUMBER_OF_PROCESSORS%
- - nmake -l "TESTOPTS=-v -q" btest
- - nmake -l "TESTOPTS=-v -q" test-basic
- - nmake -l "TESTOPTS=-v --timeout-scale=3.0 --excludes=../test/excludes/_appveyor -j%JOBS% --exclude readline --exclude win32ole --exclude test_bignum --exclude test_syntax --exclude test_open-uri --exclude test_bundled_ca" test-all
- # separately execute tests without -j which may crash worker with -j.
- - nmake -l "TESTOPTS=-v --timeout-scale=3.0 --excludes=../test/excludes/_appveyor" test-all TESTS="../test/win32ole ../test/ruby/test_bignum.rb ../test/ruby/test_syntax.rb ../test/open-uri/test_open-uri.rb ../test/rubygems/test_bundled_ca.rb"
- - nmake -l test-spec MSPECOPT=-fs # not using `-j` because sometimes `mspec -j` silently dies on Windows
-notifications:
- - provider: Webhook
- method: POST
- url:
- secure: CcFlJNDJ/a6to7u3Z4Fnz6dScEPNx7hTha2GkSRlV+1U6dqmxY/7uBcLXYb9gR3jfQk6w+2o/HrjNAyXMNGU/JOka3s2WRI4VKitzM+lQ08owvJIh0R7LxrGH0J2e81U # ruby-lang slack: ruby/simpler-alerts-bot
- body: >-
- {{^isPullRequest}}
- {
- "ci": "AppVeyor CI",
- "env": "Visual Studio 2013 / 2015",
- "url": "{{buildUrl}}",
- "commit": "{{commitId}}",
- "branch": "{{branch}}"
- }
- {{/isPullRequest}}
- on_build_success: false
- on_build_failure: true
- on_build_status_changed: false
diff --git a/array.c b/array.c
index 0bf5c153b9..b76e9a64a3 100644
--- a/array.c
+++ b/array.c
@@ -39,6 +39,37 @@
VALUE rb_cArray;
+/* Flags of RArray
+ *
+ * 1: RARRAY_EMBED_FLAG
+ * The array is embedded (its contents follow the header, rather than
+ * being on a separately allocated buffer).
+ * 2: RARRAY_SHARED_FLAG (equal to ELTS_SHARED)
+ * The array is shared. The buffer this array points to is owned by
+ * another array (the shared root).
+ * if USE_RVARGC
+ * 3-9: RARRAY_EMBED_LEN
+ * The length of the array when RARRAY_EMBED_FLAG is set.
+ * else
+ * 3-4: RARRAY_EMBED_LEN
+ * The length of the array when RARRAY_EMBED_FLAG is set.
+ * endif
+ * 12: RARRAY_SHARED_ROOT_FLAG
+ * The array is a shared root that does reference counting. The buffer
+ * this array points to is owned by this array but may be pointed to
+ * by other arrays.
+ * Note: Frozen arrays may be a shared root without this flag being
+ * set. Frozen arrays do not have reference counting because
+ * they cannot be modified. Not updating the reference count
+ * improves copy-on-write performance. Their reference count is
+ * assumed to be infinity.
+ * 13: RARRAY_TRANSIENT_FLAG
+ * The buffer of the array is allocated on the transient heap.
+ * 14: RARRAY_PTR_IN_USE_FLAG
+ * The buffer of the array is in use. This is only used during
+ * debugging.
+ */
+
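As a reading aid for the bit layout documented above: with the non-RVARGC layout (embed length in bits 3-4), the embedded length can be extracted as in this sketch. The shift and width here are taken from the comment, not from the real RARRAY_EMBED_LEN_* macros used further down.

    /* Illustrative only; mirrors the documented non-RVARGC layout. */
    static long
    embed_len_sketch(unsigned long flags)
    {
        const int shift = 3;                  /* first embed-len bit */
        const unsigned long width_mask = 0x3; /* bits 3-4: two bits */
        return (long)((flags >> shift) & width_mask);
    }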
/* for OPTIMIZED_CMP: */
#define id_cmp idCmp
@@ -46,28 +77,13 @@ VALUE rb_cArray;
#define ARY_MAX_SIZE (LONG_MAX / (int)sizeof(VALUE))
#define SMALL_ARRAY_LEN 16
+RBIMPL_ATTR_MAYBE_UNUSED()
static int
should_be_T_ARRAY(VALUE ary)
{
return RB_TYPE_P(ary, T_ARRAY);
}
-static int
-should_not_be_shared_and_embedded(VALUE ary)
-{
- return !FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG);
-}
-
-#define ARY_SHARED_P(ary) \
- (assert(should_be_T_ARRAY((VALUE)(ary))), \
- assert(should_not_be_shared_and_embedded((VALUE)ary)), \
- FL_TEST_RAW((ary),ELTS_SHARED)!=0)
-
-#define ARY_EMBED_P(ary) \
- (assert(should_be_T_ARRAY((VALUE)(ary))), \
- assert(should_not_be_shared_and_embedded((VALUE)ary)), \
- FL_TEST_RAW((ary), RARRAY_EMBED_FLAG) != 0)
-
#define ARY_HEAP_PTR(a) (assert(!ARY_EMBED_P(a)), RARRAY(a)->as.heap.ptr)
#define ARY_HEAP_LEN(a) (assert(!ARY_EMBED_P(a)), RARRAY(a)->as.heap.len)
#define ARY_HEAP_CAPA(a) (assert(!ARY_EMBED_P(a)), assert(!ARY_SHARED_ROOT_P(a)), \
@@ -77,11 +93,11 @@ should_not_be_shared_and_embedded(VALUE ary)
#define ARY_EMBED_LEN(a) \
(assert(ARY_EMBED_P(a)), \
(long)((RBASIC(a)->flags >> RARRAY_EMBED_LEN_SHIFT) & \
- (RARRAY_EMBED_LEN_MASK >> RARRAY_EMBED_LEN_SHIFT)))
+ (RARRAY_EMBED_LEN_MASK >> RARRAY_EMBED_LEN_SHIFT)))
#define ARY_HEAP_SIZE(a) (assert(!ARY_EMBED_P(a)), assert(ARY_OWNS_HEAP_P(a)), ARY_CAPA(a) * sizeof(VALUE))
#define ARY_OWNS_HEAP_P(a) (assert(should_be_T_ARRAY((VALUE)(a))), \
- !FL_TEST_RAW((a), ELTS_SHARED|RARRAY_EMBED_FLAG))
+ !FL_TEST_RAW((a), RARRAY_SHARED_FLAG|RARRAY_EMBED_FLAG))
#define FL_SET_EMBED(a) do { \
assert(!ARY_SHARED_P(a)); \
@@ -93,9 +109,9 @@ should_not_be_shared_and_embedded(VALUE ary)
#define FL_UNSET_EMBED(ary) FL_UNSET((ary), RARRAY_EMBED_FLAG|RARRAY_EMBED_LEN_MASK)
#define FL_SET_SHARED(ary) do { \
assert(!ARY_EMBED_P(ary)); \
- FL_SET((ary), ELTS_SHARED); \
+ FL_SET((ary), RARRAY_SHARED_FLAG); \
} while (0)
-#define FL_UNSET_SHARED(ary) FL_UNSET((ary), ELTS_SHARED)
+#define FL_UNSET_SHARED(ary) FL_UNSET((ary), RARRAY_SHARED_FLAG)
#define ARY_SET_PTR(ary, p) do { \
assert(!ARY_EMBED_P(ary)); \
@@ -105,7 +121,6 @@ should_not_be_shared_and_embedded(VALUE ary)
#define ARY_SET_EMBED_LEN(ary, n) do { \
long tmp_n = (n); \
assert(ARY_EMBED_P(ary)); \
- assert(!OBJ_FROZEN(ary)); \
RBASIC(ary)->flags &= ~RARRAY_EMBED_LEN_MASK; \
RBASIC(ary)->flags |= (tmp_n) << RARRAY_EMBED_LEN_SHIFT; \
} while (0)
@@ -137,7 +152,7 @@ should_not_be_shared_and_embedded(VALUE ary)
} \
} while (0)
-#define ARY_CAPA(ary) (ARY_EMBED_P(ary) ? RARRAY_EMBED_LEN_MAX : \
+#define ARY_CAPA(ary) (ARY_EMBED_P(ary) ? ary_embed_capa(ary) : \
ARY_SHARED_ROOT_P(ary) ? RARRAY_LEN(ary) : ARY_HEAP_CAPA(ary))
#define ARY_SET_CAPA(ary, n) do { \
assert(!ARY_EMBED_P(ary)); \
@@ -146,26 +161,25 @@ should_not_be_shared_and_embedded(VALUE ary)
RARRAY(ary)->as.heap.aux.capa = (n); \
} while (0)
-#define ARY_SHARED_ROOT(ary) (assert(ARY_SHARED_P(ary)), RARRAY(ary)->as.heap.aux.shared_root)
#define ARY_SET_SHARED(ary, value) do { \
const VALUE _ary_ = (ary); \
const VALUE _value_ = (value); \
assert(!ARY_EMBED_P(_ary_)); \
assert(ARY_SHARED_P(_ary_)); \
- assert(ARY_SHARED_ROOT_P(_value_)); \
+ assert(!OBJ_FROZEN(_ary_)); \
+ assert(ARY_SHARED_ROOT_P(_value_) || OBJ_FROZEN(_value_)); \
RB_OBJ_WRITE(_ary_, &RARRAY(_ary_)->as.heap.aux.shared_root, _value_); \
} while (0)
-#define RARRAY_SHARED_ROOT_FLAG FL_USER5
-#define ARY_SHARED_ROOT_P(ary) (assert(should_be_T_ARRAY((VALUE)(ary))), \
- FL_TEST_RAW((ary), RARRAY_SHARED_ROOT_FLAG))
-#define ARY_SHARED_ROOT_REFCNT(ary) \
- (assert(ARY_SHARED_ROOT_P(ary)), RARRAY(ary)->as.heap.aux.capa)
-#define ARY_SHARED_ROOT_OCCUPIED(ary) (ARY_SHARED_ROOT_REFCNT(ary) == 1)
+
+#define ARY_SHARED_ROOT_OCCUPIED(ary) (!OBJ_FROZEN(ary) && ARY_SHARED_ROOT_REFCNT(ary) == 1)
#define ARY_SET_SHARED_ROOT_REFCNT(ary, value) do { \
assert(ARY_SHARED_ROOT_P(ary)); \
+ assert(!OBJ_FROZEN(ary)); \
+ assert((value) >= 0); \
RARRAY(ary)->as.heap.aux.capa = (value); \
} while (0)
#define FL_SET_SHARED_ROOT(ary) do { \
+ assert(!OBJ_FROZEN(ary)); \
assert(!ARY_EMBED_P(ary)); \
assert(!RARRAY_TRANSIENT_P(ary)); \
FL_SET((ary), RARRAY_SHARED_ROOT_FLAG); \
@@ -181,6 +195,65 @@ ARY_SET(VALUE a, long i, VALUE v)
}
#undef RARRAY_ASET
+static long
+ary_embed_capa(VALUE ary)
+{
+#if USE_RVARGC
+ size_t size = rb_gc_obj_slot_size(ary) - offsetof(struct RArray, as.ary);
+ assert(size % sizeof(VALUE) == 0);
+ return size / sizeof(VALUE);
+#else
+ return RARRAY_EMBED_LEN_MAX;
+#endif
+}
+
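A worked example of the USE_RVARGC arithmetic above, with assumed (not authoritative) 64-bit sizes: if rb_gc_obj_slot_size(ary) returns 80 and the embedded elements begin at offset 16 (offsetof(struct RArray, as.ary)), then (80 - 16) / sizeof(VALUE) = 64 / 8 = 8 VALUEs fit inline; a 40-byte slot would hold (40 - 16) / 8 = 3.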
+static size_t
+ary_embed_size(long capa)
+{
+ return offsetof(struct RArray, as.ary) + (sizeof(VALUE) * capa);
+}
+
+static bool
+ary_embeddable_p(long capa)
+{
+#if USE_RVARGC
+ return rb_gc_size_allocatable_p(ary_embed_size(capa));
+#else
+ return capa <= RARRAY_EMBED_LEN_MAX;
+#endif
+}
+
+bool
+rb_ary_embeddable_p(VALUE ary)
+{
+    /* An array cannot be made embeddable when it is:
+ * - Shared root: other objects may point to the buffer of this array
+ * so we cannot make it embedded.
+ * - Frozen: this array may also be a shared root without the shared root
+ * flag.
+ * - Shared: we don't want to re-embed an array that points to a shared
+ * root (to save memory).
+ */
+ return !(ARY_SHARED_ROOT_P(ary) || OBJ_FROZEN(ary) || ARY_SHARED_P(ary));
+}
+
+size_t
+rb_ary_size_as_embedded(VALUE ary)
+{
+ size_t real_size;
+
+ if (ARY_EMBED_P(ary)) {
+ real_size = ary_embed_size(ARY_EMBED_LEN(ary));
+ }
+ else if (rb_ary_embeddable_p(ary)) {
+ real_size = ary_embed_size(ARY_HEAP_CAPA(ary));
+ }
+ else {
+ real_size = sizeof(struct RArray);
+ }
+ return real_size;
+}
+
#if ARRAY_DEBUG
#define ary_verify(ary) ary_verify_(ary, __FILE__, __LINE__)
@@ -190,19 +263,19 @@ ary_verify_(VALUE ary, const char *file, int line)
{
assert(RB_TYPE_P(ary, T_ARRAY));
- if (FL_TEST(ary, ELTS_SHARED)) {
- VALUE root = RARRAY(ary)->as.heap.aux.shared_root;
+ if (ARY_SHARED_P(ary)) {
+ VALUE root = ARY_SHARED_ROOT(ary);
const VALUE *ptr = ARY_HEAP_PTR(ary);
const VALUE *root_ptr = RARRAY_CONST_PTR_TRANSIENT(root);
long len = ARY_HEAP_LEN(ary), root_len = RARRAY_LEN(root);
- assert(FL_TEST(root, RARRAY_SHARED_ROOT_FLAG));
+ assert(ARY_SHARED_ROOT_P(root) || OBJ_FROZEN(root));
assert(root_ptr <= ptr && ptr + len <= root_ptr + root_len);
ary_verify(root);
}
else if (ARY_EMBED_P(ary)) {
assert(!RARRAY_TRANSIENT_P(ary));
assert(!ARY_SHARED_P(ary));
- assert(RARRAY_LEN(ary) <= RARRAY_EMBED_LEN_MAX);
+ assert(RARRAY_LEN(ary) <= ary_embed_capa(ary));
}
else {
#if 1
@@ -258,7 +331,7 @@ void
rb_mem_clear(VALUE *mem, long size)
{
while (size--) {
- *mem++ = Qnil;
+ *mem++ = Qnil;
}
}
@@ -266,7 +339,7 @@ static void
ary_mem_clear(VALUE ary, long beg, long size)
{
RARRAY_PTR_USE_TRANSIENT(ary, ptr, {
- rb_mem_clear(ptr + beg, size);
+ rb_mem_clear(ptr + beg, size);
});
}
@@ -274,7 +347,7 @@ static inline void
memfill(register VALUE *mem, register long size, register VALUE val)
{
while (size--) {
- *mem++ = val;
+ *mem++ = val;
}
}
@@ -282,8 +355,8 @@ static void
ary_memfill(VALUE ary, long beg, long size, VALUE val)
{
RARRAY_PTR_USE_TRANSIENT(ary, ptr, {
- memfill(ptr + beg, size, val);
- RB_OBJ_WRITTEN(ary, Qundef, val);
+ memfill(ptr + beg, size, val);
+ RB_OBJ_WRITTEN(ary, Qundef, val);
});
}
@@ -352,14 +425,16 @@ ary_heap_free(VALUE ary)
}
}
-static void
+static size_t
ary_heap_realloc(VALUE ary, size_t new_capa)
{
+ size_t alloc_capa = new_capa;
size_t old_capa = ARY_HEAP_CAPA(ary);
if (RARRAY_TRANSIENT_P(ary)) {
if (new_capa <= old_capa) {
/* do nothing */
+ alloc_capa = old_capa;
}
else {
VALUE *new_ptr = rb_transient_heap_alloc(ary, sizeof(VALUE) * new_capa);
@@ -377,6 +452,8 @@ ary_heap_realloc(VALUE ary, size_t new_capa)
SIZED_REALLOC_N(RARRAY(ary)->as.heap.ptr, VALUE, new_capa, old_capa);
}
ary_verify(ary);
+
+ return alloc_capa;
}
#if USE_TRANSIENT_HEAP
@@ -384,14 +461,11 @@ static inline void
rb_ary_transient_heap_evacuate_(VALUE ary, int transient, int promote)
{
if (transient) {
+ assert(!ARY_SHARED_ROOT_P(ary));
+
VALUE *new_ptr;
const VALUE *old_ptr = ARY_HEAP_PTR(ary);
long capa = ARY_HEAP_CAPA(ary);
- long len = ARY_HEAP_LEN(ary);
-
- if (ARY_SHARED_ROOT_P(ary)) {
- capa = len;
- }
assert(ARY_OWNS_HEAP_P(ary));
assert(RARRAY_TRANSIENT_P(ary));
@@ -433,6 +507,27 @@ rb_ary_detransient(VALUE ary)
}
#endif
+void
+rb_ary_make_embedded(VALUE ary)
+{
+ assert(rb_ary_embeddable_p(ary));
+ if (!ARY_EMBED_P(ary)) {
+ const VALUE *buf = ARY_HEAP_PTR(ary);
+ long len = ARY_HEAP_LEN(ary);
+ bool was_transient = RARRAY_TRANSIENT_P(ary);
+
+ // FL_SET_EMBED also unsets the transient flag
+ FL_SET_EMBED(ary);
+ ARY_SET_EMBED_LEN(ary, len);
+
+ MEMCPY((void *)ARY_EMBED_PTR(ary), (void *)buf, VALUE, len);
+
+ if (!was_transient) {
+ ary_heap_free_ptr(ary, buf, len * sizeof(VALUE));
+ }
+ }
+}
+
static void
ary_resize_capa(VALUE ary, long capacity)
{
@@ -440,7 +535,8 @@ ary_resize_capa(VALUE ary, long capacity)
assert(!OBJ_FROZEN(ary));
assert(!ARY_SHARED_P(ary));
- if (capacity > RARRAY_EMBED_LEN_MAX) {
+ if (capacity > ary_embed_capa(ary)) {
+ size_t new_capa = capacity;
if (ARY_EMBED_P(ary)) {
long len = ARY_EMBED_LEN(ary);
VALUE *ptr = ary_heap_alloc(ary, capacity);
@@ -451,9 +547,9 @@ ary_resize_capa(VALUE ary, long capacity)
ARY_SET_HEAP_LEN(ary, len);
}
else {
- ary_heap_realloc(ary, capacity);
+ new_capa = ary_heap_realloc(ary, capacity);
}
- ARY_SET_CAPA(ary, capacity);
+ ARY_SET_CAPA(ary, new_capa);
}
else {
if (!ARY_EMBED_P(ary)) {
@@ -491,10 +587,10 @@ ary_double_capa(VALUE ary, long min)
long new_capa = ARY_CAPA(ary) / 2;
if (new_capa < ARY_DEFAULT_SIZE) {
- new_capa = ARY_DEFAULT_SIZE;
+ new_capa = ARY_DEFAULT_SIZE;
}
if (new_capa >= ARY_MAX_SIZE - min) {
- new_capa = (ARY_MAX_SIZE - min) / 2;
+ new_capa = (ARY_MAX_SIZE - min) / 2;
}
new_capa += min;
ary_resize_capa(ary, new_capa);
@@ -505,39 +601,40 @@ ary_double_capa(VALUE ary, long min)
static void
rb_ary_decrement_share(VALUE shared_root)
{
- if (shared_root) {
- long num = ARY_SHARED_ROOT_REFCNT(shared_root) - 1;
- if (num == 0) {
- rb_ary_free(shared_root);
- rb_gc_force_recycle(shared_root);
- }
- else if (num > 0) {
- ARY_SET_SHARED_ROOT_REFCNT(shared_root, num);
- }
+ if (!OBJ_FROZEN(shared_root)) {
+ long num = ARY_SHARED_ROOT_REFCNT(shared_root);
+ ARY_SET_SHARED_ROOT_REFCNT(shared_root, num - 1);
}
}
static void
rb_ary_unshare(VALUE ary)
{
- VALUE shared_root = RARRAY(ary)->as.heap.aux.shared_root;
+ VALUE shared_root = ARY_SHARED_ROOT(ary);
rb_ary_decrement_share(shared_root);
FL_UNSET_SHARED(ary);
}
-static inline void
-rb_ary_unshare_safe(VALUE ary)
+static void
+rb_ary_reset(VALUE ary)
{
- if (ARY_SHARED_P(ary) && !ARY_EMBED_P(ary)) {
- rb_ary_unshare(ary);
+ if (ARY_OWNS_HEAP_P(ary)) {
+ ary_heap_free(ary);
+ }
+ else if (ARY_SHARED_P(ary)) {
+ rb_ary_unshare(ary);
}
+
+ FL_SET_EMBED(ary);
+ ARY_SET_EMBED_LEN(ary, 0);
}
static VALUE
rb_ary_increment_share(VALUE shared_root)
{
- long num = ARY_SHARED_ROOT_REFCNT(shared_root);
- if (num >= 0) {
+ if (!OBJ_FROZEN(shared_root)) {
+ long num = ARY_SHARED_ROOT_REFCNT(shared_root);
+ assert(num >= 0);
ARY_SET_SHARED_ROOT_REFCNT(shared_root, num + 1);
}
return shared_root;
@@ -560,34 +657,33 @@ rb_ary_modify_check(VALUE ary)
}
void
-rb_ary_modify(VALUE ary)
+rb_ary_cancel_sharing(VALUE ary)
{
- rb_ary_modify_check(ary);
if (ARY_SHARED_P(ary)) {
- long shared_len, len = RARRAY_LEN(ary);
+ long shared_len, len = RARRAY_LEN(ary);
VALUE shared_root = ARY_SHARED_ROOT(ary);
ary_verify(shared_root);
- if (len <= RARRAY_EMBED_LEN_MAX) {
- const VALUE *ptr = ARY_HEAP_PTR(ary);
+ if (len <= ary_embed_capa(ary)) {
+ const VALUE *ptr = ARY_HEAP_PTR(ary);
FL_UNSET_SHARED(ary);
FL_SET_EMBED(ary);
- MEMCPY((VALUE *)ARY_EMBED_PTR(ary), ptr, VALUE, len);
+ MEMCPY((VALUE *)ARY_EMBED_PTR(ary), ptr, VALUE, len);
rb_ary_decrement_share(shared_root);
ARY_SET_EMBED_LEN(ary, len);
}
else if (ARY_SHARED_ROOT_OCCUPIED(shared_root) && len > ((shared_len = RARRAY_LEN(shared_root))>>1)) {
long shift = RARRAY_CONST_PTR_TRANSIENT(ary) - RARRAY_CONST_PTR_TRANSIENT(shared_root);
- FL_UNSET_SHARED(ary);
+ FL_UNSET_SHARED(ary);
ARY_SET_PTR(ary, RARRAY_CONST_PTR_TRANSIENT(shared_root));
- ARY_SET_CAPA(ary, shared_len);
+ ARY_SET_CAPA(ary, shared_len);
RARRAY_PTR_USE_TRANSIENT(ary, ptr, {
- MEMMOVE(ptr, ptr+shift, VALUE, len);
- });
+ MEMMOVE(ptr, ptr+shift, VALUE, len);
+ });
FL_SET_EMBED(shared_root);
rb_ary_decrement_share(shared_root);
- }
+ }
else {
VALUE *ptr = ary_heap_alloc(ary, len);
MEMCPY(ptr, ARY_HEAP_PTR(ary), VALUE, len);
@@ -596,11 +692,18 @@ rb_ary_modify(VALUE ary)
ARY_SET_PTR(ary, ptr);
}
- rb_gc_writebarrier_remember(ary);
+ rb_gc_writebarrier_remember(ary);
}
ary_verify(ary);
}
+void
+rb_ary_modify(VALUE ary)
+{
+ rb_ary_modify_check(ary);
+ rb_ary_cancel_sharing(ary);
+}
+
static VALUE
ary_ensure_room_for_push(VALUE ary, long add_len)
{
@@ -609,40 +712,40 @@ ary_ensure_room_for_push(VALUE ary, long add_len)
long capa;
if (old_len > ARY_MAX_SIZE - add_len) {
- rb_raise(rb_eIndexError, "index %ld too big", new_len);
+ rb_raise(rb_eIndexError, "index %ld too big", new_len);
}
if (ARY_SHARED_P(ary)) {
- if (new_len > RARRAY_EMBED_LEN_MAX) {
+ if (new_len > ary_embed_capa(ary)) {
VALUE shared_root = ARY_SHARED_ROOT(ary);
if (ARY_SHARED_ROOT_OCCUPIED(shared_root)) {
if (ARY_HEAP_PTR(ary) - RARRAY_CONST_PTR_TRANSIENT(shared_root) + new_len <= RARRAY_LEN(shared_root)) {
- rb_ary_modify_check(ary);
+ rb_ary_modify_check(ary);
ary_verify(ary);
ary_verify(shared_root);
return shared_root;
- }
- else {
- /* if array is shared, then it is likely it participate in push/shift pattern */
- rb_ary_modify(ary);
- capa = ARY_CAPA(ary);
- if (new_len > capa - (capa >> 6)) {
- ary_double_capa(ary, new_len);
- }
+ }
+ else {
+                /* if the array is shared, it likely participates in a push/shift pattern */
+ rb_ary_modify(ary);
+ capa = ARY_CAPA(ary);
+ if (new_len > capa - (capa >> 6)) {
+ ary_double_capa(ary, new_len);
+ }
ary_verify(ary);
- return ary;
- }
- }
- }
+ return ary;
+ }
+ }
+ }
ary_verify(ary);
rb_ary_modify(ary);
}
else {
- rb_ary_modify_check(ary);
+ rb_ary_modify_check(ary);
}
capa = ARY_CAPA(ary);
if (new_len > capa) {
- ary_double_capa(ary, new_len);
+ ary_double_capa(ary, new_len);
}
ary_verify(ary);
@@ -651,12 +754,16 @@ ary_ensure_room_for_push(VALUE ary, long add_len)
/*
* call-seq:
- * ary.freeze -> ary
+ * array.freeze -> self
+ *
+ * Freezes +self+; returns +self+:
*
- * Calls Object#freeze on +ary+ to prevent any further
- * modification. A RuntimeError will be raised if a modification
- * attempt is made.
+ * a = []
+ * a.frozen? # => false
+ * a.freeze
+ * a.frozen? # => true
*
+ * An attempt to modify a frozen \Array raises FrozenError.
*/
VALUE
@@ -676,18 +783,25 @@ VALUE
rb_ary_shared_with_p(VALUE ary1, VALUE ary2)
{
if (!ARY_EMBED_P(ary1) && ARY_SHARED_P(ary1) &&
- !ARY_EMBED_P(ary2) && ARY_SHARED_P(ary2) &&
- RARRAY(ary1)->as.heap.aux.shared_root == RARRAY(ary2)->as.heap.aux.shared_root &&
- RARRAY(ary1)->as.heap.len == RARRAY(ary2)->as.heap.len) {
- return Qtrue;
+ !ARY_EMBED_P(ary2) && ARY_SHARED_P(ary2) &&
+ ARY_SHARED_ROOT(ary1) == ARY_SHARED_ROOT(ary2) &&
+ ARY_HEAP_LEN(ary1) == ARY_HEAP_LEN(ary2)) {
+ return Qtrue;
}
return Qfalse;
}
static VALUE
-ary_alloc(VALUE klass)
+ary_alloc_embed(VALUE klass, long capa)
{
- NEWOBJ_OF(ary, struct RArray, klass, T_ARRAY | RARRAY_EMBED_FLAG | (RGENGC_WB_PROTECTED_ARRAY ? FL_WB_PROTECTED : 0));
+ size_t size = ary_embed_size(capa);
+ assert(rb_gc_size_allocatable_p(size));
+#if !USE_RVARGC
+ assert(size <= sizeof(struct RArray));
+#endif
+ RVARGC_NEWOBJ_OF(ary, struct RArray, klass,
+ T_ARRAY | RARRAY_EMBED_FLAG | (RGENGC_WB_PROTECTED_ARRAY ? FL_WB_PROTECTED : 0),
+ size);
/* Created array is:
* FL_SET_EMBED((VALUE)ary);
* ARY_SET_EMBED_LEN((VALUE)ary, 0);
@@ -696,10 +810,19 @@ ary_alloc(VALUE klass)
}
static VALUE
+ary_alloc_heap(VALUE klass)
+{
+ RVARGC_NEWOBJ_OF(ary, struct RArray, klass,
+ T_ARRAY | (RGENGC_WB_PROTECTED_ARRAY ? FL_WB_PROTECTED : 0),
+ sizeof(struct RArray));
+ return (VALUE)ary;
+}
+
+static VALUE
empty_ary_alloc(VALUE klass)
{
RUBY_DTRACE_CREATE_HOOK(ARRAY, 0);
- return ary_alloc(klass);
+ return ary_alloc_embed(klass, 0);
}
static VALUE
@@ -708,20 +831,24 @@ ary_new(VALUE klass, long capa)
VALUE ary,*ptr;
if (capa < 0) {
- rb_raise(rb_eArgError, "negative array size (or size too big)");
+ rb_raise(rb_eArgError, "negative array size (or size too big)");
}
if (capa > ARY_MAX_SIZE) {
- rb_raise(rb_eArgError, "array size too big");
+ rb_raise(rb_eArgError, "array size too big");
}
RUBY_DTRACE_CREATE_HOOK(ARRAY, capa);
- ary = ary_alloc(klass);
- if (capa > RARRAY_EMBED_LEN_MAX) {
+ if (ary_embeddable_p(capa)) {
+ ary = ary_alloc_embed(klass, capa);
+ }
+ else {
+ ary = ary_alloc_heap(klass);
+ ARY_SET_CAPA(ary, capa);
+ assert(!ARY_EMBED_P(ary));
+
ptr = ary_heap_alloc(ary, capa);
- FL_UNSET_EMBED(ary);
ARY_SET_PTR(ary, ptr);
- ARY_SET_CAPA(ary, capa);
ARY_SET_HEAP_LEN(ary, 0);
}
@@ -737,7 +864,7 @@ rb_ary_new_capa(long capa)
VALUE
rb_ary_new(void)
{
- return rb_ary_new2(RARRAY_EMBED_LEN_MAX);
+ return rb_ary_new_capa(0);
}
VALUE
@@ -751,7 +878,7 @@ VALUE
va_start(ar, n);
for (i=0; i<n; i++) {
- ARY_SET(ary, i, va_arg(ar, VALUE));
+ ARY_SET(ary, i, va_arg(ar, VALUE));
}
va_end(ar);
@@ -766,8 +893,8 @@ rb_ary_tmp_new_from_values(VALUE klass, long n, const VALUE *elts)
ary = ary_new(klass, n);
if (n > 0 && elts) {
- ary_memcpy(ary, 0, n, elts);
- ARY_SET_LEN(ary, n);
+ ary_memcpy(ary, 0, n, elts);
+ ARY_SET_LEN(ary, n);
}
return ary;
@@ -779,8 +906,79 @@ rb_ary_new_from_values(long n, const VALUE *elts)
return rb_ary_tmp_new_from_values(rb_cArray, n, elts);
}
+static VALUE
+ec_ary_alloc_embed(rb_execution_context_t *ec, VALUE klass, long capa)
+{
+ size_t size = ary_embed_size(capa);
+ assert(rb_gc_size_allocatable_p(size));
+#if !USE_RVARGC
+ assert(size <= sizeof(struct RArray));
+#endif
+ RB_RVARGC_EC_NEWOBJ_OF(ec, ary, struct RArray, klass,
+ T_ARRAY | RARRAY_EMBED_FLAG | (RGENGC_WB_PROTECTED_ARRAY ? FL_WB_PROTECTED : 0),
+ size);
+ /* Created array is:
+ * FL_SET_EMBED((VALUE)ary);
+ * ARY_SET_EMBED_LEN((VALUE)ary, 0);
+ */
+ return (VALUE)ary;
+}
+
+static VALUE
+ec_ary_alloc_heap(rb_execution_context_t *ec, VALUE klass)
+{
+ RB_RVARGC_EC_NEWOBJ_OF(ec, ary, struct RArray, klass,
+ T_ARRAY | (RGENGC_WB_PROTECTED_ARRAY ? FL_WB_PROTECTED : 0),
+ sizeof(struct RArray));
+ return (VALUE)ary;
+}
+
+static VALUE
+ec_ary_new(rb_execution_context_t *ec, VALUE klass, long capa)
+{
+ VALUE ary,*ptr;
+
+ if (capa < 0) {
+ rb_raise(rb_eArgError, "negative array size (or size too big)");
+ }
+ if (capa > ARY_MAX_SIZE) {
+ rb_raise(rb_eArgError, "array size too big");
+ }
+
+ RUBY_DTRACE_CREATE_HOOK(ARRAY, capa);
+
+ if (ary_embeddable_p(capa)) {
+ ary = ec_ary_alloc_embed(ec, klass, capa);
+ }
+ else {
+ ary = ec_ary_alloc_heap(ec, klass);
+ ARY_SET_CAPA(ary, capa);
+ assert(!ARY_EMBED_P(ary));
+
+ ptr = ary_heap_alloc(ary, capa);
+ ARY_SET_PTR(ary, ptr);
+ ARY_SET_HEAP_LEN(ary, 0);
+ }
+
+ return ary;
+}
+
+VALUE
+rb_ec_ary_new_from_values(rb_execution_context_t *ec, long n, const VALUE *elts)
+{
+ VALUE ary;
+
+ ary = ec_ary_new(ec, rb_cArray, n);
+ if (n > 0 && elts) {
+ ary_memcpy(ary, 0, n, elts);
+ ARY_SET_LEN(ary, n);
+ }
+
+ return ary;
+}
+
VALUE
-rb_ary_tmp_new(long capa)
+rb_ary_hidden_new(long capa)
{
VALUE ary = ary_new(0, capa);
rb_ary_transient_heap_evacuate(ary, TRUE);
@@ -788,12 +986,11 @@ rb_ary_tmp_new(long capa)
}
VALUE
-rb_ary_tmp_new_fill(long capa)
+rb_ary_hidden_new_fill(long capa)
{
- VALUE ary = ary_new(0, capa);
+ VALUE ary = rb_ary_hidden_new(capa);
ary_memfill(ary, 0, capa, Qnil);
ARY_SET_LEN(ary, capa);
- rb_ary_transient_heap_evacuate(ary, TRUE);
return ary;
}
@@ -831,64 +1028,69 @@ RUBY_FUNC_EXPORTED size_t
rb_ary_memsize(VALUE ary)
{
if (ARY_OWNS_HEAP_P(ary)) {
- return ARY_CAPA(ary) * sizeof(VALUE);
+ return ARY_CAPA(ary) * sizeof(VALUE);
}
else {
- return 0;
+ return 0;
}
}
-static inline void
-ary_discard(VALUE ary)
-{
- rb_ary_free(ary);
- RBASIC(ary)->flags |= RARRAY_EMBED_FLAG;
- RBASIC(ary)->flags &= ~(RARRAY_EMBED_LEN_MASK | RARRAY_TRANSIENT_FLAG);
-}
-
static VALUE
ary_make_shared(VALUE ary)
{
- assert(!ARY_EMBED_P(ary));
+ assert(USE_RVARGC || !ARY_EMBED_P(ary));
ary_verify(ary);
if (ARY_SHARED_P(ary)) {
return ARY_SHARED_ROOT(ary);
}
else if (ARY_SHARED_ROOT_P(ary)) {
- return ary;
+ return ary;
}
else if (OBJ_FROZEN(ary)) {
- rb_ary_transient_heap_evacuate(ary, TRUE);
- ary_shrink_capa(ary);
- FL_SET_SHARED_ROOT(ary);
- ARY_SET_SHARED_ROOT_REFCNT(ary, 1);
- return ary;
+ if (!ARY_EMBED_P(ary)) {
+ rb_ary_transient_heap_evacuate(ary, TRUE);
+ ary_shrink_capa(ary);
+ }
+ return ary;
}
else {
- long capa = ARY_CAPA(ary), len = RARRAY_LEN(ary);
- const VALUE *ptr;
- NEWOBJ_OF(shared, struct RArray, 0, T_ARRAY | (RGENGC_WB_PROTECTED_ARRAY ? FL_WB_PROTECTED : 0));
- VALUE vshared = (VALUE)shared;
-
rb_ary_transient_heap_evacuate(ary, TRUE);
- ptr = ARY_HEAP_PTR(ary);
-
- FL_UNSET_EMBED(vshared);
- ARY_SET_LEN(vshared, capa);
- ARY_SET_PTR(vshared, ptr);
- ary_mem_clear(vshared, len, capa - len);
- FL_SET_SHARED_ROOT(vshared);
- ARY_SET_SHARED_ROOT_REFCNT(vshared, 1);
- FL_SET_SHARED(ary);
+
+ long capa = ARY_CAPA(ary);
+ long len = RARRAY_LEN(ary);
+
+ /* Shared roots cannot be embedded because the reference count
+ * (refcnt) is stored in as.heap.aux.capa. */
+ VALUE shared = ary_alloc_heap(0);
+ FL_SET_SHARED_ROOT(shared);
+
+ if (ARY_EMBED_P(ary)) {
+ /* Cannot use ary_heap_alloc because we don't want to allocate
+ * on the transient heap. */
+ VALUE *ptr = ALLOC_N(VALUE, capa);
+ ARY_SET_PTR(shared, ptr);
+ ary_memcpy(shared, 0, len, RARRAY_PTR(ary));
+
+ FL_UNSET_EMBED(ary);
+ ARY_SET_HEAP_LEN(ary, len);
+ ARY_SET_PTR(ary, ptr);
+ }
+ else {
+ ARY_SET_PTR(shared, RARRAY_PTR(ary));
+ }
+
+ ARY_SET_LEN(shared, capa);
+ ary_mem_clear(shared, len, capa - len);
+ ARY_SET_SHARED_ROOT_REFCNT(shared, 1);
+ FL_SET_SHARED(ary);
RB_DEBUG_COUNTER_INC(obj_ary_shared_create);
- ARY_SET_SHARED(ary, vshared);
- OBJ_FREEZE(vshared);
+ ARY_SET_SHARED(ary, shared);
- ary_verify(vshared);
+ ary_verify(shared);
ary_verify(ary);
- return vshared;
+ return shared;
}
}
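A self-contained toy model of the shared-root scheme, for orientation only (hypothetical names, not Ruby API): a root owns the buffer and counts live views, while frozen roots, as the comments above note, skip counting entirely.

    typedef struct { long *buf; long capa; long refcnt; } root_t;
    typedef struct { root_t *root; long *ptr; long len; } view_t;

    /* cf. rb_ary_increment_share: a new view borrows a slice of the root */
    static view_t
    share(root_t *root, long off, long len)
    {
        root->refcnt++;
        return (view_t){ root, root->buf + off, len };
    }

    /* cf. rb_ary_decrement_share / rb_ary_unshare */
    static void
    unshare(view_t *v)
    {
        v->root->refcnt--;
        v->root = NULL;
    }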
@@ -897,8 +1099,10 @@ ary_make_substitution(VALUE ary)
{
long len = RARRAY_LEN(ary);
- if (len <= RARRAY_EMBED_LEN_MAX) {
- VALUE subst = rb_ary_new2(len);
+ if (ary_embeddable_p(len)) {
+ VALUE subst = rb_ary_new_capa(len);
+ assert(ARY_EMBED_P(subst));
+
ary_memcpy(subst, 0, len, RARRAY_CONST_PTR_TRANSIENT(ary));
ARY_SET_EMBED_LEN(subst, len);
return subst;
@@ -933,28 +1137,24 @@ rb_check_to_array(VALUE ary)
return rb_check_convert_type_with_id(ary, T_ARRAY, "Array", idTo_a);
}
+VALUE
+rb_to_array(VALUE ary)
+{
+ return rb_convert_type_with_id(ary, T_ARRAY, "Array", idTo_a);
+}
+
/*
* call-seq:
- * Array.try_convert(obj) -> new_array or nil
- *
- * Tries to convert +obj+ to an \Array.
+ * Array.try_convert(object) -> object, new_array, or nil
*
- * When +obj+ is an
- * {Array-convertible object}[doc/implicit_conversion_rdoc.html#label-Array-Convertible+Objects]
- * (implements +to_ary+),
- * returns the \Array object created by converting it:
+ * If +object+ is an \Array object, returns +object+.
*
- * class ToAryReturnsArray < Set
- * def to_ary
- * self.to_a
- * end
- * end
- * as = ToAryReturnsArray.new([:foo, :bar, :baz])
- * Array.try_convert(as) # => [:foo, :bar, :baz]
+ * Otherwise if +object+ responds to <tt>:to_ary</tt>,
+ * calls <tt>object.to_ary</tt> and returns the result.
*
- * Returns +nil+ if +obj+ is not \Array-convertible:
+ * Returns +nil+ if +object+ does not respond to <tt>:to_ary</tt>
*
- * Array.try_convert(:foo) # => nil
+ * Raises an exception unless <tt>object.to_ary</tt> returns an \Array object.
*/
static VALUE
@@ -963,6 +1163,30 @@ rb_ary_s_try_convert(VALUE dummy, VALUE ary)
return rb_check_array_type(ary);
}
+/* :nodoc: */
+static VALUE
+rb_ary_s_new(int argc, VALUE *argv, VALUE klass)
+{
+ VALUE ary;
+
+ if (klass == rb_cArray) {
+ long size = 0;
+ if (argc > 0 && FIXNUM_P(argv[0])) {
+ size = FIX2LONG(argv[0]);
+ if (size < 0) size = 0;
+ }
+
+ ary = ary_new(klass, size);
+
+ rb_obj_call_init_kw(ary, argc, argv, RB_PASS_CALLED_KEYWORDS);
+ }
+ else {
+ ary = rb_class_new_instance_pass_kw(argc, argv, klass);
+ }
+
+ return ary;
+}
+
/*
* call-seq:
* Array.new -> new_empty_array
@@ -973,88 +1197,42 @@ rb_ary_s_try_convert(VALUE dummy, VALUE ary)
*
* Returns a new \Array.
*
- * Argument +array+, if given, must be an
- * {Array-convertible object}[doc/implicit_conversion_rdoc.html#label-Array-Convertible+Objects]
- * (implements +to_ary+).
- *
- * Argument +size+, if given must be an
- * {Integer-convertible object}[doc/implicit_conversion_rdoc.html#label-Integer-Convertible+Objects]
- * (implements +to_int+).
- *
- * Argument +default_value+ may be any object.
- *
- * ---
- *
- * With no block and no arguments, returns a new empty \Array object:
- *
- * a = Array.new
- * a # => []
+ * With no block and no arguments, returns a new empty \Array object.
*
- * With no block and a single argument +array+,
+ * With no block and a single \Array argument +array+,
* returns a new \Array formed from +array+:
*
* a = Array.new([:foo, 'bar', 2])
* a.class # => Array
* a # => [:foo, "bar", 2]
*
- * With no block and a single argument +size+,
+ * With no block and a single \Integer argument +size+,
* returns a new \Array of the given size
* whose elements are all +nil+:
*
- * a = Array.new(0)
- * a # => []
* a = Array.new(3)
* a # => [nil, nil, nil]
*
- * With no block and arguments +size+ and +default_value+,
+ * With no block and arguments +size+ and +default_value+,
* returns an \Array of the given size;
* each element is that same +default_value+:
*
* a = Array.new(3, 'x')
* a # => ['x', 'x', 'x']
- * a[1].equal?(a[0]) # => true # Identity check.
- * a[2].equal?(a[0]) # => true # Identity check.
*
* With a block and argument +size+,
* returns an \Array of the given size;
* the block is called with each successive integer +index+;
* the element for that +index+ is the return value from the block:
*
- * a = Array.new(3) { |index| "Element #{index}" }
+ * a = Array.new(3) {|index| "Element #{index}" }
* a # => ["Element 0", "Element 1", "Element 2"]
*
+ * Raises ArgumentError if +size+ is negative.
+ *
* With a block and no argument,
* or a single argument +0+,
- * ignores the block and returns a new empty \Array:
- *
- * a = Array.new(0) { |n| fail 'Cannot happen' }
- * a # => []
- * a = Array.new { |n| fail 'Cannot happen' }
- * a # => []
- *
- * With a block and arguments +size+ and +default_value+,
- * gives a warning message
- * ('warning: block supersedes default value argument'),
- * and assigns elements from the block's return values:
- *
- * Array.new(4, :default) {} # => [nil, nil, nil, nil]
- *
- * ---
- *
- * Raises an exception if +size+ is a negative integer:
- *
- * # Raises ArgumentError (negative array size):
- * Array.new(-1)
- * # Raises ArgumentError (negative array size):
- * Array.new(-1, :default)
- * # Raises ArgumentError (negative array size):
- * Array.new(-1) { |n| }
- *
- * Raises an exception if the single argument is neither \Array-convertible
- * nor \Integer-convertible.
- *
- * # Raises TypeError (no implicit conversion of Symbol into Integer):
- * Array.new(:foo)
+ * ignores the block and returns a new empty \Array.
*/
static VALUE
@@ -1065,51 +1243,48 @@ rb_ary_initialize(int argc, VALUE *argv, VALUE ary)
rb_ary_modify(ary);
if (argc == 0) {
- if (ARY_OWNS_HEAP_P(ary) && ARY_HEAP_PTR(ary) != NULL) {
- ary_heap_free(ary);
- }
- rb_ary_unshare_safe(ary);
- FL_SET_EMBED(ary);
- ARY_SET_EMBED_LEN(ary, 0);
- if (rb_block_given_p()) {
- rb_warning("given block not used");
- }
- return ary;
+ rb_ary_reset(ary);
+ assert(ARY_EMBED_P(ary));
+ assert(ARY_EMBED_LEN(ary) == 0);
+ if (rb_block_given_p()) {
+ rb_warning("given block not used");
+ }
+ return ary;
}
rb_scan_args(argc, argv, "02", &size, &val);
if (argc == 1 && !FIXNUM_P(size)) {
- val = rb_check_array_type(size);
- if (!NIL_P(val)) {
- rb_ary_replace(ary, val);
- return ary;
- }
+ val = rb_check_array_type(size);
+ if (!NIL_P(val)) {
+ rb_ary_replace(ary, val);
+ return ary;
+ }
}
len = NUM2LONG(size);
/* NUM2LONG() may call size.to_int, ary can be frozen, modified, etc */
if (len < 0) {
- rb_raise(rb_eArgError, "negative array size");
+ rb_raise(rb_eArgError, "negative array size");
}
if (len > ARY_MAX_SIZE) {
- rb_raise(rb_eArgError, "array size too big");
+ rb_raise(rb_eArgError, "array size too big");
}
/* recheck after argument conversion */
rb_ary_modify(ary);
ary_resize_capa(ary, len);
if (rb_block_given_p()) {
- long i;
+ long i;
- if (argc == 2) {
- rb_warn("block supersedes default value argument");
- }
- for (i=0; i<len; i++) {
- rb_ary_store(ary, i, rb_yield(LONG2NUM(i)));
- ARY_SET_LEN(ary, i + 1);
- }
+ if (argc == 2) {
+ rb_warn("block supersedes default value argument");
+ }
+ for (i=0; i<len; i++) {
+ rb_ary_store(ary, i, rb_yield(LONG2NUM(i)));
+ ARY_SET_LEN(ary, i + 1);
+ }
}
else {
- ary_memfill(ary, 0, len, val);
- ARY_SET_LEN(ary, len);
+ ary_memfill(ary, 0, len, val);
+ ARY_SET_LEN(ary, len);
}
return ary;
}
@@ -1140,26 +1315,26 @@ rb_ary_store(VALUE ary, long idx, VALUE val)
long len = RARRAY_LEN(ary);
if (idx < 0) {
- idx += len;
- if (idx < 0) {
- rb_raise(rb_eIndexError, "index %ld too small for array; minimum: %ld",
- idx - len, -len);
- }
+ idx += len;
+ if (idx < 0) {
+ rb_raise(rb_eIndexError, "index %ld too small for array; minimum: %ld",
+ idx - len, -len);
+ }
}
else if (idx >= ARY_MAX_SIZE) {
- rb_raise(rb_eIndexError, "index %ld too big", idx);
+ rb_raise(rb_eIndexError, "index %ld too big", idx);
}
rb_ary_modify(ary);
if (idx >= ARY_CAPA(ary)) {
- ary_double_capa(ary, idx);
+ ary_double_capa(ary, idx);
}
if (idx > len) {
- ary_mem_clear(ary, len, idx - len + 1);
+ ary_mem_clear(ary, len, idx - len + 1);
}
if (idx >= len) {
- ARY_SET_LEN(ary, idx + 1);
+ ARY_SET_LEN(ary, idx + 1);
}
ARY_SET(ary, idx, val);
}
@@ -1171,17 +1346,20 @@ ary_make_partial(VALUE ary, VALUE klass, long offset, long len)
assert(len >= 0);
assert(offset+len <= RARRAY_LEN(ary));
- if (len <= RARRAY_EMBED_LEN_MAX) {
- VALUE result = ary_alloc(klass);
+ const size_t rarray_embed_capa_max = (sizeof(struct RArray) - offsetof(struct RArray, as.ary)) / sizeof(VALUE);
+
+ if ((size_t)len <= rarray_embed_capa_max && ary_embeddable_p(len)) {
+ VALUE result = ary_alloc_embed(klass, len);
ary_memcpy(result, 0, len, RARRAY_CONST_PTR_TRANSIENT(ary) + offset);
ARY_SET_EMBED_LEN(result, len);
return result;
}
else {
- VALUE shared, result = ary_alloc(klass);
- FL_UNSET_EMBED(result);
+ VALUE shared = ary_make_shared(ary);
+
+ VALUE result = ary_alloc_heap(klass);
+ assert(!ARY_EMBED_P(result));
- shared = ary_make_shared(ary);
ARY_SET_PTR(result, RARRAY_CONST_PTR_TRANSIENT(ary));
ARY_SET_LEN(result, RARRAY_LEN(ary));
rb_ary_set_shared(result, shared);
@@ -1196,9 +1374,59 @@ ary_make_partial(VALUE ary, VALUE klass, long offset, long len)
}
static VALUE
+ary_make_partial_step(VALUE ary, VALUE klass, long offset, long len, long step)
+{
+ assert(offset >= 0);
+ assert(len >= 0);
+ assert(offset+len <= RARRAY_LEN(ary));
+ assert(step != 0);
+
+ const VALUE *values = RARRAY_CONST_PTR_TRANSIENT(ary);
+ const long orig_len = len;
+
+ if (step > 0 && step >= len) {
+ VALUE result = ary_new(klass, 1);
+ VALUE *ptr = (VALUE *)ARY_EMBED_PTR(result);
+ RB_OBJ_WRITE(result, ptr, values[offset]);
+ ARY_SET_EMBED_LEN(result, 1);
+ return result;
+ }
+ else if (step < 0 && step < -len) {
+ step = -len;
+ }
+
+ long ustep = (step < 0) ? -step : step;
+ len = roomof(len, ustep);
+
+ long i;
+ long j = offset + ((step > 0) ? 0 : (orig_len - 1));
+
+ VALUE result = ary_new(klass, len);
+ if (ARY_EMBED_P(result)) {
+ VALUE *ptr = (VALUE *)ARY_EMBED_PTR(result);
+ for (i = 0; i < len; ++i) {
+ RB_OBJ_WRITE(result, ptr+i, values[j]);
+ j += step;
+ }
+ ARY_SET_EMBED_LEN(result, len);
+ }
+ else {
+ RARRAY_PTR_USE_TRANSIENT(result, ptr, {
+ for (i = 0; i < len; ++i) {
+ RB_OBJ_WRITE(result, ptr+i, values[j]);
+ j += step;
+ }
+ });
+ ARY_SET_LEN(result, len);
+ }
+
+ return result;
+}
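The index arithmetic above in compact form: the result length is roomof(len, |step|) (a ceiling division), and the walk starts at offset for positive steps or at the last element of the slice for negative ones. A self-contained sketch with hypothetical names:

    /* Stepped-copy sketch: dst must hold ceil(len / |step|) elements. */
    #define ROOMOF(x, y) (((x) + (y) - 1) / (y))

    static long
    stepped_copy(const long *src, long offset, long len, long step, long *dst)
    {
        long ustep = step < 0 ? -step : step;
        long out = ROOMOF(len, ustep);
        long j = offset + (step > 0 ? 0 : len - 1); /* start at either end */
        for (long i = 0; i < out; i++, j += step)
            dst[i] = src[j];
        return out;
    }

    /* e.g. src = {0,1,2,3,4}, offset 0, len 5, step 2 -> {0,2,4};
     *      the same slice with step -2 -> {4,2,0} */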
+
+static VALUE
ary_make_shared_copy(VALUE ary)
{
- return ary_make_partial(ary, rb_obj_class(ary), 0, RARRAY_LEN(ary));
+ return ary_make_partial(ary, rb_cArray, 0, RARRAY_LEN(ary));
}
enum ary_take_pos_flags
@@ -1223,33 +1451,32 @@ ary_take_first_or_last(int argc, const VALUE *argv, VALUE ary, enum ary_take_pos
n = NUM2LONG(argv[0]);
len = RARRAY_LEN(ary);
if (n > len) {
- n = len;
+ n = len;
}
else if (n < 0) {
- rb_raise(rb_eArgError, "negative array size");
+ rb_raise(rb_eArgError, "negative array size");
}
if (last) {
- offset = len - n;
+ offset = len - n;
}
return ary_make_partial(ary, rb_cArray, offset, n);
}
/*
* call-seq:
- * ary << obj -> self
+ * array << object -> self
*
- * Appends +obj+ to +ary+; returns +self+:
+ * Appends +object+ to +self+; returns +self+:
*
* a = [:foo, 'bar', 2]
- * a1 = a << :baz
- * a1 # => [:foo, "bar", 2, :baz]
- * a1.equal?(a) # => true
+ * a << :baz # => [:foo, "bar", 2, :baz]
*
- * Appends +obj+ as one element, even if it is another \Array:
+ * Appends +object+ as one element, even if it is another \Array:
*
* a = [:foo, 'bar', 2]
- * a1 = a << [3, 4] # =>
+ * a1 = a << [3, 4]
* a1 # => [:foo, "bar", 2, [3, 4]]
+ *
*/
VALUE
@@ -1258,7 +1485,7 @@ rb_ary_push(VALUE ary, VALUE item)
long idx = RARRAY_LEN((ary_verify(ary), ary));
VALUE target_ary = ary_ensure_room_for_push(ary, 1);
RARRAY_PTR_USE_TRANSIENT(ary, ptr, {
- RB_OBJ_WRITE(target_ary, &ptr[idx], item);
+ RB_OBJ_WRITE(target_ary, &ptr[idx], item);
});
ARY_SET_LEN(ary, idx + 1);
ary_verify(ary);
@@ -1277,21 +1504,24 @@ rb_ary_cat(VALUE ary, const VALUE *argv, long len)
/*
* call-seq:
- * ary.push(*objects) -> self
- * ary.append(*objects) -> self
+ * array.push(*objects) -> self
+ *
+ * Appends trailing elements.
*
- * Appends each argument in +objects+ to the array; returns +self+:
+ * Appends each argument in +objects+ to +self+; returns +self+:
*
* a = [:foo, 'bar', 2]
- * a1 = a.push(:baz, :bat)
- * a1 # => [:foo, "bar", 2, :baz, :bat]
- * a1.equal?(a) # => true
+ * a.push(:baz, :bat) # => [:foo, "bar", 2, :baz, :bat]
*
* Appends each argument as one element, even if it is another \Array:
*
* a = [:foo, 'bar', 2]
* a1 = a.push([:baz, :bat], [:bam, :bad])
* a1 # => [:foo, "bar", 2, [:baz, :bat], [:bam, :bad]]
+ *
+ * Array#append is an alias for Array#push.
+ *
+ * Related: #pop, #shift, #unshift.
*/
static VALUE
@@ -1308,10 +1538,10 @@ rb_ary_pop(VALUE ary)
n = RARRAY_LEN(ary);
if (n == 0) return Qnil;
if (ARY_OWNS_HEAP_P(ary) &&
- n * 3 < ARY_CAPA(ary) &&
- ARY_CAPA(ary) > ARY_DEFAULT_SIZE)
+ n * 3 < ARY_CAPA(ary) &&
+ ARY_CAPA(ary) > ARY_DEFAULT_SIZE)
{
- ary_resize_capa(ary, n * 2);
+ ary_resize_capa(ary, n * 2);
}
--n;
ARY_SET_LEN(ary, n);
@@ -1321,62 +1551,33 @@ rb_ary_pop(VALUE ary)
/*
* call-seq:
- * ary.pop -> obj or nil
- * ary.pop(n) -> new_array
- *
- * Removes and returns trailing elements from the array.
- *
- * Argument +n+, if given, must be an
- * {Integer-convertible object}[doc/implicit_conversion_rdoc.html#label-Integer-Convertible+Objects]
- * (implements +to_int+).
+ * array.pop -> object or nil
+ * array.pop(n) -> new_array
*
- * ---
+ * Removes and returns trailing elements.
*
- * When no argument is given and the array is not empty,
- * removes and returns the last element in the array:
+ * When no argument is given and +self+ is not empty,
+ * removes and returns the last element:
*
* a = [:foo, 'bar', 2]
* a.pop # => 2
* a # => [:foo, "bar"]
*
- * Returns +nil+ if the array is empty:
- *
- * a = []
- * a.pop # => nil
+ * Returns +nil+ if the array is empty.
*
- * ---
- *
- * When argument +n+ is given and is non-negative and in range,
+ * When a non-negative \Integer argument +n+ is given and is in range,
* removes and returns the last +n+ elements in a new \Array:
*
* a = [:foo, 'bar', 2]
- * a1 = a.pop(2)
- * a1 # => ["bar", 2]
- * a # => [:foo]
- * a.pop(0) # => []
+ * a.pop(2) # => ["bar", 2]
*
* If +n+ is positive and out of range,
* removes and returns all elements:
*
* a = [:foo, 'bar', 2]
- * a1 = a.pop(50)
- * a1 # => [:foo, "bar", 2]
- * a # => []
- * a.pop(1) # => []
- *
- * ---
- *
- * Raises an exception if +n+ is negative:
- *
- * a = [:foo, 'bar', 2]
- * # Raises ArgumentError (negative array size):
- * a1 = a.pop(-1)
- *
- * Raises an exception if +n+ is not \Integer-convertible (implements +to_int+).
+ * a.pop(50) # => [:foo, "bar", 2]
*
- * a = [:foo, 'bar', 2]
- * # Raises TypeError (no implicit conversion of String into Integer):
- * a1 = a.pop('x')
+ * Related: #push, #shift, #unshift.
*/
static VALUE
@@ -1385,7 +1586,7 @@ rb_ary_pop_m(int argc, VALUE *argv, VALUE ary)
VALUE result;
if (argc == 0) {
- return rb_ary_pop(ary);
+ return rb_ary_pop(ary);
}
rb_ary_modify_check(ary);
@@ -1401,55 +1602,49 @@ rb_ary_shift(VALUE ary)
VALUE top;
long len = RARRAY_LEN(ary);
- rb_ary_modify_check(ary);
- if (len == 0) return Qnil;
- top = RARRAY_AREF(ary, 0);
- if (!ARY_SHARED_P(ary)) {
- if (len < ARY_DEFAULT_SIZE) {
- RARRAY_PTR_USE_TRANSIENT(ary, ptr, {
- MEMMOVE(ptr, ptr+1, VALUE, len-1);
- }); /* WB: no new reference */
- ARY_INCREASE_LEN(ary, -1);
- ary_verify(ary);
- return top;
- }
- assert(!ARY_EMBED_P(ary)); /* ARY_EMBED_LEN_MAX < ARY_DEFAULT_SIZE */
-
- ARY_SET(ary, 0, Qnil);
- ary_make_shared(ary);
- }
- else if (ARY_SHARED_ROOT_OCCUPIED(ARY_SHARED_ROOT(ary))) {
- RARRAY_PTR_USE_TRANSIENT(ary, ptr, ptr[0] = Qnil);
+ if (len == 0) {
+ rb_ary_modify_check(ary);
+ return Qnil;
}
- ARY_INCREASE_PTR(ary, 1); /* shift ptr */
- ARY_INCREASE_LEN(ary, -1);
- ary_verify(ary);
+ top = RARRAY_AREF(ary, 0);
+
+ rb_ary_behead(ary, 1);
return top;
}
/*
* call-seq:
- * ary.shift -> obj or nil
- * ary.shift(n) -> new_ary
+ * array.shift -> object or nil
+ * array.shift(n) -> new_array
*
- * Removes the first element of +self+ and returns it (shifting all
- * other elements down by one). Returns +nil+ if the array
- * is empty.
+ * Removes and returns leading elements.
+ *
+ * When no argument is given, removes and returns the first element:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.shift # => :foo
+ * a # => ["bar", 2]
*
- * If a number +n+ is given, returns an array of the first +n+ elements
- * (or less) just like <code>array.slice!(0, n)</code> does. With +ary+
- * containing only the remainder elements, not including what was shifted to
- * +new_ary+. See also Array#unshift for the opposite effect.
+ * Returns +nil+ if +self+ is empty.
*
- * args = [ "-m", "-q", "filename" ]
- * args.shift #=> "-m"
- * args #=> ["-q", "filename"]
+ * When positive \Integer argument +n+ is given, removes the first +n+ elements;
+ * returns those elements in a new \Array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.shift(2) # => [:foo, "bar"]
+ * a # => [2]
+ *
+ * If +n+ is as large as or larger than <tt>self.length</tt>,
+ * removes all elements; returns those elements in a new \Array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.shift(3) # => [:foo, "bar", 2]
*
- * args = [ "-m", "-q", "filename" ]
- * args.shift(2) #=> ["-m", "-q"]
- * args #=> ["filename"]
+ * If +n+ is zero, returns a new empty \Array; +self+ is unmodified.
+ *
+ * Related: #push, #pop, #unshift.
*/
static VALUE
@@ -1459,7 +1654,7 @@ rb_ary_shift_m(int argc, VALUE *argv, VALUE ary)
long n;
if (argc == 0) {
- return rb_ary_shift(ary);
+ return rb_ary_shift(ary);
}
rb_ary_modify_check(ary);
@@ -1473,109 +1668,133 @@ rb_ary_shift_m(int argc, VALUE *argv, VALUE ary)
MJIT_FUNC_EXPORTED VALUE
rb_ary_behead(VALUE ary, long n)
{
- if (n<=0) return ary;
+ if (n <= 0) {
+ return ary;
+ }
rb_ary_modify_check(ary);
- if (ARY_SHARED_P(ary)) {
- if (ARY_SHARED_ROOT_OCCUPIED(ARY_SHARED_ROOT(ary))) {
- setup_occupied_shared:
- ary_mem_clear(ary, 0, n);
- }
- ARY_INCREASE_PTR(ary, n);
- }
- else {
- if (RARRAY_LEN(ary) < ARY_DEFAULT_SIZE) {
+
+ if (!ARY_SHARED_P(ary)) {
+ if (ARY_EMBED_P(ary) || RARRAY_LEN(ary) < ARY_DEFAULT_SIZE) {
RARRAY_PTR_USE_TRANSIENT(ary, ptr, {
- MEMMOVE(ptr, ptr+n, VALUE, RARRAY_LEN(ary)-n);
- }); /* WB: no new reference */
- }
- else {
- ary_make_shared(ary);
- goto setup_occupied_shared;
- }
+ MEMMOVE(ptr, ptr + n, VALUE, RARRAY_LEN(ary) - n);
+ }); /* WB: no new reference */
+ ARY_INCREASE_LEN(ary, -n);
+ ary_verify(ary);
+ return ary;
+ }
+
+ ary_mem_clear(ary, 0, n);
+ ary_make_shared(ary);
+ }
+ else if (ARY_SHARED_ROOT_OCCUPIED(ARY_SHARED_ROOT(ary))) {
+ ary_mem_clear(ary, 0, n);
}
- ARY_INCREASE_LEN(ary, -n);
+ ARY_INCREASE_PTR(ary, n);
+ ARY_INCREASE_LEN(ary, -n);
ary_verify(ary);
+
return ary;
}
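Stripped of sharing, write barriers, and GC, the two behead strategies above reduce to the following model: a short array pays one memmove, while a long one keeps its buffer and just advances the start pointer, which is the effect of ARY_INCREASE_PTR on a shared root. SMALL_LIMIT is a stand-in for ARY_DEFAULT_SIZE:

    #include <stdio.h>
    #include <string.h>

    #define SMALL_LIMIT 16   /* stand-in for ARY_DEFAULT_SIZE */

    static long *behead(long *start, long *len, long n) {
        if (*len < SMALL_LIMIT) {    /* copy path: slide elements down */
            memmove(start, start + n, (size_t)(*len - n) * sizeof(long));
            *len -= n;
            return start;
        }
        *len -= n;                   /* shared path: O(1) pointer bump */
        return start + n;
    }

    int main(void) {
        long buf[20], len = 20;
        for (long i = 0; i < 20; i++) buf[i] = i;
        long *head = behead(buf, &len, 3);
        printf("first=%ld len=%ld\n", head[0], len);   /* first=3 len=17 */
        return 0;
    }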
static VALUE
-ary_ensure_room_for_unshift(VALUE ary, int argc)
+make_room_for_unshift(VALUE ary, const VALUE *head, VALUE *sharedp, int argc, long capa, long len)
+{
+ if (head - sharedp < argc) {
+ long room = capa - len - argc;
+
+ room -= room >> 4;
+ MEMMOVE((VALUE *)sharedp + argc + room, head, VALUE, len);
+ head = sharedp + argc + room;
+ }
+ ARY_SET_PTR(ary, head - argc);
+ assert(ARY_SHARED_ROOT_OCCUPIED(ARY_SHARED_ROOT(ary)));
+
+ ary_verify(ary);
+ return ARY_SHARED_ROOT(ary);
+}
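The room calculation above is easiest to follow with concrete numbers (the capa, len, and argc values below are invented): of the free slots, room - room/16 end up in front of the new head, where future unshifts are cheap, and the small remainder stays behind for pushes.

    #include <stdio.h>

    int main(void) {
        long capa = 64, len = 20, argc = 4;
        long room = capa - len - argc;          /* 40 free slots in total */
        room -= room >> 4;                      /* 40 - 2 = 38 kept in front */
        long data_at  = argc + room;            /* elements move to index 42 */
        long head_at  = data_at - argc;         /* array starts at index 38 */
        long tail_gap = capa - (data_at + len); /* 2 slots left for pushes */
        printf("data@%ld head@%ld front=%ld tail=%ld\n",
               data_at, head_at, head_at, tail_gap);
        return 0;
    }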
+
+static VALUE
+ary_modify_for_unshift(VALUE ary, int argc)
{
long len = RARRAY_LEN(ary);
long new_len = len + argc;
long capa;
const VALUE *head, *sharedp;
- if (len > ARY_MAX_SIZE - argc) {
- rb_raise(rb_eIndexError, "index %ld too big", new_len);
- }
-
- if (ARY_SHARED_P(ary)) {
- VALUE shared_root = ARY_SHARED_ROOT(ary);
- capa = RARRAY_LEN(shared_root);
- if (ARY_SHARED_ROOT_OCCUPIED(shared_root) && capa > new_len) {
- rb_ary_modify_check(ary);
- head = RARRAY_CONST_PTR_TRANSIENT(ary);
- sharedp = RARRAY_CONST_PTR_TRANSIENT(shared_root);
- goto makeroom_if_need;
- }
- }
-
rb_ary_modify(ary);
capa = ARY_CAPA(ary);
if (capa - (capa >> 6) <= new_len) {
- ary_double_capa(ary, new_len);
+ ary_double_capa(ary, new_len);
}
/* use shared array for big "queues" */
- if (new_len > ARY_DEFAULT_SIZE * 4) {
+ if (new_len > ARY_DEFAULT_SIZE * 4 && !ARY_EMBED_P(ary)) {
ary_verify(ary);
/* make a room for unshifted items */
- capa = ARY_CAPA(ary);
- ary_make_shared(ary);
+ capa = ARY_CAPA(ary);
+ ary_make_shared(ary);
head = sharedp = RARRAY_CONST_PTR_TRANSIENT(ary);
- goto makeroom;
- makeroom_if_need:
- if (head - sharedp < argc) {
- long room;
- makeroom:
- room = capa - new_len;
- room -= room >> 4;
- MEMMOVE((VALUE *)sharedp + argc + room, head, VALUE, len);
- head = sharedp + argc + room;
- }
- ARY_SET_PTR(ary, head - argc);
- assert(ARY_SHARED_ROOT_OCCUPIED(ARY_SHARED_ROOT(ary)));
-
- ary_verify(ary);
- return ARY_SHARED_ROOT(ary);
+ return make_room_for_unshift(ary, head, (void *)sharedp, argc, capa, len);
}
else {
- /* sliding items */
+ /* sliding items */
RARRAY_PTR_USE_TRANSIENT(ary, ptr, {
- MEMMOVE(ptr + argc, ptr, VALUE, len);
- });
+ MEMMOVE(ptr + argc, ptr, VALUE, len);
+ });
ary_verify(ary);
- return ary;
+ return ary;
+ }
+}
+
+static VALUE
+ary_ensure_room_for_unshift(VALUE ary, int argc)
+{
+ long len = RARRAY_LEN(ary);
+ long new_len = len + argc;
+
+ if (len > ARY_MAX_SIZE - argc) {
+ rb_raise(rb_eIndexError, "index %ld too big", new_len);
+ }
+ else if (! ARY_SHARED_P(ary)) {
+ return ary_modify_for_unshift(ary, argc);
+ }
+ else {
+ VALUE shared_root = ARY_SHARED_ROOT(ary);
+ long capa = RARRAY_LEN(shared_root);
+
+ if (! ARY_SHARED_ROOT_OCCUPIED(shared_root)) {
+ return ary_modify_for_unshift(ary, argc);
+ }
+ else if (new_len > capa) {
+ return ary_modify_for_unshift(ary, argc);
+ }
+ else {
+ const VALUE * head = RARRAY_CONST_PTR_TRANSIENT(ary);
+ void *sharedp = (void *)RARRAY_CONST_PTR_TRANSIENT(shared_root);
+
+ rb_ary_modify_check(ary);
+ return make_room_for_unshift(ary, head, sharedp, argc, capa, len);
+ }
}
}
/*
* call-seq:
- * ary.unshift(obj, ...) -> ary
- * ary.prepend(obj, ...) -> ary
+ * array.unshift(*objects) -> self
+ *
+ * Prepends the given +objects+ to +self+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.unshift(:bam, :bat) # => [:bam, :bat, :foo, "bar", 2]
*
- * Prepends objects to the front of +self+, moving other elements upwards.
- * See also Array#shift for the opposite effect.
+ * Array#prepend is an alias for Array#unshift.
*
- * a = [ "b", "c", "d" ]
- * a.unshift("a") #=> ["a", "b", "c", "d"]
- * a.unshift(1, 2) #=> [ 1, 2, "a", "b", "c", "d"]
+ * Related: #push, #pop, #shift.
*/
static VALUE
@@ -1585,8 +1804,8 @@ rb_ary_unshift_m(int argc, VALUE *argv, VALUE ary)
VALUE target_ary;
if (argc == 0) {
- rb_ary_modify_check(ary);
- return ary;
+ rb_ary_modify_check(ary);
+ return ary;
}
target_ary = ary_ensure_room_for_unshift(ary, argc);
@@ -1608,7 +1827,7 @@ rb_ary_elt(VALUE ary, long offset)
long len = RARRAY_LEN(ary);
if (len == 0) return Qnil;
if (offset < 0 || len <= offset) {
- return Qnil;
+ return Qnil;
}
return RARRAY_AREF(ary, offset);
}
@@ -1620,7 +1839,7 @@ rb_ary_entry(VALUE ary, long offset)
}
VALUE
-rb_ary_subseq(VALUE ary, long beg, long len)
+rb_ary_subseq_step(VALUE ary, long beg, long len, long step)
{
VALUE klass;
long alen = RARRAY_LEN(ary);
@@ -1629,50 +1848,129 @@ rb_ary_subseq(VALUE ary, long beg, long len)
if (beg < 0 || len < 0) return Qnil;
if (alen < len || alen < beg + len) {
- len = alen - beg;
+ len = alen - beg;
}
- klass = rb_obj_class(ary);
+ klass = rb_cArray;
if (len == 0) return ary_new(klass, 0);
- return ary_make_partial(ary, klass, beg, len);
+ if (step == 0)
+ rb_raise(rb_eArgError, "slice step cannot be zero");
+ if (step == 1)
+ return ary_make_partial(ary, klass, beg, len);
+ else
+ return ary_make_partial_step(ary, klass, beg, len, step);
+}
+VALUE
+rb_ary_subseq(VALUE ary, long beg, long len)
+{
+ return rb_ary_subseq_step(ary, beg, len, 1);
}
static VALUE rb_ary_aref2(VALUE ary, VALUE b, VALUE e);
/*
* call-seq:
- * ary[index] -> obj or nil
- * ary[start, length] -> new_ary or nil
- * ary[range] -> new_ary or nil
- * ary.slice(index) -> obj or nil
- * ary.slice(start, length) -> new_ary or nil
- * ary.slice(range) -> new_ary or nil
- *
- * Element Reference --- Returns the element at +index+, or returns a
- * subarray starting at the +start+ index and continuing for +length+
- * elements, or returns a subarray specified by +range+ of indices.
- *
- * Negative indices count backward from the end of the array (-1 is the last
- * element). For +start+ and +range+ cases the starting index is just before
- * an element. Additionally, an empty array is returned when the starting
- * index for an element range is at the end of the array.
- *
- * Returns +nil+ if the index (or starting index) are out of range.
- *
- * a = [ "a", "b", "c", "d", "e" ]
- * a[2] + a[0] + a[1] #=> "cab"
- * a[6] #=> nil
- * a[1, 2] #=> [ "b", "c" ]
- * a[1..3] #=> [ "b", "c", "d" ]
- * a[4..7] #=> [ "e" ]
- * a[6..10] #=> nil
- * a[-3, 3] #=> [ "c", "d", "e" ]
- * # special cases
- * a[5] #=> nil
- * a[6, 1] #=> nil
- * a[5, 1] #=> []
- * a[5..10] #=> []
+ * array[index] -> object or nil
+ * array[start, length] -> object or nil
+ * array[range] -> object or nil
+ * array[aseq] -> object or nil
+ * array.slice(index) -> object or nil
+ * array.slice(start, length) -> object or nil
+ * array.slice(range) -> object or nil
+ * array.slice(aseq) -> object or nil
+ *
+ * Returns elements from +self+; does not modify +self+.
+ *
+ * When a single \Integer argument +index+ is given, returns the element at offset +index+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[0] # => :foo
+ * a[2] # => 2
+ * a # => [:foo, "bar", 2]
+ *
+ * If +index+ is negative, counts relative to the end of +self+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[-1] # => 2
+ * a[-2] # => "bar"
+ *
+ * If +index+ is out of range, returns +nil+.
+ *
+ * When two \Integer arguments +start+ and +length+ are given,
+ * returns a new \Array of size +length+ containing successive elements beginning at offset +start+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[0, 2] # => [:foo, "bar"]
+ * a[1, 2] # => ["bar", 2]
+ *
+ * If <tt>start + length</tt> is greater than <tt>self.length</tt>,
+ * returns all elements from offset +start+ to the end:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[0, 4] # => [:foo, "bar", 2]
+ * a[1, 3] # => ["bar", 2]
+ * a[2, 2] # => [2]
+ *
+ * If <tt>start == self.size</tt> and <tt>length >= 0</tt>,
+ * returns a new empty \Array.
+ *
+ * If +length+ is negative, returns +nil+.
+ *
+ * When a single \Range argument +range+ is given,
+ * treats <tt>range.min</tt> as +start+ above
+ * and <tt>range.size</tt> as +length+ above:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[0..1] # => [:foo, "bar"]
+ * a[1..2] # => ["bar", 2]
+ *
+ * Special case: If <tt>range.start == a.size</tt>, returns a new empty \Array.
+ *
+ * If <tt>range.end</tt> is negative, calculates the end index from the end:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[0..-1] # => [:foo, "bar", 2]
+ * a[0..-2] # => [:foo, "bar"]
+ * a[0..-3] # => [:foo]
+ *
+ * If <tt>range.start</tt> is negative, calculates the start index from the end:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[-1..2] # => [2]
+ * a[-2..2] # => ["bar", 2]
+ * a[-3..2] # => [:foo, "bar", 2]
+ *
+ * If <tt>range.start</tt> is larger than the array size, returns +nil+.
+ *
+ * a = [:foo, 'bar', 2]
+ * a[4..1] # => nil
+ * a[4..0] # => nil
+ * a[4..-1] # => nil
+ *
+ * When a single Enumerator::ArithmeticSequence argument +aseq+ is given,
+ * returns an \Array of elements corresponding to the indexes produced by
+ * the sequence.
+ *
+ * a = ['--', 'data1', '--', 'data2', '--', 'data3']
+ * a[(1..).step(2)] # => ["data1", "data2", "data3"]
+ *
+ * Unlike slicing with a \Range, if the start or the end of the arithmetic sequence
+ * is larger than the array size, raises RangeError.
+ *
+ * a = ['--', 'data1', '--', 'data2', '--', 'data3']
+ * a[(1..11).step(2)]
+ * # RangeError (((1..11).step(2)) out of range)
+ * a[(7..).step(2)]
+ * # RangeError (((7..).step(2)) out of range)
*
+ * If a single argument is given and its type is not one of those listed, tries to
+ * convert it to an \Integer, and raises if that is impossible:
+ *
+ * a = [:foo, 'bar', 2]
+ * # Raises TypeError (no implicit conversion of Symbol into Integer):
+ * a[:foo]
+ *
+ * Array#slice is an alias for Array#[].
*/
VALUE
@@ -1680,7 +1978,7 @@ rb_ary_aref(int argc, const VALUE *argv, VALUE ary)
{
rb_check_arity(argc, 1, 2);
if (argc == 2) {
- return rb_ary_aref2(ary, argv[0], argv[1]);
+ return rb_ary_aref2(ary, argv[0], argv[1]);
}
return rb_ary_aref1(ary, argv[0]);
}
@@ -1691,7 +1989,7 @@ rb_ary_aref2(VALUE ary, VALUE b, VALUE e)
long beg = NUM2LONG(b);
long len = NUM2LONG(e);
if (beg < 0) {
- beg += RARRAY_LEN(ary);
+ beg += RARRAY_LEN(ary);
}
return rb_ary_subseq(ary, beg, len);
}
@@ -1699,35 +1997,34 @@ rb_ary_aref2(VALUE ary, VALUE b, VALUE e)
MJIT_FUNC_EXPORTED VALUE
rb_ary_aref1(VALUE ary, VALUE arg)
{
- long beg, len;
+ long beg, len, step;
/* special case - speeding up */
if (FIXNUM_P(arg)) {
- return rb_ary_entry(ary, FIX2LONG(arg));
+ return rb_ary_entry(ary, FIX2LONG(arg));
}
- /* check if idx is Range */
- switch (rb_range_beg_len(arg, &beg, &len, RARRAY_LEN(ary), 0)) {
+ /* check if idx is Range or ArithmeticSequence */
+ switch (rb_arithmetic_sequence_beg_len_step(arg, &beg, &len, &step, RARRAY_LEN(ary), 0)) {
case Qfalse:
- break;
+ break;
case Qnil:
- return Qnil;
+ return Qnil;
default:
- return rb_ary_subseq(ary, beg, len);
+ return rb_ary_subseq_step(ary, beg, len, step);
}
+
return rb_ary_entry(ary, NUM2LONG(arg));
}
/*
* call-seq:
- * ary.at(index) -> obj or nil
+ * array.at(index) -> object
*
- * Returns the element at +index+. A negative index counts from the end of
- * +self+. Returns +nil+ if the index is out of range. See also
- * Array#[].
+ * Returns the element at \Integer offset +index+; does not modify +self+.
+ *
+ * a = [:foo, 'bar', 2]
+ * a.at(0) # => :foo
+ * a.at(2) # => 2
*
- * a = [ "a", "b", "c", "d", "e" ]
- * a.at(0) #=> "a"
- * a.at(-1) #=> "e"
*/
VALUE
@@ -1738,81 +2035,131 @@ rb_ary_at(VALUE ary, VALUE pos)
/*
* call-seq:
- * ary.first -> obj or nil
- * ary.first(n) -> new_ary
+ * array.first -> object or nil
+ * array.first(n) -> new_array
+ *
+ * Returns elements from +self+; does not modify +self+.
+ *
+ * When no argument is given, returns the first element:
*
- * Returns the first element, or the first +n+ elements, of the array.
- * If the array is empty, the first form returns +nil+, and the
- * second form returns an empty array. See also Array#last for
- * the opposite effect.
+ * a = [:foo, 'bar', 2]
+ * a.first # => :foo
+ * a # => [:foo, "bar", 2]
+ *
+ * If +self+ is empty, returns +nil+.
+ *
+ * When non-negative \Integer argument +n+ is given,
+ * returns the first +n+ elements in a new \Array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.first(2) # => [:foo, "bar"]
*
- * a = [ "q", "r", "s", "t" ]
- * a.first #=> "q"
- * a.first(2) #=> ["q", "r"]
+ * If <tt>n >= array.size</tt>, returns all elements:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.first(50) # => [:foo, "bar", 2]
+ *
+ * If <tt>n == 0</tt>, returns a new empty \Array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.first(0) # => []
+ *
+ * Related: #last.
*/
-
static VALUE
rb_ary_first(int argc, VALUE *argv, VALUE ary)
{
if (argc == 0) {
- if (RARRAY_LEN(ary) == 0) return Qnil;
- return RARRAY_AREF(ary, 0);
+ if (RARRAY_LEN(ary) == 0) return Qnil;
+ return RARRAY_AREF(ary, 0);
}
else {
- return ary_take_first_or_last(argc, argv, ary, ARY_TAKE_FIRST);
+ return ary_take_first_or_last(argc, argv, ary, ARY_TAKE_FIRST);
}
}
/*
* call-seq:
- * ary.last -> obj or nil
- * ary.last(n) -> new_ary
+ * array.last -> object or nil
+ * array.last(n) -> new_array
+ *
+ * Returns elements from +self+; +self+ is not modified.
*
- * Returns the last element(s) of +self+. If the array is empty,
- * the first form returns +nil+.
+ * When no argument is given, returns the last element:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.last # => 2
+ * a # => [:foo, "bar", 2]
*
- * See also Array#first for the opposite effect.
+ * If +self+ is empty, returns +nil+.
*
- * a = [ "w", "x", "y", "z" ]
- * a.last #=> "z"
- * a.last(2) #=> ["y", "z"]
+ * When non-negative \Integer argument +n+ is given,
+ * returns the last +n+ elements in a new \Array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.last(2) # => ["bar", 2]
+ *
+ * If <tt>n >= array.size</tt>, returns all elements:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.last(50) # => [:foo, "bar", 2]
+ *
+ * If <tt>n == 0</tt>, returns a new empty \Array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.last(0) # => []
+ *
+ * Related: #first.
*/
VALUE
rb_ary_last(int argc, const VALUE *argv, VALUE ary)
{
if (argc == 0) {
- long len = RARRAY_LEN(ary);
- if (len == 0) return Qnil;
- return RARRAY_AREF(ary, len-1);
+ long len = RARRAY_LEN(ary);
+ if (len == 0) return Qnil;
+ return RARRAY_AREF(ary, len-1);
}
else {
- return ary_take_first_or_last(argc, argv, ary, ARY_TAKE_LAST);
+ return ary_take_first_or_last(argc, argv, ary, ARY_TAKE_LAST);
}
}
/*
* call-seq:
- * ary.fetch(index) -> obj
- * ary.fetch(index, default) -> obj
- * ary.fetch(index) {|index| block} -> obj
- *
- * Tries to return the element at position +index+, but throws an IndexError
- * exception if the referenced +index+ lies outside of the array bounds. This
- * error can be prevented by supplying a second argument, which will act as a
- * +default+ value.
- *
- * Alternatively, if a block is given it will only be executed when an
- * invalid +index+ is referenced.
- *
- * Negative values of +index+ count from the end of the array.
- *
- * a = [ 11, 22, 33, 44 ]
- * a.fetch(1) #=> 22
- * a.fetch(-1) #=> 44
- * a.fetch(4, 'cat') #=> "cat"
- * a.fetch(100) {|i| puts "#{i} is out of bounds"}
- * #=> "100 is out of bounds"
+ * array.fetch(index) -> element
+ * array.fetch(index, default_value) -> element
+ * array.fetch(index) {|index| ... } -> element
+ *
+ * Returns the element at offset +index+.
+ *
+ * With the single \Integer argument +index+,
+ * returns the element at offset +index+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.fetch(1) # => "bar"
+ *
+ * If +index+ is negative, counts from the end of the array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.fetch(-1) # => 2
+ * a.fetch(-2) # => "bar"
+ *
+ * With arguments +index+ and +default_value+,
+ * returns the element at offset +index+ if index is in range,
+ * otherwise returns +default_value+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.fetch(1, nil) # => "bar"
+ *
+ * With argument +index+ and a block,
+ * returns the element at offset +index+ if index is in range
+ * (and the block is not called); otherwise calls the block with index and returns its return value:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.fetch(1) {|index| raise 'Cannot happen' } # => "bar"
+ * a.fetch(50) {|index| "Value for #{index}" } # => "Value for 50"
+ *
*/
static VALUE
@@ -1825,50 +2172,60 @@ rb_ary_fetch(int argc, VALUE *argv, VALUE ary)
rb_scan_args(argc, argv, "11", &pos, &ifnone);
block_given = rb_block_given_p();
if (block_given && argc == 2) {
- rb_warn("block supersedes default value argument");
+ rb_warn("block supersedes default value argument");
}
idx = NUM2LONG(pos);
if (idx < 0) {
- idx += RARRAY_LEN(ary);
+ idx += RARRAY_LEN(ary);
}
if (idx < 0 || RARRAY_LEN(ary) <= idx) {
- if (block_given) return rb_yield(pos);
- if (argc == 1) {
- rb_raise(rb_eIndexError, "index %ld outside of array bounds: %ld...%ld",
- idx - (idx < 0 ? RARRAY_LEN(ary) : 0), -RARRAY_LEN(ary), RARRAY_LEN(ary));
- }
- return ifnone;
+ if (block_given) return rb_yield(pos);
+ if (argc == 1) {
+ rb_raise(rb_eIndexError, "index %ld outside of array bounds: %ld...%ld",
+ idx - (idx < 0 ? RARRAY_LEN(ary) : 0), -RARRAY_LEN(ary), RARRAY_LEN(ary));
+ }
+ return ifnone;
}
return RARRAY_AREF(ary, idx);
}
/*
* call-seq:
- * ary.find_index(obj) -> int or nil
- * ary.find_index {|item| block} -> int or nil
- * ary.find_index -> Enumerator
- * ary.index(obj) -> int or nil
- * ary.index {|item| block} -> int or nil
- * ary.index -> Enumerator
+ * array.index(object) -> integer or nil
+ * array.index {|element| ... } -> integer or nil
+ * array.index -> new_enumerator
+ *
+ * Returns the index of a specified element.
*
- * Returns the _index_ of the first object in +ary+ such that the object is
- * <code>==</code> to +obj+.
+ * When argument +object+ is given but no block,
+ * returns the index of the first element +element+
+ * for which <tt>object == element</tt>:
*
- * If a block is given instead of an argument, returns the _index_ of the
- * first object for which the block returns +true+. Returns +nil+ if no
- * match is found.
+ * a = [:foo, 'bar', 2, 'bar']
+ * a.index('bar') # => 1
*
- * See also Array#rindex.
+ * Returns +nil+ if no such element is found.
*
- * An Enumerator is returned if neither a block nor argument is given.
+ * When a block is given but no argument,
+ * calls the block with each successive element;
+ * returns the index of the first element for which the block returns a truthy value:
*
- * a = [ "a", "b", "c" ]
- * a.index("b") #=> 1
- * a.index("z") #=> nil
- * a.index {|x| x == "b"} #=> 1
+ * a = [:foo, 'bar', 2, 'bar']
+ * a.index {|element| element == 'bar' } # => 1
*
- * Array#index is an alias for Array#find_index.
+ * Returns +nil+ if the block never returns a truthy value.
+ *
+ * When neither an argument nor a block is given, returns a new \Enumerator:
+ *
+ * a = [:foo, 'bar', 2]
+ * e = a.index
+ * e # => #<Enumerator: [:foo, "bar", 2]:index>
+ * e.each {|element| element == 'bar' } # => 1
+ *
+ * Array#find_index is an alias for Array#index.
+ *
+ * Related: #rindex.
*/
static VALUE
@@ -1878,49 +2235,58 @@ rb_ary_index(int argc, VALUE *argv, VALUE ary)
long i;
if (argc == 0) {
- RETURN_ENUMERATOR(ary, 0, 0);
- for (i=0; i<RARRAY_LEN(ary); i++) {
- if (RTEST(rb_yield(RARRAY_AREF(ary, i)))) {
- return LONG2NUM(i);
- }
- }
- return Qnil;
+ RETURN_ENUMERATOR(ary, 0, 0);
+ for (i=0; i<RARRAY_LEN(ary); i++) {
+ if (RTEST(rb_yield(RARRAY_AREF(ary, i)))) {
+ return LONG2NUM(i);
+ }
+ }
+ return Qnil;
}
rb_check_arity(argc, 0, 1);
val = argv[0];
if (rb_block_given_p())
- rb_warn("given block not used");
+ rb_warn("given block not used");
for (i=0; i<RARRAY_LEN(ary); i++) {
- VALUE e = RARRAY_AREF(ary, i);
- if (rb_equal(e, val)) {
- return LONG2NUM(i);
- }
+ VALUE e = RARRAY_AREF(ary, i);
+ if (rb_equal(e, val)) {
+ return LONG2NUM(i);
+ }
}
return Qnil;
}
/*
* call-seq:
- * ary.rindex(obj) -> int or nil
- * ary.rindex {|item| block} -> int or nil
- * ary.rindex -> Enumerator
+ * array.rindex(object) -> integer or nil
+ * array.rindex {|element| ... } -> integer or nil
+ * array.rindex -> new_enumerator
*
- * Returns the _index_ of the last object in +self+ <code>==</code> to +obj+.
+ * Returns the index of the last element for which <tt>object == element</tt>.
*
- * If a block is given instead of an argument, returns the _index_ of the
- * first object for which the block returns +true+, starting from the last
- * object.
+ * When argument +object+ is given but no block, returns the index of the last such element found:
*
- * Returns +nil+ if no match is found.
+ * a = [:foo, 'bar', 2, 'bar']
+ * a.rindex('bar') # => 3
*
- * See also Array#index.
+ * Returns +nil+ if no such object is found.
*
- * If neither block nor argument is given, an Enumerator is returned instead.
+ * When a block is given but no argument, calls the block with each successive element;
+ * returns the index of the last element for which the block returns a truthy value:
*
- * a = [ "a", "b", "b", "b", "c" ]
- * a.rindex("b") #=> 3
- * a.rindex("z") #=> nil
- * a.rindex {|x| x == "b"} #=> 3
+ * a = [:foo, 'bar', 2, 'bar']
+ * a.rindex {|element| element == 'bar' } # => 3
+ *
+ * Returns +nil+ if the block never returns a truthy value.
+ *
+ * When neither an argument nor a block is given, returns a new \Enumerator:
+ *
+ * a = [:foo, 'bar', 2, 'bar']
+ * e = a.rindex
+ * e # => #<Enumerator: [:foo, "bar", 2, "bar"]:rindex>
+ * e.each {|element| element == 'bar' } # => 3
+ *
+ * Related: #index.
*/
static VALUE
@@ -1930,25 +2296,25 @@ rb_ary_rindex(int argc, VALUE *argv, VALUE ary)
long i = RARRAY_LEN(ary), len;
if (argc == 0) {
- RETURN_ENUMERATOR(ary, 0, 0);
- while (i--) {
- if (RTEST(rb_yield(RARRAY_AREF(ary, i))))
- return LONG2NUM(i);
- if (i > (len = RARRAY_LEN(ary))) {
- i = len;
- }
- }
- return Qnil;
+ RETURN_ENUMERATOR(ary, 0, 0);
+ while (i--) {
+ if (RTEST(rb_yield(RARRAY_AREF(ary, i))))
+ return LONG2NUM(i);
+ if (i > (len = RARRAY_LEN(ary))) {
+ i = len;
+ }
+ }
+ return Qnil;
}
rb_check_arity(argc, 0, 1);
val = argv[0];
if (rb_block_given_p())
- rb_warn("given block not used");
+ rb_warn("given block not used");
while (i--) {
- VALUE e = RARRAY_AREF(ary, i);
- if (rb_equal(e, val)) {
- return LONG2NUM(i);
- }
+ VALUE e = RARRAY_AREF(ary, i);
+ if (rb_equal(e, val)) {
+ return LONG2NUM(i);
+ }
if (i > RARRAY_LEN(ary)) {
break;
}
@@ -1974,54 +2340,54 @@ rb_ary_splice(VALUE ary, long beg, long len, const VALUE *rptr, long rlen)
if (len < 0) rb_raise(rb_eIndexError, "negative length (%ld)", len);
olen = RARRAY_LEN(ary);
if (beg < 0) {
- beg += olen;
- if (beg < 0) {
- rb_raise(rb_eIndexError, "index %ld too small for array; minimum: %ld",
- beg - olen, -olen);
- }
+ beg += olen;
+ if (beg < 0) {
+ rb_raise(rb_eIndexError, "index %ld too small for array; minimum: %ld",
+ beg - olen, -olen);
+ }
}
if (olen < len || olen < beg + len) {
- len = olen - beg;
+ len = olen - beg;
}
{
const VALUE *optr = RARRAY_CONST_PTR_TRANSIENT(ary);
- rofs = (rptr >= optr && rptr < optr + olen) ? rptr - optr : -1;
+ rofs = (rptr >= optr && rptr < optr + olen) ? rptr - optr : -1;
}
if (beg >= olen) {
- VALUE target_ary;
- if (beg > ARY_MAX_SIZE - rlen) {
- rb_raise(rb_eIndexError, "index %ld too big", beg);
- }
- target_ary = ary_ensure_room_for_push(ary, rlen-len); /* len is 0 or negative */
- len = beg + rlen;
- ary_mem_clear(ary, olen, beg - olen);
- if (rlen > 0) {
+ VALUE target_ary;
+ if (beg > ARY_MAX_SIZE - rlen) {
+ rb_raise(rb_eIndexError, "index %ld too big", beg);
+ }
+ target_ary = ary_ensure_room_for_push(ary, rlen-len); /* len is 0 or negative */
+ len = beg + rlen;
+ ary_mem_clear(ary, olen, beg - olen);
+ if (rlen > 0) {
if (rofs != -1) rptr = RARRAY_CONST_PTR_TRANSIENT(ary) + rofs;
- ary_memcpy0(ary, beg, rlen, rptr, target_ary);
- }
- ARY_SET_LEN(ary, len);
+ ary_memcpy0(ary, beg, rlen, rptr, target_ary);
+ }
+ ARY_SET_LEN(ary, len);
}
else {
- long alen;
-
- if (olen - len > ARY_MAX_SIZE - rlen) {
- rb_raise(rb_eIndexError, "index %ld too big", olen + rlen - len);
- }
- rb_ary_modify(ary);
- alen = olen + rlen - len;
- if (alen >= ARY_CAPA(ary)) {
- ary_double_capa(ary, alen);
- }
-
- if (len != rlen) {
+ long alen;
+
+ if (olen - len > ARY_MAX_SIZE - rlen) {
+ rb_raise(rb_eIndexError, "index %ld too big", olen + rlen - len);
+ }
+ rb_ary_modify(ary);
+ alen = olen + rlen - len;
+ if (alen >= ARY_CAPA(ary)) {
+ ary_double_capa(ary, alen);
+ }
+
+ if (len != rlen) {
RARRAY_PTR_USE_TRANSIENT(ary, ptr,
MEMMOVE(ptr + beg + rlen, ptr + beg + len,
VALUE, olen - (beg + len)));
- ARY_SET_LEN(ary, alen);
- }
- if (rlen > 0) {
+ ARY_SET_LEN(ary, alen);
+ }
+ if (rlen > 0) {
if (rofs != -1) rptr = RARRAY_CONST_PTR_TRANSIENT(ary) + rofs;
/* give up wb-protected ary */
RB_OBJ_WB_UNPROTECT_FOR(ARRAY, ary);
@@ -2031,7 +2397,7 @@ rb_ary_splice(VALUE ary, long beg, long len, const VALUE *rptr, long rlen)
*/
RARRAY_PTR_USE_TRANSIENT(ary, ptr,
MEMMOVE(ptr + beg, rptr, VALUE, rlen));
- }
+ }
}
}
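Without the GC guards, capacity doubling, and the rofs bookkeeping that handles rptr aliasing into ary itself, the in-range branch of rb_ary_splice above is one memmove to resize the gap plus one copy of the replacement. A standalone model with plain longs:

    #include <stdio.h>
    #include <string.h>

    /* replace a[beg, len] with r[0, rlen]; returns the new length */
    static long splice(long *a, long olen, long beg, long len,
                       const long *r, long rlen) {
        /* open (or close) the gap so exactly rlen slots sit at beg */
        memmove(a + beg + rlen, a + beg + len,
                (size_t)(olen - (beg + len)) * sizeof(long));
        memcpy(a + beg, r, (size_t)rlen * sizeof(long));
        return olen + rlen - len;
    }

    int main(void) {
        long a[8] = {0, 1, 2, 3, 4};
        long r[3] = {7, 8, 9};
        long n = splice(a, 5, 1, 2, r, 3);   /* a[1..2] becomes 7, 8, 9 */
        for (long i = 0; i < n; i++) printf("%ld ", a[i]);  /* 0 7 8 9 3 4 */
        printf("\n");
        return 0;
    }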
@@ -2042,22 +2408,14 @@ rb_ary_set_len(VALUE ary, long len)
rb_ary_modify_check(ary);
if (ARY_SHARED_P(ary)) {
- rb_raise(rb_eRuntimeError, "can't set length of shared ");
+ rb_raise(rb_eRuntimeError, "can't set length of shared ");
}
if (len > (capa = (long)ARY_CAPA(ary))) {
- rb_bug("probable buffer overflow: %ld for %ld", len, capa);
+ rb_bug("probable buffer overflow: %ld for %ld", len, capa);
}
ARY_SET_LEN(ary, len);
}
-/*!
- * expands or shrinks \a ary to \a len elements.
- * expanded region will be filled with Qnil.
- * \param ary an array
- * \param len new size
- * \return \a ary
- * \post the size of \a ary is \a len.
- */
VALUE
rb_ary_resize(VALUE ary, long len)
{
@@ -2067,118 +2425,231 @@ rb_ary_resize(VALUE ary, long len)
olen = RARRAY_LEN(ary);
if (len == olen) return ary;
if (len > ARY_MAX_SIZE) {
- rb_raise(rb_eIndexError, "index %ld too big", len);
+ rb_raise(rb_eIndexError, "index %ld too big", len);
}
if (len > olen) {
- if (len >= ARY_CAPA(ary)) {
- ary_double_capa(ary, len);
- }
- ary_mem_clear(ary, olen, len - olen);
- ARY_SET_LEN(ary, len);
+ if (len >= ARY_CAPA(ary)) {
+ ary_double_capa(ary, len);
+ }
+ ary_mem_clear(ary, olen, len - olen);
+ ARY_SET_LEN(ary, len);
}
else if (ARY_EMBED_P(ary)) {
ARY_SET_EMBED_LEN(ary, len);
}
- else if (len <= RARRAY_EMBED_LEN_MAX) {
- VALUE tmp[RARRAY_EMBED_LEN_MAX];
- MEMCPY(tmp, ARY_HEAP_PTR(ary), VALUE, len);
- ary_discard(ary);
- MEMCPY((VALUE *)ARY_EMBED_PTR(ary), tmp, VALUE, len); /* WB: no new reference */
+ else if (len <= ary_embed_capa(ary)) {
+ const VALUE *ptr = ARY_HEAP_PTR(ary);
+ long ptr_capa = ARY_HEAP_SIZE(ary);
+ bool is_malloc_ptr = !ARY_SHARED_P(ary) && !RARRAY_TRANSIENT_P(ary);
+
+ FL_UNSET(ary, RARRAY_TRANSIENT_FLAG);
+ FL_SET_EMBED(ary);
+
+ MEMCPY((VALUE *)ARY_EMBED_PTR(ary), ptr, VALUE, len); /* WB: no new reference */
ARY_SET_EMBED_LEN(ary, len);
+
+ if (is_malloc_ptr) ruby_sized_xfree((void *)ptr, ptr_capa);
}
else {
- if (olen > len + ARY_DEFAULT_SIZE) {
- ary_heap_realloc(ary, len);
- ARY_SET_CAPA(ary, len);
- }
- ARY_SET_HEAP_LEN(ary, len);
+ if (olen > len + ARY_DEFAULT_SIZE) {
+ size_t new_capa = ary_heap_realloc(ary, len);
+ ARY_SET_CAPA(ary, new_capa);
+ }
+ ARY_SET_HEAP_LEN(ary, len);
}
ary_verify(ary);
return ary;
}
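The shrink-to-embedded branch above has an ordering constraint worth spelling out: the heap pointer is saved before the flags change, because the embedded elements reuse the union storage that held the len/ptr pair. A rough standalone model (the struct layout here is invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define EMBED_CAPA 3

    struct Ary {
        int embedded;
        union {
            struct { long len; long *ptr; } heap;
            long ary[EMBED_CAPA];         /* overlays len and ptr */
        } as;
    };

    static void shrink_to_embed(struct Ary *a, long len) {
        long *ptr = a->as.heap.ptr;       /* grab before the union is reused */
        a->embedded = 1;
        memcpy(a->as.ary, ptr, (size_t)len * sizeof(long));
        free(ptr);                        /* the is_malloc_ptr case above */
    }

    int main(void) {
        struct Ary a = { 0 };
        a.as.heap.len = 10;
        a.as.heap.ptr = malloc(10 * sizeof(long));
        for (long i = 0; i < 10; i++) a.as.heap.ptr[i] = i * i;
        shrink_to_embed(&a, 2);
        printf("%ld %ld\n", a.as.ary[0], a.as.ary[1]);   /* 0 1 */
        return 0;
    }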
+static VALUE
+ary_aset_by_rb_ary_store(VALUE ary, long key, VALUE val)
+{
+ rb_ary_store(ary, key, val);
+ return val;
+}
+
+static VALUE
+ary_aset_by_rb_ary_splice(VALUE ary, long beg, long len, VALUE val)
+{
+ VALUE rpl = rb_ary_to_ary(val);
+ rb_ary_splice(ary, beg, len, RARRAY_CONST_PTR_TRANSIENT(rpl), RARRAY_LEN(rpl));
+ RB_GC_GUARD(rpl);
+ return val;
+}
+
/*
* call-seq:
- * ary[index] = obj -> obj
- * ary[start, length] = obj or other_ary or nil -> obj or other_ary or nil
- * ary[range] = obj or other_ary or nil -> obj or other_ary or nil
- *
- * Element Assignment --- Sets the element at +index+, or replaces a subarray
- * from the +start+ index for +length+ elements, or replaces a subarray
- * specified by the +range+ of indices.
- *
- * If indices are greater than the current capacity of the array, the array
- * grows automatically. Elements are inserted into the array at +start+ if
- * +length+ is zero.
- *
- * Negative indices will count backward from the end of the array. For
- * +start+ and +range+ cases the starting index is just before an element.
- *
- * An IndexError is raised if a negative index points past the beginning of
- * the array.
- *
- * See also Array#push, and Array#unshift.
- *
- * a = Array.new
- * a[4] = "4"; #=> [nil, nil, nil, nil, "4"]
- * a[0, 3] = [ 'a', 'b', 'c' ] #=> ["a", "b", "c", nil, "4"]
- * a[1..2] = [ 1, 2 ] #=> ["a", 1, 2, nil, "4"]
- * a[0, 2] = "?" #=> ["?", 2, nil, "4"]
- * a[0..2] = "A" #=> ["A", "4"]
- * a[-1] = "Z" #=> ["A", "Z"]
- * a[1..-1] = nil #=> ["A", nil]
- * a[1..-1] = [] #=> ["A"]
- * a[0, 0] = [ 1, 2 ] #=> [1, 2, "A"]
- * a[3, 0] = "B" #=> [1, 2, "A", "B"]
+ * array[index] = object -> object
+ * array[start, length] = object -> object
+ * array[range] = object -> object
+ *
+ * Assigns elements in +self+; returns the given +object+.
+ *
+ * When \Integer argument +index+ is given, assigns +object+ to an element in +self+.
+ *
+ * If +index+ is non-negative, assigns +object+ the element at offset +index+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[0] = 'foo' # => "foo"
+ * a # => ["foo", "bar", 2]
+ *
+ * If +index+ is greater than <tt>self.length</tt>, extends the array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[7] = 'foo' # => "foo"
+ * a # => [:foo, "bar", 2, nil, nil, nil, nil, "foo"]
+ *
+ * If +index+ is negative, counts backwards from the end of the array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[-1] = 'two' # => "two"
+ * a # => [:foo, "bar", "two"]
+ *
+ * When \Integer arguments +start+ and +length+ are given and +object+ is not an \Array,
+ * removes <tt>length - 1</tt> elements beginning at offset +start+,
+ * and assigns +object+ at offset +start+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[0, 2] = 'foo' # => "foo"
+ * a # => ["foo", 2]
+ *
+ * If +start+ is negative, counts backwards from the end of the array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[-2, 2] = 'foo' # => "foo"
+ * a # => [:foo, "foo"]
+ *
+ * If +start+ is non-negative and outside the array (<tt> >= self.size</tt>),
+ * extends the array with +nil+, assigns +object+ at offset +start+,
+ * and ignores +length+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[6, 50] = 'foo' # => "foo"
+ * a # => [:foo, "bar", 2, nil, nil, nil, "foo"]
+ *
+ * If +length+ is zero, shifts elements at and following offset +start+
+ * and assigns +object+ at offset +start+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[1, 0] = 'foo' # => "foo"
+ * a # => [:foo, "foo", "bar", 2]
+ *
+ * If +length+ is too large for the existing array, does not extend the array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[1, 5] = 'foo' # => "foo"
+ * a # => [:foo, "foo"]
+ *
+ * When \Range argument +range+ is given and +object+ is not an \Array,
+ * removes <tt>length - 1</tt> elements beginning at offset +start+,
+ * and assigns +object+ at offset +start+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[0..1] = 'foo' # => "foo"
+ * a # => ["foo", 2]
+ *
+ * If <tt>range.begin</tt> is negative, counts backwards from the end of the array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[-2..2] = 'foo' # => "foo"
+ * a # => [:foo, "foo"]
+ *
+ * If the array length is less than <tt>range.begin</tt>,
+ * assigns +object+ at offset <tt>range.begin</tt>, and ignores +length+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[6..50] = 'foo' # => "foo"
+ * a # => [:foo, "bar", 2, nil, nil, nil, "foo"]
+ *
+ * If <tt>range.end</tt> is zero, shifts elements at and following offset +start+
+ * and assigns +object+ at offset +start+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[1..0] = 'foo' # => "foo"
+ * a # => [:foo, "foo", "bar", 2]
+ *
+ * If <tt>range.end</tt> is negative, assigns +object+ at offset +start+,
+ * retains <tt>range.end.abs - 1</tt> elements past that, and removes those beyond:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[1..-1] = 'foo' # => "foo"
+ * a # => [:foo, "foo"]
+ * a = [:foo, 'bar', 2]
+ * a[1..-2] = 'foo' # => "foo"
+ * a # => [:foo, "foo", 2]
+ * a = [:foo, 'bar', 2]
+ * a[1..-3] = 'foo' # => "foo"
+ * a # => [:foo, "foo", "bar", 2]
+ *
+ * If <tt>range.end</tt> is too large for the existing array,
+ * replaces array elements, but does not extend the array with +nil+ values:
+ *
+ * a = [:foo, 'bar', 2]
+ * a[1..5] = 'foo' # => "foo"
+ * a # => [:foo, "foo"]
+ *
*/
static VALUE
rb_ary_aset(int argc, VALUE *argv, VALUE ary)
{
long offset, beg, len;
- VALUE rpl;
+ rb_check_arity(argc, 2, 3);
+ rb_ary_modify_check(ary);
if (argc == 3) {
- rb_ary_modify_check(ary);
- beg = NUM2LONG(argv[0]);
- len = NUM2LONG(argv[1]);
- goto range;
+ beg = NUM2LONG(argv[0]);
+ len = NUM2LONG(argv[1]);
+ return ary_aset_by_rb_ary_splice(ary, beg, len, argv[2]);
}
- rb_check_arity(argc, 2, 2);
- rb_ary_modify_check(ary);
if (FIXNUM_P(argv[0])) {
- offset = FIX2LONG(argv[0]);
- goto fixnum;
+ offset = FIX2LONG(argv[0]);
+ return ary_aset_by_rb_ary_store(ary, offset, argv[1]);
}
if (rb_range_beg_len(argv[0], &beg, &len, RARRAY_LEN(ary), 1)) {
- /* check if idx is Range */
- range:
- rpl = rb_ary_to_ary(argv[argc-1]);
- rb_ary_splice(ary, beg, len, RARRAY_CONST_PTR_TRANSIENT(rpl), RARRAY_LEN(rpl));
- RB_GC_GUARD(rpl);
- return argv[argc-1];
+ /* check if idx is Range */
+ return ary_aset_by_rb_ary_splice(ary, beg, len, argv[1]);
}
offset = NUM2LONG(argv[0]);
-fixnum:
- rb_ary_store(ary, offset, argv[1]);
- return argv[1];
+ return ary_aset_by_rb_ary_store(ary, offset, argv[1]);
}
/*
* call-seq:
- * ary.insert(index, obj...) -> ary
+ * array.insert(index, *objects) -> self
*
- * Inserts the given values before the element with the given +index+.
+ * Inserts given +objects+ before or after the element at \Integer index +index+;
+ * returns +self+.
*
- * Negative indices count backwards from the end of the array, where +-1+ is
- * the last element. If a negative index is used, the given values will be
- * inserted after that element, so using an index of +-1+ will insert the
- * values at the end of the array.
+ * When +index+ is non-negative, inserts all given +objects+
+ * before the element at offset +index+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.insert(1, :bat, :bam) # => [:foo, :bat, :bam, "bar", 2]
+ *
+ * Extends the array if +index+ is beyond the array (<tt>index >= self.size</tt>):
+ *
+ * a = [:foo, 'bar', 2]
+ * a.insert(5, :bat, :bam)
+ * a # => [:foo, "bar", 2, nil, nil, :bat, :bam]
+ *
+ * Does nothing if no objects given:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.insert(1)
+ * a.insert(50)
+ * a.insert(-50)
+ * a # => [:foo, "bar", 2]
+ *
+ * When +index+ is negative, inserts all given +objects+
+ * _after_ the element at offset <tt>index + self.size</tt>:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.insert(-2, :bat, :bam)
+ * a # => [:foo, "bar", :bat, :bam, 2]
*
- * a = %w{ a b c d }
- * a.insert(2, 99) #=> ["a", "b", 99, "c", "d"]
- * a.insert(-2, 1, 2, 3) #=> ["a", "b", 99, "c", 1, 2, 3, "d"]
*/
static VALUE
@@ -2191,15 +2662,15 @@ rb_ary_insert(int argc, VALUE *argv, VALUE ary)
pos = NUM2LONG(argv[0]);
if (argc == 1) return ary;
if (pos == -1) {
- pos = RARRAY_LEN(ary);
+ pos = RARRAY_LEN(ary);
}
else if (pos < 0) {
- long minpos = -RARRAY_LEN(ary) - 1;
- if (pos < minpos) {
- rb_raise(rb_eIndexError, "index %ld too small for array; minimum: %ld",
- pos, minpos);
- }
- pos++;
+ long minpos = -RARRAY_LEN(ary) - 1;
+ if (pos < minpos) {
+ rb_raise(rb_eIndexError, "index %ld too small for array; minimum: %ld",
+ pos, minpos);
+ }
+ pos++;
}
rb_ary_splice(ary, pos, 0, argv + 1, argc - 1);
return ary;
@@ -2216,20 +2687,47 @@ ary_enum_length(VALUE ary, VALUE args, VALUE eobj)
/*
* call-seq:
- * ary.each {|item| block} -> ary
- * ary.each -> Enumerator
+ * array.each {|element| ... } -> self
+ * array.each -> Enumerator
+ *
+ * Iterates over array elements.
+ *
+ * When a block is given, passes each successive array element to the block;
+ * returns +self+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.each {|element| puts "#{element.class} #{element}" }
+ *
+ * Output:
+ *
+ * Symbol foo
+ * String bar
+ * Integer 2
+ *
+ * Allows the array to be modified during iteration:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.each {|element| puts element; a.clear if element.to_s.start_with?('b') }
*
- * Calls the given block once for each element in +self+, passing that element
- * as a parameter. Returns the array itself.
+ * Output:
*
- * If no block is given, an Enumerator is returned.
+ * foo
+ * bar
*
- * a = [ "a", "b", "c" ]
- * a.each {|x| print x, " -- " }
+ * When no block is given, returns a new \Enumerator:
+ *
+ * a = [:foo, 'bar', 2]
+ * e = a.each
+ * e # => #<Enumerator: [:foo, "bar", 2]:each>
+ * a1 = e.each {|element| puts "#{element.class} #{element}" }
+ *
+ * Output:
*
- * produces:
+ * Symbol foo
+ * String bar
+ * Integer 2
*
- * a -- b -- c --
+ * Related: #each_index, #reverse_each.
*/
VALUE
@@ -2239,27 +2737,54 @@ rb_ary_each(VALUE ary)
ary_verify(ary);
RETURN_SIZED_ENUMERATOR(ary, 0, 0, ary_enum_length);
for (i=0; i<RARRAY_LEN(ary); i++) {
- rb_yield(RARRAY_AREF(ary, i));
+ rb_yield(RARRAY_AREF(ary, i));
}
return ary;
}
/*
* call-seq:
- * ary.each_index {|index| block} -> ary
- * ary.each_index -> Enumerator
+ * array.each_index {|index| ... } -> self
+ * array.each_index -> Enumerator
+ *
+ * Iterates over array indexes.
+ *
+ * When a block is given, passes each successive array index to the block;
+ * returns +self+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.each_index {|index| puts "#{index} #{a[index]}" }
+ *
+ * Output:
+ *
+ * 0 foo
+ * 1 bar
+ * 2 2
+ *
+ * Allows the array to be modified during iteration:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.each_index {|index| puts index; a.clear if index > 0 }
+ *
+ * Output:
+ *
+ * 0
+ * 1
*
- * Same as Array#each, but passes the +index+ of the element instead of the
- * element itself.
+ * When no block is given, returns a new \Enumerator:
*
- * An Enumerator is returned if no block is given.
+ * a = [:foo, 'bar', 2]
+ * e = a.each_index
+ * e # => #<Enumerator: [:foo, "bar", 2]:each_index>
+ * a1 = e.each {|index| puts "#{index} #{a[index]}"}
*
- * a = [ "a", "b", "c" ]
- * a.each_index {|x| print x, " -- " }
+ * Output:
*
- * produces:
+ * 0 foo
+ * 1 bar
+ * 2 2
*
- * 0 -- 1 -- 2 --
+ * Related: #each, #reverse_each.
*/
static VALUE
@@ -2269,24 +2794,54 @@ rb_ary_each_index(VALUE ary)
RETURN_SIZED_ENUMERATOR(ary, 0, 0, ary_enum_length);
for (i=0; i<RARRAY_LEN(ary); i++) {
- rb_yield(LONG2NUM(i));
+ rb_yield(LONG2NUM(i));
}
return ary;
}
/*
* call-seq:
- * ary.reverse_each {|item| block} -> ary
- * ary.reverse_each -> Enumerator
+ * array.reverse_each {|element| ... } -> self
+ * array.reverse_each -> Enumerator
+ *
+ * Iterates backwards over array elements.
*
- * Same as Array#each, but traverses +self+ in reverse order.
+ * When a block is given, passes, in reverse order, each element to the block;
+ * returns +self+:
*
- * a = [ "a", "b", "c" ]
- * a.reverse_each {|x| print x, " " }
+ * a = [:foo, 'bar', 2]
+ * a.reverse_each {|element| puts "#{element.class} #{element}" }
+ *
+ * Output:
+ *
+ * Integer 2
+ * String bar
+ * Symbol foo
+ *
+ * Allows the array to be modified during iteration:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.reverse_each {|element| puts element; a.clear if element.to_s.start_with?('b') }
*
- * produces:
+ * Output:
*
- * c b a
+ * 2
+ * bar
+ *
+ * When no block is given, returns a new \Enumerator:
+ *
+ * a = [:foo, 'bar', 2]
+ * e = a.reverse_each
+ * e # => #<Enumerator: [:foo, "bar", 2]:reverse_each>
+ * a1 = e.each {|element| puts "#{element.class} #{element}" }
+ *
+ * Output:
+ *
+ * Integer 2
+ * String bar
+ * Symbol foo
+ *
+ * Related: #each, #each_index.
*/
static VALUE
@@ -2297,24 +2852,21 @@ rb_ary_reverse_each(VALUE ary)
RETURN_SIZED_ENUMERATOR(ary, 0, 0, ary_enum_length);
len = RARRAY_LEN(ary);
while (len--) {
- long nlen;
- rb_yield(RARRAY_AREF(ary, len));
- nlen = RARRAY_LEN(ary);
- if (nlen < len) {
- len = nlen;
- }
+ long nlen;
+ rb_yield(RARRAY_AREF(ary, len));
+ nlen = RARRAY_LEN(ary);
+ if (nlen < len) {
+ len = nlen;
+ }
}
return ary;
}
/*
* call-seq:
- * ary.length -> int
- *
- * Returns the number of elements in +self+. May be zero.
+ * array.length -> an_integer
*
- * [ 1, 2, 3, 4, 5 ].length #=> 5
- * [].length #=> 0
+ * Returns the count of elements in +self+.
*/
static VALUE
@@ -2326,19 +2878,16 @@ rb_ary_length(VALUE ary)
/*
* call-seq:
- * ary.empty? -> true or false
- *
- * Returns +true+ if +self+ contains no elements.
+ * array.empty? -> true or false
*
- * [].empty? #=> true
+ * Returns +true+ if the count of elements in +self+ is zero,
+ * +false+ otherwise.
*/
static VALUE
rb_ary_empty_p(VALUE ary)
{
- if (RARRAY_LEN(ary) == 0)
- return Qtrue;
- return Qfalse;
+ return RBOOL(RARRAY_LEN(ary) == 0);
}
VALUE
@@ -2374,10 +2923,10 @@ recursive_join(VALUE obj, VALUE argp, int recur)
int *first = (int *)arg[3];
if (recur) {
- rb_raise(rb_eArgError, "recursive array join");
+ rb_raise(rb_eArgError, "recursive array join");
}
else {
- ary_join_1(obj, ary, sep, 0, result, first);
+ ary_join_1(obj, ary, sep, 0, result, first);
}
return Qnil;
}
@@ -2390,65 +2939,68 @@ ary_join_0(VALUE ary, VALUE sep, long max, VALUE result)
if (max > 0) rb_enc_copy(result, RARRAY_AREF(ary, 0));
for (i=0; i<max; i++) {
- val = RARRAY_AREF(ary, i);
+ val = RARRAY_AREF(ary, i);
if (!RB_TYPE_P(val, T_STRING)) break;
- if (i > 0 && !NIL_P(sep))
- rb_str_buf_append(result, sep);
- rb_str_buf_append(result, val);
+ if (i > 0 && !NIL_P(sep))
+ rb_str_buf_append(result, sep);
+ rb_str_buf_append(result, val);
}
return i;
}
static void
+ary_join_1_str(VALUE dst, VALUE src, int *first)
+{
+ rb_str_buf_append(dst, src);
+ if (*first) {
+ rb_enc_copy(dst, src);
+ *first = FALSE;
+ }
+}
+
+static void
+ary_join_1_ary(VALUE obj, VALUE ary, VALUE sep, VALUE result, VALUE val, int *first)
+{
+ if (val == ary) {
+ rb_raise(rb_eArgError, "recursive array join");
+ }
+ else {
+ VALUE args[4];
+
+ *first = FALSE;
+ args[0] = val;
+ args[1] = sep;
+ args[2] = result;
+ args[3] = (VALUE)first;
+ rb_exec_recursive(recursive_join, obj, (VALUE)args);
+ }
+}
+
+static void
ary_join_1(VALUE obj, VALUE ary, VALUE sep, long i, VALUE result, int *first)
{
VALUE val, tmp;
for (; i<RARRAY_LEN(ary); i++) {
- if (i > 0 && !NIL_P(sep))
- rb_str_buf_append(result, sep);
-
- val = RARRAY_AREF(ary, i);
- if (RB_TYPE_P(val, T_STRING)) {
- str_join:
- rb_str_buf_append(result, val);
- if (*first) {
- rb_enc_copy(result, val);
- *first = FALSE;
- }
- }
- else if (RB_TYPE_P(val, T_ARRAY)) {
- obj = val;
- ary_join:
- if (val == ary) {
- rb_raise(rb_eArgError, "recursive array join");
- }
- else {
- VALUE args[4];
-
- *first = FALSE;
- args[0] = val;
- args[1] = sep;
- args[2] = result;
- args[3] = (VALUE)first;
- rb_exec_recursive(recursive_join, obj, (VALUE)args);
- }
- }
- else {
- tmp = rb_check_string_type(val);
- if (!NIL_P(tmp)) {
- val = tmp;
- goto str_join;
- }
- tmp = rb_check_array_type(val);
- if (!NIL_P(tmp)) {
- obj = val;
- val = tmp;
- goto ary_join;
- }
- val = rb_obj_as_string(val);
- goto str_join;
- }
+ if (i > 0 && !NIL_P(sep))
+ rb_str_buf_append(result, sep);
+
+ val = RARRAY_AREF(ary, i);
+ if (RB_TYPE_P(val, T_STRING)) {
+ ary_join_1_str(result, val, first);
+ }
+ else if (RB_TYPE_P(val, T_ARRAY)) {
+ ary_join_1_ary(val, ary, sep, result, val, first);
+ }
+ else if (!NIL_P(tmp = rb_check_string_type(val))) {
+ ary_join_1_str(result, tmp, first);
+ }
+ else if (!NIL_P(tmp = rb_check_array_type(val))) {
+ ary_join_1_ary(val, ary, sep, result, tmp, first);
+ }
+ else {
+ ary_join_1_str(result, rb_obj_as_string(val), first);
+ }
}
}
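The helpers above replace two gotos with a readable dispatch: a String appends directly, an Array recurses, and anything else is coerced and retried. Below is a toy model of that order with an invented tagged type; the real code additionally routes recursion through rb_exec_recursive so indirect cycles are caught, not just the direct self-reference checked here:

    #include <stdio.h>
    #include <string.h>

    typedef struct Val {
        enum { STR, ARR } tag;
        const char *s;                    /* when tag == STR */
        const struct Val *items;          /* when tag == ARR */
        int n;
    } Val;

    static void join(const Val *root, const Val *v, const char *sep, char *out) {
        for (int i = 0; i < v->n; i++) {
            if (i > 0) strcat(out, sep);
            const Val *e = &v->items[i];
            if (e->tag == STR) strcat(out, e->s);            /* append */
            else if (e == root) { puts("recursive array join"); return; }
            else join(root, e, sep, out);                    /* recurse */
        }
    }

    int main(void) {
        Val leaves[2] = { {STR, "b"}, {STR, "c"} };
        Val nested = { ARR, NULL, leaves, 2 };
        Val top[2] = { {STR, "a"}, nested };
        Val ary = { ARR, NULL, top, 2 };
        char out[64] = "";
        join(&ary, &ary, "-", out);
        printf("%s\n", out);              /* a-b-c */
        return 0;
    }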
@@ -2461,26 +3013,26 @@ rb_ary_join(VALUE ary, VALUE sep)
if (RARRAY_LEN(ary) == 0) return rb_usascii_str_new(0, 0);
if (!NIL_P(sep)) {
- StringValue(sep);
- len += RSTRING_LEN(sep) * (RARRAY_LEN(ary) - 1);
+ StringValue(sep);
+ len += RSTRING_LEN(sep) * (RARRAY_LEN(ary) - 1);
}
for (i=0; i<RARRAY_LEN(ary); i++) {
- val = RARRAY_AREF(ary, i);
- tmp = rb_check_string_type(val);
+ val = RARRAY_AREF(ary, i);
+ tmp = rb_check_string_type(val);
- if (NIL_P(tmp) || tmp != val) {
- int first;
+ if (NIL_P(tmp) || tmp != val) {
+ int first;
long n = RARRAY_LEN(ary);
if (i > n) i = n;
result = rb_str_buf_new(len + (n-i)*10);
- rb_enc_associate(result, rb_usascii_encoding());
+ rb_enc_associate(result, rb_usascii_encoding());
i = ary_join_0(ary, sep, i, result);
- first = i == 0;
- ary_join_1(ary, ary, sep, i, result, &first);
- return result;
- }
+ first = i == 0;
+ ary_join_1(ary, ary, sep, i, result, &first);
+ return result;
+ }
- len += RSTRING_LEN(tmp);
+ len += RSTRING_LEN(tmp);
}
result = rb_str_new(0, len);
@@ -2493,22 +3045,32 @@ rb_ary_join(VALUE ary, VALUE sep)
/*
* call-seq:
- * ary.join(separator=$,) -> str
+ * array.join -> new_string
+ * array.join(separator = $,) -> new_string
*
- * Returns a string created by converting each element of the array to
- * a string, separated by the given +separator+.
- * If the +separator+ is +nil+, it uses current <code>$,</code>.
- * If both the +separator+ and <code>$,</code> are +nil+,
- * it uses an empty string.
+ * Returns the new \String formed by joining the array elements after conversion.
+ * For each element +element+:
*
- * [ "a", "b", "c" ].join #=> "abc"
- * [ "a", "b", "c" ].join("-") #=> "a-b-c"
+ * - Uses <tt>element.to_s</tt> if +element+ is not a <tt>kind_of?(Array)</tt>.
+ * - Uses recursive <tt>element.join(separator)</tt> if +element+ is a <tt>kind_of?(Array)</tt>.
*
- * For nested arrays, join is applied recursively:
+ * With no argument, joins using the output field separator, <tt>$,</tt>:
+ *
+ * a = [:foo, 'bar', 2]
+ * $, # => nil
+ * a.join # => "foobar2"
+ *
+ * With \String argument +separator+, joins using that separator:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.join("\n") # => "foo\nbar\n2"
+ *
+ * Joins recursively for nested Arrays:
+ *
+ * a = [:foo, [:bar, [:baz, :bat]]]
+ * a.join # => "foobarbazbat"
*
- * [ "a", [1, 2, [:x, :y]], "b" ].join("-") #=> "a-1-2-x-y-b"
*/
-
static VALUE
rb_ary_join_m(int argc, VALUE *argv, VALUE ary)
{
@@ -2517,7 +3079,7 @@ rb_ary_join_m(int argc, VALUE *argv, VALUE ary)
if (rb_check_arity(argc, 0, 1) == 0 || NIL_P(sep = argv[0])) {
sep = rb_output_fs;
if (!NIL_P(sep)) {
- rb_warn("$, is set to non-nil value");
+ rb_category_warn(RB_WARN_CATEGORY_DEPRECATED, "$, is set to non-nil value");
}
}
@@ -2533,10 +3095,10 @@ inspect_ary(VALUE ary, VALUE dummy, int recur)
if (recur) return rb_usascii_str_new_cstr("[...]");
str = rb_str_buf_new2("[");
for (i=0; i<RARRAY_LEN(ary); i++) {
- s = rb_inspect(RARRAY_AREF(ary, i));
- if (i > 0) rb_str_buf_cat2(str, ", ");
- else rb_enc_copy(str, s);
- rb_str_buf_append(str, s);
+ s = rb_inspect(RARRAY_AREF(ary, i));
+ if (i > 0) rb_str_buf_cat2(str, ", ");
+ else rb_enc_copy(str, s);
+ rb_str_buf_append(str, s);
}
rb_str_buf_cat2(str, "]");
return str;
@@ -2544,13 +3106,15 @@ inspect_ary(VALUE ary, VALUE dummy, int recur)
/*
* call-seq:
- * ary.inspect -> string
- * ary.to_s -> string
+ * array.inspect -> new_string
*
- * Creates a string representation of +self+, by calling #inspect
- * on each element.
+ * Returns the new \String formed by calling method <tt>#inspect</tt>
+ * on each array element:
*
- * [ "a", "b", "c" ].to_s #=> "[\"a\", \"b\", \"c\"]"
+ * a = [:foo, 'bar', 2]
+ * a.inspect # => "[:foo, \"bar\", 2]"
+ *
+ * Array#to_s is an alias for Array#inspect.
*/
static VALUE
@@ -2568,40 +3132,59 @@ rb_ary_to_s(VALUE ary)
/*
* call-seq:
- * ary.to_a -> ary
+ * array.to_a -> self or new_array
*
- * Returns +self+.
+ * When +self+ is an instance of \Array, returns +self+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.to_a # => [:foo, "bar", 2]
+ *
+ * Otherwise, returns a new \Array containing the elements of +self+:
+ *
+ * class MyArray < Array; end
+ * a = MyArray.new(['foo', 'bar', 'two'])
+ * a.instance_of?(Array) # => false
+ * a.kind_of?(Array) # => true
+ * a1 = a.to_a
+ * a1 # => ["foo", "bar", "two"]
+ * a1.class # => Array # Not MyArray
*
- * If called on a subclass of Array, converts the receiver to an Array object.
*/
static VALUE
rb_ary_to_a(VALUE ary)
{
if (rb_obj_class(ary) != rb_cArray) {
- VALUE dup = rb_ary_new2(RARRAY_LEN(ary));
- rb_ary_replace(dup, ary);
- return dup;
+ VALUE dup = rb_ary_new2(RARRAY_LEN(ary));
+ rb_ary_replace(dup, ary);
+ return dup;
}
return ary;
}
/*
* call-seq:
- * ary.to_h -> hash
- * ary.to_h {|item| block } -> hash
+ * array.to_h -> new_hash
+ * array.to_h {|item| ... } -> new_hash
+ *
+ * Returns a new \Hash formed from +self+.
+ *
+ * When a block is given, calls the block with each array element;
+ * the block must return a 2-element \Array whose two elements
+ * form a key-value pair in the returned \Hash:
*
- * Returns the result of interpreting <i>ary</i> as an array of
- * <tt>[key, value]</tt> pairs.
+ * a = ['foo', :bar, 1, [2, 3], {baz: 4}]
+ * h = a.to_h {|item| [item, item] }
+ * h # => {"foo"=>"foo", :bar=>:bar, 1=>1, [2, 3]=>[2, 3], {:baz=>4}=>{:baz=>4}}
*
- * [[:foo, :bar], [1, 2]].to_h
- * # => {:foo => :bar, 1 => 2}
+ * When no block is given, +self+ must be an \Array of 2-element sub-arrays;
+ * each sub-array is formed into a key-value pair in the new \Hash:
*
- * If a block is given, the results of the block on each element of
- * the array will be used as pairs.
+ * [].to_h # => {}
+ * a = [['foo', 'zero'], ['bar', 'one'], ['baz', 'two']]
+ * h = a.to_h
+ * h # => {"foo"=>"zero", "bar"=>"one", "baz"=>"two"}
*
- * ["foo", "bar"].to_h {|s| [s.ord, s]}
- * # => {102=>"foo", 98=>"bar"}
*/
static VALUE
@@ -2612,25 +3195,25 @@ rb_ary_to_h(VALUE ary)
int block_given = rb_block_given_p();
for (i=0; i<RARRAY_LEN(ary); i++) {
- const VALUE e = rb_ary_elt(ary, i);
- const VALUE elt = block_given ? rb_yield_force_blockarg(e) : e;
- const VALUE key_value_pair = rb_check_array_type(elt);
- if (NIL_P(key_value_pair)) {
- rb_raise(rb_eTypeError, "wrong element type %"PRIsVALUE" at %ld (expected array)",
- rb_obj_class(elt), i);
- }
- if (RARRAY_LEN(key_value_pair) != 2) {
- rb_raise(rb_eArgError, "wrong array length at %ld (expected 2, was %ld)",
- i, RARRAY_LEN(key_value_pair));
- }
- rb_hash_aset(hash, RARRAY_AREF(key_value_pair, 0), RARRAY_AREF(key_value_pair, 1));
+ const VALUE e = rb_ary_elt(ary, i);
+ const VALUE elt = block_given ? rb_yield_force_blockarg(e) : e;
+ const VALUE key_value_pair = rb_check_array_type(elt);
+ if (NIL_P(key_value_pair)) {
+ rb_raise(rb_eTypeError, "wrong element type %"PRIsVALUE" at %ld (expected array)",
+ rb_obj_class(elt), i);
+ }
+ if (RARRAY_LEN(key_value_pair) != 2) {
+ rb_raise(rb_eArgError, "wrong array length at %ld (expected 2, was %ld)",
+ i, RARRAY_LEN(key_value_pair));
+ }
+ rb_hash_aset(hash, RARRAY_AREF(key_value_pair, 0), RARRAY_AREF(key_value_pair, 1));
}
return hash;
}
/*
* call-seq:
- * ary.to_ary -> ary
+ * array.to_ary -> self
*
* Returns +self+.
*/
@@ -2645,9 +3228,9 @@ static void
ary_reverse(VALUE *p1, VALUE *p2)
{
while (p1 < p2) {
- VALUE tmp = *p1;
- *p1++ = *p2;
- *p2-- = tmp;
+ VALUE tmp = *p1;
+ *p1++ = *p2;
+ *p2-- = tmp;
}
}
@@ -2662,20 +3245,20 @@ rb_ary_reverse(VALUE ary)
RARRAY_PTR_USE_TRANSIENT(ary, p1, {
p2 = p1 + len - 1; /* points last item */
ary_reverse(p1, p2);
- }); /* WB: no new reference */
+ }); /* WB: no new reference */
}
return ary;
}
/*
* call-seq:
- * ary.reverse! -> ary
+ * array.reverse! -> self
+ *
+ * Reverses +self+ in place:
*
- * Reverses +self+ in place.
+ * a = ['foo', 'bar', 'two']
+ * a.reverse! # => ["two", "bar", "foo"]
*
- * a = [ "a", "b", "c" ]
- * a.reverse! #=> ["c", "b", "a"]
- * a #=> ["c", "b", "a"]
*/
static VALUE
@@ -2686,12 +3269,14 @@ rb_ary_reverse_bang(VALUE ary)
/*
* call-seq:
- * ary.reverse -> new_ary
+ * array.reverse -> new_array
*
- * Returns a new array containing +self+'s elements in reverse order.
+ * Returns a new \Array with the elements of +self+ in reverse order:
+ *
+ * a = ['foo', 'bar', 'two']
+ * a1 = a.reverse
+ * a1 # => ["two", "bar", "foo"]
*
- * [ "a", "b", "c" ].reverse #=> ["c", "b", "a"]
- * [ 1 ].reverse #=> [1]
*/
static VALUE
@@ -2703,7 +3288,7 @@ rb_ary_reverse_m(VALUE ary)
if (len > 0) {
const VALUE *p1 = RARRAY_CONST_PTR_TRANSIENT(ary);
VALUE *p2 = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(dup) + len - 1;
- do *p2-- = *p1++; while (--len > 0);
+ do *p2-- = *p1++; while (--len > 0);
}
ARY_SET_LEN(dup, RARRAY_LEN(ary));
return dup;
@@ -2722,11 +3307,13 @@ ary_rotate_ptr(VALUE *ptr, long len, long cnt)
VALUE tmp = *ptr;
memmove(ptr, ptr + 1, sizeof(VALUE)*(len - 1));
*(ptr + len - 1) = tmp;
- } else if (cnt == len - 1) {
+ }
+ else if (cnt == len - 1) {
VALUE tmp = *(ptr + len - 1);
memmove(ptr + 1, ptr, sizeof(VALUE)*(len - 1));
*ptr = tmp;
- } else {
+ }
+ else {
--len;
if (cnt < len) ary_reverse(ptr + cnt, ptr + len);
if (--cnt > 0) ary_reverse(ptr, ptr + cnt);
@@ -2751,19 +3338,48 @@ rb_ary_rotate(VALUE ary, long cnt)
/*
* call-seq:
- * ary.rotate!(count=1) -> ary
+ * array.rotate! -> self
+ * array.rotate!(count) -> self
*
- * Rotates +self+ in place so that the element at +count+ comes first, and
- * returns +self+.
+ * Rotates +self+ in place by moving elements from one end to the other; returns +self+.
+ *
+ * When no argument given, rotates the first element to the last position:
+ *
+ * a = [:foo, 'bar', 2, 'bar']
+ * a.rotate! # => ["bar", 2, "bar", :foo]
*
- * If +count+ is negative then it rotates in the opposite direction, starting
- * from the end of the array where +-1+ is the last element.
+ * When given a non-negative \Integer +count+,
+ * rotates +count+ elements from the beginning to the end:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.rotate!(2)
+ * a # => [2, :foo, "bar"]
+ *
+ * If +count+ is large, uses <tt>count % array.size</tt> as the count:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.rotate!(20)
+ * a # => [2, :foo, "bar"]
+ *
+ * If +count+ is zero, returns +self+ unmodified:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.rotate!(0)
+ * a # => [:foo, "bar", 2]
+ *
+ * When given a negative \Integer +count+, rotates in the opposite direction,
+ * from end to beginning:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.rotate!(-2)
+ * a # => ["bar", 2, :foo]
+ *
+ * If +count+ is small (far from zero), uses <tt>count % array.size</tt> as the count:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.rotate!(-5)
+ * a # => ["bar", 2, :foo]
*
- * a = [ "a", "b", "c", "d" ]
- * a.rotate! #=> ["b", "c", "d", "a"]
- * a #=> ["b", "c", "d", "a"]
- * a.rotate!(2) #=> ["d", "a", "b", "c"]
- * a.rotate!(-3) #=> ["a", "b", "c", "d"]
*/
static VALUE
@@ -2776,19 +3392,51 @@ rb_ary_rotate_bang(int argc, VALUE *argv, VALUE ary)
/*
* call-seq:
- * ary.rotate(count=1) -> new_ary
+ * array.rotate -> new_array
+ * array.rotate(count) -> new_array
+ *
+ * Returns a new \Array formed from +self+ with elements
+ * rotated from one end to the other.
+ *
+ * When no argument given, returns a new \Array that is like +self+,
+ * except that the first element has been rotated to the last position:
+ *
+ * a = [:foo, 'bar', 2, 'bar']
+ * a1 = a.rotate
+ * a1 # => ["bar", 2, "bar", :foo]
+ *
+ * When given a non-negative \Integer +count+,
+ * returns a new \Array with +count+ elements rotated from the beginning to the end:
+ *
+ * a = [:foo, 'bar', 2]
+ * a1 = a.rotate(2)
+ * a1 # => [2, :foo, "bar"]
+ *
+ * If +count+ is large, uses <tt>count % array.size</tt> as the count:
+ *
+ * a = [:foo, 'bar', 2]
+ * a1 = a.rotate(20)
+ * a1 # => [2, :foo, "bar"]
+ *
+ * If +count+ is zero, returns a copy of +self+, unmodified:
+ *
+ * a = [:foo, 'bar', 2]
+ * a1 = a.rotate(0)
+ * a1 # => [:foo, "bar", 2]
+ *
+ * When given a negative \Integer +count+, rotates in the opposite direction,
+ * from end to beginning:
*
- * Returns a new array by rotating +self+ so that the element at +count+ is
- * the first element of the new array.
+ * a = [:foo, 'bar', 2]
+ * a1 = a.rotate(-2)
+ * a1 # => ["bar", 2, :foo]
+ *
+ * If +count+ is small (far from zero), uses <tt>count % array.size</tt> as the count:
*
- * If +count+ is negative then it rotates in the opposite direction, starting
- * from the end of +self+ where +-1+ is the last element.
+ * a = [:foo, 'bar', 2]
+ * a1 = a.rotate(-5)
+ * a1 # => ["bar", 2, :foo]
*
- * a = [ "a", "b", "c", "d" ]
- * a.rotate #=> ["b", "c", "d", "a"]
- * a #=> ["a", "b", "c", "d"]
- * a.rotate(2) #=> ["c", "d", "a", "b"]
- * a.rotate(-3) #=> ["b", "c", "d", "a"]
*/
static VALUE
@@ -2802,11 +3450,11 @@ rb_ary_rotate_m(int argc, VALUE *argv, VALUE ary)
len = RARRAY_LEN(ary);
rotated = rb_ary_new2(len);
if (len > 0) {
- cnt = rotate_count(cnt, len);
+ cnt = rotate_count(cnt, len);
ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
- len -= cnt;
- ary_memcpy(rotated, 0, len, ptr + cnt);
- ary_memcpy(rotated, len, cnt, ptr);
+ len -= cnt;
+ ary_memcpy(rotated, 0, len, ptr + cnt);
+ ary_memcpy(rotated, len, cnt, ptr);
}
ARY_SET_LEN(rotated, RARRAY_LEN(ary));
return rotated;
@@ -2814,18 +3462,27 @@ rb_ary_rotate_m(int argc, VALUE *argv, VALUE ary)
struct ary_sort_data {
VALUE ary;
- struct cmp_opt_data cmp_opt;
+ VALUE receiver;
};
static VALUE
sort_reentered(VALUE ary)
{
if (RBASIC(ary)->klass) {
- rb_raise(rb_eRuntimeError, "sort reentered");
+ rb_raise(rb_eRuntimeError, "sort reentered");
}
return Qnil;
}
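+/* Called after each user comparison; in addition to the reentrancy check,
+ * this guards against the receiver being frozen from inside the block.
+ * An illustrative trigger (example assumed from the check below, not part
+ * of this change):
+ *
+ *   a = [3, 1, 2]
+ *   a.sort! {|x, y| a.freeze; x <=> y } # raises FrozenError
+ */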
+static void
+sort_returned(struct ary_sort_data *data)
+{
+ if (rb_obj_frozen_p(data->receiver)) {
+ rb_raise(rb_eFrozenError, "array frozen during sort");
+ }
+ sort_reentered(data->ary);
+}
+
static int
sort_1(const void *ap, const void *bp, void *dummy)
{
@@ -2839,7 +3496,7 @@ sort_1(const void *ap, const void *bp, void *dummy)
args[1] = b;
retval = rb_yield_values2(2, args);
n = rb_cmpint(retval, a, b);
- sort_reentered(data->ary);
+ sort_returned(data);
return n;
}
@@ -2851,47 +3508,64 @@ sort_2(const void *ap, const void *bp, void *dummy)
VALUE a = *(const VALUE *)ap, b = *(const VALUE *)bp;
int n;
- if (FIXNUM_P(a) && FIXNUM_P(b) && CMP_OPTIMIZABLE(data->cmp_opt, Integer)) {
- if ((long)a > (long)b) return 1;
- if ((long)a < (long)b) return -1;
- return 0;
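+    /* The fast paths below are assumed valid only while Integer#<=>,
+     * String#<=>, and Float#<=> remain unredefined; that is what the
+     * CMP_OPTIMIZABLE checks guard. */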
+ if (FIXNUM_P(a) && FIXNUM_P(b) && CMP_OPTIMIZABLE(INTEGER)) {
+ if ((long)a > (long)b) return 1;
+ if ((long)a < (long)b) return -1;
+ return 0;
}
- if (STRING_P(a) && STRING_P(b) && CMP_OPTIMIZABLE(data->cmp_opt, String)) {
- return rb_str_cmp(a, b);
+ if (STRING_P(a) && STRING_P(b) && CMP_OPTIMIZABLE(STRING)) {
+ return rb_str_cmp(a, b);
}
- if (RB_FLOAT_TYPE_P(a) && CMP_OPTIMIZABLE(data->cmp_opt, Float)) {
- return rb_float_cmp(a, b);
+ if (RB_FLOAT_TYPE_P(a) && CMP_OPTIMIZABLE(FLOAT)) {
+ return rb_float_cmp(a, b);
}
retval = rb_funcallv(a, id_cmp, 1, &b);
n = rb_cmpint(retval, a, b);
- sort_reentered(data->ary);
+ sort_returned(data);
return n;
}
/*
* call-seq:
- * ary.sort! -> ary
- * ary.sort! {|a, b| block} -> ary
+ * array.sort! -> self
+ * array.sort! {|a, b| ... } -> self
+ *
+ * Returns +self+ with its elements sorted in place.
+ *
+ * With no block, compares elements using operator <tt><=></tt>
+ * (see Comparable):
*
- * Sorts +self+ in place.
+ * a = 'abcde'.split('').shuffle
+ * a # => ["e", "b", "d", "a", "c"]
+ * a.sort!
+ * a # => ["a", "b", "c", "d", "e"]
*
- * Comparisons for the sort will be done using the <code><=></code> operator
- * or using an optional code block.
+ * With a block, calls the block with each element pair;
+ * for each element pair +a+ and +b+, the block should return an integer:
*
- * The block must implement a comparison between +a+ and +b+ and return
- * an integer less than 0 when +b+ follows +a+, +0+ when +a+ and +b+
- * are equivalent, or an integer greater than 0 when +a+ follows +b+.
+ * - Negative when +b+ is to follow +a+.
+ * - Zero when +a+ and +b+ are equivalent.
+ * - Positive when +a+ is to follow +b+.
*
- * The result is not guaranteed to be stable. When the comparison of two
- * elements returns +0+, the order of the elements is unpredictable.
+ * Example:
*
- * ary = [ "d", "a", "e", "c", "b" ]
- * ary.sort! #=> ["a", "b", "c", "d", "e"]
- * ary.sort! {|a, b| b <=> a} #=> ["e", "d", "c", "b", "a"]
+ * a = 'abcde'.split('').shuffle
+ * a # => ["e", "b", "d", "a", "c"]
+ * a.sort! {|a, b| a <=> b }
+ * a # => ["a", "b", "c", "d", "e"]
+ * a.sort! {|a, b| b <=> a }
+ * a # => ["e", "d", "c", "b", "a"]
+ *
+ * When the block returns zero, the order for +a+ and +b+ is indeterminate,
+ * and may be unstable:
+ *
+ * a = 'abcde'.split('').shuffle
+ * a # => ["e", "b", "d", "a", "c"]
+ * a.sort! {|a, b| 0 }
+ * a # => ["d", "e", "c", "a", "b"]
*
- * See also Enumerable#sort_by.
*/
VALUE
@@ -2900,24 +3574,23 @@ rb_ary_sort_bang(VALUE ary)
rb_ary_modify(ary);
assert(!ARY_SHARED_P(ary));
if (RARRAY_LEN(ary) > 1) {
- VALUE tmp = ary_make_substitution(ary); /* only ary refers tmp */
- struct ary_sort_data data;
- long len = RARRAY_LEN(ary);
- RBASIC_CLEAR_CLASS(tmp);
- data.ary = tmp;
- data.cmp_opt.opt_methods = 0;
- data.cmp_opt.opt_inited = 0;
- RARRAY_PTR_USE(tmp, ptr, {
+ VALUE tmp = ary_make_substitution(ary); /* only ary refers tmp */
+ struct ary_sort_data data;
+ long len = RARRAY_LEN(ary);
+ RBASIC_CLEAR_CLASS(tmp);
+ data.ary = tmp;
+ data.receiver = ary;
+ RARRAY_PTR_USE(tmp, ptr, {
ruby_qsort(ptr, len, sizeof(VALUE),
rb_block_given_p()?sort_1:sort_2, &data);
- }); /* WB: no new reference */
- rb_ary_modify(ary);
+ }); /* WB: no new reference */
+ rb_ary_modify(ary);
if (ARY_EMBED_P(tmp)) {
if (ARY_SHARED_P(ary)) { /* ary might be destructively operated in the given block */
rb_ary_unshare(ary);
- FL_SET_EMBED(ary);
+ FL_SET_EMBED(ary);
}
- ary_memcpy(ary, 0, ARY_EMBED_LEN(tmp), ARY_EMBED_PTR(tmp));
+ ary_memcpy(ary, 0, ARY_EMBED_LEN(tmp), ARY_EMBED_PTR(tmp));
ARY_SET_LEN(ary, ARY_EMBED_LEN(tmp));
}
else {
@@ -2956,31 +3629,44 @@ rb_ary_sort_bang(VALUE ary)
/*
* call-seq:
- * ary.sort -> new_ary
- * ary.sort {|a, b| block} -> new_ary
+ * array.sort -> new_array
+ * array.sort {|a, b| ... } -> new_array
+ *
+ * Returns a new \Array whose elements are those from +self+, sorted.
*
- * Returns a new array created by sorting +self+.
+ * With no block, compares elements using operator <tt><=></tt>
+ * (see Comparable):
*
- * Comparisons for the sort will be done using the <code><=></code> operator
- * or using an optional code block.
+ * a = 'abcde'.split('').shuffle
+ * a # => ["e", "b", "d", "a", "c"]
+ * a1 = a.sort
+ * a1 # => ["a", "b", "c", "d", "e"]
*
- * The block must implement a comparison between +a+ and +b+ and return
- * an integer less than 0 when +b+ follows +a+, +0+ when +a+ and +b+
- * are equivalent, or an integer greater than 0 when +a+ follows +b+.
+ * With a block, calls the block with each element pair;
+ * for each element pair +a+ and +b+, the block should return an integer:
*
- * The result is not guaranteed to be stable. When the comparison of two
- * elements returns +0+, the order of the elements is unpredictable.
+ * - Negative when +b+ is to follow +a+.
+ * - Zero when +a+ and +b+ are equivalent.
+ * - Positive when +a+ is to follow +b+.
*
- * ary = [ "d", "a", "e", "c", "b" ]
- * ary.sort #=> ["a", "b", "c", "d", "e"]
- * ary.sort {|a, b| b <=> a} #=> ["e", "d", "c", "b", "a"]
+ * Example:
*
- * To produce the reverse order, the following can also be used
- * (and may be faster):
+ * a = 'abcde'.split('').shuffle
+ * a # => ["e", "b", "d", "a", "c"]
+ * a1 = a.sort {|a, b| a <=> b }
+ * a1 # => ["a", "b", "c", "d", "e"]
+ * a2 = a.sort {|a, b| b <=> a }
+ * a2 # => ["e", "d", "c", "b", "a"]
*
- * ary.sort.reverse! #=> ["e", "d", "c", "b", "a"]
+ * When the block returns zero, the order for +a+ and +b+ is indeterminate,
+ * and may be unstable:
*
- * See also Enumerable#sort_by.
+ * a = 'abcde'.split('').shuffle
+ * a # => ["e", "b", "d", "a", "c"]
+ * a1 = a.sort {|a, b| 0 }
+ * a1 # => ["c", "e", "b", "d", "a"]
+ *
+ * Related: Enumerable#sort_by.
*/
VALUE
@@ -2995,55 +3681,12 @@ static VALUE rb_ary_bsearch_index(VALUE ary);
/*
* call-seq:
- * ary.bsearch {|x| block } -> elem
- *
- * By using binary search, finds a value from this array which meets
- * the given condition in O(log n) where n is the size of the array.
- *
- * You can use this method in two modes: a find-minimum mode and
- * a find-any mode. In either case, the elements of the array must be
- * monotone (or sorted) with respect to the block.
- *
- * In find-minimum mode (this is a good choice for typical use cases),
- * the block must always return true or false, and there must be an index i
- * (0 <= i <= ary.size) so that:
- *
- * - the block returns false for any element whose index is less than
- * i, and
- * - the block returns true for any element whose index is greater
- * than or equal to i.
- *
- * This method returns the i-th element. If i is equal to ary.size,
- * it returns nil.
- *
- * ary = [0, 4, 7, 10, 12]
- * ary.bsearch {|x| x >= 4 } #=> 4
- * ary.bsearch {|x| x >= 6 } #=> 7
- * ary.bsearch {|x| x >= -1 } #=> 0
- * ary.bsearch {|x| x >= 100 } #=> nil
- *
- * In find-any mode (this behaves like libc's bsearch(3)), the block
- * must always return a number, and there must be two indices i and j
- * (0 <= i <= j <= ary.size) so that:
- *
- * - the block returns a positive number for ary[k] if 0 <= k < i,
- * - the block returns zero for ary[k] if i <= k < j, and
- * - the block returns a negative number for ary[k] if
- * j <= k < ary.size.
- *
- * Under this condition, this method returns any element whose index
- * is within i...j. If i is equal to j (i.e., there is no element
- * that satisfies the block), this method returns nil.
- *
- * ary = [0, 4, 7, 10, 12]
- * # try to find v such that 4 <= v < 8
- * ary.bsearch {|x| 1 - x / 4 } #=> 4 or 7
- * # try to find v such that 8 <= v < 10
- * ary.bsearch {|x| 4 - x / 2 } #=> nil
- *
- * You must not mix the two modes at a time; the block must always
- * return either true/false, or always return a number. It is
- * undefined which value is actually picked up at each iteration.
+ * array.bsearch {|element| ... } -> object
+ * array.bsearch -> new_enumerator
+ *
+ * Returns an element from +self+ selected by a binary search.
+ *
+ * See {Binary Searching}[rdoc-ref:bsearch.rdoc].
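+ *
+ *  For example, in find-minimum mode:
+ *
+ *    a = [0, 4, 7, 10, 12]
+ *    a.bsearch {|x| x >= 6 } # => 7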
*/
static VALUE
@@ -3052,22 +3695,18 @@ rb_ary_bsearch(VALUE ary)
VALUE index_result = rb_ary_bsearch_index(ary);
if (FIXNUM_P(index_result)) {
- return rb_ary_entry(ary, FIX2LONG(index_result));
+ return rb_ary_entry(ary, FIX2LONG(index_result));
}
return index_result;
}
/*
* call-seq:
- * ary.bsearch_index {|x| block } -> int or nil
+ * array.bsearch_index {|element| ... } -> integer or nil
+ * array.bsearch_index -> new_enumerator
*
- * By using binary search, finds an index of a value from this array which
- * meets the given condition in O(log n) where n is the size of the array.
- *
- * It supports two modes, depending on the nature of the block. They are
- * exactly the same as in the case of the #bsearch method, with the only difference
- * being that this method returns the index of the element instead of the
- * element itself. For more details consult the documentation for #bsearch.
+ * Searches +self+ as described at method #bsearch,
+ * but returns the _index_ of the found element instead of the element itself.
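+ *
+ *  For example:
+ *
+ *    a = [0, 4, 7, 10, 12]
+ *    a.bsearch_index {|x| x >= 6 } # => 2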
*/
static VALUE
@@ -3079,39 +3718,39 @@ rb_ary_bsearch_index(VALUE ary)
RETURN_ENUMERATOR(ary, 0, 0);
while (low < high) {
- mid = low + ((high - low) / 2);
- val = rb_ary_entry(ary, mid);
- v = rb_yield(val);
- if (FIXNUM_P(v)) {
- if (v == INT2FIX(0)) return INT2FIX(mid);
- smaller = (SIGNED_VALUE)v < 0; /* Fixnum preserves its sign-bit */
- }
- else if (v == Qtrue) {
- satisfied = 1;
- smaller = 1;
- }
- else if (v == Qfalse || v == Qnil) {
- smaller = 0;
- }
- else if (rb_obj_is_kind_of(v, rb_cNumeric)) {
- const VALUE zero = INT2FIX(0);
- switch (rb_cmpint(rb_funcallv(v, id_cmp, 1, &zero), v, zero)) {
- case 0: return INT2FIX(mid);
- case 1: smaller = 1; break;
- case -1: smaller = 0;
- }
- }
- else {
- rb_raise(rb_eTypeError, "wrong argument type %"PRIsVALUE
- " (must be numeric, true, false or nil)",
- rb_obj_class(v));
- }
- if (smaller) {
- high = mid;
- }
- else {
- low = mid + 1;
- }
+ mid = low + ((high - low) / 2);
+ val = rb_ary_entry(ary, mid);
+ v = rb_yield(val);
+ if (FIXNUM_P(v)) {
+ if (v == INT2FIX(0)) return INT2FIX(mid);
+ smaller = (SIGNED_VALUE)v < 0; /* Fixnum preserves its sign-bit */
+ }
+ else if (v == Qtrue) {
+ satisfied = 1;
+ smaller = 1;
+ }
+ else if (!RTEST(v)) {
+ smaller = 0;
+ }
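+        /* A generic Numeric result follows the same convention as the
+         * Fixnum fast path above: positive means the target lies above
+         * mid (search right), negative means below (search left). */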
+ else if (rb_obj_is_kind_of(v, rb_cNumeric)) {
+ const VALUE zero = INT2FIX(0);
+ switch (rb_cmpint(rb_funcallv(v, id_cmp, 1, &zero), v, zero)) {
+ case 0: return INT2FIX(mid);
+ case 1: smaller = 0; break;
+ case -1: smaller = 1;
+ }
+ }
+ else {
+ rb_raise(rb_eTypeError, "wrong argument type %"PRIsVALUE
+ " (must be numeric, true, false or nil)",
+ rb_obj_class(v));
+ }
+ if (smaller) {
+ high = mid;
+ }
+ else {
+ low = mid + 1;
+ }
}
if (!satisfied) return Qnil;
return INT2FIX(low);
@@ -3126,18 +3765,28 @@ sort_by_i(RB_BLOCK_CALL_FUNC_ARGLIST(i, dummy))
/*
* call-seq:
- * ary.sort_by! {|obj| block} -> ary
- * ary.sort_by! -> Enumerator
+ * array.sort_by! {|element| ... } -> self
+ * array.sort_by! -> new_enumerator
+ *
+ * Sorts the elements of +self+ in place,
+ *  using an ordering determined by the block; returns +self+.
+ *
+ * Calls the block with each successive element;
+ * sorts elements based on the values returned from the block.
+ *
+ * For duplicates returned by the block, the ordering is indeterminate, and may be unstable.
*
- * Sorts +self+ in place using a set of keys generated by mapping the
- * values in +self+ through the given block.
+ * This example sorts strings based on their sizes:
*
- * The result is not guaranteed to be stable. When two keys are equal,
- * the order of the corresponding elements is unpredictable.
+ * a = ['aaaa', 'bbb', 'cc', 'd']
+ * a.sort_by! {|element| element.size }
+ * a # => ["d", "cc", "bbb", "aaaa"]
*
- * If no block is given, an Enumerator is returned instead.
+ * Returns a new \Enumerator if no block given:
+ *
+ * a = ['aaaa', 'bbb', 'cc', 'd']
+ * a.sort_by! # => #<Enumerator: ["aaaa", "bbb", "cc", "d"]:sort_by!>
*
- * See also Enumerable#sort_by.
*/
static VALUE
@@ -3155,23 +3804,22 @@ rb_ary_sort_by_bang(VALUE ary)
/*
* call-seq:
- * ary.collect {|item| block} -> new_ary
- * ary.map {|item| block} -> new_ary
- * ary.collect -> Enumerator
- * ary.map -> Enumerator
- *
- * Invokes the given block once for each element of +self+.
+ * array.map {|element| ... } -> new_array
+ * array.map -> new_enumerator
*
- * Creates a new array containing the values returned by the block.
+ * Calls the block, if given, with each element of +self+;
+ * returns a new \Array whose elements are the return values from the block:
*
- * See also Enumerable#collect.
+ * a = [:foo, 'bar', 2]
+ * a1 = a.map {|element| element.class }
+ * a1 # => [Symbol, String, Integer]
*
- * If no block is given, an Enumerator is returned instead.
+ *  Returns a new \Enumerator if no block given:
+ *
+ *    a = [:foo, 'bar', 2]
+ * a1 = a.map
+ * a1 # => #<Enumerator: [:foo, "bar", 2]:map>
*
- * a = [ "a", "b", "c", "d" ]
- * a.collect {|x| x + "!"} #=> ["a!", "b!", "c!", "d!"]
- * a.map.with_index {|x, i| x * i} #=> ["", "b", "cc", "ddd"]
- * a #=> ["a", "b", "c", "d"]
+ * Array#collect is an alias for Array#map.
*/
static VALUE
@@ -3191,23 +3839,22 @@ rb_ary_collect(VALUE ary)
/*
* call-seq:
- * ary.collect! {|item| block } -> ary
- * ary.map! {|item| block } -> ary
- * ary.collect! -> Enumerator
- * ary.map! -> Enumerator
+ * array.map! {|element| ... } -> self
+ * array.map! -> new_enumerator
*
- * Invokes the given block once for each element of +self+, replacing the
- * element with the value returned by the block.
+ * Calls the block, if given, with each element;
+ * replaces the element with the block's return value:
*
- * See also Enumerable#collect.
+ * a = [:foo, 'bar', 2]
+ * a.map! { |element| element.class } # => [Symbol, String, Integer]
*
- * If no block is given, an Enumerator is returned instead.
+ * Returns a new \Enumerator if no block given:
*
- * a = [ "a", "b", "c", "d" ]
- * a.map! {|x| x + "!" }
- * a #=> [ "a!", "b!", "c!", "d!" ]
- * a.collect!.with_index {|x, i| x[0...i] }
- * a #=> ["", "b", "c!", "d!"]
+ * a = [:foo, 'bar', 2]
+ * a1 = a.map!
+ * a1 # => #<Enumerator: [:foo, "bar", 2]:map!>
+ *
+ * Array#collect! is an alias for Array#map!.
*/
static VALUE
@@ -3218,7 +3865,7 @@ rb_ary_collect_bang(VALUE ary)
RETURN_SIZED_ENUMERATOR(ary, 0, 0, ary_enum_length);
rb_ary_modify(ary);
for (i = 0; i < RARRAY_LEN(ary); i++) {
- rb_ary_store(ary, i, rb_yield(RARRAY_AREF(ary, i)));
+ rb_ary_store(ary, i, rb_yield(RARRAY_AREF(ary, i)));
}
return ary;
}
@@ -3230,21 +3877,21 @@ rb_get_values_at(VALUE obj, long olen, int argc, const VALUE *argv, VALUE (*func
long beg, len, i, j;
for (i=0; i<argc; i++) {
- if (FIXNUM_P(argv[i])) {
- rb_ary_push(result, (*func)(obj, FIX2LONG(argv[i])));
- continue;
- }
- /* check if idx is Range */
- if (rb_range_beg_len(argv[i], &beg, &len, olen, 1)) {
- long end = olen < beg+len ? olen : beg+len;
- for (j = beg; j < end; j++) {
- rb_ary_push(result, (*func)(obj, j));
- }
- if (beg + len > j)
- rb_ary_resize(result, RARRAY_LEN(result) + (beg + len) - j);
- continue;
- }
- rb_ary_push(result, (*func)(obj, NUM2LONG(argv[i])));
+ if (FIXNUM_P(argv[i])) {
+ rb_ary_push(result, (*func)(obj, FIX2LONG(argv[i])));
+ continue;
+ }
+ /* check if idx is Range */
+ if (rb_range_beg_len(argv[i], &beg, &len, olen, 1)) {
+ long end = olen < beg+len ? olen : beg+len;
+ for (j = beg; j < end; j++) {
+ rb_ary_push(result, (*func)(obj, j));
+ }
+ if (beg + len > j)
+ rb_ary_resize(result, RARRAY_LEN(result) + (beg + len) - j);
+ continue;
+ }
+ rb_ary_push(result, (*func)(obj, NUM2LONG(argv[i])));
}
return result;
}
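+/* Note on the Range branch above: a range running past the end pads the
+ * result with nil via rb_ary_resize; illustratively:
+ *
+ *   %w[a b c].values_at(1..4) # => ["b", "c", nil, nil]
+ */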
@@ -3254,45 +3901,70 @@ append_values_at_single(VALUE result, VALUE ary, long olen, VALUE idx)
{
long beg, len;
if (FIXNUM_P(idx)) {
- beg = FIX2LONG(idx);
+ beg = FIX2LONG(idx);
}
/* check if idx is Range */
else if (rb_range_beg_len(idx, &beg, &len, olen, 1)) {
- if (len > 0) {
+ if (len > 0) {
const VALUE *const src = RARRAY_CONST_PTR_TRANSIENT(ary);
- const long end = beg + len;
- const long prevlen = RARRAY_LEN(result);
- if (beg < olen) {
- rb_ary_cat(result, src + beg, end > olen ? olen-beg : len);
- }
- if (end > olen) {
- rb_ary_store(result, prevlen + len - 1, Qnil);
- }
- }
- return result;
+ const long end = beg + len;
+ const long prevlen = RARRAY_LEN(result);
+ if (beg < olen) {
+ rb_ary_cat(result, src + beg, end > olen ? olen-beg : len);
+ }
+ if (end > olen) {
+ rb_ary_store(result, prevlen + len - 1, Qnil);
+ }
+ }
+ return result;
}
else {
- beg = NUM2LONG(idx);
+ beg = NUM2LONG(idx);
}
return rb_ary_push(result, rb_ary_entry(ary, beg));
}
/*
* call-seq:
- * ary.values_at(selector, ...) -> new_ary
+ * array.values_at(*indexes) -> new_array
+ *
+ * Returns a new \Array whose elements are the elements
+ * of +self+ at the given \Integer or \Range +indexes+.
+ *
+ * For each positive +index+, returns the element at offset +index+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.values_at(0, 2) # => [:foo, 2]
+ * a.values_at(0..1) # => [:foo, "bar"]
+ *
+ * The given +indexes+ may be in any order, and may repeat:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.values_at(2, 0, 1, 0, 2) # => [2, :foo, "bar", :foo, 2]
+ * a.values_at(1, 0..2) # => ["bar", :foo, "bar", 2]
+ *
+ * Assigns +nil+ for an +index+ that is too large:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.values_at(0, 3, 1, 3) # => [:foo, nil, "bar", nil]
+ *
+ * Returns a new empty \Array if no arguments given.
+ *
+ * For each negative +index+, counts backward from the end of the array:
*
- * Returns an array containing the elements in +self+ corresponding to the
- * given +selector+(s).
+ * a = [:foo, 'bar', 2]
+ * a.values_at(-1, -3) # => [2, :foo]
+ *
+ * Assigns +nil+ for an +index+ that is too small:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.values_at(0, -5, 1, -6, 2) # => [:foo, nil, "bar", nil, 2]
*
- * The selectors may be either integer indices or ranges.
+ * The given +indexes+ may have a mixture of signs:
*
- * See also Array#select.
+ * a = [:foo, 'bar', 2]
+ * a.values_at(0, -2, 1, -1) # => [:foo, "bar", "bar", 2]
*
- * a = %w{ a b c d e f }
- * a.values_at(1, 3, 5) # => ["b", "d", "f"]
- * a.values_at(1, 3, 5, 7) # => ["b", "d", "f", nil]
- * a.values_at(-1, -2, -2, -7) # => ["f", "e", "e", nil]
- * a.values_at(4..6, 3...6) # => ["e", "f", nil, "d", "e", "f"]
*/
static VALUE
@@ -3301,7 +3973,7 @@ rb_ary_values_at(int argc, VALUE *argv, VALUE ary)
long i, olen = RARRAY_LEN(ary);
VALUE result = rb_ary_new_capa(argc);
for (i = 0; i < argc; ++i) {
- append_values_at_single(result, ary, olen, argv[i]);
+ append_values_at_single(result, ary, olen, argv[i]);
}
RB_GC_GUARD(ary);
return result;
@@ -3310,22 +3982,21 @@ rb_ary_values_at(int argc, VALUE *argv, VALUE ary)
/*
* call-seq:
- * ary.select {|item| block} -> new_ary
- * ary.select -> Enumerator
- * ary.filter {|item| block} -> new_ary
- * ary.filter -> Enumerator
- *
- * Returns a new array containing all elements of +ary+
- * for which the given +block+ returns a true value.
+ * array.select {|element| ... } -> new_array
+ * array.select -> new_enumerator
*
- * If no block is given, an Enumerator is returned instead.
+ * Calls the block, if given, with each element of +self+;
+ * returns a new \Array containing those elements of +self+
+ * for which the block returns a truthy value:
*
- * [1,2,3,4,5].select {|num| num.even? } #=> [2, 4]
+ * a = [:foo, 'bar', 2, :bam]
+ * a1 = a.select {|element| element.to_s.start_with?('b') }
+ * a1 # => ["bar", :bam]
*
- * a = %w[ a b c d e f ]
- * a.select {|v| v =~ /[aeiou]/ } #=> ["a", "e"]
+ * Returns a new \Enumerator if no block given:
*
- * See also Enumerable#select.
+ * a = [:foo, 'bar', 2, :bam]
+ * a.select # => #<Enumerator: [:foo, "bar", 2, :bam]:select>
*
* Array#filter is an alias for Array#select.
*/
@@ -3339,9 +4010,9 @@ rb_ary_select(VALUE ary)
RETURN_SIZED_ENUMERATOR(ary, 0, 0, ary_enum_length);
result = rb_ary_new2(RARRAY_LEN(ary));
for (i = 0; i < RARRAY_LEN(ary); i++) {
- if (RTEST(rb_yield(RARRAY_AREF(ary, i)))) {
- rb_ary_push(result, rb_ary_elt(ary, i));
- }
+ if (RTEST(rb_yield(RARRAY_AREF(ary, i)))) {
+ rb_ary_push(result, rb_ary_elt(ary, i));
+ }
}
return result;
}
@@ -3359,12 +4030,12 @@ select_bang_i(VALUE a)
long i1, i2;
for (i1 = i2 = 0; i1 < RARRAY_LEN(ary); arg->len[0] = ++i1) {
- VALUE v = RARRAY_AREF(ary, i1);
- if (!RTEST(rb_yield(v))) continue;
- if (i1 != i2) {
- rb_ary_store(ary, i2, v);
- }
- arg->len[1] = ++i2;
+ VALUE v = RARRAY_AREF(ary, i1);
+ if (!RTEST(rb_yield(v))) continue;
+ if (i1 != i2) {
+ rb_ary_store(ary, i2, v);
+ }
+ arg->len[1] = ++i2;
}
return (i1 == i2) ? Qnil : ary;
}
@@ -3378,35 +4049,38 @@ select_bang_ensure(VALUE a)
long i1 = arg->len[0], i2 = arg->len[1];
if (i2 < len && i2 < i1) {
- long tail = 0;
- if (i1 < len) {
- tail = len - i1;
+ long tail = 0;
+ rb_ary_modify(ary);
+ if (i1 < len) {
+ tail = len - i1;
RARRAY_PTR_USE_TRANSIENT(ary, ptr, {
- MEMMOVE(ptr + i2, ptr + i1, VALUE, tail);
- });
- }
- ARY_SET_LEN(ary, i2 + tail);
+ MEMMOVE(ptr + i2, ptr + i1, VALUE, tail);
+ });
+ }
+ ARY_SET_LEN(ary, i2 + tail);
}
return ary;
}
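+/* The rb_ary_modify() added above is assumed to re-assert that the array
+ * is still mutable (neither frozen nor shared by the block) before the
+ * ensure handler compacts it with MEMMOVE. */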
/*
* call-seq:
- * ary.select! {|item| block } -> ary or nil
- * ary.select! -> Enumerator
- * ary.filter! {|item| block } -> ary or nil
- * ary.filter! -> Enumerator
+ * array.select! {|element| ... } -> self or nil
+ * array.select! -> new_enumerator
+ *
+ *  Calls the block, if given, with each element of +self+;
+ * removes from +self+ those elements for which the block returns +false+ or +nil+.
*
- * Invokes the given block passing in successive elements from +self+,
- * deleting elements for which the block returns a +false+ value.
+ * Returns +self+ if any elements were removed:
*
- * The array may not be changed instantly every time the block is called.
+ * a = [:foo, 'bar', 2, :bam]
+ * a.select! {|element| element.to_s.start_with?('b') } # => ["bar", :bam]
*
- * If changes were made, it will return +self+, otherwise it returns +nil+.
+ * Returns +nil+ if no elements were removed.
*
- * If no block is given, an Enumerator is returned instead.
+ * Returns a new \Enumerator if no block given:
*
- * See also Array#keep_if.
+ * a = [:foo, 'bar', 2, :bam]
+ * a.select! # => #<Enumerator: [:foo, "bar", 2, :bam]:select!>
*
* Array#filter! is an alias for Array#select!.
*/
@@ -3426,19 +4100,20 @@ rb_ary_select_bang(VALUE ary)
/*
* call-seq:
- * ary.keep_if {|item| block} -> ary
- * ary.keep_if -> Enumerator
+ * array.keep_if {|element| ... } -> self
+ *    array.keep_if -> new_enumerator
*
- * Deletes every element of +self+ for which the given block evaluates to
- * +false+, and returns +self+.
+ * Retains those elements for which the block returns a truthy value;
+ * deletes all other elements; returns +self+:
*
- * If no block is given, an Enumerator is returned instead.
+ * a = [:foo, 'bar', 2, :bam]
+ * a.keep_if {|element| element.to_s.start_with?('b') } # => ["bar", :bam]
*
- * a = %w[ a b c d e f ]
- * a.keep_if {|v| v =~ /[aeiou]/ } #=> ["a", "e"]
- * a #=> ["a", "e"]
+ * Returns a new \Enumerator if no block given:
+ *
+ * a = [:foo, 'bar', 2, :bam]
+ * a.keep_if # => #<Enumerator: [:foo, "bar", 2, :bam]:keep_if>
*
- * See also Array#select!.
*/
static VALUE
@@ -3454,32 +4129,48 @@ ary_resize_smaller(VALUE ary, long len)
{
rb_ary_modify(ary);
if (RARRAY_LEN(ary) > len) {
- ARY_SET_LEN(ary, len);
- if (len * 2 < ARY_CAPA(ary) &&
- ARY_CAPA(ary) > ARY_DEFAULT_SIZE) {
- ary_resize_capa(ary, len * 2);
- }
+ ARY_SET_LEN(ary, len);
+ if (len * 2 < ARY_CAPA(ary) &&
+ ARY_CAPA(ary) > ARY_DEFAULT_SIZE) {
+ ary_resize_capa(ary, len * 2);
+ }
}
}
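+/* Shrink heuristic shared by the deletion paths: when the new length
+ * leaves the array less than half-used (and capacity exceeds
+ * ARY_DEFAULT_SIZE), capacity is reduced to len * 2. */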
/*
* call-seq:
- * ary.delete(obj) -> item or nil
- * ary.delete(obj) {block} -> item or result of block
+ * array.delete(obj) -> deleted_object
+ * array.delete(obj) {|nosuch| ... } -> deleted_object or block_return
+ *
+ * Removes zero or more elements from +self+.
+ *
+ * When no block is given,
+ * removes from +self+ each element +ele+ such that <tt>ele == obj</tt>;
+ * returns the last deleted element:
+ *
+ * s1 = 'bar'; s2 = 'bar'
+ * a = [:foo, s1, 2, s2]
+ * a.delete('bar') # => "bar"
+ * a # => [:foo, 2]
+ *
+ * Returns +nil+ if no elements removed.
*
- * Deletes all items from +self+ that are equal to +obj+.
+ * When a block is given,
+ * removes from +self+ each element +ele+ such that <tt>ele == obj</tt>.
*
- * Returns the last deleted item, or +nil+ if no matching item is found.
+ * If any such elements are found, ignores the block
+ * and returns the last deleted element:
*
- * If the optional code block is given, the result of the block is returned if
- * the item is not found. (To remove +nil+ elements and get an informative
- * return value, use Array#compact!)
+ * s1 = 'bar'; s2 = 'bar'
+ * a = [:foo, s1, 2, s2]
+ * deleted_obj = a.delete('bar') {|obj| fail 'Cannot happen' }
+ * a # => [:foo, 2]
+ *
+ * If no such elements are found, returns the block's return value:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.delete(:nosuch) {|obj| "#{obj} not found" } # => "nosuch not found"
*
- * a = [ "a", "b", "b", "b", "c" ]
- * a.delete("b") #=> "b"
- * a #=> ["a", "c"]
- * a.delete("z") #=> nil
- * a.delete("z") {"not found"} #=> "not found"
*/
VALUE
@@ -3489,22 +4180,22 @@ rb_ary_delete(VALUE ary, VALUE item)
long i1, i2;
for (i1 = i2 = 0; i1 < RARRAY_LEN(ary); i1++) {
- VALUE e = RARRAY_AREF(ary, i1);
+ VALUE e = RARRAY_AREF(ary, i1);
- if (rb_equal(e, item)) {
- v = e;
- continue;
- }
- if (i1 != i2) {
- rb_ary_store(ary, i2, e);
- }
- i2++;
+ if (rb_equal(e, item)) {
+ v = e;
+ continue;
+ }
+ if (i1 != i2) {
+ rb_ary_store(ary, i2, e);
+ }
+ i2++;
}
if (RARRAY_LEN(ary) == i2) {
- if (rb_block_given_p()) {
- return rb_yield(item);
- }
- return Qnil;
+ if (rb_block_given_p()) {
+ return rb_yield(item);
+ }
+ return Qnil;
}
ary_resize_smaller(ary, i2);
@@ -3519,18 +4210,18 @@ rb_ary_delete_same(VALUE ary, VALUE item)
long i1, i2;
for (i1 = i2 = 0; i1 < RARRAY_LEN(ary); i1++) {
- VALUE e = RARRAY_AREF(ary, i1);
+ VALUE e = RARRAY_AREF(ary, i1);
- if (e == item) {
- continue;
- }
- if (i1 != i2) {
- rb_ary_store(ary, i2, e);
- }
- i2++;
+ if (e == item) {
+ continue;
+ }
+ if (i1 != i2) {
+ rb_ary_store(ary, i2, e);
+ }
+ i2++;
}
if (RARRAY_LEN(ary) == i2) {
- return;
+ return;
}
ary_resize_smaller(ary, i2);
@@ -3544,8 +4235,8 @@ rb_ary_delete_at(VALUE ary, long pos)
if (pos >= len) return Qnil;
if (pos < 0) {
- pos += len;
- if (pos < 0) return Qnil;
+ pos += len;
+ if (pos < 0) return Qnil;
}
rb_ary_modify(ary);
@@ -3560,17 +4251,25 @@ rb_ary_delete_at(VALUE ary, long pos)
/*
* call-seq:
- * ary.delete_at(index) -> obj or nil
+ * array.delete_at(index) -> deleted_object or nil
+ *
+ * Deletes an element from +self+, per the given \Integer +index+.
*
- * Deletes the element at the specified +index+, returning that element, or
- * +nil+ if the +index+ is out of range.
+ * When +index+ is non-negative, deletes the element at offset +index+:
*
- * See also Array#slice!
+ * a = [:foo, 'bar', 2]
+ * a.delete_at(1) # => "bar"
+ * a # => [:foo, 2]
+ *
+ *  If +index+ is too large, returns +nil+.
+ *
+ * When +index+ is negative, counts backward from the end of the array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.delete_at(-2) # => "bar"
+ * a # => [:foo, 2]
*
- * a = ["ant", "bat", "cat", "dog"]
- * a.delete_at(2) #=> "cat"
- * a #=> ["ant", "bat", "dog"]
- * a.delete_at(99) #=> nil
+ *  If +index+ is too small (far from zero), returns +nil+.
*/
static VALUE
@@ -3579,70 +4278,133 @@ rb_ary_delete_at_m(VALUE ary, VALUE pos)
return rb_ary_delete_at(ary, NUM2LONG(pos));
}
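+/* Shared implementation for the (start, length) and Range forms of
+ * Array#slice!. Behavior sketch, as read from the checks below: a negative
+ * length or an out-of-range position yields Qnil; the length is clamped
+ * to the array end; a zero length yields a new empty array. */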
+static VALUE
+ary_slice_bang_by_rb_ary_splice(VALUE ary, long pos, long len)
+{
+ const long orig_len = RARRAY_LEN(ary);
+
+ if (len < 0) {
+ return Qnil;
+ }
+ else if (pos < -orig_len) {
+ return Qnil;
+ }
+ else if (pos < 0) {
+ pos += orig_len;
+ }
+ else if (orig_len < pos) {
+ return Qnil;
+ }
+ if (orig_len < pos + len) {
+ len = orig_len - pos;
+ }
+ if (len == 0) {
+ return rb_ary_new2(0);
+ }
+ else {
+ VALUE arg2 = rb_ary_new4(len, RARRAY_CONST_PTR_TRANSIENT(ary)+pos);
+ rb_ary_splice(ary, pos, len, 0, 0);
+ return arg2;
+ }
+}
+
/*
* call-seq:
- * ary.slice!(index) -> obj or nil
- * ary.slice!(start, length) -> new_ary or nil
- * ary.slice!(range) -> new_ary or nil
- *
- * Deletes the element(s) given by an +index+ (optionally up to +length+
- * elements) or by a +range+.
- *
- * Returns the deleted object (or objects), or +nil+ if the +index+ is out of
- * range.
- *
- * a = [ "a", "b", "c" ]
- * a.slice!(1) #=> "b"
- * a #=> ["a", "c"]
- * a.slice!(-1) #=> "c"
- * a #=> ["a"]
- * a.slice!(100) #=> nil
- * a #=> ["a"]
+ * array.slice!(n) -> object or nil
+ * array.slice!(start, length) -> new_array or nil
+ * array.slice!(range) -> new_array or nil
+ *
+ * Removes and returns elements from +self+.
+ *
+ * When the only argument is an \Integer +n+,
+ * removes and returns the _nth_ element in +self+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.slice!(1) # => "bar"
+ * a # => [:foo, 2]
+ *
+ * If +n+ is negative, counts backwards from the end of +self+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.slice!(-1) # => 2
+ * a # => [:foo, "bar"]
+ *
+ * If +n+ is out of range, returns +nil+.
+ *
+ * When the only arguments are Integers +start+ and +length+,
+ * removes +length+ elements from +self+ beginning at offset +start+;
+ * returns the deleted objects in a new \Array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.slice!(0, 2) # => [:foo, "bar"]
+ * a # => [2]
+ *
+ * If <tt>start + length</tt> exceeds the array size,
+ * removes and returns all elements from offset +start+ to the end:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.slice!(1, 50) # => ["bar", 2]
+ * a # => [:foo]
+ *
+ * If <tt>start == a.size</tt> and +length+ is non-negative,
+ * returns a new empty \Array.
+ *
+ * If +length+ is negative, returns +nil+.
+ *
+ * When the only argument is a \Range object +range+,
+ * treats <tt>range.min</tt> as +start+ above and <tt>range.size</tt> as +length+ above:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.slice!(1..2) # => ["bar", 2]
+ * a # => [:foo]
+ *
+ * If <tt>range.start == a.size</tt>, returns a new empty \Array.
+ *
+ * If <tt>range.start</tt> is larger than the array size, returns +nil+.
+ *
+ * If <tt>range.end</tt> is negative, counts backwards from the end of the array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.slice!(0..-2) # => [:foo, "bar"]
+ * a # => [2]
+ *
+ * If <tt>range.start</tt> is negative,
+ * calculates the start index backwards from the end of the array:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.slice!(-2..2) # => ["bar", 2]
+ * a # => [:foo]
+ *
*/
static VALUE
rb_ary_slice_bang(int argc, VALUE *argv, VALUE ary)
{
- VALUE arg1, arg2;
- long pos, len, orig_len;
+ VALUE arg1;
+ long pos, len;
rb_ary_modify_check(ary);
- if (argc == 2) {
- pos = NUM2LONG(argv[0]);
- len = NUM2LONG(argv[1]);
- delete_pos_len:
- if (len < 0) return Qnil;
- orig_len = RARRAY_LEN(ary);
- if (pos < 0) {
- pos += orig_len;
- if (pos < 0) return Qnil;
- }
- else if (orig_len < pos) return Qnil;
- if (orig_len < pos + len) {
- len = orig_len - pos;
- }
- if (len == 0) return rb_ary_new2(0);
- arg2 = rb_ary_new4(len, RARRAY_CONST_PTR_TRANSIENT(ary)+pos);
- RBASIC_SET_CLASS(arg2, rb_obj_class(ary));
- rb_ary_splice(ary, pos, len, 0, 0);
- return arg2;
- }
-
rb_check_arity(argc, 1, 2);
arg1 = argv[0];
+ if (argc == 2) {
+ pos = NUM2LONG(argv[0]);
+ len = NUM2LONG(argv[1]);
+ return ary_slice_bang_by_rb_ary_splice(ary, pos, len);
+ }
+
if (!FIXNUM_P(arg1)) {
- switch (rb_range_beg_len(arg1, &pos, &len, RARRAY_LEN(ary), 0)) {
- case Qtrue:
- /* valid range */
- goto delete_pos_len;
- case Qnil:
- /* invalid range */
- return Qnil;
- default:
- /* not a range */
- break;
- }
+ switch (rb_range_beg_len(arg1, &pos, &len, RARRAY_LEN(ary), 0)) {
+ case Qtrue:
+ /* valid range */
+ return ary_slice_bang_by_rb_ary_splice(ary, pos, len);
+ case Qnil:
+ /* invalid range */
+ return Qnil;
+ default:
+ /* not a range */
+ break;
+ }
}
return rb_ary_delete_at(ary, NUM2LONG(arg1));
@@ -3654,11 +4416,11 @@ ary_reject(VALUE orig, VALUE result)
long i;
for (i = 0; i < RARRAY_LEN(orig); i++) {
- VALUE v = RARRAY_AREF(orig, i);
+ VALUE v = RARRAY_AREF(orig, i);
if (!RTEST(rb_yield(v))) {
- rb_ary_push(result, v);
- }
+ rb_ary_push(result, v);
+ }
}
return result;
}
@@ -3671,12 +4433,12 @@ reject_bang_i(VALUE a)
long i1, i2;
for (i1 = i2 = 0; i1 < RARRAY_LEN(ary); arg->len[0] = ++i1) {
- VALUE v = RARRAY_AREF(ary, i1);
- if (RTEST(rb_yield(v))) continue;
- if (i1 != i2) {
- rb_ary_store(ary, i2, v);
- }
- arg->len[1] = ++i2;
+ VALUE v = RARRAY_AREF(ary, i1);
+ if (RTEST(rb_yield(v))) continue;
+ if (i1 != i2) {
+ rb_ary_store(ary, i2, v);
+ }
+ arg->len[1] = ++i2;
}
return (i1 == i2) ? Qnil : ary;
}
@@ -3693,17 +4455,23 @@ ary_reject_bang(VALUE ary)
/*
* call-seq:
- * ary.reject! {|item| block} -> ary or nil
- * ary.reject! -> Enumerator
+ * array.reject! {|element| ... } -> self or nil
+ * array.reject! -> new_enumerator
+ *
+ * Removes each element for which the block returns a truthy value.
+ *
+ * Returns +self+ if any elements removed:
+ *
+ * a = [:foo, 'bar', 2, 'bat']
+ * a.reject! {|element| element.to_s.start_with?('b') } # => [:foo, 2]
*
- * Deletes every element of +self+ for which the block evaluates to +true+,
- * if no changes were made returns +nil+.
+ * Returns +nil+ if no elements removed.
*
- * The array may not be changed instantly every time the block is called.
+ * Returns a new \Enumerator if no block given:
*
- * See also Enumerable#reject and Array#delete_if.
+ * a = [:foo, 'bar', 2]
+ * a.reject! # => #<Enumerator: [:foo, "bar", 2]:reject!>
*
- * If no block is given, an Enumerator is returned instead.
*/
static VALUE
@@ -3716,15 +4484,21 @@ rb_ary_reject_bang(VALUE ary)
/*
* call-seq:
- * ary.reject {|item| block } -> new_ary
- * ary.reject -> Enumerator
+ * array.reject {|element| ... } -> new_array
+ * array.reject -> new_enumerator
+ *
+ * Returns a new \Array whose elements are all those from +self+
+ * for which the block returns +false+ or +nil+:
+ *
+ * a = [:foo, 'bar', 2, 'bat']
+ * a1 = a.reject {|element| element.to_s.start_with?('b') }
+ * a1 # => [:foo, 2]
*
- * Returns a new array containing the items in +self+ for which the given
- * block is not +true+. The ordering of non-rejected elements is maintained.
+ * Returns a new \Enumerator if no block given:
*
- * See also Array#delete_if
+ * a = [:foo, 'bar', 2]
+ * a.reject # => #<Enumerator: [:foo, "bar", 2]:reject>
*
- * If no block is given, an Enumerator is returned instead.
*/
static VALUE
@@ -3740,21 +4514,21 @@ rb_ary_reject(VALUE ary)
/*
* call-seq:
- * ary.delete_if {|item| block} -> ary
- * ary.delete_if -> Enumerator
+ * array.delete_if {|element| ... } -> self
+ *    array.delete_if -> new_enumerator
*
- * Deletes every element of +self+ for which block evaluates to +true+.
+ * Removes each element in +self+ for which the block returns a truthy value;
+ * returns +self+:
*
- * The array is changed instantly every time the block is called, not after
- * the iteration is over.
+ * a = [:foo, 'bar', 2, 'bat']
+ * a.delete_if {|element| element.to_s.start_with?('b') } # => [:foo, 2]
*
- * See also Array#reject!
+ * Returns a new \Enumerator if no block given:
*
- * If no block is given, an Enumerator is returned instead.
+ * a = [:foo, 'bar', 2]
+ * a.delete_if # => #<Enumerator: [:foo, "bar", 2]:delete_if>
*
- * scores = [ 97, 42, 75 ]
- * scores.delete_if {|score| score < 80 } #=> [97]
- */
+ */
static VALUE
rb_ary_delete_if(VALUE ary)
@@ -3769,10 +4543,9 @@ static VALUE
take_i(RB_BLOCK_CALL_FUNC_ARGLIST(val, cbarg))
{
VALUE *args = (VALUE *)cbarg;
- if (args[1] == 0) rb_iter_break();
- else args[1]--;
if (argc > 1) val = rb_ary_new4(argc, argv);
rb_ary_push(args[0], val);
+ if (--args[1] == 0) rb_iter_break();
return Qnil;
}
@@ -3782,38 +4555,71 @@ take_items(VALUE obj, long n)
VALUE result = rb_check_array_type(obj);
VALUE args[2];
+ if (n == 0) return result;
if (!NIL_P(result)) return rb_ary_subseq(result, 0, n);
result = rb_ary_new2(n);
args[0] = result; args[1] = (VALUE)n;
- if (rb_check_block_call(obj, idEach, 0, 0, take_i, (VALUE)args) == Qundef)
- rb_raise(rb_eTypeError, "wrong argument type %"PRIsVALUE" (must respond to :each)",
- rb_obj_class(obj));
+ if (UNDEF_P(rb_check_block_call(obj, idEach, 0, 0, take_i, (VALUE)args)))
+ rb_raise(rb_eTypeError, "wrong argument type %"PRIsVALUE" (must respond to :each)",
+ rb_obj_class(obj));
return result;
}
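+/* The n == 0 early return avoids the pre-decrement underflow in take_i,
+ * and rb_iter_break fires only after the n-th push, so a lazy or infinite
+ * source is consumed for exactly n items; illustratively:
+ *
+ *   [1, 2, 3].zip(1.step) # => [[1, 1], [2, 2], [3, 3]]
+ */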
/*
* call-seq:
- * ary.zip(arg, ...) -> new_ary
- * ary.zip(arg, ...) {|arr| block} -> nil
+ * array.zip(*other_arrays) -> new_array
+ * array.zip(*other_arrays) {|other_array| ... } -> nil
+ *
+ * When no block given, returns a new \Array +new_array+ of size <tt>self.size</tt>
+ * whose elements are Arrays.
+ *
+ * Each nested array <tt>new_array[n]</tt> is of size <tt>other_arrays.size+1</tt>,
+ * and contains:
+ *
+ * - The _nth_ element of +self+.
+ * - The _nth_ element of each of the +other_arrays+.
+ *
+ * If all +other_arrays+ and +self+ are the same size:
+ *
+ * a = [:a0, :a1, :a2, :a3]
+ * b = [:b0, :b1, :b2, :b3]
+ * c = [:c0, :c1, :c2, :c3]
+ * d = a.zip(b, c)
+ * d # => [[:a0, :b0, :c0], [:a1, :b1, :c1], [:a2, :b2, :c2], [:a3, :b3, :c3]]
+ *
+ * If any array in +other_arrays+ is smaller than +self+,
+ * fills to <tt>self.size</tt> with +nil+:
*
- * Converts any arguments to arrays, then merges elements of +self+ with
- * corresponding elements from each argument.
+ * a = [:a0, :a1, :a2, :a3]
+ * b = [:b0, :b1, :b2]
+ * c = [:c0, :c1]
+ * d = a.zip(b, c)
+ * d # => [[:a0, :b0, :c0], [:a1, :b1, :c1], [:a2, :b2, nil], [:a3, nil, nil]]
*
- * This generates a sequence of <code>ary.size</code> _n_-element arrays,
- * where _n_ is one more than the count of arguments.
+ * If any array in +other_arrays+ is larger than +self+,
+ * its trailing elements are ignored:
*
- * If the size of any argument is less than the size of the initial array,
- * +nil+ values are supplied.
+ * a = [:a0, :a1, :a2, :a3]
+ * b = [:b0, :b1, :b2, :b3, :b4]
+ * c = [:c0, :c1, :c2, :c3, :c4, :c5]
+ * d = a.zip(b, c)
+ * d # => [[:a0, :b0, :c0], [:a1, :b1, :c1], [:a2, :b2, :c2], [:a3, :b3, :c3]]
*
- * If a block is given, it is invoked for each output +array+, otherwise an
- * array of arrays is returned.
+ * When a block is given, calls the block with each of the sub-arrays (formed as above); returns +nil+:
+ *
+ * a = [:a0, :a1, :a2, :a3]
+ * b = [:b0, :b1, :b2, :b3]
+ * c = [:c0, :c1, :c2, :c3]
+ * a.zip(b, c) {|sub_array| p sub_array} # => nil
+ *
+ * Output:
+ *
+ * [:a0, :b0, :c0]
+ * [:a1, :b1, :c1]
+ * [:a2, :b2, :c2]
+ * [:a3, :b3, :c3]
*
- * a = [ 4, 5, 6 ]
- * b = [ 7, 8, 9 ]
- * [1, 2, 3].zip(a, b) #=> [[1, 4, 7], [2, 5, 8], [3, 6, 9]]
- * [1, 2].zip(a, b) #=> [[1, 4, 7], [2, 5, 8]]
- * a.zip([1, 2], [8]) #=> [[4, 1, 8], [5, 2, nil], [6, nil, nil]]
*/
static VALUE
@@ -3824,51 +4630,51 @@ rb_ary_zip(int argc, VALUE *argv, VALUE ary)
VALUE result = Qnil;
for (i=0; i<argc; i++) {
- argv[i] = take_items(argv[i], len);
+ argv[i] = take_items(argv[i], len);
}
if (rb_block_given_p()) {
- int arity = rb_block_arity();
-
- if (arity > 1) {
- VALUE work, *tmp;
-
- tmp = ALLOCV_N(VALUE, work, argc+1);
-
- for (i=0; i<RARRAY_LEN(ary); i++) {
- tmp[0] = RARRAY_AREF(ary, i);
- for (j=0; j<argc; j++) {
- tmp[j+1] = rb_ary_elt(argv[j], i);
- }
- rb_yield_values2(argc+1, tmp);
- }
-
- if (work) ALLOCV_END(work);
- }
- else {
- for (i=0; i<RARRAY_LEN(ary); i++) {
- VALUE tmp = rb_ary_new2(argc+1);
-
- rb_ary_push(tmp, RARRAY_AREF(ary, i));
- for (j=0; j<argc; j++) {
- rb_ary_push(tmp, rb_ary_elt(argv[j], i));
- }
- rb_yield(tmp);
- }
- }
+ int arity = rb_block_arity();
+
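+        /* arity > 1: splat each row into the block via rb_yield_values2
+         * (block like {|x, y, z| ... }); otherwise yield each row as a
+         * single Array (block like {|row| ... }). */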
+ if (arity > 1) {
+ VALUE work, *tmp;
+
+ tmp = ALLOCV_N(VALUE, work, argc+1);
+
+ for (i=0; i<RARRAY_LEN(ary); i++) {
+ tmp[0] = RARRAY_AREF(ary, i);
+ for (j=0; j<argc; j++) {
+ tmp[j+1] = rb_ary_elt(argv[j], i);
+ }
+ rb_yield_values2(argc+1, tmp);
+ }
+
+ if (work) ALLOCV_END(work);
+ }
+ else {
+ for (i=0; i<RARRAY_LEN(ary); i++) {
+ VALUE tmp = rb_ary_new2(argc+1);
+
+ rb_ary_push(tmp, RARRAY_AREF(ary, i));
+ for (j=0; j<argc; j++) {
+ rb_ary_push(tmp, rb_ary_elt(argv[j], i));
+ }
+ rb_yield(tmp);
+ }
+ }
}
else {
- result = rb_ary_new_capa(len);
+ result = rb_ary_new_capa(len);
- for (i=0; i<len; i++) {
- VALUE tmp = rb_ary_new_capa(argc+1);
+ for (i=0; i<len; i++) {
+ VALUE tmp = rb_ary_new_capa(argc+1);
- rb_ary_push(tmp, RARRAY_AREF(ary, i));
- for (j=0; j<argc; j++) {
- rb_ary_push(tmp, rb_ary_elt(argv[j], i));
- }
- rb_ary_push(result, tmp);
- }
+ rb_ary_push(tmp, RARRAY_AREF(ary, i));
+ for (j=0; j<argc; j++) {
+ rb_ary_push(tmp, rb_ary_elt(argv[j], i));
+ }
+ rb_ary_push(result, tmp);
+ }
}
return result;
@@ -3876,15 +4682,14 @@ rb_ary_zip(int argc, VALUE *argv, VALUE ary)
/*
* call-seq:
- * ary.transpose -> new_ary
+ * array.transpose -> new_array
*
- * Assumes that +self+ is an array of arrays and transposes the rows and
- * columns.
+ * Transposes the rows and columns in an \Array of Arrays;
+ * the nested Arrays must all be the same size:
*
- * a = [[1,2], [3,4], [5,6]]
- * a.transpose #=> [[1, 3, 5], [2, 4, 6]]
+ * a = [[:a0, :a1], [:b0, :b1], [:c0, :c1]]
+ * a.transpose # => [[:a0, :b0, :c0], [:a1, :b1, :c1]]
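+ *
+ *  Raises IndexError if the nested Arrays differ in size:
+ *
+ *    [[:a0, :a1], [:b0]].transpose # raises IndexError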
*
- * If the length of the subarrays don't match, an IndexError is raised.
*/
static VALUE
@@ -3896,36 +4701,34 @@ rb_ary_transpose(VALUE ary)
alen = RARRAY_LEN(ary);
if (alen == 0) return rb_ary_dup(ary);
for (i=0; i<alen; i++) {
- tmp = to_ary(rb_ary_elt(ary, i));
- if (elen < 0) { /* first element */
- elen = RARRAY_LEN(tmp);
- result = rb_ary_new2(elen);
- for (j=0; j<elen; j++) {
- rb_ary_store(result, j, rb_ary_new2(alen));
- }
- }
- else if (elen != RARRAY_LEN(tmp)) {
- rb_raise(rb_eIndexError, "element size differs (%ld should be %ld)",
- RARRAY_LEN(tmp), elen);
- }
- for (j=0; j<elen; j++) {
- rb_ary_store(rb_ary_elt(result, j), i, rb_ary_elt(tmp, j));
- }
+ tmp = to_ary(rb_ary_elt(ary, i));
+ if (elen < 0) { /* first element */
+ elen = RARRAY_LEN(tmp);
+ result = rb_ary_new2(elen);
+ for (j=0; j<elen; j++) {
+ rb_ary_store(result, j, rb_ary_new2(alen));
+ }
+ }
+ else if (elen != RARRAY_LEN(tmp)) {
+ rb_raise(rb_eIndexError, "element size differs (%ld should be %ld)",
+ RARRAY_LEN(tmp), elen);
+ }
+ for (j=0; j<elen; j++) {
+ rb_ary_store(rb_ary_elt(result, j), i, rb_ary_elt(tmp, j));
+ }
}
return result;
}
/*
* call-seq:
- * ary.replace(other_ary) -> ary
- * ary.initialize_copy(other_ary) -> ary
+ * array.replace(other_array) -> self
+ *
+ * Replaces the content of +self+ with the content of +other_array+; returns +self+:
*
- * Replaces the contents of +self+ with the contents of +other_ary+,
- * truncating or expanding if necessary.
+ * a = [:foo, 'bar', 2]
+ * a.replace(['foo', :bar, 3]) # => ["foo", :bar, 3]
*
- * a = [ "a", "b", "c", "d", "e" ]
- * a.replace([ "x", "y", "z" ]) #=> ["x", "y", "z"]
- * a #=> ["x", "y", "z"]
*/
VALUE
@@ -3935,31 +4738,35 @@ rb_ary_replace(VALUE copy, VALUE orig)
orig = to_ary(orig);
if (copy == orig) return copy;
- if (RARRAY_LEN(orig) <= RARRAY_EMBED_LEN_MAX) {
- VALUE shared_root = 0;
+ rb_ary_reset(copy);
- if (ARY_OWNS_HEAP_P(copy)) {
- ary_heap_free(copy);
- }
- else if (ARY_SHARED_P(copy)) {
- shared_root = ARY_SHARED_ROOT(copy);
- FL_UNSET_SHARED(copy);
- }
- FL_SET_EMBED(copy);
+    /* copy has enough space to embed the contents of orig. */
+ if (RARRAY_LEN(orig) <= ary_embed_capa(copy)) {
+ assert(ARY_EMBED_P(copy));
ary_memcpy(copy, 0, RARRAY_LEN(orig), RARRAY_CONST_PTR_TRANSIENT(orig));
- if (shared_root) {
- rb_ary_decrement_share(shared_root);
- }
- ARY_SET_LEN(copy, RARRAY_LEN(orig));
+ ARY_SET_EMBED_LEN(copy, RARRAY_LEN(orig));
}
+#if USE_RVARGC
+ /* orig is embedded but copy does not have enough space to embed the
+ * contents of orig. */
+ else if (ARY_EMBED_P(orig)) {
+ long len = ARY_EMBED_LEN(orig);
+ VALUE *ptr = ary_heap_alloc(copy, len);
+
+ FL_UNSET_EMBED(copy);
+ ARY_SET_PTR(copy, ptr);
+ ARY_SET_LEN(copy, len);
+ ARY_SET_CAPA(copy, len);
+
+        // No allocation or exception is expected here that could leave
+        // `copy` in a bad state after the edits above.
+ ary_memcpy(copy, 0, len, RARRAY_CONST_PTR_TRANSIENT(orig));
+ }
+#endif
+ /* Otherwise, orig is on heap and copy does not have enough space to embed
+ * the contents of orig. */
else {
VALUE shared_root = ary_make_shared(orig);
- if (ARY_OWNS_HEAP_P(copy)) {
- ary_heap_free(copy);
- }
- else {
- rb_ary_unshare_safe(copy);
- }
FL_UNSET_EMBED(copy);
ARY_SET_PTR(copy, ARY_HEAP_PTR(orig));
ARY_SET_LEN(copy, ARY_HEAP_LEN(orig));
@@ -3971,12 +4778,13 @@ rb_ary_replace(VALUE copy, VALUE orig)
/*
* call-seq:
- * ary.clear -> ary
+ * array.clear -> self
*
- * Removes all elements from +self+.
+ * Removes all elements from +self+:
+ *
+ * a = [:foo, 'bar', 2]
+ * a.clear # => []
*
- * a = [ "a", "b", "c", "d", "e" ]
- * a.clear #=> [ ]
*/
VALUE
@@ -3984,11 +4792,11 @@ rb_ary_clear(VALUE ary)
{
rb_ary_modify_check(ary);
if (ARY_SHARED_P(ary)) {
- if (!ARY_EMBED_P(ary)) {
- rb_ary_unshare(ary);
- FL_SET_EMBED(ary);
+ if (!ARY_EMBED_P(ary)) {
+ rb_ary_unshare(ary);
+ FL_SET_EMBED(ary);
ARY_SET_EMBED_LEN(ary, 0);
- }
+ }
}
else {
ARY_SET_LEN(ary, 0);
@@ -4002,32 +4810,198 @@ rb_ary_clear(VALUE ary)
/*
* call-seq:
- * ary.fill(obj) -> ary
- * ary.fill(obj, start [, length]) -> ary
- * ary.fill(obj, range) -> ary
- * ary.fill {|index| block} -> ary
- * ary.fill(start [, length]) {|index| block} -> ary
- * ary.fill(range) {|index| block} -> ary
+ * array.fill(obj) -> self
+ * array.fill(obj, start) -> self
+ * array.fill(obj, start, length) -> self
+ * array.fill(obj, range) -> self
+ * array.fill {|index| ... } -> self
+ * array.fill(start) {|index| ... } -> self
+ * array.fill(start, length) {|index| ... } -> self
+ * array.fill(range) {|index| ... } -> self
+ *
+ * Replaces specified elements in +self+ with specified objects; returns +self+.
+ *
+ * With argument +obj+ and no block given, replaces all elements with that one object:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a # => ["a", "b", "c", "d"]
+ * a.fill(:X) # => [:X, :X, :X, :X]
+ *
+ * With arguments +obj+ and \Integer +start+, and no block given,
+ * replaces elements based on the given start.
+ *
+ * If +start+ is in range (<tt>0 <= start < array.size</tt>),
+ * replaces all elements from offset +start+ through the end:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, 2) # => ["a", "b", :X, :X]
+ *
+ * If +start+ is too large (<tt>start >= array.size</tt>), does nothing:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, 4) # => ["a", "b", "c", "d"]
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, 5) # => ["a", "b", "c", "d"]
+ *
+ * If +start+ is negative, counts from the end (starting index is <tt>start + array.size</tt>):
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, -2) # => ["a", "b", :X, :X]
+ *
+ *  If +start+ is too small (<tt>start <= -array.size</tt>), replaces all elements:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, -6) # => [:X, :X, :X, :X]
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, -50) # => [:X, :X, :X, :X]
+ *
+ * With arguments +obj+, \Integer +start+, and \Integer +length+, and no block given,
+ * replaces elements based on the given +start+ and +length+.
+ *
+ * If +start+ is in range, replaces +length+ elements beginning at offset +start+:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, 1, 1) # => ["a", :X, "c", "d"]
+ *
+ * If +start+ is negative, counts from the end:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, -2, 1) # => ["a", "b", :X, "d"]
+ *
+ * If +start+ is large (<tt>start >= array.size</tt>), extends +self+ with +nil+:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, 5, 0) # => ["a", "b", "c", "d", nil]
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, 5, 2) # => ["a", "b", "c", "d", nil, :X, :X]
+ *
+ * If +length+ is zero or negative, replaces no elements:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, 1, 0) # => ["a", "b", "c", "d"]
+ * a.fill(:X, 1, -1) # => ["a", "b", "c", "d"]
+ *
+ * With arguments +obj+ and \Range +range+, and no block given,
+ * replaces elements based on the given range.
+ *
+ * If the range is positive and ascending (<tt>0 < range.begin <= range.end</tt>),
+ * replaces elements from <tt>range.begin</tt> to <tt>range.end</tt>:
*
- * The first three forms set the selected elements of +self+ (which
- * may be the entire array) to +obj+.
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, (1..1)) # => ["a", :X, "c", "d"]
*
- * A +start+ of +nil+ is equivalent to zero.
+ * If <tt>range.first</tt> is negative, replaces no elements:
*
- * A +length+ of +nil+ is equivalent to the length of the array.
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, (-1..1)) # => ["a", "b", "c", "d"]
*
- * The last three forms fill the array with the value of the given block,
- * which is passed the absolute index of each element to be filled.
+ * If <tt>range.last</tt> is negative, counts from the end:
*
- * Negative values of +start+ count from the end of the array, where +-1+ is
- * the last element.
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, (0..-2)) # => [:X, :X, :X, "d"]
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, (1..-2)) # => ["a", :X, :X, "d"]
+ *
+ *  If <tt>range.first</tt> and <tt>range.last</tt> are both negative,
+ * both count from the end of the array:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, (-1..-1)) # => ["a", "b", "c", :X]
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(:X, (-2..-2)) # => ["a", "b", :X, "d"]
+ *
+ * With no arguments and a block given, calls the block with each index;
+ * replaces the corresponding element with the block's return value:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill { |index| "new_#{index}" } # => ["new_0", "new_1", "new_2", "new_3"]
+ *
+ * With argument +start+ and a block given, calls the block with each index
+ * from offset +start+ to the end; replaces the corresponding element
+ * with the block's return value.
+ *
+ * If +start+ is in range (<tt>0 <= start < array.size</tt>),
+ * replaces from offset +start+ to the end:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(1) { |index| "new_#{index}" } # => ["a", "new_1", "new_2", "new_3"]
+ *
+ * If +start+ is too large (<tt>start >= array.size</tt>), does nothing:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(4) { |index| fail 'Cannot happen' } # => ["a", "b", "c", "d"]
+ * a = ['a', 'b', 'c', 'd']
+ *    a.fill(5) { |index| fail 'Cannot happen' } # => ["a", "b", "c", "d"]
+ *
+ * If +start+ is negative, counts from the end:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(-2) { |index| "new_#{index}" } # => ["a", "b", "new_2", "new_3"]
+ *
+ * If +start+ is too small (<tt>start <= -array.size</tt>), replaces all elements:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(-6) { |index| "new_#{index}" } # => ["new_0", "new_1", "new_2", "new_3"]
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(-50) { |index| "new_#{index}" } # => ["new_0", "new_1", "new_2", "new_3"]
+ *
+ * With arguments +start+ and +length+, and a block given,
+ * calls the block with each index specified by +start+ and +length+;
+ * replaces the corresponding element with the block's return value.
+ *
+ * If +start+ is in range, replaces +length+ elements beginning at offset +start+:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(1, 1) { |index| "new_#{index}" } # => ["a", "new_1", "c", "d"]
+ *
+ * If +start+ is negative, counts from the end:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(-2, 1) { |index| "new_#{index}" } # => ["a", "b", "new_2", "d"]
+ *
+ * If +start+ is large (<tt>start >= array.size</tt>), extends +self+ with +nil+:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(5, 0) { |index| "new_#{index}" } # => ["a", "b", "c", "d", nil]
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(5, 2) { |index| "new_#{index}" } # => ["a", "b", "c", "d", nil, "new_5", "new_6"]
+ *
+ * If +length+ is zero or less, replaces no elements:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(1, 0) { |index| "new_#{index}" } # => ["a", "b", "c", "d"]
+ * a.fill(1, -1) { |index| "new_#{index}" } # => ["a", "b", "c", "d"]
+ *
+ * With argument +range+ and a block given,
+ * calls the block with each index in the given range;
+ * replaces the corresponding element with the block's return value.
+ *
+ * If the range is positive and ascending (<tt>0 < range.begin <= range.end</tt>),
+ * replaces elements from <tt>range.begin</tt> to <tt>range.end</tt>:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(1..1) { |index| "new_#{index}" } # => ["a", "new_1", "c", "d"]
+ *
+ * If <tt>range.first</tt> is negative, does nothing:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(-1..1) { |index| fail 'Cannot happen' } # => ["a", "b", "c", "d"]
+ *
+ * If <tt>range.last</tt> is negative, counts from the end:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(0..-2) { |index| "new_#{index}" } # => ["new_0", "new_1", "new_2", "d"]
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(1..-2) { |index| "new_#{index}" } # => ["a", "new_1", "new_2", "d"]
+ *
+ * If <tt>range.first</tt> and <tt>range.last</tt> are both negative,
+ * both count from the end:
+ *
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(-1..-1) { |index| "new_#{index}" } # => ["a", "b", "c", "new_3"]
+ * a = ['a', 'b', 'c', 'd']
+ * a.fill(-2..-2) { |index| "new_#{index}" } # => ["a", "b", "new_2", "d"]
*
- * a = [ "a", "b", "c", "d" ]
- * a.fill("x") #=> ["x", "x", "x", "x"]
- * a.fill("z", 2, 2) #=> ["x", "x", "z", "z"]
- * a.fill("y", 0..1) #=> ["y", "y", "z", "z"]
- * a.fill {|i| i*i} #=> [0, 1, 4, 9]
- * a.fill(-2) {|i| i*i*i} #=> [0, 1, 8, 27]
*/
static VALUE
@@ -4037,84 +5011,74 @@ rb_ary_fill(int argc, VALUE *argv, VALUE ary)
long beg = 0, end = 0, len = 0;
if (rb_block_given_p()) {
- rb_scan_args(argc, argv, "02", &arg1, &arg2);
- argc += 1; /* hackish */
+ rb_scan_args(argc, argv, "02", &arg1, &arg2);
+ argc += 1; /* hackish */
}
else {
- rb_scan_args(argc, argv, "12", &item, &arg1, &arg2);
+ rb_scan_args(argc, argv, "12", &item, &arg1, &arg2);
}
switch (argc) {
case 1:
- beg = 0;
- len = RARRAY_LEN(ary);
- break;
+ beg = 0;
+ len = RARRAY_LEN(ary);
+ break;
case 2:
- if (rb_range_beg_len(arg1, &beg, &len, RARRAY_LEN(ary), 1)) {
- break;
- }
- /* fall through */
+ if (rb_range_beg_len(arg1, &beg, &len, RARRAY_LEN(ary), 1)) {
+ break;
+ }
+ /* fall through */
case 3:
- beg = NIL_P(arg1) ? 0 : NUM2LONG(arg1);
- if (beg < 0) {
- beg = RARRAY_LEN(ary) + beg;
- if (beg < 0) beg = 0;
- }
- len = NIL_P(arg2) ? RARRAY_LEN(ary) - beg : NUM2LONG(arg2);
- break;
+ beg = NIL_P(arg1) ? 0 : NUM2LONG(arg1);
+ if (beg < 0) {
+ beg = RARRAY_LEN(ary) + beg;
+ if (beg < 0) beg = 0;
+ }
+ len = NIL_P(arg2) ? RARRAY_LEN(ary) - beg : NUM2LONG(arg2);
+ break;
}
rb_ary_modify(ary);
if (len < 0) {
return ary;
}
if (beg >= ARY_MAX_SIZE || len > ARY_MAX_SIZE - beg) {
- rb_raise(rb_eArgError, "argument too big");
+ rb_raise(rb_eArgError, "argument too big");
}
end = beg + len;
if (RARRAY_LEN(ary) < end) {
- if (end >= ARY_CAPA(ary)) {
- ary_resize_capa(ary, end);
- }
- ary_mem_clear(ary, RARRAY_LEN(ary), end - RARRAY_LEN(ary));
- ARY_SET_LEN(ary, end);
+ if (end >= ARY_CAPA(ary)) {
+ ary_resize_capa(ary, end);
+ }
+ ary_mem_clear(ary, RARRAY_LEN(ary), end - RARRAY_LEN(ary));
+ ARY_SET_LEN(ary, end);
}
- if (item == Qundef) {
- VALUE v;
- long i;
+ if (UNDEF_P(item)) {
+ VALUE v;
+ long i;
- for (i=beg; i<end; i++) {
- v = rb_yield(LONG2NUM(i));
- if (i>=RARRAY_LEN(ary)) break;
- ARY_SET(ary, i, v);
- }
+ for (i=beg; i<end; i++) {
+ v = rb_yield(LONG2NUM(i));
+ if (i>=RARRAY_LEN(ary)) break;
+ ARY_SET(ary, i, v);
+ }
}
else {
- ary_memfill(ary, beg, len, item);
+ ary_memfill(ary, beg, len, item);
}
return ary;
}
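A minimal Ruby sketch of the start/length normalization performed by the
`case 3` branch above (names are illustrative, not CRuby API):

    def fill_bounds(size, start, length)
      beg = start.nil? ? 0 : start
      if beg < 0
        beg += size              # negative start counts from the end
        beg = 0 if beg < 0       # clamp: far-negative start fills from 0
      end
      len = length.nil? ? size - beg : length
      [beg, len]
    end

    fill_bounds(4, -6, nil) # => [0, 4], matching a.fill(:X, -6) above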
/*
* call-seq:
- * ary + other_ary -> new_ary
+ * array + other_array -> new_array
*
- * Concatenation --- Returns a new array built by concatenating the
- * two arrays together to produce a third array.
+ * Returns a new \Array containing all elements of +array+
+ * followed by all elements of +other_array+:
*
- * [ 1, 2, 3 ] + [ 4, 5 ] #=> [ 1, 2, 3, 4, 5 ]
- * a = [ "a", "b", "c" ]
- * c = a + [ "d", "e", "f" ]
- * c #=> [ "a", "b", "c", "d", "e", "f" ]
- * a #=> [ "a", "b", "c" ]
+ * a = [0, 1] + [2, 3]
+ * a # => [0, 1, 2, 3]
*
- * Note that
- * x += y
- * is the same as
- * x = x + y
- * This means that it produces a new array. As a consequence,
- * repeated use of <code>+=</code> on arrays can be quite inefficient.
- *
- * See also Array#concat.
+ * Related: #concat.
*/
VALUE
@@ -4142,27 +5106,18 @@ ary_append(VALUE x, VALUE y)
if (n > 0) {
rb_ary_splice(x, RARRAY_LEN(x), 0, RARRAY_CONST_PTR_TRANSIENT(y), n);
}
+ RB_GC_GUARD(y);
return x;
}
/*
* call-seq:
- * ary.concat(other_ary1, other_ary2, ...) -> ary
- *
- * Appends the elements of <code>other_ary</code>s to +self+.
+ * array.concat(*other_arrays) -> self
*
- * [ "a", "b" ].concat( ["c", "d"]) #=> [ "a", "b", "c", "d" ]
- * [ "a" ].concat( ["b"], ["c", "d"]) #=> [ "a", "b", "c", "d" ]
- * [ "a" ].concat #=> [ "a" ]
+ * Adds to +array+ all elements from each \Array in +other_arrays+; returns +self+:
*
- * a = [ 1, 2, 3 ]
- * a.concat( [ 4, 5 ])
- * a #=> [ 1, 2, 3, 4, 5 ]
- *
- * a = [ 1, 2 ]
- * a.concat(a, a) #=> [1, 2, 1, 2, 1, 2]
- *
- * See also Array#+.
+ * a = [0, 1]
+ * a.concat([2, 3], [4, 5]) # => [0, 1, 2, 3, 4, 5]
*/
static VALUE
@@ -4171,15 +5126,15 @@ rb_ary_concat_multi(int argc, VALUE *argv, VALUE ary)
rb_ary_modify_check(ary);
if (argc == 1) {
- rb_ary_concat(ary, argv[0]);
+ rb_ary_concat(ary, argv[0]);
}
else if (argc > 1) {
- int i;
- VALUE args = rb_ary_tmp_new(argc);
- for (i = 0; i < argc; i++) {
- rb_ary_concat(args, argv[i]);
- }
- ary_append(ary, args);
+ int i;
+ VALUE args = rb_ary_hidden_new(argc);
+ for (i = 0; i < argc; i++) {
+ rb_ary_concat(args, argv[i]);
+ }
+ ary_append(ary, args);
}
ary_verify(ary);
@@ -4194,18 +5149,19 @@ rb_ary_concat(VALUE x, VALUE y)
/*
* call-seq:
- * ary * int -> new_ary
- * ary * str -> new_string
+ * array * n -> new_array
+ * array * string_separator -> new_string
*
- * Repetition --- With a String argument, equivalent to
- * <code>ary.join(str)</code>.
+ * When non-negative argument \Integer +n+ is given,
+ * returns a new \Array built by concatenating +n+ copies of +self+:
*
- * Otherwise, returns a new array built by concatenating the +int+ copies of
- * +self+.
+ * a = ['x', 'y']
+ * a * 3 # => ["x", "y", "x", "y", "x", "y"]
*
+ * When \String argument +string_separator+ is given,
+ * equivalent to <tt>array.join(string_separator)</tt>:
*
- * [ 1, 2, 3 ] * 3 #=> [ 1, 2, 3, 1, 2, 3, 1, 2, 3 ]
- * [ 1, 2, 3 ] * "," #=> "1,2,3"
+ * [0, [0, 1], {foo: 0}] * ', ' # => "0, 0, 1, {:foo=>0}"
*
*/
@@ -4218,30 +5174,30 @@ rb_ary_times(VALUE ary, VALUE times)
tmp = rb_check_string_type(times);
if (!NIL_P(tmp)) {
- return rb_ary_join(ary, tmp);
+ return rb_ary_join(ary, tmp);
}
len = NUM2LONG(times);
if (len == 0) {
- ary2 = ary_new(rb_obj_class(ary), 0);
- goto out;
+ ary2 = ary_new(rb_cArray, 0);
+ goto out;
}
if (len < 0) {
- rb_raise(rb_eArgError, "negative argument");
+ rb_raise(rb_eArgError, "negative argument");
}
if (ARY_MAX_SIZE/len < RARRAY_LEN(ary)) {
- rb_raise(rb_eArgError, "argument too big");
+ rb_raise(rb_eArgError, "argument too big");
}
len *= RARRAY_LEN(ary);
- ary2 = ary_new(rb_obj_class(ary), len);
+ ary2 = ary_new(rb_cArray, len);
ARY_SET_LEN(ary2, len);
ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
t = RARRAY_LEN(ary);
if (0 < t) {
- ary_memcpy(ary2, 0, t, ptr);
- while (t <= len/2) {
+ ary_memcpy(ary2, 0, t, ptr);
+ while (t <= len/2) {
ary_memcpy(ary2, t, t, RARRAY_CONST_PTR_TRANSIENT(ary2));
t *= 2;
}
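The doubling loop above fills the result in O(log n) passes: after seeding
one copy of the source, each pass copies the already-filled prefix onto
itself. A hedged Ruby sketch of the same strategy (the zero case, handled
separately in the C code above, is guarded explicitly here):

    def repeat(src, n)
      return [] if n.zero?
      len = src.length * n
      out = src.dup
      while out.length < len
        # copy as much of the filled prefix as still fits
        out.concat(out[0, [out.length, len - out.length].min])
      end
      out
    end

    repeat(['x', 'y'], 3) # => ["x", "y", "x", "y", "x", "y"]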
@@ -4255,22 +5211,17 @@ rb_ary_times(VALUE ary, VALUE times)
/*
* call-seq:
- * ary.assoc(obj) -> element_ary or nil
+ * array.assoc(obj) -> found_array or nil
*
- * Searches through an array whose elements are also arrays comparing +obj+
- * with the first element of each contained array using <code>obj.==</code>.
+ * Returns the first element in +self+ that is an \Array
+ * whose first element <tt>==</tt> +obj+:
*
- * Returns the first contained array that matches (that is, the first
- * associated array), or +nil+ if no match is found.
+ * a = [{foo: 0}, [2, 4], [4, 5, 6], [4, 5]]
+ * a.assoc(4) # => [4, 5, 6]
*
- * See also Array#rassoc
+ * Returns +nil+ if no such element is found.
*
- * s1 = [ "colors", "red", "blue", "green" ]
- * s2 = [ "letters", "a", "b", "c" ]
- * s3 = "foo"
- * a = [ s1, s2, s3 ]
- * a.assoc("letters") #=> [ "letters", "a", "b", "c" ]
- * a.assoc("foo") #=> nil
+ * Related: #rassoc.
*/
VALUE
@@ -4280,30 +5231,27 @@ rb_ary_assoc(VALUE ary, VALUE key)
VALUE v;
for (i = 0; i < RARRAY_LEN(ary); ++i) {
- v = rb_check_array_type(RARRAY_AREF(ary, i));
- if (!NIL_P(v) && RARRAY_LEN(v) > 0 &&
- rb_equal(RARRAY_AREF(v, 0), key))
- return v;
+ v = rb_check_array_type(RARRAY_AREF(ary, i));
+ if (!NIL_P(v) && RARRAY_LEN(v) > 0 &&
+ rb_equal(RARRAY_AREF(v, 0), key))
+ return v;
}
return Qnil;
}
/*
* call-seq:
- * ary.rassoc(obj) -> element_ary or nil
+ * array.rassoc(obj) -> found_array or nil
*
- * Searches through the array whose elements are also arrays.
+ * Returns the first element in +self+ that is an \Array
+ * whose second element <tt>==</tt> +obj+:
*
- * Compares +obj+ with the second element of each contained array using
- * <code>obj.==</code>.
+ * a = [{foo: 0}, [2, 4], [4, 5, 6], [4, 5]]
+ * a.rassoc(4) # => [2, 4]
*
- * Returns the first contained array that matches +obj+.
+ * Returns +nil+ if no such element is found.
*
- * See also Array#assoc.
- *
- * a = [ [ 1, "one"], [2, "two"], [3, "three"], ["ii", "two"] ]
- * a.rassoc("two") #=> [2, "two"]
- * a.rassoc("four") #=> nil
+ * Related: #assoc.
*/
VALUE
@@ -4313,11 +5261,11 @@ rb_ary_rassoc(VALUE ary, VALUE value)
VALUE v;
for (i = 0; i < RARRAY_LEN(ary); ++i) {
- v = RARRAY_AREF(ary, i);
- if (RB_TYPE_P(v, T_ARRAY) &&
- RARRAY_LEN(v) > 1 &&
- rb_equal(RARRAY_AREF(v, 1), value))
- return v;
+ v = RARRAY_AREF(ary, i);
+ if (RB_TYPE_P(v, T_ARRAY) &&
+ RARRAY_LEN(v) > 1 &&
+ rb_equal(RARRAY_AREF(v, 1), value))
+ return v;
}
return Qnil;
}
@@ -4336,38 +5284,42 @@ recursive_equal(VALUE ary1, VALUE ary2, int recur)
len1 = RARRAY_LEN(ary1);
for (i = 0; i < len1; i++) {
- if (*p1 != *p2) {
- if (rb_equal(*p1, *p2)) {
- len1 = RARRAY_LEN(ary1);
- if (len1 != RARRAY_LEN(ary2))
- return Qfalse;
- if (len1 < i)
- return Qtrue;
+ if (*p1 != *p2) {
+ if (rb_equal(*p1, *p2)) {
+ len1 = RARRAY_LEN(ary1);
+ if (len1 != RARRAY_LEN(ary2))
+ return Qfalse;
+ if (len1 < i)
+ return Qtrue;
p1 = RARRAY_CONST_PTR(ary1) + i;
p2 = RARRAY_CONST_PTR(ary2) + i;
- }
- else {
- return Qfalse;
- }
- }
- p1++;
- p2++;
+ }
+ else {
+ return Qfalse;
+ }
+ }
+ p1++;
+ p2++;
}
return Qtrue;
}
/*
* call-seq:
- * ary == other_ary -> bool
+ * array == other_array -> true or false
+ *
+ * Returns +true+ if both <tt>array.size == other_array.size</tt>
+ * and for each index +i+ in +array+, <tt>array[i] == other_array[i]</tt>:
*
- * Equality --- Two arrays are equal if they contain the same number of
- * elements and if each element is equal to (according to Object#==) the
- * corresponding element in +other_ary+.
+ * a0 = [:foo, 'bar', 2]
+ * a1 = [:foo, 'bar', 2.0]
+ * a1 == a0 # => true
+ * [] == [] # => true
*
- * [ "a", "c" ] == [ "a", "c", 7 ] #=> false
- * [ "a", "c", 7 ] == [ "a", "c", 7 ] #=> true
- * [ "a", "c", 7 ] == [ "a", "d", "f" ] #=> false
+ * Otherwise, returns +false+.
*
+ * This method is different from method Array#eql?,
+ * which compares elements using <tt>Object#eql?</tt>.
*/
static VALUE
@@ -4375,10 +5327,10 @@ rb_ary_equal(VALUE ary1, VALUE ary2)
{
if (ary1 == ary2) return Qtrue;
if (!RB_TYPE_P(ary2, T_ARRAY)) {
- if (!rb_respond_to(ary2, idTo_ary)) {
- return Qfalse;
- }
- return rb_equal(ary2, ary1);
+ if (!rb_respond_to(ary2, idTo_ary)) {
+ return Qfalse;
+ }
+ return rb_equal(ary2, ary1);
}
if (RARRAY_LEN(ary1) != RARRAY_LEN(ary2)) return Qfalse;
if (RARRAY_CONST_PTR_TRANSIENT(ary1) == RARRAY_CONST_PTR_TRANSIENT(ary2)) return Qtrue;
@@ -4392,18 +5344,27 @@ recursive_eql(VALUE ary1, VALUE ary2, int recur)
if (recur) return Qtrue; /* Subtle! */
for (i=0; i<RARRAY_LEN(ary1); i++) {
- if (!rb_eql(rb_ary_elt(ary1, i), rb_ary_elt(ary2, i)))
- return Qfalse;
+ if (!rb_eql(rb_ary_elt(ary1, i), rb_ary_elt(ary2, i)))
+ return Qfalse;
}
return Qtrue;
}
/*
* call-seq:
- * ary.eql?(other) -> true or false
+ * array.eql? other_array -> true or false
+ *
+ * Returns +true+ if +self+ and +other_array+ are the same size,
+ * and if, for each index +i+ in +self+, <tt>self[i].eql? other_array[i]</tt>:
+ *
+ * a0 = [:foo, 'bar', 2]
+ * a1 = [:foo, 'bar', 2]
+ * a1.eql?(a0) # => true
*
- * Returns +true+ if +self+ and +other+ are the same object,
- * or are both arrays with the same content (according to Object#eql?).
+ * Otherwise, returns +false+.
+ *
+ * This method is different from method Array#==,
+ * which compares using method <tt>Object#==</tt>:
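+ *
+ *    a0 = [:foo, 'bar', 2]
+ *    a0.eql?([:foo, 'bar', 2.0]) # => false
+ *    a0 == [:foo, 'bar', 2.0]    # => true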
*/
static VALUE
@@ -4418,14 +5379,15 @@ rb_ary_eql(VALUE ary1, VALUE ary2)
/*
* call-seq:
- * ary.hash -> integer
+ * array.hash -> integer
+ *
+ * Returns the integer hash value for +self+.
*
- * Compute a hash-code for this array.
+ * Two arrays with the same content will have the same hash code (and will compare using <tt>eql?</tt>):
*
- * Two arrays with the same content will have the same hash code (and will
- * compare using #eql?).
+ * [0, 1, 2].hash == [0, 1, 2].hash # => true
+ * [0, 1, 2].hash == [0, 1, 3].hash # => false
*
- * See also Object#hash.
*/
static VALUE
@@ -4438,8 +5400,8 @@ rb_ary_hash(VALUE ary)
h = rb_hash_start(RARRAY_LEN(ary));
h = rb_hash_uint(h, (st_index_t)rb_ary_hash);
for (i=0; i<RARRAY_LEN(ary); i++) {
- n = rb_hash(RARRAY_AREF(ary, i));
- h = rb_hash_uint(h, NUM2LONG(n));
+ n = rb_hash(RARRAY_AREF(ary, i));
+ h = rb_hash_uint(h, NUM2LONG(n));
}
h = rb_hash_end(h);
return ST2FIX(h);
@@ -4447,14 +5409,13 @@ rb_ary_hash(VALUE ary)
/*
* call-seq:
- * ary.include?(object) -> true or false
+ * array.include?(obj) -> true or false
*
- * Returns +true+ if the given +object+ is present in +self+ (that is, if any
- * element <code>==</code> +object+), otherwise returns +false+.
+ * Returns +true+ if for some index +i+ in +self+, <tt>obj == self[i]</tt>;
+ * otherwise +false+:
*
- * a = [ "a", "b", "c" ]
- * a.include?("b") #=> true
- * a.include?("z") #=> false
+ * [0, 1, 2].include?(2) # => true
+ * [0, 1, 2].include?(3) # => false
*/
VALUE
@@ -4464,10 +5425,10 @@ rb_ary_includes(VALUE ary, VALUE item)
VALUE e;
for (i=0; i<RARRAY_LEN(ary); i++) {
- e = RARRAY_AREF(ary, i);
- if (rb_equal(e, item)) {
- return Qtrue;
- }
+ e = RARRAY_AREF(ary, i);
+ if (rb_equal(e, item)) {
+ return Qtrue;
+ }
}
return Qfalse;
}
@@ -4479,10 +5440,10 @@ rb_ary_includes_by_eql(VALUE ary, VALUE item)
VALUE e;
for (i=0; i<RARRAY_LEN(ary); i++) {
- e = RARRAY_AREF(ary, i);
- if (rb_eql(item, e)) {
- return Qtrue;
- }
+ e = RARRAY_AREF(ary, i);
+ if (rb_eql(item, e)) {
+ return Qtrue;
+ }
}
return Qfalse;
}
@@ -4495,45 +5456,46 @@ recursive_cmp(VALUE ary1, VALUE ary2, int recur)
if (recur) return Qundef; /* Subtle! */
len = RARRAY_LEN(ary1);
if (len > RARRAY_LEN(ary2)) {
- len = RARRAY_LEN(ary2);
+ len = RARRAY_LEN(ary2);
}
for (i=0; i<len; i++) {
- VALUE e1 = rb_ary_elt(ary1, i), e2 = rb_ary_elt(ary2, i);
- VALUE v = rb_funcallv(e1, id_cmp, 1, &e2);
- if (v != INT2FIX(0)) {
- return v;
- }
+ VALUE e1 = rb_ary_elt(ary1, i), e2 = rb_ary_elt(ary2, i);
+ VALUE v = rb_funcallv(e1, id_cmp, 1, &e2);
+ if (v != INT2FIX(0)) {
+ return v;
+ }
}
return Qundef;
}
/*
* call-seq:
- * ary <=> other_ary -> -1, 0, +1 or nil
+ * array <=> other_array -> -1, 0, or 1
+ *
+ * Returns -1, 0, or 1 as +self+ is less than, equal to, or greater than +other_array+.
+ * For each index +i+ in +self+, evaluates <tt>result = self[i] <=> other_array[i]</tt>.
+ *
+ * Returns -1 if any result is -1:
+ *
+ * [0, 1, 2] <=> [0, 1, 3] # => -1
+ *
+ * Returns 1 if any result is 1:
+ *
+ * [0, 1, 2] <=> [0, 1, 1] # => 1
+ *
+ * When all results are zero:
*
- * Comparison --- Returns an integer (+-1+, +0+, or <code>+1</code>) if this
- * array is less than, equal to, or greater than +other_ary+.
+ * - Returns -1 if +array+ is smaller than +other_array+:
*
- * Each object in each array is compared (using the <=> operator).
+ * [0, 1, 2] <=> [0, 1, 2, 3] # => -1
*
- * Arrays are compared in an "element-wise" manner; the first element of +ary+
- * is compared with the first one of +other_ary+ using the <=> operator, then
- * each of the second elements, etc...
- * As soon as the result of any such comparison is non zero (i.e. the two
- * corresponding elements are not equal), that result is returned for the
- * whole array comparison.
+ * - Returns 1 if +array+ is larger than +other_array+:
*
- * If all the elements are equal, then the result is based on a comparison of
- * the array lengths. Thus, two arrays are "equal" according to Array#<=> if,
- * and only if, they have the same length and the value of each element is
- * equal to the value of the corresponding element in the other array.
+ * [0, 1, 2] <=> [0, 1] # => 1
*
- * +nil+ is returned if the +other_ary+ is not an array or if the comparison
- * of two elements returned +nil+.
+ * - Returns 0 if +array+ and +other_array+ are the same size:
*
- * [ "a", "a", "c" ] <=> [ "a", "b", "c" ] #=> -1
- * [ 1, 2, 3, 4, 5, 6 ] <=> [ 1, 2 ] #=> +1
- * [ 1, 2 ] <=> [ 1, :two ] #=> nil
+ * [0, 1, 2] <=> [0, 1, 2] # => 0
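+ *
+ * Returns +nil+ if either array contains an element
+ * not comparable with its counterpart:
+ *
+ *    [0, 1] <=> [0, :one] # => nil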
*
*/
@@ -4547,7 +5509,7 @@ rb_ary_cmp(VALUE ary1, VALUE ary2)
if (NIL_P(ary2)) return Qnil;
if (ary1 == ary2) return INT2FIX(0);
v = rb_exec_recursive_paired(recursive_cmp, ary1, ary2, ary2);
- if (v != Qundef) return v;
+ if (!UNDEF_P(v)) return v;
len = RARRAY_LEN(ary1) - RARRAY_LEN(ary2);
if (len == 0) return INT2FIX(0);
if (len > 0) return INT2FIX(1);
@@ -4560,8 +5522,8 @@ ary_add_hash(VALUE hash, VALUE ary)
long i;
for (i=0; i<RARRAY_LEN(ary); i++) {
- VALUE elt = RARRAY_AREF(ary, i);
- rb_hash_add_new_element(hash, elt, elt);
+ VALUE elt = RARRAY_AREF(ary, i);
+ rb_hash_add_new_element(hash, elt, elt);
}
return hash;
}
@@ -4589,8 +5551,8 @@ ary_add_hash_by(VALUE hash, VALUE ary)
long i;
for (i = 0; i < RARRAY_LEN(ary); ++i) {
- VALUE v = rb_ary_elt(ary, i), k = rb_yield(v);
- rb_hash_add_new_element(hash, k, v);
+ VALUE v = rb_ary_elt(ary, i), k = rb_yield(v);
+ rb_hash_add_new_element(hash, k, v);
}
return hash;
}
@@ -4608,35 +5570,28 @@ ary_recycle_hash(VALUE hash)
assert(RBASIC_CLASS(hash) == 0);
if (RHASH_ST_TABLE_P(hash)) {
st_table *tbl = RHASH_ST_TABLE(hash);
- st_free_table(tbl);
+ st_free_table(tbl);
RHASH_ST_CLEAR(hash);
}
}
/*
* call-seq:
- * ary - other_ary -> new_ary
+ * array - other_array -> new_array
*
- * Array Difference
+ * Returns a new \Array containing only those elements from +array+
+ * that are not found in \Array +other_array+;
+ * items are compared using <tt>eql?</tt>;
+ * the order from +array+ is preserved:
*
- * Returns a new array that is a copy of the original array, removing all
- * occurrences of any item that also appear in +other_ary+. The order is
- * preserved from the original array.
+ * [0, 1, 1, 2, 1, 1, 3, 1, 1] - [1] # => [0, 2, 3]
+ * [0, 1, 2, 3] - [3, 0] # => [1, 2]
+ * [0, 1, 2] - [4] # => [0, 1, 2]
*
- * It compares elements using their #hash and #eql? methods for efficiency.
- *
- * [ 1, 1, 2, 2, 3, 3, 4, 5 ] - [ 1, 2, 4 ] #=> [ 3, 3, 5 ]
- *
- * Note that while 1 and 2 were only present once in the array argument, and
- * were present twice in the receiver array, all occurrences of each Integer are
- * removed in the returned array.
- *
- * If you need set-like behavior, see the library class Set.
- *
- * See also Array#difference.
+ * Related: Array#difference.
*/
-static VALUE
+VALUE
rb_ary_diff(VALUE ary1, VALUE ary2)
{
VALUE ary3;
@@ -4644,21 +5599,22 @@ rb_ary_diff(VALUE ary1, VALUE ary2)
long i;
ary2 = to_ary(ary2);
+ if (RARRAY_LEN(ary2) == 0) { return ary_make_shared_copy(ary1); }
ary3 = rb_ary_new();
if (RARRAY_LEN(ary1) <= SMALL_ARRAY_LEN || RARRAY_LEN(ary2) <= SMALL_ARRAY_LEN) {
- for (i=0; i<RARRAY_LEN(ary1); i++) {
- VALUE elt = rb_ary_elt(ary1, i);
- if (rb_ary_includes_by_eql(ary2, elt)) continue;
- rb_ary_push(ary3, elt);
- }
- return ary3;
+ for (i=0; i<RARRAY_LEN(ary1); i++) {
+ VALUE elt = rb_ary_elt(ary1, i);
+ if (rb_ary_includes_by_eql(ary2, elt)) continue;
+ rb_ary_push(ary3, elt);
+ }
+ return ary3;
}
hash = ary_make_hash(ary2);
for (i=0; i<RARRAY_LEN(ary1); i++) {
if (rb_hash_stlike_lookup(hash, RARRAY_AREF(ary1, i), NULL)) continue;
- rb_ary_push(ary3, rb_ary_elt(ary1, i));
+ rb_ary_push(ary3, rb_ary_elt(ary1, i));
}
ary_recycle_hash(hash);
return ary3;
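rb_ary_diff above picks between two strategies: for small inputs it does a
quadratic eql? scan, otherwise it builds a hash from ary2 for near-linear
lookups. A rough Ruby sketch (SMALL_ARRAY_LEN is a tuning constant in
array.c; the cutoff value below is illustrative):

    SMALL_CUTOFF = 16
    def diff(a, b)
      if a.length <= SMALL_CUTOFF || b.length <= SMALL_CUTOFF
        a.reject { |e| b.any? { |x| x.eql?(e) } }  # O(a.size * b.size)
      else
        seen = b.to_h { |e| [e, true] }  # hash b once; Hash keying uses #hash/#eql?
        a.reject { |e| seen.key?(e) }    # O(a.size + b.size)
      end
    end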
@@ -4666,31 +5622,19 @@ rb_ary_diff(VALUE ary1, VALUE ary2)
/*
* call-seq:
- * ary.difference(other_ary1, other_ary2, ...) -> new_ary
- *
- * Array Difference
- *
- * Returns a new array that is a copy of the original array, removing all
- * occurrences of any item that also appear in +other_ary+. The order is
- * preserved from the original array.
- *
- * It compares elements using their #hash and #eql? methods for efficiency.
- *
- * [ 1, 1, 2, 2, 3, 3, 4, 5 ].difference([ 1, 2, 4 ]) #=> [ 3, 3, 5 ]
+ * array.difference(*other_arrays) -> new_array
*
- * Note that while 1 and 2 were only present once in the array argument, and
- * were present twice in the receiver array, all occurrences of each Integer are
- * removed in the returned array.
+ * Returns a new \Array containing only those elements from +self+
+ * that are not found in any of the Arrays +other_arrays+;
+ * items are compared using <tt>eql?</tt>; order from +self+ is preserved:
*
- * Multiple array arguments can be supplied and all occurrences of any element
- * in those supplied arrays that match the receiver will be removed from the
- * returned array.
+ * [0, 1, 1, 2, 1, 1, 3, 1, 1].difference([1]) # => [0, 2, 3]
+ * [0, 1, 2, 3].difference([3, 0], [1, 3]) # => [2]
+ * [0, 1, 2].difference([4]) # => [0, 1, 2]
*
- * [ 1, 'c', :s, 'yep' ].difference([ 1 ], [ 'a', 'c' ]) #=> [ :s, "yep" ]
+ * Returns a copy of +self+ if no arguments given.
*
- * If you need set-like behavior, see the library class Set.
- *
- * See also Array#-.
+ * Related: Array#-.
*/
static VALUE
@@ -4732,17 +5676,19 @@ rb_ary_difference_multi(int argc, VALUE *argv, VALUE ary)
/*
* call-seq:
- * ary & other_ary -> new_ary
+ * array & other_array -> new_array
+ *
+ * Returns a new \Array containing each element found in both +array+ and \Array +other_array+;
+ * duplicates are omitted; items are compared using <tt>eql?</tt>:
*
- * Set Intersection --- Returns a new array containing unique elements common to the
- * two arrays. The order is preserved from the original array.
+ * [0, 1, 2, 3] & [1, 2] # => [1, 2]
+ * [0, 1, 0, 1] & [0, 1] # => [0, 1]
*
- * It compares elements using their #hash and #eql? methods for efficiency.
+ * Preserves order from +array+:
*
- * [ 1, 1, 3, 5 ] & [ 3, 2, 1 ] #=> [ 1, 3 ]
- * [ 'a', 'b', 'b', 'z' ] & [ 'a', 'b', 'c' ] #=> [ 'a', 'b' ]
+ * [0, 1, 2] & [3, 2, 1, 0] # => [0, 1, 2]
*
- * See also Array#uniq.
+ * Related: Array#intersection.
*/
@@ -4758,23 +5704,23 @@ rb_ary_and(VALUE ary1, VALUE ary2)
if (RARRAY_LEN(ary1) == 0 || RARRAY_LEN(ary2) == 0) return ary3;
if (RARRAY_LEN(ary1) <= SMALL_ARRAY_LEN && RARRAY_LEN(ary2) <= SMALL_ARRAY_LEN) {
- for (i=0; i<RARRAY_LEN(ary1); i++) {
- v = RARRAY_AREF(ary1, i);
- if (!rb_ary_includes_by_eql(ary2, v)) continue;
- if (rb_ary_includes_by_eql(ary3, v)) continue;
- rb_ary_push(ary3, v);
- }
- return ary3;
+ for (i=0; i<RARRAY_LEN(ary1); i++) {
+ v = RARRAY_AREF(ary1, i);
+ if (!rb_ary_includes_by_eql(ary2, v)) continue;
+ if (rb_ary_includes_by_eql(ary3, v)) continue;
+ rb_ary_push(ary3, v);
+ }
+ return ary3;
}
hash = ary_make_hash(ary2);
for (i=0; i<RARRAY_LEN(ary1); i++) {
- v = RARRAY_AREF(ary1, i);
- vv = (st_data_t)v;
+ v = RARRAY_AREF(ary1, i);
+ vv = (st_data_t)v;
if (rb_hash_stlike_delete(hash, &vv, 0)) {
- rb_ary_push(ary3, v);
- }
+ rb_ary_push(ary3, v);
+ }
}
ary_recycle_hash(hash);
@@ -4783,19 +5729,22 @@ rb_ary_and(VALUE ary1, VALUE ary2)
/*
* call-seq:
- * ary.intersection(other_ary1, other_ary2, ...) -> new_ary
+ * array.intersection(*other_arrays) -> new_array
+ *
+ * Returns a new \Array containing each element found both in +self+
+ * and in all of the given Arrays +other_arrays+;
+ * duplicates are omitted; items are compared using <tt>eql?</tt>:
*
- * Set Intersection --- Returns a new array containing unique elements common
- * to +self+ and <code>other_ary</code>s. Order is preserved from the original
- * array.
+ * [0, 1, 2, 3].intersection([0, 1, 2], [0, 1, 3]) # => [0, 1]
+ * [0, 0, 1, 1, 2, 3].intersection([0, 1, 2], [0, 1, 3]) # => [0, 1]
*
- * It compares elements using their #hash and #eql? methods for efficiency.
+ * Preserves order from +self+:
*
- * [ 1, 1, 3, 5 ].intersection([ 3, 2, 1 ]) # => [ 1, 3 ]
- * [ "a", "b", "z" ].intersection([ "a", "b", "c" ], [ "b" ]) # => [ "b" ]
- * [ "a" ].intersection #=> [ "a" ]
+ * [0, 1, 2].intersection([2, 1, 0]) # => [0, 1, 2]
*
- * See also Array#&.
+ * Returns a copy of +self+ if no arguments given.
+ *
+ * Related: Array#&.
*/
static VALUE
@@ -4844,17 +5793,17 @@ rb_ary_union_hash(VALUE hash, VALUE ary2)
/*
* call-seq:
- * ary | other_ary -> new_ary
- *
- * Set Union --- Returns a new array by joining +ary+ with +other_ary+,
- * excluding any duplicates and preserving the order from the given arrays.
+ * array | other_array -> new_array
*
- * It compares elements using their #hash and #eql? methods for efficiency.
+ * Returns the union of +array+ and \Array +other_array+;
+ * duplicates are removed; order is preserved;
+ * items are compared using <tt>eql?</tt>:
*
- * [ "a", "b", "c" ] | [ "c", "d", "a" ] #=> [ "a", "b", "c", "d" ]
- * [ "c", "d", "a" ] | [ "a", "b", "c" ] #=> [ "c", "d", "a", "b" ]
+ * [0, 1] | [2, 3] # => [0, 1, 2, 3]
+ * [0, 1, 1] | [2, 2, 3] # => [0, 1, 2, 3]
+ * [0, 1, 2] | [3, 2, 1, 0] # => [0, 1, 2, 3]
*
- * See also Array#union.
+ * Related: Array#union.
*/
static VALUE
@@ -4864,10 +5813,10 @@ rb_ary_or(VALUE ary1, VALUE ary2)
ary2 = to_ary(ary2);
if (RARRAY_LEN(ary1) + RARRAY_LEN(ary2) <= SMALL_ARRAY_LEN) {
- ary3 = rb_ary_new();
+ ary3 = rb_ary_new();
rb_ary_union(ary3, ary1);
rb_ary_union(ary3, ary2);
- return ary3;
+ return ary3;
}
hash = ary_make_hash(ary1);
@@ -4880,18 +5829,18 @@ rb_ary_or(VALUE ary1, VALUE ary2)
/*
* call-seq:
- * ary.union(other_ary1, other_ary2, ...) -> new_ary
+ * array.union(*other_arrays) -> new_array
*
- * Set Union --- Returns a new array by joining <code>other_ary</code>s with +self+,
- * excluding any duplicates and preserving the order from the given arrays.
+ * Returns a new \Array that is the union of +self+ and all given Arrays +other_arrays+;
+ * duplicates are removed; order is preserved; items are compared using <tt>eql?</tt>:
*
- * It compares elements using their #hash and #eql? methods for efficiency.
+ * [0, 1, 2, 3].union([4, 5], [6, 7]) # => [0, 1, 2, 3, 4, 5, 6, 7]
+ * [0, 1, 1].union([2, 1], [3, 1]) # => [0, 1, 2, 3]
+ * [0, 1, 2, 3].union([3, 2], [1, 0]) # => [0, 1, 2, 3]
*
- * [ "a", "b", "c" ].union( [ "c", "d", "a" ] ) #=> [ "a", "b", "c", "d" ]
- * [ "a" ].union( ["e", "b"], ["a", "c", "b"] ) #=> [ "a", "e", "b", "c" ]
- * [ "a" ].union #=> [ "a" ]
+ * Returns a copy of +self+ if no arguments given.
*
- * See also Array#|.
+ * Related: Array#|.
*/
static VALUE
@@ -4926,30 +5875,191 @@ rb_ary_union_multi(int argc, VALUE *argv, VALUE ary)
/*
* call-seq:
- * ary.max -> obj
- * ary.max {|a, b| block} -> obj
- * ary.max(n) -> array
- * ary.max(n) {|a, b| block} -> array
+ * ary.intersect?(other_ary) -> true or false
+ *
+ * Returns +true+ if the array and +other_ary+ have at least one element in
+ * common, otherwise returns +false+:
+ *
+ * a = [ 1, 2, 3 ]
+ * b = [ 3, 4, 5 ]
+ * c = [ 5, 6, 7 ]
+ * a.intersect?(b) #=> true
+ * a.intersect?(c) #=> false
+ *
+ */
+
+static VALUE
+rb_ary_intersect_p(VALUE ary1, VALUE ary2)
+{
+ VALUE hash, v, result, shorter, longer;
+ st_data_t vv;
+ long i;
+
+ ary2 = to_ary(ary2);
+ if (RARRAY_LEN(ary1) == 0 || RARRAY_LEN(ary2) == 0) return Qfalse;
+
+ if (RARRAY_LEN(ary1) <= SMALL_ARRAY_LEN && RARRAY_LEN(ary2) <= SMALL_ARRAY_LEN) {
+ for (i=0; i<RARRAY_LEN(ary1); i++) {
+ v = RARRAY_AREF(ary1, i);
+ if (rb_ary_includes_by_eql(ary2, v)) return Qtrue;
+ }
+ return Qfalse;
+ }
+
+ shorter = ary1;
+ longer = ary2;
+ if (RARRAY_LEN(ary1) > RARRAY_LEN(ary2)) {
+ longer = ary1;
+ shorter = ary2;
+ }
+
+ hash = ary_make_hash(shorter);
+ result = Qfalse;
+
+ for (i=0; i<RARRAY_LEN(longer); i++) {
+ v = RARRAY_AREF(longer, i);
+ vv = (st_data_t)v;
+ if (rb_hash_stlike_lookup(hash, vv, 0)) {
+ result = Qtrue;
+ break;
+ }
+ }
+ ary_recycle_hash(hash);
+
+ return result;
+}
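Note the strategy in rb_ary_intersect_p: the hash is always built from the
shorter array, so the table stays as small as possible while the longer
array is only scanned. A hedged Ruby sketch of the same idea:

    def intersect_sketch?(a, b)
      shorter, longer = a.length <= b.length ? [a, b] : [b, a]
      seen = shorter.to_h { |e| [e, true] }  # hash the shorter input
      longer.any? { |e| seen.key?(e) }       # probe with the longer one
    end

    intersect_sketch?([1, 2, 3], [3, 4, 5]) # => true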
+
+static VALUE
+ary_max_generic(VALUE ary, long i, VALUE vmax)
+{
+ RUBY_ASSERT(i > 0 && i < RARRAY_LEN(ary));
+
+ VALUE v;
+ for (; i < RARRAY_LEN(ary); ++i) {
+ v = RARRAY_AREF(ary, i);
+
+ if (rb_cmpint(rb_funcallv(vmax, id_cmp, 1, &v), vmax, v) < 0) {
+ vmax = v;
+ }
+ }
+
+ return vmax;
+}
+
+static VALUE
+ary_max_opt_fixnum(VALUE ary, long i, VALUE vmax)
+{
+ const long n = RARRAY_LEN(ary);
+ RUBY_ASSERT(i > 0 && i < n);
+ RUBY_ASSERT(FIXNUM_P(vmax));
+
+ VALUE v;
+ for (; i < n; ++i) {
+ v = RARRAY_AREF(ary, i);
+
+ if (FIXNUM_P(v)) {
+ if ((long)vmax < (long)v) {
+ vmax = v;
+ }
+ }
+ else {
+ return ary_max_generic(ary, i, vmax);
+ }
+ }
+
+ return vmax;
+}
+
+static VALUE
+ary_max_opt_float(VALUE ary, long i, VALUE vmax)
+{
+ const long n = RARRAY_LEN(ary);
+ RUBY_ASSERT(i > 0 && i < n);
+ RUBY_ASSERT(RB_FLOAT_TYPE_P(vmax));
+
+ VALUE v;
+ for (; i < n; ++i) {
+ v = RARRAY_AREF(ary, i);
+
+ if (RB_FLOAT_TYPE_P(v)) {
+ if (rb_float_cmp(vmax, v) < 0) {
+ vmax = v;
+ }
+ }
+ else {
+ return ary_max_generic(ary, i, vmax);
+ }
+ }
+
+ return vmax;
+}
+
+static VALUE
+ary_max_opt_string(VALUE ary, long i, VALUE vmax)
+{
+ const long n = RARRAY_LEN(ary);
+ RUBY_ASSERT(i > 0 && i < n);
+ RUBY_ASSERT(STRING_P(vmax));
+
+ VALUE v;
+ for (; i < n; ++i) {
+ v = RARRAY_AREF(ary, i);
+
+ if (STRING_P(v)) {
+ if (rb_str_cmp(vmax, v) < 0) {
+ vmax = v;
+ }
+ }
+ else {
+ return ary_max_generic(ary, i, vmax);
+ }
+ }
+
+ return vmax;
+}
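These ary_max_opt_* helpers are monomorphic fast paths: each keeps scanning
with a cheap native comparison while every element matches the type of the
first, and falls back to ary_max_generic (full <=> dispatch) at the first
element of another type. A rough Ruby sketch of that shape, using Integer
as the specialized type:

    def max_sketch(ary)
      return nil if ary.empty?
      best = ary[0]
      (1...ary.length).each do |i|
        v = ary[i]
        unless best.is_a?(Integer) && v.is_a?(Integer)
          # generic fallback over the rest, mirroring ary_max_generic
          return ary.drop(i).inject(best) { |m, e| (m <=> e) < 0 ? e : m }
        end
        best = v if v > best   # cheap same-type compare
      end
      best
    end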
+
+/*
+ * call-seq:
+ * array.max -> element
+ * array.max {|a, b| ... } -> element
+ * array.max(n) -> new_array
+ * array.max(n) {|a, b| ... } -> new_array
+ *
+ * Returns one of the following:
+ *
+ * - The maximum-valued element from +self+.
+ * - A new \Array of maximum-valued elements selected from +self+.
+ *
+ * When no block is given, each element in +self+ must respond to method <tt><=></tt>
+ * with an \Integer.
+ *
+ * With no argument and no block, returns the element in +self+
+ * having the maximum value per method <tt><=></tt>:
+ *
+ * [0, 1, 2].max # => 2
+ *
+ * With an argument \Integer +n+ and no block, returns a new \Array with at most +n+ elements,
+ * in descending order per method <tt><=></tt>:
+ *
+ * [0, 1, 2, 3].max(3) # => [3, 2, 1]
+ * [0, 1, 2, 3].max(6) # => [3, 2, 1, 0]
+ *
+ * When a block is given, the block must return an \Integer.
+ *
+ * With a block and no argument, calls the block <tt>self.size-1</tt> times to compare elements;
+ * returns the element having the maximum value per the block:
*
- * Returns the object in _ary_ with the maximum value. The
- * first form assumes all objects implement <code><=></code>;
- * the second uses the block to return <em>a <=> b</em>.
+ * ['0', '00', '000'].max {|a, b| a.size <=> b.size } # => "000"
*
- * ary = %w(albatross dog horse)
- * ary.max #=> "horse"
- * ary.max {|a, b| a.length <=> b.length} #=> "albatross"
+ * With an argument +n+ and a block, returns a new \Array with at most +n+ elements,
+ * in descending order per the block:
*
- * If the +n+ argument is given, maximum +n+ elements are returned
- * as an array.
+ * ['0', '00', '000'].max(2) {|a, b| a.size <=> b.size } # => ["000", "00"]
*
- * ary = %w[albatross dog horse]
- * ary.max(2) #=> ["horse", "dog"]
- * ary.max(2) {|a, b| a.length <=> b.length } #=> ["albatross", "horse"]
*/
static VALUE
rb_ary_max(int argc, VALUE *argv, VALUE ary)
{
- struct cmp_opt_data cmp_opt = { 0, 0 };
VALUE result = Qundef, v;
VALUE num;
long i;
@@ -4957,52 +6067,167 @@ rb_ary_max(int argc, VALUE *argv, VALUE ary)
if (rb_check_arity(argc, 0, 1) && !NIL_P(num = argv[0]))
return rb_nmin_run(ary, num, 0, 1, 1);
+ const long n = RARRAY_LEN(ary);
if (rb_block_given_p()) {
- for (i = 0; i < RARRAY_LEN(ary); i++) {
- v = RARRAY_AREF(ary, i);
- if (result == Qundef || rb_cmpint(rb_yield_values(2, v, result), v, result) > 0) {
- result = v;
- }
- }
+ for (i = 0; i < RARRAY_LEN(ary); i++) {
+ v = RARRAY_AREF(ary, i);
+ if (UNDEF_P(result) || rb_cmpint(rb_yield_values(2, v, result), v, result) > 0) {
+ result = v;
+ }
+ }
}
- else {
- for (i = 0; i < RARRAY_LEN(ary); i++) {
- v = RARRAY_AREF(ary, i);
- if (result == Qundef || OPTIMIZED_CMP(v, result, cmp_opt) > 0) {
- result = v;
- }
- }
- }
- if (result == Qundef) return Qnil;
+ else if (n > 0) {
+ result = RARRAY_AREF(ary, 0);
+ if (n > 1) {
+ if (FIXNUM_P(result) && CMP_OPTIMIZABLE(INTEGER)) {
+ return ary_max_opt_fixnum(ary, 1, result);
+ }
+ else if (STRING_P(result) && CMP_OPTIMIZABLE(STRING)) {
+ return ary_max_opt_string(ary, 1, result);
+ }
+ else if (RB_FLOAT_TYPE_P(result) && CMP_OPTIMIZABLE(FLOAT)) {
+ return ary_max_opt_float(ary, 1, result);
+ }
+ else {
+ return ary_max_generic(ary, 1, result);
+ }
+ }
+ }
+ if (UNDEF_P(result)) return Qnil;
return result;
}
+static VALUE
+ary_min_generic(VALUE ary, long i, VALUE vmin)
+{
+ RUBY_ASSERT(i > 0 && i < RARRAY_LEN(ary));
+
+ VALUE v;
+ for (; i < RARRAY_LEN(ary); ++i) {
+ v = RARRAY_AREF(ary, i);
+
+ if (rb_cmpint(rb_funcallv(vmin, id_cmp, 1, &v), vmin, v) > 0) {
+ vmin = v;
+ }
+ }
+
+ return vmin;
+}
+
+static VALUE
+ary_min_opt_fixnum(VALUE ary, long i, VALUE vmin)
+{
+ const long n = RARRAY_LEN(ary);
+ RUBY_ASSERT(i > 0 && i < n);
+ RUBY_ASSERT(FIXNUM_P(vmin));
+
+ VALUE a;
+ for (; i < n; ++i) {
+ a = RARRAY_AREF(ary, i);
+
+ if (FIXNUM_P(a)) {
+ if ((long)vmin > (long)a) {
+ vmin = a;
+ }
+ }
+ else {
+ return ary_min_generic(ary, i, vmin);
+ }
+ }
+
+ return vmin;
+}
+
+static VALUE
+ary_min_opt_float(VALUE ary, long i, VALUE vmin)
+{
+ const long n = RARRAY_LEN(ary);
+ RUBY_ASSERT(i > 0 && i < n);
+ RUBY_ASSERT(RB_FLOAT_TYPE_P(vmin));
+
+ VALUE a;
+ for (; i < n; ++i) {
+ a = RARRAY_AREF(ary, i);
+
+ if (RB_FLOAT_TYPE_P(a)) {
+ if (rb_float_cmp(vmin, a) > 0) {
+ vmin = a;
+ }
+ }
+ else {
+ return ary_min_generic(ary, i, vmin);
+ }
+ }
+
+ return vmin;
+}
+
+static VALUE
+ary_min_opt_string(VALUE ary, long i, VALUE vmin)
+{
+ const long n = RARRAY_LEN(ary);
+ RUBY_ASSERT(i > 0 && i < n);
+ RUBY_ASSERT(STRING_P(vmin));
+
+ VALUE a;
+ for (; i < n; ++i) {
+ a = RARRAY_AREF(ary, i);
+
+ if (STRING_P(a)) {
+ if (rb_str_cmp(vmin, a) > 0) {
+ vmin = a;
+ }
+ }
+ else {
+ return ary_min_generic(ary, i, vmin);
+ }
+ }
+
+ return vmin;
+}
+
/*
* call-seq:
- * ary.min -> obj
- * ary.min {| a,b | block } -> obj
- * ary.min(n) -> array
- * ary.min(n) {| a,b | block } -> array
+ * array.min -> element
+ * array.min { |a, b| ... } -> element
+ * array.min(n) -> new_array
+ * array.min(n) { |a, b| ... } -> new_array
+ *
+ * Returns one of the following:
+ *
+ * - The minimum-valued element from +self+.
+ * - A new \Array of minimum-valued elements selected from +self+.
+ *
+ * When no block is given, each element in +self+ must respond to method <tt><=></tt>
+ * with an \Integer.
+ *
+ * With no argument and no block, returns the element in +self+
+ * having the minimum value per method <tt><=></tt>:
+ *
+ * [0, 1, 2].min # => 0
*
- * Returns the object in _ary_ with the minimum value. The
- * first form assumes all objects implement <code><=></code>;
- * the second uses the block to return <em>a <=> b</em>.
+ * With \Integer argument +n+ and no block, returns a new \Array with at most +n+ elements,
+ * in ascending order per method <tt><=></tt>:
*
- * ary = %w(albatross dog horse)
- * ary.min #=> "albatross"
- * ary.min {|a, b| a.length <=> b.length} #=> "dog"
+ * [0, 1, 2, 3].min(3) # => [0, 1, 2]
+ * [0, 1, 2, 3].min(6) # => [0, 1, 2, 3]
*
- * If the +n+ argument is given, minimum +n+ elements are returned
- * as an array.
+ * When a block is given, the block must return an \Integer.
+ *
+ * With a block and no argument, calls the block <tt>self.size-1</tt> times to compare elements;
+ * returns the element having the minimum value per the block:
+ *
+ * ['0', '00', '000'].min { |a, b| a.size <=> b.size } # => "0"
+ *
+ * With an argument +n+ and a block, returns a new \Array with at most +n+ elements,
+ * in ascending order per the block:
+ *
+ * ['0', '00', '000'].min(2) {|a, b| a.size <=> b.size } # => ["0", "00"]
*
- * ary = %w[albatross dog horse]
- * ary.min(2) #=> ["albatross", "dog"]
- * ary.min(2) {|a, b| a.length <=> b.length } #=> ["dog", "horse"]
*/
static VALUE
rb_ary_min(int argc, VALUE *argv, VALUE ary)
{
- struct cmp_opt_data cmp_opt = { 0, 0 };
VALUE result = Qundef, v;
VALUE num;
long i;
@@ -5010,36 +6235,58 @@ rb_ary_min(int argc, VALUE *argv, VALUE ary)
if (rb_check_arity(argc, 0, 1) && !NIL_P(num = argv[0]))
return rb_nmin_run(ary, num, 0, 0, 1);
+ const long n = RARRAY_LEN(ary);
if (rb_block_given_p()) {
- for (i = 0; i < RARRAY_LEN(ary); i++) {
- v = RARRAY_AREF(ary, i);
- if (result == Qundef || rb_cmpint(rb_yield_values(2, v, result), v, result) < 0) {
- result = v;
- }
- }
+ for (i = 0; i < RARRAY_LEN(ary); i++) {
+ v = RARRAY_AREF(ary, i);
+ if (UNDEF_P(result) || rb_cmpint(rb_yield_values(2, v, result), v, result) < 0) {
+ result = v;
+ }
+ }
}
- else {
- for (i = 0; i < RARRAY_LEN(ary); i++) {
- v = RARRAY_AREF(ary, i);
- if (result == Qundef || OPTIMIZED_CMP(v, result, cmp_opt) < 0) {
- result = v;
- }
- }
- }
- if (result == Qundef) return Qnil;
+ else if (n > 0) {
+ result = RARRAY_AREF(ary, 0);
+ if (n > 1) {
+ if (FIXNUM_P(result) && CMP_OPTIMIZABLE(INTEGER)) {
+ return ary_min_opt_fixnum(ary, 1, result);
+ }
+ else if (STRING_P(result) && CMP_OPTIMIZABLE(STRING)) {
+ return ary_min_opt_string(ary, 1, result);
+ }
+ else if (RB_FLOAT_TYPE_P(result) && CMP_OPTIMIZABLE(FLOAT)) {
+ return ary_min_opt_float(ary, 1, result);
+ }
+ else {
+ return ary_min_generic(ary, 1, result);
+ }
+ }
+ }
+ if (UNDEF_P(result)) return Qnil;
return result;
}
/*
* call-seq:
- * ary.minmax -> [obj, obj]
- * ary.minmax {| a,b | block } -> [obj, obj]
+ * array.minmax -> [min_val, max_val]
+ * array.minmax {|a, b| ... } -> [min_val, max_val]
+ *
+ * Returns a new 2-element \Array containing the minimum and maximum values
+ * from +self+, either per method <tt><=></tt> or per a given block.
+ *
+ * When no block is given, each element in +self+ must respond to method <tt><=></tt>
+ * with an \Integer;
+ * returns a new 2-element \Array containing the minimum and maximum values
+ * from +self+, per method <tt><=></tt>:
+ *
+ * [0, 1, 2].minmax # => [0, 2]
*
- * Returns a two element array which contains the minimum and the
- * maximum value in the array.
+ * When a block is given, the block must return an \Integer;
+ * the block is called <tt>self.size-1</tt> times to compare elements;
+ * returns a new 2-element \Array containing the minimum and maximum values
+ * from +self+, per the block:
+ *
+ * ['0', '00', '000'].minmax {|a, b| a.size <=> b.size } # => ["0", "000"]
*
- * Can be given an optional block to override the default comparison
- * method <code>a <=> b</code>.
*/
static VALUE
rb_ary_minmax(VALUE ary)
@@ -5059,31 +6306,33 @@ push_value(st_data_t key, st_data_t val, st_data_t ary)
/*
* call-seq:
- * ary.uniq! -> ary or nil
- * ary.uniq! {|item| ...} -> ary or nil
+ * array.uniq! -> self or nil
+ * array.uniq! {|element| ... } -> self or nil
*
- * Removes duplicate elements from +self+.
+ * Removes duplicate elements from +self+, the first occurrence always being retained;
+ * returns +self+ if any elements removed, +nil+ otherwise.
*
- * If a block is given, it will use the return value of the block for
- * comparison.
+ * With no block given, identifies and removes elements using method <tt>eql?</tt>
+ * to compare.
*
- * It compares values using their #hash and #eql? methods for efficiency.
+ * Returns +self+ if any elements removed:
*
- * +self+ is traversed in order, and the first occurrence is kept.
+ * a = [0, 0, 1, 1, 2, 2]
+ * a.uniq! # => [0, 1, 2]
*
- * Returns +nil+ if no changes are made (that is, no duplicates are found).
+ * Returns +nil+ if no elements removed.
*
- * a = [ "a", "a", "b", "b", "c" ]
- * a.uniq! # => ["a", "b", "c"]
+ * With a block given, calls the block for each element;
+ * identifies (using method <tt>eql?</tt>) and removes
+ * elements for which the block returns duplicate values.
*
- * b = [ "a", "b", "c" ]
- * b.uniq! # => nil
+ * Returns +self+ if any elements removed:
*
- * c = [["student","sam"], ["student","george"], ["teacher","matz"]]
- * c.uniq! {|s| s.first} # => [["student", "sam"], ["teacher", "matz"]]
+ * a = ['a', 'aa', 'aaa', 'b', 'bb', 'bbb']
+ *    a.uniq! {|element| element.size } # => ["a", "aa", "aaa"]
*
+ * Returns +nil+ if no elements removed.
*/
-
static VALUE
rb_ary_uniq_bang(VALUE ary)
{
@@ -5094,19 +6343,19 @@ rb_ary_uniq_bang(VALUE ary)
if (RARRAY_LEN(ary) <= 1)
return Qnil;
if (rb_block_given_p())
- hash = ary_make_hash_by(ary);
+ hash = ary_make_hash_by(ary);
else
- hash = ary_make_hash(ary);
+ hash = ary_make_hash(ary);
hash_size = RHASH_SIZE(hash);
if (RARRAY_LEN(ary) == hash_size) {
- return Qnil;
+ return Qnil;
}
rb_ary_modify_check(ary);
ARY_SET_LEN(ary, 0);
if (ARY_SHARED_P(ary) && !ARY_EMBED_P(ary)) {
- rb_ary_unshare(ary);
- FL_SET_EMBED(ary);
+ rb_ary_unshare(ary);
+ FL_SET_EMBED(ary);
}
ary_resize_capa(ary, hash_size);
rb_hash_foreach(hash, push_value, ary);
@@ -5117,22 +6366,24 @@ rb_ary_uniq_bang(VALUE ary)
/*
* call-seq:
- * ary.uniq -> new_ary
- * ary.uniq {|item| ...} -> new_ary
- *
- * Returns a new array by removing duplicate values in +self+.
+ * array.uniq -> new_array
+ * array.uniq {|element| ... } -> new_array
*
- * If a block is given, it will use the return value of the block for comparison.
+ * Returns a new \Array containing those elements from +self+ that are not duplicates,
+ * the first occurrence always being retained.
*
- * It compares values using their #hash and #eql? methods for efficiency.
+ * With no block given, identifies and omits duplicates using method <tt>eql?</tt>
+ * to compare:
*
- * +self+ is traversed in order, and the first occurrence is kept.
+ * a = [0, 0, 1, 1, 2, 2]
+ * a.uniq # => [0, 1, 2]
*
- * a = [ "a", "a", "b", "b", "c" ]
- * a.uniq # => ["a", "b", "c"]
+ * With a block given, calls the block for each element;
+ * identifies (using method <tt>eql?</tt>) and omits duplicate values,
+ * that is, those elements for which the block returns the same value:
*
- * b = [["student","sam"], ["student","george"], ["teacher","matz"]]
- * b.uniq {|s| s.first} # => [["student", "sam"], ["teacher", "matz"]]
+ * a = ['a', 'aa', 'aaa', 'b', 'bb', 'bbb']
+ * a.uniq {|element| element.size } # => ["a", "aa", "aaa"]
*
*/
@@ -5146,14 +6397,13 @@ rb_ary_uniq(VALUE ary)
uniq = rb_ary_dup(ary);
}
else if (rb_block_given_p()) {
- hash = ary_make_hash_by(ary);
- uniq = rb_hash_values(hash);
+ hash = ary_make_hash_by(ary);
+ uniq = rb_hash_values(hash);
}
else {
- hash = ary_make_hash(ary);
- uniq = rb_hash_values(hash);
+ hash = ary_make_hash(ary);
+ uniq = rb_hash_values(hash);
}
- RBASIC_SET_CLASS(uniq, rb_obj_class(ary));
if (hash) {
ary_recycle_hash(hash);
}
@@ -5163,14 +6413,11 @@ rb_ary_uniq(VALUE ary)
/*
* call-seq:
- * ary.compact! -> ary or nil
+ * array.compact! -> self or nil
*
- * Removes +nil+ elements from the array.
+ * Removes all +nil+ elements from +self+.
*
- * Returns +nil+ if no changes were made, otherwise returns the array.
- *
- * [ "a", nil, "b", nil, "c" ].compact! #=> [ "a", "b", "c" ]
- * [ "a", "b", "c" ].compact! #=> nil
+ * Returns +self+ if any elements removed, otherwise +nil+:
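+ *
+ *    a = [nil, 0, nil, 1, nil, 2, nil]
+ *    a.compact! # => [0, 1, 2]
+ *    a          # => [0, 1, 2]
+ *    [0, 1, 2].compact! # => nil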
*/
static VALUE
@@ -5184,12 +6431,12 @@ rb_ary_compact_bang(VALUE ary)
end = p + RARRAY_LEN(ary);
while (t < end) {
- if (NIL_P(*t)) t++;
- else *p++ = *t++;
+ if (NIL_P(*t)) t++;
+ else *p++ = *t++;
}
n = p - RARRAY_CONST_PTR_TRANSIENT(ary);
if (RARRAY_LEN(ary) == n) {
- return Qnil;
+ return Qnil;
}
ary_resize_smaller(ary, n);
@@ -5198,12 +6445,12 @@ rb_ary_compact_bang(VALUE ary)
/*
* call-seq:
- * ary.compact -> new_ary
+ * array.compact -> new_array
*
- * Returns a copy of +self+ with all +nil+ elements removed.
+ * Returns a new \Array containing all non-+nil+ elements from +self+:
*
- * [ "a", nil, "b", nil, "c", nil ].compact
- * #=> [ "a", "b", "c" ]
+ * a = [nil, 0, nil, 1, nil, 2, nil]
+ * a.compact # => [0, 1, 2]
*/
static VALUE
@@ -5216,23 +6463,29 @@ rb_ary_compact(VALUE ary)
/*
* call-seq:
- * ary.count -> int
- * ary.count(obj) -> int
- * ary.count {|item| block} -> int
+ * array.count -> an_integer
+ * array.count(obj) -> an_integer
+ * array.count {|element| ... } -> an_integer
+ *
+ * Returns a count of specified elements.
+ *
+ * With no argument and no block, returns the count of all elements:
+ *
+ * [0, 1, 2].count # => 3
+ * [].count # => 0
*
- * Returns the number of elements.
+ * With argument +obj+, returns the count of elements <tt>==</tt> to +obj+:
*
- * If an argument is given, counts the number of elements which equal +obj+
- * using <code>==</code>.
+ * [0, 1, 2, 0.0].count(0) # => 2
+ * [0, 1, 2].count(3) # => 0
*
- * If a block is given, counts the number of elements for which the block
- * returns a true value.
+ * With no argument and a block given, calls the block with each element;
+ * returns the count of elements for which the block returns a truthy value:
*
- * ary = [1, 2, 4, 2]
- * ary.count #=> 4
- * ary.count(2) #=> 2
- * ary.count {|x| x%2 == 0} #=> 3
+ * [0, 1, 2, 3].count {|element| element > 1} # => 2
*
+ * With argument +obj+ and a block given, issues a warning, ignores the block,
+ * and returns the count of elements <tt>==</tt> to +obj+.
*/
static VALUE
@@ -5241,25 +6494,25 @@ rb_ary_count(int argc, VALUE *argv, VALUE ary)
long i, n = 0;
if (rb_check_arity(argc, 0, 1) == 0) {
- VALUE v;
+ VALUE v;
- if (!rb_block_given_p())
- return LONG2NUM(RARRAY_LEN(ary));
+ if (!rb_block_given_p())
+ return LONG2NUM(RARRAY_LEN(ary));
- for (i = 0; i < RARRAY_LEN(ary); i++) {
- v = RARRAY_AREF(ary, i);
- if (RTEST(rb_yield(v))) n++;
- }
+ for (i = 0; i < RARRAY_LEN(ary); i++) {
+ v = RARRAY_AREF(ary, i);
+ if (RTEST(rb_yield(v))) n++;
+ }
}
else {
VALUE obj = argv[0];
- if (rb_block_given_p()) {
- rb_warn("given block not used");
- }
- for (i = 0; i < RARRAY_LEN(ary); i++) {
- if (rb_equal(RARRAY_AREF(ary, i), obj)) n++;
- }
+ if (rb_block_given_p()) {
+ rb_warn("given block not used");
+ }
+ for (i = 0; i < RARRAY_LEN(ary); i++) {
+ if (rb_equal(RARRAY_AREF(ary, i), obj)) n++;
+ }
}
return LONG2NUM(n);
@@ -5270,7 +6523,7 @@ flatten(VALUE ary, int level)
{
long i;
VALUE stack, result, tmp = 0, elt, vmemo;
- st_table *memo;
+ st_table *memo = 0;
st_data_t id;
for (i = 0; i < RARRAY_LEN(ary); i++) {
@@ -5282,8 +6535,6 @@ flatten(VALUE ary, int level)
}
if (i == RARRAY_LEN(ary)) {
return ary;
- } else if (tmp == ary) {
- rb_raise(rb_eArgError, "tried to flatten recursive array");
}
result = ary_new(0, RARRAY_LEN(ary));
@@ -5294,79 +6545,100 @@ flatten(VALUE ary, int level)
rb_ary_push(stack, ary);
rb_ary_push(stack, LONG2NUM(i + 1));
- vmemo = rb_hash_new();
- RBASIC_CLEAR_CLASS(vmemo);
- memo = st_init_numtable();
- rb_hash_st_table_set(vmemo, memo);
- st_insert(memo, (st_data_t)ary, (st_data_t)Qtrue);
- st_insert(memo, (st_data_t)tmp, (st_data_t)Qtrue);
+ if (level < 0) {
+ vmemo = rb_hash_new();
+ RBASIC_CLEAR_CLASS(vmemo);
+ memo = st_init_numtable();
+ rb_hash_st_table_set(vmemo, memo);
+ st_insert(memo, (st_data_t)ary, (st_data_t)Qtrue);
+ st_insert(memo, (st_data_t)tmp, (st_data_t)Qtrue);
+ }
ary = tmp;
i = 0;
while (1) {
- while (i < RARRAY_LEN(ary)) {
- elt = RARRAY_AREF(ary, i++);
- if (level >= 0 && RARRAY_LEN(stack) / 2 >= level) {
- rb_ary_push(result, elt);
- continue;
- }
- tmp = rb_check_array_type(elt);
- if (RBASIC(result)->klass) {
- RB_GC_GUARD(vmemo);
- st_clear(memo);
- rb_raise(rb_eRuntimeError, "flatten reentered");
- }
- if (NIL_P(tmp)) {
- rb_ary_push(result, elt);
- }
- else {
- id = (st_data_t)tmp;
- if (st_is_member(memo, id)) {
+ while (i < RARRAY_LEN(ary)) {
+ elt = RARRAY_AREF(ary, i++);
+ if (level >= 0 && RARRAY_LEN(stack) / 2 >= level) {
+ rb_ary_push(result, elt);
+ continue;
+ }
+ tmp = rb_check_array_type(elt);
+ if (RBASIC(result)->klass) {
+ if (memo) {
+ RB_GC_GUARD(vmemo);
st_clear(memo);
- rb_raise(rb_eArgError, "tried to flatten recursive array");
- }
- st_insert(memo, id, (st_data_t)Qtrue);
- rb_ary_push(stack, ary);
- rb_ary_push(stack, LONG2NUM(i));
- ary = tmp;
- i = 0;
- }
- }
- if (RARRAY_LEN(stack) == 0) {
- break;
- }
- id = (st_data_t)ary;
- st_delete(memo, &id, 0);
- tmp = rb_ary_pop(stack);
- i = NUM2LONG(tmp);
- ary = rb_ary_pop(stack);
- }
-
- st_clear(memo);
-
- RBASIC_SET_CLASS(result, rb_obj_class(ary));
+ }
+ rb_raise(rb_eRuntimeError, "flatten reentered");
+ }
+ if (NIL_P(tmp)) {
+ rb_ary_push(result, elt);
+ }
+ else {
+ if (memo) {
+ id = (st_data_t)tmp;
+ if (st_is_member(memo, id)) {
+ st_clear(memo);
+ rb_raise(rb_eArgError, "tried to flatten recursive array");
+ }
+ st_insert(memo, id, (st_data_t)Qtrue);
+ }
+ rb_ary_push(stack, ary);
+ rb_ary_push(stack, LONG2NUM(i));
+ ary = tmp;
+ i = 0;
+ }
+ }
+ if (RARRAY_LEN(stack) == 0) {
+ break;
+ }
+ if (memo) {
+ id = (st_data_t)ary;
+ st_delete(memo, &id, 0);
+ }
+ tmp = rb_ary_pop(stack);
+ i = NUM2LONG(tmp);
+ ary = rb_ary_pop(stack);
+ }
+
+ if (memo) {
+ st_clear(memo);
+ }
+
+ RBASIC_SET_CLASS(result, rb_cArray);
return result;
}
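The rewritten flatten above only allocates the memo table (used to detect
self-referencing arrays) when level < 0, since a depth-bounded flatten
cannot loop forever. A hedged Ruby sketch of the iterative walk with its
explicit (array, index) stack:

    def flatten_sketch(ary, level = -1)
      result, stack = [], []
      seen = level < 0 ? { ary.object_id => true } : nil
      i = 0
      loop do
        while i < ary.length
          elt = ary[i]; i += 1
          if elt.is_a?(Array) && (level < 0 || stack.length / 2 < level)
            if seen  # cycle detection, only needed for unbounded depth
              raise ArgumentError, "tried to flatten recursive array" if seen[elt.object_id]
              seen[elt.object_id] = true
            end
            stack.push(ary, i)   # save the current frame
            ary, i = elt, 0      # descend into the nested array
          else
            result << elt
          end
        end
        break if stack.empty?
        seen&.delete(ary.object_id)
        i = stack.pop
        ary = stack.pop
      end
      result
    end

    flatten_sketch([0, [1, [2, 3], 4], 5], 1) # => [0, 1, [2, 3], 4, 5]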
/*
* call-seq:
- * ary.flatten! -> ary or nil
- * ary.flatten!(level) -> ary or nil
+ * array.flatten! -> self or nil
+ * array.flatten!(level) -> self or nil
*
- * Flattens +self+ in place.
+ * Replaces each nested \Array in +self+ with the elements from that \Array;
+ * returns +self+ if any changes, +nil+ otherwise.
*
- * Returns +nil+ if no modifications were made (i.e., the array contains no
- * subarrays.)
+ * With non-negative \Integer argument +level+, flattens recursively through +level+ levels:
*
- * The optional +level+ argument determines the level of recursion to flatten.
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten!(1) # => [0, 1, [2, 3], 4, 5]
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten!(2) # => [0, 1, 2, 3, 4, 5]
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten!(3) # => [0, 1, 2, 3, 4, 5]
+ * [0, 1, 2].flatten!(1) # => nil
+ *
+ * With no argument, a +nil+ argument, or with negative argument +level+, flattens all levels:
+ *
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten! # => [0, 1, 2, 3, 4, 5]
+ * [0, 1, 2].flatten! # => nil
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten!(-1) # => [0, 1, 2, 3, 4, 5]
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten!(-2) # => [0, 1, 2, 3, 4, 5]
+ * [0, 1, 2].flatten!(-1) # => nil
*
- * a = [ 1, 2, [3, [4, 5] ] ]
- * a.flatten! #=> [1, 2, 3, 4, 5]
- * a.flatten! #=> nil
- * a #=> [1, 2, 3, 4, 5]
- * a = [ 1, 2, [3, [4, 5] ] ]
- * a.flatten!(1) #=> [1, 2, 3, [4, 5]]
*/
static VALUE
@@ -5382,7 +6654,7 @@ rb_ary_flatten_bang(int argc, VALUE *argv, VALUE ary)
result = flatten(ary, level);
if (result == ary) {
- return Qnil;
+ return Qnil;
}
if (!(mod = ARY_EMBED_P(result))) rb_obj_freeze(result);
rb_ary_replace(ary, result);
@@ -5393,24 +6665,35 @@ rb_ary_flatten_bang(int argc, VALUE *argv, VALUE ary)
/*
* call-seq:
- * ary.flatten -> new_ary
- * ary.flatten(level) -> new_ary
- *
- * Returns a new array that is a one-dimensional flattening of +self+
- * (recursively).
- *
- * That is, for every element that is an array, extract its elements into
- * the new array.
- *
- * The optional +level+ argument determines the level of recursion to
- * flatten.
+ * array.flatten -> new_array
+ * array.flatten(level) -> new_array
+ *
+ * Returns a new \Array that is a recursive flattening of +self+:
+ * - Each non-Array element is unchanged.
+ * - Each \Array is replaced by its individual elements.
+ *
+ * With non-negative \Integer argument +level+, flattens recursively through +level+ levels:
+ *
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten(0) # => [0, [1, [2, 3], 4], 5]
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten(1) # => [0, 1, [2, 3], 4, 5]
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten(2) # => [0, 1, 2, 3, 4, 5]
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten(3) # => [0, 1, 2, 3, 4, 5]
+ *
+ * With no argument, a +nil+ argument, or with negative argument +level+, flattens all levels:
+ *
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten # => [0, 1, 2, 3, 4, 5]
+ * [0, 1, 2].flatten # => [0, 1, 2]
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten(-1) # => [0, 1, 2, 3, 4, 5]
+ * a = [ 0, [ 1, [2, 3], 4 ], 5 ]
+ * a.flatten(-2) # => [0, 1, 2, 3, 4, 5]
+ * [0, 1, 2].flatten(-1) # => [0, 1, 2]
*
- * s = [ 1, 2, 3 ] #=> [1, 2, 3]
- * t = [ 4, 5, 6, [7, 8] ] #=> [4, 5, 6, [7, 8]]
- * a = [ s, t, 9, 10 ] #=> [[1, 2, 3], [4, 5, 6, [7, 8]], 9, 10]
- * a.flatten #=> [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
- * a = [ 1, 2, [3, [4, 5] ] ]
- * a.flatten(1) #=> [1, 2, 3, [4, 5]]
*/
static VALUE
@@ -5442,16 +6725,16 @@ rb_ary_shuffle_bang(rb_execution_context_t *ec, VALUE ary, VALUE randgen)
rb_ary_modify(ary);
i = len = RARRAY_LEN(ary);
RARRAY_PTR_USE(ary, ptr, {
- while (i) {
- long j = RAND_UPTO(i);
- VALUE tmp;
+ while (i) {
+ long j = RAND_UPTO(i);
+ VALUE tmp;
if (len != RARRAY_LEN(ary) || ptr != RARRAY_CONST_PTR_TRANSIENT(ary)) {
rb_raise(rb_eRuntimeError, "modified during shuffle");
- }
- tmp = ptr[--i];
- ptr[i] = ptr[j];
- ptr[j] = tmp;
- }
+ }
+ tmp = ptr[--i];
+ ptr[i] = ptr[j];
+ ptr[j] = tmp;
+ }
}); /* WB: no new reference */
return ary;
}
@@ -5465,7 +6748,7 @@ rb_ary_shuffle(rb_execution_context_t *ec, VALUE ary, VALUE randgen)
}
static VALUE
-rb_ary_sample(rb_execution_context_t *ec, VALUE ary, VALUE randgen, VALUE nv, VALUE to_array)
+ary_sample(rb_execution_context_t *ec, VALUE ary, VALUE randgen, VALUE nv, VALUE to_array)
{
VALUE result;
long n, len, i, j, k, idx[10];
@@ -5474,120 +6757,120 @@ rb_ary_sample(rb_execution_context_t *ec, VALUE ary, VALUE randgen, VALUE nv, VA
len = RARRAY_LEN(ary);
if (!to_array) {
- if (len < 2)
- i = 0;
- else
- i = RAND_UPTO(len);
+ if (len < 2)
+ i = 0;
+ else
+ i = RAND_UPTO(len);
- return rb_ary_elt(ary, i);
+ return rb_ary_elt(ary, i);
}
n = NUM2LONG(nv);
if (n < 0) rb_raise(rb_eArgError, "negative sample number");
if (n > len) n = len;
if (n <= numberof(idx)) {
- for (i = 0; i < n; ++i) {
- rnds[i] = RAND_UPTO(len - i);
- }
+ for (i = 0; i < n; ++i) {
+ rnds[i] = RAND_UPTO(len - i);
+ }
}
k = len;
len = RARRAY_LEN(ary);
if (len < k && n <= numberof(idx)) {
- for (i = 0; i < n; ++i) {
- if (rnds[i] >= len) return rb_ary_new_capa(0);
- }
+ for (i = 0; i < n; ++i) {
+ if (rnds[i] >= len) return rb_ary_new_capa(0);
+ }
}
if (n > len) n = len;
switch (n) {
case 0:
- return rb_ary_new_capa(0);
+ return rb_ary_new_capa(0);
case 1:
- i = rnds[0];
- return rb_ary_new_from_values(1, &RARRAY_AREF(ary, i));
+ i = rnds[0];
+ return rb_ary_new_from_args(1, RARRAY_AREF(ary, i));
case 2:
- i = rnds[0];
- j = rnds[1];
- if (j >= i) j++;
- return rb_ary_new_from_args(2, RARRAY_AREF(ary, i), RARRAY_AREF(ary, j));
+ i = rnds[0];
+ j = rnds[1];
+ if (j >= i) j++;
+ return rb_ary_new_from_args(2, RARRAY_AREF(ary, i), RARRAY_AREF(ary, j));
case 3:
- i = rnds[0];
- j = rnds[1];
- k = rnds[2];
- {
- long l = j, g = i;
- if (j >= i) l = i, g = ++j;
- if (k >= l && (++k >= g)) ++k;
- }
- return rb_ary_new_from_args(3, RARRAY_AREF(ary, i), RARRAY_AREF(ary, j), RARRAY_AREF(ary, k));
+ i = rnds[0];
+ j = rnds[1];
+ k = rnds[2];
+ {
+ long l = j, g = i;
+ if (j >= i) l = i, g = ++j;
+ if (k >= l && (++k >= g)) ++k;
+ }
+ return rb_ary_new_from_args(3, RARRAY_AREF(ary, i), RARRAY_AREF(ary, j), RARRAY_AREF(ary, k));
}
memo_threshold =
- len < 2560 ? len / 128 :
- len < 5120 ? len / 64 :
- len < 10240 ? len / 32 :
- len / 16;
+ len < 2560 ? len / 128 :
+ len < 5120 ? len / 64 :
+ len < 10240 ? len / 32 :
+ len / 16;
if (n <= numberof(idx)) {
- long sorted[numberof(idx)];
- sorted[0] = idx[0] = rnds[0];
- for (i=1; i<n; i++) {
- k = rnds[i];
- for (j = 0; j < i; ++j) {
- if (k < sorted[j]) break;
- ++k;
- }
- memmove(&sorted[j+1], &sorted[j], sizeof(sorted[0])*(i-j));
- sorted[j] = idx[i] = k;
- }
- result = rb_ary_new_capa(n);
+ long sorted[numberof(idx)];
+ sorted[0] = idx[0] = rnds[0];
+ for (i=1; i<n; i++) {
+ k = rnds[i];
+ for (j = 0; j < i; ++j) {
+ if (k < sorted[j]) break;
+ ++k;
+ }
+ memmove(&sorted[j+1], &sorted[j], sizeof(sorted[0])*(i-j));
+ sorted[j] = idx[i] = k;
+ }
+ result = rb_ary_new_capa(n);
RARRAY_PTR_USE_TRANSIENT(result, ptr_result, {
- for (i=0; i<n; i++) {
- ptr_result[i] = RARRAY_AREF(ary, idx[i]);
- }
- });
+ for (i=0; i<n; i++) {
+ ptr_result[i] = RARRAY_AREF(ary, idx[i]);
+ }
+ });
}
else if (n <= memo_threshold / 2) {
- long max_idx = 0;
+ long max_idx = 0;
#undef RUBY_UNTYPED_DATA_WARNING
#define RUBY_UNTYPED_DATA_WARNING 0
- VALUE vmemo = Data_Wrap_Struct(0, 0, st_free_table, 0);
- st_table *memo = st_init_numtable_with_size(n);
- DATA_PTR(vmemo) = memo;
- result = rb_ary_new_capa(n);
- RARRAY_PTR_USE(result, ptr_result, {
- for (i=0; i<n; i++) {
- long r = RAND_UPTO(len-i) + i;
- ptr_result[i] = r;
- if (r > max_idx) max_idx = r;
- }
- len = RARRAY_LEN(ary);
- if (len <= max_idx) n = 0;
- else if (n > len) n = len;
+ VALUE vmemo = Data_Wrap_Struct(0, 0, st_free_table, 0);
+ st_table *memo = st_init_numtable_with_size(n);
+ DATA_PTR(vmemo) = memo;
+ result = rb_ary_new_capa(n);
+ RARRAY_PTR_USE(result, ptr_result, {
+ for (i=0; i<n; i++) {
+ long r = RAND_UPTO(len-i) + i;
+ ptr_result[i] = r;
+ if (r > max_idx) max_idx = r;
+ }
+ len = RARRAY_LEN(ary);
+ if (len <= max_idx) n = 0;
+ else if (n > len) n = len;
RARRAY_PTR_USE_TRANSIENT(ary, ptr_ary, {
- for (i=0; i<n; i++) {
- long j2 = j = ptr_result[i];
- long i2 = i;
- st_data_t value;
- if (st_lookup(memo, (st_data_t)i, &value)) i2 = (long)value;
- if (st_lookup(memo, (st_data_t)j, &value)) j2 = (long)value;
- st_insert(memo, (st_data_t)j, (st_data_t)i2);
- ptr_result[i] = ptr_ary[j2];
- }
- });
- });
- DATA_PTR(vmemo) = 0;
- st_free_table(memo);
+ for (i=0; i<n; i++) {
+ long j2 = j = ptr_result[i];
+ long i2 = i;
+ st_data_t value;
+ if (st_lookup(memo, (st_data_t)i, &value)) i2 = (long)value;
+ if (st_lookup(memo, (st_data_t)j, &value)) j2 = (long)value;
+ st_insert(memo, (st_data_t)j, (st_data_t)i2);
+ ptr_result[i] = ptr_ary[j2];
+ }
+ });
+ });
+ DATA_PTR(vmemo) = 0;
+ st_free_table(memo);
}
else {
- result = rb_ary_dup(ary);
- RBASIC_CLEAR_CLASS(result);
- RB_GC_GUARD(ary);
- RARRAY_PTR_USE(result, ptr_result, {
- for (i=0; i<n; i++) {
- j = RAND_UPTO(len-i) + i;
- nv = ptr_result[j];
- ptr_result[j] = ptr_result[i];
- ptr_result[i] = nv;
- }
- });
- RBASIC_SET_CLASS_RAW(result, rb_cArray);
+ result = rb_ary_dup(ary);
+ RBASIC_CLEAR_CLASS(result);
+ RB_GC_GUARD(ary);
+ RARRAY_PTR_USE(result, ptr_result, {
+ for (i=0; i<n; i++) {
+ j = RAND_UPTO(len-i) + i;
+ nv = ptr_result[j];
+ ptr_result[j] = ptr_result[i];
+ ptr_result[i] = nv;
+ }
+ });
+ RBASIC_SET_CLASS_RAW(result, rb_cArray);
}
ARY_SET_LEN(result, n);
@@ -5595,15 +6878,21 @@ rb_ary_sample(rb_execution_context_t *ec, VALUE ary, VALUE randgen, VALUE nv, VA
}
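
The middle branch above (n <= memo_threshold / 2) is a sparse Fisher-Yates shuffle: rather than duplicating the whole array, it records swaps in an st_table so only the touched positions cost memory. A minimal standalone sketch of the idea (illustrative C only; rand_upto() is a hypothetical stand-in for RAND_UPTO, and a real version would back swaps[] with a hash map so storage is proportional to k, not len):

    #include <stdlib.h>

    /* Hypothetical helper: uniform integer in [0, n); modulo bias
     * is ignored for brevity. */
    static long rand_upto(long n) { return rand() % n; }

    /* Choose k distinct indices from [0, len) by running only the
     * first k steps of a Fisher-Yates shuffle.  swaps[] plays the
     * role of the st_table memo: it remembers what a full shuffle
     * would have left at each position. */
    static void sample_indices(long len, long k, long *out)
    {
        long *swaps = malloc(len * sizeof(long));
        for (long i = 0; i < len; i++) swaps[i] = i;
        for (long i = 0; i < k; i++) {
            long j = i + rand_upto(len - i);  /* pick from the untouched tail */
            long tmp = swaps[i];
            swaps[i] = swaps[j];
            swaps[j] = tmp;
            out[i] = swaps[i];
        }
        free(swaps);
    }
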
static VALUE
+ary_sample0(rb_execution_context_t *ec, VALUE ary)
+{
+ return ary_sample(ec, ary, rb_cRandom, Qfalse, Qfalse);
+}
+
+static VALUE
rb_ary_cycle_size(VALUE self, VALUE args, VALUE eobj)
{
long mul;
VALUE n = Qnil;
if (args && (RARRAY_LEN(args) > 0)) {
- n = RARRAY_AREF(args, 0);
+ n = RARRAY_AREF(args, 0);
}
if (RARRAY_LEN(self) == 0) return INT2FIX(0);
- if (n == Qnil) return DBL2NUM(HUGE_VAL);
+ if (NIL_P(n)) return DBL2NUM(HUGE_VAL);
mul = NUM2LONG(n);
if (mul <= 0) return INT2FIX(0);
n = LONG2FIX(mul);
@@ -5612,24 +6901,37 @@ rb_ary_cycle_size(VALUE self, VALUE args, VALUE eobj)
/*
* call-seq:
- * ary.cycle(n=nil) {|obj| block} -> nil
- * ary.cycle(n=nil) -> Enumerator
+ * array.cycle {|element| ... } -> nil
+ * array.cycle(count) {|element| ... } -> nil
+ * array.cycle -> new_enumerator
+ * array.cycle(count) -> new_enumerator
+ *
+ * When called with positive \Integer argument +count+ and a block,
+ * calls the block with each element, then does so again,
+ * until it has done so +count+ times; returns +nil+:
+ *
+ * output = []
+ * [0, 1].cycle(2) {|element| output.push(element) } # => nil
+ * output # => [0, 1, 0, 1]
*
- * Calls the given block for each element +n+ times or forever if +nil+ is
- * given.
+ * If +count+ is zero or negative, does not call the block:
*
- * Does nothing if a non-positive number is given or the array is empty.
+ * [0, 1].cycle(0) {|element| fail 'Cannot happen' } # => nil
+ * [0, 1].cycle(-1) {|element| fail 'Cannot happen' } # => nil
*
- * Returns +nil+ if the loop has finished without getting interrupted.
+ * When a block is given and the argument is omitted or +nil+, cycles forever:
*
- * If no block is given, an Enumerator is returned instead.
+ * # Prints 0 and 1 forever.
+ * [0, 1].cycle {|element| puts element }
+ * [0, 1].cycle(nil) {|element| puts element }
*
- * a = ["a", "b", "c"]
- * a.cycle {|x| puts x} # print, a, b, c, a, b, c,.. forever.
- * a.cycle(2) {|x| puts x} # print, a, b, c, a, b, c.
+ * When no block is given, returns a new \Enumerator:
+ *
+ * [0, 1].cycle(2) # => #<Enumerator: [0, 1]:cycle(2)>
+ * [0, 1].cycle # => #<Enumerator: [0, 1]:cycle>
+ * [0, 1].cycle.first(5) # => [0, 1, 0, 1, 0]
*
*/
-
static VALUE
rb_ary_cycle(int argc, VALUE *argv, VALUE ary)
{
@@ -5654,9 +6956,6 @@ rb_ary_cycle(int argc, VALUE *argv, VALUE ary)
return Qnil;
}
-#define tmpary(n) rb_ary_tmp_new(n)
-#define tmpary_discard(a) (ary_discard(a), RBASIC_SET_CLASS_RAW(a, rb_cArray))
-
/*
* Build a ruby array of the corresponding values and yield it to the
* associated block.
@@ -5692,52 +6991,52 @@ permute0(const long n, const long r, long *const p, char *const used, const VALU
long i = 0, index = 0;
for (;;) {
- const char *const unused = memchr(&used[i], 0, n-i);
- if (!unused) {
- if (!index) break;
- i = p[--index]; /* pop index */
- used[i++] = 0; /* index unused */
- }
- else {
- i = unused - used;
- p[index] = i;
- used[i] = 1; /* mark index used */
- ++index;
- if (index < r-1) { /* if not done yet */
- p[index] = i = 0;
- continue;
- }
- for (i = 0; i < n; ++i) {
- if (used[i]) continue;
- p[index] = i;
- if (!yield_indexed_values(values, r, p)) {
- rb_raise(rb_eRuntimeError, "permute reentered");
- }
- }
- i = p[--index]; /* pop index */
- used[i] = 0; /* index unused */
- p[index] = ++i;
- }
+ const char *const unused = memchr(&used[i], 0, n-i);
+ if (!unused) {
+ if (!index) break;
+ i = p[--index]; /* pop index */
+ used[i++] = 0; /* index unused */
+ }
+ else {
+ i = unused - used;
+ p[index] = i;
+ used[i] = 1; /* mark index used */
+ ++index;
+ if (index < r-1) { /* if not done yet */
+ p[index] = i = 0;
+ continue;
+ }
+ for (i = 0; i < n; ++i) {
+ if (used[i]) continue;
+ p[index] = i;
+ if (!yield_indexed_values(values, r, p)) {
+ rb_raise(rb_eRuntimeError, "permute reentered");
+ }
+ }
+ i = p[--index]; /* pop index */
+ used[i] = 0; /* index unused */
+ p[index] = ++i;
+ }
}
}
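
permute0 drives this enumeration without recursion: p[] is an explicit stack of chosen indices, used[] marks taken source positions, and memchr() locates the next free slot when backtracking. A recursive sketch of the same walk may be easier to follow (illustrative C; emit() is a hypothetical callback standing in for yield_indexed_values()):

    /* Emit every r-length permutation of the indices 0..n-1.
     * p[] holds the choices so far; used[] marks taken indices. */
    static void permute(long n, long r, long depth,
                        long *p, char *used,
                        void (*emit)(const long *p, long r))
    {
        if (depth == r) { emit(p, r); return; }
        for (long i = 0; i < n; i++) {
            if (used[i]) continue;
            used[i] = 1;
            p[depth] = i;
            permute(n, r, depth + 1, p, used, emit);
            used[i] = 0;           /* backtrack */
        }
    }
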
/*
* Returns the product of from, from-1, ..., from - how_many + 1.
- * http://en.wikipedia.org/wiki/Pochhammer_symbol
+ * https://en.wikipedia.org/wiki/Pochhammer_symbol
*/
static VALUE
descending_factorial(long from, long how_many)
{
VALUE cnt;
if (how_many > 0) {
- cnt = LONG2FIX(from);
- while (--how_many > 0) {
- long v = --from;
- cnt = rb_int_mul(cnt, LONG2FIX(v));
- }
+ cnt = LONG2FIX(from);
+ while (--how_many > 0) {
+ long v = --from;
+ cnt = rb_int_mul(cnt, LONG2FIX(v));
+ }
}
else {
- cnt = LONG2FIX(how_many == 0);
+ cnt = LONG2FIX(how_many == 0);
}
return cnt;
}
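
descending_factorial is the falling factorial from * (from-1) * ... * (from-how_many+1), which counts the length-how_many permutations of from elements; building it with rb_int_mul lets the product grow into a Bignum instead of overflowing. For instance, P(5, 3) = 5 * 4 * 3 = 60. A fixed-width C analogue (illustrative; unlike the VALUE-based version it overflows for large inputs):

    /* Falling factorial, with the same edge cases as
     * descending_factorial(): 1 when how_many == 0, 0 when negative. */
    static long long falling_factorial(long from, long how_many)
    {
        if (how_many < 0) return 0;
        long long cnt = 1;
        while (how_many-- > 0) cnt *= from--;
        return cnt;
    }
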
@@ -5748,18 +7047,18 @@ binomial_coefficient(long comb, long size)
VALUE r;
long i;
if (comb > size-comb) {
- comb = size-comb;
+ comb = size-comb;
}
if (comb < 0) {
- return LONG2FIX(0);
+ return LONG2FIX(0);
}
else if (comb == 0) {
- return LONG2FIX(1);
+ return LONG2FIX(1);
}
r = LONG2FIX(size);
for (i = 1; i < comb; ++i) {
- r = rb_int_mul(r, LONG2FIX(size - i));
- r = rb_int_idiv(r, LONG2FIX(i + 1));
+ r = rb_int_mul(r, LONG2FIX(size - i));
+ r = rb_int_idiv(r, LONG2FIX(i + 1));
}
return r;
}
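
binomial_coefficient uses the multiplicative formula, and the order of operations matters: after each step the running value r equals C(size, i+1), itself an integer, so the division by i+1 is always exact; the comb = size-comb reflection just shortens the loop via C(n, k) = C(n, n-k). A fixed-width sketch (illustrative; overflows for large n, which the Integer-based version above avoids):

    /* C(n, k), multiplying before dividing so every intermediate
     * value is an exact binomial coefficient. */
    static long long binomial(long n, long k)
    {
        if (k > n - k) k = n - k;   /* C(n, k) == C(n, n-k) */
        if (k < 0) return 0;
        long long r = 1;
        for (long i = 0; i < k; i++) {
            r = r * (n - i) / (i + 1);
        }
        return r;
    }
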
@@ -5775,30 +7074,82 @@ rb_ary_permutation_size(VALUE ary, VALUE args, VALUE eobj)
/*
* call-seq:
- * ary.permutation {|p| block} -> ary
- * ary.permutation -> Enumerator
- * ary.permutation(n) {|p| block} -> ary
- * ary.permutation(n) -> Enumerator
+ * array.permutation {|element| ... } -> self
+ * array.permutation(n) {|element| ... } -> self
+ * array.permutation -> new_enumerator
+ * array.permutation(n) -> new_enumerator
+ *
+ * When invoked with a block, yields all permutations of elements of +self+; returns +self+.
+ * The order of permutations is indeterminate.
+ *
+ * When a block and an in-range positive \Integer argument +n+ (<tt>0 < n <= self.size</tt>)
+ * are given, calls the block with all +n+-tuple permutations of +self+.
+ *
+ * Example:
+ *
+ * a = [0, 1, 2]
+ * a.permutation(2) {|permutation| p permutation }
+ *
+ * Output:
*
- * When invoked with a block, yield all permutations of length +n+ of the
- * elements of the array, then return the array itself.
+ * [0, 1]
+ * [0, 2]
+ * [1, 0]
+ * [1, 2]
+ * [2, 0]
+ * [2, 1]
*
- * If +n+ is not specified, yield all permutations of all elements.
+ * Another example:
*
- * The implementation makes no guarantees about the order in which the
- * permutations are yielded.
+ * a = [0, 1, 2]
+ * a.permutation(3) {|permutation| p permutation }
*
- * If no block is given, an Enumerator is returned instead.
+ * Output:
*
- * Examples:
+ * [0, 1, 2]
+ * [0, 2, 1]
+ * [1, 0, 2]
+ * [1, 2, 0]
+ * [2, 0, 1]
+ * [2, 1, 0]
+ *
+ * When +n+ is zero, calls the block once with a new empty \Array:
+ *
+ * a = [0, 1, 2]
+ * a.permutation(0) {|permutation| p permutation }
+ *
+ * Output:
+ *
+ * []
+ *
+ * When +n+ is out of range (negative or larger than <tt>self.size</tt>),
+ * does not call the block:
+ *
+ * a = [0, 1, 2]
+ * a.permutation(-1) {|permutation| fail 'Cannot happen' }
+ * a.permutation(4) {|permutation| fail 'Cannot happen' }
+ *
+ * When a block is given but no argument,
+ * behaves the same as <tt>a.permutation(a.size)</tt>:
+ *
+ * a = [0, 1, 2]
+ * a.permutation {|permutation| p permutation }
+ *
+ * Output:
+ *
+ * [0, 1, 2]
+ * [0, 2, 1]
+ * [1, 0, 2]
+ * [1, 2, 0]
+ * [2, 0, 1]
+ * [2, 1, 0]
+ *
+ * Returns a new \Enumerator if no block given:
+ *
+ * a = [0, 1, 2]
+ * a.permutation # => #<Enumerator: [0, 1, 2]:permutation>
+ * a.permutation(2) # => #<Enumerator: [0, 1, 2]:permutation(2)>
*
- * a = [1, 2, 3]
- * a.permutation.to_a #=> [[1,2,3],[1,3,2],[2,1,3],[2,3,1],[3,1,2],[3,2,1]]
- * a.permutation(1).to_a #=> [[1],[2],[3]]
- * a.permutation(2).to_a #=> [[1,2],[1,3],[2,1],[2,3],[3,1],[3,2]]
- * a.permutation(3).to_a #=> [[1,2,3],[1,3,2],[2,1,3],[2,3,1],[3,1,2],[3,2,1]]
- * a.permutation(0).to_a #=> [[]] # one permutation of length 0
- * a.permutation(4).to_a #=> [] # no permutations of length 4
*/
static VALUE
@@ -5813,28 +7164,28 @@ rb_ary_permutation(int argc, VALUE *argv, VALUE ary)
r = NUM2LONG(argv[0]); /* Permutation size from argument */
if (r < 0 || n < r) {
- /* no permutations: yield nothing */
+ /* no permutations: yield nothing */
}
else if (r == 0) { /* exactly one permutation: the zero-length array */
- rb_yield(rb_ary_new2(0));
+ rb_yield(rb_ary_new2(0));
}
else if (r == 1) { /* this is a special, easy case */
- for (i = 0; i < RARRAY_LEN(ary); i++) {
- rb_yield(rb_ary_new3(1, RARRAY_AREF(ary, i)));
- }
+ for (i = 0; i < RARRAY_LEN(ary); i++) {
+ rb_yield(rb_ary_new3(1, RARRAY_AREF(ary, i)));
+ }
}
else { /* this is the general case */
- volatile VALUE t0;
- long *p = ALLOCV_N(long, t0, r+roomof(n, sizeof(long)));
- char *used = (char*)(p + r);
- VALUE ary0 = ary_make_shared_copy(ary); /* private defensive copy of ary */
- RBASIC_CLEAR_CLASS(ary0);
+ volatile VALUE t0;
+ long *p = ALLOCV_N(long, t0, r+roomof(n, sizeof(long)));
+ char *used = (char*)(p + r);
+ VALUE ary0 = ary_make_shared_copy(ary); /* private defensive copy of ary */
+ RBASIC_CLEAR_CLASS(ary0);
- MEMZERO(used, char, n); /* initialize array */
+ MEMZERO(used, char, n); /* initialize array */
- permute0(n, r, p, used, ary0); /* compute and yield permutations */
- ALLOCV_END(t0);
- RBASIC_SET_CLASS_RAW(ary0, rb_cArray);
+ permute0(n, r, p, used, ary0); /* compute and yield permutations */
+ ALLOCV_END(t0);
+ RBASIC_SET_CLASS_RAW(ary0, rb_cArray);
}
return ary;
}
@@ -5847,16 +7198,16 @@ combinate0(const long len, const long n, long *const stack, const VALUE values)
MEMZERO(stack+1, long, n);
stack[0] = -1;
for (;;) {
- for (lev++; lev < n; lev++) {
- stack[lev+1] = stack[lev]+1;
- }
- if (!yield_indexed_values(values, n, stack+1)) {
- rb_raise(rb_eRuntimeError, "combination reentered");
- }
- do {
- if (lev == 0) return;
- stack[lev--]++;
- } while (stack[lev+1]+n == len+lev+1);
+ for (lev++; lev < n; lev++) {
+ stack[lev+1] = stack[lev]+1;
+ }
+ if (!yield_indexed_values(values, n, stack+1)) {
+ rb_raise(rb_eRuntimeError, "combination reentered");
+ }
+ do {
+ if (lev == 0) return;
+ stack[lev--]++;
+ } while (stack[lev+1]+n == len+lev+1);
}
}
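
combinate0 emits combinations in lexicographic index order: stack[1..n] holds strictly increasing indices, and the do/while detects when a position has reached its maximum (stack[lev+1]+n == len+lev+1) so an earlier position must advance. The same enumeration, sketched recursively (illustrative C; emit() is a hypothetical callback):

    /* Emit all strictly increasing n-tuples over 0..len-1, i.e. all
     * n-combinations, in lexicographic order.  With n == 0 this
     * emits the single empty tuple, matching combination(0). */
    static void combine(long len, long n, long depth, long start,
                        long *idx, void (*emit)(const long *idx, long n))
    {
        if (depth == n) { emit(idx, n); return; }
        /* leave room for the remaining n - depth - 1 picks */
        for (long i = start; i <= len - (n - depth); i++) {
            idx[depth] = i;
            combine(len, n, depth + 1, i + 1, idx, emit);
        }
    }
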
@@ -5871,26 +7222,55 @@ rb_ary_combination_size(VALUE ary, VALUE args, VALUE eobj)
/*
* call-seq:
- * ary.combination(n) {|c| block} -> ary
- * ary.combination(n) -> Enumerator
+ * array.combination(n) {|element| ... } -> self
+ * array.combination(n) -> new_enumerator
+ *
+ * Calls the block, if given, with combinations of elements of +self+;
+ * returns +self+. The order of combinations is indeterminate.
+ *
+ * When a block and an in-range positive \Integer argument +n+ (<tt>0 < n <= self.size</tt>)
+ * are given, calls the block with all +n+-tuple combinations of +self+.
+ *
+ * Example:
+ *
+ * a = [0, 1, 2]
+ * a.combination(2) {|combination| p combination }
+ *
+ * Output:
+ *
+ * [0, 1]
+ * [0, 2]
+ * [1, 2]
*
- * When invoked with a block, yields all combinations of length +n+ of elements
- * from the array and then returns the array itself.
+ * Another example:
*
- * The implementation makes no guarantees about the order in which the
- * combinations are yielded.
+ * a = [0, 1, 2]
+ * a.combination(3) {|combination| p combination }
*
- * If no block is given, an Enumerator is returned instead.
+ * Output:
*
- * Examples:
+ * [0, 1, 2]
*
- * a = [1, 2, 3, 4]
- * a.combination(1).to_a #=> [[1],[2],[3],[4]]
- * a.combination(2).to_a #=> [[1,2],[1,3],[1,4],[2,3],[2,4],[3,4]]
- * a.combination(3).to_a #=> [[1,2,3],[1,2,4],[1,3,4],[2,3,4]]
- * a.combination(4).to_a #=> [[1,2,3,4]]
- * a.combination(0).to_a #=> [[]] # one combination of length 0
- * a.combination(5).to_a #=> [] # no combinations of length 5
+ * When +n+ is zero, calls the block once with a new empty \Array:
+ *
+ * a = [0, 1, 2]
+ * a1 = a.combination(0) {|combination| p combination }
+ *
+ * Output:
+ *
+ * []
+ *
+ * When +n+ is out of range (negative or larger than <tt>self.size</tt>),
+ * does not call the block:
+ *
+ * a = [0, 1, 2]
+ * a.combination(-1) {|combination| fail 'Cannot happen' }
+ * a.combination(4) {|combination| fail 'Cannot happen' }
+ *
+ * Returns a new \Enumerator if no block given:
+ *
+ * a = [0, 1, 2]
+ * a.combination(2) # => #<Enumerator: [0, 1, 2]:combination(2)>
*
*/
@@ -5903,25 +7283,25 @@ rb_ary_combination(VALUE ary, VALUE num)
RETURN_SIZED_ENUMERATOR(ary, 1, &num, rb_ary_combination_size);
len = RARRAY_LEN(ary);
if (n < 0 || len < n) {
- /* yield nothing */
+ /* yield nothing */
}
else if (n == 0) {
- rb_yield(rb_ary_new2(0));
+ rb_yield(rb_ary_new2(0));
}
else if (n == 1) {
- for (i = 0; i < RARRAY_LEN(ary); i++) {
- rb_yield(rb_ary_new3(1, RARRAY_AREF(ary, i)));
- }
+ for (i = 0; i < RARRAY_LEN(ary); i++) {
+ rb_yield(rb_ary_new3(1, RARRAY_AREF(ary, i)));
+ }
}
else {
- VALUE ary0 = ary_make_shared_copy(ary); /* private defensive copy of ary */
- volatile VALUE t0;
- long *stack = ALLOCV_N(long, t0, n+1);
+ VALUE ary0 = ary_make_shared_copy(ary); /* private defensive copy of ary */
+ volatile VALUE t0;
+ long *stack = ALLOCV_N(long, t0, n+1);
- RBASIC_CLEAR_CLASS(ary0);
- combinate0(len, n, stack, ary0);
- ALLOCV_END(t0);
- RBASIC_SET_CLASS_RAW(ary0, rb_cArray);
+ RBASIC_CLEAR_CLASS(ary0);
+ combinate0(len, n, stack, ary0);
+ ALLOCV_END(t0);
+ RBASIC_SET_CLASS_RAW(ary0, rb_cArray);
}
return ary;
}
@@ -5945,19 +7325,19 @@ rpermute0(const long n, const long r, long *const p, const VALUE values)
p[index] = i;
for (;;) {
- if (++index < r-1) {
- p[index] = i = 0;
- continue;
- }
- for (i = 0; i < n; ++i) {
- p[index] = i;
- if (!yield_indexed_values(values, r, p)) {
- rb_raise(rb_eRuntimeError, "repeated permute reentered");
- }
- }
- do {
- if (index <= 0) return;
- } while ((i = ++p[--index]) >= n);
+ if (++index < r-1) {
+ p[index] = i = 0;
+ continue;
+ }
+ for (i = 0; i < n; ++i) {
+ p[index] = i;
+ if (!yield_indexed_values(values, r, p)) {
+ rb_raise(rb_eRuntimeError, "repeated permute reentered");
+ }
+ }
+ do {
+ if (index <= 0) return;
+ } while ((i = ++p[--index]) >= n);
}
}
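
rpermute0 is counting in base n: each of the r positions runs independently over 0..n-1, and the closing do/while carries an overflow into the next-higher digit. A direct odometer sketch (illustrative C; emit() is a hypothetical callback, and n > 0 is assumed, as the caller guarantees):

    /* Emit all n**r repeated permutations by treating p[] as an
     * r-digit base-n counter. */
    static void rpermute(long n, long r, long *p,
                         void (*emit)(const long *p, long r))
    {
        for (long i = 0; i < r; i++) p[i] = 0;
        for (;;) {
            emit(p, r);
            long pos = r - 1;
            while (pos >= 0 && ++p[pos] == n) {  /* carry leftward */
                p[pos--] = 0;
            }
            if (pos < 0) return;                 /* counter wrapped: done */
        }
    }
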
@@ -5968,37 +7348,79 @@ rb_ary_repeated_permutation_size(VALUE ary, VALUE args, VALUE eobj)
long k = NUM2LONG(RARRAY_AREF(args, 0));
if (k < 0) {
- return LONG2FIX(0);
+ return LONG2FIX(0);
}
if (n <= 0) {
- return LONG2FIX(!k);
+ return LONG2FIX(!k);
}
return rb_int_positive_pow(n, (unsigned long)k);
}
/*
* call-seq:
- * ary.repeated_permutation(n) {|p| block} -> ary
- * ary.repeated_permutation(n) -> Enumerator
+ * array.repeated_permutation(n) {|permutation| ... } -> self
+ * array.repeated_permutation(n) -> new_enumerator
+ *
+ * Calls the block with each repeated permutation of length +n+ of the elements of +self+;
+ * each permutation is an \Array;
+ * returns +self+. The order of the permutations is indeterminate.
+ *
+ * When a block and a positive \Integer argument +n+ are given, calls the block with each
+ * +n+-tuple repeated permutation of the elements of +self+.
+ * The number of permutations is <tt>self.size**n</tt>.
+ *
+ * +n+ = 1:
+ *
+ * a = [0, 1, 2]
+ * a.repeated_permutation(1) {|permutation| p permutation }
+ *
+ * Output:
+ *
+ * [0]
+ * [1]
+ * [2]
+ *
+ * +n+ = 2:
+ *
+ * a.repeated_permutation(2) {|permutation| p permutation }
+ *
+ * Output:
+ *
+ * [0, 0]
+ * [0, 1]
+ * [0, 2]
+ * [1, 0]
+ * [1, 1]
+ * [1, 2]
+ * [2, 0]
+ * [2, 1]
+ * [2, 2]
+ *
+ * If +n+ is zero, calls the block once with an empty \Array.
*
- * When invoked with a block, yield all repeated permutations of length +n+ of
- * the elements of the array, then return the array itself.
+ * If +n+ is negative, does not call the block:
*
- * The implementation makes no guarantees about the order in which the repeated
- * permutations are yielded.
+ * a.repeated_permutation(-1) {|permutation| fail 'Cannot happen' }
*
- * If no block is given, an Enumerator is returned instead.
+ * Returns a new \Enumerator if no block given:
*
- * Examples:
+ * a = [0, 1, 2]
+ * a.repeated_permutation(2) # => #<Enumerator: [0, 1, 2]:repeated_permutation(2)>
+ *
+ * Using Enumerators, it's convenient to show the permutations and counts
+ * for some values of +n+:
+ *
+ * e = a.repeated_permutation(0)
+ * e.size # => 1
+ * e.to_a # => [[]]
+ * e = a.repeated_permutation(1)
+ * e.size # => 3
+ * e.to_a # => [[0], [1], [2]]
+ * e = a.repeated_permutation(2)
+ * e.size # => 9
+ * e.to_a # => [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
*
- * a = [1, 2]
- * a.repeated_permutation(1).to_a #=> [[1], [2]]
- * a.repeated_permutation(2).to_a #=> [[1,1],[1,2],[2,1],[2,2]]
- * a.repeated_permutation(3).to_a #=> [[1,1,1],[1,1,2],[1,2,1],[1,2,2],
- * # [2,1,1],[2,1,2],[2,2,1],[2,2,2]]
- * a.repeated_permutation(0).to_a #=> [[]] # one permutation of length 0
*/
-
static VALUE
rb_ary_repeated_permutation(VALUE ary, VALUE num)
{
@@ -6009,25 +7431,25 @@ rb_ary_repeated_permutation(VALUE ary, VALUE num)
r = NUM2LONG(num); /* Permutation size from argument */
if (r < 0) {
- /* no permutations: yield nothing */
+ /* no permutations: yield nothing */
}
else if (r == 0) { /* exactly one permutation: the zero-length array */
- rb_yield(rb_ary_new2(0));
+ rb_yield(rb_ary_new2(0));
}
else if (r == 1) { /* this is a special, easy case */
- for (i = 0; i < RARRAY_LEN(ary); i++) {
- rb_yield(rb_ary_new3(1, RARRAY_AREF(ary, i)));
- }
+ for (i = 0; i < RARRAY_LEN(ary); i++) {
+ rb_yield(rb_ary_new3(1, RARRAY_AREF(ary, i)));
+ }
}
else { /* this is the general case */
- volatile VALUE t0;
- long *p = ALLOCV_N(long, t0, r);
- VALUE ary0 = ary_make_shared_copy(ary); /* private defensive copy of ary */
- RBASIC_CLEAR_CLASS(ary0);
+ volatile VALUE t0;
+ long *p = ALLOCV_N(long, t0, r);
+ VALUE ary0 = ary_make_shared_copy(ary); /* private defensive copy of ary */
+ RBASIC_CLEAR_CLASS(ary0);
- rpermute0(n, r, p, ary0); /* compute and yield repeated permutations */
- ALLOCV_END(t0);
- RBASIC_SET_CLASS_RAW(ary0, rb_cArray);
+ rpermute0(n, r, p, ary0); /* compute and yield repeated permutations */
+ ALLOCV_END(t0);
+ RBASIC_SET_CLASS_RAW(ary0, rb_cArray);
}
return ary;
}
@@ -6039,19 +7461,19 @@ rcombinate0(const long n, const long r, long *const p, const long rest, const VA
p[index] = i;
for (;;) {
- if (++index < r-1) {
- p[index] = i;
- continue;
- }
- for (; i < n; ++i) {
- p[index] = i;
- if (!yield_indexed_values(values, r, p)) {
- rb_raise(rb_eRuntimeError, "repeated combination reentered");
- }
- }
- do {
- if (index <= 0) return;
- } while ((i = ++p[--index]) >= n);
+ if (++index < r-1) {
+ p[index] = i;
+ continue;
+ }
+ for (; i < n; ++i) {
+ p[index] = i;
+ if (!yield_indexed_values(values, r, p)) {
+ rb_raise(rb_eRuntimeError, "repeated combination reentered");
+ }
+ }
+ do {
+ if (index <= 0) return;
+ } while ((i = ++p[--index]) >= n);
}
}
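
rcombinate0 differs from rpermute0 in a single detail: a reset position resumes from the previous digit's value (p[index] = i rather than 0), so the emitted tuples are non-decreasing and each multiset is produced exactly once. Recursively (illustrative C; emit() is a hypothetical callback):

    /* Emit all non-decreasing r-tuples over 0..n-1, i.e. all
     * r-multisets (repeated combinations). */
    static void rcombine(long n, long r, long depth, long start,
                         long *idx, void (*emit)(const long *idx, long r))
    {
        if (depth == r) { emit(idx, r); return; }
        for (long i = start; i < n; i++) {  /* i == start allowed: repeats */
            idx[depth] = i;
            rcombine(n, r, depth + 1, i, idx, emit);
        }
    }
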
@@ -6061,35 +7483,71 @@ rb_ary_repeated_combination_size(VALUE ary, VALUE args, VALUE eobj)
long n = RARRAY_LEN(ary);
long k = NUM2LONG(RARRAY_AREF(args, 0));
if (k == 0) {
- return LONG2FIX(1);
+ return LONG2FIX(1);
}
return binomial_coefficient(k, n + k - 1);
}
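
The binomial_coefficient(k, n + k - 1) call is the stars-and-bars count: choosing k items from n with repetition allowed gives C(n + k - 1, k) multisets. A quick self-contained check against the documented example below (3 elements, pairs):

    #include <assert.h>

    int main(void)
    {
        /* stars and bars: C(n + k - 1, k) multisets of size k from n items */
        long n = 3, k = 2;
        long count = 1;
        for (long i = 0; i < k; i++)
            count = count * (n + k - 1 - i) / (i + 1);
        assert(count == 6);  /* the six pairs listed in the doc below */
        return 0;
    }
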
/*
* call-seq:
- * ary.repeated_combination(n) {|c| block} -> ary
- * ary.repeated_combination(n) -> Enumerator
+ * array.repeated_combination(n) {|combination| ... } -> self
+ * array.repeated_combination(n) -> new_enumerator
+ *
+ * Calls the block with each repeated combination of length +n+ of the elements of +self+;
+ * each combination is an \Array;
+ * returns +self+. The order of the combinations is indeterminate.
+ *
+ * When a block and a positive \Integer argument +n+ are given, calls the block with each
+ * +n+-tuple repeated combination of the elements of +self+.
+ * For a 3-element \Array such as the one below, the number of combinations is <tt>(n+1)(n+2)/2</tt>.
+ *
+ * +n+ = 1:
+ *
+ * a = [0, 1, 2]
+ * a.repeated_combination(1) {|combination| p combination }
*
- * When invoked with a block, yields all repeated combinations of length +n+ of
- * elements from the array and then returns the array itself.
+ * Output:
*
- * The implementation makes no guarantees about the order in which the repeated
- * combinations are yielded.
+ * [0]
+ * [1]
+ * [2]
*
- * If no block is given, an Enumerator is returned instead.
+ * +n+ = 2:
*
- * Examples:
+ * a.repeated_combination(2) {|combination| p combination }
*
- * a = [1, 2, 3]
- * a.repeated_combination(1).to_a #=> [[1], [2], [3]]
- * a.repeated_combination(2).to_a #=> [[1,1],[1,2],[1,3],[2,2],[2,3],[3,3]]
- * a.repeated_combination(3).to_a #=> [[1,1,1],[1,1,2],[1,1,3],[1,2,2],[1,2,3],
- * # [1,3,3],[2,2,2],[2,2,3],[2,3,3],[3,3,3]]
- * a.repeated_combination(4).to_a #=> [[1,1,1,1],[1,1,1,2],[1,1,1,3],[1,1,2,2],[1,1,2,3],
- * # [1,1,3,3],[1,2,2,2],[1,2,2,3],[1,2,3,3],[1,3,3,3],
- * # [2,2,2,2],[2,2,2,3],[2,2,3,3],[2,3,3,3],[3,3,3,3]]
- * a.repeated_combination(0).to_a #=> [[]] # one combination of length 0
+ * Output:
+ *
+ * [0, 0]
+ * [0, 1]
+ * [0, 2]
+ * [1, 1]
+ * [1, 2]
+ * [2, 2]
+ *
+ * If +n+ is zero, calls the block once with an empty \Array.
+ *
+ * If +n+ is negative, does not call the block:
+ *
+ * a.repeated_combination(-1) {|combination| fail 'Cannot happen' }
+ *
+ * Returns a new \Enumerator if no block given:
+ *
+ * a = [0, 1, 2]
+ * a.repeated_combination(2) # => #<Enumerator: [0, 1, 2]:repeated_combination(2)>
+ *
+ * Using Enumerators, it's convenient to show the combinations and counts
+ * for some values of +n+:
+ *
+ * e = a.repeated_combination(0)
+ * e.size # => 1
+ * e.to_a # => [[]]
+ * e = a.repeated_combination(1)
+ * e.size # => 3
+ * e.to_a # => [[0], [1], [2]]
+ * e = a.repeated_combination(2)
+ * e.size # => 6
+ * e.to_a # => [[0, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 2]]
*
*/
@@ -6102,58 +7560,97 @@ rb_ary_repeated_combination(VALUE ary, VALUE num)
RETURN_SIZED_ENUMERATOR(ary, 1, &num, rb_ary_repeated_combination_size); /* Return enumerator if no block */
len = RARRAY_LEN(ary);
if (n < 0) {
- /* yield nothing */
+ /* yield nothing */
}
else if (n == 0) {
- rb_yield(rb_ary_new2(0));
+ rb_yield(rb_ary_new2(0));
}
else if (n == 1) {
- for (i = 0; i < RARRAY_LEN(ary); i++) {
- rb_yield(rb_ary_new3(1, RARRAY_AREF(ary, i)));
- }
+ for (i = 0; i < RARRAY_LEN(ary); i++) {
+ rb_yield(rb_ary_new3(1, RARRAY_AREF(ary, i)));
+ }
}
else if (len == 0) {
- /* yield nothing */
+ /* yield nothing */
}
else {
- volatile VALUE t0;
- long *p = ALLOCV_N(long, t0, n);
- VALUE ary0 = ary_make_shared_copy(ary); /* private defensive copy of ary */
- RBASIC_CLEAR_CLASS(ary0);
+ volatile VALUE t0;
+ long *p = ALLOCV_N(long, t0, n);
+ VALUE ary0 = ary_make_shared_copy(ary); /* private defensive copy of ary */
+ RBASIC_CLEAR_CLASS(ary0);
- rcombinate0(len, n, p, n, ary0); /* compute and yield repeated combinations */
- ALLOCV_END(t0);
- RBASIC_SET_CLASS_RAW(ary0, rb_cArray);
+ rcombinate0(len, n, p, n, ary0); /* compute and yield repeated combinations */
+ ALLOCV_END(t0);
+ RBASIC_SET_CLASS_RAW(ary0, rb_cArray);
}
return ary;
}
/*
* call-seq:
- * ary.product(other_ary, ...) -> new_ary
- * ary.product(other_ary, ...) {|p| block} -> ary
+ * array.product(*other_arrays) -> new_array
+ * array.product(*other_arrays) {|combination| ... } -> self
+ *
+ * Computes and returns or yields all combinations of elements from all the Arrays,
+ * including both +self+ and +other_arrays+:
*
- * Returns an array of all combinations of elements from all arrays.
+ * - The number of combinations is the product of the sizes of all the arrays,
+ * including both +self+ and +other_arrays+.
+ * - The order of the returned combinations is indeterminate.
*
- * The length of the returned array is the product of the length of +self+ and
- * the argument arrays.
+ * When no block is given, returns the combinations as an \Array of Arrays:
*
- * If given a block, #product will yield all combinations and return +self+
- * instead.
+ * a = [0, 1, 2]
+ * a1 = [3, 4]
+ * a2 = [5, 6]
+ * p = a.product(a1)
+ * p.size # => 6 # a.size * a1.size
+ * p # => [[0, 3], [0, 4], [1, 3], [1, 4], [2, 3], [2, 4]]
+ * p = a.product(a1, a2)
+ * p.size # => 12 # a.size * a1.size * a2.size
+ * p # => [[0, 3, 5], [0, 3, 6], [0, 4, 5], [0, 4, 6], [1, 3, 5], [1, 3, 6], [1, 4, 5], [1, 4, 6], [2, 3, 5], [2, 3, 6], [2, 4, 5], [2, 4, 6]]
+ *
+ * If any argument is an empty \Array, returns an empty \Array.
+ *
+ * If no argument is given, returns an \Array of 1-element Arrays,
+ * each containing an element of +self+:
+ *
+ * a.product # => [[0], [1], [2]]
+ *
+ * When a block is given, yields each combination as an \Array; returns +self+:
+ *
+ * a.product(a1) {|combination| p combination }
+ *
+ * Output:
+ *
+ * [0, 3]
+ * [0, 4]
+ * [1, 3]
+ * [1, 4]
+ * [2, 3]
+ * [2, 4]
+ *
+ * If any argument is an empty \Array, does not call the block:
+ *
+ * a.product(a1, a2, []) {|combination| fail 'Cannot happen' }
+ *
+ * If no argument is given, yields each element of +self+ as a 1-element \Array:
+ *
+ * a.product {|combination| p combination }
+ *
+ * Output:
+ *
+ * [0]
+ * [1]
+ * [2]
*
- * [1,2,3].product([4,5]) #=> [[1,4],[1,5],[2,4],[2,5],[3,4],[3,5]]
- * [1,2].product([1,2]) #=> [[1,1],[1,2],[2,1],[2,2]]
- * [1,2].product([3,4],[5,6]) #=> [[1,3,5],[1,3,6],[1,4,5],[1,4,6],
- * # [2,3,5],[2,3,6],[2,4,5],[2,4,6]]
- * [1,2].product() #=> [[1],[2]]
- * [1,2].product([]) #=> []
*/
static VALUE
rb_ary_product(int argc, VALUE *argv, VALUE ary)
{
int n = argc+1; /* How many arrays we're operating on */
- volatile VALUE t0 = tmpary(n);
+ volatile VALUE t0 = rb_ary_hidden_new(n);
volatile VALUE t1 = Qundef;
VALUE *arrays = RARRAY_PTR(t0); /* The arrays we're computing the product of */
int *counters = ALLOCV_N(int, t1, n); /* The current position in each one */
@@ -6174,64 +7671,64 @@ rb_ary_product(int argc, VALUE *argv, VALUE ary)
/* Otherwise, allocate and fill in an array of results */
if (rb_block_given_p()) {
- /* Make defensive copies of arrays; exit if any is empty */
- for (i = 0; i < n; i++) {
- if (RARRAY_LEN(arrays[i]) == 0) goto done;
- arrays[i] = ary_make_shared_copy(arrays[i]);
- }
+ /* Make defensive copies of arrays; exit if any is empty */
+ for (i = 0; i < n; i++) {
+ if (RARRAY_LEN(arrays[i]) == 0) goto done;
+ arrays[i] = ary_make_shared_copy(arrays[i]);
+ }
}
else {
- /* Compute the length of the result array; return [] if any is empty */
- for (i = 0; i < n; i++) {
- long k = RARRAY_LEN(arrays[i]);
- if (k == 0) {
- result = rb_ary_new2(0);
- goto done;
- }
+ /* Compute the length of the result array; return [] if any is empty */
+ for (i = 0; i < n; i++) {
+ long k = RARRAY_LEN(arrays[i]);
+ if (k == 0) {
+ result = rb_ary_new2(0);
+ goto done;
+ }
if (MUL_OVERFLOW_LONG_P(resultlen, k))
- rb_raise(rb_eRangeError, "too big to product");
- resultlen *= k;
- }
- result = rb_ary_new2(resultlen);
+ rb_raise(rb_eRangeError, "too big to product");
+ resultlen *= k;
+ }
+ result = rb_ary_new2(resultlen);
}
for (;;) {
- int m;
- /* fill in one subarray */
- VALUE subarray = rb_ary_new2(n);
- for (j = 0; j < n; j++) {
- rb_ary_push(subarray, rb_ary_entry(arrays[j], counters[j]));
- }
-
- /* put it on the result array */
- if (NIL_P(result)) {
- FL_SET(t0, FL_USER5);
- rb_yield(subarray);
- if (! FL_TEST(t0, FL_USER5)) {
- rb_raise(rb_eRuntimeError, "product reentered");
- }
- else {
- FL_UNSET(t0, FL_USER5);
- }
- }
- else {
- rb_ary_push(result, subarray);
- }
-
- /*
- * Increment the last counter. If it overflows, reset to 0
- * and increment the one before it.
- */
- m = n-1;
- counters[m]++;
- while (counters[m] == RARRAY_LEN(arrays[m])) {
- counters[m] = 0;
- /* If the first counter overflows, we are done */
- if (--m < 0) goto done;
- counters[m]++;
- }
+ int m;
+ /* fill in one subarray */
+ VALUE subarray = rb_ary_new2(n);
+ for (j = 0; j < n; j++) {
+ rb_ary_push(subarray, rb_ary_entry(arrays[j], counters[j]));
+ }
+
+ /* put it on the result array */
+ if (NIL_P(result)) {
+ FL_SET(t0, RARRAY_SHARED_ROOT_FLAG);
+ rb_yield(subarray);
+ if (!FL_TEST(t0, RARRAY_SHARED_ROOT_FLAG)) {
+ rb_raise(rb_eRuntimeError, "product reentered");
+ }
+ else {
+ FL_UNSET(t0, RARRAY_SHARED_ROOT_FLAG);
+ }
+ }
+ else {
+ rb_ary_push(result, subarray);
+ }
+
+ /*
+ * Increment the last counter. If it overflows, reset to 0
+ * and increment the one before it.
+ */
+ m = n-1;
+ counters[m]++;
+ while (counters[m] == RARRAY_LEN(arrays[m])) {
+ counters[m] = 0;
+ /* If the first counter overflows, we are done */
+ if (--m < 0) goto done;
+ counters[m]++;
+ }
}
+
done:
- tmpary_discard(t0);
ALLOCV_END(t1);
return NIL_P(result) ? ary : result;
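
The counter loop above is the same odometer idea generalized to mixed radixes: counters[m] runs up to the length of arrays[m], and an overflow carries leftward until the first counter wraps, which ends the iteration. Sketched on plain C arrays (illustrative; visit() is a hypothetical callback receiving the current index tuple):

    /* Visit every element of the cartesian product of n sequences
     * whose lengths are given in lens[]; counters[] is the
     * mixed-radix odometer. */
    static void product_indices(int n, const long *lens, long *counters,
                                void (*visit)(const long *counters, int n))
    {
        for (int i = 0; i < n; i++) {
            if (lens[i] == 0) return;   /* empty factor: empty product */
            counters[i] = 0;
        }
        for (;;) {
            visit(counters, n);
            int m = n - 1;
            counters[m]++;
            while (counters[m] == lens[m]) {
                counters[m] = 0;
                if (--m < 0) return;    /* leftmost digit wrapped: done */
                counters[m]++;
            }
        }
    }
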
@@ -6239,16 +7736,19 @@ done:
/*
* call-seq:
- * ary.take(n) -> new_ary
- *
- * Returns first +n+ elements from the array.
+ * array.take(n) -> new_array
*
- * If a negative number is given, raises an ArgumentError.
+ * Returns a new \Array containing the first +n+ elements of +self+,
+ * where +n+ is a non-negative \Integer;
+ * does not modify +self+.
*
- * See also Array#drop
+ * Examples:
*
- * a = [1, 2, 3, 4, 5, 0]
- * a.take(3) #=> [1, 2, 3]
+ * a = [0, 1, 2, 3, 4, 5]
+ * a.take(1) # => [0]
+ * a.take(2) # => [0, 1]
+ * a.take(50) # => [0, 1, 2, 3, 4, 5]
+ * a # => [0, 1, 2, 3, 4, 5]
*
*/
@@ -6257,25 +7757,31 @@ rb_ary_take(VALUE obj, VALUE n)
{
long len = NUM2LONG(n);
if (len < 0) {
- rb_raise(rb_eArgError, "attempt to take negative size");
+ rb_raise(rb_eArgError, "attempt to take negative size");
}
return rb_ary_subseq(obj, 0, len);
}
/*
* call-seq:
- * ary.take_while {|obj| block} -> new_ary
- * ary.take_while -> Enumerator
+ * array.take_while {|element| ... } -> new_array
+ * array.take_while -> new_enumerator
+ *
+ * Returns a new \Array containing zero or more leading elements of +self+;
+ * does not modify +self+.
*
- * Passes elements to the block until the block returns +nil+ or +false+, then
- * stops iterating and returns an array of all prior elements.
+ * With a block given, calls the block with each successive element of +self+;
+ * stops if the block returns +false+ or +nil+;
+ * returns a new \Array containing those elements for which the block returned a truthy value:
*
- * If no block is given, an Enumerator is returned instead.
+ * a = [0, 1, 2, 3, 4, 5]
+ * a.take_while {|element| element < 3 } # => [0, 1, 2]
+ * a.take_while {|element| true } # => [0, 1, 2, 3, 4, 5]
+ * a # => [0, 1, 2, 3, 4, 5]
*
- * See also Array#drop_while
+ * With no block given, returns a new \Enumerator:
*
- * a = [1, 2, 3, 4, 5, 0]
- * a.take_while {|i| i < 3} #=> [1, 2]
+ * [0, 1].take_while # => #<Enumerator: [0, 1]:take_while>
*
*/
@@ -6286,24 +7792,25 @@ rb_ary_take_while(VALUE ary)
RETURN_ENUMERATOR(ary, 0, 0);
for (i = 0; i < RARRAY_LEN(ary); i++) {
- if (!RTEST(rb_yield(RARRAY_AREF(ary, i)))) break;
+ if (!RTEST(rb_yield(RARRAY_AREF(ary, i)))) break;
}
return rb_ary_take(ary, LONG2FIX(i));
}
/*
* call-seq:
- * ary.drop(n) -> new_ary
+ * array.drop(n) -> new_array
*
- * Drops first +n+ elements from +ary+ and returns the rest of the elements in
- * an array.
+ * Returns a new \Array containing all but the first +n+ elements of +self+,
+ * where +n+ is a non-negative \Integer;
+ * does not modify +self+.
*
- * If a negative number is given, raises an ArgumentError.
+ * Examples:
*
- * See also Array#take
- *
- * a = [1, 2, 3, 4, 5, 0]
- * a.drop(3) #=> [4, 5, 0]
+ * a = [0, 1, 2, 3, 4, 5]
+ * a.drop(0) # => [0, 1, 2, 3, 4, 5]
+ * a.drop(1) # => [1, 2, 3, 4, 5]
+ * a.drop(2) # => [2, 3, 4, 5]
*
*/
@@ -6313,29 +7820,32 @@ rb_ary_drop(VALUE ary, VALUE n)
VALUE result;
long pos = NUM2LONG(n);
if (pos < 0) {
- rb_raise(rb_eArgError, "attempt to drop negative size");
+ rb_raise(rb_eArgError, "attempt to drop negative size");
}
result = rb_ary_subseq(ary, pos, RARRAY_LEN(ary));
- if (result == Qnil) result = rb_ary_new();
+ if (NIL_P(result)) result = rb_ary_new();
return result;
}
/*
* call-seq:
- * ary.drop_while {|obj| block} -> new_ary
- * ary.drop_while -> Enumerator
+ * array.drop_while {|element| ... } -> new_array
+ * array.drop_while -> new_enumerator
+ *
+ * Returns a new \Array containing zero or more trailing elements of +self+;
+ * does not modify +self+.
*
- * Drops elements up to, but not including, the first element for which the
- * block returns +nil+ or +false+ and returns an array containing the
- * remaining elements.
+ * With a block given, calls the block with each successive element of +self+;
+ * stops if the block returns +false+ or +nil+;
+ * returns a new \Array _omitting_ those elements for which the block returned a truthy value:
*
- * If no block is given, an Enumerator is returned instead.
+ * a = [0, 1, 2, 3, 4, 5]
+ * a.drop_while {|element| element < 3 } # => [3, 4, 5]
*
- * See also Array#take_while
+ * With no block given, returns a new \Enumerator:
*
- * a = [1, 2, 3, 4, 5, 0]
- * a.drop_while {|i| i < 3 } #=> [3, 4, 5, 0]
+ * [0, 1].drop_while # => #<Enumerator: [0, 1]:drop_while>
*
*/
@@ -6346,17 +7856,42 @@ rb_ary_drop_while(VALUE ary)
RETURN_ENUMERATOR(ary, 0, 0);
for (i = 0; i < RARRAY_LEN(ary); i++) {
- if (!RTEST(rb_yield(RARRAY_AREF(ary, i)))) break;
+ if (!RTEST(rb_yield(RARRAY_AREF(ary, i)))) break;
}
return rb_ary_drop(ary, LONG2FIX(i));
}
/*
* call-seq:
- * ary.any? [{|obj| block} ] -> true or false
- * ary.any?(pattern) -> true or false
+ * array.any? -> true or false
+ * array.any? {|element| ... } -> true or false
+ * array.any?(obj) -> true or false
*
- * See also Enumerable#any?
+ * Returns +true+ if any element of +self+ meets a given criterion.
+ *
+ * With no block given and no argument, returns +true+ if +self+ has any truthy element,
+ * +false+ otherwise:
+ *
+ * [nil, 0, false].any? # => true
+ * [nil, false].any? # => false
+ * [].any? # => false
+ *
+ * With a block given and no argument, calls the block with each element in +self+;
+ * returns +true+ if the block returns any truthy value, +false+ otherwise:
+ *
+ * [0, 1, 2].any? {|element| element > 1 } # => true
+ * [0, 1, 2].any? {|element| element > 2 } # => false
+ *
+ * If argument +obj+ is given, returns +true+ if <tt>obj.===</tt> any element,
+ * +false+ otherwise:
+ *
+ * ['food', 'drink'].any?(/foo/) # => true
+ * ['food', 'drink'].any?(/bar/) # => false
+ * [].any?(/foo/) # => false
+ * [0, 1, 2].any?(1) # => true
+ * [0, 1, 2].any?(3) # => false
+ *
+ * Related: Enumerable#any?
*/
static VALUE
@@ -6370,9 +7905,9 @@ rb_ary_any_p(int argc, VALUE *argv, VALUE ary)
if (rb_block_given_p()) {
rb_warn("given block not used");
}
- for (i = 0; i < RARRAY_LEN(ary); ++i) {
- if (RTEST(rb_funcall(argv[0], idEqq, 1, RARRAY_AREF(ary, i)))) return Qtrue;
- }
+ for (i = 0; i < RARRAY_LEN(ary); ++i) {
+ if (RTEST(rb_funcall(argv[0], idEqq, 1, RARRAY_AREF(ary, i)))) return Qtrue;
+ }
}
else if (!rb_block_given_p()) {
for (i = 0; i < len; ++i) {
@@ -6380,19 +7915,43 @@ rb_ary_any_p(int argc, VALUE *argv, VALUE ary)
}
}
else {
- for (i = 0; i < RARRAY_LEN(ary); ++i) {
- if (RTEST(rb_yield(RARRAY_AREF(ary, i)))) return Qtrue;
- }
+ for (i = 0; i < RARRAY_LEN(ary); ++i) {
+ if (RTEST(rb_yield(RARRAY_AREF(ary, i)))) return Qtrue;
+ }
}
return Qfalse;
}
/*
* call-seq:
- * ary.all? [{|obj| block} ] -> true or false
- * ary.all?(pattern) -> true or false
+ * array.all? -> true or false
+ * array.all? {|element| ... } -> true or false
+ * array.all?(obj) -> true or false
+ *
+ * Returns +true+ if all elements of +self+ meet a given criterion.
+ *
+ * With no block given and no argument, returns +true+ if +self+ contains only truthy elements,
+ * +false+ otherwise:
+ *
+ * [0, 1, :foo].all? # => true
+ * [0, nil, 2].all? # => false
+ * [].all? # => true
+ *
+ * With a block given and no argument, calls the block with each element in +self+;
+ * returns +true+ if the block returns only truthy values, +false+ otherwise:
+ *
+ * [0, 1, 2].all? { |element| element < 3 } # => true
+ * [0, 1, 2].all? { |element| element < 2 } # => false
*
- * See also Enumerable#all?
+ * If argument +obj+ is given, returns +true+ if <tt>obj.===</tt> every element, +false+ otherwise:
+ *
+ * ['food', 'fool', 'foot'].all?(/foo/) # => true
+ * ['food', 'drink'].all?(/bar/) # => false
+ * [].all?(/foo/) # => true
+ * [0, 0, 0].all?(0) # => true
+ * [0, 1, 2].all?(1) # => false
+ *
+ * Related: Enumerable#all?
*/
static VALUE
@@ -6425,10 +7984,34 @@ rb_ary_all_p(int argc, VALUE *argv, VALUE ary)
/*
* call-seq:
- * ary.none? [{|obj| block} ] -> true or false
- * ary.none?(pattern) -> true or false
+ * array.none? -> true or false
+ * array.none? {|element| ... } -> true or false
+ * array.none?(obj) -> true or false
+ *
+ * Returns +true+ if no element of +self+ meets a given criterion.
+ *
+ * With no block given and no argument, returns +true+ if +self+ has no truthy elements,
+ * +false+ otherwise:
+ *
+ * [nil, false].none? # => true
+ * [nil, 0, false].none? # => false
+ * [].none? # => true
*
- * See also Enumerable#none?
+ * With a block given and no argument, calls the block with each element in +self+;
+ * returns +true+ if the block returns no truthy value, +false+ otherwise:
+ *
+ * [0, 1, 2].none? {|element| element > 3 } # => true
+ * [0, 1, 2].none? {|element| element > 1 } # => false
+ *
+ * If argument +obj+ is given, returns +true+ if <tt>obj.===</tt> no element, +false+ otherwise:
+ *
+ * ['food', 'drink'].none?(/bar/) # => true
+ * ['food', 'drink'].none?(/foo/) # => false
+ * [].none?(/foo/) # => true
+ * [0, 1, 2].none?(3) # => true
+ * [0, 1, 2].none?(1) # => false
+ *
+ * Related: Enumerable#none?
*/
static VALUE
@@ -6461,10 +8044,38 @@ rb_ary_none_p(int argc, VALUE *argv, VALUE ary)
/*
* call-seq:
- * ary.one? [{|obj| block} ] -> true or false
- * ary.one?(pattern) -> true or false
+ * array.one? -> true or false
+ * array.one? {|element| ... } -> true or false
+ * array.one?(obj) -> true or false
+ *
+ * Returns +true+ if exactly one element of +self+ meets a given criterion.
+ *
+ * With no block given and no argument, returns +true+ if +self+ has exactly one truthy element,
+ * +false+ otherwise:
+ *
+ * [nil, 0].one? # => true
+ * [0, 0].one? # => false
+ * [nil, nil].one? # => false
+ * [].one? # => false
+ *
+ * With a block given and no argument, calls the block with each element in +self+;
+ * returns +true+ if the block returns a truthy value for exactly one element, +false+ otherwise:
+ *
+ * [0, 1, 2].one? {|element| element > 0 } # => false
+ * [0, 1, 2].one? {|element| element > 1 } # => true
+ * [0, 1, 2].one? {|element| element > 2 } # => false
+ *
+ * If argument +obj+ is given, returns +true+ if <tt>obj.===</tt> exactly one element,
+ * +false+ otherwise:
*
- * See also Enumerable#one?
+ * [0, 1, 2].one?(0) # => true
+ * [0, 0, 1].one?(0) # => false
+ * [1, 1, 2].one?(0) # => false
+ * ['food', 'drink'].one?(/bar/) # => false
+ * ['food', 'drink'].one?(/foo/) # => true
+ * [].one?(/foo/) # => false
+ *
+ * Related: Enumerable#one?
*/
static VALUE
@@ -6506,19 +8117,22 @@ rb_ary_one_p(int argc, VALUE *argv, VALUE ary)
}
/*
- * call-seq:
- * ary.dig(idx, ...) -> object
+ * call-seq:
+ * array.dig(index, *identifiers) -> object
*
- * Extracts the nested value specified by the sequence of <i>idx</i>
- * objects by calling +dig+ at each step, returning +nil+ if any
- * intermediate step is +nil+.
+ * Finds and returns the object in nested objects
+ * that is specified by +index+ and +identifiers+.
+ * The nested objects may be instances of various classes.
+ * See {Dig Methods}[rdoc-ref:dig_methods.rdoc].
*
- * a = [[1, [2, 3]]]
+ * Examples:
+ *
+ * a = [:foo, [:bar, :baz, [:bat, :bam]]]
+ * a.dig(1) # => [:bar, :baz, [:bat, :bam]]
+ * a.dig(1, 2) # => [:bat, :bam]
+ * a.dig(1, 2, 0) # => :bat
+ * a.dig(1, 2, 3) # => nil
*
- * a.dig(0, 1, 1) #=> 3
- * a.dig(1, 2, 3) #=> nil
- * a.dig(0, 0, 0) #=> TypeError: Integer does not have #dig method
- * [42, {foo: :bar}].dig(1, :foo) #=> :bar
*/
static VALUE
@@ -6536,14 +8150,8 @@ finish_exact_sum(long n, VALUE r, VALUE v, int z)
{
if (n != 0)
v = rb_fix_plus(LONG2FIX(n), v);
- if (r != Qundef) {
- /* r can be an Integer when mathn is loaded */
- if (FIXNUM_P(r))
- v = rb_fix_plus(r, v);
- else if (RB_TYPE_P(r, T_BIGNUM))
- v = rb_big_plus(r, v);
- else
- v = rb_rational_plus(r, v);
+ if (!UNDEF_P(r)) {
+ v = rb_rational_plus(r, v);
}
else if (!n && z) {
v = rb_fix_plus(LONG2FIX(0), v);
@@ -6553,43 +8161,41 @@ finish_exact_sum(long n, VALUE r, VALUE v, int z)
/*
* call-seq:
- * ary.sum(init=0) -> number
- * ary.sum(init=0) {|e| expr } -> number
- *
- * Returns the sum of elements.
- * For example, [e1, e2, e3].sum returns init + e1 + e2 + e3.
+ * array.sum(init = 0) -> object
+ * array.sum(init = 0) {|element| ... } -> object
*
- * If a block is given, the block is applied to each element
- * before addition.
+ * When no block is given, returns the object equivalent to:
*
- * If <i>ary</i> is empty, it returns <i>init</i>.
+ * sum = init
+ * array.each {|element| sum += element }
+ * sum
*
- * [].sum #=> 0
- * [].sum(0.0) #=> 0.0
- * [1, 2, 3].sum #=> 6
- * [3, 5.5].sum #=> 8.5
- * [2.5, 3.0].sum(0.0) {|e| e * e } #=> 15.25
- * [Object.new].sum #=> TypeError
+ * For example, <tt>[e1, e2, e3].sum</tt> returns <tt>init + e1 + e2 + e3</tt>.
*
- * The (arithmetic) mean value of an array can be obtained as follows.
+ * Examples:
*
- * mean = ary.sum(0.0) / ary.length
+ * a = [0, 1, 2, 3]
+ * a.sum # => 6
+ * a.sum(100) # => 106
*
- * This method can be used for non-numeric objects by
- * explicit <i>init</i> argument.
+ * The elements need not be numeric, but must be <tt>+</tt>-compatible
+ * with each other and with +init+:
*
- * ["a", "b", "c"].sum("") #=> "abc"
- * [[1], [[2]], [3]].sum([]) #=> [1, [2], 3]
+ * a = ['abc', 'def', 'ghi']
+ * a.sum('jkl') # => "jklabcdefghi"
*
- * However, Array#join and Array#flatten is faster than Array#sum for
- * array of strings and array of arrays.
+ * When a block is given, it is called with each element
+ * and the block's return value (instead of the element itself) is used as the addend:
*
- * ["a", "b", "c"].join #=> "abc"
- * [[1], [[2]], [3]].flatten(1) #=> [1, [2], 3]
+ * a = ['zero', 1, :two]
+ * s = a.sum('Coerced and concatenated: ') {|element| element.to_s }
+ * s # => "Coerced and concatenated: zero1two"
*
+ * Notes:
*
- * Array#sum method may not respect method redefinition of "+" methods
- * such as Integer#+.
+ * - Array#join and Array#flatten may be faster than Array#sum
+ * for an \Array of Strings or an \Array of Arrays.
+ * - Array#sum may not respect method redefinition of "+" methods such as Integer#+.
*
*/
@@ -6620,10 +8226,10 @@ rb_ary_sum(int argc, VALUE *argv, VALUE ary)
n = 0;
}
}
- else if (RB_TYPE_P(e, T_BIGNUM))
+ else if (RB_BIGNUM_TYPE_P(e))
v = rb_big_plus(e, v);
else if (RB_TYPE_P(e, T_RATIONAL)) {
- if (r == Qundef)
+ if (UNDEF_P(r))
r = e;
else
r = rb_rational_plus(r, e);
@@ -6640,7 +8246,7 @@ rb_ary_sum(int argc, VALUE *argv, VALUE ary)
if (RB_FLOAT_TYPE_P(e)) {
/*
* Kahan-Babuska balancing compensated summation algorithm
- * See http://link.springer.com/article/10.1007/s00607-005-0139-x
+ * See https://link.springer.com/article/10.1007/s00607-005-0139-x
*/
double f, c;
double x, t;
@@ -6657,7 +8263,7 @@ rb_ary_sum(int argc, VALUE *argv, VALUE ary)
x = RFLOAT_VALUE(e);
else if (FIXNUM_P(e))
x = FIX2LONG(e);
- else if (RB_TYPE_P(e, T_BIGNUM))
+ else if (RB_BIGNUM_TYPE_P(e))
x = rb_big2dbl(e);
else if (RB_TYPE_P(e, T_RATIONAL))
x = rb_num2dbl(e);
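
Once the sum goes float, the loop above switches to Kahan-Babuska (balancing) compensated summation: alongside the running sum f it keeps a correction c that captures, at each addition, the low-order bits lost by whichever operand had the smaller magnitude. A standalone sketch of the algorithm (illustrative C):

    #include <math.h>

    /* Kahan-Babuska compensated summation: c accumulates the rounding
     * error of each addition, recovered from the larger-magnitude
     * operand, and is folded back in at the end. */
    static double kbn_sum(const double *v, long n)
    {
        double f = 0.0, c = 0.0;
        for (long i = 0; i < n; i++) {
            double x = v[i];
            double t = f + x;
            if (fabs(f) >= fabs(x))
                c += (f - t) + x;   /* low-order bits of x were lost */
            else
                c += (x - t) + f;   /* low-order bits of f were lost */
            f = t;
        }
        return f + c;
    }
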
@@ -6710,75 +8316,128 @@ rb_ary_deconstruct(VALUE ary)
}
/*
- * An \Array is an ordered, integer-indexed collection of objects,
- * called _elements_. Any object may be an \Array element.
+ * An \Array is an ordered, integer-indexed collection of objects, called _elements_.
+ * Any object (even another array) may be an array element,
+ * and an array can contain objects of different types.
*
* == \Array Indexes
*
* \Array indexing starts at 0, as in C or Java.
*
* A positive index is an offset from the first element:
+ *
* - Index 0 indicates the first element.
* - Index 1 indicates the second element.
* - ...
*
* A negative index is an offset, backwards, from the end of the array:
+ *
* - Index -1 indicates the last element.
* - Index -2 indicates the next-to-last element.
* - ...
*
- * A non-negative index is <i>in range</i> if it is smaller than
+ * A non-negative index is <i>in range</i> if and only if it is smaller than
* the size of the array. For a 3-element array:
+ *
* - Indexes 0 through 2 are in range.
* - Index 3 is out of range.
*
- * A negative index is <i>in range</i> if its absolute value is
+ * A negative index is <i>in range</i> if and only if its absolute value is
* not larger than the size of the array. For a 3-element array:
+ *
* - Indexes -1 through -3 are in range.
* - Index -4 is out of range.
*
+ * Although the effective index into an array is always an integer,
+ * some methods (both within and outside of class \Array)
+ * accept one or more non-integer arguments that are
+ * {integer-convertible objects}[rdoc-ref:implicit_conversion.rdoc@Integer-Convertible+Objects].
+ *
+ *
* == Creating Arrays
*
- * A new array can be created by using the literal constructor
- * <code>[]</code>. Arrays can contain different types of objects. For
- * example, the array below contains an Integer, a String and a Float:
+ * You can create an \Array object explicitly with:
*
- * ary = [1, "two", 3.0] #=> [1, "two", 3.0]
+ * - An {array literal}[rdoc-ref:literals.rdoc@Array+Literals]:
*
- * An array can also be created by explicitly calling Array.new with zero, one
- * (the initial size of the Array) or two arguments (the initial size and a
- * default object).
+ * [1, 'one', :one, [2, 'two', :two]]
*
- * ary = Array.new #=> []
- * Array.new(3) #=> [nil, nil, nil]
- * Array.new(3, true) #=> [true, true, true]
+ * - A {%w or %W: string-array Literal}[rdoc-ref:literals.rdoc@25w+and+-25W-3A+String-Array+Literals]:
*
- * Note that the second argument populates the array with references to the
- * same object. Therefore, it is only recommended in cases when you need to
- * instantiate arrays with natively immutable objects such as Symbols,
- * numbers, true or false.
+ * %w[foo bar baz] # => ["foo", "bar", "baz"]
+ * %w[1 % *] # => ["1", "%", "*"]
*
- * To create an array with separate objects a block can be passed instead.
- * This method is safe to use with mutable objects such as hashes, strings or
- * other arrays:
+ * - A {%i or %I: symbol-array Literal}[rdoc-ref:literals.rdoc@25i+and+-25I-3A+Symbol-Array+Literals]:
*
- * Array.new(4) {Hash.new} #=> [{}, {}, {}, {}]
- * Array.new(4) {|i| i.to_s } #=> ["0", "1", "2", "3"]
+ * %i[foo bar baz] # => [:foo, :bar, :baz]
+ * %i[1 % *] # => [:"1", :%, :*]
*
- * This is also a quick way to build up multi-dimensional arrays:
+ * - \Method Kernel#Array:
*
- * empty_table = Array.new(3) {Array.new(3)}
- * #=> [[nil, nil, nil], [nil, nil, nil], [nil, nil, nil]]
+ * Array(["a", "b"]) # => ["a", "b"]
+ * Array(1..5) # => [1, 2, 3, 4, 5]
+ * Array(key: :value) # => [[:key, :value]]
+ * Array(nil) # => []
+ * Array(1) # => [1]
+ * Array({:a => "a", :b => "b"}) # => [[:a, "a"], [:b, "b"]]
*
- * An array can also be created by using the Array() method, provided by
- * Kernel, which tries to call #to_ary, then #to_a on its argument.
+ * - \Method Array.new:
*
- * Array({:a => "a", :b => "b"}) #=> [[:a, "a"], [:b, "b"]]
+ * Array.new # => []
+ * Array.new(3) # => [nil, nil, nil]
+ * Array.new(4) {Hash.new} # => [{}, {}, {}, {}]
+ * Array.new(3, true) # => [true, true, true]
+ *
+ * Note that the last example above populates the array
+ * with references to the same object.
+ * This is recommended only in cases where that object is a natively immutable object
+ * such as a symbol, a numeric, +nil+, +true+, or +false+.
+ *
+ * Another way to create an array with various objects is to use a block;
+ * this usage is safe for mutable objects such as hashes, strings, or
+ * other arrays:
+ *
+ * Array.new(4) {|i| i.to_s } # => ["0", "1", "2", "3"]
+ *
+ * Here is a way to create a multi-dimensional array:
+ *
+ * Array.new(3) {Array.new(3)}
+ * # => [[nil, nil, nil], [nil, nil, nil], [nil, nil, nil]]
+ *
+ * A number of Ruby methods, both in the core and in the standard library,
+ * provide instance method +to_a+, which converts an object to an array.
+ *
+ * - ARGF#to_a
+ * - Array#to_a
+ * - Enumerable#to_a
+ * - Hash#to_a
+ * - MatchData#to_a
+ * - NilClass#to_a
+ * - OptionParser#to_a
+ * - Range#to_a
+ * - Set#to_a
+ * - Struct#to_a
+ * - Time#to_a
+ * - Benchmark::Tms#to_a
+ * - CSV::Table#to_a
+ * - Enumerator::Lazy#to_a
+ * - Gem::List#to_a
+ * - Gem::NameTuple#to_a
+ * - Gem::Platform#to_a
+ * - Gem::RequestSet::Lockfile::Tokenizer#to_a
+ * - Gem::SourceList#to_a
+ * - OpenSSL::X509::Extension#to_a
+ * - OpenSSL::X509::Name#to_a
+ * - Racc::ISet#to_a
+ * - Rinda::RingFinger#to_a
+ * - Ripper::Lexer::Elem#to_a
+ * - RubyVM::InstructionSequence#to_a
+ * - YAML::DBM#to_a
*
* == Example Usage
*
* In addition to the methods it mixes in through the Enumerable module, the
- * Array class has proprietary methods for accessing, searching and otherwise
+ * \Array class has proprietary methods for accessing, searching and otherwise
* manipulating arrays.
*
* Some of the more common ones are illustrated below.
@@ -6826,7 +8485,7 @@ rb_ary_deconstruct(VALUE ary)
*
* arr.drop(3) #=> [4, 5, 6]
*
- * == Obtaining Information about an Array
+ * == Obtaining Information about an \Array
*
* Arrays keep track of their own length at all times. To query an array
* about the number of elements it contains, use #length, #count or #size.
@@ -6864,7 +8523,7 @@ rb_ary_deconstruct(VALUE ary)
* arr.insert(3, 'orange', 'pear', 'grapefruit')
* #=> [0, 1, 2, "orange", "pear", "grapefruit", "apple", 3, 4, 5, 6]
*
- * == Removing Items from an Array
+ * == Removing Items from an \Array
*
* The method #pop removes the last element in an array and returns it:
*
@@ -6906,9 +8565,9 @@ rb_ary_deconstruct(VALUE ary)
*
* == Iterating over Arrays
*
- * Like all classes that include the Enumerable module, Array has an each
+ * Like all classes that include the Enumerable module, \Array has an each
* method, which defines what elements should be iterated over and how. In
- * case of Array's #each, all elements in the Array instance are yielded to
+ * case of Array's #each, all elements in the \Array instance are yielded to
* the supplied block in sequence.
*
* Note that this operation leaves the array unchanged.
@@ -6934,7 +8593,8 @@ rb_ary_deconstruct(VALUE ary)
* arr.map! {|a| a**2} #=> [1, 4, 9, 16, 25]
* arr #=> [1, 4, 9, 16, 25]
*
- * == Selecting Items from an Array
+ *
+ * == Selecting Items from an \Array
*
* Elements can be selected from an array according to criteria defined in a
* block. The selection can happen in a destructive or a non-destructive
@@ -6964,18 +8624,199 @@ rb_ary_deconstruct(VALUE ary)
* arr = [1, 2, 3, 4, 5, 6]
* arr.keep_if {|a| a < 4} #=> [1, 2, 3]
* arr #=> [1, 2, 3]
+ *
+ * == What's Here
+ *
+ * First, what's elsewhere. \Class \Array:
+ *
+ * - Inherits from {class Object}[rdoc-ref:Object@What-27s+Here].
+ * - Includes {module Enumerable}[rdoc-ref:Enumerable@What-27s+Here],
+ * which provides dozens of additional methods.
+ *
+ * Here, class \Array provides methods that are useful for:
+ *
+ * - {Creating an Array}[rdoc-ref:Array@Methods+for+Creating+an+Array]
+ * - {Querying}[rdoc-ref:Array@Methods+for+Querying]
+ * - {Comparing}[rdoc-ref:Array@Methods+for+Comparing]
+ * - {Fetching}[rdoc-ref:Array@Methods+for+Fetching]
+ * - {Assigning}[rdoc-ref:Array@Methods+for+Assigning]
+ * - {Deleting}[rdoc-ref:Array@Methods+for+Deleting]
+ * - {Combining}[rdoc-ref:Array@Methods+for+Combining]
+ * - {Iterating}[rdoc-ref:Array@Methods+for+Iterating]
+ * - {Converting}[rdoc-ref:Array@Methods+for+Converting]
+ * - {And more....}[rdoc-ref:Array@Other+Methods]
+ *
+ * === Methods for Creating an \Array
+ *
+ * - ::[]: Returns a new array populated with given objects.
+ * - ::new: Returns a new array.
+ * - ::try_convert: Returns a new array created from a given object.
+ *
+ * === Methods for Querying
+ *
+ * - #length, #size: Returns the count of elements.
+ * - #include?: Returns whether any element <tt>==</tt> a given object.
+ * - #empty?: Returns whether there are no elements.
+ * - #all?: Returns whether all elements meet a given criterion.
+ * - #any?: Returns whether any element meets a given criterion.
+ * - #none?: Returns whether no element <tt>==</tt> a given object.
+ * - #one?: Returns whether exactly one element <tt>==</tt> a given object.
+ * - #count: Returns the count of elements that meet a given criterion.
+ * - #find_index, #index: Returns the index of the first element that meets a given criterion.
+ * - #rindex: Returns the index of the last element that meets a given criterion.
+ * - #hash: Returns the integer hash code.
+ *
+ * === Methods for Comparing
+ *
+ * - #<=>: Returns -1, 0, or 1 as +self+ is less than, equal to, or
+ * greater than a given object.
+ * - #==: Returns whether each element in +self+ is <tt>==</tt> to the corresponding element
+ * in a given object.
+ * - #eql?: Returns whether each element in +self+ is <tt>eql?</tt> to the corresponding
+ * element in a given object.
+ *
+ * === Methods for Fetching
+ *
+ * These methods do not modify +self+.
+ *
+ * - #[]: Returns one or more elements.
+ * - #fetch: Returns the element at a given offset.
+ * - #first: Returns one or more leading elements.
+ * - #last: Returns one or more trailing elements.
+ * - #max: Returns one or more maximum-valued elements,
+ * as determined by <tt><=></tt> or a given block.
+ * - #min: Returns one or more minimum-valued elements,
+ * as determined by <tt><=></tt> or a given block.
+ * - #minmax: Returns the minimum-valued and maximum-valued elements,
+ * as determined by <tt><=></tt> or a given block.
+ * - #assoc: Returns the first element that is an array
+ * whose first element <tt>==</tt> a given object.
+ * - #rassoc: Returns the first element that is an array
+ * whose second element <tt>==</tt> a given object.
+ * - #at: Returns the element at a given offset.
+ * - #values_at: Returns the elements at given offsets.
+ * - #dig: Returns the object in nested objects
+ * that is specified by a given index and additional arguments.
+ * - #drop: Returns trailing elements as determined by a given index.
+ * - #take: Returns leading elements as determined by a given index.
+ * - #drop_while: Returns trailing elements as determined by a given block.
+ * - #take_while: Returns leading elements as determined by a given block.
+ * - #slice: Returns consecutive elements as determined by a given argument.
+ * - #sort: Returns all elements in an order determined by <tt><=></tt> or a given block.
+ * - #reverse: Returns all elements in reverse order.
+ * - #compact: Returns an array containing all non-+nil+ elements.
+ * - #select, #filter: Returns an array containing elements selected by a given block.
+ * - #uniq: Returns an array containing non-duplicate elements.
+ * - #rotate: Returns all elements with some rotated from one end to the other.
+ * - #bsearch: Returns an element selected via a binary search
+ * as determined by a given block.
+ * - #bsearch_index: Returns the index of an element selected via a binary search
+ * as determined by a given block.
+ * - #sample: Returns one or more random elements.
+ * - #shuffle: Returns elements in a random order.
+ *
+ * === Methods for Assigning
+ *
+ * These methods add, replace, or reorder elements in +self+.
+ *
+ * - #[]=: Assigns specified elements with a given object.
+ * - #push, #append, #<<: Appends trailing elements.
+ * - #unshift, #prepend: Prepends leading elements.
+ * - #insert: Inserts given objects at a given offset; does not replace elements.
+ * - #concat: Appends all elements from given arrays.
+ * - #fill: Replaces specified elements with specified objects.
+ * - #replace: Replaces the content of +self+ with the content of a given array.
+ * - #reverse!: Replaces +self+ with its elements reversed.
+ * - #rotate!: Replaces +self+ with its elements rotated.
+ * - #shuffle!: Replaces +self+ with its elements in random order.
+ * - #sort!: Replaces +self+ with its elements sorted,
+ * as determined by <tt><=></tt> or a given block.
+ * - #sort_by!: Replaces +self+ with its elements sorted, as determined by a given block.
+ *
+ * === Methods for Deleting
+ *
+ * Each of these methods removes elements from +self+:
+ *
+ * - #pop: Removes and returns the last element.
+ * - #shift: Removes and returns the first element.
+ * - #compact!: Removes all +nil+ elements.
+ * - #delete: Removes elements equal to a given object.
+ * - #delete_at: Removes the element at a given offset.
+ * - #delete_if: Removes elements specified by a given block.
+ * - #keep_if: Removes elements not specified by a given block.
+ * - #reject!: Removes elements specified by a given block.
+ * - #select!, #filter!: Removes elements not specified by a given block.
+ * - #slice!: Removes and returns a sequence of elements.
+ * - #uniq!: Removes duplicates.
+ *
+ * === Methods for Combining
+ *
+ * - #&: Returns an array containing elements found both in +self+ and a given array.
+ * - #intersection: Returns an array containing elements found both in +self+
+ * and in each given array.
+ * - #+: Returns an array containing all elements of +self+ followed by all elements of a given array.
+ * - #-: Returns an array containing all elements of +self+ that are not found in a given array.
+ * - #|: Returns an array containing all elements of +self+ and all elements of a given array,
+ * duplicates removed.
+ * - #union: Returns an array containing all elements of +self+ and all elements of given arrays,
+ * duplicates removed.
+ * - #difference: Returns an array containing all elements of +self+ that are not found
+ * in any of the given arrays.
+ * - #product: Returns or yields all combinations of elements from +self+ and given arrays.
+ *
+ * === Methods for Iterating
+ *
+ * - #each: Passes each element to a given block.
+ * - #reverse_each: Passes each element, in reverse order, to a given block.
+ * - #each_index: Passes each element index to a given block.
+ * - #cycle: Calls a given block with each element, then does so again,
+ * for a specified number of times, or forever.
+ * - #combination: Calls a given block with combinations of elements of +self+;
+ * a combination does not use the same element more than once.
+ * - #permutation: Calls a given block with permutations of elements of +self+;
+ * a permutation does not use the same element more than once.
+ * - #repeated_combination: Calls a given block with combinations of elements of +self+;
+ * a combination may use the same element more than once.
+ * - #repeated_permutation: Calls a given block with permutations of elements of +self+;
+ * a permutation may use the same element more than once.
+ *
+ * === Methods for Converting
+ *
+ * - #map, #collect: Returns an array containing the block return-value for each element.
+ * - #map!, #collect!: Replaces each element with a block return-value.
+ * - #flatten: Returns an array that is a recursive flattening of +self+.
+ * - #flatten!: Replaces each nested array in +self+ with the elements from that array.
+ * - #inspect, #to_s: Returns a new String containing the elements.
+ * - #join: Returns a new String containing the elements joined by the field separator.
+ * - #to_a: Returns +self+ or a new array containing all elements.
+ * - #to_ary: Returns +self+.
+ * - #to_h: Returns a new hash formed from the elements.
+ * - #transpose: Transposes +self+, which must be an array of arrays.
+ * - #zip: Returns a new array of arrays containing +self+ and given arrays;
+ * follow the link for details.
+ *
+ * === Other Methods
+ *
+ * - #*: Returns one of the following:
+ *
+ * - With integer argument +n+, a new array that is the concatenation
+ * of +n+ copies of +self+.
+ * - With string argument +field_separator+, a new string that is equivalent to
+ * <tt>join(field_separator)</tt>.
+ *
+ * - #abbrev: Returns a hash of unambiguous abbreviations for elements.
+ * - #pack: Packs the elements into a binary sequence.
+ * - #sum: Returns a sum of elements according to either <tt>+</tt> or a given block.
*/
void
Init_Array(void)
{
-#undef rb_intern
-#define rb_intern(str) rb_intern_const(str)
-
rb_cArray = rb_define_class("Array", rb_cObject);
rb_include_module(rb_cArray, rb_mEnumerable);
rb_define_alloc_func(rb_cArray, empty_ary_alloc);
+ rb_define_singleton_method(rb_cArray, "new", rb_ary_s_new, -1);
rb_define_singleton_method(rb_cArray, "[]", rb_ary_s_create, -1);
rb_define_singleton_method(rb_cArray, "try_convert", rb_ary_s_try_convert, 1);
rb_define_method(rb_cArray, "initialize", rb_ary_initialize, -1);
@@ -7001,6 +8842,7 @@ Init_Array(void)
rb_define_method(rb_cArray, "union", rb_ary_union_multi, -1);
rb_define_method(rb_cArray, "difference", rb_ary_difference_multi, -1);
rb_define_method(rb_cArray, "intersection", rb_ary_intersection_multi, -1);
+ rb_define_method(rb_cArray, "intersect?", rb_ary_intersect_p, 1);
rb_define_method(rb_cArray, "<<", rb_ary_push, 1);
rb_define_method(rb_cArray, "push", rb_ary_push_m, -1);
rb_define_alias(rb_cArray, "append", "push");
@@ -7013,7 +8855,7 @@ Init_Array(void)
rb_define_method(rb_cArray, "each_index", rb_ary_each_index, 0);
rb_define_method(rb_cArray, "reverse_each", rb_ary_reverse_each, 0);
rb_define_method(rb_cArray, "length", rb_ary_length, 0);
- rb_define_alias(rb_cArray, "size", "length");
+ rb_define_method(rb_cArray, "size", rb_ary_length, 0);
rb_define_method(rb_cArray, "empty?", rb_ary_empty_p, 0);
rb_define_method(rb_cArray, "find_index", rb_ary_index, -1);
rb_define_method(rb_cArray, "index", rb_ary_index, -1);
diff --git a/array.rb b/array.rb
index a49ab9823e..b9fa9844e6 100644
--- a/array.rb
+++ b/array.rb
@@ -1,62 +1,69 @@
class Array
# call-seq:
- # ary.shuffle! -> ary
- # ary.shuffle!(random: rng) -> ary
+ # array.shuffle!(random: Random) -> array
#
- # Shuffles elements in +self+ in place.
- #
- # a = [ 1, 2, 3 ] #=> [1, 2, 3]
- # a.shuffle! #=> [2, 3, 1]
- # a #=> [2, 3, 1]
- #
- # The optional +rng+ argument will be used as the random number generator.
+ # Shuffles the elements of +self+ in place.
+ # a = [1, 2, 3] #=> [1, 2, 3]
+ # a.shuffle! #=> [2, 3, 1]
+ # a #=> [2, 3, 1]
#
+ # The optional +random+ argument will be used as the random number generator:
# a.shuffle!(random: Random.new(1)) #=> [1, 3, 2]
def shuffle!(random: Random)
- __builtin_rb_ary_shuffle_bang(random)
+ Primitive.rb_ary_shuffle_bang(random)
end
# call-seq:
- # ary.shuffle -> new_ary
- # ary.shuffle(random: rng) -> new_ary
+ # array.shuffle(random: Random) -> new_ary
#
# Returns a new array with elements of +self+ shuffled.
+ # a = [1, 2, 3] #=> [1, 2, 3]
+ # a.shuffle #=> [2, 3, 1]
+ # a #=> [1, 2, 3]
#
- # a = [ 1, 2, 3 ] #=> [1, 2, 3]
- # a.shuffle #=> [2, 3, 1]
- # a #=> [1, 2, 3]
- #
- # The optional +rng+ argument will be used as the random number generator.
- #
+ # The optional +random+ argument will be used as the random number generator:
# a.shuffle(random: Random.new(1)) #=> [1, 3, 2]
def shuffle(random: Random)
- __builtin_rb_ary_shuffle(random)
+ Primitive.rb_ary_shuffle(random)
end
# call-seq:
- # ary.sample -> obj
- # ary.sample(random: rng) -> obj
- # ary.sample(n) -> new_ary
- # ary.sample(n, random: rng) -> new_ary
- #
- # Choose a random element or +n+ random elements from the array.
- #
- # The elements are chosen by using random and unique indices into the array
- # in order to ensure that an element doesn't repeat itself unless the array
- # already contained duplicate elements.
+ # array.sample(random: Random) -> object
+ # array.sample(n, random: Random) -> new_ary
#
- # If the array is empty the first form returns +nil+ and the second form
- # returns an empty array.
+ # Returns random elements from +self+.
#
- # a = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
- # a.sample #=> 7
- # a.sample(4) #=> [6, 4, 2, 5]
+ # When no arguments are given, returns a random element from +self+:
+ # a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ # a.sample # => 3
+ # a.sample # => 8
+ # If +self+ is empty, returns +nil+.
#
- # The optional +rng+ argument will be used as the random number generator.
+ # When argument +n+ is given, returns a new \Array containing +n+ random
+ # elements from +self+:
+ # a.sample(3) # => [8, 9, 2]
+ # a.sample(6) # => [9, 6, 10, 3, 1, 4]
+ # Returns no more than <tt>a.size</tt> elements
+ # (because no new duplicates are introduced):
+ # a.sample(a.size * 2) # => [6, 4, 1, 8, 5, 9, 10, 2, 3, 7]
+ # But +self+ may contain duplicates:
+ # a = [1, 1, 1, 2, 2, 3]
+ # a.sample(a.size * 2) # => [1, 1, 3, 2, 1, 2]
+ # The argument +n+ must be a non-negative numeric value.
+ # The order of the result array is unrelated to the order of +self+.
+ # Returns a new empty \Array if +self+ is empty.
#
+ # The optional +random+ argument will be used as the random number generator:
+ # a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# a.sample(random: Random.new(1)) #=> 6
# a.sample(4, random: Random.new(1)) #=> [6, 10, 9, 2]
def sample(n = (ary = false), random: Random)
- __builtin_rb_ary_sample(random, n, ary)
+ if Primitive.mandatory_only?
+ # Primitive.cexpr! %{ rb_ary_sample(self, rb_cRandom, Qfalse, Qfalse) }
+ Primitive.ary_sample0
+ else
+ # Primitive.cexpr! %{ rb_ary_sample(self, random, n, ary) }
+ Primitive.ary_sample(random, n, ary)
+ end
end
end
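At the Ruby level the shuffle/shuffle!/sample rewrite above is behavior-preserving: only the builtin dispatch (Primitive instead of __builtin_) and the documentation change. A short usage example, with results taken from the doc comments:

```ruby
a = [1, 2, 3]
a.shuffle(random: Random.new(1))    # => [1, 3, 2]  (a itself is unchanged)
a.shuffle!(random: Random.new(1))   # shuffles a in place and returns it

a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
a.sample(random: Random.new(1))     # => 6
a.sample(4, random: Random.new(1))  # => [6, 10, 9, 2]
```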
diff --git a/ast.c b/ast.c
index 18a97239ff..adb7287ed3 100644
--- a/ast.c
+++ b/ast.c
@@ -17,7 +17,7 @@ static VALUE rb_cNode;
struct ASTNodeData {
rb_ast_t *ast;
- NODE *node;
+ const NODE *node;
};
static void
@@ -44,7 +44,7 @@ static const rb_data_type_t rb_node_type = {
static VALUE rb_ast_node_alloc(VALUE klass);
static void
-setup_node(VALUE obj, rb_ast_t *ast, NODE *node)
+setup_node(VALUE obj, rb_ast_t *ast, const NODE *node)
{
struct ASTNodeData *data;
@@ -54,7 +54,7 @@ setup_node(VALUE obj, rb_ast_t *ast, NODE *node)
}
static VALUE
-ast_new_internal(rb_ast_t *ast, NODE *node)
+ast_new_internal(rb_ast_t *ast, const NODE *node)
{
VALUE obj;
@@ -64,9 +64,8 @@ ast_new_internal(rb_ast_t *ast, NODE *node)
return obj;
}
-static VALUE rb_ast_parse_str(VALUE str);
-static VALUE rb_ast_parse_file(VALUE path);
-static VALUE rb_ast_parse_array(VALUE array);
+static VALUE rb_ast_parse_str(VALUE str, VALUE keep_script_lines, VALUE error_tolerant, VALUE keep_tokens);
+static VALUE rb_ast_parse_file(VALUE path, VALUE keep_script_lines, VALUE error_tolerant, VALUE keep_tokens);
static VALUE
ast_parse_new(void)
@@ -86,29 +85,33 @@ ast_parse_done(rb_ast_t *ast)
}
static VALUE
-ast_s_parse(rb_execution_context_t *ec, VALUE module, VALUE str)
+ast_s_parse(rb_execution_context_t *ec, VALUE module, VALUE str, VALUE keep_script_lines, VALUE error_tolerant, VALUE keep_tokens)
{
- return rb_ast_parse_str(str);
+ return rb_ast_parse_str(str, keep_script_lines, error_tolerant, keep_tokens);
}
static VALUE
-rb_ast_parse_str(VALUE str)
+rb_ast_parse_str(VALUE str, VALUE keep_script_lines, VALUE error_tolerant, VALUE keep_tokens)
{
rb_ast_t *ast = 0;
StringValue(str);
- ast = rb_parser_compile_string_path(ast_parse_new(), Qnil, str, 1);
+ VALUE vparser = ast_parse_new();
+ if (RTEST(keep_script_lines)) rb_parser_keep_script_lines(vparser);
+ if (RTEST(error_tolerant)) rb_parser_error_tolerant(vparser);
+ if (RTEST(keep_tokens)) rb_parser_keep_tokens(vparser);
+ ast = rb_parser_compile_string_path(vparser, Qnil, str, 1);
return ast_parse_done(ast);
}
static VALUE
-ast_s_parse_file(rb_execution_context_t *ec, VALUE module, VALUE path)
+ast_s_parse_file(rb_execution_context_t *ec, VALUE module, VALUE path, VALUE keep_script_lines, VALUE error_tolerant, VALUE keep_tokens)
{
- return rb_ast_parse_file(path);
+ return rb_ast_parse_file(path, keep_script_lines, error_tolerant, keep_tokens);
}
static VALUE
-rb_ast_parse_file(VALUE path)
+rb_ast_parse_file(VALUE path, VALUE keep_script_lines, VALUE error_tolerant, VALUE keep_tokens)
{
VALUE f;
rb_ast_t *ast = 0;
@@ -117,7 +120,11 @@ rb_ast_parse_file(VALUE path)
FilePathValue(path);
f = rb_file_open_str(path, "r");
rb_funcall(f, rb_intern("set_encoding"), 2, rb_enc_from_encoding(enc), rb_str_new_cstr("-"));
- ast = rb_parser_compile_file_path(ast_parse_new(), Qnil, f, 1);
+ VALUE vparser = ast_parse_new();
+ if (RTEST(keep_script_lines)) rb_parser_keep_script_lines(vparser);
+ if (RTEST(error_tolerant)) rb_parser_error_tolerant(vparser);
+ if (RTEST(keep_tokens)) rb_parser_keep_tokens(vparser);
+ ast = rb_parser_compile_file_path(vparser, Qnil, f, 1);
rb_io_close(f);
return ast_parse_done(ast);
}
@@ -136,16 +143,20 @@ lex_array(VALUE array, int index)
}
static VALUE
-rb_ast_parse_array(VALUE array)
+rb_ast_parse_array(VALUE array, VALUE keep_script_lines, VALUE error_tolerant, VALUE keep_tokens)
{
rb_ast_t *ast = 0;
array = rb_check_array_type(array);
- ast = rb_parser_compile_generic(ast_parse_new(), lex_array, Qnil, array, 1);
+ VALUE vparser = ast_parse_new();
+ if (RTEST(keep_script_lines)) rb_parser_keep_script_lines(vparser);
+ if (RTEST(error_tolerant)) rb_parser_error_tolerant(vparser);
+ if (RTEST(keep_tokens)) rb_parser_keep_tokens(vparser);
+ ast = rb_parser_compile_generic(vparser, lex_array, Qnil, array, 1);
return ast_parse_done(ast);
}
-static VALUE node_children(rb_ast_t*, NODE*);
+static VALUE node_children(rb_ast_t*, const NODE*);
static VALUE
node_find(VALUE self, const int node_id)
@@ -188,35 +199,69 @@ script_lines(VALUE path)
}
static VALUE
-ast_s_of(rb_execution_context_t *ec, VALUE module, VALUE body)
+node_id_for_backtrace_location(rb_execution_context_t *ec, VALUE module, VALUE location)
{
- VALUE path, node, lines;
int node_id;
- const rb_iseq_t *iseq = NULL;
- if (rb_obj_is_proc(body)) {
- iseq = vm_proc_iseq(body);
+ if (!rb_frame_info_p(location)) {
+ rb_raise(rb_eTypeError, "Thread::Backtrace::Location object expected");
+ }
- if (!rb_obj_is_iseq((VALUE)iseq)) {
- iseq = NULL;
- }
+ node_id = rb_get_node_id_from_frame_info(location);
+ if (node_id == -1) {
+ return Qnil;
+ }
+
+ return INT2NUM(node_id);
+}
+
+static VALUE
+ast_s_of(rb_execution_context_t *ec, VALUE module, VALUE body, VALUE keep_script_lines, VALUE error_tolerant, VALUE keep_tokens)
+{
+ VALUE node, lines = Qnil;
+ const rb_iseq_t *iseq;
+ int node_id;
+
+ if (rb_frame_info_p(body)) {
+ iseq = rb_get_iseq_from_frame_info(body);
+ node_id = rb_get_node_id_from_frame_info(body);
}
else {
- iseq = rb_method_iseq(body);
+ iseq = NULL;
+
+ if (rb_obj_is_proc(body)) {
+ iseq = vm_proc_iseq(body);
+
+ if (!rb_obj_is_iseq((VALUE)iseq)) return Qnil;
+ }
+ else {
+ iseq = rb_method_iseq(body);
+ }
+ if (iseq) {
+ node_id = ISEQ_BODY(iseq)->location.node_id;
+ }
}
- if (!iseq) return Qnil;
+ if (!iseq) {
+ return Qnil;
+ }
+ lines = ISEQ_BODY(iseq)->variable.script_lines;
+
+ VALUE path = rb_iseq_path(iseq);
+ int e_option = RSTRING_LEN(path) == 2 && memcmp(RSTRING_PTR(path), "-e", 2) == 0;
+
+ if (NIL_P(lines) && rb_iseq_from_eval_p(iseq) && !e_option) {
+ rb_raise(rb_eArgError, "cannot get AST for method defined in eval");
+ }
- path = rb_iseq_path(iseq);
- node_id = iseq->body->location.node_id;
- if (!NIL_P(lines = script_lines(path))) {
- node = rb_ast_parse_array(lines);
+ if (!NIL_P(lines) || !NIL_P(lines = script_lines(path))) {
+ node = rb_ast_parse_array(lines, keep_script_lines, error_tolerant, keep_tokens);
}
- else if (RSTRING_LEN(path) == 2 && memcmp(RSTRING_PTR(path), "-e", 2) == 0) {
- node = rb_ast_parse_str(rb_e_script);
+ else if (e_option) {
+ node = rb_ast_parse_str(rb_e_script, keep_script_lines, error_tolerant, keep_tokens);
}
else {
- node = rb_ast_parse_file(path);
+ node = rb_ast_parse_file(path, keep_script_lines, error_tolerant, keep_tokens);
}
return node_find(node, node_id);
@@ -246,6 +291,15 @@ ast_node_type(rb_execution_context_t *ec, VALUE self)
return rb_sym_intern_ascii_cstr(node_type_to_str(data->node));
}
+static VALUE
+ast_node_node_id(rb_execution_context_t *ec, VALUE self)
+{
+ struct ASTNodeData *data;
+ TypedData_Get_Struct(self, struct ASTNodeData, &rb_node_type, data);
+
+ return INT2FIX(nd_node_id(data->node));
+}
+
#define NEW_CHILD(ast, node) node ? ast_new_internal(ast, node) : Qnil
static VALUE
@@ -268,13 +322,13 @@ rb_ary_new_from_node_args(rb_ast_t *ast, long n, ...)
}
static VALUE
-dump_block(rb_ast_t *ast, NODE *node)
+dump_block(rb_ast_t *ast, const NODE *node)
{
VALUE ary = rb_ary_new();
do {
rb_ary_push(ary, NEW_CHILD(ast, node->nd_head));
} while (node->nd_next &&
- nd_type(node->nd_next) == NODE_BLOCK &&
+ nd_type_p(node->nd_next, NODE_BLOCK) &&
(node = node->nd_next, 1));
if (node->nd_next) {
rb_ary_push(ary, NEW_CHILD(ast, node->nd_next));
@@ -284,12 +338,12 @@ dump_block(rb_ast_t *ast, NODE *node)
}
static VALUE
-dump_array(rb_ast_t *ast, NODE *node)
+dump_array(rb_ast_t *ast, const NODE *node)
{
VALUE ary = rb_ary_new();
rb_ary_push(ary, NEW_CHILD(ast, node->nd_head));
- while (node->nd_next && nd_type(node->nd_next) == NODE_LIST) {
+ while (node->nd_next && nd_type_p(node->nd_next, NODE_LIST)) {
node = node->nd_next;
rb_ary_push(ary, NEW_CHILD(ast, node->nd_head));
}
@@ -307,7 +361,21 @@ var_name(ID id)
}
static VALUE
-node_children(rb_ast_t *ast, NODE *node)
+no_name_rest(void)
+{
+ ID rest;
+ CONST_ID(rest, "NODE_SPECIAL_NO_NAME_REST");
+ return ID2SYM(rest);
+}
+
+static VALUE
+rest_arg(rb_ast_t *ast, const NODE *rest_arg)
+{
+ return NODE_NAMED_REST_P(rest_arg) ? NEW_CHILD(ast, rest_arg) : no_name_rest();
+}
+
+static VALUE
+node_children(rb_ast_t *ast, const NODE *node)
{
char name[DECIMAL_SIZE_OF_BITS(sizeof(long) * CHAR_BIT) + 2]; /* including '$' */
@@ -330,22 +398,17 @@ node_children(rb_ast_t *ast, NODE *node)
case NODE_IN:
return rb_ary_new_from_node_args(ast, 3, node->nd_head, node->nd_body, node->nd_next);
case NODE_WHILE:
- goto loop;
case NODE_UNTIL:
- loop:
return rb_ary_push(rb_ary_new_from_node_args(ast, 2, node->nd_cond, node->nd_body),
- (node->nd_state ? Qtrue : Qfalse));
+ RBOOL(node->nd_state));
case NODE_ITER:
case NODE_FOR:
return rb_ary_new_from_node_args(ast, 2, node->nd_iter, node->nd_body);
case NODE_FOR_MASGN:
return rb_ary_new_from_node_args(ast, 1, node->nd_var);
case NODE_BREAK:
- goto jump;
case NODE_NEXT:
- goto jump;
case NODE_RETURN:
- jump:
return rb_ary_new_from_node_args(ast, 1, node->nd_stts);
case NODE_REDO:
return rb_ary_new_from_node_args(ast, 0);
@@ -360,15 +423,13 @@ node_children(rb_ast_t *ast, NODE *node)
case NODE_ENSURE:
return rb_ary_new_from_node_args(ast, 2, node->nd_head, node->nd_ensr);
case NODE_AND:
- goto andor;
case NODE_OR:
- andor:
{
VALUE ary = rb_ary_new();
while (1) {
rb_ary_push(ary, NEW_CHILD(ast, node->nd_1st));
- if (!node->nd_2nd || nd_type(node->nd_2nd) != (int)type)
+ if (!node->nd_2nd || !nd_type_p(node->nd_2nd, type))
break;
node = node->nd_2nd;
}
@@ -382,24 +443,17 @@ node_children(rb_ast_t *ast, NODE *node)
else {
return rb_ary_new_from_args(3, NEW_CHILD(ast, node->nd_value),
NEW_CHILD(ast, node->nd_head),
- ID2SYM(rb_intern("NODE_SPECIAL_NO_NAME_REST")));
+ no_name_rest());
}
case NODE_LASGN:
- goto asgn;
case NODE_DASGN:
- goto asgn;
- case NODE_DASGN_CURR:
- goto asgn;
case NODE_IASGN:
- goto asgn;
case NODE_CVASGN:
- asgn:
+ case NODE_GASGN:
if (NODE_REQUIRED_KEYWORD_P(node)) {
return rb_ary_new_from_args(2, var_name(node->nd_vid), ID2SYM(rb_intern("NODE_SPECIAL_REQUIRED_KEYWORD")));
}
return rb_ary_new_from_args(2, var_name(node->nd_vid), NEW_CHILD(ast, node->nd_value));
- case NODE_GASGN:
- goto asgn;
case NODE_CDECL:
if (node->nd_vid) {
return rb_ary_new_from_args(2, ID2SYM(node->nd_vid), NEW_CHILD(ast, node->nd_value));
@@ -411,9 +465,10 @@ node_children(rb_ast_t *ast, NODE *node)
NEW_CHILD(ast, node->nd_args->nd_head),
NEW_CHILD(ast, node->nd_args->nd_body));
case NODE_OP_ASGN2:
- return rb_ary_new_from_args(4, NEW_CHILD(ast, node->nd_recv),
- node->nd_next->nd_aid ? Qtrue : Qfalse,
+ return rb_ary_new_from_args(5, NEW_CHILD(ast, node->nd_recv),
+ RBOOL(node->nd_next->nd_aid),
ID2SYM(node->nd_next->nd_vid),
+ ID2SYM(node->nd_next->nd_mid),
NEW_CHILD(ast, node->nd_value));
case NODE_OP_ASGN_AND:
return rb_ary_new_from_args(3, NEW_CHILD(ast, node->nd_head), ID2SYM(idANDOP),
@@ -441,9 +496,7 @@ node_children(rb_ast_t *ast, NODE *node)
case NODE_ZSUPER:
return rb_ary_new_from_node_args(ast, 0);
case NODE_LIST:
- goto ary;
case NODE_VALUES:
- ary:
return dump_array(ast, node);
case NODE_ZLIST:
return rb_ary_new_from_node_args(ast, 0);
@@ -467,8 +520,6 @@ node_children(rb_ast_t *ast, NODE *node)
name[1] = (char)node->nd_nth;
name[2] = '\0';
return rb_ary_new_from_args(1, ID2SYM(rb_intern(name)));
- case NODE_MATCH:
- goto lit;
case NODE_MATCH2:
if (node->nd_args) {
return rb_ary_new_from_node_args(ast, 3, node->nd_recv, node->nd_value, node->nd_args);
@@ -476,26 +527,26 @@ node_children(rb_ast_t *ast, NODE *node)
return rb_ary_new_from_node_args(ast, 2, node->nd_recv, node->nd_value);
case NODE_MATCH3:
return rb_ary_new_from_node_args(ast, 2, node->nd_recv, node->nd_value);
+ case NODE_MATCH:
case NODE_LIT:
- goto lit;
case NODE_STR:
- goto lit;
case NODE_XSTR:
- lit:
return rb_ary_new_from_args(1, node->nd_lit);
case NODE_ONCE:
return rb_ary_new_from_node_args(ast, 1, node->nd_body);
case NODE_DSTR:
- goto dlit;
case NODE_DXSTR:
- goto dlit;
case NODE_DREGX:
- goto dlit;
case NODE_DSYM:
- dlit:
- return rb_ary_new_from_args(3, node->nd_lit,
- NEW_CHILD(ast, node->nd_next->nd_head),
- NEW_CHILD(ast, node->nd_next->nd_next));
+ {
+ NODE *n = node->nd_next;
+ VALUE head = Qnil, next = Qnil;
+ if (n) {
+ head = NEW_CHILD(ast, n->nd_head);
+ next = NEW_CHILD(ast, n->nd_next);
+ }
+ return rb_ary_new_from_args(3, node->nd_lit, head, next);
+ }
case NODE_EVSTR:
return rb_ary_new_from_node_args(ast, 1, node->nd_body);
case NODE_ARGSCAT:
@@ -527,13 +578,9 @@ node_children(rb_ast_t *ast, NODE *node)
case NODE_COLON3:
return rb_ary_new_from_args(1, ID2SYM(node->nd_mid));
case NODE_DOT2:
- goto dot;
case NODE_DOT3:
- goto dot;
case NODE_FLIP2:
- goto dot;
case NODE_FLIP3:
- dot:
return rb_ary_new_from_node_args(ast, 2, node->nd_beg, node->nd_end);
case NODE_SELF:
return rb_ary_new_from_node_args(ast, 0);
@@ -561,7 +608,7 @@ node_children(rb_ast_t *ast, NODE *node)
if (NODE_NAMED_REST_P(node->nd_1st)) {
return rb_ary_new_from_node_args(ast, 2, node->nd_1st, node->nd_2nd);
}
- return rb_ary_new_from_args(2, ID2SYM(rb_intern("NODE_SPECIAL_NO_NAME_REST")),
+ return rb_ary_new_from_args(2, no_name_rest(),
NEW_CHILD(ast, node->nd_2nd));
case NODE_ARGS:
{
@@ -573,32 +620,44 @@ node_children(rb_ast_t *ast, NODE *node)
var_name(ainfo->first_post_arg),
INT2NUM(ainfo->post_args_num),
NEW_CHILD(ast, ainfo->post_init),
- var_name(ainfo->rest_arg),
+ (ainfo->rest_arg == NODE_SPECIAL_EXCESSIVE_COMMA
+ ? ID2SYM(rb_intern("NODE_SPECIAL_EXCESSIVE_COMMA"))
+ : var_name(ainfo->rest_arg)),
(ainfo->no_kwarg ? Qfalse : NEW_CHILD(ast, ainfo->kw_args)),
(ainfo->no_kwarg ? Qfalse : NEW_CHILD(ast, ainfo->kw_rest_arg)),
var_name(ainfo->block_arg));
}
case NODE_SCOPE:
{
- ID *tbl = node->nd_tbl;
- int i, size = tbl ? (int)*tbl++ : 0;
+ rb_ast_id_table_t *tbl = node->nd_tbl;
+ int i, size = tbl ? tbl->size : 0;
VALUE locals = rb_ary_new_capa(size);
for (i = 0; i < size; i++) {
- rb_ary_push(locals, var_name(tbl[i]));
+ rb_ary_push(locals, var_name(tbl->ids[i]));
}
return rb_ary_new_from_args(3, locals, NEW_CHILD(ast, node->nd_args), NEW_CHILD(ast, node->nd_body));
}
case NODE_ARYPTN:
{
struct rb_ary_pattern_info *apinfo = node->nd_apinfo;
- VALUE rest = NODE_NAMED_REST_P(apinfo->rest_arg) ? NEW_CHILD(ast, apinfo->rest_arg) :
- ID2SYM(rb_intern("NODE_SPECIAL_NO_NAME_REST"));
+ VALUE rest = rest_arg(ast, apinfo->rest_arg);
return rb_ary_new_from_args(4,
NEW_CHILD(ast, node->nd_pconst),
NEW_CHILD(ast, apinfo->pre_args),
rest,
NEW_CHILD(ast, apinfo->post_args));
}
+ case NODE_FNDPTN:
+ {
+ struct rb_fnd_pattern_info *fpinfo = node->nd_fpinfo;
+ VALUE pre_rest = rest_arg(ast, fpinfo->pre_rest_arg);
+ VALUE post_rest = rest_arg(ast, fpinfo->post_rest_arg);
+ return rb_ary_new_from_args(4,
+ NEW_CHILD(ast, node->nd_pconst),
+ pre_rest,
+ NEW_CHILD(ast, fpinfo->args),
+ post_rest);
+ }
case NODE_HSHPTN:
{
VALUE kwrest = node->nd_pkwrestarg == NODE_SPECIAL_NO_REST_KEYWORD ? ID2SYM(rb_intern("NODE_SPECIAL_NO_REST_KEYWORD")) :
@@ -609,6 +668,8 @@ node_children(rb_ast_t *ast, NODE *node)
NEW_CHILD(ast, node->nd_pkwargs),
kwrest);
}
+ case NODE_ERROR:
+ return rb_ary_new_from_node_args(ast, 0);
case NODE_ARGS_AUX:
case NODE_LAST:
break;
@@ -663,6 +724,15 @@ ast_node_last_column(rb_execution_context_t *ec, VALUE self)
}
static VALUE
+ast_node_all_tokens(rb_execution_context_t *ec, VALUE self)
+{
+ struct ASTNodeData *data;
+ TypedData_Get_Struct(self, struct ASTNodeData, &rb_node_type, data);
+
+ return rb_ast_tokens(data->ast);
+}
+
+static VALUE
ast_node_inspect(rb_execution_context_t *ec, VALUE self)
{
VALUE str;
@@ -682,6 +752,16 @@ ast_node_inspect(rb_execution_context_t *ec, VALUE self)
return str;
}
+static VALUE
+ast_node_script_lines(rb_execution_context_t *ec, VALUE self)
+{
+ struct ASTNodeData *data;
+ TypedData_Get_Struct(self, struct ASTNodeData, &rb_node_type, data);
+ VALUE ret = data->ast->body.script_lines;
+ if (!RB_TYPE_P(ret, T_ARRAY)) return Qnil;
+ return ret;
+}
+
#include "ast.rbinc"
void
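The ast.c changes thread the three new parser flags (keep_script_lines, error_tolerant, keep_tokens) through every parse entry point, and add node_id_for_backtrace_location plus Thread::Backtrace::Location support in ast_s_of. A hedged sketch of how the flags surface in the Ruby-level API documented in the ast.rb diff below; the outputs are illustrative:

```ruby
# Keep the parsed source text attached to the tree:
root = RubyVM::AbstractSyntaxTree.parse("x = 1 + 2", keep_script_lines: true)
root.script_lines                  # => ["x = 1 + 2"]

# Recover a partial tree instead of raising SyntaxError:
root = RubyVM::AbstractSyntaxTree.parse("x = 1; p(x; y=2", error_tolerant: true)
root.children.last.children.map(&:type)
# => [:LASGN, :ERROR, :LASGN]     (the broken call becomes an ERROR node)
```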
diff --git a/ast.rb b/ast.rb
index c507c50af8..f3f72c747f 100644
--- a/ast.rb
+++ b/ast.rb
@@ -1,145 +1,275 @@
# for ast.c
-class RubyVM
+# AbstractSyntaxTree provides methods to parse Ruby code into
+# abstract syntax trees. The nodes in the tree
+# are instances of RubyVM::AbstractSyntaxTree::Node.
+#
+# This module is MRI specific as it exposes implementation details
+# of the MRI abstract syntax tree.
+#
+# This module is experimental and its API is not stable, therefore it might
+# change without notice. As examples, the order of children nodes is not
+# guaranteed, the number of children nodes might change, there is no way to
+# access children nodes by name, etc.
+#
+# If you are looking for a stable API or an API working under multiple Ruby
+# implementations, consider using the _parser_ gem or Ripper. If you would
+# like to make RubyVM::AbstractSyntaxTree stable, please join the discussion
+# at https://bugs.ruby-lang.org/issues/14844.
+#
+module RubyVM::AbstractSyntaxTree
- # AbstractSyntaxTree provides methods to parse Ruby code into
- # abstract syntax trees. The nodes in the tree
- # are instances of RubyVM::AbstractSyntaxTree::Node.
+ # call-seq:
+ # RubyVM::AbstractSyntaxTree.parse(string, keep_script_lines: false, error_tolerant: false, keep_tokens: false) -> RubyVM::AbstractSyntaxTree::Node
#
- # This class is MRI specific as it exposes implementation details
- # of the MRI abstract syntax tree.
+ # Parses the given _string_ into an abstract syntax tree,
+ # returning the root node of that tree.
#
- # This class is experimental and its API is not stable, therefore it might
- # change without notice. As examples, the order of children nodes is not
- # guaranteed, the number of children nodes might change, there is no way to
- # access children nodes by name, etc.
+ # RubyVM::AbstractSyntaxTree.parse("x = 1 + 2")
+ # # => #<RubyVM::AbstractSyntaxTree::Node:SCOPE@1:0-1:9>
#
- # If you are looking for a stable API or an API working under multiple Ruby
- # implementations, consider using the _parser_ gem or Ripper. If you would
- # like to make RubyVM::AbstractSyntaxTree stable, please join the discussion
- # at https://bugs.ruby-lang.org/issues/14844.
+ # If the <tt>keep_script_lines: true</tt> option is provided, the text of the parsed
+ # source is associated with nodes and is available via Node#script_lines.
#
- module AbstractSyntaxTree
+ # If the <tt>keep_tokens: true</tt> option is provided, Node#tokens is populated.
+ #
+ # SyntaxError is raised if the given _string_ is invalid syntax. To override this
+ # behavior, <tt>error_tolerant: true</tt> can be provided. In this case, the parser
+ # will produce a tree in which expressions with syntax errors are represented by
+ # a Node with <tt>type=:ERROR</tt>.
+ #
+ # root = RubyVM::AbstractSyntaxTree.parse("x = 1; p(x; y=2")
+ # # <internal:ast>:33:in `parse': syntax error, unexpected ';', expecting ')' (SyntaxError)
+ # # x = 1; p(x; y=2
+ # # ^
+ #
+ # root = RubyVM::AbstractSyntaxTree.parse("x = 1; p(x; y=2", error_tolerant: true)
+ # # (SCOPE@1:0-1:15
+ # # tbl: [:x, :y]
+ # # args: nil
+ # # body: (BLOCK@1:0-1:15 (LASGN@1:0-1:5 :x (LIT@1:4-1:5 1)) (ERROR@1:7-1:11) (LASGN@1:12-1:15 :y (LIT@1:14-1:15 2))))
+ # root.children.last.children
+ # # [(LASGN@1:0-1:5 :x (LIT@1:4-1:5 1)),
+ # # (ERROR@1:7-1:11),
+ # # (LASGN@1:12-1:15 :y (LIT@1:14-1:15 2))]
+ #
+ # Note that parsing continues even after the errored expression.
+ #
+ def self.parse string, keep_script_lines: false, error_tolerant: false, keep_tokens: false
+ Primitive.ast_s_parse string, keep_script_lines, error_tolerant, keep_tokens
+ end
+
+ # call-seq:
+ # RubyVM::AbstractSyntaxTree.parse_file(pathname, keep_script_lines: false, error_tolerant: false, keep_tokens: false) -> RubyVM::AbstractSyntaxTree::Node
+ #
+ # Reads the file from _pathname_, then parses it like ::parse,
+ # returning the root node of the abstract syntax tree.
+ #
+ # SyntaxError is raised if _pathname_'s contents are not
+ # valid Ruby syntax.
+ #
+ # RubyVM::AbstractSyntaxTree.parse_file("my-app/app.rb")
+ # # => #<RubyVM::AbstractSyntaxTree::Node:SCOPE@1:0-31:3>
+ #
+ # See ::parse for an explanation of the meaning and usage of the keyword arguments.
+ def self.parse_file pathname, keep_script_lines: false, error_tolerant: false, keep_tokens: false
+ Primitive.ast_s_parse_file pathname, keep_script_lines, error_tolerant, keep_tokens
+ end
+
+ # call-seq:
+ # RubyVM::AbstractSyntaxTree.of(proc, keep_script_lines: false, error_tolerant: false, keep_tokens: false) -> RubyVM::AbstractSyntaxTree::Node
+ # RubyVM::AbstractSyntaxTree.of(method, keep_script_lines: false, error_tolerant: false, keep_tokens: false) -> RubyVM::AbstractSyntaxTree::Node
+ #
+ # Returns AST nodes of the given _proc_ or _method_.
+ #
+ # RubyVM::AbstractSyntaxTree.of(proc {1 + 2})
+ # # => #<RubyVM::AbstractSyntaxTree::Node:SCOPE@1:35-1:42>
+ #
+ # def hello
+ # puts "hello, world"
+ # end
+ #
+ # RubyVM::AbstractSyntaxTree.of(method(:hello))
+ # # => #<RubyVM::AbstractSyntaxTree::Node:SCOPE@1:0-3:3>
+ #
+ # See ::parse for an explanation of the meaning and usage of the keyword arguments.
+ def self.of body, keep_script_lines: false, error_tolerant: false, keep_tokens: false
+ Primitive.ast_s_of body, keep_script_lines, error_tolerant, keep_tokens
+ end
+
+ # call-seq:
+ # RubyVM::AbstractSyntaxTree.node_id_for_backtrace_location(backtrace_location) -> integer
+ #
+ # Returns the node id for the given backtrace location.
+ #
+ # begin
+ # raise
+ # rescue => e
+ # loc = e.backtrace_locations.first
+ # RubyVM::AbstractSyntaxTree.node_id_for_backtrace_location(loc)
+ # end # => 0
+ def self.node_id_for_backtrace_location backtrace_location
+ Primitive.node_id_for_backtrace_location backtrace_location
+ end
+
+ # RubyVM::AbstractSyntaxTree::Node instances are created by parse methods in
+ # RubyVM::AbstractSyntaxTree.
+ #
+ # This class is MRI specific.
+ #
+ class Node
# call-seq:
- # RubyVM::AbstractSyntaxTree.parse(string) -> RubyVM::AbstractSyntaxTree::Node
+ # node.type -> symbol
#
- # Parses the given _string_ into an abstract syntax tree,
- # returning the root node of that tree.
+ # Returns the type of this node as a symbol.
#
- # SyntaxError is raised if the given _string_ is invalid syntax.
+ # root = RubyVM::AbstractSyntaxTree.parse("x = 1 + 2")
+ # root.type # => :SCOPE
+ # lasgn = root.children[2]
+ # lasgn.type # => :LASGN
+ # call = lasgn.children[1]
+ # call.type # => :OPCALL
+ def type
+ Primitive.ast_node_type
+ end
+
+ # call-seq:
+ # node.first_lineno -> integer
#
- # RubyVM::AbstractSyntaxTree.parse("x = 1 + 2")
- # # => #<RubyVM::AbstractSyntaxTree::Node:SCOPE@1:0-1:9>
- def self.parse string
- __builtin_ast_s_parse string
+ # The line number in the source code where this AST's text began.
+ def first_lineno
+ Primitive.ast_node_first_lineno
end
# call-seq:
- # RubyVM::AbstractSyntaxTree.parse_file(pathname) -> RubyVM::AbstractSyntaxTree::Node
+ # node.first_column -> integer
#
- # Reads the file from _pathname_, then parses it like ::parse,
- # returning the root node of the abstract syntax tree.
+ # The column number in the source code where this AST's text began.
+ def first_column
+ Primitive.ast_node_first_column
+ end
+
+ # call-seq:
+ # node.last_lineno -> integer
#
- # SyntaxError is raised if _pathname_'s contents are not
- # valid Ruby syntax.
+ # The line number in the source code where this AST's text ended.
+ def last_lineno
+ Primitive.ast_node_last_lineno
+ end
+
+ # call-seq:
+ # node.last_column -> integer
#
- # RubyVM::AbstractSyntaxTree.parse_file("my-app/app.rb")
- # # => #<RubyVM::AbstractSyntaxTree::Node:SCOPE@1:0-31:3>
- def self.parse_file pathname
- __builtin_ast_s_parse_file pathname
+ # The column number in the source code where this AST's text ended.
+ def last_column
+ Primitive.ast_node_last_column
end
# call-seq:
- # RubyVM::AbstractSyntaxTree.of(proc) -> RubyVM::AbstractSyntaxTree::Node
- # RubyVM::AbstractSyntaxTree.of(method) -> RubyVM::AbstractSyntaxTree::Node
+ # node.tokens -> array
#
- # Returns AST nodes of the given _proc_ or _method_.
+ # Returns tokens corresponding to the location of the node.
+ # Returns +nil+ if +keep_tokens+ is not enabled when the #parse method is called.
#
- # RubyVM::AbstractSyntaxTree.of(proc {1 + 2})
- # # => #<RubyVM::AbstractSyntaxTree::Node:SCOPE@1:35-1:42>
+ # root = RubyVM::AbstractSyntaxTree.parse("x = 1 + 2", keep_tokens: true)
+ # root.tokens # => [[0, :tIDENTIFIER, "x", [1, 0, 1, 1]], [1, :tSP, " ", [1, 1, 1, 2]], ...]
+ # root.tokens.map{_1[2]}.join # => "x = 1 + 2"
#
- # def hello
- # puts "hello, world"
- # end
+ # Token is an array of:
#
- # RubyVM::AbstractSyntaxTree.of(method(:hello))
- # # => #<RubyVM::AbstractSyntaxTree::Node:SCOPE@1:0-3:3>
- def self.of body
- __builtin_ast_s_of body
- end
+ # - id
+ # - token type
+ # - source code text
+ # - location [ first_lineno, first_column, last_lineno, last_column ]
+ def tokens
+ return nil unless all_tokens
- # RubyVM::AbstractSyntaxTree::Node instances are created by parse methods in
- # RubyVM::AbstractSyntaxTree.
- #
- # This class is MRI specific.
- #
- class Node
-
- # call-seq:
- # node.type -> symbol
- #
- # Returns the type of this node as a symbol.
- #
- # root = RubyVM::AbstractSyntaxTree.parse("x = 1 + 2")
- # root.type # => :SCOPE
- # lasgn = root.children[2]
- # lasgn.type # => :LASGN
- # call = lasgn.children[1]
- # call.type # => :OPCALL
- def type
- __builtin_ast_node_type
+ all_tokens.each_with_object([]) do |token, a|
+ loc = token.last
+ if ([first_lineno, first_column] <=> [loc[0], loc[1]]) <= 0 &&
+ ([last_lineno, last_column] <=> [loc[2], loc[3]]) >= 0
+ a << token
+ end
end
+ end
- # call-seq:
- # node.first_lineno -> integer
- #
- # The line number in the source code where this AST's text began.
- def first_lineno
- __builtin_ast_node_first_lineno
- end
+ # call-seq:
+ # node.all_tokens -> array
+ #
+ # Returns all tokens for the input script, regardless of the receiver node.
+ # Returns +nil+ if +keep_tokens+ is not enabled when the #parse method is called.
+ #
+ # root = RubyVM::AbstractSyntaxTree.parse("x = 1 + 2", keep_tokens: true)
+ # root.all_tokens # => [[0, :tIDENTIFIER, "x", [1, 0, 1, 1]], [1, :tSP, " ", [1, 1, 1, 2]], ...]
+ # root.children[-1].all_tokens # => [[0, :tIDENTIFIER, "x", [1, 0, 1, 1]], [1, :tSP, " ", [1, 1, 1, 2]], ...]
+ def all_tokens
+ Primitive.ast_node_all_tokens
+ end
- # call-seq:
- # node.first_column -> integer
- #
- # The column number in the source code where this AST's text began.
- def first_column
- __builtin_ast_node_first_column
- end
+ # call-seq:
+ # node.children -> array
+ #
+ # Returns AST nodes under this one. Each kind of node
+ # has different children, depending on what kind of node it is.
+ #
+ # The returned array may contain other nodes or <code>nil</code>.
+ def children
+ Primitive.ast_node_children
+ end
- # call-seq:
- # node.last_lineno -> integer
- #
- # The line number in the source code where this AST's text ended.
- def last_lineno
- __builtin_ast_node_last_lineno
- end
+ # call-seq:
+ # node.inspect -> string
+ #
+ # Returns debugging information about this node as a string.
+ def inspect
+ Primitive.ast_node_inspect
+ end
- # call-seq:
- # node.last_column -> integer
- #
- # The column number in the source code where this AST's text ended.
- def last_column
- __builtin_ast_node_last_column
- end
+ # call-seq:
+ # node.node_id -> integer
+ #
+ # Returns an internal node_id number.
+ # Note that this is an API for ruby internal use, debugging,
+ # and research. Do not use this for any other purpose.
+ # The compatibility is not guaranteed.
+ def node_id
+ Primitive.ast_node_node_id
+ end
- # call-seq:
- # node.children -> array
- #
- # Returns AST nodes under this one. Each kind of node
- # has different children, depending on what kind of node it is.
- #
- # The returned array may contain other nodes or <code>nil</code>.
- def children
- __builtin_ast_node_children
- end
+ # call-seq:
+ # node.script_lines -> array
+ #
+ # Returns the original source code as an array of lines.
+ #
+ # Note that this is an API for ruby internal use, debugging,
+ # and research. Do not use this for any other purpose.
+ # The compatibility is not guaranteed.
+ def script_lines
+ Primitive.ast_node_script_lines
+ end
- # call-seq:
- # node.inspect -> string
- #
- # Returns debugging information about this node as a string.
- def inspect
- __builtin_ast_node_inspect
+ # call-seq:
+ # node.source -> string
+ #
+ # Returns the code fragment that corresponds to this AST.
+ #
+ # Note that this is an API for ruby internal use, debugging,
+ # and research. Do not use this for any other purpose.
+ # The compatibility is not guaranteed.
+ #
+ # Also note that this API may return an incomplete code fragment
+ # that does not parse; for example, a here document following
+ # an expression may be dropped.
+ def source
+ lines = script_lines
+ if lines
+ lines = lines[first_lineno - 1 .. last_lineno - 1]
+ lines[-1] = lines[-1][0...last_column]
+ lines[0] = lines[0][first_column..-1]
+ lines.join
+ else
+ nil
end
end
end
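Putting the new Node helpers together, a small example (hedged; token arrays abbreviated) of keep_tokens combined with #source and #tokens, following the doc comments above:

```ruby
root = RubyVM::AbstractSyntaxTree.parse("x = 1 + 2",
                                        keep_script_lines: true, keep_tokens: true)
call = root.children[2].children[1]     # the OPCALL node for `1 + 2`
call.type                               # => :OPCALL
call.source                             # => "1 + 2"  (sliced out of script_lines)
call.tokens.map { _1[2] }.join          # => "1 + 2"  (only tokens inside the node's range)
```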
diff --git a/autogen.sh b/autogen.sh
new file mode 100755
index 0000000000..f8cdf3c0c1
--- /dev/null
+++ b/autogen.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+PWD=
+case "$0" in
+*/*) srcdir=`dirname $0`;;
+*) srcdir="";;
+esac
+
+symlink='--install --symlink'
+case " $* " in
+ *" -i "*|*" --install "*)
+ # reset to copy missing standard auxiliary files, instead of symlinks
+ symlink=
+ ;;
+esac
+
+exec ${AUTORECONF:-autoreconf} ${symlink} "$@" ${srcdir:+"$srcdir"}
diff --git a/basictest/test.rb b/basictest/test.rb
index 25a4298234..95875b52a6 100755
--- a/basictest/test.rb
+++ b/basictest/test.rb
@@ -1425,9 +1425,6 @@ marity_test(:test_ok)
marity_test(:marity_test)
marity_test(:p)
-lambda(&method(:test_ok)).call(true)
-lambda(&block_get{|a,n| test_ok(a,n)}).call(true, 2)
-
class ITER_TEST1
def a
block_given?
@@ -1963,6 +1960,8 @@ test_ok(p1.call == 5)
test_ok(i7 == nil)
end
+# WASI doesn't support spawning a new process for now.
+unless /wasi/ =~ RUBY_PLATFORM
test_check "system"
test_ok(`echo foobar` == "foobar\n")
test_ok(`./miniruby -e 'print "foobar"'` == 'foobar')
@@ -2013,6 +2012,7 @@ test_ok(done)
File.unlink script_tmp or `/bin/rm -f "#{script_tmp}"`
File.unlink "#{script_tmp}.bak" or `/bin/rm -f "#{script_tmp}.bak"`
+end # not /wasi/ =~ RUBY_PLATFORM
test_check "const"
TEST1 = 1
@@ -2140,7 +2140,7 @@ $_ = foobar
test_ok($_ == foobar)
class Gods
- @@rule = "Uranus" # private to Gods
+ @@rule = "Uranus"
def ruler0
@@rule
end
@@ -2163,7 +2163,7 @@ module Olympians
end
class Titans < Gods
- @@rule = "Cronus" # do not affect @@rule in Gods
+ @@rule = "Cronus" # modifies @@rule in Gods
include Olympians
def ruler4
@@rule
@@ -2178,7 +2178,14 @@ test_ok(Titans.ruler2 == "Cronus")
atlas = Titans.new
test_ok(atlas.ruler0 == "Cronus")
test_ok(atlas.ruler3 == "Zeus")
-test_ok(atlas.ruler4 == "Cronus")
+begin
+ atlas.ruler4
+rescue RuntimeError => e
+ test_ok(e.message.include?("class variable @@rule of Olympians is overtaken by Gods"))
+else
+ test_ok(false)
+end
+test_ok(atlas.ruler3 == "Zeus")
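The rewritten assertion pins down the Ruby 3.0 class-variable semantics: reading @@rule from Titans now finds the variable defined both in the included Olympians module and in the superclass Gods, and raises rather than silently preferring one. Condensed from the test above:

```ruby
class Gods
  @@rule = "Uranus"
end

module Olympians
  @@rule = "Zeus"
end

class Titans < Gods
  @@rule = "Cronus"     # writes through to @@rule in Gods
  include Olympians
  def ruler4
    @@rule
  end
end

Titans.new.ruler4
# RuntimeError: class variable @@rule of Olympians is overtaken by Gods
```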
test_check "trace"
$x = 1234
diff --git a/benchmark/README.md b/benchmark/README.md
index c6d7cc318d..e11381cad9 100644
--- a/benchmark/README.md
+++ b/benchmark/README.md
@@ -28,15 +28,17 @@ See also:
```console
Usage: benchmark-driver [options] RUBY|YAML...
- -r, --runner TYPE Specify runner type: ips, time, memory, once (default: ips)
- -o, --output TYPE Specify output type: compare, simple, markdown, record (default: compare)
+ -r, --runner TYPE Specify runner type: ips, time, memory, once, block (default: ips)
+ -o, --output TYPE Specify output type: compare, simple, markdown, record, all (default: compare)
-e, --executables EXECS Ruby executables (e1::path1 arg1; e2::path2 arg2;...)
--rbenv VERSIONS Ruby executables in rbenv (x.x.x arg1;y.y.y arg2;...)
--repeat-count NUM Try benchmark NUM times and use the fastest result or the worst memory usage
--repeat-result TYPE Yield "best", "average" or "worst" result with --repeat-count (default: best)
+ --alternate Alternate executables instead of running the same executable in a row with --repeat-count
--bundler Install and use gems specified in Gemfile
--filter REGEXP Filter out benchmarks with given regexp
--run-duration SECONDS Warmup estimates loop_count to run for this duration (default: 3)
+ --timeout SECONDS Timeout ruby command execution with timeout(1)
-v, --verbose Verbose mode. Multiple -v options increase visibility (max: 2)
```
@@ -61,7 +63,7 @@ make benchmark ITEM=vm OPTS=--filter=block
# You can specify the benchmark by an exact filename instead of using the default argument:
# ARGS = $$(find $(srcdir)/benchmark -maxdepth 1 -name '*$(ITEM)*.yml' -o -name '*$(ITEM)*.rb')
-make benchmark ARGS=../benchmark/erb_render.yml
+make benchmark ARGS=benchmark/erb_render.yml
# You can specify any option via $OPTS
make benchmark OPTS="--help"
diff --git a/benchmark/array_max_float.yml b/benchmark/array_max_float.yml
new file mode 100644
index 0000000000..ace1ae2e14
--- /dev/null
+++ b/benchmark/array_max_float.yml
@@ -0,0 +1,30 @@
+prelude: |
+ ary2 = 2.times.map(&:to_f).shuffle
+ ary10 = 10.times.map(&:to_f).shuffle
+ ary100 = 100.times.map(&:to_f).shuffle
+ ary500 = 500.times.map(&:to_f).shuffle
+ ary1000 = 1000.times.map(&:to_f).shuffle
+ ary2000 = 2000.times.map(&:to_f).shuffle
+ ary3000 = 3000.times.map(&:to_f).shuffle
+ ary5000 = 5000.times.map(&:to_f).shuffle
+ ary10000 = 10000.times.map(&:to_f).shuffle
+ ary20000 = 20000.times.map(&:to_f).shuffle
+ ary50000 = 50000.times.map(&:to_f).shuffle
+ ary100000 = 100000.times.map(&:to_f).shuffle
+
+benchmark:
+ ary2.max: ary2.max
+ ary10.max: ary10.max
+ ary100.max: ary100.max
+ ary500.max: ary500.max
+ ary1000.max: ary1000.max
+ ary2000.max: ary2000.max
+ ary3000.max: ary3000.max
+ ary5000.max: ary5000.max
+ ary10000.max: ary10000.max
+ ary20000.max: ary20000.max
+ ary50000.max: ary50000.max
+ ary100000.max: ary100000.max
+
+loop_count: 10000
+
diff --git a/benchmark/array_max_int.yml b/benchmark/array_max_int.yml
new file mode 100644
index 0000000000..acd83684d0
--- /dev/null
+++ b/benchmark/array_max_int.yml
@@ -0,0 +1,31 @@
+prelude: |
+ ary2 = 2.times.to_a.shuffle
+ ary10 = 10.times.to_a.shuffle
+ ary100 = 100.times.to_a.shuffle
+ ary500 = 500.times.to_a.shuffle
+ ary1000 = 1000.times.to_a.shuffle
+ ary2000 = 2000.times.to_a.shuffle
+ ary3000 = 3000.times.to_a.shuffle
+ ary5000 = 5000.times.to_a.shuffle
+ ary10000 = 10000.times.to_a.shuffle
+ ary20000 = 20000.times.to_a.shuffle
+ ary50000 = 50000.times.to_a.shuffle
+ ary100000 = 100000.times.to_a.shuffle
+ ary1000000 = 1000000.times.to_a.shuffle
+
+benchmark:
+ ary2.max: ary2.max
+ ary10.max: ary10.max
+ ary100.max: ary100.max
+ ary500.max: ary500.max
+ ary1000.max: ary1000.max
+ ary2000.max: ary2000.max
+ ary3000.max: ary3000.max
+ ary5000.max: ary5000.max
+ ary10000.max: ary10000.max
+ ary20000.max: ary20000.max
+ ary50000.max: ary50000.max
+ ary100000.max: ary100000.max
+ ary1000000.max: ary1000000.max
+
+loop_count: 10000
diff --git a/benchmark/array_max_str.yml b/benchmark/array_max_str.yml
new file mode 100644
index 0000000000..2aeed010f2
--- /dev/null
+++ b/benchmark/array_max_str.yml
@@ -0,0 +1,30 @@
+prelude: |
+ ary2 = 2.times.map(&:to_s).shuffle
+ ary10 = 10.times.map(&:to_s).shuffle
+ ary100 = 100.times.map(&:to_s).shuffle
+ ary500 = 500.times.map(&:to_s).shuffle
+ ary1000 = 1000.times.map(&:to_s).shuffle
+ ary2000 = 2000.times.map(&:to_s).shuffle
+ ary3000 = 3000.times.map(&:to_s).shuffle
+ ary5000 = 5000.times.map(&:to_s).shuffle
+ ary10000 = 10000.times.map(&:to_s).shuffle
+ ary20000 = 20000.times.map(&:to_s).shuffle
+ ary50000 = 50000.times.map(&:to_s).shuffle
+ ary100000 = 100000.times.map(&:to_s).shuffle
+
+benchmark:
+ ary2.max: ary2.max
+ ary10.max: ary10.max
+ ary100.max: ary100.max
+ ary500.max: ary500.max
+ ary1000.max: ary1000.max
+ ary2000.max: ary2000.max
+ ary3000.max: ary3000.max
+ ary5000.max: ary5000.max
+ ary10000.max: ary10000.max
+ ary20000.max: ary20000.max
+ ary50000.max: ary50000.max
+ ary100000.max: ary100000.max
+
+loop_count: 10000
+
diff --git a/benchmark/array_min.yml b/benchmark/array_min.yml
new file mode 100644
index 0000000000..53e5072b14
--- /dev/null
+++ b/benchmark/array_min.yml
@@ -0,0 +1,31 @@
+prelude: |
+ ary2 = 2.times.to_a.shuffle
+ ary10 = 10.times.to_a.shuffle
+ ary100 = 100.times.to_a.shuffle
+ ary500 = 500.times.to_a.shuffle
+ ary1000 = 1000.times.to_a.shuffle
+ ary2000 = 2000.times.to_a.shuffle
+ ary3000 = 3000.times.to_a.shuffle
+ ary5000 = 5000.times.to_a.shuffle
+ ary10000 = 10000.times.to_a.shuffle
+ ary20000 = 20000.times.to_a.shuffle
+ ary50000 = 50000.times.to_a.shuffle
+ ary100000 = 100000.times.to_a.shuffle
+ ary1000000 = 1000000.times.to_a.shuffle
+
+benchmark:
+ ary2.min: ary2.min
+ ary10.min: ary10.min
+ ary100.min: ary100.min
+ ary500.min: ary500.min
+ ary1000.min: ary1000.min
+ ary2000.min: ary2000.min
+ ary3000.min: ary3000.min
+ ary5000.min: ary5000.min
+ ary10000.min: ary10000.min
+ ary20000.min: ary20000.min
+ ary50000.min: ary50000.min
+ ary100000.min: ary100000.min
+ ary1000000.min: ary1000000.min
+
+loop_count: 10000
diff --git a/benchmark/array_sample.yml b/benchmark/array_sample.yml
new file mode 100644
index 0000000000..1cd2b34794
--- /dev/null
+++ b/benchmark/array_sample.yml
@@ -0,0 +1,4 @@
+prelude: ary = (1..10_000).to_a
+benchmark:
+ - ary.sample
+ - ary.sample(2)
diff --git a/benchmark/array_sort_int.yml b/benchmark/array_sort_int.yml
new file mode 100644
index 0000000000..7b9027ebf7
--- /dev/null
+++ b/benchmark/array_sort_int.yml
@@ -0,0 +1,15 @@
+prelude: |
+ ary2 = 2.times.to_a.shuffle
+ ary10 = 10.times.to_a.shuffle
+ ary100 = 100.times.to_a.shuffle
+ ary1000 = 1000.times.to_a.shuffle
+ ary10000 = 10000.times.to_a.shuffle
+
+benchmark:
+ ary2.sort: ary2.sort
+ ary10.sort: ary10.sort
+ ary100.sort: ary100.sort
+ ary1000.sort: ary1000.sort
+ ary10000.sort: ary10000.sort
+
+loop_count: 10000
diff --git a/benchmark/attr_accessor.yml b/benchmark/attr_accessor.yml
new file mode 100644
index 0000000000..82134cdf9b
--- /dev/null
+++ b/benchmark/attr_accessor.yml
@@ -0,0 +1,29 @@
+prelude: |
+ class C
+ attr_accessor :x
+ def initialize
+ @x = nil
+ end
+ class_eval <<-END
+ def ar
+ #{'x;'*256}
+ end
+ def aw
+ #{'self.x = nil;'*256}
+ end
+ def arm
+ m = method(:x)
+ #{'m.call;'*256}
+ end
+ def awm
+ m = method(:x=)
+ #{'m.call(nil);'*256}
+ end
+ END
+ end
+ obj = C.new
+benchmark:
+ attr_reader: "obj.ar"
+ attr_writer: "obj.aw"
+ attr_reader_method: "obj.arm"
+ attr_writer_method: "obj.awm"
diff --git a/benchmark/buffer_each.yml b/benchmark/buffer_each.yml
new file mode 100644
index 0000000000..417941104e
--- /dev/null
+++ b/benchmark/buffer_each.yml
@@ -0,0 +1,27 @@
+prelude: |
+ # frozen_string_literal: true
+ Warning[:experimental] = false
+ string = "The quick brown fox jumped over the lazy dog."
+ array = string.bytes
+ buffer = IO::Buffer.for(string)
+benchmark:
+ string.each_byte: |
+ upcased = String.new
+ string.each_byte do |byte|
+ upcased << (byte ^ 32)
+ end
+ array.each: |
+ upcased = String.new
+ array.each do |byte|
+ upcased << (byte ^ 32)
+ end
+ buffer.each: |
+ upcased = String.new
+ buffer.each(:U8) do |offset, byte|
+ upcased << (byte ^ 32)
+ end
+ buffer.each_byte: |
+ upcased = String.new
+ buffer.each_byte do |byte|
+ upcased << (byte ^ 32)
+ end
diff --git a/benchmark/buffer_get.yml b/benchmark/buffer_get.yml
new file mode 100644
index 0000000000..9e1f99d64e
--- /dev/null
+++ b/benchmark/buffer_get.yml
@@ -0,0 +1,25 @@
+prelude: |
+ # frozen_string_literal: true
+ Warning[:experimental] = false
+ string = "The quick brown fox jumped over the lazy dog."
+ buffer = IO::Buffer.for(string)
+ format = [:U32, :U32, :U32, :U32]
+benchmark:
+ string.unpack1: |
+ [
+ string.unpack1("N"),
+ string.unpack1("N", offset: 4),
+ string.unpack1("N", offset: 8),
+ string.unpack1("N", offset: 12),
+ ]
+ buffer.get_value: |
+ [
+ buffer.get_value(:U32, 0),
+ buffer.get_value(:U32, 4),
+ buffer.get_value(:U32, 8),
+ buffer.get_value(:U32, 12),
+ ]
+ buffer.get_values: |
+ buffer.get_values(format, 0)
+ string.unpack: |
+ string.unpack("NNNN")
diff --git a/benchmark/cgi_escape_html.yml b/benchmark/cgi_escape_html.yml
index af6abd08ac..655be9d7d8 100644
--- a/benchmark/cgi_escape_html.yml
+++ b/benchmark/cgi_escape_html.yml
@@ -1,32 +1,23 @@
-prelude: require 'cgi/escape'
+prelude: |
+ # frozen_string_literal: true
+ require 'cgi/escape'
benchmark:
- - name: escape_html_blank
- prelude: str = ""
- script: CGI.escapeHTML(str)
+ - script: CGI.escapeHTML("")
loop_count: 20000000
- - name: escape_html_short_none
- prelude: str = "abcde"
- script: CGI.escapeHTML(str)
+ - script: CGI.escapeHTML("abcde")
loop_count: 20000000
- - name: escape_html_short_one
- prelude: str = "abcd<"
- script: CGI.escapeHTML(str)
+ - script: CGI.escapeHTML("abcd<")
loop_count: 20000000
- - name: escape_html_short_all
- prelude: str = "'&\"<>"
- script: CGI.escapeHTML(str)
+ - script: CGI.escapeHTML("'&\"<>")
loop_count: 5000000
- - name: escape_html_long_none
- prelude: str = "abcde" * 300
- script: CGI.escapeHTML(str)
+ - prelude: long_no_escape = "abcde" * 300
+ script: CGI.escapeHTML(long_no_escape)
loop_count: 1000000
- - name: escape_html_long_all
- prelude: str = "'&\"<>" * 10
- script: CGI.escapeHTML(str)
+ - prelude: long_all_escape = "'&\"<>" * 10
+ script: CGI.escapeHTML(long_all_escape)
loop_count: 1000000
- - name: escape_html_real
- prelude: | # http://example.com/
- str = <<~HTML
+ - prelude: | # http://example.com/
+ example_html = <<~HTML
<body>
<div>
<h1>Example Domain</h1>
@@ -36,5 +27,5 @@ benchmark:
</div>
</body>
HTML
- script: CGI.escapeHTML(str)
+ script: CGI.escapeHTML(example_html)
loop_count: 1000000
diff --git a/benchmark/constant_invalidation.rb b/benchmark/constant_invalidation.rb
new file mode 100644
index 0000000000..a95ec6f37e
--- /dev/null
+++ b/benchmark/constant_invalidation.rb
@@ -0,0 +1,22 @@
+$VERBOSE = nil
+
+CONSTANT1 = 1
+CONSTANT2 = 1
+CONSTANT3 = 1
+CONSTANT4 = 1
+CONSTANT5 = 1
+
+def constants
+ [CONSTANT1, CONSTANT2, CONSTANT3, CONSTANT4, CONSTANT5]
+end
+
+500_000.times do
+ constants
+
+ # With previous behavior, this would cause all of the constant caches
+ # associated with the constant lookups listed above to invalidate, meaning
+ # they would all have to be fetched again. With current behavior, it only
+ # invalidates when a name matches, so the following constant set shouldn't
+ # impact the constant lookups listed above.
+ INVALIDATE = true
+end
diff --git a/benchmark/enum_lazy_flat_map.yml b/benchmark/enum_lazy_flat_map.yml
new file mode 100644
index 0000000000..0ee390a441
--- /dev/null
+++ b/benchmark/enum_lazy_flat_map.yml
@@ -0,0 +1,16 @@
+prelude: |
+ num = (1..).lazy.take(100)
+ ary2 = [[1,2]].cycle.lazy.take(10)
+ ary10 = [[*1..10]].cycle.lazy.take(10)
+ ary20 = [[*1..20]].cycle.lazy.take(10)
+ ary50 = [[*1..50]].cycle.lazy.take(10)
+ ary100 = [[*1..100]].cycle.lazy.take(10)
+
+benchmark:
+ num3: num.flat_map {|x| x}.take(3).to_a
+ num10: num.flat_map {|x| x}.take(10).to_a
+ ary2: ary2.flat_map {|x| x}.take(3).to_a
+ ary10: ary10.flat_map {|x| x}.take(3).to_a
+ ary20: ary20.flat_map {|x| x}.take(3).to_a
+ ary50: ary50.flat_map {|x| x}.take(3).to_a
+ ary100: ary100.flat_map {|x| x}.take(3).to_a
diff --git a/benchmark/enum_lazy_zip.yml b/benchmark/enum_lazy_zip.yml
new file mode 100644
index 0000000000..4566ff0261
--- /dev/null
+++ b/benchmark/enum_lazy_zip.yml
@@ -0,0 +1,22 @@
+prelude: |
+ a = (1..3).lazy
+ b = a.map {|x| x}
+
+benchmark:
+ first_ary: a.zip(["a", "b", "c"]).first
+ first_nonary: a.zip("a".."c").first
+ first_noarg: a.zip.first
+
+ take3_ary: a.zip(["a", "b", "c"]).take(3).force
+ take3_nonary: a.zip("a".."c").take(3).force
+ take3_noarg: a.zip.take(3).force
+
+ chain-first_ary: b.zip(["a", "b", "c"]).first
+ chain-first_nonary: b.zip("a".."c").first
+ chain-first_noarg: b.zip.first
+
+ chain-take3_ary: b.zip(["a", "b", "c"]).take(3).force
+ chain-take3_nonary: b.zip("a".."c").take(3).force
+ chain-take3_noarg: b.zip.take(3).force
+
+ block: a.zip("a".."c") {|x, y| [x, y]}
diff --git a/benchmark/enum_minmax.yml b/benchmark/enum_minmax.yml
new file mode 100644
index 0000000000..9d01731abb
--- /dev/null
+++ b/benchmark/enum_minmax.yml
@@ -0,0 +1,25 @@
+prelude: |
+  require 'set'
+  set2 = 2.times.to_a.shuffle.to_set
+ set10 = 10.times.to_a.shuffle.to_set
+ set100 = 100.times.to_a.shuffle.to_set
+ set1000 = 1000.times.to_a.shuffle.to_set
+ set10000 = 10000.times.to_a.shuffle.to_set
+
+benchmark:
+ set2.min: set2.min
+ set10.min: set10.min
+ set100.min: set100.min
+ set1000.min: set1000.min
+ set10000.min: set10000.min
+ set2.max: set2.max
+ set10.max: set10.max
+ set100.max: set100.max
+ set1000.max: set1000.max
+ set10000.max: set10000.max
+ set2.minmax: set2.minmax
+ set10.minmax: set10.minmax
+ set100.minmax: set100.minmax
+ set1000.minmax: set1000.minmax
+ set10000.minmax: set10000.minmax
+
+loop_count: 10000
diff --git a/benchmark/enum_sort.yml b/benchmark/enum_sort.yml
new file mode 100644
index 0000000000..6f26e748c6
--- /dev/null
+++ b/benchmark/enum_sort.yml
@@ -0,0 +1,15 @@
+prelude: |
+  require 'set'
+  set2 = 2.times.to_a.shuffle.to_set
+ set10 = 10.times.to_a.shuffle.to_set
+ set100 = 100.times.to_a.shuffle.to_set
+ set1000 = 1000.times.to_a.shuffle.to_set
+ set10000 = 10000.times.to_a.shuffle.to_set
+
+benchmark:
+ set2.sort_by: set2.sort_by { 0 }
+ set10.sort_by: set10.sort_by { 0 }
+ set100.sort_by: set100.sort_by { 0 }
+ set1000.sort_by: set1000.sort_by { 0 }
+ set10000.sort_by: set10000.sort_by { 0 }
+
+loop_count: 10000
diff --git a/benchmark/enum_tally.yml b/benchmark/enum_tally.yml
new file mode 100644
index 0000000000..edd2e040a0
--- /dev/null
+++ b/benchmark/enum_tally.yml
@@ -0,0 +1,4 @@
+prelude: |
+ list = ("aaa".."zzz").to_a*10
+benchmark:
+ tally: list.tally
diff --git a/benchmark/erb_escape_html.yml b/benchmark/erb_escape_html.yml
new file mode 100644
index 0000000000..ca28d756e7
--- /dev/null
+++ b/benchmark/erb_escape_html.yml
@@ -0,0 +1,31 @@
+prelude: |
+ # frozen_string_literal: true
+ require 'erb'
+benchmark:
+ - script: ERB::Util.html_escape("")
+ loop_count: 20000000
+ - script: ERB::Util.html_escape("abcde")
+ loop_count: 20000000
+ - script: ERB::Util.html_escape("abcd<")
+ loop_count: 20000000
+ - script: ERB::Util.html_escape("'&\"<>")
+ loop_count: 5000000
+ - prelude: long_no_escape = "abcde" * 300
+ script: ERB::Util.html_escape(long_no_escape)
+ loop_count: 1000000
+ - prelude: long_all_escape = "'&\"<>" * 10
+ script: ERB::Util.html_escape(long_all_escape)
+ loop_count: 1000000
+ - prelude: | # http://example.com/
+ example_html = <<~HTML
+ <body>
+ <div>
+ <h1>Example Domain</h1>
+ <p>This domain is established to be used for illustrative examples in documents. You may use this
+ domain in examples without prior coordination or asking for permission.</p>
+ <p><a href="http://www.iana.org/domains/example">More information...</a></p>
+ </div>
+ </body>
+ HTML
+ script: ERB::Util.html_escape(example_html)
+ loop_count: 1000000
diff --git a/benchmark/float_methods.yml b/benchmark/float_methods.yml
new file mode 100644
index 0000000000..56ea41effc
--- /dev/null
+++ b/benchmark/float_methods.yml
@@ -0,0 +1,14 @@
+prelude: |
+ flo = 4.2
+benchmark:
+ to_f: |
+ flo.to_f
+ abs: |
+ flo.abs
+ magnitude: |
+ flo.magnitude
+ -@: |
+ -flo
+ zero?: |
+ flo.zero?
+loop_count: 20000000
diff --git a/benchmark/float_neg_posi.yml b/benchmark/float_neg_posi.yml
new file mode 100644
index 0000000000..172db1bf6d
--- /dev/null
+++ b/benchmark/float_neg_posi.yml
@@ -0,0 +1,8 @@
+prelude: |
+ flo = 4.2
+benchmark:
+ negative?: |
+ flo.negative?
+ positive?: |
+ flo.positive?
+loop_count: 20000000
diff --git a/benchmark/float_to_s.yml b/benchmark/float_to_s.yml
new file mode 100644
index 0000000000..0abae5cdb8
--- /dev/null
+++ b/benchmark/float_to_s.yml
@@ -0,0 +1,7 @@
+prelude: |
+ floats = [*0.0.step(1.0, 0.0001)]
+
+benchmark:
+ to_s: floats.each {|f| f.to_s}
+
+loop_count: 1000
diff --git a/benchmark/hash_aref_array.rb b/benchmark/hash_aref_array.rb
new file mode 100644
index 0000000000..ac7a683d95
--- /dev/null
+++ b/benchmark/hash_aref_array.rb
@@ -0,0 +1,5 @@
+h = {}
+arrays = (0..99).each_slice(10).to_a
+#STDERR.puts arrays.inspect
+arrays.each { |s| h[s] = s }
+200_000.times { arrays.each { |s| h[s] } }
diff --git a/benchmark/hash_first.yml b/benchmark/hash_first.yml
new file mode 100644
index 0000000000..c26df1a7ed
--- /dev/null
+++ b/benchmark/hash_first.yml
@@ -0,0 +1,11 @@
+prelude: |
+ hash1 = 1_000_000.times.to_h { [rand, true]}
+ hash2 = hash1.dup
+ hash2.keys[1..100_000].each { hash2.delete _1 }
+ hash2.delete hash2.first[0]
+
+benchmark:
+ hash1: hash1.first
+ hash2: hash2.first
+
+loop_count: 100_000
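For context, hash2 is built so that many of its leading slots have been deleted; Hash#first must then skip past the deleted entries, which is what this benchmark stresses. A smaller sketch of the same shape:

    # Same shape as the prelude, at a smaller scale:
    h = 1_000.times.to_h { [rand, true] }
    h.keys[1..500].each { |k| h.delete(k) }  # punch holes near the front
    h.delete(h.first[0])                     # also drop the current first entry
    h.first                                  # must scan past the deleted slots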
diff --git a/benchmark/io_write.rb b/benchmark/io_write.rb
new file mode 100644
index 0000000000..cdb409948b
--- /dev/null
+++ b/benchmark/io_write.rb
@@ -0,0 +1,22 @@
+#!/usr/bin/env ruby
+
+require 'benchmark'
+
+i, o = IO.pipe
+o.sync = true
+
+DOT = ".".freeze
+
+chunks = 100_000.times.collect{DOT}
+
+thread = Thread.new do
+ while i.read(1024)
+ end
+end
+
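+# Each iteration passes all 100,000 chunks to a single multi-argument IO#write call.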
+100.times do
+ o.write(*chunks)
+end
+
+o.close
+thread.join
diff --git a/benchmark/iseq_load_from_binary.yml b/benchmark/iseq_load_from_binary.yml
new file mode 100644
index 0000000000..7e9d73bdd4
--- /dev/null
+++ b/benchmark/iseq_load_from_binary.yml
@@ -0,0 +1,25 @@
+prelude: |
+ symbol = RubyVM::InstructionSequence.compile(":foo; :bar; :baz; :egg; :spam").to_binary
+
+ define_method = RubyVM::InstructionSequence.compile(%{
+ def foo; end
+ def bar; end
+ def baz; end
+ def egg; end
+ def spam; end
+ }).to_binary
+
+ all = RubyVM::InstructionSequence.compile(%{
+ module Foo; def foo; :foo; end; end
+ module Bar; def bar; :bar; end; end
+ module Baz; def baz; :baz; end; end
+ class Egg; def egg; :egg; end; end
+    class Spam; def spam; :spam; end; end
+ }).to_binary
+
+benchmark:
+ symbol: RubyVM::InstructionSequence.load_from_binary(symbol)
+ define_method: RubyVM::InstructionSequence.load_from_binary(define_method)
+ all: RubyVM::InstructionSequence.load_from_binary(all)
+
+loop_count: 100_000
diff --git a/benchmark/ivar_extend.yml b/benchmark/ivar_extend.yml
new file mode 100644
index 0000000000..eb9ee923f5
--- /dev/null
+++ b/benchmark/ivar_extend.yml
@@ -0,0 +1,23 @@
+prelude: |
+ class Embedded
+ def initialize
+ @a = 1
+ @b = 1
+ @c = 1
+ end
+ end
+
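+  # Three ivars fit in the object's embedded slots; six spill to separately allocated ("extended") storage.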
+ class Extended
+ def initialize
+ @a = 1
+ @b = 1
+ @c = 1
+ @d = 1
+ @e = 1
+ @f = 1
+ end
+ end
+benchmark:
+ embedded: Embedded.new
+ extended: Extended.new
+loop_count: 20_000_000
diff --git a/benchmark/kernel_tap.yml b/benchmark/kernel_tap.yml
new file mode 100644
index 0000000000..4dcbb31b4d
--- /dev/null
+++ b/benchmark/kernel_tap.yml
@@ -0,0 +1,6 @@
+prelude: |
+ obj = Object.new
+ x = nil
+benchmark:
+ kernel_tap: obj.tap { |o| x = o }
+loop_count: 20000000
diff --git a/benchmark/kernel_then.yml b/benchmark/kernel_then.yml
new file mode 100644
index 0000000000..85f7341e33
--- /dev/null
+++ b/benchmark/kernel_then.yml
@@ -0,0 +1,6 @@
+benchmark:
+ kernel_then: 1.then { |i| i + 1 }
+ kernel_then_enum: 1.then
+ kernel_yield_self: 1.yield_self { |i| i + 1 }
+ kernel_yield_self_enum: 1.yield_self
+loop_count: 20000000
diff --git a/benchmark/lib/benchmark_driver/runner/mjit.rb b/benchmark/lib/benchmark_driver/runner/mjit.rb
new file mode 100644
index 0000000000..3a58a620de
--- /dev/null
+++ b/benchmark/lib/benchmark_driver/runner/mjit.rb
@@ -0,0 +1,34 @@
+require 'benchmark_driver/struct'
+require 'benchmark_driver/metric'
+require 'erb'
+
+# A runner to measure after-JIT performance easily
+class BenchmarkDriver::Runner::Mjit < BenchmarkDriver::Runner::Ips
+ # JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
+ Job = Class.new(BenchmarkDriver::DefaultJob)
+
+ # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
+ JobParser = BenchmarkDriver::DefaultJobParser.for(klass: Job, metrics: [METRIC]).extend(Module.new{
+ def parse(**)
+ jobs = super
+ jobs.map do |job|
+ job = job.dup
+ job.prelude = "#{job.prelude}\n#{<<~EOS}"
+ if defined?(RubyVM::MJIT) && RubyVM::MJIT.enabled?
+ __bmdv_ruby_i = 0
+ while __bmdv_ruby_i < 10000 # MJIT call threshold
+ #{job.script}
+ __bmdv_ruby_i += 1
+ end
+ RubyVM::MJIT.pause # compile
+ #{job.script}
+ RubyVM::MJIT.resume; RubyVM::MJIT.pause # recompile
+ #{job.script}
+ RubyVM::MJIT.resume; RubyVM::MJIT.pause # recompile 2
+ end
+ EOS
+ job
+ end
+ end
+ })
+end
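In effect the parser appends a warm-up to every job's prelude so measurement happens only after MJIT has compiled (and recompiled) the script. Schematically, with job_script standing in for the job's script:

    # Roughly what the rewritten prelude executes (sketch of the ERB above):
    if defined?(RubyVM::MJIT) && RubyVM::MJIT.enabled?
      i = 0
      while i < 10_000                          # cross the MJIT call threshold
        job_script
        i += 1
      end
      RubyVM::MJIT.pause                        # force compilation before measuring
      job_script
      RubyVM::MJIT.resume; RubyVM::MJIT.pause   # trigger a recompile
      job_script
      RubyVM::MJIT.resume; RubyVM::MJIT.pause   # and a second recompile
    end

Benchmark YAMLs opt into this runner via `type: lib/benchmark_driver/runner/mjit`, as several files below do.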
diff --git a/benchmark/lib/benchmark_driver/runner/mjit_exec.rb b/benchmark/lib/benchmark_driver/runner/mjit_exec.rb
deleted file mode 100644
index 9f7c8c8af3..0000000000
--- a/benchmark/lib/benchmark_driver/runner/mjit_exec.rb
+++ /dev/null
@@ -1,237 +0,0 @@
-require 'benchmark_driver/struct'
-require 'benchmark_driver/metric'
-require 'erb'
-
-# A special runner dedicated for measuring mjit_exec overhead.
-class BenchmarkDriver::Runner::MjitExec
- METRIC = BenchmarkDriver::Metric.new(name: 'Iteration per second', unit: 'i/s')
-
- # JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
- Job = ::BenchmarkDriver::Struct.new(
- :name, # @param [String] name - This is mandatory for all runner
- :metrics, # @param [Array<BenchmarkDriver::Metric>]
- :num_methods, # @param [Integer] num_methods - The number of methods to be defined
- :loop_count, # @param [Integer] loop_count
- :from_jit, # @param [TrueClass,FalseClass] from_jit - Whether the mjit_exec() is from JIT or not
- :to_jit, # @param [TrueClass,FalseClass] to_jit - Whether the mjit_exec() is to JIT or not
- )
- # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
- class << JobParser = Module.new
- # @param [Array,String] num_methods
- # @param [Integer] loop_count
- # @param [TrueClass,FalseClass] from_jit
- # @param [TrueClass,FalseClass] to_jit
- def parse(num_methods:, loop_count:, from_jit:, to_jit:)
- if num_methods.is_a?(String)
- num_methods = eval(num_methods)
- end
-
- num_methods.map do |num|
- if num_methods.size > 1
- suffix = "[#{'%4d' % num}]"
- else
- suffix = "_#{num}"
- end
- Job.new(
- name: "mjit_exec_#{from_jit ? 'JT' : 'VM'}2#{to_jit ? 'JT' : 'VM'}#{suffix}",
- metrics: [METRIC],
- num_methods: num,
- loop_count: loop_count,
- from_jit: from_jit,
- to_jit: to_jit,
- )
- end
- end
- end
-
- # @param [BenchmarkDriver::Config::RunnerConfig] config
- # @param [BenchmarkDriver::Output] output
- # @param [BenchmarkDriver::Context] contexts
- def initialize(config:, output:, contexts:)
- @config = config
- @output = output
- @contexts = contexts
- end
-
- # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
- # @param [Array<BenchmarkDriver::Runner::Peak::Job>] jobs
- def run(jobs)
- @output.with_benchmark do
- jobs.each do |job|
- @output.with_job(name: job.name) do
- @contexts.each do |context|
- result = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: true, rest_on_average: :average) do
- run_benchmark(job, context: context)
- end
- value, duration = result.value
- @output.with_context(name: context.name, executable: context.executable, gems: context.gems, prelude: context.prelude) do
- @output.report(values: { METRIC => value }, duration: duration, loop_count: job.loop_count)
- end
- end
- end
- end
- end
- end
-
- private
-
- # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
- # @param [BenchmarkDriver::Context] context
- # @return [BenchmarkDriver::Metrics]
- def run_benchmark(job, context:)
- if job.from_jit
- if job.to_jit
- benchmark = BenchmarkJT2JT.new(num_methods: job.num_methods, loop_count: job.loop_count)
- else
- raise NotImplementedError, "JT2VM is not implemented yet"
- end
- else
- if job.to_jit
- benchmark = BenchmarkVM2JT.new(num_methods: job.num_methods, loop_count: job.loop_count)
- else
- benchmark = BenchmarkVM2VM.new(num_methods: job.num_methods, loop_count: job.loop_count)
- end
- end
-
- duration = Tempfile.open(['benchmark_driver-result', '.txt']) do |f|
- with_script(benchmark.render(result: f.path)) do |path|
- opt = []
- if context.executable.command.any? { |c| c.start_with?('--jit') }
- opt << '--jit-min-calls=2'
- end
- IO.popen([*context.executable.command, '--disable-gems', *opt, path], &:read)
- if $?.success?
- Float(f.read)
- else
- BenchmarkDriver::Result::ERROR
- end
- end
- end
-
- [job.loop_count.to_f / duration, duration]
- end
-
- def with_script(script)
- if @config.verbose >= 2
- sep = '-' * 30
- $stdout.puts "\n\n#{sep}[Script begin]#{sep}\n#{script}#{sep}[Script end]#{sep}\n\n"
- end
-
- Tempfile.open(['benchmark_driver-', '.rb']) do |f|
- f.puts script
- f.close
- return yield(f.path)
- end
- end
-
- # @param [Integer] num_methods
- # @param [Integer] loop_count
- BenchmarkVM2VM = ::BenchmarkDriver::Struct.new(:num_methods, :loop_count) do
- # @param [String] result - A file to write result
- def render(result:)
- ERB.new(<<~EOS, trim_mode: '%').result(binding)
- % num_methods.times do |i|
- def a<%= i %>
- nil
- end
- % end
- RubyVM::MJIT.pause if RubyVM::MJIT.enabled?
-
- def vm
- t = Process.clock_gettime(Process::CLOCK_MONOTONIC)
- i = 0
- while i < <%= loop_count / 1000 %>
- % 1000.times do |i|
- a<%= i % num_methods %>
- % end
- i += 1
- end
- % (loop_count % 1000).times do |i|
- a<%= i % num_methods %>
- % end
- Process.clock_gettime(Process::CLOCK_MONOTONIC) - t
- end
-
- vm # warmup call cache
- File.write(<%= result.dump %>, vm)
- EOS
- end
- end
- private_constant :BenchmarkVM2VM
-
- # @param [Integer] num_methods
- # @param [Integer] loop_count
- BenchmarkVM2JT = ::BenchmarkDriver::Struct.new(:num_methods, :loop_count) do
- # @param [String] result - A file to write result
- def render(result:)
- ERB.new(<<~EOS, trim_mode: '%').result(binding)
- % num_methods.times do |i|
- def a<%= i %>
- nil
- end
- a<%= i %>
- a<%= i %> # --jit-min-calls=2
- % end
- RubyVM::MJIT.pause if RubyVM::MJIT.enabled?
-
- def vm
- t = Process.clock_gettime(Process::CLOCK_MONOTONIC)
- i = 0
- while i < <%= loop_count / 1000 %>
- % 1000.times do |i|
- a<%= i % num_methods %>
- % end
- i += 1
- end
- % (loop_count % 1000).times do |i|
- a<%= i % num_methods %>
- % end
- Process.clock_gettime(Process::CLOCK_MONOTONIC) - t
- end
-
- vm # warmup call cache
- File.write(<%= result.dump %>, vm)
- EOS
- end
- end
- private_constant :BenchmarkVM2JT
-
- # @param [Integer] num_methods
- # @param [Integer] loop_count
- BenchmarkJT2JT = ::BenchmarkDriver::Struct.new(:num_methods, :loop_count) do
- # @param [String] result - A file to write result
- def render(result:)
- ERB.new(<<~EOS, trim_mode: '%').result(binding)
- % num_methods.times do |i|
- def a<%= i %>
- nil
- end
- % end
-
- # You may need to:
- # * Increase `JIT_ISEQ_SIZE_THRESHOLD` to 10000000 in mjit.h
- # * Always return false in `inlinable_iseq_p()` of mjit_compile.c
- def jit
- t = Process.clock_gettime(Process::CLOCK_MONOTONIC)
- i = 0
- while i < <%= loop_count / 1000 %>
- % 1000.times do |i|
- a<%= i % num_methods %>
- % end
- i += 1
- end
- % (loop_count % 1000).times do |i|
- a<%= i % num_methods %>
- % end
- Process.clock_gettime(Process::CLOCK_MONOTONIC) - t
- end
-
- jit
- jit
- RubyVM::MJIT.pause if RubyVM::MJIT.enabled?
- File.write(<%= result.dump %>, jit)
- EOS
- end
- end
- private_constant :BenchmarkJT2JT
-end
diff --git a/benchmark/lib/benchmark_driver/runner/ractor.rb b/benchmark/lib/benchmark_driver/runner/ractor.rb
new file mode 100644
index 0000000000..c730b8e4a5
--- /dev/null
+++ b/benchmark/lib/benchmark_driver/runner/ractor.rb
@@ -0,0 +1,122 @@
+require 'erb'
+
+# A runner to measure performance *inside* Ractor
+class BenchmarkDriver::Runner::Ractor < BenchmarkDriver::Runner::Ips
+ # JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
+ Job = Class.new(BenchmarkDriver::DefaultJob) do
+ attr_accessor :ractor
+ end
+
+ # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
+ JobParser = BenchmarkDriver::DefaultJobParser.for(klass: Job, metrics: [METRIC]).extend(Module.new{
+ def parse(ractor: 1, **kwargs)
+ super(**kwargs).each do |job|
+ job.ractor = ractor
+ end
+ end
+ })
+
+ private
+
+ unless private_instance_methods.include?(:run_benchmark)
+ raise "#run_benchmark is no longer defined in BenchmarkDriver::Runner::Ips"
+ end
+
+ # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
+ # @param [BenchmarkDriver::Context] context
+ # @return [BenchmarkDriver::Metrics]
+ def run_benchmark(job, context:)
+ benchmark = BenchmarkScript.new(
+ preludes: [context.prelude, job.prelude],
+ script: job.script,
+ teardown: job.teardown,
+ loop_count: job.loop_count,
+ )
+
+ results = job.ractor.times.map do
+ Tempfile.open('benchmark_driver_result')
+ end
+ duration = with_script(benchmark.render(results: results.map(&:path))) do |path|
+ success = execute(*context.executable.command, path, exception: false)
+ if success && ((value = results.map { |f| Float(f.read) }.max) > 0)
+ value
+ else
+ BenchmarkDriver::Result::ERROR
+ end
+ end
+ results.each(&:close)
+
+ value_duration(
+ loop_count: job.loop_count,
+ duration: duration,
+ )
+ end
+
+ # @param [String] prelude
+ # @param [String] script
+ # @param [String] teardown
+ # @param [Integer] loop_count
+ BenchmarkScript = ::BenchmarkDriver::Struct.new(:preludes, :script, :teardown, :loop_count) do
+ # @param [String] result - A file to write result
+ def render(results:)
+ prelude = preludes.reject(&:nil?).reject(&:empty?).join("\n")
+ ERB.new(<<-RUBY).result_with_hash(results: results)
+Warning[:experimental] = false
+# shareable-constant-value: experimental_everything
+#{prelude}
+
+if #{loop_count} == 1
+ __bmdv_loop_before = 0
+ __bmdv_loop_after = 0
+else
+ __bmdv_loop_before = Time.new
+ #{while_loop('', loop_count, id: 0)}
+ __bmdv_loop_after = Time.new
+end
+
+__bmdv_ractors = []
+<% results.size.times do %>
+__bmdv_ractors << Ractor.new(__bmdv_loop_after - __bmdv_loop_before) { |__bmdv_loop_time|
+ __bmdv_time = Time
+ __bmdv_script_before = __bmdv_time.new
+ #{while_loop(script, loop_count, id: 1)}
+ __bmdv_script_after = __bmdv_time.new
+
+ (__bmdv_script_after - __bmdv_script_before) - __bmdv_loop_time
+}
+<% end %>
+
+# Wait for all Ractors before executing code to write results
+__bmdv_ractors.map!(&:take)
+
+<% results.each do |result| %>
+File.write(<%= result.dump %>, __bmdv_ractors.shift)
+<% end %>
+
+#{teardown}
+ RUBY
+ end
+
+ private
+
+ # id is to prevent:
+ # can not isolate a Proc because it accesses outer variables (__bmdv_i)
+ def while_loop(content, times, id:)
+ if !times.is_a?(Integer) || times <= 0
+ raise ArgumentError.new("Unexpected times: #{times.inspect}")
+ elsif times == 1
+ return content
+ end
+
+ # TODO: execute in batch
+ <<-RUBY
+__bmdv_i#{id} = 0
+while __bmdv_i#{id} < #{times}
+ #{content}
+ __bmdv_i#{id} += 1
+end
+ RUBY
+ end
+ end
+ private_constant :BenchmarkScript
+end
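The rendered script measures a bare loop once, runs the job inside N Ractors, and reports each Ractor's script time minus that loop overhead; the runner keeps the slowest (max) value. A simplified sketch, not the generated code (`elapsed` and `:work` are illustrative stand-ins):

    # Sketch of the measurement the rendered script performs:
    def elapsed
      t = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      yield
      Process.clock_gettime(Process::CLOCK_MONOTONIC) - t
    end

    loop_overhead = elapsed { 1_000_000.times { } }         # cost of the bare loop
    ractors = 2.times.map do
      Ractor.new(loop_overhead) do |overhead|
        elapsed { 1_000_000.times { :work } } - overhead    # :work stands in for the script
      end
    end
    per_ractor = ractors.map(&:take)                        # runner reports using the max

YAMLs opt in via `type: lib/benchmark_driver/runner/ractor` plus a `ractor:` count, as in ractor_const.yml below.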
diff --git a/benchmark/marshal_dump_load_integer.yml b/benchmark/marshal_dump_load_integer.yml
new file mode 100644
index 0000000000..78ebf823d2
--- /dev/null
+++ b/benchmark/marshal_dump_load_integer.yml
@@ -0,0 +1,22 @@
+prelude: |
+ smallint_array = 1000.times.map { |x| x }
+ bigint32_array = 1000.times.map { |x| x + 2**32 }
+ bigint64_array = 1000.times.map { |x| x + 2**64 }
+
+ smallint_dump = Marshal.dump(smallint_array)
+ bigint32_dump = Marshal.dump(bigint32_array)
+ bigint64_dump = Marshal.dump(bigint64_array)
+benchmark:
+ marshal_dump_integer_small: |
+ Marshal.dump(smallint_array)
+ marshal_dump_integer_over_32_bit: |
+ Marshal.dump(bigint32_array)
+ marshal_dump_integer_over_64_bit: |
+ Marshal.dump(bigint64_array)
+ marshal_load_integer_small: |
+ Marshal.load(smallint_dump)
+ marshal_load_integer_over_32_bit: |
+ Marshal.load(bigint32_dump)
+ marshal_load_integer_over_64_bit: |
+ Marshal.load(bigint64_dump)
+loop_count: 4000
diff --git a/benchmark/masgn.yml b/benchmark/masgn.yml
new file mode 100644
index 0000000000..31cb8ee4a3
--- /dev/null
+++ b/benchmark/masgn.yml
@@ -0,0 +1,53 @@
+prelude: |
+ a = [nil] * 3
+ b = Class.new{attr_writer :a, :b, :c}.new
+ c = d = e = f = g = h = i = nil
+benchmark:
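+  # Name format: <kind><targets>_<values>[p][lv]; "p" variants discard the result, "lv" variants use local variables as the values.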
+ array2_2: "c = (a[0], a[1] = 1, 2)"
+ array2_3: "c = (a[0], a[1] = 1, 2, 3)"
+ array3_2: "c = (a[0], a[1], a[2] = 1, 2)"
+ array3_3: "c = (a[0], a[1], a[2] = 1, 2, 3)"
+ attr2_2: "c = (b.a, b.b = 1, 2)"
+ attr2_3: "c = (b.a, b.b = 1, 2, 3)"
+ attr3_2: "c = (b.a, b.b, b.c = 1, 2)"
+ attr3_3: "c = (b.a, b.b, b.c = 1, 2, 3)"
+ lvar2_2: "c = (d, e = 1, 2)"
+ lvar2_3: "c = (d, e = 1, 2, 3)"
+ lvar3_2: "c = (d, e, f = 1, 2)"
+ lvar3_3: "c = (d, e, f = 1, 2, 3)"
+ array2_2p: "(a[0], a[1] = 1, 2; nil)"
+ array2_3p: "(a[0], a[1] = 1, 2, 3; nil)"
+ array3_2p: "(a[0], a[1], a[2] = 1, 2; nil)"
+ array3_3p: "(a[0], a[1], a[2] = 1, 2, 3; nil)"
+ attr2_2p: "(b.a, b.b = 1, 2; nil)"
+ attr2_3p: "(b.a, b.b = 1, 2, 3; nil)"
+ attr3_2p: "(b.a, b.b, b.c = 1, 2; nil)"
+ attr3_3p: "(b.a, b.b, b.c = 1, 2, 3; nil)"
+ lvar2_2p: "(d, e = 1, 2; nil)"
+ lvar2_3p: "(d, e = 1, 2, 3; nil)"
+ lvar3_2p: "(d, e, f = 1, 2; nil)"
+ lvar3_3p: "(d, e, f = 1, 2, 3; nil)"
+ array2_2lv: "c = (a[0], a[1] = g, h)"
+  array2_3lv: "c = (a[0], a[1] = g, h, i)"
+  array3_2lv: "c = (a[0], a[1], a[2] = g, h)"
+  array3_3lv: "c = (a[0], a[1], a[2] = g, h, i)"
+  attr2_2lv: "c = (b.a, b.b = g, h)"
+  attr2_3lv: "c = (b.a, b.b = g, h, i)"
+  attr3_2lv: "c = (b.a, b.b, b.c = g, h)"
+  attr3_3lv: "c = (b.a, b.b, b.c = g, h, i)"
+  lvar2_2lv: "c = (d, e = g, h)"
+  lvar2_3lv: "c = (d, e = g, h, i)"
+  lvar3_2lv: "c = (d, e, f = g, h)"
+  lvar3_3lv: "c = (d, e, f = g, h, i)"
+  array2_2plv: "(a[0], a[1] = g, h; nil)"
+  array2_3plv: "(a[0], a[1] = g, h, i; nil)"
+  array3_2plv: "(a[0], a[1], a[2] = g, h; nil)"
+  array3_3plv: "(a[0], a[1], a[2] = g, h, i; nil)"
+  attr2_2plv: "(b.a, b.b = g, h; nil)"
+  attr2_3plv: "(b.a, b.b = g, h, i; nil)"
+  attr3_2plv: "(b.a, b.b, b.c = g, h; nil)"
+  attr3_3plv: "(b.a, b.b, b.c = g, h, i; nil)"
+  lvar2_2plv: "(d, e = g, h; nil)"
+  lvar2_3plv: "(d, e = g, h, i; nil)"
+  lvar3_2plv: "(d, e, f = g, h; nil)"
+  lvar3_3plv: "(d, e, f = g, h, i; nil)"
diff --git a/benchmark/method_bind_call.yml b/benchmark/method_bind_call.yml
new file mode 100644
index 0000000000..9e0e046ed4
--- /dev/null
+++ b/benchmark/method_bind_call.yml
@@ -0,0 +1,16 @@
+prelude: |
+ named_module = Kernel
+
+ module FakeName
+ def self.name
+ "NotMyame".freeze
+ end
+ end
+
+ MOD_NAME = Module.instance_method(:name)
+
+benchmark:
+ fastpath: MOD_NAME.bind_call(Kernel)
+ slowpath: MOD_NAME.bind_call(FakeName)
+
+loop_count: 100_000
diff --git a/benchmark/mjit_exec_jt2jt.yml b/benchmark/mjit_exec_jt2jt.yml
deleted file mode 100644
index 5be408e30c..0000000000
--- a/benchmark/mjit_exec_jt2jt.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-# Usage:
-# RUBYOPT=-Ibenchmark/lib benchmark-driver -e 'ruby --jit' benchmark/mjit_exec_vm2jt.yml
-type: mjit_exec # benchmark/lib/benchmark_driver/runner/mjit_exec.rb
-num_methods: [1]
-#num_methods: (1..100).to_a + [200, 300, 400, 500, 600, 700, 800, 900, 1000]
-loop_count: 50000000
-from_jit: true
-to_jit: true
diff --git a/benchmark/mjit_exec_vm2jt.yml b/benchmark/mjit_exec_vm2jt.yml
deleted file mode 100644
index 9947dbb7dd..0000000000
--- a/benchmark/mjit_exec_vm2jt.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-# Usage:
-# RUBYOPT=-Ibenchmark/lib benchmark-driver -e 'ruby --jit' benchmark/mjit_exec_vm2jt.yml
-type: mjit_exec # benchmark/lib/benchmark_driver/runner/mjit_exec.rb
-num_methods: [1]
-#num_methods: (1..100).to_a + [200, 300, 400, 500, 600, 700, 800, 900, 1000]
-loop_count: 50000000
-from_jit: false
-to_jit: true
diff --git a/benchmark/mjit_exec_vm2vm.yml b/benchmark/mjit_exec_vm2vm.yml
deleted file mode 100644
index 4b84427b10..0000000000
--- a/benchmark/mjit_exec_vm2vm.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-# Usage:
-# RUBYOPT=-Ibenchmark/lib benchmark-driver -e 'ruby --jit' benchmark/mjit_exec_vm2vm.yml
-type: mjit_exec # benchmark/lib/benchmark_driver/runner/mjit_exec.rb
-num_methods: [1]
-#num_methods: (1..100).to_a + [200, 300, 400, 500, 600, 700, 800, 900, 1000]
-loop_count: 50000000
-from_jit: false
-to_jit: false
diff --git a/benchmark/mjit_exivar.yml b/benchmark/mjit_exivar.yml
index e6fbae96de..2584fa6410 100644
--- a/benchmark/mjit_exivar.yml
+++ b/benchmark/mjit_exivar.yml
@@ -1,3 +1,4 @@
+type: lib/benchmark_driver/runner/mjit
prelude: |
class Bench < Hash
def initialize
@@ -11,20 +12,6 @@ prelude: |
bench = Bench.new
- if defined?(RubyVM::MJIT) && RubyVM::MJIT.enabled?
- jit_min_calls = 10000
- i = 0
- while i < jit_min_calls
- bench.exivar
- i += 1
- end
- RubyVM::MJIT.pause # compile (1)
- # issue recompile
- bench.exivar
- RubyVM::MJIT.resume
- RubyVM::MJIT.pause # compile (2)
- end
-
benchmark:
mjit_exivar: bench.exivar
diff --git a/benchmark/mjit_integer.yml b/benchmark/mjit_integer.yml
new file mode 100644
index 0000000000..a6b5c9ee16
--- /dev/null
+++ b/benchmark/mjit_integer.yml
@@ -0,0 +1,32 @@
+type: lib/benchmark_driver/runner/mjit
+prelude: |
+ def mjit_abs(int) int.abs end
+ def mjit_bit_length(int) int.bit_length end
+ def mjit_comp(int) ~int end
+ def mjit_even?(int) int.even? end
+ def mjit_integer?(int) int.integer? end
+ def mjit_magnitude(int) int.magnitude end
+ def mjit_odd?(int) int.odd? end
+ def mjit_ord(int) int.ord end
+ def mjit_size(int) int.size end
+ def mjit_to_i(int) int.to_i end
+ def mjit_to_int(int) int.to_int end
+ def mjit_uminus(int) -int end
+ def mjit_zero?(int) int.zero? end
+
+benchmark:
+ - mjit_abs(-1)
+ - mjit_bit_length(100)
+ - mjit_comp(1)
+ - mjit_even?(2)
+ - mjit_integer?(0)
+ - mjit_magnitude(-1)
+ - mjit_odd?(1)
+ - mjit_ord(1)
+ - mjit_size(1)
+ - mjit_to_i(1)
+ - mjit_to_int(1)
+ - mjit_uminus(1)
+ - mjit_zero?(0)
+
+loop_count: 40000000
diff --git a/benchmark/mjit_kernel.yml b/benchmark/mjit_kernel.yml
new file mode 100644
index 0000000000..7720e65c2c
--- /dev/null
+++ b/benchmark/mjit_kernel.yml
@@ -0,0 +1,20 @@
+type: lib/benchmark_driver/runner/mjit
+prelude: |
+ def mjit_class(obj)
+ obj.class
+ end
+
+ def mjit_frozen?(obj)
+ obj.frozen?
+ end
+
+ str = ""
+ fstr = "".freeze
+
+benchmark:
+ - mjit_class(self)
+ - mjit_class(1)
+ - mjit_frozen?(str)
+ - mjit_frozen?(fstr)
+
+loop_count: 40000000
diff --git a/benchmark/mjit_leave.yml b/benchmark/mjit_leave.yml
index 292d6ef041..9ac68b164b 100644
--- a/benchmark/mjit_leave.yml
+++ b/benchmark/mjit_leave.yml
@@ -1,3 +1,4 @@
+type: lib/benchmark_driver/runner/mjit
prelude: |
def leave
nil
diff --git a/benchmark/mjit_opt_cc_insns.yml b/benchmark/mjit_opt_cc_insns.yml
new file mode 100644
index 0000000000..fed6d34bd5
--- /dev/null
+++ b/benchmark/mjit_opt_cc_insns.yml
@@ -0,0 +1,27 @@
+# opt_* insns using vm_method_cfunc_is with send-compatible operands:
+# * opt_nil_p
+# * opt_not
+# * opt_eq
+type: lib/benchmark_driver/runner/mjit
+prelude: |
+ def mjit_nil?(obj)
+ obj.nil?
+ end
+
+ def mjit_not(obj)
+ !obj
+ end
+
+ def mjit_eq(a, b)
+ a == b
+ end
+
+benchmark:
+ - script: mjit_nil?(1)
+ loop_count: 40000000
+ - script: mjit_not(1)
+ loop_count: 40000000
+ - script: mjit_eq(1, nil)
+ loop_count: 8000000
+ - script: mjit_eq(nil, 1)
+ loop_count: 8000000
diff --git a/benchmark/mjit_send_cfunc.yml b/benchmark/mjit_send_cfunc.yml
deleted file mode 100644
index b5f9c897ec..0000000000
--- a/benchmark/mjit_send_cfunc.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-prelude: |
- def mjit_send_cfunc
- self.class
- end
-benchmark: mjit_send_cfunc
-loop_count: 100000000
diff --git a/benchmark/mjit_struct_aref.yml b/benchmark/mjit_struct_aref.yml
new file mode 100644
index 0000000000..bfba1323f2
--- /dev/null
+++ b/benchmark/mjit_struct_aref.yml
@@ -0,0 +1,10 @@
+type: lib/benchmark_driver/runner/mjit
+prelude: |
+ def mjit_struct_aref(struct)
+ struct.aa
+ end
+ struct = Struct.new(:a0, :a1, :a2, :a3, :a4, :a5, :a6, :a7, :a8, :a9, :aa).new
+
+benchmark: mjit_struct_aref(struct)
+
+loop_count: 40000000
diff --git a/benchmark/module_eqq.yml b/benchmark/module_eqq.yml
new file mode 100644
index 0000000000..a561fb86dc
--- /dev/null
+++ b/benchmark/module_eqq.yml
@@ -0,0 +1,27 @@
+prelude: |
+ class SimpleClass; end
+ class MediumClass
+ 10.times { include Module.new }
+ end
+ class LargeClass
+ 100.times { include Module.new }
+ end
+ class HugeClass
+ 300.times { include Module.new }
+ end
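+  # Module#=== is obj.is_a?(mod), so its cost grows with the receiver's ancestor count.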
+ SimpleObj = SimpleClass.new
+ MediumObj = MediumClass.new
+ LargeObj = LargeClass.new
+ HugeObj = HugeClass.new
+benchmark:
+ simple_class_eqq_simple_obj: |
+ SimpleClass === SimpleObj
+ medium_class_eqq_simple_obj: |
+ MediumClass === SimpleObj
+ simple_class_eqq_medium_obj: |
+ SimpleClass === MediumObj
+ simple_class_eqq_large_obj: |
+ SimpleClass === LargeObj
+ simple_class_eqq_huge_obj: |
+ SimpleClass === HugeObj
+loop_count: 20000000
diff --git a/benchmark/nilclass.yml b/benchmark/nilclass.yml
new file mode 100644
index 0000000000..fba67a5f6a
--- /dev/null
+++ b/benchmark/nilclass.yml
@@ -0,0 +1,6 @@
+benchmark:
+ to_i: |
+ nil.to_i
+ to_f: |
+ nil.to_f
+loop_count: 100000
diff --git a/benchmark/num_zero_p.yml b/benchmark/num_zero_p.yml
new file mode 100644
index 0000000000..2195963433
--- /dev/null
+++ b/benchmark/num_zero_p.yml
@@ -0,0 +1,8 @@
+benchmark:
+ - 0.zero?
+ - 1.zero?
+ - 0r.zero?
+ - 1r.zero?
+ - 0i.zero?
+ - 1i.zero?
+loop_count: 50000000
diff --git a/benchmark/numeric_methods.yml b/benchmark/numeric_methods.yml
new file mode 100644
index 0000000000..1384902935
--- /dev/null
+++ b/benchmark/numeric_methods.yml
@@ -0,0 +1,29 @@
+prelude: |
+ int = 42
+ flo = 4.2
+benchmark:
+ real?: |
+ int.real?
+ integer?: |
+ flo.integer?
+ finite?: |
+ int.finite?
+ infinite?: |
+ int.infinite?
+ integer_real: |
+ int.real
+ float_real: |
+ flo.real
+ integr_imag: |
+ int.imag
+ float_imag: |
+ flo.imag
+ integer_conj: |
+ int.conj
+ float_conj: |
+ flo.conj
+ integer_numerator: |
+ int.numerator
+ integer_denominator: |
+ int.denominator
+loop_count: 20000000
diff --git a/benchmark/object_allocate.yml b/benchmark/object_allocate.yml
new file mode 100644
index 0000000000..93ff463e41
--- /dev/null
+++ b/benchmark/object_allocate.yml
@@ -0,0 +1,21 @@
+prelude: |
+ class Eight
+ 8.times { include(Module.new) }
+ end
+ class ThirtyTwo
+ 32.times { include(Module.new) }
+ end
+ class SixtyFour
+ 64.times { include(Module.new) }
+ end
+ class OneTwentyEight
+ 128.times { include(Module.new) }
+ end
+ # Disable GC to see raw throughput:
+ GC.disable
+benchmark:
+ allocate_8_deep: Eight.new
+ allocate_32_deep: ThirtyTwo.new
+ allocate_64_deep: SixtyFour.new
+ allocate_128_deep: OneTwentyEight.new
+loop_count: 100000
diff --git a/benchmark/objspace_dump_all.yml b/benchmark/objspace_dump_all.yml
new file mode 100644
index 0000000000..ebab562d2e
--- /dev/null
+++ b/benchmark/objspace_dump_all.yml
@@ -0,0 +1,13 @@
+prelude: |
+ require 'objspace'
+ require 'tempfile'
+ $objs = 1_000.times.map { Object.new }
+ $strings = 1_000.times.map { |i| "string #{i}" }
+ $file = Tempfile.new('heap')
+ $dev_null = File.open(File::NULL, 'w+')
+
+benchmark:
+ dump_all_string: "ObjectSpace.dump_all(output: :string)"
+ dump_all_file: "ObjectSpace.dump_all(output: $file)"
+ dump_all_dev_null: "ObjectSpace.dump_all(output: $dev_null)"
+loop_count: 1
diff --git a/benchmark/pm_array.yml b/benchmark/pm_array.yml
new file mode 100644
index 0000000000..babb65a289
--- /dev/null
+++ b/benchmark/pm_array.yml
@@ -0,0 +1,19 @@
+prelude: |
+ def call(*val)
+ case val
+ in [String => body]
+ [200, {}, [body]]
+ in [Integer => status]
+ [status, {}, [""]]
+ in [Integer, String] => response
+ [response[0], {}, [response[1]]]
+ in [Integer, Hash, String] => response
+ [response[0], response[1], [response[2]]]
+ end
+ end
+
+benchmark:
+ first_match: call("ok")
+ second_match: call(401)
+ third_match: call(200, "ok")
+ fourth_match: call(201, {}, "created")
diff --git a/benchmark/ractor_const.yml b/benchmark/ractor_const.yml
new file mode 100644
index 0000000000..d7ab74bdca
--- /dev/null
+++ b/benchmark/ractor_const.yml
@@ -0,0 +1,4 @@
+type: lib/benchmark_driver/runner/ractor
+benchmark:
+ ractor_const: Object
+ractor: 1
diff --git a/benchmark/ractor_float_to_s.yml b/benchmark/ractor_float_to_s.yml
new file mode 100644
index 0000000000..8f492be668
--- /dev/null
+++ b/benchmark/ractor_float_to_s.yml
@@ -0,0 +1,8 @@
+type: lib/benchmark_driver/runner/ractor
+prelude: |
+ FLOATS = [*0.0.step(1.0, 0.001)]
+benchmark:
+ ractor_float_to_s: |
+ FLOATS.each {|f| f.to_s}
+loop_count: 100
+ractor: 2
diff --git a/benchmark/range_min.yml b/benchmark/range_min.yml
new file mode 100644
index 0000000000..9e60dd7308
--- /dev/null
+++ b/benchmark/range_min.yml
@@ -0,0 +1,2 @@
+benchmark:
+ - (1..10).min
diff --git a/benchmark/so_nbody.rb b/benchmark/so_nbody.rb
index d6c5bb9e61..9884fc4edc 100644
--- a/benchmark/so_nbody.rb
+++ b/benchmark/so_nbody.rb
@@ -12,38 +12,38 @@ def _puts *args
end
class Planet
- attr_accessor :x, :y, :z, :vx, :vy, :vz, :mass
+ attr_accessor :x, :y, :z, :vx, :vy, :vz, :mass
- def initialize(x, y, z, vx, vy, vz, mass)
- @x, @y, @z = x, y, z
- @vx, @vy, @vz = vx * DAYS_PER_YEAR, vy * DAYS_PER_YEAR, vz * DAYS_PER_YEAR
- @mass = mass * SOLAR_MASS
- end
-
- def move_from_i(bodies, nbodies, dt, i)
- while i < nbodies
- b2 = bodies[i]
- dx = @x - b2.x
- dy = @y - b2.y
- dz = @z - b2.z
-
- distance = Math.sqrt(dx * dx + dy * dy + dz * dz)
- mag = dt / (distance * distance * distance)
- b_mass_mag, b2_mass_mag = @mass * mag, b2.mass * mag
-
- @vx -= dx * b2_mass_mag
- @vy -= dy * b2_mass_mag
- @vz -= dz * b2_mass_mag
- b2.vx += dx * b_mass_mag
- b2.vy += dy * b_mass_mag
- b2.vz += dz * b_mass_mag
- i += 1
+ def initialize(x, y, z, vx, vy, vz, mass)
+ @x, @y, @z = x, y, z
+ @vx, @vy, @vz = vx * DAYS_PER_YEAR, vy * DAYS_PER_YEAR, vz * DAYS_PER_YEAR
+ @mass = mass * SOLAR_MASS
end
- @x += dt * @vx
- @y += dt * @vy
- @z += dt * @vz
- end
+ def move_from_i(bodies, nbodies, dt, i)
+ while i < nbodies
+ b2 = bodies[i]
+ dx = @x - b2.x
+ dy = @y - b2.y
+ dz = @z - b2.z
+
+ distance = Math.sqrt(dx * dx + dy * dy + dz * dz)
+ mag = dt / (distance * distance * distance)
+ b_mass_mag, b2_mass_mag = @mass * mag, b2.mass * mag
+
+ @vx -= dx * b2_mass_mag
+ @vy -= dy * b2_mass_mag
+ @vz -= dz * b2_mass_mag
+ b2.vx += dx * b_mass_mag
+ b2.vy += dy * b_mass_mag
+ b2.vz += dz * b_mass_mag
+ i += 1
+ end
+
+ @x += dt * @vx
+ @y += dt * @vy
+ @z += dt * @vz
+ end
end
def energy(bodies)
diff --git a/benchmark/string_concat.yml b/benchmark/string_concat.yml
new file mode 100644
index 0000000000..e65c00cca9
--- /dev/null
+++ b/benchmark/string_concat.yml
@@ -0,0 +1,45 @@
+prelude: |
+ CHUNK = "a" * 64
+ UCHUNK = "é" * 32
+ GC.disable # GC causes a lot of variance
+benchmark:
+ binary_concat_7bit: |
+ buffer = String.new(capacity: 4096, encoding: Encoding::BINARY)
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ utf8_concat_7bit: |
+ buffer = String.new(capacity: 4096, encoding: Encoding::UTF_8)
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ buffer << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK << CHUNK
+ utf8_concat_UTF8: |
+ buffer = String.new(capacity: 4096, encoding: Encoding::UTF_8)
+ buffer << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK
+ buffer << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK
+ buffer << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK
+ buffer << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK
+ buffer << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK
+ buffer << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK
+ buffer << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK
+ buffer << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK << UCHUNK
+ interpolation: |
+ buffer = "#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}" \
+ "#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}" \
+ "#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}" \
+ "#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}" \
+ "#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}" \
+ "#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}" \
+ "#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}" \
+ "#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}" \
+ "#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}"
diff --git a/benchmark/time_at.yml b/benchmark/time_at.yml
new file mode 100644
index 0000000000..3247efbe77
--- /dev/null
+++ b/benchmark/time_at.yml
@@ -0,0 +1,7 @@
+prelude: |
+ # frozen_string_literal: true
+benchmark:
+ - 'Time.at(0)'
+ - 'Time.at(0, 500)'
+ - 'Time.at(0, in: "+09:00")'
+ - 'Time.at(0, 500, in: "+09:00")'
diff --git a/benchmark/time_new.yml b/benchmark/time_new.yml
new file mode 100644
index 0000000000..5947dd3a41
--- /dev/null
+++ b/benchmark/time_new.yml
@@ -0,0 +1,4 @@
+benchmark:
+ - 'Time.new(2021)'
+ - 'Time.new(2021, 8, 22)'
+ - 'Time.new(2021, 8, 22, in: "+09:00")'
diff --git a/benchmark/time_now.yml b/benchmark/time_now.yml
new file mode 100644
index 0000000000..f6d6a31489
--- /dev/null
+++ b/benchmark/time_now.yml
@@ -0,0 +1,3 @@
+benchmark:
+ - 'Time.now'
+ - 'Time.now(in: "+09:00")'
diff --git a/benchmark/time_parse.yml b/benchmark/time_parse.yml
new file mode 100644
index 0000000000..6060b58bc6
--- /dev/null
+++ b/benchmark/time_parse.yml
@@ -0,0 +1,10 @@
+prelude: |
+ require 'time'
+ inspect = "2021-08-23 09:57:02 +0900"
+ iso8601 = "2021-08-23T09:57:02+09:00"
+benchmark:
+ - Time.iso8601(iso8601)
+ - Time.parse(iso8601)
+ - Time.parse(inspect)
+ - Time.new(iso8601) rescue Time.iso8601(iso8601)
+ - Time.new(inspect) rescue Time.parse(inspect)
diff --git a/benchmark/vm_block_handler.yml b/benchmark/vm_block_handler.yml
new file mode 100644
index 0000000000..461d7953ad
--- /dev/null
+++ b/benchmark/vm_block_handler.yml
@@ -0,0 +1,27 @@
+# FIXME: is there a way to benchmark block_handler_type_ifunc?
+
+prelude: |
+ p = proc{_1}
+ o = Object.new
+ def o.each
+ i = 0
+ while i < 3_000_000 do
+ yield i
+ i += 1
+ end
+ end
+
+benchmark:
+ - name: block_handler_type_iseq
+ script: |
+ o.each{_1}
+
+ - name: block_handler_type_symbol
+ script: |
+ o.each(&:itself)
+
+ - name: block_handler_type_proc
+ script: |
+ o.each(&p)
+
+loop_count: 1
diff --git a/benchmark/vm_case_classes.yml b/benchmark/vm_case_classes.yml
new file mode 100644
index 0000000000..cacc4f0464
--- /dev/null
+++ b/benchmark/vm_case_classes.yml
@@ -0,0 +1,9 @@
+benchmark:
+ vm_case_classes: |
+ case :foo
+ when Hash
+ raise
+ when Array
+ raise
+ end
+loop_count: 6000000
diff --git a/benchmark/vm_const.yml b/benchmark/vm_const.yml
index 6064d4eed0..8939ca0cd3 100644
--- a/benchmark/vm_const.yml
+++ b/benchmark/vm_const.yml
@@ -1,7 +1,13 @@
prelude: |
Const = 1
+ A = B = C = D = E = F = G = H = I = J = K = L = M = N = O = P = Q = R = S = T = U = V = W = X = Y = Z = 1
+ def foo
+ A; B; C; D; E; F; G; H; I; J; K; L; M; N; O; P; Q; R; S; T; U; V; W; X; Y; Z
+ end
benchmark:
vm_const: |
j = Const
k = Const
+ vm_const_many: |
+ foo
loop_count: 30000000
diff --git a/benchmark/vm_cvar.yml b/benchmark/vm_cvar.yml
new file mode 100644
index 0000000000..1d0e161829
--- /dev/null
+++ b/benchmark/vm_cvar.yml
@@ -0,0 +1,20 @@
+prelude: |
+ class A
+ @@foo = 1
+
+ def self.foo
+ @@foo
+ end
+
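+    # Lengthen A's ancestor chain with 26 modules so the @@foo lookup has to walk it.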
+ ("A".."Z").each do |module_name|
+ eval <<-EOM
+ module #{module_name}
+ end
+
+ include #{module_name}
+ EOM
+ end
+ end
+benchmark:
+ vm_cvar: A.foo
+loop_count: 600000
diff --git a/benchmark/vm_dstr_ary.rb b/benchmark/vm_dstr_ary.rb
new file mode 100644
index 0000000000..1d3aa3b97b
--- /dev/null
+++ b/benchmark/vm_dstr_ary.rb
@@ -0,0 +1,6 @@
+i = 0
+x = y = []
+while i<6_000_000 # benchmark loop 2
+ i += 1
+ str = "foo#{x}bar#{y}baz"
+end
diff --git a/benchmark/vm_dstr_bool.rb b/benchmark/vm_dstr_bool.rb
new file mode 100644
index 0000000000..631ca54755
--- /dev/null
+++ b/benchmark/vm_dstr_bool.rb
@@ -0,0 +1,7 @@
+i = 0
+x = true
+y = false
+while i<6_000_000 # benchmark loop 2
+ i += 1
+ str = "foo#{x}bar#{y}baz"
+end
diff --git a/benchmark/vm_dstr_class_module.rb b/benchmark/vm_dstr_class_module.rb
new file mode 100644
index 0000000000..becf0861c7
--- /dev/null
+++ b/benchmark/vm_dstr_class_module.rb
@@ -0,0 +1,10 @@
+i = 0
+class A; end unless defined?(A)
+module B; end unless defined?(B)
+x = A
+y = B
+while i<6_000_000 # benchmark loop 2
+ i += 1
+ str = "foo#{x}bar#{y}baz"
+end
+
diff --git a/benchmark/vm_dstr_digit.rb b/benchmark/vm_dstr_digit.rb
new file mode 100644
index 0000000000..caaa395192
--- /dev/null
+++ b/benchmark/vm_dstr_digit.rb
@@ -0,0 +1,7 @@
+i = 0
+x = 0
+y = 9
+while i<6_000_000 # benchmark loop 2
+ i += 1
+ str = "foo#{x}bar#{y}baz"
+end
diff --git a/benchmark/vm_dstr_int.rb b/benchmark/vm_dstr_int.rb
new file mode 100644
index 0000000000..ed380d7595
--- /dev/null
+++ b/benchmark/vm_dstr_int.rb
@@ -0,0 +1,5 @@
+i = 0
+while i<6_000_000 # benchmark loop 2
+ i += 1
+ str = "foo#{i}bar#{i}baz"
+end
diff --git a/benchmark/vm_dstr_nil.rb b/benchmark/vm_dstr_nil.rb
new file mode 100644
index 0000000000..ec4f5d6c67
--- /dev/null
+++ b/benchmark/vm_dstr_nil.rb
@@ -0,0 +1,6 @@
+i = 0
+x = y = nil
+while i<6_000_000 # benchmark loop 2
+ i += 1
+ str = "foo#{x}bar#{y}baz"
+end
diff --git a/benchmark/vm_dstr_obj.rb b/benchmark/vm_dstr_obj.rb
new file mode 100644
index 0000000000..fb78637ead
--- /dev/null
+++ b/benchmark/vm_dstr_obj.rb
@@ -0,0 +1,6 @@
+i = 0
+x = y = Object.new
+while i<6_000_000 # benchmark loop 2
+ i += 1
+ str = "foo#{x}bar#{y}baz"
+end
diff --git a/benchmark/vm_dstr_obj_def.rb b/benchmark/vm_dstr_obj_def.rb
new file mode 100644
index 0000000000..99ff7b98fb
--- /dev/null
+++ b/benchmark/vm_dstr_obj_def.rb
@@ -0,0 +1,8 @@
+i = 0
+o = Object.new
+def o.to_s; -""; end
+x = y = o
+while i<6_000_000 # benchmark loop 2
+ i += 1
+ str = "foo#{x}bar#{y}baz"
+end
diff --git a/benchmark/vm_dstr_str.rb b/benchmark/vm_dstr_str.rb
new file mode 100644
index 0000000000..45fc107892
--- /dev/null
+++ b/benchmark/vm_dstr_str.rb
@@ -0,0 +1,6 @@
+i = 0
+x = y = ""
+while i<6_000_000 # benchmark loop 2
+ i += 1
+ str = "foo#{x}bar#{y}baz"
+end
diff --git a/benchmark/vm_dstr_sym.rb b/benchmark/vm_dstr_sym.rb
new file mode 100644
index 0000000000..484b8f8150
--- /dev/null
+++ b/benchmark/vm_dstr_sym.rb
@@ -0,0 +1,6 @@
+i = 0
+x = y = :z
+while i<6_000_000 # benchmark loop 2
+ i += 1
+ str = "foo#{x}bar#{y}baz"
+end
diff --git a/benchmark/vm_freezeobj.yml b/benchmark/vm_freezeobj.yml
new file mode 100644
index 0000000000..69a795a354
--- /dev/null
+++ b/benchmark/vm_freezeobj.yml
@@ -0,0 +1,6 @@
+prelude: |
+ objs = 100000.times.map { Object.new }
+benchmark:
+ vm_freeze_obj: |
+ objs.map(&:freeze)
+loop_count: 600
diff --git a/benchmark/vm_iclass_super.yml b/benchmark/vm_iclass_super.yml
new file mode 100644
index 0000000000..21bb7db247
--- /dev/null
+++ b/benchmark/vm_iclass_super.yml
@@ -0,0 +1,20 @@
+prelude: |
+ class C
+ def m
+ 1
+ end
+
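+    # Prepend 13 modules that each define m as just "super", so obj.m walks a chain of iclasses.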
+ ("A".."M").each do |module_name|
+ eval <<-EOM
+ module #{module_name}
+ def m; super; end
+ end
+ prepend #{module_name}
+ EOM
+ end
+ end
+
+ obj = C.new
+benchmark:
+ vm_iclass_super: obj.m
+loop_count: 6000000
diff --git a/benchmark/vm_ivar_embedded_obj_init.yml b/benchmark/vm_ivar_embedded_obj_init.yml
new file mode 100644
index 0000000000..74fe20a630
--- /dev/null
+++ b/benchmark/vm_ivar_embedded_obj_init.yml
@@ -0,0 +1,14 @@
+prelude: |
+ class C
+ def set_ivars
+ @a = nil
+ @b = nil
+ @c = nil
+ end
+ end
+
+ c = C.new
+benchmark:
+ vm_ivar_embedded_obj_init: |
+ c.set_ivars
+loop_count: 30000000
diff --git a/benchmark/vm_ivar_extended_obj_init.yml b/benchmark/vm_ivar_extended_obj_init.yml
new file mode 100644
index 0000000000..f054bab282
--- /dev/null
+++ b/benchmark/vm_ivar_extended_obj_init.yml
@@ -0,0 +1,16 @@
+prelude: |
+ class C
+ def set_ivars
+ @a = nil
+ @b = nil
+ @c = nil
+ @d = nil
+ @e = nil
+ end
+ end
+
+ c = C.new
+benchmark:
+ vm_ivar_extended_obj_init: |
+ c.set_ivars
+loop_count: 30000000
diff --git a/benchmark/vm_ivar_generic_get.yml b/benchmark/vm_ivar_generic_get.yml
new file mode 100644
index 0000000000..dae2d37671
--- /dev/null
+++ b/benchmark/vm_ivar_generic_get.yml
@@ -0,0 +1,17 @@
+prelude: |
+ class C < Array
+ attr_reader :a, :b, :c
+ def initialize
+ @a = nil
+ @b = nil
+ @c = nil
+ end
+ end
+
+ c = C.new
+benchmark:
+ vm_ivar_generic_get: |
+ c.a
+ c.b
+ c.c
+loop_count: 30000000
diff --git a/benchmark/vm_ivar_generic_set.yml b/benchmark/vm_ivar_generic_set.yml
new file mode 100644
index 0000000000..102a6577fb
--- /dev/null
+++ b/benchmark/vm_ivar_generic_set.yml
@@ -0,0 +1,14 @@
+prelude: |
+ class C < Array
+ def set_ivars
+ @a = nil
+ @b = nil
+ @c = nil
+ end
+ end
+
+ c = C.new
+benchmark:
+ vm_ivar_generic_set: |
+ c.set_ivars
+loop_count: 30000000
diff --git a/benchmark/vm_ivar_get.yml b/benchmark/vm_ivar_get.yml
new file mode 100644
index 0000000000..9174af6965
--- /dev/null
+++ b/benchmark/vm_ivar_get.yml
@@ -0,0 +1,37 @@
+prelude: |
+ class Example
+ def initialize
+ @v0 = 1
+ @v1 = 2
+ @v3 = 3
+ @levar = 1
+ end
+
+ def get_value_loop
+ sum = 0
+
+ i = 0
+ while i < 1000000
+ # 10 times to de-emphasize loop overhead
+ sum += @levar
+ sum += @levar
+ sum += @levar
+ sum += @levar
+ sum += @levar
+ sum += @levar
+ sum += @levar
+ sum += @levar
+ sum += @levar
+ sum += @levar
+ i += 1
+ end
+
+ return sum
+ end
+ end
+
+ obj = Example.new
+benchmark:
+ vm_ivar_get: |
+ obj.get_value_loop
+loop_count: 100
diff --git a/benchmark/vm_ivar_get_unintialized.yml b/benchmark/vm_ivar_get_unintialized.yml
new file mode 100644
index 0000000000..a1ccfb06ce
--- /dev/null
+++ b/benchmark/vm_ivar_get_unintialized.yml
@@ -0,0 +1,12 @@
+prelude: |
+ class Example
+ def read
+ @uninitialized
+ end
+ end
+
+ obj = Example.new
+benchmark:
+ vm_ivar_get_uninitialized: |
+ obj.read
+loop_count: 30000000
diff --git a/benchmark/vm_ivar_lazy_set.yml b/benchmark/vm_ivar_lazy_set.yml
new file mode 100644
index 0000000000..7372ffcfbc
--- /dev/null
+++ b/benchmark/vm_ivar_lazy_set.yml
@@ -0,0 +1,12 @@
+prelude: |
+ class Example
+ def lazy_set
+ @uninitialized ||= 123
+ end
+ end
+
+ objs = 10000000.times.map { Example.new }
+benchmark:
+ vm_ivar_lazy_set: |
+ objs.each(&:lazy_set)
+loop_count: 1
diff --git a/benchmark/vm_ivar_of_class.yml b/benchmark/vm_ivar_of_class.yml
new file mode 100644
index 0000000000..172e28b2fd
--- /dev/null
+++ b/benchmark/vm_ivar_of_class.yml
@@ -0,0 +1,12 @@
+prelude: |
+ class C
+ @a = 1
+ def self.a
+ _a = @a; _a = @a; _a = @a; _a = @a; _a = @a;
+ _a = @a; _a = @a; _a = @a; _a = @a; _a = @a;
+ end
+ end
+benchmark:
+ vm_ivar_of_class: |
+ a = C.a
+loop_count: 30000000
diff --git a/benchmark/vm_ivar_of_class_set.yml b/benchmark/vm_ivar_of_class_set.yml
new file mode 100644
index 0000000000..2ea5199423
--- /dev/null
+++ b/benchmark/vm_ivar_of_class_set.yml
@@ -0,0 +1,11 @@
+prelude: |
+ class C
+ @a = 1
+ def self.a o
+ @a = o; @a = o; @a = o; @a = o; @a = o; @a = o;
+ end
+ end
+benchmark:
+ vm_ivar_of_class_set: |
+ a = C.a(nil)
+loop_count: 30000000
diff --git a/benchmark/vm_ivar_set_on_instance.yml b/benchmark/vm_ivar_set_on_instance.yml
new file mode 100644
index 0000000000..91857b7742
--- /dev/null
+++ b/benchmark/vm_ivar_set_on_instance.yml
@@ -0,0 +1,35 @@
+prelude: |
+ class TheClass
+ def initialize
+ @v0 = 1
+ @v1 = 2
+ @v3 = 3
+ @levar = 1
+ end
+
+ def set_value_loop
+ # 1M
+ i = 0
+ while i < 1000000
+ # 10 times to de-emphasize loop overhead
+ @levar = i
+ @levar = i
+ @levar = i
+ @levar = i
+ @levar = i
+ @levar = i
+ @levar = i
+ @levar = i
+ @levar = i
+ @levar = i
+ i += 1
+ end
+ end
+ end
+
+ obj = TheClass.new
+
+benchmark:
+ vm_ivar_set_on_instance: |
+ obj.set_value_loop
+loop_count: 100
diff --git a/benchmark/vm_ivar_set_subclass.yml b/benchmark/vm_ivar_set_subclass.yml
new file mode 100644
index 0000000000..bc8bf5bf6b
--- /dev/null
+++ b/benchmark/vm_ivar_set_subclass.yml
@@ -0,0 +1,20 @@
+prelude: |
+ class A
+ def set_ivars
+ @a = nil
+ @b = nil
+ @c = nil
+ @d = nil
+ @e = nil
+ end
+ end
+ class B < A; end
+ class C < A; end
+
+ b = B.new
+ c = C.new
+benchmark:
+ vm_ivar_init_subclass: |
+ b.set_ivars
+ c.set_ivars
+loop_count: 3000000
diff --git a/benchmark/vm_lvar_cond_set.yml b/benchmark/vm_lvar_cond_set.yml
new file mode 100644
index 0000000000..1845f9d12e
--- /dev/null
+++ b/benchmark/vm_lvar_cond_set.yml
@@ -0,0 +1,8 @@
+benchmark:
+ vm_lvar_cond_set: |
+ a ||= 1
+ b ||= 1
+ c ||= 1
+ d ||= 1
+ nil
+loop_count: 30000000
diff --git a/benchmark/vm_send.yml b/benchmark/vm_send.yml
index 753d3e8318..f31bc7ac89 100644
--- a/benchmark/vm_send.yml
+++ b/benchmark/vm_send.yml
@@ -5,7 +5,10 @@ prelude: |
end
o = C.new
+ m = :m
benchmark:
vm_send: |
o.__send__ :m
+ vm_send_var: |
+ o.__send__ m
loop_count: 6000000
diff --git a/benchmark/vm_thread_condvar1.rb b/benchmark/vm_thread_condvar1.rb
index cf5706b23e..feed27c3ad 100644
--- a/benchmark/vm_thread_condvar1.rb
+++ b/benchmark/vm_thread_condvar1.rb
@@ -1,9 +1,9 @@
 # two threads, two mutexes, two condvars, ping-pong
require 'thread'
-m1 = Mutex.new
-m2 = Mutex.new
-cv1 = ConditionVariable.new
-cv2 = ConditionVariable.new
+m1 = Thread::Mutex.new
+m2 = Thread::Mutex.new
+cv1 = Thread::ConditionVariable.new
+cv2 = Thread::ConditionVariable.new
max = 100000
i = 0
wait = nil
diff --git a/benchmark/vm_thread_condvar2.rb b/benchmark/vm_thread_condvar2.rb
index 7c8dc19481..6590c4134b 100644
--- a/benchmark/vm_thread_condvar2.rb
+++ b/benchmark/vm_thread_condvar2.rb
@@ -1,16 +1,16 @@
# many threads, one mutex, many condvars
require 'thread'
-m = Mutex.new
-cv1 = ConditionVariable.new
-cv2 = ConditionVariable.new
+m = Thread::Mutex.new
+cv1 = Thread::ConditionVariable.new
+cv2 = Thread::ConditionVariable.new
max = 1000
n = 100
waiting = 0
scvs = []
waiters = n.times.map do |i|
- start_cv = ConditionVariable.new
+ start_cv = Thread::ConditionVariable.new
scvs << start_cv
- start_mtx = Mutex.new
+ start_mtx = Thread::Mutex.new
start_mtx.synchronize do
th = Thread.new(start_mtx, start_cv) do |sm, scv|
m.synchronize do
diff --git a/bignum.c b/bignum.c
index 956673ac29..cb2c3b6f07 100644
--- a/bignum.c
+++ b/bignum.c
@@ -23,8 +23,14 @@
# include <ieeefp.h>
#endif
+#if !defined(USE_GMP)
#if defined(HAVE_LIBGMP) && defined(HAVE_GMP_H)
-# define USE_GMP
+# define USE_GMP 1
+#else
+# define USE_GMP 0
+#endif
+#endif
+#if USE_GMP
# include <gmp.h>
#endif
@@ -36,15 +42,12 @@
#include "internal/numeric.h"
#include "internal/object.h"
#include "internal/sanitizers.h"
-#include "internal/util.h"
#include "internal/variable.h"
#include "internal/warnings.h"
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby_assert.h"
-#define RB_BIGNUM_TYPE_P(x) RB_TYPE_P((x), T_BIGNUM)
-
const char ruby_digitmap[] = "0123456789abcdefghijklmnopqrstuvwxyz";
#ifndef SIZEOF_BDIGIT_DBL
@@ -75,7 +78,7 @@ STATIC_ASSERT(sizeof_long_and_sizeof_bdigit, SIZEOF_BDIGIT % SIZEOF_LONG == 0);
#else
# define HOST_BIGENDIAN_P 0
#endif
-/* (!LSHIFTABLE(d, n) ? 0 : (n)) is same as n but suppress a warning, C4293, by Visual Studio. */
+/* (!LSHIFTABLE(d, n) ? 0 : (n)) is the same as n but suppresses warning C4293 from Visual Studio. */
#define LSHIFTABLE(d, n) ((n) < sizeof(d) * CHAR_BIT)
#define LSHIFTX(d, n) (!LSHIFTABLE(d, n) ? 0 : ((d) << (!LSHIFTABLE(d, n) ? 0 : (n))))
#define CLEAR_LOWBITS(d, numbits) ((d) & LSHIFTX(~((d)*0), (numbits)))
@@ -102,8 +105,8 @@ STATIC_ASSERT(sizeof_long_and_sizeof_bdigit, SIZEOF_BDIGIT % SIZEOF_LONG == 0);
#endif
#define BIGZEROP(x) (BIGNUM_LEN(x) == 0 || \
- (BDIGITS(x)[0] == 0 && \
- (BIGNUM_LEN(x) == 1 || bigzero_p(x))))
+ (BDIGITS(x)[0] == 0 && \
+ (BIGNUM_LEN(x) == 1 || bigzero_p(x))))
#define BIGSIZE(x) (BIGNUM_LEN(x) == 0 ? (size_t)0 : \
BDIGITS(x)[BIGNUM_LEN(x)-1] ? \
(size_t)(BIGNUM_LEN(x)*SIZEOF_BDIGIT - nlz(BDIGITS(x)[BIGNUM_LEN(x)-1])/CHAR_BIT) : \
@@ -148,7 +151,7 @@ STATIC_ASSERT(sizeof_long_and_sizeof_bdigit, SIZEOF_BDIGIT % SIZEOF_LONG == 0);
#define GMP_DIV_DIGITS 20
#define GMP_BIG2STR_DIGITS 20
#define GMP_STR2BIG_DIGITS 20
-#ifdef USE_GMP
+#if USE_GMP
# define NAIVE_MUL_DIGITS GMP_MUL_DIGITS
#else
# define NAIVE_MUL_DIGITS KARATSUBA_MUL_DIGITS
@@ -159,15 +162,11 @@ typedef void (mulfunc_t)(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, c
static mulfunc_t bary_mul_toom3_start;
static mulfunc_t bary_mul_karatsuba_start;
static BDIGIT bigdivrem_single(BDIGIT *qds, const BDIGIT *xds, size_t xn, BDIGIT y);
-static void bary_divmod(BDIGIT *qds, size_t qn, BDIGIT *rds, size_t rn, const BDIGIT *xds, size_t xn, const BDIGIT *yds, size_t yn);
-static VALUE bigmul0(VALUE x, VALUE y);
-static void bary_mul_toom3(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, const BDIGIT *yds, size_t yn, BDIGIT *wds, size_t wn);
static VALUE bignew_1(VALUE klass, size_t len, int sign);
static inline VALUE bigtrunc(VALUE x);
static VALUE bigsq(VALUE x);
-static void bigdivmod(VALUE x, VALUE y, volatile VALUE *divp, volatile VALUE *modp);
static inline VALUE power_cache_get_power(int base, int power_level, size_t *numdigits_ret);
#if SIZEOF_BDIGIT <= SIZEOF_INT
@@ -420,9 +419,9 @@ bary_small_lshift(BDIGIT *zds, const BDIGIT *xds, size_t n, int shift)
assert(0 <= shift && shift < BITSPERDIG);
for (i=0; i<n; i++) {
- num = num | (BDIGIT_DBL)*xds++ << shift;
- *zds++ = BIGLO(num);
- num = BIGDN(num);
+ num = num | (BDIGIT_DBL)*xds++ << shift;
+ *zds++ = BIGLO(num);
+ num = BIGDN(num);
}
return BIGLO(num);
}
@@ -438,9 +437,9 @@ bary_small_rshift(BDIGIT *zds, const BDIGIT *xds, size_t n, int shift, BDIGIT hi
num = BIGUP(higher_bdigit);
for (i = 0; i < n; i++) {
BDIGIT x = xds[n - i - 1];
- num = (num | x) >> shift;
+ num = (num | x) >> shift;
zds[n - i - 1] = BIGLO(num);
- num = BIGUP(x);
+ num = BIGUP(x);
}
}
@@ -450,7 +449,7 @@ bary_zero_p(const BDIGIT *xds, size_t xn)
if (xn == 0)
return 1;
do {
- if (xds[--xn]) return 0;
+ if (xds[--xn]) return 0;
} while (xn);
return 1;
}
@@ -467,7 +466,6 @@ static int
bary_2comp(BDIGIT *ds, size_t n)
{
size_t i;
- i = 0;
for (i = 0; i < n; i++) {
if (ds[i] != 0) {
goto non_zero;
@@ -979,7 +977,7 @@ integer_unpack_num_bdigits_small(size_t numwords, size_t wordsize, size_t nails,
{
/* nlp_bits stands for number of leading padding bits */
size_t num_bits = (wordsize * CHAR_BIT - nails) * numwords;
- size_t num_bdigits = (num_bits + BITSPERDIG - 1) / BITSPERDIG;
+ size_t num_bdigits = roomof(num_bits, BITSPERDIG);
*nlp_bits_ret = (int)(num_bdigits * BITSPERDIG - num_bits);
return num_bdigits;
}
@@ -989,7 +987,7 @@ integer_unpack_num_bdigits_generic(size_t numwords, size_t wordsize, size_t nail
{
/* BITSPERDIG = SIZEOF_BDIGIT * CHAR_BIT */
/* num_bits = (wordsize * CHAR_BIT - nails) * numwords */
- /* num_bdigits = (num_bits + BITSPERDIG - 1) / BITSPERDIG */
+ /* num_bdigits = roomof(num_bits, BITSPERDIG) */
/* num_bits = CHAR_BIT * (wordsize * numwords) - nails * numwords = CHAR_BIT * num_bytes1 - nails * numwords */
size_t num_bytes1 = wordsize * numwords;
@@ -1057,6 +1055,7 @@ integer_unpack_num_bdigits(size_t numwords, size_t wordsize, size_t nails, int *
size_t num_bdigits1 = integer_unpack_num_bdigits_generic(numwords, wordsize, nails, &nlp_bits1);
assert(num_bdigits == num_bdigits1);
assert(*nlp_bits_ret == nlp_bits1);
+ (void)num_bdigits1;
}
#endif
}
@@ -1351,9 +1350,9 @@ bary_subb(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, const BDIGIT *yd
num = borrow ? -1 : 0;
for (i = 0; i < sn; i++) {
- num += (BDIGIT_DBL_SIGNED)xds[i] - yds[i];
- zds[i] = BIGLO(num);
- num = BIGDN(num);
+ num += (BDIGIT_DBL_SIGNED)xds[i] - yds[i];
+ zds[i] = BIGLO(num);
+ num = BIGDN(num);
}
if (yn <= xn) {
for (; i < xn; i++) {
@@ -1372,7 +1371,7 @@ bary_subb(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, const BDIGIT *yd
}
if (num == 0) goto num_is_zero;
for (; i < zn; i++) {
- zds[i] = BDIGMAX;
+ zds[i] = BDIGMAX;
}
return 1;
@@ -1380,10 +1379,10 @@ bary_subb(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, const BDIGIT *yd
if (xds == zds && xn == zn)
return 0;
for (; i < xn; i++) {
- zds[i] = xds[i];
+ zds[i] = xds[i];
}
for (; i < zn; i++) {
- zds[i] = 0;
+ zds[i] = 0;
}
return 0;
}
@@ -1410,27 +1409,27 @@ bary_addc(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, const BDIGIT *yd
assert(yn <= zn);
if (xn > yn) {
- const BDIGIT *tds;
- tds = xds; xds = yds; yds = tds;
- i = xn; xn = yn; yn = i;
+ const BDIGIT *tds;
+ tds = xds; xds = yds; yds = tds;
+ i = xn; xn = yn; yn = i;
}
num = carry ? 1 : 0;
for (i = 0; i < xn; i++) {
- num += (BDIGIT_DBL)xds[i] + yds[i];
- zds[i] = BIGLO(num);
- num = BIGDN(num);
+ num += (BDIGIT_DBL)xds[i] + yds[i];
+ zds[i] = BIGLO(num);
+ num = BIGDN(num);
}
for (; i < yn; i++) {
if (num == 0) goto num_is_zero;
- num += yds[i];
- zds[i] = BIGLO(num);
- num = BIGDN(num);
+ num += yds[i];
+ zds[i] = BIGLO(num);
+ num = BIGDN(num);
}
for (; i < zn; i++) {
if (num == 0) goto num_is_zero;
- zds[i] = BIGLO(num);
- num = BIGDN(num);
+ zds[i] = BIGLO(num);
+ num = BIGDN(num);
}
return num != 0;
@@ -1438,10 +1437,10 @@ bary_addc(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, const BDIGIT *yd
if (yds == zds && yn == zn)
return 0;
for (; i < yn; i++) {
- zds[i] = yds[i];
+ zds[i] = yds[i];
}
for (; i < zn; i++) {
- zds[i] = 0;
+ zds[i] = 0;
}
return 0;
}
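
The reindented loops in `bary_subb` and `bary_addc` are classic digit-wise add/subtract with carry propagation. A self-contained sketch of the add core for equal-length operands (the real function also handles unequal lengths, aliasing, and a copy-only fast path once the carry dies out); the 32/64-bit widths are illustrative:

```c
#include <stdint.h>
#include <stddef.h>

/* z[0..n) = x[0..n) + y[0..n) + carry; returns the final carry.
 * The double-width sum is at most 2*(2^32 - 1) + 1 < 2^64, so the
 * accumulator never overflows. */
static int
digits_addc(uint32_t *z, const uint32_t *x, const uint32_t *y,
            size_t n, int carry)
{
    uint64_t acc = carry ? 1 : 0;
    for (size_t i = 0; i < n; i++) {
        acc += (uint64_t)x[i] + y[i];
        z[i] = (uint32_t)acc;  /* low digit, like BIGLO */
        acc >>= 32;            /* carry into the next digit, like BIGDN */
    }
    return acc != 0;
}
```
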
@@ -1580,7 +1579,7 @@ rb_big_mul_normal(VALUE x, VALUE y)
/* efficient squaring (2 times faster than normal multiplication)
* ref: Handbook of Applied Cryptography, Algorithm 14.16
- * http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
+ * https://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
*/
static void
bary_sq_fast(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn)
@@ -1598,30 +1597,30 @@ bary_sq_fast(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn)
return;
for (i = 0; i < xn-1; i++) {
- v = (BDIGIT_DBL)xds[i];
- if (!v)
+ v = (BDIGIT_DBL)xds[i];
+ if (!v)
continue;
- c = (BDIGIT_DBL)zds[i + i] + v * v;
- zds[i + i] = BIGLO(c);
- c = BIGDN(c);
- v *= 2;
+ c = (BDIGIT_DBL)zds[i + i] + v * v;
+ zds[i + i] = BIGLO(c);
+ c = BIGDN(c);
+ v *= 2;
vl = BIGLO(v);
vh = (int)BIGDN(v);
- for (j = i + 1; j < xn; j++) {
- w = (BDIGIT_DBL)xds[j];
- c += (BDIGIT_DBL)zds[i + j] + vl * w;
- zds[i + j] = BIGLO(c);
- c = BIGDN(c);
- if (vh)
+ for (j = i + 1; j < xn; j++) {
+ w = (BDIGIT_DBL)xds[j];
+ c += (BDIGIT_DBL)zds[i + j] + vl * w;
+ zds[i + j] = BIGLO(c);
+ c = BIGDN(c);
+ if (vh)
c += w;
- }
- if (c) {
- c += (BDIGIT_DBL)zds[i + xn];
- zds[i + xn] = BIGLO(c);
- c = BIGDN(c);
+ }
+ if (c) {
+ c += (BDIGIT_DBL)zds[i + xn];
+ zds[i + xn] = BIGLO(c);
+ c = BIGDN(c);
if (c)
zds[i + xn + 1] += (BDIGIT)c;
- }
+ }
}
/* i == xn-1 */
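
For reference, the identity behind `bary_sq_fast` (HAC Algorithm 14.16): in a square every cross product appears twice, so the loop computes only the upper triangle and doubles it (the `v *= 2` above), adding each diagonal term once:

```latex
x^2 = \Big(\sum_i x_i b^i\Big)^{\!2}
    = \sum_i x_i^2\, b^{2i} + 2 \sum_{i<j} x_i x_j\, b^{i+j}
```

That roughly halves the digit multiplications versus `bary_short_mul`, matching the "2 times faster" note above.
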
@@ -1646,13 +1645,21 @@ rb_big_sq_fast(VALUE x)
return z;
}
+static inline size_t
+max_size(size_t a, size_t b)
+{
+ return (a > b ? a : b);
+}
+
/* balancing multiplication by slicing larger argument */
static void
-bary_mul_balance_with_mulfunc(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, const BDIGIT *yds, size_t yn, BDIGIT *wds, size_t wn, mulfunc_t *mulfunc)
+bary_mul_balance_with_mulfunc(BDIGIT *const zds, const size_t zn,
+ const BDIGIT *const xds, const size_t xn,
+ const BDIGIT *const yds, const size_t yn,
+ BDIGIT *wds, size_t wn, mulfunc_t *const mulfunc)
{
VALUE work = 0;
- size_t yn0 = yn;
- size_t r, n;
+ size_t n;
assert(xn + yn <= zn);
assert(xn <= yn);
@@ -1660,14 +1667,26 @@ bary_mul_balance_with_mulfunc(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t
BDIGITS_ZERO(zds, xn);
+ if (wn < xn) {
+ /* The condition when a new buffer is needed:
+ * 1. (2(xn+r) > zn-(yn-r)) => (2xn+r > zn-yn), at the last
+ * iteration (or r == 0)
+ * 2. (2(xn+xn) > zn-(yn-r-xn)) => (3xn-r > zn-yn), at the
+ * previous iteration.
+ */
+ const size_t r = yn % xn;
+ if (2*xn + yn + max_size(xn-r, r) > zn) {
+ wn = xn;
+ wds = ALLOCV_N(BDIGIT, work, wn);
+ }
+ }
+
n = 0;
- while (yn > 0) {
- BDIGIT *tds;
- size_t tn;
- r = xn > yn ? yn : xn;
- tn = xn + r;
+ while (yn > n) {
+ const size_t r = (xn > (yn - n) ? (yn - n) : xn);
+ const size_t tn = (xn + r);
if (2 * (xn + r) <= zn - n) {
- tds = zds + n + xn + r;
+ BDIGIT *const tds = zds + n + xn + r;
mulfunc(tds, tn, xds, xn, yds + n, r, wds, wn);
BDIGITS_ZERO(zds + n + xn, r);
bary_add(zds + n, tn,
@@ -1675,21 +1694,25 @@ bary_mul_balance_with_mulfunc(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t
tds, tn);
}
else {
+ BDIGIT *const tds = zds + n;
if (wn < xn) {
+ /* xn is invariant, only once here */
+#if 0
wn = xn;
wds = ALLOCV_N(BDIGIT, work, wn);
+#else
+ rb_bug("wds is not enough: %" PRIdSIZE " for %" PRIdSIZE, wn, xn);
+#endif
}
- tds = zds + n;
MEMCPY(wds, zds + n, BDIGIT, xn);
mulfunc(tds, tn, xds, xn, yds + n, r, wds+xn, wn-xn);
bary_add(zds + n, tn,
zds + n, tn,
wds, xn);
}
- yn -= r;
- n += r;
+ n += r;
}
- BDIGITS_ZERO(zds+xn+yn0, zn - (xn+yn0));
+ BDIGITS_ZERO(zds+xn+yn, zn - (xn+yn));
if (work)
ALLOCV_END(work);
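
The restructured `bary_mul_balance_with_mulfunc` keeps the same slicing scheme: when `x` is much shorter than `y`, multiply `x` against successive `xn`-digit slices of `y` and accumulate each partial product at its digit offset. The hoisted pre-check now allocates the scratch buffer up front, so the in-loop allocation can no longer trigger (hence the `rb_bug` guard replacing it). A schematic of the loop, with `mul` and `add_at` as placeholders for `mulfunc` and the `bary_add` bookkeeping:

```c
/* Schematic only: mul() and add_at() are placeholders for mulfunc()
 * and the zds/bary_add bookkeeping in the real function. */
for (size_t n = 0; n < yn; ) {
    size_t r = (yn - n < xn) ? yn - n : xn;  /* next slice of y */
    mul(t, xn + r, xds, xn, yds + n, r);     /* t = x * y[n, n+r) */
    add_at(zds, t, xn + r, n);               /* z += t shifted n digits */
    n += r;
}
```
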
@@ -2006,7 +2029,7 @@ bary_mul_toom3(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, const BDIGI
}
/*
- * ref. http://en.wikipedia.org/wiki/Toom%E2%80%93Cook_multiplication
+ * ref. https://en.wikipedia.org/wiki/Toom%E2%80%93Cook_multiplication
*
* x(b) = x0 * b^0 + x1 * b^1 + x2 * b^2
* y(b) = y0 * b^0 + y1 * b^1 + y2 * b^2
@@ -2079,21 +2102,21 @@ bary_mul_toom3(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, const BDIGI
v3n = u3n; v3ds = u3ds; v3p = u3p;
}
else {
- /* v1 <- y0 + y2 */
+ /* v1 <- y0 + y2 */
bary_add(v1ds, v1n, y0ds, y0n, y2ds, y2n);
v1p = 1;
- /* y(-1) : v2 <- v1 - y1 = y0 - y1 + y2 */
+ /* y(-1) : v2 <- v1 - y1 = y0 - y1 + y2 */
v2p = 1;
if (bary_sub(v2ds, v2n, v1ds, v1n, y1ds, y1n)) {
bary_2comp(v2ds, v2n);
v2p = 0;
}
- /* y(1) : v1 <- v1 + y1 = y0 + y1 + y2 */
+ /* y(1) : v1 <- v1 + y1 = y0 + y1 + y2 */
bary_add(v1ds, v1n, v1ds, v1n, y1ds, y1n);
- /* y(-2) : v3 <- 2 * (v2 + y2) - y0 = y0 - 2 * (y1 - 2 * y2) */
+ /* y(-2) : v3 <- 2 * (v2 + y2) - y0 = y0 - 2 * (y1 - 2 * y2) */
v3p = 1;
if (v2p) {
bary_add(v3ds, v3n, v2ds, v2n, y2ds, y2n);
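
The reindented comments track Toom-3's five evaluation points (0, 1, -1, -2, infinity). The `y(-2)` step is the least obvious one; both forms given in the comment expand to the same value:

```latex
v_2 = y(-1) = y_0 - y_1 + y_2
\;\Longrightarrow\;
2\,(v_2 + y_2) - y_0 = y_0 - 2y_1 + 4y_2 = y(-2)
```
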
@@ -2286,7 +2309,7 @@ rb_big_mul_toom3(VALUE x, VALUE y)
return z;
}
-#ifdef USE_GMP
+#if USE_GMP
static inline void
bdigits_to_mpz(mpz_t mp, const BDIGIT *digits, size_t len)
{
@@ -2359,9 +2382,9 @@ bary_sparse_p(const BDIGIT *ds, size_t n)
{
long c = 0;
- if ( ds[rb_genrand_ulong_limited(n / 2) + n / 4]) c++;
- if (c <= 1 && ds[rb_genrand_ulong_limited(n / 2) + n / 4]) c++;
- if (c <= 1 && ds[rb_genrand_ulong_limited(n / 2) + n / 4]) c++;
+ if ( ds[2 * n / 5]) c++;
+ if (c <= 1 && ds[ n / 2]) c++;
+ if (c <= 1 && ds[3 * n / 5]) c++;
return (c <= 1) ? 1 : 0;
}
@@ -2424,8 +2447,8 @@ bary_mul_precheck(BDIGIT **zdsp, size_t *znp, const BDIGIT **xdsp, size_t *xnp,
if (xn > yn) {
const BDIGIT *tds;
size_t tn;
- tds = xds; xds = yds; yds = tds;
- tn = xn; xn = yn; yn = tn;
+ tds = xds; xds = yds; yds = tds;
+ tn = xn; xn = yn; yn = tn;
}
assert(xn <= yn);
@@ -2469,12 +2492,7 @@ bary_mul_karatsuba_branch(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn,
{
/* normal multiplication when x is small */
if (xn < KARATSUBA_MUL_DIGITS) {
- normal:
- if (xds == yds && xn == yn)
- bary_sq_fast(zds, zn, xds, xn);
- else
- bary_short_mul(zds, zn, xds, xn, yds, yn);
- return;
+ goto normal;
}
/* normal multiplication when x or y is a sparse bignum */
@@ -2492,6 +2510,15 @@ bary_mul_karatsuba_branch(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn,
/* multiplication by karatsuba method */
bary_mul_karatsuba(zds, zn, xds, xn, yds, yn, wds, wn);
+ return;
+
+ normal:
+ if (xds == yds && xn == yn) {
+ bary_sq_fast(zds, zn, xds, xn);
+ }
+ else {
+ bary_short_mul(zds, zn, xds, xn, yds, yn);
+ }
}
static void
@@ -2547,7 +2574,7 @@ bary_mul(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, const BDIGIT *yds
}
}
-#ifdef USE_GMP
+#if USE_GMP
bary_mul_gmp(zds, zn, xds, xn, yds, yn);
#else
bary_mul_toom3_start(zds, zn, xds, xn, yds, yn, NULL, 0);
@@ -2571,26 +2598,26 @@ bigdivrem1(void *ptr)
BDIGIT q;
do {
- if (bds->stop) {
- bds->zn = zn;
- return 0;
+ if (bds->stop) {
+ bds->zn = zn;
+ return 0;
}
- if (zds[zn-1] == yds[yn-1]) q = BDIGMAX;
- else q = (BDIGIT)((BIGUP(zds[zn-1]) + zds[zn-2])/yds[yn-1]);
- if (q) {
+ if (zds[zn-1] == yds[yn-1]) q = BDIGMAX;
+ else q = (BDIGIT)((BIGUP(zds[zn-1]) + zds[zn-2])/yds[yn-1]);
+ if (q) {
num = bigdivrem_mulsub(zds+zn-(yn+1), yn+1,
q,
yds, yn);
- while (num) { /* "add back" required */
- q--;
+ while (num) { /* "add back" required */
+ q--;
num = bary_add(zds+zn-(yn+1), yn,
zds+zn-(yn+1), yn,
yds, yn);
num--;
- }
- }
+ }
+ }
zn--;
- zds[zn] = q;
+ zds[zn] = q;
} while (zn > yn);
return 0;
}
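
`bigdivrem1` is the schoolbook long division of Knuth's Algorithm D: each quotient digit is estimated from the remainder's top two digits divided by the divisor's top digit. With the divisor normalized (high bit of `yds[yn-1]` set), the estimate overshoots by at most 2, which the "add back" loop then repairs. A sketch of the estimate with illustrative 32-bit digits:

```c
#include <stdint.h>
#include <stddef.h>

/* Estimate one quotient digit from the leading digits, mirroring
 * `q = (BIGUP(zds[zn-1]) + zds[zn-2]) / yds[yn-1]`; the equal-top-
 * digits case clamps to the maximum digit (BDIGMAX) because the
 * division below would not fit in one digit. */
static uint32_t
estimate_qdigit(const uint32_t *z, size_t zn, const uint32_t *y, size_t yn)
{
    if (z[zn-1] == y[yn-1])
        return UINT32_MAX;
    uint64_t top2 = ((uint64_t)z[zn-1] << 32) | z[zn-2];
    return (uint32_t)(top2 / y[yn-1]);  /* z[zn-1] < y[yn-1]: quotient < 2^32 */
}
```
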
@@ -2659,16 +2686,16 @@ bigdivrem_restoring(BDIGIT *zds, size_t zn, BDIGIT *yds, size_t yn)
bds.zn = zn - ynzero;
if (bds.zn > 10000 || bds.yn > 10000) {
retry:
- bds.stop = Qfalse;
+ bds.stop = Qfalse;
rb_nogvl(bigdivrem1, &bds, rb_big_stop, &bds, RB_NOGVL_UBF_ASYNC_SAFE);
- if (bds.stop == Qtrue) {
- /* execute trap handler, but exception was not raised. */
- goto retry;
- }
+ if (bds.stop == Qtrue) {
+ /* execute trap handler, but exception was not raised. */
+ goto retry;
+ }
}
else {
- bigdivrem1(&bds);
+ bigdivrem1(&bds);
}
}
@@ -2767,7 +2794,7 @@ rb_big_divrem_normal(VALUE x, VALUE y)
return rb_assoc_new(q, r);
}
-#ifdef USE_GMP
+#if USE_GMP
static void
bary_divmod_gmp(BDIGIT *qds, size_t qn, BDIGIT *rds, size_t rn, const BDIGIT *xds, size_t xn, const BDIGIT *yds, size_t yn)
{
@@ -2851,7 +2878,7 @@ rb_big_divrem_gmp(VALUE x, VALUE y)
static void
bary_divmod_branch(BDIGIT *qds, size_t qn, BDIGIT *rds, size_t rn, const BDIGIT *xds, size_t xn, const BDIGIT *yds, size_t yn)
{
-#ifdef USE_GMP
+#if USE_GMP
if (GMP_DIV_DIGITS < xn) {
bary_divmod_gmp(qds, qn, rds, rn, xds, xn, yds, yn);
return;
@@ -2906,29 +2933,8 @@ bary_divmod(BDIGIT *qds, size_t qn, BDIGIT *rds, size_t rn, const BDIGIT *xds, s
}
-#define BIGNUM_DEBUG 0
-#if BIGNUM_DEBUG
-#define ON_DEBUG(x) do { x; } while (0)
-static void
-dump_bignum(VALUE x)
-{
- long i;
- printf("%c0x0", BIGNUM_SIGN(x) ? '+' : '-');
- for (i = BIGNUM_LEN(x); i--; ) {
- printf("_%0*"PRIxBDIGIT, SIZEOF_BDIGIT*2, BDIGITS(x)[i]);
- }
- printf(", len=%"PRIuSIZE, BIGNUM_LEN(x));
- puts("");
-}
-
-static VALUE
-rb_big_dump(VALUE x)
-{
- dump_bignum(x);
- return x;
-}
-#else
-#define ON_DEBUG(x)
+#ifndef BIGNUM_DEBUG
+# define BIGNUM_DEBUG (0+RUBY_DEBUG)
#endif
static int
@@ -2947,7 +2953,7 @@ int
rb_cmpint(VALUE val, VALUE a, VALUE b)
{
if (NIL_P(val)) {
- rb_cmperr(a, b);
+ rb_cmperr(a, b);
}
if (FIXNUM_P(val)) {
long l = FIX2LONG(val);
@@ -2956,9 +2962,9 @@ rb_cmpint(VALUE val, VALUE a, VALUE b)
return 0;
}
if (RB_BIGNUM_TYPE_P(val)) {
- if (BIGZEROP(val)) return 0;
- if (BIGNUM_SIGN(val)) return 1;
- return -1;
+ if (BIGZEROP(val)) return 0;
+ if (BIGNUM_SIGN(val)) return 1;
+ return -1;
}
if (RTEST(rb_funcall(val, '>', 1, INT2FIX(0)))) return 1;
if (RTEST(rb_funcall(val, '<', 1, INT2FIX(0)))) return -1;
@@ -2968,8 +2974,8 @@ rb_cmpint(VALUE val, VALUE a, VALUE b)
#define BIGNUM_SET_LEN(b,l) \
(BIGNUM_EMBED_P(b) ? \
(void)(RBASIC(b)->flags = \
- (RBASIC(b)->flags & ~BIGNUM_EMBED_LEN_MASK) | \
- ((l) << BIGNUM_EMBED_LEN_SHIFT)) : \
+ (RBASIC(b)->flags & ~BIGNUM_EMBED_LEN_MASK) | \
+ ((l) << BIGNUM_EMBED_LEN_SHIFT)) : \
(void)(RBIGNUM(b)->as.heap.len = (l)))
static void
@@ -2977,33 +2983,33 @@ rb_big_realloc(VALUE big, size_t len)
{
BDIGIT *ds;
if (BIGNUM_EMBED_P(big)) {
- if (BIGNUM_EMBED_LEN_MAX < len) {
- ds = ALLOC_N(BDIGIT, len);
- MEMCPY(ds, RBIGNUM(big)->as.ary, BDIGIT, BIGNUM_EMBED_LEN_MAX);
- RBIGNUM(big)->as.heap.len = BIGNUM_LEN(big);
- RBIGNUM(big)->as.heap.digits = ds;
+ if (BIGNUM_EMBED_LEN_MAX < len) {
+ ds = ALLOC_N(BDIGIT, len);
+ MEMCPY(ds, RBIGNUM(big)->as.ary, BDIGIT, BIGNUM_EMBED_LEN_MAX);
+ RBIGNUM(big)->as.heap.len = BIGNUM_LEN(big);
+ RBIGNUM(big)->as.heap.digits = ds;
FL_UNSET_RAW(big, BIGNUM_EMBED_FLAG);
- }
+ }
}
else {
- if (len <= BIGNUM_EMBED_LEN_MAX) {
- ds = RBIGNUM(big)->as.heap.digits;
+ if (len <= BIGNUM_EMBED_LEN_MAX) {
+ ds = RBIGNUM(big)->as.heap.digits;
FL_SET_RAW(big, BIGNUM_EMBED_FLAG);
- BIGNUM_SET_LEN(big, len);
+ BIGNUM_SET_LEN(big, len);
(void)VALGRIND_MAKE_MEM_UNDEFINED((void*)RBIGNUM(big)->as.ary, sizeof(RBIGNUM(big)->as.ary));
- if (ds) {
- MEMCPY(RBIGNUM(big)->as.ary, ds, BDIGIT, len);
- xfree(ds);
- }
- }
- else {
- if (BIGNUM_LEN(big) == 0) {
- RBIGNUM(big)->as.heap.digits = ALLOC_N(BDIGIT, len);
- }
- else {
- REALLOC_N(RBIGNUM(big)->as.heap.digits, BDIGIT, len);
- }
- }
+ if (ds) {
+ MEMCPY(RBIGNUM(big)->as.ary, ds, BDIGIT, len);
+ xfree(ds);
+ }
+ }
+ else {
+ if (BIGNUM_LEN(big) == 0) {
+ RBIGNUM(big)->as.heap.digits = ALLOC_N(BDIGIT, len);
+ }
+ else {
+ REALLOC_N(RBIGNUM(big)->as.heap.digits, BDIGIT, len);
+ }
+ }
}
}
@@ -3089,7 +3095,7 @@ abs2twocomp(VALUE *xp, long *n_ret)
MEMCPY(BDIGITS(z), ds, BDIGIT, n);
bary_2comp(BDIGITS(z), n);
hibits = BDIGMAX;
- *xp = z;
+ *xp = z;
}
*n_ret = n;
return hibits;
@@ -3113,7 +3119,7 @@ bigtrunc(VALUE x)
if (len == 0) return x;
while (--len && !ds[len]);
if (BIGNUM_LEN(x) > len+1) {
- rb_big_resize(x, len+1);
+ rb_big_resize(x, len+1);
}
return x;
}
@@ -3166,7 +3172,7 @@ static VALUE
bignorm(VALUE x)
{
if (RB_BIGNUM_TYPE_P(x)) {
- x = bigfixize(x);
+ x = bigfixize(x);
}
return x;
}
@@ -3188,8 +3194,8 @@ rb_uint2big(uintptr_t n)
digits[0] = n;
#else
for (i = 0; i < bdigit_roomof(SIZEOF_VALUE); i++) {
- digits[i] = BIGLO(n);
- n = BIGDN(n);
+ digits[i] = BIGLO(n);
+ n = BIGDN(n);
}
#endif
@@ -3208,14 +3214,14 @@ rb_int2big(intptr_t n)
if (n < 0) {
u = 1 + (VALUE)(-(n + 1)); /* u = -n avoiding overflow */
- neg = 1;
+ neg = 1;
}
else {
u = n;
}
big = rb_uint2big(u);
if (neg) {
- BIGNUM_SET_NEGATIVE_SIGN(big);
+ BIGNUM_SET_NEGATIVE_SIGN(big);
}
return big;
}
@@ -3374,7 +3380,7 @@ absint_numwords_generic(size_t numbytes, int nlz_bits_in_msbyte, size_t word_num
if (sign == 2) {
#if defined __GNUC__ && (__GNUC__ == 4 && __GNUC_MINOR__ == 4)
- *nlz_bits_ret = 0;
+ *nlz_bits_ret = 0;
#endif
return (size_t)-1;
}
@@ -3422,6 +3428,7 @@ rb_absint_numwords(VALUE val, size_t word_numbits, size_t *nlz_bits_ret)
numwords0 = absint_numwords_generic(numbytes, nlz_bits_in_msbyte, word_numbits, &nlz_bits0);
assert(numwords0 == numwords);
assert(nlz_bits0 == nlz_bits);
+ (void)numwords0;
}
#endif
}
@@ -3693,7 +3700,7 @@ rb_integer_unpack(const void *words, size_t numwords, size_t wordsize, size_t na
}
else if (num_bdigits == numberof(fixbuf)) {
val = bignew((long)num_bdigits+1, 0);
- MEMCPY(BDIGITS(val), fixbuf, BDIGIT, num_bdigits);
+ MEMCPY(BDIGITS(val), fixbuf, BDIGIT, num_bdigits);
BDIGITS(val)[num_bdigits++] = 1;
}
else {
@@ -3705,11 +3712,11 @@ rb_integer_unpack(const void *words, size_t numwords, size_t wordsize, size_t na
BDIGIT_DBL u = fixbuf[0] + BIGUP(fixbuf[1]);
if (u == 0)
return LONG2FIX(0);
- if (0 < sign && POSFIXABLE(u))
- return LONG2FIX(u);
- if (sign < 0 && BDIGIT_MSB(fixbuf[1]) == 0 &&
+ if (0 < sign && POSFIXABLE(u))
+ return LONG2FIX((long)u);
+ if (sign < 0 && BDIGIT_MSB(fixbuf[1]) == 0 &&
NEGFIXABLE(-(BDIGIT_DBL_SIGNED)u))
- return LONG2FIX(-(BDIGIT_DBL_SIGNED)u);
+ return LONG2FIX((long)-(BDIGIT_DBL_SIGNED)u);
val = bignew((long)num_bdigits, 0 <= sign);
MEMCPY(BDIGITS(val), fixbuf, BDIGIT, num_bdigits);
}
@@ -3759,42 +3766,41 @@ str2big_scan_digits(const char *s, const char *str, int base, int badcheck, size
int c;
if (!len) {
- *num_digits_p = 0;
- *len_p = 0;
- return TRUE;
+ *num_digits_p = 0;
+ *len_p = 0;
+ return TRUE;
}
- if (badcheck && *str == '_') goto bad;
+ if (badcheck && *str == '_') return FALSE;
while ((c = *str++) != 0) {
- if (c == '_') {
- if (nondigit) {
- if (badcheck) goto bad;
- break;
- }
- nondigit = (char) c;
- }
- else if ((c = conv_digit(c)) < 0 || c >= base) {
- break;
- }
- else {
- nondigit = 0;
- num_digits++;
- digits_end = str;
- }
- if (len > 0 && !--len) break;
- }
- if (badcheck && nondigit) goto bad;
+ if (c == '_') {
+ if (nondigit) {
+ if (badcheck) return FALSE;
+ break;
+ }
+ nondigit = (char) c;
+ }
+ else if ((c = conv_digit(c)) < 0 || c >= base) {
+ break;
+ }
+ else {
+ nondigit = 0;
+ num_digits++;
+ digits_end = str;
+ }
+ if (len > 0 && !--len) break;
+ }
+ if (badcheck && nondigit) return FALSE;
if (badcheck && len) {
- str--;
- while (*str && ISSPACE(*str)) {
- str++;
- if (len > 0 && !--len) break;
- }
- if (len && *str) {
- bad:
- return FALSE;
- }
+ str--;
+ while (*str && ISSPACE(*str)) {
+ str++;
+ if (len > 0 && !--len) break;
+ }
+ if (len && *str) {
+ return FALSE;
+ }
}
*num_digits_p = num_digits;
*len_p = digits_end - digits_start;
@@ -3969,7 +3975,7 @@ str2big_karatsuba(
return z;
}
-#ifdef USE_GMP
+#if USE_GMP
static VALUE
str2big_gmp(
int sign,
@@ -4036,8 +4042,8 @@ rb_cstr_to_inum(const char *str, int base, int badcheck)
char *end;
VALUE ret = rb_cstr_parse_inum(str, -1, (badcheck ? NULL : &end), base);
if (NIL_P(ret)) {
- if (badcheck) rb_invalid_str(str, "Integer()");
- ret = INT2FIX(0);
+ if (badcheck) rb_invalid_str(str, "Integer()");
+ ret = INT2FIX(0);
}
return ret;
}
@@ -4061,7 +4067,7 @@ rb_cstr_to_inum(const char *str, int base, int badcheck)
VALUE
rb_int_parse_cstr(const char *str, ssize_t len, char **endp, size_t *ndigits,
- int base, int flags)
+ int base, int flags)
{
const char *const s = str;
char sign = 1;
@@ -4078,85 +4084,82 @@ rb_int_parse_cstr(const char *str, ssize_t len, char **endp, size_t *ndigits,
const int badcheck = !endp;
#define ADV(n) do {\
- if (len > 0 && len <= (n)) goto bad; \
- str += (n); \
- len -= (n); \
+ if (len > 0 && len <= (n)) goto bad; \
+ str += (n); \
+ len -= (n); \
} while (0)
#define ASSERT_LEN() do {\
- assert(len != 0); \
- if (len0 >= 0) assert(s + len0 == str + len); \
+ assert(len != 0); \
+ if (len0 >= 0) assert(s + len0 == str + len); \
} while (0)
if (!str) {
- bad:
- if (endp) *endp = (char *)str;
- if (ndigits) *ndigits = num_digits;
- return z;
+ goto bad;
}
if (len && (flags & RB_INT_PARSE_SIGN)) {
- while (ISSPACE(*str)) ADV(1);
+ while (ISSPACE(*str)) ADV(1);
- if (str[0] == '+') {
- ADV(1);
- }
- else if (str[0] == '-') {
- ADV(1);
- sign = 0;
- }
- ASSERT_LEN();
+ if (str[0] == '+') {
+ ADV(1);
+ }
+ else if (str[0] == '-') {
+ ADV(1);
+ sign = 0;
+ }
+ ASSERT_LEN();
}
if (base <= 0) {
- if (str[0] == '0' && len > 1) {
- switch (str[1]) {
- case 'x': case 'X':
- base = 16;
- ADV(2);
- break;
- case 'b': case 'B':
- base = 2;
- ADV(2);
- break;
- case 'o': case 'O':
- base = 8;
- ADV(2);
- break;
- case 'd': case 'D':
- base = 10;
- ADV(2);
- break;
- default:
- base = 8;
- }
- }
- else if (base < -1) {
- base = -base;
- }
- else {
- base = 10;
- }
+ if (str[0] == '0' && len > 1) {
+ switch (str[1]) {
+ case 'x': case 'X':
+ base = 16;
+ ADV(2);
+ break;
+ case 'b': case 'B':
+ base = 2;
+ ADV(2);
+ break;
+ case 'o': case 'O':
+ base = 8;
+ ADV(2);
+ break;
+ case 'd': case 'D':
+ base = 10;
+ ADV(2);
+ break;
+ default:
+ base = 8;
+ }
+ }
+ else if (base < -1) {
+ base = -base;
+ }
+ else {
+ base = 10;
+ }
}
else if (len == 1 || !(flags & RB_INT_PARSE_PREFIX)) {
- /* no prefix */
+ /* no prefix */
}
else if (base == 2) {
- if (str[0] == '0' && (str[1] == 'b'||str[1] == 'B')) {
- ADV(2);
- }
+ if (str[0] == '0' && (str[1] == 'b'||str[1] == 'B')) {
+ ADV(2);
+ }
}
else if (base == 8) {
- if (str[0] == '0' && (str[1] == 'o'||str[1] == 'O')) {
- ADV(2);
- }
+ if (str[0] == '0' && (str[1] == 'o'||str[1] == 'O')) {
+ ADV(2);
+ }
}
else if (base == 10) {
- if (str[0] == '0' && (str[1] == 'd'||str[1] == 'D')) {
- ADV(2);
- }
+ if (str[0] == '0' && (str[1] == 'd'||str[1] == 'D')) {
+ ADV(2);
+ }
}
else if (base == 16) {
- if (str[0] == '0' && (str[1] == 'x'||str[1] == 'X')) {
- ADV(2);
- }
+ if (str[0] == '0' && (str[1] == 'x'||str[1] == 'X')) {
+ ADV(2);
+ }
}
if (!valid_radix_p(base)) {
invalid_radix(base);
@@ -4164,80 +4167,79 @@ rb_int_parse_cstr(const char *str, ssize_t len, char **endp, size_t *ndigits,
if (!len) goto bad;
num_digits = str - s;
if (*str == '0' && len != 1) { /* squeeze preceding 0s */
- int us = 0;
- const char *end = len < 0 ? NULL : str + len;
- ++num_digits;
- while ((c = *++str) == '0' ||
- ((flags & RB_INT_PARSE_UNDERSCORE) && c == '_')) {
- if (c == '_') {
- if (++us >= 2)
- break;
- }
- else {
- ++num_digits;
- us = 0;
- }
- if (str == end) break;
- }
- if (!c || ISSPACE(c)) --str;
- if (end) len = end - str;
- ASSERT_LEN();
+ int us = 0;
+ const char *end = len < 0 ? NULL : str + len;
+ ++num_digits;
+ while ((c = *++str) == '0' ||
+ ((flags & RB_INT_PARSE_UNDERSCORE) && c == '_')) {
+ if (c == '_') {
+ if (++us >= 2)
+ break;
+ }
+ else {
+ ++num_digits;
+ us = 0;
+ }
+ if (str == end) break;
+ }
+ if (!c || ISSPACE(c)) --str;
+ if (end) len = end - str;
}
c = *str;
c = conv_digit(c);
if (c < 0 || c >= base) {
- if (!badcheck && num_digits) z = INT2FIX(0);
- goto bad;
+ if (!badcheck && num_digits) z = INT2FIX(0);
+ goto bad;
}
if (ndigits) *ndigits = num_digits;
val = ruby_scan_digits(str, len, base, &num_digits, &ov);
if (!ov) {
- const char *end = &str[num_digits];
- if (num_digits > 0 && *end == '_' && (flags & RB_INT_PARSE_UNDERSCORE))
- goto bigparse;
- if (endp) *endp = (char *)end;
- if (ndigits) *ndigits += num_digits;
- if (badcheck) {
- if (num_digits == 0) return Qnil; /* no number */
- while (len < 0 ? *end : end < str + len) {
- if (!ISSPACE(*end)) return Qnil; /* trailing garbage */
- end++;
- }
- }
-
- if (POSFIXABLE(val)) {
- if (sign) return LONG2FIX(val);
- else {
- long result = -(long)val;
- return LONG2FIX(result);
- }
- }
- else {
- VALUE big = rb_uint2big(val);
- BIGNUM_SET_SIGN(big, sign);
- return bignorm(big);
- }
+ const char *end = &str[num_digits];
+ if (num_digits > 0 && *end == '_' && (flags & RB_INT_PARSE_UNDERSCORE))
+ goto bigparse;
+ if (endp) *endp = (char *)end;
+ if (ndigits) *ndigits += num_digits;
+ if (badcheck) {
+ if (num_digits == 0) return Qnil; /* no number */
+ while (len < 0 ? *end : end < str + len) {
+ if (!ISSPACE(*end)) return Qnil; /* trailing garbage */
+ end++;
+ }
+ }
+
+ if (POSFIXABLE(val)) {
+ if (sign) return LONG2FIX(val);
+ else {
+ long result = -(long)val;
+ return LONG2FIX(result);
+ }
+ }
+ else {
+ VALUE big = rb_uint2big(val);
+ BIGNUM_SET_SIGN(big, sign);
+ return bignorm(big);
+ }
}
bigparse:
digits_start = str;
if (!str2big_scan_digits(s, str, base, badcheck, &num_digits, &len))
- goto bad;
+ goto bad;
if (endp) *endp = (char *)(str + len);
if (ndigits) *ndigits += num_digits;
digits_end = digits_start + len;
if (POW2_P(base)) {
z = str2big_poweroftwo(sign, digits_start, digits_end, num_digits,
- bit_length(base-1));
+ bit_length(base-1));
}
else {
int digits_per_bdigits_dbl;
maxpow_in_bdigit_dbl(base, &digits_per_bdigits_dbl);
num_bdigits = roomof(num_digits, digits_per_bdigits_dbl)*2;
-#ifdef USE_GMP
+#if USE_GMP
if (GMP_STR2BIG_DIGITS < num_bdigits) {
z = str2big_gmp(sign, digits_start, digits_end, num_digits,
num_bdigits, base);
@@ -4255,13 +4257,18 @@ rb_int_parse_cstr(const char *str, ssize_t len, char **endp, size_t *ndigits,
}
return bignorm(z);
+
+ bad:
+ if (endp) *endp = (char *)str;
+ if (ndigits) *ndigits = num_digits;
+ return z;
}
static VALUE
rb_cstr_parse_inum(const char *str, ssize_t len, char **endp, int base)
{
return rb_int_parse_cstr(str, len, endp, NULL, base,
- RB_INT_PARSE_DEFAULT);
+ RB_INT_PARSE_DEFAULT);
}
VALUE
@@ -4310,14 +4317,14 @@ rb_str2big_poweroftwo(VALUE arg, int base, int badcheck)
s = str = StringValueCStr(arg);
len = RSTRING_LEN(arg);
if (*str == '-') {
- len--;
+ len--;
str++;
positive_p = 0;
}
digits_start = str;
if (!str2big_scan_digits(s, str, base, badcheck, &num_digits, &len))
- invalid_integer(arg);
+ invalid_integer(arg);
digits_end = digits_start + len;
z = str2big_poweroftwo(positive_p, digits_start, digits_end, num_digits,
@@ -4349,14 +4356,14 @@ rb_str2big_normal(VALUE arg, int base, int badcheck)
s = str = StringValuePtr(arg);
len = RSTRING_LEN(arg);
if (len > 0 && *str == '-') {
- len--;
+ len--;
str++;
positive_p = 0;
}
digits_start = str;
if (!str2big_scan_digits(s, str, base, badcheck, &num_digits, &len))
- invalid_integer(arg);
+ invalid_integer(arg);
digits_end = digits_start + len;
maxpow_in_bdigit_dbl(base, &digits_per_bdigits_dbl);
@@ -4391,14 +4398,14 @@ rb_str2big_karatsuba(VALUE arg, int base, int badcheck)
s = str = StringValuePtr(arg);
len = RSTRING_LEN(arg);
if (len > 0 && *str == '-') {
- len--;
+ len--;
str++;
positive_p = 0;
}
digits_start = str;
if (!str2big_scan_digits(s, str, base, badcheck, &num_digits, &len))
- invalid_integer(arg);
+ invalid_integer(arg);
digits_end = digits_start + len;
maxpow_in_bdigit_dbl(base, &digits_per_bdigits_dbl);
@@ -4412,7 +4419,7 @@ rb_str2big_karatsuba(VALUE arg, int base, int badcheck)
return bignorm(z);
}
-#ifdef USE_GMP
+#if USE_GMP
VALUE
rb_str2big_gmp(VALUE arg, int base, int badcheck)
{
@@ -4434,14 +4441,14 @@ rb_str2big_gmp(VALUE arg, int base, int badcheck)
s = str = StringValuePtr(arg);
len = RSTRING_LEN(arg);
if (len > 0 && *str == '-') {
- len--;
+ len--;
str++;
positive_p = 0;
}
digits_start = str;
if (!str2big_scan_digits(s, str, base, badcheck, &num_digits, &len))
- invalid_integer(arg);
+ invalid_integer(arg);
digits_end = digits_start + len;
maxpow_in_bdigit_dbl(base, &digits_per_bdigits_dbl);
@@ -4468,8 +4475,8 @@ rb_ull2big(unsigned LONG_LONG n)
digits[0] = n;
#else
for (i = 0; i < bdigit_roomof(SIZEOF_LONG_LONG); i++) {
- digits[i] = BIGLO(n);
- n = BIGDN(n);
+ digits[i] = BIGLO(n);
+ n = BIGDN(n);
}
#endif
@@ -4488,14 +4495,14 @@ rb_ll2big(LONG_LONG n)
if (n < 0) {
u = 1 + (unsigned LONG_LONG)(-(n + 1)); /* u = -n avoiding overflow */
- neg = 1;
+ neg = 1;
}
else {
u = n;
}
big = rb_ull2big(u);
if (neg) {
- BIGNUM_SET_NEGATIVE_SIGN(big);
+ BIGNUM_SET_NEGATIVE_SIGN(big);
}
return big;
}
@@ -4503,14 +4510,14 @@ rb_ll2big(LONG_LONG n)
VALUE
rb_ull2inum(unsigned LONG_LONG n)
{
- if (POSFIXABLE(n)) return LONG2FIX(n);
+ if (POSFIXABLE(n)) return LONG2FIX((long)n);
return rb_ull2big(n);
}
VALUE
rb_ll2inum(LONG_LONG n)
{
- if (FIXABLE(n)) return LONG2FIX(n);
+ if (FIXABLE(n)) return LONG2FIX((long)n);
return rb_ll2big(n);
}
@@ -4525,7 +4532,7 @@ rb_uint128t2big(uint128_t n)
BDIGIT *digits = BDIGITS(big);
for (i = 0; i < bdigit_roomof(SIZEOF_INT128_T); i++) {
- digits[i] = BIGLO(RSHIFT(n ,BITSPERDIG*i));
+        digits[i] = BIGLO(RSHIFT(n, BITSPERDIG*i));
}
i = bdigit_roomof(SIZEOF_INT128_T);
@@ -4543,14 +4550,14 @@ rb_int128t2big(int128_t n)
if (n < 0) {
u = 1 + (uint128_t)(-(n + 1)); /* u = -n avoiding overflow */
- neg = 1;
+ neg = 1;
}
else {
u = n;
}
big = rb_uint128t2big(u);
if (neg) {
- BIGNUM_SET_NEGATIVE_SIGN(big);
+ BIGNUM_SET_NEGATIVE_SIGN(big);
}
return big;
}
@@ -4579,11 +4586,14 @@ big_shift3(VALUE x, int lshift_p, size_t shift_numdigits, int shift_numbits)
if (lshift_p) {
if (LONG_MAX < shift_numdigits) {
- rb_raise(rb_eArgError, "too big number");
+ too_big:
+ rb_raise(rb_eRangeError, "shift width too big");
}
s1 = shift_numdigits;
s2 = shift_numbits;
+ if ((size_t)s1 != shift_numdigits) goto too_big;
xn = BIGNUM_LEN(x);
+ if (LONG_MAX/SIZEOF_BDIGIT <= xn+s1) goto too_big;
z = bignew(xn+s1+1, BIGNUM_SIGN(x));
zds = BDIGITS(z);
BDIGITS_ZERO(zds, s1);
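
The guards added to `big_shift3` turn a shift width that cannot be represented, or one whose result would blow past the bignum size limit, into a `RangeError` up front; note they are ordered so the checks themselves cannot overflow. A condensed sketch of the same pattern (digit size fixed at 4 bytes for illustration):

```c
#include <limits.h>
#include <stddef.h>

#define SIZEOF_BDIGIT 4  /* illustrative; configured per platform */

/* Pre-check the result length of a left shift by `shift_numdigits`
 * whole digits: every comparison runs before any arithmetic that
 * could itself wrap around. */
static int
lshift_len_ok(size_t xn, size_t shift_numdigits)
{
    if (LONG_MAX < shift_numdigits)
        return 0;  /* the digit count itself does not fit in a long */
    if ((size_t)(LONG_MAX / SIZEOF_BDIGIT) <= xn + shift_numdigits)
        return 0;  /* xn + s1 + 1 digits would exceed the size limit */
    return 1;
}
```
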
@@ -4713,7 +4723,7 @@ power_cache_get_power(int base, int power_level, size_t *numdigits_ret)
rb_obj_hide(power);
base36_power_cache[base - 2][power_level] = power;
base36_numdigits_cache[base - 2][power_level] = numdigits;
- rb_gc_register_mark_object(power);
+ rb_gc_register_mark_object(power);
}
if (numdigits_ret)
*numdigits_ret = base36_numdigits_cache[base - 2][power_level];
@@ -4764,7 +4774,7 @@ big2str_2bdigits(struct big2str_struct *b2s, BDIGIT *xds, size_t xn, size_t tail
} while (num);
len = sizeof(buf) - j;
big2str_alloc(b2s, len + taillen);
- MEMCPY(b2s->ptr, buf + j, char, len);
+ MEMCPY(b2s->ptr, buf + j, char, len);
}
else {
p = b2s->ptr;
@@ -4781,7 +4791,7 @@ big2str_2bdigits(struct big2str_struct *b2s, BDIGIT *xds, size_t xn, size_t tail
static void
big2str_karatsuba(struct big2str_struct *b2s, BDIGIT *xds, size_t xn, size_t wn,
- int power_level, size_t taillen)
+ int power_level, size_t taillen)
{
VALUE b;
size_t half_numdigits, lower_numdigits;
@@ -4811,17 +4821,17 @@ big2str_karatsuba(struct big2str_struct *b2s, BDIGIT *xds, size_t xn, size_t wn,
*/
if (xn == 0 || bary_zero_p(xds, xn)) {
- if (b2s->ptr) {
+ if (b2s->ptr) {
/* When x is zero, power_cache_get_power(base, power_level) should be cached already. */
power_cache_get_power(b2s->base, power_level, &len);
- memset(b2s->ptr, '0', len);
+ memset(b2s->ptr, '0', len);
b2s->ptr += len;
- }
+ }
return;
}
if (power_level == 0) {
- big2str_2bdigits(b2s, xds, xn, taillen);
+ big2str_2bdigits(b2s, xds, xn, taillen);
return;
}
@@ -4849,7 +4859,7 @@ big2str_karatsuba(struct big2str_struct *b2s, BDIGIT *xds, size_t xn, size_t wn,
memset(b2s->ptr, '0', len);
b2s->ptr += len;
}
- big2str_2bdigits(b2s, xds, xn, taillen);
+ big2str_2bdigits(b2s, xds, xn, taillen);
}
else {
BDIGIT *qds, *rds;
@@ -4953,11 +4963,11 @@ big2str_generic(VALUE x, int base)
BARY_TRUNC(xds, xn);
if (xn == 0) {
- return rb_usascii_str_new2("0");
+ return rb_usascii_str_new2("0");
}
if (!valid_radix_p(base))
- invalid_radix(base);
+ invalid_radix(base);
if (xn >= LONG_MAX/BITSPERDIG) {
rb_raise(rb_eRangeError, "bignum too big to convert into `string'");
@@ -4994,7 +5004,7 @@ big2str_generic(VALUE x, int base)
b2s_data.ptr = NULL;
if (power_level == 0) {
- big2str_2bdigits(&b2s_data, xds, xn, 0);
+ big2str_2bdigits(&b2s_data, xds, xn, 0);
}
else {
VALUE tmpw = 0;
@@ -5003,7 +5013,7 @@ big2str_generic(VALUE x, int base)
wn = power_level * BIGDIVREM_EXTRA_WORDS + BIGNUM_LEN(power);
wds = ALLOCV_N(BDIGIT, tmpw, xn + wn);
MEMCPY(wds, xds, BDIGIT, xn);
- big2str_karatsuba(&b2s_data, wds, xn, wn, power_level, 0);
+ big2str_karatsuba(&b2s_data, wds, xn, wn, power_level, 0);
if (tmpw)
ALLOCV_END(tmpw);
}
@@ -5022,7 +5032,7 @@ rb_big2str_generic(VALUE x, int base)
return big2str_generic(x, base);
}
-#ifdef USE_GMP
+#if USE_GMP
static VALUE
big2str_gmp(VALUE x, int base)
{
@@ -5069,7 +5079,7 @@ rb_big2str1(VALUE x, int base)
size_t xn;
if (FIXNUM_P(x)) {
- return rb_fix2str(x, base);
+ return rb_fix2str(x, base);
}
bigtrunc(x);
@@ -5078,11 +5088,11 @@ rb_big2str1(VALUE x, int base)
BARY_TRUNC(xds, xn);
if (xn == 0) {
- return rb_usascii_str_new2("0");
+ return rb_usascii_str_new2("0");
}
if (!valid_radix_p(base))
- invalid_radix(base);
+ invalid_radix(base);
if (xn >= LONG_MAX/BITSPERDIG) {
rb_raise(rb_eRangeError, "bignum too big to convert into `string'");
@@ -5093,7 +5103,7 @@ rb_big2str1(VALUE x, int base)
return big2str_base_poweroftwo(x, base);
}
-#ifdef USE_GMP
+#if USE_GMP
if (GMP_BIG2STR_DIGITS < xn) {
return big2str_gmp(x, base);
}
@@ -5129,7 +5139,7 @@ big2ulong(VALUE x, const char *type)
#else
num = 0;
for (i = 0; i < len; i++) {
- num <<= BITSPERDIG;
+ num <<= BITSPERDIG;
num += (unsigned long)ds[len - i - 1]; /* overflow is already checked */
}
#endif
@@ -5182,13 +5192,13 @@ big2ull(VALUE x, const char *type)
if (len == 0)
return 0;
if (BIGSIZE(x) > SIZEOF_LONG_LONG)
- rb_raise(rb_eRangeError, "bignum too big to convert into `%s'", type);
+ rb_raise(rb_eRangeError, "bignum too big to convert into `%s'", type);
#if SIZEOF_LONG_LONG <= SIZEOF_BDIGIT
num = (unsigned LONG_LONG)ds[0];
#else
num = 0;
for (i = 0; i < len; i++) {
- num = BIGUP(num);
+ num = BIGUP(num);
num += ds[len - i - 1];
}
#endif
@@ -5238,23 +5248,23 @@ dbl2big(double d)
double u = (d < 0)?-d:d;
if (isinf(d)) {
- rb_raise(rb_eFloatDomainError, d < 0 ? "-Infinity" : "Infinity");
+ rb_raise(rb_eFloatDomainError, d < 0 ? "-Infinity" : "Infinity");
}
if (isnan(d)) {
- rb_raise(rb_eFloatDomainError, "NaN");
+ rb_raise(rb_eFloatDomainError, "NaN");
}
while (1.0 <= u) {
- u /= (double)(BIGRAD);
- i++;
+ u /= (double)(BIGRAD);
+ i++;
}
z = bignew(i, d>=0);
digits = BDIGITS(z);
while (i--) {
- u *= BIGRAD;
- c = (BDIGIT)u;
- u -= c;
- digits[i] = c;
+ u *= BIGRAD;
+ c = (BDIGIT)u;
+ u -= c;
+ digits[i] = c;
}
return z;
@@ -5274,28 +5284,28 @@ big2dbl(VALUE x)
BDIGIT *ds = BDIGITS(x), dl;
if (i) {
- bits = i * BITSPERDIG - nlz(ds[i-1]);
- if (bits > DBL_MANT_DIG+DBL_MAX_EXP) {
- d = HUGE_VAL;
- }
- else {
- if (bits > DBL_MANT_DIG+1)
- lo = (bits -= DBL_MANT_DIG+1) / BITSPERDIG;
- else
- bits = 0;
- while (--i > lo) {
- d = ds[i] + BIGRAD*d;
- }
- dl = ds[i];
- if (bits && (dl & ((BDIGIT)1 << (bits %= BITSPERDIG)))) {
- int carry = (dl & ~(BDIGMAX << bits)) != 0;
- if (!carry) {
- while (i-- > 0) {
- carry = ds[i] != 0;
- if (carry) break;
- }
- }
- if (carry) {
+ bits = i * BITSPERDIG - nlz(ds[i-1]);
+ if (bits > DBL_MANT_DIG+DBL_MAX_EXP) {
+ d = HUGE_VAL;
+ }
+ else {
+ if (bits > DBL_MANT_DIG+1)
+ lo = (bits -= DBL_MANT_DIG+1) / BITSPERDIG;
+ else
+ bits = 0;
+ while (--i > lo) {
+ d = ds[i] + BIGRAD*d;
+ }
+ dl = ds[i];
+ if (bits && (dl & ((BDIGIT)1 << (bits %= BITSPERDIG)))) {
+ int carry = (dl & ~(BDIGMAX << bits)) != 0;
+ if (!carry) {
+ while (i-- > 0) {
+ carry = ds[i] != 0;
+ if (carry) break;
+ }
+ }
+ if (carry) {
BDIGIT mask = BDIGMAX;
BDIGIT bit = 1;
mask <<= bits;
@@ -5303,19 +5313,19 @@ big2dbl(VALUE x)
dl &= mask;
dl += bit;
dl = BIGLO(dl);
- if (!dl) d += 1;
- }
- }
- d = dl + BIGRAD*d;
- if (lo) {
- if (lo > INT_MAX / BITSPERDIG)
- d = HUGE_VAL;
- else if (lo < INT_MIN / BITSPERDIG)
- d = 0.0;
- else
- d = ldexp(d, (int)(lo * BITSPERDIG));
- }
- }
+ if (!dl) d += 1;
+ }
+ }
+ d = dl + BIGRAD*d;
+ if (lo) {
+ if (lo > INT_MAX / BITSPERDIG)
+ d = HUGE_VAL;
+ else if (lo < INT_MIN / BITSPERDIG)
+ d = 0.0;
+ else
+ d = ldexp(d, (int)(lo * BITSPERDIG));
+ }
+ }
}
if (BIGNUM_NEGATIVE_P(x)) d = -d;
return d;
@@ -5327,11 +5337,11 @@ rb_big2dbl(VALUE x)
double d = big2dbl(x);
if (isinf(d)) {
- rb_warning("Bignum out of Float range");
- if (d < 0.0)
- d = -HUGE_VAL;
- else
- d = HUGE_VAL;
+ rb_warning("Integer out of Float range");
+ if (d < 0.0)
+ d = -HUGE_VAL;
+ else
+ d = HUGE_VAL;
}
return d;
}
@@ -5401,7 +5411,7 @@ rb_integer_float_eq(VALUE x, VALUE y)
double yd = RFLOAT_VALUE(y);
double yi, yf;
- if (isnan(yd) || isinf(yd))
+ if (!isfinite(yd))
return Qfalse;
yf = modf(yd, &yi);
if (yf != 0)
@@ -5409,18 +5419,14 @@ rb_integer_float_eq(VALUE x, VALUE y)
if (FIXNUM_P(x)) {
#if SIZEOF_LONG * CHAR_BIT < DBL_MANT_DIG /* assume FLT_RADIX == 2 */
double xd = (double)FIX2LONG(x);
- if (xd != yd)
- return Qfalse;
- return Qtrue;
+ return RBOOL(xd == yd);
#else
long xn, yn;
if (yi < LONG_MIN || LONG_MAX_as_double <= yi)
return Qfalse;
xn = FIX2LONG(x);
yn = (long)yi;
- if (xn != yn)
- return Qfalse;
- return Qtrue;
+ return RBOOL(xn == yn);
#endif
}
y = rb_dbl2big(yi);
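
`RBOOL`, used here and throughout the remaining hunks, is the C API macro that turns a C condition into `Qtrue`/`Qfalse`, collapsing the old four-line `if (...) return Qfalse; return Qtrue;` patterns into a single expression:

```c
/* As defined in CRuby's public headers: */
#define RBOOL(v) ((v) ? Qtrue : Qfalse)
```
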
@@ -5432,26 +5438,26 @@ VALUE
rb_big_cmp(VALUE x, VALUE y)
{
if (FIXNUM_P(y)) {
- x = bigfixize(x);
+ x = bigfixize(x);
if (FIXNUM_P(x)) {
- /* SIGNED_VALUE and Fixnum have same sign-bits, same
- * order */
- SIGNED_VALUE sx = (SIGNED_VALUE)x, sy = (SIGNED_VALUE)y;
- if (sx < sy) return INT2FIX(-1);
- return INT2FIX(sx > sy);
+ /* SIGNED_VALUE and Fixnum have same sign-bits, same
+ * order */
+ SIGNED_VALUE sx = (SIGNED_VALUE)x, sy = (SIGNED_VALUE)y;
+ if (sx < sy) return INT2FIX(-1);
+ return INT2FIX(sx > sy);
}
}
else if (RB_BIGNUM_TYPE_P(y)) {
- if (BIGNUM_SIGN(x) == BIGNUM_SIGN(y)) {
- int cmp = bary_cmp(BDIGITS(x), BIGNUM_LEN(x), BDIGITS(y), BIGNUM_LEN(y));
- return INT2FIX(BIGNUM_SIGN(x) ? cmp : -cmp);
- }
+ if (BIGNUM_SIGN(x) == BIGNUM_SIGN(y)) {
+ int cmp = bary_cmp(BDIGITS(x), BIGNUM_LEN(x), BDIGITS(y), BIGNUM_LEN(y));
+ return INT2FIX(BIGNUM_SIGN(x) ? cmp : -cmp);
+ }
}
else if (RB_FLOAT_TYPE_P(y)) {
return rb_integer_float_cmp(x, y);
}
else {
- return rb_num_coerce_cmp(x, y, idCmp);
+ return rb_num_coerce_cmp(x, y, idCmp);
}
return INT2FIX(BIGNUM_SIGN(x) ? 1 : -1);
}
@@ -5470,30 +5476,30 @@ big_op(VALUE x, VALUE y, enum big_op_t op)
int n;
if (RB_INTEGER_TYPE_P(y)) {
- rel = rb_big_cmp(x, y);
+ rel = rb_big_cmp(x, y);
}
else if (RB_FLOAT_TYPE_P(y)) {
rel = rb_integer_float_cmp(x, y);
}
else {
- ID id = 0;
- switch (op) {
- case big_op_gt: id = '>'; break;
- case big_op_ge: id = idGE; break;
- case big_op_lt: id = '<'; break;
- case big_op_le: id = idLE; break;
- }
- return rb_num_coerce_relop(x, y, id);
+ ID id = 0;
+ switch (op) {
+ case big_op_gt: id = '>'; break;
+ case big_op_ge: id = idGE; break;
+ case big_op_lt: id = '<'; break;
+ case big_op_le: id = idLE; break;
+ }
+ return rb_num_coerce_relop(x, y, id);
}
if (NIL_P(rel)) return Qfalse;
n = FIX2INT(rel);
switch (op) {
- case big_op_gt: return n > 0 ? Qtrue : Qfalse;
- case big_op_ge: return n >= 0 ? Qtrue : Qfalse;
- case big_op_lt: return n < 0 ? Qtrue : Qfalse;
- case big_op_le: return n <= 0 ? Qtrue : Qfalse;
+ case big_op_gt: return RBOOL(n > 0);
+ case big_op_ge: return RBOOL(n >= 0);
+ case big_op_lt: return RBOOL(n < 0);
+ case big_op_le: return RBOOL(n <= 0);
}
return Qundef;
}
@@ -5528,7 +5534,7 @@ rb_big_le(VALUE x, VALUE y)
*
* Returns <code>true</code> only if <i>obj</i> has the same value
* as <i>big</i>. Contrast this with Integer#eql?, which requires
- * <i>obj</i> to be a Integer.
+ * <i>obj</i> to be an Integer.
*
* 68719476736 == 68719476736.0 #=> true
*/
@@ -5537,7 +5543,7 @@ VALUE
rb_big_eq(VALUE x, VALUE y)
{
if (FIXNUM_P(y)) {
- return bignorm(x) == y ? Qtrue : Qfalse;
+ return RBOOL(bignorm(x) == y);
}
else if (RB_BIGNUM_TYPE_P(y)) {
}
@@ -5545,12 +5551,11 @@ rb_big_eq(VALUE x, VALUE y)
return rb_integer_float_eq(x, y);
}
else {
- return rb_equal(y, x);
+ return rb_equal(y, x);
}
if (BIGNUM_SIGN(x) != BIGNUM_SIGN(y)) return Qfalse;
if (BIGNUM_LEN(x) != BIGNUM_LEN(y)) return Qfalse;
- if (MEMCMP(BDIGITS(x),BDIGITS(y),BDIGIT,BIGNUM_LEN(y)) != 0) return Qfalse;
- return Qtrue;
+ return RBOOL(MEMCMP(BDIGITS(x),BDIGITS(y),BDIGIT,BIGNUM_LEN(y)) == 0);
}
VALUE
@@ -5559,8 +5564,7 @@ rb_big_eql(VALUE x, VALUE y)
if (!RB_BIGNUM_TYPE_P(y)) return Qfalse;
if (BIGNUM_SIGN(x) != BIGNUM_SIGN(y)) return Qfalse;
if (BIGNUM_LEN(x) != BIGNUM_LEN(y)) return Qfalse;
- if (MEMCMP(BDIGITS(x),BDIGITS(y),BDIGIT,BIGNUM_LEN(y)) != 0) return Qfalse;
- return Qtrue;
+ return RBOOL(MEMCMP(BDIGITS(x),BDIGITS(y),BDIGIT,BIGNUM_LEN(y)) == 0);
}
VALUE
@@ -5654,10 +5658,10 @@ bigsub_int(VALUE x, long y0)
assert(xn == zn);
num = (BDIGIT_DBL_SIGNED)xds[0] - y;
if (xn == 1 && num < 0) {
- BIGNUM_NEGATE(z);
- zds[0] = (BDIGIT)-num;
- RB_GC_GUARD(x);
- return bignorm(z);
+ BIGNUM_NEGATE(z);
+ zds[0] = (BDIGIT)-num;
+ RB_GC_GUARD(x);
+ return bignorm(z);
}
zds[0] = BIGLO(num);
num = BIGDN(num);
@@ -5669,10 +5673,10 @@ bigsub_int(VALUE x, long y0)
num = 0;
for (i=0; i < xn; i++) {
if (y == 0) goto y_is_zero_x;
- num += (BDIGIT_DBL_SIGNED)xds[i] - BIGLO(y);
- zds[i] = BIGLO(num);
- num = BIGDN(num);
- y = BIGDN(y);
+ num += (BDIGIT_DBL_SIGNED)xds[i] - BIGLO(y);
+ zds[i] = BIGLO(num);
+ num = BIGDN(num);
+ y = BIGDN(y);
}
for (; i < zn; i++) {
if (y == 0) goto y_is_zero_z;
@@ -5687,9 +5691,9 @@ bigsub_int(VALUE x, long y0)
for (; i < xn; i++) {
y_is_zero_x:
if (num == 0) goto num_is_zero_x;
- num += xds[i];
- zds[i] = BIGLO(num);
- num = BIGDN(num);
+ num += xds[i];
+ zds[i] = BIGLO(num);
+ num = BIGDN(num);
}
#if SIZEOF_BDIGIT < SIZEOF_LONG
for (; i < zn; i++) {
@@ -5703,7 +5707,7 @@ bigsub_int(VALUE x, long y0)
for (; i < xn; i++) {
num_is_zero_x:
- zds[i] = xds[i];
+ zds[i] = xds[i];
}
#if SIZEOF_BDIGIT < SIZEOF_LONG
for (; i < zn; i++) {
@@ -5717,7 +5721,7 @@ bigsub_int(VALUE x, long y0)
assert(num == 0 || num == -1);
if (num < 0) {
get2comp(z);
- BIGNUM_NEGATE(z);
+ BIGNUM_NEGATE(z);
}
RB_GC_GUARD(x);
return bignorm(z);
@@ -5760,17 +5764,17 @@ bigadd_int(VALUE x, long y)
num = 0;
for (i=0; i < xn; i++) {
if (y == 0) goto y_is_zero_x;
- num += (BDIGIT_DBL)xds[i] + BIGLO(y);
- zds[i] = BIGLO(num);
- num = BIGDN(num);
- y = BIGDN(y);
+ num += (BDIGIT_DBL)xds[i] + BIGLO(y);
+ zds[i] = BIGLO(num);
+ num = BIGDN(num);
+ y = BIGDN(y);
}
for (; i < zn; i++) {
if (y == 0) goto y_is_zero_z;
- num += BIGLO(y);
- zds[i] = BIGLO(num);
- num = BIGDN(num);
- y = BIGDN(y);
+ num += BIGLO(y);
+ zds[i] = BIGLO(num);
+ num = BIGDN(num);
+ y = BIGDN(y);
}
goto finish;
@@ -5779,25 +5783,25 @@ bigadd_int(VALUE x, long y)
for (;i < xn; i++) {
y_is_zero_x:
if (num == 0) goto num_is_zero_x;
- num += (BDIGIT_DBL)xds[i];
- zds[i] = BIGLO(num);
- num = BIGDN(num);
+ num += (BDIGIT_DBL)xds[i];
+ zds[i] = BIGLO(num);
+ num = BIGDN(num);
}
for (; i < zn; i++) {
y_is_zero_z:
if (num == 0) goto num_is_zero_z;
- zds[i] = BIGLO(num);
- num = BIGDN(num);
+ zds[i] = BIGLO(num);
+ num = BIGDN(num);
}
goto finish;
for (;i < xn; i++) {
num_is_zero_x:
- zds[i] = xds[i];
+ zds[i] = xds[i];
}
for (; i < zn; i++) {
num_is_zero_z:
- zds[i] = 0;
+ zds[i] = 0;
}
goto finish;
@@ -5814,15 +5818,15 @@ bigadd(VALUE x, VALUE y, int sign)
sign = (sign == BIGNUM_SIGN(y));
if (BIGNUM_SIGN(x) != sign) {
- if (sign) return bigsub(y, x);
- return bigsub(x, y);
+ if (sign) return bigsub(y, x);
+ return bigsub(x, y);
}
if (BIGNUM_LEN(x) > BIGNUM_LEN(y)) {
- len = BIGNUM_LEN(x) + 1;
+ len = BIGNUM_LEN(x) + 1;
}
else {
- len = BIGNUM_LEN(y) + 1;
+ len = BIGNUM_LEN(y) + 1;
}
z = bignew(len, sign);
@@ -5839,26 +5843,26 @@ rb_big_plus(VALUE x, VALUE y)
long n;
if (FIXNUM_P(y)) {
- n = FIX2LONG(y);
- if ((n > 0) != BIGNUM_SIGN(x)) {
- if (n < 0) {
- n = -n;
- }
- return bigsub_int(x, n);
- }
- if (n < 0) {
- n = -n;
- }
- return bigadd_int(x, n);
+ n = FIX2LONG(y);
+ if ((n > 0) != BIGNUM_SIGN(x)) {
+ if (n < 0) {
+ n = -n;
+ }
+ return bigsub_int(x, n);
+ }
+ if (n < 0) {
+ n = -n;
+ }
+ return bigadd_int(x, n);
}
else if (RB_BIGNUM_TYPE_P(y)) {
- return bignorm(bigadd(x, y, 1));
+ return bignorm(bigadd(x, y, 1));
}
else if (RB_FLOAT_TYPE_P(y)) {
- return DBL2NUM(rb_big2dbl(x) + RFLOAT_VALUE(y));
+ return DBL2NUM(rb_big2dbl(x) + RFLOAT_VALUE(y));
}
else {
- return rb_num_coerce_bin(x, y, '+');
+ return rb_num_coerce_bin(x, y, '+');
}
}
@@ -5868,26 +5872,26 @@ rb_big_minus(VALUE x, VALUE y)
long n;
if (FIXNUM_P(y)) {
- n = FIX2LONG(y);
- if ((n > 0) != BIGNUM_SIGN(x)) {
- if (n < 0) {
- n = -n;
- }
- return bigadd_int(x, n);
- }
- if (n < 0) {
- n = -n;
- }
- return bigsub_int(x, n);
+ n = FIX2LONG(y);
+ if ((n > 0) != BIGNUM_SIGN(x)) {
+ if (n < 0) {
+ n = -n;
+ }
+ return bigadd_int(x, n);
+ }
+ if (n < 0) {
+ n = -n;
+ }
+ return bigsub_int(x, n);
}
else if (RB_BIGNUM_TYPE_P(y)) {
- return bignorm(bigadd(x, y, 0));
+ return bignorm(bigadd(x, y, 0));
}
else if (RB_FLOAT_TYPE_P(y)) {
- return DBL2NUM(rb_big2dbl(x) - RFLOAT_VALUE(y));
+ return DBL2NUM(rb_big2dbl(x) - RFLOAT_VALUE(y));
}
else {
- return rb_num_coerce_bin(x, y, '-');
+ return rb_num_coerce_bin(x, y, '-');
}
}
@@ -5946,15 +5950,15 @@ VALUE
rb_big_mul(VALUE x, VALUE y)
{
if (FIXNUM_P(y)) {
- y = rb_int2big(FIX2LONG(y));
+ y = rb_int2big(FIX2LONG(y));
}
else if (RB_BIGNUM_TYPE_P(y)) {
}
else if (RB_FLOAT_TYPE_P(y)) {
- return DBL2NUM(rb_big2dbl(x) * RFLOAT_VALUE(y));
+ return DBL2NUM(rb_big2dbl(x) * RFLOAT_VALUE(y));
}
else {
- return rb_num_coerce_bin(x, y, '*');
+ return rb_num_coerce_bin(x, y, '*');
}
return bignorm(bigmul0(x, y));
@@ -5981,21 +5985,21 @@ bigdivrem(VALUE x, VALUE y, volatile VALUE *divp, volatile VALUE *modp)
BARY_TRUNC(xds, xn);
if (xn < yn || (xn == yn && xds[xn - 1] < yds[yn - 1])) {
- if (divp) *divp = rb_int2big(0);
- if (modp) *modp = x;
- return Qnil;
+ if (divp) *divp = rb_int2big(0);
+ if (modp) *modp = x;
+ return Qnil;
}
if (yn == 1) {
- dd = yds[0];
- z = bignew(xn, BIGNUM_SIGN(x)==BIGNUM_SIGN(y));
- zds = BDIGITS(z);
+ dd = yds[0];
+ z = bignew(xn, BIGNUM_SIGN(x)==BIGNUM_SIGN(y));
+ zds = BDIGITS(z);
dd = bigdivrem_single(zds, xds, xn, dd);
- if (modp) {
- *modp = rb_uint2big((uintptr_t)dd);
- BIGNUM_SET_SIGN(*modp, BIGNUM_SIGN(x));
- }
- if (divp) *divp = z;
- return Qnil;
+ if (modp) {
+ *modp = rb_uint2big((uintptr_t)dd);
+ BIGNUM_SET_SIGN(*modp, BIGNUM_SIGN(x));
+ }
+ if (divp) *divp = z;
+ return Qnil;
}
if (xn == 2 && yn == 2) {
BDIGIT_DBL x0 = bary2bdigitdbl(xds, 2);
@@ -6060,11 +6064,11 @@ bigdivmod(VALUE x, VALUE y, volatile VALUE *divp, volatile VALUE *modp)
bigdivrem(x, y, divp, &mod);
if (BIGNUM_SIGN(x) != BIGNUM_SIGN(y) && !BIGZEROP(mod)) {
- if (divp) *divp = bigadd(*divp, rb_int2big(1), 0);
- if (modp) *modp = bigadd(mod, y, 1);
+ if (divp) *divp = bigadd(*divp, rb_int2big(1), 0);
+ if (modp) *modp = bigadd(mod, y, 1);
}
else if (modp) {
- *modp = mod;
+ *modp = mod;
}
}
@@ -6075,25 +6079,25 @@ rb_big_divide(VALUE x, VALUE y, ID op)
VALUE z;
if (FIXNUM_P(y)) {
- y = rb_int2big(FIX2LONG(y));
+ y = rb_int2big(FIX2LONG(y));
}
else if (RB_BIGNUM_TYPE_P(y)) {
}
else if (RB_FLOAT_TYPE_P(y)) {
- if (op == '/') {
+ if (op == '/') {
double dx = rb_big2dbl(x);
return rb_flo_div_flo(DBL2NUM(dx), y);
- }
- else {
+ }
+ else {
VALUE v;
- double dy = RFLOAT_VALUE(y);
- if (dy == 0.0) rb_num_zerodiv();
+ double dy = RFLOAT_VALUE(y);
+ if (dy == 0.0) rb_num_zerodiv();
v = rb_big_divide(x, y, '/');
return rb_dbl2big(RFLOAT_VALUE(v));
- }
+ }
}
else {
- return rb_num_coerce_bin(x, y, op);
+ return rb_num_coerce_bin(x, y, op);
}
bigdivmod(x, y, &z, 0);
@@ -6118,10 +6122,10 @@ rb_big_modulo(VALUE x, VALUE y)
VALUE z;
if (FIXNUM_P(y)) {
- y = rb_int2big(FIX2LONG(y));
+ y = rb_int2big(FIX2LONG(y));
}
else if (!RB_BIGNUM_TYPE_P(y)) {
- return rb_num_coerce_bin(x, y, '%');
+ return rb_num_coerce_bin(x, y, '%');
}
bigdivmod(x, y, 0, &z);
@@ -6134,10 +6138,10 @@ rb_big_remainder(VALUE x, VALUE y)
VALUE z;
if (FIXNUM_P(y)) {
- y = rb_int2big(FIX2LONG(y));
+ y = rb_int2big(FIX2LONG(y));
}
else if (!RB_BIGNUM_TYPE_P(y)) {
- return rb_num_coerce_bin(x, y, rb_intern("remainder"));
+ return rb_num_coerce_bin(x, y, rb_intern("remainder"));
}
bigdivrem(x, y, 0, &z);
@@ -6150,7 +6154,7 @@ rb_big_divmod(VALUE x, VALUE y)
VALUE div, mod;
if (FIXNUM_P(y)) {
- y = rb_int2big(FIX2LONG(y));
+ y = rb_int2big(FIX2LONG(y));
}
else if (!RB_BIGNUM_TYPE_P(y)) {
return rb_num_coerce_bin(x, y, idDivmod);
@@ -6164,9 +6168,9 @@ static VALUE
big_shift(VALUE x, long n)
{
if (n < 0)
- return big_lshift(x, 1+(unsigned long)(-(n+1)));
+ return big_lshift(x, 1+(unsigned long)(-(n+1)));
else if (n > 0)
- return big_rshift(x, (unsigned long)n);
+ return big_rshift(x, (unsigned long)n);
return x;
}
@@ -6190,9 +6194,9 @@ big_fdiv(VALUE x, VALUE y, long ey)
l = ex - ey;
#if SIZEOF_LONG > SIZEOF_INT
{
- /* Visual C++ can't be here */
- if (l > INT_MAX) return HUGE_VAL;
- if (l < INT_MIN) return 0.0;
+ /* Visual C++ can't be here */
+ if (l > INT_MAX) return HUGE_VAL;
+ if (l < INT_MIN) return 0.0;
}
#endif
return ldexp(big2dbl(z), (int)l);
@@ -6226,19 +6230,19 @@ rb_big_fdiv_double(VALUE x, VALUE y)
dx = big2dbl(x);
if (FIXNUM_P(y)) {
- dy = (double)FIX2LONG(y);
- if (isinf(dx))
- return big_fdiv_int(x, rb_int2big(FIX2LONG(y)));
+ dy = (double)FIX2LONG(y);
+ if (isinf(dx))
+ return big_fdiv_int(x, rb_int2big(FIX2LONG(y)));
}
else if (RB_BIGNUM_TYPE_P(y)) {
- return big_fdiv_int(x, y);
+ return big_fdiv_int(x, y);
}
else if (RB_FLOAT_TYPE_P(y)) {
- dy = RFLOAT_VALUE(y);
- if (isnan(dy))
- return dy;
- if (isinf(dx))
- return big_fdiv_float(x, y);
+ dy = RFLOAT_VALUE(y);
+ if (isnan(dy))
+ return dy;
+ if (isinf(dx))
+ return big_fdiv_float(x, y);
}
else {
return NUM2DBL(rb_num_coerce_bin(x, y, idFdiv));
@@ -6263,20 +6267,20 @@ rb_big_pow(VALUE x, VALUE y)
if (y == INT2FIX(0)) return INT2FIX(1);
if (y == INT2FIX(1)) return x;
if (RB_FLOAT_TYPE_P(y)) {
- d = RFLOAT_VALUE(y);
- if ((BIGNUM_NEGATIVE_P(x) && !BIGZEROP(x))) {
+ d = RFLOAT_VALUE(y);
+ if ((BIGNUM_NEGATIVE_P(x) && !BIGZEROP(x))) {
return rb_dbl_complex_new_polar_pi(pow(-rb_big2dbl(x), d), d);
- }
+ }
}
else if (RB_BIGNUM_TYPE_P(y)) {
- y = bignorm(y);
- if (FIXNUM_P(y))
- goto again;
- rb_warn("in a**b, b may be too big");
- d = rb_big2dbl(y);
+ y = bignorm(y);
+ if (FIXNUM_P(y))
+ goto again;
+ rb_warn("in a**b, b may be too big");
+ d = rb_big2dbl(y);
}
else if (FIXNUM_P(y)) {
- yy = FIX2LONG(y);
+ yy = FIX2LONG(y);
if (yy < 0) {
x = rb_big_pow(x, LONG2NUM(-yy));
@@ -6285,31 +6289,31 @@ rb_big_pow(VALUE x, VALUE y)
else
return DBL2NUM(1.0 / NUM2DBL(x));
}
- else {
- VALUE z = 0;
- SIGNED_VALUE mask;
+ else {
+ VALUE z = 0;
+ SIGNED_VALUE mask;
const size_t xbits = rb_absint_numwords(x, 1, NULL);
- const size_t BIGLEN_LIMIT = 32*1024*1024;
+ const size_t BIGLEN_LIMIT = 32*1024*1024;
- if (xbits == (size_t)-1 ||
+ if (xbits == (size_t)-1 ||
(xbits > BIGLEN_LIMIT) ||
(xbits * yy > BIGLEN_LIMIT)) {
- rb_warn("in a**b, b may be too big");
- d = (double)yy;
- }
- else {
- for (mask = FIXNUM_MAX + 1; mask; mask >>= 1) {
- if (z) z = bigsq(z);
- if (yy & mask) {
- z = z ? bigtrunc(bigmul0(z, x)) : x;
- }
- }
- return bignorm(z);
- }
- }
+ rb_warn("in a**b, b may be too big");
+ d = (double)yy;
+ }
+ else {
+ for (mask = FIXNUM_MAX + 1; mask; mask >>= 1) {
+ if (z) z = bigsq(z);
+ if (yy & mask) {
+ z = z ? bigtrunc(bigmul0(z, x)) : x;
+ }
+ }
+ return bignorm(z);
+ }
+ }
}
else {
- return rb_num_coerce_bin(x, y, idPow);
+ return rb_num_coerce_bin(x, y, idPow);
}
return DBL2NUM(pow(rb_big2dbl(x), d));
}
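
The reindented `mask` loop in `rb_big_pow` is left-to-right binary exponentiation (square-and-multiply), with `z == 0` doubling as "no partial result yet" so the leading squarings of 1 are skipped. The same shape on machine words, as a minimal sketch with `uint64_t` arithmetic standing in for `bigsq`/`bigmul0`:

```c
#include <stdint.h>

/* Scan the exponent's bits from the top: square the accumulator at
 * each step and multiply the base in whenever the bit is set.
 * Results wrap modulo 2^64 here; the bignum version is exact. */
static uint64_t
pow_u64(uint64_t x, uint64_t y)
{
    uint64_t z = 0;  /* 0 means "accumulator not started yet" */
    for (uint64_t mask = (uint64_t)1 << 63; mask; mask >>= 1) {
        if (z) z *= z;                    /* square */
        if (y & mask) z = z ? z * x : x;  /* multiply in this bit */
    }
    return z;  /* y == 0 yields 0; rb_big_pow handles that case earlier */
}
```
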
@@ -6329,8 +6333,8 @@ bigand_int(VALUE x, long xn, BDIGIT hibitsx, long y)
xds = BDIGITS(x);
#if SIZEOF_BDIGIT >= SIZEOF_LONG
if (!hibitsy) {
- y &= xds[0];
- return LONG2NUM(y);
+ y &= xds[0];
+ return LONG2NUM(y);
}
#endif
@@ -6359,10 +6363,10 @@ bigand_int(VALUE x, long xn, BDIGIT hibitsx, long y)
}
#endif
for (;i < xn; i++) {
- zds[i] = xds[i] & hibitsy;
+ zds[i] = xds[i] & hibitsy;
}
for (;i < zn; i++) {
- zds[i] = hibitsx & hibitsy;
+ zds[i] = hibitsx & hibitsy;
}
twocomp2abs_bang(z, hibitsx && hibitsy);
RB_GC_GUARD(x);
@@ -6382,12 +6386,12 @@ rb_big_and(VALUE x, VALUE y)
long tmpn;
if (!RB_INTEGER_TYPE_P(y)) {
- return rb_num_coerce_bit(x, y, '&');
+ return rb_num_coerce_bit(x, y, '&');
}
hibitsx = abs2twocomp(&x, &xn);
if (FIXNUM_P(y)) {
- return bigand_int(x, xn, hibitsx, FIX2LONG(y));
+ return bigand_int(x, xn, hibitsx, FIX2LONG(y));
}
hibitsy = abs2twocomp(&y, &yn);
if (xn > yn) {
@@ -6409,10 +6413,10 @@ rb_big_and(VALUE x, VALUE y)
zds = BDIGITS(z);
for (i=0; i<n1; i++) {
- zds[i] = ds1[i] & ds2[i];
+ zds[i] = ds1[i] & ds2[i];
}
for (; i<n2; i++) {
- zds[i] = hibits1 & ds2[i];
+ zds[i] = hibits1 & ds2[i];
}
twocomp2abs_bang(z, hibits1 && hibits2);
RB_GC_GUARD(x);
@@ -6501,12 +6505,12 @@ rb_big_or(VALUE x, VALUE y)
long tmpn;
if (!RB_INTEGER_TYPE_P(y)) {
- return rb_num_coerce_bit(x, y, '|');
+ return rb_num_coerce_bit(x, y, '|');
}
hibitsx = abs2twocomp(&x, &xn);
if (FIXNUM_P(y)) {
- return bigor_int(x, xn, hibitsx, FIX2LONG(y));
+ return bigor_int(x, xn, hibitsx, FIX2LONG(y));
}
hibitsy = abs2twocomp(&y, &yn);
if (xn > yn) {
@@ -6528,10 +6532,10 @@ rb_big_or(VALUE x, VALUE y)
zds = BDIGITS(z);
for (i=0; i<n1; i++) {
- zds[i] = ds1[i] | ds2[i];
+ zds[i] = ds1[i] | ds2[i];
}
for (; i<n2; i++) {
- zds[i] = hibits1 | ds2[i];
+ zds[i] = hibits1 | ds2[i];
}
twocomp2abs_bang(z, hibits1 || hibits2);
RB_GC_GUARD(x);
@@ -6595,12 +6599,12 @@ rb_big_xor(VALUE x, VALUE y)
long tmpn;
if (!RB_INTEGER_TYPE_P(y)) {
- return rb_num_coerce_bit(x, y, '^');
+ return rb_num_coerce_bit(x, y, '^');
}
hibitsx = abs2twocomp(&x, &xn);
if (FIXNUM_P(y)) {
- return bigxor_int(x, xn, hibitsx, FIX2LONG(y));
+ return bigxor_int(x, xn, hibitsx, FIX2LONG(y));
}
hibitsy = abs2twocomp(&y, &yn);
if (xn > yn) {
@@ -6619,10 +6623,10 @@ rb_big_xor(VALUE x, VALUE y)
zds = BDIGITS(z);
for (i=0; i<n1; i++) {
- zds[i] = ds1[i] ^ ds2[i];
+ zds[i] = ds1[i] ^ ds2[i];
}
for (; i<n2; i++) {
- zds[i] = hibitsx ^ ds2[i];
+ zds[i] = hibitsx ^ ds2[i];
}
twocomp2abs_bang(z, (hibits1 ^ hibits2) != 0);
RB_GC_GUARD(x);
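
All three bitwise operators lean on the same trick, visible in the paired loops above: `abs2twocomp` converts each operand to two's-complement digits plus a `hibits` word (0 for non-negative, `BDIGMAX` for negative), so past its last digit an operand behaves as an infinite run of `hibits` and the shorter one is extended for free. An illustration with 8-bit "digits" (real BDIGITs are wider):

```c
#include <stdint.h>
#include <stdio.h>

/* AND of 0x1234 with -3: y's missing high digit is its sign
 * extension 0xff (hibits), exactly what the second loop in
 * rb_big_and uses past the shorter operand's end. */
int main(void)
{
    uint8_t x[2] = {0x34, 0x12};  /* 0x1234, little-endian digits */
    uint8_t y[1] = {0xfd};        /* -3 in two's complement */
    uint8_t hibits_y = 0xff;      /* sign extension of y */
    uint8_t z[2];

    z[0] = x[0] & y[0];           /* digits both operands have */
    z[1] = x[1] & hibits_y;       /* y extended with hibits */

    printf("0x%02x%02x\n", z[1], z[0]);  /* 0x1230 == 0x1234 & -3 */
    return 0;
}
```
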
@@ -6638,25 +6642,25 @@ rb_big_lshift(VALUE x, VALUE y)
int shift_numbits;
for (;;) {
- if (FIXNUM_P(y)) {
- long l = FIX2LONG(y);
+ if (FIXNUM_P(y)) {
+ long l = FIX2LONG(y);
unsigned long shift;
- if (0 <= l) {
- lshift_p = 1;
+ if (0 <= l) {
+ lshift_p = 1;
shift = l;
}
else {
- lshift_p = 0;
- shift = 1+(unsigned long)(-(l+1));
- }
+ lshift_p = 0;
+ shift = 1+(unsigned long)(-(l+1));
+ }
shift_numbits = (int)(shift & (BITSPERDIG-1));
shift_numdigits = shift >> bit_length(BITSPERDIG-1);
return bignorm(big_shift3(x, lshift_p, shift_numdigits, shift_numbits));
- }
- else if (RB_BIGNUM_TYPE_P(y)) {
+ }
+ else if (RB_BIGNUM_TYPE_P(y)) {
return bignorm(big_shift2(x, 1, y));
- }
- y = rb_to_int(y);
+ }
+ y = rb_to_int(y);
}
}
@@ -6668,8 +6672,8 @@ rb_big_rshift(VALUE x, VALUE y)
int shift_numbits;
for (;;) {
- if (FIXNUM_P(y)) {
- long l = FIX2LONG(y);
+ if (FIXNUM_P(y)) {
+ long l = FIX2LONG(y);
unsigned long shift;
if (0 <= l) {
lshift_p = 0;
@@ -6677,16 +6681,16 @@ rb_big_rshift(VALUE x, VALUE y)
}
else {
lshift_p = 1;
- shift = 1+(unsigned long)(-(l+1));
- }
+ shift = 1+(unsigned long)(-(l+1));
+ }
shift_numbits = (int)(shift & (BITSPERDIG-1));
shift_numdigits = shift >> bit_length(BITSPERDIG-1);
return bignorm(big_shift3(x, lshift_p, shift_numdigits, shift_numbits));
- }
- else if (RB_BIGNUM_TYPE_P(y)) {
+ }
+ else if (RB_BIGNUM_TYPE_P(y)) {
return bignorm(big_shift2(x, 0, y));
- }
- y = rb_to_int(y);
+ }
+ y = rb_to_int(y);
}
}
@@ -6700,29 +6704,29 @@ rb_big_aref(VALUE x, VALUE y)
BDIGIT bit;
if (RB_BIGNUM_TYPE_P(y)) {
- if (BIGNUM_NEGATIVE_P(y))
- return INT2FIX(0);
- bigtrunc(y);
- if (BIGSIZE(y) > sizeof(size_t)) {
- out_of_range:
- return BIGNUM_SIGN(x) ? INT2FIX(0) : INT2FIX(1);
- }
+ if (BIGNUM_NEGATIVE_P(y))
+ return INT2FIX(0);
+ bigtrunc(y);
+ if (BIGSIZE(y) > sizeof(size_t)) {
+ return BIGNUM_SIGN(x) ? INT2FIX(0) : INT2FIX(1);
+ }
#if SIZEOF_SIZE_T <= SIZEOF_LONG
- shift = big2ulong(y, "long");
+ shift = big2ulong(y, "long");
#else
- shift = big2ull(y, "long long");
+ shift = big2ull(y, "long long");
#endif
}
else {
- l = NUM2LONG(y);
- if (l < 0) return INT2FIX(0);
- shift = (size_t)l;
+ l = NUM2LONG(y);
+ if (l < 0) return INT2FIX(0);
+ shift = (size_t)l;
}
s1 = shift/BITSPERDIG;
s2 = shift%BITSPERDIG;
bit = (BDIGIT)1 << s2;
- if (s1 >= BIGNUM_LEN(x)) goto out_of_range;
+ if (s1 >= BIGNUM_LEN(x))
+ return BIGNUM_SIGN(x) ? INT2FIX(0) : INT2FIX(1);
xds = BDIGITS(x);
if (BIGNUM_POSITIVE_P(x))
@@ -6746,14 +6750,15 @@ rb_big_hash(VALUE x)
/*
* call-seq:
- * big.coerce(numeric) -> array
+ * int.coerce(numeric) -> array
*
- * Returns an array with both a +numeric+ and a +big+ represented as Bignum
- * objects.
+ * Returns an array with both a +numeric+ and an +int+ represented as
+ * Integer objects or Float objects.
*
- * This is achieved by converting +numeric+ to a Bignum.
+ * This is achieved by converting +numeric+ to an Integer or a Float.
*
- * A TypeError is raised if the +numeric+ is not a Fixnum or Bignum type.
+ * A TypeError is raised if the +numeric+ is not an Integer or a Float
+ * type.
*
* (0x3FFFFFFFFFFFFFFF+1).coerce(42) #=> [42, 4611686018427387904]
*/
@@ -6775,8 +6780,8 @@ VALUE
rb_big_abs(VALUE x)
{
if (BIGNUM_NEGATIVE_P(x)) {
- x = rb_big_clone(x);
- BIGNUM_SET_POSITIVE_SIGN(x);
+ x = rb_big_clone(x);
+ BIGNUM_SET_POSITIVE_SIGN(x);
}
return x;
}
@@ -6843,17 +6848,14 @@ rb_big_bit_length(VALUE big)
VALUE
rb_big_odd_p(VALUE num)
{
- if (BIGNUM_LEN(num) != 0 && BDIGITS(num)[0] & 1) {
- return Qtrue;
- }
- return Qfalse;
+ return RBOOL(BIGNUM_LEN(num) != 0 && BDIGITS(num)[0] & 1);
}
VALUE
rb_big_even_p(VALUE num)
{
if (BIGNUM_LEN(num) != 0 && BDIGITS(num)[0] & 1) {
- return Qfalse;
+ return Qfalse;
}
return Qtrue;
}
@@ -6884,21 +6886,21 @@ estimate_initial_sqrt(VALUE *xp, const size_t xn, const BDIGIT *nds, size_t len)
double f;
if (rshift > 0) {
- lowbits = (BDIGIT)d & ~(~(BDIGIT)1U << rshift);
- d >>= rshift;
+ lowbits = (BDIGIT)d & ~(~(BDIGIT)1U << rshift);
+ d >>= rshift;
}
else if (rshift < 0) {
- d <<= -rshift;
- d |= nds[len-dbl_per_bdig-1] >> (BITSPERDIG+rshift);
+ d <<= -rshift;
+ d |= nds[len-dbl_per_bdig-1] >> (BITSPERDIG+rshift);
}
f = sqrt(BDIGIT_DBL_TO_DOUBLE(d));
d = (BDIGIT_DBL)ceil(f);
if (BDIGIT_DBL_TO_DOUBLE(d) == f) {
- if (lowbits || (lowbits = !bary_zero_p(nds, len-dbl_per_bdig)))
- ++d;
+ if (lowbits || (lowbits = !bary_zero_p(nds, len-dbl_per_bdig)))
+ ++d;
}
else {
- lowbits = 1;
+ lowbits = 1;
}
rshift /= 2;
rshift += (2-(len&1))*BITSPERDIG/2;
@@ -6930,37 +6932,35 @@ rb_big_isqrt(VALUE n)
BDIGIT *xds;
if (len <= 2) {
- BDIGIT sq = rb_bdigit_dbl_isqrt(bary2bdigitdbl(nds, len));
+ BDIGIT sq = rb_bdigit_dbl_isqrt(bary2bdigitdbl(nds, len));
#if SIZEOF_BDIGIT > SIZEOF_LONG
- return ULL2NUM(sq);
+ return ULL2NUM(sq);
#else
- return ULONG2NUM(sq);
+ return ULONG2NUM(sq);
#endif
}
else if ((xds = estimate_initial_sqrt(&x, xn, nds, len)) != 0) {
- size_t tn = xn + BIGDIVREM_EXTRA_WORDS;
- VALUE t = bignew_1(0, tn, 1);
- BDIGIT *tds = BDIGITS(t);
- tn = BIGNUM_LEN(t);
-
- /* t = n/x */
- while (bary_divmod_branch(tds, tn, NULL, 0, nds, len, xds, xn),
- bary_cmp(tds, tn, xds, xn) < 0) {
- int carry;
- BARY_TRUNC(tds, tn);
- /* x = (x+t)/2 */
- carry = bary_add(xds, xn, xds, xn, tds, tn);
- bary_small_rshift(xds, xds, xn, 1, carry);
- tn = BIGNUM_LEN(t);
- }
- rb_big_realloc(t, 0);
- rb_gc_force_recycle(t);
+ size_t tn = xn + BIGDIVREM_EXTRA_WORDS;
+ VALUE t = bignew_1(0, tn, 1);
+ BDIGIT *tds = BDIGITS(t);
+ tn = BIGNUM_LEN(t);
+
+ /* t = n/x */
+ while (bary_divmod_branch(tds, tn, NULL, 0, nds, len, xds, xn),
+ bary_cmp(tds, tn, xds, xn) < 0) {
+ int carry;
+ BARY_TRUNC(tds, tn);
+ /* x = (x+t)/2 */
+ carry = bary_add(xds, xn, xds, xn, tds, tn);
+ bary_small_rshift(xds, xds, xn, 1, carry);
+ tn = BIGNUM_LEN(t);
+ }
}
RBASIC_SET_CLASS_RAW(x, rb_cInteger);
return x;
}
-#ifdef USE_GMP
+#if USE_GMP
static void
bary_powm_gmp(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, const BDIGIT *yds, size_t yn, const BDIGIT *mds, size_t mn)
{
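The isqrt hunk above is Newton's method on BDIGIT arrays: compute t = n/x, then average x = (x+t)/2 until t >= x; the change also drops the rb_big_realloc/rb_gc_force_recycle pair on the temporary t, leaving it to the GC. A minimal pure-Integer sketch of the same iteration (the method name and initial-estimate choice are ours, not from the C source):

    def isqrt(n)
      return n if n < 2
      x = 1 << ((n.bit_length + 1) / 2)  # initial estimate, always >= sqrt(n)
      loop do
        t = n / x                        # t = n/x, as in the C loop
        break if t >= x                  # converged: x == floor(sqrt(n))
        x = (x + t) / 2                  # Newton step: x = (x+t)/2
      end
      x
    end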
@@ -6986,7 +6986,7 @@ bary_powm_gmp(BDIGIT *zds, size_t zn, const BDIGIT *xds, size_t xn, const BDIGIT
static VALUE
int_pow_tmp3(VALUE x, VALUE y, VALUE m, int nega_flg)
{
-#ifdef USE_GMP
+#if USE_GMP
VALUE z;
size_t xn, yn, mn, zn;
@@ -7152,6 +7152,7 @@ rb_int_powm(int const argc, VALUE * const argv, VALUE const num)
long const half_val = (long)HALF_LONG_MSB;
long const mm = FIX2LONG(m);
if (!mm) rb_num_zerodiv();
+ if (mm == 1) return INT2FIX(0);
if (mm <= half_val) {
return int_pow_tmp1(rb_int_modulo(a, m), b, mm, nega_flg);
}
@@ -7161,6 +7162,7 @@ rb_int_powm(int const argc, VALUE * const argv, VALUE const num)
}
else {
if (rb_bigzero_p(m)) rb_num_zerodiv();
+ if (bignorm(m) == INT2FIX(1)) return INT2FIX(0);
return int_pow_tmp3(rb_int_modulo(a, m), b, m, nega_flg);
}
}
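Both new early returns encode the same identity: any power taken modulo 1 is 0, so Integer#pow can skip the exponentiation loop entirely. Illustrative values:

    123456789.pow(987654321, 1)  #=> 0  (fixnum modulus, mm == 1 path)
    (2**128).pow(3, 1)           #=> 0  (bignum modulus, bignorm(m) == INT2FIX(1) path)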
@@ -7188,13 +7190,9 @@ rb_int_powm(int const argc, VALUE * const argv, VALUE const num)
void
Init_Bignum(void)
{
- /* An obsolete class, use Integer */
- rb_define_const(rb_cObject, "Bignum", rb_cInteger);
- rb_deprecate_constant(rb_cObject, "Bignum");
-
rb_define_method(rb_cInteger, "coerce", rb_int_coerce, 1);
-#ifdef USE_GMP
+#if USE_GMP
/* The version of loaded GMP. */
rb_define_const(rb_cInteger, "GMP_VERSION", rb_sprintf("GMP %s", gmp_version));
#endif
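On builds configured with GMP, the constant defined above becomes visible from Ruby; elsewhere it is absent, hence the guard in this sketch (the version string is illustrative):

    if Integer.const_defined?(:GMP_VERSION)
      Integer::GMP_VERSION  #=> e.g. "GMP 6.2.1"
    end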
diff --git a/bin/bundle b/bin/bundle
deleted file mode 100755
index 1a0b06b005..0000000000
--- a/bin/bundle
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env ruby
-#
-# This file was generated by RubyGems.
-#
-# The application 'bundler' is installed as part of a gem, and
-# this file is here to facilitate running it.
-#
-
-require 'rubygems'
-
-version = ">= 0.a"
-
-str = ARGV.first
-if str
- str = str.b[/\A_(.*)_\z/, 1]
- if str and Gem::Version.correct?(str)
- version = str
- ARGV.shift
- end
-end
-
-if Gem.respond_to?(:activate_bin_path)
-load Gem.activate_bin_path('bundler', 'bundle', version)
-else
-gem "bundler", version
-load Gem.bin_path("bundler", "bundle", version)
-end
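All of the deleted binstubs (bin/bundle, bin/bundler, bin/erb, bin/irb, bin/racc, bin/rdoc, bin/ri) share one RubyGems convention: a first argument of the form _X.Y.Z_ is consumed as an exact gem version request before the real CLI sees ARGV. The trick in isolation (a sketch assuming a modern RubyGems, not the generated file itself):

    # $ bundle _2.3.26_ install  -> activates bundler 2.3.26, then runs "install"
    require "rubygems"
    version = ">= 0.a"
    if (str = ARGV.first) && (v = str.b[/\A_(.*)_\z/, 1]) && Gem::Version.correct?(v)
      version = v
      ARGV.shift
    end
    load Gem.activate_bin_path("bundler", "bundle", version)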
diff --git a/bin/bundler b/bin/bundler
deleted file mode 100755
index e15eb39ed7..0000000000
--- a/bin/bundler
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env ruby
-#
-# This file was generated by RubyGems.
-#
-# The application 'bundler' is installed as part of a gem, and
-# this file is here to facilitate running it.
-#
-
-require 'rubygems'
-
-version = ">= 0.a"
-
-str = ARGV.first
-if str
- str = str.b[/\A_(.*)_\z/, 1]
- if str and Gem::Version.correct?(str)
- version = str
- ARGV.shift
- end
-end
-
-if Gem.respond_to?(:activate_bin_path)
-load Gem.activate_bin_path('bundler', 'bundler', version)
-else
-gem "bundler", version
-load Gem.bin_path("bundler", "bundler", version)
-end
diff --git a/bin/erb b/bin/erb
deleted file mode 100755
index 2435224fe1..0000000000
--- a/bin/erb
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/usr/bin/env ruby
-# Tiny eRuby --- ERB2
-# Copyright (c) 1999-2000,2002 Masatoshi SEKI
-# You can redistribute it and/or modify it under the same terms as Ruby.
-
-require 'erb'
-
-class ERB
- module Main
- def ARGV.switch
- return nil if self.empty?
- arg = self.shift
- return nil if arg == '--'
- case arg
- when /\A-(.)(.*)/
- if $1 == '-'
- arg, @maybe_arg = arg.split(/=/, 2)
- return arg
- end
- raise 'unknown switch "-"' if $2[0] == ?- and $1 != 'T'
- if $2.size > 0
- self.unshift "-#{$2}"
- @maybe_arg = $2
- else
- @maybe_arg = nil
- end
- "-#{$1}"
- when /\A(\w+)=/
- arg
- else
- self.unshift arg
- nil
- end
- end
-
- def ARGV.req_arg
- (@maybe_arg || self.shift || raise('missing argument')).tap {
- @maybe_arg = nil
- }
- end
-
- def trim_mode_opt(trim_mode, disable_percent)
- return trim_mode if disable_percent
- case trim_mode
- when 0
- return '%'
- when 1
- return '%>'
- when 2
- return '%<>'
- when '-'
- return '%-'
- end
- end
- module_function :trim_mode_opt
-
- def run(factory=ERB)
- trim_mode = 0
- disable_percent = false
- variables = {}
- begin
- while switch = ARGV.switch
- case switch
- when '-x' # ruby source
- output = true
- when '-n' # line number
- number = true
- when '-v' # verbose
- $VERBOSE = true
- when '--version' # version
- STDERR.puts factory.version
- exit
- when '-d', '--debug' # debug
- $DEBUG = true
- when '-r' # require
- require ARGV.req_arg
- when '-S' # security level
- warn 'warning: -S option of erb command is deprecated. Please do not use this.'
- arg = ARGV.req_arg
- raise "invalid safe_level #{arg.dump}" unless arg =~ /\A[0-1]\z/
- safe_level = arg.to_i
- when '-T' # trim mode
- arg = ARGV.req_arg
- if arg == '-'
- trim_mode = arg
- next
- end
- raise "invalid trim mode #{arg.dump}" unless arg =~ /\A[0-2]\z/
- trim_mode = arg.to_i
- when '-E', '--encoding'
- arg = ARGV.req_arg
- set_encoding(*arg.split(/:/, 2))
- when '-U'
- set_encoding(Encoding::UTF_8, Encoding::UTF_8)
- when '-P'
- disable_percent = true
- when '--help'
- raise "print this help"
- when /\A-/
- raise "unknown switch #{switch.dump}"
- else
- var, val = *switch.split('=', 2)
- (variables ||= {})[var] = val
- end
- end
- rescue # usage
- STDERR.puts $!.to_s
- STDERR.puts File.basename($0) +
- " [switches] [var=value...] [inputfile]"
- STDERR.puts <<EOU
- -x print ruby script
- -n print ruby script with line number
- -v enable verbose mode
- -d set $DEBUG to true
- -r library load a library
- -E ex[:in] set default external/internal encodings
- -U set default encoding to UTF-8
- -T trim_mode specify trim_mode (0..2, -)
- -P disable ruby code evaluation for lines beginning with %
- var=value set variable
-EOU
- exit 1
- end
-
- $<.set_encoding(Encoding::UTF_8, nil)
- src = $<.read
- filename = $FILENAME
- exit 2 unless src
- trim = trim_mode_opt(trim_mode, disable_percent)
- if safe_level.nil?
- erb = factory.new(src, trim_mode: trim)
- else
- # [deprecated] This will be removed at Ruby 2.7.
- erb = factory.new(src, safe_level, trim_mode: trim)
- end
- erb.filename = filename
- if output
- if number
- erb.src.each_line.with_index do |line, l|
- puts "%3d %s"%[l+1, line]
- end
- else
- puts erb.src
- end
- else
- bind = TOPLEVEL_BINDING
- if variables
- enc = erb.encoding
- for var, val in variables do
- val = val.encode(enc) if val
- bind.local_variable_set(var, val)
- end
- end
- erb.run(bind)
- end
- end
- module_function :run
-
- def set_encoding(extern, intern = nil)
- verbose, $VERBOSE = $VERBOSE, nil
- Encoding.default_external = extern unless extern.nil? || extern == ""
- Encoding.default_internal = intern unless intern.nil? || intern == ""
- [$stdin, $stdout, $stderr].each do |io|
- io.set_encoding(extern, intern)
- end
- ensure
- $VERBOSE = verbose
- end
- module_function :set_encoding
- class << self; private :set_encoding; end
- end
-end
-
-if __FILE__ == $0
- ERB::Main.run
-end
diff --git a/bin/gem b/bin/gem
index a4ec754abb..1c16ea7ddd 100755
--- a/bin/gem
+++ b/bin/gem
@@ -5,21 +5,6 @@
# See LICENSE.txt for permissions.
#++
-require 'rubygems'
-require 'rubygems/gem_runner'
-require 'rubygems/exceptions'
-
-required_version = Gem::Requirement.new ">= 1.8.7"
-
-unless required_version.satisfied_by? Gem.ruby_version then
- abort "Expected Ruby Version #{required_version}, is #{Gem.ruby_version}"
-end
-
-args = ARGV.clone
-
-begin
- Gem::GemRunner.new.run args
-rescue Gem::SystemExitException => e
- exit e.exit_code
-end
+require "rubygems/gem_runner"
+Gem::GemRunner.new.run ARGV.clone
diff --git a/bin/irb b/bin/irb
deleted file mode 100755
index ae6d358c9d..0000000000
--- a/bin/irb
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env ruby
-#
-# This file was generated by RubyGems.
-#
-# The application 'irb' is installed as part of a gem, and
-# this file is here to facilitate running it.
-#
-
-require 'rubygems'
-
-version = ">= 0.a"
-
-str = ARGV.first
-if str
- str = str.b[/\A_(.*)_\z/, 1]
- if str and Gem::Version.correct?(str)
- version = str
- ARGV.shift
- end
-end
-
-if Gem.respond_to?(:activate_bin_path)
-load Gem.activate_bin_path('irb', 'irb', version)
-else
-gem "irb", version
-load Gem.bin_path("irb", "irb", version)
-end
diff --git a/bin/racc b/bin/racc
deleted file mode 100755
index 3ddac532b4..0000000000
--- a/bin/racc
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env ruby
-#
-# This file was generated by RubyGems.
-#
-# The application 'racc' is installed as part of a gem, and
-# this file is here to facilitate running it.
-#
-
-require 'rubygems'
-
-version = ">= 0.a"
-
-if ARGV.first
- str = ARGV.first
- str = str.dup.force_encoding("BINARY") if str.respond_to? :force_encoding
- if str =~ /\A_(.*)_\z/ and Gem::Version.correct?($1) then
- version = $1
- ARGV.shift
- end
-end
-
-if Gem.respond_to?(:activate_bin_path)
-load Gem.activate_bin_path('racc', 'racc', version)
-else
-gem "racc", version
-load Gem.bin_path("racc", "racc", version)
-end
diff --git a/bin/rdoc b/bin/rdoc
deleted file mode 100755
index 8fa948cddb..0000000000
--- a/bin/rdoc
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env ruby
-#
-# This file was generated by RubyGems.
-#
-# The application 'rdoc' is installed as part of a gem, and
-# this file is here to facilitate running it.
-#
-
-require 'rubygems'
-
-version = ">= 0.a"
-
-str = ARGV.first
-if str
- str = str.b[/\A_(.*)_\z/, 1]
- if str and Gem::Version.correct?(str)
- version = str
- ARGV.shift
- end
-end
-
-if Gem.respond_to?(:activate_bin_path)
-load Gem.activate_bin_path('rdoc', 'rdoc', version)
-else
-gem "rdoc", version
-load Gem.bin_path("rdoc", "rdoc", version)
-end
diff --git a/bin/ri b/bin/ri
deleted file mode 100755
index 0cc2f73bb6..0000000000
--- a/bin/ri
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env ruby
-#
-# This file was generated by RubyGems.
-#
-# The application 'rdoc' is installed as part of a gem, and
-# this file is here to facilitate running it.
-#
-
-require 'rubygems'
-
-version = ">= 0.a"
-
-str = ARGV.first
-if str
- str = str.b[/\A_(.*)_\z/, 1]
- if str and Gem::Version.correct?(str)
- version = str
- ARGV.shift
- end
-end
-
-if Gem.respond_to?(:activate_bin_path)
-load Gem.activate_bin_path('rdoc', 'ri', version)
-else
-gem "rdoc", version
-load Gem.bin_path("rdoc", "ri", version)
-end
diff --git a/bootstraptest/runner.rb b/bootstraptest/runner.rb
index 56b4b12230..f9b3e919b8 100755
--- a/bootstraptest/runner.rb
+++ b/bootstraptest/runner.rb
@@ -8,6 +8,8 @@
# Never use Ruby extensions in this file.
# Maintain Ruby 1.8 compatibility for now
+$start_time = Time.now
+
begin
require 'fileutils'
require 'tmpdir'
@@ -58,24 +60,114 @@ if !Dir.respond_to?(:mktmpdir)
end
end
+# Configuration
+bt = Struct.new(:ruby,
+ :verbose,
+ :color,
+ :tty,
+ :quiet,
+ :wn,
+ :progress,
+ :progress_bs,
+ :passed,
+ :failed,
+ :reset,
+ :columns,
+ :window_width,
+ :width,
+ :indent,
+ :platform,
+ )
+BT = Class.new(bt) do
+ def indent=(n)
+ super
+ if (self.columns ||= 0) < n
+ $stderr.print(' ' * (n - self.columns))
+ end
+ self.columns = indent
+ end
+
+ def putc(c)
+ unless self.quiet
+ if self.window_width == nil
+ unless w = ENV["COLUMNS"] and (w = w.to_i) > 0
+ w = 80
+ end
+ w -= 1
+ self.window_width = w
+ end
+ if self.window_width and self.columns >= self.window_width
+ $stderr.print "\n", " " * (self.indent ||= 0)
+ self.columns = indent
+ end
+ $stderr.print c
+ $stderr.flush
+ self.columns += 1
+ end
+ end
+
+ def wn=(wn)
+ unless wn == 1
+ if /(?:\A|\s)--jobserver-(?:auth|fds)=(?:(\d+),(\d+)|fifo:((?:\\.|\S)+))/ =~ ENV.delete("MAKEFLAGS")
+ begin
+ if fifo = $3
+ fifo.gsub!(/\\(?=.)/, '')
+ r = File.open(fifo, IO::RDONLY|IO::NONBLOCK|IO::BINARY)
+ w = File.open(fifo, IO::WRONLY|IO::NONBLOCK|IO::BINARY)
+ else
+ r = IO.for_fd($1.to_i(10), "rb", autoclose: false)
+ w = IO.for_fd($2.to_i(10), "wb", autoclose: false)
+ end
+ rescue => e
+ r.close if r
+ else
+ r.close_on_exec = true
+ w.close_on_exec = true
+ tokens = r.read_nonblock(wn > 0 ? wn : 1024, exception: false)
+ r.close
+ if String === tokens
+ tokens.freeze
+ auth = w
+ w = nil
+ at_exit {auth << tokens; auth.close}
+ wn = tokens.size + 1
+ else
+ w.close
+ wn = 1
+ end
+ end
+ end
+ if wn <= 0
+ require 'etc'
+ wn = [Etc.nprocessors / 2, 1].max
+ end
+ end
+ super wn
+ end
+end.new
+
+BT_STATE = Struct.new(:count, :error).new
+
def main
- @ruby = File.expand_path('miniruby')
- @verbose = false
+ BT.ruby = File.expand_path('miniruby')
+ BT.verbose = false
$VERBOSE = false
$stress = false
- @color = nil
- @tty = nil
- @quiet = false
+ BT.color = nil
+ BT.tty = nil
+ BT.quiet = false
+ # BT.wn = 1
dir = nil
quiet = false
tests = nil
ARGV.delete_if {|arg|
case arg
when /\A--ruby=(.*)/
- @ruby = $1
- @ruby.gsub!(/^([^ ]*)/){File.expand_path($1)}
- @ruby.gsub!(/(\s+-I\s*)((?!(?:\.\/)*-(?:\s|\z))\S+)/){$1+File.expand_path($2)}
- @ruby.gsub!(/(\s+-r\s*)(\.\.?\/\S+)/){$1+File.expand_path($2)}
+ ruby = $1
+ ruby.gsub!(/^([^ ]*)/){File.expand_path($1)}
+ ruby.gsub!(/(\s+-I\s*)((?!(?:\.\/)*-(?:\s|\z))\S+)/){$1+File.expand_path($2)}
+ ruby.gsub!(/(\s+-r\s*)(\.\.?\/\S+)/){$1+File.expand_path($2)}
+ BT.ruby = ruby
true
when /\A--sets=(.*)/
tests = Dir.glob("#{File.dirname($0)}/test_{#{$1}}*.rb").sort
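The BT#wn= writer above cooperates with GNU make's jobserver: MAKEFLAGS carries either an fd pair (--jobserver-auth=R,W) or a fifo path, every parallel job beyond the first must hold a token read from it, and the tokens are returned on exit. The fd-pair half, reduced to a sketch (error handling and the fifo: form omitted):

    if /--jobserver-auth=(\d+),(\d+)/ =~ ENV["MAKEFLAGS"]
      r = IO.for_fd($1.to_i, "rb", autoclose: false)
      w = IO.for_fd($2.to_i, "wb", autoclose: false)
      tokens = r.read_nonblock(1024, exception: false)  # grab whatever slots are free
      r.close
      if String === tokens
        at_exit { w << tokens; w.close }  # hand the tokens back to make on exit
        wn = tokens.size + 1              # tokens held + our own implicit slot
      end
    end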
@@ -88,18 +180,23 @@ def main
$stress = true
when /\A--color(?:=(?:always|(auto)|(never)|(.*)))?\z/
warn "unknown --color argument: #$3" if $3
- @color = $1 ? nil : !$2
+ BT.color = color = $1 ? nil : !$2
true
when /\A--tty(=(?:yes|(no)|(.*)))?\z/
warn "unknown --tty argument: #$3" if $3
- @tty = !$1 || !$2
+ BT.tty = !$1 || !$2
true
when /\A(-q|--q(uiet))\z/
quiet = true
- @quiet = true
+ BT.quiet = true
+ true
+ when /\A-j(\d+)?/
+ BT.wn = $1.to_i
true
when /\A(-v|--v(erbose))\z/
- @verbose = true
+ BT.verbose = true
+ BT.quiet = false
+ true
when /\A(-h|--h(elp)?)\z/
puts(<<-End)
Usage: #{File.basename($0, '.*')} --ruby=PATH [--sets=NAME,NAME,...]
@@ -121,22 +218,23 @@ End
end
}
if tests and not ARGV.empty?
- $stderr.puts "--tests and arguments are exclusive"
- exit false
+ abort "--sets and arguments are exclusive"
end
tests ||= ARGV
tests = Dir.glob("#{File.dirname($0)}/test_*.rb").sort if tests.empty?
pathes = tests.map {|path| File.expand_path(path) }
- @progress = %w[- \\ | /]
- @progress_bs = "\b" * @progress[0].size
- @tty = $stderr.tty? if @tty.nil?
- case @color
+ BT.progress = %w[- \\ | /]
+ BT.progress_bs = "\b" * BT.progress[0].size
+ BT.tty = $stderr.tty? if BT.tty.nil?
+ BT.wn ||= /-j(\d+)?/ =~ (ENV["MAKEFLAGS"] || ENV["MFLAGS"]) ? $1.to_i : 1
+
+ case BT.color
when nil
- @color = @tty && /dumb/ !~ ENV["TERM"]
+ BT.color = BT.tty && /dumb/ !~ ENV["TERM"]
end
- @tty &&= !@verbose
- if @color
+ BT.tty &&= !BT.verbose
+ if BT.color
# dircolors-like style
colors = (colors = ENV['TEST_COLORS']) ? Hash[colors.scan(/(\w+)=([^:\n]*)/)] : {}
begin
@@ -145,14 +243,16 @@ End
end
rescue
end
- @passed = "\e[;#{colors["pass"] || "32"}m"
- @failed = "\e[;#{colors["fail"] || "31"}m"
- @reset = "\e[m"
+ BT.passed = "\e[;#{colors["pass"] || "32"}m"
+ BT.failed = "\e[;#{colors["fail"] || "31"}m"
+ BT.reset = "\e[m"
else
- @passed = @failed = @reset = ""
+ BT.passed = BT.failed = BT.reset = ""
end
+ target_version = `#{BT.ruby} -v`.chomp
+ BT.platform = target_version[/\[(.*)\]\z/, 1]
unless quiet
- puts Time.now
+ puts $start_time
if defined?(RUBY_DESCRIPTION)
puts "Driver is #{RUBY_DESCRIPTION}"
elsif defined?(RUBY_PATCHLEVEL)
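The TEST_COLORS parsing above accepts a dircolors-like key=SGR list; for example (the values here are illustrative):

    # TEST_COLORS='pass=32:fail=31;1' make btest
    colors = Hash["pass=32:fail=31;1".scan(/(\w+)=([^:\n]*)/)]
    colors["pass"]  #=> "32"   (green)
    colors["fail"]  #=> "31;1" (bold red)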
@@ -160,265 +260,469 @@ End
else
puts "Driver is ruby #{RUBY_VERSION} (#{RUBY_RELEASE_DATE}) [#{RUBY_PLATFORM}]"
end
- puts "Target is #{`#{@ruby} -v`.chomp}"
+ puts "Target is #{target_version}"
puts
$stdout.flush
end
- in_temporary_working_directory(dir) {
+ in_temporary_working_directory(dir) do
exec_test pathes
- }
+ end
end
def erase(e = true)
- if e and @columns > 0 and @tty and !@verbose
+ if e and BT.columns > 0 and BT.tty and !BT.verbose
"\e[1K\r"
else
""
end
end
-def exec_test(pathes)
- @count = 0
- @error = 0
- @errbuf = []
- @location = nil
- @columns = 0
- @width = pathes.map {|path| File.basename(path).size}.max + 2
+def load_test pathes
pathes.each do |path|
- @basename = File.basename(path)
- $stderr.printf("%s%-*s ", erase(@quiet), @width, @basename)
- $stderr.flush
- @columns = @width + 1
- $stderr.puts if @verbose
- count = @count
- error = @error
load File.expand_path(path)
- if @tty
- if @error == error
- msg = "PASS #{@count-count}"
- @columns += msg.size - 1
- $stderr.print "#{@progress_bs}#{@passed}#{msg}#{@reset}"
+ end
+end
+
+def concurrent_exec_test
+ aq = Queue.new
+ rq = Queue.new
+
+ ts = BT.wn.times.map do
+ Thread.new do
+ while as = aq.pop
+ as.call
+ rq << as
+ end
+ ensure
+ rq << nil
+ end
+ end
+
+ Assertion.all.to_a.shuffle.each do |path, assertions|
+ assertions.each do |as|
+ aq << as
+ end
+ end
+
+ BT.indent = 1
+ aq.close
+ i = 1
+ term_wn = 0
+ begin
+ while BT.wn != term_wn
+ if r = rq.pop
+ case
+ when BT.quiet
+ when BT.tty
+ $stderr.print "#{BT.progress_bs}#{BT.progress[(i+=1) % BT.progress.size]}"
+ else
+ BT.putc '.'
+ end
else
- msg = "FAIL #{@error-error}/#{@count-count}"
- $stderr.print "#{@progress_bs}#{@failed}#{msg}#{@reset}"
- @columns = 0
+ term_wn += 1
+ end
+ end
+ ensure
+ ts.each(&:kill)
+ ts.each(&:join)
+ end
+end
+
+def exec_test(pathes)
+ # setup
+ load_test pathes
+ BT_STATE.count = 0
+ BT_STATE.error = 0
+ BT.columns = 0
+ BT.width = pathes.map {|path| File.basename(path).size}.max + 2
+
+ # execute tests
+ if BT.wn > 1
+ concurrent_exec_test
+ else
+ prev_basename = nil
+ Assertion.all.each do |basename, assertions|
+ if !BT.quiet && basename != prev_basename
+ prev_basename = basename
+ $stderr.printf("%s%-*s ", erase(BT.quiet), BT.width, basename)
+ $stderr.flush
+ end
+ BT.columns = BT.width + 1
+ $stderr.puts if BT.verbose
+ count = BT_STATE.count
+ error = BT_STATE.error
+
+ assertions.each do |assertion|
+ BT_STATE.count += 1
+ assertion.call
+ end
+
+ if BT.tty
+ if BT_STATE.error == error
+ msg = "PASS #{BT_STATE.count-count}"
+ BT.columns += msg.size - 1
+ $stderr.print "#{BT.progress_bs}#{BT.passed}#{msg}#{BT.reset}" unless BT.quiet
+ else
+ msg = "FAIL #{BT_STATE.error-error}/#{BT_STATE.count-count}"
+ $stderr.print "#{BT.progress_bs}#{BT.failed}#{msg}#{BT.reset}"
+ BT.columns = 0
+ end
end
+ $stderr.puts if !BT.quiet and (BT.tty or BT_STATE.error == error)
end
- $stderr.puts unless @quiet and @tty and @error == error
end
- $stderr.print(erase) if @quiet
- @errbuf.each do |msg|
+
+ # show results
+ unless BT.quiet
+ $stderr.puts(erase)
+
+ sec = Time.now - $start_time
+ $stderr.puts "Finished in #{'%.2f' % sec} sec\n\n" if Assertion.count > 0
+ end
+
+ Assertion.errbuf.each do |msg|
$stderr.puts msg
end
- if @error == 0
- if @count == 0
- $stderr.puts "No tests, no problem"
+
+ out = BT.quiet ? $stdout : $stderr
+
+ if BT_STATE.error == 0
+ if Assertion.count == 0
+ out.puts "No tests, no problem" unless BT.quiet
else
- $stderr.puts "#{@passed}PASS#{@reset} all #{@count} tests"
+ out.puts "#{BT.passed}PASS#{BT.reset} all #{Assertion.count} tests"
end
- exit true
+ true
else
- $stderr.puts "#{@failed}FAIL#{@reset} #{@error}/#{@count} tests failed"
- exit false
+ $stderr.puts "#{BT.failed}FAIL#{BT.reset} #{BT_STATE.error}/#{BT_STATE.count} tests failed"
+ false
end
end
-def show_progress(message = '')
- if @verbose
- $stderr.print "\##{@count} #{@location} "
- elsif @tty
- $stderr.print "#{@progress_bs}#{@progress[@count % @progress.size]}"
- end
- t = Time.now if @verbose
- faildesc, errout = with_stderr {yield}
- t = Time.now - t if @verbose
- if !faildesc
- if @tty
- $stderr.print "#{@progress_bs}#{@progress[@count % @progress.size]}"
- elsif @verbose
- $stderr.printf(". %.3f\n", t)
+def target_platform
+ BT.platform or RUBY_PLATFORM
+end
+
+class Assertion < Struct.new(:src, :path, :lineno, :proc)
+ @count = 0
+ @all = Hash.new{|h, k| h[k] = []}
+ @errbuf = []
+
+ class << self
+ attr_reader :count, :errbuf
+
+ def all
+ @all
+ end
+
+ def add as
+ @all[as.path] << as
+ as.id = (@count += 1)
+ end
+ end
+
+ attr_accessor :id
+ attr_reader :err, :category
+
+ def initialize(*args)
+ super
+ self.class.add self
+ @category = self.path.match(/test_(.+)\.rb/)[1]
+ end
+
+ def call
+ self.proc.call self
+ end
+
+ def assert_check(message = '', opt = '', **argh)
+ show_progress(message) {
+ result = get_result_string(opt, **argh)
+ yield(result)
+ }
+ end
+
+ def with_stderr
+ out = err = nil
+ r, w = IO.pipe
+ @err = w
+ err_reader = Thread.new{ r.read }
+
+ begin
+ out = yield
+ ensure
+ w.close
+ err = err_reader.value
+ r.close rescue nil
+ end
+
+ return out, err
+ end
+
+ def show_error(msg, additional_message)
+ msg = "#{BT.failed}\##{self.id} #{self.path}:#{self.lineno}#{BT.reset}: #{msg} #{additional_message}"
+ if BT.tty
+ $stderr.puts "#{erase}#{msg}"
else
- $stderr.print '.'
+ Assertion.errbuf << msg
end
- else
- $stderr.print "#{@failed}F"
- $stderr.printf(" %.3f", t) if @verbose
- $stderr.print @reset
- $stderr.puts if @verbose
- error faildesc, message
- unless errout.empty?
- $stderr.print "#{@failed}stderr output is not empty#{@reset}\n", adjust_indent(errout)
+ BT_STATE.error += 1
+ end
+
+
+ def show_progress(message = '')
+ if BT.quiet || BT.wn > 1
+ # do nothing
+ elsif BT.verbose
+ $stderr.print "\##{@id} #{self.path}:#{self.lineno} "
+ elsif BT.tty
+ $stderr.print "#{BT.progress_bs}#{BT.progress[BT_STATE.count % BT.progress.size]}"
+ end
+
+ t = Time.now if BT.verbose
+ faildesc, errout = with_stderr {yield}
+ t = Time.now - t if BT.verbose
+
+ if !faildesc
+ # success
+ if BT.quiet || BT.wn > 1
+ # do nothing
+ elsif BT.tty
+ $stderr.print "#{BT.progress_bs}#{BT.progress[BT_STATE.count % BT.progress.size]}"
+ elsif BT.verbose
+ $stderr.printf(". %.3f\n", t)
+ else
+ BT.putc '.'
+ end
+ else
+ $stderr.print "#{BT.failed}F"
+ $stderr.printf(" %.3f", t) if BT.verbose
+ $stderr.print BT.reset
+ $stderr.puts if BT.verbose
+ show_error faildesc, message
+ unless errout.empty?
+ $stderr.print "#{BT.failed}stderr output is not empty#{BT.reset}\n", adjust_indent(errout)
+ end
+
+ if BT.tty and !BT.verbose and BT.wn == 1
+ $stderr.printf("%-*s%s", BT.width, path, BT.progress[BT_STATE.count % BT.progress.size])
+ end
end
- if @tty and !@verbose
- $stderr.printf("%-*s%s", @width, @basename, @progress[@count % @progress.size])
+ rescue Interrupt
+ $stderr.puts "\##{@id} #{path}:#{lineno}"
+ raise
+ rescue Exception => err
+ $stderr.print 'E'
+ $stderr.puts if BT.verbose
+ show_error err.message, message
+ ensure
+ begin
+ check_coredump
+ rescue CoreDumpError => err
+ $stderr.print 'E'
+ $stderr.puts if BT.verbose
+ show_error err.message, message
+ cleanup_coredump
end
end
-rescue Interrupt
- $stderr.puts "\##{@count} #{@location}"
- raise
-rescue Exception => err
- $stderr.print 'E'
- $stderr.puts if @verbose
- error err.message, message
-end
-def show_limit(testsrc, opt = '', **argh)
- result = get_result_string(testsrc, opt, **argh)
- if @tty and @verbose
- $stderr.puts ".{#@reset}\n#{erase}#{result}"
- else
- @errbuf.push result
+ def get_result_string(opt = '', **argh)
+ if BT.ruby
+ filename = make_srcfile(**argh)
+ begin
+ kw = self.err ? {err: self.err} : {}
+ out = IO.popen("#{BT.ruby} -W0 #{opt} #{filename}", **kw)
+ pid = out.pid
+ out.read.tap{ Process.waitpid(pid); out.close }
+ ensure
+ raise Interrupt if $? and $?.signaled? && $?.termsig == Signal.list["INT"]
+
+ begin
+ Process.kill :KILL, pid
+ rescue Errno::ESRCH
+ # OK
+ end
+ end
+ else
+ eval(src).to_s
+ end
+ end
+
+ def make_srcfile(frozen_string_literal: nil)
+ filename = "bootstraptest.#{self.path}_#{self.lineno}_#{self.id}.rb"
+ File.open(filename, 'w') {|f|
+ f.puts "#frozen_string_literal:true" if frozen_string_literal
+ f.puts "GC.stress = true" if $stress
+ f.puts "print(begin; #{self.src}; end)"
+ }
+ filename
end
end
-def assert_check(testsrc, message = '', opt = '', **argh)
- show_progress(message) {
- result = get_result_string(testsrc, opt, **argh)
- check_coredump
- yield(result)
- }
+def add_assertion src, pr
+ loc = caller_locations(2, 1).first
+ lineno = loc.lineno
+ path = File.basename(loc.path)
+
+ Assertion.new(src, path, lineno, pr)
end
def assert_equal(expected, testsrc, message = '', opt = '', **argh)
- newtest
- assert_check(testsrc, message, opt, **argh) {|result|
- if expected == result
- nil
- else
- desc = "#{result.inspect} (expected #{expected.inspect})"
- pretty(testsrc, desc, result)
- end
- }
+ add_assertion testsrc, -> as do
+ as.assert_check(message, opt, **argh) {|result|
+ if expected == result
+ nil
+ else
+ desc = "#{result.inspect} (expected #{expected.inspect})"
+ pretty(testsrc, desc, result)
+ end
+ }
+ end
end
def assert_match(expected_pattern, testsrc, message = '')
- newtest
- assert_check(testsrc, message) {|result|
- if expected_pattern =~ result
- nil
- else
- desc = "#{expected_pattern.inspect} expected to be =~\n#{result.inspect}"
- pretty(testsrc, desc, result)
- end
- }
+ add_assertion testsrc, -> as do
+ as.assert_check(message) {|result|
+ if expected_pattern =~ result
+ nil
+ else
+ desc = "#{expected_pattern.inspect} expected to be =~\n#{result.inspect}"
+ pretty(testsrc, desc, result)
+ end
+ }
+ end
end
def assert_not_match(unexpected_pattern, testsrc, message = '')
- newtest
- assert_check(testsrc, message) {|result|
- if unexpected_pattern !~ result
- nil
- else
- desc = "#{unexpected_pattern.inspect} expected to be !~\n#{result.inspect}"
- pretty(testsrc, desc, result)
- end
- }
+ add_assertion testsrc, -> as do
+ as.assert_check(message) {|result|
+ if unexpected_pattern !~ result
+ nil
+ else
+ desc = "#{unexpected_pattern.inspect} expected to be !~\n#{result.inspect}"
+ pretty(testsrc, desc, result)
+ end
+ }
+ end
end
def assert_valid_syntax(testsrc, message = '')
- newtest
- assert_check(testsrc, message, '-c') {|result|
- result if /Syntax OK/ !~ result
- }
+ add_assertion testsrc, -> as do
+ as.assert_check(message, '-c') {|result|
+ result if /Syntax OK/ !~ result
+ }
+ end
end
def assert_normal_exit(testsrc, *rest, timeout: nil, **opt)
- newtest
- message, ignore_signals = rest
- message ||= ''
- show_progress(message) {
- faildesc = nil
- filename = make_srcfile(testsrc)
- old_stderr = $stderr.dup
- timeout_signaled = false
- begin
- $stderr.reopen("assert_normal_exit.log", "w")
- io = IO.popen("#{@ruby} -W0 #{filename}")
- pid = io.pid
- th = Thread.new {
- io.read
- io.close
- $?
- }
- if !th.join(timeout)
- Process.kill :KILL, pid
- timeout_signaled = true
- end
- status = th.value
- ensure
- $stderr.reopen(old_stderr)
- old_stderr.close
- end
- if status && status.signaled?
- signo = status.termsig
- signame = Signal.list.invert[signo]
- unless ignore_signals and ignore_signals.include?(signame)
- sigdesc = "signal #{signo}"
- if signame
- sigdesc = "SIG#{signame} (#{sigdesc})"
- end
- if timeout_signaled
- sigdesc << " (timeout)"
+ add_assertion testsrc, -> as do
+ message, ignore_signals = rest
+ message ||= ''
+ as.show_progress(message) {
+ faildesc = nil
+ filename = as.make_srcfile
+ timeout_signaled = false
+ logfile = "assert_normal_exit.#{as.path}.#{as.lineno}.log"
+
+ begin
+ err = open(logfile, "w")
+ io = IO.popen("#{BT.ruby} -W0 #{filename}", err: err)
+ pid = io.pid
+ th = Thread.new {
+ io.read
+ io.close
+ $?
+ }
+ if !th.join(timeout)
+ Process.kill :KILL, pid
+ timeout_signaled = true
end
- faildesc = pretty(testsrc, "killed by #{sigdesc}", nil)
- stderr_log = File.read("assert_normal_exit.log")
- if !stderr_log.empty?
- faildesc << "\n" if /\n\z/ !~ faildesc
- stderr_log << "\n" if /\n\z/ !~ stderr_log
- stderr_log.gsub!(/^.*\n/) { '| ' + $& }
- faildesc << stderr_log
+ status = th.value
+ ensure
+ err.close
+ end
+ if status && status.signaled?
+ signo = status.termsig
+ signame = Signal.list.invert[signo]
+ unless ignore_signals and ignore_signals.include?(signame)
+ sigdesc = "signal #{signo}"
+ if signame
+ sigdesc = "SIG#{signame} (#{sigdesc})"
+ end
+ if timeout_signaled
+ sigdesc << " (timeout)"
+ end
+ faildesc = pretty(testsrc, "killed by #{sigdesc}", nil)
+ stderr_log = File.read(logfile)
+ if !stderr_log.empty?
+ faildesc << "\n" if /\n\z/ !~ faildesc
+ stderr_log << "\n" if /\n\z/ !~ stderr_log
+ stderr_log.gsub!(/^.*\n/) { '| ' + $& }
+ faildesc << stderr_log
+ end
end
end
- end
- faildesc
- }
+ faildesc
+ }
+ end
end
def assert_finish(timeout_seconds, testsrc, message = '')
- if RubyVM.const_defined? :MJIT
- timeout_seconds *= 3 if RubyVM::MJIT.enabled? # for --jit-wait
- end
- newtest
- show_progress(message) {
- faildesc = nil
- filename = make_srcfile(testsrc)
- io = IO.popen("#{@ruby} -W0 #{filename}")
- pid = io.pid
- waited = false
- tlimit = Time.now + timeout_seconds
- diff = timeout_seconds
- while diff > 0
- if Process.waitpid pid, Process::WNOHANG
- waited = true
- break
- end
- if io.respond_to?(:read_nonblock)
- if IO.select([io], nil, nil, diff)
- begin
- io.read_nonblock(1024)
- rescue Errno::EAGAIN, IO::WaitReadable, EOFError
- break
- end while true
+ add_assertion testsrc, -> as do
+ if defined?(RubyVM::MJIT) && RubyVM::MJIT.enabled? # for --jit-wait
+ timeout_seconds *= 3
+ end
+
+ as.show_progress(message) {
+ faildesc = nil
+ filename = as.make_srcfile
+ io = IO.popen("#{BT.ruby} -W0 #{filename}", err: as.err)
+ pid = io.pid
+ waited = false
+ tlimit = Time.now + timeout_seconds
+ diff = timeout_seconds
+ while diff > 0
+ if Process.waitpid pid, Process::WNOHANG
+ waited = true
+ break
end
- else
- sleep 0.1
+ if io.respond_to?(:read_nonblock)
+ if IO.select([io], nil, nil, diff)
+ begin
+ io.read_nonblock(1024)
+ rescue Errno::EAGAIN, IO::WaitReadable, EOFError
+ break
+ end while true
+ end
+ else
+ sleep 0.1
+ end
+ diff = tlimit - Time.now
end
- diff = tlimit - Time.now
- end
- if !waited
- Process.kill(:KILL, pid)
- Process.waitpid pid
- faildesc = pretty(testsrc, "not finished in #{timeout_seconds} seconds", nil)
- end
- io.close
- faildesc
- }
+ if !waited
+ Process.kill(:KILL, pid)
+ Process.waitpid pid
+ faildesc = pretty(testsrc, "not finished in #{timeout_seconds} seconds", nil)
+ end
+ io.close
+ faildesc
+ }
+ end
end
def flunk(message = '')
- newtest
- show_progress('') { message }
+ add_assertion '', -> as do
+ as.show_progress('') { message }
+ end
+end
+
+def show_limit(testsrc, opt = '', **argh)
+ return if BT.quiet
+
+ add_assertion testsrc, -> as do
+ result = as.get_result_string(opt, **argh)
+ Assertion.errbuf << result
+ end
end
def pretty(src, desc, result)
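The shape of the rewrite in the hunks above: each assert_* call no longer runs inline but records an Assertion (source, path, line, proc), and exec_test later drains them either serially or through the worker threads of concurrent_exec_test. The core of that producer/consumer loop, reduced to a self-contained sketch:

    work    = Thread::Queue.new
    results = Thread::Queue.new
    workers = 4.times.map do
      Thread.new do
        while (job = work.pop)     # pop returns nil once the queue is closed and drained
          results << job.call
        end
      end
    end

    [-> { 1 + 1 }, -> { :ok }].each { |job| work << job }
    work.close
    workers.each(&:join)
    results.size  #=> 2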
@@ -436,67 +740,6 @@ def untabify(str)
str.gsub(/^\t+/) {' ' * (8 * $&.size) }
end
-def make_srcfile(src, frozen_string_literal: nil)
- filename = 'bootstraptest.tmp.rb'
- File.open(filename, 'w') {|f|
- f.puts "#frozen_string_literal:true" if frozen_string_literal
- f.puts "GC.stress = true" if $stress
- f.puts "print(begin; #{src}; end)"
- }
- filename
-end
-
-def get_result_string(src, opt = '', **argh)
- if @ruby
- filename = make_srcfile(src, **argh)
- begin
- `#{@ruby} -W0 #{opt} #{filename}`
- ensure
- raise Interrupt if $? and $?.signaled? && $?.termsig == Signal.list["INT"]
- raise CoreDumpError, "core dumped" if $? and $?.coredump?
- end
- else
- eval(src).to_s
- end
-end
-
-def with_stderr
- out = err = nil
- begin
- r, w = IO.pipe
- stderr = $stderr.dup
- $stderr.reopen(w)
- w.close
- reader = Thread.start {r.read}
- begin
- out = yield
- ensure
- $stderr.reopen(stderr)
- err = reader.value
- end
- ensure
- w.close rescue nil
- r.close rescue nil
- end
- return out, err
-end
-
-def newtest
- @location = File.basename(caller(2).first)
- @count += 1
- cleanup_coredump
-end
-
-def error(msg, additional_message)
- msg = "#{@failed}\##{@count} #{@location}#{@reset}: #{msg} #{additional_message}"
- if @tty
- $stderr.puts "#{erase}#{msg}"
- else
- @errbuf.push msg
- end
- @error += 1
-end
-
def in_temporary_working_directory(dir)
if dir
Dir.mkdir dir
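The deleted with_stderr above reopened the process-global $stderr, which cannot coexist with concurrent workers; its replacement (Assertion#with_stderr, earlier in this file) gives each child its own pipe via IO.popen's err: option. The mechanism in isolation:

    r, w = IO.pipe
    reader = Thread.new { r.read }
    out = IO.popen(["ruby", "-e", "warn 'boom'; puts 'ok'"], err: w, &:read)
    w.close                 # let the reader see EOF once the child has exited
    err = reader.value
    r.close
    [out, err]              #=> ["ok\n", "boom\n"]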
@@ -513,18 +756,32 @@ def in_temporary_working_directory(dir)
end
def cleanup_coredump
- FileUtils.rm_f 'core'
+ if File.file?('core')
+ require 'time'
+ Dir.glob('/tmp/bootstraptest-core.*').each do |f|
+ if Time.now - File.mtime(f) > 7 * 24 * 60 * 60 # 7 days
+ warn "Deleting an old core file: #{f}"
+ FileUtils.rm(f)
+ end
+ end
+ core_path = "/tmp/bootstraptest-core.#{Time.now.utc.iso8601}"
+ warn "A core file is found. Saving it at: #{core_path.dump}"
+ FileUtils.mv('core', core_path)
+ cmd = ['gdb', BT.ruby, '-c', core_path, '-ex', 'bt', '-batch']
+ p cmd # debugging why it's not working
+ system(*cmd)
+ end
FileUtils.rm_f Dir.glob('core.*')
- FileUtils.rm_f @ruby+'.stackdump' if @ruby
+ FileUtils.rm_f BT.ruby+'.stackdump' if BT.ruby
end
class CoreDumpError < StandardError; end
def check_coredump
if File.file?('core') or not Dir.glob('core.*').empty? or
- (@ruby and File.exist?(@ruby+'.stackdump'))
+ (BT.ruby and File.exist?(BT.ruby+'.stackdump'))
raise CoreDumpError, "core dumped"
end
end
-main
+exit main
diff --git a/bootstraptest/test_attr.rb b/bootstraptest/test_attr.rb
index 721a847145..3cb9d3eb39 100644
--- a/bootstraptest/test_attr.rb
+++ b/bootstraptest/test_attr.rb
@@ -34,3 +34,19 @@ assert_equal %{ok}, %{
print "ok"
end
}, '[ruby-core:15120]'
+
+assert_equal %{ok}, %{
+ class Big
+ attr_reader :foo
+ def initialize
+ @foo = "ok"
+ end
+ end
+
+ obj = Big.new
+ 100.times do |i|
+ obj.instance_variable_set(:"@ivar_\#{i}", i)
+ end
+
+ Big.new.foo
+}
diff --git a/bootstraptest/test_autoload.rb b/bootstraptest/test_autoload.rb
index a9f8e6dacd..9e0850bc52 100644
--- a/bootstraptest/test_autoload.rb
+++ b/bootstraptest/test_autoload.rb
@@ -1,7 +1,7 @@
assert_equal 'ok', %q{
- File.unlink('zzz.rb') if File.file?('zzz.rb')
+ File.unlink('zzz1.rb') if File.file?('zzz1.rb')
instance_eval do
- autoload :ZZZ, './zzz.rb'
+ autoload :ZZZ, './zzz1.rb'
begin
ZZZ
rescue LoadError
@@ -11,9 +11,9 @@ assert_equal 'ok', %q{
}, '[ruby-dev:43816]'
assert_equal 'ok', %q{
- open('zzz.rb', 'w') {|f| f.puts '' }
+ open('zzz2.rb', 'w') {|f| f.puts '' }
instance_eval do
- autoload :ZZZ, './zzz.rb'
+ autoload :ZZZ, './zzz2.rb'
begin
ZZZ
rescue NameError
@@ -23,29 +23,29 @@ assert_equal 'ok', %q{
}, '[ruby-dev:43816]'
assert_equal 'ok', %q{
- open('zzz.rb', 'w') {|f| f.puts 'class ZZZ; def self.ok;:ok;end;end'}
+ open('zzz3.rb', 'w') {|f| f.puts 'class ZZZ; def self.ok;:ok;end;end'}
instance_eval do
- autoload :ZZZ, './zzz.rb'
+ autoload :ZZZ, './zzz3.rb'
ZZZ.ok
end
}, '[ruby-dev:43816]'
assert_equal 'ok', %q{
- open("zzz.rb", "w") {|f| f.puts "class ZZZ; def self.ok;:ok;end;end"}
- autoload :ZZZ, "./zzz.rb"
+ open("zzz4.rb", "w") {|f| f.puts "class ZZZ; def self.ok;:ok;end;end"}
+ autoload :ZZZ, "./zzz4.rb"
ZZZ.ok
}
assert_equal 'ok', %q{
- open("zzz.rb", "w") {|f| f.puts "class ZZZ; def self.ok;:ok;end;end"}
- autoload :ZZZ, "./zzz.rb"
- require "./zzz.rb"
+ open("zzz5.rb", "w") {|f| f.puts "class ZZZ; def self.ok;:ok;end;end"}
+ autoload :ZZZ, "./zzz5.rb"
+ require "./zzz5.rb"
ZZZ.ok
}
assert_equal 'okok', %q{
- open("zzz.rb", "w") {|f| f.puts "class ZZZ; def self.ok;:ok;end;end"}
- autoload :ZZZ, "./zzz.rb"
+ open("zzz6.rb", "w") {|f| f.puts "class ZZZ; def self.ok;:ok;end;end"}
+ autoload :ZZZ, "./zzz6.rb"
t1 = Thread.new {ZZZ.ok}
t2 = Thread.new {ZZZ.ok}
[t1.value, t2.value].join
@@ -60,9 +60,9 @@ assert_finish 5, %q{
}, '[ruby-core:21696]'
assert_equal 'A::C', %q{
- open("zzz.rb", "w") {}
+ open("zzz7.rb", "w") {}
class A
- autoload :C, "./zzz"
+ autoload :C, "./zzz7"
class C
end
C
diff --git a/bootstraptest/test_constant_cache.rb b/bootstraptest/test_constant_cache.rb
new file mode 100644
index 0000000000..1fa83256ed
--- /dev/null
+++ b/bootstraptest/test_constant_cache.rb
@@ -0,0 +1,187 @@
+# Constant lookup is cached.
+assert_equal '1', %q{
+ CONST = 1
+
+ def const
+ CONST
+ end
+
+ const
+ const
+}
+
+# Invalidate when a constant is set.
+assert_equal '2', %q{
+ CONST = 1
+
+ def const
+ CONST
+ end
+
+ const
+
+ CONST = 2
+
+ const
+}
+
+# Invalidate when a constant of the same name is set.
+assert_equal '1', %q{
+ CONST = 1
+
+ def const
+ CONST
+ end
+
+ const
+
+ class Container
+ CONST = 2
+ end
+
+ const
+}
+
+# Invalidate when a constant is removed.
+assert_equal 'missing', %q{
+ class Container
+ CONST = 1
+
+ def const
+ CONST
+ end
+
+ def self.const_missing(name)
+ 'missing'
+ end
+
+ new.const
+ remove_const :CONST
+ end
+
+ Container.new.const
+}
+
+# Invalidate when a constant's visibility changes.
+assert_equal 'missing', %q{
+ class Container
+ CONST = 1
+
+ def self.const_missing(name)
+ 'missing'
+ end
+ end
+
+ def const
+ Container::CONST
+ end
+
+ const
+
+ Container.private_constant :CONST
+
+ const
+}
+
+# Invalidate when a constant's visibility changes even if the call to the
+# visibility change method fails.
+assert_equal 'missing', %q{
+ class Container
+ CONST1 = 1
+
+ def self.const_missing(name)
+ 'missing'
+ end
+ end
+
+ def const1
+ Container::CONST1
+ end
+
+ const1
+
+ begin
+ Container.private_constant :CONST1, :CONST2
+ rescue NameError
+ end
+
+ const1
+}
+
+# Invalidate when a module is included.
+assert_equal 'INCLUDE', %q{
+ module Include
+ CONST = :INCLUDE
+ end
+
+ class Parent
+ CONST = :PARENT
+ end
+
+ class Child < Parent
+ def const
+ CONST
+ end
+
+ new.const
+
+ include Include
+ end
+
+ Child.new.const
+}
+
+# Invalidate when const_missing is hit.
+assert_equal '2', %q{
+ module Container
+ Foo = 1
+ Bar = 2
+
+ class << self
+ attr_accessor :count
+
+ def const_missing(name)
+ @count += 1
+ @count == 1 ? Foo : Bar
+ end
+ end
+
+ @count = 0
+ end
+
+ def const
+ Container::Baz
+ end
+
+ const
+ const
+}
+
+# Invalidate when the iseq gets cleaned up.
+assert_equal '2', %q{
+ CONSTANT = 1
+
+ iseq = RubyVM::InstructionSequence.compile(<<~RUBY)
+ CONSTANT
+ RUBY
+
+ iseq.eval
+ iseq = nil
+
+ GC.start
+ CONSTANT = 2
+}
+
+# Invalidate when the iseq gets cleaned up even if it was never in the cache.
+assert_equal '2', %q{
+ CONSTANT = 1
+
+ iseq = RubyVM::InstructionSequence.compile(<<~RUBY)
+ CONSTANT
+ RUBY
+
+ iseq = nil
+
+ GC.start
+ CONSTANT = 2
+}
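These tests pivot on the inline constant caches embedded in compiled iseqs; every listed event (constant (re)definition, removal, visibility change, module inclusion, const_missing, iseq GC) must flip a cache back to a miss. The cache slot is visible in the bytecode, though the opcode name varies by Ruby version (getinlinecache/setinlinecache on older releases, a single opt_getconstant_path on newer ones):

    CONST = 1
    puts RubyVM::InstructionSequence.compile("CONST").disasm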
diff --git a/bootstraptest/test_eval.rb b/bootstraptest/test_eval.rb
index 5d2593c306..a9f389c673 100644
--- a/bootstraptest/test_eval.rb
+++ b/bootstraptest/test_eval.rb
@@ -116,6 +116,33 @@ assert_equal %q{1}, %q{
Const
}
}
+assert_equal %q{1}, %q{
+ class TrueClass
+ Const = 1
+ end
+ true.instance_eval %{
+ Const
+ }
+}
+assert_equal %q{[:Const]}, %q{
+ mod = Module.new
+ mod.instance_eval %{
+ Const = 1
+ }
+ raise if defined?(Module::Const)
+ mod.singleton_class.constants
+}
+assert_equal %q{can't define singleton}, %q{
+ begin
+ 123.instance_eval %{
+ Const = 1
+ }
+ "bad"
+ rescue TypeError => e
+ raise "bad" if defined?(Integer::Const)
+ e.message
+ end
+}
assert_equal %q{top}, %q{
Const = :top
class C
@@ -191,7 +218,7 @@ assert_equal %q{[10, main]}, %q{
%w[break next redo].each do |keyword|
assert_match %r"Can't escape from eval with #{keyword}\b", %{
- STDERR.reopen(STDOUT)
+ $stderr = STDOUT
begin
eval "0 rescue #{keyword}"
rescue SyntaxError => e
@@ -201,7 +228,7 @@ assert_equal %q{[10, main]}, %q{
end
assert_normal_exit %q{
- STDERR.reopen(STDOUT)
+ $stderr = STDOUT
class Foo
def self.add_method
class_eval("def some-bad-name; puts 'hello' unless @some_variable.some_function(''); end")
diff --git a/bootstraptest/test_fiber.rb b/bootstraptest/test_fiber.rb
index 35e1bf6851..2614dd13bf 100644
--- a/bootstraptest/test_fiber.rb
+++ b/bootstraptest/test_fiber.rb
@@ -19,12 +19,12 @@ assert_equal %q{ok}, %q{
}
assert_equal %q{ok}, %q{
- 10_000.times.collect{Fiber.new{}}
+ 100.times.collect{Fiber.new{}}
:ok
}
assert_equal %q{ok}, %q{
- fibers = 100.times.collect{Fiber.new{Fiber.yield}}
+ fibers = 1000.times.collect{Fiber.new{Fiber.yield}}
fibers.each(&:resume)
fibers.each(&:resume)
:ok
diff --git a/bootstraptest/test_flow.rb b/bootstraptest/test_flow.rb
index 9da6d45cbd..35f19db588 100644
--- a/bootstraptest/test_flow.rb
+++ b/bootstraptest/test_flow.rb
@@ -534,11 +534,11 @@ assert_equal %Q{ENSURE\n}, %q{
['[ruby-core:39125]', %q{
class Bug5234
include Enumerable
- def each
+ def each(&block)
begin
yield :foo
ensure
- proc
+ proc(&block)
end
end
end
@@ -547,11 +547,11 @@ assert_equal %Q{ENSURE\n}, %q{
['[ruby-dev:45656]', %q{
class Bug6460
include Enumerable
- def each
+ def each(&block)
begin
yield :foo
ensure
- 1.times { Proc.new }
+ 1.times { Proc.new(&block) }
end
end
end
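Both hunks adapt to the removal (in Ruby 3.0, after a 2.7 deprecation) of Proc.new and proc called without a block, which used to capture the enclosing method's block implicitly. The explicit form now required:

    def each(&block)   # the block must be named...
      yield :foo
    ensure
      proc(&block)     # ...and re-wrapped explicitly (was: bare `proc`)
    end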
diff --git a/bootstraptest/test_insns.rb b/bootstraptest/test_insns.rb
index abfb53f7dc..91fba9b011 100644
--- a/bootstraptest/test_insns.rb
+++ b/bootstraptest/test_insns.rb
@@ -86,11 +86,8 @@ tests = [
[ 'putobject', %q{ /(?<x>x)/ =~ "x"; x == "x" }, ],
[ 'putspecialobject', %q{ {//=>true}[//] }, ],
- [ 'putiseq', %q{ -> { true }.() }, ],
[ 'putstring', %q{ "true" }, ],
[ 'tostring / concatstrings', %q{ "#{true}" }, ],
- [ 'freezestring', %q{ "#{true}" }, fsl, ],
- [ 'freezestring', %q{ "#{true}" }, '-d', fsl, ],
[ 'toregexp', %q{ /#{true}/ =~ "true" && $~ }, ],
[ 'intern', %q{ :"#{true}" }, ],
@@ -123,6 +120,7 @@ tests = [
[ 'dup', %q{ x = y = true; x }, ],
[ 'dupn', %q{ Object::X ||= true }, ],
[ 'reverse', %q{ q, (w, e), r = 1, [2, 3], 4; e == 3 }, ],
+ [ 'swap', %q{ !!defined?([[]]) }, ],
[ 'swap', <<-'},', ], # {
x = [[false, true]]
for i, j in x # here
@@ -387,14 +385,13 @@ tests = [
[ 'opt_empty_p', %q{ ''.empty? }, ],
[ 'opt_empty_p', %q{ [].empty? }, ],
[ 'opt_empty_p', %q{ {}.empty? }, ],
- [ 'opt_empty_p', %q{ Queue.new.empty? }, ],
+ [ 'opt_empty_p', %q{ Thread::Queue.new.empty? }, ],
[ 'opt_succ', %q{ 1.succ == 2 }, ],
if defined? $FIXNUM_MAX then
[ 'opt_succ',%Q{ #{ $FIXNUM_MAX }.succ == #{ $FIXNUM_MAX + 1 } }, ]
end,
[ 'opt_succ', %q{ '1'.succ == '2' }, ],
- [ 'opt_succ', %q{ x = Time.at(0); x.succ == Time.at(1) }, ],
[ 'opt_not', %q{ ! false }, ],
[ 'opt_neq', <<-'},', ], # {
@@ -412,8 +409,6 @@ tests = [
class String; def =~ other; true; end; end
'true' =~ /true/
},
-
- [ 'opt_call_c_function', 'Struct.new(:x).new.x = true', ],
]
# normal path
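The Queue change is pure namespacing: Thread::Queue is the canonical constant and the bare ::Queue a top-level alias, so the test stops depending on the alias:

    Thread::Queue.new.empty?  #=> true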
diff --git a/bootstraptest/test_io.rb b/bootstraptest/test_io.rb
index 89c00d0b88..666e5a011b 100644
--- a/bootstraptest/test_io.rb
+++ b/bootstraptest/test_io.rb
@@ -1,3 +1,4 @@
+/freebsd/ =~ RUBY_PLATFORM or
assert_finish 5, %q{
r, w = IO.pipe
t1 = Thread.new { r.sysread(1) }
@@ -30,7 +31,8 @@ assert_finish 10, %q{
end
}, '[ruby-dev:32566]'
-assert_finish 1, %q{
+/freebsd/ =~ RUBY_PLATFORM or
+assert_finish 5, %q{
r, w = IO.pipe
Thread.new {
w << "ab"
@@ -83,6 +85,7 @@ assert_normal_exit %q{
ARGF.set_encoding "foo"
}
+/freebsd/ =~ RUBY_PLATFORM or
10.times do
assert_normal_exit %q{
at_exit { p :foo }
diff --git a/bootstraptest/test_jump.rb b/bootstraptest/test_jump.rb
index 18a2737ea3..d07c47a56d 100644
--- a/bootstraptest/test_jump.rb
+++ b/bootstraptest/test_jump.rb
@@ -147,7 +147,7 @@ assert_equal %q{131}, %q{
}
}
assert_match %r{Invalid retry}, %q{
-STDERR.reopen(STDOUT)
+$stderr = STDOUT
begin
eval %q{
1.times{
@@ -297,7 +297,7 @@ assert_equal "true", %q{
}, '[ruby-core:21379]'
assert_match %r{Invalid yield}, %q{
-STDERR.reopen(STDOUT)
+$stderr = STDOUT
begin
eval %q{
class Object
diff --git a/bootstraptest/test_literal.rb b/bootstraptest/test_literal.rb
index 9b3c10d519..a0d4ee08c6 100644
--- a/bootstraptest/test_literal.rb
+++ b/bootstraptest/test_literal.rb
@@ -65,8 +65,11 @@ assert_equal ':a3c', ':"a#{1+2}c".inspect'
assert_equal 'Symbol', ':"a#{1+2}c".class'
# xstring
-assert_equal "foo\n", %q(`echo foo`)
-assert_equal "foo\n", %q(s = "foo"; `echo #{s}`)
+# WASI doesn't support spawning a new process for now.
+if /wasi/ !~ target_platform
+ assert_equal "foo\n", %q(`echo foo`)
+ assert_equal "foo\n", %q(s = "foo"; `echo #{s}`)
+end
# regexp
assert_equal '', '//.source'
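The new guard asks target_platform, defined in the runner above, rather than the driver's RUBY_PLATFORM, because the binary under test (miniruby, possibly a WASI cross-build) need not match the host. The platform is scraped from the `ruby -v` banner; a sketch with an illustrative version line:

    v = "ruby 3.2.0dev (2022-01-01) [wasm32-wasi]"   # `#{BT.ruby} -v`.chomp
    v[/\[(.*)\]\z/, 1]                               #=> "wasm32-wasi"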
diff --git a/bootstraptest/test_method.rb b/bootstraptest/test_method.rb
index 3462aa9434..04c9eb2d11 100644
--- a/bootstraptest/test_method.rb
+++ b/bootstraptest/test_method.rb
@@ -22,7 +22,7 @@ assert_match /\Awrong number of arguments \(.*\b0\b.* 1\)\z/, %q{
}
# default argument
-assert_equal '1', 'def m(x=1) x end; m()'
+assert_equal '1', 'def m(x=1) x end; m();'
assert_equal '1', 'def m(x=7) x end; m(1)'
assert_equal '1', 'def m(a,x=1) x end; m(7)'
assert_equal '1', 'def m(a,x=7) x end; m(7,1)'
diff --git a/bootstraptest/test_proc.rb b/bootstraptest/test_proc.rb
index 6d2c557c3c..637603243d 100644
--- a/bootstraptest/test_proc.rb
+++ b/bootstraptest/test_proc.rb
@@ -367,8 +367,8 @@ assert_equal 'ok', %q{
assert_equal 'ok', %q{
class Foo
- def call_it
- p = Proc.new
+ def call_it(&block)
+ p = Proc.new(&block)
p.call
end
end
diff --git a/bootstraptest/test_ractor.rb b/bootstraptest/test_ractor.rb
new file mode 100644
index 0000000000..67e66b03ee
--- /dev/null
+++ b/bootstraptest/test_ractor.rb
@@ -0,0 +1,1628 @@
+# Ractor.current returns the current Ractor
+assert_equal 'Ractor', %q{
+ Ractor.current.class
+}
+
+# Ractor.new returns a new Ractor
+assert_equal 'Ractor', %q{
+ Ractor.new{}.class
+}
+
+# Ractor.allocate is not supported
+assert_equal "[:ok, :ok]", %q{
+ rs = []
+ begin
+ Ractor.allocate
+ rescue => e
+ rs << :ok if e.message == 'allocator undefined for Ractor'
+ end
+
+ begin
+ Ractor.new{}.dup
+  rescue => e
+ rs << :ok if e.message == 'allocator undefined for Ractor'
+ end
+
+ rs
+}
+
+# A Ractor can have a name
+assert_equal 'test-name', %q{
+ r = Ractor.new name: 'test-name' do
+ end
+ r.name
+}
+
+# If a Ractor doesn't have a name, Ractor#name returns nil.
+assert_equal 'nil', %q{
+ r = Ractor.new do
+ end
+ r.name.inspect
+}
+
+# Raises an exception if initialized with an invalid name
+assert_equal 'ok', %q{
+ begin
+ r = Ractor.new(name: [{}]) {}
+ rescue TypeError => e
+ 'ok'
+ end
+}
+
+# Ractor.new must be called with a block
+assert_equal "must be called with a block", %q{
+ begin
+ Ractor.new
+ rescue ArgumentError => e
+ e.message
+ end
+}
+
+# Ractor#inspect
+# Returns only the id and status for the main ractor
+assert_equal "#<Ractor:#1 running>", %q{
+ Ractor.current.inspect
+}
+
+# Returns id, loc, and status for an unnamed ractor
+assert_match /^#<Ractor:#([^ ]*?) .+:[0-9]+ terminated>$/, %q{
+ r = Ractor.new { '' }
+ r.take
+ sleep 0.1 until r.inspect =~ /terminated/
+ r.inspect
+}
+
+# Returns id, name, loc, and status for a named ractor
+assert_match /^#<Ractor:#([^ ]*?) Test Ractor .+:[0-9]+ terminated>$/, %q{
+ r = Ractor.new(name: 'Test Ractor') { '' }
+ r.take
+ sleep 0.1 until r.inspect =~ /terminated/
+ r.inspect
+}
+
+# The return value of a Ractor block becomes a message from the Ractor.
+assert_equal 'ok', %q{
+ # join
+ r = Ractor.new do
+ 'ok'
+ end
+ r.take
+}
+
+# Arguments passed to Ractor.new become block parameters.
+# The values are passed with Ractor communication semantics (copied by default).
+assert_equal 'ok', %q{
+ # ping-pong with arg
+ r = Ractor.new 'ok' do |msg|
+ msg
+ end
+ r.take
+}
+
+# Pass multiple arguments to Ractor.new
+assert_equal 'ok', %q{
+ # ping-pong with two args
+ r = Ractor.new 'ping', 'pong' do |msg, msg2|
+ [msg, msg2]
+ end
+ 'ok' if r.take == ['ping', 'pong']
+}
+
+# Ractor#send passes a copy of an object to a Ractor,
+# and Ractor.receive in the Ractor block can receive the passed value.
+assert_equal 'ok', %q{
+ r = Ractor.new do
+ msg = Ractor.receive
+ end
+ r.send 'ok'
+ r.take
+}
+
+# Ractor#receive_if can filter incoming messages
+assert_equal '[2, 3, 1]', %q{
+ r = Ractor.new Ractor.current do |main|
+ main << 1
+ main << 2
+ main << 3
+ end
+ a = []
+ a << Ractor.receive_if{|msg| msg == 2}
+ a << Ractor.receive_if{|msg| msg == 3}
+ a << Ractor.receive
+}
+
+# Ractor#receive_if with break
+assert_equal '[2, [1, :break], 3]', %q{
+ r = Ractor.new Ractor.current do |main|
+ main << 1
+ main << 2
+ main << 3
+ end
+
+ a = []
+ a << Ractor.receive_if{|msg| msg == 2}
+ a << Ractor.receive_if{|msg| break [msg, :break]}
+ a << Ractor.receive
+}
+
+# Ractor#receive_if can't be called recursively
+assert_equal '[[:e1, 1], [:e2, 2]]', %q{
+ r = Ractor.new Ractor.current do |main|
+ main << 1
+ main << 2
+ main << 3
+ end
+
+ a = []
+
+ Ractor.receive_if do |msg|
+ begin
+ Ractor.receive
+ rescue Ractor::Error
+ a << [:e1, msg]
+ end
+ true # delete 1 from queue
+ end
+
+ Ractor.receive_if do |msg|
+ begin
+ Ractor.receive_if{}
+ rescue Ractor::Error
+ a << [:e2, msg]
+ end
+ true # delete 2 from queue
+ end
+
+ a #
+}
+
+# dtoa race condition
+assert_equal '[:ok, :ok, :ok]', %q{
+ n = 3
+ n.times.map{
+ Ractor.new{
+ 10_000.times{ rand.to_s }
+ :ok
+ }
+ }.map(&:take)
+}
+
+# Ractor.make_shareable issue for locals in proc [Bug #18023]
+assert_equal '[:a, :b, :c, :d, :e]', %q{
+ v1, v2, v3, v4, v5 = :a, :b, :c, :d, :e
+ closure = Ractor.current.instance_eval{ Proc.new { [v1, v2, v3, v4, v5] } }
+
+ Ractor.make_shareable(closure).call
+}
+
+# Ractor.make_shareable issue for locals in proc [Bug #18023]
+assert_equal '[:a, :b, :c, :d, :e, :f, :g]', %q{
+ a = :a
+ closure = Ractor.current.instance_eval do
+ -> {
+ b, c, d = :b, :c, :d
+ -> {
+ e, f, g = :e, :f, :g
+ -> { [a, b, c, d, e, f, g] }
+ }.call
+ }.call
+ end
+
+ Ractor.make_shareable(closure).call
+}
+
+# For now, autoload in a non-main Ractor is not supported
+assert_equal 'ok', %q{
+ autoload :Foo, 'foo.rb'
+ r = Ractor.new do
+ p Foo
+ rescue Ractor::UnsafeError
+ :ok
+ end
+ r.take
+}
+
+###
+###
+# Ractor still has several memory-corruption issues, so skip a huge number of tests
+if ENV['GITHUB_WORKFLOW'] &&
+ ENV['GITHUB_WORKFLOW'] == 'Compilations'
+  # ignore the following
+else
+
+# Ractor.select(*ractors) receives a value from one of the given ractors.
+# It is similar to select(2) and Go's select syntax.
+# The return value is [ractor, received_value].
+assert_equal 'ok', %q{
+ # select 1
+ r1 = Ractor.new{'r1'}
+ r, obj = Ractor.select(r1)
+ 'ok' if r == r1 and obj == 'r1'
+}
+
+# Ractor.select from two ractors.
+assert_equal '["r1", "r2"]', %q{
+ # select 2
+ r1 = Ractor.new{'r1'}
+ r2 = Ractor.new{'r2'}
+ rs = [r1, r2]
+ as = []
+ r, obj = Ractor.select(*rs)
+ rs.delete(r)
+ as << obj
+ r, obj = Ractor.select(*rs)
+ as << obj
+ as.sort #=> ["r1", "r2"]
+}
+
+# Ractor.select from multiple ractors.
+assert_equal 30.times.map { 'ok' }.to_s, %q{
+ def test n
+ rs = (1..n).map do |i|
+ Ractor.new(i) do |i|
+ "r#{i}"
+ end
+ end
+ as = []
+ all_rs = rs.dup
+
+ n.times{
+ r, obj = Ractor.select(*rs)
+ as << [r, obj]
+ rs.delete(r)
+ }
+
+ if as.map{|r, o| r.object_id}.sort == all_rs.map{|r| r.object_id}.sort &&
+ as.map{|r, o| o}.sort == (1..n).map{|i| "r#{i}"}.sort
+ 'ok'
+ else
+ 'ng'
+ end
+ end
+
+ 30.times.map{|i|
+ test i
+ }
+} unless ENV['RUN_OPTS'] =~ /--mjit-call-threshold=5/ || # This always fails with --mjit-wait --mjit-call-threshold=5
+ (ENV.key?('TRAVIS') && ENV['TRAVIS_CPU_ARCH'] == 'arm64') || # https://bugs.ruby-lang.org/issues/17878
+ true # too flaky everywhere http://ci.rvm.jp/results/trunk@ruby-sp1/4321096
+
+# Exception for empty select
+assert_match /specify at least one ractor/, %q{
+ begin
+ Ractor.select
+ rescue ArgumentError => e
+ e.message
+ end
+}
+
+# Outgoing port of a ractor will be closed when the Ractor is terminated.
+assert_equal 'ok', %q{
+ r = Ractor.new do
+ 'finish'
+ end
+
+ r.take
+ sleep 0.1 until r.inspect =~ /terminated/
+
+ begin
+ o = r.take
+ rescue Ractor::ClosedError
+ 'ok'
+ else
+ "ng: #{o}"
+ end
+}
+
+# Raise Ractor::ClosedError when trying to send to a terminated ractor
+assert_equal 'ok', %q{
+ r = Ractor.new do
+ end
+
+ r.take # closed
+ sleep 0.1 until r.inspect =~ /terminated/
+
+ begin
+ r.send(1)
+ rescue Ractor::ClosedError
+ 'ok'
+ else
+ 'ng'
+ end
+}
+
+# Raise Ractor::ClosedError when trying to send to a ractor with a closed incoming port
+assert_equal 'ok', %q{
+ r = Ractor.new { Ractor.receive }
+ r.close_incoming
+
+ begin
+ r.send(1)
+ rescue Ractor::ClosedError
+ 'ok'
+ else
+ 'ng'
+ end
+}
+
+# Raise Ractor::ClosedError when trying to take from a ractor with a closed outgoing port
+assert_equal 'ok', %q{
+ r = Ractor.new do
+ Ractor.yield 1
+ Ractor.receive
+ end
+
+ r.close_outgoing
+ begin
+ r.take
+ rescue Ractor::ClosedError
+ 'ok'
+ else
+ 'ng'
+ end
+}
+
+# Can mix with Thread#interrupt and Ractor#take [Bug #17366]
+assert_equal 'err', %q{
+ Ractor.new{
+ t = Thread.current
+ begin
+ Thread.new{ t.raise "err" }.join
+ rescue => e
+ e.message
+ end
+ }.take
+}
+
+# A killed Ractor's thread yields nil
+assert_equal 'nil', %q{
+ Ractor.new{
+ t = Thread.current
+ Thread.new{ t.kill }.join
+ }.take.inspect #=> nil
+}
+
+# Ractor.yield raises Ractor::ClosedError when outgoing port is closed.
+assert_equal 'ok', %q{
+ r = Ractor.new Ractor.current do |main|
+ Ractor.receive
+ main << true
+ Ractor.yield 1
+ end
+
+ r.close_outgoing
+ r << true
+ Ractor.receive
+
+ begin
+ r.take
+ rescue Ractor::ClosedError
+ 'ok'
+ else
+ 'ng'
+ end
+}
+
+# Raise Ractor::ClosedError when trying to send to a ractor with a closed incoming port
+assert_equal 'ok', %q{
+ r = Ractor.new { Ractor.receive }
+ r.close_incoming
+
+ begin
+ r.send(1)
+ rescue Ractor::ClosedError
+ 'ok'
+ else
+ 'ng'
+ end
+}
+
+# A ractor with a closed incoming port can still send messages out
+assert_equal '[1, 2]', %q{
+ r = Ractor.new do
+ Ractor.yield 1
+ 2
+ end
+ r.close_incoming
+
+ [r.take, r.take]
+}
+
+# Raise Ractor::ClosedError when trying to take from a ractor with a closed outgoing port
+assert_equal 'ok', %q{
+ r = Ractor.new do
+ Ractor.yield 1
+ Ractor.receive
+ end
+
+ sleep 0.01 # wait for Ractor.yield in r
+ r.close_outgoing
+ begin
+ r.take
+ rescue Ractor::ClosedError
+ 'ok'
+ else
+ 'ng'
+ end
+}
+
+# A ractor with a closed outgoing port can still receive messages on its incoming port
+assert_equal 'ok', %q{
+ r = Ractor.new do
+ Ractor.receive
+ end
+
+ r.close_outgoing
+ begin
+ r.send(1)
+ rescue Ractor::ClosedError
+ 'ng'
+ else
+ 'ok'
+ end
+}
+
+# Ractor.main returns the main ractor
+assert_equal 'true', %q{
+ Ractor.new{
+ Ractor.main
+ }.take == Ractor.current
+}
+
+# a ractor with a closed outgoing port should terminate
+assert_equal 'ok', %q{
+ Ractor.new do
+ close_outgoing
+ end
+
+ true until Ractor.count == 1
+ :ok
+}
+
+# multiple Ractors can receive (wait) from one Ractor
+assert_equal '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]', %q{
+ pipe = Ractor.new do
+ loop do
+ Ractor.yield Ractor.receive
+ end
+ end
+
+ RN = 10
+ rs = RN.times.map{|i|
+ Ractor.new pipe, i do |pipe, i|
+ msg = pipe.take
+ msg # ping-pong
+ end
+ }
+ RN.times{|i|
+ pipe << i
+ }
+ RN.times.map{
+ r, n = Ractor.select(*rs)
+ rs.delete r
+ n
+ }.sort
+} unless /mswin/ =~ RUBY_PLATFORM # randomly hangs on mswin https://github.com/ruby/ruby/actions/runs/3753871445/jobs/6377551069#step:20:131
+
+# Ractor.select also supports mixing take, receive, and yield
+assert_equal '[true, true, true]', %q{
+ RN = 10
+ CR = Ractor.current
+
+ rs = (1..RN).map{
+ Ractor.new do
+ CR.send 'send' + CR.take #=> 'sendyield'
+ 'take'
+ end
+ }
+ received = []
+ take = []
+ yielded = []
+ until rs.empty?
+ r, v = Ractor.select(CR, *rs, yield_value: 'yield')
+ case r
+ when :receive
+ received << v
+ when :yield
+ yielded << v
+ else
+ take << v
+ rs.delete r
+ end
+ end
+ [received.all?('sendyield'), yielded.all?(nil), take.all?('take')]
+}
+
+# multiple Ractors can send to one Ractor
+assert_equal '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]', %q{
+ pipe = Ractor.new do
+ loop do
+ Ractor.yield Ractor.receive
+ end
+ end
+
+ RN = 10
+ RN.times.map{|i|
+ Ractor.new pipe, i do |pipe, i|
+ pipe << i
+ end
+ }
+ RN.times.map{
+ pipe.take
+ }.sort
+}
+
+# an exception in a Ractor is re-raised by Ractor#take
+assert_equal '[RuntimeError, "ok", true]', %q{
+ r = Ractor.new do
+    raise 'ok' # the exception is transferred to the receiver
+ end
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ [e.cause.class, #=> RuntimeError
+ e.cause.message, #=> 'ok'
+ e.ractor == r] #=> true
+ end
+}
+
+# threads in a ractor will be killed when the ractor terminates
+assert_equal '{:ok=>3}', %q{
+ Ractor.new Ractor.current do |main|
+ q = Thread::Queue.new
+ Thread.new do
+ q << true
+ loop{}
+ ensure
+ main << :ok
+ end
+
+ Thread.new do
+ q << true
+ while true
+ end
+ ensure
+ main << :ok
+ end
+
+ Thread.new do
+ q << true
+ sleep 1
+ ensure
+ main << :ok
+ end
+
+ # wait for the start of all threads
+ 3.times{q.pop}
+ end
+
+ 3.times.map{Ractor.receive}.tally
+}
+
+# unshareable objects are copied
+assert_equal 'false', %q{
+ obj = 'str'.dup
+ r = Ractor.new obj do |msg|
+ msg.object_id
+ end
+
+ obj.object_id == r.take
+}
+
+# To copy the object, Marshal#dump is currently used
+assert_equal "allocator undefined for Thread", %q{
+ obj = Thread.new{}
+ begin
+ r = Ractor.new obj do |msg|
+ msg
+ end
+ rescue TypeError => e
+    e.message #=> "allocator undefined for Thread"
+ else
+ 'ng'
+ end
+}
+
+# send shareable and unshareable objects
+assert_equal "ok", %q{
+ echo_ractor = Ractor.new do
+ loop do
+ v = Ractor.receive
+ Ractor.yield v
+ end
+ end
+
+ class C; end
+ module M; end
+ S = Struct.new(:a, :b, :c, :d)
+
+ shareable_objects = [
+ true,
+ false,
+ nil,
+ 1,
+ 1.1, # Float
+ 1+2r, # Rational
+ 3+4i, # Complex
+ 2**128, # Bignum
+ :sym, # Symbol
+ 'xyzzy'.to_sym, # dynamic symbol
+ 'frozen'.freeze, # frozen String
+ /regexp/, # regexp literal
+ /reg{true}exp/.freeze, # frozen dregexp
+ [1, 2].freeze, # frozen Array which only refers to shareable
+ {a: 1}.freeze, # frozen Hash which only refers to shareable
+ [{a: 1}.freeze, 'str'.freeze].freeze, # nested frozen container
+ S.new(1, 2).freeze, # frozen Struct
+ S.new(1, 2, 3, 4).freeze, # frozen Struct
+ (1..2), # Range on Struct
+ (1..), # Range on Struct
+ (..1), # Range on Struct
+ C, # class
+ M, # module
+ Ractor.current, # Ractor
+ ]
+
+ unshareable_objects = [
+ 'mutable str'.dup,
+ [:array],
+ {hash: true},
+ S.new(1, 2),
+ S.new(1, 2, 3, 4),
+ S.new("a", 2).freeze, # frozen, but refers to an unshareable object
+ ]
+
+ results = []
+
+ shareable_objects.map{|o|
+ echo_ractor << o
+ o2 = echo_ractor.take
+ results << "#{o} is copied" unless o.object_id == o2.object_id
+ }
+
+ unshareable_objects.map{|o|
+ echo_ractor << o
+ o2 = echo_ractor.take
+ results << "#{o.inspect} is not copied" if o.object_id == o2.object_id
+ }
+
+ if results.empty?
+ :ok
+ else
+ results.inspect
+ end
+}
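+
+# A minimal recap of the rule above: an object is shareable only when it is
+# deeply immutable (or inherently shareable, like a Ractor, Class or Module).
+assert_equal '[true, false]', %q{
+  [Ractor.shareable?([1, 2].freeze),
+   Ractor.shareable?(['mutable'.dup].freeze)] # frozen, but refers to a mutable String
+}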
+
+# frozen Objects are shareable
+assert_equal [false, true, false].inspect, %q{
+ class C
+ def initialize freeze
+ @a = 1
+ @b = :sym
+ @c = 'frozen_str'
+ @c.freeze if freeze
+ @d = true
+ end
+ end
+
+ def check obj1
+ obj2 = Ractor.new obj1 do |obj|
+ obj
+ end.take
+
+ obj1.object_id == obj2.object_id
+ end
+
+ results = []
+ results << check(C.new(true)) # false
+ results << check(C.new(true).freeze) # true
+ results << check(C.new(false).freeze) # false
+}
+
+# move example 1: String
+# touching moved object causes an error
+assert_equal 'hello world', %q{
+ # move
+ r = Ractor.new do
+ obj = Ractor.receive
+ obj << ' world'
+ end
+
+ str = 'hello'
+ r.send str, move: true
+ modified = r.take
+
+ begin
+ str << ' exception' # raise Ractor::MovedError
+ rescue Ractor::MovedError
+ modified #=> 'hello world'
+ else
+ raise 'unreachable'
+ end
+}
+
+# move example 2: Array
+assert_equal '[0, 1]', %q{
+ r = Ractor.new do
+ ary = Ractor.receive
+ ary << 1
+ end
+
+ a1 = [0]
+ r.send a1, move: true
+ a2 = r.take
+ begin
+ a1 << 2 # raise Ractor::MovedError
+ rescue Ractor::MovedError
+ a2.inspect
+ end
+}
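+
+# Sketch of the move rule shown above: after send(obj, move: true), any use
+# of the source object raises Ractor::MovedError.
+assert_equal 'Ractor::MovedError', %q{
+  r = Ractor.new { Ractor.receive }
+  s = 'moved'.dup
+  r.send s, move: true
+  r.take
+  begin
+    s.size
+  rescue Ractor::MovedError => e
+    e.class
+  end
+}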
+
+# move with yield
+assert_equal 'hello', %q{
+ r = Ractor.new do
+ Thread.current.report_on_exception = false
+ obj = 'hello'
+ Ractor.yield obj, move: true
+ obj << 'world'
+ end
+
+ str = r.take
+ begin
+ r.take
+ rescue Ractor::RemoteError
+ str #=> "hello"
+ end
+}
+
+# yield/move should not mark the object as moved when the yield does not happen
+assert_equal '"str"', %q{
+ R = Ractor.new{}
+ M = Ractor.current
+ r = Ractor.new do
+ s = 'str'
+ selected_r, v = Ractor.select R, yield_value: s, move: true
+ raise if selected_r != R # taken from R
+ M.send s.inspect # s should not be a moved object
+ end
+
+ Ractor.receive
+}
+
+# yield can fail when the object cannot be copied
+assert_equal "allocator undefined for Thread", %q{
+ r = Ractor.new do
+ obj = Thread.new{}
+ Ractor.yield obj
+ rescue => e
+ e.message
+ end
+ r.take
+}
+
+# Reading global variables from a non-main Ractor is prohibited
+assert_equal 'can not access global variables $gv from non-main Ractors', %q{
+ $gv = 1
+ r = Ractor.new do
+ $gv
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# Writing global variables from a non-main Ractor is prohibited
+assert_equal 'can not access global variables $gv from non-main Ractors', %q{
+ r = Ractor.new do
+ $gv = 1
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# $stdin/$stdout/$stderr are Ractor-local objects, but share the underlying fds
+assert_equal 'ok', %q{
+ r = Ractor.new do
+ [$stdin, $stdout, $stderr].map{|io|
+ [io.object_id, io.fileno]
+ }
+ end
+
+ [$stdin, $stdout, $stderr].zip(r.take){|io, (oid, fno)|
+ raise "should not be different object" if io.object_id == oid
+ raise "fd should be same" unless io.fileno == fno
+ }
+ 'ok'
+}
+
+# $stdin/$stdout/$stderr are accessible inside a Ractor
+assert_equal 'ok', %q{
+ r = Ractor.new do
+ $stdin.itself
+ $stdout.itself
+ $stderr.itself
+ 'ok'
+ end
+
+ r.take
+}
+
+# $DEBUG, $VERBOSE are Ractor local
+assert_equal 'true', %q{
+ $DEBUG = true
+ $VERBOSE = true
+
+ def ractor_local_globals
+ /a(b)(c)d/ =~ 'abcd' # for $~
+ `echo foo` unless /solaris/ =~ RUBY_PLATFORM
+
+ {
+ # ractor-local (derived from created ractor): debug
+ '$DEBUG' => $DEBUG,
+ '$-d' => $-d,
+
+ # ractor-local (derived from created ractor): verbose
+ '$VERBOSE' => $VERBOSE,
+ '$-w' => $-w,
+ '$-W' => $-W,
+ '$-v' => $-v,
+
+ # process-local (readonly): other commandline parameters
+ '$-p' => $-p,
+ '$-l' => $-l,
+ '$-a' => $-a,
+
+ # process-local (readonly): getpid
+ '$$' => $$,
+
+ # thread local: process result
+ '$?' => $?,
+
+ # scope local: match
+ '$~' => $~.inspect,
+ '$&' => $&,
+ '$`' => $`,
+ '$\'' => $',
+ '$+' => $+,
+ '$1' => $1,
+
+ # scope local: last line
+ '$_' => $_,
+
+ # scope local: last backtrace
+ '$@' => $@,
+ '$!' => $!,
+
+ # ractor local: stdin, out, err
+ '$stdin' => $stdin.inspect,
+ '$stdout' => $stdout.inspect,
+ '$stderr' => $stderr.inspect,
+ }
+ end
+
+ h = Ractor.new do
+ ractor_local_globals
+ end.take
+ ractor_local_globals == h #=> true
+}
+
+# each Ractor's self is a different object
+assert_equal 'false', %q{
+ r = Ractor.new do
+ self.object_id
+ end
+ r.take == self.object_id #=> false
+}
+
+# self is a Ractor instance
+assert_equal 'true', %q{
+ r = Ractor.new do
+ self.object_id
+ end
+ r.object_id == r.take #=> true
+}
+
+# the given block Proc will be isolated, so it cannot access outer variables.
+assert_equal 'ArgumentError', %q{
+ begin
+ a = true
+ r = Ractor.new do
+ a
+ end
+ rescue => e
+ e.class
+ end
+}
+
+# unshareable values in class/module ivars cannot be read from non-main Ractors
+assert_equal "can not get unshareable values from instance variables of classes/modules from non-main Ractors", %q{
+ class C
+ @iv = 'str'
+ end
+
+ r = Ractor.new do
+ class C
+ p @iv
+ end
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# ivars of shareable objects cannot be accessed from non-main Ractors
+assert_equal 'can not access instance variables of shareable objects from non-main Ractors', %q{
+ shared = Ractor.new{}
+ shared.instance_variable_set(:@iv, 'str')
+
+ r = Ractor.new shared do |shared|
+ p shared.instance_variable_get(:@iv)
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# ivars of shareable objects cannot be read from non-main Ractors (reader method)
+assert_equal 'can not access instance variables of shareable objects from non-main Ractors', %q{
+ class Ractor
+ def setup
+ @foo = ''
+ end
+
+ def foo
+ @foo
+ end
+ end
+
+ shared = Ractor.new{}
+ shared.setup
+
+ r = Ractor.new shared do |shared|
+ p shared.foo
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# ivars of shareable objects cannot be written from non-main Ractors (writer method)
+assert_equal 'can not access instance variables of shareable objects from non-main Ractors', %q{
+ class Ractor
+ def setup
+ @foo = ''
+ end
+ end
+
+ shared = Ractor.new{}
+
+ r = Ractor.new shared do |shared|
+ p shared.setup
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# But if a shareable object is frozen, its ivars can be read from non-main Ractors
+assert_equal '11', %q{
+ [Object.new, [], ].map{|obj|
+ obj.instance_variable_set('@a', 1)
+ Ractor.make_shareable obj = obj.freeze
+
+ Ractor.new obj do |obj|
+ obj.instance_variable_get('@a')
+ end.take.to_s
+ }.join
+}
+
+# and instance variables of classes/modules are accessible if they refer to shareable objects
+assert_equal '333', %q{
+ class C
+ @int = 1
+ @str = '-1000'.dup
+ @fstr = '100'.freeze
+
+ def self.int = @int
+ def self.str = @str
+ def self.fstr = @fstr
+ end
+
+ module M
+ @int = 2
+ @str = '-2000'.dup
+ @fstr = '200'.freeze
+
+ def self.int = @int
+ def self.str = @str
+ def self.fstr = @fstr
+ end
+
+ a = Ractor.new{ C.int }.take
+ b = Ractor.new do
+ C.str.to_i
+ rescue Ractor::IsolationError
+ 10
+ end.take
+ c = Ractor.new do
+ C.fstr.to_i
+ end.take
+
+ d = Ractor.new{ M.int }.take
+ e = Ractor.new do
+ M.str.to_i
+ rescue Ractor::IsolationError
+ 20
+ end.take
+ f = Ractor.new do
+ M.fstr.to_i
+ end.take
+
+ # 1 + 10 + 100 + 2 + 20 + 200
+ a + b + c + d + e + f
+}
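+
+# The same rule as a minimal sketch: reading a class-level ivar from another
+# Ractor raises Ractor::IsolationError when the value is unshareable.
+assert_equal 'Ractor::IsolationError', %q{
+  class D
+    @mutable = 'not shareable'.dup
+    def self.mutable = @mutable
+  end
+
+  Ractor.new do
+    D.mutable
+  rescue Ractor::IsolationError => e
+    e.class
+  end.take
+}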
+
+# class variables cannot be accessed from non-main Ractors
+assert_equal 'can not access class variables from non-main Ractors', %q{
+ class C
+ @@cv = 'str'
+ end
+
+ r = Ractor.new do
+ class C
+ p @@cv
+ end
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# cached class variables are likewise not accessible from non-main Ractors
+assert_equal 'can not access class variables from non-main Ractors', %q{
+ class C
+ @@cv = 'str'
+ def self.cv
+ @@cv
+ end
+ end
+
+ C.cv # cache
+
+ r = Ractor.new do
+ C.cv
+ end
+
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# Getting non-shareable objects via constants by other Ractors is not allowed
+assert_equal 'can not access non-shareable objects in constant C::CONST by non-main Ractor.', %q{
+ class C
+ CONST = 'str'
+ end
+ r = Ractor.new do
+ C::CONST
+ end
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# Constant cache should care about non-shareable constants
+assert_equal "can not access non-shareable objects in constant Object::STR by non-main Ractor.", %q{
+ STR = "hello"
+ def str; STR; end
+ s = str() # fill const cache
+ begin
+ Ractor.new{ str() }.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# Setting non-shareable objects into constants by other Ractors is not allowed
+assert_equal 'can not set constants with non-shareable objects by non-main Ractors', %q{
+ class C
+ end
+ r = Ractor.new do
+ C::CONST = 'str'
+ end
+ begin
+ r.take
+ rescue Ractor::RemoteError => e
+ e.cause.message
+ end
+}
+
+# calling a method defined with an unshareable Proc from another Ractor is not allowed
+assert_equal "defined with an un-shareable Proc in a different Ractor", %q{
+ str = "foo"
+ define_method(:buggy){|i| str << "#{i}"}
+ begin
+ Ractor.new{buggy(10)}.take
+ rescue => e
+ e.cause.message
+ end
+}
+
+# Immutable Array and Hash are shareable, so they can be shared via constants
+assert_equal '[1000, 3]', %q{
+ A = Array.new(1000).freeze # [nil, ...]
+ H = {a: 1, b: 2, c: 3}.freeze
+
+ Ractor.new{ [A.size, H.size] }.take
+}
+
+# Ractor.count
+assert_equal '[1, 4, 3, 2, 1]', %q{
+ counts = []
+ counts << Ractor.count
+ ractors = (1..3).map { Ractor.new { Ractor.receive } }
+ counts << Ractor.count
+
+ ractors[0].send('End 0').take
+ sleep 0.1 until ractors[0].inspect =~ /terminated/
+ counts << Ractor.count
+
+ ractors[1].send('End 1').take
+ sleep 0.1 until ractors[1].inspect =~ /terminated/
+ counts << Ractor.count
+
+ ractors[2].send('End 2').take
+ sleep 0.1 until ractors[2].inspect =~ /terminated/
+ counts << Ractor.count
+
+ counts.inspect
+}
+
+# ObjectSpace.each_object does not yield unshareable objects inside non-main Ractors
+assert_equal '0', %q{
+ Ractor.new{
+ n = 0
+ ObjectSpace.each_object{|o| n += 1 unless Ractor.shareable?(o)}
+ n
+ }.take
+}
+
+# ObjectSpace._id2ref cannot return unshareable objects inside non-main Ractors
+assert_equal 'ok', %q{
+ s = 'hello'
+
+ Ractor.new s.object_id do |id ;s|
+ begin
+ s = ObjectSpace._id2ref(id)
+ rescue => e
+ :ok
+ end
+ end.take
+}
+
+# Ractor.make_shareable(obj)
+assert_equal 'true', %q{
+ class C
+ def initialize
+ @a = 'foo'
+ @b = 'bar'
+ end
+
+ def freeze
+ @c = [:freeze_called]
+ super
+ end
+
+ attr_reader :a, :b, :c
+ end
+ S = Struct.new(:s1, :s2)
+ str = "hello"
+ str.instance_variable_set("@iv", "hello")
+ /a/ =~ 'a'
+ m = $~
+ class N < Numeric
+ def /(other)
+ 1
+ end
+ end
+ ary = []; ary << ary
+
+ a = [[1, ['2', '3']],
+ {Object.new => "hello"},
+ C.new,
+ S.new("x", "y"),
+ ("a".."b"),
+ str,
+ ary, # cycle
+ /regexp/,
+ /#{'r'.upcase}/,
+ m,
+ Complex(N.new,0),
+ Rational(N.new,0),
+ true,
+ false,
+ nil,
+ 1, 1.2, 1+3r, 1+4i, # Numeric
+ ]
+ Ractor.make_shareable(a)
+
+ # check all frozen
+ a.each{|o|
+ raise o.inspect unless o.frozen?
+
+ case o
+ when C
+ raise o.a.inspect unless o.a.frozen?
+ raise o.b.inspect unless o.b.frozen?
+ raise o.c.inspect unless o.c.frozen? && o.c == [:freeze_called]
+ when Rational
+ raise o.numerator.inspect unless o.numerator.frozen?
+ when Complex
+ raise o.real.inspect unless o.real.frozen?
+ when Array
+ if o[0] == 1
+ raise o[1][1].inspect unless o[1][1].frozen?
+ end
+ when Hash
+ o.each{|k, v|
+ raise k.inspect unless k.frozen?
+ raise v.inspect unless v.frozen?
+ }
+ end
+ }
+
+ Ractor.shareable?(a)
+}
+
+# Ractor.make_shareable(obj) doesn't freeze shareable objects
+assert_equal 'true', %q{
+ r = Ractor.new{}
+ Ractor.make_shareable(a = [r])
+ [a.frozen?, a[0].frozen?] == [true, false]
+}
+
+# Ractor.make_shareable(a_proc) makes a proc shareable.
+assert_equal 'true', %q{
+ a = [1, [2, 3], {a: "4"}]
+
+ pr = Ractor.current.instance_eval do
+ Proc.new do
+ a
+ end
+ end
+
+ Ractor.make_shareable(a) # referred value should be shareable
+ Ractor.make_shareable(pr)
+ Ractor.shareable?(pr)
+}
+
+# Ractor.shareable?(recursive_objects)
+assert_equal '[false, false]', %q{
+ y = []
+ x = [y, {}].freeze
+ y << x
+ y.freeze
+ [Ractor.shareable?(x), Ractor.shareable?(y)]
+}
+
+# Ractor.make_shareable(recursive_objects)
+assert_equal '[:ok, false, false]', %q{
+ o = Object.new
+ def o.freeze; raise; end
+ y = []
+ x = [y, o].freeze
+ y << x
+ y.freeze
+ [(Ractor.make_shareable(x) rescue :ok), Ractor.shareable?(x), Ractor.shareable?(y)]
+}
+
+# Ractor.make_shareable with Class/Module
+assert_equal '[C, M]', %q{
+ class C; end
+ module M; end
+
+ Ractor.make_shareable(ary = [C, M])
+}
+
+# define_method() can invoke a different Ractor's Proc if the Proc is shareable.
+assert_equal '1', %q{
+ class C
+ a = 1
+ define_method "foo", Ractor.make_shareable(Proc.new{ a })
+ a = 2
+ end
+
+ Ractor.new{ C.new.foo }.take
+}
+
+# Ractor.make_shareable(a_proc) fails when the Proc writes to outer variables.
+assert_equal 'can not make a Proc shareable because it accesses outer variables (a).', %q{
+ a = b = nil
+ pr = Ractor.current.instance_eval do
+ Proc.new do
+      c = b # assigning to c is okay because c is a block-local variable
+ # reading b is okay
+ a = b # assign to a is not allowed #=> Ractor::Error
+ end
+ end
+
+ begin
+ Ractor.make_shareable(pr)
+ rescue => e
+ e.message
+ end
+}
+
+# Ractor.make_shareable(obj, copy: true) makes copied shareable object.
+assert_equal '[false, false, true, true]', %q{
+ r = []
+ o1 = [1, 2, ["3"]]
+
+ o2 = Ractor.make_shareable(o1, copy: true)
+ r << Ractor.shareable?(o1) # false
+ r << (o1.object_id == o2.object_id) # false
+
+ o3 = Ractor.make_shareable(o1)
+ r << Ractor.shareable?(o1) # true
+ r << (o1.object_id == o3.object_id) # false
+ r
+}
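+
+# Sketch of the difference: copy: true leaves the argument untouched and
+# returns a deep-frozen copy, while the default freezes the argument itself.
+assert_equal '[false, true]', %q{
+  o = ['x'.dup]
+  copy = Ractor.make_shareable(o, copy: true)
+  [o.frozen?, copy.frozen?]
+}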
+
+# TracePoint with normal Proc should be Ractor local
+assert_equal '[4, 8]', %q{
+ rs = []
+ TracePoint.new(:line){|tp| rs << tp.lineno if tp.path == __FILE__}.enable do
+ Ractor.new{ # line 4
+ a = 1
+ b = 2
+ }.take
+ c = 3 # line 8
+ end
+ rs
+}
+
+# Ractor deep copies frozen objects (ary)
+assert_equal '[true, false]', %q{
+ Ractor.new([[]].freeze) { |ary|
+ [ary.frozen?, ary.first.frozen? ]
+ }.take
+}
+
+# Ractor deep copies frozen objects (str)
+assert_equal '[true, false]', %q{
+ s = String.new.instance_eval { @x = []; freeze}
+ Ractor.new(s) { |s|
+ [s.frozen?, s.instance_variable_get(:@x).frozen?]
+ }.take
+}
+
+# Cannot trap with a non-isolated Proc on a non-main Ractor
+assert_equal '[:ok, :ok]', %q{
+ a = []
+ Ractor.new{
+ trap(:INT){p :ok}
+ }.take
+ a << :ok
+
+ begin
+ Ractor.new{
+ s = 'str'
+ trap(:INT){p s}
+ }.take
+ rescue => Ractor::RemoteError
+ a << :ok
+ end
+}
+
+# Ractor-local storage
+assert_equal '[nil, "b", "a"]', %q{
+ ans = []
+ Ractor.current[:key] = 'a'
+ r = Ractor.new{
+ Ractor.yield self[:key]
+ self[:key] = 'b'
+ self[:key]
+ }
+ ans << r.take
+ ans << r.take
+ ans << Ractor.current[:key]
+}
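+
+# Sketch: Ractor#[] / #[]= access storage owned by that Ractor, so a fresh
+# Ractor sees nil for keys set elsewhere.
+assert_equal '[nil, "mine"]', %q{
+  Ractor.current[:k] = 'mine'
+  inner = Ractor.new{ self[:k] }.take
+  [inner, Ractor.current[:k]]
+}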
+
+###
+### Synchronization tests
+###
+
+N = 100_000
+
+# fstring pool
+assert_equal "#{N}#{N}", %Q{
+ N = #{N}
+ 2.times.map{
+ Ractor.new{
+ N.times{|i| -(i.to_s)}
+ }
+ }.map{|r| r.take}.join
+}
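+
+# What the unary minus above relies on, as a sketch: String#-@ interns the
+# string, so equal contents yield the very same frozen object.
+assert_equal 'true', %q{
+  a = 'interned'.dup
+  b = 'interned'.dup
+  (-a).equal?(-b)
+}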
+
+# enc_table
+assert_equal "100", %Q{
+ Ractor.new do
+ loop do
+ Encoding.find("test-enc-#{rand(5_000)}").inspect
+ rescue ArgumentError => e
+ end
+ end
+
+ src = Encoding.find("UTF-8")
+ 100.times{|i|
+ src.replicate("test-enc-\#{i}")
+ }
+}
+
+# Generic ivtbl
+n = N/2
+assert_equal "#{n}#{n}", %Q{
+ 2.times.map{
+ Ractor.new do
+ #{n}.times do
+ obj = ''
+ obj.instance_variable_set("@a", 1)
+ obj.instance_variable_set("@b", 1)
+ obj.instance_variable_set("@c", 1)
+ obj.instance_variable_defined?("@a")
+ end
+ end
+ }.map{|r| r.take}.join
+}
+
+# NameError
+assert_equal "ok", %q{
+ begin
+ bar
+ rescue => err
+ end
+ begin
+ Ractor.new{} << err
+ rescue TypeError
+ 'ok'
+ end
+}
+
+assert_equal "ok", %q{
+ GC.disable
+ Ractor.new {}
+ raise "not ok" unless GC.disable
+
+ foo = []
+ 10.times { foo << 1 }
+
+ GC.start
+
+ 'ok'
+}
+
+# Can yield back values while GC is sweeping [Bug #18117]
+assert_equal "ok", %q{
+ workers = (0...8).map do
+ Ractor.new do
+ loop do
+ 10_000.times.map { Object.new }
+ Ractor.yield Time.now
+ end
+ end
+ end
+
+ 1_000.times { idle_worker, tmp_reporter = Ractor.select(*workers) }
+ "ok"
+}
+
+assert_equal "ok", %q{
+ def foo(*); ->{ super }; end
+ begin
+ Ractor.make_shareable(foo)
+ rescue Ractor::IsolationError
+ "ok"
+ end
+}
+
+assert_equal "ok", %q{
+ def foo(**); ->{ super }; end
+ begin
+ Ractor.make_shareable(foo)
+ rescue Ractor::IsolationError
+ "ok"
+ end
+}
+
+assert_equal "ok", %q{
+ def foo(...); ->{ super }; end
+ begin
+ Ractor.make_shareable(foo)
+ rescue Ractor::IsolationError
+ "ok"
+ end
+}
+
+assert_equal "ok", %q{
+ def foo((x), (y)); ->{ super }; end
+ begin
+ Ractor.make_shareable(foo([], []))
+ rescue Ractor::IsolationError
+ "ok"
+ end
+}
+
+assert_equal "ok", %q{
+ module M
+ def foo
+ @foo
+ end
+ end
+
+ class A
+ include M
+
+ def initialize
+ 100.times { |i| instance_variable_set(:"@var_#{i}", "bad: #{i}") }
+ @foo = 2
+ end
+ end
+
+ class B
+ include M
+
+ def initialize
+ @foo = 1
+ end
+ end
+
+ Ractor.new do
+ b = B.new
+ 100_000.times do
+ raise unless b.foo == 1
+ end
+ end
+
+ a = A.new
+ 100_000.times do
+ raise unless a.foo == 2
+ end
+
+ "ok"
+}
+
+assert_match /\Atest_ractor\.rb:1:\s+warning:\s+Ractor is experimental/, %q{
+ Warning[:experimental] = $VERBOSE = true
+ STDERR.reopen(STDOUT)
+ eval("Ractor.new{}.take", nil, "test_ractor.rb", 1)
+}
+
+end # if !ENV['GITHUB_WORKFLOW']
diff --git a/bootstraptest/test_syntax.rb b/bootstraptest/test_syntax.rb
index fa27bf2aeb..948e2d7809 100644
--- a/bootstraptest/test_syntax.rb
+++ b/bootstraptest/test_syntax.rb
@@ -628,7 +628,7 @@ assert_equal '2', %q{
}
assert_match /invalid multibyte char/, %q{
- STDERR.reopen(STDOUT)
+ $stderr = STDOUT
eval("\"\xf0".force_encoding("utf-8"))
}, '[ruby-dev:32429]'
diff --git a/bootstraptest/test_thread.rb b/bootstraptest/test_thread.rb
index 38a55ff229..5361828403 100644
--- a/bootstraptest/test_thread.rb
+++ b/bootstraptest/test_thread.rb
@@ -243,7 +243,7 @@ assert_equal 'true', %{
}
assert_equal 'ok', %{
- open("zzz.rb", "w") do |f|
+ open("zzz_t1.rb", "w") do |f|
f.puts <<-END
begin
Thread.new { fork { GC.start } }.join
@@ -254,7 +254,7 @@ assert_equal 'ok', %{
end
END
end
- require "./zzz.rb"
+ require "./zzz_t1.rb"
$result
}
@@ -408,7 +408,7 @@ assert_equal 'ok', %q{
}
assert_equal 'ok', %{
- open("zzz.rb", "w") do |f|
+ open("zzz_t2.rb", "w") do |f|
f.puts <<-'end;' # do
begin
m = Thread::Mutex.new
@@ -432,7 +432,7 @@ assert_equal 'ok', %{
end
end;
end
- require "./zzz.rb"
+ require "./zzz_t2.rb"
$result
}
diff --git a/bootstraptest/test_yjit.rb b/bootstraptest/test_yjit.rb
new file mode 100644
index 0000000000..5c655b8f25
--- /dev/null
+++ b/bootstraptest/test_yjit.rb
@@ -0,0 +1,3530 @@
+# Regression test for yielding with autosplat to block with
+# optional parameters. https://github.com/Shopify/yjit/issues/313
+assert_equal '[:a, :b, :a, :b]', %q{
+ def yielder(arg) = yield(arg) + yield(arg)
+
+ yielder([:a, :b]) do |c = :c, d = :d|
+ [c, d]
+ end
+}
+
+# Regression test for GC mishap while doing shape transition
+assert_equal '[:ok]', %q{
+ # [Bug #19601]
+ class RegressionTest
+ def initialize
+ @a = @b = @fourth_ivar_does_shape_transition = nil
+ end
+
+ def extender
+ @first_extended_ivar = [:ok]
+ end
+ end
+
+ GC.stress = true
+
+ # Used to crash due to GC run in rb_ensure_iv_list_size()
+ # not marking the newly allocated [:ok].
+ RegressionTest.new.extender.itself
+}
+
+assert_equal 'true', %q{
+ # regression test for tracking type of locals for too long
+ def local_setting_cmp(five)
+ victim = 5
+ five.define_singleton_method(:respond_to?) do |_, _|
+ victim = nil
+ end
+
+ # +1 makes YJIT track that victim is a number and
+ # defined? calls respond_to? from above indirectly
+ unless (victim + 1) && defined?(five.something)
+ # Would return wrong result if we still think `five` is a number
+ victim.nil?
+ end
+ end
+
+ local_setting_cmp(Object.new)
+ local_setting_cmp(Object.new)
+}
+
+assert_equal '18374962167983112447', %q{
+ # regression test for incorrectly discarding 32 bits of a pointer when it
+ # comes to default values.
+ def large_literal_default(n: 0xff00_fabcafe0_00ff)
+ n
+ end
+
+ def call_graph_root
+ large_literal_default
+ end
+
+ call_graph_root
+ call_graph_root
+}
+
+assert_normal_exit %q{
+ # regression test for a leak caught by an assert on --yjit-call-threshold=2
+ Foo = 1
+
+ eval("def foo = [#{(['Foo,']*256).join}]")
+
+ foo
+ foo
+
+ Object.send(:remove_const, :Foo)
+}
+
+assert_normal_exit %q{
+  # Test to ensure send on overridden C functions
+ # doesn't corrupt the stack
+ class Bar
+ def bar(x)
+ x
+ end
+ end
+
+ class Foo
+ def bar
+ Bar.new
+ end
+ end
+
+ foo = Foo.new
+ # before this change, this line would error
+ # because "s" would still be on the stack
+ # String.to_s is the overridden method here
+ p foo.bar.bar("s".__send__(:to_s))
+}
+
+assert_equal '[nil, nil, nil, nil, nil, nil]', %q{
+ [NilClass, TrueClass, FalseClass, Integer, Float, Symbol].each do |klass|
+ klass.class_eval("def foo = @foo")
+ end
+
+ [nil, true, false, 0xFABCAFE, 0.42, :cake].map do |instance|
+ instance.foo
+ instance.foo
+ end
+}
+
+assert_equal '0', %q{
+ # This is a regression test for incomplete invalidation from
+ # opt_setinlinecache. This test might be brittle, so
+ # feel free to remove it in the future if it's too annoying.
+ # This test assumes --yjit-call-threshold=2.
+ module M
+ Foo = 1
+ def foo
+ Foo
+ end
+
+ def pin_self_type_then_foo
+ _ = @foo
+ foo
+ end
+
+ def only_ints
+ 1 + self
+ foo
+ end
+ end
+
+ class Integer
+ include M
+ end
+
+ class Sub
+ include M
+ end
+
+ foo_method = M.instance_method(:foo)
+
+ dbg = ->(message) do
+ return # comment this out to get printouts
+
+ $stderr.puts RubyVM::YJIT.disasm(foo_method)
+ $stderr.puts message
+ end
+
+ 2.times { 42.only_ints }
+
+ dbg["There should be two versions of getinlineache"]
+
+ module M
+ remove_const(:Foo)
+ end
+
+ dbg["There should be no getinlinecaches"]
+
+ 2.times do
+ 42.only_ints
+ rescue NameError => err
+ _ = "caught name error #{err}"
+ end
+
+ dbg["There should be one version of getinlineache"]
+
+ 2.times do
+ Sub.new.pin_self_type_then_foo
+ rescue NameError
+ _ = 'second specialization'
+ end
+
+ dbg["There should be two versions of getinlineache"]
+
+ module M
+ Foo = 1
+ end
+
+ dbg["There should still be two versions of getinlineache"]
+
+ 42.only_ints
+
+ dbg["There should be no getinlinecaches"]
+
+ # Find name of the first VM instruction in M#foo.
+ insns = RubyVM::InstructionSequence.of(foo_method).to_a
+ if defined?(RubyVM::YJIT.blocks_for) && (insns.last.find { Array === _1 }&.first == :opt_getinlinecache)
+ RubyVM::YJIT.blocks_for(RubyVM::InstructionSequence.of(foo_method))
+ .filter { _1.iseq_start_index == 0 }.count
+ else
+ 0 # skip the test
+ end
+}
+
+# Check that frozen objects are respected
+assert_equal 'great', %q{
+ class Foo
+ attr_accessor :bar
+ def initialize
+ @bar = 1
+ freeze
+ end
+ end
+
+ foo = Foo.new
+
+ 5.times do
+ begin
+ foo.bar = 2
+ rescue FrozenError
+ end
+ end
+
+ foo.bar == 1 ? "great" : "NG"
+}
+
+# Check that global variable set works
+assert_equal 'string', %q{
+ def foo
+ $foo = "string"
+ end
+
+ foo
+}
+
+# Check that exceptions work when setting global variables
+assert_equal 'rescued', %q{
+ def set_var
+ $var = 100
+ rescue
+ :rescued
+ end
+
+ set_var
+ trace_var(:$var) { raise }
+ set_var
+}
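+
+# Sketch of the Kernel#trace_var hook used above: the block runs on each
+# assignment to the named global and receives the new value.
+assert_equal '[1, 2]', %q{
+  seen = []
+  trace_var(:$traced) { |val| seen << val }
+  $traced = 1
+  $traced = 2
+  seen
+}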
+
+# Check that global variables work
+assert_equal 'string', %q{
+ $foo = "string"
+
+ def foo
+ $foo
+ end
+
+ foo
+}
+
+# Check that exceptions work when getting global variable
+assert_equal 'rescued', %q{
+ Warning[:deprecated] = true
+
+ module Warning
+ def warn(message)
+ raise
+ end
+ end
+
+ def get_var
+ $=
+ rescue
+ :rescued
+ end
+
+ $VERBOSE = true
+ get_var
+ get_var
+}
+
+# Check that global tracepoints work
+assert_equal 'true', %q{
+ def foo
+ 1
+ end
+
+ foo
+ foo
+ foo
+
+ called = false
+
+ tp = TracePoint.new(:return) { |event|
+ if event.method_id == :foo
+ called = true
+ end
+ }
+ tp.enable
+ foo
+ tp.disable
+ called
+}
+
+# Check that local tracepoints work
+assert_equal 'true', %q{
+ def foo
+ 1
+ end
+
+ foo
+ foo
+ foo
+
+ called = false
+
+ tp = TracePoint.new(:return) { |_| called = true }
+ tp.enable(target: method(:foo))
+ foo
+ tp.disable
+ called
+}
+
+# Make sure that optional param methods return the correct value
+assert_equal '1', %q{
+ def m(ary = [])
+ yield(ary)
+ end
+
+ # Warm the JIT with a 0 param call
+ 2.times { m { } }
+ m(1) { |v| v }
+}
+
+# Test for topn
+assert_equal 'array', %q{
+ def threequals(a)
+ case a
+ when Array
+ "array"
+ when Hash
+ "hash"
+ else
+ "unknown"
+ end
+ end
+
+ threequals([])
+ threequals([])
+ threequals([])
+}
+
+# Test for opt_mod
+assert_equal '2', %q{
+ def mod(a, b)
+ a % b
+ end
+
+ mod(7, 5)
+ mod(7, 5)
+}
+
+# Test for opt_mult
+assert_equal '12', %q{
+ def mult(a, b)
+ a * b
+ end
+
+ mult(6, 2)
+ mult(6, 2)
+}
+
+# Test for opt_div
+assert_equal '3', %q{
+ def div(a, b)
+ a / b
+ end
+
+ div(6, 2)
+ div(6, 2)
+}
+
+# BOP redefined methods work when JIT compiled
+assert_equal 'false', %q{
+ def less_than x
+ x < 10
+ end
+
+ class Integer
+ def < x
+ false
+ end
+ end
+
+ less_than 2
+ less_than 2
+ less_than 2
+}
+
+# BOP redefinition works on Integer#<
+assert_equal 'false', %q{
+ def less_than x
+ x < 10
+ end
+
+ less_than 2
+ less_than 2
+
+ class Integer
+ def < x
+ false
+ end
+ end
+
+ less_than 2
+}
+
+# Putobject, less-than operator, fixnums
+assert_equal '2', %q{
+ def check_index(index)
+ if 0x40000000 < index
+ raise "wat? #{index}"
+ end
+ index
+ end
+ check_index 2
+ check_index 2
+}
+
+# foo leaves a temp on the stack before the call
+assert_equal '6', %q{
+ def bar
+ return 5
+ end
+
+ def foo
+ return 1 + bar
+ end
+
+ foo()
+ retval = foo()
+}
+
+# Method with one argument
+# foo leaves a temp on the stack before the call
+assert_equal '7', %q{
+ def bar(a)
+ return a + 1
+ end
+
+ def foo
+ return 1 + bar(5)
+ end
+
+ foo()
+ retval = foo()
+}
+
+# Method with two arguments
+# foo leaves a temp on the stack before the call
+assert_equal '0', %q{
+ def bar(a, b)
+ return a - b
+ end
+
+ def foo
+ return 1 + bar(1, 2)
+ end
+
+ foo()
+ retval = foo()
+}
+
+# Passing argument types to callees
+assert_equal '8.5', %q{
+ def foo(x, y)
+ x + y
+ end
+
+ def bar
+ foo(7, 1.5)
+ end
+
+ bar
+ bar
+}
+
+# Recursive Ruby-to-Ruby calls
+assert_equal '21', %q{
+ def fib(n)
+ if n < 2
+ return n
+ end
+
+ return fib(n-1) + fib(n-2)
+ end
+
+ r = fib(8)
+}
+
+# Ruby-to-Ruby call and C call
+assert_normal_exit %q{
+ def bar
+ puts('hi!')
+ end
+
+ def foo
+ bar
+ end
+
+ foo()
+ foo()
+}
+
+# Method aliasing
+assert_equal '42', %q{
+ class Foo
+ def method_a
+ 42
+ end
+
+ alias method_b method_a
+
+ def method_a
+ :somethingelse
+ end
+ end
+
+ @obj = Foo.new
+
+ def test
+ @obj.method_b
+ end
+
+ test
+ test
+}
+
+# Method aliasing with method from parent class
+assert_equal '777', %q{
+ class A
+ def method_a
+ 777
+ end
+ end
+
+ class B < A
+ alias method_b method_a
+ end
+
+ @obj = B.new
+
+ def test
+ @obj.method_b
+ end
+
+ test
+ test
+}
+
+# The hash method is a C function and uses the self argument
+assert_equal 'true', %q{
+ def lehashself
+ hash
+ end
+
+ a = lehashself
+ b = lehashself
+ a == b
+}
+
+# Method redefinition (code invalidation) test
+assert_equal '1', %q{
+ def ret1
+ return 1
+ end
+
+ klass = Class.new do
+ def alias_then_hash(klass, method_to_redefine)
+ # Redefine the method to be ret1
+ klass.alias_method(method_to_redefine, :ret1)
+ hash
+ end
+ end
+
+ instance = klass.new
+
+ i = 0
+ while i < 12
+ if i < 11
+ # Redefine the bar method
+ instance.alias_then_hash(klass, :bar)
+ else
+ # Redefine the hash method to be ret1
+ retval = instance.alias_then_hash(klass, :hash)
+ end
+ i += 1
+ end
+
+ retval
+}
+
+# Code invalidation and opt_getinlinecache
+assert_normal_exit %q{
+ class Foo; end
+
+ # Uses the class constant Foo
+ def use_constant(arg)
+ [Foo.new, arg]
+ end
+
+ def propagate_type
+ i = Array.new
+ i.itself # make it remember that i is on-heap
+ use_constant(i)
+ end
+
+ propagate_type
+ propagate_type
+ use_constant(Foo.new)
+ class Jo; end # bump global constant state
+ use_constant(3)
+}
+
+# Method redefinition (code invalidation) and GC
+assert_equal '7', %q{
+ def bar()
+ return 5
+ end
+
+ def foo()
+ bar()
+ end
+
+ foo()
+ foo()
+
+ def bar()
+ return 7
+ end
+
+ 4.times { GC.start }
+
+ foo()
+ foo()
+}
+
+# Method redefinition with two block versions
+assert_equal '7', %q{
+ def bar()
+ return 5
+ end
+
+ def foo(n)
+ return ((n < 5)? 5:false), bar()
+ end
+
+ foo(4)
+ foo(4)
+ foo(10)
+ foo(10)
+
+ def bar()
+ return 7
+ end
+
+ 4.times { GC.start }
+
+ foo(4)
+ foo(4)[1]
+}
+
+# Method redefinition while the method is on the stack
+assert_equal '[777, 1]', %q{
+ def foo
+ redef()
+ 777
+ end
+
+ def redef
+ # Redefine the global foo
+ eval("def foo; 1; end", TOPLEVEL_BINDING)
+
+ # Collect dead code
+ GC.stress = true
+ GC.start
+
+ # But we will return to the original foo,
+ # which remains alive because it's on the stack
+ end
+
+ # Must produce [777, 1]
+ [foo, foo]
+}
+
+# Test for GC safety. Don't invalidate dead iseqs.
+assert_normal_exit %q{
+ Class.new do
+ def foo
+ itself
+ end
+
+ new.foo
+ new.foo
+ new.foo
+ new.foo
+ end
+
+ 4.times { GC.start }
+ def itself
+ self
+ end
+}
+
+# test setinstancevariable on extended objects
+assert_equal '1', %q{
+ class Extended
+ attr_reader :one
+
+ def write_many
+ @a = 1
+ @b = 2
+ @c = 3
+ @d = 4
+ @one = 1
+ end
+ end
+
+ foo = Extended.new
+ foo.write_many
+ foo.write_many
+ foo.write_many
+}
+
+# test setinstancevariable on embedded objects
+assert_equal '1', %q{
+ class Embedded
+ attr_reader :one
+
+ def write_one
+ @one = 1
+ end
+ end
+
+ foo = Embedded.new
+ foo.write_one
+ foo.write_one
+ foo.write_one
+}
+
+# test setinstancevariable after extension
+assert_equal '[10, 11, 12, 13, 1]', %q{
+ class WillExtend
+ attr_reader :one
+
+ def make_extended
+ @foo1 = 10
+ @foo2 = 11
+ @foo3 = 12
+ @foo4 = 13
+ end
+
+ def write_one
+ @one = 1
+ end
+
+ def read_all
+ [@foo1, @foo2, @foo3, @foo4, @one]
+ end
+ end
+
+ foo = WillExtend.new
+ foo.write_one
+ foo.write_one
+ foo.make_extended
+ foo.write_one
+ foo.read_all
+}
+
+# test setinstancevariable on frozen object
+assert_equal 'object was not modified', %q{
+ class WillFreeze
+ def write
+ @ivar = 1
+ end
+ end
+
+ wf = WillFreeze.new
+ wf.write
+ wf.write
+ wf.freeze
+
+ begin
+ wf.write
+ rescue FrozenError
+ "object was not modified"
+ end
+}
+
+# Test getinstancevariable and inline caches
+assert_equal '6', %q{
+ class Foo
+ def initialize
+ @x1 = 1
+ @x2 = 1
+ @x2 = 1
+ @x3 = 1
+ @x4 = 3
+ end
+
+ def bar
+ x = 1
+ @x4 + @x4
+ end
+ end
+
+ f = Foo.new
+ f.bar
+ f.bar
+}
+
+# Test that getinstancevariable codegen checks for extended table size
+assert_equal "nil\n", %q{
+ class A
+ def read
+ @ins1000
+ end
+ end
+
+ ins = A.new
+ other = A.new
+ 10.times { other.instance_variable_set(:"@otr#{_1}", 'value') }
+ 1001.times { ins.instance_variable_set(:"@ins#{_1}", 'value') }
+
+ ins.read
+ ins.read
+ ins.read
+
+ p other.read
+}
+
+# Test that opt_aref checks the class of the receiver
+assert_equal 'special', %q{
+ def foo(array)
+ array[30]
+ end
+
+ foo([])
+ foo([])
+
+ special = []
+ def special.[](idx)
+ 'special'
+ end
+
+ foo(special)
+}
+
+# Test that object references in generated code get marked and moved
+assert_equal "good", %q{
+ def bar
+ "good"
+ end
+
+ def foo
+ bar
+ end
+
+ foo
+ foo
+
+ begin
+ GC.verify_compaction_references(expand_heap: true, toward: :empty)
+ rescue NotImplementedError
+ # in case compaction isn't supported
+ end
+
+ foo
+}
+
+# Test polymorphic getinstancevariable. T_OBJECT -> T_STRING
+assert_equal 'ok', %q{
+ @hello = @h1 = @h2 = @h3 = @h4 = 'ok'
+ str = ""
+ str.instance_variable_set(:@hello, 'ok')
+
+ public def get
+ @hello
+ end
+
+ get
+ get
+ str.get
+ str.get
+}
+
+# Test polymorphic getinstancevariable, two different classes
+assert_equal 'ok', %q{
+ class Embedded
+ def initialize
+ @ivar = 0
+ end
+
+ def get
+ @ivar
+ end
+ end
+
+ class Extended < Embedded
+ def initialize
+ @v1 = @v2 = @v3 = @v4 = @ivar = 'ok'
+ end
+ end
+
+ embed = Embedded.new
+ extend = Extended.new
+
+ embed.get
+ embed.get
+ extend.get
+ extend.get
+}
+
+# Test megamorphic getinstancevariable
+assert_equal 'ok', %q{
+ parent = Class.new do
+ def initialize
+ @hello = @h1 = @h2 = @h3 = @h4 = 'ok'
+ end
+
+ def get
+ @hello
+ end
+ end
+
+ subclasses = 300.times.map { Class.new(parent) }
+ subclasses.each { _1.new.get }
+ parent.new.get
+}
+
+# Test polymorphic opt_aref. array -> hash
+assert_equal '[42, :key]', %q{
+ def index(obj, idx)
+ obj[idx]
+ end
+
+ index([], 0) # get over compilation threshold
+
+ [
+ index([42], 0),
+ index({0=>:key}, 0),
+ ]
+}
+
+# Test polymorphic opt_aref. hash -> array -> custom class
+assert_equal '[nil, nil, :custom]', %q{
+ def index(obj, idx)
+ obj[idx]
+ end
+
+ custom = Object.new
+ def custom.[](_idx)
+ :custom
+ end
+
+ index({}, 0) # get over compilation threshold
+
+ [
+ index({}, 0),
+ index([], 0),
+ index(custom, 0)
+ ]
+}
+
+# Test polymorphic opt_aref. array -> custom class
+assert_equal '[42, :custom]', %q{
+ def index(obj, idx)
+ obj[idx]
+ end
+
+ custom = Object.new
+ def custom.[](_idx)
+ :custom
+ end
+
+ index([], 0) # get over compilation threshold
+
+ [
+ index([42], 0),
+ index(custom, 0)
+ ]
+}
+
+# Test custom hash method with opt_aref
+assert_equal '[nil, :ok]', %q{
+ def index(obj, idx)
+ obj[idx]
+ end
+
+ custom = Object.new
+ def custom.hash
+ 42
+ end
+
+ h = {custom => :ok}
+
+ [
+ index(h, 0),
+ index(h, custom)
+ ]
+}
+
+# Test default value block for Hash with opt_aref
+assert_equal '[42, :default]', %q{
+ def index(obj, idx)
+ obj[idx]
+ end
+
+ h = Hash.new { :default }
+ h[0] = 42
+
+ [
+ index(h, 0),
+ index(h, 1)
+ ]
+}
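+
+# Sketch of the Hash default block semantics relied on above: the block's
+# result is returned for a missing key but not stored in the hash.
+assert_equal '[:default, false]', %q{
+  h = Hash.new { :default }
+  [h[:missing], h.key?(:missing)]
+}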
+
+# A regression test for making sure cfp->sp is proper when
+# hitting stubs. See :stub-sp-flush:
+assert_equal 'ok', %q{
+ class D
+ def foo
+ Object.new
+ end
+ end
+
+ GC.stress = true
+ 10.times do
+ D.new.foo
+ # ^
+ # This hits a stub with sp_offset > 0
+ end
+
+ :ok
+}
+
+# Test polymorphic callsite, cfunc -> iseq
+assert_equal '[Cfunc, Iseq]', %q{
+ public def call_itself
+ itself # the polymorphic callsite
+ end
+
+ class Cfunc; end
+
+ class Iseq
+ def itself
+ self
+ end
+ end
+
+ call_itself # cross threshold
+
+ [Cfunc.call_itself, Iseq.call_itself]
+}
+
+# Test polymorphic callsite, iseq -> cfunc
+assert_equal '[Iseq, Cfunc]', %q{
+ public def call_itself
+ itself # the polymorphic callsite
+ end
+
+ class Cfunc; end
+
+ class Iseq
+ def itself
+ self
+ end
+ end
+
+ call_itself # cross threshold
+
+ [Iseq.call_itself, Cfunc.call_itself]
+}
+
+# attr_reader method
+assert_equal '[100, 299]', %q{
+ class A
+ attr_reader :foo
+
+ def initialize
+ @foo = 100
+ end
+
+ # Make it extended
+ def fill!
+ @bar = @jojo = @as = @sdfsdf = @foo = 299
+ end
+ end
+
+ def bar(ins)
+ ins.foo
+ end
+
+ ins = A.new
+ oth = A.new
+ oth.fill!
+
+ bar(ins)
+ bar(oth)
+
+ [bar(ins), bar(oth)]
+}
+
+# get ivar on object, then on hash
+assert_equal '[42, 100]', %q{
+ class Hash
+ attr_accessor :foo
+ end
+
+ class A
+ attr_reader :foo
+
+ def initialize
+ @foo = 42
+ end
+ end
+
+ def use(val)
+ val.foo
+ end
+
+ h = {}
+ h.foo = 100
+ obj = A.new
+
+ use(obj)
+ [use(obj), use(h)]
+}
+
+# get ivar on String
+assert_equal '[nil, nil, 42, 42]', %q{
+ # @foo to exercise the getinstancevariable instruction
+ public def get_foo
+ @foo
+ end
+
+ get_foo
+ get_foo # compile it for the top level object
+
+ class String
+ attr_reader :foo
+ end
+
+ def run
+ str = String.new
+
+ getter = str.foo
+ insn = str.get_foo
+
+ str.instance_variable_set(:@foo, 42)
+
+ [getter, insn, str.foo, str.get_foo]
+ end
+
+ run
+ run
+}
+
+# splatting an empty array on a getter
+assert_equal '42', %q{
+ @foo = 42
+ module Kernel
+ attr_reader :foo
+ end
+
+ def run
+ foo(*[])
+ end
+
+ run
+ run
+}
+
+# splatting an empty array on a specialized method
+assert_equal 'ok', %q{
+ def run
+ "ok".to_s(*[])
+ end
+
+ run
+ run
+}
+
+# splatting a single-element array on a specialized method
+assert_equal '[1]', %q{
+ def run
+ [].<<(*[1])
+ end
+
+ run
+ run
+}
+
+# specialized method with wrong args
+assert_equal 'ok', %q{
+ def run(x)
+ "bad".to_s(123) if x
+ rescue
+ :ok
+ end
+
+ run(false)
+ run(true)
+}
+
+# getinstancevariable on Symbol
+assert_equal '[nil, nil]', %q{
+ # @foo to exercise the getinstancevariable instruction
+ public def get_foo
+ @foo
+ end
+
+ dyn_sym = ("a" + "b").to_sym
+ sym = :static
+
+ # compile get_foo
+ dyn_sym.get_foo
+ dyn_sym.get_foo
+
+ [dyn_sym.get_foo, sym.get_foo]
+}
+
+# attr_reader on Symbol
+assert_equal '[nil, nil]', %q{
+ class Symbol
+ attr_reader :foo
+ end
+
+ public def get_foo
+ foo
+ end
+
+ dyn_sym = ("a" + "b").to_sym
+ sym = :static
+
+ # compile get_foo
+ dyn_sym.get_foo
+ dyn_sym.get_foo
+
+ [dyn_sym.get_foo, sym.get_foo]
+}
+
+# passing too few arguments to method with optional parameters
+assert_equal 'raised', %q{
+ def opt(a, b = 0)
+ end
+
+ def use
+ opt
+ end
+
+ use rescue nil
+ begin
+ use
+ :ng
+ rescue ArgumentError
+ :raised
+ end
+}
+
+# passing too many arguments to method with optional parameters
+assert_equal 'raised', %q{
+ def opt(a, b = 0)
+ end
+
+ def use
+ opt(1, 2, 3, 4)
+ end
+
+ use rescue nil
+ begin
+ use
+ :ng
+ rescue ArgumentError
+ :raised
+ end
+}
+
+# test calling Ruby method with a block
+assert_equal '[1, 2, 42]', %q{
+ def thing(a, b)
+ [a, b, yield]
+ end
+
+ def use
+ thing(1,2) { 42 }
+ end
+
+ use
+ use
+}
+
+# test calling C method with a block
+assert_equal '[42, 42]', %q{
+ def use(array, initial)
+ array.reduce(initial) { |a, b| a + b }
+ end
+
+ use([], 0)
+ [use([2, 2], 38), use([14, 14, 14], 0)]
+}
+
+# test calling block param
+assert_equal '[1, 2, 42]', %q{
+ def foo(&block)
+ block.call
+ end
+
+ [foo {1}, foo {2}, foo {42}]
+}
+
+# test calling without block param
+assert_equal '[1, false, 2, false]', %q{
+ def bar
+ block_given? && yield
+ end
+
+ def foo(&block)
+ bar(&block)
+ end
+
+ [foo { 1 }, foo, foo { 2 }, foo]
+}
+
+# test calling the block param when no block is given
+assert_equal '42', %q{
+ def foo(&block)
+ block.call
+ end
+
+ foo {} # warmup
+
+ begin
+ foo
+ rescue NoMethodError => e
+ 42 if nil == e.receiver
+ end
+}
+
+# test calling method taking block param
+assert_equal '[Proc, 1, 2, 3, Proc]', %q{
+ def three(a, b, c, &block)
+ [a, b, c, block.class]
+ end
+
+ def zero(&block)
+ block.class
+ end
+
+ def use_three
+ three(1, 2, 3) {}
+ end
+
+ def use_zero
+ zero {}
+ end
+
+ use_three
+ use_zero
+
+ [use_zero] + use_three
+}
+
+# test building empty array
+assert_equal '[]', %q{
+ def build_arr
+ []
+ end
+
+ build_arr
+ build_arr
+}
+
+# test building array of one element
+assert_equal '[5]', %q{
+ def build_arr(val)
+ [val]
+ end
+
+ build_arr(5)
+ build_arr(5)
+}
+
+# test building array of several elements
+assert_equal '[5, 5, 5, 5, 5]', %q{
+ def build_arr(val)
+ [val, val, val, val, val]
+ end
+
+ build_arr(5)
+ build_arr(5)
+}
+
+# test building empty hash
+assert_equal '{}', %q{
+ def build_hash
+ {}
+ end
+
+ build_hash
+ build_hash
+}
+
+# test building hash with values
+assert_equal '{:foo=>:bar}', %q{
+ def build_hash(val)
+ { foo: val }
+ end
+
+ build_hash(:bar)
+ build_hash(:bar)
+}
+
+# test string interpolation with known types
+assert_equal 'foobar', %q{
+ def make_str
+ foo = -"foo"
+ bar = -"bar"
+ "#{foo}#{bar}"
+ end
+
+ make_str
+ make_str
+}
+
+# test string interpolation with unknown types
+assert_equal 'foobar', %q{
+ def make_str(foo, bar)
+ "#{foo}#{bar}"
+ end
+
+ make_str("foo", "bar")
+ make_str("foo", "bar")
+}
+
+# test string interpolation with known non-strings
+assert_equal 'foo123', %q{
+ def make_str
+ foo = -"foo"
+ bar = 123
+ "#{foo}#{bar}"
+ end
+
+ make_str
+ make_str
+}
+
+# test string interpolation with unknown non-strings
+assert_equal 'foo123', %q{
+ def make_str(foo, bar)
+ "#{foo}#{bar}"
+ end
+
+ make_str("foo", 123)
+ make_str("foo", 123)
+}
+
+# test that invalidation of String#to_s doesn't crash
+assert_equal 'meh', %q{
+ def inval_method
+ "".to_s
+ end
+
+ inval_method
+
+ class String
+ def to_s
+ "meh"
+ end
+ end
+
+ inval_method
+}
+
+# test that overriding to_s on a String subclass works consistently
+assert_equal 'meh', %q{
+ class MyString < String
+ def to_s
+ "meh"
+ end
+ end
+
+ def test_to_s(obj)
+ obj.to_s
+ end
+
+ OBJ = MyString.new
+
+ # Should return '' both times
+ test_to_s("")
+ test_to_s("")
+
+ # Can return '' if YJIT optimises String#to_s too aggressively
+ test_to_s(OBJ)
+ test_to_s(OBJ)
+}
+
+# test string interpolation with overridden to_s
+assert_equal 'foo', %q{
+ class String
+ def to_s
+ "bad"
+ end
+ end
+
+ def make_str(foo)
+ "#{foo}"
+ end
+
+ make_str("foo")
+ make_str("foo")
+}
+
+# Test that String unary plus returns the same object ID for an unfrozen string.
+assert_equal 'true', %q{
+ def jittable_method
+ str = "bar"
+
+ old_obj_id = str.object_id
+ uplus_str = +str
+
+ uplus_str.object_id == old_obj_id
+ end
+ jittable_method
+}
+
+# Test that String unary plus returns a different unfrozen string when given a frozen string
+assert_equal 'false', %q{
+ # Logic needs to be inside an ISEQ, such as a method, for YJIT to compile it
+ def jittable_method
+ frozen_str = "foo".freeze
+
+ old_obj_id = frozen_str.object_id
+ uplus_str = +frozen_str
+
+ uplus_str.object_id == old_obj_id || uplus_str.frozen?
+ end
+
+ jittable_method
+}
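+
+# Recap of both unary operators as a sketch: +str is a no-op on an unfrozen
+# string and a dup of a frozen one; -str always returns a frozen string.
+assert_equal '[true, false, true]', %q{
+  s = 'x'.dup
+  f = 'x'.freeze
+  [(+s).equal?(s), (+f).equal?(f), (-s).frozen?]
+}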
+
+# String-subclass objects should behave as expected inside string-interpolation via concatstrings
+assert_equal 'monkeys / monkeys, yo!', %q{
+ class MyString < String
+ # This is a terrible idea in production code, but we'd like YJIT to match CRuby
+ def to_s
+ super + ", yo!"
+ end
+ end
+
+ def jittable_method
+ m = MyString.new('monkeys')
+ "#{m} / #{m.to_s}"
+ end
+
+ jittable_method
+}
+
+# String-subclass objects should behave as expected for string equality
+assert_equal 'false', %q{
+ class MyString < String
+ # This is a terrible idea in production code, but we'd like YJIT to match CRuby
+ def ==(b)
+ "#{self}_" == b
+ end
+ end
+
+ def jittable_method
+ ma = MyString.new("a")
+
+ # Check equality with string-subclass receiver
+ ma == "a" || ma != "a_" ||
+ # Check equality with string receiver
+ "a_" == ma || "a" != ma ||
+ # Check equality between string subclasses
+ ma != MyString.new("a_") ||
+ # Make sure "string always equals itself" check isn't used with overridden equality
+ ma == ma
+ end
+ jittable_method
+}
+
+# Test to_s duplicates a string subclass object but not a string
+assert_equal 'false', %q{
+ class MyString < String; end
+
+ def jittable_method
+ a = "a"
+ ma = MyString.new("a")
+
+ a.object_id != a.to_s.object_id ||
+ ma.object_id == ma.to_s.object_id
+ end
+ jittable_method
+}
+
+# Test freeze on string subclass
+assert_equal 'true', %q{
+ class MyString < String; end
+
+ def jittable_method
+ fma = MyString.new("a").freeze
+
+ # Freezing a string subclass should not duplicate it
+ fma.object_id == fma.freeze.object_id
+ end
+ jittable_method
+}
+
+# Test unary minus on string subclass
+assert_equal 'true', %q{
+ class MyString < String; end
+
+ def jittable_method
+ ma = MyString.new("a")
+ fma = MyString.new("a").freeze
+
+ # Unary minus on frozen string subclass should not duplicate it
+ fma.object_id == (-fma).object_id &&
+ # Unary minus on unfrozen string subclass should duplicate it
+ ma.object_id != (-ma).object_id
+ end
+ jittable_method
+}
+
+# Test unary plus on string subclass
+assert_equal 'true', %q{
+ class MyString < String; end
+
+ def jittable_method
+ fma = MyString.new("a").freeze
+
+ # Unary plus on frozen string subclass should not duplicate it
+ fma.object_id != (+fma).object_id
+ end
+ jittable_method
+}
+
+# Test << operator on string subclass
+assert_equal 'abab', %q{
+ class MyString < String; end
+
+ def jittable_method
+ a = -"a"
+ mb = MyString.new("b")
+
+ buf = String.new
+ mbuf = MyString.new
+
+ buf << a << mb
+ mbuf << a << mb
+
+ buf + mbuf
+ end
+ jittable_method
+}
+
+# test invokebuiltin as used in struct assignment
+assert_equal '123', %q{
+ def foo(obj)
+ obj.foo = 123
+ end
+
+ struct = Struct.new(:foo)
+ obj = struct.new
+ foo(obj)
+ foo(obj)
+}
+
+# test invokebuiltin_delegate as used inside Dir.open
+assert_equal '.', %q{
+ def foo(path)
+ Dir.open(path).path
+ end
+
+ foo(".")
+ foo(".")
+}
+
+# test invokebuiltin_delegate_leave in method called from jit
+assert_normal_exit %q{
+ def foo(obj)
+ obj.clone
+ end
+
+ foo(Object.new)
+ foo(Object.new)
+}
+
+# test invokebuiltin_delegate_leave in method called from cfunc
+assert_normal_exit %q{
+ def foo(obj)
+ [obj].map(&:clone)
+ end
+
+ foo(Object.new)
+ foo(Object.new)
+}
+
+# defining TrueClass#!
+assert_equal '[false, false, :ok]', %q{
+ def foo(obj)
+ !obj
+ end
+
+ x = foo(true)
+ y = foo(true)
+
+ class TrueClass
+ def !
+ :ok
+ end
+ end
+
+ z = foo(true)
+
+ [x, y, z]
+}
+
+# defining FalseClass#!
+assert_equal '[true, true, :ok]', %q{
+ def foo(obj)
+ !obj
+ end
+
+ x = foo(false)
+ y = foo(false)
+
+ class FalseClass
+ def !
+ :ok
+ end
+ end
+
+ z = foo(false)
+
+ [x, y, z]
+}
+
+# defining NilClass#!
+assert_equal '[true, true, :ok]', %q{
+ def foo(obj)
+ !obj
+ end
+
+ x = foo(nil)
+ y = foo(nil)
+
+ class NilClass
+ def !
+ :ok
+ end
+ end
+
+ z = foo(nil)
+
+ [x, y, z]
+}
+
+# polymorphic opt_not
+assert_equal '[true, true, false, false, false, false, false]', %q{
+ def foo(obj)
+ !obj
+ end
+
+ foo(0)
+ [foo(nil), foo(false), foo(true), foo([]), foo(0), foo(4.2), foo(:sym)]
+}
+
+# getlocal with 2 levels
+assert_equal '7', %q{
+ def foo(foo, bar)
+ while foo > 0
+ while bar > 0
+ return foo + bar
+ end
+ end
+ end
+
+ foo(5,2)
+ foo(5,2)
+}
+
+# test pattern matching
+assert_equal '[:ok, :ok]', %q{
+ class C
+ def destructure_keys
+ {}
+ end
+ end
+
+ pattern_match = ->(i) do
+ case i
+ in a: 0
+ :ng
+ else
+ :ok
+ end
+ end
+
+ [{}, C.new].map(&pattern_match)
+}
+
+# Call to object with singleton
+assert_equal '123', %q{
+ obj = Object.new
+ def obj.foo
+ 123
+ end
+
+ def foo(obj)
+ obj.foo()
+ end
+
+ foo(obj)
+ foo(obj)
+}
+
+# Call method on an object that has a non-material
+# singleton class.
+# TODO: assert that it takes no side exits? This
+# test case revealed that we were taking exits unnecessarily.
+assert_normal_exit %q{
+ def foo(obj)
+ obj.itself
+ end
+
+ o = Object.new.singleton_class
+ foo(o)
+ foo(o)
+}
+
+# Call to singleton class
+assert_equal '123', %q{
+ class Foo
+ def self.foo
+ 123
+ end
+ end
+
+ def foo(obj)
+ obj.foo()
+ end
+
+ foo(Foo)
+ foo(Foo)
+}
+
+# invokesuper edge case
+assert_equal '[:A, [:A, :B]]', %q{
+ class B
+ def foo = :B
+ end
+
+ class A < B
+ def foo = [:A, super()]
+ end
+
+ A.new.foo
+ A.new.foo # compile A#foo
+
+ class C < A
+ define_method(:bar, A.instance_method(:foo))
+ end
+
+ C.new.bar
+}
+
+# Same invokesuper bytecode, multiple destinations
+assert_equal '[:Forward, :SecondTerminus]', %q{
+ module Terminus
+ def foo = :Terminus
+ end
+
+ module SecondTerminus
+ def foo = :SecondTerminus
+ end
+
+
+ module Forward
+ def foo = [:Forward, super]
+ end
+
+ class B
+ include SecondTerminus
+ end
+
+ class A < B
+ include Terminus
+ include Forward
+ end
+
+ A.new.foo
+ A.new.foo # compile
+
+ class B
+ include Forward
+ alias bar foo
+ end
+
+ # A.ancestors.take(5) == [A, Forward, Terminus, B, Forward, SecondTerminus]
+
+ A.new.bar
+}
+
+# invokesuper calling into itself
+assert_equal '[:B, [:B, :m]]', %q{
+ module M
+ def foo = :m
+ end
+
+ class B
+ include M
+ def foo = [:B, super]
+ end
+
+ ins = B.new
+ ins.singleton_class # materialize the singleton class
+ ins.foo
+ ins.foo # compile
+
+ ins.singleton_class.define_method(:bar, B.instance_method(:foo))
+ ins.bar
+}
+
+# invokesuper changed ancestor
+assert_equal '[:A, [:M, :B]]', %q{
+ class B
+ def foo
+ :B
+ end
+ end
+
+ class A < B
+ def foo
+ [:A, super]
+ end
+ end
+
+ module M
+ def foo
+ [:M, super]
+ end
+ end
+
+ ins = A.new
+ ins.foo
+ ins.foo
+ A.include(M)
+ ins.foo
+}
+
+# invokesuper changed ancestor via prepend
+assert_equal '[:A, [:M, :B]]', %q{
+ class B
+ def foo
+ :B
+ end
+ end
+
+ class A < B
+ def foo
+ [:A, super]
+ end
+ end
+
+ module M
+ def foo
+ [:M, super]
+ end
+ end
+
+ ins = A.new
+ ins.foo
+ ins.foo
+ B.prepend(M)
+ ins.foo
+}
+
+# invokesuper replaced method
+assert_equal '[:A, :Btwo]', %q{
+ class B
+ def foo
+ :B
+ end
+ end
+
+ class A < B
+ def foo
+ [:A, super]
+ end
+ end
+
+ ins = A.new
+ ins.foo
+ ins.foo
+ class B
+ def foo
+ :Btwo
+ end
+ end
+ ins.foo
+}
+
+# Call to fixnum
+assert_equal '[true, false]', %q{
+ def is_odd(obj)
+ obj.odd?
+ end
+
+ is_odd(1)
+ is_odd(1)
+
+ [is_odd(123), is_odd(456)]
+}
+
+# Call to bignum
+assert_equal '[true, false]', %q{
+ def is_odd(obj)
+ obj.odd?
+ end
+
+ bignum = 99999999999999999999
+ is_odd(bignum)
+ is_odd(bignum)
+
+ [is_odd(bignum), is_odd(bignum+1)]
+}
+
+# Call to fixnum and bignum
+assert_equal '[true, false, true, false]', %q{
+ def is_odd(obj)
+ obj.odd?
+ end
+
+ bignum = 99999999999999999999
+ is_odd(bignum)
+ is_odd(bignum)
+ is_odd(123)
+ is_odd(123)
+
+ [is_odd(123), is_odd(456), is_odd(bignum), is_odd(bignum+1)]
+}
+
+# Call to static and dynamic symbol
+assert_equal 'bar', %q{
+ def to_string(obj)
+ obj.to_s
+ end
+
+ to_string(:foo)
+ to_string(:foo)
+ to_string((-"bar").to_sym)
+ to_string((-"bar").to_sym)
+}
+
+# Call to flonum and heap float
+assert_equal '[nil, nil, nil, 1]', %q{
+ def is_inf(obj)
+ obj.infinite?
+ end
+
+ is_inf(0.0)
+ is_inf(0.0)
+ is_inf(1e256)
+ is_inf(1e256)
+
+ [
+ is_inf(0.0),
+ is_inf(1.0),
+ is_inf(1e256),
+ is_inf(1.0/0.0)
+ ]
+}
+
+assert_equal '[1, 2, 3, 4, 5]', %q{
+ def splatarray
+ [*(1..5)]
+ end
+
+ splatarray
+ splatarray
+}
+
+assert_equal '[1, 1, 2, 1, 2, 3]', %q{
+ def expandarray
+ arr = [1, 2, 3]
+
+ a, = arr
+ b, c, = arr
+ d, e, f = arr
+
+ [a, b, c, d, e, f]
+ end
+
+ expandarray
+ expandarray
+}
+
+assert_equal '[1, 1]', %q{
+ def expandarray_useless_splat
+ arr = (1..10).to_a
+
+ a, * = arr
+ b, (*) = arr
+
+ [a, b]
+ end
+
+ expandarray_useless_splat
+ expandarray_useless_splat
+}
+
+assert_equal '[:not_heap, nil, nil]', %q{
+ def expandarray_not_heap
+ a, b, c = :not_heap
+ [a, b, c]
+ end
+
+ expandarray_not_heap
+ expandarray_not_heap
+}
+
+assert_equal '[:not_array, nil, nil]', %q{
+ def expandarray_not_array(obj)
+ a, b, c = obj
+ [a, b, c]
+ end
+
+ obj = Object.new
+ def obj.to_ary
+ [:not_array]
+ end
+
+ expandarray_not_array(obj)
+ expandarray_not_array(obj)
+}
+
+assert_equal '[1, 2, nil]', %q{
+ def expandarray_rhs_too_small
+ a, b, c = [1, 2]
+ [a, b, c]
+ end
+
+ expandarray_rhs_too_small
+ expandarray_rhs_too_small
+}
+
+assert_equal '[1, [2]]', %q{
+ def expandarray_splat
+ a, *b = [1, 2]
+ [a, b]
+ end
+
+ expandarray_splat
+ expandarray_splat
+}
+
+assert_equal '2', %q{
+ def expandarray_postarg
+ *, a = [1, 2]
+ a
+ end
+
+ expandarray_postarg
+ expandarray_postarg
+}
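+
+# Sketch: a splat between other targets collects the middle, so the post-arg
+# binds from the end of the array.
+assert_equal '[1, [2, 3], 4]', %q{
+  def expandarray_mid_splat
+    a, *b, c = [1, 2, 3, 4]
+    [a, b, c]
+  end
+
+  expandarray_mid_splat
+  expandarray_mid_splat
+}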
+
+assert_equal '10', %q{
+ obj = Object.new
+ val = nil
+ obj.define_singleton_method(:to_ary) { val = 10; [] }
+
+ def expandarray_always_call_to_ary(object)
+ * = object
+ end
+
+ expandarray_always_call_to_ary(obj)
+ expandarray_always_call_to_ary(obj)
+
+ val
+}
+
+# regression test of local type change
+assert_equal '1.1', %q{
+def bar(baz, quux)
+ if baz.integer?
+ baz, quux = quux, nil
+ end
+ baz.to_s
+end
+
+bar(123, 1.1)
+bar(123, 1.1)
+}
+
+# test enabling a line TracePoint in a C method call
+assert_equal '[[:line, true]]', %q{
+ events = []
+ events.instance_variable_set(
+ :@tp,
+ TracePoint.new(:line) { |tp| events << [tp.event, tp.lineno] if tp.path == __FILE__ }
+ )
+ def events.to_str
+ @tp.enable; ''
+ end
+
+ # Stay in generated code while enabling tracing
+ def events.compiled(obj)
+ String(obj)
+ @tp.disable; __LINE__
+ end
+
+ line = events.compiled(events)
+ events[0][-1] = (events[0][-1] == line)
+
+ events
+}
+
+# test enabling a c_return TracePoint in a C method call
+assert_equal '[[:c_return, :String, :string_alias, "events_to_str"]]', %q{
+ events = []
+ events.instance_variable_set(:@tp, TracePoint.new(:c_return) { |tp| events << [tp.event, tp.method_id, tp.callee_id, tp.return_value] })
+ def events.to_str
+ @tp.enable; 'events_to_str'
+ end
+
+ # Stay in generated code while enabling tracing
+ alias string_alias String
+ def events.compiled(obj)
+ string_alias(obj)
+ @tp.disable
+ end
+
+ events.compiled(events)
+
+ events
+} unless defined?(RubyVM::MJIT) && RubyVM::MJIT.enabled? # MJIT calls extra Ruby methods
+
+# test enabling a TracePoint that targets a particular line in a C method call
+assert_equal '[true]', %q{
+ events = []
+ events.instance_variable_set(:@tp, TracePoint.new(:line) { |tp| events << tp.lineno })
+ def events.to_str
+ @tp.enable(target: method(:compiled))
+ ''
+ end
+
+ # Stay in generated code while enabling tracing
+ def events.compiled(obj)
+ String(obj)
+ __LINE__
+ end
+
+ line = events.compiled(events)
+ events[0] = (events[0] == line)
+
+ events
+}
+
+# test enabling tracing in the middle of splatarray
+assert_equal '[true]', %q{
+ events = []
+ obj = Object.new
+ obj.instance_variable_set(:@tp, TracePoint.new(:line) { |tp| events << tp.lineno })
+ def obj.to_a
+ @tp.enable(target: method(:compiled))
+ []
+ end
+
+ # Enable tracing in the middle of the splatarray instruction
+ def obj.compiled(obj)
+ * = *obj
+ __LINE__
+ end
+
+ obj.compiled([])
+ line = obj.compiled(obj)
+ events[0] = (events[0] == line)
+
+ events
+}
+
+# test enabling tracing in the middle of opt_aref. Different since the codegen
+# for it ends in a jump.
+assert_equal '[true]', %q{
+ def lookup(hash, tp)
+ hash[42]
+ tp.disable; __LINE__
+ end
+
+ lines = []
+ tp = TracePoint.new(:line) { lines << _1.lineno if _1.path == __FILE__ }
+
+ lookup(:foo, tp)
+ lookup({}, tp)
+
+ enable_tracing_on_missing = Hash.new { tp.enable }
+
+ expected_line = lookup(enable_tracing_on_missing, tp)
+
+ lines[0] = true if lines[0] == expected_line
+
+ lines
+}
+
+# test enabling c_call tracing before compiling
+assert_equal '[[:c_call, :itself]]', %q{
+ def shouldnt_compile
+ itself
+ end
+
+ events = []
+ tp = TracePoint.new(:c_call) { |tp| events << [tp.event, tp.method_id] }
+
+ # assume first call compiles
+ tp.enable { shouldnt_compile }
+
+ events
+} unless defined?(RubyVM::MJIT) && RubyVM::MJIT.enabled? # MJIT calls extra Ruby methods
+
+# test enabling c_return tracing before compiling
+assert_equal '[[:c_return, :itself, main]]', %q{
+ def shouldnt_compile
+ itself
+ end
+
+ events = []
+ tp = TracePoint.new(:c_return) { |tp| events << [tp.event, tp.method_id, tp.return_value] }
+
+ # assume first call compiles
+ tp.enable { shouldnt_compile }
+
+ events
+} unless defined?(RubyVM::MJIT) && RubyVM::MJIT.enabled? # MJIT calls extra Ruby methods
+
+# test c_call invalidation
+assert_equal '[[:c_call, :itself]]', %q{
+ # enable the event once to make sure invalidation
+ # happens the second time we enable it
+ TracePoint.new(:c_call) {}.enable{}
+
+ def compiled
+ itself
+ end
+
+ # assume first call compiles
+ compiled
+
+ events = []
+ tp = TracePoint.new(:c_call) { |tp| events << [tp.event, tp.method_id] }
+ tp.enable { compiled }
+
+ events
+}
+
+# test enabling tracing for a suspended fiber
+assert_equal '[[:return, 42]]', %q{
+ def traced_method
+ Fiber.yield
+ 42
+ end
+
+ events = []
+ tp = TracePoint.new(:return) { events << [_1.event, _1.return_value] }
+ # assume first call compiles
+ fiber = Fiber.new { traced_method }
+ fiber.resume
+ tp.enable(target: method(:traced_method))
+ fiber.resume
+
+ events
+}
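+
+# Sketch of the targeted enable used above: TracePoint#enable(target:) scopes
+# the hook to a single method, so nothing else fires events.
+assert_equal '[:return]', %q{
+  def only_me = :done
+
+  events = []
+  tp = TracePoint.new(:return) { events << _1.event }
+  tp.enable(target: method(:only_me))
+  only_me
+  tp.disable
+  events
+}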
+
+# test compiling on non-tracing ractor then running on a tracing one
+assert_equal '[:itself]', %q{
+ def traced_method
+ itself
+ end
+
+ tracing_ractor = Ractor.new do
+ # 1: start tracing
+ events = []
+ tp = TracePoint.new(:c_call) { events << _1.method_id }
+ tp.enable
+ Ractor.yield(nil)
+
+ # 3: run compiled method on tracing ractor
+ Ractor.yield(nil)
+ traced_method
+
+ events
+ ensure
+ tp&.disable
+ end
+
+ tracing_ractor.take
+
+  # 2: compile on non-tracing ractor
+ traced_method
+
+ tracing_ractor.take
+ tracing_ractor.take
+}
+
+# Try to hit a lazy branch stub while another ractor enables tracing
+assert_equal '42', %q{
+ def compiled(arg)
+ if arg
+ arg + 1
+ else
+ itself
+ itself
+ end
+ end
+
+ ractor = Ractor.new do
+ compiled(false)
+ Ractor.yield(nil)
+ compiled(41)
+ end
+
+ tp = TracePoint.new(:line) { itself }
+ ractor.take
+ tp.enable
+
+ ractor.take
+}
+
+# Test equality with changing types
+assert_equal '[true, false, false, false]', %q{
+ def eq(a, b)
+ a == b
+ end
+
+ [
+ eq("foo", "foo"),
+ eq("foo", "bar"),
+ eq(:foo, "bar"),
+ eq("foo", :bar)
+ ]
+}
+
+# Redefined String eq
+assert_equal 'true', %q{
+ class String
+ def ==(other)
+ true
+ end
+ end
+
+ def eq(a, b)
+ a == b
+ end
+
+ eq("foo", "bar")
+ eq("foo", "bar")
+}
+
+# Redefined Integer eq
+assert_equal 'true', %q{
+ class Integer
+ def ==(other)
+ true
+ end
+ end
+
+ def eq(a, b)
+ a == b
+ end
+
+ eq(1, 2)
+ eq(1, 2)
+}
+
+# aset on array with invalid key
+assert_normal_exit %q{
+ def foo(arr)
+ arr[:foo] = 123
+ end
+
+ foo([1]) rescue nil
+ foo([1]) rescue nil
+}
+
+# test ractor exception when setting an ivar
+assert_equal '42', %q{
+ class A
+ def self.foo
+ _foo = 1
+ _bar = 2
+ begin
+ @bar = _foo + _bar
+ rescue Ractor::IsolationError
+ 42
+ end
+ end
+ end
+
+ A.foo
+ A.foo
+
+ Ractor.new { A.foo }.take
+}
+
+assert_equal '["plain", "special", "sub", "plain"]', %q{
+ def foo(arg)
+ arg.to_s
+ end
+
+ class Sub < String
+ end
+
+ special = String.new("special")
+ special.singleton_class
+
+ [
+ foo("plain"),
+ foo(special),
+ foo(Sub.new("sub")),
+ foo("plain")
+ ]
+}
+
+assert_equal '["sub", "sub"]', %q{
+ def foo(arg)
+ arg.to_s
+ end
+
+ class Sub < String
+ def to_s
+ super
+ end
+ end
+
+ sub = Sub.new("sub")
+
+ [foo(sub), foo(sub)]
+}
+
+assert_equal '[1]', %q{
+ def kwargs(value:)
+ value
+ end
+
+ 5.times.map { kwargs(value: 1) }.uniq
+}
+
+assert_equal '[:ok]', %q{
+ def kwargs(value:)
+ value
+ end
+
+ 5.times.map { kwargs() rescue :ok }.uniq
+}
+
+assert_equal '[:ok]', %q{
+ def kwargs(a:, b: nil)
+ value
+ end
+
+ 5.times.map { kwargs(b: 123) rescue :ok }.uniq
+}
+
+assert_equal '[[1, 2]]', %q{
+ def kwargs(left:, right:)
+ [left, right]
+ end
+
+ 5.times.flat_map do
+ [
+ kwargs(left: 1, right: 2),
+ kwargs(right: 2, left: 1)
+ ]
+ end.uniq
+}
+
+assert_equal '[[1, 2]]', %q{
+ def kwargs(lead, kwarg:)
+ [lead, kwarg]
+ end
+
+ 5.times.map { kwargs(1, kwarg: 2) }.uniq
+}
+
+# optional and keyword args
+assert_equal '[[1, 2, 3]]', %q{
+ def opt_and_kwargs(a, b=2, c: nil)
+ [a,b,c]
+ end
+
+ 5.times.map { opt_and_kwargs(1, c: 3) }.uniq
+}
+
+assert_equal '[[1, 2, 3]]', %q{
+ def opt_and_kwargs(a, b=nil, c: nil)
+ [a,b,c]
+ end
+
+ 5.times.map { opt_and_kwargs(1, 2, c: 3) }.uniq
+}
+
+# Bug #18453
+assert_equal '[[1, nil, 2]]', %q{
+ def opt_and_kwargs(a = {}, b: nil, c: nil)
+ [a, b, c]
+ end
+
+ 5.times.map { opt_and_kwargs(1, c: 2) }.uniq
+}
+
+assert_equal '[[{}, nil, 1]]', %q{
+ def opt_and_kwargs(a = {}, b: nil, c: nil)
+ [a, b, c]
+ end
+
+ 5.times.map { opt_and_kwargs(c: 1) }.uniq
+}
+
+# leading and keyword arguments are swapped into the right order
+assert_equal '[[1, 2, 3, 4, 5, 6]]', %q{
+ def kwargs(five, six, a:, b:, c:, d:)
+ [a, b, c, d, five, six]
+ end
+
+ 5.times.flat_map do
+ [
+ kwargs(5, 6, a: 1, b: 2, c: 3, d: 4),
+ kwargs(5, 6, a: 1, b: 2, d: 4, c: 3),
+ kwargs(5, 6, a: 1, c: 3, b: 2, d: 4),
+ kwargs(5, 6, a: 1, c: 3, d: 4, b: 2),
+ kwargs(5, 6, a: 1, d: 4, b: 2, c: 3),
+ kwargs(5, 6, a: 1, d: 4, c: 3, b: 2),
+ kwargs(5, 6, b: 2, a: 1, c: 3, d: 4),
+ kwargs(5, 6, b: 2, a: 1, d: 4, c: 3),
+ kwargs(5, 6, b: 2, c: 3, a: 1, d: 4),
+ kwargs(5, 6, b: 2, c: 3, d: 4, a: 1),
+ kwargs(5, 6, b: 2, d: 4, a: 1, c: 3),
+ kwargs(5, 6, b: 2, d: 4, c: 3, a: 1),
+ kwargs(5, 6, c: 3, a: 1, b: 2, d: 4),
+ kwargs(5, 6, c: 3, a: 1, d: 4, b: 2),
+ kwargs(5, 6, c: 3, b: 2, a: 1, d: 4),
+ kwargs(5, 6, c: 3, b: 2, d: 4, a: 1),
+ kwargs(5, 6, c: 3, d: 4, a: 1, b: 2),
+ kwargs(5, 6, c: 3, d: 4, b: 2, a: 1),
+ kwargs(5, 6, d: 4, a: 1, b: 2, c: 3),
+ kwargs(5, 6, d: 4, a: 1, c: 3, b: 2),
+ kwargs(5, 6, d: 4, b: 2, a: 1, c: 3),
+ kwargs(5, 6, d: 4, b: 2, c: 3, a: 1),
+ kwargs(5, 6, d: 4, c: 3, a: 1, b: 2),
+ kwargs(5, 6, d: 4, c: 3, b: 2, a: 1)
+ ]
+ end.uniq
+}
+
+# implicit hashes get skipped and don't break compilation
+assert_equal '[[:key]]', %q{
+ def implicit(hash)
+ hash.keys
+ end
+
+ 5.times.map { implicit(key: :value) }.uniq
+}
+
+# default values on keywords don't mess up argument order
+assert_equal '[2]', %q{
+ def default_value
+ 1
+ end
+
+ def default_expression(value: default_value)
+ value
+ end
+
+ 5.times.map { default_expression(value: 2) }.uniq
+}
+
+# constant default values on keywords
+assert_equal '[3]', %q{
+ def default_expression(value: 3)
+ value
+ end
+
+ 5.times.map { default_expression }.uniq
+}
+
+# non-constant default values on keywords
+assert_equal '[3]', %q{
+ def default_value
+ 3
+ end
+
+ def default_expression(value: default_value)
+ value
+ end
+
+ 5.times.map { default_expression }.uniq
+}
+
+# reordered optional kwargs
+assert_equal '[[100, 1]]', %q{
+ def foo(capacity: 100, max: nil)
+ [capacity, max]
+ end
+
+ 5.times.map { foo(max: 1) }.uniq
+}
+
+# invalid lead param
+assert_equal 'ok', %q{
+ def bar(baz: 2)
+ baz
+ end
+
+ def foo
+ bar(1, baz: 123)
+ end
+
+ begin
+ foo
+ foo
+ rescue ArgumentError => e
+ print "ok"
+ end
+}
+
+# reordered required kwargs
+assert_equal '[[1, 2, 3, 4]]', %q{
+ def foo(default1: 1, required1:, default2: 3, required2:)
+ [default1, required1, default2, required2]
+ end
+
+ 5.times.map { foo(required1: 2, required2: 4) }.uniq
+}
+
+# reordered default expression kwargs
+assert_equal '[[:one, :two, 3]]', %q{
+ def foo(arg1: (1+0), arg2: (2+0), arg3: (3+0))
+ [arg1, arg2, arg3]
+ end
+
+ 5.times.map { foo(arg2: :two, arg1: :one) }.uniq
+}
+
+# complex kwargs
+assert_equal '[[1, 2, 3, 4]]', %q{
+ def foo(required:, specified: 999, simple_default: 3, complex_default: "4".to_i)
+ [required, specified, simple_default, complex_default]
+ end
+
+ 5.times.map { foo(specified: 2, required: 1) }.uniq
+}
+
+# cfunc kwargs
+assert_equal '{:foo=>123}', %q{
+ def foo(bar)
+ bar.store(:value, foo: 123)
+ bar[:value]
+ end
+
+ foo({})
+ foo({})
+}
+
+# cfunc kwargs
+assert_equal '{:foo=>123}', %q{
+ def foo(bar)
+ bar.replace(foo: 123)
+ end
+
+ foo({})
+ foo({})
+}
+
+# cfunc kwargs
+assert_equal '{:foo=>123, :bar=>456}', %q{
+ def foo(bar)
+ bar.replace(foo: 123, bar: 456)
+ end
+
+ foo({})
+ foo({})
+}
+
+# variadic cfunc kwargs
+assert_equal '{:foo=>123}', %q{
+ def foo(bar)
+ bar.merge(foo: 123)
+ end
+
+ foo({})
+ foo({})
+}
+
+# optimized cfunc kwargs
+assert_equal 'false', %q{
+ def foo
+ :foo.eql?(foo: :foo)
+ end
+
+ foo
+ foo
+}
+
+# attr_reader on frozen object
+assert_equal 'false', %q{
+ class Foo
+ attr_reader :exception
+
+ def failed?
+ !exception.nil?
+ end
+ end
+
+ foo = Foo.new.freeze
+ foo.failed?
+ foo.failed?
+}
+
+# regression test for doing kwarg shuffle before checking for interrupts
+assert_equal 'ok', %q{
+ def new_media_drop(attributes:, product_drop:, context:, sources:)
+ nil.nomethod rescue nil # force YJIT to bail to side exit
+
+ [attributes, product_drop, context, sources]
+ end
+
+ def load_medias(product_drop: nil, raw_medias:, context:)
+ raw_medias.map do |raw_media|
+ case new_media_drop(context: context, attributes: raw_media, product_drop: product_drop, sources: [])
+ in [Hash, ProductDrop, Context, Array]
+ else
+ raise "bad shuffle"
+ end
+ end
+ end
+
+ class Context; end
+
+ class ProductDrop
+ attr_reader :title
+ def initialize(title)
+ @title = title
+ end
+ end
+
+ # Make a thread so we have thread switching interrupts
+ th = Thread.new do
+ while true; end
+ end
+ 1_000.times do |i|
+ load_medias(product_drop: ProductDrop.new("foo"), raw_medias: [{}, {}], context: Context.new)
+ end
+ th.kill.join
+
+ :ok
+}
+
+# regression test for tracing attr_accessor methods.
+assert_equal "true", %q{
+ c = Class.new do
+ attr_accessor :x
+ alias y x
+ alias y= x=
+ end
+ obj = c.new
+
+ ar_meth = obj.method(:x)
+ aw_meth = obj.method(:x=)
+ aar_meth = obj.method(:y)
+ aaw_meth = obj.method(:y=)
+ events = []
+ trace = TracePoint.new(:c_call, :c_return){|tp|
+ next if tp.path != __FILE__
+ next if tp.method_id == :call
+ case tp.event
+ when :c_call
+ events << [tp.event, tp.method_id, tp.callee_id]
+ when :c_return
+ events << [tp.event, tp.method_id, tp.callee_id, tp.return_value]
+ end
+ }
+ test_proc = proc do
+ obj.x = 1
+ obj.x
+ obj.y = 2
+ obj.y
+ aw_meth.call(1)
+ ar_meth.call
+ aaw_meth.call(2)
+ aar_meth.call
+ end
+ test_proc.call # populate call caches
+ trace.enable(&test_proc)
+ expected = [
+ [:c_call, :x=, :x=],
+ [:c_return, :x=, :x=, 1],
+ [:c_call, :x, :x],
+ [:c_return, :x, :x, 1],
+ [:c_call, :x=, :y=],
+ [:c_return, :x=, :y=, 2],
+ [:c_call, :x, :y],
+ [:c_return, :x, :y, 2],
+ ] * 2
+
+ expected == events
+}
+
+# duphash
+assert_equal '{:foo=>123}', %q{
+ def foo
+ {foo: 123}
+ end
+
+ foo
+ foo
+}
+
+# newhash
+assert_equal '{:foo=>2}', %q{
+ def foo
+ {foo: 1+1}
+ end
+
+ foo
+ foo
+}
+
+# block invalidation edge case
+assert_equal 'undef', %q{
+ class A
+ def foo(arg)
+ arg.times { A.remove_method(:bar) }
+ self
+ end
+
+ def bar
+ 4
+ end
+
+ def use(arg)
+ # two consecutive sends. When bar is removed, the return address
+ # for calling it is already on foo's control frame
+ foo(arg).bar
+ rescue NoMethodError
+ :undef
+ end
+ end
+
+ A.new.use 0
+ A.new.use 0
+ A.new.use 1
+}
+
+# block invalidation edge case
+assert_equal 'ok', %q{
+ class A
+ Good = :ng
+ def foo(arg)
+ arg.times { A.const_set(:Good, :ok) }
+ self
+ end
+
+ def id(arg)
+ arg
+ end
+
+ def use(arg)
+ # send followed by an opt_getinlinecache.
+ # The return address remains on the control frame
+ # when opt_getinlinecache is invalidated.
+ foo(arg).id(Good)
+ end
+ end
+
+ A.new.use 0
+ A.new.use 0
+ A.new.use 1
+}
+
+assert_equal 'ok', %q{
+ # test hitting a branch stub when out of memory
+ def nimai(jita)
+ if jita
+ :ng
+ else
+ :ok
+ end
+ end
+
+ nimai(true)
+ nimai(true)
+
+ RubyVM::YJIT.simulate_oom! if defined?(RubyVM::YJIT)
+
+ nimai(false)
+}
+
+assert_equal 'new', %q{
+ # test block invalidation while out of memory
+ def foo
+ :old
+ end
+
+ def test
+ foo
+ end
+
+ def bar
+ :bar
+ end
+
+
+ test
+ test
+
+ RubyVM::YJIT.simulate_oom! if defined?(RubyVM::YJIT)
+
+  # Old simulate_oom! leaves one byte of space and this fills it up
+ bar
+ bar
+
+ def foo
+ :new
+ end
+
+ test
+}
+
+assert_equal 'ok', %q{
+ # Try to compile new method while OOM
+ def foo
+ :ok
+ end
+
+ RubyVM::YJIT.simulate_oom! if defined?(RubyVM::YJIT)
+
+ foo
+ foo
+}
+
+# struct aref embedded
+assert_equal '2', %q{
+ def foo(s)
+ s.foo
+ end
+
+ S = Struct.new(:foo)
+ foo(S.new(1))
+ foo(S.new(2))
+}
+
+# struct aref non-embedded
+assert_equal '4', %q{
+ def foo(s)
+ s.d
+ end
+
+ S = Struct.new(:a, :b, :c, :d, :e)
+ foo(S.new(1,2,3,4,5))
+ foo(S.new(1,2,3,4,5))
+}
+
+# struct aset embedded
+assert_equal '123', %q{
+ def foo(s)
+ s.foo = 123
+ end
+
+ s = Struct.new(:foo).new
+ foo(s)
+ s = Struct.new(:foo).new
+ foo(s)
+ s.foo
+}
+
+# struct aset non-embedded
+assert_equal '[1, 2, 3, 4, 5]', %q{
+ def foo(s)
+ s.a = 1
+ s.b = 2
+ s.c = 3
+ s.d = 4
+ s.e = 5
+ end
+
+ S = Struct.new(:a, :b, :c, :d, :e)
+ s = S.new
+ foo(s)
+ s = S.new
+ foo(s)
+ [s.a, s.b, s.c, s.d, s.e]
+}
+
+# struct aref too many args
+assert_equal 'ok', %q{
+ def foo(s)
+ s.foo(:bad)
+ end
+
+ s = Struct.new(:foo).new
+ foo(s) rescue :ok
+ foo(s) rescue :ok
+}
+
+# struct aset too many args
+assert_equal 'ok', %q{
+ def foo(s)
+ s.set_foo(123, :bad)
+ end
+
+ s = Struct.new(:foo) do
+ alias :set_foo :foo=
+ end
+ foo(s) rescue :ok
+ foo(s) rescue :ok
+}
+
+# File.join is a cfunc accepting variable arguments as a Ruby array (argc = -2)
+assert_equal 'foo/bar', %q{
+ def foo
+ File.join("foo", "bar")
+ end
+
+ foo
+ foo
+}
+
+# File.join is a cfunc accepting variable arguments as a Ruby array (argc = -2)
+assert_equal '', %q{
+ def foo
+ File.join()
+ end
+
+ foo
+ foo
+}
+
+# Make sure we're correctly reading RStruct's as.ary union for embedded RStructs
+assert_equal '3,12', %q{
+ pt_struct = Struct.new(:x, :y)
+ p = pt_struct.new(3, 12)
+ def pt_inspect(pt)
+ "#{pt.x},#{pt.y}"
+ end
+
+ # Make sure pt_inspect is JITted
+ 10.times { pt_inspect(p) }
+
+ # Make sure it's returning '3,12' instead of e.g. '3,false'
+ pt_inspect(p)
+}
+
+# Regression test for deadlock between branch_stub_hit and ractor_receive_if
+assert_equal '10', %q{
+ r = Ractor.new Ractor.current do |main|
+ main << 1
+ main << 2
+ main << 3
+ main << 4
+ main << 5
+ main << 6
+ main << 7
+ main << 8
+ main << 9
+ main << 10
+ end
+
+ a = []
+ a << Ractor.receive_if{|msg| msg == 10}
+ a << Ractor.receive_if{|msg| msg == 9}
+ a << Ractor.receive_if{|msg| msg == 8}
+ a << Ractor.receive_if{|msg| msg == 7}
+ a << Ractor.receive_if{|msg| msg == 6}
+ a << Ractor.receive_if{|msg| msg == 5}
+ a << Ractor.receive_if{|msg| msg == 4}
+ a << Ractor.receive_if{|msg| msg == 3}
+ a << Ractor.receive_if{|msg| msg == 2}
+ a << Ractor.receive_if{|msg| msg == 1}
+
+ a.length
+}
+
+# checktype
+assert_equal 'false', %q{
+ def function()
+ [1, 2] in [Integer, String]
+ end
+ function()
+}
+
+# opt_send_without_block (VM_METHOD_TYPE_ATTRSET)
+assert_equal 'foo', %q{
+ class Foo
+ attr_writer :foo
+
+ def foo()
+ self.foo = "foo"
+ end
+ end
+ foo = Foo.new
+ foo.foo
+}
+
+# anytostring, intern
+assert_equal 'true', %q{
+ def foo()
+ :"#{true}"
+ end
+ foo()
+}
+
+# toregexp, objtostring
+assert_equal '/true/', %q{
+ def foo()
+ /#{true}/
+ end
+ foo().inspect
+}
+
+# concatstrings, objtostring
+assert_equal '9001', %q{
+ def foo()
+ "#{9001}"
+ end
+ foo()
+}
+
+# opt_send_without_block (VM_METHOD_TYPE_CFUNC)
+assert_equal 'nil', %q{
+ def foo
+ nil.inspect # argc: 0
+ end
+ foo
+}
+assert_equal '4', %q{
+ def foo
+ 2.pow(2) # argc: 1
+ end
+ foo
+}
+assert_equal 'aba', %q{
+ def foo
+ "abc".tr("c", "a") # argc: 2
+ end
+ foo
+}
+assert_equal 'true', %q{
+ def foo
+ respond_to?(:inspect) # argc: -1
+ end
+ foo
+}
+assert_equal '["a", "b"]', %q{
+ def foo
+ "a\nb".lines(chomp: true) # kwargs
+ end
+ foo
+}
+
+# invokebuiltin
+assert_equal '123', %q{
+ def foo(obj)
+ obj.foo = 123
+ end
+
+ struct = Struct.new(:foo)
+ obj = struct.new
+ foo(obj)
+}
+
+# invokebuiltin_delegate
+assert_equal '.', %q{
+ def foo(path)
+ Dir.open(path).path
+ end
+ foo(".")
+}
+
+# opt_invokebuiltin_delegate_leave
+assert_equal '[0]', %q{"\x00".unpack("c")}
+
+# opt_send_without_block (VM_METHOD_TYPE_ISEQ)
+assert_equal '1', %q{
+ def foo = 1
+ def bar = foo
+ bar
+}
+assert_equal '[1, 2, 3]', %q{
+ def foo(a, b) = [1, a, b]
+ def bar = foo(2, 3)
+ bar
+}
+assert_equal '[1, 2, 3, 4, 5, 6]', %q{
+ def foo(a, b, c:, d:, e: 0, f: 6) = [a, b, c, d, e, f]
+ def bar = foo(1, 2, c: 3, d: 4, e: 5)
+ bar
+}
+assert_equal '[1, 2, 3, 4]', %q{
+ def foo(a, b = 2) = [a, b]
+ def bar = foo(1) + foo(3, 4)
+ bar
+}
+
+assert_equal '1', %q{
+ def foo(a) = a
+ def bar = foo(1) { 2 }
+ bar
+}
+assert_equal '[1, 2]', %q{
+ def foo(a, &block) = [a, block.call]
+ def bar = foo(1) { 2 }
+ bar
+}
+
+# opt_send_without_block (VM_METHOD_TYPE_IVAR)
+assert_equal 'foo', %q{
+ class Foo
+ attr_reader :foo
+
+ def initialize
+ @foo = "foo"
+ end
+ end
+ Foo.new.foo
+}
+
+# opt_send_without_block (VM_METHOD_TYPE_OPTIMIZED)
+assert_equal 'foo', %q{
+ Foo = Struct.new(:bar)
+ Foo.new("bar").bar = "foo"
+}
+assert_equal 'foo', %q{
+ Foo = Struct.new(:bar)
+ Foo.new("foo").bar
+}
+
+# getblockparamproxy
+assert_equal 'foo', %q{
+ def foo(&block)
+ block.call
+ end
+ foo { "foo" }
+}
+
+# getblockparam
+assert_equal 'foo', %q{
+ def foo(&block)
+ block
+ end
+ foo { "foo" }.call
+}
+
+assert_equal '[1, 2]', %q{
+ def foo
+ x = [2]
+ [1, *x]
+ end
+
+ foo
+ foo
+}
+
+# respond_to? with changing symbol
+assert_equal 'false', %q{
+ def foo(name)
+ :sym.respond_to?(name)
+ end
+ foo(:to_s)
+ foo(:to_s)
+ foo(:not_exist)
+}
+
+# respond_to? with method being defined
+assert_equal 'true', %q{
+ def foo
+ :sym.respond_to?(:not_yet_defined)
+ end
+ foo
+ foo
+ module Kernel
+ def not_yet_defined = true
+ end
+ foo
+}
+
+# respond_to? with undef method
+assert_equal 'false', %q{
+ module Kernel
+ def to_be_removed = true
+ end
+ def foo
+ :sym.respond_to?(:to_be_removed)
+ end
+ foo
+ foo
+ class Object
+ undef_method :to_be_removed
+ end
+ foo
+}
+
+# respond_to? with respond_to_missing?
+assert_equal 'true', %q{
+ class Foo
+ end
+ def foo(x)
+ x.respond_to?(:bar)
+ end
+ foo(Foo.new)
+ foo(Foo.new)
+ class Foo
+ def respond_to_missing?(*) = true
+ end
+ foo(Foo.new)
+}
+
+# bmethod
+assert_equal '[1, 2, 3]', %q{
+ one = 1
+ define_method(:foo) do
+ one
+ end
+
+ 3.times.map { |i| foo + i }
+}
+
+# return inside bmethod
+assert_equal 'ok', %q{
+ define_method(:foo) do
+ 1.tap { return :ok }
+ end
+
+ foo
+}
+
+# bmethod optional and keywords
+assert_equal '[[1, nil, 2]]', %q{
+ define_method(:opt_and_kwargs) do |a = {}, b: nil, c: nil|
+ [a, b, c]
+ end
+
+ 5.times.map { opt_and_kwargs(1, c: 2) }.uniq
+}
+
+# bmethod with forwarded block
+assert_equal '2', %q{
+ define_method(:foo) do |&block|
+ block.call
+ end
+
+ def bar(&block)
+ foo(&block)
+ end
+
+ bar { 1 }
+ bar { 2 }
+}
+
+# bmethod with forwarded block and arguments
+assert_equal '5', %q{
+ define_method(:foo) do |n, &block|
+ n + block.call
+ end
+
+ def bar(n, &block)
+ foo(n, &block)
+ end
+
+ bar(0) { 1 }
+ bar(3) { 2 }
+}
+
+# bmethod with forwarded unwanted block
+assert_equal '1', %q{
+ one = 1
+ define_method(:foo) do
+ one
+ end
+
+ def bar(&block)
+ foo(&block)
+ end
+
+ bar { }
+ bar { }
+}
+
+# test for return stub lifetime issue
+assert_equal '1', %q{
+ def foo(n)
+ if n == 2
+ return 1.times { Object.define_method(:foo) {} }
+ end
+
+ foo(n + 1)
+ end
+
+ foo(1)
+}
+
+# case-when with redefined ===
+assert_equal 'ok', %q{
+ class Symbol
+ def ===(a)
+ true
+ end
+ end
+
+ def cw(arg)
+ case arg
+ when :b
+ :ok
+ when 4
+ :ng
+ end
+ end
+
+ cw(4)
+}
+
+assert_normal_exit %{
+ class Bug20997
+ def foo(&) = self.class.name(&)
+
+ new.foo
+ end
+}
diff --git a/bootstraptest/test_yjit_30k_ifelse.rb b/bootstraptest/test_yjit_30k_ifelse.rb
new file mode 100644
index 0000000000..c3afa95e4d
--- /dev/null
+++ b/bootstraptest/test_yjit_30k_ifelse.rb
@@ -0,0 +1,241023 @@
+# This is a torture test for the JIT.
+# There are 30K tiny methods with if-else statements in a 30-deep call hierarchy.
+assert_equal '100000', %q{
+
+def fun_l0_n0(x)
+ if (x < 1)
+ fun_l1_n310(x)
+ else
+ fun_l1_n485(x)
+ end
+end
+
+def fun_l0_n1(x)
+ if (x < 1)
+ fun_l1_n930(x)
+ else
+ fun_l1_n418(x)
+ end
+end
+
+def fun_l0_n2(x)
+ if (x < 1)
+ fun_l1_n549(x)
+ else
+ fun_l1_n44(x)
+ end
+end
+
+def fun_l0_n3(x)
+ if (x < 1)
+ fun_l1_n394(x)
+ else
+ fun_l1_n447(x)
+ end
+end
+
+def fun_l0_n4(x)
+ if (x < 1)
+ fun_l1_n683(x)
+ else
+ fun_l1_n547(x)
+ end
+end
+
+def fun_l0_n5(x)
+ if (x < 1)
+ fun_l1_n320(x)
+ else
+ fun_l1_n896(x)
+ end
+end
+
+def fun_l0_n6(x)
+ if (x < 1)
+ fun_l1_n649(x)
+ else
+ fun_l1_n243(x)
+ end
+end
+
+def fun_l0_n7(x)
+ if (x < 1)
+ fun_l1_n100(x)
+ else
+ fun_l1_n243(x)
+ end
+end
+
+def fun_l0_n8(x)
+ if (x < 1)
+ fun_l1_n839(x)
+ else
+ fun_l1_n720(x)
+ end
+end
+
+def fun_l0_n9(x)
+ if (x < 1)
+ fun_l1_n177(x)
+ else
+ fun_l1_n555(x)
+ end
+end
+
+def fun_l0_n10(x)
+ if (x < 1)
+ fun_l1_n814(x)
+ else
+ fun_l1_n900(x)
+ end
+end
+
+def fun_l0_n11(x)
+ if (x < 1)
+ fun_l1_n585(x)
+ else
+ fun_l1_n901(x)
+ end
+end
+
+def fun_l0_n12(x)
+ if (x < 1)
+ fun_l1_n952(x)
+ else
+ fun_l1_n270(x)
+ end
+end
+
+def fun_l0_n13(x)
+ if (x < 1)
+ fun_l1_n172(x)
+ else
+ fun_l1_n209(x)
+ end
+end
+
+def fun_l0_n14(x)
+ if (x < 1)
+ fun_l1_n514(x)
+ else
+ fun_l1_n414(x)
+ end
+end
+
+def fun_l0_n15(x)
+ if (x < 1)
+ fun_l1_n190(x)
+ else
+ fun_l1_n100(x)
+ end
+end
+
+def fun_l0_n16(x)
+ if (x < 1)
+ fun_l1_n696(x)
+ else
+ fun_l1_n997(x)
+ end
+end
+
+def fun_l0_n17(x)
+ if (x < 1)
+ fun_l1_n568(x)
+ else
+ fun_l1_n820(x)
+ end
+end
+
+def fun_l0_n18(x)
+ if (x < 1)
+ fun_l1_n837(x)
+ else
+ fun_l1_n588(x)
+ end
+end
+
+def fun_l0_n19(x)
+ if (x < 1)
+ fun_l1_n206(x)
+ else
+ fun_l1_n126(x)
+ end
+end
+
+def fun_l0_n20(x)
+ if (x < 1)
+ fun_l1_n317(x)
+ else
+ fun_l1_n722(x)
+ end
+end
+
+def fun_l0_n21(x)
+ if (x < 1)
+ fun_l1_n614(x)
+ else
+ fun_l1_n372(x)
+ end
+end
+
+def fun_l0_n22(x)
+ if (x < 1)
+ fun_l1_n530(x)
+ else
+ fun_l1_n862(x)
+ end
+end
+
+def fun_l0_n23(x)
+ if (x < 1)
+ fun_l1_n889(x)
+ else
+ fun_l1_n271(x)
+ end
+end
+
+def fun_l0_n24(x)
+ if (x < 1)
+ fun_l1_n996(x)
+ else
+ fun_l1_n717(x)
+ end
+end
+
+def fun_l0_n25(x)
+ if (x < 1)
+ fun_l1_n726(x)
+ else
+ fun_l1_n939(x)
+ end
+end
+
+def fun_l0_n26(x)
+ if (x < 1)
+ fun_l1_n316(x)
+ else
+ fun_l1_n293(x)
+ end
+end
+
+def fun_l0_n27(x)
+ if (x < 1)
+ fun_l1_n90(x)
+ else
+ fun_l1_n596(x)
+ end
+end
+
+def fun_l0_n28(x)
+ if (x < 1)
+ fun_l1_n743(x)
+ else
+ fun_l1_n782(x)
+ end
+end
+
+def fun_l0_n29(x)
+ if (x < 1)
+ fun_l1_n896(x)
+ else
+ fun_l1_n247(x)
+ end
+end
+
+def fun_l0_n30(x)
+ if (x < 1)
+ fun_l1_n2(x)
+ else
+ fun_l1_n377(x)
+ end
+end
+
+def fun_l0_n31(x)
+ if (x < 1)
+ fun_l1_n380(x)
+ else
+ fun_l1_n655(x)
+ end
+end
+
+def fun_l0_n32(x)
+ if (x < 1)
+ fun_l1_n572(x)
+ else
+ fun_l1_n778(x)
+ end
+end
+
+def fun_l0_n33(x)
+ if (x < 1)
+ fun_l1_n485(x)
+ else
+ fun_l1_n282(x)
+ end
+end
+
+def fun_l0_n34(x)
+ if (x < 1)
+ fun_l1_n703(x)
+ else
+ fun_l1_n179(x)
+ end
+end
+
+def fun_l0_n35(x)
+ if (x < 1)
+ fun_l1_n281(x)
+ else
+ fun_l1_n572(x)
+ end
+end
+
+def fun_l0_n36(x)
+ if (x < 1)
+ fun_l1_n48(x)
+ else
+ fun_l1_n286(x)
+ end
+end
+
+def fun_l0_n37(x)
+ if (x < 1)
+ fun_l1_n539(x)
+ else
+ fun_l1_n86(x)
+ end
+end
+
+def fun_l0_n38(x)
+ if (x < 1)
+ fun_l1_n918(x)
+ else
+ fun_l1_n778(x)
+ end
+end
+
+def fun_l0_n39(x)
+ if (x < 1)
+ fun_l1_n832(x)
+ else
+ fun_l1_n94(x)
+ end
+end
+
+def fun_l0_n40(x)
+ if (x < 1)
+ fun_l1_n213(x)
+ else
+ fun_l1_n580(x)
+ end
+end
+
+def fun_l0_n41(x)
+ if (x < 1)
+ fun_l1_n413(x)
+ else
+ fun_l1_n793(x)
+ end
+end
+
+def fun_l0_n42(x)
+ if (x < 1)
+ fun_l1_n451(x)
+ else
+ fun_l1_n779(x)
+ end
+end
+
+def fun_l0_n43(x)
+ if (x < 1)
+ fun_l1_n118(x)
+ else
+ fun_l1_n778(x)
+ end
+end
+
+def fun_l0_n44(x)
+ if (x < 1)
+ fun_l1_n162(x)
+ else
+ fun_l1_n901(x)
+ end
+end
+
+def fun_l0_n45(x)
+ if (x < 1)
+ fun_l1_n157(x)
+ else
+ fun_l1_n280(x)
+ end
+end
+
+def fun_l0_n46(x)
+ if (x < 1)
+ fun_l1_n748(x)
+ else
+ fun_l1_n881(x)
+ end
+end
+
+def fun_l0_n47(x)
+ if (x < 1)
+ fun_l1_n529(x)
+ else
+ fun_l1_n732(x)
+ end
+end
+
+def fun_l0_n48(x)
+ if (x < 1)
+ fun_l1_n417(x)
+ else
+ fun_l1_n483(x)
+ end
+end
+
+def fun_l0_n49(x)
+ if (x < 1)
+ fun_l1_n743(x)
+ else
+ fun_l1_n525(x)
+ end
+end
+
+def fun_l0_n50(x)
+ if (x < 1)
+ fun_l1_n14(x)
+ else
+ fun_l1_n309(x)
+ end
+end
+
+def fun_l0_n51(x)
+ if (x < 1)
+ fun_l1_n436(x)
+ else
+ fun_l1_n304(x)
+ end
+end
+
+def fun_l0_n52(x)
+ if (x < 1)
+ fun_l1_n623(x)
+ else
+ fun_l1_n590(x)
+ end
+end
+
+def fun_l0_n53(x)
+ if (x < 1)
+ fun_l1_n696(x)
+ else
+ fun_l1_n53(x)
+ end
+end
+
+def fun_l0_n54(x)
+ if (x < 1)
+ fun_l1_n807(x)
+ else
+ fun_l1_n523(x)
+ end
+end
+
+def fun_l0_n55(x)
+ if (x < 1)
+ fun_l1_n607(x)
+ else
+ fun_l1_n609(x)
+ end
+end
+
+def fun_l0_n56(x)
+ if (x < 1)
+ fun_l1_n721(x)
+ else
+ fun_l1_n994(x)
+ end
+end
+
+def fun_l0_n57(x)
+ if (x < 1)
+ fun_l1_n472(x)
+ else
+ fun_l1_n818(x)
+ end
+end
+
+def fun_l0_n58(x)
+ if (x < 1)
+ fun_l1_n30(x)
+ else
+ fun_l1_n954(x)
+ end
+end
+
+def fun_l0_n59(x)
+ if (x < 1)
+ fun_l1_n223(x)
+ else
+ fun_l1_n148(x)
+ end
+end
+
+def fun_l0_n60(x)
+ if (x < 1)
+ fun_l1_n761(x)
+ else
+ fun_l1_n40(x)
+ end
+end
+
+def fun_l0_n61(x)
+ if (x < 1)
+ fun_l1_n57(x)
+ else
+ fun_l1_n858(x)
+ end
+end
+
+def fun_l0_n62(x)
+ if (x < 1)
+ fun_l1_n114(x)
+ else
+ fun_l1_n767(x)
+ end
+end
+
+def fun_l0_n63(x)
+ if (x < 1)
+ fun_l1_n287(x)
+ else
+ fun_l1_n752(x)
+ end
+end
+
+def fun_l0_n64(x)
+ if (x < 1)
+ fun_l1_n16(x)
+ else
+ fun_l1_n229(x)
+ end
+end
+
+def fun_l0_n65(x)
+ if (x < 1)
+ fun_l1_n230(x)
+ else
+ fun_l1_n954(x)
+ end
+end
+
+def fun_l0_n66(x)
+ if (x < 1)
+ fun_l1_n98(x)
+ else
+ fun_l1_n320(x)
+ end
+end
+
+def fun_l0_n67(x)
+ if (x < 1)
+ fun_l1_n878(x)
+ else
+ fun_l1_n985(x)
+ end
+end
+
+def fun_l0_n68(x)
+ if (x < 1)
+ fun_l1_n32(x)
+ else
+ fun_l1_n648(x)
+ end
+end
+
+def fun_l0_n69(x)
+ if (x < 1)
+ fun_l1_n453(x)
+ else
+ fun_l1_n466(x)
+ end
+end
+
+def fun_l0_n70(x)
+ if (x < 1)
+ fun_l1_n787(x)
+ else
+ fun_l1_n802(x)
+ end
+end
+
+def fun_l0_n71(x)
+ if (x < 1)
+ fun_l1_n656(x)
+ else
+ fun_l1_n347(x)
+ end
+end
+
+def fun_l0_n72(x)
+ if (x < 1)
+ fun_l1_n358(x)
+ else
+ fun_l1_n336(x)
+ end
+end
+
+def fun_l0_n73(x)
+ if (x < 1)
+ fun_l1_n291(x)
+ else
+ fun_l1_n536(x)
+ end
+end
+
+def fun_l0_n74(x)
+ if (x < 1)
+ fun_l1_n795(x)
+ else
+ fun_l1_n606(x)
+ end
+end
+
+def fun_l0_n75(x)
+ if (x < 1)
+ fun_l1_n21(x)
+ else
+ fun_l1_n720(x)
+ end
+end
+
+def fun_l0_n76(x)
+ if (x < 1)
+ fun_l1_n513(x)
+ else
+ fun_l1_n300(x)
+ end
+end
+
+def fun_l0_n77(x)
+ if (x < 1)
+ fun_l1_n358(x)
+ else
+ fun_l1_n332(x)
+ end
+end
+
+def fun_l0_n78(x)
+ if (x < 1)
+ fun_l1_n712(x)
+ else
+ fun_l1_n906(x)
+ end
+end
+
+def fun_l0_n79(x)
+ if (x < 1)
+ fun_l1_n555(x)
+ else
+ fun_l1_n850(x)
+ end
+end
+
+def fun_l0_n80(x)
+ if (x < 1)
+ fun_l1_n320(x)
+ else
+ fun_l1_n892(x)
+ end
+end
+
+def fun_l0_n81(x)
+ if (x < 1)
+ fun_l1_n191(x)
+ else
+ fun_l1_n187(x)
+ end
+end
+
+def fun_l0_n82(x)
+ if (x < 1)
+ fun_l1_n457(x)
+ else
+ fun_l1_n718(x)
+ end
+end
+
+def fun_l0_n83(x)
+ if (x < 1)
+ fun_l1_n314(x)
+ else
+ fun_l1_n697(x)
+ end
+end
+
+def fun_l0_n84(x)
+ if (x < 1)
+ fun_l1_n459(x)
+ else
+ fun_l1_n500(x)
+ end
+end
+
+def fun_l0_n85(x)
+ if (x < 1)
+ fun_l1_n912(x)
+ else
+ fun_l1_n992(x)
+ end
+end
+
+def fun_l0_n86(x)
+ if (x < 1)
+ fun_l1_n523(x)
+ else
+ fun_l1_n201(x)
+ end
+end
+
+def fun_l0_n87(x)
+ if (x < 1)
+ fun_l1_n30(x)
+ else
+ fun_l1_n829(x)
+ end
+end
+
+def fun_l0_n88(x)
+ if (x < 1)
+ fun_l1_n223(x)
+ else
+ fun_l1_n799(x)
+ end
+end
+
+def fun_l0_n89(x)
+ if (x < 1)
+ fun_l1_n289(x)
+ else
+ fun_l1_n289(x)
+ end
+end
+
+def fun_l0_n90(x)
+ if (x < 1)
+ fun_l1_n961(x)
+ else
+ fun_l1_n694(x)
+ end
+end
+
+def fun_l0_n91(x)
+ if (x < 1)
+ fun_l1_n423(x)
+ else
+ fun_l1_n848(x)
+ end
+end
+
+def fun_l0_n92(x)
+ if (x < 1)
+ fun_l1_n612(x)
+ else
+ fun_l1_n358(x)
+ end
+end
+
+def fun_l0_n93(x)
+ if (x < 1)
+ fun_l1_n148(x)
+ else
+ fun_l1_n312(x)
+ end
+end
+
+def fun_l0_n94(x)
+ if (x < 1)
+ fun_l1_n771(x)
+ else
+ fun_l1_n205(x)
+ end
+end
+
+def fun_l0_n95(x)
+ if (x < 1)
+ fun_l1_n573(x)
+ else
+ fun_l1_n692(x)
+ end
+end
+
+def fun_l0_n96(x)
+ if (x < 1)
+ fun_l1_n66(x)
+ else
+ fun_l1_n936(x)
+ end
+end
+
+def fun_l0_n97(x)
+ if (x < 1)
+ fun_l1_n429(x)
+ else
+ fun_l1_n949(x)
+ end
+end
+
+def fun_l0_n98(x)
+ if (x < 1)
+ fun_l1_n737(x)
+ else
+ fun_l1_n338(x)
+ end
+end
+
+def fun_l0_n99(x)
+ if (x < 1)
+ fun_l1_n335(x)
+ else
+ fun_l1_n739(x)
+ end
+end
+
+def fun_l0_n100(x)
+ if (x < 1)
+ fun_l1_n989(x)
+ else
+ fun_l1_n735(x)
+ end
+end
+
+def fun_l0_n101(x)
+ if (x < 1)
+ fun_l1_n559(x)
+ else
+ fun_l1_n478(x)
+ end
+end
+
+def fun_l0_n102(x)
+ if (x < 1)
+ fun_l1_n261(x)
+ else
+ fun_l1_n162(x)
+ end
+end
+
+def fun_l0_n103(x)
+ if (x < 1)
+ fun_l1_n400(x)
+ else
+ fun_l1_n156(x)
+ end
+end
+
+def fun_l0_n104(x)
+ if (x < 1)
+ fun_l1_n747(x)
+ else
+ fun_l1_n361(x)
+ end
+end
+
+def fun_l0_n105(x)
+ if (x < 1)
+ fun_l1_n717(x)
+ else
+ fun_l1_n522(x)
+ end
+end
+
+def fun_l0_n106(x)
+ if (x < 1)
+ fun_l1_n513(x)
+ else
+ fun_l1_n150(x)
+ end
+end
+
+def fun_l0_n107(x)
+ if (x < 1)
+ fun_l1_n710(x)
+ else
+ fun_l1_n602(x)
+ end
+end
+
+def fun_l0_n108(x)
+ if (x < 1)
+ fun_l1_n866(x)
+ else
+ fun_l1_n111(x)
+ end
+end
+
+def fun_l0_n109(x)
+ if (x < 1)
+ fun_l1_n725(x)
+ else
+ fun_l1_n448(x)
+ end
+end
+
+def fun_l0_n110(x)
+ if (x < 1)
+ fun_l1_n703(x)
+ else
+ fun_l1_n127(x)
+ end
+end
+
+def fun_l0_n111(x)
+ if (x < 1)
+ fun_l1_n420(x)
+ else
+ fun_l1_n666(x)
+ end
+end
+
+def fun_l0_n112(x)
+ if (x < 1)
+ fun_l1_n647(x)
+ else
+ fun_l1_n567(x)
+ end
+end
+
+def fun_l0_n113(x)
+ if (x < 1)
+ fun_l1_n543(x)
+ else
+ fun_l1_n992(x)
+ end
+end
+
+def fun_l0_n114(x)
+ if (x < 1)
+ fun_l1_n12(x)
+ else
+ fun_l1_n470(x)
+ end
+end
+
+def fun_l0_n115(x)
+ if (x < 1)
+ fun_l1_n25(x)
+ else
+ fun_l1_n917(x)
+ end
+end
+
+def fun_l0_n116(x)
+ if (x < 1)
+ fun_l1_n201(x)
+ else
+ fun_l1_n110(x)
+ end
+end
+
+def fun_l0_n117(x)
+ if (x < 1)
+ fun_l1_n222(x)
+ else
+ fun_l1_n747(x)
+ end
+end
+
+def fun_l0_n118(x)
+ if (x < 1)
+ fun_l1_n990(x)
+ else
+ fun_l1_n794(x)
+ end
+end
+
+def fun_l0_n119(x)
+ if (x < 1)
+ fun_l1_n712(x)
+ else
+ fun_l1_n5(x)
+ end
+end
+
+def fun_l0_n120(x)
+ if (x < 1)
+ fun_l1_n28(x)
+ else
+ fun_l1_n89(x)
+ end
+end
+
+def fun_l0_n121(x)
+ if (x < 1)
+ fun_l1_n341(x)
+ else
+ fun_l1_n983(x)
+ end
+end
+
+def fun_l0_n122(x)
+ if (x < 1)
+ fun_l1_n123(x)
+ else
+ fun_l1_n452(x)
+ end
+end
+
+def fun_l0_n123(x)
+ if (x < 1)
+ fun_l1_n514(x)
+ else
+ fun_l1_n949(x)
+ end
+end
+
+def fun_l0_n124(x)
+ if (x < 1)
+ fun_l1_n280(x)
+ else
+ fun_l1_n281(x)
+ end
+end
+
+def fun_l0_n125(x)
+ if (x < 1)
+ fun_l1_n512(x)
+ else
+ fun_l1_n772(x)
+ end
+end
+
+def fun_l0_n126(x)
+ if (x < 1)
+ fun_l1_n911(x)
+ else
+ fun_l1_n693(x)
+ end
+end
+
+def fun_l0_n127(x)
+ if (x < 1)
+ fun_l1_n633(x)
+ else
+ fun_l1_n574(x)
+ end
+end
+
+def fun_l0_n128(x)
+ if (x < 1)
+ fun_l1_n318(x)
+ else
+ fun_l1_n266(x)
+ end
+end
+
+def fun_l0_n129(x)
+ if (x < 1)
+ fun_l1_n677(x)
+ else
+ fun_l1_n10(x)
+ end
+end
+
+def fun_l0_n130(x)
+ if (x < 1)
+ fun_l1_n994(x)
+ else
+ fun_l1_n48(x)
+ end
+end
+
+def fun_l0_n131(x)
+ if (x < 1)
+ fun_l1_n335(x)
+ else
+ fun_l1_n473(x)
+ end
+end
+
+def fun_l0_n132(x)
+ if (x < 1)
+ fun_l1_n641(x)
+ else
+ fun_l1_n48(x)
+ end
+end
+
+def fun_l0_n133(x)
+ if (x < 1)
+ fun_l1_n914(x)
+ else
+ fun_l1_n818(x)
+ end
+end
+
+def fun_l0_n134(x)
+ if (x < 1)
+ fun_l1_n479(x)
+ else
+ fun_l1_n761(x)
+ end
+end
+
+def fun_l0_n135(x)
+ if (x < 1)
+ fun_l1_n186(x)
+ else
+ fun_l1_n692(x)
+ end
+end
+
+def fun_l0_n136(x)
+ if (x < 1)
+ fun_l1_n325(x)
+ else
+ fun_l1_n279(x)
+ end
+end
+
+def fun_l0_n137(x)
+ if (x < 1)
+ fun_l1_n493(x)
+ else
+ fun_l1_n293(x)
+ end
+end
+
+def fun_l0_n138(x)
+ if (x < 1)
+ fun_l1_n788(x)
+ else
+ fun_l1_n364(x)
+ end
+end
+
+def fun_l0_n139(x)
+ if (x < 1)
+ fun_l1_n565(x)
+ else
+ fun_l1_n63(x)
+ end
+end
+
+def fun_l0_n140(x)
+ if (x < 1)
+ fun_l1_n128(x)
+ else
+ fun_l1_n299(x)
+ end
+end
+
+def fun_l0_n141(x)
+ if (x < 1)
+ fun_l1_n782(x)
+ else
+ fun_l1_n195(x)
+ end
+end
+
+def fun_l0_n142(x)
+ if (x < 1)
+ fun_l1_n696(x)
+ else
+ fun_l1_n117(x)
+ end
+end
+
+def fun_l0_n143(x)
+ if (x < 1)
+ fun_l1_n263(x)
+ else
+ fun_l1_n683(x)
+ end
+end
+
+def fun_l0_n144(x)
+ if (x < 1)
+ fun_l1_n633(x)
+ else
+ fun_l1_n92(x)
+ end
+end
+
+def fun_l0_n145(x)
+ if (x < 1)
+ fun_l1_n626(x)
+ else
+ fun_l1_n201(x)
+ end
+end
+
+def fun_l0_n146(x)
+ if (x < 1)
+ fun_l1_n296(x)
+ else
+ fun_l1_n425(x)
+ end
+end
+
+def fun_l0_n147(x)
+ if (x < 1)
+ fun_l1_n395(x)
+ else
+ fun_l1_n750(x)
+ end
+end
+
+def fun_l0_n148(x)
+ if (x < 1)
+ fun_l1_n164(x)
+ else
+ fun_l1_n580(x)
+ end
+end
+
+def fun_l0_n149(x)
+ if (x < 1)
+ fun_l1_n904(x)
+ else
+ fun_l1_n264(x)
+ end
+end
+
+def fun_l0_n150(x)
+ if (x < 1)
+ fun_l1_n383(x)
+ else
+ fun_l1_n558(x)
+ end
+end
+
+def fun_l0_n151(x)
+ if (x < 1)
+ fun_l1_n522(x)
+ else
+ fun_l1_n735(x)
+ end
+end
+
+def fun_l0_n152(x)
+ if (x < 1)
+ fun_l1_n496(x)
+ else
+ fun_l1_n562(x)
+ end
+end
+
+def fun_l0_n153(x)
+ if (x < 1)
+ fun_l1_n374(x)
+ else
+ fun_l1_n561(x)
+ end
+end
+
+def fun_l0_n154(x)
+ if (x < 1)
+ fun_l1_n314(x)
+ else
+ fun_l1_n368(x)
+ end
+end
+
+def fun_l0_n155(x)
+ if (x < 1)
+ fun_l1_n456(x)
+ else
+ fun_l1_n189(x)
+ end
+end
+
+def fun_l0_n156(x)
+ if (x < 1)
+ fun_l1_n46(x)
+ else
+ fun_l1_n620(x)
+ end
+end
+
+def fun_l0_n157(x)
+ if (x < 1)
+ fun_l1_n259(x)
+ else
+ fun_l1_n742(x)
+ end
+end
+
+def fun_l0_n158(x)
+ if (x < 1)
+ fun_l1_n881(x)
+ else
+ fun_l1_n786(x)
+ end
+end
+
+def fun_l0_n159(x)
+ if (x < 1)
+ fun_l1_n607(x)
+ else
+ fun_l1_n177(x)
+ end
+end
+
+def fun_l0_n160(x)
+ if (x < 1)
+ fun_l1_n824(x)
+ else
+ fun_l1_n415(x)
+ end
+end
+
+def fun_l0_n161(x)
+ if (x < 1)
+ fun_l1_n443(x)
+ else
+ fun_l1_n787(x)
+ end
+end
+
+def fun_l0_n162(x)
+ if (x < 1)
+ fun_l1_n124(x)
+ else
+ fun_l1_n529(x)
+ end
+end
+
+def fun_l0_n163(x)
+ if (x < 1)
+ fun_l1_n696(x)
+ else
+ fun_l1_n96(x)
+ end
+end
+
+def fun_l0_n164(x)
+ if (x < 1)
+ fun_l1_n775(x)
+ else
+ fun_l1_n549(x)
+ end
+end
+
+def fun_l0_n165(x)
+ if (x < 1)
+ fun_l1_n860(x)
+ else
+ fun_l1_n212(x)
+ end
+end
+
+def fun_l0_n166(x)
+ if (x < 1)
+ fun_l1_n378(x)
+ else
+ fun_l1_n904(x)
+ end
+end
+
+def fun_l0_n167(x)
+ if (x < 1)
+ fun_l1_n15(x)
+ else
+ fun_l1_n640(x)
+ end
+end
+
+def fun_l0_n168(x)
+ if (x < 1)
+ fun_l1_n771(x)
+ else
+ fun_l1_n861(x)
+ end
+end
+
+def fun_l0_n169(x)
+ if (x < 1)
+ fun_l1_n468(x)
+ else
+ fun_l1_n586(x)
+ end
+end
+
+def fun_l0_n170(x)
+ if (x < 1)
+ fun_l1_n477(x)
+ else
+ fun_l1_n674(x)
+ end
+end
+
+def fun_l0_n171(x)
+ if (x < 1)
+ fun_l1_n509(x)
+ else
+ fun_l1_n64(x)
+ end
+end
+
+def fun_l0_n172(x)
+ if (x < 1)
+ fun_l1_n612(x)
+ else
+ fun_l1_n828(x)
+ end
+end
+
+def fun_l0_n173(x)
+ if (x < 1)
+ fun_l1_n440(x)
+ else
+ fun_l1_n204(x)
+ end
+end
+
+def fun_l0_n174(x)
+ if (x < 1)
+ fun_l1_n259(x)
+ else
+ fun_l1_n947(x)
+ end
+end
+
+def fun_l0_n175(x)
+ if (x < 1)
+ fun_l1_n402(x)
+ else
+ fun_l1_n588(x)
+ end
+end
+
+def fun_l0_n176(x)
+ if (x < 1)
+ fun_l1_n245(x)
+ else
+ fun_l1_n540(x)
+ end
+end
+
+def fun_l0_n177(x)
+ if (x < 1)
+ fun_l1_n869(x)
+ else
+ fun_l1_n249(x)
+ end
+end
+
+def fun_l0_n178(x)
+ if (x < 1)
+ fun_l1_n830(x)
+ else
+ fun_l1_n210(x)
+ end
+end
+
+def fun_l0_n179(x)
+ if (x < 1)
+ fun_l1_n692(x)
+ else
+ fun_l1_n222(x)
+ end
+end
+
+def fun_l0_n180(x)
+ if (x < 1)
+ fun_l1_n220(x)
+ else
+ fun_l1_n281(x)
+ end
+end
+
+def fun_l0_n181(x)
+ if (x < 1)
+ fun_l1_n523(x)
+ else
+ fun_l1_n618(x)
+ end
+end
+
+def fun_l0_n182(x)
+ if (x < 1)
+ fun_l1_n44(x)
+ else
+ fun_l1_n422(x)
+ end
+end
+
+def fun_l0_n183(x)
+ if (x < 1)
+ fun_l1_n944(x)
+ else
+ fun_l1_n230(x)
+ end
+end
+
+def fun_l0_n184(x)
+ if (x < 1)
+ fun_l1_n678(x)
+ else
+ fun_l1_n121(x)
+ end
+end
+
+def fun_l0_n185(x)
+ if (x < 1)
+ fun_l1_n73(x)
+ else
+ fun_l1_n398(x)
+ end
+end
+
+def fun_l0_n186(x)
+ if (x < 1)
+ fun_l1_n121(x)
+ else
+ fun_l1_n689(x)
+ end
+end
+
+def fun_l0_n187(x)
+ if (x < 1)
+ fun_l1_n946(x)
+ else
+ fun_l1_n613(x)
+ end
+end
+
+def fun_l0_n188(x)
+ if (x < 1)
+ fun_l1_n987(x)
+ else
+ fun_l1_n785(x)
+ end
+end
+
+def fun_l0_n189(x)
+ if (x < 1)
+ fun_l1_n181(x)
+ else
+ fun_l1_n644(x)
+ end
+end
+
+def fun_l0_n190(x)
+ if (x < 1)
+ fun_l1_n623(x)
+ else
+ fun_l1_n679(x)
+ end
+end
+
+def fun_l0_n191(x)
+ if (x < 1)
+ fun_l1_n784(x)
+ else
+ fun_l1_n274(x)
+ end
+end
+
+def fun_l0_n192(x)
+ if (x < 1)
+ fun_l1_n71(x)
+ else
+ fun_l1_n773(x)
+ end
+end
+
+def fun_l0_n193(x)
+ if (x < 1)
+ fun_l1_n516(x)
+ else
+ fun_l1_n496(x)
+ end
+end
+
+def fun_l0_n194(x)
+ if (x < 1)
+ fun_l1_n608(x)
+ else
+ fun_l1_n460(x)
+ end
+end
+
+def fun_l0_n195(x)
+ if (x < 1)
+ fun_l1_n25(x)
+ else
+ fun_l1_n437(x)
+ end
+end
+
+def fun_l0_n196(x)
+ if (x < 1)
+ fun_l1_n410(x)
+ else
+ fun_l1_n674(x)
+ end
+end
+
+def fun_l0_n197(x)
+ if (x < 1)
+ fun_l1_n949(x)
+ else
+ fun_l1_n708(x)
+ end
+end
+
+def fun_l0_n198(x)
+ if (x < 1)
+ fun_l1_n714(x)
+ else
+ fun_l1_n119(x)
+ end
+end
+
+def fun_l0_n199(x)
+ if (x < 1)
+ fun_l1_n41(x)
+ else
+ fun_l1_n865(x)
+ end
+end
+
+def fun_l0_n200(x)
+ if (x < 1)
+ fun_l1_n383(x)
+ else
+ fun_l1_n90(x)
+ end
+end
+
+def fun_l0_n201(x)
+ if (x < 1)
+ fun_l1_n581(x)
+ else
+ fun_l1_n273(x)
+ end
+end
+
+def fun_l0_n202(x)
+ if (x < 1)
+ fun_l1_n350(x)
+ else
+ fun_l1_n425(x)
+ end
+end
+
+def fun_l0_n203(x)
+ if (x < 1)
+ fun_l1_n259(x)
+ else
+ fun_l1_n922(x)
+ end
+end
+
+def fun_l0_n204(x)
+ if (x < 1)
+ fun_l1_n448(x)
+ else
+ fun_l1_n741(x)
+ end
+end
+
+def fun_l0_n205(x)
+ if (x < 1)
+ fun_l1_n86(x)
+ else
+ fun_l1_n618(x)
+ end
+end
+
+def fun_l0_n206(x)
+ if (x < 1)
+ fun_l1_n412(x)
+ else
+ fun_l1_n646(x)
+ end
+end
+
+def fun_l0_n207(x)
+ if (x < 1)
+ fun_l1_n196(x)
+ else
+ fun_l1_n496(x)
+ end
+end
+
+def fun_l0_n208(x)
+ if (x < 1)
+ fun_l1_n777(x)
+ else
+ fun_l1_n150(x)
+ end
+end
+
+def fun_l0_n209(x)
+ if (x < 1)
+ fun_l1_n20(x)
+ else
+ fun_l1_n718(x)
+ end
+end
+
+def fun_l0_n210(x)
+ if (x < 1)
+ fun_l1_n934(x)
+ else
+ fun_l1_n416(x)
+ end
+end
+
+def fun_l0_n211(x)
+ if (x < 1)
+ fun_l1_n803(x)
+ else
+ fun_l1_n636(x)
+ end
+end
+
+def fun_l0_n212(x)
+ if (x < 1)
+ fun_l1_n93(x)
+ else
+ fun_l1_n791(x)
+ end
+end
+
+def fun_l0_n213(x)
+ if (x < 1)
+ fun_l1_n76(x)
+ else
+ fun_l1_n172(x)
+ end
+end
+
+def fun_l0_n214(x)
+ if (x < 1)
+ fun_l1_n103(x)
+ else
+ fun_l1_n381(x)
+ end
+end
+
+def fun_l0_n215(x)
+ if (x < 1)
+ fun_l1_n433(x)
+ else
+ fun_l1_n349(x)
+ end
+end
+
+def fun_l0_n216(x)
+ if (x < 1)
+ fun_l1_n154(x)
+ else
+ fun_l1_n740(x)
+ end
+end
+
+def fun_l0_n217(x)
+ if (x < 1)
+ fun_l1_n927(x)
+ else
+ fun_l1_n886(x)
+ end
+end
+
+def fun_l0_n218(x)
+ if (x < 1)
+ fun_l1_n804(x)
+ else
+ fun_l1_n6(x)
+ end
+end
+
+def fun_l0_n219(x)
+ if (x < 1)
+ fun_l1_n921(x)
+ else
+ fun_l1_n121(x)
+ end
+end
+
+def fun_l0_n220(x)
+ if (x < 1)
+ fun_l1_n732(x)
+ else
+ fun_l1_n224(x)
+ end
+end
+
+def fun_l0_n221(x)
+ if (x < 1)
+ fun_l1_n818(x)
+ else
+ fun_l1_n105(x)
+ end
+end
+
+def fun_l0_n222(x)
+ if (x < 1)
+ fun_l1_n162(x)
+ else
+ fun_l1_n700(x)
+ end
+end
+
+def fun_l0_n223(x)
+ if (x < 1)
+ fun_l1_n57(x)
+ else
+ fun_l1_n734(x)
+ end
+end
+
+def fun_l0_n224(x)
+ if (x < 1)
+ fun_l1_n283(x)
+ else
+ fun_l1_n87(x)
+ end
+end
+
+def fun_l0_n225(x)
+ if (x < 1)
+ fun_l1_n623(x)
+ else
+ fun_l1_n363(x)
+ end
+end
+
+def fun_l0_n226(x)
+ if (x < 1)
+ fun_l1_n962(x)
+ else
+ fun_l1_n660(x)
+ end
+end
+
+def fun_l0_n227(x)
+ if (x < 1)
+ fun_l1_n255(x)
+ else
+ fun_l1_n536(x)
+ end
+end
+
+def fun_l0_n228(x)
+ if (x < 1)
+ fun_l1_n377(x)
+ else
+ fun_l1_n249(x)
+ end
+end
+
+def fun_l0_n229(x)
+ if (x < 1)
+ fun_l1_n527(x)
+ else
+ fun_l1_n691(x)
+ end
+end
+
+def fun_l0_n230(x)
+ if (x < 1)
+ fun_l1_n781(x)
+ else
+ fun_l1_n494(x)
+ end
+end
+
+def fun_l0_n231(x)
+ if (x < 1)
+ fun_l1_n889(x)
+ else
+ fun_l1_n967(x)
+ end
+end
+
+def fun_l0_n232(x)
+ if (x < 1)
+ fun_l1_n509(x)
+ else
+ fun_l1_n910(x)
+ end
+end
+
+def fun_l0_n233(x)
+ if (x < 1)
+ fun_l1_n354(x)
+ else
+ fun_l1_n5(x)
+ end
+end
+
+def fun_l0_n234(x)
+ if (x < 1)
+ fun_l1_n423(x)
+ else
+ fun_l1_n788(x)
+ end
+end
+
+def fun_l0_n235(x)
+ if (x < 1)
+ fun_l1_n632(x)
+ else
+ fun_l1_n705(x)
+ end
+end
+
+def fun_l0_n236(x)
+ if (x < 1)
+ fun_l1_n955(x)
+ else
+ fun_l1_n908(x)
+ end
+end
+
+def fun_l0_n237(x)
+ if (x < 1)
+ fun_l1_n329(x)
+ else
+ fun_l1_n2(x)
+ end
+end
+
+def fun_l0_n238(x)
+ if (x < 1)
+ fun_l1_n767(x)
+ else
+ fun_l1_n766(x)
+ end
+end
+
+def fun_l0_n239(x)
+ if (x < 1)
+ fun_l1_n304(x)
+ else
+ fun_l1_n507(x)
+ end
+end
+
+def fun_l0_n240(x)
+ if (x < 1)
+ fun_l1_n845(x)
+ else
+ fun_l1_n777(x)
+ end
+end
+
+def fun_l0_n241(x)
+ if (x < 1)
+ fun_l1_n69(x)
+ else
+ fun_l1_n260(x)
+ end
+end
+
+def fun_l0_n242(x)
+ if (x < 1)
+ fun_l1_n194(x)
+ else
+ fun_l1_n412(x)
+ end
+end
+
+def fun_l0_n243(x)
+ if (x < 1)
+ fun_l1_n974(x)
+ else
+ fun_l1_n922(x)
+ end
+end
+
+def fun_l0_n244(x)
+ if (x < 1)
+ fun_l1_n532(x)
+ else
+ fun_l1_n530(x)
+ end
+end
+
+def fun_l0_n245(x)
+ if (x < 1)
+ fun_l1_n554(x)
+ else
+ fun_l1_n656(x)
+ end
+end
+
+def fun_l0_n246(x)
+ if (x < 1)
+ fun_l1_n253(x)
+ else
+ fun_l1_n235(x)
+ end
+end
+
+def fun_l0_n247(x)
+ if (x < 1)
+ fun_l1_n754(x)
+ else
+ fun_l1_n775(x)
+ end
+end
+
+def fun_l0_n248(x)
+ if (x < 1)
+ fun_l1_n89(x)
+ else
+ fun_l1_n129(x)
+ end
+end
+
+def fun_l0_n249(x)
+ if (x < 1)
+ fun_l1_n284(x)
+ else
+ fun_l1_n642(x)
+ end
+end
+
+def fun_l0_n250(x)
+ if (x < 1)
+ fun_l1_n67(x)
+ else
+ fun_l1_n867(x)
+ end
+end
+
+def fun_l0_n251(x)
+ if (x < 1)
+ fun_l1_n926(x)
+ else
+ fun_l1_n442(x)
+ end
+end
+
+def fun_l0_n252(x)
+ if (x < 1)
+ fun_l1_n196(x)
+ else
+ fun_l1_n583(x)
+ end
+end
+
+def fun_l0_n253(x)
+ if (x < 1)
+ fun_l1_n966(x)
+ else
+ fun_l1_n810(x)
+ end
+end
+
+def fun_l0_n254(x)
+ if (x < 1)
+ fun_l1_n984(x)
+ else
+ fun_l1_n752(x)
+ end
+end
+
+def fun_l0_n255(x)
+ if (x < 1)
+ fun_l1_n941(x)
+ else
+ fun_l1_n744(x)
+ end
+end
+
+def fun_l0_n256(x)
+ if (x < 1)
+ fun_l1_n785(x)
+ else
+ fun_l1_n610(x)
+ end
+end
+
+def fun_l0_n257(x)
+ if (x < 1)
+ fun_l1_n622(x)
+ else
+ fun_l1_n730(x)
+ end
+end
+
+def fun_l0_n258(x)
+ if (x < 1)
+ fun_l1_n453(x)
+ else
+ fun_l1_n641(x)
+ end
+end
+
+def fun_l0_n259(x)
+ if (x < 1)
+ fun_l1_n407(x)
+ else
+ fun_l1_n818(x)
+ end
+end
+
+def fun_l0_n260(x)
+ if (x < 1)
+ fun_l1_n252(x)
+ else
+ fun_l1_n205(x)
+ end
+end
+
+def fun_l0_n261(x)
+ if (x < 1)
+ fun_l1_n940(x)
+ else
+ fun_l1_n678(x)
+ end
+end
+
+def fun_l0_n262(x)
+ if (x < 1)
+ fun_l1_n350(x)
+ else
+ fun_l1_n496(x)
+ end
+end
+
+def fun_l0_n263(x)
+ if (x < 1)
+ fun_l1_n688(x)
+ else
+ fun_l1_n193(x)
+ end
+end
+
+def fun_l0_n264(x)
+ if (x < 1)
+ fun_l1_n366(x)
+ else
+ fun_l1_n790(x)
+ end
+end
+
+def fun_l0_n265(x)
+ if (x < 1)
+ fun_l1_n592(x)
+ else
+ fun_l1_n93(x)
+ end
+end
+
+def fun_l0_n266(x)
+ if (x < 1)
+ fun_l1_n189(x)
+ else
+ fun_l1_n467(x)
+ end
+end
+
+def fun_l0_n267(x)
+ if (x < 1)
+ fun_l1_n813(x)
+ else
+ fun_l1_n7(x)
+ end
+end
+
+def fun_l0_n268(x)
+ if (x < 1)
+ fun_l1_n516(x)
+ else
+ fun_l1_n271(x)
+ end
+end
+
+def fun_l0_n269(x)
+ if (x < 1)
+ fun_l1_n0(x)
+ else
+ fun_l1_n121(x)
+ end
+end
+
+def fun_l0_n270(x)
+ if (x < 1)
+ fun_l1_n634(x)
+ else
+ fun_l1_n726(x)
+ end
+end
+
+def fun_l0_n271(x)
+ if (x < 1)
+ fun_l1_n192(x)
+ else
+ fun_l1_n468(x)
+ end
+end
+
+def fun_l0_n272(x)
+ if (x < 1)
+ fun_l1_n278(x)
+ else
+ fun_l1_n355(x)
+ end
+end
+
+def fun_l0_n273(x)
+ if (x < 1)
+ fun_l1_n354(x)
+ else
+ fun_l1_n87(x)
+ end
+end
+
+def fun_l0_n274(x)
+ if (x < 1)
+ fun_l1_n643(x)
+ else
+ fun_l1_n806(x)
+ end
+end
+
+def fun_l0_n275(x)
+ if (x < 1)
+ fun_l1_n389(x)
+ else
+ fun_l1_n559(x)
+ end
+end
+
+def fun_l0_n276(x)
+ if (x < 1)
+ fun_l1_n283(x)
+ else
+ fun_l1_n539(x)
+ end
+end
+
+def fun_l0_n277(x)
+ if (x < 1)
+ fun_l1_n0(x)
+ else
+ fun_l1_n351(x)
+ end
+end
+
+def fun_l0_n278(x)
+ if (x < 1)
+ fun_l1_n813(x)
+ else
+ fun_l1_n513(x)
+ end
+end
+
+def fun_l0_n279(x)
+ if (x < 1)
+ fun_l1_n501(x)
+ else
+ fun_l1_n967(x)
+ end
+end
+
+def fun_l0_n280(x)
+ if (x < 1)
+ fun_l1_n727(x)
+ else
+ fun_l1_n232(x)
+ end
+end
+
+def fun_l0_n281(x)
+ if (x < 1)
+ fun_l1_n946(x)
+ else
+ fun_l1_n693(x)
+ end
+end
+
+def fun_l0_n282(x)
+ if (x < 1)
+ fun_l1_n260(x)
+ else
+ fun_l1_n525(x)
+ end
+end
+
+def fun_l0_n283(x)
+ if (x < 1)
+ fun_l1_n957(x)
+ else
+ fun_l1_n817(x)
+ end
+end
+
+def fun_l0_n284(x)
+ if (x < 1)
+ fun_l1_n91(x)
+ else
+ fun_l1_n735(x)
+ end
+end
+
+def fun_l0_n285(x)
+ if (x < 1)
+ fun_l1_n71(x)
+ else
+ fun_l1_n24(x)
+ end
+end
+
+def fun_l0_n286(x)
+ if (x < 1)
+ fun_l1_n594(x)
+ else
+ fun_l1_n747(x)
+ end
+end
+
+def fun_l0_n287(x)
+ if (x < 1)
+ fun_l1_n869(x)
+ else
+ fun_l1_n414(x)
+ end
+end
+
+def fun_l0_n288(x)
+ if (x < 1)
+ fun_l1_n182(x)
+ else
+ fun_l1_n835(x)
+ end
+end
+
+def fun_l0_n289(x)
+ if (x < 1)
+ fun_l1_n188(x)
+ else
+ fun_l1_n949(x)
+ end
+end
+
+def fun_l0_n290(x)
+ if (x < 1)
+ fun_l1_n35(x)
+ else
+ fun_l1_n259(x)
+ end
+end
+
+def fun_l0_n291(x)
+ if (x < 1)
+ fun_l1_n593(x)
+ else
+ fun_l1_n810(x)
+ end
+end
+
+def fun_l0_n292(x)
+ if (x < 1)
+ fun_l1_n680(x)
+ else
+ fun_l1_n564(x)
+ end
+end
+
+def fun_l0_n293(x)
+ if (x < 1)
+ fun_l1_n57(x)
+ else
+ fun_l1_n91(x)
+ end
+end
+
+def fun_l0_n294(x)
+ if (x < 1)
+ fun_l1_n945(x)
+ else
+ fun_l1_n807(x)
+ end
+end
+
+def fun_l0_n295(x)
+ if (x < 1)
+ fun_l1_n442(x)
+ else
+ fun_l1_n123(x)
+ end
+end
+
+def fun_l0_n296(x)
+ if (x < 1)
+ fun_l1_n536(x)
+ else
+ fun_l1_n987(x)
+ end
+end
+
+def fun_l0_n297(x)
+ if (x < 1)
+ fun_l1_n519(x)
+ else
+ fun_l1_n657(x)
+ end
+end
+
+def fun_l0_n298(x)
+ if (x < 1)
+ fun_l1_n204(x)
+ else
+ fun_l1_n437(x)
+ end
+end
+
+def fun_l0_n299(x)
+ if (x < 1)
+ fun_l1_n289(x)
+ else
+ fun_l1_n385(x)
+ end
+end
+
+def fun_l0_n300(x)
+ if (x < 1)
+ fun_l1_n629(x)
+ else
+ fun_l1_n803(x)
+ end
+end
+
+def fun_l0_n301(x)
+ if (x < 1)
+ fun_l1_n576(x)
+ else
+ fun_l1_n921(x)
+ end
+end
+
+def fun_l0_n302(x)
+ if (x < 1)
+ fun_l1_n466(x)
+ else
+ fun_l1_n475(x)
+ end
+end
+
+def fun_l0_n303(x)
+ if (x < 1)
+ fun_l1_n658(x)
+ else
+ fun_l1_n333(x)
+ end
+end
+
+def fun_l0_n304(x)
+ if (x < 1)
+ fun_l1_n728(x)
+ else
+ fun_l1_n749(x)
+ end
+end
+
+def fun_l0_n305(x)
+ if (x < 1)
+ fun_l1_n68(x)
+ else
+ fun_l1_n552(x)
+ end
+end
+
+def fun_l0_n306(x)
+ if (x < 1)
+ fun_l1_n633(x)
+ else
+ fun_l1_n958(x)
+ end
+end
+
+def fun_l0_n307(x)
+ if (x < 1)
+ fun_l1_n79(x)
+ else
+ fun_l1_n386(x)
+ end
+end
+
+def fun_l0_n308(x)
+ if (x < 1)
+ fun_l1_n831(x)
+ else
+ fun_l1_n176(x)
+ end
+end
+
+def fun_l0_n309(x)
+ if (x < 1)
+ fun_l1_n770(x)
+ else
+ fun_l1_n334(x)
+ end
+end
+
+def fun_l0_n310(x)
+ if (x < 1)
+ fun_l1_n21(x)
+ else
+ fun_l1_n643(x)
+ end
+end
+
+def fun_l0_n311(x)
+ if (x < 1)
+ fun_l1_n673(x)
+ else
+ fun_l1_n298(x)
+ end
+end
+
+def fun_l0_n312(x)
+ if (x < 1)
+ fun_l1_n753(x)
+ else
+ fun_l1_n817(x)
+ end
+end
+
+def fun_l0_n313(x)
+ if (x < 1)
+ fun_l1_n299(x)
+ else
+ fun_l1_n350(x)
+ end
+end
+
+def fun_l0_n314(x)
+ if (x < 1)
+ fun_l1_n190(x)
+ else
+ fun_l1_n519(x)
+ end
+end
+
+def fun_l0_n315(x)
+ if (x < 1)
+ fun_l1_n934(x)
+ else
+ fun_l1_n416(x)
+ end
+end
+
+def fun_l0_n316(x)
+ if (x < 1)
+ fun_l1_n695(x)
+ else
+ fun_l1_n377(x)
+ end
+end
+
+def fun_l0_n317(x)
+ if (x < 1)
+ fun_l1_n194(x)
+ else
+ fun_l1_n747(x)
+ end
+end
+
+def fun_l0_n318(x)
+ if (x < 1)
+ fun_l1_n145(x)
+ else
+ fun_l1_n761(x)
+ end
+end
+
+def fun_l0_n319(x)
+ if (x < 1)
+ fun_l1_n223(x)
+ else
+ fun_l1_n237(x)
+ end
+end
+
+def fun_l0_n320(x)
+ if (x < 1)
+ fun_l1_n907(x)
+ else
+ fun_l1_n653(x)
+ end
+end
+
+def fun_l0_n321(x)
+ if (x < 1)
+ fun_l1_n216(x)
+ else
+ fun_l1_n67(x)
+ end
+end
+
+def fun_l0_n322(x)
+ if (x < 1)
+ fun_l1_n658(x)
+ else
+ fun_l1_n948(x)
+ end
+end
+
+def fun_l0_n323(x)
+ if (x < 1)
+ fun_l1_n358(x)
+ else
+ fun_l1_n926(x)
+ end
+end
+
+def fun_l0_n324(x)
+ if (x < 1)
+ fun_l1_n533(x)
+ else
+ fun_l1_n252(x)
+ end
+end
+
+def fun_l0_n325(x)
+ if (x < 1)
+ fun_l1_n284(x)
+ else
+ fun_l1_n670(x)
+ end
+end
+
+def fun_l0_n326(x)
+ if (x < 1)
+ fun_l1_n327(x)
+ else
+ fun_l1_n298(x)
+ end
+end
+
+def fun_l0_n327(x)
+ if (x < 1)
+ fun_l1_n837(x)
+ else
+ fun_l1_n890(x)
+ end
+end
+
+def fun_l0_n328(x)
+ if (x < 1)
+ fun_l1_n941(x)
+ else
+ fun_l1_n794(x)
+ end
+end
+
+def fun_l0_n329(x)
+ if (x < 1)
+ fun_l1_n859(x)
+ else
+ fun_l1_n970(x)
+ end
+end
+
+def fun_l0_n330(x)
+ if (x < 1)
+ fun_l1_n282(x)
+ else
+ fun_l1_n569(x)
+ end
+end
+
+def fun_l0_n331(x)
+ if (x < 1)
+ fun_l1_n33(x)
+ else
+ fun_l1_n525(x)
+ end
+end
+
+def fun_l0_n332(x)
+ if (x < 1)
+ fun_l1_n847(x)
+ else
+ fun_l1_n540(x)
+ end
+end
+
+def fun_l0_n333(x)
+ if (x < 1)
+ fun_l1_n485(x)
+ else
+ fun_l1_n756(x)
+ end
+end
+
+def fun_l0_n334(x)
+ if (x < 1)
+ fun_l1_n396(x)
+ else
+ fun_l1_n781(x)
+ end
+end
+
+def fun_l0_n335(x)
+ if (x < 1)
+ fun_l1_n225(x)
+ else
+ fun_l1_n417(x)
+ end
+end
+
+def fun_l0_n336(x)
+ if (x < 1)
+ fun_l1_n906(x)
+ else
+ fun_l1_n301(x)
+ end
+end
+
+def fun_l0_n337(x)
+ if (x < 1)
+ fun_l1_n863(x)
+ else
+ fun_l1_n409(x)
+ end
+end
+
+def fun_l0_n338(x)
+ if (x < 1)
+ fun_l1_n103(x)
+ else
+ fun_l1_n460(x)
+ end
+end
+
+def fun_l0_n339(x)
+ if (x < 1)
+ fun_l1_n787(x)
+ else
+ fun_l1_n434(x)
+ end
+end
+
+def fun_l0_n340(x)
+ if (x < 1)
+ fun_l1_n308(x)
+ else
+ fun_l1_n875(x)
+ end
+end
+
+def fun_l0_n341(x)
+ if (x < 1)
+ fun_l1_n146(x)
+ else
+ fun_l1_n566(x)
+ end
+end
+
+def fun_l0_n342(x)
+ if (x < 1)
+ fun_l1_n837(x)
+ else
+ fun_l1_n770(x)
+ end
+end
+
+def fun_l0_n343(x)
+ if (x < 1)
+ fun_l1_n668(x)
+ else
+ fun_l1_n60(x)
+ end
+end
+
+def fun_l0_n344(x)
+ if (x < 1)
+ fun_l1_n305(x)
+ else
+ fun_l1_n388(x)
+ end
+end
+
+def fun_l0_n345(x)
+ if (x < 1)
+ fun_l1_n796(x)
+ else
+ fun_l1_n920(x)
+ end
+end
+
+def fun_l0_n346(x)
+ if (x < 1)
+ fun_l1_n257(x)
+ else
+ fun_l1_n229(x)
+ end
+end
+
+def fun_l0_n347(x)
+ if (x < 1)
+ fun_l1_n740(x)
+ else
+ fun_l1_n372(x)
+ end
+end
+
+def fun_l0_n348(x)
+ if (x < 1)
+ fun_l1_n698(x)
+ else
+ fun_l1_n63(x)
+ end
+end
+
+def fun_l0_n349(x)
+ if (x < 1)
+ fun_l1_n4(x)
+ else
+ fun_l1_n119(x)
+ end
+end
+
+def fun_l0_n350(x)
+ if (x < 1)
+ fun_l1_n186(x)
+ else
+ fun_l1_n352(x)
+ end
+end
+
+def fun_l0_n351(x)
+ if (x < 1)
+ fun_l1_n619(x)
+ else
+ fun_l1_n583(x)
+ end
+end
+
+def fun_l0_n352(x)
+ if (x < 1)
+ fun_l1_n2(x)
+ else
+ fun_l1_n275(x)
+ end
+end
+
+def fun_l0_n353(x)
+ if (x < 1)
+ fun_l1_n843(x)
+ else
+ fun_l1_n986(x)
+ end
+end
+
+def fun_l0_n354(x)
+ if (x < 1)
+ fun_l1_n865(x)
+ else
+ fun_l1_n64(x)
+ end
+end
+
+def fun_l0_n355(x)
+ if (x < 1)
+ fun_l1_n732(x)
+ else
+ fun_l1_n332(x)
+ end
+end
+
+def fun_l0_n356(x)
+ if (x < 1)
+ fun_l1_n642(x)
+ else
+ fun_l1_n559(x)
+ end
+end
+
+def fun_l0_n357(x)
+ if (x < 1)
+ fun_l1_n385(x)
+ else
+ fun_l1_n532(x)
+ end
+end
+
+def fun_l0_n358(x)
+ if (x < 1)
+ fun_l1_n672(x)
+ else
+ fun_l1_n721(x)
+ end
+end
+
+def fun_l0_n359(x)
+ if (x < 1)
+ fun_l1_n663(x)
+ else
+ fun_l1_n590(x)
+ end
+end
+
+def fun_l0_n360(x)
+ if (x < 1)
+ fun_l1_n240(x)
+ else
+ fun_l1_n743(x)
+ end
+end
+
+def fun_l0_n361(x)
+ if (x < 1)
+ fun_l1_n301(x)
+ else
+ fun_l1_n943(x)
+ end
+end
+
+def fun_l0_n362(x)
+ if (x < 1)
+ fun_l1_n108(x)
+ else
+ fun_l1_n975(x)
+ end
+end
+
+def fun_l0_n363(x)
+ if (x < 1)
+ fun_l1_n554(x)
+ else
+ fun_l1_n135(x)
+ end
+end
+
+def fun_l0_n364(x)
+ if (x < 1)
+ fun_l1_n360(x)
+ else
+ fun_l1_n821(x)
+ end
+end
+
+def fun_l0_n365(x)
+ if (x < 1)
+ fun_l1_n992(x)
+ else
+ fun_l1_n158(x)
+ end
+end
+
+def fun_l0_n366(x)
+ if (x < 1)
+ fun_l1_n439(x)
+ else
+ fun_l1_n506(x)
+ end
+end
+
+def fun_l0_n367(x)
+ if (x < 1)
+ fun_l1_n229(x)
+ else
+ fun_l1_n714(x)
+ end
+end
+
+def fun_l0_n368(x)
+ if (x < 1)
+ fun_l1_n459(x)
+ else
+ fun_l1_n874(x)
+ end
+end
+
+def fun_l0_n369(x)
+ if (x < 1)
+ fun_l1_n799(x)
+ else
+ fun_l1_n406(x)
+ end
+end
+
+def fun_l0_n370(x)
+ if (x < 1)
+ fun_l1_n791(x)
+ else
+ fun_l1_n669(x)
+ end
+end
+
+def fun_l0_n371(x)
+ if (x < 1)
+ fun_l1_n99(x)
+ else
+ fun_l1_n344(x)
+ end
+end
+
+def fun_l0_n372(x)
+ if (x < 1)
+ fun_l1_n585(x)
+ else
+ fun_l1_n388(x)
+ end
+end
+
+def fun_l0_n373(x)
+ if (x < 1)
+ fun_l1_n35(x)
+ else
+ fun_l1_n369(x)
+ end
+end
+
+def fun_l0_n374(x)
+ if (x < 1)
+ fun_l1_n41(x)
+ else
+ fun_l1_n378(x)
+ end
+end
+
+def fun_l0_n375(x)
+ if (x < 1)
+ fun_l1_n372(x)
+ else
+ fun_l1_n406(x)
+ end
+end
+
+def fun_l0_n376(x)
+ if (x < 1)
+ fun_l1_n247(x)
+ else
+ fun_l1_n563(x)
+ end
+end
+
+def fun_l0_n377(x)
+ if (x < 1)
+ fun_l1_n998(x)
+ else
+ fun_l1_n302(x)
+ end
+end
+
+def fun_l0_n378(x)
+ if (x < 1)
+ fun_l1_n955(x)
+ else
+ fun_l1_n333(x)
+ end
+end
+
+def fun_l0_n379(x)
+ if (x < 1)
+ fun_l1_n554(x)
+ else
+ fun_l1_n863(x)
+ end
+end
+
+def fun_l0_n380(x)
+ if (x < 1)
+ fun_l1_n248(x)
+ else
+ fun_l1_n297(x)
+ end
+end
+
+def fun_l0_n381(x)
+ if (x < 1)
+ fun_l1_n85(x)
+ else
+ fun_l1_n726(x)
+ end
+end
+
+def fun_l0_n382(x)
+ if (x < 1)
+ fun_l1_n75(x)
+ else
+ fun_l1_n990(x)
+ end
+end
+
+def fun_l0_n383(x)
+ if (x < 1)
+ fun_l1_n353(x)
+ else
+ fun_l1_n218(x)
+ end
+end
+
+def fun_l0_n384(x)
+ if (x < 1)
+ fun_l1_n103(x)
+ else
+ fun_l1_n192(x)
+ end
+end
+
+def fun_l0_n385(x)
+ if (x < 1)
+ fun_l1_n659(x)
+ else
+ fun_l1_n996(x)
+ end
+end
+
+def fun_l0_n386(x)
+ if (x < 1)
+ fun_l1_n528(x)
+ else
+ fun_l1_n654(x)
+ end
+end
+
+def fun_l0_n387(x)
+ if (x < 1)
+ fun_l1_n857(x)
+ else
+ fun_l1_n638(x)
+ end
+end
+
+def fun_l0_n388(x)
+ if (x < 1)
+ fun_l1_n311(x)
+ else
+ fun_l1_n877(x)
+ end
+end
+
+def fun_l0_n389(x)
+ if (x < 1)
+ fun_l1_n406(x)
+ else
+ fun_l1_n191(x)
+ end
+end
+
+def fun_l0_n390(x)
+ if (x < 1)
+ fun_l1_n464(x)
+ else
+ fun_l1_n684(x)
+ end
+end
+
+def fun_l0_n391(x)
+ if (x < 1)
+ fun_l1_n878(x)
+ else
+ fun_l1_n635(x)
+ end
+end
+
+def fun_l0_n392(x)
+ if (x < 1)
+ fun_l1_n863(x)
+ else
+ fun_l1_n486(x)
+ end
+end
+
+def fun_l0_n393(x)
+ if (x < 1)
+ fun_l1_n587(x)
+ else
+ fun_l1_n387(x)
+ end
+end
+
+def fun_l0_n394(x)
+ if (x < 1)
+ fun_l1_n553(x)
+ else
+ fun_l1_n789(x)
+ end
+end
+
+def fun_l0_n395(x)
+ if (x < 1)
+ fun_l1_n420(x)
+ else
+ fun_l1_n148(x)
+ end
+end
+
+def fun_l0_n396(x)
+ if (x < 1)
+ fun_l1_n802(x)
+ else
+ fun_l1_n845(x)
+ end
+end
+
+def fun_l0_n397(x)
+ if (x < 1)
+ fun_l1_n749(x)
+ else
+ fun_l1_n742(x)
+ end
+end
+
+def fun_l0_n398(x)
+ if (x < 1)
+ fun_l1_n929(x)
+ else
+ fun_l1_n458(x)
+ end
+end
+
+def fun_l0_n399(x)
+ if (x < 1)
+ fun_l1_n908(x)
+ else
+ fun_l1_n188(x)
+ end
+end
+
+def fun_l0_n400(x)
+ if (x < 1)
+ fun_l1_n932(x)
+ else
+ fun_l1_n854(x)
+ end
+end
+
+def fun_l0_n401(x)
+ if (x < 1)
+ fun_l1_n29(x)
+ else
+ fun_l1_n666(x)
+ end
+end
+
+def fun_l0_n402(x)
+ if (x < 1)
+ fun_l1_n877(x)
+ else
+ fun_l1_n518(x)
+ end
+end
+
+def fun_l0_n403(x)
+ if (x < 1)
+ fun_l1_n639(x)
+ else
+ fun_l1_n618(x)
+ end
+end
+
+def fun_l0_n404(x)
+ if (x < 1)
+ fun_l1_n177(x)
+ else
+ fun_l1_n487(x)
+ end
+end
+
+def fun_l0_n405(x)
+ if (x < 1)
+ fun_l1_n536(x)
+ else
+ fun_l1_n860(x)
+ end
+end
+
+def fun_l0_n406(x)
+ if (x < 1)
+ fun_l1_n626(x)
+ else
+ fun_l1_n894(x)
+ end
+end
+
+def fun_l0_n407(x)
+ if (x < 1)
+ fun_l1_n533(x)
+ else
+ fun_l1_n367(x)
+ end
+end
+
+def fun_l0_n408(x)
+ if (x < 1)
+ fun_l1_n146(x)
+ else
+ fun_l1_n546(x)
+ end
+end
+
+def fun_l0_n409(x)
+ if (x < 1)
+ fun_l1_n872(x)
+ else
+ fun_l1_n387(x)
+ end
+end
+
+def fun_l0_n410(x)
+ if (x < 1)
+ fun_l1_n726(x)
+ else
+ fun_l1_n973(x)
+ end
+end
+
+def fun_l0_n411(x)
+ if (x < 1)
+ fun_l1_n168(x)
+ else
+ fun_l1_n783(x)
+ end
+end
+
+def fun_l0_n412(x)
+ if (x < 1)
+ fun_l1_n895(x)
+ else
+ fun_l1_n901(x)
+ end
+end
+
+def fun_l0_n413(x)
+ if (x < 1)
+ fun_l1_n235(x)
+ else
+ fun_l1_n593(x)
+ end
+end
+
+def fun_l0_n414(x)
+ if (x < 1)
+ fun_l1_n328(x)
+ else
+ fun_l1_n693(x)
+ end
+end
+
+def fun_l0_n415(x)
+ if (x < 1)
+ fun_l1_n882(x)
+ else
+ fun_l1_n290(x)
+ end
+end
+
+def fun_l0_n416(x)
+ if (x < 1)
+ fun_l1_n433(x)
+ else
+ fun_l1_n220(x)
+ end
+end
+
+def fun_l0_n417(x)
+ if (x < 1)
+ fun_l1_n966(x)
+ else
+ fun_l1_n74(x)
+ end
+end
+
+def fun_l0_n418(x)
+ if (x < 1)
+ fun_l1_n750(x)
+ else
+ fun_l1_n547(x)
+ end
+end
+
+def fun_l0_n419(x)
+ if (x < 1)
+ fun_l1_n94(x)
+ else
+ fun_l1_n794(x)
+ end
+end
+
+def fun_l0_n420(x)
+ if (x < 1)
+ fun_l1_n68(x)
+ else
+ fun_l1_n970(x)
+ end
+end
+
+def fun_l0_n421(x)
+ if (x < 1)
+ fun_l1_n663(x)
+ else
+ fun_l1_n388(x)
+ end
+end
+
+def fun_l0_n422(x)
+ if (x < 1)
+ fun_l1_n686(x)
+ else
+ fun_l1_n67(x)
+ end
+end
+
+def fun_l0_n423(x)
+ if (x < 1)
+ fun_l1_n210(x)
+ else
+ fun_l1_n64(x)
+ end
+end
+
+def fun_l0_n424(x)
+ if (x < 1)
+ fun_l1_n375(x)
+ else
+ fun_l1_n205(x)
+ end
+end
+
+def fun_l0_n425(x)
+ if (x < 1)
+ fun_l1_n832(x)
+ else
+ fun_l1_n325(x)
+ end
+end
+
+def fun_l0_n426(x)
+ if (x < 1)
+ fun_l1_n819(x)
+ else
+ fun_l1_n601(x)
+ end
+end
+
+def fun_l0_n427(x)
+ if (x < 1)
+ fun_l1_n588(x)
+ else
+ fun_l1_n875(x)
+ end
+end
+
+def fun_l0_n428(x)
+ if (x < 1)
+ fun_l1_n682(x)
+ else
+ fun_l1_n523(x)
+ end
+end
+
+def fun_l0_n429(x)
+ if (x < 1)
+ fun_l1_n972(x)
+ else
+ fun_l1_n891(x)
+ end
+end
+
+def fun_l0_n430(x)
+ if (x < 1)
+ fun_l1_n90(x)
+ else
+ fun_l1_n655(x)
+ end
+end
+
+def fun_l0_n431(x)
+ if (x < 1)
+ fun_l1_n393(x)
+ else
+ fun_l1_n454(x)
+ end
+end
+
+def fun_l0_n432(x)
+ if (x < 1)
+ fun_l1_n435(x)
+ else
+ fun_l1_n860(x)
+ end
+end
+
+def fun_l0_n433(x)
+ if (x < 1)
+ fun_l1_n516(x)
+ else
+ fun_l1_n246(x)
+ end
+end
+
+def fun_l0_n434(x)
+ if (x < 1)
+ fun_l1_n216(x)
+ else
+ fun_l1_n78(x)
+ end
+end
+
+def fun_l0_n435(x)
+ if (x < 1)
+ fun_l1_n357(x)
+ else
+ fun_l1_n61(x)
+ end
+end
+
+def fun_l0_n436(x)
+ if (x < 1)
+ fun_l1_n289(x)
+ else
+ fun_l1_n507(x)
+ end
+end
+
+def fun_l0_n437(x)
+ if (x < 1)
+ fun_l1_n182(x)
+ else
+ fun_l1_n289(x)
+ end
+end
+
+def fun_l0_n438(x)
+ if (x < 1)
+ fun_l1_n69(x)
+ else
+ fun_l1_n549(x)
+ end
+end
+
+def fun_l0_n439(x)
+ if (x < 1)
+ fun_l1_n275(x)
+ else
+ fun_l1_n723(x)
+ end
+end
+
+def fun_l0_n440(x)
+ if (x < 1)
+ fun_l1_n469(x)
+ else
+ fun_l1_n243(x)
+ end
+end
+
+def fun_l0_n441(x)
+ if (x < 1)
+ fun_l1_n595(x)
+ else
+ fun_l1_n624(x)
+ end
+end
+
+def fun_l0_n442(x)
+ if (x < 1)
+ fun_l1_n655(x)
+ else
+ fun_l1_n896(x)
+ end
+end
+
+def fun_l0_n443(x)
+ if (x < 1)
+ fun_l1_n926(x)
+ else
+ fun_l1_n503(x)
+ end
+end
+
+def fun_l0_n444(x)
+ if (x < 1)
+ fun_l1_n875(x)
+ else
+ fun_l1_n110(x)
+ end
+end
+
+def fun_l0_n445(x)
+ if (x < 1)
+ fun_l1_n832(x)
+ else
+ fun_l1_n154(x)
+ end
+end
+
+def fun_l0_n446(x)
+ if (x < 1)
+ fun_l1_n538(x)
+ else
+ fun_l1_n75(x)
+ end
+end
+
+def fun_l0_n447(x)
+ if (x < 1)
+ fun_l1_n183(x)
+ else
+ fun_l1_n718(x)
+ end
+end
+
+def fun_l0_n448(x)
+ if (x < 1)
+ fun_l1_n680(x)
+ else
+ fun_l1_n93(x)
+ end
+end
+
+def fun_l0_n449(x)
+ if (x < 1)
+ fun_l1_n147(x)
+ else
+ fun_l1_n924(x)
+ end
+end
+
+def fun_l0_n450(x)
+ if (x < 1)
+ fun_l1_n702(x)
+ else
+ fun_l1_n830(x)
+ end
+end
+
+def fun_l0_n451(x)
+ if (x < 1)
+ fun_l1_n750(x)
+ else
+ fun_l1_n447(x)
+ end
+end
+
+def fun_l0_n452(x)
+ if (x < 1)
+ fun_l1_n520(x)
+ else
+ fun_l1_n69(x)
+ end
+end
+
+def fun_l0_n453(x)
+ if (x < 1)
+ fun_l1_n132(x)
+ else
+ fun_l1_n877(x)
+ end
+end
+
+def fun_l0_n454(x)
+ if (x < 1)
+ fun_l1_n247(x)
+ else
+ fun_l1_n69(x)
+ end
+end
+
+def fun_l0_n455(x)
+ if (x < 1)
+ fun_l1_n180(x)
+ else
+ fun_l1_n645(x)
+ end
+end
+
+def fun_l0_n456(x)
+ if (x < 1)
+ fun_l1_n658(x)
+ else
+ fun_l1_n487(x)
+ end
+end
+
+def fun_l0_n457(x)
+ if (x < 1)
+ fun_l1_n276(x)
+ else
+ fun_l1_n528(x)
+ end
+end
+
+def fun_l0_n458(x)
+ if (x < 1)
+ fun_l1_n30(x)
+ else
+ fun_l1_n456(x)
+ end
+end
+
+def fun_l0_n459(x)
+ if (x < 1)
+ fun_l1_n986(x)
+ else
+ fun_l1_n552(x)
+ end
+end
+
+def fun_l0_n460(x)
+ if (x < 1)
+ fun_l1_n874(x)
+ else
+ fun_l1_n396(x)
+ end
+end
+
+def fun_l0_n461(x)
+ if (x < 1)
+ fun_l1_n524(x)
+ else
+ fun_l1_n335(x)
+ end
+end
+
+def fun_l0_n462(x)
+ if (x < 1)
+ fun_l1_n471(x)
+ else
+ fun_l1_n578(x)
+ end
+end
+
+def fun_l0_n463(x)
+ if (x < 1)
+ fun_l1_n173(x)
+ else
+ fun_l1_n456(x)
+ end
+end
+
+def fun_l0_n464(x)
+ if (x < 1)
+ fun_l1_n872(x)
+ else
+ fun_l1_n745(x)
+ end
+end
+
+def fun_l0_n465(x)
+ if (x < 1)
+ fun_l1_n387(x)
+ else
+ fun_l1_n776(x)
+ end
+end
+
+def fun_l0_n466(x)
+ if (x < 1)
+ fun_l1_n119(x)
+ else
+ fun_l1_n428(x)
+ end
+end
+
+def fun_l0_n467(x)
+ if (x < 1)
+ fun_l1_n416(x)
+ else
+ fun_l1_n738(x)
+ end
+end
+
+def fun_l0_n468(x)
+ if (x < 1)
+ fun_l1_n762(x)
+ else
+ fun_l1_n694(x)
+ end
+end
+
+def fun_l0_n469(x)
+ if (x < 1)
+ fun_l1_n27(x)
+ else
+ fun_l1_n562(x)
+ end
+end
+
+def fun_l0_n470(x)
+ if (x < 1)
+ fun_l1_n12(x)
+ else
+ fun_l1_n812(x)
+ end
+end
+
+def fun_l0_n471(x)
+ if (x < 1)
+ fun_l1_n865(x)
+ else
+ fun_l1_n94(x)
+ end
+end
+
+def fun_l0_n472(x)
+ if (x < 1)
+ fun_l1_n346(x)
+ else
+ fun_l1_n39(x)
+ end
+end
+
+def fun_l0_n473(x)
+ if (x < 1)
+ fun_l1_n309(x)
+ else
+ fun_l1_n158(x)
+ end
+end
+
+def fun_l0_n474(x)
+ if (x < 1)
+ fun_l1_n828(x)
+ else
+ fun_l1_n229(x)
+ end
+end
+
+def fun_l0_n475(x)
+ if (x < 1)
+ fun_l1_n554(x)
+ else
+ fun_l1_n121(x)
+ end
+end
+
+def fun_l0_n476(x)
+ if (x < 1)
+ fun_l1_n818(x)
+ else
+ fun_l1_n390(x)
+ end
+end
+
+def fun_l0_n477(x)
+ if (x < 1)
+ fun_l1_n88(x)
+ else
+ fun_l1_n73(x)
+ end
+end
+
+def fun_l0_n478(x)
+ if (x < 1)
+ fun_l1_n709(x)
+ else
+ fun_l1_n680(x)
+ end
+end
+
+def fun_l0_n479(x)
+ if (x < 1)
+ fun_l1_n271(x)
+ else
+ fun_l1_n439(x)
+ end
+end
+
+def fun_l0_n480(x)
+ if (x < 1)
+ fun_l1_n493(x)
+ else
+ fun_l1_n562(x)
+ end
+end
+
+def fun_l0_n481(x)
+ if (x < 1)
+ fun_l1_n642(x)
+ else
+ fun_l1_n221(x)
+ end
+end
+
+def fun_l0_n482(x)
+ if (x < 1)
+ fun_l1_n838(x)
+ else
+ fun_l1_n345(x)
+ end
+end
+
+def fun_l0_n483(x)
+ if (x < 1)
+ fun_l1_n411(x)
+ else
+ fun_l1_n266(x)
+ end
+end
+
+def fun_l0_n484(x)
+ if (x < 1)
+ fun_l1_n916(x)
+ else
+ fun_l1_n791(x)
+ end
+end
+
+def fun_l0_n485(x)
+ if (x < 1)
+ fun_l1_n392(x)
+ else
+ fun_l1_n68(x)
+ end
+end
+
+def fun_l0_n486(x)
+ if (x < 1)
+ fun_l1_n834(x)
+ else
+ fun_l1_n783(x)
+ end
+end
+
+def fun_l0_n487(x)
+ if (x < 1)
+ fun_l1_n398(x)
+ else
+ fun_l1_n627(x)
+ end
+end
+
+def fun_l0_n488(x)
+ if (x < 1)
+ fun_l1_n904(x)
+ else
+ fun_l1_n167(x)
+ end
+end
+
+def fun_l0_n489(x)
+ if (x < 1)
+ fun_l1_n54(x)
+ else
+ fun_l1_n28(x)
+ end
+end
+
+def fun_l0_n490(x)
+ if (x < 1)
+ fun_l1_n24(x)
+ else
+ fun_l1_n9(x)
+ end
+end
+
+def fun_l0_n491(x)
+ if (x < 1)
+ fun_l1_n497(x)
+ else
+ fun_l1_n297(x)
+ end
+end
+
+def fun_l0_n492(x)
+ if (x < 1)
+ fun_l1_n951(x)
+ else
+ fun_l1_n534(x)
+ end
+end
+
+def fun_l0_n493(x)
+ if (x < 1)
+ fun_l1_n213(x)
+ else
+ fun_l1_n808(x)
+ end
+end
+
+def fun_l0_n494(x)
+ if (x < 1)
+ fun_l1_n904(x)
+ else
+ fun_l1_n94(x)
+ end
+end
+
+def fun_l0_n495(x)
+ if (x < 1)
+ fun_l1_n799(x)
+ else
+ fun_l1_n817(x)
+ end
+end
+
+def fun_l0_n496(x)
+ if (x < 1)
+ fun_l1_n45(x)
+ else
+ fun_l1_n860(x)
+ end
+end
+
+def fun_l0_n497(x)
+ if (x < 1)
+ fun_l1_n482(x)
+ else
+ fun_l1_n783(x)
+ end
+end
+
+def fun_l0_n498(x)
+ if (x < 1)
+ fun_l1_n78(x)
+ else
+ fun_l1_n785(x)
+ end
+end
+
+def fun_l0_n499(x)
+ if (x < 1)
+ fun_l1_n174(x)
+ else
+ fun_l1_n87(x)
+ end
+end
+
+def fun_l0_n500(x)
+ if (x < 1)
+ fun_l1_n608(x)
+ else
+ fun_l1_n7(x)
+ end
+end
+
+def fun_l0_n501(x)
+ if (x < 1)
+ fun_l1_n157(x)
+ else
+ fun_l1_n718(x)
+ end
+end
+
+def fun_l0_n502(x)
+ if (x < 1)
+ fun_l1_n823(x)
+ else
+ fun_l1_n549(x)
+ end
+end
+
+def fun_l0_n503(x)
+ if (x < 1)
+ fun_l1_n13(x)
+ else
+ fun_l1_n502(x)
+ end
+end
+
+def fun_l0_n504(x)
+ if (x < 1)
+ fun_l1_n119(x)
+ else
+ fun_l1_n191(x)
+ end
+end
+
+def fun_l0_n505(x)
+ if (x < 1)
+ fun_l1_n503(x)
+ else
+ fun_l1_n974(x)
+ end
+end
+
+def fun_l0_n506(x)
+ if (x < 1)
+ fun_l1_n893(x)
+ else
+ fun_l1_n582(x)
+ end
+end
+
+def fun_l0_n507(x)
+ if (x < 1)
+ fun_l1_n403(x)
+ else
+ fun_l1_n228(x)
+ end
+end
+
+def fun_l0_n508(x)
+ if (x < 1)
+ fun_l1_n640(x)
+ else
+ fun_l1_n362(x)
+ end
+end
+
+def fun_l0_n509(x)
+ if (x < 1)
+ fun_l1_n629(x)
+ else
+ fun_l1_n824(x)
+ end
+end
+
+def fun_l0_n510(x)
+ if (x < 1)
+ fun_l1_n84(x)
+ else
+ fun_l1_n330(x)
+ end
+end
+
+def fun_l0_n511(x)
+ if (x < 1)
+ fun_l1_n91(x)
+ else
+ fun_l1_n523(x)
+ end
+end
+
+def fun_l0_n512(x)
+ if (x < 1)
+ fun_l1_n696(x)
+ else
+ fun_l1_n126(x)
+ end
+end
+
+def fun_l0_n513(x)
+ if (x < 1)
+ fun_l1_n905(x)
+ else
+ fun_l1_n496(x)
+ end
+end
+
+def fun_l0_n514(x)
+ if (x < 1)
+ fun_l1_n766(x)
+ else
+ fun_l1_n523(x)
+ end
+end
+
+def fun_l0_n515(x)
+ if (x < 1)
+ fun_l1_n798(x)
+ else
+ fun_l1_n698(x)
+ end
+end
+
+def fun_l0_n516(x)
+ if (x < 1)
+ fun_l1_n124(x)
+ else
+ fun_l1_n356(x)
+ end
+end
+
+def fun_l0_n517(x)
+ if (x < 1)
+ fun_l1_n289(x)
+ else
+ fun_l1_n782(x)
+ end
+end
+
+def fun_l0_n518(x)
+ if (x < 1)
+ fun_l1_n483(x)
+ else
+ fun_l1_n586(x)
+ end
+end
+
+def fun_l0_n519(x)
+ if (x < 1)
+ fun_l1_n878(x)
+ else
+ fun_l1_n37(x)
+ end
+end
+
+def fun_l0_n520(x)
+ if (x < 1)
+ fun_l1_n48(x)
+ else
+ fun_l1_n216(x)
+ end
+end
+
+def fun_l0_n521(x)
+ if (x < 1)
+ fun_l1_n805(x)
+ else
+ fun_l1_n846(x)
+ end
+end
+
+def fun_l0_n522(x)
+ if (x < 1)
+ fun_l1_n422(x)
+ else
+ fun_l1_n190(x)
+ end
+end
+
+def fun_l0_n523(x)
+ if (x < 1)
+ fun_l1_n168(x)
+ else
+ fun_l1_n272(x)
+ end
+end
+
+def fun_l0_n524(x)
+ if (x < 1)
+ fun_l1_n766(x)
+ else
+ fun_l1_n125(x)
+ end
+end
+
+def fun_l0_n525(x)
+ if (x < 1)
+ fun_l1_n56(x)
+ else
+ fun_l1_n224(x)
+ end
+end
+
+def fun_l0_n526(x)
+ if (x < 1)
+ fun_l1_n540(x)
+ else
+ fun_l1_n303(x)
+ end
+end
+
+def fun_l0_n527(x)
+ if (x < 1)
+ fun_l1_n846(x)
+ else
+ fun_l1_n955(x)
+ end
+end
+
+def fun_l0_n528(x)
+ if (x < 1)
+ fun_l1_n571(x)
+ else
+ fun_l1_n640(x)
+ end
+end
+
+def fun_l0_n529(x)
+ if (x < 1)
+ fun_l1_n650(x)
+ else
+ fun_l1_n107(x)
+ end
+end
+
+def fun_l0_n530(x)
+ if (x < 1)
+ fun_l1_n240(x)
+ else
+ fun_l1_n139(x)
+ end
+end
+
+def fun_l0_n531(x)
+ if (x < 1)
+ fun_l1_n975(x)
+ else
+ fun_l1_n970(x)
+ end
+end
+
+def fun_l0_n532(x)
+ if (x < 1)
+ fun_l1_n753(x)
+ else
+ fun_l1_n147(x)
+ end
+end
+
+def fun_l0_n533(x)
+ if (x < 1)
+ fun_l1_n269(x)
+ else
+ fun_l1_n292(x)
+ end
+end
+
+def fun_l0_n534(x)
+ if (x < 1)
+ fun_l1_n791(x)
+ else
+ fun_l1_n69(x)
+ end
+end
+
+def fun_l0_n535(x)
+ if (x < 1)
+ fun_l1_n469(x)
+ else
+ fun_l1_n58(x)
+ end
+end
+
+def fun_l0_n536(x)
+ if (x < 1)
+ fun_l1_n526(x)
+ else
+ fun_l1_n265(x)
+ end
+end
+
+def fun_l0_n537(x)
+ if (x < 1)
+ fun_l1_n681(x)
+ else
+ fun_l1_n646(x)
+ end
+end
+
+def fun_l0_n538(x)
+ if (x < 1)
+ fun_l1_n377(x)
+ else
+ fun_l1_n969(x)
+ end
+end
+
+def fun_l0_n539(x)
+ if (x < 1)
+ fun_l1_n935(x)
+ else
+ fun_l1_n556(x)
+ end
+end
+
+def fun_l0_n540(x)
+ if (x < 1)
+ fun_l1_n498(x)
+ else
+ fun_l1_n439(x)
+ end
+end
+
+def fun_l0_n541(x)
+ if (x < 1)
+ fun_l1_n451(x)
+ else
+ fun_l1_n35(x)
+ end
+end
+
+def fun_l0_n542(x)
+ if (x < 1)
+ fun_l1_n32(x)
+ else
+ fun_l1_n795(x)
+ end
+end
+
+def fun_l0_n543(x)
+ if (x < 1)
+ fun_l1_n732(x)
+ else
+ fun_l1_n530(x)
+ end
+end
+
+def fun_l0_n544(x)
+ if (x < 1)
+ fun_l1_n176(x)
+ else
+ fun_l1_n735(x)
+ end
+end
+
+def fun_l0_n545(x)
+ if (x < 1)
+ fun_l1_n34(x)
+ else
+ fun_l1_n989(x)
+ end
+end
+
+def fun_l0_n546(x)
+ if (x < 1)
+ fun_l1_n534(x)
+ else
+ fun_l1_n705(x)
+ end
+end
+
+def fun_l0_n547(x)
+ if (x < 1)
+ fun_l1_n997(x)
+ else
+ fun_l1_n883(x)
+ end
+end
+
+def fun_l0_n548(x)
+ if (x < 1)
+ fun_l1_n379(x)
+ else
+ fun_l1_n33(x)
+ end
+end
+
+def fun_l0_n549(x)
+ if (x < 1)
+ fun_l1_n454(x)
+ else
+ fun_l1_n516(x)
+ end
+end
+
+def fun_l0_n550(x)
+ if (x < 1)
+ fun_l1_n678(x)
+ else
+ fun_l1_n652(x)
+ end
+end
+
+def fun_l0_n551(x)
+ if (x < 1)
+ fun_l1_n13(x)
+ else
+ fun_l1_n864(x)
+ end
+end
+
+def fun_l0_n552(x)
+ if (x < 1)
+ fun_l1_n178(x)
+ else
+ fun_l1_n996(x)
+ end
+end
+
+def fun_l0_n553(x)
+ if (x < 1)
+ fun_l1_n865(x)
+ else
+ fun_l1_n703(x)
+ end
+end
+
+def fun_l0_n554(x)
+ if (x < 1)
+ fun_l1_n306(x)
+ else
+ fun_l1_n24(x)
+ end
+end
+
+def fun_l0_n555(x)
+ if (x < 1)
+ fun_l1_n110(x)
+ else
+ fun_l1_n123(x)
+ end
+end
+
+def fun_l0_n556(x)
+ if (x < 1)
+ fun_l1_n367(x)
+ else
+ fun_l1_n117(x)
+ end
+end
+
+def fun_l0_n557(x)
+ if (x < 1)
+ fun_l1_n967(x)
+ else
+ fun_l1_n662(x)
+ end
+end
+
+def fun_l0_n558(x)
+ if (x < 1)
+ fun_l1_n68(x)
+ else
+ fun_l1_n47(x)
+ end
+end
+
+def fun_l0_n559(x)
+ if (x < 1)
+ fun_l1_n154(x)
+ else
+ fun_l1_n344(x)
+ end
+end
+
+def fun_l0_n560(x)
+ if (x < 1)
+ fun_l1_n835(x)
+ else
+ fun_l1_n577(x)
+ end
+end
+
+def fun_l0_n561(x)
+ if (x < 1)
+ fun_l1_n393(x)
+ else
+ fun_l1_n419(x)
+ end
+end
+
+def fun_l0_n562(x)
+ if (x < 1)
+ fun_l1_n632(x)
+ else
+ fun_l1_n630(x)
+ end
+end
+
+def fun_l0_n563(x)
+ if (x < 1)
+ fun_l1_n9(x)
+ else
+ fun_l1_n109(x)
+ end
+end
+
+def fun_l0_n564(x)
+ if (x < 1)
+ fun_l1_n65(x)
+ else
+ fun_l1_n123(x)
+ end
+end
+
+def fun_l0_n565(x)
+ if (x < 1)
+ fun_l1_n156(x)
+ else
+ fun_l1_n921(x)
+ end
+end
+
+def fun_l0_n566(x)
+ if (x < 1)
+ fun_l1_n88(x)
+ else
+ fun_l1_n743(x)
+ end
+end
+
+def fun_l0_n567(x)
+ if (x < 1)
+ fun_l1_n92(x)
+ else
+ fun_l1_n27(x)
+ end
+end
+
+def fun_l0_n568(x)
+ if (x < 1)
+ fun_l1_n462(x)
+ else
+ fun_l1_n988(x)
+ end
+end
+
+def fun_l0_n569(x)
+ if (x < 1)
+ fun_l1_n241(x)
+ else
+ fun_l1_n199(x)
+ end
+end
+
+def fun_l0_n570(x)
+ if (x < 1)
+ fun_l1_n429(x)
+ else
+ fun_l1_n530(x)
+ end
+end
+
+def fun_l0_n571(x)
+ if (x < 1)
+ fun_l1_n412(x)
+ else
+ fun_l1_n530(x)
+ end
+end
+
+def fun_l0_n572(x)
+ if (x < 1)
+ fun_l1_n100(x)
+ else
+ fun_l1_n270(x)
+ end
+end
+
+def fun_l0_n573(x)
+ if (x < 1)
+ fun_l1_n326(x)
+ else
+ fun_l1_n191(x)
+ end
+end
+
+def fun_l0_n574(x)
+ if (x < 1)
+ fun_l1_n151(x)
+ else
+ fun_l1_n354(x)
+ end
+end
+
+def fun_l0_n575(x)
+ if (x < 1)
+ fun_l1_n322(x)
+ else
+ fun_l1_n534(x)
+ end
+end
+
+def fun_l0_n576(x)
+ if (x < 1)
+ fun_l1_n995(x)
+ else
+ fun_l1_n658(x)
+ end
+end
+
+def fun_l0_n577(x)
+ if (x < 1)
+ fun_l1_n193(x)
+ else
+ fun_l1_n444(x)
+ end
+end
+
+def fun_l0_n578(x)
+ if (x < 1)
+ fun_l1_n527(x)
+ else
+ fun_l1_n517(x)
+ end
+end
+
+def fun_l0_n579(x)
+ if (x < 1)
+ fun_l1_n885(x)
+ else
+ fun_l1_n218(x)
+ end
+end
+
+def fun_l0_n580(x)
+ if (x < 1)
+ fun_l1_n789(x)
+ else
+ fun_l1_n711(x)
+ end
+end
+
+def fun_l0_n581(x)
+ if (x < 1)
+ fun_l1_n848(x)
+ else
+ fun_l1_n321(x)
+ end
+end
+
+def fun_l0_n582(x)
+ if (x < 1)
+ fun_l1_n389(x)
+ else
+ fun_l1_n360(x)
+ end
+end
+
+def fun_l0_n583(x)
+ if (x < 1)
+ fun_l1_n319(x)
+ else
+ fun_l1_n972(x)
+ end
+end
+
+def fun_l0_n584(x)
+ if (x < 1)
+ fun_l1_n421(x)
+ else
+ fun_l1_n659(x)
+ end
+end
+
+def fun_l0_n585(x)
+ if (x < 1)
+ fun_l1_n462(x)
+ else
+ fun_l1_n302(x)
+ end
+end
+
+def fun_l0_n586(x)
+ if (x < 1)
+ fun_l1_n122(x)
+ else
+ fun_l1_n50(x)
+ end
+end
+
+def fun_l0_n587(x)
+ if (x < 1)
+ fun_l1_n795(x)
+ else
+ fun_l1_n622(x)
+ end
+end
+
+def fun_l0_n588(x)
+ if (x < 1)
+ fun_l1_n719(x)
+ else
+ fun_l1_n390(x)
+ end
+end
+
+def fun_l0_n589(x)
+ if (x < 1)
+ fun_l1_n916(x)
+ else
+ fun_l1_n925(x)
+ end
+end
+
+def fun_l0_n590(x)
+ if (x < 1)
+ fun_l1_n541(x)
+ else
+ fun_l1_n139(x)
+ end
+end
+
+def fun_l0_n591(x)
+ if (x < 1)
+ fun_l1_n610(x)
+ else
+ fun_l1_n509(x)
+ end
+end
+
+def fun_l0_n592(x)
+ if (x < 1)
+ fun_l1_n217(x)
+ else
+ fun_l1_n452(x)
+ end
+end
+
+def fun_l0_n593(x)
+ if (x < 1)
+ fun_l1_n456(x)
+ else
+ fun_l1_n888(x)
+ end
+end
+
+def fun_l0_n594(x)
+ if (x < 1)
+ fun_l1_n987(x)
+ else
+ fun_l1_n462(x)
+ end
+end
+
+def fun_l0_n595(x)
+ if (x < 1)
+ fun_l1_n571(x)
+ else
+ fun_l1_n429(x)
+ end
+end
+
+def fun_l0_n596(x)
+ if (x < 1)
+ fun_l1_n751(x)
+ else
+ fun_l1_n773(x)
+ end
+end
+
+def fun_l0_n597(x)
+ if (x < 1)
+ fun_l1_n181(x)
+ else
+ fun_l1_n340(x)
+ end
+end
+
+def fun_l0_n598(x)
+ if (x < 1)
+ fun_l1_n6(x)
+ else
+ fun_l1_n39(x)
+ end
+end
+
+def fun_l0_n599(x)
+ if (x < 1)
+ fun_l1_n814(x)
+ else
+ fun_l1_n320(x)
+ end
+end
+
+def fun_l0_n600(x)
+ if (x < 1)
+ fun_l1_n931(x)
+ else
+ fun_l1_n535(x)
+ end
+end
+
+def fun_l0_n601(x)
+ if (x < 1)
+ fun_l1_n99(x)
+ else
+ fun_l1_n154(x)
+ end
+end
+
+def fun_l0_n602(x)
+ if (x < 1)
+ fun_l1_n981(x)
+ else
+ fun_l1_n817(x)
+ end
+end
+
+def fun_l0_n603(x)
+ if (x < 1)
+ fun_l1_n975(x)
+ else
+ fun_l1_n364(x)
+ end
+end
+
+def fun_l0_n604(x)
+ if (x < 1)
+ fun_l1_n112(x)
+ else
+ fun_l1_n226(x)
+ end
+end
+
+def fun_l0_n605(x)
+ if (x < 1)
+ fun_l1_n995(x)
+ else
+ fun_l1_n95(x)
+ end
+end
+
+def fun_l0_n606(x)
+ if (x < 1)
+ fun_l1_n930(x)
+ else
+ fun_l1_n77(x)
+ end
+end
+
+def fun_l0_n607(x)
+ if (x < 1)
+ fun_l1_n899(x)
+ else
+ fun_l1_n957(x)
+ end
+end
+
+def fun_l0_n608(x)
+ if (x < 1)
+ fun_l1_n844(x)
+ else
+ fun_l1_n119(x)
+ end
+end
+
+def fun_l0_n609(x)
+ if (x < 1)
+ fun_l1_n861(x)
+ else
+ fun_l1_n74(x)
+ end
+end
+
+def fun_l0_n610(x)
+ if (x < 1)
+ fun_l1_n521(x)
+ else
+ fun_l1_n46(x)
+ end
+end
+
+def fun_l0_n611(x)
+ if (x < 1)
+ fun_l1_n258(x)
+ else
+ fun_l1_n208(x)
+ end
+end
+
+def fun_l0_n612(x)
+ if (x < 1)
+ fun_l1_n620(x)
+ else
+ fun_l1_n427(x)
+ end
+end
+
+def fun_l0_n613(x)
+ if (x < 1)
+ fun_l1_n583(x)
+ else
+ fun_l1_n400(x)
+ end
+end
+
+def fun_l0_n614(x)
+ if (x < 1)
+ fun_l1_n933(x)
+ else
+ fun_l1_n810(x)
+ end
+end
+
+def fun_l0_n615(x)
+ if (x < 1)
+ fun_l1_n154(x)
+ else
+ fun_l1_n879(x)
+ end
+end
+
+def fun_l0_n616(x)
+ if (x < 1)
+ fun_l1_n715(x)
+ else
+ fun_l1_n862(x)
+ end
+end
+
+def fun_l0_n617(x)
+ if (x < 1)
+ fun_l1_n178(x)
+ else
+ fun_l1_n285(x)
+ end
+end
+
+def fun_l0_n618(x)
+ if (x < 1)
+ fun_l1_n458(x)
+ else
+ fun_l1_n217(x)
+ end
+end
+
+def fun_l0_n619(x)
+ if (x < 1)
+ fun_l1_n655(x)
+ else
+ fun_l1_n405(x)
+ end
+end
+
+def fun_l0_n620(x)
+ if (x < 1)
+ fun_l1_n470(x)
+ else
+ fun_l1_n306(x)
+ end
+end
+
+def fun_l0_n621(x)
+ if (x < 1)
+ fun_l1_n755(x)
+ else
+ fun_l1_n425(x)
+ end
+end
+
+def fun_l0_n622(x)
+ if (x < 1)
+ fun_l1_n426(x)
+ else
+ fun_l1_n737(x)
+ end
+end
+
+def fun_l0_n623(x)
+ if (x < 1)
+ fun_l1_n79(x)
+ else
+ fun_l1_n55(x)
+ end
+end
+
+def fun_l0_n624(x)
+ if (x < 1)
+ fun_l1_n437(x)
+ else
+ fun_l1_n79(x)
+ end
+end
+
+def fun_l0_n625(x)
+ if (x < 1)
+ fun_l1_n691(x)
+ else
+ fun_l1_n198(x)
+ end
+end
+
+def fun_l0_n626(x)
+ if (x < 1)
+ fun_l1_n703(x)
+ else
+ fun_l1_n585(x)
+ end
+end
+
+def fun_l0_n627(x)
+ if (x < 1)
+ fun_l1_n966(x)
+ else
+ fun_l1_n425(x)
+ end
+end
+
+def fun_l0_n628(x)
+ if (x < 1)
+ fun_l1_n516(x)
+ else
+ fun_l1_n744(x)
+ end
+end
+
+def fun_l0_n629(x)
+ if (x < 1)
+ fun_l1_n40(x)
+ else
+ fun_l1_n942(x)
+ end
+end
+
+def fun_l0_n630(x)
+ if (x < 1)
+ fun_l1_n528(x)
+ else
+ fun_l1_n440(x)
+ end
+end
+
+def fun_l0_n631(x)
+ if (x < 1)
+ fun_l1_n139(x)
+ else
+ fun_l1_n403(x)
+ end
+end
+
+def fun_l0_n632(x)
+ if (x < 1)
+ fun_l1_n421(x)
+ else
+ fun_l1_n510(x)
+ end
+end
+
+def fun_l0_n633(x)
+ if (x < 1)
+ fun_l1_n718(x)
+ else
+ fun_l1_n388(x)
+ end
+end
+
+def fun_l0_n634(x)
+ if (x < 1)
+ fun_l1_n67(x)
+ else
+ fun_l1_n342(x)
+ end
+end
+
+def fun_l0_n635(x)
+ if (x < 1)
+ fun_l1_n943(x)
+ else
+ fun_l1_n941(x)
+ end
+end
+
+def fun_l0_n636(x)
+ if (x < 1)
+ fun_l1_n170(x)
+ else
+ fun_l1_n527(x)
+ end
+end
+
+def fun_l0_n637(x)
+ if (x < 1)
+ fun_l1_n336(x)
+ else
+ fun_l1_n496(x)
+ end
+end
+
+def fun_l0_n638(x)
+ if (x < 1)
+ fun_l1_n730(x)
+ else
+ fun_l1_n10(x)
+ end
+end
+
+def fun_l0_n639(x)
+ if (x < 1)
+ fun_l1_n939(x)
+ else
+ fun_l1_n940(x)
+ end
+end
+
+def fun_l0_n640(x)
+ if (x < 1)
+ fun_l1_n73(x)
+ else
+ fun_l1_n439(x)
+ end
+end
+
+def fun_l0_n641(x)
+ if (x < 1)
+ fun_l1_n460(x)
+ else
+ fun_l1_n828(x)
+ end
+end
+
+def fun_l0_n642(x)
+ if (x < 1)
+ fun_l1_n90(x)
+ else
+ fun_l1_n113(x)
+ end
+end
+
+def fun_l0_n643(x)
+ if (x < 1)
+ fun_l1_n972(x)
+ else
+ fun_l1_n15(x)
+ end
+end
+
+def fun_l0_n644(x)
+ if (x < 1)
+ fun_l1_n417(x)
+ else
+ fun_l1_n228(x)
+ end
+end
+
+def fun_l0_n645(x)
+ if (x < 1)
+ fun_l1_n876(x)
+ else
+ fun_l1_n152(x)
+ end
+end
+
+def fun_l0_n646(x)
+ if (x < 1)
+ fun_l1_n404(x)
+ else
+ fun_l1_n147(x)
+ end
+end
+
+def fun_l0_n647(x)
+ if (x < 1)
+ fun_l1_n802(x)
+ else
+ fun_l1_n824(x)
+ end
+end
+
+def fun_l0_n648(x)
+ if (x < 1)
+ fun_l1_n771(x)
+ else
+ fun_l1_n421(x)
+ end
+end
+
+def fun_l0_n649(x)
+ if (x < 1)
+ fun_l1_n807(x)
+ else
+ fun_l1_n955(x)
+ end
+end
+
+def fun_l0_n650(x)
+ if (x < 1)
+ fun_l1_n221(x)
+ else
+ fun_l1_n367(x)
+ end
+end
+
+def fun_l0_n651(x)
+ if (x < 1)
+ fun_l1_n955(x)
+ else
+ fun_l1_n401(x)
+ end
+end
+
+def fun_l0_n652(x)
+ if (x < 1)
+ fun_l1_n203(x)
+ else
+ fun_l1_n909(x)
+ end
+end
+
+def fun_l0_n653(x)
+ if (x < 1)
+ fun_l1_n154(x)
+ else
+ fun_l1_n535(x)
+ end
+end
+
+def fun_l0_n654(x)
+ if (x < 1)
+ fun_l1_n420(x)
+ else
+ fun_l1_n393(x)
+ end
+end
+
+def fun_l0_n655(x)
+ if (x < 1)
+ fun_l1_n232(x)
+ else
+ fun_l1_n981(x)
+ end
+end
+
+def fun_l0_n656(x)
+ if (x < 1)
+ fun_l1_n873(x)
+ else
+ fun_l1_n510(x)
+ end
+end
+
+def fun_l0_n657(x)
+ if (x < 1)
+ fun_l1_n919(x)
+ else
+ fun_l1_n706(x)
+ end
+end
+
+def fun_l0_n658(x)
+ if (x < 1)
+ fun_l1_n429(x)
+ else
+ fun_l1_n112(x)
+ end
+end
+
+def fun_l0_n659(x)
+ if (x < 1)
+ fun_l1_n547(x)
+ else
+ fun_l1_n712(x)
+ end
+end
+
+def fun_l0_n660(x)
+ if (x < 1)
+ fun_l1_n498(x)
+ else
+ fun_l1_n695(x)
+ end
+end
+
+def fun_l0_n661(x)
+ if (x < 1)
+ fun_l1_n801(x)
+ else
+ fun_l1_n659(x)
+ end
+end
+
+def fun_l0_n662(x)
+ if (x < 1)
+ fun_l1_n796(x)
+ else
+ fun_l1_n229(x)
+ end
+end
+
+def fun_l0_n663(x)
+ if (x < 1)
+ fun_l1_n11(x)
+ else
+ fun_l1_n620(x)
+ end
+end
+
+def fun_l0_n664(x)
+ if (x < 1)
+ fun_l1_n652(x)
+ else
+ fun_l1_n416(x)
+ end
+end
+
+def fun_l0_n665(x)
+ if (x < 1)
+ fun_l1_n655(x)
+ else
+ fun_l1_n331(x)
+ end
+end
+
+def fun_l0_n666(x)
+ if (x < 1)
+ fun_l1_n846(x)
+ else
+ fun_l1_n1(x)
+ end
+end
+
+def fun_l0_n667(x)
+ if (x < 1)
+ fun_l1_n55(x)
+ else
+ fun_l1_n32(x)
+ end
+end
+
+def fun_l0_n668(x)
+ if (x < 1)
+ fun_l1_n241(x)
+ else
+ fun_l1_n627(x)
+ end
+end
+
+def fun_l0_n669(x)
+ if (x < 1)
+ fun_l1_n234(x)
+ else
+ fun_l1_n996(x)
+ end
+end
+
+def fun_l0_n670(x)
+ if (x < 1)
+ fun_l1_n235(x)
+ else
+ fun_l1_n761(x)
+ end
+end
+
+def fun_l0_n671(x)
+ if (x < 1)
+ fun_l1_n990(x)
+ else
+ fun_l1_n236(x)
+ end
+end
+
+def fun_l0_n672(x)
+ if (x < 1)
+ fun_l1_n970(x)
+ else
+ fun_l1_n595(x)
+ end
+end
+
+def fun_l0_n673(x)
+ if (x < 1)
+ fun_l1_n294(x)
+ else
+ fun_l1_n440(x)
+ end
+end
+
+def fun_l0_n674(x)
+ if (x < 1)
+ fun_l1_n597(x)
+ else
+ fun_l1_n956(x)
+ end
+end
+
+def fun_l0_n675(x)
+ if (x < 1)
+ fun_l1_n942(x)
+ else
+ fun_l1_n352(x)
+ end
+end
+
+def fun_l0_n676(x)
+ if (x < 1)
+ fun_l1_n309(x)
+ else
+ fun_l1_n294(x)
+ end
+end
+
+def fun_l0_n677(x)
+ if (x < 1)
+ fun_l1_n752(x)
+ else
+ fun_l1_n447(x)
+ end
+end
+
+def fun_l0_n678(x)
+ if (x < 1)
+ fun_l1_n890(x)
+ else
+ fun_l1_n497(x)
+ end
+end
+
+def fun_l0_n679(x)
+ if (x < 1)
+ fun_l1_n808(x)
+ else
+ fun_l1_n881(x)
+ end
+end
+
+def fun_l0_n680(x)
+ if (x < 1)
+ fun_l1_n966(x)
+ else
+ fun_l1_n460(x)
+ end
+end
+
+def fun_l0_n681(x)
+ if (x < 1)
+ fun_l1_n104(x)
+ else
+ fun_l1_n262(x)
+ end
+end
+
+def fun_l0_n682(x)
+ if (x < 1)
+ fun_l1_n425(x)
+ else
+ fun_l1_n177(x)
+ end
+end
+
+def fun_l0_n683(x)
+ if (x < 1)
+ fun_l1_n15(x)
+ else
+ fun_l1_n458(x)
+ end
+end
+
+def fun_l0_n684(x)
+ if (x < 1)
+ fun_l1_n760(x)
+ else
+ fun_l1_n997(x)
+ end
+end
+
+def fun_l0_n685(x)
+ if (x < 1)
+ fun_l1_n496(x)
+ else
+ fun_l1_n223(x)
+ end
+end
+
+def fun_l0_n686(x)
+ if (x < 1)
+ fun_l1_n363(x)
+ else
+ fun_l1_n543(x)
+ end
+end
+
+def fun_l0_n687(x)
+ if (x < 1)
+ fun_l1_n758(x)
+ else
+ fun_l1_n473(x)
+ end
+end
+
+def fun_l0_n688(x)
+ if (x < 1)
+ fun_l1_n854(x)
+ else
+ fun_l1_n999(x)
+ end
+end
+
+def fun_l0_n689(x)
+ if (x < 1)
+ fun_l1_n336(x)
+ else
+ fun_l1_n388(x)
+ end
+end
+
+def fun_l0_n690(x)
+ if (x < 1)
+ fun_l1_n930(x)
+ else
+ fun_l1_n478(x)
+ end
+end
+
+def fun_l0_n691(x)
+ if (x < 1)
+ fun_l1_n423(x)
+ else
+ fun_l1_n83(x)
+ end
+end
+
+def fun_l0_n692(x)
+ if (x < 1)
+ fun_l1_n797(x)
+ else
+ fun_l1_n427(x)
+ end
+end
+
+def fun_l0_n693(x)
+ if (x < 1)
+ fun_l1_n907(x)
+ else
+ fun_l1_n1(x)
+ end
+end
+
+def fun_l0_n694(x)
+ if (x < 1)
+ fun_l1_n601(x)
+ else
+ fun_l1_n809(x)
+ end
+end
+
+def fun_l0_n695(x)
+ if (x < 1)
+ fun_l1_n252(x)
+ else
+ fun_l1_n233(x)
+ end
+end
+
+def fun_l0_n696(x)
+ if (x < 1)
+ fun_l1_n36(x)
+ else
+ fun_l1_n796(x)
+ end
+end
+
+def fun_l0_n697(x)
+ if (x < 1)
+ fun_l1_n893(x)
+ else
+ fun_l1_n926(x)
+ end
+end
+
+def fun_l0_n698(x)
+ if (x < 1)
+ fun_l1_n511(x)
+ else
+ fun_l1_n88(x)
+ end
+end
+
+def fun_l0_n699(x)
+ if (x < 1)
+ fun_l1_n573(x)
+ else
+ fun_l1_n718(x)
+ end
+end
+
+def fun_l0_n700(x)
+ if (x < 1)
+ fun_l1_n475(x)
+ else
+ fun_l1_n539(x)
+ end
+end
+
+def fun_l0_n701(x)
+ if (x < 1)
+ fun_l1_n238(x)
+ else
+ fun_l1_n253(x)
+ end
+end
+
+def fun_l0_n702(x)
+ if (x < 1)
+ fun_l1_n731(x)
+ else
+ fun_l1_n225(x)
+ end
+end
+
+def fun_l0_n703(x)
+ if (x < 1)
+ fun_l1_n594(x)
+ else
+ fun_l1_n16(x)
+ end
+end
+
+def fun_l0_n704(x)
+ if (x < 1)
+ fun_l1_n732(x)
+ else
+ fun_l1_n354(x)
+ end
+end
+
+def fun_l0_n705(x)
+ if (x < 1)
+ fun_l1_n812(x)
+ else
+ fun_l1_n218(x)
+ end
+end
+
+def fun_l0_n706(x)
+ if (x < 1)
+ fun_l1_n731(x)
+ else
+ fun_l1_n343(x)
+ end
+end
+
+def fun_l0_n707(x)
+ if (x < 1)
+ fun_l1_n134(x)
+ else
+ fun_l1_n752(x)
+ end
+end
+
+def fun_l0_n708(x)
+ if (x < 1)
+ fun_l1_n899(x)
+ else
+ fun_l1_n854(x)
+ end
+end
+
+def fun_l0_n709(x)
+ if (x < 1)
+ fun_l1_n120(x)
+ else
+ fun_l1_n193(x)
+ end
+end
+
+def fun_l0_n710(x)
+ if (x < 1)
+ fun_l1_n22(x)
+ else
+ fun_l1_n254(x)
+ end
+end
+
+def fun_l0_n711(x)
+ if (x < 1)
+ fun_l1_n527(x)
+ else
+ fun_l1_n921(x)
+ end
+end
+
+def fun_l0_n712(x)
+ if (x < 1)
+ fun_l1_n820(x)
+ else
+ fun_l1_n96(x)
+ end
+end
+
+def fun_l0_n713(x)
+ if (x < 1)
+ fun_l1_n145(x)
+ else
+ fun_l1_n226(x)
+ end
+end
+
+def fun_l0_n714(x)
+ if (x < 1)
+ fun_l1_n643(x)
+ else
+ fun_l1_n407(x)
+ end
+end
+
+def fun_l0_n715(x)
+ if (x < 1)
+ fun_l1_n349(x)
+ else
+ fun_l1_n450(x)
+ end
+end
+
+def fun_l0_n716(x)
+ if (x < 1)
+ fun_l1_n954(x)
+ else
+ fun_l1_n707(x)
+ end
+end
+
+def fun_l0_n717(x)
+ if (x < 1)
+ fun_l1_n723(x)
+ else
+ fun_l1_n762(x)
+ end
+end
+
+def fun_l0_n718(x)
+ if (x < 1)
+ fun_l1_n299(x)
+ else
+ fun_l1_n332(x)
+ end
+end
+
+def fun_l0_n719(x)
+ if (x < 1)
+ fun_l1_n493(x)
+ else
+ fun_l1_n521(x)
+ end
+end
+
+def fun_l0_n720(x)
+ if (x < 1)
+ fun_l1_n39(x)
+ else
+ fun_l1_n17(x)
+ end
+end
+
+def fun_l0_n721(x)
+ if (x < 1)
+ fun_l1_n297(x)
+ else
+ fun_l1_n800(x)
+ end
+end
+
+def fun_l0_n722(x)
+ if (x < 1)
+ fun_l1_n758(x)
+ else
+ fun_l1_n135(x)
+ end
+end
+
+def fun_l0_n723(x)
+ if (x < 1)
+ fun_l1_n471(x)
+ else
+ fun_l1_n351(x)
+ end
+end
+
+def fun_l0_n724(x)
+ if (x < 1)
+ fun_l1_n381(x)
+ else
+ fun_l1_n281(x)
+ end
+end
+
+def fun_l0_n725(x)
+ if (x < 1)
+ fun_l1_n624(x)
+ else
+ fun_l1_n162(x)
+ end
+end
+
+def fun_l0_n726(x)
+ if (x < 1)
+ fun_l1_n143(x)
+ else
+ fun_l1_n417(x)
+ end
+end
+
+def fun_l0_n727(x)
+ if (x < 1)
+ fun_l1_n623(x)
+ else
+ fun_l1_n19(x)
+ end
+end
+
+def fun_l0_n728(x)
+ if (x < 1)
+ fun_l1_n687(x)
+ else
+ fun_l1_n574(x)
+ end
+end
+
+def fun_l0_n729(x)
+ if (x < 1)
+ fun_l1_n225(x)
+ else
+ fun_l1_n871(x)
+ end
+end
+
+def fun_l0_n730(x)
+ if (x < 1)
+ fun_l1_n830(x)
+ else
+ fun_l1_n273(x)
+ end
+end
+
+def fun_l0_n731(x)
+ if (x < 1)
+ fun_l1_n95(x)
+ else
+ fun_l1_n864(x)
+ end
+end
+
+def fun_l0_n732(x)
+ if (x < 1)
+ fun_l1_n902(x)
+ else
+ fun_l1_n522(x)
+ end
+end
+
+def fun_l0_n733(x)
+ if (x < 1)
+ fun_l1_n564(x)
+ else
+ fun_l1_n55(x)
+ end
+end
+
+def fun_l0_n734(x)
+ if (x < 1)
+ fun_l1_n481(x)
+ else
+ fun_l1_n880(x)
+ end
+end
+
+def fun_l0_n735(x)
+ if (x < 1)
+ fun_l1_n87(x)
+ else
+ fun_l1_n693(x)
+ end
+end
+
+def fun_l0_n736(x)
+ if (x < 1)
+ fun_l1_n72(x)
+ else
+ fun_l1_n396(x)
+ end
+end
+
+def fun_l0_n737(x)
+ if (x < 1)
+ fun_l1_n920(x)
+ else
+ fun_l1_n852(x)
+ end
+end
+
+def fun_l0_n738(x)
+ if (x < 1)
+ fun_l1_n342(x)
+ else
+ fun_l1_n959(x)
+ end
+end
+
+def fun_l0_n739(x)
+ if (x < 1)
+ fun_l1_n682(x)
+ else
+ fun_l1_n714(x)
+ end
+end
+
+def fun_l0_n740(x)
+ if (x < 1)
+ fun_l1_n264(x)
+ else
+ fun_l1_n325(x)
+ end
+end
+
+def fun_l0_n741(x)
+ if (x < 1)
+ fun_l1_n621(x)
+ else
+ fun_l1_n339(x)
+ end
+end
+
+def fun_l0_n742(x)
+ if (x < 1)
+ fun_l1_n46(x)
+ else
+ fun_l1_n766(x)
+ end
+end
+
+def fun_l0_n743(x)
+ if (x < 1)
+ fun_l1_n333(x)
+ else
+ fun_l1_n353(x)
+ end
+end
+
+def fun_l0_n744(x)
+ if (x < 1)
+ fun_l1_n930(x)
+ else
+ fun_l1_n964(x)
+ end
+end
+
+def fun_l0_n745(x)
+ if (x < 1)
+ fun_l1_n785(x)
+ else
+ fun_l1_n114(x)
+ end
+end
+
+def fun_l0_n746(x)
+ if (x < 1)
+ fun_l1_n402(x)
+ else
+ fun_l1_n803(x)
+ end
+end
+
+def fun_l0_n747(x)
+ if (x < 1)
+ fun_l1_n878(x)
+ else
+ fun_l1_n870(x)
+ end
+end
+
+def fun_l0_n748(x)
+ if (x < 1)
+ fun_l1_n683(x)
+ else
+ fun_l1_n287(x)
+ end
+end
+
+def fun_l0_n749(x)
+ if (x < 1)
+ fun_l1_n816(x)
+ else
+ fun_l1_n849(x)
+ end
+end
+
+def fun_l0_n750(x)
+ if (x < 1)
+ fun_l1_n523(x)
+ else
+ fun_l1_n555(x)
+ end
+end
+
+def fun_l0_n751(x)
+ if (x < 1)
+ fun_l1_n270(x)
+ else
+ fun_l1_n660(x)
+ end
+end
+
+def fun_l0_n752(x)
+ if (x < 1)
+ fun_l1_n786(x)
+ else
+ fun_l1_n749(x)
+ end
+end
+
+def fun_l0_n753(x)
+ if (x < 1)
+ fun_l1_n567(x)
+ else
+ fun_l1_n325(x)
+ end
+end
+
+def fun_l0_n754(x)
+ if (x < 1)
+ fun_l1_n502(x)
+ else
+ fun_l1_n610(x)
+ end
+end
+
+def fun_l0_n755(x)
+ if (x < 1)
+ fun_l1_n336(x)
+ else
+ fun_l1_n354(x)
+ end
+end
+
+def fun_l0_n756(x)
+ if (x < 1)
+ fun_l1_n435(x)
+ else
+ fun_l1_n484(x)
+ end
+end
+
+def fun_l0_n757(x)
+ if (x < 1)
+ fun_l1_n849(x)
+ else
+ fun_l1_n42(x)
+ end
+end
+
+def fun_l0_n758(x)
+ if (x < 1)
+ fun_l1_n294(x)
+ else
+ fun_l1_n483(x)
+ end
+end
+
+def fun_l0_n759(x)
+ if (x < 1)
+ fun_l1_n14(x)
+ else
+ fun_l1_n9(x)
+ end
+end
+
+def fun_l0_n760(x)
+ if (x < 1)
+ fun_l1_n860(x)
+ else
+ fun_l1_n645(x)
+ end
+end
+
+def fun_l0_n761(x)
+ if (x < 1)
+ fun_l1_n577(x)
+ else
+ fun_l1_n0(x)
+ end
+end
+
+def fun_l0_n762(x)
+ if (x < 1)
+ fun_l1_n28(x)
+ else
+ fun_l1_n834(x)
+ end
+end
+
+def fun_l0_n763(x)
+ if (x < 1)
+ fun_l1_n147(x)
+ else
+ fun_l1_n177(x)
+ end
+end
+
+def fun_l0_n764(x)
+ if (x < 1)
+ fun_l1_n429(x)
+ else
+ fun_l1_n646(x)
+ end
+end
+
+def fun_l0_n765(x)
+ if (x < 1)
+ fun_l1_n474(x)
+ else
+ fun_l1_n22(x)
+ end
+end
+
+def fun_l0_n766(x)
+ if (x < 1)
+ fun_l1_n264(x)
+ else
+ fun_l1_n549(x)
+ end
+end
+
+def fun_l0_n767(x)
+ if (x < 1)
+ fun_l1_n351(x)
+ else
+ fun_l1_n813(x)
+ end
+end
+
+def fun_l0_n768(x)
+ if (x < 1)
+ fun_l1_n798(x)
+ else
+ fun_l1_n684(x)
+ end
+end
+
+def fun_l0_n769(x)
+ if (x < 1)
+ fun_l1_n202(x)
+ else
+ fun_l1_n982(x)
+ end
+end
+
+def fun_l0_n770(x)
+ if (x < 1)
+ fun_l1_n611(x)
+ else
+ fun_l1_n52(x)
+ end
+end
+
+def fun_l0_n771(x)
+ if (x < 1)
+ fun_l1_n754(x)
+ else
+ fun_l1_n7(x)
+ end
+end
+
+def fun_l0_n772(x)
+ if (x < 1)
+ fun_l1_n98(x)
+ else
+ fun_l1_n149(x)
+ end
+end
+
+def fun_l0_n773(x)
+ if (x < 1)
+ fun_l1_n358(x)
+ else
+ fun_l1_n603(x)
+ end
+end
+
+def fun_l0_n774(x)
+ if (x < 1)
+ fun_l1_n604(x)
+ else
+ fun_l1_n318(x)
+ end
+end
+
+def fun_l0_n775(x)
+ if (x < 1)
+ fun_l1_n831(x)
+ else
+ fun_l1_n635(x)
+ end
+end
+
+def fun_l0_n776(x)
+ if (x < 1)
+ fun_l1_n738(x)
+ else
+ fun_l1_n563(x)
+ end
+end
+
+def fun_l0_n777(x)
+ if (x < 1)
+ fun_l1_n570(x)
+ else
+ fun_l1_n671(x)
+ end
+end
+
+def fun_l0_n778(x)
+ if (x < 1)
+ fun_l1_n945(x)
+ else
+ fun_l1_n840(x)
+ end
+end
+
+def fun_l0_n779(x)
+ if (x < 1)
+ fun_l1_n116(x)
+ else
+ fun_l1_n942(x)
+ end
+end
+
+def fun_l0_n780(x)
+ if (x < 1)
+ fun_l1_n835(x)
+ else
+ fun_l1_n244(x)
+ end
+end
+
+def fun_l0_n781(x)
+ if (x < 1)
+ fun_l1_n427(x)
+ else
+ fun_l1_n556(x)
+ end
+end
+
+def fun_l0_n782(x)
+ if (x < 1)
+ fun_l1_n280(x)
+ else
+ fun_l1_n926(x)
+ end
+end
+
+def fun_l0_n783(x)
+ if (x < 1)
+ fun_l1_n674(x)
+ else
+ fun_l1_n560(x)
+ end
+end
+
+def fun_l0_n784(x)
+ if (x < 1)
+ fun_l1_n978(x)
+ else
+ fun_l1_n139(x)
+ end
+end
+
+def fun_l0_n785(x)
+ if (x < 1)
+ fun_l1_n928(x)
+ else
+ fun_l1_n11(x)
+ end
+end
+
+def fun_l0_n786(x)
+ if (x < 1)
+ fun_l1_n829(x)
+ else
+ fun_l1_n239(x)
+ end
+end
+
+def fun_l0_n787(x)
+ if (x < 1)
+ fun_l1_n732(x)
+ else
+ fun_l1_n829(x)
+ end
+end
+
+def fun_l0_n788(x)
+ if (x < 1)
+ fun_l1_n968(x)
+ else
+ fun_l1_n596(x)
+ end
+end
+
+def fun_l0_n789(x)
+ if (x < 1)
+ fun_l1_n214(x)
+ else
+ fun_l1_n158(x)
+ end
+end
+
+def fun_l0_n790(x)
+ if (x < 1)
+ fun_l1_n566(x)
+ else
+ fun_l1_n390(x)
+ end
+end
+
+def fun_l0_n791(x)
+ if (x < 1)
+ fun_l1_n936(x)
+ else
+ fun_l1_n31(x)
+ end
+end
+
+def fun_l0_n792(x)
+ if (x < 1)
+ fun_l1_n291(x)
+ else
+ fun_l1_n835(x)
+ end
+end
+
+def fun_l0_n793(x)
+ if (x < 1)
+ fun_l1_n300(x)
+ else
+ fun_l1_n658(x)
+ end
+end
+
+def fun_l0_n794(x)
+ if (x < 1)
+ fun_l1_n609(x)
+ else
+ fun_l1_n156(x)
+ end
+end
+
+def fun_l0_n795(x)
+ if (x < 1)
+ fun_l1_n419(x)
+ else
+ fun_l1_n534(x)
+ end
+end
+
+def fun_l0_n796(x)
+ if (x < 1)
+ fun_l1_n401(x)
+ else
+ fun_l1_n281(x)
+ end
+end
+
+def fun_l0_n797(x)
+ if (x < 1)
+ fun_l1_n538(x)
+ else
+ fun_l1_n905(x)
+ end
+end
+
+def fun_l0_n798(x)
+ if (x < 1)
+ fun_l1_n427(x)
+ else
+ fun_l1_n978(x)
+ end
+end
+
+def fun_l0_n799(x)
+ if (x < 1)
+ fun_l1_n851(x)
+ else
+ fun_l1_n57(x)
+ end
+end
+
+def fun_l0_n800(x)
+ if (x < 1)
+ fun_l1_n836(x)
+ else
+ fun_l1_n306(x)
+ end
+end
+
+def fun_l0_n801(x)
+ if (x < 1)
+ fun_l1_n414(x)
+ else
+ fun_l1_n679(x)
+ end
+end
+
+def fun_l0_n802(x)
+ if (x < 1)
+ fun_l1_n645(x)
+ else
+ fun_l1_n115(x)
+ end
+end
+
+def fun_l0_n803(x)
+ if (x < 1)
+ fun_l1_n884(x)
+ else
+ fun_l1_n801(x)
+ end
+end
+
+def fun_l0_n804(x)
+ if (x < 1)
+ fun_l1_n713(x)
+ else
+ fun_l1_n974(x)
+ end
+end
+
+def fun_l0_n805(x)
+ if (x < 1)
+ fun_l1_n301(x)
+ else
+ fun_l1_n589(x)
+ end
+end
+
+def fun_l0_n806(x)
+ if (x < 1)
+ fun_l1_n912(x)
+ else
+ fun_l1_n766(x)
+ end
+end
+
+def fun_l0_n807(x)
+ if (x < 1)
+ fun_l1_n656(x)
+ else
+ fun_l1_n765(x)
+ end
+end
+
+def fun_l0_n808(x)
+ if (x < 1)
+ fun_l1_n517(x)
+ else
+ fun_l1_n964(x)
+ end
+end
+
+def fun_l0_n809(x)
+ if (x < 1)
+ fun_l1_n878(x)
+ else
+ fun_l1_n69(x)
+ end
+end
+
+def fun_l0_n810(x)
+ if (x < 1)
+ fun_l1_n527(x)
+ else
+ fun_l1_n841(x)
+ end
+end
+
+def fun_l0_n811(x)
+ if (x < 1)
+ fun_l1_n564(x)
+ else
+ fun_l1_n541(x)
+ end
+end
+
+def fun_l0_n812(x)
+ if (x < 1)
+ fun_l1_n754(x)
+ else
+ fun_l1_n681(x)
+ end
+end
+
+def fun_l0_n813(x)
+ if (x < 1)
+ fun_l1_n21(x)
+ else
+ fun_l1_n561(x)
+ end
+end
+
+def fun_l0_n814(x)
+ if (x < 1)
+ fun_l1_n547(x)
+ else
+ fun_l1_n631(x)
+ end
+end
+
+def fun_l0_n815(x)
+ if (x < 1)
+ fun_l1_n956(x)
+ else
+ fun_l1_n889(x)
+ end
+end
+
+def fun_l0_n816(x)
+ if (x < 1)
+ fun_l1_n817(x)
+ else
+ fun_l1_n957(x)
+ end
+end
+
+def fun_l0_n817(x)
+ if (x < 1)
+ fun_l1_n622(x)
+ else
+ fun_l1_n827(x)
+ end
+end
+
+def fun_l0_n818(x)
+ if (x < 1)
+ fun_l1_n7(x)
+ else
+ fun_l1_n208(x)
+ end
+end
+
+def fun_l0_n819(x)
+ if (x < 1)
+ fun_l1_n411(x)
+ else
+ fun_l1_n742(x)
+ end
+end
+
+def fun_l0_n820(x)
+ if (x < 1)
+ fun_l1_n676(x)
+ else
+ fun_l1_n144(x)
+ end
+end
+
+def fun_l0_n821(x)
+ if (x < 1)
+ fun_l1_n489(x)
+ else
+ fun_l1_n963(x)
+ end
+end
+
+def fun_l0_n822(x)
+ if (x < 1)
+ fun_l1_n645(x)
+ else
+ fun_l1_n953(x)
+ end
+end
+
+def fun_l0_n823(x)
+ if (x < 1)
+ fun_l1_n974(x)
+ else
+ fun_l1_n75(x)
+ end
+end
+
+def fun_l0_n824(x)
+ if (x < 1)
+ fun_l1_n295(x)
+ else
+ fun_l1_n249(x)
+ end
+end
+
+def fun_l0_n825(x)
+ if (x < 1)
+ fun_l1_n141(x)
+ else
+ fun_l1_n633(x)
+ end
+end
+
+def fun_l0_n826(x)
+ if (x < 1)
+ fun_l1_n317(x)
+ else
+ fun_l1_n152(x)
+ end
+end
+
+def fun_l0_n827(x)
+ if (x < 1)
+ fun_l1_n570(x)
+ else
+ fun_l1_n377(x)
+ end
+end
+
+def fun_l0_n828(x)
+ if (x < 1)
+ fun_l1_n392(x)
+ else
+ fun_l1_n477(x)
+ end
+end
+
+def fun_l0_n829(x)
+ if (x < 1)
+ fun_l1_n277(x)
+ else
+ fun_l1_n276(x)
+ end
+end
+
+def fun_l0_n830(x)
+ if (x < 1)
+ fun_l1_n579(x)
+ else
+ fun_l1_n648(x)
+ end
+end
+
+def fun_l0_n831(x)
+ if (x < 1)
+ fun_l1_n8(x)
+ else
+ fun_l1_n25(x)
+ end
+end
+
+def fun_l0_n832(x)
+ if (x < 1)
+ fun_l1_n724(x)
+ else
+ fun_l1_n617(x)
+ end
+end
+
+def fun_l0_n833(x)
+ if (x < 1)
+ fun_l1_n771(x)
+ else
+ fun_l1_n619(x)
+ end
+end
+
+def fun_l0_n834(x)
+ if (x < 1)
+ fun_l1_n23(x)
+ else
+ fun_l1_n127(x)
+ end
+end
+
+def fun_l0_n835(x)
+ if (x < 1)
+ fun_l1_n322(x)
+ else
+ fun_l1_n327(x)
+ end
+end
+
+def fun_l0_n836(x)
+ if (x < 1)
+ fun_l1_n7(x)
+ else
+ fun_l1_n600(x)
+ end
+end
+
+def fun_l0_n837(x)
+ if (x < 1)
+ fun_l1_n796(x)
+ else
+ fun_l1_n782(x)
+ end
+end
+
+def fun_l0_n838(x)
+ if (x < 1)
+ fun_l1_n937(x)
+ else
+ fun_l1_n899(x)
+ end
+end
+
+def fun_l0_n839(x)
+ if (x < 1)
+ fun_l1_n16(x)
+ else
+ fun_l1_n349(x)
+ end
+end
+
+def fun_l0_n840(x)
+ if (x < 1)
+ fun_l1_n622(x)
+ else
+ fun_l1_n960(x)
+ end
+end
+
+def fun_l0_n841(x)
+ if (x < 1)
+ fun_l1_n295(x)
+ else
+ fun_l1_n176(x)
+ end
+end
+
+def fun_l0_n842(x)
+ if (x < 1)
+ fun_l1_n302(x)
+ else
+ fun_l1_n639(x)
+ end
+end
+
+def fun_l0_n843(x)
+ if (x < 1)
+ fun_l1_n342(x)
+ else
+ fun_l1_n373(x)
+ end
+end
+
+def fun_l0_n844(x)
+ if (x < 1)
+ fun_l1_n5(x)
+ else
+ fun_l1_n787(x)
+ end
+end
+
+def fun_l0_n845(x)
+ if (x < 1)
+ fun_l1_n89(x)
+ else
+ fun_l1_n951(x)
+ end
+end
+
+def fun_l0_n846(x)
+ if (x < 1)
+ fun_l1_n954(x)
+ else
+ fun_l1_n982(x)
+ end
+end
+
+def fun_l0_n847(x)
+ if (x < 1)
+ fun_l1_n971(x)
+ else
+ fun_l1_n917(x)
+ end
+end
+
+def fun_l0_n848(x)
+ if (x < 1)
+ fun_l1_n845(x)
+ else
+ fun_l1_n343(x)
+ end
+end
+
+def fun_l0_n849(x)
+ if (x < 1)
+ fun_l1_n684(x)
+ else
+ fun_l1_n683(x)
+ end
+end
+
+def fun_l0_n850(x)
+ if (x < 1)
+ fun_l1_n670(x)
+ else
+ fun_l1_n256(x)
+ end
+end
+
+def fun_l0_n851(x)
+ if (x < 1)
+ fun_l1_n405(x)
+ else
+ fun_l1_n123(x)
+ end
+end
+
+def fun_l0_n852(x)
+ if (x < 1)
+ fun_l1_n809(x)
+ else
+ fun_l1_n691(x)
+ end
+end
+
+def fun_l0_n853(x)
+ if (x < 1)
+ fun_l1_n937(x)
+ else
+ fun_l1_n92(x)
+ end
+end
+
+def fun_l0_n854(x)
+ if (x < 1)
+ fun_l1_n735(x)
+ else
+ fun_l1_n948(x)
+ end
+end
+
+def fun_l0_n855(x)
+ if (x < 1)
+ fun_l1_n684(x)
+ else
+ fun_l1_n566(x)
+ end
+end
+
+def fun_l0_n856(x)
+ if (x < 1)
+ fun_l1_n508(x)
+ else
+ fun_l1_n35(x)
+ end
+end
+
+def fun_l0_n857(x)
+ if (x < 1)
+ fun_l1_n865(x)
+ else
+ fun_l1_n930(x)
+ end
+end
+
+def fun_l0_n858(x)
+ if (x < 1)
+ fun_l1_n937(x)
+ else
+ fun_l1_n241(x)
+ end
+end
+
+def fun_l0_n859(x)
+ if (x < 1)
+ fun_l1_n938(x)
+ else
+ fun_l1_n108(x)
+ end
+end
+
+def fun_l0_n860(x)
+ if (x < 1)
+ fun_l1_n491(x)
+ else
+ fun_l1_n119(x)
+ end
+end
+
+def fun_l0_n861(x)
+ if (x < 1)
+ fun_l1_n991(x)
+ else
+ fun_l1_n198(x)
+ end
+end
+
+def fun_l0_n862(x)
+ if (x < 1)
+ fun_l1_n846(x)
+ else
+ fun_l1_n513(x)
+ end
+end
+
+def fun_l0_n863(x)
+ if (x < 1)
+ fun_l1_n279(x)
+ else
+ fun_l1_n229(x)
+ end
+end
+
+def fun_l0_n864(x)
+ if (x < 1)
+ fun_l1_n52(x)
+ else
+ fun_l1_n765(x)
+ end
+end
+
+def fun_l0_n865(x)
+ if (x < 1)
+ fun_l1_n35(x)
+ else
+ fun_l1_n572(x)
+ end
+end
+
+def fun_l0_n866(x)
+ if (x < 1)
+ fun_l1_n4(x)
+ else
+ fun_l1_n645(x)
+ end
+end
+
+def fun_l0_n867(x)
+ if (x < 1)
+ fun_l1_n112(x)
+ else
+ fun_l1_n758(x)
+ end
+end
+
+def fun_l0_n868(x)
+ if (x < 1)
+ fun_l1_n924(x)
+ else
+ fun_l1_n251(x)
+ end
+end
+
+def fun_l0_n869(x)
+ if (x < 1)
+ fun_l1_n558(x)
+ else
+ fun_l1_n818(x)
+ end
+end
+
+def fun_l0_n870(x)
+ if (x < 1)
+ fun_l1_n804(x)
+ else
+ fun_l1_n746(x)
+ end
+end
+
+def fun_l0_n871(x)
+ if (x < 1)
+ fun_l1_n111(x)
+ else
+ fun_l1_n407(x)
+ end
+end
+
+def fun_l0_n872(x)
+ if (x < 1)
+ fun_l1_n973(x)
+ else
+ fun_l1_n384(x)
+ end
+end
+
+def fun_l0_n873(x)
+ if (x < 1)
+ fun_l1_n102(x)
+ else
+ fun_l1_n199(x)
+ end
+end
+
+def fun_l0_n874(x)
+ if (x < 1)
+ fun_l1_n735(x)
+ else
+ fun_l1_n903(x)
+ end
+end
+
+def fun_l0_n875(x)
+ if (x < 1)
+ fun_l1_n95(x)
+ else
+ fun_l1_n720(x)
+ end
+end
+
+def fun_l0_n876(x)
+ if (x < 1)
+ fun_l1_n864(x)
+ else
+ fun_l1_n970(x)
+ end
+end
+
+def fun_l0_n877(x)
+ if (x < 1)
+ fun_l1_n729(x)
+ else
+ fun_l1_n207(x)
+ end
+end
+
+def fun_l0_n878(x)
+ if (x < 1)
+ fun_l1_n104(x)
+ else
+ fun_l1_n209(x)
+ end
+end
+
+def fun_l0_n879(x)
+ if (x < 1)
+ fun_l1_n566(x)
+ else
+ fun_l1_n918(x)
+ end
+end
+
+def fun_l0_n880(x)
+ if (x < 1)
+ fun_l1_n457(x)
+ else
+ fun_l1_n104(x)
+ end
+end
+
+def fun_l0_n881(x)
+ if (x < 1)
+ fun_l1_n745(x)
+ else
+ fun_l1_n476(x)
+ end
+end
+
+def fun_l0_n882(x)
+ if (x < 1)
+ fun_l1_n346(x)
+ else
+ fun_l1_n642(x)
+ end
+end
+
+def fun_l0_n883(x)
+ if (x < 1)
+ fun_l1_n78(x)
+ else
+ fun_l1_n210(x)
+ end
+end
+
+def fun_l0_n884(x)
+ if (x < 1)
+ fun_l1_n864(x)
+ else
+ fun_l1_n439(x)
+ end
+end
+
+def fun_l0_n885(x)
+ if (x < 1)
+ fun_l1_n0(x)
+ else
+ fun_l1_n414(x)
+ end
+end
+
+def fun_l0_n886(x)
+ if (x < 1)
+ fun_l1_n217(x)
+ else
+ fun_l1_n444(x)
+ end
+end
+
+def fun_l0_n887(x)
+ if (x < 1)
+ fun_l1_n193(x)
+ else
+ fun_l1_n788(x)
+ end
+end
+
+def fun_l0_n888(x)
+ if (x < 1)
+ fun_l1_n30(x)
+ else
+ fun_l1_n793(x)
+ end
+end
+
+def fun_l0_n889(x)
+ if (x < 1)
+ fun_l1_n222(x)
+ else
+ fun_l1_n936(x)
+ end
+end
+
+def fun_l0_n890(x)
+ if (x < 1)
+ fun_l1_n758(x)
+ else
+ fun_l1_n52(x)
+ end
+end
+
+def fun_l0_n891(x)
+ if (x < 1)
+ fun_l1_n796(x)
+ else
+ fun_l1_n424(x)
+ end
+end
+
+def fun_l0_n892(x)
+ if (x < 1)
+ fun_l1_n627(x)
+ else
+ fun_l1_n284(x)
+ end
+end
+
+def fun_l0_n893(x)
+ if (x < 1)
+ fun_l1_n42(x)
+ else
+ fun_l1_n193(x)
+ end
+end
+
+def fun_l0_n894(x)
+ if (x < 1)
+ fun_l1_n90(x)
+ else
+ fun_l1_n158(x)
+ end
+end
+
+def fun_l0_n895(x)
+ if (x < 1)
+ fun_l1_n878(x)
+ else
+ fun_l1_n396(x)
+ end
+end
+
+def fun_l0_n896(x)
+ if (x < 1)
+ fun_l1_n665(x)
+ else
+ fun_l1_n731(x)
+ end
+end
+
+def fun_l0_n897(x)
+ if (x < 1)
+ fun_l1_n618(x)
+ else
+ fun_l1_n152(x)
+ end
+end
+
+def fun_l0_n898(x)
+ if (x < 1)
+ fun_l1_n64(x)
+ else
+ fun_l1_n697(x)
+ end
+end
+
+def fun_l0_n899(x)
+ if (x < 1)
+ fun_l1_n354(x)
+ else
+ fun_l1_n657(x)
+ end
+end
+
+def fun_l0_n900(x)
+ if (x < 1)
+ fun_l1_n79(x)
+ else
+ fun_l1_n794(x)
+ end
+end
+
+def fun_l0_n901(x)
+ if (x < 1)
+ fun_l1_n623(x)
+ else
+ fun_l1_n392(x)
+ end
+end
+
+def fun_l0_n902(x)
+ if (x < 1)
+ fun_l1_n529(x)
+ else
+ fun_l1_n708(x)
+ end
+end
+
+def fun_l0_n903(x)
+ if (x < 1)
+ fun_l1_n900(x)
+ else
+ fun_l1_n830(x)
+ end
+end
+
+def fun_l0_n904(x)
+ if (x < 1)
+ fun_l1_n424(x)
+ else
+ fun_l1_n564(x)
+ end
+end
+
+def fun_l0_n905(x)
+ if (x < 1)
+ fun_l1_n557(x)
+ else
+ fun_l1_n612(x)
+ end
+end
+
+def fun_l0_n906(x)
+ if (x < 1)
+ fun_l1_n725(x)
+ else
+ fun_l1_n166(x)
+ end
+end
+
+def fun_l0_n907(x)
+ if (x < 1)
+ fun_l1_n534(x)
+ else
+ fun_l1_n159(x)
+ end
+end
+
+def fun_l0_n908(x)
+ if (x < 1)
+ fun_l1_n50(x)
+ else
+ fun_l1_n201(x)
+ end
+end
+
+def fun_l0_n909(x)
+ if (x < 1)
+ fun_l1_n568(x)
+ else
+ fun_l1_n276(x)
+ end
+end
+
+def fun_l0_n910(x)
+ if (x < 1)
+ fun_l1_n0(x)
+ else
+ fun_l1_n324(x)
+ end
+end
+
+def fun_l0_n911(x)
+ if (x < 1)
+ fun_l1_n926(x)
+ else
+ fun_l1_n524(x)
+ end
+end
+
+def fun_l0_n912(x)
+ if (x < 1)
+ fun_l1_n498(x)
+ else
+ fun_l1_n650(x)
+ end
+end
+
+def fun_l0_n913(x)
+ if (x < 1)
+ fun_l1_n593(x)
+ else
+ fun_l1_n860(x)
+ end
+end
+
+def fun_l0_n914(x)
+ if (x < 1)
+ fun_l1_n726(x)
+ else
+ fun_l1_n440(x)
+ end
+end
+
+def fun_l0_n915(x)
+ if (x < 1)
+ fun_l1_n422(x)
+ else
+ fun_l1_n627(x)
+ end
+end
+
+def fun_l0_n916(x)
+ if (x < 1)
+ fun_l1_n346(x)
+ else
+ fun_l1_n932(x)
+ end
+end
+
+def fun_l0_n917(x)
+ if (x < 1)
+ fun_l1_n403(x)
+ else
+ fun_l1_n58(x)
+ end
+end
+
+def fun_l0_n918(x)
+ if (x < 1)
+ fun_l1_n989(x)
+ else
+ fun_l1_n579(x)
+ end
+end
+
+def fun_l0_n919(x)
+ if (x < 1)
+ fun_l1_n933(x)
+ else
+ fun_l1_n434(x)
+ end
+end
+
+def fun_l0_n920(x)
+ if (x < 1)
+ fun_l1_n899(x)
+ else
+ fun_l1_n377(x)
+ end
+end
+
+def fun_l0_n921(x)
+ if (x < 1)
+ fun_l1_n17(x)
+ else
+ fun_l1_n637(x)
+ end
+end
+
+def fun_l0_n922(x)
+ if (x < 1)
+ fun_l1_n551(x)
+ else
+ fun_l1_n693(x)
+ end
+end
+
+def fun_l0_n923(x)
+ if (x < 1)
+ fun_l1_n861(x)
+ else
+ fun_l1_n248(x)
+ end
+end
+
+def fun_l0_n924(x)
+ if (x < 1)
+ fun_l1_n171(x)
+ else
+ fun_l1_n75(x)
+ end
+end
+
+def fun_l0_n925(x)
+ if (x < 1)
+ fun_l1_n233(x)
+ else
+ fun_l1_n444(x)
+ end
+end
+
+def fun_l0_n926(x)
+ if (x < 1)
+ fun_l1_n310(x)
+ else
+ fun_l1_n941(x)
+ end
+end
+
+def fun_l0_n927(x)
+ if (x < 1)
+ fun_l1_n363(x)
+ else
+ fun_l1_n567(x)
+ end
+end
+
+def fun_l0_n928(x)
+ if (x < 1)
+ fun_l1_n632(x)
+ else
+ fun_l1_n840(x)
+ end
+end
+
+def fun_l0_n929(x)
+ if (x < 1)
+ fun_l1_n368(x)
+ else
+ fun_l1_n713(x)
+ end
+end
+
+def fun_l0_n930(x)
+ if (x < 1)
+ fun_l1_n37(x)
+ else
+ fun_l1_n355(x)
+ end
+end
+
+def fun_l0_n931(x)
+ if (x < 1)
+ fun_l1_n907(x)
+ else
+ fun_l1_n784(x)
+ end
+end
+
+def fun_l0_n932(x)
+ if (x < 1)
+ fun_l1_n591(x)
+ else
+ fun_l1_n844(x)
+ end
+end
+
+def fun_l0_n933(x)
+ if (x < 1)
+ fun_l1_n996(x)
+ else
+ fun_l1_n943(x)
+ end
+end
+
+def fun_l0_n934(x)
+ if (x < 1)
+ fun_l1_n631(x)
+ else
+ fun_l1_n643(x)
+ end
+end
+
+def fun_l0_n935(x)
+ if (x < 1)
+ fun_l1_n875(x)
+ else
+ fun_l1_n89(x)
+ end
+end
+
+def fun_l0_n936(x)
+ if (x < 1)
+ fun_l1_n842(x)
+ else
+ fun_l1_n595(x)
+ end
+end
+
+def fun_l0_n937(x)
+ if (x < 1)
+ fun_l1_n921(x)
+ else
+ fun_l1_n271(x)
+ end
+end
+
+def fun_l0_n938(x)
+ if (x < 1)
+ fun_l1_n222(x)
+ else
+ fun_l1_n170(x)
+ end
+end
+
+def fun_l0_n939(x)
+ if (x < 1)
+ fun_l1_n567(x)
+ else
+ fun_l1_n463(x)
+ end
+end
+
+def fun_l0_n940(x)
+ if (x < 1)
+ fun_l1_n456(x)
+ else
+ fun_l1_n192(x)
+ end
+end
+
+def fun_l0_n941(x)
+ if (x < 1)
+ fun_l1_n32(x)
+ else
+ fun_l1_n951(x)
+ end
+end
+
+def fun_l0_n942(x)
+ if (x < 1)
+ fun_l1_n14(x)
+ else
+ fun_l1_n135(x)
+ end
+end
+
+def fun_l0_n943(x)
+ if (x < 1)
+ fun_l1_n15(x)
+ else
+ fun_l1_n234(x)
+ end
+end
+
+def fun_l0_n944(x)
+ if (x < 1)
+ fun_l1_n119(x)
+ else
+ fun_l1_n33(x)
+ end
+end
+
+def fun_l0_n945(x)
+ if (x < 1)
+ fun_l1_n859(x)
+ else
+ fun_l1_n497(x)
+ end
+end
+
+def fun_l0_n946(x)
+ if (x < 1)
+ fun_l1_n178(x)
+ else
+ fun_l1_n369(x)
+ end
+end
+
+def fun_l0_n947(x)
+ if (x < 1)
+ fun_l1_n64(x)
+ else
+ fun_l1_n356(x)
+ end
+end
+
+def fun_l0_n948(x)
+ if (x < 1)
+ fun_l1_n279(x)
+ else
+ fun_l1_n244(x)
+ end
+end
+
+def fun_l0_n949(x)
+ if (x < 1)
+ fun_l1_n728(x)
+ else
+ fun_l1_n548(x)
+ end
+end
+
+def fun_l0_n950(x)
+ if (x < 1)
+ fun_l1_n3(x)
+ else
+ fun_l1_n698(x)
+ end
+end
+
+def fun_l0_n951(x)
+ if (x < 1)
+ fun_l1_n665(x)
+ else
+ fun_l1_n198(x)
+ end
+end
+
+def fun_l0_n952(x)
+ if (x < 1)
+ fun_l1_n812(x)
+ else
+ fun_l1_n964(x)
+ end
+end
+
+def fun_l0_n953(x)
+ if (x < 1)
+ fun_l1_n851(x)
+ else
+ fun_l1_n877(x)
+ end
+end
+
+def fun_l0_n954(x)
+ if (x < 1)
+ fun_l1_n415(x)
+ else
+ fun_l1_n379(x)
+ end
+end
+
+def fun_l0_n955(x)
+ if (x < 1)
+ fun_l1_n806(x)
+ else
+ fun_l1_n919(x)
+ end
+end
+
+def fun_l0_n956(x)
+ if (x < 1)
+ fun_l1_n207(x)
+ else
+ fun_l1_n798(x)
+ end
+end
+
+def fun_l0_n957(x)
+ if (x < 1)
+ fun_l1_n269(x)
+ else
+ fun_l1_n416(x)
+ end
+end
+
+def fun_l0_n958(x)
+ if (x < 1)
+ fun_l1_n654(x)
+ else
+ fun_l1_n236(x)
+ end
+end
+
+def fun_l0_n959(x)
+ if (x < 1)
+ fun_l1_n392(x)
+ else
+ fun_l1_n623(x)
+ end
+end
+
+def fun_l0_n960(x)
+ if (x < 1)
+ fun_l1_n889(x)
+ else
+ fun_l1_n279(x)
+ end
+end
+
+def fun_l0_n961(x)
+ if (x < 1)
+ fun_l1_n476(x)
+ else
+ fun_l1_n201(x)
+ end
+end
+
+def fun_l0_n962(x)
+ if (x < 1)
+ fun_l1_n799(x)
+ else
+ fun_l1_n570(x)
+ end
+end
+
+def fun_l0_n963(x)
+ if (x < 1)
+ fun_l1_n802(x)
+ else
+ fun_l1_n114(x)
+ end
+end
+
+def fun_l0_n964(x)
+ if (x < 1)
+ fun_l1_n197(x)
+ else
+ fun_l1_n422(x)
+ end
+end
+
+def fun_l0_n965(x)
+ if (x < 1)
+ fun_l1_n37(x)
+ else
+ fun_l1_n691(x)
+ end
+end
+
+def fun_l0_n966(x)
+ if (x < 1)
+ fun_l1_n775(x)
+ else
+ fun_l1_n854(x)
+ end
+end
+
+def fun_l0_n967(x)
+ if (x < 1)
+ fun_l1_n373(x)
+ else
+ fun_l1_n306(x)
+ end
+end
+
+def fun_l0_n968(x)
+ if (x < 1)
+ fun_l1_n154(x)
+ else
+ fun_l1_n122(x)
+ end
+end
+
+def fun_l0_n969(x)
+ if (x < 1)
+ fun_l1_n497(x)
+ else
+ fun_l1_n456(x)
+ end
+end
+
+def fun_l0_n970(x)
+ if (x < 1)
+ fun_l1_n621(x)
+ else
+ fun_l1_n931(x)
+ end
+end
+
+def fun_l0_n971(x)
+ if (x < 1)
+ fun_l1_n963(x)
+ else
+ fun_l1_n613(x)
+ end
+end
+
+def fun_l0_n972(x)
+ if (x < 1)
+ fun_l1_n613(x)
+ else
+ fun_l1_n508(x)
+ end
+end
+
+def fun_l0_n973(x)
+ if (x < 1)
+ fun_l1_n466(x)
+ else
+ fun_l1_n929(x)
+ end
+end
+
+def fun_l0_n974(x)
+ if (x < 1)
+ fun_l1_n247(x)
+ else
+ fun_l1_n610(x)
+ end
+end
+
+def fun_l0_n975(x)
+ if (x < 1)
+ fun_l1_n674(x)
+ else
+ fun_l1_n609(x)
+ end
+end
+
+def fun_l0_n976(x)
+ if (x < 1)
+ fun_l1_n963(x)
+ else
+ fun_l1_n601(x)
+ end
+end
+
+def fun_l0_n977(x)
+ if (x < 1)
+ fun_l1_n728(x)
+ else
+ fun_l1_n242(x)
+ end
+end
+
+def fun_l0_n978(x)
+ if (x < 1)
+ fun_l1_n515(x)
+ else
+ fun_l1_n113(x)
+ end
+end
+
+def fun_l0_n979(x)
+ if (x < 1)
+ fun_l1_n734(x)
+ else
+ fun_l1_n271(x)
+ end
+end
+
+def fun_l0_n980(x)
+ if (x < 1)
+ fun_l1_n837(x)
+ else
+ fun_l1_n733(x)
+ end
+end
+
+def fun_l0_n981(x)
+ if (x < 1)
+ fun_l1_n326(x)
+ else
+ fun_l1_n213(x)
+ end
+end
+
+def fun_l0_n982(x)
+ if (x < 1)
+ fun_l1_n733(x)
+ else
+ fun_l1_n198(x)
+ end
+end
+
+def fun_l0_n983(x)
+ if (x < 1)
+ fun_l1_n989(x)
+ else
+ fun_l1_n700(x)
+ end
+end
+
+def fun_l0_n984(x)
+ if (x < 1)
+ fun_l1_n266(x)
+ else
+ fun_l1_n348(x)
+ end
+end
+
+def fun_l0_n985(x)
+ if (x < 1)
+ fun_l1_n931(x)
+ else
+ fun_l1_n646(x)
+ end
+end
+
+def fun_l0_n986(x)
+ if (x < 1)
+ fun_l1_n435(x)
+ else
+ fun_l1_n747(x)
+ end
+end
+
+def fun_l0_n987(x)
+ if (x < 1)
+ fun_l1_n474(x)
+ else
+ fun_l1_n638(x)
+ end
+end
+
+def fun_l0_n988(x)
+ if (x < 1)
+ fun_l1_n615(x)
+ else
+ fun_l1_n283(x)
+ end
+end
+
+def fun_l0_n989(x)
+ if (x < 1)
+ fun_l1_n410(x)
+ else
+ fun_l1_n289(x)
+ end
+end
+
+def fun_l0_n990(x)
+ if (x < 1)
+ fun_l1_n633(x)
+ else
+ fun_l1_n77(x)
+ end
+end
+
+def fun_l0_n991(x)
+ if (x < 1)
+ fun_l1_n153(x)
+ else
+ fun_l1_n255(x)
+ end
+end
+
+def fun_l0_n992(x)
+ if (x < 1)
+ fun_l1_n447(x)
+ else
+ fun_l1_n239(x)
+ end
+end
+
+def fun_l0_n993(x)
+ if (x < 1)
+ fun_l1_n837(x)
+ else
+ fun_l1_n582(x)
+ end
+end
+
+def fun_l0_n994(x)
+ if (x < 1)
+ fun_l1_n385(x)
+ else
+ fun_l1_n939(x)
+ end
+end
+
+def fun_l0_n995(x)
+ if (x < 1)
+ fun_l1_n236(x)
+ else
+ fun_l1_n497(x)
+ end
+end
+
+def fun_l0_n996(x)
+ if (x < 1)
+ fun_l1_n63(x)
+ else
+ fun_l1_n721(x)
+ end
+end
+
+def fun_l0_n997(x)
+ if (x < 1)
+ fun_l1_n457(x)
+ else
+ fun_l1_n376(x)
+ end
+end
+
+def fun_l0_n998(x)
+ if (x < 1)
+ fun_l1_n362(x)
+ else
+ fun_l1_n478(x)
+ end
+end
+
+def fun_l0_n999(x)
+ if (x < 1)
+ fun_l1_n554(x)
+ else
+ fun_l1_n422(x)
+ end
+end
+
+def fun_l1_n0(x)
+ if (x < 1)
+ fun_l2_n498(x)
+ else
+ fun_l2_n461(x)
+ end
+end
+
+def fun_l1_n1(x)
+ if (x < 1)
+ fun_l2_n424(x)
+ else
+ fun_l2_n137(x)
+ end
+end
+
+def fun_l1_n2(x)
+ if (x < 1)
+ fun_l2_n325(x)
+ else
+ fun_l2_n95(x)
+ end
+end
+
+def fun_l1_n3(x)
+ if (x < 1)
+ fun_l2_n917(x)
+ else
+ fun_l2_n469(x)
+ end
+end
+
+def fun_l1_n4(x)
+ if (x < 1)
+ fun_l2_n730(x)
+ else
+ fun_l2_n147(x)
+ end
+end
+
+def fun_l1_n5(x)
+ if (x < 1)
+ fun_l2_n796(x)
+ else
+ fun_l2_n907(x)
+ end
+end
+
+def fun_l1_n6(x)
+ if (x < 1)
+ fun_l2_n555(x)
+ else
+ fun_l2_n309(x)
+ end
+end
+
+def fun_l1_n7(x)
+ if (x < 1)
+ fun_l2_n475(x)
+ else
+ fun_l2_n193(x)
+ end
+end
+
+def fun_l1_n8(x)
+ if (x < 1)
+ fun_l2_n317(x)
+ else
+ fun_l2_n284(x)
+ end
+end
+
+def fun_l1_n9(x)
+ if (x < 1)
+ fun_l2_n297(x)
+ else
+ fun_l2_n370(x)
+ end
+end
+
+def fun_l1_n10(x)
+ if (x < 1)
+ fun_l2_n66(x)
+ else
+ fun_l2_n959(x)
+ end
+end
+
+def fun_l1_n11(x)
+ if (x < 1)
+ fun_l2_n833(x)
+ else
+ fun_l2_n250(x)
+ end
+end
+
+def fun_l1_n12(x)
+ if (x < 1)
+ fun_l2_n640(x)
+ else
+ fun_l2_n588(x)
+ end
+end
+
+def fun_l1_n13(x)
+ if (x < 1)
+ fun_l2_n91(x)
+ else
+ fun_l2_n294(x)
+ end
+end
+
+def fun_l1_n14(x)
+ if (x < 1)
+ fun_l2_n166(x)
+ else
+ fun_l2_n943(x)
+ end
+end
+
+def fun_l1_n15(x)
+ if (x < 1)
+ fun_l2_n352(x)
+ else
+ fun_l2_n363(x)
+ end
+end
+
+def fun_l1_n16(x)
+ if (x < 1)
+ fun_l2_n84(x)
+ else
+ fun_l2_n278(x)
+ end
+end
+
+def fun_l1_n17(x)
+ if (x < 1)
+ fun_l2_n308(x)
+ else
+ fun_l2_n341(x)
+ end
+end
+
+def fun_l1_n18(x)
+ if (x < 1)
+ fun_l2_n713(x)
+ else
+ fun_l2_n575(x)
+ end
+end
+
+def fun_l1_n19(x)
+ if (x < 1)
+ fun_l2_n81(x)
+ else
+ fun_l2_n301(x)
+ end
+end
+
+def fun_l1_n20(x)
+ if (x < 1)
+ fun_l2_n516(x)
+ else
+ fun_l2_n164(x)
+ end
+end
+
+def fun_l1_n21(x)
+ if (x < 1)
+ fun_l2_n900(x)
+ else
+ fun_l2_n963(x)
+ end
+end
+
+def fun_l1_n22(x)
+ if (x < 1)
+ fun_l2_n472(x)
+ else
+ fun_l2_n288(x)
+ end
+end
+
+def fun_l1_n23(x)
+ if (x < 1)
+ fun_l2_n587(x)
+ else
+ fun_l2_n515(x)
+ end
+end
+
+def fun_l1_n24(x)
+ if (x < 1)
+ fun_l2_n506(x)
+ else
+ fun_l2_n711(x)
+ end
+end
+
+def fun_l1_n25(x)
+ if (x < 1)
+ fun_l2_n935(x)
+ else
+ fun_l2_n177(x)
+ end
+end
+
+def fun_l1_n26(x)
+ if (x < 1)
+ fun_l2_n485(x)
+ else
+ fun_l2_n368(x)
+ end
+end
+
+def fun_l1_n27(x)
+ if (x < 1)
+ fun_l2_n984(x)
+ else
+ fun_l2_n290(x)
+ end
+end
+
+def fun_l1_n28(x)
+ if (x < 1)
+ fun_l2_n36(x)
+ else
+ fun_l2_n837(x)
+ end
+end
+
+def fun_l1_n29(x)
+ if (x < 1)
+ fun_l2_n752(x)
+ else
+ fun_l2_n311(x)
+ end
+end
+
+def fun_l1_n30(x)
+ if (x < 1)
+ fun_l2_n215(x)
+ else
+ fun_l2_n376(x)
+ end
+end
+
+def fun_l1_n31(x)
+ if (x < 1)
+ fun_l2_n533(x)
+ else
+ fun_l2_n92(x)
+ end
+end
+
+def fun_l1_n32(x)
+ if (x < 1)
+ fun_l2_n386(x)
+ else
+ fun_l2_n283(x)
+ end
+end
+
+def fun_l1_n33(x)
+ if (x < 1)
+ fun_l2_n268(x)
+ else
+ fun_l2_n256(x)
+ end
+end
+
+def fun_l1_n34(x)
+ if (x < 1)
+ fun_l2_n348(x)
+ else
+ fun_l2_n464(x)
+ end
+end
+
+def fun_l1_n35(x)
+ if (x < 1)
+ fun_l2_n25(x)
+ else
+ fun_l2_n33(x)
+ end
+end
+
+def fun_l1_n36(x)
+ if (x < 1)
+ fun_l2_n387(x)
+ else
+ fun_l2_n341(x)
+ end
+end
+
+def fun_l1_n37(x)
+ if (x < 1)
+ fun_l2_n580(x)
+ else
+ fun_l2_n476(x)
+ end
+end
+
+def fun_l1_n38(x)
+ if (x < 1)
+ fun_l2_n33(x)
+ else
+ fun_l2_n33(x)
+ end
+end
+
+def fun_l1_n39(x)
+ if (x < 1)
+ fun_l2_n861(x)
+ else
+ fun_l2_n52(x)
+ end
+end
+
+def fun_l1_n40(x)
+ if (x < 1)
+ fun_l2_n740(x)
+ else
+ fun_l2_n943(x)
+ end
+end
+
+def fun_l1_n41(x)
+ if (x < 1)
+ fun_l2_n956(x)
+ else
+ fun_l2_n159(x)
+ end
+end
+
+def fun_l1_n42(x)
+ if (x < 1)
+ fun_l2_n445(x)
+ else
+ fun_l2_n775(x)
+ end
+end
+
+def fun_l1_n43(x)
+ if (x < 1)
+ fun_l2_n667(x)
+ else
+ fun_l2_n282(x)
+ end
+end
+
+def fun_l1_n44(x)
+ if (x < 1)
+ fun_l2_n185(x)
+ else
+ fun_l2_n210(x)
+ end
+end
+
+def fun_l1_n45(x)
+ if (x < 1)
+ fun_l2_n490(x)
+ else
+ fun_l2_n21(x)
+ end
+end
+
+def fun_l1_n46(x)
+ if (x < 1)
+ fun_l2_n751(x)
+ else
+ fun_l2_n639(x)
+ end
+end
+
+def fun_l1_n47(x)
+ if (x < 1)
+ fun_l2_n431(x)
+ else
+ fun_l2_n917(x)
+ end
+end
+
+def fun_l1_n48(x)
+ if (x < 1)
+ fun_l2_n187(x)
+ else
+ fun_l2_n170(x)
+ end
+end
+
+def fun_l1_n49(x)
+ if (x < 1)
+ fun_l2_n780(x)
+ else
+ fun_l2_n77(x)
+ end
+end
+
+def fun_l1_n50(x)
+ if (x < 1)
+ fun_l2_n546(x)
+ else
+ fun_l2_n759(x)
+ end
+end
+
+def fun_l1_n51(x)
+ if (x < 1)
+ fun_l2_n453(x)
+ else
+ fun_l2_n951(x)
+ end
+end
+
+def fun_l1_n52(x)
+ if (x < 1)
+ fun_l2_n989(x)
+ else
+ fun_l2_n716(x)
+ end
+end
+
+def fun_l1_n53(x)
+ if (x < 1)
+ fun_l2_n652(x)
+ else
+ fun_l2_n36(x)
+ end
+end
+
+def fun_l1_n54(x)
+ if (x < 1)
+ fun_l2_n187(x)
+ else
+ fun_l2_n760(x)
+ end
+end
+
+def fun_l1_n55(x)
+ if (x < 1)
+ fun_l2_n706(x)
+ else
+ fun_l2_n927(x)
+ end
+end
+
+def fun_l1_n56(x)
+ if (x < 1)
+ fun_l2_n666(x)
+ else
+ fun_l2_n295(x)
+ end
+end
+
+def fun_l1_n57(x)
+ if (x < 1)
+ fun_l2_n424(x)
+ else
+ fun_l2_n447(x)
+ end
+end
+
+def fun_l1_n58(x)
+ if (x < 1)
+ fun_l2_n408(x)
+ else
+ fun_l2_n337(x)
+ end
+end
+
+def fun_l1_n59(x)
+ if (x < 1)
+ fun_l2_n567(x)
+ else
+ fun_l2_n765(x)
+ end
+end
+
+def fun_l1_n60(x)
+ if (x < 1)
+ fun_l2_n241(x)
+ else
+ fun_l2_n701(x)
+ end
+end
+
+def fun_l1_n61(x)
+ if (x < 1)
+ fun_l2_n831(x)
+ else
+ fun_l2_n382(x)
+ end
+end
+
+def fun_l1_n62(x)
+ if (x < 1)
+ fun_l2_n634(x)
+ else
+ fun_l2_n908(x)
+ end
+end
+
+def fun_l1_n63(x)
+ if (x < 1)
+ fun_l2_n696(x)
+ else
+ fun_l2_n48(x)
+ end
+end
+
+def fun_l1_n64(x)
+ if (x < 1)
+ fun_l2_n528(x)
+ else
+ fun_l2_n110(x)
+ end
+end
+
+def fun_l1_n65(x)
+ if (x < 1)
+ fun_l2_n767(x)
+ else
+ fun_l2_n973(x)
+ end
+end
+
+def fun_l1_n66(x)
+ if (x < 1)
+ fun_l2_n963(x)
+ else
+ fun_l2_n673(x)
+ end
+end
+
+def fun_l1_n67(x)
+ if (x < 1)
+ fun_l2_n927(x)
+ else
+ fun_l2_n309(x)
+ end
+end
+
+def fun_l1_n68(x)
+ if (x < 1)
+ fun_l2_n851(x)
+ else
+ fun_l2_n160(x)
+ end
+end
+
+def fun_l1_n69(x)
+ if (x < 1)
+ fun_l2_n575(x)
+ else
+ fun_l2_n800(x)
+ end
+end
+
+def fun_l1_n70(x)
+ if (x < 1)
+ fun_l2_n396(x)
+ else
+ fun_l2_n382(x)
+ end
+end
+
+def fun_l1_n71(x)
+ if (x < 1)
+ fun_l2_n508(x)
+ else
+ fun_l2_n763(x)
+ end
+end
+
+def fun_l1_n72(x)
+ if (x < 1)
+ fun_l2_n360(x)
+ else
+ fun_l2_n848(x)
+ end
+end
+
+def fun_l1_n73(x)
+ if (x < 1)
+ fun_l2_n878(x)
+ else
+ fun_l2_n83(x)
+ end
+end
+
+def fun_l1_n74(x)
+ if (x < 1)
+ fun_l2_n573(x)
+ else
+ fun_l2_n284(x)
+ end
+end
+
+def fun_l1_n75(x)
+ if (x < 1)
+ fun_l2_n999(x)
+ else
+ fun_l2_n954(x)
+ end
+end
+
+def fun_l1_n76(x)
+ if (x < 1)
+ fun_l2_n182(x)
+ else
+ fun_l2_n997(x)
+ end
+end
+
+def fun_l1_n77(x)
+ if (x < 1)
+ fun_l2_n802(x)
+ else
+ fun_l2_n409(x)
+ end
+end
+
+def fun_l1_n78(x)
+ if (x < 1)
+ fun_l2_n163(x)
+ else
+ fun_l2_n417(x)
+ end
+end
+
+def fun_l1_n79(x)
+ if (x < 1)
+ fun_l2_n328(x)
+ else
+ fun_l2_n688(x)
+ end
+end
+
+def fun_l1_n80(x)
+ if (x < 1)
+ fun_l2_n699(x)
+ else
+ fun_l2_n356(x)
+ end
+end
+
+def fun_l1_n81(x)
+ if (x < 1)
+ fun_l2_n44(x)
+ else
+ fun_l2_n302(x)
+ end
+end
+
+def fun_l1_n82(x)
+ if (x < 1)
+ fun_l2_n597(x)
+ else
+ fun_l2_n892(x)
+ end
+end
+
+def fun_l1_n83(x)
+ if (x < 1)
+ fun_l2_n494(x)
+ else
+ fun_l2_n547(x)
+ end
+end
+
+def fun_l1_n84(x)
+ if (x < 1)
+ fun_l2_n631(x)
+ else
+ fun_l2_n922(x)
+ end
+end
+
+def fun_l1_n85(x)
+ if (x < 1)
+ fun_l2_n797(x)
+ else
+ fun_l2_n874(x)
+ end
+end
+
+def fun_l1_n86(x)
+ if (x < 1)
+ fun_l2_n209(x)
+ else
+ fun_l2_n365(x)
+ end
+end
+
+def fun_l1_n87(x)
+ if (x < 1)
+ fun_l2_n516(x)
+ else
+ fun_l2_n371(x)
+ end
+end
+
+def fun_l1_n88(x)
+ if (x < 1)
+ fun_l2_n654(x)
+ else
+ fun_l2_n88(x)
+ end
+end
+
+def fun_l1_n89(x)
+ if (x < 1)
+ fun_l2_n70(x)
+ else
+ fun_l2_n521(x)
+ end
+end
+
+def fun_l1_n90(x)
+ if (x < 1)
+ fun_l2_n666(x)
+ else
+ fun_l2_n749(x)
+ end
+end
+
+def fun_l1_n91(x)
+ if (x < 1)
+ fun_l2_n504(x)
+ else
+ fun_l2_n339(x)
+ end
+end
+
+def fun_l1_n92(x)
+ if (x < 1)
+ fun_l2_n734(x)
+ else
+ fun_l2_n277(x)
+ end
+end
+
+def fun_l1_n93(x)
+ if (x < 1)
+ fun_l2_n300(x)
+ else
+ fun_l2_n826(x)
+ end
+end
+
+def fun_l1_n94(x)
+ if (x < 1)
+ fun_l2_n733(x)
+ else
+ fun_l2_n560(x)
+ end
+end
+
+def fun_l1_n95(x)
+ if (x < 1)
+ fun_l2_n834(x)
+ else
+ fun_l2_n644(x)
+ end
+end
+
+def fun_l1_n96(x)
+ if (x < 1)
+ fun_l2_n183(x)
+ else
+ fun_l2_n238(x)
+ end
+end
+
+def fun_l1_n97(x)
+ if (x < 1)
+ fun_l2_n372(x)
+ else
+ fun_l2_n753(x)
+ end
+end
+
+def fun_l1_n98(x)
+ if (x < 1)
+ fun_l2_n208(x)
+ else
+ fun_l2_n543(x)
+ end
+end
+
+def fun_l1_n99(x)
+ if (x < 1)
+ fun_l2_n38(x)
+ else
+ fun_l2_n974(x)
+ end
+end
+
+def fun_l1_n100(x)
+ if (x < 1)
+ fun_l2_n620(x)
+ else
+ fun_l2_n503(x)
+ end
+end
+
+def fun_l1_n101(x)
+ if (x < 1)
+ fun_l2_n248(x)
+ else
+ fun_l2_n189(x)
+ end
+end
+
+def fun_l1_n102(x)
+ if (x < 1)
+ fun_l2_n489(x)
+ else
+ fun_l2_n390(x)
+ end
+end
+
+def fun_l1_n103(x)
+ if (x < 1)
+ fun_l2_n452(x)
+ else
+ fun_l2_n22(x)
+ end
+end
+
+def fun_l1_n104(x)
+ if (x < 1)
+ fun_l2_n542(x)
+ else
+ fun_l2_n135(x)
+ end
+end
+
+def fun_l1_n105(x)
+ if (x < 1)
+ fun_l2_n988(x)
+ else
+ fun_l2_n118(x)
+ end
+end
+
+def fun_l1_n106(x)
+ if (x < 1)
+ fun_l2_n817(x)
+ else
+ fun_l2_n534(x)
+ end
+end
+
+def fun_l1_n107(x)
+ if (x < 1)
+ fun_l2_n812(x)
+ else
+ fun_l2_n206(x)
+ end
+end
+
+def fun_l1_n108(x)
+ if (x < 1)
+ fun_l2_n514(x)
+ else
+ fun_l2_n300(x)
+ end
+end
+
+def fun_l1_n109(x)
+ if (x < 1)
+ fun_l2_n482(x)
+ else
+ fun_l2_n376(x)
+ end
+end
+
+def fun_l1_n110(x)
+ if (x < 1)
+ fun_l2_n571(x)
+ else
+ fun_l2_n758(x)
+ end
+end
+
+def fun_l1_n111(x)
+ if (x < 1)
+ fun_l2_n971(x)
+ else
+ fun_l2_n811(x)
+ end
+end
+
+def fun_l1_n112(x)
+ if (x < 1)
+ fun_l2_n23(x)
+ else
+ fun_l2_n844(x)
+ end
+end
+
+def fun_l1_n113(x)
+ if (x < 1)
+ fun_l2_n865(x)
+ else
+ fun_l2_n149(x)
+ end
+end
+
+def fun_l1_n114(x)
+ if (x < 1)
+ fun_l2_n930(x)
+ else
+ fun_l2_n327(x)
+ end
+end
+
+def fun_l1_n115(x)
+ if (x < 1)
+ fun_l2_n67(x)
+ else
+ fun_l2_n442(x)
+ end
+end
+
+def fun_l1_n116(x)
+ if (x < 1)
+ fun_l2_n278(x)
+ else
+ fun_l2_n653(x)
+ end
+end
+
+def fun_l1_n117(x)
+ if (x < 1)
+ fun_l2_n457(x)
+ else
+ fun_l2_n179(x)
+ end
+end
+
+def fun_l1_n118(x)
+ if (x < 1)
+ fun_l2_n384(x)
+ else
+ fun_l2_n594(x)
+ end
+end
+
+def fun_l1_n119(x)
+ if (x < 1)
+ fun_l2_n361(x)
+ else
+ fun_l2_n770(x)
+ end
+end
+
+def fun_l1_n120(x)
+ if (x < 1)
+ fun_l2_n24(x)
+ else
+ fun_l2_n269(x)
+ end
+end
+
+def fun_l1_n121(x)
+ if (x < 1)
+ fun_l2_n57(x)
+ else
+ fun_l2_n2(x)
+ end
+end
+
+def fun_l1_n122(x)
+ if (x < 1)
+ fun_l2_n421(x)
+ else
+ fun_l2_n339(x)
+ end
+end
+
+def fun_l1_n123(x)
+ if (x < 1)
+ fun_l2_n623(x)
+ else
+ fun_l2_n985(x)
+ end
+end
+
+def fun_l1_n124(x)
+ if (x < 1)
+ fun_l2_n609(x)
+ else
+ fun_l2_n530(x)
+ end
+end
+
+def fun_l1_n125(x)
+ if (x < 1)
+ fun_l2_n565(x)
+ else
+ fun_l2_n155(x)
+ end
+end
+
+def fun_l1_n126(x)
+ if (x < 1)
+ fun_l2_n135(x)
+ else
+ fun_l2_n971(x)
+ end
+end
+
+def fun_l1_n127(x)
+ if (x < 1)
+ fun_l2_n691(x)
+ else
+ fun_l2_n503(x)
+ end
+end
+
+def fun_l1_n128(x)
+ if (x < 1)
+ fun_l2_n555(x)
+ else
+ fun_l2_n67(x)
+ end
+end
+
+def fun_l1_n129(x)
+ if (x < 1)
+ fun_l2_n95(x)
+ else
+ fun_l2_n192(x)
+ end
+end
+
+def fun_l1_n130(x)
+ if (x < 1)
+ fun_l2_n432(x)
+ else
+ fun_l2_n771(x)
+ end
+end
+
+def fun_l1_n131(x)
+ if (x < 1)
+ fun_l2_n993(x)
+ else
+ fun_l2_n539(x)
+ end
+end
+
+def fun_l1_n132(x)
+ if (x < 1)
+ fun_l2_n809(x)
+ else
+ fun_l2_n902(x)
+ end
+end
+
+def fun_l1_n133(x)
+ if (x < 1)
+ fun_l2_n95(x)
+ else
+ fun_l2_n928(x)
+ end
+end
+
+def fun_l1_n134(x)
+ if (x < 1)
+ fun_l2_n781(x)
+ else
+ fun_l2_n832(x)
+ end
+end
+
+def fun_l1_n135(x)
+ if (x < 1)
+ fun_l2_n231(x)
+ else
+ fun_l2_n392(x)
+ end
+end
+
+def fun_l1_n136(x)
+ if (x < 1)
+ fun_l2_n102(x)
+ else
+ fun_l2_n449(x)
+ end
+end
+
+def fun_l1_n137(x)
+ if (x < 1)
+ fun_l2_n196(x)
+ else
+ fun_l2_n909(x)
+ end
+end
+
+def fun_l1_n138(x)
+ if (x < 1)
+ fun_l2_n449(x)
+ else
+ fun_l2_n190(x)
+ end
+end
+
+def fun_l1_n139(x)
+ if (x < 1)
+ fun_l2_n638(x)
+ else
+ fun_l2_n149(x)
+ end
+end
+
+def fun_l1_n140(x)
+ if (x < 1)
+ fun_l2_n523(x)
+ else
+ fun_l2_n297(x)
+ end
+end
+
+def fun_l1_n141(x)
+ if (x < 1)
+ fun_l2_n476(x)
+ else
+ fun_l2_n161(x)
+ end
+end
+
+def fun_l1_n142(x)
+ if (x < 1)
+ fun_l2_n893(x)
+ else
+ fun_l2_n686(x)
+ end
+end
+
+def fun_l1_n143(x)
+ if (x < 1)
+ fun_l2_n958(x)
+ else
+ fun_l2_n899(x)
+ end
+end
+
+def fun_l1_n144(x)
+ if (x < 1)
+ fun_l2_n104(x)
+ else
+ fun_l2_n912(x)
+ end
+end
+
+def fun_l1_n145(x)
+ if (x < 1)
+ fun_l2_n332(x)
+ else
+ fun_l2_n579(x)
+ end
+end
+
+def fun_l1_n146(x)
+ if (x < 1)
+ fun_l2_n40(x)
+ else
+ fun_l2_n468(x)
+ end
+end
+
+def fun_l1_n147(x)
+ if (x < 1)
+ fun_l2_n771(x)
+ else
+ fun_l2_n314(x)
+ end
+end
+
+def fun_l1_n148(x)
+ if (x < 1)
+ fun_l2_n340(x)
+ else
+ fun_l2_n203(x)
+ end
+end
+
+def fun_l1_n149(x)
+ if (x < 1)
+ fun_l2_n952(x)
+ else
+ fun_l2_n265(x)
+ end
+end
+
+def fun_l1_n150(x)
+ if (x < 1)
+ fun_l2_n11(x)
+ else
+ fun_l2_n303(x)
+ end
+end
+
+def fun_l1_n151(x)
+ if (x < 1)
+ fun_l2_n394(x)
+ else
+ fun_l2_n317(x)
+ end
+end
+
+def fun_l1_n152(x)
+ if (x < 1)
+ fun_l2_n140(x)
+ else
+ fun_l2_n906(x)
+ end
+end
+
+def fun_l1_n153(x)
+ if (x < 1)
+ fun_l2_n75(x)
+ else
+ fun_l2_n485(x)
+ end
+end
+
+def fun_l1_n154(x)
+ if (x < 1)
+ fun_l2_n842(x)
+ else
+ fun_l2_n370(x)
+ end
+end
+
+def fun_l1_n155(x)
+ if (x < 1)
+ fun_l2_n346(x)
+ else
+ fun_l2_n833(x)
+ end
+end
+
+def fun_l1_n156(x)
+ if (x < 1)
+ fun_l2_n433(x)
+ else
+ fun_l2_n373(x)
+ end
+end
+
+def fun_l1_n157(x)
+ if (x < 1)
+ fun_l2_n978(x)
+ else
+ fun_l2_n99(x)
+ end
+end
+
+def fun_l1_n158(x)
+ if (x < 1)
+ fun_l2_n540(x)
+ else
+ fun_l2_n987(x)
+ end
+end
+
+def fun_l1_n159(x)
+ if (x < 1)
+ fun_l2_n785(x)
+ else
+ fun_l2_n478(x)
+ end
+end
+
+def fun_l1_n160(x)
+ if (x < 1)
+ fun_l2_n282(x)
+ else
+ fun_l2_n222(x)
+ end
+end
+
+def fun_l1_n161(x)
+ if (x < 1)
+ fun_l2_n29(x)
+ else
+ fun_l2_n946(x)
+ end
+end
+
+def fun_l1_n162(x)
+ if (x < 1)
+ fun_l2_n314(x)
+ else
+ fun_l2_n758(x)
+ end
+end
+
+def fun_l1_n163(x)
+ if (x < 1)
+ fun_l2_n861(x)
+ else
+ fun_l2_n799(x)
+ end
+end
+
+def fun_l1_n164(x)
+ if (x < 1)
+ fun_l2_n403(x)
+ else
+ fun_l2_n413(x)
+ end
+end
+
+def fun_l1_n165(x)
+ if (x < 1)
+ fun_l2_n160(x)
+ else
+ fun_l2_n794(x)
+ end
+end
+
+def fun_l1_n166(x)
+ if (x < 1)
+ fun_l2_n114(x)
+ else
+ fun_l2_n238(x)
+ end
+end
+
+def fun_l1_n167(x)
+ if (x < 1)
+ fun_l2_n806(x)
+ else
+ fun_l2_n233(x)
+ end
+end
+
+def fun_l1_n168(x)
+ if (x < 1)
+ fun_l2_n871(x)
+ else
+ fun_l2_n53(x)
+ end
+end
+
+def fun_l1_n169(x)
+ if (x < 1)
+ fun_l2_n644(x)
+ else
+ fun_l2_n503(x)
+ end
+end
+
+def fun_l1_n170(x)
+ if (x < 1)
+ fun_l2_n172(x)
+ else
+ fun_l2_n289(x)
+ end
+end
+
+def fun_l1_n171(x)
+ if (x < 1)
+ fun_l2_n920(x)
+ else
+ fun_l2_n851(x)
+ end
+end
+
+def fun_l1_n172(x)
+ if (x < 1)
+ fun_l2_n552(x)
+ else
+ fun_l2_n822(x)
+ end
+end
+
+def fun_l1_n173(x)
+ if (x < 1)
+ fun_l2_n747(x)
+ else
+ fun_l2_n265(x)
+ end
+end
+
+def fun_l1_n174(x)
+ if (x < 1)
+ fun_l2_n287(x)
+ else
+ fun_l2_n762(x)
+ end
+end
+
+def fun_l1_n175(x)
+ if (x < 1)
+ fun_l2_n627(x)
+ else
+ fun_l2_n951(x)
+ end
+end
+
+def fun_l1_n176(x)
+ if (x < 1)
+ fun_l2_n459(x)
+ else
+ fun_l2_n962(x)
+ end
+end
+
+def fun_l1_n177(x)
+ if (x < 1)
+ fun_l2_n214(x)
+ else
+ fun_l2_n145(x)
+ end
+end
+
+def fun_l1_n178(x)
+ if (x < 1)
+ fun_l2_n620(x)
+ else
+ fun_l2_n995(x)
+ end
+end
+
+def fun_l1_n179(x)
+ if (x < 1)
+ fun_l2_n136(x)
+ else
+ fun_l2_n283(x)
+ end
+end
+
+def fun_l1_n180(x)
+ if (x < 1)
+ fun_l2_n377(x)
+ else
+ fun_l2_n672(x)
+ end
+end
+
+def fun_l1_n181(x)
+ if (x < 1)
+ fun_l2_n218(x)
+ else
+ fun_l2_n434(x)
+ end
+end
+
+def fun_l1_n182(x)
+ if (x < 1)
+ fun_l2_n522(x)
+ else
+ fun_l2_n525(x)
+ end
+end
+
+def fun_l1_n183(x)
+ if (x < 1)
+ fun_l2_n648(x)
+ else
+ fun_l2_n725(x)
+ end
+end
+
+def fun_l1_n184(x)
+ if (x < 1)
+ fun_l2_n579(x)
+ else
+ fun_l2_n974(x)
+ end
+end
+
+def fun_l1_n185(x)
+ if (x < 1)
+ fun_l2_n10(x)
+ else
+ fun_l2_n514(x)
+ end
+end
+
+def fun_l1_n186(x)
+ if (x < 1)
+ fun_l2_n829(x)
+ else
+ fun_l2_n754(x)
+ end
+end
+
+def fun_l1_n187(x)
+ if (x < 1)
+ fun_l2_n380(x)
+ else
+ fun_l2_n401(x)
+ end
+end
+
+def fun_l1_n188(x)
+ if (x < 1)
+ fun_l2_n839(x)
+ else
+ fun_l2_n453(x)
+ end
+end
+
+def fun_l1_n189(x)
+ if (x < 1)
+ fun_l2_n553(x)
+ else
+ fun_l2_n553(x)
+ end
+end
+
+def fun_l1_n190(x)
+ if (x < 1)
+ fun_l2_n972(x)
+ else
+ fun_l2_n187(x)
+ end
+end
+
+def fun_l1_n191(x)
+ if (x < 1)
+ fun_l2_n275(x)
+ else
+ fun_l2_n11(x)
+ end
+end
+
+def fun_l1_n192(x)
+ if (x < 1)
+ fun_l2_n1(x)
+ else
+ fun_l2_n927(x)
+ end
+end
+
+def fun_l1_n193(x)
+ if (x < 1)
+ fun_l2_n569(x)
+ else
+ fun_l2_n557(x)
+ end
+end
+
+def fun_l1_n194(x)
+ if (x < 1)
+ fun_l2_n380(x)
+ else
+ fun_l2_n187(x)
+ end
+end
+
+def fun_l1_n195(x)
+ if (x < 1)
+ fun_l2_n297(x)
+ else
+ fun_l2_n813(x)
+ end
+end
+
+def fun_l1_n196(x)
+ if (x < 1)
+ fun_l2_n565(x)
+ else
+ fun_l2_n514(x)
+ end
+end
+
+def fun_l1_n197(x)
+ if (x < 1)
+ fun_l2_n473(x)
+ else
+ fun_l2_n64(x)
+ end
+end
+
+def fun_l1_n198(x)
+ if (x < 1)
+ fun_l2_n182(x)
+ else
+ fun_l2_n363(x)
+ end
+end
+
+def fun_l1_n199(x)
+ if (x < 1)
+ fun_l2_n660(x)
+ else
+ fun_l2_n506(x)
+ end
+end
+
+def fun_l1_n200(x)
+ if (x < 1)
+ fun_l2_n834(x)
+ else
+ fun_l2_n348(x)
+ end
+end
+
+def fun_l1_n201(x)
+ if (x < 1)
+ fun_l2_n403(x)
+ else
+ fun_l2_n432(x)
+ end
+end
+
+def fun_l1_n202(x)
+ if (x < 1)
+ fun_l2_n115(x)
+ else
+ fun_l2_n145(x)
+ end
+end
+
+def fun_l1_n203(x)
+ if (x < 1)
+ fun_l2_n637(x)
+ else
+ fun_l2_n4(x)
+ end
+end
+
+def fun_l1_n204(x)
+ if (x < 1)
+ fun_l2_n951(x)
+ else
+ fun_l2_n753(x)
+ end
+end
+
+def fun_l1_n205(x)
+ if (x < 1)
+ fun_l2_n703(x)
+ else
+ fun_l2_n563(x)
+ end
+end
+
+def fun_l1_n206(x)
+ if (x < 1)
+ fun_l2_n614(x)
+ else
+ fun_l2_n563(x)
+ end
+end
+
+def fun_l1_n207(x)
+ if (x < 1)
+ fun_l2_n210(x)
+ else
+ fun_l2_n139(x)
+ end
+end
+
+def fun_l1_n208(x)
+ if (x < 1)
+ fun_l2_n471(x)
+ else
+ fun_l2_n346(x)
+ end
+end
+
+def fun_l1_n209(x)
+ if (x < 1)
+ fun_l2_n922(x)
+ else
+ fun_l2_n54(x)
+ end
+end
+
+def fun_l1_n210(x)
+ if (x < 1)
+ fun_l2_n494(x)
+ else
+ fun_l2_n41(x)
+ end
+end
+
+def fun_l1_n211(x)
+ if (x < 1)
+ fun_l2_n87(x)
+ else
+ fun_l2_n190(x)
+ end
+end
+
+def fun_l1_n212(x)
+ if (x < 1)
+ fun_l2_n458(x)
+ else
+ fun_l2_n885(x)
+ end
+end
+
+def fun_l1_n213(x)
+ if (x < 1)
+ fun_l2_n48(x)
+ else
+ fun_l2_n225(x)
+ end
+end
+
+def fun_l1_n214(x)
+ if (x < 1)
+ fun_l2_n706(x)
+ else
+ fun_l2_n694(x)
+ end
+end
+
+def fun_l1_n215(x)
+ if (x < 1)
+ fun_l2_n116(x)
+ else
+ fun_l2_n233(x)
+ end
+end
+
+def fun_l1_n216(x)
+ if (x < 1)
+ fun_l2_n279(x)
+ else
+ fun_l2_n857(x)
+ end
+end
+
+def fun_l1_n217(x)
+ if (x < 1)
+ fun_l2_n643(x)
+ else
+ fun_l2_n289(x)
+ end
+end
+
+def fun_l1_n218(x)
+ if (x < 1)
+ fun_l2_n535(x)
+ else
+ fun_l2_n833(x)
+ end
+end
+
+def fun_l1_n219(x)
+ if (x < 1)
+ fun_l2_n478(x)
+ else
+ fun_l2_n793(x)
+ end
+end
+
+def fun_l1_n220(x)
+ if (x < 1)
+ fun_l2_n57(x)
+ else
+ fun_l2_n64(x)
+ end
+end
+
+def fun_l1_n221(x)
+ if (x < 1)
+ fun_l2_n36(x)
+ else
+ fun_l2_n74(x)
+ end
+end
+
+def fun_l1_n222(x)
+ if (x < 1)
+ fun_l2_n438(x)
+ else
+ fun_l2_n616(x)
+ end
+end
+
+def fun_l1_n223(x)
+ if (x < 1)
+ fun_l2_n900(x)
+ else
+ fun_l2_n744(x)
+ end
+end
+
+def fun_l1_n224(x)
+ if (x < 1)
+ fun_l2_n780(x)
+ else
+ fun_l2_n213(x)
+ end
+end
+
+def fun_l1_n225(x)
+ if (x < 1)
+ fun_l2_n916(x)
+ else
+ fun_l2_n471(x)
+ end
+end
+
+def fun_l1_n226(x)
+ if (x < 1)
+ fun_l2_n236(x)
+ else
+ fun_l2_n980(x)
+ end
+end
+
+def fun_l1_n227(x)
+ if (x < 1)
+ fun_l2_n28(x)
+ else
+ fun_l2_n291(x)
+ end
+end
+
+def fun_l1_n228(x)
+ if (x < 1)
+ fun_l2_n35(x)
+ else
+ fun_l2_n353(x)
+ end
+end
+
+def fun_l1_n229(x)
+ if (x < 1)
+ fun_l2_n404(x)
+ else
+ fun_l2_n254(x)
+ end
+end
+
+def fun_l1_n230(x)
+ if (x < 1)
+ fun_l2_n641(x)
+ else
+ fun_l2_n464(x)
+ end
+end
+
+def fun_l1_n231(x)
+ if (x < 1)
+ fun_l2_n592(x)
+ else
+ fun_l2_n646(x)
+ end
+end
+
+def fun_l1_n232(x)
+ if (x < 1)
+ fun_l2_n162(x)
+ else
+ fun_l2_n215(x)
+ end
+end
+
+def fun_l1_n233(x)
+ if (x < 1)
+ fun_l2_n219(x)
+ else
+ fun_l2_n772(x)
+ end
+end
+
+def fun_l1_n234(x)
+ if (x < 1)
+ fun_l2_n304(x)
+ else
+ fun_l2_n963(x)
+ end
+end
+
+def fun_l1_n235(x)
+ if (x < 1)
+ fun_l2_n610(x)
+ else
+ fun_l2_n716(x)
+ end
+end
+
+def fun_l1_n236(x)
+ if (x < 1)
+ fun_l2_n151(x)
+ else
+ fun_l2_n498(x)
+ end
+end
+
+def fun_l1_n237(x)
+ if (x < 1)
+ fun_l2_n288(x)
+ else
+ fun_l2_n261(x)
+ end
+end
+
+def fun_l1_n238(x)
+ if (x < 1)
+ fun_l2_n139(x)
+ else
+ fun_l2_n18(x)
+ end
+end
+
+def fun_l1_n239(x)
+ if (x < 1)
+ fun_l2_n559(x)
+ else
+ fun_l2_n148(x)
+ end
+end
+
+def fun_l1_n240(x)
+ if (x < 1)
+ fun_l2_n520(x)
+ else
+ fun_l2_n250(x)
+ end
+end
+
+def fun_l1_n241(x)
+ if (x < 1)
+ fun_l2_n42(x)
+ else
+ fun_l2_n196(x)
+ end
+end
+
+def fun_l1_n242(x)
+ if (x < 1)
+ fun_l2_n16(x)
+ else
+ fun_l2_n540(x)
+ end
+end
+
+def fun_l1_n243(x)
+ if (x < 1)
+ fun_l2_n539(x)
+ else
+ fun_l2_n890(x)
+ end
+end
+
+def fun_l1_n244(x)
+ if (x < 1)
+ fun_l2_n559(x)
+ else
+ fun_l2_n423(x)
+ end
+end
+
+def fun_l1_n245(x)
+ if (x < 1)
+ fun_l2_n110(x)
+ else
+ fun_l2_n711(x)
+ end
+end
+
+def fun_l1_n246(x)
+ if (x < 1)
+ fun_l2_n151(x)
+ else
+ fun_l2_n407(x)
+ end
+end
+
+def fun_l1_n247(x)
+ if (x < 1)
+ fun_l2_n118(x)
+ else
+ fun_l2_n209(x)
+ end
+end
+
+def fun_l1_n248(x)
+ if (x < 1)
+ fun_l2_n764(x)
+ else
+ fun_l2_n932(x)
+ end
+end
+
+def fun_l1_n249(x)
+ if (x < 1)
+ fun_l2_n991(x)
+ else
+ fun_l2_n828(x)
+ end
+end
+
+def fun_l1_n250(x)
+ if (x < 1)
+ fun_l2_n354(x)
+ else
+ fun_l2_n53(x)
+ end
+end
+
+def fun_l1_n251(x)
+ if (x < 1)
+ fun_l2_n63(x)
+ else
+ fun_l2_n292(x)
+ end
+end
+
+def fun_l1_n252(x)
+ if (x < 1)
+ fun_l2_n874(x)
+ else
+ fun_l2_n544(x)
+ end
+end
+
+def fun_l1_n253(x)
+ if (x < 1)
+ fun_l2_n336(x)
+ else
+ fun_l2_n57(x)
+ end
+end
+
+def fun_l1_n254(x)
+ if (x < 1)
+ fun_l2_n302(x)
+ else
+ fun_l2_n90(x)
+ end
+end
+
+def fun_l1_n255(x)
+ if (x < 1)
+ fun_l2_n122(x)
+ else
+ fun_l2_n888(x)
+ end
+end
+
+def fun_l1_n256(x)
+ if (x < 1)
+ fun_l2_n326(x)
+ else
+ fun_l2_n616(x)
+ end
+end
+
+def fun_l1_n257(x)
+ if (x < 1)
+ fun_l2_n77(x)
+ else
+ fun_l2_n622(x)
+ end
+end
+
+def fun_l1_n258(x)
+ if (x < 1)
+ fun_l2_n785(x)
+ else
+ fun_l2_n229(x)
+ end
+end
+
+def fun_l1_n259(x)
+ if (x < 1)
+ fun_l2_n504(x)
+ else
+ fun_l2_n766(x)
+ end
+end
+
+def fun_l1_n260(x)
+ if (x < 1)
+ fun_l2_n857(x)
+ else
+ fun_l2_n823(x)
+ end
+end
+
+def fun_l1_n261(x)
+ if (x < 1)
+ fun_l2_n72(x)
+ else
+ fun_l2_n875(x)
+ end
+end
+
+def fun_l1_n262(x)
+ if (x < 1)
+ fun_l2_n215(x)
+ else
+ fun_l2_n549(x)
+ end
+end
+
+def fun_l1_n263(x)
+ if (x < 1)
+ fun_l2_n232(x)
+ else
+ fun_l2_n96(x)
+ end
+end
+
+def fun_l1_n264(x)
+ if (x < 1)
+ fun_l2_n183(x)
+ else
+ fun_l2_n351(x)
+ end
+end
+
+def fun_l1_n265(x)
+ if (x < 1)
+ fun_l2_n906(x)
+ else
+ fun_l2_n154(x)
+ end
+end
+
+def fun_l1_n266(x)
+ if (x < 1)
+ fun_l2_n234(x)
+ else
+ fun_l2_n547(x)
+ end
+end
+
+def fun_l1_n267(x)
+ if (x < 1)
+ fun_l2_n882(x)
+ else
+ fun_l2_n802(x)
+ end
+end
+
+def fun_l1_n268(x)
+ if (x < 1)
+ fun_l2_n844(x)
+ else
+ fun_l2_n430(x)
+ end
+end
+
+def fun_l1_n269(x)
+ if (x < 1)
+ fun_l2_n781(x)
+ else
+ fun_l2_n738(x)
+ end
+end
+
+def fun_l1_n270(x)
+ if (x < 1)
+ fun_l2_n88(x)
+ else
+ fun_l2_n439(x)
+ end
+end
+
+def fun_l1_n271(x)
+ if (x < 1)
+ fun_l2_n334(x)
+ else
+ fun_l2_n508(x)
+ end
+end
+
+def fun_l1_n272(x)
+ if (x < 1)
+ fun_l2_n871(x)
+ else
+ fun_l2_n562(x)
+ end
+end
+
+def fun_l1_n273(x)
+ if (x < 1)
+ fun_l2_n328(x)
+ else
+ fun_l2_n356(x)
+ end
+end
+
+def fun_l1_n274(x)
+ if (x < 1)
+ fun_l2_n654(x)
+ else
+ fun_l2_n511(x)
+ end
+end
+
+def fun_l1_n275(x)
+ if (x < 1)
+ fun_l2_n309(x)
+ else
+ fun_l2_n840(x)
+ end
+end
+
+def fun_l1_n276(x)
+ if (x < 1)
+ fun_l2_n729(x)
+ else
+ fun_l2_n72(x)
+ end
+end
+
+def fun_l1_n277(x)
+ if (x < 1)
+ fun_l2_n993(x)
+ else
+ fun_l2_n858(x)
+ end
+end
+
+def fun_l1_n278(x)
+ if (x < 1)
+ fun_l2_n664(x)
+ else
+ fun_l2_n50(x)
+ end
+end
+
+def fun_l1_n279(x)
+ if (x < 1)
+ fun_l2_n225(x)
+ else
+ fun_l2_n403(x)
+ end
+end
+
+def fun_l1_n280(x)
+ if (x < 1)
+ fun_l2_n780(x)
+ else
+ fun_l2_n396(x)
+ end
+end
+
+def fun_l1_n281(x)
+ if (x < 1)
+ fun_l2_n114(x)
+ else
+ fun_l2_n114(x)
+ end
+end
+
+def fun_l1_n282(x)
+ if (x < 1)
+ fun_l2_n568(x)
+ else
+ fun_l2_n610(x)
+ end
+end
+
+def fun_l1_n283(x)
+ if (x < 1)
+ fun_l2_n539(x)
+ else
+ fun_l2_n990(x)
+ end
+end
+
+def fun_l1_n284(x)
+ if (x < 1)
+ fun_l2_n773(x)
+ else
+ fun_l2_n622(x)
+ end
+end
+
+def fun_l1_n285(x)
+ if (x < 1)
+ fun_l2_n695(x)
+ else
+ fun_l2_n101(x)
+ end
+end
+
+def fun_l1_n286(x)
+ if (x < 1)
+ fun_l2_n239(x)
+ else
+ fun_l2_n242(x)
+ end
+end
+
+def fun_l1_n287(x)
+ if (x < 1)
+ fun_l2_n191(x)
+ else
+ fun_l2_n839(x)
+ end
+end
+
+def fun_l1_n288(x)
+ if (x < 1)
+ fun_l2_n246(x)
+ else
+ fun_l2_n179(x)
+ end
+end
+
+def fun_l1_n289(x)
+ if (x < 1)
+ fun_l2_n300(x)
+ else
+ fun_l2_n975(x)
+ end
+end
+
+def fun_l1_n290(x)
+ if (x < 1)
+ fun_l2_n436(x)
+ else
+ fun_l2_n14(x)
+ end
+end
+
+def fun_l1_n291(x)
+ if (x < 1)
+ fun_l2_n33(x)
+ else
+ fun_l2_n303(x)
+ end
+end
+
+def fun_l1_n292(x)
+ if (x < 1)
+ fun_l2_n462(x)
+ else
+ fun_l2_n399(x)
+ end
+end
+
+def fun_l1_n293(x)
+ if (x < 1)
+ fun_l2_n782(x)
+ else
+ fun_l2_n521(x)
+ end
+end
+
+def fun_l1_n294(x)
+ if (x < 1)
+ fun_l2_n995(x)
+ else
+ fun_l2_n944(x)
+ end
+end
+
+def fun_l1_n295(x)
+ if (x < 1)
+ fun_l2_n700(x)
+ else
+ fun_l2_n250(x)
+ end
+end
+
+def fun_l1_n296(x)
+ if (x < 1)
+ fun_l2_n866(x)
+ else
+ fun_l2_n566(x)
+ end
+end
+
+def fun_l1_n297(x)
+ if (x < 1)
+ fun_l2_n373(x)
+ else
+ fun_l2_n881(x)
+ end
+end
+
+def fun_l1_n298(x)
+ if (x < 1)
+ fun_l2_n382(x)
+ else
+ fun_l2_n729(x)
+ end
+end
+
+def fun_l1_n299(x)
+ if (x < 1)
+ fun_l2_n878(x)
+ else
+ fun_l2_n192(x)
+ end
+end
+
+def fun_l1_n300(x)
+ if (x < 1)
+ fun_l2_n65(x)
+ else
+ fun_l2_n434(x)
+ end
+end
+
+def fun_l1_n301(x)
+ if (x < 1)
+ fun_l2_n513(x)
+ else
+ fun_l2_n617(x)
+ end
+end
+
+def fun_l1_n302(x)
+ if (x < 1)
+ fun_l2_n972(x)
+ else
+ fun_l2_n482(x)
+ end
+end
+
+def fun_l1_n303(x)
+ if (x < 1)
+ fun_l2_n533(x)
+ else
+ fun_l2_n774(x)
+ end
+end
+
+def fun_l1_n304(x)
+ if (x < 1)
+ fun_l2_n439(x)
+ else
+ fun_l2_n351(x)
+ end
+end
+
+def fun_l1_n305(x)
+ if (x < 1)
+ fun_l2_n181(x)
+ else
+ fun_l2_n74(x)
+ end
+end
+
+def fun_l1_n306(x)
+ if (x < 1)
+ fun_l2_n117(x)
+ else
+ fun_l2_n962(x)
+ end
+end
+
+def fun_l1_n307(x)
+ if (x < 1)
+ fun_l2_n499(x)
+ else
+ fun_l2_n183(x)
+ end
+end
+
+def fun_l1_n308(x)
+ if (x < 1)
+ fun_l2_n358(x)
+ else
+ fun_l2_n226(x)
+ end
+end
+
+def fun_l1_n309(x)
+ if (x < 1)
+ fun_l2_n227(x)
+ else
+ fun_l2_n425(x)
+ end
+end
+
+def fun_l1_n310(x)
+ if (x < 1)
+ fun_l2_n724(x)
+ else
+ fun_l2_n278(x)
+ end
+end
+
+def fun_l1_n311(x)
+ if (x < 1)
+ fun_l2_n784(x)
+ else
+ fun_l2_n888(x)
+ end
+end
+
+def fun_l1_n312(x)
+ if (x < 1)
+ fun_l2_n952(x)
+ else
+ fun_l2_n849(x)
+ end
+end
+
+def fun_l1_n313(x)
+ if (x < 1)
+ fun_l2_n921(x)
+ else
+ fun_l2_n31(x)
+ end
+end
+
+def fun_l1_n314(x)
+ if (x < 1)
+ fun_l2_n469(x)
+ else
+ fun_l2_n856(x)
+ end
+end
+
+def fun_l1_n315(x)
+ if (x < 1)
+ fun_l2_n89(x)
+ else
+ fun_l2_n949(x)
+ end
+end
+
+def fun_l1_n316(x)
+ if (x < 1)
+ fun_l2_n216(x)
+ else
+ fun_l2_n410(x)
+ end
+end
+
+def fun_l1_n317(x)
+ if (x < 1)
+ fun_l2_n591(x)
+ else
+ fun_l2_n203(x)
+ end
+end
+
+def fun_l1_n318(x)
+ if (x < 1)
+ fun_l2_n19(x)
+ else
+ fun_l2_n461(x)
+ end
+end
+
+def fun_l1_n319(x)
+ if (x < 1)
+ fun_l2_n875(x)
+ else
+ fun_l2_n996(x)
+ end
+end
+
+def fun_l1_n320(x)
+ if (x < 1)
+ fun_l2_n696(x)
+ else
+ fun_l2_n987(x)
+ end
+end
+
+def fun_l1_n321(x)
+ if (x < 1)
+ fun_l2_n708(x)
+ else
+ fun_l2_n150(x)
+ end
+end
+
+def fun_l1_n322(x)
+ if (x < 1)
+ fun_l2_n324(x)
+ else
+ fun_l2_n814(x)
+ end
+end
+
+def fun_l1_n323(x)
+ if (x < 1)
+ fun_l2_n427(x)
+ else
+ fun_l2_n472(x)
+ end
+end
+
+def fun_l1_n324(x)
+ if (x < 1)
+ fun_l2_n847(x)
+ else
+ fun_l2_n482(x)
+ end
+end
+
+def fun_l1_n325(x)
+ if (x < 1)
+ fun_l2_n335(x)
+ else
+ fun_l2_n854(x)
+ end
+end
+
+def fun_l1_n326(x)
+ if (x < 1)
+ fun_l2_n635(x)
+ else
+ fun_l2_n276(x)
+ end
+end
+
+def fun_l1_n327(x)
+ if (x < 1)
+ fun_l2_n539(x)
+ else
+ fun_l2_n885(x)
+ end
+end
+
+def fun_l1_n328(x)
+ if (x < 1)
+ fun_l2_n979(x)
+ else
+ fun_l2_n558(x)
+ end
+end
+
+def fun_l1_n329(x)
+ if (x < 1)
+ fun_l2_n35(x)
+ else
+ fun_l2_n696(x)
+ end
+end
+
+def fun_l1_n330(x)
+ if (x < 1)
+ fun_l2_n483(x)
+ else
+ fun_l2_n218(x)
+ end
+end
+
+def fun_l1_n331(x)
+ if (x < 1)
+ fun_l2_n3(x)
+ else
+ fun_l2_n324(x)
+ end
+end
+
+def fun_l1_n332(x)
+ if (x < 1)
+ fun_l2_n396(x)
+ else
+ fun_l2_n718(x)
+ end
+end
+
+def fun_l1_n333(x)
+ if (x < 1)
+ fun_l2_n973(x)
+ else
+ fun_l2_n663(x)
+ end
+end
+
+def fun_l1_n334(x)
+ if (x < 1)
+ fun_l2_n441(x)
+ else
+ fun_l2_n468(x)
+ end
+end
+
+def fun_l1_n335(x)
+ if (x < 1)
+ fun_l2_n511(x)
+ else
+ fun_l2_n455(x)
+ end
+end
+
+def fun_l1_n336(x)
+ if (x < 1)
+ fun_l2_n732(x)
+ else
+ fun_l2_n14(x)
+ end
+end
+
+def fun_l1_n337(x)
+ if (x < 1)
+ fun_l2_n484(x)
+ else
+ fun_l2_n482(x)
+ end
+end
+
+def fun_l1_n338(x)
+ if (x < 1)
+ fun_l2_n604(x)
+ else
+ fun_l2_n683(x)
+ end
+end
+
+def fun_l1_n339(x)
+ if (x < 1)
+ fun_l2_n334(x)
+ else
+ fun_l2_n710(x)
+ end
+end
+
+def fun_l1_n340(x)
+ if (x < 1)
+ fun_l2_n233(x)
+ else
+ fun_l2_n394(x)
+ end
+end
+
+def fun_l1_n341(x)
+ if (x < 1)
+ fun_l2_n948(x)
+ else
+ fun_l2_n473(x)
+ end
+end
+
+def fun_l1_n342(x)
+ if (x < 1)
+ fun_l2_n858(x)
+ else
+ fun_l2_n466(x)
+ end
+end
+
+def fun_l1_n343(x)
+ if (x < 1)
+ fun_l2_n647(x)
+ else
+ fun_l2_n604(x)
+ end
+end
+
+def fun_l1_n344(x)
+ if (x < 1)
+ fun_l2_n872(x)
+ else
+ fun_l2_n965(x)
+ end
+end
+
+def fun_l1_n345(x)
+ if (x < 1)
+ fun_l2_n845(x)
+ else
+ fun_l2_n701(x)
+ end
+end
+
+def fun_l1_n346(x)
+ if (x < 1)
+ fun_l2_n75(x)
+ else
+ fun_l2_n332(x)
+ end
+end
+
+def fun_l1_n347(x)
+ if (x < 1)
+ fun_l2_n873(x)
+ else
+ fun_l2_n575(x)
+ end
+end
+
+def fun_l1_n348(x)
+ if (x < 1)
+ fun_l2_n303(x)
+ else
+ fun_l2_n445(x)
+ end
+end
+
+def fun_l1_n349(x)
+ if (x < 1)
+ fun_l2_n45(x)
+ else
+ fun_l2_n312(x)
+ end
+end
+
+def fun_l1_n350(x)
+ if (x < 1)
+ fun_l2_n751(x)
+ else
+ fun_l2_n316(x)
+ end
+end
+
+def fun_l1_n351(x)
+ if (x < 1)
+ fun_l2_n781(x)
+ else
+ fun_l2_n165(x)
+ end
+end
+
+def fun_l1_n352(x)
+ if (x < 1)
+ fun_l2_n120(x)
+ else
+ fun_l2_n595(x)
+ end
+end
+
+def fun_l1_n353(x)
+ if (x < 1)
+ fun_l2_n221(x)
+ else
+ fun_l2_n827(x)
+ end
+end
+
+def fun_l1_n354(x)
+ if (x < 1)
+ fun_l2_n290(x)
+ else
+ fun_l2_n27(x)
+ end
+end
+
+def fun_l1_n355(x)
+ if (x < 1)
+ fun_l2_n377(x)
+ else
+ fun_l2_n28(x)
+ end
+end
+
+def fun_l1_n356(x)
+ if (x < 1)
+ fun_l2_n474(x)
+ else
+ fun_l2_n811(x)
+ end
+end
+
+def fun_l1_n357(x)
+ if (x < 1)
+ fun_l2_n90(x)
+ else
+ fun_l2_n559(x)
+ end
+end
+
+def fun_l1_n358(x)
+ if (x < 1)
+ fun_l2_n66(x)
+ else
+ fun_l2_n234(x)
+ end
+end
+
+def fun_l1_n359(x)
+ if (x < 1)
+ fun_l2_n526(x)
+ else
+ fun_l2_n674(x)
+ end
+end
+
+def fun_l1_n360(x)
+ if (x < 1)
+ fun_l2_n449(x)
+ else
+ fun_l2_n364(x)
+ end
+end
+
+def fun_l1_n361(x)
+ if (x < 1)
+ fun_l2_n790(x)
+ else
+ fun_l2_n835(x)
+ end
+end
+
+def fun_l1_n362(x)
+ if (x < 1)
+ fun_l2_n335(x)
+ else
+ fun_l2_n22(x)
+ end
+end
+
+def fun_l1_n363(x)
+ if (x < 1)
+ fun_l2_n982(x)
+ else
+ fun_l2_n449(x)
+ end
+end
+
+def fun_l1_n364(x)
+ if (x < 1)
+ fun_l2_n980(x)
+ else
+ fun_l2_n861(x)
+ end
+end
+
+def fun_l1_n365(x)
+ if (x < 1)
+ fun_l2_n673(x)
+ else
+ fun_l2_n267(x)
+ end
+end
+
+def fun_l1_n366(x)
+ if (x < 1)
+ fun_l2_n862(x)
+ else
+ fun_l2_n559(x)
+ end
+end
+
+def fun_l1_n367(x)
+ if (x < 1)
+ fun_l2_n637(x)
+ else
+ fun_l2_n275(x)
+ end
+end
+
+def fun_l1_n368(x)
+ if (x < 1)
+ fun_l2_n370(x)
+ else
+ fun_l2_n763(x)
+ end
+end
+
+def fun_l1_n369(x)
+ if (x < 1)
+ fun_l2_n680(x)
+ else
+ fun_l2_n476(x)
+ end
+end
+
+def fun_l1_n370(x)
+ if (x < 1)
+ fun_l2_n799(x)
+ else
+ fun_l2_n724(x)
+ end
+end
+
+def fun_l1_n371(x)
+ if (x < 1)
+ fun_l2_n331(x)
+ else
+ fun_l2_n927(x)
+ end
+end
+
+def fun_l1_n372(x)
+ if (x < 1)
+ fun_l2_n875(x)
+ else
+ fun_l2_n350(x)
+ end
+end
+
+def fun_l1_n373(x)
+ if (x < 1)
+ fun_l2_n734(x)
+ else
+ fun_l2_n172(x)
+ end
+end
+
+def fun_l1_n374(x)
+ if (x < 1)
+ fun_l2_n657(x)
+ else
+ fun_l2_n765(x)
+ end
+end
+
+def fun_l1_n375(x)
+ if (x < 1)
+ fun_l2_n950(x)
+ else
+ fun_l2_n85(x)
+ end
+end
+
+def fun_l1_n376(x)
+ if (x < 1)
+ fun_l2_n276(x)
+ else
+ fun_l2_n892(x)
+ end
+end
+
+def fun_l1_n377(x)
+ if (x < 1)
+ fun_l2_n430(x)
+ else
+ fun_l2_n173(x)
+ end
+end
+
+def fun_l1_n378(x)
+ if (x < 1)
+ fun_l2_n376(x)
+ else
+ fun_l2_n876(x)
+ end
+end
+
+def fun_l1_n379(x)
+ if (x < 1)
+ fun_l2_n981(x)
+ else
+ fun_l2_n314(x)
+ end
+end
+
+def fun_l1_n380(x)
+ if (x < 1)
+ fun_l2_n680(x)
+ else
+ fun_l2_n600(x)
+ end
+end
+
+def fun_l1_n381(x)
+ if (x < 1)
+ fun_l2_n54(x)
+ else
+ fun_l2_n158(x)
+ end
+end
+
+def fun_l1_n382(x)
+ if (x < 1)
+ fun_l2_n692(x)
+ else
+ fun_l2_n673(x)
+ end
+end
+
+def fun_l1_n383(x)
+ if (x < 1)
+ fun_l2_n393(x)
+ else
+ fun_l2_n723(x)
+ end
+end
+
+def fun_l1_n384(x)
+ if (x < 1)
+ fun_l2_n899(x)
+ else
+ fun_l2_n940(x)
+ end
+end
+
+def fun_l1_n385(x)
+ if (x < 1)
+ fun_l2_n473(x)
+ else
+ fun_l2_n846(x)
+ end
+end
+
+def fun_l1_n386(x)
+ if (x < 1)
+ fun_l2_n291(x)
+ else
+ fun_l2_n864(x)
+ end
+end
+
+def fun_l1_n387(x)
+ if (x < 1)
+ fun_l2_n174(x)
+ else
+ fun_l2_n466(x)
+ end
+end
+
+def fun_l1_n388(x)
+ if (x < 1)
+ fun_l2_n883(x)
+ else
+ fun_l2_n613(x)
+ end
+end
+
+def fun_l1_n389(x)
+ if (x < 1)
+ fun_l2_n808(x)
+ else
+ fun_l2_n169(x)
+ end
+end
+
+def fun_l1_n390(x)
+ if (x < 1)
+ fun_l2_n94(x)
+ else
+ fun_l2_n93(x)
+ end
+end
+
+def fun_l1_n391(x)
+ if (x < 1)
+ fun_l2_n874(x)
+ else
+ fun_l2_n400(x)
+ end
+end
+
+def fun_l1_n392(x)
+ if (x < 1)
+ fun_l2_n473(x)
+ else
+ fun_l2_n239(x)
+ end
+end
+
+def fun_l1_n393(x)
+ if (x < 1)
+ fun_l2_n64(x)
+ else
+ fun_l2_n760(x)
+ end
+end
+
+def fun_l1_n394(x)
+ if (x < 1)
+ fun_l2_n947(x)
+ else
+ fun_l2_n354(x)
+ end
+end
+
+def fun_l1_n395(x)
+ if (x < 1)
+ fun_l2_n833(x)
+ else
+ fun_l2_n563(x)
+ end
+end
+
+def fun_l1_n396(x)
+ if (x < 1)
+ fun_l2_n429(x)
+ else
+ fun_l2_n280(x)
+ end
+end
+
+def fun_l1_n397(x)
+ if (x < 1)
+ fun_l2_n132(x)
+ else
+ fun_l2_n486(x)
+ end
+end
+
+def fun_l1_n398(x)
+ if (x < 1)
+ fun_l2_n28(x)
+ else
+ fun_l2_n135(x)
+ end
+end
+
+def fun_l1_n399(x)
+ if (x < 1)
+ fun_l2_n804(x)
+ else
+ fun_l2_n406(x)
+ end
+end
+
+def fun_l1_n400(x)
+ if (x < 1)
+ fun_l2_n661(x)
+ else
+ fun_l2_n216(x)
+ end
+end
+
+def fun_l1_n401(x)
+ if (x < 1)
+ fun_l2_n681(x)
+ else
+ fun_l2_n849(x)
+ end
+end
+
+def fun_l1_n402(x)
+ if (x < 1)
+ fun_l2_n13(x)
+ else
+ fun_l2_n205(x)
+ end
+end
+
+def fun_l1_n403(x)
+ if (x < 1)
+ fun_l2_n911(x)
+ else
+ fun_l2_n63(x)
+ end
+end
+
+def fun_l1_n404(x)
+ if (x < 1)
+ fun_l2_n518(x)
+ else
+ fun_l2_n845(x)
+ end
+end
+
+def fun_l1_n405(x)
+ if (x < 1)
+ fun_l2_n699(x)
+ else
+ fun_l2_n963(x)
+ end
+end
+
+def fun_l1_n406(x)
+ if (x < 1)
+ fun_l2_n373(x)
+ else
+ fun_l2_n485(x)
+ end
+end
+
+def fun_l1_n407(x)
+ if (x < 1)
+ fun_l2_n402(x)
+ else
+ fun_l2_n582(x)
+ end
+end
+
+def fun_l1_n408(x)
+ if (x < 1)
+ fun_l2_n802(x)
+ else
+ fun_l2_n420(x)
+ end
+end
+
+def fun_l1_n409(x)
+ if (x < 1)
+ fun_l2_n728(x)
+ else
+ fun_l2_n50(x)
+ end
+end
+
+def fun_l1_n410(x)
+ if (x < 1)
+ fun_l2_n189(x)
+ else
+ fun_l2_n588(x)
+ end
+end
+
+def fun_l1_n411(x)
+ if (x < 1)
+ fun_l2_n51(x)
+ else
+ fun_l2_n23(x)
+ end
+end
+
+def fun_l1_n412(x)
+ if (x < 1)
+ fun_l2_n548(x)
+ else
+ fun_l2_n200(x)
+ end
+end
+
+def fun_l1_n413(x)
+ if (x < 1)
+ fun_l2_n763(x)
+ else
+ fun_l2_n581(x)
+ end
+end
+
+def fun_l1_n414(x)
+ if (x < 1)
+ fun_l2_n650(x)
+ else
+ fun_l2_n535(x)
+ end
+end
+
+def fun_l1_n415(x)
+ if (x < 1)
+ fun_l2_n500(x)
+ else
+ fun_l2_n579(x)
+ end
+end
+
+def fun_l1_n416(x)
+ if (x < 1)
+ fun_l2_n573(x)
+ else
+ fun_l2_n154(x)
+ end
+end
+
+def fun_l1_n417(x)
+ if (x < 1)
+ fun_l2_n685(x)
+ else
+ fun_l2_n14(x)
+ end
+end
+
+def fun_l1_n418(x)
+ if (x < 1)
+ fun_l2_n849(x)
+ else
+ fun_l2_n91(x)
+ end
+end
+
+def fun_l1_n419(x)
+ if (x < 1)
+ fun_l2_n629(x)
+ else
+ fun_l2_n698(x)
+ end
+end
+
+def fun_l1_n420(x)
+ if (x < 1)
+ fun_l2_n273(x)
+ else
+ fun_l2_n303(x)
+ end
+end
+
+def fun_l1_n421(x)
+ if (x < 1)
+ fun_l2_n8(x)
+ else
+ fun_l2_n476(x)
+ end
+end
+
+def fun_l1_n422(x)
+ if (x < 1)
+ fun_l2_n294(x)
+ else
+ fun_l2_n338(x)
+ end
+end
+
+def fun_l1_n423(x)
+ if (x < 1)
+ fun_l2_n408(x)
+ else
+ fun_l2_n187(x)
+ end
+end
+
+def fun_l1_n424(x)
+ if (x < 1)
+ fun_l2_n349(x)
+ else
+ fun_l2_n896(x)
+ end
+end
+
+def fun_l1_n425(x)
+ if (x < 1)
+ fun_l2_n950(x)
+ else
+ fun_l2_n186(x)
+ end
+end
+
+def fun_l1_n426(x)
+ if (x < 1)
+ fun_l2_n817(x)
+ else
+ fun_l2_n605(x)
+ end
+end
+
+def fun_l1_n427(x)
+ if (x < 1)
+ fun_l2_n317(x)
+ else
+ fun_l2_n31(x)
+ end
+end
+
+def fun_l1_n428(x)
+ if (x < 1)
+ fun_l2_n804(x)
+ else
+ fun_l2_n547(x)
+ end
+end
+
+def fun_l1_n429(x)
+ if (x < 1)
+ fun_l2_n730(x)
+ else
+ fun_l2_n711(x)
+ end
+end
+
+def fun_l1_n430(x)
+ if (x < 1)
+ fun_l2_n822(x)
+ else
+ fun_l2_n749(x)
+ end
+end
+
+def fun_l1_n431(x)
+ if (x < 1)
+ fun_l2_n412(x)
+ else
+ fun_l2_n366(x)
+ end
+end
+
+def fun_l1_n432(x)
+ if (x < 1)
+ fun_l2_n462(x)
+ else
+ fun_l2_n601(x)
+ end
+end
+
+def fun_l1_n433(x)
+ if (x < 1)
+ fun_l2_n971(x)
+ else
+ fun_l2_n878(x)
+ end
+end
+
+def fun_l1_n434(x)
+ if (x < 1)
+ fun_l2_n551(x)
+ else
+ fun_l2_n172(x)
+ end
+end
+
+def fun_l1_n435(x)
+ if (x < 1)
+ fun_l2_n914(x)
+ else
+ fun_l2_n640(x)
+ end
+end
+
+def fun_l1_n436(x)
+ if (x < 1)
+ fun_l2_n957(x)
+ else
+ fun_l2_n846(x)
+ end
+end
+
+def fun_l1_n437(x)
+ if (x < 1)
+ fun_l2_n622(x)
+ else
+ fun_l2_n995(x)
+ end
+end
+
+def fun_l1_n438(x)
+ if (x < 1)
+ fun_l2_n830(x)
+ else
+ fun_l2_n537(x)
+ end
+end
+
+def fun_l1_n439(x)
+ if (x < 1)
+ fun_l2_n415(x)
+ else
+ fun_l2_n863(x)
+ end
+end
+
+def fun_l1_n440(x)
+ if (x < 1)
+ fun_l2_n637(x)
+ else
+ fun_l2_n354(x)
+ end
+end
+
+def fun_l1_n441(x)
+ if (x < 1)
+ fun_l2_n525(x)
+ else
+ fun_l2_n656(x)
+ end
+end
+
+def fun_l1_n442(x)
+ if (x < 1)
+ fun_l2_n284(x)
+ else
+ fun_l2_n722(x)
+ end
+end
+
+def fun_l1_n443(x)
+ if (x < 1)
+ fun_l2_n485(x)
+ else
+ fun_l2_n794(x)
+ end
+end
+
+def fun_l1_n444(x)
+ if (x < 1)
+ fun_l2_n268(x)
+ else
+ fun_l2_n443(x)
+ end
+end
+
+def fun_l1_n445(x)
+ if (x < 1)
+ fun_l2_n743(x)
+ else
+ fun_l2_n723(x)
+ end
+end
+
+def fun_l1_n446(x)
+ if (x < 1)
+ fun_l2_n651(x)
+ else
+ fun_l2_n544(x)
+ end
+end
+
+def fun_l1_n447(x)
+ if (x < 1)
+ fun_l2_n936(x)
+ else
+ fun_l2_n50(x)
+ end
+end
+
+def fun_l1_n448(x)
+ if (x < 1)
+ fun_l2_n473(x)
+ else
+ fun_l2_n749(x)
+ end
+end
+
+def fun_l1_n449(x)
+ if (x < 1)
+ fun_l2_n218(x)
+ else
+ fun_l2_n266(x)
+ end
+end
+
+def fun_l1_n450(x)
+ if (x < 1)
+ fun_l2_n686(x)
+ else
+ fun_l2_n629(x)
+ end
+end
+
+def fun_l1_n451(x)
+ if (x < 1)
+ fun_l2_n648(x)
+ else
+ fun_l2_n322(x)
+ end
+end
+
+def fun_l1_n452(x)
+ if (x < 1)
+ fun_l2_n380(x)
+ else
+ fun_l2_n720(x)
+ end
+end
+
+def fun_l1_n453(x)
+ if (x < 1)
+ fun_l2_n53(x)
+ else
+ fun_l2_n830(x)
+ end
+end
+
+def fun_l1_n454(x)
+ if (x < 1)
+ fun_l2_n315(x)
+ else
+ fun_l2_n742(x)
+ end
+end
+
+def fun_l1_n455(x)
+ if (x < 1)
+ fun_l2_n902(x)
+ else
+ fun_l2_n111(x)
+ end
+end
+
+def fun_l1_n456(x)
+ if (x < 1)
+ fun_l2_n924(x)
+ else
+ fun_l2_n109(x)
+ end
+end
+
+def fun_l1_n457(x)
+ if (x < 1)
+ fun_l2_n249(x)
+ else
+ fun_l2_n865(x)
+ end
+end
+
+def fun_l1_n458(x)
+ if (x < 1)
+ fun_l2_n650(x)
+ else
+ fun_l2_n960(x)
+ end
+end
+
+def fun_l1_n459(x)
+ if (x < 1)
+ fun_l2_n273(x)
+ else
+ fun_l2_n544(x)
+ end
+end
+
+def fun_l1_n460(x)
+ if (x < 1)
+ fun_l2_n120(x)
+ else
+ fun_l2_n322(x)
+ end
+end
+
+def fun_l1_n461(x)
+ if (x < 1)
+ fun_l2_n366(x)
+ else
+ fun_l2_n534(x)
+ end
+end
+
+def fun_l1_n462(x)
+ if (x < 1)
+ fun_l2_n380(x)
+ else
+ fun_l2_n25(x)
+ end
+end
+
+def fun_l1_n463(x)
+ if (x < 1)
+ fun_l2_n333(x)
+ else
+ fun_l2_n303(x)
+ end
+end
+
+def fun_l1_n464(x)
+ if (x < 1)
+ fun_l2_n16(x)
+ else
+ fun_l2_n239(x)
+ end
+end
+
+def fun_l1_n465(x)
+ if (x < 1)
+ fun_l2_n539(x)
+ else
+ fun_l2_n988(x)
+ end
+end
+
+def fun_l1_n466(x)
+ if (x < 1)
+ fun_l2_n580(x)
+ else
+ fun_l2_n86(x)
+ end
+end
+
+def fun_l1_n467(x)
+ if (x < 1)
+ fun_l2_n406(x)
+ else
+ fun_l2_n706(x)
+ end
+end
+
+def fun_l1_n468(x)
+ if (x < 1)
+ fun_l2_n858(x)
+ else
+ fun_l2_n393(x)
+ end
+end
+
+def fun_l1_n469(x)
+ if (x < 1)
+ fun_l2_n64(x)
+ else
+ fun_l2_n482(x)
+ end
+end
+
+def fun_l1_n470(x)
+ if (x < 1)
+ fun_l2_n654(x)
+ else
+ fun_l2_n833(x)
+ end
+end
+
+def fun_l1_n471(x)
+ if (x < 1)
+ fun_l2_n161(x)
+ else
+ fun_l2_n91(x)
+ end
+end
+
+def fun_l1_n472(x)
+ if (x < 1)
+ fun_l2_n98(x)
+ else
+ fun_l2_n661(x)
+ end
+end
+
+def fun_l1_n473(x)
+ if (x < 1)
+ fun_l2_n574(x)
+ else
+ fun_l2_n133(x)
+ end
+end
+
+def fun_l1_n474(x)
+ if (x < 1)
+ fun_l2_n109(x)
+ else
+ fun_l2_n702(x)
+ end
+end
+
+def fun_l1_n475(x)
+ if (x < 1)
+ fun_l2_n808(x)
+ else
+ fun_l2_n584(x)
+ end
+end
+
+def fun_l1_n476(x)
+ if (x < 1)
+ fun_l2_n495(x)
+ else
+ fun_l2_n708(x)
+ end
+end
+
+def fun_l1_n477(x)
+ if (x < 1)
+ fun_l2_n649(x)
+ else
+ fun_l2_n290(x)
+ end
+end
+
+def fun_l1_n478(x)
+ if (x < 1)
+ fun_l2_n718(x)
+ else
+ fun_l2_n998(x)
+ end
+end
+
+def fun_l1_n479(x)
+ if (x < 1)
+ fun_l2_n557(x)
+ else
+ fun_l2_n359(x)
+ end
+end
+
+def fun_l1_n480(x)
+ if (x < 1)
+ fun_l2_n958(x)
+ else
+ fun_l2_n86(x)
+ end
+end
+
+def fun_l1_n481(x)
+ if (x < 1)
+ fun_l2_n245(x)
+ else
+ fun_l2_n124(x)
+ end
+end
+
+def fun_l1_n482(x)
+ if (x < 1)
+ fun_l2_n510(x)
+ else
+ fun_l2_n170(x)
+ end
+end
+
+def fun_l1_n483(x)
+ if (x < 1)
+ fun_l2_n550(x)
+ else
+ fun_l2_n9(x)
+ end
+end
+
+def fun_l1_n484(x)
+ if (x < 1)
+ fun_l2_n82(x)
+ else
+ fun_l2_n351(x)
+ end
+end
+
+def fun_l1_n485(x)
+ if (x < 1)
+ fun_l2_n760(x)
+ else
+ fun_l2_n560(x)
+ end
+end
+
+def fun_l1_n486(x)
+ if (x < 1)
+ fun_l2_n550(x)
+ else
+ fun_l2_n881(x)
+ end
+end
+
+def fun_l1_n487(x)
+ if (x < 1)
+ fun_l2_n62(x)
+ else
+ fun_l2_n213(x)
+ end
+end
+
+def fun_l1_n488(x)
+ if (x < 1)
+ fun_l2_n336(x)
+ else
+ fun_l2_n855(x)
+ end
+end
+
+def fun_l1_n489(x)
+ if (x < 1)
+ fun_l2_n172(x)
+ else
+ fun_l2_n479(x)
+ end
+end
+
+def fun_l1_n490(x)
+ if (x < 1)
+ fun_l2_n821(x)
+ else
+ fun_l2_n767(x)
+ end
+end
+
+def fun_l1_n491(x)
+ if (x < 1)
+ fun_l2_n388(x)
+ else
+ fun_l2_n307(x)
+ end
+end
+
+def fun_l1_n492(x)
+ if (x < 1)
+ fun_l2_n158(x)
+ else
+ fun_l2_n725(x)
+ end
+end
+
+def fun_l1_n493(x)
+ if (x < 1)
+ fun_l2_n681(x)
+ else
+ fun_l2_n767(x)
+ end
+end
+
+def fun_l1_n494(x)
+ if (x < 1)
+ fun_l2_n799(x)
+ else
+ fun_l2_n458(x)
+ end
+end
+
+def fun_l1_n495(x)
+ if (x < 1)
+ fun_l2_n379(x)
+ else
+ fun_l2_n152(x)
+ end
+end
+
+def fun_l1_n496(x)
+ if (x < 1)
+ fun_l2_n996(x)
+ else
+ fun_l2_n296(x)
+ end
+end
+
+def fun_l1_n497(x)
+ if (x < 1)
+ fun_l2_n752(x)
+ else
+ fun_l2_n481(x)
+ end
+end
+
+def fun_l1_n498(x)
+ if (x < 1)
+ fun_l2_n887(x)
+ else
+ fun_l2_n343(x)
+ end
+end
+
+def fun_l1_n499(x)
+ if (x < 1)
+ fun_l2_n481(x)
+ else
+ fun_l2_n368(x)
+ end
+end
+
+def fun_l1_n500(x)
+ if (x < 1)
+ fun_l2_n738(x)
+ else
+ fun_l2_n349(x)
+ end
+end
+
+def fun_l1_n501(x)
+ if (x < 1)
+ fun_l2_n104(x)
+ else
+ fun_l2_n841(x)
+ end
+end
+
+def fun_l1_n502(x)
+ if (x < 1)
+ fun_l2_n549(x)
+ else
+ fun_l2_n684(x)
+ end
+end
+
+def fun_l1_n503(x)
+ if (x < 1)
+ fun_l2_n726(x)
+ else
+ fun_l2_n41(x)
+ end
+end
+
+def fun_l1_n504(x)
+ if (x < 1)
+ fun_l2_n637(x)
+ else
+ fun_l2_n830(x)
+ end
+end
+
+def fun_l1_n505(x)
+ if (x < 1)
+ fun_l2_n38(x)
+ else
+ fun_l2_n815(x)
+ end
+end
+
+def fun_l1_n506(x)
+ if (x < 1)
+ fun_l2_n284(x)
+ else
+ fun_l2_n599(x)
+ end
+end
+
+def fun_l1_n507(x)
+ if (x < 1)
+ fun_l2_n665(x)
+ else
+ fun_l2_n763(x)
+ end
+end
+
+def fun_l1_n508(x)
+ if (x < 1)
+ fun_l2_n755(x)
+ else
+ fun_l2_n51(x)
+ end
+end
+
+def fun_l1_n509(x)
+ if (x < 1)
+ fun_l2_n860(x)
+ else
+ fun_l2_n273(x)
+ end
+end
+
+def fun_l1_n510(x)
+ if (x < 1)
+ fun_l2_n967(x)
+ else
+ fun_l2_n141(x)
+ end
+end
+
+def fun_l1_n511(x)
+ if (x < 1)
+ fun_l2_n637(x)
+ else
+ fun_l2_n953(x)
+ end
+end
+
+def fun_l1_n512(x)
+ if (x < 1)
+ fun_l2_n784(x)
+ else
+ fun_l2_n941(x)
+ end
+end
+
+def fun_l1_n513(x)
+ if (x < 1)
+ fun_l2_n499(x)
+ else
+ fun_l2_n189(x)
+ end
+end
+
+def fun_l1_n514(x)
+ if (x < 1)
+ fun_l2_n207(x)
+ else
+ fun_l2_n187(x)
+ end
+end
+
+def fun_l1_n515(x)
+ if (x < 1)
+ fun_l2_n174(x)
+ else
+ fun_l2_n316(x)
+ end
+end
+
+def fun_l1_n516(x)
+ if (x < 1)
+ fun_l2_n884(x)
+ else
+ fun_l2_n72(x)
+ end
+end
+
+def fun_l1_n517(x)
+ if (x < 1)
+ fun_l2_n878(x)
+ else
+ fun_l2_n423(x)
+ end
+end
+
+def fun_l1_n518(x)
+ if (x < 1)
+ fun_l2_n452(x)
+ else
+ fun_l2_n733(x)
+ end
+end
+
+def fun_l1_n519(x)
+ if (x < 1)
+ fun_l2_n586(x)
+ else
+ fun_l2_n987(x)
+ end
+end
+
+def fun_l1_n520(x)
+ if (x < 1)
+ fun_l2_n638(x)
+ else
+ fun_l2_n276(x)
+ end
+end
+
+def fun_l1_n521(x)
+ if (x < 1)
+ fun_l2_n186(x)
+ else
+ fun_l2_n498(x)
+ end
+end
+
+def fun_l1_n522(x)
+ if (x < 1)
+ fun_l2_n816(x)
+ else
+ fun_l2_n687(x)
+ end
+end
+
+def fun_l1_n523(x)
+ if (x < 1)
+ fun_l2_n572(x)
+ else
+ fun_l2_n878(x)
+ end
+end
+
+def fun_l1_n524(x)
+ if (x < 1)
+ fun_l2_n528(x)
+ else
+ fun_l2_n188(x)
+ end
+end
+
+def fun_l1_n525(x)
+ if (x < 1)
+ fun_l2_n719(x)
+ else
+ fun_l2_n146(x)
+ end
+end
+
+def fun_l1_n526(x)
+ if (x < 1)
+ fun_l2_n730(x)
+ else
+ fun_l2_n209(x)
+ end
+end
+
+def fun_l1_n527(x)
+ if (x < 1)
+ fun_l2_n998(x)
+ else
+ fun_l2_n544(x)
+ end
+end
+
+def fun_l1_n528(x)
+ if (x < 1)
+ fun_l2_n288(x)
+ else
+ fun_l2_n682(x)
+ end
+end
+
+def fun_l1_n529(x)
+ if (x < 1)
+ fun_l2_n808(x)
+ else
+ fun_l2_n651(x)
+ end
+end
+
+def fun_l1_n530(x)
+ if (x < 1)
+ fun_l2_n674(x)
+ else
+ fun_l2_n643(x)
+ end
+end
+
+def fun_l1_n531(x)
+ if (x < 1)
+ fun_l2_n42(x)
+ else
+ fun_l2_n457(x)
+ end
+end
+
+def fun_l1_n532(x)
+ if (x < 1)
+ fun_l2_n188(x)
+ else
+ fun_l2_n218(x)
+ end
+end
+
+def fun_l1_n533(x)
+ if (x < 1)
+ fun_l2_n529(x)
+ else
+ fun_l2_n541(x)
+ end
+end
+
+def fun_l1_n534(x)
+ if (x < 1)
+ fun_l2_n820(x)
+ else
+ fun_l2_n426(x)
+ end
+end
+
+def fun_l1_n535(x)
+ if (x < 1)
+ fun_l2_n730(x)
+ else
+ fun_l2_n36(x)
+ end
+end
+
+def fun_l1_n536(x)
+ if (x < 1)
+ fun_l2_n187(x)
+ else
+ fun_l2_n96(x)
+ end
+end
+
+def fun_l1_n537(x)
+ if (x < 1)
+ fun_l2_n1(x)
+ else
+ fun_l2_n905(x)
+ end
+end
+
+def fun_l1_n538(x)
+ if (x < 1)
+ fun_l2_n252(x)
+ else
+ fun_l2_n597(x)
+ end
+end
+
+def fun_l1_n539(x)
+ if (x < 1)
+ fun_l2_n959(x)
+ else
+ fun_l2_n26(x)
+ end
+end
+
+def fun_l1_n540(x)
+ if (x < 1)
+ fun_l2_n959(x)
+ else
+ fun_l2_n266(x)
+ end
+end
+
+def fun_l1_n541(x)
+ if (x < 1)
+ fun_l2_n902(x)
+ else
+ fun_l2_n957(x)
+ end
+end
+
+def fun_l1_n542(x)
+ if (x < 1)
+ fun_l2_n244(x)
+ else
+ fun_l2_n407(x)
+ end
+end
+
+def fun_l1_n543(x)
+ if (x < 1)
+ fun_l2_n319(x)
+ else
+ fun_l2_n19(x)
+ end
+end
+
+def fun_l1_n544(x)
+ if (x < 1)
+ fun_l2_n251(x)
+ else
+ fun_l2_n92(x)
+ end
+end
+
+def fun_l1_n545(x)
+ if (x < 1)
+ fun_l2_n482(x)
+ else
+ fun_l2_n177(x)
+ end
+end
+
+def fun_l1_n546(x)
+ if (x < 1)
+ fun_l2_n338(x)
+ else
+ fun_l2_n152(x)
+ end
+end
+
+def fun_l1_n547(x)
+ if (x < 1)
+ fun_l2_n902(x)
+ else
+ fun_l2_n243(x)
+ end
+end
+
+def fun_l1_n548(x)
+ if (x < 1)
+ fun_l2_n943(x)
+ else
+ fun_l2_n930(x)
+ end
+end
+
+def fun_l1_n549(x)
+ if (x < 1)
+ fun_l2_n775(x)
+ else
+ fun_l2_n338(x)
+ end
+end
+
+def fun_l1_n550(x)
+ if (x < 1)
+ fun_l2_n286(x)
+ else
+ fun_l2_n347(x)
+ end
+end
+
+def fun_l1_n551(x)
+ if (x < 1)
+ fun_l2_n611(x)
+ else
+ fun_l2_n108(x)
+ end
+end
+
+def fun_l1_n552(x)
+ if (x < 1)
+ fun_l2_n515(x)
+ else
+ fun_l2_n478(x)
+ end
+end
+
+def fun_l1_n553(x)
+ if (x < 1)
+ fun_l2_n68(x)
+ else
+ fun_l2_n943(x)
+ end
+end
+
+def fun_l1_n554(x)
+ if (x < 1)
+ fun_l2_n16(x)
+ else
+ fun_l2_n280(x)
+ end
+end
+
+def fun_l1_n555(x)
+ if (x < 1)
+ fun_l2_n421(x)
+ else
+ fun_l2_n412(x)
+ end
+end
+
+def fun_l1_n556(x)
+ if (x < 1)
+ fun_l2_n653(x)
+ else
+ fun_l2_n527(x)
+ end
+end
+
+def fun_l1_n557(x)
+ if (x < 1)
+ fun_l2_n181(x)
+ else
+ fun_l2_n179(x)
+ end
+end
+
+def fun_l1_n558(x)
+ if (x < 1)
+ fun_l2_n270(x)
+ else
+ fun_l2_n367(x)
+ end
+end
+
+def fun_l1_n559(x)
+ if (x < 1)
+ fun_l2_n323(x)
+ else
+ fun_l2_n453(x)
+ end
+end
+
+def fun_l1_n560(x)
+ if (x < 1)
+ fun_l2_n699(x)
+ else
+ fun_l2_n904(x)
+ end
+end
+
+def fun_l1_n561(x)
+ if (x < 1)
+ fun_l2_n903(x)
+ else
+ fun_l2_n559(x)
+ end
+end
+
+def fun_l1_n562(x)
+ if (x < 1)
+ fun_l2_n573(x)
+ else
+ fun_l2_n880(x)
+ end
+end
+
+def fun_l1_n563(x)
+ if (x < 1)
+ fun_l2_n674(x)
+ else
+ fun_l2_n420(x)
+ end
+end
+
+def fun_l1_n564(x)
+ if (x < 1)
+ fun_l2_n76(x)
+ else
+ fun_l2_n702(x)
+ end
+end
+
+def fun_l1_n565(x)
+ if (x < 1)
+ fun_l2_n78(x)
+ else
+ fun_l2_n741(x)
+ end
+end
+
+def fun_l1_n566(x)
+ if (x < 1)
+ fun_l2_n542(x)
+ else
+ fun_l2_n762(x)
+ end
+end
+
+def fun_l1_n567(x)
+ if (x < 1)
+ fun_l2_n992(x)
+ else
+ fun_l2_n416(x)
+ end
+end
+
+def fun_l1_n568(x)
+ if (x < 1)
+ fun_l2_n510(x)
+ else
+ fun_l2_n68(x)
+ end
+end
+
+def fun_l1_n569(x)
+ if (x < 1)
+ fun_l2_n36(x)
+ else
+ fun_l2_n152(x)
+ end
+end
+
+def fun_l1_n570(x)
+ if (x < 1)
+ fun_l2_n243(x)
+ else
+ fun_l2_n421(x)
+ end
+end
+
+def fun_l1_n571(x)
+ if (x < 1)
+ fun_l2_n48(x)
+ else
+ fun_l2_n50(x)
+ end
+end
+
+def fun_l1_n572(x)
+ if (x < 1)
+ fun_l2_n647(x)
+ else
+ fun_l2_n614(x)
+ end
+end
+
+def fun_l1_n573(x)
+ if (x < 1)
+ fun_l2_n847(x)
+ else
+ fun_l2_n598(x)
+ end
+end
+
+def fun_l1_n574(x)
+ if (x < 1)
+ fun_l2_n462(x)
+ else
+ fun_l2_n492(x)
+ end
+end
+
+def fun_l1_n575(x)
+ if (x < 1)
+ fun_l2_n805(x)
+ else
+ fun_l2_n23(x)
+ end
+end
+
+def fun_l1_n576(x)
+ if (x < 1)
+ fun_l2_n660(x)
+ else
+ fun_l2_n271(x)
+ end
+end
+
+def fun_l1_n577(x)
+ if (x < 1)
+ fun_l2_n277(x)
+ else
+ fun_l2_n259(x)
+ end
+end
+
+def fun_l1_n578(x)
+ if (x < 1)
+ fun_l2_n645(x)
+ else
+ fun_l2_n638(x)
+ end
+end
+
+def fun_l1_n579(x)
+ if (x < 1)
+ fun_l2_n925(x)
+ else
+ fun_l2_n801(x)
+ end
+end
+
+def fun_l1_n580(x)
+ if (x < 1)
+ fun_l2_n52(x)
+ else
+ fun_l2_n703(x)
+ end
+end
+
+def fun_l1_n581(x)
+ if (x < 1)
+ fun_l2_n126(x)
+ else
+ fun_l2_n511(x)
+ end
+end
+
+def fun_l1_n582(x)
+ if (x < 1)
+ fun_l2_n663(x)
+ else
+ fun_l2_n158(x)
+ end
+end
+
+def fun_l1_n583(x)
+ if (x < 1)
+ fun_l2_n604(x)
+ else
+ fun_l2_n498(x)
+ end
+end
+
+def fun_l1_n584(x)
+ if (x < 1)
+ fun_l2_n889(x)
+ else
+ fun_l2_n197(x)
+ end
+end
+
+def fun_l1_n585(x)
+ if (x < 1)
+ fun_l2_n28(x)
+ else
+ fun_l2_n298(x)
+ end
+end
+
+def fun_l1_n586(x)
+ if (x < 1)
+ fun_l2_n165(x)
+ else
+ fun_l2_n340(x)
+ end
+end
+
+def fun_l1_n587(x)
+ if (x < 1)
+ fun_l2_n89(x)
+ else
+ fun_l2_n947(x)
+ end
+end
+
+def fun_l1_n588(x)
+ if (x < 1)
+ fun_l2_n151(x)
+ else
+ fun_l2_n163(x)
+ end
+end
+
+def fun_l1_n589(x)
+ if (x < 1)
+ fun_l2_n200(x)
+ else
+ fun_l2_n855(x)
+ end
+end
+
+def fun_l1_n590(x)
+ if (x < 1)
+ fun_l2_n749(x)
+ else
+ fun_l2_n232(x)
+ end
+end
+
+def fun_l1_n591(x)
+ if (x < 1)
+ fun_l2_n850(x)
+ else
+ fun_l2_n897(x)
+ end
+end
+
+def fun_l1_n592(x)
+ if (x < 1)
+ fun_l2_n287(x)
+ else
+ fun_l2_n266(x)
+ end
+end
+
+def fun_l1_n593(x)
+ if (x < 1)
+ fun_l2_n547(x)
+ else
+ fun_l2_n937(x)
+ end
+end
+
+def fun_l1_n594(x)
+ if (x < 1)
+ fun_l2_n517(x)
+ else
+ fun_l2_n602(x)
+ end
+end
+
+def fun_l1_n595(x)
+ if (x < 1)
+ fun_l2_n962(x)
+ else
+ fun_l2_n387(x)
+ end
+end
+
+def fun_l1_n596(x)
+ if (x < 1)
+ fun_l2_n959(x)
+ else
+ fun_l2_n885(x)
+ end
+end
+
+def fun_l1_n597(x)
+ if (x < 1)
+ fun_l2_n343(x)
+ else
+ fun_l2_n704(x)
+ end
+end
+
+def fun_l1_n598(x)
+ if (x < 1)
+ fun_l2_n256(x)
+ else
+ fun_l2_n213(x)
+ end
+end
+
+def fun_l1_n599(x)
+ if (x < 1)
+ fun_l2_n653(x)
+ else
+ fun_l2_n630(x)
+ end
+end
+
+def fun_l1_n600(x)
+ if (x < 1)
+ fun_l2_n587(x)
+ else
+ fun_l2_n899(x)
+ end
+end
+
+def fun_l1_n601(x)
+ if (x < 1)
+ fun_l2_n798(x)
+ else
+ fun_l2_n318(x)
+ end
+end
+
+def fun_l1_n602(x)
+ if (x < 1)
+ fun_l2_n286(x)
+ else
+ fun_l2_n586(x)
+ end
+end
+
+def fun_l1_n603(x)
+ if (x < 1)
+ fun_l2_n931(x)
+ else
+ fun_l2_n370(x)
+ end
+end
+
+def fun_l1_n604(x)
+ if (x < 1)
+ fun_l2_n311(x)
+ else
+ fun_l2_n476(x)
+ end
+end
+
+def fun_l1_n605(x)
+ if (x < 1)
+ fun_l2_n998(x)
+ else
+ fun_l2_n953(x)
+ end
+end
+
+def fun_l1_n606(x)
+ if (x < 1)
+ fun_l2_n782(x)
+ else
+ fun_l2_n225(x)
+ end
+end
+
+def fun_l1_n607(x)
+ if (x < 1)
+ fun_l2_n696(x)
+ else
+ fun_l2_n797(x)
+ end
+end
+
+def fun_l1_n608(x)
+ if (x < 1)
+ fun_l2_n344(x)
+ else
+ fun_l2_n247(x)
+ end
+end
+
+def fun_l1_n609(x)
+ if (x < 1)
+ fun_l2_n627(x)
+ else
+ fun_l2_n410(x)
+ end
+end
+
+def fun_l1_n610(x)
+ if (x < 1)
+ fun_l2_n431(x)
+ else
+ fun_l2_n56(x)
+ end
+end
+
+def fun_l1_n611(x)
+ if (x < 1)
+ fun_l2_n307(x)
+ else
+ fun_l2_n857(x)
+ end
+end
+
+def fun_l1_n612(x)
+ if (x < 1)
+ fun_l2_n411(x)
+ else
+ fun_l2_n59(x)
+ end
+end
+
+def fun_l1_n613(x)
+ if (x < 1)
+ fun_l2_n38(x)
+ else
+ fun_l2_n880(x)
+ end
+end
+
+def fun_l1_n614(x)
+ if (x < 1)
+ fun_l2_n945(x)
+ else
+ fun_l2_n723(x)
+ end
+end
+
+def fun_l1_n615(x)
+ if (x < 1)
+ fun_l2_n248(x)
+ else
+ fun_l2_n56(x)
+ end
+end
+
+def fun_l1_n616(x)
+ if (x < 1)
+ fun_l2_n453(x)
+ else
+ fun_l2_n4(x)
+ end
+end
+
+def fun_l1_n617(x)
+ if (x < 1)
+ fun_l2_n599(x)
+ else
+ fun_l2_n966(x)
+ end
+end
+
+def fun_l1_n618(x)
+ if (x < 1)
+ fun_l2_n896(x)
+ else
+ fun_l2_n666(x)
+ end
+end
+
+def fun_l1_n619(x)
+ if (x < 1)
+ fun_l2_n715(x)
+ else
+ fun_l2_n918(x)
+ end
+end
+
+def fun_l1_n620(x)
+ if (x < 1)
+ fun_l2_n663(x)
+ else
+ fun_l2_n144(x)
+ end
+end
+
+def fun_l1_n621(x)
+ if (x < 1)
+ fun_l2_n17(x)
+ else
+ fun_l2_n411(x)
+ end
+end
+
+def fun_l1_n622(x)
+ if (x < 1)
+ fun_l2_n851(x)
+ else
+ fun_l2_n858(x)
+ end
+end
+
+def fun_l1_n623(x)
+ if (x < 1)
+ fun_l2_n552(x)
+ else
+ fun_l2_n503(x)
+ end
+end
+
+def fun_l1_n624(x)
+ if (x < 1)
+ fun_l2_n822(x)
+ else
+ fun_l2_n239(x)
+ end
+end
+
+def fun_l1_n625(x)
+ if (x < 1)
+ fun_l2_n917(x)
+ else
+ fun_l2_n120(x)
+ end
+end
+
+def fun_l1_n626(x)
+ if (x < 1)
+ fun_l2_n640(x)
+ else
+ fun_l2_n603(x)
+ end
+end
+
+def fun_l1_n627(x)
+ if (x < 1)
+ fun_l2_n244(x)
+ else
+ fun_l2_n114(x)
+ end
+end
+
+def fun_l1_n628(x)
+ if (x < 1)
+ fun_l2_n374(x)
+ else
+ fun_l2_n295(x)
+ end
+end
+
+def fun_l1_n629(x)
+ if (x < 1)
+ fun_l2_n761(x)
+ else
+ fun_l2_n920(x)
+ end
+end
+
+def fun_l1_n630(x)
+ if (x < 1)
+ fun_l2_n314(x)
+ else
+ fun_l2_n571(x)
+ end
+end
+
+def fun_l1_n631(x)
+ if (x < 1)
+ fun_l2_n830(x)
+ else
+ fun_l2_n838(x)
+ end
+end
+
+def fun_l1_n632(x)
+ if (x < 1)
+ fun_l2_n523(x)
+ else
+ fun_l2_n453(x)
+ end
+end
+
+def fun_l1_n633(x)
+ if (x < 1)
+ fun_l2_n245(x)
+ else
+ fun_l2_n173(x)
+ end
+end
+
+def fun_l1_n634(x)
+ if (x < 1)
+ fun_l2_n939(x)
+ else
+ fun_l2_n774(x)
+ end
+end
+
+def fun_l1_n635(x)
+ if (x < 1)
+ fun_l2_n17(x)
+ else
+ fun_l2_n166(x)
+ end
+end
+
+def fun_l1_n636(x)
+ if (x < 1)
+ fun_l2_n443(x)
+ else
+ fun_l2_n297(x)
+ end
+end
+
+def fun_l1_n637(x)
+ if (x < 1)
+ fun_l2_n280(x)
+ else
+ fun_l2_n776(x)
+ end
+end
+
+def fun_l1_n638(x)
+ if (x < 1)
+ fun_l2_n761(x)
+ else
+ fun_l2_n866(x)
+ end
+end
+
+def fun_l1_n639(x)
+ if (x < 1)
+ fun_l2_n677(x)
+ else
+ fun_l2_n855(x)
+ end
+end
+
+def fun_l1_n640(x)
+ if (x < 1)
+ fun_l2_n252(x)
+ else
+ fun_l2_n766(x)
+ end
+end
+
+def fun_l1_n641(x)
+ if (x < 1)
+ fun_l2_n3(x)
+ else
+ fun_l2_n115(x)
+ end
+end
+
+def fun_l1_n642(x)
+ if (x < 1)
+ fun_l2_n495(x)
+ else
+ fun_l2_n340(x)
+ end
+end
+
+def fun_l1_n643(x)
+ if (x < 1)
+ fun_l2_n991(x)
+ else
+ fun_l2_n119(x)
+ end
+end
+
+def fun_l1_n644(x)
+ if (x < 1)
+ fun_l2_n379(x)
+ else
+ fun_l2_n519(x)
+ end
+end
+
+def fun_l1_n645(x)
+ if (x < 1)
+ fun_l2_n191(x)
+ else
+ fun_l2_n872(x)
+ end
+end
+
+def fun_l1_n646(x)
+ if (x < 1)
+ fun_l2_n979(x)
+ else
+ fun_l2_n980(x)
+ end
+end
+
+def fun_l1_n647(x)
+ if (x < 1)
+ fun_l2_n918(x)
+ else
+ fun_l2_n455(x)
+ end
+end
+
+def fun_l1_n648(x)
+ if (x < 1)
+ fun_l2_n966(x)
+ else
+ fun_l2_n529(x)
+ end
+end
+
+def fun_l1_n649(x)
+ if (x < 1)
+ fun_l2_n838(x)
+ else
+ fun_l2_n529(x)
+ end
+end
+
+def fun_l1_n650(x)
+ if (x < 1)
+ fun_l2_n542(x)
+ else
+ fun_l2_n42(x)
+ end
+end
+
+def fun_l1_n651(x)
+ if (x < 1)
+ fun_l2_n65(x)
+ else
+ fun_l2_n460(x)
+ end
+end
+
+def fun_l1_n652(x)
+ if (x < 1)
+ fun_l2_n479(x)
+ else
+ fun_l2_n251(x)
+ end
+end
+
+def fun_l1_n653(x)
+ if (x < 1)
+ fun_l2_n243(x)
+ else
+ fun_l2_n77(x)
+ end
+end
+
+def fun_l1_n654(x)
+ if (x < 1)
+ fun_l2_n392(x)
+ else
+ fun_l2_n940(x)
+ end
+end
+
+def fun_l1_n655(x)
+ if (x < 1)
+ fun_l2_n875(x)
+ else
+ fun_l2_n157(x)
+ end
+end
+
+def fun_l1_n656(x)
+ if (x < 1)
+ fun_l2_n279(x)
+ else
+ fun_l2_n237(x)
+ end
+end
+
+def fun_l1_n657(x)
+ if (x < 1)
+ fun_l2_n522(x)
+ else
+ fun_l2_n174(x)
+ end
+end
+
+def fun_l1_n658(x)
+ if (x < 1)
+ fun_l2_n669(x)
+ else
+ fun_l2_n399(x)
+ end
+end
+
+def fun_l1_n659(x)
+ if (x < 1)
+ fun_l2_n251(x)
+ else
+ fun_l2_n56(x)
+ end
+end
+
+def fun_l1_n660(x)
+ if (x < 1)
+ fun_l2_n563(x)
+ else
+ fun_l2_n731(x)
+ end
+end
+
+def fun_l1_n661(x)
+ if (x < 1)
+ fun_l2_n684(x)
+ else
+ fun_l2_n292(x)
+ end
+end
+
+def fun_l1_n662(x)
+ if (x < 1)
+ fun_l2_n952(x)
+ else
+ fun_l2_n1(x)
+ end
+end
+
+def fun_l1_n663(x)
+ if (x < 1)
+ fun_l2_n89(x)
+ else
+ fun_l2_n562(x)
+ end
+end
+
+def fun_l1_n664(x)
+ if (x < 1)
+ fun_l2_n480(x)
+ else
+ fun_l2_n488(x)
+ end
+end
+
+def fun_l1_n665(x)
+ if (x < 1)
+ fun_l2_n398(x)
+ else
+ fun_l2_n493(x)
+ end
+end
+
+def fun_l1_n666(x)
+ if (x < 1)
+ fun_l2_n778(x)
+ else
+ fun_l2_n702(x)
+ end
+end
+
+def fun_l1_n667(x)
+ if (x < 1)
+ fun_l2_n332(x)
+ else
+ fun_l2_n441(x)
+ end
+end
+
+def fun_l1_n668(x)
+ if (x < 1)
+ fun_l2_n866(x)
+ else
+ fun_l2_n442(x)
+ end
+end
+
+def fun_l1_n669(x)
+ if (x < 1)
+ fun_l2_n533(x)
+ else
+ fun_l2_n970(x)
+ end
+end
+
+def fun_l1_n670(x)
+ if (x < 1)
+ fun_l2_n926(x)
+ else
+ fun_l2_n883(x)
+ end
+end
+
+def fun_l1_n671(x)
+ if (x < 1)
+ fun_l2_n204(x)
+ else
+ fun_l2_n769(x)
+ end
+end
+
+def fun_l1_n672(x)
+ if (x < 1)
+ fun_l2_n663(x)
+ else
+ fun_l2_n367(x)
+ end
+end
+
+def fun_l1_n673(x)
+ if (x < 1)
+ fun_l2_n978(x)
+ else
+ fun_l2_n750(x)
+ end
+end
+
+def fun_l1_n674(x)
+ if (x < 1)
+ fun_l2_n296(x)
+ else
+ fun_l2_n608(x)
+ end
+end
+
+def fun_l1_n675(x)
+ if (x < 1)
+ fun_l2_n35(x)
+ else
+ fun_l2_n200(x)
+ end
+end
+
+def fun_l1_n676(x)
+ if (x < 1)
+ fun_l2_n618(x)
+ else
+ fun_l2_n286(x)
+ end
+end
+
+def fun_l1_n677(x)
+ if (x < 1)
+ fun_l2_n98(x)
+ else
+ fun_l2_n264(x)
+ end
+end
+
+def fun_l1_n678(x)
+ if (x < 1)
+ fun_l2_n967(x)
+ else
+ fun_l2_n884(x)
+ end
+end
+
+def fun_l1_n679(x)
+ if (x < 1)
+ fun_l2_n351(x)
+ else
+ fun_l2_n854(x)
+ end
+end
+
+def fun_l1_n680(x)
+ if (x < 1)
+ fun_l2_n397(x)
+ else
+ fun_l2_n56(x)
+ end
+end
+
+def fun_l1_n681(x)
+ if (x < 1)
+ fun_l2_n232(x)
+ else
+ fun_l2_n400(x)
+ end
+end
+
+def fun_l1_n682(x)
+ if (x < 1)
+ fun_l2_n321(x)
+ else
+ fun_l2_n500(x)
+ end
+end
+
+def fun_l1_n683(x)
+ if (x < 1)
+ fun_l2_n315(x)
+ else
+ fun_l2_n509(x)
+ end
+end
+
+def fun_l1_n684(x)
+ if (x < 1)
+ fun_l2_n854(x)
+ else
+ fun_l2_n921(x)
+ end
+end
+
+def fun_l1_n685(x)
+ if (x < 1)
+ fun_l2_n823(x)
+ else
+ fun_l2_n995(x)
+ end
+end
+
+def fun_l1_n686(x)
+ if (x < 1)
+ fun_l2_n42(x)
+ else
+ fun_l2_n92(x)
+ end
+end
+
+def fun_l1_n687(x)
+ if (x < 1)
+ fun_l2_n74(x)
+ else
+ fun_l2_n577(x)
+ end
+end
+
+def fun_l1_n688(x)
+ if (x < 1)
+ fun_l2_n47(x)
+ else
+ fun_l2_n664(x)
+ end
+end
+
+def fun_l1_n689(x)
+ if (x < 1)
+ fun_l2_n709(x)
+ else
+ fun_l2_n92(x)
+ end
+end
+
+def fun_l1_n690(x)
+ if (x < 1)
+ fun_l2_n875(x)
+ else
+ fun_l2_n893(x)
+ end
+end
+
+def fun_l1_n691(x)
+ if (x < 1)
+ fun_l2_n934(x)
+ else
+ fun_l2_n339(x)
+ end
+end
+
+def fun_l1_n692(x)
+ if (x < 1)
+ fun_l2_n194(x)
+ else
+ fun_l2_n915(x)
+ end
+end
+
+def fun_l1_n693(x)
+ if (x < 1)
+ fun_l2_n352(x)
+ else
+ fun_l2_n212(x)
+ end
+end
+
+def fun_l1_n694(x)
+ if (x < 1)
+ fun_l2_n554(x)
+ else
+ fun_l2_n461(x)
+ end
+end
+
+def fun_l1_n695(x)
+ if (x < 1)
+ fun_l2_n372(x)
+ else
+ fun_l2_n652(x)
+ end
+end
+
+def fun_l1_n696(x)
+ if (x < 1)
+ fun_l2_n778(x)
+ else
+ fun_l2_n350(x)
+ end
+end
+
+def fun_l1_n697(x)
+ if (x < 1)
+ fun_l2_n656(x)
+ else
+ fun_l2_n467(x)
+ end
+end
+
+def fun_l1_n698(x)
+ if (x < 1)
+ fun_l2_n799(x)
+ else
+ fun_l2_n983(x)
+ end
+end
+
+def fun_l1_n699(x)
+ if (x < 1)
+ fun_l2_n917(x)
+ else
+ fun_l2_n801(x)
+ end
+end
+
+def fun_l1_n700(x)
+ if (x < 1)
+ fun_l2_n276(x)
+ else
+ fun_l2_n778(x)
+ end
+end
+
+def fun_l1_n701(x)
+ if (x < 1)
+ fun_l2_n562(x)
+ else
+ fun_l2_n558(x)
+ end
+end
+
+def fun_l1_n702(x)
+ if (x < 1)
+ fun_l2_n600(x)
+ else
+ fun_l2_n120(x)
+ end
+end
+
+def fun_l1_n703(x)
+ if (x < 1)
+ fun_l2_n346(x)
+ else
+ fun_l2_n975(x)
+ end
+end
+
+def fun_l1_n704(x)
+ if (x < 1)
+ fun_l2_n139(x)
+ else
+ fun_l2_n241(x)
+ end
+end
+
+def fun_l1_n705(x)
+ if (x < 1)
+ fun_l2_n728(x)
+ else
+ fun_l2_n62(x)
+ end
+end
+
+def fun_l1_n706(x)
+ if (x < 1)
+ fun_l2_n719(x)
+ else
+ fun_l2_n955(x)
+ end
+end
+
+def fun_l1_n707(x)
+ if (x < 1)
+ fun_l2_n752(x)
+ else
+ fun_l2_n122(x)
+ end
+end
+
+def fun_l1_n708(x)
+ if (x < 1)
+ fun_l2_n456(x)
+ else
+ fun_l2_n128(x)
+ end
+end
+
+def fun_l1_n709(x)
+ if (x < 1)
+ fun_l2_n4(x)
+ else
+ fun_l2_n399(x)
+ end
+end
+
+def fun_l1_n710(x)
+ if (x < 1)
+ fun_l2_n20(x)
+ else
+ fun_l2_n746(x)
+ end
+end
+
+def fun_l1_n711(x)
+ if (x < 1)
+ fun_l2_n978(x)
+ else
+ fun_l2_n886(x)
+ end
+end
+
+def fun_l1_n712(x)
+ if (x < 1)
+ fun_l2_n417(x)
+ else
+ fun_l2_n337(x)
+ end
+end
+
+def fun_l1_n713(x)
+ if (x < 1)
+ fun_l2_n394(x)
+ else
+ fun_l2_n713(x)
+ end
+end
+
+def fun_l1_n714(x)
+ if (x < 1)
+ fun_l2_n433(x)
+ else
+ fun_l2_n985(x)
+ end
+end
+
+def fun_l1_n715(x)
+ if (x < 1)
+ fun_l2_n937(x)
+ else
+ fun_l2_n709(x)
+ end
+end
+
+def fun_l1_n716(x)
+ if (x < 1)
+ fun_l2_n963(x)
+ else
+ fun_l2_n842(x)
+ end
+end
+
+def fun_l1_n717(x)
+ if (x < 1)
+ fun_l2_n671(x)
+ else
+ fun_l2_n954(x)
+ end
+end
+
+def fun_l1_n718(x)
+ if (x < 1)
+ fun_l2_n948(x)
+ else
+ fun_l2_n128(x)
+ end
+end
+
+def fun_l1_n719(x)
+ if (x < 1)
+ fun_l2_n682(x)
+ else
+ fun_l2_n973(x)
+ end
+end
+
+def fun_l1_n720(x)
+ if (x < 1)
+ fun_l2_n336(x)
+ else
+ fun_l2_n643(x)
+ end
+end
+
+def fun_l1_n721(x)
+ if (x < 1)
+ fun_l2_n224(x)
+ else
+ fun_l2_n96(x)
+ end
+end
+
+def fun_l1_n722(x)
+ if (x < 1)
+ fun_l2_n907(x)
+ else
+ fun_l2_n776(x)
+ end
+end
+
+def fun_l1_n723(x)
+ if (x < 1)
+ fun_l2_n93(x)
+ else
+ fun_l2_n410(x)
+ end
+end
+
+def fun_l1_n724(x)
+ if (x < 1)
+ fun_l2_n282(x)
+ else
+ fun_l2_n134(x)
+ end
+end
+
+def fun_l1_n725(x)
+ if (x < 1)
+ fun_l2_n938(x)
+ else
+ fun_l2_n603(x)
+ end
+end
+
+def fun_l1_n726(x)
+ if (x < 1)
+ fun_l2_n574(x)
+ else
+ fun_l2_n976(x)
+ end
+end
+
+def fun_l1_n727(x)
+ if (x < 1)
+ fun_l2_n428(x)
+ else
+ fun_l2_n658(x)
+ end
+end
+
+def fun_l1_n728(x)
+ if (x < 1)
+ fun_l2_n828(x)
+ else
+ fun_l2_n226(x)
+ end
+end
+
+def fun_l1_n729(x)
+ if (x < 1)
+ fun_l2_n663(x)
+ else
+ fun_l2_n292(x)
+ end
+end
+
+def fun_l1_n730(x)
+ if (x < 1)
+ fun_l2_n802(x)
+ else
+ fun_l2_n853(x)
+ end
+end
+
+def fun_l1_n731(x)
+ if (x < 1)
+ fun_l2_n142(x)
+ else
+ fun_l2_n339(x)
+ end
+end
+
+def fun_l1_n732(x)
+ if (x < 1)
+ fun_l2_n211(x)
+ else
+ fun_l2_n35(x)
+ end
+end
+
+def fun_l1_n733(x)
+ if (x < 1)
+ fun_l2_n660(x)
+ else
+ fun_l2_n179(x)
+ end
+end
+
+def fun_l1_n734(x)
+ if (x < 1)
+ fun_l2_n174(x)
+ else
+ fun_l2_n224(x)
+ end
+end
+
+def fun_l1_n735(x)
+ if (x < 1)
+ fun_l2_n490(x)
+ else
+ fun_l2_n564(x)
+ end
+end
+
+def fun_l1_n736(x)
+ if (x < 1)
+ fun_l2_n597(x)
+ else
+ fun_l2_n153(x)
+ end
+end
+
+def fun_l1_n737(x)
+ if (x < 1)
+ fun_l2_n633(x)
+ else
+ fun_l2_n358(x)
+ end
+end
+
+def fun_l1_n738(x)
+ if (x < 1)
+ fun_l2_n0(x)
+ else
+ fun_l2_n858(x)
+ end
+end
+
+def fun_l1_n739(x)
+ if (x < 1)
+ fun_l2_n335(x)
+ else
+ fun_l2_n531(x)
+ end
+end
+
+def fun_l1_n740(x)
+ if (x < 1)
+ fun_l2_n736(x)
+ else
+ fun_l2_n653(x)
+ end
+end
+
+def fun_l1_n741(x)
+ if (x < 1)
+ fun_l2_n280(x)
+ else
+ fun_l2_n684(x)
+ end
+end
+
+def fun_l1_n742(x)
+ if (x < 1)
+ fun_l2_n976(x)
+ else
+ fun_l2_n987(x)
+ end
+end
+
+def fun_l1_n743(x)
+ if (x < 1)
+ fun_l2_n519(x)
+ else
+ fun_l2_n633(x)
+ end
+end
+
+def fun_l1_n744(x)
+ if (x < 1)
+ fun_l2_n314(x)
+ else
+ fun_l2_n579(x)
+ end
+end
+
+def fun_l1_n745(x)
+ if (x < 1)
+ fun_l2_n365(x)
+ else
+ fun_l2_n972(x)
+ end
+end
+
+def fun_l1_n746(x)
+ if (x < 1)
+ fun_l2_n902(x)
+ else
+ fun_l2_n715(x)
+ end
+end
+
+def fun_l1_n747(x)
+ if (x < 1)
+ fun_l2_n363(x)
+ else
+ fun_l2_n407(x)
+ end
+end
+
+def fun_l1_n748(x)
+ if (x < 1)
+ fun_l2_n660(x)
+ else
+ fun_l2_n814(x)
+ end
+end
+
+def fun_l1_n749(x)
+ if (x < 1)
+ fun_l2_n415(x)
+ else
+ fun_l2_n162(x)
+ end
+end
+
+def fun_l1_n750(x)
+ if (x < 1)
+ fun_l2_n157(x)
+ else
+ fun_l2_n406(x)
+ end
+end
+
+def fun_l1_n751(x)
+ if (x < 1)
+ fun_l2_n388(x)
+ else
+ fun_l2_n212(x)
+ end
+end
+
+def fun_l1_n752(x)
+ if (x < 1)
+ fun_l2_n733(x)
+ else
+ fun_l2_n283(x)
+ end
+end
+
+def fun_l1_n753(x)
+ if (x < 1)
+ fun_l2_n960(x)
+ else
+ fun_l2_n20(x)
+ end
+end
+
+def fun_l1_n754(x)
+ if (x < 1)
+ fun_l2_n797(x)
+ else
+ fun_l2_n202(x)
+ end
+end
+
+def fun_l1_n755(x)
+ if (x < 1)
+ fun_l2_n113(x)
+ else
+ fun_l2_n953(x)
+ end
+end
+
+def fun_l1_n756(x)
+ if (x < 1)
+ fun_l2_n84(x)
+ else
+ fun_l2_n390(x)
+ end
+end
+
+def fun_l1_n757(x)
+ if (x < 1)
+ fun_l2_n355(x)
+ else
+ fun_l2_n761(x)
+ end
+end
+
+def fun_l1_n758(x)
+ if (x < 1)
+ fun_l2_n823(x)
+ else
+ fun_l2_n37(x)
+ end
+end
+
+def fun_l1_n759(x)
+ if (x < 1)
+ fun_l2_n428(x)
+ else
+ fun_l2_n130(x)
+ end
+end
+
+def fun_l1_n760(x)
+ if (x < 1)
+ fun_l2_n364(x)
+ else
+ fun_l2_n680(x)
+ end
+end
+
+def fun_l1_n761(x)
+ if (x < 1)
+ fun_l2_n718(x)
+ else
+ fun_l2_n885(x)
+ end
+end
+
+def fun_l1_n762(x)
+ if (x < 1)
+ fun_l2_n795(x)
+ else
+ fun_l2_n342(x)
+ end
+end
+
+def fun_l1_n763(x)
+ if (x < 1)
+ fun_l2_n561(x)
+ else
+ fun_l2_n593(x)
+ end
+end
+
+def fun_l1_n764(x)
+ if (x < 1)
+ fun_l2_n714(x)
+ else
+ fun_l2_n168(x)
+ end
+end
+
+def fun_l1_n765(x)
+ if (x < 1)
+ fun_l2_n377(x)
+ else
+ fun_l2_n109(x)
+ end
+end
+
+def fun_l1_n766(x)
+ if (x < 1)
+ fun_l2_n477(x)
+ else
+ fun_l2_n871(x)
+ end
+end
+
+def fun_l1_n767(x)
+ if (x < 1)
+ fun_l2_n596(x)
+ else
+ fun_l2_n81(x)
+ end
+end
+
+def fun_l1_n768(x)
+ if (x < 1)
+ fun_l2_n609(x)
+ else
+ fun_l2_n454(x)
+ end
+end
+
+def fun_l1_n769(x)
+ if (x < 1)
+ fun_l2_n308(x)
+ else
+ fun_l2_n890(x)
+ end
+end
+
+def fun_l1_n770(x)
+ if (x < 1)
+ fun_l2_n761(x)
+ else
+ fun_l2_n342(x)
+ end
+end
+
+def fun_l1_n771(x)
+ if (x < 1)
+ fun_l2_n44(x)
+ else
+ fun_l2_n1(x)
+ end
+end
+
+def fun_l1_n772(x)
+ if (x < 1)
+ fun_l2_n457(x)
+ else
+ fun_l2_n571(x)
+ end
+end
+
+def fun_l1_n773(x)
+ if (x < 1)
+ fun_l2_n88(x)
+ else
+ fun_l2_n622(x)
+ end
+end
+
+def fun_l1_n774(x)
+ if (x < 1)
+ fun_l2_n459(x)
+ else
+ fun_l2_n446(x)
+ end
+end
+
+def fun_l1_n775(x)
+ if (x < 1)
+ fun_l2_n400(x)
+ else
+ fun_l2_n918(x)
+ end
+end
+
+def fun_l1_n776(x)
+ if (x < 1)
+ fun_l2_n752(x)
+ else
+ fun_l2_n696(x)
+ end
+end
+
+def fun_l1_n777(x)
+ if (x < 1)
+ fun_l2_n968(x)
+ else
+ fun_l2_n988(x)
+ end
+end
+
+def fun_l1_n778(x)
+ if (x < 1)
+ fun_l2_n242(x)
+ else
+ fun_l2_n496(x)
+ end
+end
+
+def fun_l1_n779(x)
+ if (x < 1)
+ fun_l2_n106(x)
+ else
+ fun_l2_n637(x)
+ end
+end
+
+def fun_l1_n780(x)
+ if (x < 1)
+ fun_l2_n945(x)
+ else
+ fun_l2_n238(x)
+ end
+end
+
+def fun_l1_n781(x)
+ if (x < 1)
+ fun_l2_n269(x)
+ else
+ fun_l2_n582(x)
+ end
+end
+
+def fun_l1_n782(x)
+ if (x < 1)
+ fun_l2_n523(x)
+ else
+ fun_l2_n201(x)
+ end
+end
+
+def fun_l1_n783(x)
+ if (x < 1)
+ fun_l2_n326(x)
+ else
+ fun_l2_n744(x)
+ end
+end
+
+def fun_l1_n784(x)
+ if (x < 1)
+ fun_l2_n49(x)
+ else
+ fun_l2_n943(x)
+ end
+end
+
+def fun_l1_n785(x)
+ if (x < 1)
+ fun_l2_n975(x)
+ else
+ fun_l2_n560(x)
+ end
+end
+
+def fun_l1_n786(x)
+ if (x < 1)
+ fun_l2_n843(x)
+ else
+ fun_l2_n240(x)
+ end
+end
+
+def fun_l1_n787(x)
+ if (x < 1)
+ fun_l2_n179(x)
+ else
+ fun_l2_n423(x)
+ end
+end
+
+def fun_l1_n788(x)
+ if (x < 1)
+ fun_l2_n536(x)
+ else
+ fun_l2_n112(x)
+ end
+end
+
+def fun_l1_n789(x)
+ if (x < 1)
+ fun_l2_n418(x)
+ else
+ fun_l2_n443(x)
+ end
+end
+
+def fun_l1_n790(x)
+ if (x < 1)
+ fun_l2_n770(x)
+ else
+ fun_l2_n127(x)
+ end
+end
+
+def fun_l1_n791(x)
+ if (x < 1)
+ fun_l2_n425(x)
+ else
+ fun_l2_n364(x)
+ end
+end
+
+def fun_l1_n792(x)
+ if (x < 1)
+ fun_l2_n841(x)
+ else
+ fun_l2_n112(x)
+ end
+end
+
+def fun_l1_n793(x)
+ if (x < 1)
+ fun_l2_n64(x)
+ else
+ fun_l2_n294(x)
+ end
+end
+
+def fun_l1_n794(x)
+ if (x < 1)
+ fun_l2_n837(x)
+ else
+ fun_l2_n325(x)
+ end
+end
+
+def fun_l1_n795(x)
+ if (x < 1)
+ fun_l2_n522(x)
+ else
+ fun_l2_n307(x)
+ end
+end
+
+def fun_l1_n796(x)
+ if (x < 1)
+ fun_l2_n526(x)
+ else
+ fun_l2_n957(x)
+ end
+end
+
+def fun_l1_n797(x)
+ if (x < 1)
+ fun_l2_n71(x)
+ else
+ fun_l2_n871(x)
+ end
+end
+
+def fun_l1_n798(x)
+ if (x < 1)
+ fun_l2_n386(x)
+ else
+ fun_l2_n354(x)
+ end
+end
+
+def fun_l1_n799(x)
+ if (x < 1)
+ fun_l2_n669(x)
+ else
+ fun_l2_n237(x)
+ end
+end
+
+def fun_l1_n800(x)
+ if (x < 1)
+ fun_l2_n455(x)
+ else
+ fun_l2_n549(x)
+ end
+end
+
+def fun_l1_n801(x)
+ if (x < 1)
+ fun_l2_n708(x)
+ else
+ fun_l2_n611(x)
+ end
+end
+
+def fun_l1_n802(x)
+ if (x < 1)
+ fun_l2_n35(x)
+ else
+ fun_l2_n239(x)
+ end
+end
+
+def fun_l1_n803(x)
+ if (x < 1)
+ fun_l2_n890(x)
+ else
+ fun_l2_n27(x)
+ end
+end
+
+def fun_l1_n804(x)
+ if (x < 1)
+ fun_l2_n378(x)
+ else
+ fun_l2_n567(x)
+ end
+end
+
+def fun_l1_n805(x)
+ if (x < 1)
+ fun_l2_n672(x)
+ else
+ fun_l2_n320(x)
+ end
+end
+
+def fun_l1_n806(x)
+ if (x < 1)
+ fun_l2_n207(x)
+ else
+ fun_l2_n691(x)
+ end
+end
+
+def fun_l1_n807(x)
+ if (x < 1)
+ fun_l2_n180(x)
+ else
+ fun_l2_n912(x)
+ end
+end
+
+def fun_l1_n808(x)
+ if (x < 1)
+ fun_l2_n370(x)
+ else
+ fun_l2_n529(x)
+ end
+end
+
+def fun_l1_n809(x)
+ if (x < 1)
+ fun_l2_n436(x)
+ else
+ fun_l2_n465(x)
+ end
+end
+
+def fun_l1_n810(x)
+ if (x < 1)
+ fun_l2_n820(x)
+ else
+ fun_l2_n995(x)
+ end
+end
+
+def fun_l1_n811(x)
+ if (x < 1)
+ fun_l2_n821(x)
+ else
+ fun_l2_n898(x)
+ end
+end
+
+def fun_l1_n812(x)
+ if (x < 1)
+ fun_l2_n376(x)
+ else
+ fun_l2_n999(x)
+ end
+end
+
+def fun_l1_n813(x)
+ if (x < 1)
+ fun_l2_n643(x)
+ else
+ fun_l2_n478(x)
+ end
+end
+
+def fun_l1_n814(x)
+ if (x < 1)
+ fun_l2_n475(x)
+ else
+ fun_l2_n137(x)
+ end
+end
+
+def fun_l1_n815(x)
+ if (x < 1)
+ fun_l2_n117(x)
+ else
+ fun_l2_n685(x)
+ end
+end
+
+def fun_l1_n816(x)
+ if (x < 1)
+ fun_l2_n451(x)
+ else
+ fun_l2_n643(x)
+ end
+end
+
+def fun_l1_n817(x)
+ if (x < 1)
+ fun_l2_n22(x)
+ else
+ fun_l2_n939(x)
+ end
+end
+
+def fun_l1_n818(x)
+ if (x < 1)
+ fun_l2_n586(x)
+ else
+ fun_l2_n206(x)
+ end
+end
+
+def fun_l1_n819(x)
+ if (x < 1)
+ fun_l2_n443(x)
+ else
+ fun_l2_n661(x)
+ end
+end
+
+def fun_l1_n820(x)
+ if (x < 1)
+ fun_l2_n656(x)
+ else
+ fun_l2_n93(x)
+ end
+end
+
+def fun_l1_n821(x)
+ if (x < 1)
+ fun_l2_n794(x)
+ else
+ fun_l2_n354(x)
+ end
+end
+
+def fun_l1_n822(x)
+ if (x < 1)
+ fun_l2_n519(x)
+ else
+ fun_l2_n446(x)
+ end
+end
+
+def fun_l1_n823(x)
+ if (x < 1)
+ fun_l2_n0(x)
+ else
+ fun_l2_n233(x)
+ end
+end
+
+def fun_l1_n824(x)
+ if (x < 1)
+ fun_l2_n376(x)
+ else
+ fun_l2_n186(x)
+ end
+end
+
+def fun_l1_n825(x)
+ if (x < 1)
+ fun_l2_n292(x)
+ else
+ fun_l2_n303(x)
+ end
+end
+
+def fun_l1_n826(x)
+ if (x < 1)
+ fun_l2_n871(x)
+ else
+ fun_l2_n515(x)
+ end
+end
+
+def fun_l1_n827(x)
+ if (x < 1)
+ fun_l2_n227(x)
+ else
+ fun_l2_n232(x)
+ end
+end
+
+def fun_l1_n828(x)
+ if (x < 1)
+ fun_l2_n860(x)
+ else
+ fun_l2_n341(x)
+ end
+end
+
+def fun_l1_n829(x)
+ if (x < 1)
+ fun_l2_n140(x)
+ else
+ fun_l2_n304(x)
+ end
+end
+
+def fun_l1_n830(x)
+ if (x < 1)
+ fun_l2_n796(x)
+ else
+ fun_l2_n943(x)
+ end
+end
+
+def fun_l1_n831(x)
+ if (x < 1)
+ fun_l2_n71(x)
+ else
+ fun_l2_n8(x)
+ end
+end
+
+def fun_l1_n832(x)
+ if (x < 1)
+ fun_l2_n328(x)
+ else
+ fun_l2_n836(x)
+ end
+end
+
+def fun_l1_n833(x)
+ if (x < 1)
+ fun_l2_n322(x)
+ else
+ fun_l2_n653(x)
+ end
+end
+
+def fun_l1_n834(x)
+ if (x < 1)
+ fun_l2_n471(x)
+ else
+ fun_l2_n147(x)
+ end
+end
+
+def fun_l1_n835(x)
+ if (x < 1)
+ fun_l2_n601(x)
+ else
+ fun_l2_n249(x)
+ end
+end
+
+def fun_l1_n836(x)
+ if (x < 1)
+ fun_l2_n105(x)
+ else
+ fun_l2_n63(x)
+ end
+end
+
+def fun_l1_n837(x)
+ if (x < 1)
+ fun_l2_n773(x)
+ else
+ fun_l2_n597(x)
+ end
+end
+
+def fun_l1_n838(x)
+ if (x < 1)
+ fun_l2_n933(x)
+ else
+ fun_l2_n964(x)
+ end
+end
+
+def fun_l1_n839(x)
+ if (x < 1)
+ fun_l2_n123(x)
+ else
+ fun_l2_n533(x)
+ end
+end
+
+def fun_l1_n840(x)
+ if (x < 1)
+ fun_l2_n94(x)
+ else
+ fun_l2_n127(x)
+ end
+end
+
+def fun_l1_n841(x)
+ if (x < 1)
+ fun_l2_n981(x)
+ else
+ fun_l2_n819(x)
+ end
+end
+
+def fun_l1_n842(x)
+ if (x < 1)
+ fun_l2_n743(x)
+ else
+ fun_l2_n747(x)
+ end
+end
+
+def fun_l1_n843(x)
+ if (x < 1)
+ fun_l2_n735(x)
+ else
+ fun_l2_n110(x)
+ end
+end
+
+def fun_l1_n844(x)
+ if (x < 1)
+ fun_l2_n273(x)
+ else
+ fun_l2_n753(x)
+ end
+end
+
+def fun_l1_n845(x)
+ if (x < 1)
+ fun_l2_n316(x)
+ else
+ fun_l2_n391(x)
+ end
+end
+
+def fun_l1_n846(x)
+ if (x < 1)
+ fun_l2_n221(x)
+ else
+ fun_l2_n251(x)
+ end
+end
+
+def fun_l1_n847(x)
+ if (x < 1)
+ fun_l2_n795(x)
+ else
+ fun_l2_n513(x)
+ end
+end
+
+def fun_l1_n848(x)
+ if (x < 1)
+ fun_l2_n986(x)
+ else
+ fun_l2_n472(x)
+ end
+end
+
+def fun_l1_n849(x)
+ if (x < 1)
+ fun_l2_n863(x)
+ else
+ fun_l2_n759(x)
+ end
+end
+
+def fun_l1_n850(x)
+ if (x < 1)
+ fun_l2_n512(x)
+ else
+ fun_l2_n502(x)
+ end
+end
+
+def fun_l1_n851(x)
+ if (x < 1)
+ fun_l2_n561(x)
+ else
+ fun_l2_n697(x)
+ end
+end
+
+def fun_l1_n852(x)
+ if (x < 1)
+ fun_l2_n816(x)
+ else
+ fun_l2_n90(x)
+ end
+end
+
+def fun_l1_n853(x)
+ if (x < 1)
+ fun_l2_n656(x)
+ else
+ fun_l2_n174(x)
+ end
+end
+
+def fun_l1_n854(x)
+ if (x < 1)
+ fun_l2_n439(x)
+ else
+ fun_l2_n90(x)
+ end
+end
+
+def fun_l1_n855(x)
+ if (x < 1)
+ fun_l2_n15(x)
+ else
+ fun_l2_n402(x)
+ end
+end
+
+def fun_l1_n856(x)
+ if (x < 1)
+ fun_l2_n224(x)
+ else
+ fun_l2_n177(x)
+ end
+end
+
+def fun_l1_n857(x)
+ if (x < 1)
+ fun_l2_n675(x)
+ else
+ fun_l2_n113(x)
+ end
+end
+
+def fun_l1_n858(x)
+ if (x < 1)
+ fun_l2_n388(x)
+ else
+ fun_l2_n529(x)
+ end
+end
+
+def fun_l1_n859(x)
+ if (x < 1)
+ fun_l2_n628(x)
+ else
+ fun_l2_n93(x)
+ end
+end
+
+def fun_l1_n860(x)
+ if (x < 1)
+ fun_l2_n67(x)
+ else
+ fun_l2_n90(x)
+ end
+end
+
+def fun_l1_n861(x)
+ if (x < 1)
+ fun_l2_n637(x)
+ else
+ fun_l2_n68(x)
+ end
+end
+
+def fun_l1_n862(x)
+ if (x < 1)
+ fun_l2_n767(x)
+ else
+ fun_l2_n86(x)
+ end
+end
+
+def fun_l1_n863(x)
+ if (x < 1)
+ fun_l2_n50(x)
+ else
+ fun_l2_n777(x)
+ end
+end
+
+def fun_l1_n864(x)
+ if (x < 1)
+ fun_l2_n846(x)
+ else
+ fun_l2_n783(x)
+ end
+end
+
+def fun_l1_n865(x)
+ if (x < 1)
+ fun_l2_n756(x)
+ else
+ fun_l2_n703(x)
+ end
+end
+
+def fun_l1_n866(x)
+ if (x < 1)
+ fun_l2_n972(x)
+ else
+ fun_l2_n249(x)
+ end
+end
+
+def fun_l1_n867(x)
+ if (x < 1)
+ fun_l2_n636(x)
+ else
+ fun_l2_n841(x)
+ end
+end
+
+def fun_l1_n868(x)
+ if (x < 1)
+ fun_l2_n889(x)
+ else
+ fun_l2_n899(x)
+ end
+end
+
+def fun_l1_n869(x)
+ if (x < 1)
+ fun_l2_n55(x)
+ else
+ fun_l2_n45(x)
+ end
+end
+
+def fun_l1_n870(x)
+ if (x < 1)
+ fun_l2_n740(x)
+ else
+ fun_l2_n722(x)
+ end
+end
+
+def fun_l1_n871(x)
+ if (x < 1)
+ fun_l2_n865(x)
+ else
+ fun_l2_n695(x)
+ end
+end
+
+def fun_l1_n872(x)
+ if (x < 1)
+ fun_l2_n54(x)
+ else
+ fun_l2_n581(x)
+ end
+end
+
+def fun_l1_n873(x)
+ if (x < 1)
+ fun_l2_n328(x)
+ else
+ fun_l2_n861(x)
+ end
+end
+
+def fun_l1_n874(x)
+ if (x < 1)
+ fun_l2_n645(x)
+ else
+ fun_l2_n121(x)
+ end
+end
+
+def fun_l1_n875(x)
+ if (x < 1)
+ fun_l2_n71(x)
+ else
+ fun_l2_n329(x)
+ end
+end
+
+def fun_l1_n876(x)
+ if (x < 1)
+ fun_l2_n944(x)
+ else
+ fun_l2_n95(x)
+ end
+end
+
+def fun_l1_n877(x)
+ if (x < 1)
+ fun_l2_n270(x)
+ else
+ fun_l2_n801(x)
+ end
+end
+
+def fun_l1_n878(x)
+ if (x < 1)
+ fun_l2_n115(x)
+ else
+ fun_l2_n414(x)
+ end
+end
+
+def fun_l1_n879(x)
+ if (x < 1)
+ fun_l2_n154(x)
+ else
+ fun_l2_n605(x)
+ end
+end
+
+def fun_l1_n880(x)
+ if (x < 1)
+ fun_l2_n859(x)
+ else
+ fun_l2_n307(x)
+ end
+end
+
+def fun_l1_n881(x)
+ if (x < 1)
+ fun_l2_n443(x)
+ else
+ fun_l2_n267(x)
+ end
+end
+
+def fun_l1_n882(x)
+ if (x < 1)
+ fun_l2_n280(x)
+ else
+ fun_l2_n175(x)
+ end
+end
+
+def fun_l1_n883(x)
+ if (x < 1)
+ fun_l2_n176(x)
+ else
+ fun_l2_n286(x)
+ end
+end
+
+def fun_l1_n884(x)
+ if (x < 1)
+ fun_l2_n422(x)
+ else
+ fun_l2_n469(x)
+ end
+end
+
+def fun_l1_n885(x)
+ if (x < 1)
+ fun_l2_n532(x)
+ else
+ fun_l2_n795(x)
+ end
+end
+
+def fun_l1_n886(x)
+ if (x < 1)
+ fun_l2_n966(x)
+ else
+ fun_l2_n46(x)
+ end
+end
+
+def fun_l1_n887(x)
+ if (x < 1)
+ fun_l2_n446(x)
+ else
+ fun_l2_n944(x)
+ end
+end
+
+def fun_l1_n888(x)
+ if (x < 1)
+ fun_l2_n256(x)
+ else
+ fun_l2_n757(x)
+ end
+end
+
+def fun_l1_n889(x)
+ if (x < 1)
+ fun_l2_n566(x)
+ else
+ fun_l2_n992(x)
+ end
+end
+
+def fun_l1_n890(x)
+ if (x < 1)
+ fun_l2_n55(x)
+ else
+ fun_l2_n121(x)
+ end
+end
+
+def fun_l1_n891(x)
+ if (x < 1)
+ fun_l2_n597(x)
+ else
+ fun_l2_n257(x)
+ end
+end
+
+def fun_l1_n892(x)
+ if (x < 1)
+ fun_l2_n79(x)
+ else
+ fun_l2_n249(x)
+ end
+end
+
+def fun_l1_n893(x)
+ if (x < 1)
+ fun_l2_n436(x)
+ else
+ fun_l2_n738(x)
+ end
+end
+
+def fun_l1_n894(x)
+ if (x < 1)
+ fun_l2_n244(x)
+ else
+ fun_l2_n184(x)
+ end
+end
+
+def fun_l1_n895(x)
+ if (x < 1)
+ fun_l2_n342(x)
+ else
+ fun_l2_n794(x)
+ end
+end
+
+def fun_l1_n896(x)
+ if (x < 1)
+ fun_l2_n588(x)
+ else
+ fun_l2_n235(x)
+ end
+end
+
+def fun_l1_n897(x)
+ if (x < 1)
+ fun_l2_n443(x)
+ else
+ fun_l2_n462(x)
+ end
+end
+
+def fun_l1_n898(x)
+ if (x < 1)
+ fun_l2_n986(x)
+ else
+ fun_l2_n282(x)
+ end
+end
+
+def fun_l1_n899(x)
+ if (x < 1)
+ fun_l2_n61(x)
+ else
+ fun_l2_n516(x)
+ end
+end
+
+def fun_l1_n900(x)
+ if (x < 1)
+ fun_l2_n993(x)
+ else
+ fun_l2_n337(x)
+ end
+end
+
+def fun_l1_n901(x)
+ if (x < 1)
+ fun_l2_n398(x)
+ else
+ fun_l2_n425(x)
+ end
+end
+
+def fun_l1_n902(x)
+ if (x < 1)
+ fun_l2_n177(x)
+ else
+ fun_l2_n741(x)
+ end
+end
+
+def fun_l1_n903(x)
+ if (x < 1)
+ fun_l2_n597(x)
+ else
+ fun_l2_n331(x)
+ end
+end
+
+def fun_l1_n904(x)
+ if (x < 1)
+ fun_l2_n740(x)
+ else
+ fun_l2_n527(x)
+ end
+end
+
+def fun_l1_n905(x)
+ if (x < 1)
+ fun_l2_n138(x)
+ else
+ fun_l2_n248(x)
+ end
+end
+
+def fun_l1_n906(x)
+ if (x < 1)
+ fun_l2_n318(x)
+ else
+ fun_l2_n941(x)
+ end
+end
+
+def fun_l1_n907(x)
+ if (x < 1)
+ fun_l2_n493(x)
+ else
+ fun_l2_n376(x)
+ end
+end
+
+def fun_l1_n908(x)
+ if (x < 1)
+ fun_l2_n899(x)
+ else
+ fun_l2_n745(x)
+ end
+end
+
+def fun_l1_n909(x)
+ if (x < 1)
+ fun_l2_n963(x)
+ else
+ fun_l2_n384(x)
+ end
+end
+
+def fun_l1_n910(x)
+ if (x < 1)
+ fun_l2_n490(x)
+ else
+ fun_l2_n702(x)
+ end
+end
+
+def fun_l1_n911(x)
+ if (x < 1)
+ fun_l2_n712(x)
+ else
+ fun_l2_n810(x)
+ end
+end
+
+def fun_l1_n912(x)
+ if (x < 1)
+ fun_l2_n245(x)
+ else
+ fun_l2_n609(x)
+ end
+end
+
+def fun_l1_n913(x)
+ if (x < 1)
+ fun_l2_n997(x)
+ else
+ fun_l2_n241(x)
+ end
+end
+
+def fun_l1_n914(x)
+ if (x < 1)
+ fun_l2_n428(x)
+ else
+ fun_l2_n697(x)
+ end
+end
+
+def fun_l1_n915(x)
+ if (x < 1)
+ fun_l2_n257(x)
+ else
+ fun_l2_n626(x)
+ end
+end
+
+def fun_l1_n916(x)
+ if (x < 1)
+ fun_l2_n378(x)
+ else
+ fun_l2_n457(x)
+ end
+end
+
+def fun_l1_n917(x)
+ if (x < 1)
+ fun_l2_n378(x)
+ else
+ fun_l2_n958(x)
+ end
+end
+
+def fun_l1_n918(x)
+ if (x < 1)
+ fun_l2_n213(x)
+ else
+ fun_l2_n962(x)
+ end
+end
+
+def fun_l1_n919(x)
+ if (x < 1)
+ fun_l2_n466(x)
+ else
+ fun_l2_n461(x)
+ end
+end
+
+def fun_l1_n920(x)
+ if (x < 1)
+ fun_l2_n835(x)
+ else
+ fun_l2_n460(x)
+ end
+end
+
+def fun_l1_n921(x)
+ if (x < 1)
+ fun_l2_n642(x)
+ else
+ fun_l2_n680(x)
+ end
+end
+
+def fun_l1_n922(x)
+ if (x < 1)
+ fun_l2_n467(x)
+ else
+ fun_l2_n225(x)
+ end
+end
+
+def fun_l1_n923(x)
+ if (x < 1)
+ fun_l2_n752(x)
+ else
+ fun_l2_n462(x)
+ end
+end
+
+def fun_l1_n924(x)
+ if (x < 1)
+ fun_l2_n113(x)
+ else
+ fun_l2_n59(x)
+ end
+end
+
+def fun_l1_n925(x)
+ if (x < 1)
+ fun_l2_n329(x)
+ else
+ fun_l2_n994(x)
+ end
+end
+
+def fun_l1_n926(x)
+ if (x < 1)
+ fun_l2_n815(x)
+ else
+ fun_l2_n249(x)
+ end
+end
+
+def fun_l1_n927(x)
+ if (x < 1)
+ fun_l2_n216(x)
+ else
+ fun_l2_n591(x)
+ end
+end
+
+def fun_l1_n928(x)
+ if (x < 1)
+ fun_l2_n441(x)
+ else
+ fun_l2_n437(x)
+ end
+end
+
+def fun_l1_n929(x)
+ if (x < 1)
+ fun_l2_n433(x)
+ else
+ fun_l2_n185(x)
+ end
+end
+
+def fun_l1_n930(x)
+ if (x < 1)
+ fun_l2_n125(x)
+ else
+ fun_l2_n940(x)
+ end
+end
+
+def fun_l1_n931(x)
+ if (x < 1)
+ fun_l2_n203(x)
+ else
+ fun_l2_n292(x)
+ end
+end
+
+def fun_l1_n932(x)
+ if (x < 1)
+ fun_l2_n986(x)
+ else
+ fun_l2_n234(x)
+ end
+end
+
+def fun_l1_n933(x)
+ if (x < 1)
+ fun_l2_n735(x)
+ else
+ fun_l2_n20(x)
+ end
+end
+
+def fun_l1_n934(x)
+ if (x < 1)
+ fun_l2_n878(x)
+ else
+ fun_l2_n166(x)
+ end
+end
+
+def fun_l1_n935(x)
+ if (x < 1)
+ fun_l2_n123(x)
+ else
+ fun_l2_n52(x)
+ end
+end
+
+def fun_l1_n936(x)
+ if (x < 1)
+ fun_l2_n99(x)
+ else
+ fun_l2_n336(x)
+ end
+end
+
+def fun_l1_n937(x)
+ if (x < 1)
+ fun_l2_n731(x)
+ else
+ fun_l2_n363(x)
+ end
+end
+
+def fun_l1_n938(x)
+ if (x < 1)
+ fun_l2_n988(x)
+ else
+ fun_l2_n978(x)
+ end
+end
+
+def fun_l1_n939(x)
+ if (x < 1)
+ fun_l2_n592(x)
+ else
+ fun_l2_n932(x)
+ end
+end
+
+def fun_l1_n940(x)
+ if (x < 1)
+ fun_l2_n681(x)
+ else
+ fun_l2_n868(x)
+ end
+end
+
+def fun_l1_n941(x)
+ if (x < 1)
+ fun_l2_n863(x)
+ else
+ fun_l2_n457(x)
+ end
+end
+
+def fun_l1_n942(x)
+ if (x < 1)
+ fun_l2_n526(x)
+ else
+ fun_l2_n960(x)
+ end
+end
+
+def fun_l1_n943(x)
+ if (x < 1)
+ fun_l2_n859(x)
+ else
+ fun_l2_n478(x)
+ end
+end
+
+def fun_l1_n944(x)
+ if (x < 1)
+ fun_l2_n947(x)
+ else
+ fun_l2_n889(x)
+ end
+end
+
+def fun_l1_n945(x)
+ if (x < 1)
+ fun_l2_n845(x)
+ else
+ fun_l2_n211(x)
+ end
+end
+
+def fun_l1_n946(x)
+ if (x < 1)
+ fun_l2_n445(x)
+ else
+ fun_l2_n600(x)
+ end
+end
+
+def fun_l1_n947(x)
+ if (x < 1)
+ fun_l2_n932(x)
+ else
+ fun_l2_n534(x)
+ end
+end
+
+def fun_l1_n948(x)
+ if (x < 1)
+ fun_l2_n224(x)
+ else
+ fun_l2_n32(x)
+ end
+end
+
+def fun_l1_n949(x)
+ if (x < 1)
+ fun_l2_n63(x)
+ else
+ fun_l2_n121(x)
+ end
+end
+
+def fun_l1_n950(x)
+ if (x < 1)
+ fun_l2_n55(x)
+ else
+ fun_l2_n90(x)
+ end
+end
+
+def fun_l1_n951(x)
+ if (x < 1)
+ fun_l2_n880(x)
+ else
+ fun_l2_n936(x)
+ end
+end
+
+def fun_l1_n952(x)
+ if (x < 1)
+ fun_l2_n971(x)
+ else
+ fun_l2_n40(x)
+ end
+end
+
+def fun_l1_n953(x)
+ if (x < 1)
+ fun_l2_n880(x)
+ else
+ fun_l2_n200(x)
+ end
+end
+
+def fun_l1_n954(x)
+ if (x < 1)
+ fun_l2_n60(x)
+ else
+ fun_l2_n473(x)
+ end
+end
+
+def fun_l1_n955(x)
+ if (x < 1)
+ fun_l2_n855(x)
+ else
+ fun_l2_n601(x)
+ end
+end
+
+def fun_l1_n956(x)
+ if (x < 1)
+ fun_l2_n282(x)
+ else
+ fun_l2_n542(x)
+ end
+end
+
+def fun_l1_n957(x)
+ if (x < 1)
+ fun_l2_n116(x)
+ else
+ fun_l2_n584(x)
+ end
+end
+
+def fun_l1_n958(x)
+ if (x < 1)
+ fun_l2_n845(x)
+ else
+ fun_l2_n814(x)
+ end
+end
+
+def fun_l1_n959(x)
+ if (x < 1)
+ fun_l2_n443(x)
+ else
+ fun_l2_n239(x)
+ end
+end
+
+def fun_l1_n960(x)
+ if (x < 1)
+ fun_l2_n79(x)
+ else
+ fun_l2_n556(x)
+ end
+end
+
+def fun_l1_n961(x)
+ if (x < 1)
+ fun_l2_n664(x)
+ else
+ fun_l2_n282(x)
+ end
+end
+
+def fun_l1_n962(x)
+ if (x < 1)
+ fun_l2_n273(x)
+ else
+ fun_l2_n393(x)
+ end
+end
+
+def fun_l1_n963(x)
+ if (x < 1)
+ fun_l2_n120(x)
+ else
+ fun_l2_n506(x)
+ end
+end
+
+def fun_l1_n964(x)
+ if (x < 1)
+ fun_l2_n362(x)
+ else
+ fun_l2_n631(x)
+ end
+end
+
+def fun_l1_n965(x)
+ if (x < 1)
+ fun_l2_n219(x)
+ else
+ fun_l2_n260(x)
+ end
+end
+
+def fun_l1_n966(x)
+ if (x < 1)
+ fun_l2_n802(x)
+ else
+ fun_l2_n732(x)
+ end
+end
+
+def fun_l1_n967(x)
+ if (x < 1)
+ fun_l2_n761(x)
+ else
+ fun_l2_n580(x)
+ end
+end
+
+def fun_l1_n968(x)
+ if (x < 1)
+ fun_l2_n529(x)
+ else
+ fun_l2_n783(x)
+ end
+end
+
+def fun_l1_n969(x)
+ if (x < 1)
+ fun_l2_n138(x)
+ else
+ fun_l2_n434(x)
+ end
+end
+
+def fun_l1_n970(x)
+ if (x < 1)
+ fun_l2_n266(x)
+ else
+ fun_l2_n522(x)
+ end
+end
+
+def fun_l1_n971(x)
+ if (x < 1)
+ fun_l2_n352(x)
+ else
+ fun_l2_n51(x)
+ end
+end
+
+def fun_l1_n972(x)
+ if (x < 1)
+ fun_l2_n619(x)
+ else
+ fun_l2_n68(x)
+ end
+end
+
+def fun_l1_n973(x)
+ if (x < 1)
+ fun_l2_n510(x)
+ else
+ fun_l2_n74(x)
+ end
+end
+
+def fun_l1_n974(x)
+ if (x < 1)
+ fun_l2_n442(x)
+ else
+ fun_l2_n258(x)
+ end
+end
+
+def fun_l1_n975(x)
+ if (x < 1)
+ fun_l2_n106(x)
+ else
+ fun_l2_n364(x)
+ end
+end
+
+def fun_l1_n976(x)
+ if (x < 1)
+ fun_l2_n412(x)
+ else
+ fun_l2_n669(x)
+ end
+end
+
+def fun_l1_n977(x)
+ if (x < 1)
+ fun_l2_n394(x)
+ else
+ fun_l2_n435(x)
+ end
+end
+
+def fun_l1_n978(x)
+ if (x < 1)
+ fun_l2_n630(x)
+ else
+ fun_l2_n686(x)
+ end
+end
+
+def fun_l1_n979(x)
+ if (x < 1)
+ fun_l2_n683(x)
+ else
+ fun_l2_n99(x)
+ end
+end
+
+def fun_l1_n980(x)
+ if (x < 1)
+ fun_l2_n352(x)
+ else
+ fun_l2_n394(x)
+ end
+end
+
+def fun_l1_n981(x)
+ if (x < 1)
+ fun_l2_n856(x)
+ else
+ fun_l2_n11(x)
+ end
+end
+
+def fun_l1_n982(x)
+ if (x < 1)
+ fun_l2_n484(x)
+ else
+ fun_l2_n230(x)
+ end
+end
+
+def fun_l1_n983(x)
+ if (x < 1)
+ fun_l2_n609(x)
+ else
+ fun_l2_n882(x)
+ end
+end
+
+def fun_l1_n984(x)
+ if (x < 1)
+ fun_l2_n529(x)
+ else
+ fun_l2_n589(x)
+ end
+end
+
+def fun_l1_n985(x)
+ if (x < 1)
+ fun_l2_n545(x)
+ else
+ fun_l2_n139(x)
+ end
+end
+
+def fun_l1_n986(x)
+ if (x < 1)
+ fun_l2_n679(x)
+ else
+ fun_l2_n938(x)
+ end
+end
+
+def fun_l1_n987(x)
+ if (x < 1)
+ fun_l2_n803(x)
+ else
+ fun_l2_n454(x)
+ end
+end
+
+def fun_l1_n988(x)
+ if (x < 1)
+ fun_l2_n403(x)
+ else
+ fun_l2_n976(x)
+ end
+end
+
+def fun_l1_n989(x)
+ if (x < 1)
+ fun_l2_n364(x)
+ else
+ fun_l2_n96(x)
+ end
+end
+
+def fun_l1_n990(x)
+ if (x < 1)
+ fun_l2_n48(x)
+ else
+ fun_l2_n442(x)
+ end
+end
+
+def fun_l1_n991(x)
+ if (x < 1)
+ fun_l2_n842(x)
+ else
+ fun_l2_n810(x)
+ end
+end
+
+def fun_l1_n992(x)
+ if (x < 1)
+ fun_l2_n237(x)
+ else
+ fun_l2_n145(x)
+ end
+end
+
+def fun_l1_n993(x)
+ if (x < 1)
+ fun_l2_n279(x)
+ else
+ fun_l2_n783(x)
+ end
+end
+
+def fun_l1_n994(x)
+ if (x < 1)
+ fun_l2_n323(x)
+ else
+ fun_l2_n829(x)
+ end
+end
+
+def fun_l1_n995(x)
+ if (x < 1)
+ fun_l2_n904(x)
+ else
+ fun_l2_n738(x)
+ end
+end
+
+def fun_l1_n996(x)
+ if (x < 1)
+ fun_l2_n390(x)
+ else
+ fun_l2_n169(x)
+ end
+end
+
+def fun_l1_n997(x)
+ if (x < 1)
+ fun_l2_n583(x)
+ else
+ fun_l2_n993(x)
+ end
+end
+
+def fun_l1_n998(x)
+ if (x < 1)
+ fun_l2_n752(x)
+ else
+ fun_l2_n240(x)
+ end
+end
+
+def fun_l1_n999(x)
+ if (x < 1)
+ fun_l2_n489(x)
+ else
+ fun_l2_n175(x)
+ end
+end
+
+def fun_l2_n0(x)
+ if (x < 1)
+ fun_l3_n293(x)
+ else
+ fun_l3_n569(x)
+ end
+end
+
+def fun_l2_n1(x)
+ if (x < 1)
+ fun_l3_n310(x)
+ else
+ fun_l3_n785(x)
+ end
+end
+
+def fun_l2_n2(x)
+ if (x < 1)
+ fun_l3_n484(x)
+ else
+ fun_l3_n800(x)
+ end
+end
+
+def fun_l2_n3(x)
+ if (x < 1)
+ fun_l3_n353(x)
+ else
+ fun_l3_n871(x)
+ end
+end
+
+def fun_l2_n4(x)
+ if (x < 1)
+ fun_l3_n100(x)
+ else
+ fun_l3_n796(x)
+ end
+end
+
+def fun_l2_n5(x)
+ if (x < 1)
+ fun_l3_n547(x)
+ else
+ fun_l3_n663(x)
+ end
+end
+
+def fun_l2_n6(x)
+ if (x < 1)
+ fun_l3_n756(x)
+ else
+ fun_l3_n69(x)
+ end
+end
+
+def fun_l2_n7(x)
+ if (x < 1)
+ fun_l3_n393(x)
+ else
+ fun_l3_n333(x)
+ end
+end
+
+def fun_l2_n8(x)
+ if (x < 1)
+ fun_l3_n167(x)
+ else
+ fun_l3_n47(x)
+ end
+end
+
+def fun_l2_n9(x)
+ if (x < 1)
+ fun_l3_n969(x)
+ else
+ fun_l3_n772(x)
+ end
+end
+
+def fun_l2_n10(x)
+ if (x < 1)
+ fun_l3_n667(x)
+ else
+ fun_l3_n62(x)
+ end
+end
+
+def fun_l2_n11(x)
+ if (x < 1)
+ fun_l3_n671(x)
+ else
+ fun_l3_n303(x)
+ end
+end
+
+def fun_l2_n12(x)
+ if (x < 1)
+ fun_l3_n116(x)
+ else
+ fun_l3_n537(x)
+ end
+end
+
+def fun_l2_n13(x)
+ if (x < 1)
+ fun_l3_n363(x)
+ else
+ fun_l3_n426(x)
+ end
+end
+
+def fun_l2_n14(x)
+ if (x < 1)
+ fun_l3_n23(x)
+ else
+ fun_l3_n800(x)
+ end
+end
+
+def fun_l2_n15(x)
+ if (x < 1)
+ fun_l3_n344(x)
+ else
+ fun_l3_n590(x)
+ end
+end
+
+def fun_l2_n16(x)
+ if (x < 1)
+ fun_l3_n759(x)
+ else
+ fun_l3_n531(x)
+ end
+end
+
+def fun_l2_n17(x)
+ if (x < 1)
+ fun_l3_n902(x)
+ else
+ fun_l3_n948(x)
+ end
+end
+
+def fun_l2_n18(x)
+ if (x < 1)
+ fun_l3_n407(x)
+ else
+ fun_l3_n743(x)
+ end
+end
+
+def fun_l2_n19(x)
+ if (x < 1)
+ fun_l3_n360(x)
+ else
+ fun_l3_n953(x)
+ end
+end
+
+def fun_l2_n20(x)
+ if (x < 1)
+ fun_l3_n968(x)
+ else
+ fun_l3_n685(x)
+ end
+end
+
+def fun_l2_n21(x)
+ if (x < 1)
+ fun_l3_n964(x)
+ else
+ fun_l3_n210(x)
+ end
+end
+
+def fun_l2_n22(x)
+ if (x < 1)
+ fun_l3_n346(x)
+ else
+ fun_l3_n902(x)
+ end
+end
+
+def fun_l2_n23(x)
+ if (x < 1)
+ fun_l3_n154(x)
+ else
+ fun_l3_n52(x)
+ end
+end
+
+def fun_l2_n24(x)
+ if (x < 1)
+ fun_l3_n229(x)
+ else
+ fun_l3_n137(x)
+ end
+end
+
+def fun_l2_n25(x)
+ if (x < 1)
+ fun_l3_n390(x)
+ else
+ fun_l3_n445(x)
+ end
+end
+
+def fun_l2_n26(x)
+ if (x < 1)
+ fun_l3_n11(x)
+ else
+ fun_l3_n113(x)
+ end
+end
+
+def fun_l2_n27(x)
+ if (x < 1)
+ fun_l3_n756(x)
+ else
+ fun_l3_n935(x)
+ end
+end
+
+def fun_l2_n28(x)
+ if (x < 1)
+ fun_l3_n567(x)
+ else
+ fun_l3_n984(x)
+ end
+end
+
+def fun_l2_n29(x)
+ if (x < 1)
+ fun_l3_n982(x)
+ else
+ fun_l3_n335(x)
+ end
+end
+
+def fun_l2_n30(x)
+ if (x < 1)
+ fun_l3_n78(x)
+ else
+ fun_l3_n571(x)
+ end
+end
+
+def fun_l2_n31(x)
+ if (x < 1)
+ fun_l3_n93(x)
+ else
+ fun_l3_n455(x)
+ end
+end
+
+def fun_l2_n32(x)
+ if (x < 1)
+ fun_l3_n118(x)
+ else
+ fun_l3_n713(x)
+ end
+end
+
+def fun_l2_n33(x)
+ if (x < 1)
+ fun_l3_n561(x)
+ else
+ fun_l3_n0(x)
+ end
+end
+
+def fun_l2_n34(x)
+ if (x < 1)
+ fun_l3_n153(x)
+ else
+ fun_l3_n77(x)
+ end
+end
+
+def fun_l2_n35(x)
+ if (x < 1)
+ fun_l3_n815(x)
+ else
+ fun_l3_n642(x)
+ end
+end
+
+def fun_l2_n36(x)
+ if (x < 1)
+ fun_l3_n503(x)
+ else
+ fun_l3_n428(x)
+ end
+end
+
+def fun_l2_n37(x)
+ if (x < 1)
+ fun_l3_n780(x)
+ else
+ fun_l3_n301(x)
+ end
+end
+
+def fun_l2_n38(x)
+ if (x < 1)
+ fun_l3_n827(x)
+ else
+ fun_l3_n444(x)
+ end
+end
+
+def fun_l2_n39(x)
+ if (x < 1)
+ fun_l3_n808(x)
+ else
+ fun_l3_n76(x)
+ end
+end
+
+def fun_l2_n40(x)
+ if (x < 1)
+ fun_l3_n216(x)
+ else
+ fun_l3_n943(x)
+ end
+end
+
+def fun_l2_n41(x)
+ if (x < 1)
+ fun_l3_n237(x)
+ else
+ fun_l3_n935(x)
+ end
+end
+
+def fun_l2_n42(x)
+ if (x < 1)
+ fun_l3_n769(x)
+ else
+ fun_l3_n564(x)
+ end
+end
+
+def fun_l2_n43(x)
+ if (x < 1)
+ fun_l3_n108(x)
+ else
+ fun_l3_n180(x)
+ end
+end
+
+def fun_l2_n44(x)
+ if (x < 1)
+ fun_l3_n750(x)
+ else
+ fun_l3_n614(x)
+ end
+end
+
+def fun_l2_n45(x)
+ if (x < 1)
+ fun_l3_n497(x)
+ else
+ fun_l3_n584(x)
+ end
+end
+
+def fun_l2_n46(x)
+ if (x < 1)
+ fun_l3_n364(x)
+ else
+ fun_l3_n218(x)
+ end
+end
+
+def fun_l2_n47(x)
+ if (x < 1)
+ fun_l3_n938(x)
+ else
+ fun_l3_n707(x)
+ end
+end
+
+def fun_l2_n48(x)
+ if (x < 1)
+ fun_l3_n14(x)
+ else
+ fun_l3_n178(x)
+ end
+end
+
+def fun_l2_n49(x)
+ if (x < 1)
+ fun_l3_n923(x)
+ else
+ fun_l3_n493(x)
+ end
+end
+
+def fun_l2_n50(x)
+ if (x < 1)
+ fun_l3_n481(x)
+ else
+ fun_l3_n193(x)
+ end
+end
+
+def fun_l2_n51(x)
+ if (x < 1)
+ fun_l3_n876(x)
+ else
+ fun_l3_n59(x)
+ end
+end
+
+def fun_l2_n52(x)
+ if (x < 1)
+ fun_l3_n930(x)
+ else
+ fun_l3_n813(x)
+ end
+end
+
+def fun_l2_n53(x)
+ if (x < 1)
+ fun_l3_n369(x)
+ else
+ fun_l3_n972(x)
+ end
+end
+
+def fun_l2_n54(x)
+ if (x < 1)
+ fun_l3_n95(x)
+ else
+ fun_l3_n573(x)
+ end
+end
+
+def fun_l2_n55(x)
+ if (x < 1)
+ fun_l3_n148(x)
+ else
+ fun_l3_n369(x)
+ end
+end
+
+def fun_l2_n56(x)
+ if (x < 1)
+ fun_l3_n476(x)
+ else
+ fun_l3_n54(x)
+ end
+end
+
+def fun_l2_n57(x)
+ if (x < 1)
+ fun_l3_n672(x)
+ else
+ fun_l3_n592(x)
+ end
+end
+
+def fun_l2_n58(x)
+ if (x < 1)
+ fun_l3_n648(x)
+ else
+ fun_l3_n169(x)
+ end
+end
+
+def fun_l2_n59(x)
+ if (x < 1)
+ fun_l3_n844(x)
+ else
+ fun_l3_n422(x)
+ end
+end
+
+def fun_l2_n60(x)
+ if (x < 1)
+ fun_l3_n6(x)
+ else
+ fun_l3_n763(x)
+ end
+end
+
+def fun_l2_n61(x)
+ if (x < 1)
+ fun_l3_n35(x)
+ else
+ fun_l3_n316(x)
+ end
+end
+
+def fun_l2_n62(x)
+ if (x < 1)
+ fun_l3_n487(x)
+ else
+ fun_l3_n469(x)
+ end
+end
+
+def fun_l2_n63(x)
+ if (x < 1)
+ fun_l3_n272(x)
+ else
+ fun_l3_n909(x)
+ end
+end
+
+def fun_l2_n64(x)
+ if (x < 1)
+ fun_l3_n266(x)
+ else
+ fun_l3_n347(x)
+ end
+end
+
+def fun_l2_n65(x)
+ if (x < 1)
+ fun_l3_n696(x)
+ else
+ fun_l3_n499(x)
+ end
+end
+
+def fun_l2_n66(x)
+ if (x < 1)
+ fun_l3_n523(x)
+ else
+ fun_l3_n834(x)
+ end
+end
+
+def fun_l2_n67(x)
+ if (x < 1)
+ fun_l3_n966(x)
+ else
+ fun_l3_n454(x)
+ end
+end
+
+def fun_l2_n68(x)
+ if (x < 1)
+ fun_l3_n406(x)
+ else
+ fun_l3_n644(x)
+ end
+end
+
+def fun_l2_n69(x)
+ if (x < 1)
+ fun_l3_n616(x)
+ else
+ fun_l3_n90(x)
+ end
+end
+
+def fun_l2_n70(x)
+ if (x < 1)
+ fun_l3_n158(x)
+ else
+ fun_l3_n910(x)
+ end
+end
+
+def fun_l2_n71(x)
+ if (x < 1)
+ fun_l3_n391(x)
+ else
+ fun_l3_n761(x)
+ end
+end
+
+def fun_l2_n72(x)
+ if (x < 1)
+ fun_l3_n585(x)
+ else
+ fun_l3_n405(x)
+ end
+end
+
+def fun_l2_n73(x)
+ if (x < 1)
+ fun_l3_n227(x)
+ else
+ fun_l3_n583(x)
+ end
+end
+
+def fun_l2_n74(x)
+ if (x < 1)
+ fun_l3_n580(x)
+ else
+ fun_l3_n110(x)
+ end
+end
+
+def fun_l2_n75(x)
+ if (x < 1)
+ fun_l3_n393(x)
+ else
+ fun_l3_n643(x)
+ end
+end
+
+def fun_l2_n76(x)
+ if (x < 1)
+ fun_l3_n935(x)
+ else
+ fun_l3_n593(x)
+ end
+end
+
+def fun_l2_n77(x)
+ if (x < 1)
+ fun_l3_n239(x)
+ else
+ fun_l3_n704(x)
+ end
+end
+
+def fun_l2_n78(x)
+ if (x < 1)
+ fun_l3_n192(x)
+ else
+ fun_l3_n722(x)
+ end
+end
+
+def fun_l2_n79(x)
+ if (x < 1)
+ fun_l3_n119(x)
+ else
+ fun_l3_n426(x)
+ end
+end
+
+def fun_l2_n80(x)
+ if (x < 1)
+ fun_l3_n692(x)
+ else
+ fun_l3_n895(x)
+ end
+end
+
+def fun_l2_n81(x)
+ if (x < 1)
+ fun_l3_n246(x)
+ else
+ fun_l3_n340(x)
+ end
+end
+
+def fun_l2_n82(x)
+ if (x < 1)
+ fun_l3_n368(x)
+ else
+ fun_l3_n255(x)
+ end
+end
+
+def fun_l2_n83(x)
+ if (x < 1)
+ fun_l3_n942(x)
+ else
+ fun_l3_n595(x)
+ end
+end
+
+def fun_l2_n84(x)
+ if (x < 1)
+ fun_l3_n629(x)
+ else
+ fun_l3_n622(x)
+ end
+end
+
+def fun_l2_n85(x)
+ if (x < 1)
+ fun_l3_n415(x)
+ else
+ fun_l3_n615(x)
+ end
+end
+
+def fun_l2_n86(x)
+ if (x < 1)
+ fun_l3_n295(x)
+ else
+ fun_l3_n809(x)
+ end
+end
+
+def fun_l2_n87(x)
+ if (x < 1)
+ fun_l3_n957(x)
+ else
+ fun_l3_n452(x)
+ end
+end
+
+def fun_l2_n88(x)
+ if (x < 1)
+ fun_l3_n490(x)
+ else
+ fun_l3_n184(x)
+ end
+end
+
+def fun_l2_n89(x)
+ if (x < 1)
+ fun_l3_n483(x)
+ else
+ fun_l3_n963(x)
+ end
+end
+
+def fun_l2_n90(x)
+ if (x < 1)
+ fun_l3_n262(x)
+ else
+ fun_l3_n458(x)
+ end
+end
+
+def fun_l2_n91(x)
+ if (x < 1)
+ fun_l3_n277(x)
+ else
+ fun_l3_n30(x)
+ end
+end
+
+def fun_l2_n92(x)
+ if (x < 1)
+ fun_l3_n310(x)
+ else
+ fun_l3_n215(x)
+ end
+end
+
+def fun_l2_n93(x)
+ if (x < 1)
+ fun_l3_n660(x)
+ else
+ fun_l3_n798(x)
+ end
+end
+
+def fun_l2_n94(x)
+ if (x < 1)
+ fun_l3_n761(x)
+ else
+ fun_l3_n10(x)
+ end
+end
+
+def fun_l2_n95(x)
+ if (x < 1)
+ fun_l3_n4(x)
+ else
+ fun_l3_n65(x)
+ end
+end
+
+def fun_l2_n96(x)
+ if (x < 1)
+ fun_l3_n444(x)
+ else
+ fun_l3_n378(x)
+ end
+end
+
+def fun_l2_n97(x)
+ if (x < 1)
+ fun_l3_n119(x)
+ else
+ fun_l3_n96(x)
+ end
+end
+
+def fun_l2_n98(x)
+ if (x < 1)
+ fun_l3_n970(x)
+ else
+ fun_l3_n471(x)
+ end
+end
+
+def fun_l2_n99(x)
+ if (x < 1)
+ fun_l3_n544(x)
+ else
+ fun_l3_n1(x)
+ end
+end
+
+def fun_l2_n100(x)
+ if (x < 1)
+ fun_l3_n253(x)
+ else
+ fun_l3_n332(x)
+ end
+end
+
+def fun_l2_n101(x)
+ if (x < 1)
+ fun_l3_n69(x)
+ else
+ fun_l3_n946(x)
+ end
+end
+
+def fun_l2_n102(x)
+ if (x < 1)
+ fun_l3_n458(x)
+ else
+ fun_l3_n885(x)
+ end
+end
+
+def fun_l2_n103(x)
+ if (x < 1)
+ fun_l3_n732(x)
+ else
+ fun_l3_n601(x)
+ end
+end
+
+def fun_l2_n104(x)
+ if (x < 1)
+ fun_l3_n737(x)
+ else
+ fun_l3_n530(x)
+ end
+end
+
+def fun_l2_n105(x)
+ if (x < 1)
+ fun_l3_n125(x)
+ else
+ fun_l3_n561(x)
+ end
+end
+
+def fun_l2_n106(x)
+ if (x < 1)
+ fun_l3_n717(x)
+ else
+ fun_l3_n660(x)
+ end
+end
+
+def fun_l2_n107(x)
+ if (x < 1)
+ fun_l3_n863(x)
+ else
+ fun_l3_n211(x)
+ end
+end
+
+def fun_l2_n108(x)
+ if (x < 1)
+ fun_l3_n221(x)
+ else
+ fun_l3_n718(x)
+ end
+end
+
+def fun_l2_n109(x)
+ if (x < 1)
+ fun_l3_n100(x)
+ else
+ fun_l3_n673(x)
+ end
+end
+
+def fun_l2_n110(x)
+ if (x < 1)
+ fun_l3_n434(x)
+ else
+ fun_l3_n348(x)
+ end
+end
+
+def fun_l2_n111(x)
+ if (x < 1)
+ fun_l3_n923(x)
+ else
+ fun_l3_n413(x)
+ end
+end
+
+def fun_l2_n112(x)
+ if (x < 1)
+ fun_l3_n253(x)
+ else
+ fun_l3_n461(x)
+ end
+end
+
+def fun_l2_n113(x)
+ if (x < 1)
+ fun_l3_n946(x)
+ else
+ fun_l3_n406(x)
+ end
+end
+
+def fun_l2_n114(x)
+ if (x < 1)
+ fun_l3_n976(x)
+ else
+ fun_l3_n354(x)
+ end
+end
+
+def fun_l2_n115(x)
+ if (x < 1)
+ fun_l3_n205(x)
+ else
+ fun_l3_n798(x)
+ end
+end
+
+def fun_l2_n116(x)
+ if (x < 1)
+ fun_l3_n304(x)
+ else
+ fun_l3_n145(x)
+ end
+end
+
+def fun_l2_n117(x)
+ if (x < 1)
+ fun_l3_n917(x)
+ else
+ fun_l3_n276(x)
+ end
+end
+
+def fun_l2_n118(x)
+ if (x < 1)
+ fun_l3_n880(x)
+ else
+ fun_l3_n532(x)
+ end
+end
+
+def fun_l2_n119(x)
+ if (x < 1)
+ fun_l3_n39(x)
+ else
+ fun_l3_n549(x)
+ end
+end
+
+def fun_l2_n120(x)
+ if (x < 1)
+ fun_l3_n325(x)
+ else
+ fun_l3_n250(x)
+ end
+end
+
+def fun_l2_n121(x)
+ if (x < 1)
+ fun_l3_n953(x)
+ else
+ fun_l3_n259(x)
+ end
+end
+
+def fun_l2_n122(x)
+ if (x < 1)
+ fun_l3_n572(x)
+ else
+ fun_l3_n747(x)
+ end
+end
+
+def fun_l2_n123(x)
+ if (x < 1)
+ fun_l3_n639(x)
+ else
+ fun_l3_n810(x)
+ end
+end
+
+def fun_l2_n124(x)
+ if (x < 1)
+ fun_l3_n954(x)
+ else
+ fun_l3_n814(x)
+ end
+end
+
+def fun_l2_n125(x)
+ if (x < 1)
+ fun_l3_n932(x)
+ else
+ fun_l3_n275(x)
+ end
+end
+
+def fun_l2_n126(x)
+ if (x < 1)
+ fun_l3_n472(x)
+ else
+ fun_l3_n184(x)
+ end
+end
+
+def fun_l2_n127(x)
+ if (x < 1)
+ fun_l3_n725(x)
+ else
+ fun_l3_n497(x)
+ end
+end
+
+def fun_l2_n128(x)
+ if (x < 1)
+ fun_l3_n94(x)
+ else
+ fun_l3_n801(x)
+ end
+end
+
+def fun_l2_n129(x)
+ if (x < 1)
+ fun_l3_n476(x)
+ else
+ fun_l3_n936(x)
+ end
+end
+
+def fun_l2_n130(x)
+ if (x < 1)
+ fun_l3_n706(x)
+ else
+ fun_l3_n738(x)
+ end
+end
+
+def fun_l2_n131(x)
+ if (x < 1)
+ fun_l3_n535(x)
+ else
+ fun_l3_n401(x)
+ end
+end
+
+def fun_l2_n132(x)
+ if (x < 1)
+ fun_l3_n459(x)
+ else
+ fun_l3_n998(x)
+ end
+end
+
+def fun_l2_n133(x)
+ if (x < 1)
+ fun_l3_n158(x)
+ else
+ fun_l3_n288(x)
+ end
+end
+
+def fun_l2_n134(x)
+ if (x < 1)
+ fun_l3_n47(x)
+ else
+ fun_l3_n104(x)
+ end
+end
+
+def fun_l2_n135(x)
+ if (x < 1)
+ fun_l3_n154(x)
+ else
+ fun_l3_n239(x)
+ end
+end
+
+def fun_l2_n136(x)
+ if (x < 1)
+ fun_l3_n634(x)
+ else
+ fun_l3_n718(x)
+ end
+end
+
+def fun_l2_n137(x)
+ if (x < 1)
+ fun_l3_n867(x)
+ else
+ fun_l3_n578(x)
+ end
+end
+
+def fun_l2_n138(x)
+ if (x < 1)
+ fun_l3_n905(x)
+ else
+ fun_l3_n434(x)
+ end
+end
+
+def fun_l2_n139(x)
+ if (x < 1)
+ fun_l3_n596(x)
+ else
+ fun_l3_n516(x)
+ end
+end
+
+def fun_l2_n140(x)
+ if (x < 1)
+ fun_l3_n150(x)
+ else
+ fun_l3_n901(x)
+ end
+end
+
+def fun_l2_n141(x)
+ if (x < 1)
+ fun_l3_n792(x)
+ else
+ fun_l3_n50(x)
+ end
+end
+
+def fun_l2_n142(x)
+ if (x < 1)
+ fun_l3_n539(x)
+ else
+ fun_l3_n206(x)
+ end
+end
+
+def fun_l2_n143(x)
+ if (x < 1)
+ fun_l3_n690(x)
+ else
+ fun_l3_n788(x)
+ end
+end
+
+def fun_l2_n144(x)
+ if (x < 1)
+ fun_l3_n535(x)
+ else
+ fun_l3_n868(x)
+ end
+end
+
+def fun_l2_n145(x)
+ if (x < 1)
+ fun_l3_n869(x)
+ else
+ fun_l3_n865(x)
+ end
+end
+
+def fun_l2_n146(x)
+ if (x < 1)
+ fun_l3_n574(x)
+ else
+ fun_l3_n798(x)
+ end
+end
+
+def fun_l2_n147(x)
+ if (x < 1)
+ fun_l3_n876(x)
+ else
+ fun_l3_n658(x)
+ end
+end
+
+def fun_l2_n148(x)
+ if (x < 1)
+ fun_l3_n582(x)
+ else
+ fun_l3_n694(x)
+ end
+end
+
+def fun_l2_n149(x)
+ if (x < 1)
+ fun_l3_n137(x)
+ else
+ fun_l3_n187(x)
+ end
+end
+
+def fun_l2_n150(x)
+ if (x < 1)
+ fun_l3_n30(x)
+ else
+ fun_l3_n666(x)
+ end
+end
+
+def fun_l2_n151(x)
+ if (x < 1)
+ fun_l3_n264(x)
+ else
+ fun_l3_n236(x)
+ end
+end
+
+def fun_l2_n152(x)
+ if (x < 1)
+ fun_l3_n984(x)
+ else
+ fun_l3_n469(x)
+ end
+end
+
+def fun_l2_n153(x)
+ if (x < 1)
+ fun_l3_n152(x)
+ else
+ fun_l3_n578(x)
+ end
+end
+
+def fun_l2_n154(x)
+ if (x < 1)
+ fun_l3_n452(x)
+ else
+ fun_l3_n807(x)
+ end
+end
+
+def fun_l2_n155(x)
+ if (x < 1)
+ fun_l3_n82(x)
+ else
+ fun_l3_n589(x)
+ end
+end
+
+def fun_l2_n156(x)
+ if (x < 1)
+ fun_l3_n356(x)
+ else
+ fun_l3_n454(x)
+ end
+end
+
+def fun_l2_n157(x)
+ if (x < 1)
+ fun_l3_n814(x)
+ else
+ fun_l3_n376(x)
+ end
+end
+
+def fun_l2_n158(x)
+ if (x < 1)
+ fun_l3_n755(x)
+ else
+ fun_l3_n966(x)
+ end
+end
+
+def fun_l2_n159(x)
+ if (x < 1)
+ fun_l3_n167(x)
+ else
+ fun_l3_n571(x)
+ end
+end
+
+def fun_l2_n160(x)
+ if (x < 1)
+ fun_l3_n919(x)
+ else
+ fun_l3_n771(x)
+ end
+end
+
+def fun_l2_n161(x)
+ if (x < 1)
+ fun_l3_n147(x)
+ else
+ fun_l3_n936(x)
+ end
+end
+
+def fun_l2_n162(x)
+ if (x < 1)
+ fun_l3_n638(x)
+ else
+ fun_l3_n23(x)
+ end
+end
+
+def fun_l2_n163(x)
+ if (x < 1)
+ fun_l3_n349(x)
+ else
+ fun_l3_n46(x)
+ end
+end
+
+def fun_l2_n164(x)
+ if (x < 1)
+ fun_l3_n55(x)
+ else
+ fun_l3_n795(x)
+ end
+end
+
+def fun_l2_n165(x)
+ if (x < 1)
+ fun_l3_n131(x)
+ else
+ fun_l3_n233(x)
+ end
+end
+
+def fun_l2_n166(x)
+ if (x < 1)
+ fun_l3_n324(x)
+ else
+ fun_l3_n215(x)
+ end
+end
+
+def fun_l2_n167(x)
+ if (x < 1)
+ fun_l3_n244(x)
+ else
+ fun_l3_n641(x)
+ end
+end
+
+def fun_l2_n168(x)
+ if (x < 1)
+ fun_l3_n744(x)
+ else
+ fun_l3_n337(x)
+ end
+end
+
+def fun_l2_n169(x)
+ if (x < 1)
+ fun_l3_n760(x)
+ else
+ fun_l3_n82(x)
+ end
+end
+
+def fun_l2_n170(x)
+ if (x < 1)
+ fun_l3_n877(x)
+ else
+ fun_l3_n566(x)
+ end
+end
+
+def fun_l2_n171(x)
+ if (x < 1)
+ fun_l3_n211(x)
+ else
+ fun_l3_n101(x)
+ end
+end
+
+def fun_l2_n172(x)
+ if (x < 1)
+ fun_l3_n143(x)
+ else
+ fun_l3_n537(x)
+ end
+end
+
+def fun_l2_n173(x)
+ if (x < 1)
+ fun_l3_n210(x)
+ else
+ fun_l3_n150(x)
+ end
+end
+
+def fun_l2_n174(x)
+ if (x < 1)
+ fun_l3_n772(x)
+ else
+ fun_l3_n42(x)
+ end
+end
+
+def fun_l2_n175(x)
+ if (x < 1)
+ fun_l3_n482(x)
+ else
+ fun_l3_n606(x)
+ end
+end
+
+def fun_l2_n176(x)
+ if (x < 1)
+ fun_l3_n68(x)
+ else
+ fun_l3_n888(x)
+ end
+end
+
+def fun_l2_n177(x)
+ if (x < 1)
+ fun_l3_n313(x)
+ else
+ fun_l3_n947(x)
+ end
+end
+
+def fun_l2_n178(x)
+ if (x < 1)
+ fun_l3_n116(x)
+ else
+ fun_l3_n812(x)
+ end
+end
+
+def fun_l2_n179(x)
+ if (x < 1)
+ fun_l3_n741(x)
+ else
+ fun_l3_n994(x)
+ end
+end
+
+def fun_l2_n180(x)
+ if (x < 1)
+ fun_l3_n667(x)
+ else
+ fun_l3_n335(x)
+ end
+end
+
+def fun_l2_n181(x)
+ if (x < 1)
+ fun_l3_n994(x)
+ else
+ fun_l3_n502(x)
+ end
+end
+
+def fun_l2_n182(x)
+ if (x < 1)
+ fun_l3_n528(x)
+ else
+ fun_l3_n272(x)
+ end
+end
+
+def fun_l2_n183(x)
+ if (x < 1)
+ fun_l3_n649(x)
+ else
+ fun_l3_n200(x)
+ end
+end
+
+def fun_l2_n184(x)
+ if (x < 1)
+ fun_l3_n105(x)
+ else
+ fun_l3_n897(x)
+ end
+end
+
+def fun_l2_n185(x)
+ if (x < 1)
+ fun_l3_n980(x)
+ else
+ fun_l3_n651(x)
+ end
+end
+
+def fun_l2_n186(x)
+ if (x < 1)
+ fun_l3_n835(x)
+ else
+ fun_l3_n388(x)
+ end
+end
+
+def fun_l2_n187(x)
+ if (x < 1)
+ fun_l3_n769(x)
+ else
+ fun_l3_n162(x)
+ end
+end
+
+def fun_l2_n188(x)
+ if (x < 1)
+ fun_l3_n934(x)
+ else
+ fun_l3_n874(x)
+ end
+end
+
+def fun_l2_n189(x)
+ if (x < 1)
+ fun_l3_n407(x)
+ else
+ fun_l3_n180(x)
+ end
+end
+
+def fun_l2_n190(x)
+ if (x < 1)
+ fun_l3_n594(x)
+ else
+ fun_l3_n929(x)
+ end
+end
+
+def fun_l2_n191(x)
+ if (x < 1)
+ fun_l3_n303(x)
+ else
+ fun_l3_n528(x)
+ end
+end
+
+def fun_l2_n192(x)
+ if (x < 1)
+ fun_l3_n768(x)
+ else
+ fun_l3_n226(x)
+ end
+end
+
+def fun_l2_n193(x)
+ if (x < 1)
+ fun_l3_n618(x)
+ else
+ fun_l3_n821(x)
+ end
+end
+
+def fun_l2_n194(x)
+ if (x < 1)
+ fun_l3_n953(x)
+ else
+ fun_l3_n969(x)
+ end
+end
+
+def fun_l2_n195(x)
+ if (x < 1)
+ fun_l3_n574(x)
+ else
+ fun_l3_n344(x)
+ end
+end
+
+def fun_l2_n196(x)
+ if (x < 1)
+ fun_l3_n943(x)
+ else
+ fun_l3_n304(x)
+ end
+end
+
+def fun_l2_n197(x)
+ if (x < 1)
+ fun_l3_n276(x)
+ else
+ fun_l3_n502(x)
+ end
+end
+
+def fun_l2_n198(x)
+ if (x < 1)
+ fun_l3_n597(x)
+ else
+ fun_l3_n197(x)
+ end
+end
+
+def fun_l2_n199(x)
+ if (x < 1)
+ fun_l3_n517(x)
+ else
+ fun_l3_n34(x)
+ end
+end
+
+def fun_l2_n200(x)
+ if (x < 1)
+ fun_l3_n980(x)
+ else
+ fun_l3_n672(x)
+ end
+end
+
+def fun_l2_n201(x)
+ if (x < 1)
+ fun_l3_n510(x)
+ else
+ fun_l3_n804(x)
+ end
+end
+
+def fun_l2_n202(x)
+ if (x < 1)
+ fun_l3_n735(x)
+ else
+ fun_l3_n367(x)
+ end
+end
+
+def fun_l2_n203(x)
+ if (x < 1)
+ fun_l3_n546(x)
+ else
+ fun_l3_n502(x)
+ end
+end
+
+def fun_l2_n204(x)
+ if (x < 1)
+ fun_l3_n189(x)
+ else
+ fun_l3_n639(x)
+ end
+end
+
+def fun_l2_n205(x)
+ if (x < 1)
+ fun_l3_n396(x)
+ else
+ fun_l3_n862(x)
+ end
+end
+
+def fun_l2_n206(x)
+ if (x < 1)
+ fun_l3_n679(x)
+ else
+ fun_l3_n881(x)
+ end
+end
+
+def fun_l2_n207(x)
+ if (x < 1)
+ fun_l3_n342(x)
+ else
+ fun_l3_n522(x)
+ end
+end
+
+def fun_l2_n208(x)
+ if (x < 1)
+ fun_l3_n173(x)
+ else
+ fun_l3_n323(x)
+ end
+end
+
+def fun_l2_n209(x)
+ if (x < 1)
+ fun_l3_n693(x)
+ else
+ fun_l3_n160(x)
+ end
+end
+
+def fun_l2_n210(x)
+ if (x < 1)
+ fun_l3_n827(x)
+ else
+ fun_l3_n580(x)
+ end
+end
+
+def fun_l2_n211(x)
+ if (x < 1)
+ fun_l3_n213(x)
+ else
+ fun_l3_n100(x)
+ end
+end
+
+def fun_l2_n212(x)
+ if (x < 1)
+ fun_l3_n915(x)
+ else
+ fun_l3_n531(x)
+ end
+end
+
+def fun_l2_n213(x)
+ if (x < 1)
+ fun_l3_n260(x)
+ else
+ fun_l3_n279(x)
+ end
+end
+
+def fun_l2_n214(x)
+ if (x < 1)
+ fun_l3_n468(x)
+ else
+ fun_l3_n257(x)
+ end
+end
+
+def fun_l2_n215(x)
+ if (x < 1)
+ fun_l3_n990(x)
+ else
+ fun_l3_n740(x)
+ end
+end
+
+def fun_l2_n216(x)
+ if (x < 1)
+ fun_l3_n291(x)
+ else
+ fun_l3_n625(x)
+ end
+end
+
+def fun_l2_n217(x)
+ if (x < 1)
+ fun_l3_n493(x)
+ else
+ fun_l3_n726(x)
+ end
+end
+
+def fun_l2_n218(x)
+ if (x < 1)
+ fun_l3_n714(x)
+ else
+ fun_l3_n793(x)
+ end
+end
+
+def fun_l2_n219(x)
+ if (x < 1)
+ fun_l3_n683(x)
+ else
+ fun_l3_n715(x)
+ end
+end
+
+def fun_l2_n220(x)
+ if (x < 1)
+ fun_l3_n249(x)
+ else
+ fun_l3_n266(x)
+ end
+end
+
+def fun_l2_n221(x)
+ if (x < 1)
+ fun_l3_n324(x)
+ else
+ fun_l3_n22(x)
+ end
+end
+
+def fun_l2_n222(x)
+ if (x < 1)
+ fun_l3_n978(x)
+ else
+ fun_l3_n885(x)
+ end
+end
+
+def fun_l2_n223(x)
+ if (x < 1)
+ fun_l3_n821(x)
+ else
+ fun_l3_n412(x)
+ end
+end
+
+def fun_l2_n224(x)
+ if (x < 1)
+ fun_l3_n636(x)
+ else
+ fun_l3_n641(x)
+ end
+end
+
+def fun_l2_n225(x)
+ if (x < 1)
+ fun_l3_n971(x)
+ else
+ fun_l3_n288(x)
+ end
+end
+
+def fun_l2_n226(x)
+ if (x < 1)
+ fun_l3_n429(x)
+ else
+ fun_l3_n323(x)
+ end
+end
+
+def fun_l2_n227(x)
+ if (x < 1)
+ fun_l3_n909(x)
+ else
+ fun_l3_n267(x)
+ end
+end
+
+def fun_l2_n228(x)
+ if (x < 1)
+ fun_l3_n437(x)
+ else
+ fun_l3_n985(x)
+ end
+end
+
+def fun_l2_n229(x)
+ if (x < 1)
+ fun_l3_n131(x)
+ else
+ fun_l3_n298(x)
+ end
+end
+
+def fun_l2_n230(x)
+ if (x < 1)
+ fun_l3_n865(x)
+ else
+ fun_l3_n288(x)
+ end
+end
+
+def fun_l2_n231(x)
+ if (x < 1)
+ fun_l3_n772(x)
+ else
+ fun_l3_n531(x)
+ end
+end
+
+def fun_l2_n232(x)
+ if (x < 1)
+ fun_l3_n30(x)
+ else
+ fun_l3_n422(x)
+ end
+end
+
+def fun_l2_n233(x)
+ if (x < 1)
+ fun_l3_n82(x)
+ else
+ fun_l3_n32(x)
+ end
+end
+
+def fun_l2_n234(x)
+ if (x < 1)
+ fun_l3_n25(x)
+ else
+ fun_l3_n518(x)
+ end
+end
+
+def fun_l2_n235(x)
+ if (x < 1)
+ fun_l3_n313(x)
+ else
+ fun_l3_n179(x)
+ end
+end
+
+def fun_l2_n236(x)
+ if (x < 1)
+ fun_l3_n819(x)
+ else
+ fun_l3_n586(x)
+ end
+end
+
+def fun_l2_n237(x)
+ if (x < 1)
+ fun_l3_n398(x)
+ else
+ fun_l3_n921(x)
+ end
+end
+
+def fun_l2_n238(x)
+ if (x < 1)
+ fun_l3_n667(x)
+ else
+ fun_l3_n795(x)
+ end
+end
+
+def fun_l2_n239(x)
+ if (x < 1)
+ fun_l3_n862(x)
+ else
+ fun_l3_n222(x)
+ end
+end
+
+def fun_l2_n240(x)
+ if (x < 1)
+ fun_l3_n968(x)
+ else
+ fun_l3_n320(x)
+ end
+end
+
+def fun_l2_n241(x)
+ if (x < 1)
+ fun_l3_n559(x)
+ else
+ fun_l3_n50(x)
+ end
+end
+
+def fun_l2_n242(x)
+ if (x < 1)
+ fun_l3_n647(x)
+ else
+ fun_l3_n232(x)
+ end
+end
+
+def fun_l2_n243(x)
+ if (x < 1)
+ fun_l3_n425(x)
+ else
+ fun_l3_n815(x)
+ end
+end
+
+def fun_l2_n244(x)
+ if (x < 1)
+ fun_l3_n752(x)
+ else
+ fun_l3_n690(x)
+ end
+end
+
+def fun_l2_n245(x)
+ if (x < 1)
+ fun_l3_n382(x)
+ else
+ fun_l3_n817(x)
+ end
+end
+
+def fun_l2_n246(x)
+ if (x < 1)
+ fun_l3_n880(x)
+ else
+ fun_l3_n380(x)
+ end
+end
+
+def fun_l2_n247(x)
+ if (x < 1)
+ fun_l3_n444(x)
+ else
+ fun_l3_n75(x)
+ end
+end
+
+def fun_l2_n248(x)
+ if (x < 1)
+ fun_l3_n368(x)
+ else
+ fun_l3_n314(x)
+ end
+end
+
+def fun_l2_n249(x)
+ if (x < 1)
+ fun_l3_n131(x)
+ else
+ fun_l3_n398(x)
+ end
+end
+
+def fun_l2_n250(x)
+ if (x < 1)
+ fun_l3_n376(x)
+ else
+ fun_l3_n449(x)
+ end
+end
+
+def fun_l2_n251(x)
+ if (x < 1)
+ fun_l3_n404(x)
+ else
+ fun_l3_n301(x)
+ end
+end
+
+def fun_l2_n252(x)
+ if (x < 1)
+ fun_l3_n274(x)
+ else
+ fun_l3_n430(x)
+ end
+end
+
+def fun_l2_n253(x)
+ if (x < 1)
+ fun_l3_n734(x)
+ else
+ fun_l3_n255(x)
+ end
+end
+
+def fun_l2_n254(x)
+ if (x < 1)
+ fun_l3_n30(x)
+ else
+ fun_l3_n944(x)
+ end
+end
+
+def fun_l2_n255(x)
+ if (x < 1)
+ fun_l3_n978(x)
+ else
+ fun_l3_n898(x)
+ end
+end
+
+def fun_l2_n256(x)
+ if (x < 1)
+ fun_l3_n175(x)
+ else
+ fun_l3_n477(x)
+ end
+end
+
+def fun_l2_n257(x)
+ if (x < 1)
+ fun_l3_n84(x)
+ else
+ fun_l3_n772(x)
+ end
+end
+
+def fun_l2_n258(x)
+ if (x < 1)
+ fun_l3_n605(x)
+ else
+ fun_l3_n144(x)
+ end
+end
+
+def fun_l2_n259(x)
+ if (x < 1)
+ fun_l3_n51(x)
+ else
+ fun_l3_n542(x)
+ end
+end
+
+def fun_l2_n260(x)
+ if (x < 1)
+ fun_l3_n940(x)
+ else
+ fun_l3_n316(x)
+ end
+end
+
+def fun_l2_n261(x)
+ if (x < 1)
+ fun_l3_n702(x)
+ else
+ fun_l3_n197(x)
+ end
+end
+
+def fun_l2_n262(x)
+ if (x < 1)
+ fun_l3_n164(x)
+ else
+ fun_l3_n21(x)
+ end
+end
+
+def fun_l2_n263(x)
+ if (x < 1)
+ fun_l3_n432(x)
+ else
+ fun_l3_n496(x)
+ end
+end
+
+def fun_l2_n264(x)
+ if (x < 1)
+ fun_l3_n265(x)
+ else
+ fun_l3_n40(x)
+ end
+end
+
+def fun_l2_n265(x)
+ if (x < 1)
+ fun_l3_n951(x)
+ else
+ fun_l3_n723(x)
+ end
+end
+
+def fun_l2_n266(x)
+ if (x < 1)
+ fun_l3_n279(x)
+ else
+ fun_l3_n167(x)
+ end
+end
+
+def fun_l2_n267(x)
+ if (x < 1)
+ fun_l3_n51(x)
+ else
+ fun_l3_n283(x)
+ end
+end
+
+def fun_l2_n268(x)
+ if (x < 1)
+ fun_l3_n389(x)
+ else
+ fun_l3_n264(x)
+ end
+end
+
+def fun_l2_n269(x)
+ if (x < 1)
+ fun_l3_n68(x)
+ else
+ fun_l3_n484(x)
+ end
+end
+
+def fun_l2_n270(x)
+ if (x < 1)
+ fun_l3_n435(x)
+ else
+ fun_l3_n204(x)
+ end
+end
+
+def fun_l2_n271(x)
+ if (x < 1)
+ fun_l3_n717(x)
+ else
+ fun_l3_n763(x)
+ end
+end
+
+def fun_l2_n272(x)
+ if (x < 1)
+ fun_l3_n901(x)
+ else
+ fun_l3_n332(x)
+ end
+end
+
+def fun_l2_n273(x)
+ if (x < 1)
+ fun_l3_n358(x)
+ else
+ fun_l3_n608(x)
+ end
+end
+
+def fun_l2_n274(x)
+ if (x < 1)
+ fun_l3_n885(x)
+ else
+ fun_l3_n490(x)
+ end
+end
+
+def fun_l2_n275(x)
+ if (x < 1)
+ fun_l3_n82(x)
+ else
+ fun_l3_n87(x)
+ end
+end
+
+def fun_l2_n276(x)
+ if (x < 1)
+ fun_l3_n698(x)
+ else
+ fun_l3_n846(x)
+ end
+end
+
+def fun_l2_n277(x)
+ if (x < 1)
+ fun_l3_n690(x)
+ else
+ fun_l3_n457(x)
+ end
+end
+
+def fun_l2_n278(x)
+ if (x < 1)
+ fun_l3_n925(x)
+ else
+ fun_l3_n674(x)
+ end
+end
+
+def fun_l2_n279(x)
+ if (x < 1)
+ fun_l3_n797(x)
+ else
+ fun_l3_n315(x)
+ end
+end
+
+def fun_l2_n280(x)
+ if (x < 1)
+ fun_l3_n73(x)
+ else
+ fun_l3_n2(x)
+ end
+end
+
+def fun_l2_n281(x)
+ if (x < 1)
+ fun_l3_n301(x)
+ else
+ fun_l3_n315(x)
+ end
+end
+
+def fun_l2_n282(x)
+ if (x < 1)
+ fun_l3_n473(x)
+ else
+ fun_l3_n639(x)
+ end
+end
+
+def fun_l2_n283(x)
+ if (x < 1)
+ fun_l3_n812(x)
+ else
+ fun_l3_n241(x)
+ end
+end
+
+def fun_l2_n284(x)
+ if (x < 1)
+ fun_l3_n0(x)
+ else
+ fun_l3_n445(x)
+ end
+end
+
+def fun_l2_n285(x)
+ if (x < 1)
+ fun_l3_n587(x)
+ else
+ fun_l3_n806(x)
+ end
+end
+
+def fun_l2_n286(x)
+ if (x < 1)
+ fun_l3_n169(x)
+ else
+ fun_l3_n546(x)
+ end
+end
+
+def fun_l2_n287(x)
+ if (x < 1)
+ fun_l3_n623(x)
+ else
+ fun_l3_n677(x)
+ end
+end
+
+def fun_l2_n288(x)
+ if (x < 1)
+ fun_l3_n804(x)
+ else
+ fun_l3_n80(x)
+ end
+end
+
+def fun_l2_n289(x)
+ if (x < 1)
+ fun_l3_n812(x)
+ else
+ fun_l3_n197(x)
+ end
+end
+
+def fun_l2_n290(x)
+ if (x < 1)
+ fun_l3_n551(x)
+ else
+ fun_l3_n896(x)
+ end
+end
+
+def fun_l2_n291(x)
+ if (x < 1)
+ fun_l3_n686(x)
+ else
+ fun_l3_n884(x)
+ end
+end
+
+def fun_l2_n292(x)
+ if (x < 1)
+ fun_l3_n427(x)
+ else
+ fun_l3_n251(x)
+ end
+end
+
+def fun_l2_n293(x)
+ if (x < 1)
+ fun_l3_n375(x)
+ else
+ fun_l3_n309(x)
+ end
+end
+
+def fun_l2_n294(x)
+ if (x < 1)
+ fun_l3_n371(x)
+ else
+ fun_l3_n184(x)
+ end
+end
+
+def fun_l2_n295(x)
+ if (x < 1)
+ fun_l3_n332(x)
+ else
+ fun_l3_n250(x)
+ end
+end
+
+def fun_l2_n296(x)
+ if (x < 1)
+ fun_l3_n333(x)
+ else
+ fun_l3_n254(x)
+ end
+end
+
+def fun_l2_n297(x)
+ if (x < 1)
+ fun_l3_n24(x)
+ else
+ fun_l3_n528(x)
+ end
+end
+
+def fun_l2_n298(x)
+ if (x < 1)
+ fun_l3_n96(x)
+ else
+ fun_l3_n653(x)
+ end
+end
+
+def fun_l2_n299(x)
+ if (x < 1)
+ fun_l3_n920(x)
+ else
+ fun_l3_n475(x)
+ end
+end
+
+def fun_l2_n300(x)
+ if (x < 1)
+ fun_l3_n337(x)
+ else
+ fun_l3_n378(x)
+ end
+end
+
+def fun_l2_n301(x)
+ if (x < 1)
+ fun_l3_n778(x)
+ else
+ fun_l3_n216(x)
+ end
+end
+
+def fun_l2_n302(x)
+ if (x < 1)
+ fun_l3_n366(x)
+ else
+ fun_l3_n562(x)
+ end
+end
+
+def fun_l2_n303(x)
+ if (x < 1)
+ fun_l3_n656(x)
+ else
+ fun_l3_n779(x)
+ end
+end
+
+def fun_l2_n304(x)
+ if (x < 1)
+ fun_l3_n526(x)
+ else
+ fun_l3_n783(x)
+ end
+end
+
+def fun_l2_n305(x)
+ if (x < 1)
+ fun_l3_n500(x)
+ else
+ fun_l3_n744(x)
+ end
+end
+
+def fun_l2_n306(x)
+ if (x < 1)
+ fun_l3_n953(x)
+ else
+ fun_l3_n394(x)
+ end
+end
+
+def fun_l2_n307(x)
+ if (x < 1)
+ fun_l3_n385(x)
+ else
+ fun_l3_n937(x)
+ end
+end
+
+def fun_l2_n308(x)
+ if (x < 1)
+ fun_l3_n742(x)
+ else
+ fun_l3_n984(x)
+ end
+end
+
+def fun_l2_n309(x)
+ if (x < 1)
+ fun_l3_n388(x)
+ else
+ fun_l3_n704(x)
+ end
+end
+
+def fun_l2_n310(x)
+ if (x < 1)
+ fun_l3_n252(x)
+ else
+ fun_l3_n585(x)
+ end
+end
+
+def fun_l2_n311(x)
+ if (x < 1)
+ fun_l3_n121(x)
+ else
+ fun_l3_n815(x)
+ end
+end
+
+def fun_l2_n312(x)
+ if (x < 1)
+ fun_l3_n657(x)
+ else
+ fun_l3_n338(x)
+ end
+end
+
+def fun_l2_n313(x)
+ if (x < 1)
+ fun_l3_n115(x)
+ else
+ fun_l3_n748(x)
+ end
+end
+
+def fun_l2_n314(x)
+ if (x < 1)
+ fun_l3_n237(x)
+ else
+ fun_l3_n319(x)
+ end
+end
+
+def fun_l2_n315(x)
+ if (x < 1)
+ fun_l3_n684(x)
+ else
+ fun_l3_n428(x)
+ end
+end
+
+def fun_l2_n316(x)
+ if (x < 1)
+ fun_l3_n416(x)
+ else
+ fun_l3_n499(x)
+ end
+end
+
+def fun_l2_n317(x)
+ if (x < 1)
+ fun_l3_n305(x)
+ else
+ fun_l3_n888(x)
+ end
+end
+
+def fun_l2_n318(x)
+ if (x < 1)
+ fun_l3_n506(x)
+ else
+ fun_l3_n579(x)
+ end
+end
+
+def fun_l2_n319(x)
+ if (x < 1)
+ fun_l3_n324(x)
+ else
+ fun_l3_n459(x)
+ end
+end
+
+def fun_l2_n320(x)
+ if (x < 1)
+ fun_l3_n278(x)
+ else
+ fun_l3_n89(x)
+ end
+end
+
+def fun_l2_n321(x)
+ if (x < 1)
+ fun_l3_n319(x)
+ else
+ fun_l3_n903(x)
+ end
+end
+
+def fun_l2_n322(x)
+ if (x < 1)
+ fun_l3_n96(x)
+ else
+ fun_l3_n334(x)
+ end
+end
+
+def fun_l2_n323(x)
+ if (x < 1)
+ fun_l3_n913(x)
+ else
+ fun_l3_n309(x)
+ end
+end
+
+def fun_l2_n324(x)
+ if (x < 1)
+ fun_l3_n186(x)
+ else
+ fun_l3_n453(x)
+ end
+end
+
+def fun_l2_n325(x)
+ if (x < 1)
+ fun_l3_n223(x)
+ else
+ fun_l3_n761(x)
+ end
+end
+
+def fun_l2_n326(x)
+ if (x < 1)
+ fun_l3_n757(x)
+ else
+ fun_l3_n700(x)
+ end
+end
+
+def fun_l2_n327(x)
+ if (x < 1)
+ fun_l3_n316(x)
+ else
+ fun_l3_n3(x)
+ end
+end
+
+def fun_l2_n328(x)
+ if (x < 1)
+ fun_l3_n680(x)
+ else
+ fun_l3_n275(x)
+ end
+end
+
+def fun_l2_n329(x)
+ if (x < 1)
+ fun_l3_n731(x)
+ else
+ fun_l3_n971(x)
+ end
+end
+
+def fun_l2_n330(x)
+ if (x < 1)
+ fun_l3_n672(x)
+ else
+ fun_l3_n389(x)
+ end
+end
+
+def fun_l2_n331(x)
+ if (x < 1)
+ fun_l3_n947(x)
+ else
+ fun_l3_n778(x)
+ end
+end
+
+def fun_l2_n332(x)
+ if (x < 1)
+ fun_l3_n23(x)
+ else
+ fun_l3_n478(x)
+ end
+end
+
+def fun_l2_n333(x)
+ if (x < 1)
+ fun_l3_n778(x)
+ else
+ fun_l3_n894(x)
+ end
+end
+
+def fun_l2_n334(x)
+ if (x < 1)
+ fun_l3_n198(x)
+ else
+ fun_l3_n830(x)
+ end
+end
+
+def fun_l2_n335(x)
+ if (x < 1)
+ fun_l3_n874(x)
+ else
+ fun_l3_n861(x)
+ end
+end
+
+def fun_l2_n336(x)
+ if (x < 1)
+ fun_l3_n66(x)
+ else
+ fun_l3_n47(x)
+ end
+end
+
+def fun_l2_n337(x)
+ if (x < 1)
+ fun_l3_n65(x)
+ else
+ fun_l3_n849(x)
+ end
+end
+
+def fun_l2_n338(x)
+ if (x < 1)
+ fun_l3_n122(x)
+ else
+ fun_l3_n891(x)
+ end
+end
+
+def fun_l2_n339(x)
+ if (x < 1)
+ fun_l3_n377(x)
+ else
+ fun_l3_n862(x)
+ end
+end
+
+def fun_l2_n340(x)
+ if (x < 1)
+ fun_l3_n278(x)
+ else
+ fun_l3_n495(x)
+ end
+end
+
+def fun_l2_n341(x)
+ if (x < 1)
+ fun_l3_n396(x)
+ else
+ fun_l3_n842(x)
+ end
+end
+
+def fun_l2_n342(x)
+ if (x < 1)
+ fun_l3_n971(x)
+ else
+ fun_l3_n296(x)
+ end
+end
+
+def fun_l2_n343(x)
+ if (x < 1)
+ fun_l3_n137(x)
+ else
+ fun_l3_n815(x)
+ end
+end
+
+def fun_l2_n344(x)
+ if (x < 1)
+ fun_l3_n910(x)
+ else
+ fun_l3_n515(x)
+ end
+end
+
+def fun_l2_n345(x)
+ if (x < 1)
+ fun_l3_n415(x)
+ else
+ fun_l3_n766(x)
+ end
+end
+
+def fun_l2_n346(x)
+ if (x < 1)
+ fun_l3_n816(x)
+ else
+ fun_l3_n169(x)
+ end
+end
+
+def fun_l2_n347(x)
+ if (x < 1)
+ fun_l3_n223(x)
+ else
+ fun_l3_n776(x)
+ end
+end
+
+def fun_l2_n348(x)
+ if (x < 1)
+ fun_l3_n316(x)
+ else
+ fun_l3_n497(x)
+ end
+end
+
+def fun_l2_n349(x)
+ if (x < 1)
+ fun_l3_n116(x)
+ else
+ fun_l3_n536(x)
+ end
+end
+
+def fun_l2_n350(x)
+ if (x < 1)
+ fun_l3_n252(x)
+ else
+ fun_l3_n981(x)
+ end
+end
+
+def fun_l2_n351(x)
+ if (x < 1)
+ fun_l3_n783(x)
+ else
+ fun_l3_n596(x)
+ end
+end
+
+def fun_l2_n352(x)
+ if (x < 1)
+ fun_l3_n72(x)
+ else
+ fun_l3_n978(x)
+ end
+end
+
+def fun_l2_n353(x)
+ if (x < 1)
+ fun_l3_n910(x)
+ else
+ fun_l3_n493(x)
+ end
+end
+
+def fun_l2_n354(x)
+ if (x < 1)
+ fun_l3_n671(x)
+ else
+ fun_l3_n297(x)
+ end
+end
+
+def fun_l2_n355(x)
+ if (x < 1)
+ fun_l3_n820(x)
+ else
+ fun_l3_n456(x)
+ end
+end
+
+def fun_l2_n356(x)
+ if (x < 1)
+ fun_l3_n23(x)
+ else
+ fun_l3_n760(x)
+ end
+end
+
+def fun_l2_n357(x)
+ if (x < 1)
+ fun_l3_n676(x)
+ else
+ fun_l3_n531(x)
+ end
+end
+
+def fun_l2_n358(x)
+ if (x < 1)
+ fun_l3_n849(x)
+ else
+ fun_l3_n592(x)
+ end
+end
+
+def fun_l2_n359(x)
+ if (x < 1)
+ fun_l3_n492(x)
+ else
+ fun_l3_n845(x)
+ end
+end
+
+def fun_l2_n360(x)
+ if (x < 1)
+ fun_l3_n58(x)
+ else
+ fun_l3_n896(x)
+ end
+end
+
+def fun_l2_n361(x)
+ if (x < 1)
+ fun_l3_n427(x)
+ else
+ fun_l3_n434(x)
+ end
+end
+
+def fun_l2_n362(x)
+ if (x < 1)
+ fun_l3_n129(x)
+ else
+ fun_l3_n582(x)
+ end
+end
+
+def fun_l2_n363(x)
+ if (x < 1)
+ fun_l3_n901(x)
+ else
+ fun_l3_n405(x)
+ end
+end
+
+def fun_l2_n364(x)
+ if (x < 1)
+ fun_l3_n577(x)
+ else
+ fun_l3_n550(x)
+ end
+end
+
+def fun_l2_n365(x)
+ if (x < 1)
+ fun_l3_n717(x)
+ else
+ fun_l3_n660(x)
+ end
+end
+
+def fun_l2_n366(x)
+ if (x < 1)
+ fun_l3_n605(x)
+ else
+ fun_l3_n755(x)
+ end
+end
+
+def fun_l2_n367(x)
+ if (x < 1)
+ fun_l3_n875(x)
+ else
+ fun_l3_n784(x)
+ end
+end
+
+def fun_l2_n368(x)
+ if (x < 1)
+ fun_l3_n914(x)
+ else
+ fun_l3_n351(x)
+ end
+end
+
+def fun_l2_n369(x)
+ if (x < 1)
+ fun_l3_n684(x)
+ else
+ fun_l3_n932(x)
+ end
+end
+
+def fun_l2_n370(x)
+ if (x < 1)
+ fun_l3_n796(x)
+ else
+ fun_l3_n920(x)
+ end
+end
+
+def fun_l2_n371(x)
+ if (x < 1)
+ fun_l3_n456(x)
+ else
+ fun_l3_n188(x)
+ end
+end
+
+def fun_l2_n372(x)
+ if (x < 1)
+ fun_l3_n891(x)
+ else
+ fun_l3_n905(x)
+ end
+end
+
+def fun_l2_n373(x)
+ if (x < 1)
+ fun_l3_n906(x)
+ else
+ fun_l3_n993(x)
+ end
+end
+
+def fun_l2_n374(x)
+ if (x < 1)
+ fun_l3_n811(x)
+ else
+ fun_l3_n629(x)
+ end
+end
+
+def fun_l2_n375(x)
+ if (x < 1)
+ fun_l3_n5(x)
+ else
+ fun_l3_n304(x)
+ end
+end
+
+def fun_l2_n376(x)
+ if (x < 1)
+ fun_l3_n240(x)
+ else
+ fun_l3_n859(x)
+ end
+end
+
+def fun_l2_n377(x)
+ if (x < 1)
+ fun_l3_n346(x)
+ else
+ fun_l3_n196(x)
+ end
+end
+
+def fun_l2_n378(x)
+ if (x < 1)
+ fun_l3_n918(x)
+ else
+ fun_l3_n371(x)
+ end
+end
+
+def fun_l2_n379(x)
+ if (x < 1)
+ fun_l3_n112(x)
+ else
+ fun_l3_n269(x)
+ end
+end
+
+def fun_l2_n380(x)
+ if (x < 1)
+ fun_l3_n46(x)
+ else
+ fun_l3_n397(x)
+ end
+end
+
+def fun_l2_n381(x)
+ if (x < 1)
+ fun_l3_n977(x)
+ else
+ fun_l3_n647(x)
+ end
+end
+
+def fun_l2_n382(x)
+ if (x < 1)
+ fun_l3_n788(x)
+ else
+ fun_l3_n418(x)
+ end
+end
+
+def fun_l2_n383(x)
+ if (x < 1)
+ fun_l3_n395(x)
+ else
+ fun_l3_n268(x)
+ end
+end
+
+def fun_l2_n384(x)
+ if (x < 1)
+ fun_l3_n39(x)
+ else
+ fun_l3_n950(x)
+ end
+end
+
+def fun_l2_n385(x)
+ if (x < 1)
+ fun_l3_n301(x)
+ else
+ fun_l3_n241(x)
+ end
+end
+
+def fun_l2_n386(x)
+ if (x < 1)
+ fun_l3_n936(x)
+ else
+ fun_l3_n416(x)
+ end
+end
+
+def fun_l2_n387(x)
+ if (x < 1)
+ fun_l3_n750(x)
+ else
+ fun_l3_n152(x)
+ end
+end
+
+def fun_l2_n388(x)
+ if (x < 1)
+ fun_l3_n210(x)
+ else
+ fun_l3_n824(x)
+ end
+end
+
+def fun_l2_n389(x)
+ if (x < 1)
+ fun_l3_n3(x)
+ else
+ fun_l3_n872(x)
+ end
+end
+
+def fun_l2_n390(x)
+ if (x < 1)
+ fun_l3_n944(x)
+ else
+ fun_l3_n49(x)
+ end
+end
+
+def fun_l2_n391(x)
+ if (x < 1)
+ fun_l3_n645(x)
+ else
+ fun_l3_n604(x)
+ end
+end
+
+def fun_l2_n392(x)
+ if (x < 1)
+ fun_l3_n539(x)
+ else
+ fun_l3_n374(x)
+ end
+end
+
+def fun_l2_n393(x)
+ if (x < 1)
+ fun_l3_n728(x)
+ else
+ fun_l3_n620(x)
+ end
+end
+
+def fun_l2_n394(x)
+ if (x < 1)
+ fun_l3_n538(x)
+ else
+ fun_l3_n233(x)
+ end
+end
+
+def fun_l2_n395(x)
+ if (x < 1)
+ fun_l3_n977(x)
+ else
+ fun_l3_n351(x)
+ end
+end
+
+def fun_l2_n396(x)
+ if (x < 1)
+ fun_l3_n54(x)
+ else
+ fun_l3_n958(x)
+ end
+end
+
+def fun_l2_n397(x)
+ if (x < 1)
+ fun_l3_n237(x)
+ else
+ fun_l3_n54(x)
+ end
+end
+
+def fun_l2_n398(x)
+ if (x < 1)
+ fun_l3_n846(x)
+ else
+ fun_l3_n234(x)
+ end
+end
+
+def fun_l2_n399(x)
+ if (x < 1)
+ fun_l3_n934(x)
+ else
+ fun_l3_n128(x)
+ end
+end
+
+def fun_l2_n400(x)
+ if (x < 1)
+ fun_l3_n15(x)
+ else
+ fun_l3_n288(x)
+ end
+end
+
+def fun_l2_n401(x)
+ if (x < 1)
+ fun_l3_n944(x)
+ else
+ fun_l3_n455(x)
+ end
+end
+
+def fun_l2_n402(x)
+ if (x < 1)
+ fun_l3_n962(x)
+ else
+ fun_l3_n785(x)
+ end
+end
+
+def fun_l2_n403(x)
+ if (x < 1)
+ fun_l3_n408(x)
+ else
+ fun_l3_n689(x)
+ end
+end
+
+def fun_l2_n404(x)
+ if (x < 1)
+ fun_l3_n722(x)
+ else
+ fun_l3_n704(x)
+ end
+end
+
+def fun_l2_n405(x)
+ if (x < 1)
+ fun_l3_n410(x)
+ else
+ fun_l3_n543(x)
+ end
+end
+
+def fun_l2_n406(x)
+ if (x < 1)
+ fun_l3_n953(x)
+ else
+ fun_l3_n351(x)
+ end
+end
+
+def fun_l2_n407(x)
+ if (x < 1)
+ fun_l3_n426(x)
+ else
+ fun_l3_n467(x)
+ end
+end
+
+def fun_l2_n408(x)
+ if (x < 1)
+ fun_l3_n204(x)
+ else
+ fun_l3_n163(x)
+ end
+end
+
+def fun_l2_n409(x)
+ if (x < 1)
+ fun_l3_n418(x)
+ else
+ fun_l3_n896(x)
+ end
+end
+
+def fun_l2_n410(x)
+ if (x < 1)
+ fun_l3_n915(x)
+ else
+ fun_l3_n800(x)
+ end
+end
+
+def fun_l2_n411(x)
+ if (x < 1)
+ fun_l3_n580(x)
+ else
+ fun_l3_n507(x)
+ end
+end
+
+def fun_l2_n412(x)
+ if (x < 1)
+ fun_l3_n672(x)
+ else
+ fun_l3_n938(x)
+ end
+end
+
+def fun_l2_n413(x)
+ if (x < 1)
+ fun_l3_n112(x)
+ else
+ fun_l3_n927(x)
+ end
+end
+
+def fun_l2_n414(x)
+ if (x < 1)
+ fun_l3_n801(x)
+ else
+ fun_l3_n604(x)
+ end
+end
+
+def fun_l2_n415(x)
+ if (x < 1)
+ fun_l3_n773(x)
+ else
+ fun_l3_n951(x)
+ end
+end
+
+def fun_l2_n416(x)
+ if (x < 1)
+ fun_l3_n335(x)
+ else
+ fun_l3_n157(x)
+ end
+end
+
+def fun_l2_n417(x)
+ if (x < 1)
+ fun_l3_n480(x)
+ else
+ fun_l3_n106(x)
+ end
+end
+
+def fun_l2_n418(x)
+ if (x < 1)
+ fun_l3_n523(x)
+ else
+ fun_l3_n969(x)
+ end
+end
+
+def fun_l2_n419(x)
+ if (x < 1)
+ fun_l3_n371(x)
+ else
+ fun_l3_n698(x)
+ end
+end
+
+def fun_l2_n420(x)
+ if (x < 1)
+ fun_l3_n102(x)
+ else
+ fun_l3_n47(x)
+ end
+end
+
+def fun_l2_n421(x)
+ if (x < 1)
+ fun_l3_n64(x)
+ else
+ fun_l3_n574(x)
+ end
+end
+
+def fun_l2_n422(x)
+ if (x < 1)
+ fun_l3_n208(x)
+ else
+ fun_l3_n904(x)
+ end
+end
+
+def fun_l2_n423(x)
+ if (x < 1)
+ fun_l3_n715(x)
+ else
+ fun_l3_n414(x)
+ end
+end
+
+def fun_l2_n424(x)
+ if (x < 1)
+ fun_l3_n455(x)
+ else
+ fun_l3_n219(x)
+ end
+end
+
+def fun_l2_n425(x)
+ if (x < 1)
+ fun_l3_n210(x)
+ else
+ fun_l3_n160(x)
+ end
+end
+
+def fun_l2_n426(x)
+ if (x < 1)
+ fun_l3_n729(x)
+ else
+ fun_l3_n512(x)
+ end
+end
+
+def fun_l2_n427(x)
+ if (x < 1)
+ fun_l3_n159(x)
+ else
+ fun_l3_n482(x)
+ end
+end
+
+def fun_l2_n428(x)
+ if (x < 1)
+ fun_l3_n182(x)
+ else
+ fun_l3_n687(x)
+ end
+end
+
+def fun_l2_n429(x)
+ if (x < 1)
+ fun_l3_n690(x)
+ else
+ fun_l3_n856(x)
+ end
+end
+
+def fun_l2_n430(x)
+ if (x < 1)
+ fun_l3_n494(x)
+ else
+ fun_l3_n365(x)
+ end
+end
+
+def fun_l2_n431(x)
+ if (x < 1)
+ fun_l3_n346(x)
+ else
+ fun_l3_n82(x)
+ end
+end
+
+def fun_l2_n432(x)
+ if (x < 1)
+ fun_l3_n311(x)
+ else
+ fun_l3_n412(x)
+ end
+end
+
+def fun_l2_n433(x)
+ if (x < 1)
+ fun_l3_n824(x)
+ else
+ fun_l3_n494(x)
+ end
+end
+
+def fun_l2_n434(x)
+ if (x < 1)
+ fun_l3_n471(x)
+ else
+ fun_l3_n174(x)
+ end
+end
+
+def fun_l2_n435(x)
+ if (x < 1)
+ fun_l3_n10(x)
+ else
+ fun_l3_n319(x)
+ end
+end
+
+def fun_l2_n436(x)
+ if (x < 1)
+ fun_l3_n223(x)
+ else
+ fun_l3_n518(x)
+ end
+end
+
+def fun_l2_n437(x)
+ if (x < 1)
+ fun_l3_n497(x)
+ else
+ fun_l3_n685(x)
+ end
+end
+
+def fun_l2_n438(x)
+ if (x < 1)
+ fun_l3_n639(x)
+ else
+ fun_l3_n695(x)
+ end
+end
+
+def fun_l2_n439(x)
+ if (x < 1)
+ fun_l3_n898(x)
+ else
+ fun_l3_n114(x)
+ end
+end
+
+def fun_l2_n440(x)
+ if (x < 1)
+ fun_l3_n12(x)
+ else
+ fun_l3_n344(x)
+ end
+end
+
+def fun_l2_n441(x)
+ if (x < 1)
+ fun_l3_n556(x)
+ else
+ fun_l3_n684(x)
+ end
+end
+
+def fun_l2_n442(x)
+ if (x < 1)
+ fun_l3_n403(x)
+ else
+ fun_l3_n528(x)
+ end
+end
+
+def fun_l2_n443(x)
+ if (x < 1)
+ fun_l3_n514(x)
+ else
+ fun_l3_n943(x)
+ end
+end
+
+def fun_l2_n444(x)
+ if (x < 1)
+ fun_l3_n218(x)
+ else
+ fun_l3_n94(x)
+ end
+end
+
+def fun_l2_n445(x)
+ if (x < 1)
+ fun_l3_n0(x)
+ else
+ fun_l3_n731(x)
+ end
+end
+
+def fun_l2_n446(x)
+ if (x < 1)
+ fun_l3_n468(x)
+ else
+ fun_l3_n197(x)
+ end
+end
+
+def fun_l2_n447(x)
+ if (x < 1)
+ fun_l3_n79(x)
+ else
+ fun_l3_n900(x)
+ end
+end
+
+def fun_l2_n448(x)
+ if (x < 1)
+ fun_l3_n706(x)
+ else
+ fun_l3_n250(x)
+ end
+end
+
+def fun_l2_n449(x)
+ if (x < 1)
+ fun_l3_n607(x)
+ else
+ fun_l3_n514(x)
+ end
+end
+
+def fun_l2_n450(x)
+ if (x < 1)
+ fun_l3_n855(x)
+ else
+ fun_l3_n223(x)
+ end
+end
+
+def fun_l2_n451(x)
+ if (x < 1)
+ fun_l3_n824(x)
+ else
+ fun_l3_n426(x)
+ end
+end
+
+def fun_l2_n452(x)
+ if (x < 1)
+ fun_l3_n137(x)
+ else
+ fun_l3_n555(x)
+ end
+end
+
+def fun_l2_n453(x)
+ if (x < 1)
+ fun_l3_n553(x)
+ else
+ fun_l3_n130(x)
+ end
+end
+
+def fun_l2_n454(x)
+ if (x < 1)
+ fun_l3_n350(x)
+ else
+ fun_l3_n545(x)
+ end
+end
+
+def fun_l2_n455(x)
+ if (x < 1)
+ fun_l3_n181(x)
+ else
+ fun_l3_n950(x)
+ end
+end
+
+def fun_l2_n456(x)
+ if (x < 1)
+ fun_l3_n581(x)
+ else
+ fun_l3_n957(x)
+ end
+end
+
+def fun_l2_n457(x)
+ if (x < 1)
+ fun_l3_n688(x)
+ else
+ fun_l3_n560(x)
+ end
+end
+
+def fun_l2_n458(x)
+ if (x < 1)
+ fun_l3_n655(x)
+ else
+ fun_l3_n323(x)
+ end
+end
+
+def fun_l2_n459(x)
+ if (x < 1)
+ fun_l3_n92(x)
+ else
+ fun_l3_n796(x)
+ end
+end
+
+def fun_l2_n460(x)
+ if (x < 1)
+ fun_l3_n68(x)
+ else
+ fun_l3_n152(x)
+ end
+end
+
+def fun_l2_n461(x)
+ if (x < 1)
+ fun_l3_n153(x)
+ else
+ fun_l3_n929(x)
+ end
+end
+
+def fun_l2_n462(x)
+ if (x < 1)
+ fun_l3_n401(x)
+ else
+ fun_l3_n248(x)
+ end
+end
+
+def fun_l2_n463(x)
+ if (x < 1)
+ fun_l3_n4(x)
+ else
+ fun_l3_n906(x)
+ end
+end
+
+def fun_l2_n464(x)
+ if (x < 1)
+ fun_l3_n313(x)
+ else
+ fun_l3_n911(x)
+ end
+end
+
+def fun_l2_n465(x)
+ if (x < 1)
+ fun_l3_n490(x)
+ else
+ fun_l3_n430(x)
+ end
+end
+
+def fun_l2_n466(x)
+ if (x < 1)
+ fun_l3_n537(x)
+ else
+ fun_l3_n470(x)
+ end
+end
+
+def fun_l2_n467(x)
+ if (x < 1)
+ fun_l3_n666(x)
+ else
+ fun_l3_n695(x)
+ end
+end
+
+def fun_l2_n468(x)
+ if (x < 1)
+ fun_l3_n126(x)
+ else
+ fun_l3_n201(x)
+ end
+end
+
+def fun_l2_n469(x)
+ if (x < 1)
+ fun_l3_n111(x)
+ else
+ fun_l3_n741(x)
+ end
+end
+
+def fun_l2_n470(x)
+ if (x < 1)
+ fun_l3_n391(x)
+ else
+ fun_l3_n83(x)
+ end
+end
+
+def fun_l2_n471(x)
+ if (x < 1)
+ fun_l3_n619(x)
+ else
+ fun_l3_n425(x)
+ end
+end
+
+def fun_l2_n472(x)
+ if (x < 1)
+ fun_l3_n637(x)
+ else
+ fun_l3_n865(x)
+ end
+end
+
+def fun_l2_n473(x)
+ if (x < 1)
+ fun_l3_n182(x)
+ else
+ fun_l3_n350(x)
+ end
+end
+
+def fun_l2_n474(x)
+ if (x < 1)
+ fun_l3_n752(x)
+ else
+ fun_l3_n815(x)
+ end
+end
+
+def fun_l2_n475(x)
+ if (x < 1)
+ fun_l3_n184(x)
+ else
+ fun_l3_n683(x)
+ end
+end
+
+def fun_l2_n476(x)
+ if (x < 1)
+ fun_l3_n324(x)
+ else
+ fun_l3_n232(x)
+ end
+end
+
+def fun_l2_n477(x)
+ if (x < 1)
+ fun_l3_n588(x)
+ else
+ fun_l3_n586(x)
+ end
+end
+
+def fun_l2_n478(x)
+ if (x < 1)
+ fun_l3_n320(x)
+ else
+ fun_l3_n599(x)
+ end
+end
+
+def fun_l2_n479(x)
+ if (x < 1)
+ fun_l3_n999(x)
+ else
+ fun_l3_n678(x)
+ end
+end
+
+def fun_l2_n480(x)
+ if (x < 1)
+ fun_l3_n523(x)
+ else
+ fun_l3_n637(x)
+ end
+end
+
+def fun_l2_n481(x)
+ if (x < 1)
+ fun_l3_n291(x)
+ else
+ fun_l3_n347(x)
+ end
+end
+
+def fun_l2_n482(x)
+ if (x < 1)
+ fun_l3_n873(x)
+ else
+ fun_l3_n435(x)
+ end
+end
+
+def fun_l2_n483(x)
+ if (x < 1)
+ fun_l3_n718(x)
+ else
+ fun_l3_n903(x)
+ end
+end
+
+def fun_l2_n484(x)
+ if (x < 1)
+ fun_l3_n188(x)
+ else
+ fun_l3_n751(x)
+ end
+end
+
+def fun_l2_n485(x)
+ if (x < 1)
+ fun_l3_n501(x)
+ else
+ fun_l3_n235(x)
+ end
+end
+
+def fun_l2_n486(x)
+ if (x < 1)
+ fun_l3_n578(x)
+ else
+ fun_l3_n224(x)
+ end
+end
+
+def fun_l2_n487(x)
+ if (x < 1)
+ fun_l3_n757(x)
+ else
+ fun_l3_n145(x)
+ end
+end
+
+def fun_l2_n488(x)
+ if (x < 1)
+ fun_l3_n17(x)
+ else
+ fun_l3_n727(x)
+ end
+end
+
+def fun_l2_n489(x)
+ if (x < 1)
+ fun_l3_n699(x)
+ else
+ fun_l3_n102(x)
+ end
+end
+
+def fun_l2_n490(x)
+ if (x < 1)
+ fun_l3_n518(x)
+ else
+ fun_l3_n15(x)
+ end
+end
+
+def fun_l2_n491(x)
+ if (x < 1)
+ fun_l3_n889(x)
+ else
+ fun_l3_n798(x)
+ end
+end
+
+def fun_l2_n492(x)
+ if (x < 1)
+ fun_l3_n715(x)
+ else
+ fun_l3_n27(x)
+ end
+end
+
+def fun_l2_n493(x)
+ if (x < 1)
+ fun_l3_n237(x)
+ else
+ fun_l3_n130(x)
+ end
+end
+
+def fun_l2_n494(x)
+ if (x < 1)
+ fun_l3_n99(x)
+ else
+ fun_l3_n223(x)
+ end
+end
+
+def fun_l2_n495(x)
+ if (x < 1)
+ fun_l3_n65(x)
+ else
+ fun_l3_n811(x)
+ end
+end
+
+def fun_l2_n496(x)
+ if (x < 1)
+ fun_l3_n457(x)
+ else
+ fun_l3_n527(x)
+ end
+end
+
+def fun_l2_n497(x)
+ if (x < 1)
+ fun_l3_n312(x)
+ else
+ fun_l3_n902(x)
+ end
+end
+
+def fun_l2_n498(x)
+ if (x < 1)
+ fun_l3_n63(x)
+ else
+ fun_l3_n948(x)
+ end
+end
+
+def fun_l2_n499(x)
+ if (x < 1)
+ fun_l3_n652(x)
+ else
+ fun_l3_n752(x)
+ end
+end
+
+def fun_l2_n500(x)
+ if (x < 1)
+ fun_l3_n583(x)
+ else
+ fun_l3_n427(x)
+ end
+end
+
+def fun_l2_n501(x)
+ if (x < 1)
+ fun_l3_n536(x)
+ else
+ fun_l3_n671(x)
+ end
+end
+
+def fun_l2_n502(x)
+ if (x < 1)
+ fun_l3_n122(x)
+ else
+ fun_l3_n170(x)
+ end
+end
+
+def fun_l2_n503(x)
+ if (x < 1)
+ fun_l3_n280(x)
+ else
+ fun_l3_n390(x)
+ end
+end
+
+def fun_l2_n504(x)
+ if (x < 1)
+ fun_l3_n427(x)
+ else
+ fun_l3_n267(x)
+ end
+end
+
+def fun_l2_n505(x)
+ if (x < 1)
+ fun_l3_n241(x)
+ else
+ fun_l3_n345(x)
+ end
+end
+
+def fun_l2_n506(x)
+ if (x < 1)
+ fun_l3_n960(x)
+ else
+ fun_l3_n537(x)
+ end
+end
+
+def fun_l2_n507(x)
+ if (x < 1)
+ fun_l3_n947(x)
+ else
+ fun_l3_n3(x)
+ end
+end
+
+def fun_l2_n508(x)
+ if (x < 1)
+ fun_l3_n974(x)
+ else
+ fun_l3_n597(x)
+ end
+end
+
+def fun_l2_n509(x)
+ if (x < 1)
+ fun_l3_n978(x)
+ else
+ fun_l3_n70(x)
+ end
+end
+
+def fun_l2_n510(x)
+ if (x < 1)
+ fun_l3_n855(x)
+ else
+ fun_l3_n259(x)
+ end
+end
+
+def fun_l2_n511(x)
+ if (x < 1)
+ fun_l3_n987(x)
+ else
+ fun_l3_n182(x)
+ end
+end
+
+def fun_l2_n512(x)
+ if (x < 1)
+ fun_l3_n787(x)
+ else
+ fun_l3_n656(x)
+ end
+end
+
+def fun_l2_n513(x)
+ if (x < 1)
+ fun_l3_n348(x)
+ else
+ fun_l3_n242(x)
+ end
+end
+
+def fun_l2_n514(x)
+ if (x < 1)
+ fun_l3_n530(x)
+ else
+ fun_l3_n718(x)
+ end
+end
+
+def fun_l2_n515(x)
+ if (x < 1)
+ fun_l3_n840(x)
+ else
+ fun_l3_n302(x)
+ end
+end
+
+def fun_l2_n516(x)
+ if (x < 1)
+ fun_l3_n678(x)
+ else
+ fun_l3_n732(x)
+ end
+end
+
+def fun_l2_n517(x)
+ if (x < 1)
+ fun_l3_n160(x)
+ else
+ fun_l3_n344(x)
+ end
+end
+
+def fun_l2_n518(x)
+ if (x < 1)
+ fun_l3_n518(x)
+ else
+ fun_l3_n830(x)
+ end
+end
+
+def fun_l2_n519(x)
+ if (x < 1)
+ fun_l3_n595(x)
+ else
+ fun_l3_n95(x)
+ end
+end
+
+def fun_l2_n520(x)
+ if (x < 1)
+ fun_l3_n939(x)
+ else
+ fun_l3_n954(x)
+ end
+end
+
+def fun_l2_n521(x)
+ if (x < 1)
+ fun_l3_n931(x)
+ else
+ fun_l3_n686(x)
+ end
+end
+
+def fun_l2_n522(x)
+ if (x < 1)
+ fun_l3_n117(x)
+ else
+ fun_l3_n501(x)
+ end
+end
+
+def fun_l2_n523(x)
+ if (x < 1)
+ fun_l3_n727(x)
+ else
+ fun_l3_n712(x)
+ end
+end
+
+def fun_l2_n524(x)
+ if (x < 1)
+ fun_l3_n152(x)
+ else
+ fun_l3_n453(x)
+ end
+end
+
+def fun_l2_n525(x)
+ if (x < 1)
+ fun_l3_n375(x)
+ else
+ fun_l3_n186(x)
+ end
+end
+
+def fun_l2_n526(x)
+ if (x < 1)
+ fun_l3_n835(x)
+ else
+ fun_l3_n465(x)
+ end
+end
+
+def fun_l2_n527(x)
+ if (x < 1)
+ fun_l3_n411(x)
+ else
+ fun_l3_n208(x)
+ end
+end
+
+def fun_l2_n528(x)
+ if (x < 1)
+ fun_l3_n696(x)
+ else
+ fun_l3_n537(x)
+ end
+end
+
+def fun_l2_n529(x)
+ if (x < 1)
+ fun_l3_n606(x)
+ else
+ fun_l3_n682(x)
+ end
+end
+
+def fun_l2_n530(x)
+ if (x < 1)
+ fun_l3_n920(x)
+ else
+ fun_l3_n592(x)
+ end
+end
+
+def fun_l2_n531(x)
+ if (x < 1)
+ fun_l3_n201(x)
+ else
+ fun_l3_n27(x)
+ end
+end
+
+def fun_l2_n532(x)
+ if (x < 1)
+ fun_l3_n643(x)
+ else
+ fun_l3_n642(x)
+ end
+end
+
+def fun_l2_n533(x)
+ if (x < 1)
+ fun_l3_n92(x)
+ else
+ fun_l3_n576(x)
+ end
+end
+
+def fun_l2_n534(x)
+ if (x < 1)
+ fun_l3_n135(x)
+ else
+ fun_l3_n738(x)
+ end
+end
+
+def fun_l2_n535(x)
+ if (x < 1)
+ fun_l3_n706(x)
+ else
+ fun_l3_n810(x)
+ end
+end
+
+def fun_l2_n536(x)
+ if (x < 1)
+ fun_l3_n922(x)
+ else
+ fun_l3_n456(x)
+ end
+end
+
+def fun_l2_n537(x)
+ if (x < 1)
+ fun_l3_n511(x)
+ else
+ fun_l3_n279(x)
+ end
+end
+
+def fun_l2_n538(x)
+ if (x < 1)
+ fun_l3_n541(x)
+ else
+ fun_l3_n826(x)
+ end
+end
+
+def fun_l2_n539(x)
+ if (x < 1)
+ fun_l3_n7(x)
+ else
+ fun_l3_n362(x)
+ end
+end
+
+def fun_l2_n540(x)
+ if (x < 1)
+ fun_l3_n132(x)
+ else
+ fun_l3_n819(x)
+ end
+end
+
+def fun_l2_n541(x)
+ if (x < 1)
+ fun_l3_n600(x)
+ else
+ fun_l3_n155(x)
+ end
+end
+
+def fun_l2_n542(x)
+ if (x < 1)
+ fun_l3_n930(x)
+ else
+ fun_l3_n827(x)
+ end
+end
+
+def fun_l2_n543(x)
+ if (x < 1)
+ fun_l3_n974(x)
+ else
+ fun_l3_n957(x)
+ end
+end
+
+def fun_l2_n544(x)
+ if (x < 1)
+ fun_l3_n912(x)
+ else
+ fun_l3_n503(x)
+ end
+end
+
+def fun_l2_n545(x)
+ if (x < 1)
+ fun_l3_n605(x)
+ else
+ fun_l3_n966(x)
+ end
+end
+
+def fun_l2_n546(x)
+ if (x < 1)
+ fun_l3_n523(x)
+ else
+ fun_l3_n105(x)
+ end
+end
+
+def fun_l2_n547(x)
+ if (x < 1)
+ fun_l3_n352(x)
+ else
+ fun_l3_n762(x)
+ end
+end
+
+def fun_l2_n548(x)
+ if (x < 1)
+ fun_l3_n65(x)
+ else
+ fun_l3_n714(x)
+ end
+end
+
+def fun_l2_n549(x)
+ if (x < 1)
+ fun_l3_n665(x)
+ else
+ fun_l3_n799(x)
+ end
+end
+
+def fun_l2_n550(x)
+ if (x < 1)
+ fun_l3_n624(x)
+ else
+ fun_l3_n131(x)
+ end
+end
+
+def fun_l2_n551(x)
+ if (x < 1)
+ fun_l3_n53(x)
+ else
+ fun_l3_n377(x)
+ end
+end
+
+def fun_l2_n552(x)
+ if (x < 1)
+ fun_l3_n377(x)
+ else
+ fun_l3_n532(x)
+ end
+end
+
+def fun_l2_n553(x)
+ if (x < 1)
+ fun_l3_n302(x)
+ else
+ fun_l3_n593(x)
+ end
+end
+
+def fun_l2_n554(x)
+ if (x < 1)
+ fun_l3_n573(x)
+ else
+ fun_l3_n727(x)
+ end
+end
+
+def fun_l2_n555(x)
+ if (x < 1)
+ fun_l3_n575(x)
+ else
+ fun_l3_n838(x)
+ end
+end
+
+def fun_l2_n556(x)
+ if (x < 1)
+ fun_l3_n92(x)
+ else
+ fun_l3_n137(x)
+ end
+end
+
+def fun_l2_n557(x)
+ if (x < 1)
+ fun_l3_n921(x)
+ else
+ fun_l3_n683(x)
+ end
+end
+
+def fun_l2_n558(x)
+ if (x < 1)
+ fun_l3_n987(x)
+ else
+ fun_l3_n646(x)
+ end
+end
+
+def fun_l2_n559(x)
+ if (x < 1)
+ fun_l3_n574(x)
+ else
+ fun_l3_n493(x)
+ end
+end
+
+def fun_l2_n560(x)
+ if (x < 1)
+ fun_l3_n950(x)
+ else
+ fun_l3_n139(x)
+ end
+end
+
+def fun_l2_n561(x)
+ if (x < 1)
+ fun_l3_n276(x)
+ else
+ fun_l3_n491(x)
+ end
+end
+
+def fun_l2_n562(x)
+ if (x < 1)
+ fun_l3_n226(x)
+ else
+ fun_l3_n840(x)
+ end
+end
+
+def fun_l2_n563(x)
+ if (x < 1)
+ fun_l3_n473(x)
+ else
+ fun_l3_n907(x)
+ end
+end
+
+def fun_l2_n564(x)
+ if (x < 1)
+ fun_l3_n695(x)
+ else
+ fun_l3_n951(x)
+ end
+end
+
+def fun_l2_n565(x)
+ if (x < 1)
+ fun_l3_n415(x)
+ else
+ fun_l3_n12(x)
+ end
+end
+
+def fun_l2_n566(x)
+ if (x < 1)
+ fun_l3_n32(x)
+ else
+ fun_l3_n759(x)
+ end
+end
+
+def fun_l2_n567(x)
+ if (x < 1)
+ fun_l3_n876(x)
+ else
+ fun_l3_n297(x)
+ end
+end
+
+def fun_l2_n568(x)
+ if (x < 1)
+ fun_l3_n520(x)
+ else
+ fun_l3_n713(x)
+ end
+end
+
+def fun_l2_n569(x)
+ if (x < 1)
+ fun_l3_n819(x)
+ else
+ fun_l3_n505(x)
+ end
+end
+
+def fun_l2_n570(x)
+ if (x < 1)
+ fun_l3_n661(x)
+ else
+ fun_l3_n447(x)
+ end
+end
+
+def fun_l2_n571(x)
+ if (x < 1)
+ fun_l3_n538(x)
+ else
+ fun_l3_n532(x)
+ end
+end
+
+def fun_l2_n572(x)
+ if (x < 1)
+ fun_l3_n584(x)
+ else
+ fun_l3_n304(x)
+ end
+end
+
+def fun_l2_n573(x)
+ if (x < 1)
+ fun_l3_n76(x)
+ else
+ fun_l3_n512(x)
+ end
+end
+
+def fun_l2_n574(x)
+ if (x < 1)
+ fun_l3_n22(x)
+ else
+ fun_l3_n328(x)
+ end
+end
+
+def fun_l2_n575(x)
+ if (x < 1)
+ fun_l3_n397(x)
+ else
+ fun_l3_n640(x)
+ end
+end
+
+def fun_l2_n576(x)
+ if (x < 1)
+ fun_l3_n227(x)
+ else
+ fun_l3_n236(x)
+ end
+end
+
+def fun_l2_n577(x)
+ if (x < 1)
+ fun_l3_n743(x)
+ else
+ fun_l3_n974(x)
+ end
+end
+
+def fun_l2_n578(x)
+ if (x < 1)
+ fun_l3_n970(x)
+ else
+ fun_l3_n229(x)
+ end
+end
+
+def fun_l2_n579(x)
+ if (x < 1)
+ fun_l3_n401(x)
+ else
+ fun_l3_n672(x)
+ end
+end
+
+def fun_l2_n580(x)
+ if (x < 1)
+ fun_l3_n837(x)
+ else
+ fun_l3_n804(x)
+ end
+end
+
+def fun_l2_n581(x)
+ if (x < 1)
+ fun_l3_n526(x)
+ else
+ fun_l3_n763(x)
+ end
+end
+
+def fun_l2_n582(x)
+ if (x < 1)
+ fun_l3_n2(x)
+ else
+ fun_l3_n692(x)
+ end
+end
+
+def fun_l2_n583(x)
+ if (x < 1)
+ fun_l3_n561(x)
+ else
+ fun_l3_n394(x)
+ end
+end
+
+def fun_l2_n584(x)
+ if (x < 1)
+ fun_l3_n60(x)
+ else
+ fun_l3_n682(x)
+ end
+end
+
+def fun_l2_n585(x)
+ if (x < 1)
+ fun_l3_n646(x)
+ else
+ fun_l3_n776(x)
+ end
+end
+
+def fun_l2_n586(x)
+ if (x < 1)
+ fun_l3_n466(x)
+ else
+ fun_l3_n91(x)
+ end
+end
+
+def fun_l2_n587(x)
+ if (x < 1)
+ fun_l3_n294(x)
+ else
+ fun_l3_n813(x)
+ end
+end
+
+def fun_l2_n588(x)
+ if (x < 1)
+ fun_l3_n958(x)
+ else
+ fun_l3_n64(x)
+ end
+end
+
+def fun_l2_n589(x)
+ if (x < 1)
+ fun_l3_n703(x)
+ else
+ fun_l3_n937(x)
+ end
+end
+
+def fun_l2_n590(x)
+ if (x < 1)
+ fun_l3_n1(x)
+ else
+ fun_l3_n970(x)
+ end
+end
+
+def fun_l2_n591(x)
+ if (x < 1)
+ fun_l3_n566(x)
+ else
+ fun_l3_n750(x)
+ end
+end
+
+def fun_l2_n592(x)
+ if (x < 1)
+ fun_l3_n905(x)
+ else
+ fun_l3_n840(x)
+ end
+end
+
+def fun_l2_n593(x)
+ if (x < 1)
+ fun_l3_n23(x)
+ else
+ fun_l3_n862(x)
+ end
+end
+
+def fun_l2_n594(x)
+ if (x < 1)
+ fun_l3_n739(x)
+ else
+ fun_l3_n4(x)
+ end
+end
+
+def fun_l2_n595(x)
+ if (x < 1)
+ fun_l3_n818(x)
+ else
+ fun_l3_n117(x)
+ end
+end
+
+def fun_l2_n596(x)
+ if (x < 1)
+ fun_l3_n891(x)
+ else
+ fun_l3_n599(x)
+ end
+end
+
+def fun_l2_n597(x)
+ if (x < 1)
+ fun_l3_n731(x)
+ else
+ fun_l3_n960(x)
+ end
+end
+
+def fun_l2_n598(x)
+ if (x < 1)
+ fun_l3_n323(x)
+ else
+ fun_l3_n811(x)
+ end
+end
+
+def fun_l2_n599(x)
+ if (x < 1)
+ fun_l3_n299(x)
+ else
+ fun_l3_n188(x)
+ end
+end
+
+def fun_l2_n600(x)
+ if (x < 1)
+ fun_l3_n129(x)
+ else
+ fun_l3_n730(x)
+ end
+end
+
+def fun_l2_n601(x)
+ if (x < 1)
+ fun_l3_n412(x)
+ else
+ fun_l3_n353(x)
+ end
+end
+
+def fun_l2_n602(x)
+ if (x < 1)
+ fun_l3_n658(x)
+ else
+ fun_l3_n774(x)
+ end
+end
+
+def fun_l2_n603(x)
+ if (x < 1)
+ fun_l3_n378(x)
+ else
+ fun_l3_n722(x)
+ end
+end
+
+def fun_l2_n604(x)
+ if (x < 1)
+ fun_l3_n482(x)
+ else
+ fun_l3_n949(x)
+ end
+end
+
+def fun_l2_n605(x)
+ if (x < 1)
+ fun_l3_n996(x)
+ else
+ fun_l3_n169(x)
+ end
+end
+
+def fun_l2_n606(x)
+ if (x < 1)
+ fun_l3_n653(x)
+ else
+ fun_l3_n966(x)
+ end
+end
+
+def fun_l2_n607(x)
+ if (x < 1)
+ fun_l3_n179(x)
+ else
+ fun_l3_n113(x)
+ end
+end
+
+def fun_l2_n608(x)
+ if (x < 1)
+ fun_l3_n157(x)
+ else
+ fun_l3_n692(x)
+ end
+end
+
+def fun_l2_n609(x)
+ if (x < 1)
+ fun_l3_n648(x)
+ else
+ fun_l3_n318(x)
+ end
+end
+
+def fun_l2_n610(x)
+ if (x < 1)
+ fun_l3_n979(x)
+ else
+ fun_l3_n642(x)
+ end
+end
+
+def fun_l2_n611(x)
+ if (x < 1)
+ fun_l3_n909(x)
+ else
+ fun_l3_n986(x)
+ end
+end
+
+def fun_l2_n612(x)
+ if (x < 1)
+ fun_l3_n387(x)
+ else
+ fun_l3_n586(x)
+ end
+end
+
+def fun_l2_n613(x)
+ if (x < 1)
+ fun_l3_n99(x)
+ else
+ fun_l3_n690(x)
+ end
+end
+
+def fun_l2_n614(x)
+ if (x < 1)
+ fun_l3_n485(x)
+ else
+ fun_l3_n733(x)
+ end
+end
+
+def fun_l2_n615(x)
+ if (x < 1)
+ fun_l3_n735(x)
+ else
+ fun_l3_n827(x)
+ end
+end
+
+def fun_l2_n616(x)
+ if (x < 1)
+ fun_l3_n496(x)
+ else
+ fun_l3_n912(x)
+ end
+end
+
+def fun_l2_n617(x)
+ if (x < 1)
+ fun_l3_n363(x)
+ else
+ fun_l3_n845(x)
+ end
+end
+
+def fun_l2_n618(x)
+ if (x < 1)
+ fun_l3_n891(x)
+ else
+ fun_l3_n964(x)
+ end
+end
+
+def fun_l2_n619(x)
+ if (x < 1)
+ fun_l3_n266(x)
+ else
+ fun_l3_n189(x)
+ end
+end
+
+def fun_l2_n620(x)
+ if (x < 1)
+ fun_l3_n906(x)
+ else
+ fun_l3_n218(x)
+ end
+end
+
+def fun_l2_n621(x)
+ if (x < 1)
+ fun_l3_n145(x)
+ else
+ fun_l3_n279(x)
+ end
+end
+
+def fun_l2_n622(x)
+ if (x < 1)
+ fun_l3_n797(x)
+ else
+ fun_l3_n328(x)
+ end
+end
+
+def fun_l2_n623(x)
+ if (x < 1)
+ fun_l3_n910(x)
+ else
+ fun_l3_n250(x)
+ end
+end
+
+def fun_l2_n624(x)
+ if (x < 1)
+ fun_l3_n111(x)
+ else
+ fun_l3_n884(x)
+ end
+end
+
+def fun_l2_n625(x)
+ if (x < 1)
+ fun_l3_n852(x)
+ else
+ fun_l3_n985(x)
+ end
+end
+
+def fun_l2_n626(x)
+ if (x < 1)
+ fun_l3_n163(x)
+ else
+ fun_l3_n885(x)
+ end
+end
+
+def fun_l2_n627(x)
+ if (x < 1)
+ fun_l3_n338(x)
+ else
+ fun_l3_n562(x)
+ end
+end
+
+def fun_l2_n628(x)
+ if (x < 1)
+ fun_l3_n300(x)
+ else
+ fun_l3_n245(x)
+ end
+end
+
+def fun_l2_n629(x)
+ if (x < 1)
+ fun_l3_n747(x)
+ else
+ fun_l3_n123(x)
+ end
+end
+
+def fun_l2_n630(x)
+ if (x < 1)
+ fun_l3_n42(x)
+ else
+ fun_l3_n46(x)
+ end
+end
+
+def fun_l2_n631(x)
+ if (x < 1)
+ fun_l3_n256(x)
+ else
+ fun_l3_n960(x)
+ end
+end
+
+def fun_l2_n632(x)
+ if (x < 1)
+ fun_l3_n679(x)
+ else
+ fun_l3_n393(x)
+ end
+end
+
+def fun_l2_n633(x)
+ if (x < 1)
+ fun_l3_n286(x)
+ else
+ fun_l3_n147(x)
+ end
+end
+
+def fun_l2_n634(x)
+ if (x < 1)
+ fun_l3_n62(x)
+ else
+ fun_l3_n994(x)
+ end
+end
+
+def fun_l2_n635(x)
+ if (x < 1)
+ fun_l3_n479(x)
+ else
+ fun_l3_n502(x)
+ end
+end
+
+def fun_l2_n636(x)
+ if (x < 1)
+ fun_l3_n504(x)
+ else
+ fun_l3_n557(x)
+ end
+end
+
+def fun_l2_n637(x)
+ if (x < 1)
+ fun_l3_n716(x)
+ else
+ fun_l3_n763(x)
+ end
+end
+
+def fun_l2_n638(x)
+ if (x < 1)
+ fun_l3_n972(x)
+ else
+ fun_l3_n391(x)
+ end
+end
+
+def fun_l2_n639(x)
+ if (x < 1)
+ fun_l3_n842(x)
+ else
+ fun_l3_n740(x)
+ end
+end
+
+def fun_l2_n640(x)
+ if (x < 1)
+ fun_l3_n854(x)
+ else
+ fun_l3_n52(x)
+ end
+end
+
+def fun_l2_n641(x)
+ if (x < 1)
+ fun_l3_n321(x)
+ else
+ fun_l3_n109(x)
+ end
+end
+
+def fun_l2_n642(x)
+ if (x < 1)
+ fun_l3_n868(x)
+ else
+ fun_l3_n926(x)
+ end
+end
+
+def fun_l2_n643(x)
+ if (x < 1)
+ fun_l3_n864(x)
+ else
+ fun_l3_n98(x)
+ end
+end
+
+def fun_l2_n644(x)
+ if (x < 1)
+ fun_l3_n373(x)
+ else
+ fun_l3_n105(x)
+ end
+end
+
+def fun_l2_n645(x)
+ if (x < 1)
+ fun_l3_n293(x)
+ else
+ fun_l3_n846(x)
+ end
+end
+
+def fun_l2_n646(x)
+ if (x < 1)
+ fun_l3_n24(x)
+ else
+ fun_l3_n83(x)
+ end
+end
+
+def fun_l2_n647(x)
+ if (x < 1)
+ fun_l3_n431(x)
+ else
+ fun_l3_n772(x)
+ end
+end
+
+def fun_l2_n648(x)
+ if (x < 1)
+ fun_l3_n667(x)
+ else
+ fun_l3_n108(x)
+ end
+end
+
+def fun_l2_n649(x)
+ if (x < 1)
+ fun_l3_n93(x)
+ else
+ fun_l3_n394(x)
+ end
+end
+
+def fun_l2_n650(x)
+ if (x < 1)
+ fun_l3_n176(x)
+ else
+ fun_l3_n315(x)
+ end
+end
+
+def fun_l2_n651(x)
+ if (x < 1)
+ fun_l3_n684(x)
+ else
+ fun_l3_n635(x)
+ end
+end
+
+def fun_l2_n652(x)
+ if (x < 1)
+ fun_l3_n554(x)
+ else
+ fun_l3_n88(x)
+ end
+end
+
+def fun_l2_n653(x)
+ if (x < 1)
+ fun_l3_n165(x)
+ else
+ fun_l3_n962(x)
+ end
+end
+
+def fun_l2_n654(x)
+ if (x < 1)
+ fun_l3_n631(x)
+ else
+ fun_l3_n80(x)
+ end
+end
+
+def fun_l2_n655(x)
+ if (x < 1)
+ fun_l3_n529(x)
+ else
+ fun_l3_n711(x)
+ end
+end
+
+def fun_l2_n656(x)
+ if (x < 1)
+ fun_l3_n809(x)
+ else
+ fun_l3_n44(x)
+ end
+end
+
+def fun_l2_n657(x)
+ if (x < 1)
+ fun_l3_n226(x)
+ else
+ fun_l3_n939(x)
+ end
+end
+
+def fun_l2_n658(x)
+ if (x < 1)
+ fun_l3_n355(x)
+ else
+ fun_l3_n750(x)
+ end
+end
+
+def fun_l2_n659(x)
+ if (x < 1)
+ fun_l3_n111(x)
+ else
+ fun_l3_n883(x)
+ end
+end
+
+def fun_l2_n660(x)
+ if (x < 1)
+ fun_l3_n22(x)
+ else
+ fun_l3_n544(x)
+ end
+end
+
+def fun_l2_n661(x)
+ if (x < 1)
+ fun_l3_n334(x)
+ else
+ fun_l3_n58(x)
+ end
+end
+
+def fun_l2_n662(x)
+ if (x < 1)
+ fun_l3_n690(x)
+ else
+ fun_l3_n647(x)
+ end
+end
+
+def fun_l2_n663(x)
+ if (x < 1)
+ fun_l3_n886(x)
+ else
+ fun_l3_n142(x)
+ end
+end
+
+def fun_l2_n664(x)
+ if (x < 1)
+ fun_l3_n713(x)
+ else
+ fun_l3_n710(x)
+ end
+end
+
+def fun_l2_n665(x)
+ if (x < 1)
+ fun_l3_n657(x)
+ else
+ fun_l3_n24(x)
+ end
+end
+
+def fun_l2_n666(x)
+ if (x < 1)
+ fun_l3_n993(x)
+ else
+ fun_l3_n741(x)
+ end
+end
+
+def fun_l2_n667(x)
+ if (x < 1)
+ fun_l3_n265(x)
+ else
+ fun_l3_n143(x)
+ end
+end
+
+def fun_l2_n668(x)
+ if (x < 1)
+ fun_l3_n725(x)
+ else
+ fun_l3_n272(x)
+ end
+end
+
+def fun_l2_n669(x)
+ if (x < 1)
+ fun_l3_n171(x)
+ else
+ fun_l3_n95(x)
+ end
+end
+
+def fun_l2_n670(x)
+ if (x < 1)
+ fun_l3_n409(x)
+ else
+ fun_l3_n774(x)
+ end
+end
+
+def fun_l2_n671(x)
+ if (x < 1)
+ fun_l3_n566(x)
+ else
+ fun_l3_n251(x)
+ end
+end
+
+def fun_l2_n672(x)
+ if (x < 1)
+ fun_l3_n914(x)
+ else
+ fun_l3_n716(x)
+ end
+end
+
+def fun_l2_n673(x)
+ if (x < 1)
+ fun_l3_n889(x)
+ else
+ fun_l3_n339(x)
+ end
+end
+
+def fun_l2_n674(x)
+ if (x < 1)
+ fun_l3_n808(x)
+ else
+ fun_l3_n858(x)
+ end
+end
+
+def fun_l2_n675(x)
+ if (x < 1)
+ fun_l3_n895(x)
+ else
+ fun_l3_n180(x)
+ end
+end
+
+def fun_l2_n676(x)
+ if (x < 1)
+ fun_l3_n674(x)
+ else
+ fun_l3_n183(x)
+ end
+end
+
+def fun_l2_n677(x)
+ if (x < 1)
+ fun_l3_n934(x)
+ else
+ fun_l3_n292(x)
+ end
+end
+
+def fun_l2_n678(x)
+ if (x < 1)
+ fun_l3_n302(x)
+ else
+ fun_l3_n511(x)
+ end
+end
+
+def fun_l2_n679(x)
+ if (x < 1)
+ fun_l3_n847(x)
+ else
+ fun_l3_n70(x)
+ end
+end
+
+def fun_l2_n680(x)
+ if (x < 1)
+ fun_l3_n200(x)
+ else
+ fun_l3_n436(x)
+ end
+end
+
+def fun_l2_n681(x)
+ if (x < 1)
+ fun_l3_n44(x)
+ else
+ fun_l3_n145(x)
+ end
+end
+
+def fun_l2_n682(x)
+ if (x < 1)
+ fun_l3_n929(x)
+ else
+ fun_l3_n605(x)
+ end
+end
+
+def fun_l2_n683(x)
+ if (x < 1)
+ fun_l3_n990(x)
+ else
+ fun_l3_n641(x)
+ end
+end
+
+def fun_l2_n684(x)
+ if (x < 1)
+ fun_l3_n679(x)
+ else
+ fun_l3_n976(x)
+ end
+end
+
+def fun_l2_n685(x)
+ if (x < 1)
+ fun_l3_n405(x)
+ else
+ fun_l3_n658(x)
+ end
+end
+
+def fun_l2_n686(x)
+ if (x < 1)
+ fun_l3_n685(x)
+ else
+ fun_l3_n379(x)
+ end
+end
+
+def fun_l2_n687(x)
+ if (x < 1)
+ fun_l3_n279(x)
+ else
+ fun_l3_n415(x)
+ end
+end
+
+def fun_l2_n688(x)
+ if (x < 1)
+ fun_l3_n234(x)
+ else
+ fun_l3_n634(x)
+ end
+end
+
+def fun_l2_n689(x)
+ if (x < 1)
+ fun_l3_n119(x)
+ else
+ fun_l3_n234(x)
+ end
+end
+
+def fun_l2_n690(x)
+ if (x < 1)
+ fun_l3_n213(x)
+ else
+ fun_l3_n102(x)
+ end
+end
+
+def fun_l2_n691(x)
+ if (x < 1)
+ fun_l3_n744(x)
+ else
+ fun_l3_n31(x)
+ end
+end
+
+def fun_l2_n692(x)
+ if (x < 1)
+ fun_l3_n29(x)
+ else
+ fun_l3_n238(x)
+ end
+end
+
+def fun_l2_n693(x)
+ if (x < 1)
+ fun_l3_n323(x)
+ else
+ fun_l3_n388(x)
+ end
+end
+
+def fun_l2_n694(x)
+ if (x < 1)
+ fun_l3_n70(x)
+ else
+ fun_l3_n356(x)
+ end
+end
+
+def fun_l2_n695(x)
+ if (x < 1)
+ fun_l3_n589(x)
+ else
+ fun_l3_n949(x)
+ end
+end
+
+def fun_l2_n696(x)
+ if (x < 1)
+ fun_l3_n328(x)
+ else
+ fun_l3_n472(x)
+ end
+end
+
+def fun_l2_n697(x)
+ if (x < 1)
+ fun_l3_n705(x)
+ else
+ fun_l3_n709(x)
+ end
+end
+
+def fun_l2_n698(x)
+ if (x < 1)
+ fun_l3_n817(x)
+ else
+ fun_l3_n927(x)
+ end
+end
+
+def fun_l2_n699(x)
+ if (x < 1)
+ fun_l3_n916(x)
+ else
+ fun_l3_n66(x)
+ end
+end
+
+def fun_l2_n700(x)
+ if (x < 1)
+ fun_l3_n461(x)
+ else
+ fun_l3_n648(x)
+ end
+end
+
+def fun_l2_n701(x)
+ if (x < 1)
+ fun_l3_n71(x)
+ else
+ fun_l3_n218(x)
+ end
+end
+
+def fun_l2_n702(x)
+ if (x < 1)
+ fun_l3_n490(x)
+ else
+ fun_l3_n706(x)
+ end
+end
+
+def fun_l2_n703(x)
+ if (x < 1)
+ fun_l3_n480(x)
+ else
+ fun_l3_n665(x)
+ end
+end
+
+def fun_l2_n704(x)
+ if (x < 1)
+ fun_l3_n965(x)
+ else
+ fun_l3_n394(x)
+ end
+end
+
+def fun_l2_n705(x)
+ if (x < 1)
+ fun_l3_n65(x)
+ else
+ fun_l3_n761(x)
+ end
+end
+
+def fun_l2_n706(x)
+ if (x < 1)
+ fun_l3_n690(x)
+ else
+ fun_l3_n50(x)
+ end
+end
+
+def fun_l2_n707(x)
+ if (x < 1)
+ fun_l3_n665(x)
+ else
+ fun_l3_n589(x)
+ end
+end
+
+def fun_l2_n708(x)
+ if (x < 1)
+ fun_l3_n331(x)
+ else
+ fun_l3_n299(x)
+ end
+end
+
+def fun_l2_n709(x)
+ if (x < 1)
+ fun_l3_n656(x)
+ else
+ fun_l3_n966(x)
+ end
+end
+
+def fun_l2_n710(x)
+ if (x < 1)
+ fun_l3_n610(x)
+ else
+ fun_l3_n767(x)
+ end
+end
+
+def fun_l2_n711(x)
+ if (x < 1)
+ fun_l3_n281(x)
+ else
+ fun_l3_n942(x)
+ end
+end
+
+def fun_l2_n712(x)
+ if (x < 1)
+ fun_l3_n474(x)
+ else
+ fun_l3_n373(x)
+ end
+end
+
+def fun_l2_n713(x)
+ if (x < 1)
+ fun_l3_n668(x)
+ else
+ fun_l3_n881(x)
+ end
+end
+
+def fun_l2_n714(x)
+ if (x < 1)
+ fun_l3_n112(x)
+ else
+ fun_l3_n914(x)
+ end
+end
+
+def fun_l2_n715(x)
+ if (x < 1)
+ fun_l3_n276(x)
+ else
+ fun_l3_n946(x)
+ end
+end
+
+def fun_l2_n716(x)
+ if (x < 1)
+ fun_l3_n340(x)
+ else
+ fun_l3_n474(x)
+ end
+end
+
+def fun_l2_n717(x)
+ if (x < 1)
+ fun_l3_n179(x)
+ else
+ fun_l3_n740(x)
+ end
+end
+
+def fun_l2_n718(x)
+ if (x < 1)
+ fun_l3_n830(x)
+ else
+ fun_l3_n139(x)
+ end
+end
+
+def fun_l2_n719(x)
+ if (x < 1)
+ fun_l3_n945(x)
+ else
+ fun_l3_n98(x)
+ end
+end
+
+def fun_l2_n720(x)
+ if (x < 1)
+ fun_l3_n912(x)
+ else
+ fun_l3_n457(x)
+ end
+end
+
+def fun_l2_n721(x)
+ if (x < 1)
+ fun_l3_n4(x)
+ else
+ fun_l3_n798(x)
+ end
+end
+
+def fun_l2_n722(x)
+ if (x < 1)
+ fun_l3_n438(x)
+ else
+ fun_l3_n677(x)
+ end
+end
+
+def fun_l2_n723(x)
+ if (x < 1)
+ fun_l3_n447(x)
+ else
+ fun_l3_n418(x)
+ end
+end
+
+def fun_l2_n724(x)
+ if (x < 1)
+ fun_l3_n936(x)
+ else
+ fun_l3_n572(x)
+ end
+end
+
+def fun_l2_n725(x)
+ if (x < 1)
+ fun_l3_n856(x)
+ else
+ fun_l3_n429(x)
+ end
+end
+
+def fun_l2_n726(x)
+ if (x < 1)
+ fun_l3_n747(x)
+ else
+ fun_l3_n823(x)
+ end
+end
+
+def fun_l2_n727(x)
+ if (x < 1)
+ fun_l3_n908(x)
+ else
+ fun_l3_n446(x)
+ end
+end
+
+def fun_l2_n728(x)
+ if (x < 1)
+ fun_l3_n170(x)
+ else
+ fun_l3_n546(x)
+ end
+end
+
+def fun_l2_n729(x)
+ if (x < 1)
+ fun_l3_n45(x)
+ else
+ fun_l3_n76(x)
+ end
+end
+
+def fun_l2_n730(x)
+ if (x < 1)
+ fun_l3_n688(x)
+ else
+ fun_l3_n826(x)
+ end
+end
+
+def fun_l2_n731(x)
+ if (x < 1)
+ fun_l3_n805(x)
+ else
+ fun_l3_n88(x)
+ end
+end
+
+def fun_l2_n732(x)
+ if (x < 1)
+ fun_l3_n191(x)
+ else
+ fun_l3_n395(x)
+ end
+end
+
+def fun_l2_n733(x)
+ if (x < 1)
+ fun_l3_n161(x)
+ else
+ fun_l3_n600(x)
+ end
+end
+
+def fun_l2_n734(x)
+ if (x < 1)
+ fun_l3_n586(x)
+ else
+ fun_l3_n770(x)
+ end
+end
+
+def fun_l2_n735(x)
+ if (x < 1)
+ fun_l3_n139(x)
+ else
+ fun_l3_n108(x)
+ end
+end
+
+def fun_l2_n736(x)
+ if (x < 1)
+ fun_l3_n719(x)
+ else
+ fun_l3_n210(x)
+ end
+end
+
+def fun_l2_n737(x)
+ if (x < 1)
+ fun_l3_n327(x)
+ else
+ fun_l3_n138(x)
+ end
+end
+
+def fun_l2_n738(x)
+ if (x < 1)
+ fun_l3_n755(x)
+ else
+ fun_l3_n785(x)
+ end
+end
+
+def fun_l2_n739(x)
+ if (x < 1)
+ fun_l3_n499(x)
+ else
+ fun_l3_n998(x)
+ end
+end
+
+def fun_l2_n740(x)
+ if (x < 1)
+ fun_l3_n303(x)
+ else
+ fun_l3_n646(x)
+ end
+end
+
+def fun_l2_n741(x)
+ if (x < 1)
+ fun_l3_n385(x)
+ else
+ fun_l3_n202(x)
+ end
+end
+
+def fun_l2_n742(x)
+ if (x < 1)
+ fun_l3_n290(x)
+ else
+ fun_l3_n617(x)
+ end
+end
+
+def fun_l2_n743(x)
+ if (x < 1)
+ fun_l3_n735(x)
+ else
+ fun_l3_n322(x)
+ end
+end
+
+def fun_l2_n744(x)
+ if (x < 1)
+ fun_l3_n541(x)
+ else
+ fun_l3_n841(x)
+ end
+end
+
+def fun_l2_n745(x)
+ if (x < 1)
+ fun_l3_n124(x)
+ else
+ fun_l3_n927(x)
+ end
+end
+
+def fun_l2_n746(x)
+ if (x < 1)
+ fun_l3_n909(x)
+ else
+ fun_l3_n981(x)
+ end
+end
+
+def fun_l2_n747(x)
+ if (x < 1)
+ fun_l3_n535(x)
+ else
+ fun_l3_n154(x)
+ end
+end
+
+def fun_l2_n748(x)
+ if (x < 1)
+ fun_l3_n804(x)
+ else
+ fun_l3_n837(x)
+ end
+end
+
+def fun_l2_n749(x)
+ if (x < 1)
+ fun_l3_n787(x)
+ else
+ fun_l3_n890(x)
+ end
+end
+
+def fun_l2_n750(x)
+ if (x < 1)
+ fun_l3_n273(x)
+ else
+ fun_l3_n539(x)
+ end
+end
+
+def fun_l2_n751(x)
+ if (x < 1)
+ fun_l3_n397(x)
+ else
+ fun_l3_n630(x)
+ end
+end
+
+def fun_l2_n752(x)
+ if (x < 1)
+ fun_l3_n757(x)
+ else
+ fun_l3_n294(x)
+ end
+end
+
+def fun_l2_n753(x)
+ if (x < 1)
+ fun_l3_n731(x)
+ else
+ fun_l3_n121(x)
+ end
+end
+
+def fun_l2_n754(x)
+ if (x < 1)
+ fun_l3_n391(x)
+ else
+ fun_l3_n640(x)
+ end
+end
+
+def fun_l2_n755(x)
+ if (x < 1)
+ fun_l3_n441(x)
+ else
+ fun_l3_n731(x)
+ end
+end
+
+def fun_l2_n756(x)
+ if (x < 1)
+ fun_l3_n947(x)
+ else
+ fun_l3_n175(x)
+ end
+end
+
+def fun_l2_n757(x)
+ if (x < 1)
+ fun_l3_n294(x)
+ else
+ fun_l3_n732(x)
+ end
+end
+
+def fun_l2_n758(x)
+ if (x < 1)
+ fun_l3_n862(x)
+ else
+ fun_l3_n490(x)
+ end
+end
+
+def fun_l2_n759(x)
+ if (x < 1)
+ fun_l3_n959(x)
+ else
+ fun_l3_n58(x)
+ end
+end
+
+def fun_l2_n760(x)
+ if (x < 1)
+ fun_l3_n245(x)
+ else
+ fun_l3_n201(x)
+ end
+end
+
+def fun_l2_n761(x)
+ if (x < 1)
+ fun_l3_n174(x)
+ else
+ fun_l3_n763(x)
+ end
+end
+
+def fun_l2_n762(x)
+ if (x < 1)
+ fun_l3_n276(x)
+ else
+ fun_l3_n624(x)
+ end
+end
+
+def fun_l2_n763(x)
+ if (x < 1)
+ fun_l3_n130(x)
+ else
+ fun_l3_n452(x)
+ end
+end
+
+def fun_l2_n764(x)
+ if (x < 1)
+ fun_l3_n476(x)
+ else
+ fun_l3_n519(x)
+ end
+end
+
+def fun_l2_n765(x)
+ if (x < 1)
+ fun_l3_n911(x)
+ else
+ fun_l3_n28(x)
+ end
+end
+
+def fun_l2_n766(x)
+ if (x < 1)
+ fun_l3_n290(x)
+ else
+ fun_l3_n528(x)
+ end
+end
+
+def fun_l2_n767(x)
+ if (x < 1)
+ fun_l3_n89(x)
+ else
+ fun_l3_n284(x)
+ end
+end
+
+def fun_l2_n768(x)
+ if (x < 1)
+ fun_l3_n307(x)
+ else
+ fun_l3_n832(x)
+ end
+end
+
+def fun_l2_n769(x)
+ if (x < 1)
+ fun_l3_n204(x)
+ else
+ fun_l3_n358(x)
+ end
+end
+
+def fun_l2_n770(x)
+ if (x < 1)
+ fun_l3_n380(x)
+ else
+ fun_l3_n938(x)
+ end
+end
+
+def fun_l2_n771(x)
+ if (x < 1)
+ fun_l3_n245(x)
+ else
+ fun_l3_n156(x)
+ end
+end
+
+def fun_l2_n772(x)
+ if (x < 1)
+ fun_l3_n77(x)
+ else
+ fun_l3_n842(x)
+ end
+end
+
+def fun_l2_n773(x)
+ if (x < 1)
+ fun_l3_n972(x)
+ else
+ fun_l3_n143(x)
+ end
+end
+
+def fun_l2_n774(x)
+ if (x < 1)
+ fun_l3_n308(x)
+ else
+ fun_l3_n713(x)
+ end
+end
+
+def fun_l2_n775(x)
+ if (x < 1)
+ fun_l3_n714(x)
+ else
+ fun_l3_n329(x)
+ end
+end
+
+def fun_l2_n776(x)
+ if (x < 1)
+ fun_l3_n805(x)
+ else
+ fun_l3_n141(x)
+ end
+end
+
+def fun_l2_n777(x)
+ if (x < 1)
+ fun_l3_n155(x)
+ else
+ fun_l3_n873(x)
+ end
+end
+
+def fun_l2_n778(x)
+ if (x < 1)
+ fun_l3_n551(x)
+ else
+ fun_l3_n293(x)
+ end
+end
+
+def fun_l2_n779(x)
+ if (x < 1)
+ fun_l3_n857(x)
+ else
+ fun_l3_n551(x)
+ end
+end
+
+def fun_l2_n780(x)
+ if (x < 1)
+ fun_l3_n30(x)
+ else
+ fun_l3_n230(x)
+ end
+end
+
+def fun_l2_n781(x)
+ if (x < 1)
+ fun_l3_n958(x)
+ else
+ fun_l3_n463(x)
+ end
+end
+
+def fun_l2_n782(x)
+ if (x < 1)
+ fun_l3_n89(x)
+ else
+ fun_l3_n514(x)
+ end
+end
+
+def fun_l2_n783(x)
+ if (x < 1)
+ fun_l3_n290(x)
+ else
+ fun_l3_n394(x)
+ end
+end
+
+def fun_l2_n784(x)
+ if (x < 1)
+ fun_l3_n582(x)
+ else
+ fun_l3_n927(x)
+ end
+end
+
+def fun_l2_n785(x)
+ if (x < 1)
+ fun_l3_n739(x)
+ else
+ fun_l3_n822(x)
+ end
+end
+
+def fun_l2_n786(x)
+ if (x < 1)
+ fun_l3_n1(x)
+ else
+ fun_l3_n162(x)
+ end
+end
+
+def fun_l2_n787(x)
+ if (x < 1)
+ fun_l3_n88(x)
+ else
+ fun_l3_n800(x)
+ end
+end
+
+def fun_l2_n788(x)
+ if (x < 1)
+ fun_l3_n592(x)
+ else
+ fun_l3_n235(x)
+ end
+end
+
+def fun_l2_n789(x)
+ if (x < 1)
+ fun_l3_n337(x)
+ else
+ fun_l3_n204(x)
+ end
+end
+
+def fun_l2_n790(x)
+ if (x < 1)
+ fun_l3_n481(x)
+ else
+ fun_l3_n361(x)
+ end
+end
+
+def fun_l2_n791(x)
+ if (x < 1)
+ fun_l3_n441(x)
+ else
+ fun_l3_n883(x)
+ end
+end
+
+def fun_l2_n792(x)
+ if (x < 1)
+ fun_l3_n424(x)
+ else
+ fun_l3_n387(x)
+ end
+end
+
+def fun_l2_n793(x)
+ if (x < 1)
+ fun_l3_n961(x)
+ else
+ fun_l3_n710(x)
+ end
+end
+
+def fun_l2_n794(x)
+ if (x < 1)
+ fun_l3_n653(x)
+ else
+ fun_l3_n869(x)
+ end
+end
+
+def fun_l2_n795(x)
+ if (x < 1)
+ fun_l3_n466(x)
+ else
+ fun_l3_n195(x)
+ end
+end
+
+def fun_l2_n796(x)
+ if (x < 1)
+ fun_l3_n30(x)
+ else
+ fun_l3_n137(x)
+ end
+end
+
+def fun_l2_n797(x)
+ if (x < 1)
+ fun_l3_n974(x)
+ else
+ fun_l3_n911(x)
+ end
+end
+
+def fun_l2_n798(x)
+ if (x < 1)
+ fun_l3_n500(x)
+ else
+ fun_l3_n607(x)
+ end
+end
+
+def fun_l2_n799(x)
+ if (x < 1)
+ fun_l3_n964(x)
+ else
+ fun_l3_n425(x)
+ end
+end
+
+def fun_l2_n800(x)
+ if (x < 1)
+ fun_l3_n381(x)
+ else
+ fun_l3_n618(x)
+ end
+end
+
+def fun_l2_n801(x)
+ if (x < 1)
+ fun_l3_n803(x)
+ else
+ fun_l3_n697(x)
+ end
+end
+
+def fun_l2_n802(x)
+ if (x < 1)
+ fun_l3_n372(x)
+ else
+ fun_l3_n331(x)
+ end
+end
+
+def fun_l2_n803(x)
+ if (x < 1)
+ fun_l3_n450(x)
+ else
+ fun_l3_n707(x)
+ end
+end
+
+def fun_l2_n804(x)
+ if (x < 1)
+ fun_l3_n760(x)
+ else
+ fun_l3_n800(x)
+ end
+end
+
+def fun_l2_n805(x)
+ if (x < 1)
+ fun_l3_n695(x)
+ else
+ fun_l3_n426(x)
+ end
+end
+
+def fun_l2_n806(x)
+ if (x < 1)
+ fun_l3_n312(x)
+ else
+ fun_l3_n517(x)
+ end
+end
+
+def fun_l2_n807(x)
+ if (x < 1)
+ fun_l3_n794(x)
+ else
+ fun_l3_n37(x)
+ end
+end
+
+def fun_l2_n808(x)
+ if (x < 1)
+ fun_l3_n591(x)
+ else
+ fun_l3_n933(x)
+ end
+end
+
+def fun_l2_n809(x)
+ if (x < 1)
+ fun_l3_n974(x)
+ else
+ fun_l3_n69(x)
+ end
+end
+
+def fun_l2_n810(x)
+ if (x < 1)
+ fun_l3_n263(x)
+ else
+ fun_l3_n628(x)
+ end
+end
+
+def fun_l2_n811(x)
+ if (x < 1)
+ fun_l3_n647(x)
+ else
+ fun_l3_n133(x)
+ end
+end
+
+def fun_l2_n812(x)
+ if (x < 1)
+ fun_l3_n396(x)
+ else
+ fun_l3_n872(x)
+ end
+end
+
+def fun_l2_n813(x)
+ if (x < 1)
+ fun_l3_n716(x)
+ else
+ fun_l3_n254(x)
+ end
+end
+
+def fun_l2_n814(x)
+ if (x < 1)
+ fun_l3_n181(x)
+ else
+ fun_l3_n786(x)
+ end
+end
+
+def fun_l2_n815(x)
+ if (x < 1)
+ fun_l3_n163(x)
+ else
+ fun_l3_n175(x)
+ end
+end
+
+def fun_l2_n816(x)
+ if (x < 1)
+ fun_l3_n835(x)
+ else
+ fun_l3_n713(x)
+ end
+end
+
+def fun_l2_n817(x)
+ if (x < 1)
+ fun_l3_n54(x)
+ else
+ fun_l3_n280(x)
+ end
+end
+
+def fun_l2_n818(x)
+ if (x < 1)
+ fun_l3_n261(x)
+ else
+ fun_l3_n958(x)
+ end
+end
+
+def fun_l2_n819(x)
+ if (x < 1)
+ fun_l3_n821(x)
+ else
+ fun_l3_n418(x)
+ end
+end
+
+def fun_l2_n820(x)
+ if (x < 1)
+ fun_l3_n541(x)
+ else
+ fun_l3_n547(x)
+ end
+end
+
+def fun_l2_n821(x)
+ if (x < 1)
+ fun_l3_n726(x)
+ else
+ fun_l3_n386(x)
+ end
+end
+
+def fun_l2_n822(x)
+ if (x < 1)
+ fun_l3_n369(x)
+ else
+ fun_l3_n781(x)
+ end
+end
+
+def fun_l2_n823(x)
+ if (x < 1)
+ fun_l3_n392(x)
+ else
+ fun_l3_n357(x)
+ end
+end
+
+def fun_l2_n824(x)
+ if (x < 1)
+ fun_l3_n167(x)
+ else
+ fun_l3_n764(x)
+ end
+end
+
+def fun_l2_n825(x)
+ if (x < 1)
+ fun_l3_n339(x)
+ else
+ fun_l3_n853(x)
+ end
+end
+
+def fun_l2_n826(x)
+ if (x < 1)
+ fun_l3_n613(x)
+ else
+ fun_l3_n767(x)
+ end
+end
+
+def fun_l2_n827(x)
+ if (x < 1)
+ fun_l3_n31(x)
+ else
+ fun_l3_n35(x)
+ end
+end
+
+def fun_l2_n828(x)
+ if (x < 1)
+ fun_l3_n838(x)
+ else
+ fun_l3_n939(x)
+ end
+end
+
+def fun_l2_n829(x)
+ if (x < 1)
+ fun_l3_n344(x)
+ else
+ fun_l3_n568(x)
+ end
+end
+
+def fun_l2_n830(x)
+ if (x < 1)
+ fun_l3_n892(x)
+ else
+ fun_l3_n564(x)
+ end
+end
+
+def fun_l2_n831(x)
+ if (x < 1)
+ fun_l3_n520(x)
+ else
+ fun_l3_n897(x)
+ end
+end
+
+def fun_l2_n832(x)
+ if (x < 1)
+ fun_l3_n986(x)
+ else
+ fun_l3_n523(x)
+ end
+end
+
+def fun_l2_n833(x)
+ if (x < 1)
+ fun_l3_n649(x)
+ else
+ fun_l3_n776(x)
+ end
+end
+
+def fun_l2_n834(x)
+ if (x < 1)
+ fun_l3_n613(x)
+ else
+ fun_l3_n170(x)
+ end
+end
+
+def fun_l2_n835(x)
+ if (x < 1)
+ fun_l3_n657(x)
+ else
+ fun_l3_n790(x)
+ end
+end
+
+def fun_l2_n836(x)
+ if (x < 1)
+ fun_l3_n976(x)
+ else
+ fun_l3_n785(x)
+ end
+end
+
+def fun_l2_n837(x)
+ if (x < 1)
+ fun_l3_n746(x)
+ else
+ fun_l3_n448(x)
+ end
+end
+
+def fun_l2_n838(x)
+ if (x < 1)
+ fun_l3_n911(x)
+ else
+ fun_l3_n978(x)
+ end
+end
+
+def fun_l2_n839(x)
+ if (x < 1)
+ fun_l3_n917(x)
+ else
+ fun_l3_n484(x)
+ end
+end
+
+def fun_l2_n840(x)
+ if (x < 1)
+ fun_l3_n930(x)
+ else
+ fun_l3_n617(x)
+ end
+end
+
+def fun_l2_n841(x)
+ if (x < 1)
+ fun_l3_n753(x)
+ else
+ fun_l3_n587(x)
+ end
+end
+
+def fun_l2_n842(x)
+ if (x < 1)
+ fun_l3_n21(x)
+ else
+ fun_l3_n256(x)
+ end
+end
+
+def fun_l2_n843(x)
+ if (x < 1)
+ fun_l3_n697(x)
+ else
+ fun_l3_n828(x)
+ end
+end
+
+def fun_l2_n844(x)
+ if (x < 1)
+ fun_l3_n59(x)
+ else
+ fun_l3_n855(x)
+ end
+end
+
+def fun_l2_n845(x)
+ if (x < 1)
+ fun_l3_n637(x)
+ else
+ fun_l3_n667(x)
+ end
+end
+
+def fun_l2_n846(x)
+ if (x < 1)
+ fun_l3_n229(x)
+ else
+ fun_l3_n379(x)
+ end
+end
+
+def fun_l2_n847(x)
+ if (x < 1)
+ fun_l3_n908(x)
+ else
+ fun_l3_n53(x)
+ end
+end
+
+def fun_l2_n848(x)
+ if (x < 1)
+ fun_l3_n72(x)
+ else
+ fun_l3_n882(x)
+ end
+end
+
+def fun_l2_n849(x)
+ if (x < 1)
+ fun_l3_n890(x)
+ else
+ fun_l3_n153(x)
+ end
+end
+
+def fun_l2_n850(x)
+ if (x < 1)
+ fun_l3_n301(x)
+ else
+ fun_l3_n404(x)
+ end
+end
+
+def fun_l2_n851(x)
+ if (x < 1)
+ fun_l3_n451(x)
+ else
+ fun_l3_n365(x)
+ end
+end
+
+def fun_l2_n852(x)
+ if (x < 1)
+ fun_l3_n591(x)
+ else
+ fun_l3_n974(x)
+ end
+end
+
+def fun_l2_n853(x)
+ if (x < 1)
+ fun_l3_n534(x)
+ else
+ fun_l3_n649(x)
+ end
+end
+
+def fun_l2_n854(x)
+ if (x < 1)
+ fun_l3_n438(x)
+ else
+ fun_l3_n853(x)
+ end
+end
+
+def fun_l2_n855(x)
+ if (x < 1)
+ fun_l3_n181(x)
+ else
+ fun_l3_n888(x)
+ end
+end
+
+def fun_l2_n856(x)
+ if (x < 1)
+ fun_l3_n856(x)
+ else
+ fun_l3_n139(x)
+ end
+end
+
+def fun_l2_n857(x)
+ if (x < 1)
+ fun_l3_n664(x)
+ else
+ fun_l3_n659(x)
+ end
+end
+
+def fun_l2_n858(x)
+ if (x < 1)
+ fun_l3_n838(x)
+ else
+ fun_l3_n157(x)
+ end
+end
+
+def fun_l2_n859(x)
+ if (x < 1)
+ fun_l3_n542(x)
+ else
+ fun_l3_n512(x)
+ end
+end
+
+def fun_l2_n860(x)
+ if (x < 1)
+ fun_l3_n213(x)
+ else
+ fun_l3_n954(x)
+ end
+end
+
+def fun_l2_n861(x)
+ if (x < 1)
+ fun_l3_n580(x)
+ else
+ fun_l3_n525(x)
+ end
+end
+
+def fun_l2_n862(x)
+ if (x < 1)
+ fun_l3_n649(x)
+ else
+ fun_l3_n376(x)
+ end
+end
+
+def fun_l2_n863(x)
+ if (x < 1)
+ fun_l3_n846(x)
+ else
+ fun_l3_n643(x)
+ end
+end
+
+def fun_l2_n864(x)
+ if (x < 1)
+ fun_l3_n899(x)
+ else
+ fun_l3_n667(x)
+ end
+end
+
+def fun_l2_n865(x)
+ if (x < 1)
+ fun_l3_n326(x)
+ else
+ fun_l3_n664(x)
+ end
+end
+
+def fun_l2_n866(x)
+ if (x < 1)
+ fun_l3_n155(x)
+ else
+ fun_l3_n369(x)
+ end
+end
+
+def fun_l2_n867(x)
+ if (x < 1)
+ fun_l3_n583(x)
+ else
+ fun_l3_n12(x)
+ end
+end
+
+def fun_l2_n868(x)
+ if (x < 1)
+ fun_l3_n637(x)
+ else
+ fun_l3_n711(x)
+ end
+end
+
+def fun_l2_n869(x)
+ if (x < 1)
+ fun_l3_n120(x)
+ else
+ fun_l3_n449(x)
+ end
+end
+
+def fun_l2_n870(x)
+ if (x < 1)
+ fun_l3_n459(x)
+ else
+ fun_l3_n83(x)
+ end
+end
+
+def fun_l2_n871(x)
+ if (x < 1)
+ fun_l3_n298(x)
+ else
+ fun_l3_n6(x)
+ end
+end
+
+def fun_l2_n872(x)
+ if (x < 1)
+ fun_l3_n640(x)
+ else
+ fun_l3_n319(x)
+ end
+end
+
+def fun_l2_n873(x)
+ if (x < 1)
+ fun_l3_n777(x)
+ else
+ fun_l3_n903(x)
+ end
+end
+
+def fun_l2_n874(x)
+ if (x < 1)
+ fun_l3_n16(x)
+ else
+ fun_l3_n50(x)
+ end
+end
+
+def fun_l2_n875(x)
+ if (x < 1)
+ fun_l3_n732(x)
+ else
+ fun_l3_n711(x)
+ end
+end
+
+def fun_l2_n876(x)
+ if (x < 1)
+ fun_l3_n958(x)
+ else
+ fun_l3_n949(x)
+ end
+end
+
+def fun_l2_n877(x)
+ if (x < 1)
+ fun_l3_n992(x)
+ else
+ fun_l3_n592(x)
+ end
+end
+
+def fun_l2_n878(x)
+ if (x < 1)
+ fun_l3_n283(x)
+ else
+ fun_l3_n178(x)
+ end
+end
+
+def fun_l2_n879(x)
+ if (x < 1)
+ fun_l3_n179(x)
+ else
+ fun_l3_n963(x)
+ end
+end
+
+def fun_l2_n880(x)
+ if (x < 1)
+ fun_l3_n345(x)
+ else
+ fun_l3_n585(x)
+ end
+end
+
+def fun_l2_n881(x)
+ if (x < 1)
+ fun_l3_n859(x)
+ else
+ fun_l3_n204(x)
+ end
+end
+
+def fun_l2_n882(x)
+ if (x < 1)
+ fun_l3_n718(x)
+ else
+ fun_l3_n354(x)
+ end
+end
+
+def fun_l2_n883(x)
+ if (x < 1)
+ fun_l3_n58(x)
+ else
+ fun_l3_n34(x)
+ end
+end
+
+def fun_l2_n884(x)
+ if (x < 1)
+ fun_l3_n442(x)
+ else
+ fun_l3_n853(x)
+ end
+end
+
+def fun_l2_n885(x)
+ if (x < 1)
+ fun_l3_n37(x)
+ else
+ fun_l3_n665(x)
+ end
+end
+
+def fun_l2_n886(x)
+ if (x < 1)
+ fun_l3_n468(x)
+ else
+ fun_l3_n283(x)
+ end
+end
+
+def fun_l2_n887(x)
+ if (x < 1)
+ fun_l3_n674(x)
+ else
+ fun_l3_n940(x)
+ end
+end
+
+def fun_l2_n888(x)
+ if (x < 1)
+ fun_l3_n556(x)
+ else
+ fun_l3_n191(x)
+ end
+end
+
+def fun_l2_n889(x)
+ if (x < 1)
+ fun_l3_n451(x)
+ else
+ fun_l3_n33(x)
+ end
+end
+
+def fun_l2_n890(x)
+ if (x < 1)
+ fun_l3_n724(x)
+ else
+ fun_l3_n355(x)
+ end
+end
+
+def fun_l2_n891(x)
+ if (x < 1)
+ fun_l3_n811(x)
+ else
+ fun_l3_n969(x)
+ end
+end
+
+def fun_l2_n892(x)
+ if (x < 1)
+ fun_l3_n122(x)
+ else
+ fun_l3_n956(x)
+ end
+end
+
+def fun_l2_n893(x)
+ if (x < 1)
+ fun_l3_n416(x)
+ else
+ fun_l3_n373(x)
+ end
+end
+
+def fun_l2_n894(x)
+ if (x < 1)
+ fun_l3_n990(x)
+ else
+ fun_l3_n148(x)
+ end
+end
+
+def fun_l2_n895(x)
+ if (x < 1)
+ fun_l3_n52(x)
+ else
+ fun_l3_n605(x)
+ end
+end
+
+def fun_l2_n896(x)
+ if (x < 1)
+ fun_l3_n523(x)
+ else
+ fun_l3_n929(x)
+ end
+end
+
+def fun_l2_n897(x)
+ if (x < 1)
+ fun_l3_n631(x)
+ else
+ fun_l3_n167(x)
+ end
+end
+
+def fun_l2_n898(x)
+ if (x < 1)
+ fun_l3_n366(x)
+ else
+ fun_l3_n666(x)
+ end
+end
+
+def fun_l2_n899(x)
+ if (x < 1)
+ fun_l3_n758(x)
+ else
+ fun_l3_n441(x)
+ end
+end
+
+def fun_l2_n900(x)
+ if (x < 1)
+ fun_l3_n561(x)
+ else
+ fun_l3_n766(x)
+ end
+end
+
+def fun_l2_n901(x)
+ if (x < 1)
+ fun_l3_n323(x)
+ else
+ fun_l3_n348(x)
+ end
+end
+
+def fun_l2_n902(x)
+ if (x < 1)
+ fun_l3_n455(x)
+ else
+ fun_l3_n78(x)
+ end
+end
+
+def fun_l2_n903(x)
+ if (x < 1)
+ fun_l3_n373(x)
+ else
+ fun_l3_n646(x)
+ end
+end
+
+def fun_l2_n904(x)
+ if (x < 1)
+ fun_l3_n595(x)
+ else
+ fun_l3_n742(x)
+ end
+end
+
+def fun_l2_n905(x)
+ if (x < 1)
+ fun_l3_n102(x)
+ else
+ fun_l3_n140(x)
+ end
+end
+
+def fun_l2_n906(x)
+ if (x < 1)
+ fun_l3_n458(x)
+ else
+ fun_l3_n528(x)
+ end
+end
+
+def fun_l2_n907(x)
+ if (x < 1)
+ fun_l3_n970(x)
+ else
+ fun_l3_n841(x)
+ end
+end
+
+def fun_l2_n908(x)
+ if (x < 1)
+ fun_l3_n97(x)
+ else
+ fun_l3_n250(x)
+ end
+end
+
+def fun_l2_n909(x)
+ if (x < 1)
+ fun_l3_n381(x)
+ else
+ fun_l3_n44(x)
+ end
+end
+
+def fun_l2_n910(x)
+ if (x < 1)
+ fun_l3_n812(x)
+ else
+ fun_l3_n225(x)
+ end
+end
+
+def fun_l2_n911(x)
+ if (x < 1)
+ fun_l3_n849(x)
+ else
+ fun_l3_n683(x)
+ end
+end
+
+def fun_l2_n912(x)
+ if (x < 1)
+ fun_l3_n958(x)
+ else
+ fun_l3_n590(x)
+ end
+end
+
+def fun_l2_n913(x)
+ if (x < 1)
+ fun_l3_n858(x)
+ else
+ fun_l3_n140(x)
+ end
+end
+
+def fun_l2_n914(x)
+ if (x < 1)
+ fun_l3_n997(x)
+ else
+ fun_l3_n959(x)
+ end
+end
+
+def fun_l2_n915(x)
+ if (x < 1)
+ fun_l3_n184(x)
+ else
+ fun_l3_n910(x)
+ end
+end
+
+def fun_l2_n916(x)
+ if (x < 1)
+ fun_l3_n369(x)
+ else
+ fun_l3_n180(x)
+ end
+end
+
+def fun_l2_n917(x)
+ if (x < 1)
+ fun_l3_n537(x)
+ else
+ fun_l3_n927(x)
+ end
+end
+
+def fun_l2_n918(x)
+ if (x < 1)
+ fun_l3_n739(x)
+ else
+ fun_l3_n222(x)
+ end
+end
+
+def fun_l2_n919(x)
+ if (x < 1)
+ fun_l3_n640(x)
+ else
+ fun_l3_n406(x)
+ end
+end
+
+def fun_l2_n920(x)
+ if (x < 1)
+ fun_l3_n371(x)
+ else
+ fun_l3_n976(x)
+ end
+end
+
+def fun_l2_n921(x)
+ if (x < 1)
+ fun_l3_n262(x)
+ else
+ fun_l3_n532(x)
+ end
+end
+
+def fun_l2_n922(x)
+ if (x < 1)
+ fun_l3_n772(x)
+ else
+ fun_l3_n368(x)
+ end
+end
+
+def fun_l2_n923(x)
+ if (x < 1)
+ fun_l3_n935(x)
+ else
+ fun_l3_n109(x)
+ end
+end
+
+def fun_l2_n924(x)
+ if (x < 1)
+ fun_l3_n982(x)
+ else
+ fun_l3_n643(x)
+ end
+end
+
+def fun_l2_n925(x)
+ if (x < 1)
+ fun_l3_n849(x)
+ else
+ fun_l3_n266(x)
+ end
+end
+
+def fun_l2_n926(x)
+ if (x < 1)
+ fun_l3_n977(x)
+ else
+ fun_l3_n803(x)
+ end
+end
+
+def fun_l2_n927(x)
+ if (x < 1)
+ fun_l3_n706(x)
+ else
+ fun_l3_n441(x)
+ end
+end
+
+def fun_l2_n928(x)
+ if (x < 1)
+ fun_l3_n781(x)
+ else
+ fun_l3_n374(x)
+ end
+end
+
+def fun_l2_n929(x)
+ if (x < 1)
+ fun_l3_n122(x)
+ else
+ fun_l3_n851(x)
+ end
+end
+
+def fun_l2_n930(x)
+ if (x < 1)
+ fun_l3_n821(x)
+ else
+ fun_l3_n904(x)
+ end
+end
+
+def fun_l2_n931(x)
+ if (x < 1)
+ fun_l3_n379(x)
+ else
+ fun_l3_n211(x)
+ end
+end
+
+def fun_l2_n932(x)
+ if (x < 1)
+ fun_l3_n572(x)
+ else
+ fun_l3_n169(x)
+ end
+end
+
+def fun_l2_n933(x)
+ if (x < 1)
+ fun_l3_n620(x)
+ else
+ fun_l3_n703(x)
+ end
+end
+
+def fun_l2_n934(x)
+ if (x < 1)
+ fun_l3_n238(x)
+ else
+ fun_l3_n220(x)
+ end
+end
+
+def fun_l2_n935(x)
+ if (x < 1)
+ fun_l3_n652(x)
+ else
+ fun_l3_n822(x)
+ end
+end
+
+def fun_l2_n936(x)
+ if (x < 1)
+ fun_l3_n818(x)
+ else
+ fun_l3_n384(x)
+ end
+end
+
+def fun_l2_n937(x)
+ if (x < 1)
+ fun_l3_n560(x)
+ else
+ fun_l3_n875(x)
+ end
+end
+
+def fun_l2_n938(x)
+ if (x < 1)
+ fun_l3_n417(x)
+ else
+ fun_l3_n900(x)
+ end
+end
+
+def fun_l2_n939(x)
+ if (x < 1)
+ fun_l3_n740(x)
+ else
+ fun_l3_n431(x)
+ end
+end
+
+def fun_l2_n940(x)
+ if (x < 1)
+ fun_l3_n1(x)
+ else
+ fun_l3_n147(x)
+ end
+end
+
+def fun_l2_n941(x)
+ if (x < 1)
+ fun_l3_n64(x)
+ else
+ fun_l3_n317(x)
+ end
+end
+
+def fun_l2_n942(x)
+ if (x < 1)
+ fun_l3_n347(x)
+ else
+ fun_l3_n778(x)
+ end
+end
+
+def fun_l2_n943(x)
+ if (x < 1)
+ fun_l3_n819(x)
+ else
+ fun_l3_n446(x)
+ end
+end
+
+def fun_l2_n944(x)
+ if (x < 1)
+ fun_l3_n492(x)
+ else
+ fun_l3_n381(x)
+ end
+end
+
+def fun_l2_n945(x)
+ if (x < 1)
+ fun_l3_n594(x)
+ else
+ fun_l3_n413(x)
+ end
+end
+
+def fun_l2_n946(x)
+ if (x < 1)
+ fun_l3_n580(x)
+ else
+ fun_l3_n838(x)
+ end
+end
+
+def fun_l2_n947(x)
+ if (x < 1)
+ fun_l3_n880(x)
+ else
+ fun_l3_n768(x)
+ end
+end
+
+def fun_l2_n948(x)
+ if (x < 1)
+ fun_l3_n9(x)
+ else
+ fun_l3_n888(x)
+ end
+end
+
+def fun_l2_n949(x)
+ if (x < 1)
+ fun_l3_n365(x)
+ else
+ fun_l3_n322(x)
+ end
+end
+
+def fun_l2_n950(x)
+ if (x < 1)
+ fun_l3_n283(x)
+ else
+ fun_l3_n921(x)
+ end
+end
+
+def fun_l2_n951(x)
+ if (x < 1)
+ fun_l3_n740(x)
+ else
+ fun_l3_n336(x)
+ end
+end
+
+def fun_l2_n952(x)
+ if (x < 1)
+ fun_l3_n827(x)
+ else
+ fun_l3_n683(x)
+ end
+end
+
+def fun_l2_n953(x)
+ if (x < 1)
+ fun_l3_n289(x)
+ else
+ fun_l3_n915(x)
+ end
+end
+
+def fun_l2_n954(x)
+ if (x < 1)
+ fun_l3_n234(x)
+ else
+ fun_l3_n198(x)
+ end
+end
+
+def fun_l2_n955(x)
+ if (x < 1)
+ fun_l3_n364(x)
+ else
+ fun_l3_n152(x)
+ end
+end
+
+def fun_l2_n956(x)
+ if (x < 1)
+ fun_l3_n370(x)
+ else
+ fun_l3_n786(x)
+ end
+end
+
+def fun_l2_n957(x)
+ if (x < 1)
+ fun_l3_n672(x)
+ else
+ fun_l3_n375(x)
+ end
+end
+
+def fun_l2_n958(x)
+ if (x < 1)
+ fun_l3_n423(x)
+ else
+ fun_l3_n448(x)
+ end
+end
+
+def fun_l2_n959(x)
+ if (x < 1)
+ fun_l3_n887(x)
+ else
+ fun_l3_n33(x)
+ end
+end
+
+def fun_l2_n960(x)
+ if (x < 1)
+ fun_l3_n280(x)
+ else
+ fun_l3_n334(x)
+ end
+end
+
+def fun_l2_n961(x)
+ if (x < 1)
+ fun_l3_n535(x)
+ else
+ fun_l3_n524(x)
+ end
+end
+
+def fun_l2_n962(x)
+ if (x < 1)
+ fun_l3_n689(x)
+ else
+ fun_l3_n426(x)
+ end
+end
+
+def fun_l2_n963(x)
+ if (x < 1)
+ fun_l3_n235(x)
+ else
+ fun_l3_n286(x)
+ end
+end
+
+def fun_l2_n964(x)
+ if (x < 1)
+ fun_l3_n116(x)
+ else
+ fun_l3_n308(x)
+ end
+end
+
+def fun_l2_n965(x)
+ if (x < 1)
+ fun_l3_n174(x)
+ else
+ fun_l3_n823(x)
+ end
+end
+
+def fun_l2_n966(x)
+ if (x < 1)
+ fun_l3_n792(x)
+ else
+ fun_l3_n29(x)
+ end
+end
+
+def fun_l2_n967(x)
+ if (x < 1)
+ fun_l3_n755(x)
+ else
+ fun_l3_n689(x)
+ end
+end
+
+def fun_l2_n968(x)
+ if (x < 1)
+ fun_l3_n167(x)
+ else
+ fun_l3_n287(x)
+ end
+end
+
+def fun_l2_n969(x)
+ if (x < 1)
+ fun_l3_n836(x)
+ else
+ fun_l3_n892(x)
+ end
+end
+
+def fun_l2_n970(x)
+ if (x < 1)
+ fun_l3_n201(x)
+ else
+ fun_l3_n677(x)
+ end
+end
+
+def fun_l2_n971(x)
+ if (x < 1)
+ fun_l3_n990(x)
+ else
+ fun_l3_n251(x)
+ end
+end
+
+def fun_l2_n972(x)
+ if (x < 1)
+ fun_l3_n500(x)
+ else
+ fun_l3_n686(x)
+ end
+end
+
+def fun_l2_n973(x)
+ if (x < 1)
+ fun_l3_n969(x)
+ else
+ fun_l3_n819(x)
+ end
+end
+
+def fun_l2_n974(x)
+ if (x < 1)
+ fun_l3_n87(x)
+ else
+ fun_l3_n120(x)
+ end
+end
+
+def fun_l2_n975(x)
+ if (x < 1)
+ fun_l3_n701(x)
+ else
+ fun_l3_n569(x)
+ end
+end
+
+def fun_l2_n976(x)
+ if (x < 1)
+ fun_l3_n128(x)
+ else
+ fun_l3_n310(x)
+ end
+end
+
+def fun_l2_n977(x)
+ if (x < 1)
+ fun_l3_n690(x)
+ else
+ fun_l3_n5(x)
+ end
+end
+
+def fun_l2_n978(x)
+ if (x < 1)
+ fun_l3_n432(x)
+ else
+ fun_l3_n964(x)
+ end
+end
+
+def fun_l2_n979(x)
+ if (x < 1)
+ fun_l3_n198(x)
+ else
+ fun_l3_n161(x)
+ end
+end
+
+def fun_l2_n980(x)
+ if (x < 1)
+ fun_l3_n415(x)
+ else
+ fun_l3_n768(x)
+ end
+end
+
+def fun_l2_n981(x)
+ if (x < 1)
+ fun_l3_n635(x)
+ else
+ fun_l3_n569(x)
+ end
+end
+
+def fun_l2_n982(x)
+ if (x < 1)
+ fun_l3_n326(x)
+ else
+ fun_l3_n338(x)
+ end
+end
+
+def fun_l2_n983(x)
+ if (x < 1)
+ fun_l3_n243(x)
+ else
+ fun_l3_n48(x)
+ end
+end
+
+def fun_l2_n984(x)
+ if (x < 1)
+ fun_l3_n204(x)
+ else
+ fun_l3_n141(x)
+ end
+end
+
+def fun_l2_n985(x)
+ if (x < 1)
+ fun_l3_n805(x)
+ else
+ fun_l3_n577(x)
+ end
+end
+
+def fun_l2_n986(x)
+ if (x < 1)
+ fun_l3_n237(x)
+ else
+ fun_l3_n833(x)
+ end
+end
+
+def fun_l2_n987(x)
+ if (x < 1)
+ fun_l3_n643(x)
+ else
+ fun_l3_n629(x)
+ end
+end
+
+def fun_l2_n988(x)
+ if (x < 1)
+ fun_l3_n911(x)
+ else
+ fun_l3_n712(x)
+ end
+end
+
+def fun_l2_n989(x)
+ if (x < 1)
+ fun_l3_n936(x)
+ else
+ fun_l3_n300(x)
+ end
+end
+
+def fun_l2_n990(x)
+ if (x < 1)
+ fun_l3_n561(x)
+ else
+ fun_l3_n281(x)
+ end
+end
+
+def fun_l2_n991(x)
+ if (x < 1)
+ fun_l3_n226(x)
+ else
+ fun_l3_n203(x)
+ end
+end
+
+def fun_l2_n992(x)
+ if (x < 1)
+ fun_l3_n727(x)
+ else
+ fun_l3_n437(x)
+ end
+end
+
+def fun_l2_n993(x)
+ if (x < 1)
+ fun_l3_n608(x)
+ else
+ fun_l3_n169(x)
+ end
+end
+
+def fun_l2_n994(x)
+ if (x < 1)
+ fun_l3_n30(x)
+ else
+ fun_l3_n980(x)
+ end
+end
+
+def fun_l2_n995(x)
+ if (x < 1)
+ fun_l3_n652(x)
+ else
+ fun_l3_n122(x)
+ end
+end
+
+def fun_l2_n996(x)
+ if (x < 1)
+ fun_l3_n334(x)
+ else
+ fun_l3_n668(x)
+ end
+end
+
+def fun_l2_n997(x)
+ if (x < 1)
+ fun_l3_n241(x)
+ else
+ fun_l3_n515(x)
+ end
+end
+
+def fun_l2_n998(x)
+ if (x < 1)
+ fun_l3_n665(x)
+ else
+ fun_l3_n54(x)
+ end
+end
+
+def fun_l2_n999(x)
+ if (x < 1)
+ fun_l3_n568(x)
+ else
+ fun_l3_n652(x)
+ end
+end
+
+def fun_l3_n0(x)
+ if (x < 1)
+ fun_l4_n169(x)
+ else
+ fun_l4_n923(x)
+ end
+end
+
+def fun_l3_n1(x)
+ if (x < 1)
+ fun_l4_n214(x)
+ else
+ fun_l4_n506(x)
+ end
+end
+
+def fun_l3_n2(x)
+ if (x < 1)
+ fun_l4_n513(x)
+ else
+ fun_l4_n409(x)
+ end
+end
+
+def fun_l3_n3(x)
+ if (x < 1)
+ fun_l4_n133(x)
+ else
+ fun_l4_n293(x)
+ end
+end
+
+def fun_l3_n4(x)
+ if (x < 1)
+ fun_l4_n550(x)
+ else
+ fun_l4_n95(x)
+ end
+end
+
+def fun_l3_n5(x)
+ if (x < 1)
+ fun_l4_n13(x)
+ else
+ fun_l4_n508(x)
+ end
+end
+
+def fun_l3_n6(x)
+ if (x < 1)
+ fun_l4_n607(x)
+ else
+ fun_l4_n140(x)
+ end
+end
+
+def fun_l3_n7(x)
+ if (x < 1)
+ fun_l4_n65(x)
+ else
+ fun_l4_n90(x)
+ end
+end
+
+def fun_l3_n8(x)
+ if (x < 1)
+ fun_l4_n516(x)
+ else
+ fun_l4_n445(x)
+ end
+end
+
+def fun_l3_n9(x)
+ if (x < 1)
+ fun_l4_n713(x)
+ else
+ fun_l4_n487(x)
+ end
+end
+
+def fun_l3_n10(x)
+ if (x < 1)
+ fun_l4_n16(x)
+ else
+ fun_l4_n547(x)
+ end
+end
+
+def fun_l3_n11(x)
+ if (x < 1)
+ fun_l4_n561(x)
+ else
+ fun_l4_n530(x)
+ end
+end
+
+def fun_l3_n12(x)
+ if (x < 1)
+ fun_l4_n866(x)
+ else
+ fun_l4_n187(x)
+ end
+end
+
+def fun_l3_n13(x)
+ if (x < 1)
+ fun_l4_n8(x)
+ else
+ fun_l4_n458(x)
+ end
+end
+
+def fun_l3_n14(x)
+ if (x < 1)
+ fun_l4_n627(x)
+ else
+ fun_l4_n122(x)
+ end
+end
+
+def fun_l3_n15(x)
+ if (x < 1)
+ fun_l4_n988(x)
+ else
+ fun_l4_n4(x)
+ end
+end
+
+def fun_l3_n16(x)
+ if (x < 1)
+ fun_l4_n654(x)
+ else
+ fun_l4_n863(x)
+ end
+end
+
+def fun_l3_n17(x)
+ if (x < 1)
+ fun_l4_n112(x)
+ else
+ fun_l4_n223(x)
+ end
+end
+
+def fun_l3_n18(x)
+ if (x < 1)
+ fun_l4_n808(x)
+ else
+ fun_l4_n161(x)
+ end
+end
+
+def fun_l3_n19(x)
+ if (x < 1)
+ fun_l4_n265(x)
+ else
+ fun_l4_n860(x)
+ end
+end
+
+def fun_l3_n20(x)
+ if (x < 1)
+ fun_l4_n60(x)
+ else
+ fun_l4_n943(x)
+ end
+end
+
+def fun_l3_n21(x)
+ if (x < 1)
+ fun_l4_n707(x)
+ else
+ fun_l4_n990(x)
+ end
+end
+
+def fun_l3_n22(x)
+ if (x < 1)
+ fun_l4_n774(x)
+ else
+ fun_l4_n686(x)
+ end
+end
+
+def fun_l3_n23(x)
+ if (x < 1)
+ fun_l4_n552(x)
+ else
+ fun_l4_n935(x)
+ end
+end
+
+def fun_l3_n24(x)
+ if (x < 1)
+ fun_l4_n752(x)
+ else
+ fun_l4_n279(x)
+ end
+end
+
+def fun_l3_n25(x)
+ if (x < 1)
+ fun_l4_n325(x)
+ else
+ fun_l4_n440(x)
+ end
+end
+
+def fun_l3_n26(x)
+ if (x < 1)
+ fun_l4_n330(x)
+ else
+ fun_l4_n233(x)
+ end
+end
+
+def fun_l3_n27(x)
+ if (x < 1)
+ fun_l4_n112(x)
+ else
+ fun_l4_n399(x)
+ end
+end
+
+def fun_l3_n28(x)
+ if (x < 1)
+ fun_l4_n420(x)
+ else
+ fun_l4_n570(x)
+ end
+end
+
+def fun_l3_n29(x)
+ if (x < 1)
+ fun_l4_n515(x)
+ else
+ fun_l4_n485(x)
+ end
+end
+
+def fun_l3_n30(x)
+ if (x < 1)
+ fun_l4_n694(x)
+ else
+ fun_l4_n279(x)
+ end
+end
+
+def fun_l3_n31(x)
+ if (x < 1)
+ fun_l4_n683(x)
+ else
+ fun_l4_n396(x)
+ end
+end
+
+def fun_l3_n32(x)
+ if (x < 1)
+ fun_l4_n997(x)
+ else
+ fun_l4_n978(x)
+ end
+end
+
+def fun_l3_n33(x)
+ if (x < 1)
+ fun_l4_n260(x)
+ else
+ fun_l4_n895(x)
+ end
+end
+
+def fun_l3_n34(x)
+ if (x < 1)
+ fun_l4_n510(x)
+ else
+ fun_l4_n147(x)
+ end
+end
+
+def fun_l3_n35(x)
+ if (x < 1)
+ fun_l4_n415(x)
+ else
+ fun_l4_n590(x)
+ end
+end
+
+def fun_l3_n36(x)
+ if (x < 1)
+ fun_l4_n164(x)
+ else
+ fun_l4_n990(x)
+ end
+end
+
+def fun_l3_n37(x)
+ if (x < 1)
+ fun_l4_n744(x)
+ else
+ fun_l4_n981(x)
+ end
+end
+
+def fun_l3_n38(x)
+ if (x < 1)
+ fun_l4_n243(x)
+ else
+ fun_l4_n385(x)
+ end
+end
+
+def fun_l3_n39(x)
+ if (x < 1)
+ fun_l4_n874(x)
+ else
+ fun_l4_n941(x)
+ end
+end
+
+def fun_l3_n40(x)
+ if (x < 1)
+ fun_l4_n39(x)
+ else
+ fun_l4_n115(x)
+ end
+end
+
+def fun_l3_n41(x)
+ if (x < 1)
+ fun_l4_n829(x)
+ else
+ fun_l4_n991(x)
+ end
+end
+
+def fun_l3_n42(x)
+ if (x < 1)
+ fun_l4_n234(x)
+ else
+ fun_l4_n359(x)
+ end
+end
+
+def fun_l3_n43(x)
+ if (x < 1)
+ fun_l4_n481(x)
+ else
+ fun_l4_n368(x)
+ end
+end
+
+def fun_l3_n44(x)
+ if (x < 1)
+ fun_l4_n969(x)
+ else
+ fun_l4_n716(x)
+ end
+end
+
+def fun_l3_n45(x)
+ if (x < 1)
+ fun_l4_n858(x)
+ else
+ fun_l4_n446(x)
+ end
+end
+
+def fun_l3_n46(x)
+ if (x < 1)
+ fun_l4_n786(x)
+ else
+ fun_l4_n418(x)
+ end
+end
+
+def fun_l3_n47(x)
+ if (x < 1)
+ fun_l4_n261(x)
+ else
+ fun_l4_n346(x)
+ end
+end
+
+def fun_l3_n48(x)
+ if (x < 1)
+ fun_l4_n422(x)
+ else
+ fun_l4_n710(x)
+ end
+end
+
+def fun_l3_n49(x)
+ if (x < 1)
+ fun_l4_n533(x)
+ else
+ fun_l4_n599(x)
+ end
+end
+
+def fun_l3_n50(x)
+ if (x < 1)
+ fun_l4_n922(x)
+ else
+ fun_l4_n90(x)
+ end
+end
+
+def fun_l3_n51(x)
+ if (x < 1)
+ fun_l4_n718(x)
+ else
+ fun_l4_n854(x)
+ end
+end
+
+def fun_l3_n52(x)
+ if (x < 1)
+ fun_l4_n993(x)
+ else
+ fun_l4_n996(x)
+ end
+end
+
+def fun_l3_n53(x)
+ if (x < 1)
+ fun_l4_n628(x)
+ else
+ fun_l4_n589(x)
+ end
+end
+
+def fun_l3_n54(x)
+ if (x < 1)
+ fun_l4_n735(x)
+ else
+ fun_l4_n258(x)
+ end
+end
+
+def fun_l3_n55(x)
+ if (x < 1)
+ fun_l4_n794(x)
+ else
+ fun_l4_n559(x)
+ end
+end
+
+def fun_l3_n56(x)
+ if (x < 1)
+ fun_l4_n771(x)
+ else
+ fun_l4_n812(x)
+ end
+end
+
+def fun_l3_n57(x)
+ if (x < 1)
+ fun_l4_n106(x)
+ else
+ fun_l4_n144(x)
+ end
+end
+
+def fun_l3_n58(x)
+ if (x < 1)
+ fun_l4_n2(x)
+ else
+ fun_l4_n49(x)
+ end
+end
+
+def fun_l3_n59(x)
+ if (x < 1)
+ fun_l4_n540(x)
+ else
+ fun_l4_n164(x)
+ end
+end
+
+def fun_l3_n60(x)
+ if (x < 1)
+ fun_l4_n638(x)
+ else
+ fun_l4_n984(x)
+ end
+end
+
+def fun_l3_n61(x)
+ if (x < 1)
+ fun_l4_n144(x)
+ else
+ fun_l4_n236(x)
+ end
+end
+
+def fun_l3_n62(x)
+ if (x < 1)
+ fun_l4_n345(x)
+ else
+ fun_l4_n65(x)
+ end
+end
+
+def fun_l3_n63(x)
+ if (x < 1)
+ fun_l4_n112(x)
+ else
+ fun_l4_n216(x)
+ end
+end
+
+def fun_l3_n64(x)
+ if (x < 1)
+ fun_l4_n213(x)
+ else
+ fun_l4_n370(x)
+ end
+end
+
+def fun_l3_n65(x)
+ if (x < 1)
+ fun_l4_n845(x)
+ else
+ fun_l4_n672(x)
+ end
+end
+
+def fun_l3_n66(x)
+ if (x < 1)
+ fun_l4_n951(x)
+ else
+ fun_l4_n415(x)
+ end
+end
+
+def fun_l3_n67(x)
+ if (x < 1)
+ fun_l4_n997(x)
+ else
+ fun_l4_n760(x)
+ end
+end
+
+def fun_l3_n68(x)
+ if (x < 1)
+ fun_l4_n595(x)
+ else
+ fun_l4_n517(x)
+ end
+end
+
+def fun_l3_n69(x)
+ if (x < 1)
+ fun_l4_n776(x)
+ else
+ fun_l4_n550(x)
+ end
+end
+
+def fun_l3_n70(x)
+ if (x < 1)
+ fun_l4_n360(x)
+ else
+ fun_l4_n836(x)
+ end
+end
+
+def fun_l3_n71(x)
+ if (x < 1)
+ fun_l4_n214(x)
+ else
+ fun_l4_n532(x)
+ end
+end
+
+def fun_l3_n72(x)
+ if (x < 1)
+ fun_l4_n138(x)
+ else
+ fun_l4_n881(x)
+ end
+end
+
+def fun_l3_n73(x)
+ if (x < 1)
+ fun_l4_n793(x)
+ else
+ fun_l4_n866(x)
+ end
+end
+
+def fun_l3_n74(x)
+ if (x < 1)
+ fun_l4_n883(x)
+ else
+ fun_l4_n730(x)
+ end
+end
+
+def fun_l3_n75(x)
+ if (x < 1)
+ fun_l4_n525(x)
+ else
+ fun_l4_n188(x)
+ end
+end
+
+def fun_l3_n76(x)
+ if (x < 1)
+ fun_l4_n528(x)
+ else
+ fun_l4_n723(x)
+ end
+end
+
+def fun_l3_n77(x)
+ if (x < 1)
+ fun_l4_n954(x)
+ else
+ fun_l4_n581(x)
+ end
+end
+
+def fun_l3_n78(x)
+ if (x < 1)
+ fun_l4_n614(x)
+ else
+ fun_l4_n941(x)
+ end
+end
+
+def fun_l3_n79(x)
+ if (x < 1)
+ fun_l4_n396(x)
+ else
+ fun_l4_n539(x)
+ end
+end
+
+def fun_l3_n80(x)
+ if (x < 1)
+ fun_l4_n1(x)
+ else
+ fun_l4_n708(x)
+ end
+end
+
+def fun_l3_n81(x)
+ if (x < 1)
+ fun_l4_n626(x)
+ else
+ fun_l4_n418(x)
+ end
+end
+
+def fun_l3_n82(x)
+ if (x < 1)
+ fun_l4_n5(x)
+ else
+ fun_l4_n108(x)
+ end
+end
+
+def fun_l3_n83(x)
+ if (x < 1)
+ fun_l4_n683(x)
+ else
+ fun_l4_n679(x)
+ end
+end
+
+def fun_l3_n84(x)
+ if (x < 1)
+ fun_l4_n613(x)
+ else
+ fun_l4_n596(x)
+ end
+end
+
+def fun_l3_n85(x)
+ if (x < 1)
+ fun_l4_n490(x)
+ else
+ fun_l4_n178(x)
+ end
+end
+
+def fun_l3_n86(x)
+ if (x < 1)
+ fun_l4_n286(x)
+ else
+ fun_l4_n724(x)
+ end
+end
+
+def fun_l3_n87(x)
+ if (x < 1)
+ fun_l4_n989(x)
+ else
+ fun_l4_n711(x)
+ end
+end
+
+def fun_l3_n88(x)
+ if (x < 1)
+ fun_l4_n422(x)
+ else
+ fun_l4_n259(x)
+ end
+end
+
+def fun_l3_n89(x)
+ if (x < 1)
+ fun_l4_n938(x)
+ else
+ fun_l4_n123(x)
+ end
+end
+
+def fun_l3_n90(x)
+ if (x < 1)
+ fun_l4_n589(x)
+ else
+ fun_l4_n231(x)
+ end
+end
+
+def fun_l3_n91(x)
+ if (x < 1)
+ fun_l4_n484(x)
+ else
+ fun_l4_n439(x)
+ end
+end
+
+def fun_l3_n92(x)
+ if (x < 1)
+ fun_l4_n469(x)
+ else
+ fun_l4_n737(x)
+ end
+end
+
+def fun_l3_n93(x)
+ if (x < 1)
+ fun_l4_n93(x)
+ else
+ fun_l4_n907(x)
+ end
+end
+
+def fun_l3_n94(x)
+ if (x < 1)
+ fun_l4_n468(x)
+ else
+ fun_l4_n219(x)
+ end
+end
+
+def fun_l3_n95(x)
+ if (x < 1)
+ fun_l4_n409(x)
+ else
+ fun_l4_n921(x)
+ end
+end
+
+def fun_l3_n96(x)
+ if (x < 1)
+ fun_l4_n952(x)
+ else
+ fun_l4_n167(x)
+ end
+end
+
+def fun_l3_n97(x)
+ if (x < 1)
+ fun_l4_n177(x)
+ else
+ fun_l4_n462(x)
+ end
+end
+
+def fun_l3_n98(x)
+ if (x < 1)
+ fun_l4_n351(x)
+ else
+ fun_l4_n89(x)
+ end
+end
+
+def fun_l3_n99(x)
+ if (x < 1)
+ fun_l4_n869(x)
+ else
+ fun_l4_n502(x)
+ end
+end
+
+def fun_l3_n100(x)
+ if (x < 1)
+ fun_l4_n985(x)
+ else
+ fun_l4_n297(x)
+ end
+end
+
+def fun_l3_n101(x)
+ if (x < 1)
+ fun_l4_n48(x)
+ else
+ fun_l4_n895(x)
+ end
+end
+
+def fun_l3_n102(x)
+ if (x < 1)
+ fun_l4_n92(x)
+ else
+ fun_l4_n36(x)
+ end
+end
+
+def fun_l3_n103(x)
+ if (x < 1)
+ fun_l4_n610(x)
+ else
+ fun_l4_n616(x)
+ end
+end
+
+def fun_l3_n104(x)
+ if (x < 1)
+ fun_l4_n472(x)
+ else
+ fun_l4_n689(x)
+ end
+end
+
+def fun_l3_n105(x)
+ if (x < 1)
+ fun_l4_n75(x)
+ else
+ fun_l4_n161(x)
+ end
+end
+
+def fun_l3_n106(x)
+ if (x < 1)
+ fun_l4_n300(x)
+ else
+ fun_l4_n767(x)
+ end
+end
+
+def fun_l3_n107(x)
+ if (x < 1)
+ fun_l4_n707(x)
+ else
+ fun_l4_n229(x)
+ end
+end
+
+def fun_l3_n108(x)
+ if (x < 1)
+ fun_l4_n761(x)
+ else
+ fun_l4_n97(x)
+ end
+end
+
+def fun_l3_n109(x)
+ if (x < 1)
+ fun_l4_n734(x)
+ else
+ fun_l4_n290(x)
+ end
+end
+
+def fun_l3_n110(x)
+ if (x < 1)
+ fun_l4_n819(x)
+ else
+ fun_l4_n567(x)
+ end
+end
+
+def fun_l3_n111(x)
+ if (x < 1)
+ fun_l4_n904(x)
+ else
+ fun_l4_n322(x)
+ end
+end
+
+def fun_l3_n112(x)
+ if (x < 1)
+ fun_l4_n907(x)
+ else
+ fun_l4_n667(x)
+ end
+end
+
+def fun_l3_n113(x)
+ if (x < 1)
+ fun_l4_n473(x)
+ else
+ fun_l4_n620(x)
+ end
+end
+
+def fun_l3_n114(x)
+ if (x < 1)
+ fun_l4_n278(x)
+ else
+ fun_l4_n998(x)
+ end
+end
+
+def fun_l3_n115(x)
+ if (x < 1)
+ fun_l4_n185(x)
+ else
+ fun_l4_n962(x)
+ end
+end
+
+def fun_l3_n116(x)
+ if (x < 1)
+ fun_l4_n989(x)
+ else
+ fun_l4_n993(x)
+ end
+end
+
+def fun_l3_n117(x)
+ if (x < 1)
+ fun_l4_n140(x)
+ else
+ fun_l4_n456(x)
+ end
+end
+
+def fun_l3_n118(x)
+ if (x < 1)
+ fun_l4_n920(x)
+ else
+ fun_l4_n701(x)
+ end
+end
+
+def fun_l3_n119(x)
+ if (x < 1)
+ fun_l4_n263(x)
+ else
+ fun_l4_n837(x)
+ end
+end
+
+def fun_l3_n120(x)
+ if (x < 1)
+ fun_l4_n54(x)
+ else
+ fun_l4_n964(x)
+ end
+end
+
+def fun_l3_n121(x)
+ if (x < 1)
+ fun_l4_n540(x)
+ else
+ fun_l4_n543(x)
+ end
+end
+
+def fun_l3_n122(x)
+ if (x < 1)
+ fun_l4_n51(x)
+ else
+ fun_l4_n453(x)
+ end
+end
+
+def fun_l3_n123(x)
+ if (x < 1)
+ fun_l4_n726(x)
+ else
+ fun_l4_n785(x)
+ end
+end
+
+def fun_l3_n124(x)
+ if (x < 1)
+ fun_l4_n932(x)
+ else
+ fun_l4_n583(x)
+ end
+end
+
+def fun_l3_n125(x)
+ if (x < 1)
+ fun_l4_n471(x)
+ else
+ fun_l4_n518(x)
+ end
+end
+
+def fun_l3_n126(x)
+ if (x < 1)
+ fun_l4_n351(x)
+ else
+ fun_l4_n825(x)
+ end
+end
+
+def fun_l3_n127(x)
+ if (x < 1)
+ fun_l4_n759(x)
+ else
+ fun_l4_n203(x)
+ end
+end
+
+def fun_l3_n128(x)
+ if (x < 1)
+ fun_l4_n336(x)
+ else
+ fun_l4_n861(x)
+ end
+end
+
+def fun_l3_n129(x)
+ if (x < 1)
+ fun_l4_n573(x)
+ else
+ fun_l4_n568(x)
+ end
+end
+
+def fun_l3_n130(x)
+ if (x < 1)
+ fun_l4_n788(x)
+ else
+ fun_l4_n259(x)
+ end
+end
+
+def fun_l3_n131(x)
+ if (x < 1)
+ fun_l4_n392(x)
+ else
+ fun_l4_n932(x)
+ end
+end
+
+def fun_l3_n132(x)
+ if (x < 1)
+ fun_l4_n919(x)
+ else
+ fun_l4_n650(x)
+ end
+end
+
+def fun_l3_n133(x)
+ if (x < 1)
+ fun_l4_n895(x)
+ else
+ fun_l4_n983(x)
+ end
+end
+
+def fun_l3_n134(x)
+ if (x < 1)
+ fun_l4_n389(x)
+ else
+ fun_l4_n358(x)
+ end
+end
+
+def fun_l3_n135(x)
+ if (x < 1)
+ fun_l4_n732(x)
+ else
+ fun_l4_n747(x)
+ end
+end
+
+def fun_l3_n136(x)
+ if (x < 1)
+ fun_l4_n756(x)
+ else
+ fun_l4_n592(x)
+ end
+end
+
+def fun_l3_n137(x)
+ if (x < 1)
+ fun_l4_n581(x)
+ else
+ fun_l4_n24(x)
+ end
+end
+
+def fun_l3_n138(x)
+ if (x < 1)
+ fun_l4_n932(x)
+ else
+ fun_l4_n599(x)
+ end
+end
+
+def fun_l3_n139(x)
+ if (x < 1)
+ fun_l4_n955(x)
+ else
+ fun_l4_n148(x)
+ end
+end
+
+def fun_l3_n140(x)
+ if (x < 1)
+ fun_l4_n332(x)
+ else
+ fun_l4_n677(x)
+ end
+end
+
+def fun_l3_n141(x)
+ if (x < 1)
+ fun_l4_n152(x)
+ else
+ fun_l4_n696(x)
+ end
+end
+
+def fun_l3_n142(x)
+ if (x < 1)
+ fun_l4_n295(x)
+ else
+ fun_l4_n205(x)
+ end
+end
+
+def fun_l3_n143(x)
+ if (x < 1)
+ fun_l4_n543(x)
+ else
+ fun_l4_n951(x)
+ end
+end
+
+def fun_l3_n144(x)
+ if (x < 1)
+ fun_l4_n656(x)
+ else
+ fun_l4_n494(x)
+ end
+end
+
+def fun_l3_n145(x)
+ if (x < 1)
+ fun_l4_n729(x)
+ else
+ fun_l4_n749(x)
+ end
+end
+
+def fun_l3_n146(x)
+ if (x < 1)
+ fun_l4_n197(x)
+ else
+ fun_l4_n3(x)
+ end
+end
+
+def fun_l3_n147(x)
+ if (x < 1)
+ fun_l4_n519(x)
+ else
+ fun_l4_n36(x)
+ end
+end
+
+def fun_l3_n148(x)
+ if (x < 1)
+ fun_l4_n100(x)
+ else
+ fun_l4_n463(x)
+ end
+end
+
+def fun_l3_n149(x)
+ if (x < 1)
+ fun_l4_n890(x)
+ else
+ fun_l4_n947(x)
+ end
+end
+
+def fun_l3_n150(x)
+ if (x < 1)
+ fun_l4_n512(x)
+ else
+ fun_l4_n982(x)
+ end
+end
+
+def fun_l3_n151(x)
+ if (x < 1)
+ fun_l4_n750(x)
+ else
+ fun_l4_n781(x)
+ end
+end
+
+def fun_l3_n152(x)
+ if (x < 1)
+ fun_l4_n193(x)
+ else
+ fun_l4_n530(x)
+ end
+end
+
+def fun_l3_n153(x)
+ if (x < 1)
+ fun_l4_n189(x)
+ else
+ fun_l4_n209(x)
+ end
+end
+
+def fun_l3_n154(x)
+ if (x < 1)
+ fun_l4_n343(x)
+ else
+ fun_l4_n44(x)
+ end
+end
+
+def fun_l3_n155(x)
+ if (x < 1)
+ fun_l4_n536(x)
+ else
+ fun_l4_n228(x)
+ end
+end
+
+def fun_l3_n156(x)
+ if (x < 1)
+ fun_l4_n475(x)
+ else
+ fun_l4_n790(x)
+ end
+end
+
+def fun_l3_n157(x)
+ if (x < 1)
+ fun_l4_n954(x)
+ else
+ fun_l4_n242(x)
+ end
+end
+
+def fun_l3_n158(x)
+ if (x < 1)
+ fun_l4_n19(x)
+ else
+ fun_l4_n735(x)
+ end
+end
+
+def fun_l3_n159(x)
+ if (x < 1)
+ fun_l4_n900(x)
+ else
+ fun_l4_n456(x)
+ end
+end
+
+def fun_l3_n160(x)
+ if (x < 1)
+ fun_l4_n193(x)
+ else
+ fun_l4_n606(x)
+ end
+end
+
+def fun_l3_n161(x)
+ if (x < 1)
+ fun_l4_n684(x)
+ else
+ fun_l4_n622(x)
+ end
+end
+
+def fun_l3_n162(x)
+ if (x < 1)
+ fun_l4_n260(x)
+ else
+ fun_l4_n582(x)
+ end
+end
+
+def fun_l3_n163(x)
+ if (x < 1)
+ fun_l4_n713(x)
+ else
+ fun_l4_n601(x)
+ end
+end
+
+def fun_l3_n164(x)
+ if (x < 1)
+ fun_l4_n819(x)
+ else
+ fun_l4_n642(x)
+ end
+end
+
+def fun_l3_n165(x)
+ if (x < 1)
+ fun_l4_n45(x)
+ else
+ fun_l4_n777(x)
+ end
+end
+
+def fun_l3_n166(x)
+ if (x < 1)
+ fun_l4_n256(x)
+ else
+ fun_l4_n92(x)
+ end
+end
+
+def fun_l3_n167(x)
+ if (x < 1)
+ fun_l4_n975(x)
+ else
+ fun_l4_n962(x)
+ end
+end
+
+def fun_l3_n168(x)
+ if (x < 1)
+ fun_l4_n579(x)
+ else
+ fun_l4_n208(x)
+ end
+end
+
+def fun_l3_n169(x)
+ if (x < 1)
+ fun_l4_n133(x)
+ else
+ fun_l4_n636(x)
+ end
+end
+
+def fun_l3_n170(x)
+ if (x < 1)
+ fun_l4_n863(x)
+ else
+ fun_l4_n168(x)
+ end
+end
+
+def fun_l3_n171(x)
+ if (x < 1)
+ fun_l4_n396(x)
+ else
+ fun_l4_n654(x)
+ end
+end
+
+def fun_l3_n172(x)
+ if (x < 1)
+ fun_l4_n963(x)
+ else
+ fun_l4_n420(x)
+ end
+end
+
+def fun_l3_n173(x)
+ if (x < 1)
+ fun_l4_n487(x)
+ else
+ fun_l4_n573(x)
+ end
+end
+
+def fun_l3_n174(x)
+ if (x < 1)
+ fun_l4_n847(x)
+ else
+ fun_l4_n476(x)
+ end
+end
+
+def fun_l3_n175(x)
+ if (x < 1)
+ fun_l4_n712(x)
+ else
+ fun_l4_n522(x)
+ end
+end
+
+def fun_l3_n176(x)
+ if (x < 1)
+ fun_l4_n292(x)
+ else
+ fun_l4_n717(x)
+ end
+end
+
+def fun_l3_n177(x)
+ if (x < 1)
+ fun_l4_n945(x)
+ else
+ fun_l4_n146(x)
+ end
+end
+
+def fun_l3_n178(x)
+ if (x < 1)
+ fun_l4_n163(x)
+ else
+ fun_l4_n28(x)
+ end
+end
+
+def fun_l3_n179(x)
+ if (x < 1)
+ fun_l4_n738(x)
+ else
+ fun_l4_n502(x)
+ end
+end
+
+def fun_l3_n180(x)
+ if (x < 1)
+ fun_l4_n8(x)
+ else
+ fun_l4_n56(x)
+ end
+end
+
+def fun_l3_n181(x)
+ if (x < 1)
+ fun_l4_n58(x)
+ else
+ fun_l4_n155(x)
+ end
+end
+
+def fun_l3_n182(x)
+ if (x < 1)
+ fun_l4_n298(x)
+ else
+ fun_l4_n580(x)
+ end
+end
+
+def fun_l3_n183(x)
+ if (x < 1)
+ fun_l4_n860(x)
+ else
+ fun_l4_n176(x)
+ end
+end
+
+def fun_l3_n184(x)
+ if (x < 1)
+ fun_l4_n108(x)
+ else
+ fun_l4_n986(x)
+ end
+end
+
+def fun_l3_n185(x)
+ if (x < 1)
+ fun_l4_n366(x)
+ else
+ fun_l4_n110(x)
+ end
+end
+
+def fun_l3_n186(x)
+ if (x < 1)
+ fun_l4_n177(x)
+ else
+ fun_l4_n158(x)
+ end
+end
+
+def fun_l3_n187(x)
+ if (x < 1)
+ fun_l4_n212(x)
+ else
+ fun_l4_n803(x)
+ end
+end
+
+def fun_l3_n188(x)
+ if (x < 1)
+ fun_l4_n946(x)
+ else
+ fun_l4_n281(x)
+ end
+end
+
+def fun_l3_n189(x)
+ if (x < 1)
+ fun_l4_n610(x)
+ else
+ fun_l4_n691(x)
+ end
+end
+
+def fun_l3_n190(x)
+ if (x < 1)
+ fun_l4_n784(x)
+ else
+ fun_l4_n300(x)
+ end
+end
+
+def fun_l3_n191(x)
+ if (x < 1)
+ fun_l4_n272(x)
+ else
+ fun_l4_n809(x)
+ end
+end
+
+def fun_l3_n192(x)
+ if (x < 1)
+ fun_l4_n812(x)
+ else
+ fun_l4_n633(x)
+ end
+end
+
+def fun_l3_n193(x)
+ if (x < 1)
+ fun_l4_n880(x)
+ else
+ fun_l4_n267(x)
+ end
+end
+
+def fun_l3_n194(x)
+ if (x < 1)
+ fun_l4_n454(x)
+ else
+ fun_l4_n567(x)
+ end
+end
+
+def fun_l3_n195(x)
+ if (x < 1)
+ fun_l4_n919(x)
+ else
+ fun_l4_n184(x)
+ end
+end
+
+def fun_l3_n196(x)
+ if (x < 1)
+ fun_l4_n294(x)
+ else
+ fun_l4_n150(x)
+ end
+end
+
+def fun_l3_n197(x)
+ if (x < 1)
+ fun_l4_n543(x)
+ else
+ fun_l4_n319(x)
+ end
+end
+
+def fun_l3_n198(x)
+ if (x < 1)
+ fun_l4_n91(x)
+ else
+ fun_l4_n378(x)
+ end
+end
+
+def fun_l3_n199(x)
+ if (x < 1)
+ fun_l4_n590(x)
+ else
+ fun_l4_n195(x)
+ end
+end
+
+def fun_l3_n200(x)
+ if (x < 1)
+ fun_l4_n532(x)
+ else
+ fun_l4_n346(x)
+ end
+end
+
+def fun_l3_n201(x)
+ if (x < 1)
+ fun_l4_n340(x)
+ else
+ fun_l4_n129(x)
+ end
+end
+
+def fun_l3_n202(x)
+ if (x < 1)
+ fun_l4_n193(x)
+ else
+ fun_l4_n676(x)
+ end
+end
+
+def fun_l3_n203(x)
+ if (x < 1)
+ fun_l4_n210(x)
+ else
+ fun_l4_n982(x)
+ end
+end
+
+def fun_l3_n204(x)
+ if (x < 1)
+ fun_l4_n247(x)
+ else
+ fun_l4_n550(x)
+ end
+end
+
+def fun_l3_n205(x)
+ if (x < 1)
+ fun_l4_n727(x)
+ else
+ fun_l4_n737(x)
+ end
+end
+
+def fun_l3_n206(x)
+ if (x < 1)
+ fun_l4_n111(x)
+ else
+ fun_l4_n952(x)
+ end
+end
+
+def fun_l3_n207(x)
+ if (x < 1)
+ fun_l4_n996(x)
+ else
+ fun_l4_n865(x)
+ end
+end
+
+def fun_l3_n208(x)
+ if (x < 1)
+ fun_l4_n945(x)
+ else
+ fun_l4_n488(x)
+ end
+end
+
+def fun_l3_n209(x)
+ if (x < 1)
+ fun_l4_n697(x)
+ else
+ fun_l4_n366(x)
+ end
+end
+
+def fun_l3_n210(x)
+ if (x < 1)
+ fun_l4_n180(x)
+ else
+ fun_l4_n995(x)
+ end
+end
+
+def fun_l3_n211(x)
+ if (x < 1)
+ fun_l4_n972(x)
+ else
+ fun_l4_n783(x)
+ end
+end
+
+def fun_l3_n212(x)
+ if (x < 1)
+ fun_l4_n58(x)
+ else
+ fun_l4_n430(x)
+ end
+end
+
+def fun_l3_n213(x)
+ if (x < 1)
+ fun_l4_n904(x)
+ else
+ fun_l4_n703(x)
+ end
+end
+
+def fun_l3_n214(x)
+ if (x < 1)
+ fun_l4_n882(x)
+ else
+ fun_l4_n648(x)
+ end
+end
+
+def fun_l3_n215(x)
+ if (x < 1)
+ fun_l4_n800(x)
+ else
+ fun_l4_n680(x)
+ end
+end
+
+def fun_l3_n216(x)
+ if (x < 1)
+ fun_l4_n205(x)
+ else
+ fun_l4_n185(x)
+ end
+end
+
+def fun_l3_n217(x)
+ if (x < 1)
+ fun_l4_n242(x)
+ else
+ fun_l4_n834(x)
+ end
+end
+
+def fun_l3_n218(x)
+ if (x < 1)
+ fun_l4_n551(x)
+ else
+ fun_l4_n399(x)
+ end
+end
+
+def fun_l3_n219(x)
+ if (x < 1)
+ fun_l4_n820(x)
+ else
+ fun_l4_n403(x)
+ end
+end
+
+def fun_l3_n220(x)
+ if (x < 1)
+ fun_l4_n874(x)
+ else
+ fun_l4_n584(x)
+ end
+end
+
+def fun_l3_n221(x)
+ if (x < 1)
+ fun_l4_n845(x)
+ else
+ fun_l4_n56(x)
+ end
+end
+
+def fun_l3_n222(x)
+ if (x < 1)
+ fun_l4_n947(x)
+ else
+ fun_l4_n299(x)
+ end
+end
+
+def fun_l3_n223(x)
+ if (x < 1)
+ fun_l4_n533(x)
+ else
+ fun_l4_n184(x)
+ end
+end
+
+def fun_l3_n224(x)
+ if (x < 1)
+ fun_l4_n760(x)
+ else
+ fun_l4_n507(x)
+ end
+end
+
+def fun_l3_n225(x)
+ if (x < 1)
+ fun_l4_n47(x)
+ else
+ fun_l4_n712(x)
+ end
+end
+
+def fun_l3_n226(x)
+ if (x < 1)
+ fun_l4_n470(x)
+ else
+ fun_l4_n760(x)
+ end
+end
+
+def fun_l3_n227(x)
+ if (x < 1)
+ fun_l4_n932(x)
+ else
+ fun_l4_n292(x)
+ end
+end
+
+def fun_l3_n228(x)
+ if (x < 1)
+ fun_l4_n44(x)
+ else
+ fun_l4_n855(x)
+ end
+end
+
+def fun_l3_n229(x)
+ if (x < 1)
+ fun_l4_n385(x)
+ else
+ fun_l4_n284(x)
+ end
+end
+
+def fun_l3_n230(x)
+ if (x < 1)
+ fun_l4_n272(x)
+ else
+ fun_l4_n407(x)
+ end
+end
+
+def fun_l3_n231(x)
+ if (x < 1)
+ fun_l4_n820(x)
+ else
+ fun_l4_n375(x)
+ end
+end
+
+def fun_l3_n232(x)
+ if (x < 1)
+ fun_l4_n165(x)
+ else
+ fun_l4_n38(x)
+ end
+end
+
+def fun_l3_n233(x)
+ if (x < 1)
+ fun_l4_n772(x)
+ else
+ fun_l4_n179(x)
+ end
+end
+
+def fun_l3_n234(x)
+ if (x < 1)
+ fun_l4_n980(x)
+ else
+ fun_l4_n678(x)
+ end
+end
+
+def fun_l3_n235(x)
+ if (x < 1)
+ fun_l4_n279(x)
+ else
+ fun_l4_n834(x)
+ end
+end
+
+def fun_l3_n236(x)
+ if (x < 1)
+ fun_l4_n227(x)
+ else
+ fun_l4_n11(x)
+ end
+end
+
+def fun_l3_n237(x)
+ if (x < 1)
+ fun_l4_n724(x)
+ else
+ fun_l4_n659(x)
+ end
+end
+
+def fun_l3_n238(x)
+ if (x < 1)
+ fun_l4_n569(x)
+ else
+ fun_l4_n693(x)
+ end
+end
+
+def fun_l3_n239(x)
+ if (x < 1)
+ fun_l4_n933(x)
+ else
+ fun_l4_n897(x)
+ end
+end
+
+def fun_l3_n240(x)
+ if (x < 1)
+ fun_l4_n215(x)
+ else
+ fun_l4_n784(x)
+ end
+end
+
+def fun_l3_n241(x)
+ if (x < 1)
+ fun_l4_n964(x)
+ else
+ fun_l4_n430(x)
+ end
+end
+
+def fun_l3_n242(x)
+ if (x < 1)
+ fun_l4_n91(x)
+ else
+ fun_l4_n190(x)
+ end
+end
+
+def fun_l3_n243(x)
+ if (x < 1)
+ fun_l4_n375(x)
+ else
+ fun_l4_n638(x)
+ end
+end
+
+def fun_l3_n244(x)
+ if (x < 1)
+ fun_l4_n381(x)
+ else
+ fun_l4_n324(x)
+ end
+end
+
+def fun_l3_n245(x)
+ if (x < 1)
+ fun_l4_n983(x)
+ else
+ fun_l4_n248(x)
+ end
+end
+
+def fun_l3_n246(x)
+ if (x < 1)
+ fun_l4_n336(x)
+ else
+ fun_l4_n647(x)
+ end
+end
+
+def fun_l3_n247(x)
+ if (x < 1)
+ fun_l4_n64(x)
+ else
+ fun_l4_n957(x)
+ end
+end
+
+def fun_l3_n248(x)
+ if (x < 1)
+ fun_l4_n313(x)
+ else
+ fun_l4_n662(x)
+ end
+end
+
+def fun_l3_n249(x)
+ if (x < 1)
+ fun_l4_n528(x)
+ else
+ fun_l4_n448(x)
+ end
+end
+
+def fun_l3_n250(x)
+ if (x < 1)
+ fun_l4_n9(x)
+ else
+ fun_l4_n424(x)
+ end
+end
+
+def fun_l3_n251(x)
+ if (x < 1)
+ fun_l4_n290(x)
+ else
+ fun_l4_n416(x)
+ end
+end
+
+def fun_l3_n252(x)
+ if (x < 1)
+ fun_l4_n159(x)
+ else
+ fun_l4_n780(x)
+ end
+end
+
+def fun_l3_n253(x)
+ if (x < 1)
+ fun_l4_n863(x)
+ else
+ fun_l4_n192(x)
+ end
+end
+
+def fun_l3_n254(x)
+ if (x < 1)
+ fun_l4_n658(x)
+ else
+ fun_l4_n816(x)
+ end
+end
+
+def fun_l3_n255(x)
+ if (x < 1)
+ fun_l4_n686(x)
+ else
+ fun_l4_n586(x)
+ end
+end
+
+def fun_l3_n256(x)
+ if (x < 1)
+ fun_l4_n110(x)
+ else
+ fun_l4_n290(x)
+ end
+end
+
+def fun_l3_n257(x)
+ if (x < 1)
+ fun_l4_n355(x)
+ else
+ fun_l4_n862(x)
+ end
+end
+
+def fun_l3_n258(x)
+ if (x < 1)
+ fun_l4_n63(x)
+ else
+ fun_l4_n205(x)
+ end
+end
+
+def fun_l3_n259(x)
+ if (x < 1)
+ fun_l4_n766(x)
+ else
+ fun_l4_n130(x)
+ end
+end
+
+def fun_l3_n260(x)
+ if (x < 1)
+ fun_l4_n670(x)
+ else
+ fun_l4_n313(x)
+ end
+end
+
+def fun_l3_n261(x)
+ if (x < 1)
+ fun_l4_n738(x)
+ else
+ fun_l4_n796(x)
+ end
+end
+
+def fun_l3_n262(x)
+ if (x < 1)
+ fun_l4_n925(x)
+ else
+ fun_l4_n267(x)
+ end
+end
+
+def fun_l3_n263(x)
+ if (x < 1)
+ fun_l4_n515(x)
+ else
+ fun_l4_n298(x)
+ end
+end
+
+def fun_l3_n264(x)
+ if (x < 1)
+ fun_l4_n883(x)
+ else
+ fun_l4_n809(x)
+ end
+end
+
+def fun_l3_n265(x)
+ if (x < 1)
+ fun_l4_n138(x)
+ else
+ fun_l4_n665(x)
+ end
+end
+
+def fun_l3_n266(x)
+ if (x < 1)
+ fun_l4_n141(x)
+ else
+ fun_l4_n641(x)
+ end
+end
+
+def fun_l3_n267(x)
+ if (x < 1)
+ fun_l4_n936(x)
+ else
+ fun_l4_n233(x)
+ end
+end
+
+def fun_l3_n268(x)
+ if (x < 1)
+ fun_l4_n28(x)
+ else
+ fun_l4_n351(x)
+ end
+end
+
+def fun_l3_n269(x)
+ if (x < 1)
+ fun_l4_n62(x)
+ else
+ fun_l4_n17(x)
+ end
+end
+
+def fun_l3_n270(x)
+ if (x < 1)
+ fun_l4_n489(x)
+ else
+ fun_l4_n408(x)
+ end
+end
+
+def fun_l3_n271(x)
+ if (x < 1)
+ fun_l4_n276(x)
+ else
+ fun_l4_n799(x)
+ end
+end
+
+def fun_l3_n272(x)
+ if (x < 1)
+ fun_l4_n103(x)
+ else
+ fun_l4_n804(x)
+ end
+end
+
+def fun_l3_n273(x)
+ if (x < 1)
+ fun_l4_n850(x)
+ else
+ fun_l4_n388(x)
+ end
+end
+
+def fun_l3_n274(x)
+ if (x < 1)
+ fun_l4_n651(x)
+ else
+ fun_l4_n484(x)
+ end
+end
+
+def fun_l3_n275(x)
+ if (x < 1)
+ fun_l4_n693(x)
+ else
+ fun_l4_n20(x)
+ end
+end
+
+def fun_l3_n276(x)
+ if (x < 1)
+ fun_l4_n897(x)
+ else
+ fun_l4_n328(x)
+ end
+end
+
+def fun_l3_n277(x)
+ if (x < 1)
+ fun_l4_n632(x)
+ else
+ fun_l4_n120(x)
+ end
+end
+
+def fun_l3_n278(x)
+ if (x < 1)
+ fun_l4_n919(x)
+ else
+ fun_l4_n921(x)
+ end
+end
+
+def fun_l3_n279(x)
+ if (x < 1)
+ fun_l4_n461(x)
+ else
+ fun_l4_n219(x)
+ end
+end
+
+def fun_l3_n280(x)
+ if (x < 1)
+ fun_l4_n867(x)
+ else
+ fun_l4_n682(x)
+ end
+end
+
+def fun_l3_n281(x)
+ if (x < 1)
+ fun_l4_n233(x)
+ else
+ fun_l4_n589(x)
+ end
+end
+
+def fun_l3_n282(x)
+ if (x < 1)
+ fun_l4_n516(x)
+ else
+ fun_l4_n970(x)
+ end
+end
+
+def fun_l3_n283(x)
+ if (x < 1)
+ fun_l4_n300(x)
+ else
+ fun_l4_n934(x)
+ end
+end
+
+def fun_l3_n284(x)
+ if (x < 1)
+ fun_l4_n736(x)
+ else
+ fun_l4_n516(x)
+ end
+end
+
+def fun_l3_n285(x)
+ if (x < 1)
+ fun_l4_n401(x)
+ else
+ fun_l4_n539(x)
+ end
+end
+
+def fun_l3_n286(x)
+ if (x < 1)
+ fun_l4_n342(x)
+ else
+ fun_l4_n142(x)
+ end
+end
+
+def fun_l3_n287(x)
+ if (x < 1)
+ fun_l4_n44(x)
+ else
+ fun_l4_n311(x)
+ end
+end
+
+def fun_l3_n288(x)
+ if (x < 1)
+ fun_l4_n698(x)
+ else
+ fun_l4_n407(x)
+ end
+end
+
+def fun_l3_n289(x)
+ if (x < 1)
+ fun_l4_n714(x)
+ else
+ fun_l4_n324(x)
+ end
+end
+
+def fun_l3_n290(x)
+ if (x < 1)
+ fun_l4_n862(x)
+ else
+ fun_l4_n807(x)
+ end
+end
+
+def fun_l3_n291(x)
+ if (x < 1)
+ fun_l4_n446(x)
+ else
+ fun_l4_n378(x)
+ end
+end
+
+def fun_l3_n292(x)
+ if (x < 1)
+ fun_l4_n418(x)
+ else
+ fun_l4_n194(x)
+ end
+end
+
+def fun_l3_n293(x)
+ if (x < 1)
+ fun_l4_n336(x)
+ else
+ fun_l4_n895(x)
+ end
+end
+
+def fun_l3_n294(x)
+ if (x < 1)
+ fun_l4_n928(x)
+ else
+ fun_l4_n154(x)
+ end
+end
+
+def fun_l3_n295(x)
+ if (x < 1)
+ fun_l4_n764(x)
+ else
+ fun_l4_n335(x)
+ end
+end
+
+def fun_l3_n296(x)
+ if (x < 1)
+ fun_l4_n840(x)
+ else
+ fun_l4_n513(x)
+ end
+end
+
+def fun_l3_n297(x)
+ if (x < 1)
+ fun_l4_n520(x)
+ else
+ fun_l4_n838(x)
+ end
+end
+
+def fun_l3_n298(x)
+ if (x < 1)
+ fun_l4_n831(x)
+ else
+ fun_l4_n344(x)
+ end
+end
+
+def fun_l3_n299(x)
+ if (x < 1)
+ fun_l4_n110(x)
+ else
+ fun_l4_n19(x)
+ end
+end
+
+def fun_l3_n300(x)
+ if (x < 1)
+ fun_l4_n272(x)
+ else
+ fun_l4_n952(x)
+ end
+end
+
+def fun_l3_n301(x)
+ if (x < 1)
+ fun_l4_n659(x)
+ else
+ fun_l4_n439(x)
+ end
+end
+
+def fun_l3_n302(x)
+ if (x < 1)
+ fun_l4_n787(x)
+ else
+ fun_l4_n189(x)
+ end
+end
+
+def fun_l3_n303(x)
+ if (x < 1)
+ fun_l4_n549(x)
+ else
+ fun_l4_n594(x)
+ end
+end
+
+def fun_l3_n304(x)
+ if (x < 1)
+ fun_l4_n255(x)
+ else
+ fun_l4_n822(x)
+ end
+end
+
+def fun_l3_n305(x)
+ if (x < 1)
+ fun_l4_n312(x)
+ else
+ fun_l4_n935(x)
+ end
+end
+
+def fun_l3_n306(x)
+ if (x < 1)
+ fun_l4_n831(x)
+ else
+ fun_l4_n299(x)
+ end
+end
+
+def fun_l3_n307(x)
+ if (x < 1)
+ fun_l4_n154(x)
+ else
+ fun_l4_n520(x)
+ end
+end
+
+def fun_l3_n308(x)
+ if (x < 1)
+ fun_l4_n768(x)
+ else
+ fun_l4_n676(x)
+ end
+end
+
+def fun_l3_n309(x)
+ if (x < 1)
+ fun_l4_n816(x)
+ else
+ fun_l4_n447(x)
+ end
+end
+
+def fun_l3_n310(x)
+ if (x < 1)
+ fun_l4_n689(x)
+ else
+ fun_l4_n244(x)
+ end
+end
+
+def fun_l3_n311(x)
+ if (x < 1)
+ fun_l4_n389(x)
+ else
+ fun_l4_n326(x)
+ end
+end
+
+def fun_l3_n312(x)
+ if (x < 1)
+ fun_l4_n125(x)
+ else
+ fun_l4_n502(x)
+ end
+end
+
+def fun_l3_n313(x)
+ if (x < 1)
+ fun_l4_n327(x)
+ else
+ fun_l4_n123(x)
+ end
+end
+
+def fun_l3_n314(x)
+ if (x < 1)
+ fun_l4_n558(x)
+ else
+ fun_l4_n940(x)
+ end
+end
+
+def fun_l3_n315(x)
+ if (x < 1)
+ fun_l4_n652(x)
+ else
+ fun_l4_n10(x)
+ end
+end
+
+def fun_l3_n316(x)
+ if (x < 1)
+ fun_l4_n483(x)
+ else
+ fun_l4_n315(x)
+ end
+end
+
+def fun_l3_n317(x)
+ if (x < 1)
+ fun_l4_n567(x)
+ else
+ fun_l4_n540(x)
+ end
+end
+
+def fun_l3_n318(x)
+ if (x < 1)
+ fun_l4_n449(x)
+ else
+ fun_l4_n760(x)
+ end
+end
+
+def fun_l3_n319(x)
+ if (x < 1)
+ fun_l4_n111(x)
+ else
+ fun_l4_n173(x)
+ end
+end
+
+def fun_l3_n320(x)
+ if (x < 1)
+ fun_l4_n373(x)
+ else
+ fun_l4_n615(x)
+ end
+end
+
+def fun_l3_n321(x)
+ if (x < 1)
+ fun_l4_n570(x)
+ else
+ fun_l4_n27(x)
+ end
+end
+
+def fun_l3_n322(x)
+ if (x < 1)
+ fun_l4_n235(x)
+ else
+ fun_l4_n695(x)
+ end
+end
+
+def fun_l3_n323(x)
+ if (x < 1)
+ fun_l4_n788(x)
+ else
+ fun_l4_n57(x)
+ end
+end
+
+def fun_l3_n324(x)
+ if (x < 1)
+ fun_l4_n7(x)
+ else
+ fun_l4_n75(x)
+ end
+end
+
+def fun_l3_n325(x)
+ if (x < 1)
+ fun_l4_n502(x)
+ else
+ fun_l4_n459(x)
+ end
+end
+
+def fun_l3_n326(x)
+ if (x < 1)
+ fun_l4_n572(x)
+ else
+ fun_l4_n324(x)
+ end
+end
+
+def fun_l3_n327(x)
+ if (x < 1)
+ fun_l4_n860(x)
+ else
+ fun_l4_n250(x)
+ end
+end
+
+def fun_l3_n328(x)
+ if (x < 1)
+ fun_l4_n242(x)
+ else
+ fun_l4_n667(x)
+ end
+end
+
+def fun_l3_n329(x)
+ if (x < 1)
+ fun_l4_n589(x)
+ else
+ fun_l4_n236(x)
+ end
+end
+
+def fun_l3_n330(x)
+ if (x < 1)
+ fun_l4_n100(x)
+ else
+ fun_l4_n476(x)
+ end
+end
+
+def fun_l3_n331(x)
+ if (x < 1)
+ fun_l4_n779(x)
+ else
+ fun_l4_n667(x)
+ end
+end
+
+def fun_l3_n332(x)
+ if (x < 1)
+ fun_l4_n277(x)
+ else
+ fun_l4_n441(x)
+ end
+end
+
+def fun_l3_n333(x)
+ if (x < 1)
+ fun_l4_n379(x)
+ else
+ fun_l4_n331(x)
+ end
+end
+
+def fun_l3_n334(x)
+ if (x < 1)
+ fun_l4_n802(x)
+ else
+ fun_l4_n588(x)
+ end
+end
+
+def fun_l3_n335(x)
+ if (x < 1)
+ fun_l4_n385(x)
+ else
+ fun_l4_n918(x)
+ end
+end
+
+def fun_l3_n336(x)
+ if (x < 1)
+ fun_l4_n532(x)
+ else
+ fun_l4_n254(x)
+ end
+end
+
+def fun_l3_n337(x)
+ if (x < 1)
+ fun_l4_n920(x)
+ else
+ fun_l4_n134(x)
+ end
+end
+
+def fun_l3_n338(x)
+ if (x < 1)
+ fun_l4_n803(x)
+ else
+ fun_l4_n746(x)
+ end
+end
+
+def fun_l3_n339(x)
+ if (x < 1)
+ fun_l4_n924(x)
+ else
+ fun_l4_n878(x)
+ end
+end
+
+def fun_l3_n340(x)
+ if (x < 1)
+ fun_l4_n166(x)
+ else
+ fun_l4_n776(x)
+ end
+end
+
+def fun_l3_n341(x)
+ if (x < 1)
+ fun_l4_n240(x)
+ else
+ fun_l4_n898(x)
+ end
+end
+
+def fun_l3_n342(x)
+ if (x < 1)
+ fun_l4_n810(x)
+ else
+ fun_l4_n423(x)
+ end
+end
+
+def fun_l3_n343(x)
+ if (x < 1)
+ fun_l4_n209(x)
+ else
+ fun_l4_n514(x)
+ end
+end
+
+def fun_l3_n344(x)
+ if (x < 1)
+ fun_l4_n505(x)
+ else
+ fun_l4_n321(x)
+ end
+end
+
+def fun_l3_n345(x)
+ if (x < 1)
+ fun_l4_n88(x)
+ else
+ fun_l4_n39(x)
+ end
+end
+
+def fun_l3_n346(x)
+ if (x < 1)
+ fun_l4_n848(x)
+ else
+ fun_l4_n357(x)
+ end
+end
+
+def fun_l3_n347(x)
+ if (x < 1)
+ fun_l4_n450(x)
+ else
+ fun_l4_n704(x)
+ end
+end
+
+def fun_l3_n348(x)
+ if (x < 1)
+ fun_l4_n881(x)
+ else
+ fun_l4_n916(x)
+ end
+end
+
+def fun_l3_n349(x)
+ if (x < 1)
+ fun_l4_n16(x)
+ else
+ fun_l4_n996(x)
+ end
+end
+
+def fun_l3_n350(x)
+ if (x < 1)
+ fun_l4_n839(x)
+ else
+ fun_l4_n697(x)
+ end
+end
+
+def fun_l3_n351(x)
+ if (x < 1)
+ fun_l4_n177(x)
+ else
+ fun_l4_n859(x)
+ end
+end
+
+def fun_l3_n352(x)
+ if (x < 1)
+ fun_l4_n684(x)
+ else
+ fun_l4_n778(x)
+ end
+end
+
+def fun_l3_n353(x)
+ if (x < 1)
+ fun_l4_n604(x)
+ else
+ fun_l4_n280(x)
+ end
+end
+
+def fun_l3_n354(x)
+ if (x < 1)
+ fun_l4_n366(x)
+ else
+ fun_l4_n576(x)
+ end
+end
+
+def fun_l3_n355(x)
+ if (x < 1)
+ fun_l4_n584(x)
+ else
+ fun_l4_n228(x)
+ end
+end
+
+def fun_l3_n356(x)
+ if (x < 1)
+ fun_l4_n844(x)
+ else
+ fun_l4_n799(x)
+ end
+end
+
+def fun_l3_n357(x)
+ if (x < 1)
+ fun_l4_n489(x)
+ else
+ fun_l4_n905(x)
+ end
+end
+
+def fun_l3_n358(x)
+ if (x < 1)
+ fun_l4_n781(x)
+ else
+ fun_l4_n565(x)
+ end
+end
+
+def fun_l3_n359(x)
+ if (x < 1)
+ fun_l4_n110(x)
+ else
+ fun_l4_n326(x)
+ end
+end
+
+def fun_l3_n360(x)
+ if (x < 1)
+ fun_l4_n915(x)
+ else
+ fun_l4_n489(x)
+ end
+end
+
+def fun_l3_n361(x)
+ if (x < 1)
+ fun_l4_n110(x)
+ else
+ fun_l4_n733(x)
+ end
+end
+
+def fun_l3_n362(x)
+ if (x < 1)
+ fun_l4_n93(x)
+ else
+ fun_l4_n549(x)
+ end
+end
+
+def fun_l3_n363(x)
+ if (x < 1)
+ fun_l4_n270(x)
+ else
+ fun_l4_n883(x)
+ end
+end
+
+def fun_l3_n364(x)
+ if (x < 1)
+ fun_l4_n512(x)
+ else
+ fun_l4_n555(x)
+ end
+end
+
+def fun_l3_n365(x)
+ if (x < 1)
+ fun_l4_n800(x)
+ else
+ fun_l4_n672(x)
+ end
+end
+
+def fun_l3_n366(x)
+ if (x < 1)
+ fun_l4_n535(x)
+ else
+ fun_l4_n320(x)
+ end
+end
+
+def fun_l3_n367(x)
+ if (x < 1)
+ fun_l4_n883(x)
+ else
+ fun_l4_n365(x)
+ end
+end
+
+def fun_l3_n368(x)
+ if (x < 1)
+ fun_l4_n306(x)
+ else
+ fun_l4_n614(x)
+ end
+end
+
+def fun_l3_n369(x)
+ if (x < 1)
+ fun_l4_n914(x)
+ else
+ fun_l4_n20(x)
+ end
+end
+
+def fun_l3_n370(x)
+ if (x < 1)
+ fun_l4_n230(x)
+ else
+ fun_l4_n585(x)
+ end
+end
+
+def fun_l3_n371(x)
+ if (x < 1)
+ fun_l4_n544(x)
+ else
+ fun_l4_n752(x)
+ end
+end
+
+def fun_l3_n372(x)
+ if (x < 1)
+ fun_l4_n984(x)
+ else
+ fun_l4_n738(x)
+ end
+end
+
+def fun_l3_n373(x)
+ if (x < 1)
+ fun_l4_n478(x)
+ else
+ fun_l4_n618(x)
+ end
+end
+
+def fun_l3_n374(x)
+ if (x < 1)
+ fun_l4_n941(x)
+ else
+ fun_l4_n746(x)
+ end
+end
+
+def fun_l3_n375(x)
+ if (x < 1)
+ fun_l4_n121(x)
+ else
+ fun_l4_n798(x)
+ end
+end
+
+def fun_l3_n376(x)
+ if (x < 1)
+ fun_l4_n851(x)
+ else
+ fun_l4_n681(x)
+ end
+end
+
+def fun_l3_n377(x)
+ if (x < 1)
+ fun_l4_n502(x)
+ else
+ fun_l4_n538(x)
+ end
+end
+
+def fun_l3_n378(x)
+ if (x < 1)
+ fun_l4_n432(x)
+ else
+ fun_l4_n226(x)
+ end
+end
+
+def fun_l3_n379(x)
+ if (x < 1)
+ fun_l4_n734(x)
+ else
+ fun_l4_n684(x)
+ end
+end
+
+def fun_l3_n380(x)
+ if (x < 1)
+ fun_l4_n912(x)
+ else
+ fun_l4_n116(x)
+ end
+end
+
+def fun_l3_n381(x)
+ if (x < 1)
+ fun_l4_n291(x)
+ else
+ fun_l4_n464(x)
+ end
+end
+
+def fun_l3_n382(x)
+ if (x < 1)
+ fun_l4_n285(x)
+ else
+ fun_l4_n943(x)
+ end
+end
+
+def fun_l3_n383(x)
+ if (x < 1)
+ fun_l4_n738(x)
+ else
+ fun_l4_n532(x)
+ end
+end
+
+def fun_l3_n384(x)
+ if (x < 1)
+ fun_l4_n503(x)
+ else
+ fun_l4_n724(x)
+ end
+end
+
+def fun_l3_n385(x)
+ if (x < 1)
+ fun_l4_n208(x)
+ else
+ fun_l4_n683(x)
+ end
+end
+
+def fun_l3_n386(x)
+ if (x < 1)
+ fun_l4_n673(x)
+ else
+ fun_l4_n230(x)
+ end
+end
+
+def fun_l3_n387(x)
+ if (x < 1)
+ fun_l4_n921(x)
+ else
+ fun_l4_n800(x)
+ end
+end
+
+def fun_l3_n388(x)
+ if (x < 1)
+ fun_l4_n461(x)
+ else
+ fun_l4_n369(x)
+ end
+end
+
+def fun_l3_n389(x)
+ if (x < 1)
+ fun_l4_n86(x)
+ else
+ fun_l4_n403(x)
+ end
+end
+
+def fun_l3_n390(x)
+ if (x < 1)
+ fun_l4_n883(x)
+ else
+ fun_l4_n92(x)
+ end
+end
+
+def fun_l3_n391(x)
+ if (x < 1)
+ fun_l4_n562(x)
+ else
+ fun_l4_n826(x)
+ end
+end
+
+def fun_l3_n392(x)
+ if (x < 1)
+ fun_l4_n459(x)
+ else
+ fun_l4_n821(x)
+ end
+end
+
+def fun_l3_n393(x)
+ if (x < 1)
+ fun_l4_n774(x)
+ else
+ fun_l4_n376(x)
+ end
+end
+
+def fun_l3_n394(x)
+ if (x < 1)
+ fun_l4_n114(x)
+ else
+ fun_l4_n451(x)
+ end
+end
+
+def fun_l3_n395(x)
+ if (x < 1)
+ fun_l4_n689(x)
+ else
+ fun_l4_n13(x)
+ end
+end
+
+def fun_l3_n396(x)
+ if (x < 1)
+ fun_l4_n929(x)
+ else
+ fun_l4_n777(x)
+ end
+end
+
+def fun_l3_n397(x)
+ if (x < 1)
+ fun_l4_n634(x)
+ else
+ fun_l4_n329(x)
+ end
+end
+
+def fun_l3_n398(x)
+ if (x < 1)
+ fun_l4_n972(x)
+ else
+ fun_l4_n572(x)
+ end
+end
+
+def fun_l3_n399(x)
+ if (x < 1)
+ fun_l4_n231(x)
+ else
+ fun_l4_n14(x)
+ end
+end
+
+def fun_l3_n400(x)
+ if (x < 1)
+ fun_l4_n357(x)
+ else
+ fun_l4_n881(x)
+ end
+end
+
+def fun_l3_n401(x)
+ if (x < 1)
+ fun_l4_n283(x)
+ else
+ fun_l4_n614(x)
+ end
+end
+
+def fun_l3_n402(x)
+ if (x < 1)
+ fun_l4_n634(x)
+ else
+ fun_l4_n653(x)
+ end
+end
+
+def fun_l3_n403(x)
+ if (x < 1)
+ fun_l4_n660(x)
+ else
+ fun_l4_n127(x)
+ end
+end
+
+def fun_l3_n404(x)
+ if (x < 1)
+ fun_l4_n523(x)
+ else
+ fun_l4_n467(x)
+ end
+end
+
+def fun_l3_n405(x)
+ if (x < 1)
+ fun_l4_n593(x)
+ else
+ fun_l4_n78(x)
+ end
+end
+
+def fun_l3_n406(x)
+ if (x < 1)
+ fun_l4_n514(x)
+ else
+ fun_l4_n783(x)
+ end
+end
+
+def fun_l3_n407(x)
+ if (x < 1)
+ fun_l4_n460(x)
+ else
+ fun_l4_n137(x)
+ end
+end
+
+def fun_l3_n408(x)
+ if (x < 1)
+ fun_l4_n3(x)
+ else
+ fun_l4_n224(x)
+ end
+end
+
+def fun_l3_n409(x)
+ if (x < 1)
+ fun_l4_n11(x)
+ else
+ fun_l4_n531(x)
+ end
+end
+
+def fun_l3_n410(x)
+ if (x < 1)
+ fun_l4_n94(x)
+ else
+ fun_l4_n717(x)
+ end
+end
+
+def fun_l3_n411(x)
+ if (x < 1)
+ fun_l4_n729(x)
+ else
+ fun_l4_n526(x)
+ end
+end
+
+def fun_l3_n412(x)
+ if (x < 1)
+ fun_l4_n86(x)
+ else
+ fun_l4_n26(x)
+ end
+end
+
+def fun_l3_n413(x)
+ if (x < 1)
+ fun_l4_n249(x)
+ else
+ fun_l4_n572(x)
+ end
+end
+
+def fun_l3_n414(x)
+ if (x < 1)
+ fun_l4_n578(x)
+ else
+ fun_l4_n512(x)
+ end
+end
+
+def fun_l3_n415(x)
+ if (x < 1)
+ fun_l4_n176(x)
+ else
+ fun_l4_n414(x)
+ end
+end
+
+def fun_l3_n416(x)
+ if (x < 1)
+ fun_l4_n757(x)
+ else
+ fun_l4_n385(x)
+ end
+end
+
+def fun_l3_n417(x)
+ if (x < 1)
+ fun_l4_n249(x)
+ else
+ fun_l4_n180(x)
+ end
+end
+
+def fun_l3_n418(x)
+ if (x < 1)
+ fun_l4_n193(x)
+ else
+ fun_l4_n294(x)
+ end
+end
+
+def fun_l3_n419(x)
+ if (x < 1)
+ fun_l4_n441(x)
+ else
+ fun_l4_n932(x)
+ end
+end
+
+def fun_l3_n420(x)
+ if (x < 1)
+ fun_l4_n492(x)
+ else
+ fun_l4_n239(x)
+ end
+end
+
+def fun_l3_n421(x)
+ if (x < 1)
+ fun_l4_n49(x)
+ else
+ fun_l4_n738(x)
+ end
+end
+
+def fun_l3_n422(x)
+ if (x < 1)
+ fun_l4_n546(x)
+ else
+ fun_l4_n285(x)
+ end
+end
+
+def fun_l3_n423(x)
+ if (x < 1)
+ fun_l4_n687(x)
+ else
+ fun_l4_n973(x)
+ end
+end
+
+def fun_l3_n424(x)
+ if (x < 1)
+ fun_l4_n876(x)
+ else
+ fun_l4_n591(x)
+ end
+end
+
+def fun_l3_n425(x)
+ if (x < 1)
+ fun_l4_n626(x)
+ else
+ fun_l4_n739(x)
+ end
+end
+
+def fun_l3_n426(x)
+ if (x < 1)
+ fun_l4_n439(x)
+ else
+ fun_l4_n709(x)
+ end
+end
+
+def fun_l3_n427(x)
+ if (x < 1)
+ fun_l4_n710(x)
+ else
+ fun_l4_n305(x)
+ end
+end
+
+def fun_l3_n428(x)
+ if (x < 1)
+ fun_l4_n61(x)
+ else
+ fun_l4_n520(x)
+ end
+end
+
+def fun_l3_n429(x)
+ if (x < 1)
+ fun_l4_n172(x)
+ else
+ fun_l4_n12(x)
+ end
+end
+
+def fun_l3_n430(x)
+ if (x < 1)
+ fun_l4_n432(x)
+ else
+ fun_l4_n174(x)
+ end
+end
+
+def fun_l3_n431(x)
+ if (x < 1)
+ fun_l4_n194(x)
+ else
+ fun_l4_n673(x)
+ end
+end
+
+def fun_l3_n432(x)
+ if (x < 1)
+ fun_l4_n952(x)
+ else
+ fun_l4_n798(x)
+ end
+end
+
+def fun_l3_n433(x)
+ if (x < 1)
+ fun_l4_n915(x)
+ else
+ fun_l4_n263(x)
+ end
+end
+
+def fun_l3_n434(x)
+ if (x < 1)
+ fun_l4_n735(x)
+ else
+ fun_l4_n882(x)
+ end
+end
+
+def fun_l3_n435(x)
+ if (x < 1)
+ fun_l4_n499(x)
+ else
+ fun_l4_n947(x)
+ end
+end
+
+def fun_l3_n436(x)
+ if (x < 1)
+ fun_l4_n744(x)
+ else
+ fun_l4_n187(x)
+ end
+end
+
+def fun_l3_n437(x)
+ if (x < 1)
+ fun_l4_n244(x)
+ else
+ fun_l4_n366(x)
+ end
+end
+
+def fun_l3_n438(x)
+ if (x < 1)
+ fun_l4_n228(x)
+ else
+ fun_l4_n866(x)
+ end
+end
+
+def fun_l3_n439(x)
+ if (x < 1)
+ fun_l4_n617(x)
+ else
+ fun_l4_n924(x)
+ end
+end
+
+def fun_l3_n440(x)
+ if (x < 1)
+ fun_l4_n302(x)
+ else
+ fun_l4_n691(x)
+ end
+end
+
+def fun_l3_n441(x)
+ if (x < 1)
+ fun_l4_n559(x)
+ else
+ fun_l4_n758(x)
+ end
+end
+
+def fun_l3_n442(x)
+ if (x < 1)
+ fun_l4_n412(x)
+ else
+ fun_l4_n544(x)
+ end
+end
+
+def fun_l3_n443(x)
+ if (x < 1)
+ fun_l4_n130(x)
+ else
+ fun_l4_n764(x)
+ end
+end
+
+def fun_l3_n444(x)
+ if (x < 1)
+ fun_l4_n135(x)
+ else
+ fun_l4_n65(x)
+ end
+end
+
+def fun_l3_n445(x)
+ if (x < 1)
+ fun_l4_n224(x)
+ else
+ fun_l4_n148(x)
+ end
+end
+
+def fun_l3_n446(x)
+ if (x < 1)
+ fun_l4_n238(x)
+ else
+ fun_l4_n808(x)
+ end
+end
+
+def fun_l3_n447(x)
+ if (x < 1)
+ fun_l4_n832(x)
+ else
+ fun_l4_n460(x)
+ end
+end
+
+def fun_l3_n448(x)
+ if (x < 1)
+ fun_l4_n897(x)
+ else
+ fun_l4_n817(x)
+ end
+end
+
+def fun_l3_n449(x)
+ if (x < 1)
+ fun_l4_n587(x)
+ else
+ fun_l4_n926(x)
+ end
+end
+
+def fun_l3_n450(x)
+ if (x < 1)
+ fun_l4_n575(x)
+ else
+ fun_l4_n274(x)
+ end
+end
+
+def fun_l3_n451(x)
+ if (x < 1)
+ fun_l4_n42(x)
+ else
+ fun_l4_n714(x)
+ end
+end
+
+def fun_l3_n452(x)
+ if (x < 1)
+ fun_l4_n101(x)
+ else
+ fun_l4_n470(x)
+ end
+end
+
+def fun_l3_n453(x)
+ if (x < 1)
+ fun_l4_n906(x)
+ else
+ fun_l4_n724(x)
+ end
+end
+
+def fun_l3_n454(x)
+ if (x < 1)
+ fun_l4_n682(x)
+ else
+ fun_l4_n709(x)
+ end
+end
+
+def fun_l3_n455(x)
+ if (x < 1)
+ fun_l4_n929(x)
+ else
+ fun_l4_n274(x)
+ end
+end
+
+def fun_l3_n456(x)
+ if (x < 1)
+ fun_l4_n10(x)
+ else
+ fun_l4_n101(x)
+ end
+end
+
+def fun_l3_n457(x)
+ if (x < 1)
+ fun_l4_n825(x)
+ else
+ fun_l4_n632(x)
+ end
+end
+
+def fun_l3_n458(x)
+ if (x < 1)
+ fun_l4_n2(x)
+ else
+ fun_l4_n723(x)
+ end
+end
+
+def fun_l3_n459(x)
+ if (x < 1)
+ fun_l4_n998(x)
+ else
+ fun_l4_n546(x)
+ end
+end
+
+def fun_l3_n460(x)
+ if (x < 1)
+ fun_l4_n483(x)
+ else
+ fun_l4_n445(x)
+ end
+end
+
+def fun_l3_n461(x)
+ if (x < 1)
+ fun_l4_n896(x)
+ else
+ fun_l4_n993(x)
+ end
+end
+
+def fun_l3_n462(x)
+ if (x < 1)
+ fun_l4_n710(x)
+ else
+ fun_l4_n529(x)
+ end
+end
+
+def fun_l3_n463(x)
+ if (x < 1)
+ fun_l4_n913(x)
+ else
+ fun_l4_n65(x)
+ end
+end
+
+def fun_l3_n464(x)
+ if (x < 1)
+ fun_l4_n712(x)
+ else
+ fun_l4_n412(x)
+ end
+end
+
+def fun_l3_n465(x)
+ if (x < 1)
+ fun_l4_n296(x)
+ else
+ fun_l4_n37(x)
+ end
+end
+
+def fun_l3_n466(x)
+ if (x < 1)
+ fun_l4_n241(x)
+ else
+ fun_l4_n649(x)
+ end
+end
+
+def fun_l3_n467(x)
+ if (x < 1)
+ fun_l4_n296(x)
+ else
+ fun_l4_n80(x)
+ end
+end
+
+def fun_l3_n468(x)
+ if (x < 1)
+ fun_l4_n270(x)
+ else
+ fun_l4_n192(x)
+ end
+end
+
+def fun_l3_n469(x)
+ if (x < 1)
+ fun_l4_n655(x)
+ else
+ fun_l4_n605(x)
+ end
+end
+
+def fun_l3_n470(x)
+ if (x < 1)
+ fun_l4_n180(x)
+ else
+ fun_l4_n807(x)
+ end
+end
+
+def fun_l3_n471(x)
+ if (x < 1)
+ fun_l4_n915(x)
+ else
+ fun_l4_n902(x)
+ end
+end
+
+def fun_l3_n472(x)
+ if (x < 1)
+ fun_l4_n527(x)
+ else
+ fun_l4_n638(x)
+ end
+end
+
+def fun_l3_n473(x)
+ if (x < 1)
+ fun_l4_n775(x)
+ else
+ fun_l4_n572(x)
+ end
+end
+
+def fun_l3_n474(x)
+ if (x < 1)
+ fun_l4_n856(x)
+ else
+ fun_l4_n182(x)
+ end
+end
+
+def fun_l3_n475(x)
+ if (x < 1)
+ fun_l4_n434(x)
+ else
+ fun_l4_n232(x)
+ end
+end
+
+def fun_l3_n476(x)
+ if (x < 1)
+ fun_l4_n208(x)
+ else
+ fun_l4_n37(x)
+ end
+end
+
+def fun_l3_n477(x)
+ if (x < 1)
+ fun_l4_n146(x)
+ else
+ fun_l4_n764(x)
+ end
+end
+
+def fun_l3_n478(x)
+ if (x < 1)
+ fun_l4_n806(x)
+ else
+ fun_l4_n148(x)
+ end
+end
+
+def fun_l3_n479(x)
+ if (x < 1)
+ fun_l4_n46(x)
+ else
+ fun_l4_n488(x)
+ end
+end
+
+def fun_l3_n480(x)
+ if (x < 1)
+ fun_l4_n637(x)
+ else
+ fun_l4_n48(x)
+ end
+end
+
+def fun_l3_n481(x)
+ if (x < 1)
+ fun_l4_n496(x)
+ else
+ fun_l4_n891(x)
+ end
+end
+
+def fun_l3_n482(x)
+ if (x < 1)
+ fun_l4_n282(x)
+ else
+ fun_l4_n736(x)
+ end
+end
+
+def fun_l3_n483(x)
+ if (x < 1)
+ fun_l4_n377(x)
+ else
+ fun_l4_n603(x)
+ end
+end
+
+def fun_l3_n484(x)
+ if (x < 1)
+ fun_l4_n689(x)
+ else
+ fun_l4_n201(x)
+ end
+end
+
+def fun_l3_n485(x)
+ if (x < 1)
+ fun_l4_n495(x)
+ else
+ fun_l4_n721(x)
+ end
+end
+
+def fun_l3_n486(x)
+ if (x < 1)
+ fun_l4_n128(x)
+ else
+ fun_l4_n400(x)
+ end
+end
+
+def fun_l3_n487(x)
+ if (x < 1)
+ fun_l4_n993(x)
+ else
+ fun_l4_n53(x)
+ end
+end
+
+def fun_l3_n488(x)
+ if (x < 1)
+ fun_l4_n913(x)
+ else
+ fun_l4_n814(x)
+ end
+end
+
+def fun_l3_n489(x)
+ if (x < 1)
+ fun_l4_n816(x)
+ else
+ fun_l4_n196(x)
+ end
+end
+
+def fun_l3_n490(x)
+ if (x < 1)
+ fun_l4_n754(x)
+ else
+ fun_l4_n451(x)
+ end
+end
+
+def fun_l3_n491(x)
+ if (x < 1)
+ fun_l4_n57(x)
+ else
+ fun_l4_n597(x)
+ end
+end
+
+def fun_l3_n492(x)
+ if (x < 1)
+ fun_l4_n539(x)
+ else
+ fun_l4_n893(x)
+ end
+end
+
+def fun_l3_n493(x)
+ if (x < 1)
+ fun_l4_n542(x)
+ else
+ fun_l4_n956(x)
+ end
+end
+
+def fun_l3_n494(x)
+ if (x < 1)
+ fun_l4_n479(x)
+ else
+ fun_l4_n448(x)
+ end
+end
+
+def fun_l3_n495(x)
+ if (x < 1)
+ fun_l4_n263(x)
+ else
+ fun_l4_n912(x)
+ end
+end
+
+def fun_l3_n496(x)
+ if (x < 1)
+ fun_l4_n463(x)
+ else
+ fun_l4_n112(x)
+ end
+end
+
+def fun_l3_n497(x)
+ if (x < 1)
+ fun_l4_n806(x)
+ else
+ fun_l4_n486(x)
+ end
+end
+
+def fun_l3_n498(x)
+ if (x < 1)
+ fun_l4_n155(x)
+ else
+ fun_l4_n298(x)
+ end
+end
+
+def fun_l3_n499(x)
+ if (x < 1)
+ fun_l4_n538(x)
+ else
+ fun_l4_n908(x)
+ end
+end
+
+def fun_l3_n500(x)
+ if (x < 1)
+ fun_l4_n769(x)
+ else
+ fun_l4_n681(x)
+ end
+end
+
+def fun_l3_n501(x)
+ if (x < 1)
+ fun_l4_n544(x)
+ else
+ fun_l4_n749(x)
+ end
+end
+
+def fun_l3_n502(x)
+ if (x < 1)
+ fun_l4_n505(x)
+ else
+ fun_l4_n66(x)
+ end
+end
+
+def fun_l3_n503(x)
+ if (x < 1)
+ fun_l4_n900(x)
+ else
+ fun_l4_n917(x)
+ end
+end
+
+def fun_l3_n504(x)
+ if (x < 1)
+ fun_l4_n982(x)
+ else
+ fun_l4_n707(x)
+ end
+end
+
+def fun_l3_n505(x)
+ if (x < 1)
+ fun_l4_n102(x)
+ else
+ fun_l4_n564(x)
+ end
+end
+
+def fun_l3_n506(x)
+ if (x < 1)
+ fun_l4_n332(x)
+ else
+ fun_l4_n367(x)
+ end
+end
+
+def fun_l3_n507(x)
+ if (x < 1)
+ fun_l4_n969(x)
+ else
+ fun_l4_n354(x)
+ end
+end
+
+def fun_l3_n508(x)
+ if (x < 1)
+ fun_l4_n405(x)
+ else
+ fun_l4_n485(x)
+ end
+end
+
+def fun_l3_n509(x)
+ if (x < 1)
+ fun_l4_n477(x)
+ else
+ fun_l4_n629(x)
+ end
+end
+
+def fun_l3_n510(x)
+ if (x < 1)
+ fun_l4_n753(x)
+ else
+ fun_l4_n78(x)
+ end
+end
+
+def fun_l3_n511(x)
+ if (x < 1)
+ fun_l4_n221(x)
+ else
+ fun_l4_n637(x)
+ end
+end
+
+def fun_l3_n512(x)
+ if (x < 1)
+ fun_l4_n540(x)
+ else
+ fun_l4_n267(x)
+ end
+end
+
+def fun_l3_n513(x)
+ if (x < 1)
+ fun_l4_n750(x)
+ else
+ fun_l4_n626(x)
+ end
+end
+
+def fun_l3_n514(x)
+ if (x < 1)
+ fun_l4_n160(x)
+ else
+ fun_l4_n32(x)
+ end
+end
+
+def fun_l3_n515(x)
+ if (x < 1)
+ fun_l4_n914(x)
+ else
+ fun_l4_n967(x)
+ end
+end
+
+def fun_l3_n516(x)
+ if (x < 1)
+ fun_l4_n380(x)
+ else
+ fun_l4_n83(x)
+ end
+end
+
+def fun_l3_n517(x)
+ if (x < 1)
+ fun_l4_n175(x)
+ else
+ fun_l4_n830(x)
+ end
+end
+
+def fun_l3_n518(x)
+ if (x < 1)
+ fun_l4_n579(x)
+ else
+ fun_l4_n773(x)
+ end
+end
+
+def fun_l3_n519(x)
+ if (x < 1)
+ fun_l4_n707(x)
+ else
+ fun_l4_n690(x)
+ end
+end
+
+def fun_l3_n520(x)
+ if (x < 1)
+ fun_l4_n587(x)
+ else
+ fun_l4_n754(x)
+ end
+end
+
+def fun_l3_n521(x)
+ if (x < 1)
+ fun_l4_n922(x)
+ else
+ fun_l4_n156(x)
+ end
+end
+
+def fun_l3_n522(x)
+ if (x < 1)
+ fun_l4_n299(x)
+ else
+ fun_l4_n937(x)
+ end
+end
+
+def fun_l3_n523(x)
+ if (x < 1)
+ fun_l4_n996(x)
+ else
+ fun_l4_n786(x)
+ end
+end
+
+def fun_l3_n524(x)
+ if (x < 1)
+ fun_l4_n988(x)
+ else
+ fun_l4_n930(x)
+ end
+end
+
+def fun_l3_n525(x)
+ if (x < 1)
+ fun_l4_n475(x)
+ else
+ fun_l4_n43(x)
+ end
+end
+
+def fun_l3_n526(x)
+ if (x < 1)
+ fun_l4_n367(x)
+ else
+ fun_l4_n167(x)
+ end
+end
+
+def fun_l3_n527(x)
+ if (x < 1)
+ fun_l4_n31(x)
+ else
+ fun_l4_n394(x)
+ end
+end
+
+def fun_l3_n528(x)
+ if (x < 1)
+ fun_l4_n92(x)
+ else
+ fun_l4_n304(x)
+ end
+end
+
+def fun_l3_n529(x)
+ if (x < 1)
+ fun_l4_n369(x)
+ else
+ fun_l4_n458(x)
+ end
+end
+
+def fun_l3_n530(x)
+ if (x < 1)
+ fun_l4_n475(x)
+ else
+ fun_l4_n8(x)
+ end
+end
+
+def fun_l3_n531(x)
+ if (x < 1)
+ fun_l4_n678(x)
+ else
+ fun_l4_n970(x)
+ end
+end
+
+def fun_l3_n532(x)
+ if (x < 1)
+ fun_l4_n349(x)
+ else
+ fun_l4_n853(x)
+ end
+end
+
+def fun_l3_n533(x)
+ if (x < 1)
+ fun_l4_n918(x)
+ else
+ fun_l4_n820(x)
+ end
+end
+
+def fun_l3_n534(x)
+ if (x < 1)
+ fun_l4_n369(x)
+ else
+ fun_l4_n445(x)
+ end
+end
+
+def fun_l3_n535(x)
+ if (x < 1)
+ fun_l4_n437(x)
+ else
+ fun_l4_n930(x)
+ end
+end
+
+def fun_l3_n536(x)
+ if (x < 1)
+ fun_l4_n143(x)
+ else
+ fun_l4_n986(x)
+ end
+end
+
+def fun_l3_n537(x)
+ if (x < 1)
+ fun_l4_n603(x)
+ else
+ fun_l4_n818(x)
+ end
+end
+
+def fun_l3_n538(x)
+ if (x < 1)
+ fun_l4_n14(x)
+ else
+ fun_l4_n994(x)
+ end
+end
+
+def fun_l3_n539(x)
+ if (x < 1)
+ fun_l4_n650(x)
+ else
+ fun_l4_n115(x)
+ end
+end
+
+def fun_l3_n540(x)
+ if (x < 1)
+ fun_l4_n74(x)
+ else
+ fun_l4_n559(x)
+ end
+end
+
+def fun_l3_n541(x)
+ if (x < 1)
+ fun_l4_n601(x)
+ else
+ fun_l4_n756(x)
+ end
+end
+
+def fun_l3_n542(x)
+ if (x < 1)
+ fun_l4_n733(x)
+ else
+ fun_l4_n274(x)
+ end
+end
+
+def fun_l3_n543(x)
+ if (x < 1)
+ fun_l4_n819(x)
+ else
+ fun_l4_n97(x)
+ end
+end
+
+def fun_l3_n544(x)
+ if (x < 1)
+ fun_l4_n949(x)
+ else
+ fun_l4_n868(x)
+ end
+end
+
+def fun_l3_n545(x)
+ if (x < 1)
+ fun_l4_n346(x)
+ else
+ fun_l4_n0(x)
+ end
+end
+
+def fun_l3_n546(x)
+ if (x < 1)
+ fun_l4_n820(x)
+ else
+ fun_l4_n188(x)
+ end
+end
+
+def fun_l3_n547(x)
+ if (x < 1)
+ fun_l4_n403(x)
+ else
+ fun_l4_n670(x)
+ end
+end
+
+def fun_l3_n548(x)
+ if (x < 1)
+ fun_l4_n449(x)
+ else
+ fun_l4_n96(x)
+ end
+end
+
+def fun_l3_n549(x)
+ if (x < 1)
+ fun_l4_n585(x)
+ else
+ fun_l4_n388(x)
+ end
+end
+
+def fun_l3_n550(x)
+ if (x < 1)
+ fun_l4_n963(x)
+ else
+ fun_l4_n979(x)
+ end
+end
+
+def fun_l3_n551(x)
+ if (x < 1)
+ fun_l4_n892(x)
+ else
+ fun_l4_n583(x)
+ end
+end
+
+def fun_l3_n552(x)
+ if (x < 1)
+ fun_l4_n846(x)
+ else
+ fun_l4_n508(x)
+ end
+end
+
+def fun_l3_n553(x)
+ if (x < 1)
+ fun_l4_n160(x)
+ else
+ fun_l4_n428(x)
+ end
+end
+
+def fun_l3_n554(x)
+ if (x < 1)
+ fun_l4_n538(x)
+ else
+ fun_l4_n641(x)
+ end
+end
+
+def fun_l3_n555(x)
+ if (x < 1)
+ fun_l4_n523(x)
+ else
+ fun_l4_n697(x)
+ end
+end
+
+def fun_l3_n556(x)
+ if (x < 1)
+ fun_l4_n212(x)
+ else
+ fun_l4_n0(x)
+ end
+end
+
+def fun_l3_n557(x)
+ if (x < 1)
+ fun_l4_n255(x)
+ else
+ fun_l4_n860(x)
+ end
+end
+
+def fun_l3_n558(x)
+ if (x < 1)
+ fun_l4_n807(x)
+ else
+ fun_l4_n865(x)
+ end
+end
+
+def fun_l3_n559(x)
+ if (x < 1)
+ fun_l4_n735(x)
+ else
+ fun_l4_n352(x)
+ end
+end
+
+def fun_l3_n560(x)
+ if (x < 1)
+ fun_l4_n327(x)
+ else
+ fun_l4_n739(x)
+ end
+end
+
+def fun_l3_n561(x)
+ if (x < 1)
+ fun_l4_n637(x)
+ else
+ fun_l4_n628(x)
+ end
+end
+
+def fun_l3_n562(x)
+ if (x < 1)
+ fun_l4_n281(x)
+ else
+ fun_l4_n394(x)
+ end
+end
+
+def fun_l3_n563(x)
+ if (x < 1)
+ fun_l4_n440(x)
+ else
+ fun_l4_n179(x)
+ end
+end
+
+def fun_l3_n564(x)
+ if (x < 1)
+ fun_l4_n893(x)
+ else
+ fun_l4_n396(x)
+ end
+end
+
+def fun_l3_n565(x)
+ if (x < 1)
+ fun_l4_n94(x)
+ else
+ fun_l4_n134(x)
+ end
+end
+
+def fun_l3_n566(x)
+ if (x < 1)
+ fun_l4_n968(x)
+ else
+ fun_l4_n444(x)
+ end
+end
+
+def fun_l3_n567(x)
+ if (x < 1)
+ fun_l4_n85(x)
+ else
+ fun_l4_n281(x)
+ end
+end
+
+def fun_l3_n568(x)
+ if (x < 1)
+ fun_l4_n164(x)
+ else
+ fun_l4_n229(x)
+ end
+end
+
+def fun_l3_n569(x)
+ if (x < 1)
+ fun_l4_n594(x)
+ else
+ fun_l4_n756(x)
+ end
+end
+
+def fun_l3_n570(x)
+ if (x < 1)
+ fun_l4_n77(x)
+ else
+ fun_l4_n370(x)
+ end
+end
+
+def fun_l3_n571(x)
+ if (x < 1)
+ fun_l4_n724(x)
+ else
+ fun_l4_n965(x)
+ end
+end
+
+def fun_l3_n572(x)
+ if (x < 1)
+ fun_l4_n394(x)
+ else
+ fun_l4_n595(x)
+ end
+end
+
+def fun_l3_n573(x)
+ if (x < 1)
+ fun_l4_n54(x)
+ else
+ fun_l4_n41(x)
+ end
+end
+
+def fun_l3_n574(x)
+ if (x < 1)
+ fun_l4_n951(x)
+ else
+ fun_l4_n414(x)
+ end
+end
+
+def fun_l3_n575(x)
+ if (x < 1)
+ fun_l4_n297(x)
+ else
+ fun_l4_n126(x)
+ end
+end
+
+def fun_l3_n576(x)
+ if (x < 1)
+ fun_l4_n538(x)
+ else
+ fun_l4_n276(x)
+ end
+end
+
+def fun_l3_n577(x)
+ if (x < 1)
+ fun_l4_n882(x)
+ else
+ fun_l4_n152(x)
+ end
+end
+
+def fun_l3_n578(x)
+ if (x < 1)
+ fun_l4_n950(x)
+ else
+ fun_l4_n822(x)
+ end
+end
+
+def fun_l3_n579(x)
+ if (x < 1)
+ fun_l4_n484(x)
+ else
+ fun_l4_n99(x)
+ end
+end
+
+def fun_l3_n580(x)
+ if (x < 1)
+ fun_l4_n160(x)
+ else
+ fun_l4_n98(x)
+ end
+end
+
+def fun_l3_n581(x)
+ if (x < 1)
+ fun_l4_n10(x)
+ else
+ fun_l4_n537(x)
+ end
+end
+
+def fun_l3_n582(x)
+ if (x < 1)
+ fun_l4_n419(x)
+ else
+ fun_l4_n285(x)
+ end
+end
+
+def fun_l3_n583(x)
+ if (x < 1)
+ fun_l4_n330(x)
+ else
+ fun_l4_n732(x)
+ end
+end
+
+def fun_l3_n584(x)
+ if (x < 1)
+ fun_l4_n64(x)
+ else
+ fun_l4_n461(x)
+ end
+end
+
+def fun_l3_n585(x)
+ if (x < 1)
+ fun_l4_n326(x)
+ else
+ fun_l4_n134(x)
+ end
+end
+
+def fun_l3_n586(x)
+ if (x < 1)
+ fun_l4_n423(x)
+ else
+ fun_l4_n945(x)
+ end
+end
+
+def fun_l3_n587(x)
+ if (x < 1)
+ fun_l4_n192(x)
+ else
+ fun_l4_n459(x)
+ end
+end
+
+def fun_l3_n588(x)
+ if (x < 1)
+ fun_l4_n292(x)
+ else
+ fun_l4_n697(x)
+ end
+end
+
+def fun_l3_n589(x)
+ if (x < 1)
+ fun_l4_n612(x)
+ else
+ fun_l4_n902(x)
+ end
+end
+
+def fun_l3_n590(x)
+ if (x < 1)
+ fun_l4_n842(x)
+ else
+ fun_l4_n300(x)
+ end
+end
+
+def fun_l3_n591(x)
+ if (x < 1)
+ fun_l4_n265(x)
+ else
+ fun_l4_n906(x)
+ end
+end
+
+def fun_l3_n592(x)
+ if (x < 1)
+ fun_l4_n82(x)
+ else
+ fun_l4_n417(x)
+ end
+end
+
+def fun_l3_n593(x)
+ if (x < 1)
+ fun_l4_n896(x)
+ else
+ fun_l4_n794(x)
+ end
+end
+
+def fun_l3_n594(x)
+ if (x < 1)
+ fun_l4_n760(x)
+ else
+ fun_l4_n83(x)
+ end
+end
+
+def fun_l3_n595(x)
+ if (x < 1)
+ fun_l4_n750(x)
+ else
+ fun_l4_n778(x)
+ end
+end
+
+def fun_l3_n596(x)
+ if (x < 1)
+ fun_l4_n655(x)
+ else
+ fun_l4_n8(x)
+ end
+end
+
+def fun_l3_n597(x)
+ if (x < 1)
+ fun_l4_n810(x)
+ else
+ fun_l4_n824(x)
+ end
+end
+
+def fun_l3_n598(x)
+ if (x < 1)
+ fun_l4_n904(x)
+ else
+ fun_l4_n921(x)
+ end
+end
+
+def fun_l3_n599(x)
+ if (x < 1)
+ fun_l4_n16(x)
+ else
+ fun_l4_n96(x)
+ end
+end
+
+def fun_l3_n600(x)
+ if (x < 1)
+ fun_l4_n879(x)
+ else
+ fun_l4_n757(x)
+ end
+end
+
+def fun_l3_n601(x)
+ if (x < 1)
+ fun_l4_n840(x)
+ else
+ fun_l4_n62(x)
+ end
+end
+
+def fun_l3_n602(x)
+ if (x < 1)
+ fun_l4_n547(x)
+ else
+ fun_l4_n45(x)
+ end
+end
+
+def fun_l3_n603(x)
+ if (x < 1)
+ fun_l4_n978(x)
+ else
+ fun_l4_n646(x)
+ end
+end
+
+def fun_l3_n604(x)
+ if (x < 1)
+ fun_l4_n100(x)
+ else
+ fun_l4_n654(x)
+ end
+end
+
+def fun_l3_n605(x)
+ if (x < 1)
+ fun_l4_n257(x)
+ else
+ fun_l4_n465(x)
+ end
+end
+
+def fun_l3_n606(x)
+ if (x < 1)
+ fun_l4_n427(x)
+ else
+ fun_l4_n489(x)
+ end
+end
+
+def fun_l3_n607(x)
+ if (x < 1)
+ fun_l4_n465(x)
+ else
+ fun_l4_n48(x)
+ end
+end
+
+def fun_l3_n608(x)
+ if (x < 1)
+ fun_l4_n571(x)
+ else
+ fun_l4_n332(x)
+ end
+end
+
+def fun_l3_n609(x)
+ if (x < 1)
+ fun_l4_n813(x)
+ else
+ fun_l4_n462(x)
+ end
+end
+
+def fun_l3_n610(x)
+ if (x < 1)
+ fun_l4_n931(x)
+ else
+ fun_l4_n913(x)
+ end
+end
+
+def fun_l3_n611(x)
+ if (x < 1)
+ fun_l4_n20(x)
+ else
+ fun_l4_n933(x)
+ end
+end
+
+def fun_l3_n612(x)
+ if (x < 1)
+ fun_l4_n913(x)
+ else
+ fun_l4_n787(x)
+ end
+end
+
+def fun_l3_n613(x)
+ if (x < 1)
+ fun_l4_n523(x)
+ else
+ fun_l4_n735(x)
+ end
+end
+
+def fun_l3_n614(x)
+ if (x < 1)
+ fun_l4_n315(x)
+ else
+ fun_l4_n913(x)
+ end
+end
+
+def fun_l3_n615(x)
+ if (x < 1)
+ fun_l4_n308(x)
+ else
+ fun_l4_n865(x)
+ end
+end
+
+def fun_l3_n616(x)
+ if (x < 1)
+ fun_l4_n847(x)
+ else
+ fun_l4_n506(x)
+ end
+end
+
+def fun_l3_n617(x)
+ if (x < 1)
+ fun_l4_n387(x)
+ else
+ fun_l4_n551(x)
+ end
+end
+
+def fun_l3_n618(x)
+ if (x < 1)
+ fun_l4_n276(x)
+ else
+ fun_l4_n991(x)
+ end
+end
+
+def fun_l3_n619(x)
+ if (x < 1)
+ fun_l4_n524(x)
+ else
+ fun_l4_n202(x)
+ end
+end
+
+def fun_l3_n620(x)
+ if (x < 1)
+ fun_l4_n836(x)
+ else
+ fun_l4_n548(x)
+ end
+end
+
+def fun_l3_n621(x)
+ if (x < 1)
+ fun_l4_n76(x)
+ else
+ fun_l4_n5(x)
+ end
+end
+
+def fun_l3_n622(x)
+ if (x < 1)
+ fun_l4_n223(x)
+ else
+ fun_l4_n438(x)
+ end
+end
+
+def fun_l3_n623(x)
+ if (x < 1)
+ fun_l4_n31(x)
+ else
+ fun_l4_n590(x)
+ end
+end
+
+def fun_l3_n624(x)
+ if (x < 1)
+ fun_l4_n609(x)
+ else
+ fun_l4_n513(x)
+ end
+end
+
+def fun_l3_n625(x)
+ if (x < 1)
+ fun_l4_n966(x)
+ else
+ fun_l4_n278(x)
+ end
+end
+
+def fun_l3_n626(x)
+ if (x < 1)
+ fun_l4_n82(x)
+ else
+ fun_l4_n100(x)
+ end
+end
+
+def fun_l3_n627(x)
+ if (x < 1)
+ fun_l4_n312(x)
+ else
+ fun_l4_n746(x)
+ end
+end
+
+def fun_l3_n628(x)
+ if (x < 1)
+ fun_l4_n682(x)
+ else
+ fun_l4_n791(x)
+ end
+end
+
+def fun_l3_n629(x)
+ if (x < 1)
+ fun_l4_n166(x)
+ else
+ fun_l4_n678(x)
+ end
+end
+
+def fun_l3_n630(x)
+ if (x < 1)
+ fun_l4_n507(x)
+ else
+ fun_l4_n778(x)
+ end
+end
+
+def fun_l3_n631(x)
+ if (x < 1)
+ fun_l4_n97(x)
+ else
+ fun_l4_n13(x)
+ end
+end
+
+def fun_l3_n632(x)
+ if (x < 1)
+ fun_l4_n976(x)
+ else
+ fun_l4_n794(x)
+ end
+end
+
+def fun_l3_n633(x)
+ if (x < 1)
+ fun_l4_n994(x)
+ else
+ fun_l4_n743(x)
+ end
+end
+
+def fun_l3_n634(x)
+ if (x < 1)
+ fun_l4_n212(x)
+ else
+ fun_l4_n754(x)
+ end
+end
+
+def fun_l3_n635(x)
+ if (x < 1)
+ fun_l4_n602(x)
+ else
+ fun_l4_n997(x)
+ end
+end
+
+def fun_l3_n636(x)
+ if (x < 1)
+ fun_l4_n282(x)
+ else
+ fun_l4_n133(x)
+ end
+end
+
+def fun_l3_n637(x)
+ if (x < 1)
+ fun_l4_n413(x)
+ else
+ fun_l4_n952(x)
+ end
+end
+
+def fun_l3_n638(x)
+ if (x < 1)
+ fun_l4_n843(x)
+ else
+ fun_l4_n783(x)
+ end
+end
+
+def fun_l3_n639(x)
+ if (x < 1)
+ fun_l4_n946(x)
+ else
+ fun_l4_n833(x)
+ end
+end
+
+def fun_l3_n640(x)
+ if (x < 1)
+ fun_l4_n384(x)
+ else
+ fun_l4_n444(x)
+ end
+end
+
+def fun_l3_n641(x)
+ if (x < 1)
+ fun_l4_n699(x)
+ else
+ fun_l4_n26(x)
+ end
+end
+
+def fun_l3_n642(x)
+ if (x < 1)
+ fun_l4_n96(x)
+ else
+ fun_l4_n623(x)
+ end
+end
+
+def fun_l3_n643(x)
+ if (x < 1)
+ fun_l4_n928(x)
+ else
+ fun_l4_n8(x)
+ end
+end
+
+def fun_l3_n644(x)
+ if (x < 1)
+ fun_l4_n804(x)
+ else
+ fun_l4_n618(x)
+ end
+end
+
+def fun_l3_n645(x)
+ if (x < 1)
+ fun_l4_n513(x)
+ else
+ fun_l4_n214(x)
+ end
+end
+
+def fun_l3_n646(x)
+ if (x < 1)
+ fun_l4_n605(x)
+ else
+ fun_l4_n920(x)
+ end
+end
+
+def fun_l3_n647(x)
+ if (x < 1)
+ fun_l4_n774(x)
+ else
+ fun_l4_n297(x)
+ end
+end
+
+def fun_l3_n648(x)
+ if (x < 1)
+ fun_l4_n871(x)
+ else
+ fun_l4_n75(x)
+ end
+end
+
+def fun_l3_n649(x)
+ if (x < 1)
+ fun_l4_n196(x)
+ else
+ fun_l4_n277(x)
+ end
+end
+
+def fun_l3_n650(x)
+ if (x < 1)
+ fun_l4_n25(x)
+ else
+ fun_l4_n681(x)
+ end
+end
+
+def fun_l3_n651(x)
+ if (x < 1)
+ fun_l4_n603(x)
+ else
+ fun_l4_n46(x)
+ end
+end
+
+def fun_l3_n652(x)
+ if (x < 1)
+ fun_l4_n623(x)
+ else
+ fun_l4_n604(x)
+ end
+end
+
+def fun_l3_n653(x)
+ if (x < 1)
+ fun_l4_n854(x)
+ else
+ fun_l4_n955(x)
+ end
+end
+
+def fun_l3_n654(x)
+ if (x < 1)
+ fun_l4_n667(x)
+ else
+ fun_l4_n988(x)
+ end
+end
+
+def fun_l3_n655(x)
+ if (x < 1)
+ fun_l4_n263(x)
+ else
+ fun_l4_n831(x)
+ end
+end
+
+def fun_l3_n656(x)
+ if (x < 1)
+ fun_l4_n591(x)
+ else
+ fun_l4_n593(x)
+ end
+end
+
+def fun_l3_n657(x)
+ if (x < 1)
+ fun_l4_n308(x)
+ else
+ fun_l4_n547(x)
+ end
+end
+
+def fun_l3_n658(x)
+ if (x < 1)
+ fun_l4_n780(x)
+ else
+ fun_l4_n174(x)
+ end
+end
+
+def fun_l3_n659(x)
+ if (x < 1)
+ fun_l4_n54(x)
+ else
+ fun_l4_n681(x)
+ end
+end
+
+def fun_l3_n660(x)
+ if (x < 1)
+ fun_l4_n279(x)
+ else
+ fun_l4_n282(x)
+ end
+end
+
+def fun_l3_n661(x)
+ if (x < 1)
+ fun_l4_n953(x)
+ else
+ fun_l4_n832(x)
+ end
+end
+
+def fun_l3_n662(x)
+ if (x < 1)
+ fun_l4_n525(x)
+ else
+ fun_l4_n850(x)
+ end
+end
+
+def fun_l3_n663(x)
+ if (x < 1)
+ fun_l4_n606(x)
+ else
+ fun_l4_n167(x)
+ end
+end
+
+def fun_l3_n664(x)
+ if (x < 1)
+ fun_l4_n941(x)
+ else
+ fun_l4_n591(x)
+ end
+end
+
+def fun_l3_n665(x)
+ if (x < 1)
+ fun_l4_n781(x)
+ else
+ fun_l4_n162(x)
+ end
+end
+
+def fun_l3_n666(x)
+ if (x < 1)
+ fun_l4_n848(x)
+ else
+ fun_l4_n133(x)
+ end
+end
+
+def fun_l3_n667(x)
+ if (x < 1)
+ fun_l4_n341(x)
+ else
+ fun_l4_n212(x)
+ end
+end
+
+def fun_l3_n668(x)
+ if (x < 1)
+ fun_l4_n320(x)
+ else
+ fun_l4_n131(x)
+ end
+end
+
+def fun_l3_n669(x)
+ if (x < 1)
+ fun_l4_n18(x)
+ else
+ fun_l4_n385(x)
+ end
+end
+
+def fun_l3_n670(x)
+ if (x < 1)
+ fun_l4_n917(x)
+ else
+ fun_l4_n988(x)
+ end
+end
+
+def fun_l3_n671(x)
+ if (x < 1)
+ fun_l4_n50(x)
+ else
+ fun_l4_n27(x)
+ end
+end
+
+def fun_l3_n672(x)
+ if (x < 1)
+ fun_l4_n277(x)
+ else
+ fun_l4_n238(x)
+ end
+end
+
+def fun_l3_n673(x)
+ if (x < 1)
+ fun_l4_n303(x)
+ else
+ fun_l4_n643(x)
+ end
+end
+
+def fun_l3_n674(x)
+ if (x < 1)
+ fun_l4_n794(x)
+ else
+ fun_l4_n906(x)
+ end
+end
+
+def fun_l3_n675(x)
+ if (x < 1)
+ fun_l4_n688(x)
+ else
+ fun_l4_n312(x)
+ end
+end
+
+def fun_l3_n676(x)
+ if (x < 1)
+ fun_l4_n833(x)
+ else
+ fun_l4_n900(x)
+ end
+end
+
+def fun_l3_n677(x)
+ if (x < 1)
+ fun_l4_n649(x)
+ else
+ fun_l4_n822(x)
+ end
+end
+
+def fun_l3_n678(x)
+ if (x < 1)
+ fun_l4_n958(x)
+ else
+ fun_l4_n939(x)
+ end
+end
+
+def fun_l3_n679(x)
+ if (x < 1)
+ fun_l4_n949(x)
+ else
+ fun_l4_n887(x)
+ end
+end
+
+def fun_l3_n680(x)
+ if (x < 1)
+ fun_l4_n630(x)
+ else
+ fun_l4_n508(x)
+ end
+end
+
+def fun_l3_n681(x)
+ if (x < 1)
+ fun_l4_n553(x)
+ else
+ fun_l4_n940(x)
+ end
+end
+
+def fun_l3_n682(x)
+ if (x < 1)
+ fun_l4_n180(x)
+ else
+ fun_l4_n950(x)
+ end
+end
+
+def fun_l3_n683(x)
+ if (x < 1)
+ fun_l4_n898(x)
+ else
+ fun_l4_n374(x)
+ end
+end
+
+def fun_l3_n684(x)
+ if (x < 1)
+ fun_l4_n802(x)
+ else
+ fun_l4_n527(x)
+ end
+end
+
+def fun_l3_n685(x)
+ if (x < 1)
+ fun_l4_n751(x)
+ else
+ fun_l4_n967(x)
+ end
+end
+
+def fun_l3_n686(x)
+ if (x < 1)
+ fun_l4_n235(x)
+ else
+ fun_l4_n559(x)
+ end
+end
+
+def fun_l3_n687(x)
+ if (x < 1)
+ fun_l4_n694(x)
+ else
+ fun_l4_n819(x)
+ end
+end
+
+def fun_l3_n688(x)
+ if (x < 1)
+ fun_l4_n167(x)
+ else
+ fun_l4_n91(x)
+ end
+end
+
+def fun_l3_n689(x)
+ if (x < 1)
+ fun_l4_n692(x)
+ else
+ fun_l4_n422(x)
+ end
+end
+
+def fun_l3_n690(x)
+ if (x < 1)
+ fun_l4_n344(x)
+ else
+ fun_l4_n738(x)
+ end
+end
+
+def fun_l3_n691(x)
+ if (x < 1)
+ fun_l4_n394(x)
+ else
+ fun_l4_n603(x)
+ end
+end
+
+def fun_l3_n692(x)
+ if (x < 1)
+ fun_l4_n909(x)
+ else
+ fun_l4_n528(x)
+ end
+end
+
+def fun_l3_n693(x)
+ if (x < 1)
+ fun_l4_n180(x)
+ else
+ fun_l4_n22(x)
+ end
+end
+
+def fun_l3_n694(x)
+ if (x < 1)
+ fun_l4_n6(x)
+ else
+ fun_l4_n777(x)
+ end
+end
+
+def fun_l3_n695(x)
+ if (x < 1)
+ fun_l4_n327(x)
+ else
+ fun_l4_n948(x)
+ end
+end
+
+def fun_l3_n696(x)
+ if (x < 1)
+ fun_l4_n748(x)
+ else
+ fun_l4_n706(x)
+ end
+end
+
+def fun_l3_n697(x)
+ if (x < 1)
+ fun_l4_n720(x)
+ else
+ fun_l4_n693(x)
+ end
+end
+
+def fun_l3_n698(x)
+ if (x < 1)
+ fun_l4_n282(x)
+ else
+ fun_l4_n755(x)
+ end
+end
+
+def fun_l3_n699(x)
+ if (x < 1)
+ fun_l4_n975(x)
+ else
+ fun_l4_n415(x)
+ end
+end
+
+def fun_l3_n700(x)
+ if (x < 1)
+ fun_l4_n684(x)
+ else
+ fun_l4_n738(x)
+ end
+end
+
+def fun_l3_n701(x)
+ if (x < 1)
+ fun_l4_n58(x)
+ else
+ fun_l4_n892(x)
+ end
+end
+
+def fun_l3_n702(x)
+ if (x < 1)
+ fun_l4_n366(x)
+ else
+ fun_l4_n189(x)
+ end
+end
+
+def fun_l3_n703(x)
+ if (x < 1)
+ fun_l4_n422(x)
+ else
+ fun_l4_n535(x)
+ end
+end
+
+def fun_l3_n704(x)
+ if (x < 1)
+ fun_l4_n886(x)
+ else
+ fun_l4_n223(x)
+ end
+end
+
+def fun_l3_n705(x)
+ if (x < 1)
+ fun_l4_n57(x)
+ else
+ fun_l4_n62(x)
+ end
+end
+
+def fun_l3_n706(x)
+ if (x < 1)
+ fun_l4_n629(x)
+ else
+ fun_l4_n479(x)
+ end
+end
+
+def fun_l3_n707(x)
+ if (x < 1)
+ fun_l4_n477(x)
+ else
+ fun_l4_n302(x)
+ end
+end
+
+def fun_l3_n708(x)
+ if (x < 1)
+ fun_l4_n790(x)
+ else
+ fun_l4_n262(x)
+ end
+end
+
+def fun_l3_n709(x)
+ if (x < 1)
+ fun_l4_n772(x)
+ else
+ fun_l4_n995(x)
+ end
+end
+
+def fun_l3_n710(x)
+ if (x < 1)
+ fun_l4_n549(x)
+ else
+ fun_l4_n651(x)
+ end
+end
+
+def fun_l3_n711(x)
+ if (x < 1)
+ fun_l4_n77(x)
+ else
+ fun_l4_n555(x)
+ end
+end
+
+def fun_l3_n712(x)
+ if (x < 1)
+ fun_l4_n515(x)
+ else
+ fun_l4_n734(x)
+ end
+end
+
+def fun_l3_n713(x)
+ if (x < 1)
+ fun_l4_n778(x)
+ else
+ fun_l4_n884(x)
+ end
+end
+
+def fun_l3_n714(x)
+ if (x < 1)
+ fun_l4_n235(x)
+ else
+ fun_l4_n637(x)
+ end
+end
+
+def fun_l3_n715(x)
+ if (x < 1)
+ fun_l4_n516(x)
+ else
+ fun_l4_n249(x)
+ end
+end
+
+def fun_l3_n716(x)
+ if (x < 1)
+ fun_l4_n44(x)
+ else
+ fun_l4_n307(x)
+ end
+end
+
+def fun_l3_n717(x)
+ if (x < 1)
+ fun_l4_n482(x)
+ else
+ fun_l4_n4(x)
+ end
+end
+
+def fun_l3_n718(x)
+ if (x < 1)
+ fun_l4_n195(x)
+ else
+ fun_l4_n410(x)
+ end
+end
+
+def fun_l3_n719(x)
+ if (x < 1)
+ fun_l4_n862(x)
+ else
+ fun_l4_n91(x)
+ end
+end
+
+def fun_l3_n720(x)
+ if (x < 1)
+ fun_l4_n591(x)
+ else
+ fun_l4_n391(x)
+ end
+end
+
+def fun_l3_n721(x)
+ if (x < 1)
+ fun_l4_n776(x)
+ else
+ fun_l4_n129(x)
+ end
+end
+
+def fun_l3_n722(x)
+ if (x < 1)
+ fun_l4_n857(x)
+ else
+ fun_l4_n160(x)
+ end
+end
+
+def fun_l3_n723(x)
+ if (x < 1)
+ fun_l4_n772(x)
+ else
+ fun_l4_n567(x)
+ end
+end
+
+def fun_l3_n724(x)
+ if (x < 1)
+ fun_l4_n582(x)
+ else
+ fun_l4_n100(x)
+ end
+end
+
+def fun_l3_n725(x)
+ if (x < 1)
+ fun_l4_n962(x)
+ else
+ fun_l4_n846(x)
+ end
+end
+
+def fun_l3_n726(x)
+ if (x < 1)
+ fun_l4_n892(x)
+ else
+ fun_l4_n334(x)
+ end
+end
+
+def fun_l3_n727(x)
+ if (x < 1)
+ fun_l4_n17(x)
+ else
+ fun_l4_n690(x)
+ end
+end
+
+def fun_l3_n728(x)
+ if (x < 1)
+ fun_l4_n453(x)
+ else
+ fun_l4_n234(x)
+ end
+end
+
+def fun_l3_n729(x)
+ if (x < 1)
+ fun_l4_n562(x)
+ else
+ fun_l4_n139(x)
+ end
+end
+
+def fun_l3_n730(x)
+ if (x < 1)
+ fun_l4_n87(x)
+ else
+ fun_l4_n513(x)
+ end
+end
+
+def fun_l3_n731(x)
+ if (x < 1)
+ fun_l4_n565(x)
+ else
+ fun_l4_n289(x)
+ end
+end
+
+def fun_l3_n732(x)
+ if (x < 1)
+ fun_l4_n821(x)
+ else
+ fun_l4_n290(x)
+ end
+end
+
+def fun_l3_n733(x)
+ if (x < 1)
+ fun_l4_n464(x)
+ else
+ fun_l4_n929(x)
+ end
+end
+
+def fun_l3_n734(x)
+ if (x < 1)
+ fun_l4_n617(x)
+ else
+ fun_l4_n230(x)
+ end
+end
+
+def fun_l3_n735(x)
+ if (x < 1)
+ fun_l4_n942(x)
+ else
+ fun_l4_n770(x)
+ end
+end
+
+def fun_l3_n736(x)
+ if (x < 1)
+ fun_l4_n826(x)
+ else
+ fun_l4_n414(x)
+ end
+end
+
+def fun_l3_n737(x)
+ if (x < 1)
+ fun_l4_n47(x)
+ else
+ fun_l4_n357(x)
+ end
+end
+
+def fun_l3_n738(x)
+ if (x < 1)
+ fun_l4_n582(x)
+ else
+ fun_l4_n652(x)
+ end
+end
+
+def fun_l3_n739(x)
+ if (x < 1)
+ fun_l4_n641(x)
+ else
+ fun_l4_n479(x)
+ end
+end
+
+def fun_l3_n740(x)
+ if (x < 1)
+ fun_l4_n11(x)
+ else
+ fun_l4_n965(x)
+ end
+end
+
+def fun_l3_n741(x)
+ if (x < 1)
+ fun_l4_n402(x)
+ else
+ fun_l4_n512(x)
+ end
+end
+
+def fun_l3_n742(x)
+ if (x < 1)
+ fun_l4_n300(x)
+ else
+ fun_l4_n942(x)
+ end
+end
+
+def fun_l3_n743(x)
+ if (x < 1)
+ fun_l4_n770(x)
+ else
+ fun_l4_n690(x)
+ end
+end
+
+def fun_l3_n744(x)
+ if (x < 1)
+ fun_l4_n575(x)
+ else
+ fun_l4_n158(x)
+ end
+end
+
+def fun_l3_n745(x)
+ if (x < 1)
+ fun_l4_n760(x)
+ else
+ fun_l4_n552(x)
+ end
+end
+
+def fun_l3_n746(x)
+ if (x < 1)
+ fun_l4_n887(x)
+ else
+ fun_l4_n319(x)
+ end
+end
+
+def fun_l3_n747(x)
+ if (x < 1)
+ fun_l4_n967(x)
+ else
+ fun_l4_n561(x)
+ end
+end
+
+def fun_l3_n748(x)
+ if (x < 1)
+ fun_l4_n4(x)
+ else
+ fun_l4_n766(x)
+ end
+end
+
+def fun_l3_n749(x)
+ if (x < 1)
+ fun_l4_n993(x)
+ else
+ fun_l4_n991(x)
+ end
+end
+
+def fun_l3_n750(x)
+ if (x < 1)
+ fun_l4_n890(x)
+ else
+ fun_l4_n663(x)
+ end
+end
+
+def fun_l3_n751(x)
+ if (x < 1)
+ fun_l4_n267(x)
+ else
+ fun_l4_n449(x)
+ end
+end
+
+def fun_l3_n752(x)
+ if (x < 1)
+ fun_l4_n540(x)
+ else
+ fun_l4_n226(x)
+ end
+end
+
+def fun_l3_n753(x)
+ if (x < 1)
+ fun_l4_n893(x)
+ else
+ fun_l4_n116(x)
+ end
+end
+
+def fun_l3_n754(x)
+ if (x < 1)
+ fun_l4_n570(x)
+ else
+ fun_l4_n192(x)
+ end
+end
+
+def fun_l3_n755(x)
+ if (x < 1)
+ fun_l4_n712(x)
+ else
+ fun_l4_n81(x)
+ end
+end
+
+def fun_l3_n756(x)
+ if (x < 1)
+ fun_l4_n833(x)
+ else
+ fun_l4_n24(x)
+ end
+end
+
+def fun_l3_n757(x)
+ if (x < 1)
+ fun_l4_n740(x)
+ else
+ fun_l4_n803(x)
+ end
+end
+
+def fun_l3_n758(x)
+ if (x < 1)
+ fun_l4_n552(x)
+ else
+ fun_l4_n551(x)
+ end
+end
+
+def fun_l3_n759(x)
+ if (x < 1)
+ fun_l4_n570(x)
+ else
+ fun_l4_n677(x)
+ end
+end
+
+def fun_l3_n760(x)
+ if (x < 1)
+ fun_l4_n243(x)
+ else
+ fun_l4_n677(x)
+ end
+end
+
+def fun_l3_n761(x)
+ if (x < 1)
+ fun_l4_n65(x)
+ else
+ fun_l4_n660(x)
+ end
+end
+
+def fun_l3_n762(x)
+ if (x < 1)
+ fun_l4_n989(x)
+ else
+ fun_l4_n761(x)
+ end
+end
+
+def fun_l3_n763(x)
+ if (x < 1)
+ fun_l4_n971(x)
+ else
+ fun_l4_n280(x)
+ end
+end
+
+def fun_l3_n764(x)
+ if (x < 1)
+ fun_l4_n441(x)
+ else
+ fun_l4_n506(x)
+ end
+end
+
+def fun_l3_n765(x)
+ if (x < 1)
+ fun_l4_n577(x)
+ else
+ fun_l4_n586(x)
+ end
+end
+
+def fun_l3_n766(x)
+ if (x < 1)
+ fun_l4_n385(x)
+ else
+ fun_l4_n60(x)
+ end
+end
+
+def fun_l3_n767(x)
+ if (x < 1)
+ fun_l4_n380(x)
+ else
+ fun_l4_n32(x)
+ end
+end
+
+def fun_l3_n768(x)
+ if (x < 1)
+ fun_l4_n797(x)
+ else
+ fun_l4_n437(x)
+ end
+end
+
+def fun_l3_n769(x)
+ if (x < 1)
+ fun_l4_n615(x)
+ else
+ fun_l4_n369(x)
+ end
+end
+
+def fun_l3_n770(x)
+ if (x < 1)
+ fun_l4_n779(x)
+ else
+ fun_l4_n194(x)
+ end
+end
+
+def fun_l3_n771(x)
+ if (x < 1)
+ fun_l4_n287(x)
+ else
+ fun_l4_n819(x)
+ end
+end
+
+def fun_l3_n772(x)
+ if (x < 1)
+ fun_l4_n405(x)
+ else
+ fun_l4_n738(x)
+ end
+end
+
+def fun_l3_n773(x)
+ if (x < 1)
+ fun_l4_n706(x)
+ else
+ fun_l4_n174(x)
+ end
+end
+
+def fun_l3_n774(x)
+ if (x < 1)
+ fun_l4_n690(x)
+ else
+ fun_l4_n841(x)
+ end
+end
+
+def fun_l3_n775(x)
+ if (x < 1)
+ fun_l4_n256(x)
+ else
+ fun_l4_n179(x)
+ end
+end
+
+def fun_l3_n776(x)
+ if (x < 1)
+ fun_l4_n342(x)
+ else
+ fun_l4_n359(x)
+ end
+end
+
+def fun_l3_n777(x)
+ if (x < 1)
+ fun_l4_n371(x)
+ else
+ fun_l4_n680(x)
+ end
+end
+
+def fun_l3_n778(x)
+ if (x < 1)
+ fun_l4_n996(x)
+ else
+ fun_l4_n343(x)
+ end
+end
+
+def fun_l3_n779(x)
+ if (x < 1)
+ fun_l4_n96(x)
+ else
+ fun_l4_n893(x)
+ end
+end
+
+def fun_l3_n780(x)
+ if (x < 1)
+ fun_l4_n706(x)
+ else
+ fun_l4_n140(x)
+ end
+end
+
+def fun_l3_n781(x)
+ if (x < 1)
+ fun_l4_n349(x)
+ else
+ fun_l4_n317(x)
+ end
+end
+
+def fun_l3_n782(x)
+ if (x < 1)
+ fun_l4_n491(x)
+ else
+ fun_l4_n616(x)
+ end
+end
+
+def fun_l3_n783(x)
+ if (x < 1)
+ fun_l4_n227(x)
+ else
+ fun_l4_n356(x)
+ end
+end
+
+def fun_l3_n784(x)
+ if (x < 1)
+ fun_l4_n387(x)
+ else
+ fun_l4_n532(x)
+ end
+end
+
+def fun_l3_n785(x)
+ if (x < 1)
+ fun_l4_n648(x)
+ else
+ fun_l4_n34(x)
+ end
+end
+
+def fun_l3_n786(x)
+ if (x < 1)
+ fun_l4_n562(x)
+ else
+ fun_l4_n476(x)
+ end
+end
+
+def fun_l3_n787(x)
+ if (x < 1)
+ fun_l4_n162(x)
+ else
+ fun_l4_n384(x)
+ end
+end
+
+def fun_l3_n788(x)
+ if (x < 1)
+ fun_l4_n221(x)
+ else
+ fun_l4_n617(x)
+ end
+end
+
+def fun_l3_n789(x)
+ if (x < 1)
+ fun_l4_n670(x)
+ else
+ fun_l4_n350(x)
+ end
+end
+
+def fun_l3_n790(x)
+ if (x < 1)
+ fun_l4_n202(x)
+ else
+ fun_l4_n112(x)
+ end
+end
+
+def fun_l3_n791(x)
+ if (x < 1)
+ fun_l4_n390(x)
+ else
+ fun_l4_n868(x)
+ end
+end
+
+def fun_l3_n792(x)
+ if (x < 1)
+ fun_l4_n746(x)
+ else
+ fun_l4_n833(x)
+ end
+end
+
+def fun_l3_n793(x)
+ if (x < 1)
+ fun_l4_n471(x)
+ else
+ fun_l4_n60(x)
+ end
+end
+
+def fun_l3_n794(x)
+ if (x < 1)
+ fun_l4_n583(x)
+ else
+ fun_l4_n470(x)
+ end
+end
+
+def fun_l3_n795(x)
+ if (x < 1)
+ fun_l4_n567(x)
+ else
+ fun_l4_n218(x)
+ end
+end
+
+def fun_l3_n796(x)
+ if (x < 1)
+ fun_l4_n140(x)
+ else
+ fun_l4_n690(x)
+ end
+end
+
+def fun_l3_n797(x)
+ if (x < 1)
+ fun_l4_n190(x)
+ else
+ fun_l4_n437(x)
+ end
+end
+
+def fun_l3_n798(x)
+ if (x < 1)
+ fun_l4_n684(x)
+ else
+ fun_l4_n843(x)
+ end
+end
+
+def fun_l3_n799(x)
+ if (x < 1)
+ fun_l4_n825(x)
+ else
+ fun_l4_n354(x)
+ end
+end
+
+def fun_l3_n800(x)
+ if (x < 1)
+ fun_l4_n711(x)
+ else
+ fun_l4_n606(x)
+ end
+end
+
+def fun_l3_n801(x)
+ if (x < 1)
+ fun_l4_n756(x)
+ else
+ fun_l4_n464(x)
+ end
+end
+
+def fun_l3_n802(x)
+ if (x < 1)
+ fun_l4_n143(x)
+ else
+ fun_l4_n962(x)
+ end
+end
+
+def fun_l3_n803(x)
+ if (x < 1)
+ fun_l4_n223(x)
+ else
+ fun_l4_n293(x)
+ end
+end
+
+def fun_l3_n804(x)
+ if (x < 1)
+ fun_l4_n832(x)
+ else
+ fun_l4_n632(x)
+ end
+end
+
+def fun_l3_n805(x)
+ if (x < 1)
+ fun_l4_n649(x)
+ else
+ fun_l4_n259(x)
+ end
+end
+
+def fun_l3_n806(x)
+ if (x < 1)
+ fun_l4_n617(x)
+ else
+ fun_l4_n993(x)
+ end
+end
+
+def fun_l3_n807(x)
+ if (x < 1)
+ fun_l4_n257(x)
+ else
+ fun_l4_n712(x)
+ end
+end
+
+def fun_l3_n808(x)
+ if (x < 1)
+ fun_l4_n632(x)
+ else
+ fun_l4_n563(x)
+ end
+end
+
+def fun_l3_n809(x)
+ if (x < 1)
+ fun_l4_n154(x)
+ else
+ fun_l4_n561(x)
+ end
+end
+
+def fun_l3_n810(x)
+ if (x < 1)
+ fun_l4_n411(x)
+ else
+ fun_l4_n42(x)
+ end
+end
+
+def fun_l3_n811(x)
+ if (x < 1)
+ fun_l4_n560(x)
+ else
+ fun_l4_n531(x)
+ end
+end
+
+def fun_l3_n812(x)
+ if (x < 1)
+ fun_l4_n759(x)
+ else
+ fun_l4_n588(x)
+ end
+end
+
+def fun_l3_n813(x)
+ if (x < 1)
+ fun_l4_n311(x)
+ else
+ fun_l4_n813(x)
+ end
+end
+
+def fun_l3_n814(x)
+ if (x < 1)
+ fun_l4_n521(x)
+ else
+ fun_l4_n877(x)
+ end
+end
+
+def fun_l3_n815(x)
+ if (x < 1)
+ fun_l4_n578(x)
+ else
+ fun_l4_n37(x)
+ end
+end
+
+def fun_l3_n816(x)
+ if (x < 1)
+ fun_l4_n337(x)
+ else
+ fun_l4_n562(x)
+ end
+end
+
+def fun_l3_n817(x)
+ if (x < 1)
+ fun_l4_n523(x)
+ else
+ fun_l4_n445(x)
+ end
+end
+
+def fun_l3_n818(x)
+ if (x < 1)
+ fun_l4_n256(x)
+ else
+ fun_l4_n551(x)
+ end
+end
+
+def fun_l3_n819(x)
+ if (x < 1)
+ fun_l4_n718(x)
+ else
+ fun_l4_n463(x)
+ end
+end
+
+def fun_l3_n820(x)
+ if (x < 1)
+ fun_l4_n780(x)
+ else
+ fun_l4_n103(x)
+ end
+end
+
+def fun_l3_n821(x)
+ if (x < 1)
+ fun_l4_n729(x)
+ else
+ fun_l4_n331(x)
+ end
+end
+
+def fun_l3_n822(x)
+ if (x < 1)
+ fun_l4_n84(x)
+ else
+ fun_l4_n176(x)
+ end
+end
+
+def fun_l3_n823(x)
+ if (x < 1)
+ fun_l4_n231(x)
+ else
+ fun_l4_n681(x)
+ end
+end
+
+def fun_l3_n824(x)
+ if (x < 1)
+ fun_l4_n675(x)
+ else
+ fun_l4_n837(x)
+ end
+end
+
+def fun_l3_n825(x)
+ if (x < 1)
+ fun_l4_n140(x)
+ else
+ fun_l4_n932(x)
+ end
+end
+
+def fun_l3_n826(x)
+ if (x < 1)
+ fun_l4_n268(x)
+ else
+ fun_l4_n908(x)
+ end
+end
+
+def fun_l3_n827(x)
+ if (x < 1)
+ fun_l4_n959(x)
+ else
+ fun_l4_n643(x)
+ end
+end
+
+def fun_l3_n828(x)
+ if (x < 1)
+ fun_l4_n290(x)
+ else
+ fun_l4_n718(x)
+ end
+end
+
+def fun_l3_n829(x)
+ if (x < 1)
+ fun_l4_n430(x)
+ else
+ fun_l4_n114(x)
+ end
+end
+
+def fun_l3_n830(x)
+ if (x < 1)
+ fun_l4_n817(x)
+ else
+ fun_l4_n296(x)
+ end
+end
+
+def fun_l3_n831(x)
+ if (x < 1)
+ fun_l4_n586(x)
+ else
+ fun_l4_n345(x)
+ end
+end
+
+def fun_l3_n832(x)
+ if (x < 1)
+ fun_l4_n321(x)
+ else
+ fun_l4_n805(x)
+ end
+end
+
+def fun_l3_n833(x)
+ if (x < 1)
+ fun_l4_n57(x)
+ else
+ fun_l4_n373(x)
+ end
+end
+
+def fun_l3_n834(x)
+ if (x < 1)
+ fun_l4_n344(x)
+ else
+ fun_l4_n654(x)
+ end
+end
+
+def fun_l3_n835(x)
+ if (x < 1)
+ fun_l4_n761(x)
+ else
+ fun_l4_n295(x)
+ end
+end
+
+def fun_l3_n836(x)
+ if (x < 1)
+ fun_l4_n169(x)
+ else
+ fun_l4_n184(x)
+ end
+end
+
+def fun_l3_n837(x)
+ if (x < 1)
+ fun_l4_n482(x)
+ else
+ fun_l4_n127(x)
+ end
+end
+
+def fun_l3_n838(x)
+ if (x < 1)
+ fun_l4_n260(x)
+ else
+ fun_l4_n662(x)
+ end
+end
+
+def fun_l3_n839(x)
+ if (x < 1)
+ fun_l4_n548(x)
+ else
+ fun_l4_n197(x)
+ end
+end
+
+def fun_l3_n840(x)
+ if (x < 1)
+ fun_l4_n96(x)
+ else
+ fun_l4_n17(x)
+ end
+end
+
+def fun_l3_n841(x)
+ if (x < 1)
+ fun_l4_n946(x)
+ else
+ fun_l4_n447(x)
+ end
+end
+
+def fun_l3_n842(x)
+ if (x < 1)
+ fun_l4_n66(x)
+ else
+ fun_l4_n110(x)
+ end
+end
+
+def fun_l3_n843(x)
+ if (x < 1)
+ fun_l4_n574(x)
+ else
+ fun_l4_n887(x)
+ end
+end
+
+def fun_l3_n844(x)
+ if (x < 1)
+ fun_l4_n912(x)
+ else
+ fun_l4_n465(x)
+ end
+end
+
+def fun_l3_n845(x)
+ if (x < 1)
+ fun_l4_n401(x)
+ else
+ fun_l4_n903(x)
+ end
+end
+
+def fun_l3_n846(x)
+ if (x < 1)
+ fun_l4_n929(x)
+ else
+ fun_l4_n238(x)
+ end
+end
+
+def fun_l3_n847(x)
+ if (x < 1)
+ fun_l4_n579(x)
+ else
+ fun_l4_n924(x)
+ end
+end
+
+def fun_l3_n848(x)
+ if (x < 1)
+ fun_l4_n697(x)
+ else
+ fun_l4_n157(x)
+ end
+end
+
+def fun_l3_n849(x)
+ if (x < 1)
+ fun_l4_n608(x)
+ else
+ fun_l4_n426(x)
+ end
+end
+
+def fun_l3_n850(x)
+ if (x < 1)
+ fun_l4_n710(x)
+ else
+ fun_l4_n360(x)
+ end
+end
+
+def fun_l3_n851(x)
+ if (x < 1)
+ fun_l4_n558(x)
+ else
+ fun_l4_n152(x)
+ end
+end
+
+def fun_l3_n852(x)
+ if (x < 1)
+ fun_l4_n276(x)
+ else
+ fun_l4_n895(x)
+ end
+end
+
+def fun_l3_n853(x)
+ if (x < 1)
+ fun_l4_n945(x)
+ else
+ fun_l4_n251(x)
+ end
+end
+
+def fun_l3_n854(x)
+ if (x < 1)
+ fun_l4_n131(x)
+ else
+ fun_l4_n677(x)
+ end
+end
+
+def fun_l3_n855(x)
+ if (x < 1)
+ fun_l4_n493(x)
+ else
+ fun_l4_n631(x)
+ end
+end
+
+def fun_l3_n856(x)
+ if (x < 1)
+ fun_l4_n608(x)
+ else
+ fun_l4_n556(x)
+ end
+end
+
+def fun_l3_n857(x)
+ if (x < 1)
+ fun_l4_n808(x)
+ else
+ fun_l4_n672(x)
+ end
+end
+
+def fun_l3_n858(x)
+ if (x < 1)
+ fun_l4_n407(x)
+ else
+ fun_l4_n648(x)
+ end
+end
+
+def fun_l3_n859(x)
+ if (x < 1)
+ fun_l4_n217(x)
+ else
+ fun_l4_n763(x)
+ end
+end
+
+def fun_l3_n860(x)
+ if (x < 1)
+ fun_l4_n941(x)
+ else
+ fun_l4_n159(x)
+ end
+end
+
+def fun_l3_n861(x)
+ if (x < 1)
+ fun_l4_n364(x)
+ else
+ fun_l4_n241(x)
+ end
+end
+
+def fun_l3_n862(x)
+ if (x < 1)
+ fun_l4_n915(x)
+ else
+ fun_l4_n729(x)
+ end
+end
+
+def fun_l3_n863(x)
+ if (x < 1)
+ fun_l4_n137(x)
+ else
+ fun_l4_n317(x)
+ end
+end
+
+def fun_l3_n864(x)
+ if (x < 1)
+ fun_l4_n818(x)
+ else
+ fun_l4_n782(x)
+ end
+end
+
+def fun_l3_n865(x)
+ if (x < 1)
+ fun_l4_n577(x)
+ else
+ fun_l4_n418(x)
+ end
+end
+
+def fun_l3_n866(x)
+ if (x < 1)
+ fun_l4_n882(x)
+ else
+ fun_l4_n7(x)
+ end
+end
+
+def fun_l3_n867(x)
+ if (x < 1)
+ fun_l4_n238(x)
+ else
+ fun_l4_n944(x)
+ end
+end
+
+def fun_l3_n868(x)
+ if (x < 1)
+ fun_l4_n105(x)
+ else
+ fun_l4_n465(x)
+ end
+end
+
+def fun_l3_n869(x)
+ if (x < 1)
+ fun_l4_n841(x)
+ else
+ fun_l4_n262(x)
+ end
+end
+
+def fun_l3_n870(x)
+ if (x < 1)
+ fun_l4_n32(x)
+ else
+ fun_l4_n992(x)
+ end
+end
+
+def fun_l3_n871(x)
+ if (x < 1)
+ fun_l4_n431(x)
+ else
+ fun_l4_n391(x)
+ end
+end
+
+def fun_l3_n872(x)
+ if (x < 1)
+ fun_l4_n829(x)
+ else
+ fun_l4_n879(x)
+ end
+end
+
+def fun_l3_n873(x)
+ if (x < 1)
+ fun_l4_n617(x)
+ else
+ fun_l4_n657(x)
+ end
+end
+
+def fun_l3_n874(x)
+ if (x < 1)
+ fun_l4_n529(x)
+ else
+ fun_l4_n717(x)
+ end
+end
+
+def fun_l3_n875(x)
+ if (x < 1)
+ fun_l4_n928(x)
+ else
+ fun_l4_n476(x)
+ end
+end
+
+def fun_l3_n876(x)
+ if (x < 1)
+ fun_l4_n388(x)
+ else
+ fun_l4_n949(x)
+ end
+end
+
+def fun_l3_n877(x)
+ if (x < 1)
+ fun_l4_n826(x)
+ else
+ fun_l4_n560(x)
+ end
+end
+
+def fun_l3_n878(x)
+ if (x < 1)
+ fun_l4_n863(x)
+ else
+ fun_l4_n122(x)
+ end
+end
+
+def fun_l3_n879(x)
+ if (x < 1)
+ fun_l4_n964(x)
+ else
+ fun_l4_n793(x)
+ end
+end
+
+def fun_l3_n880(x)
+ if (x < 1)
+ fun_l4_n141(x)
+ else
+ fun_l4_n132(x)
+ end
+end
+
+def fun_l3_n881(x)
+ if (x < 1)
+ fun_l4_n735(x)
+ else
+ fun_l4_n592(x)
+ end
+end
+
+def fun_l3_n882(x)
+ if (x < 1)
+ fun_l4_n585(x)
+ else
+ fun_l4_n341(x)
+ end
+end
+
+def fun_l3_n883(x)
+ if (x < 1)
+ fun_l4_n843(x)
+ else
+ fun_l4_n692(x)
+ end
+end
+
+def fun_l3_n884(x)
+ if (x < 1)
+ fun_l4_n360(x)
+ else
+ fun_l4_n833(x)
+ end
+end
+
+def fun_l3_n885(x)
+ if (x < 1)
+ fun_l4_n31(x)
+ else
+ fun_l4_n62(x)
+ end
+end
+
+def fun_l3_n886(x)
+ if (x < 1)
+ fun_l4_n756(x)
+ else
+ fun_l4_n699(x)
+ end
+end
+
+def fun_l3_n887(x)
+ if (x < 1)
+ fun_l4_n71(x)
+ else
+ fun_l4_n256(x)
+ end
+end
+
+def fun_l3_n888(x)
+ if (x < 1)
+ fun_l4_n406(x)
+ else
+ fun_l4_n493(x)
+ end
+end
+
+def fun_l3_n889(x)
+ if (x < 1)
+ fun_l4_n489(x)
+ else
+ fun_l4_n874(x)
+ end
+end
+
+def fun_l3_n890(x)
+ if (x < 1)
+ fun_l4_n365(x)
+ else
+ fun_l4_n910(x)
+ end
+end
+
+def fun_l3_n891(x)
+ if (x < 1)
+ fun_l4_n262(x)
+ else
+ fun_l4_n665(x)
+ end
+end
+
+def fun_l3_n892(x)
+ if (x < 1)
+ fun_l4_n274(x)
+ else
+ fun_l4_n849(x)
+ end
+end
+
+def fun_l3_n893(x)
+ if (x < 1)
+ fun_l4_n880(x)
+ else
+ fun_l4_n739(x)
+ end
+end
+
+def fun_l3_n894(x)
+ if (x < 1)
+ fun_l4_n504(x)
+ else
+ fun_l4_n128(x)
+ end
+end
+
+def fun_l3_n895(x)
+ if (x < 1)
+ fun_l4_n555(x)
+ else
+ fun_l4_n281(x)
+ end
+end
+
+def fun_l3_n896(x)
+ if (x < 1)
+ fun_l4_n270(x)
+ else
+ fun_l4_n225(x)
+ end
+end
+
+def fun_l3_n897(x)
+ if (x < 1)
+ fun_l4_n38(x)
+ else
+ fun_l4_n26(x)
+ end
+end
+
+def fun_l3_n898(x)
+ if (x < 1)
+ fun_l4_n944(x)
+ else
+ fun_l4_n217(x)
+ end
+end
+
+def fun_l3_n899(x)
+ if (x < 1)
+ fun_l4_n967(x)
+ else
+ fun_l4_n853(x)
+ end
+end
+
+def fun_l3_n900(x)
+ if (x < 1)
+ fun_l4_n686(x)
+ else
+ fun_l4_n760(x)
+ end
+end
+
+def fun_l3_n901(x)
+ if (x < 1)
+ fun_l4_n698(x)
+ else
+ fun_l4_n842(x)
+ end
+end
+
+def fun_l3_n902(x)
+ if (x < 1)
+ fun_l4_n344(x)
+ else
+ fun_l4_n701(x)
+ end
+end
+
+def fun_l3_n903(x)
+ if (x < 1)
+ fun_l4_n756(x)
+ else
+ fun_l4_n321(x)
+ end
+end
+
+def fun_l3_n904(x)
+ if (x < 1)
+ fun_l4_n504(x)
+ else
+ fun_l4_n228(x)
+ end
+end
+
+def fun_l3_n905(x)
+ if (x < 1)
+ fun_l4_n135(x)
+ else
+ fun_l4_n57(x)
+ end
+end
+
+def fun_l3_n906(x)
+ if (x < 1)
+ fun_l4_n279(x)
+ else
+ fun_l4_n912(x)
+ end
+end
+
+def fun_l3_n907(x)
+ if (x < 1)
+ fun_l4_n962(x)
+ else
+ fun_l4_n418(x)
+ end
+end
+
+def fun_l3_n908(x)
+ if (x < 1)
+ fun_l4_n972(x)
+ else
+ fun_l4_n14(x)
+ end
+end
+
+def fun_l3_n909(x)
+ if (x < 1)
+ fun_l4_n231(x)
+ else
+ fun_l4_n763(x)
+ end
+end
+
+def fun_l3_n910(x)
+ if (x < 1)
+ fun_l4_n859(x)
+ else
+ fun_l4_n243(x)
+ end
+end
+
+def fun_l3_n911(x)
+ if (x < 1)
+ fun_l4_n153(x)
+ else
+ fun_l4_n136(x)
+ end
+end
+
+def fun_l3_n912(x)
+ if (x < 1)
+ fun_l4_n325(x)
+ else
+ fun_l4_n113(x)
+ end
+end
+
+def fun_l3_n913(x)
+ if (x < 1)
+ fun_l4_n625(x)
+ else
+ fun_l4_n179(x)
+ end
+end
+
+def fun_l3_n914(x)
+ if (x < 1)
+ fun_l4_n54(x)
+ else
+ fun_l4_n88(x)
+ end
+end
+
+def fun_l3_n915(x)
+ if (x < 1)
+ fun_l4_n332(x)
+ else
+ fun_l4_n3(x)
+ end
+end
+
+def fun_l3_n916(x)
+ if (x < 1)
+ fun_l4_n748(x)
+ else
+ fun_l4_n21(x)
+ end
+end
+
+def fun_l3_n917(x)
+ if (x < 1)
+ fun_l4_n132(x)
+ else
+ fun_l4_n132(x)
+ end
+end
+
+def fun_l3_n918(x)
+ if (x < 1)
+ fun_l4_n295(x)
+ else
+ fun_l4_n429(x)
+ end
+end
+
+def fun_l3_n919(x)
+ if (x < 1)
+ fun_l4_n105(x)
+ else
+ fun_l4_n984(x)
+ end
+end
+
+def fun_l3_n920(x)
+ if (x < 1)
+ fun_l4_n319(x)
+ else
+ fun_l4_n304(x)
+ end
+end
+
+def fun_l3_n921(x)
+ if (x < 1)
+ fun_l4_n756(x)
+ else
+ fun_l4_n639(x)
+ end
+end
+
+def fun_l3_n922(x)
+ if (x < 1)
+ fun_l4_n678(x)
+ else
+ fun_l4_n209(x)
+ end
+end
+
+def fun_l3_n923(x)
+ if (x < 1)
+ fun_l4_n755(x)
+ else
+ fun_l4_n941(x)
+ end
+end
+
+def fun_l3_n924(x)
+ if (x < 1)
+ fun_l4_n311(x)
+ else
+ fun_l4_n272(x)
+ end
+end
+
+def fun_l3_n925(x)
+ if (x < 1)
+ fun_l4_n543(x)
+ else
+ fun_l4_n896(x)
+ end
+end
+
+def fun_l3_n926(x)
+ if (x < 1)
+ fun_l4_n994(x)
+ else
+ fun_l4_n671(x)
+ end
+end
+
+def fun_l3_n927(x)
+ if (x < 1)
+ fun_l4_n125(x)
+ else
+ fun_l4_n852(x)
+ end
+end
+
+def fun_l3_n928(x)
+ if (x < 1)
+ fun_l4_n483(x)
+ else
+ fun_l4_n350(x)
+ end
+end
+
+def fun_l3_n929(x)
+ if (x < 1)
+ fun_l4_n622(x)
+ else
+ fun_l4_n511(x)
+ end
+end
+
+def fun_l3_n930(x)
+ if (x < 1)
+ fun_l4_n134(x)
+ else
+ fun_l4_n288(x)
+ end
+end
+
+def fun_l3_n931(x)
+ if (x < 1)
+ fun_l4_n278(x)
+ else
+ fun_l4_n800(x)
+ end
+end
+
+def fun_l3_n932(x)
+ if (x < 1)
+ fun_l4_n139(x)
+ else
+ fun_l4_n966(x)
+ end
+end
+
+def fun_l3_n933(x)
+ if (x < 1)
+ fun_l4_n275(x)
+ else
+ fun_l4_n506(x)
+ end
+end
+
+def fun_l3_n934(x)
+ if (x < 1)
+ fun_l4_n503(x)
+ else
+ fun_l4_n204(x)
+ end
+end
+
+def fun_l3_n935(x)
+ if (x < 1)
+ fun_l4_n993(x)
+ else
+ fun_l4_n126(x)
+ end
+end
+
+def fun_l3_n936(x)
+ if (x < 1)
+ fun_l4_n704(x)
+ else
+ fun_l4_n462(x)
+ end
+end
+
+def fun_l3_n937(x)
+ if (x < 1)
+ fun_l4_n922(x)
+ else
+ fun_l4_n722(x)
+ end
+end
+
+def fun_l3_n938(x)
+ if (x < 1)
+ fun_l4_n937(x)
+ else
+ fun_l4_n195(x)
+ end
+end
+
+def fun_l3_n939(x)
+ if (x < 1)
+ fun_l4_n661(x)
+ else
+ fun_l4_n265(x)
+ end
+end
+
+def fun_l3_n940(x)
+ if (x < 1)
+ fun_l4_n423(x)
+ else
+ fun_l4_n230(x)
+ end
+end
+
+def fun_l3_n941(x)
+ if (x < 1)
+ fun_l4_n698(x)
+ else
+ fun_l4_n226(x)
+ end
+end
+
+def fun_l3_n942(x)
+ if (x < 1)
+ fun_l4_n501(x)
+ else
+ fun_l4_n816(x)
+ end
+end
+
+def fun_l3_n943(x)
+ if (x < 1)
+ fun_l4_n650(x)
+ else
+ fun_l4_n925(x)
+ end
+end
+
+def fun_l3_n944(x)
+ if (x < 1)
+ fun_l4_n829(x)
+ else
+ fun_l4_n647(x)
+ end
+end
+
+def fun_l3_n945(x)
+ if (x < 1)
+ fun_l4_n401(x)
+ else
+ fun_l4_n791(x)
+ end
+end
+
+def fun_l3_n946(x)
+ if (x < 1)
+ fun_l4_n551(x)
+ else
+ fun_l4_n104(x)
+ end
+end
+
+def fun_l3_n947(x)
+ if (x < 1)
+ fun_l4_n927(x)
+ else
+ fun_l4_n213(x)
+ end
+end
+
+def fun_l3_n948(x)
+ if (x < 1)
+ fun_l4_n912(x)
+ else
+ fun_l4_n609(x)
+ end
+end
+
+def fun_l3_n949(x)
+ if (x < 1)
+ fun_l4_n302(x)
+ else
+ fun_l4_n157(x)
+ end
+end
+
+def fun_l3_n950(x)
+ if (x < 1)
+ fun_l4_n955(x)
+ else
+ fun_l4_n492(x)
+ end
+end
+
+def fun_l3_n951(x)
+ if (x < 1)
+ fun_l4_n486(x)
+ else
+ fun_l4_n412(x)
+ end
+end
+
+def fun_l3_n952(x)
+ if (x < 1)
+ fun_l4_n850(x)
+ else
+ fun_l4_n885(x)
+ end
+end
+
+def fun_l3_n953(x)
+ if (x < 1)
+ fun_l4_n84(x)
+ else
+ fun_l4_n622(x)
+ end
+end
+
+def fun_l3_n954(x)
+ if (x < 1)
+ fun_l4_n235(x)
+ else
+ fun_l4_n67(x)
+ end
+end
+
+def fun_l3_n955(x)
+ if (x < 1)
+ fun_l4_n347(x)
+ else
+ fun_l4_n757(x)
+ end
+end
+
+def fun_l3_n956(x)
+ if (x < 1)
+ fun_l4_n825(x)
+ else
+ fun_l4_n10(x)
+ end
+end
+
+def fun_l3_n957(x)
+ if (x < 1)
+ fun_l4_n393(x)
+ else
+ fun_l4_n482(x)
+ end
+end
+
+def fun_l3_n958(x)
+ if (x < 1)
+ fun_l4_n0(x)
+ else
+ fun_l4_n556(x)
+ end
+end
+
+def fun_l3_n959(x)
+ if (x < 1)
+ fun_l4_n806(x)
+ else
+ fun_l4_n783(x)
+ end
+end
+
+def fun_l3_n960(x)
+ if (x < 1)
+ fun_l4_n964(x)
+ else
+ fun_l4_n843(x)
+ end
+end
+
+def fun_l3_n961(x)
+ if (x < 1)
+ fun_l4_n508(x)
+ else
+ fun_l4_n874(x)
+ end
+end
+
+def fun_l3_n962(x)
+ if (x < 1)
+ fun_l4_n15(x)
+ else
+ fun_l4_n197(x)
+ end
+end
+
+def fun_l3_n963(x)
+ if (x < 1)
+ fun_l4_n989(x)
+ else
+ fun_l4_n380(x)
+ end
+end
+
+def fun_l3_n964(x)
+ if (x < 1)
+ fun_l4_n315(x)
+ else
+ fun_l4_n196(x)
+ end
+end
+
+def fun_l3_n965(x)
+ if (x < 1)
+ fun_l4_n510(x)
+ else
+ fun_l4_n275(x)
+ end
+end
+
+def fun_l3_n966(x)
+ if (x < 1)
+ fun_l4_n725(x)
+ else
+ fun_l4_n94(x)
+ end
+end
+
+def fun_l3_n967(x)
+ if (x < 1)
+ fun_l4_n733(x)
+ else
+ fun_l4_n577(x)
+ end
+end
+
+def fun_l3_n968(x)
+ if (x < 1)
+ fun_l4_n280(x)
+ else
+ fun_l4_n702(x)
+ end
+end
+
+def fun_l3_n969(x)
+ if (x < 1)
+ fun_l4_n41(x)
+ else
+ fun_l4_n343(x)
+ end
+end
+
+def fun_l3_n970(x)
+ if (x < 1)
+ fun_l4_n102(x)
+ else
+ fun_l4_n785(x)
+ end
+end
+
+def fun_l3_n971(x)
+ if (x < 1)
+ fun_l4_n460(x)
+ else
+ fun_l4_n388(x)
+ end
+end
+
+def fun_l3_n972(x)
+ if (x < 1)
+ fun_l4_n31(x)
+ else
+ fun_l4_n421(x)
+ end
+end
+
+def fun_l3_n973(x)
+ if (x < 1)
+ fun_l4_n587(x)
+ else
+ fun_l4_n401(x)
+ end
+end
+
+def fun_l3_n974(x)
+ if (x < 1)
+ fun_l4_n593(x)
+ else
+ fun_l4_n418(x)
+ end
+end
+
+def fun_l3_n975(x)
+ if (x < 1)
+ fun_l4_n12(x)
+ else
+ fun_l4_n929(x)
+ end
+end
+
+def fun_l3_n976(x)
+ if (x < 1)
+ fun_l4_n516(x)
+ else
+ fun_l4_n402(x)
+ end
+end
+
+def fun_l3_n977(x)
+ if (x < 1)
+ fun_l4_n590(x)
+ else
+ fun_l4_n876(x)
+ end
+end
+
+def fun_l3_n978(x)
+ if (x < 1)
+ fun_l4_n505(x)
+ else
+ fun_l4_n94(x)
+ end
+end
+
+def fun_l3_n979(x)
+ if (x < 1)
+ fun_l4_n625(x)
+ else
+ fun_l4_n344(x)
+ end
+end
+
+def fun_l3_n980(x)
+ if (x < 1)
+ fun_l4_n128(x)
+ else
+ fun_l4_n454(x)
+ end
+end
+
+def fun_l3_n981(x)
+ if (x < 1)
+ fun_l4_n937(x)
+ else
+ fun_l4_n277(x)
+ end
+end
+
+def fun_l3_n982(x)
+ if (x < 1)
+ fun_l4_n237(x)
+ else
+ fun_l4_n113(x)
+ end
+end
+
+def fun_l3_n983(x)
+ if (x < 1)
+ fun_l4_n668(x)
+ else
+ fun_l4_n668(x)
+ end
+end
+
+def fun_l3_n984(x)
+ if (x < 1)
+ fun_l4_n783(x)
+ else
+ fun_l4_n771(x)
+ end
+end
+
+def fun_l3_n985(x)
+ if (x < 1)
+ fun_l4_n135(x)
+ else
+ fun_l4_n967(x)
+ end
+end
+
+def fun_l3_n986(x)
+ if (x < 1)
+ fun_l4_n29(x)
+ else
+ fun_l4_n313(x)
+ end
+end
+
+def fun_l3_n987(x)
+ if (x < 1)
+ fun_l4_n765(x)
+ else
+ fun_l4_n885(x)
+ end
+end
+
+def fun_l3_n988(x)
+ if (x < 1)
+ fun_l4_n242(x)
+ else
+ fun_l4_n622(x)
+ end
+end
+
+def fun_l3_n989(x)
+ if (x < 1)
+ fun_l4_n916(x)
+ else
+ fun_l4_n518(x)
+ end
+end
+
+def fun_l3_n990(x)
+ if (x < 1)
+ fun_l4_n523(x)
+ else
+ fun_l4_n468(x)
+ end
+end
+
+def fun_l3_n991(x)
+ if (x < 1)
+ fun_l4_n904(x)
+ else
+ fun_l4_n601(x)
+ end
+end
+
+def fun_l3_n992(x)
+ if (x < 1)
+ fun_l4_n437(x)
+ else
+ fun_l4_n77(x)
+ end
+end
+
+def fun_l3_n993(x)
+ if (x < 1)
+ fun_l4_n957(x)
+ else
+ fun_l4_n619(x)
+ end
+end
+
+def fun_l3_n994(x)
+ if (x < 1)
+ fun_l4_n540(x)
+ else
+ fun_l4_n108(x)
+ end
+end
+
+def fun_l3_n995(x)
+ if (x < 1)
+ fun_l4_n8(x)
+ else
+ fun_l4_n428(x)
+ end
+end
+
+def fun_l3_n996(x)
+ if (x < 1)
+ fun_l4_n53(x)
+ else
+ fun_l4_n278(x)
+ end
+end
+
+def fun_l3_n997(x)
+ if (x < 1)
+ fun_l4_n930(x)
+ else
+ fun_l4_n671(x)
+ end
+end
+
+def fun_l3_n998(x)
+ if (x < 1)
+ fun_l4_n574(x)
+ else
+ fun_l4_n879(x)
+ end
+end
+
+def fun_l3_n999(x)
+ if (x < 1)
+ fun_l4_n316(x)
+ else
+ fun_l4_n93(x)
+ end
+end
+
+def fun_l4_n0(x)
+ if (x < 1)
+ fun_l5_n524(x)
+ else
+ fun_l5_n284(x)
+ end
+end
+
+def fun_l4_n1(x)
+ if (x < 1)
+ fun_l5_n845(x)
+ else
+ fun_l5_n863(x)
+ end
+end
+
+def fun_l4_n2(x)
+ if (x < 1)
+ fun_l5_n258(x)
+ else
+ fun_l5_n860(x)
+ end
+end
+
+def fun_l4_n3(x)
+ if (x < 1)
+ fun_l5_n253(x)
+ else
+ fun_l5_n763(x)
+ end
+end
+
+def fun_l4_n4(x)
+ if (x < 1)
+ fun_l5_n833(x)
+ else
+ fun_l5_n807(x)
+ end
+end
+
+def fun_l4_n5(x)
+ if (x < 1)
+ fun_l5_n890(x)
+ else
+ fun_l5_n669(x)
+ end
+end
+
+def fun_l4_n6(x)
+ if (x < 1)
+ fun_l5_n396(x)
+ else
+ fun_l5_n388(x)
+ end
+end
+
+def fun_l4_n7(x)
+ if (x < 1)
+ fun_l5_n926(x)
+ else
+ fun_l5_n661(x)
+ end
+end
+
+def fun_l4_n8(x)
+ if (x < 1)
+ fun_l5_n990(x)
+ else
+ fun_l5_n765(x)
+ end
+end
+
+def fun_l4_n9(x)
+ if (x < 1)
+ fun_l5_n978(x)
+ else
+ fun_l5_n342(x)
+ end
+end
+
+def fun_l4_n10(x)
+ if (x < 1)
+ fun_l5_n500(x)
+ else
+ fun_l5_n758(x)
+ end
+end
+
+def fun_l4_n11(x)
+ if (x < 1)
+ fun_l5_n10(x)
+ else
+ fun_l5_n796(x)
+ end
+end
+
+def fun_l4_n12(x)
+ if (x < 1)
+ fun_l5_n173(x)
+ else
+ fun_l5_n87(x)
+ end
+end
+
+def fun_l4_n13(x)
+ if (x < 1)
+ fun_l5_n852(x)
+ else
+ fun_l5_n93(x)
+ end
+end
+
+def fun_l4_n14(x)
+ if (x < 1)
+ fun_l5_n526(x)
+ else
+ fun_l5_n144(x)
+ end
+end
+
+def fun_l4_n15(x)
+ if (x < 1)
+ fun_l5_n398(x)
+ else
+ fun_l5_n632(x)
+ end
+end
+
+def fun_l4_n16(x)
+ if (x < 1)
+ fun_l5_n774(x)
+ else
+ fun_l5_n716(x)
+ end
+end
+
+def fun_l4_n17(x)
+ if (x < 1)
+ fun_l5_n412(x)
+ else
+ fun_l5_n95(x)
+ end
+end
+
+def fun_l4_n18(x)
+ if (x < 1)
+ fun_l5_n446(x)
+ else
+ fun_l5_n885(x)
+ end
+end
+
+def fun_l4_n19(x)
+ if (x < 1)
+ fun_l5_n420(x)
+ else
+ fun_l5_n420(x)
+ end
+end
+
+def fun_l4_n20(x)
+ if (x < 1)
+ fun_l5_n274(x)
+ else
+ fun_l5_n813(x)
+ end
+end
+
+def fun_l4_n21(x)
+ if (x < 1)
+ fun_l5_n174(x)
+ else
+ fun_l5_n781(x)
+ end
+end
+
+def fun_l4_n22(x)
+ if (x < 1)
+ fun_l5_n903(x)
+ else
+ fun_l5_n224(x)
+ end
+end
+
+def fun_l4_n23(x)
+ if (x < 1)
+ fun_l5_n317(x)
+ else
+ fun_l5_n900(x)
+ end
+end
+
+def fun_l4_n24(x)
+ if (x < 1)
+ fun_l5_n600(x)
+ else
+ fun_l5_n515(x)
+ end
+end
+
+def fun_l4_n25(x)
+ if (x < 1)
+ fun_l5_n151(x)
+ else
+ fun_l5_n790(x)
+ end
+end
+
+def fun_l4_n26(x)
+ if (x < 1)
+ fun_l5_n256(x)
+ else
+ fun_l5_n449(x)
+ end
+end
+
+def fun_l4_n27(x)
+ if (x < 1)
+ fun_l5_n985(x)
+ else
+ fun_l5_n763(x)
+ end
+end
+
+def fun_l4_n28(x)
+ if (x < 1)
+ fun_l5_n961(x)
+ else
+ fun_l5_n86(x)
+ end
+end
+
+def fun_l4_n29(x)
+ if (x < 1)
+ fun_l5_n359(x)
+ else
+ fun_l5_n894(x)
+ end
+end
+
+def fun_l4_n30(x)
+ if (x < 1)
+ fun_l5_n426(x)
+ else
+ fun_l5_n983(x)
+ end
+end
+
+def fun_l4_n31(x)
+ if (x < 1)
+ fun_l5_n996(x)
+ else
+ fun_l5_n343(x)
+ end
+end
+
+def fun_l4_n32(x)
+ if (x < 1)
+ fun_l5_n300(x)
+ else
+ fun_l5_n719(x)
+ end
+end
+
+def fun_l4_n33(x)
+ if (x < 1)
+ fun_l5_n755(x)
+ else
+ fun_l5_n623(x)
+ end
+end
+
+def fun_l4_n34(x)
+ if (x < 1)
+ fun_l5_n681(x)
+ else
+ fun_l5_n410(x)
+ end
+end
+
+def fun_l4_n35(x)
+ if (x < 1)
+ fun_l5_n356(x)
+ else
+ fun_l5_n351(x)
+ end
+end
+
+def fun_l4_n36(x)
+ if (x < 1)
+ fun_l5_n463(x)
+ else
+ fun_l5_n402(x)
+ end
+end
+
+def fun_l4_n37(x)
+ if (x < 1)
+ fun_l5_n250(x)
+ else
+ fun_l5_n681(x)
+ end
+end
+
+def fun_l4_n38(x)
+ if (x < 1)
+ fun_l5_n573(x)
+ else
+ fun_l5_n622(x)
+ end
+end
+
+def fun_l4_n39(x)
+ if (x < 1)
+ fun_l5_n545(x)
+ else
+ fun_l5_n210(x)
+ end
+end
+
+def fun_l4_n40(x)
+ if (x < 1)
+ fun_l5_n264(x)
+ else
+ fun_l5_n239(x)
+ end
+end
+
+def fun_l4_n41(x)
+ if (x < 1)
+ fun_l5_n635(x)
+ else
+ fun_l5_n224(x)
+ end
+end
+
+def fun_l4_n42(x)
+ if (x < 1)
+ fun_l5_n806(x)
+ else
+ fun_l5_n125(x)
+ end
+end
+
+def fun_l4_n43(x)
+ if (x < 1)
+ fun_l5_n480(x)
+ else
+ fun_l5_n625(x)
+ end
+end
+
+def fun_l4_n44(x)
+ if (x < 1)
+ fun_l5_n644(x)
+ else
+ fun_l5_n465(x)
+ end
+end
+
+def fun_l4_n45(x)
+ if (x < 1)
+ fun_l5_n999(x)
+ else
+ fun_l5_n759(x)
+ end
+end
+
+def fun_l4_n46(x)
+ if (x < 1)
+ fun_l5_n866(x)
+ else
+ fun_l5_n536(x)
+ end
+end
+
+def fun_l4_n47(x)
+ if (x < 1)
+ fun_l5_n328(x)
+ else
+ fun_l5_n533(x)
+ end
+end
+
+def fun_l4_n48(x)
+ if (x < 1)
+ fun_l5_n902(x)
+ else
+ fun_l5_n919(x)
+ end
+end
+
+def fun_l4_n49(x)
+ if (x < 1)
+ fun_l5_n197(x)
+ else
+ fun_l5_n262(x)
+ end
+end
+
+def fun_l4_n50(x)
+ if (x < 1)
+ fun_l5_n172(x)
+ else
+ fun_l5_n731(x)
+ end
+end
+
+def fun_l4_n51(x)
+ if (x < 1)
+ fun_l5_n502(x)
+ else
+ fun_l5_n149(x)
+ end
+end
+
+def fun_l4_n52(x)
+ if (x < 1)
+ fun_l5_n69(x)
+ else
+ fun_l5_n536(x)
+ end
+end
+
+def fun_l4_n53(x)
+ if (x < 1)
+ fun_l5_n932(x)
+ else
+ fun_l5_n482(x)
+ end
+end
+
+def fun_l4_n54(x)
+ if (x < 1)
+ fun_l5_n982(x)
+ else
+ fun_l5_n207(x)
+ end
+end
+
+def fun_l4_n55(x)
+ if (x < 1)
+ fun_l5_n949(x)
+ else
+ fun_l5_n9(x)
+ end
+end
+
+def fun_l4_n56(x)
+ if (x < 1)
+ fun_l5_n672(x)
+ else
+ fun_l5_n924(x)
+ end
+end
+
+def fun_l4_n57(x)
+ if (x < 1)
+ fun_l5_n757(x)
+ else
+ fun_l5_n609(x)
+ end
+end
+
+def fun_l4_n58(x)
+ if (x < 1)
+ fun_l5_n251(x)
+ else
+ fun_l5_n471(x)
+ end
+end
+
+def fun_l4_n59(x)
+ if (x < 1)
+ fun_l5_n878(x)
+ else
+ fun_l5_n626(x)
+ end
+end
+
+def fun_l4_n60(x)
+ if (x < 1)
+ fun_l5_n859(x)
+ else
+ fun_l5_n646(x)
+ end
+end
+
+def fun_l4_n61(x)
+ if (x < 1)
+ fun_l5_n417(x)
+ else
+ fun_l5_n587(x)
+ end
+end
+
+def fun_l4_n62(x)
+ if (x < 1)
+ fun_l5_n164(x)
+ else
+ fun_l5_n861(x)
+ end
+end
+
+def fun_l4_n63(x)
+ if (x < 1)
+ fun_l5_n591(x)
+ else
+ fun_l5_n79(x)
+ end
+end
+
+def fun_l4_n64(x)
+ if (x < 1)
+ fun_l5_n269(x)
+ else
+ fun_l5_n336(x)
+ end
+end
+
+def fun_l4_n65(x)
+ if (x < 1)
+ fun_l5_n420(x)
+ else
+ fun_l5_n557(x)
+ end
+end
+
+def fun_l4_n66(x)
+ if (x < 1)
+ fun_l5_n61(x)
+ else
+ fun_l5_n690(x)
+ end
+end
+
+def fun_l4_n67(x)
+ if (x < 1)
+ fun_l5_n939(x)
+ else
+ fun_l5_n139(x)
+ end
+end
+
+def fun_l4_n68(x)
+ if (x < 1)
+ fun_l5_n430(x)
+ else
+ fun_l5_n625(x)
+ end
+end
+
+def fun_l4_n69(x)
+ if (x < 1)
+ fun_l5_n532(x)
+ else
+ fun_l5_n909(x)
+ end
+end
+
+def fun_l4_n70(x)
+ if (x < 1)
+ fun_l5_n937(x)
+ else
+ fun_l5_n886(x)
+ end
+end
+
+def fun_l4_n71(x)
+ if (x < 1)
+ fun_l5_n554(x)
+ else
+ fun_l5_n898(x)
+ end
+end
+
+def fun_l4_n72(x)
+ if (x < 1)
+ fun_l5_n390(x)
+ else
+ fun_l5_n690(x)
+ end
+end
+
+def fun_l4_n73(x)
+ if (x < 1)
+ fun_l5_n980(x)
+ else
+ fun_l5_n248(x)
+ end
+end
+
+def fun_l4_n74(x)
+ if (x < 1)
+ fun_l5_n438(x)
+ else
+ fun_l5_n247(x)
+ end
+end
+
+def fun_l4_n75(x)
+ if (x < 1)
+ fun_l5_n798(x)
+ else
+ fun_l5_n399(x)
+ end
+end
+
+def fun_l4_n76(x)
+ if (x < 1)
+ fun_l5_n419(x)
+ else
+ fun_l5_n754(x)
+ end
+end
+
+def fun_l4_n77(x)
+ if (x < 1)
+ fun_l5_n875(x)
+ else
+ fun_l5_n18(x)
+ end
+end
+
+def fun_l4_n78(x)
+ if (x < 1)
+ fun_l5_n695(x)
+ else
+ fun_l5_n336(x)
+ end
+end
+
+def fun_l4_n79(x)
+ if (x < 1)
+ fun_l5_n185(x)
+ else
+ fun_l5_n627(x)
+ end
+end
+
+def fun_l4_n80(x)
+ if (x < 1)
+ fun_l5_n359(x)
+ else
+ fun_l5_n465(x)
+ end
+end
+
+def fun_l4_n81(x)
+ if (x < 1)
+ fun_l5_n284(x)
+ else
+ fun_l5_n232(x)
+ end
+end
+
+def fun_l4_n82(x)
+ if (x < 1)
+ fun_l5_n947(x)
+ else
+ fun_l5_n748(x)
+ end
+end
+
+def fun_l4_n83(x)
+ if (x < 1)
+ fun_l5_n254(x)
+ else
+ fun_l5_n836(x)
+ end
+end
+
+def fun_l4_n84(x)
+ if (x < 1)
+ fun_l5_n181(x)
+ else
+ fun_l5_n789(x)
+ end
+end
+
+def fun_l4_n85(x)
+ if (x < 1)
+ fun_l5_n682(x)
+ else
+ fun_l5_n547(x)
+ end
+end
+
+def fun_l4_n86(x)
+ if (x < 1)
+ fun_l5_n35(x)
+ else
+ fun_l5_n157(x)
+ end
+end
+
+def fun_l4_n87(x)
+ if (x < 1)
+ fun_l5_n981(x)
+ else
+ fun_l5_n724(x)
+ end
+end
+
+def fun_l4_n88(x)
+ if (x < 1)
+ fun_l5_n35(x)
+ else
+ fun_l5_n914(x)
+ end
+end
+
+def fun_l4_n89(x)
+ if (x < 1)
+ fun_l5_n844(x)
+ else
+ fun_l5_n668(x)
+ end
+end
+
+def fun_l4_n90(x)
+ if (x < 1)
+ fun_l5_n615(x)
+ else
+ fun_l5_n415(x)
+ end
+end
+
+def fun_l4_n91(x)
+ if (x < 1)
+ fun_l5_n141(x)
+ else
+ fun_l5_n538(x)
+ end
+end
+
+def fun_l4_n92(x)
+ if (x < 1)
+ fun_l5_n563(x)
+ else
+ fun_l5_n60(x)
+ end
+end
+
+def fun_l4_n93(x)
+ if (x < 1)
+ fun_l5_n967(x)
+ else
+ fun_l5_n549(x)
+ end
+end
+
+def fun_l4_n94(x)
+ if (x < 1)
+ fun_l5_n992(x)
+ else
+ fun_l5_n34(x)
+ end
+end
+
+def fun_l4_n95(x)
+ if (x < 1)
+ fun_l5_n187(x)
+ else
+ fun_l5_n980(x)
+ end
+end
+
+def fun_l4_n96(x)
+ if (x < 1)
+ fun_l5_n543(x)
+ else
+ fun_l5_n318(x)
+ end
+end
+
+def fun_l4_n97(x)
+ if (x < 1)
+ fun_l5_n253(x)
+ else
+ fun_l5_n709(x)
+ end
+end
+
+def fun_l4_n98(x)
+ if (x < 1)
+ fun_l5_n233(x)
+ else
+ fun_l5_n963(x)
+ end
+end
+
+def fun_l4_n99(x)
+ if (x < 1)
+ fun_l5_n944(x)
+ else
+ fun_l5_n688(x)
+ end
+end
+
+def fun_l4_n100(x)
+ if (x < 1)
+ fun_l5_n422(x)
+ else
+ fun_l5_n805(x)
+ end
+end
+
+def fun_l4_n101(x)
+ if (x < 1)
+ fun_l5_n125(x)
+ else
+ fun_l5_n729(x)
+ end
+end
+
+def fun_l4_n102(x)
+ if (x < 1)
+ fun_l5_n339(x)
+ else
+ fun_l5_n394(x)
+ end
+end
+
+def fun_l4_n103(x)
+ if (x < 1)
+ fun_l5_n149(x)
+ else
+ fun_l5_n839(x)
+ end
+end
+
+def fun_l4_n104(x)
+ if (x < 1)
+ fun_l5_n750(x)
+ else
+ fun_l5_n514(x)
+ end
+end
+
+def fun_l4_n105(x)
+ if (x < 1)
+ fun_l5_n726(x)
+ else
+ fun_l5_n251(x)
+ end
+end
+
+def fun_l4_n106(x)
+ if (x < 1)
+ fun_l5_n810(x)
+ else
+ fun_l5_n46(x)
+ end
+end
+
+def fun_l4_n107(x)
+ if (x < 1)
+ fun_l5_n702(x)
+ else
+ fun_l5_n224(x)
+ end
+end
+
+def fun_l4_n108(x)
+ if (x < 1)
+ fun_l5_n604(x)
+ else
+ fun_l5_n24(x)
+ end
+end
+
+def fun_l4_n109(x)
+ if (x < 1)
+ fun_l5_n511(x)
+ else
+ fun_l5_n529(x)
+ end
+end
+
+def fun_l4_n110(x)
+ if (x < 1)
+ fun_l5_n193(x)
+ else
+ fun_l5_n210(x)
+ end
+end
+
+def fun_l4_n111(x)
+ if (x < 1)
+ fun_l5_n599(x)
+ else
+ fun_l5_n85(x)
+ end
+end
+
+def fun_l4_n112(x)
+ if (x < 1)
+ fun_l5_n910(x)
+ else
+ fun_l5_n292(x)
+ end
+end
+
+def fun_l4_n113(x)
+ if (x < 1)
+ fun_l5_n851(x)
+ else
+ fun_l5_n269(x)
+ end
+end
+
+def fun_l4_n114(x)
+ if (x < 1)
+ fun_l5_n739(x)
+ else
+ fun_l5_n439(x)
+ end
+end
+
+def fun_l4_n115(x)
+ if (x < 1)
+ fun_l5_n580(x)
+ else
+ fun_l5_n656(x)
+ end
+end
+
+def fun_l4_n116(x)
+ if (x < 1)
+ fun_l5_n992(x)
+ else
+ fun_l5_n703(x)
+ end
+end
+
+def fun_l4_n117(x)
+ if (x < 1)
+ fun_l5_n549(x)
+ else
+ fun_l5_n300(x)
+ end
+end
+
+def fun_l4_n118(x)
+ if (x < 1)
+ fun_l5_n785(x)
+ else
+ fun_l5_n596(x)
+ end
+end
+
+def fun_l4_n119(x)
+ if (x < 1)
+ fun_l5_n236(x)
+ else
+ fun_l5_n84(x)
+ end
+end
+
+def fun_l4_n120(x)
+ if (x < 1)
+ fun_l5_n78(x)
+ else
+ fun_l5_n610(x)
+ end
+end
+
+def fun_l4_n121(x)
+ if (x < 1)
+ fun_l5_n591(x)
+ else
+ fun_l5_n557(x)
+ end
+end
+
+def fun_l4_n122(x)
+ if (x < 1)
+ fun_l5_n927(x)
+ else
+ fun_l5_n40(x)
+ end
+end
+
+def fun_l4_n123(x)
+ if (x < 1)
+ fun_l5_n620(x)
+ else
+ fun_l5_n173(x)
+ end
+end
+
+def fun_l4_n124(x)
+ if (x < 1)
+ fun_l5_n558(x)
+ else
+ fun_l5_n330(x)
+ end
+end
+
+def fun_l4_n125(x)
+ if (x < 1)
+ fun_l5_n535(x)
+ else
+ fun_l5_n636(x)
+ end
+end
+
+def fun_l4_n126(x)
+ if (x < 1)
+ fun_l5_n401(x)
+ else
+ fun_l5_n747(x)
+ end
+end
+
+def fun_l4_n127(x)
+ if (x < 1)
+ fun_l5_n172(x)
+ else
+ fun_l5_n124(x)
+ end
+end
+
+def fun_l4_n128(x)
+ if (x < 1)
+ fun_l5_n449(x)
+ else
+ fun_l5_n619(x)
+ end
+end
+
+def fun_l4_n129(x)
+ if (x < 1)
+ fun_l5_n564(x)
+ else
+ fun_l5_n306(x)
+ end
+end
+
+def fun_l4_n130(x)
+ if (x < 1)
+ fun_l5_n196(x)
+ else
+ fun_l5_n170(x)
+ end
+end
+
+def fun_l4_n131(x)
+ if (x < 1)
+ fun_l5_n383(x)
+ else
+ fun_l5_n87(x)
+ end
+end
+
+def fun_l4_n132(x)
+ if (x < 1)
+ fun_l5_n568(x)
+ else
+ fun_l5_n54(x)
+ end
+end
+
+def fun_l4_n133(x)
+ if (x < 1)
+ fun_l5_n780(x)
+ else
+ fun_l5_n184(x)
+ end
+end
+
+def fun_l4_n134(x)
+ if (x < 1)
+ fun_l5_n880(x)
+ else
+ fun_l5_n38(x)
+ end
+end
+
+def fun_l4_n135(x)
+ if (x < 1)
+ fun_l5_n819(x)
+ else
+ fun_l5_n440(x)
+ end
+end
+
+def fun_l4_n136(x)
+ if (x < 1)
+ fun_l5_n301(x)
+ else
+ fun_l5_n676(x)
+ end
+end
+
+def fun_l4_n137(x)
+ if (x < 1)
+ fun_l5_n69(x)
+ else
+ fun_l5_n333(x)
+ end
+end
+
+def fun_l4_n138(x)
+ if (x < 1)
+ fun_l5_n303(x)
+ else
+ fun_l5_n69(x)
+ end
+end
+
+def fun_l4_n139(x)
+ if (x < 1)
+ fun_l5_n939(x)
+ else
+ fun_l5_n103(x)
+ end
+end
+
+def fun_l4_n140(x)
+ if (x < 1)
+ fun_l5_n517(x)
+ else
+ fun_l5_n24(x)
+ end
+end
+
+def fun_l4_n141(x)
+ if (x < 1)
+ fun_l5_n923(x)
+ else
+ fun_l5_n968(x)
+ end
+end
+
+def fun_l4_n142(x)
+ if (x < 1)
+ fun_l5_n196(x)
+ else
+ fun_l5_n841(x)
+ end
+end
+
+def fun_l4_n143(x)
+ if (x < 1)
+ fun_l5_n726(x)
+ else
+ fun_l5_n715(x)
+ end
+end
+
+def fun_l4_n144(x)
+ if (x < 1)
+ fun_l5_n434(x)
+ else
+ fun_l5_n771(x)
+ end
+end
+
+def fun_l4_n145(x)
+ if (x < 1)
+ fun_l5_n211(x)
+ else
+ fun_l5_n963(x)
+ end
+end
+
+def fun_l4_n146(x)
+ if (x < 1)
+ fun_l5_n534(x)
+ else
+ fun_l5_n5(x)
+ end
+end
+
+def fun_l4_n147(x)
+ if (x < 1)
+ fun_l5_n409(x)
+ else
+ fun_l5_n298(x)
+ end
+end
+
+def fun_l4_n148(x)
+ if (x < 1)
+ fun_l5_n447(x)
+ else
+ fun_l5_n474(x)
+ end
+end
+
+def fun_l4_n149(x)
+ if (x < 1)
+ fun_l5_n181(x)
+ else
+ fun_l5_n87(x)
+ end
+end
+
+def fun_l4_n150(x)
+ if (x < 1)
+ fun_l5_n871(x)
+ else
+ fun_l5_n201(x)
+ end
+end
+
+def fun_l4_n151(x)
+ if (x < 1)
+ fun_l5_n539(x)
+ else
+ fun_l5_n855(x)
+ end
+end
+
+def fun_l4_n152(x)
+ if (x < 1)
+ fun_l5_n387(x)
+ else
+ fun_l5_n730(x)
+ end
+end
+
+def fun_l4_n153(x)
+ if (x < 1)
+ fun_l5_n785(x)
+ else
+ fun_l5_n774(x)
+ end
+end
+
+def fun_l4_n154(x)
+ if (x < 1)
+ fun_l5_n924(x)
+ else
+ fun_l5_n414(x)
+ end
+end
+
+def fun_l4_n155(x)
+ if (x < 1)
+ fun_l5_n110(x)
+ else
+ fun_l5_n669(x)
+ end
+end
+
+def fun_l4_n156(x)
+ if (x < 1)
+ fun_l5_n551(x)
+ else
+ fun_l5_n456(x)
+ end
+end
+
+def fun_l4_n157(x)
+ if (x < 1)
+ fun_l5_n872(x)
+ else
+ fun_l5_n397(x)
+ end
+end
+
+def fun_l4_n158(x)
+ if (x < 1)
+ fun_l5_n208(x)
+ else
+ fun_l5_n464(x)
+ end
+end
+
+def fun_l4_n159(x)
+ if (x < 1)
+ fun_l5_n236(x)
+ else
+ fun_l5_n532(x)
+ end
+end
+
+def fun_l4_n160(x)
+ if (x < 1)
+ fun_l5_n820(x)
+ else
+ fun_l5_n146(x)
+ end
+end
+
+def fun_l4_n161(x)
+ if (x < 1)
+ fun_l5_n154(x)
+ else
+ fun_l5_n388(x)
+ end
+end
+
+def fun_l4_n162(x)
+ if (x < 1)
+ fun_l5_n456(x)
+ else
+ fun_l5_n309(x)
+ end
+end
+
+def fun_l4_n163(x)
+ if (x < 1)
+ fun_l5_n201(x)
+ else
+ fun_l5_n728(x)
+ end
+end
+
+def fun_l4_n164(x)
+ if (x < 1)
+ fun_l5_n806(x)
+ else
+ fun_l5_n156(x)
+ end
+end
+
+def fun_l4_n165(x)
+ if (x < 1)
+ fun_l5_n984(x)
+ else
+ fun_l5_n996(x)
+ end
+end
+
+def fun_l4_n166(x)
+ if (x < 1)
+ fun_l5_n253(x)
+ else
+ fun_l5_n527(x)
+ end
+end
+
+def fun_l4_n167(x)
+ if (x < 1)
+ fun_l5_n513(x)
+ else
+ fun_l5_n145(x)
+ end
+end
+
+def fun_l4_n168(x)
+ if (x < 1)
+ fun_l5_n694(x)
+ else
+ fun_l5_n841(x)
+ end
+end
+
+def fun_l4_n169(x)
+ if (x < 1)
+ fun_l5_n463(x)
+ else
+ fun_l5_n193(x)
+ end
+end
+
+def fun_l4_n170(x)
+ if (x < 1)
+ fun_l5_n638(x)
+ else
+ fun_l5_n252(x)
+ end
+end
+
+def fun_l4_n171(x)
+ if (x < 1)
+ fun_l5_n166(x)
+ else
+ fun_l5_n134(x)
+ end
+end
+
+def fun_l4_n172(x)
+ if (x < 1)
+ fun_l5_n172(x)
+ else
+ fun_l5_n179(x)
+ end
+end
+
+def fun_l4_n173(x)
+ if (x < 1)
+ fun_l5_n218(x)
+ else
+ fun_l5_n124(x)
+ end
+end
+
+def fun_l4_n174(x)
+ if (x < 1)
+ fun_l5_n370(x)
+ else
+ fun_l5_n742(x)
+ end
+end
+
+def fun_l4_n175(x)
+ if (x < 1)
+ fun_l5_n593(x)
+ else
+ fun_l5_n542(x)
+ end
+end
+
+def fun_l4_n176(x)
+ if (x < 1)
+ fun_l5_n438(x)
+ else
+ fun_l5_n606(x)
+ end
+end
+
+def fun_l4_n177(x)
+ if (x < 1)
+ fun_l5_n316(x)
+ else
+ fun_l5_n92(x)
+ end
+end
+
+def fun_l4_n178(x)
+ if (x < 1)
+ fun_l5_n222(x)
+ else
+ fun_l5_n461(x)
+ end
+end
+
+def fun_l4_n179(x)
+ if (x < 1)
+ fun_l5_n244(x)
+ else
+ fun_l5_n536(x)
+ end
+end
+
+def fun_l4_n180(x)
+ if (x < 1)
+ fun_l5_n120(x)
+ else
+ fun_l5_n905(x)
+ end
+end
+
+def fun_l4_n181(x)
+ if (x < 1)
+ fun_l5_n601(x)
+ else
+ fun_l5_n62(x)
+ end
+end
+
+def fun_l4_n182(x)
+ if (x < 1)
+ fun_l5_n701(x)
+ else
+ fun_l5_n25(x)
+ end
+end
+
+def fun_l4_n183(x)
+ if (x < 1)
+ fun_l5_n361(x)
+ else
+ fun_l5_n433(x)
+ end
+end
+
+def fun_l4_n184(x)
+ if (x < 1)
+ fun_l5_n29(x)
+ else
+ fun_l5_n302(x)
+ end
+end
+
+def fun_l4_n185(x)
+ if (x < 1)
+ fun_l5_n697(x)
+ else
+ fun_l5_n849(x)
+ end
+end
+
+def fun_l4_n186(x)
+ if (x < 1)
+ fun_l5_n76(x)
+ else
+ fun_l5_n402(x)
+ end
+end
+
+def fun_l4_n187(x)
+ if (x < 1)
+ fun_l5_n38(x)
+ else
+ fun_l5_n818(x)
+ end
+end
+
+def fun_l4_n188(x)
+ if (x < 1)
+ fun_l5_n730(x)
+ else
+ fun_l5_n456(x)
+ end
+end
+
+def fun_l4_n189(x)
+ if (x < 1)
+ fun_l5_n828(x)
+ else
+ fun_l5_n796(x)
+ end
+end
+
+def fun_l4_n190(x)
+ if (x < 1)
+ fun_l5_n86(x)
+ else
+ fun_l5_n976(x)
+ end
+end
+
+def fun_l4_n191(x)
+ if (x < 1)
+ fun_l5_n267(x)
+ else
+ fun_l5_n497(x)
+ end
+end
+
+def fun_l4_n192(x)
+ if (x < 1)
+ fun_l5_n534(x)
+ else
+ fun_l5_n449(x)
+ end
+end
+
+def fun_l4_n193(x)
+ if (x < 1)
+ fun_l5_n97(x)
+ else
+ fun_l5_n595(x)
+ end
+end
+
+def fun_l4_n194(x)
+ if (x < 1)
+ fun_l5_n821(x)
+ else
+ fun_l5_n823(x)
+ end
+end
+
+def fun_l4_n195(x)
+ if (x < 1)
+ fun_l5_n936(x)
+ else
+ fun_l5_n490(x)
+ end
+end
+
+def fun_l4_n196(x)
+ if (x < 1)
+ fun_l5_n579(x)
+ else
+ fun_l5_n684(x)
+ end
+end
+
+def fun_l4_n197(x)
+ if (x < 1)
+ fun_l5_n742(x)
+ else
+ fun_l5_n874(x)
+ end
+end
+
+def fun_l4_n198(x)
+ if (x < 1)
+ fun_l5_n904(x)
+ else
+ fun_l5_n394(x)
+ end
+end
+
+def fun_l4_n199(x)
+ if (x < 1)
+ fun_l5_n229(x)
+ else
+ fun_l5_n406(x)
+ end
+end
+
+def fun_l4_n200(x)
+ if (x < 1)
+ fun_l5_n332(x)
+ else
+ fun_l5_n803(x)
+ end
+end
+
+def fun_l4_n201(x)
+ if (x < 1)
+ fun_l5_n635(x)
+ else
+ fun_l5_n294(x)
+ end
+end
+
+def fun_l4_n202(x)
+ if (x < 1)
+ fun_l5_n397(x)
+ else
+ fun_l5_n924(x)
+ end
+end
+
+def fun_l4_n203(x)
+ if (x < 1)
+ fun_l5_n199(x)
+ else
+ fun_l5_n751(x)
+ end
+end
+
+def fun_l4_n204(x)
+ if (x < 1)
+ fun_l5_n570(x)
+ else
+ fun_l5_n70(x)
+ end
+end
+
+def fun_l4_n205(x)
+ if (x < 1)
+ fun_l5_n344(x)
+ else
+ fun_l5_n713(x)
+ end
+end
+
+def fun_l4_n206(x)
+ if (x < 1)
+ fun_l5_n568(x)
+ else
+ fun_l5_n40(x)
+ end
+end
+
+def fun_l4_n207(x)
+ if (x < 1)
+ fun_l5_n460(x)
+ else
+ fun_l5_n311(x)
+ end
+end
+
+def fun_l4_n208(x)
+ if (x < 1)
+ fun_l5_n995(x)
+ else
+ fun_l5_n147(x)
+ end
+end
+
+def fun_l4_n209(x)
+ if (x < 1)
+ fun_l5_n295(x)
+ else
+ fun_l5_n752(x)
+ end
+end
+
+def fun_l4_n210(x)
+ if (x < 1)
+ fun_l5_n644(x)
+ else
+ fun_l5_n555(x)
+ end
+end
+
+def fun_l4_n211(x)
+ if (x < 1)
+ fun_l5_n196(x)
+ else
+ fun_l5_n418(x)
+ end
+end
+
+def fun_l4_n212(x)
+ if (x < 1)
+ fun_l5_n761(x)
+ else
+ fun_l5_n66(x)
+ end
+end
+
+def fun_l4_n213(x)
+ if (x < 1)
+ fun_l5_n906(x)
+ else
+ fun_l5_n521(x)
+ end
+end
+
+def fun_l4_n214(x)
+ if (x < 1)
+ fun_l5_n97(x)
+ else
+ fun_l5_n431(x)
+ end
+end
+
+def fun_l4_n215(x)
+ if (x < 1)
+ fun_l5_n450(x)
+ else
+ fun_l5_n58(x)
+ end
+end
+
+def fun_l4_n216(x)
+ if (x < 1)
+ fun_l5_n576(x)
+ else
+ fun_l5_n675(x)
+ end
+end
+
+def fun_l4_n217(x)
+ if (x < 1)
+ fun_l5_n764(x)
+ else
+ fun_l5_n653(x)
+ end
+end
+
+def fun_l4_n218(x)
+ if (x < 1)
+ fun_l5_n591(x)
+ else
+ fun_l5_n398(x)
+ end
+end
+
+def fun_l4_n219(x)
+ if (x < 1)
+ fun_l5_n94(x)
+ else
+ fun_l5_n411(x)
+ end
+end
+
+def fun_l4_n220(x)
+ if (x < 1)
+ fun_l5_n13(x)
+ else
+ fun_l5_n117(x)
+ end
+end
+
+def fun_l4_n221(x)
+ if (x < 1)
+ fun_l5_n872(x)
+ else
+ fun_l5_n799(x)
+ end
+end
+
+def fun_l4_n222(x)
+ if (x < 1)
+ fun_l5_n692(x)
+ else
+ fun_l5_n99(x)
+ end
+end
+
+def fun_l4_n223(x)
+ if (x < 1)
+ fun_l5_n861(x)
+ else
+ fun_l5_n999(x)
+ end
+end
+
+def fun_l4_n224(x)
+ if (x < 1)
+ fun_l5_n547(x)
+ else
+ fun_l5_n955(x)
+ end
+end
+
+def fun_l4_n225(x)
+ if (x < 1)
+ fun_l5_n305(x)
+ else
+ fun_l5_n894(x)
+ end
+end
+
+def fun_l4_n226(x)
+ if (x < 1)
+ fun_l5_n128(x)
+ else
+ fun_l5_n662(x)
+ end
+end
+
+def fun_l4_n227(x)
+ if (x < 1)
+ fun_l5_n858(x)
+ else
+ fun_l5_n323(x)
+ end
+end
+
+def fun_l4_n228(x)
+ if (x < 1)
+ fun_l5_n923(x)
+ else
+ fun_l5_n206(x)
+ end
+end
+
+def fun_l4_n229(x)
+ if (x < 1)
+ fun_l5_n486(x)
+ else
+ fun_l5_n603(x)
+ end
+end
+
+def fun_l4_n230(x)
+ if (x < 1)
+ fun_l5_n116(x)
+ else
+ fun_l5_n887(x)
+ end
+end
+
+def fun_l4_n231(x)
+ if (x < 1)
+ fun_l5_n791(x)
+ else
+ fun_l5_n762(x)
+ end
+end
+
+def fun_l4_n232(x)
+ if (x < 1)
+ fun_l5_n511(x)
+ else
+ fun_l5_n890(x)
+ end
+end
+
+def fun_l4_n233(x)
+ if (x < 1)
+ fun_l5_n600(x)
+ else
+ fun_l5_n777(x)
+ end
+end
+
+def fun_l4_n234(x)
+ if (x < 1)
+ fun_l5_n458(x)
+ else
+ fun_l5_n379(x)
+ end
+end
+
+def fun_l4_n235(x)
+ if (x < 1)
+ fun_l5_n907(x)
+ else
+ fun_l5_n463(x)
+ end
+end
+
+def fun_l4_n236(x)
+ if (x < 1)
+ fun_l5_n177(x)
+ else
+ fun_l5_n732(x)
+ end
+end
+
+def fun_l4_n237(x)
+ if (x < 1)
+ fun_l5_n761(x)
+ else
+ fun_l5_n936(x)
+ end
+end
+
+def fun_l4_n238(x)
+ if (x < 1)
+ fun_l5_n757(x)
+ else
+ fun_l5_n733(x)
+ end
+end
+
+def fun_l4_n239(x)
+ if (x < 1)
+ fun_l5_n844(x)
+ else
+ fun_l5_n527(x)
+ end
+end
+
+def fun_l4_n240(x)
+ if (x < 1)
+ fun_l5_n594(x)
+ else
+ fun_l5_n620(x)
+ end
+end
+
+def fun_l4_n241(x)
+ if (x < 1)
+ fun_l5_n722(x)
+ else
+ fun_l5_n779(x)
+ end
+end
+
+def fun_l4_n242(x)
+ if (x < 1)
+ fun_l5_n663(x)
+ else
+ fun_l5_n237(x)
+ end
+end
+
+def fun_l4_n243(x)
+ if (x < 1)
+ fun_l5_n256(x)
+ else
+ fun_l5_n366(x)
+ end
+end
+
+def fun_l4_n244(x)
+ if (x < 1)
+ fun_l5_n614(x)
+ else
+ fun_l5_n895(x)
+ end
+end
+
+def fun_l4_n245(x)
+ if (x < 1)
+ fun_l5_n459(x)
+ else
+ fun_l5_n193(x)
+ end
+end
+
+def fun_l4_n246(x)
+ if (x < 1)
+ fun_l5_n780(x)
+ else
+ fun_l5_n175(x)
+ end
+end
+
+def fun_l4_n247(x)
+ if (x < 1)
+ fun_l5_n186(x)
+ else
+ fun_l5_n488(x)
+ end
+end
+
+def fun_l4_n248(x)
+ if (x < 1)
+ fun_l5_n156(x)
+ else
+ fun_l5_n807(x)
+ end
+end
+
+def fun_l4_n249(x)
+ if (x < 1)
+ fun_l5_n862(x)
+ else
+ fun_l5_n537(x)
+ end
+end
+
+def fun_l4_n250(x)
+ if (x < 1)
+ fun_l5_n880(x)
+ else
+ fun_l5_n528(x)
+ end
+end
+
+def fun_l4_n251(x)
+ if (x < 1)
+ fun_l5_n163(x)
+ else
+ fun_l5_n94(x)
+ end
+end
+
+def fun_l4_n252(x)
+ if (x < 1)
+ fun_l5_n937(x)
+ else
+ fun_l5_n582(x)
+ end
+end
+
+def fun_l4_n253(x)
+ if (x < 1)
+ fun_l5_n368(x)
+ else
+ fun_l5_n584(x)
+ end
+end
+
+def fun_l4_n254(x)
+ if (x < 1)
+ fun_l5_n566(x)
+ else
+ fun_l5_n449(x)
+ end
+end
+
+def fun_l4_n255(x)
+ if (x < 1)
+ fun_l5_n876(x)
+ else
+ fun_l5_n0(x)
+ end
+end
+
+def fun_l4_n256(x)
+ if (x < 1)
+ fun_l5_n842(x)
+ else
+ fun_l5_n469(x)
+ end
+end
+
+def fun_l4_n257(x)
+ if (x < 1)
+ fun_l5_n85(x)
+ else
+ fun_l5_n961(x)
+ end
+end
+
+def fun_l4_n258(x)
+ if (x < 1)
+ fun_l5_n120(x)
+ else
+ fun_l5_n893(x)
+ end
+end
+
+def fun_l4_n259(x)
+ if (x < 1)
+ fun_l5_n243(x)
+ else
+ fun_l5_n630(x)
+ end
+end
+
+def fun_l4_n260(x)
+ if (x < 1)
+ fun_l5_n710(x)
+ else
+ fun_l5_n715(x)
+ end
+end
+
+def fun_l4_n261(x)
+ if (x < 1)
+ fun_l5_n423(x)
+ else
+ fun_l5_n910(x)
+ end
+end
+
+def fun_l4_n262(x)
+ if (x < 1)
+ fun_l5_n505(x)
+ else
+ fun_l5_n532(x)
+ end
+end
+
+def fun_l4_n263(x)
+ if (x < 1)
+ fun_l5_n775(x)
+ else
+ fun_l5_n236(x)
+ end
+end
+
+def fun_l4_n264(x)
+ if (x < 1)
+ fun_l5_n140(x)
+ else
+ fun_l5_n295(x)
+ end
+end
+
+def fun_l4_n265(x)
+ if (x < 1)
+ fun_l5_n554(x)
+ else
+ fun_l5_n88(x)
+ end
+end
+
+def fun_l4_n266(x)
+ if (x < 1)
+ fun_l5_n831(x)
+ else
+ fun_l5_n307(x)
+ end
+end
+
+def fun_l4_n267(x)
+ if (x < 1)
+ fun_l5_n303(x)
+ else
+ fun_l5_n960(x)
+ end
+end
+
+def fun_l4_n268(x)
+ if (x < 1)
+ fun_l5_n322(x)
+ else
+ fun_l5_n264(x)
+ end
+end
+
+def fun_l4_n269(x)
+ if (x < 1)
+ fun_l5_n823(x)
+ else
+ fun_l5_n374(x)
+ end
+end
+
+def fun_l4_n270(x)
+ if (x < 1)
+ fun_l5_n76(x)
+ else
+ fun_l5_n918(x)
+ end
+end
+
+def fun_l4_n271(x)
+ if (x < 1)
+ fun_l5_n591(x)
+ else
+ fun_l5_n45(x)
+ end
+end
+
+def fun_l4_n272(x)
+ if (x < 1)
+ fun_l5_n686(x)
+ else
+ fun_l5_n836(x)
+ end
+end
+
+def fun_l4_n273(x)
+ if (x < 1)
+ fun_l5_n13(x)
+ else
+ fun_l5_n676(x)
+ end
+end
+
+def fun_l4_n274(x)
+ if (x < 1)
+ fun_l5_n120(x)
+ else
+ fun_l5_n180(x)
+ end
+end
+
+def fun_l4_n275(x)
+ if (x < 1)
+ fun_l5_n901(x)
+ else
+ fun_l5_n101(x)
+ end
+end
+
+def fun_l4_n276(x)
+ if (x < 1)
+ fun_l5_n289(x)
+ else
+ fun_l5_n238(x)
+ end
+end
+
+def fun_l4_n277(x)
+ if (x < 1)
+ fun_l5_n513(x)
+ else
+ fun_l5_n887(x)
+ end
+end
+
+def fun_l4_n278(x)
+ if (x < 1)
+ fun_l5_n799(x)
+ else
+ fun_l5_n763(x)
+ end
+end
+
+def fun_l4_n279(x)
+ if (x < 1)
+ fun_l5_n628(x)
+ else
+ fun_l5_n373(x)
+ end
+end
+
+def fun_l4_n280(x)
+ if (x < 1)
+ fun_l5_n661(x)
+ else
+ fun_l5_n826(x)
+ end
+end
+
+def fun_l4_n281(x)
+ if (x < 1)
+ fun_l5_n35(x)
+ else
+ fun_l5_n409(x)
+ end
+end
+
+def fun_l4_n282(x)
+ if (x < 1)
+ fun_l5_n721(x)
+ else
+ fun_l5_n719(x)
+ end
+end
+
+def fun_l4_n283(x)
+ if (x < 1)
+ fun_l5_n729(x)
+ else
+ fun_l5_n901(x)
+ end
+end
+
+def fun_l4_n284(x)
+ if (x < 1)
+ fun_l5_n17(x)
+ else
+ fun_l5_n482(x)
+ end
+end
+
+def fun_l4_n285(x)
+ if (x < 1)
+ fun_l5_n166(x)
+ else
+ fun_l5_n53(x)
+ end
+end
+
+def fun_l4_n286(x)
+ if (x < 1)
+ fun_l5_n873(x)
+ else
+ fun_l5_n383(x)
+ end
+end
+
+def fun_l4_n287(x)
+ if (x < 1)
+ fun_l5_n269(x)
+ else
+ fun_l5_n49(x)
+ end
+end
+
+def fun_l4_n288(x)
+ if (x < 1)
+ fun_l5_n292(x)
+ else
+ fun_l5_n353(x)
+ end
+end
+
+def fun_l4_n289(x)
+ if (x < 1)
+ fun_l5_n651(x)
+ else
+ fun_l5_n606(x)
+ end
+end
+
+def fun_l4_n290(x)
+ if (x < 1)
+ fun_l5_n287(x)
+ else
+ fun_l5_n444(x)
+ end
+end
+
+def fun_l4_n291(x)
+ if (x < 1)
+ fun_l5_n181(x)
+ else
+ fun_l5_n195(x)
+ end
+end
+
+def fun_l4_n292(x)
+ if (x < 1)
+ fun_l5_n20(x)
+ else
+ fun_l5_n58(x)
+ end
+end
+
+def fun_l4_n293(x)
+ if (x < 1)
+ fun_l5_n185(x)
+ else
+ fun_l5_n759(x)
+ end
+end
+
+def fun_l4_n294(x)
+ if (x < 1)
+ fun_l5_n938(x)
+ else
+ fun_l5_n849(x)
+ end
+end
+
+def fun_l4_n295(x)
+ if (x < 1)
+ fun_l5_n187(x)
+ else
+ fun_l5_n469(x)
+ end
+end
+
+def fun_l4_n296(x)
+ if (x < 1)
+ fun_l5_n516(x)
+ else
+ fun_l5_n314(x)
+ end
+end
+
+def fun_l4_n297(x)
+ if (x < 1)
+ fun_l5_n585(x)
+ else
+ fun_l5_n344(x)
+ end
+end
+
+def fun_l4_n298(x)
+ if (x < 1)
+ fun_l5_n637(x)
+ else
+ fun_l5_n103(x)
+ end
+end
+
+def fun_l4_n299(x)
+ if (x < 1)
+ fun_l5_n185(x)
+ else
+ fun_l5_n593(x)
+ end
+end
+
+def fun_l4_n300(x)
+ if (x < 1)
+ fun_l5_n773(x)
+ else
+ fun_l5_n758(x)
+ end
+end
+
+def fun_l4_n301(x)
+ if (x < 1)
+ fun_l5_n444(x)
+ else
+ fun_l5_n945(x)
+ end
+end
+
+def fun_l4_n302(x)
+ if (x < 1)
+ fun_l5_n728(x)
+ else
+ fun_l5_n484(x)
+ end
+end
+
+def fun_l4_n303(x)
+ if (x < 1)
+ fun_l5_n412(x)
+ else
+ fun_l5_n106(x)
+ end
+end
+
+def fun_l4_n304(x)
+ if (x < 1)
+ fun_l5_n399(x)
+ else
+ fun_l5_n234(x)
+ end
+end
+
+def fun_l4_n305(x)
+ if (x < 1)
+ fun_l5_n886(x)
+ else
+ fun_l5_n406(x)
+ end
+end
+
+def fun_l4_n306(x)
+ if (x < 1)
+ fun_l5_n535(x)
+ else
+ fun_l5_n338(x)
+ end
+end
+
+def fun_l4_n307(x)
+ if (x < 1)
+ fun_l5_n898(x)
+ else
+ fun_l5_n859(x)
+ end
+end
+
+def fun_l4_n308(x)
+ if (x < 1)
+ fun_l5_n25(x)
+ else
+ fun_l5_n476(x)
+ end
+end
+
+def fun_l4_n309(x)
+ if (x < 1)
+ fun_l5_n451(x)
+ else
+ fun_l5_n665(x)
+ end
+end
+
+def fun_l4_n310(x)
+ if (x < 1)
+ fun_l5_n937(x)
+ else
+ fun_l5_n555(x)
+ end
+end
+
+def fun_l4_n311(x)
+ if (x < 1)
+ fun_l5_n982(x)
+ else
+ fun_l5_n89(x)
+ end
+end
+
+def fun_l4_n312(x)
+ if (x < 1)
+ fun_l5_n32(x)
+ else
+ fun_l5_n908(x)
+ end
+end
+
+def fun_l4_n313(x)
+ if (x < 1)
+ fun_l5_n963(x)
+ else
+ fun_l5_n267(x)
+ end
+end
+
+def fun_l4_n314(x)
+ if (x < 1)
+ fun_l5_n3(x)
+ else
+ fun_l5_n60(x)
+ end
+end
+
+def fun_l4_n315(x)
+ if (x < 1)
+ fun_l5_n763(x)
+ else
+ fun_l5_n488(x)
+ end
+end
+
+def fun_l4_n316(x)
+ if (x < 1)
+ fun_l5_n696(x)
+ else
+ fun_l5_n663(x)
+ end
+end
+
+def fun_l4_n317(x)
+ if (x < 1)
+ fun_l5_n851(x)
+ else
+ fun_l5_n487(x)
+ end
+end
+
+def fun_l4_n318(x)
+ if (x < 1)
+ fun_l5_n327(x)
+ else
+ fun_l5_n433(x)
+ end
+end
+
+def fun_l4_n319(x)
+ if (x < 1)
+ fun_l5_n242(x)
+ else
+ fun_l5_n471(x)
+ end
+end
+
+def fun_l4_n320(x)
+ if (x < 1)
+ fun_l5_n786(x)
+ else
+ fun_l5_n622(x)
+ end
+end
+
+def fun_l4_n321(x)
+ if (x < 1)
+ fun_l5_n94(x)
+ else
+ fun_l5_n934(x)
+ end
+end
+
+def fun_l4_n322(x)
+ if (x < 1)
+ fun_l5_n665(x)
+ else
+ fun_l5_n386(x)
+ end
+end
+
+def fun_l4_n323(x)
+ if (x < 1)
+ fun_l5_n96(x)
+ else
+ fun_l5_n466(x)
+ end
+end
+
+def fun_l4_n324(x)
+ if (x < 1)
+ fun_l5_n394(x)
+ else
+ fun_l5_n595(x)
+ end
+end
+
+def fun_l4_n325(x)
+ if (x < 1)
+ fun_l5_n544(x)
+ else
+ fun_l5_n688(x)
+ end
+end
+
+def fun_l4_n326(x)
+ if (x < 1)
+ fun_l5_n295(x)
+ else
+ fun_l5_n206(x)
+ end
+end
+
+def fun_l4_n327(x)
+ if (x < 1)
+ fun_l5_n128(x)
+ else
+ fun_l5_n607(x)
+ end
+end
+
+def fun_l4_n328(x)
+ if (x < 1)
+ fun_l5_n987(x)
+ else
+ fun_l5_n109(x)
+ end
+end
+
+def fun_l4_n329(x)
+ if (x < 1)
+ fun_l5_n842(x)
+ else
+ fun_l5_n217(x)
+ end
+end
+
+def fun_l4_n330(x)
+ if (x < 1)
+ fun_l5_n92(x)
+ else
+ fun_l5_n774(x)
+ end
+end
+
+def fun_l4_n331(x)
+ if (x < 1)
+ fun_l5_n761(x)
+ else
+ fun_l5_n34(x)
+ end
+end
+
+def fun_l4_n332(x)
+ if (x < 1)
+ fun_l5_n867(x)
+ else
+ fun_l5_n645(x)
+ end
+end
+
+def fun_l4_n333(x)
+ if (x < 1)
+ fun_l5_n273(x)
+ else
+ fun_l5_n592(x)
+ end
+end
+
+def fun_l4_n334(x)
+ if (x < 1)
+ fun_l5_n29(x)
+ else
+ fun_l5_n985(x)
+ end
+end
+
+def fun_l4_n335(x)
+ if (x < 1)
+ fun_l5_n430(x)
+ else
+ fun_l5_n346(x)
+ end
+end
+
+def fun_l4_n336(x)
+ if (x < 1)
+ fun_l5_n367(x)
+ else
+ fun_l5_n686(x)
+ end
+end
+
+def fun_l4_n337(x)
+ if (x < 1)
+ fun_l5_n868(x)
+ else
+ fun_l5_n5(x)
+ end
+end
+
+def fun_l4_n338(x)
+ if (x < 1)
+ fun_l5_n998(x)
+ else
+ fun_l5_n64(x)
+ end
+end
+
+def fun_l4_n339(x)
+ if (x < 1)
+ fun_l5_n594(x)
+ else
+ fun_l5_n311(x)
+ end
+end
+
+def fun_l4_n340(x)
+ if (x < 1)
+ fun_l5_n547(x)
+ else
+ fun_l5_n573(x)
+ end
+end
+
+def fun_l4_n341(x)
+ if (x < 1)
+ fun_l5_n590(x)
+ else
+ fun_l5_n923(x)
+ end
+end
+
+def fun_l4_n342(x)
+ if (x < 1)
+ fun_l5_n538(x)
+ else
+ fun_l5_n118(x)
+ end
+end
+
+def fun_l4_n343(x)
+ if (x < 1)
+ fun_l5_n322(x)
+ else
+ fun_l5_n970(x)
+ end
+end
+
+def fun_l4_n344(x)
+ if (x < 1)
+ fun_l5_n809(x)
+ else
+ fun_l5_n958(x)
+ end
+end
+
+def fun_l4_n345(x)
+ if (x < 1)
+ fun_l5_n613(x)
+ else
+ fun_l5_n199(x)
+ end
+end
+
+def fun_l4_n346(x)
+ if (x < 1)
+ fun_l5_n298(x)
+ else
+ fun_l5_n273(x)
+ end
+end
+
+def fun_l4_n347(x)
+ if (x < 1)
+ fun_l5_n560(x)
+ else
+ fun_l5_n803(x)
+ end
+end
+
+def fun_l4_n348(x)
+ if (x < 1)
+ fun_l5_n672(x)
+ else
+ fun_l5_n952(x)
+ end
+end
+
+def fun_l4_n349(x)
+ if (x < 1)
+ fun_l5_n58(x)
+ else
+ fun_l5_n267(x)
+ end
+end
+
+def fun_l4_n350(x)
+ if (x < 1)
+ fun_l5_n933(x)
+ else
+ fun_l5_n773(x)
+ end
+end
+
+def fun_l4_n351(x)
+ if (x < 1)
+ fun_l5_n574(x)
+ else
+ fun_l5_n692(x)
+ end
+end
+
+def fun_l4_n352(x)
+ if (x < 1)
+ fun_l5_n537(x)
+ else
+ fun_l5_n312(x)
+ end
+end
+
+def fun_l4_n353(x)
+ if (x < 1)
+ fun_l5_n696(x)
+ else
+ fun_l5_n739(x)
+ end
+end
+
+def fun_l4_n354(x)
+ if (x < 1)
+ fun_l5_n871(x)
+ else
+ fun_l5_n86(x)
+ end
+end
+
+def fun_l4_n355(x)
+ if (x < 1)
+ fun_l5_n3(x)
+ else
+ fun_l5_n950(x)
+ end
+end
+
+def fun_l4_n356(x)
+ if (x < 1)
+ fun_l5_n560(x)
+ else
+ fun_l5_n45(x)
+ end
+end
+
+def fun_l4_n357(x)
+ if (x < 1)
+ fun_l5_n666(x)
+ else
+ fun_l5_n874(x)
+ end
+end
+
+def fun_l4_n358(x)
+ if (x < 1)
+ fun_l5_n457(x)
+ else
+ fun_l5_n7(x)
+ end
+end
+
+def fun_l4_n359(x)
+ if (x < 1)
+ fun_l5_n674(x)
+ else
+ fun_l5_n985(x)
+ end
+end
+
+def fun_l4_n360(x)
+ if (x < 1)
+ fun_l5_n960(x)
+ else
+ fun_l5_n228(x)
+ end
+end
+
+def fun_l4_n361(x)
+ if (x < 1)
+ fun_l5_n597(x)
+ else
+ fun_l5_n487(x)
+ end
+end
+
+def fun_l4_n362(x)
+ if (x < 1)
+ fun_l5_n145(x)
+ else
+ fun_l5_n278(x)
+ end
+end
+
+def fun_l4_n363(x)
+ if (x < 1)
+ fun_l5_n949(x)
+ else
+ fun_l5_n353(x)
+ end
+end
+
+def fun_l4_n364(x)
+ if (x < 1)
+ fun_l5_n604(x)
+ else
+ fun_l5_n250(x)
+ end
+end
+
+def fun_l4_n365(x)
+ if (x < 1)
+ fun_l5_n347(x)
+ else
+ fun_l5_n964(x)
+ end
+end
+
+def fun_l4_n366(x)
+ if (x < 1)
+ fun_l5_n331(x)
+ else
+ fun_l5_n657(x)
+ end
+end
+
+def fun_l4_n367(x)
+ if (x < 1)
+ fun_l5_n74(x)
+ else
+ fun_l5_n504(x)
+ end
+end
+
+def fun_l4_n368(x)
+ if (x < 1)
+ fun_l5_n9(x)
+ else
+ fun_l5_n993(x)
+ end
+end
+
+def fun_l4_n369(x)
+ if (x < 1)
+ fun_l5_n492(x)
+ else
+ fun_l5_n155(x)
+ end
+end
+
+def fun_l4_n370(x)
+ if (x < 1)
+ fun_l5_n848(x)
+ else
+ fun_l5_n178(x)
+ end
+end
+
+def fun_l4_n371(x)
+ if (x < 1)
+ fun_l5_n395(x)
+ else
+ fun_l5_n837(x)
+ end
+end
+
+def fun_l4_n372(x)
+ if (x < 1)
+ fun_l5_n834(x)
+ else
+ fun_l5_n719(x)
+ end
+end
+
+def fun_l4_n373(x)
+ if (x < 1)
+ fun_l5_n422(x)
+ else
+ fun_l5_n776(x)
+ end
+end
+
+def fun_l4_n374(x)
+ if (x < 1)
+ fun_l5_n293(x)
+ else
+ fun_l5_n385(x)
+ end
+end
+
+def fun_l4_n375(x)
+ if (x < 1)
+ fun_l5_n487(x)
+ else
+ fun_l5_n61(x)
+ end
+end
+
+def fun_l4_n376(x)
+ if (x < 1)
+ fun_l5_n493(x)
+ else
+ fun_l5_n629(x)
+ end
+end
+
+def fun_l4_n377(x)
+ if (x < 1)
+ fun_l5_n429(x)
+ else
+ fun_l5_n157(x)
+ end
+end
+
+def fun_l4_n378(x)
+ if (x < 1)
+ fun_l5_n145(x)
+ else
+ fun_l5_n988(x)
+ end
+end
+
+def fun_l4_n379(x)
+ if (x < 1)
+ fun_l5_n430(x)
+ else
+ fun_l5_n246(x)
+ end
+end
+
+def fun_l4_n380(x)
+ if (x < 1)
+ fun_l5_n320(x)
+ else
+ fun_l5_n284(x)
+ end
+end
+
+def fun_l4_n381(x)
+ if (x < 1)
+ fun_l5_n278(x)
+ else
+ fun_l5_n163(x)
+ end
+end
+
+def fun_l4_n382(x)
+ if (x < 1)
+ fun_l5_n999(x)
+ else
+ fun_l5_n907(x)
+ end
+end
+
+def fun_l4_n383(x)
+ if (x < 1)
+ fun_l5_n979(x)
+ else
+ fun_l5_n370(x)
+ end
+end
+
+def fun_l4_n384(x)
+ if (x < 1)
+ fun_l5_n58(x)
+ else
+ fun_l5_n60(x)
+ end
+end
+
+def fun_l4_n385(x)
+ if (x < 1)
+ fun_l5_n558(x)
+ else
+ fun_l5_n524(x)
+ end
+end
+
+def fun_l4_n386(x)
+ if (x < 1)
+ fun_l5_n438(x)
+ else
+ fun_l5_n799(x)
+ end
+end
+
+def fun_l4_n387(x)
+ if (x < 1)
+ fun_l5_n696(x)
+ else
+ fun_l5_n463(x)
+ end
+end
+
+def fun_l4_n388(x)
+ if (x < 1)
+ fun_l5_n376(x)
+ else
+ fun_l5_n943(x)
+ end
+end
+
+def fun_l4_n389(x)
+ if (x < 1)
+ fun_l5_n21(x)
+ else
+ fun_l5_n663(x)
+ end
+end
+
+def fun_l4_n390(x)
+ if (x < 1)
+ fun_l5_n8(x)
+ else
+ fun_l5_n348(x)
+ end
+end
+
+def fun_l4_n391(x)
+ if (x < 1)
+ fun_l5_n908(x)
+ else
+ fun_l5_n695(x)
+ end
+end
+
+def fun_l4_n392(x)
+ if (x < 1)
+ fun_l5_n97(x)
+ else
+ fun_l5_n466(x)
+ end
+end
+
+def fun_l4_n393(x)
+ if (x < 1)
+ fun_l5_n480(x)
+ else
+ fun_l5_n972(x)
+ end
+end
+
+def fun_l4_n394(x)
+ if (x < 1)
+ fun_l5_n85(x)
+ else
+ fun_l5_n849(x)
+ end
+end
+
+def fun_l4_n395(x)
+ if (x < 1)
+ fun_l5_n897(x)
+ else
+ fun_l5_n13(x)
+ end
+end
+
+def fun_l4_n396(x)
+ if (x < 1)
+ fun_l5_n628(x)
+ else
+ fun_l5_n743(x)
+ end
+end
+
+def fun_l4_n397(x)
+ if (x < 1)
+ fun_l5_n29(x)
+ else
+ fun_l5_n185(x)
+ end
+end
+
+def fun_l4_n398(x)
+ if (x < 1)
+ fun_l5_n464(x)
+ else
+ fun_l5_n742(x)
+ end
+end
+
+def fun_l4_n399(x)
+ if (x < 1)
+ fun_l5_n348(x)
+ else
+ fun_l5_n70(x)
+ end
+end
+
+def fun_l4_n400(x)
+ if (x < 1)
+ fun_l5_n397(x)
+ else
+ fun_l5_n132(x)
+ end
+end
+
+def fun_l4_n401(x)
+ if (x < 1)
+ fun_l5_n433(x)
+ else
+ fun_l5_n77(x)
+ end
+end
+
+def fun_l4_n402(x)
+ if (x < 1)
+ fun_l5_n24(x)
+ else
+ fun_l5_n160(x)
+ end
+end
+
+def fun_l4_n403(x)
+ if (x < 1)
+ fun_l5_n738(x)
+ else
+ fun_l5_n174(x)
+ end
+end
+
+def fun_l4_n404(x)
+ if (x < 1)
+ fun_l5_n585(x)
+ else
+ fun_l5_n670(x)
+ end
+end
+
+def fun_l4_n405(x)
+ if (x < 1)
+ fun_l5_n320(x)
+ else
+ fun_l5_n245(x)
+ end
+end
+
+def fun_l4_n406(x)
+ if (x < 1)
+ fun_l5_n357(x)
+ else
+ fun_l5_n742(x)
+ end
+end
+
+def fun_l4_n407(x)
+ if (x < 1)
+ fun_l5_n16(x)
+ else
+ fun_l5_n227(x)
+ end
+end
+
+def fun_l4_n408(x)
+ if (x < 1)
+ fun_l5_n360(x)
+ else
+ fun_l5_n18(x)
+ end
+end
+
+def fun_l4_n409(x)
+ if (x < 1)
+ fun_l5_n210(x)
+ else
+ fun_l5_n253(x)
+ end
+end
+
+def fun_l4_n410(x)
+ if (x < 1)
+ fun_l5_n915(x)
+ else
+ fun_l5_n605(x)
+ end
+end
+
+def fun_l4_n411(x)
+ if (x < 1)
+ fun_l5_n511(x)
+ else
+ fun_l5_n118(x)
+ end
+end
+
+def fun_l4_n412(x)
+ if (x < 1)
+ fun_l5_n680(x)
+ else
+ fun_l5_n445(x)
+ end
+end
+
+def fun_l4_n413(x)
+ if (x < 1)
+ fun_l5_n722(x)
+ else
+ fun_l5_n440(x)
+ end
+end
+
+def fun_l4_n414(x)
+ if (x < 1)
+ fun_l5_n382(x)
+ else
+ fun_l5_n380(x)
+ end
+end
+
+def fun_l4_n415(x)
+ if (x < 1)
+ fun_l5_n548(x)
+ else
+ fun_l5_n462(x)
+ end
+end
+
+def fun_l4_n416(x)
+ if (x < 1)
+ fun_l5_n12(x)
+ else
+ fun_l5_n952(x)
+ end
+end
+
+def fun_l4_n417(x)
+ if (x < 1)
+ fun_l5_n814(x)
+ else
+ fun_l5_n720(x)
+ end
+end
+
+def fun_l4_n418(x)
+ if (x < 1)
+ fun_l5_n804(x)
+ else
+ fun_l5_n243(x)
+ end
+end
+
+def fun_l4_n419(x)
+ if (x < 1)
+ fun_l5_n654(x)
+ else
+ fun_l5_n950(x)
+ end
+end
+
+def fun_l4_n420(x)
+ if (x < 1)
+ fun_l5_n998(x)
+ else
+ fun_l5_n937(x)
+ end
+end
+
+def fun_l4_n421(x)
+ if (x < 1)
+ fun_l5_n476(x)
+ else
+ fun_l5_n245(x)
+ end
+end
+
+def fun_l4_n422(x)
+ if (x < 1)
+ fun_l5_n778(x)
+ else
+ fun_l5_n950(x)
+ end
+end
+
+def fun_l4_n423(x)
+ if (x < 1)
+ fun_l5_n255(x)
+ else
+ fun_l5_n438(x)
+ end
+end
+
+def fun_l4_n424(x)
+ if (x < 1)
+ fun_l5_n906(x)
+ else
+ fun_l5_n329(x)
+ end
+end
+
+def fun_l4_n425(x)
+ if (x < 1)
+ fun_l5_n644(x)
+ else
+ fun_l5_n512(x)
+ end
+end
+
+def fun_l4_n426(x)
+ if (x < 1)
+ fun_l5_n558(x)
+ else
+ fun_l5_n925(x)
+ end
+end
+
+def fun_l4_n427(x)
+ if (x < 1)
+ fun_l5_n745(x)
+ else
+ fun_l5_n168(x)
+ end
+end
+
+def fun_l4_n428(x)
+ if (x < 1)
+ fun_l5_n682(x)
+ else
+ fun_l5_n298(x)
+ end
+end
+
+def fun_l4_n429(x)
+ if (x < 1)
+ fun_l5_n211(x)
+ else
+ fun_l5_n608(x)
+ end
+end
+
+def fun_l4_n430(x)
+ if (x < 1)
+ fun_l5_n100(x)
+ else
+ fun_l5_n287(x)
+ end
+end
+
+def fun_l4_n431(x)
+ if (x < 1)
+ fun_l5_n513(x)
+ else
+ fun_l5_n764(x)
+ end
+end
+
+def fun_l4_n432(x)
+ if (x < 1)
+ fun_l5_n925(x)
+ else
+ fun_l5_n857(x)
+ end
+end
+
+def fun_l4_n433(x)
+ if (x < 1)
+ fun_l5_n279(x)
+ else
+ fun_l5_n429(x)
+ end
+end
+
+def fun_l4_n434(x)
+ if (x < 1)
+ fun_l5_n376(x)
+ else
+ fun_l5_n811(x)
+ end
+end
+
+def fun_l4_n435(x)
+ if (x < 1)
+ fun_l5_n893(x)
+ else
+ fun_l5_n136(x)
+ end
+end
+
+def fun_l4_n436(x)
+ if (x < 1)
+ fun_l5_n283(x)
+ else
+ fun_l5_n508(x)
+ end
+end
+
+def fun_l4_n437(x)
+ if (x < 1)
+ fun_l5_n478(x)
+ else
+ fun_l5_n589(x)
+ end
+end
+
+def fun_l4_n438(x)
+ if (x < 1)
+ fun_l5_n460(x)
+ else
+ fun_l5_n589(x)
+ end
+end
+
+def fun_l4_n439(x)
+ if (x < 1)
+ fun_l5_n745(x)
+ else
+ fun_l5_n5(x)
+ end
+end
+
+def fun_l4_n440(x)
+ if (x < 1)
+ fun_l5_n601(x)
+ else
+ fun_l5_n322(x)
+ end
+end
+
+def fun_l4_n441(x)
+ if (x < 1)
+ fun_l5_n703(x)
+ else
+ fun_l5_n322(x)
+ end
+end
+
+def fun_l4_n442(x)
+ if (x < 1)
+ fun_l5_n810(x)
+ else
+ fun_l5_n363(x)
+ end
+end
+
+def fun_l4_n443(x)
+ if (x < 1)
+ fun_l5_n62(x)
+ else
+ fun_l5_n995(x)
+ end
+end
+
+def fun_l4_n444(x)
+ if (x < 1)
+ fun_l5_n196(x)
+ else
+ fun_l5_n156(x)
+ end
+end
+
+def fun_l4_n445(x)
+ if (x < 1)
+ fun_l5_n456(x)
+ else
+ fun_l5_n820(x)
+ end
+end
+
+def fun_l4_n446(x)
+ if (x < 1)
+ fun_l5_n592(x)
+ else
+ fun_l5_n214(x)
+ end
+end
+
+def fun_l4_n447(x)
+ if (x < 1)
+ fun_l5_n44(x)
+ else
+ fun_l5_n769(x)
+ end
+end
+
+def fun_l4_n448(x)
+ if (x < 1)
+ fun_l5_n694(x)
+ else
+ fun_l5_n909(x)
+ end
+end
+
+def fun_l4_n449(x)
+ if (x < 1)
+ fun_l5_n297(x)
+ else
+ fun_l5_n134(x)
+ end
+end
+
+def fun_l4_n450(x)
+ if (x < 1)
+ fun_l5_n878(x)
+ else
+ fun_l5_n475(x)
+ end
+end
+
+def fun_l4_n451(x)
+ if (x < 1)
+ fun_l5_n338(x)
+ else
+ fun_l5_n700(x)
+ end
+end
+
+def fun_l4_n452(x)
+ if (x < 1)
+ fun_l5_n813(x)
+ else
+ fun_l5_n41(x)
+ end
+end
+
+def fun_l4_n453(x)
+ if (x < 1)
+ fun_l5_n176(x)
+ else
+ fun_l5_n758(x)
+ end
+end
+
+def fun_l4_n454(x)
+ if (x < 1)
+ fun_l5_n60(x)
+ else
+ fun_l5_n608(x)
+ end
+end
+
+def fun_l4_n455(x)
+ if (x < 1)
+ fun_l5_n550(x)
+ else
+ fun_l5_n607(x)
+ end
+end
+
+def fun_l4_n456(x)
+ if (x < 1)
+ fun_l5_n535(x)
+ else
+ fun_l5_n478(x)
+ end
+end
+
+def fun_l4_n457(x)
+ if (x < 1)
+ fun_l5_n768(x)
+ else
+ fun_l5_n613(x)
+ end
+end
+
+def fun_l4_n458(x)
+ if (x < 1)
+ fun_l5_n686(x)
+ else
+ fun_l5_n208(x)
+ end
+end
+
+def fun_l4_n459(x)
+ if (x < 1)
+ fun_l5_n167(x)
+ else
+ fun_l5_n875(x)
+ end
+end
+
+def fun_l4_n460(x)
+ if (x < 1)
+ fun_l5_n202(x)
+ else
+ fun_l5_n440(x)
+ end
+end
+
+def fun_l4_n461(x)
+ if (x < 1)
+ fun_l5_n64(x)
+ else
+ fun_l5_n845(x)
+ end
+end
+
+def fun_l4_n462(x)
+ if (x < 1)
+ fun_l5_n914(x)
+ else
+ fun_l5_n699(x)
+ end
+end
+
+def fun_l4_n463(x)
+ if (x < 1)
+ fun_l5_n204(x)
+ else
+ fun_l5_n723(x)
+ end
+end
+
+def fun_l4_n464(x)
+ if (x < 1)
+ fun_l5_n549(x)
+ else
+ fun_l5_n375(x)
+ end
+end
+
+def fun_l4_n465(x)
+ if (x < 1)
+ fun_l5_n610(x)
+ else
+ fun_l5_n596(x)
+ end
+end
+
+def fun_l4_n466(x)
+ if (x < 1)
+ fun_l5_n571(x)
+ else
+ fun_l5_n618(x)
+ end
+end
+
+def fun_l4_n467(x)
+ if (x < 1)
+ fun_l5_n428(x)
+ else
+ fun_l5_n315(x)
+ end
+end
+
+def fun_l4_n468(x)
+ if (x < 1)
+ fun_l5_n19(x)
+ else
+ fun_l5_n301(x)
+ end
+end
+
+def fun_l4_n469(x)
+ if (x < 1)
+ fun_l5_n38(x)
+ else
+ fun_l5_n895(x)
+ end
+end
+
+def fun_l4_n470(x)
+ if (x < 1)
+ fun_l5_n815(x)
+ else
+ fun_l5_n303(x)
+ end
+end
+
+def fun_l4_n471(x)
+ if (x < 1)
+ fun_l5_n876(x)
+ else
+ fun_l5_n221(x)
+ end
+end
+
+def fun_l4_n472(x)
+ if (x < 1)
+ fun_l5_n199(x)
+ else
+ fun_l5_n354(x)
+ end
+end
+
+def fun_l4_n473(x)
+ if (x < 1)
+ fun_l5_n56(x)
+ else
+ fun_l5_n197(x)
+ end
+end
+
+def fun_l4_n474(x)
+ if (x < 1)
+ fun_l5_n573(x)
+ else
+ fun_l5_n616(x)
+ end
+end
+
+def fun_l4_n475(x)
+ if (x < 1)
+ fun_l5_n313(x)
+ else
+ fun_l5_n209(x)
+ end
+end
+
+def fun_l4_n476(x)
+ if (x < 1)
+ fun_l5_n229(x)
+ else
+ fun_l5_n51(x)
+ end
+end
+
+def fun_l4_n477(x)
+ if (x < 1)
+ fun_l5_n768(x)
+ else
+ fun_l5_n368(x)
+ end
+end
+
+def fun_l4_n478(x)
+ if (x < 1)
+ fun_l5_n80(x)
+ else
+ fun_l5_n411(x)
+ end
+end
+
+def fun_l4_n479(x)
+ if (x < 1)
+ fun_l5_n221(x)
+ else
+ fun_l5_n266(x)
+ end
+end
+
+def fun_l4_n480(x)
+ if (x < 1)
+ fun_l5_n463(x)
+ else
+ fun_l5_n71(x)
+ end
+end
+
+def fun_l4_n481(x)
+ if (x < 1)
+ fun_l5_n446(x)
+ else
+ fun_l5_n610(x)
+ end
+end
+
+def fun_l4_n482(x)
+ if (x < 1)
+ fun_l5_n402(x)
+ else
+ fun_l5_n449(x)
+ end
+end
+
+def fun_l4_n483(x)
+ if (x < 1)
+ fun_l5_n497(x)
+ else
+ fun_l5_n521(x)
+ end
+end
+
+def fun_l4_n484(x)
+ if (x < 1)
+ fun_l5_n895(x)
+ else
+ fun_l5_n361(x)
+ end
+end
+
+def fun_l4_n485(x)
+ if (x < 1)
+ fun_l5_n254(x)
+ else
+ fun_l5_n366(x)
+ end
+end
+
+def fun_l4_n486(x)
+ if (x < 1)
+ fun_l5_n179(x)
+ else
+ fun_l5_n154(x)
+ end
+end
+
+def fun_l4_n487(x)
+ if (x < 1)
+ fun_l5_n905(x)
+ else
+ fun_l5_n30(x)
+ end
+end
+
+def fun_l4_n488(x)
+ if (x < 1)
+ fun_l5_n989(x)
+ else
+ fun_l5_n640(x)
+ end
+end
+
+def fun_l4_n489(x)
+ if (x < 1)
+ fun_l5_n664(x)
+ else
+ fun_l5_n87(x)
+ end
+end
+
+def fun_l4_n490(x)
+ if (x < 1)
+ fun_l5_n518(x)
+ else
+ fun_l5_n986(x)
+ end
+end
+
+def fun_l4_n491(x)
+ if (x < 1)
+ fun_l5_n443(x)
+ else
+ fun_l5_n532(x)
+ end
+end
+
+def fun_l4_n492(x)
+ if (x < 1)
+ fun_l5_n349(x)
+ else
+ fun_l5_n373(x)
+ end
+end
+
+def fun_l4_n493(x)
+ if (x < 1)
+ fun_l5_n208(x)
+ else
+ fun_l5_n404(x)
+ end
+end
+
+def fun_l4_n494(x)
+ if (x < 1)
+ fun_l5_n12(x)
+ else
+ fun_l5_n608(x)
+ end
+end
+
+def fun_l4_n495(x)
+ if (x < 1)
+ fun_l5_n12(x)
+ else
+ fun_l5_n42(x)
+ end
+end
+
+def fun_l4_n496(x)
+ if (x < 1)
+ fun_l5_n442(x)
+ else
+ fun_l5_n809(x)
+ end
+end
+
+def fun_l4_n497(x)
+ if (x < 1)
+ fun_l5_n266(x)
+ else
+ fun_l5_n259(x)
+ end
+end
+
+def fun_l4_n498(x)
+ if (x < 1)
+ fun_l5_n342(x)
+ else
+ fun_l5_n275(x)
+ end
+end
+
+def fun_l4_n499(x)
+ if (x < 1)
+ fun_l5_n806(x)
+ else
+ fun_l5_n797(x)
+ end
+end
+
+def fun_l4_n500(x)
+ if (x < 1)
+ fun_l5_n646(x)
+ else
+ fun_l5_n426(x)
+ end
+end
+
+def fun_l4_n501(x)
+ if (x < 1)
+ fun_l5_n593(x)
+ else
+ fun_l5_n860(x)
+ end
+end
+
+def fun_l4_n502(x)
+ if (x < 1)
+ fun_l5_n355(x)
+ else
+ fun_l5_n411(x)
+ end
+end
+
+def fun_l4_n503(x)
+ if (x < 1)
+ fun_l5_n602(x)
+ else
+ fun_l5_n26(x)
+ end
+end
+
+def fun_l4_n504(x)
+ if (x < 1)
+ fun_l5_n875(x)
+ else
+ fun_l5_n716(x)
+ end
+end
+
+def fun_l4_n505(x)
+ if (x < 1)
+ fun_l5_n248(x)
+ else
+ fun_l5_n388(x)
+ end
+end
+
+def fun_l4_n506(x)
+ if (x < 1)
+ fun_l5_n273(x)
+ else
+ fun_l5_n11(x)
+ end
+end
+
+def fun_l4_n507(x)
+ if (x < 1)
+ fun_l5_n291(x)
+ else
+ fun_l5_n464(x)
+ end
+end
+
+def fun_l4_n508(x)
+ if (x < 1)
+ fun_l5_n309(x)
+ else
+ fun_l5_n198(x)
+ end
+end
+
+def fun_l4_n509(x)
+ if (x < 1)
+ fun_l5_n85(x)
+ else
+ fun_l5_n375(x)
+ end
+end
+
+def fun_l4_n510(x)
+ if (x < 1)
+ fun_l5_n993(x)
+ else
+ fun_l5_n311(x)
+ end
+end
+
+def fun_l4_n511(x)
+ if (x < 1)
+ fun_l5_n824(x)
+ else
+ fun_l5_n936(x)
+ end
+end
+
+def fun_l4_n512(x)
+ if (x < 1)
+ fun_l5_n897(x)
+ else
+ fun_l5_n995(x)
+ end
+end
+
+def fun_l4_n513(x)
+ if (x < 1)
+ fun_l5_n443(x)
+ else
+ fun_l5_n511(x)
+ end
+end
+
+def fun_l4_n514(x)
+ if (x < 1)
+ fun_l5_n965(x)
+ else
+ fun_l5_n506(x)
+ end
+end
+
+def fun_l4_n515(x)
+ if (x < 1)
+ fun_l5_n480(x)
+ else
+ fun_l5_n464(x)
+ end
+end
+
+def fun_l4_n516(x)
+ if (x < 1)
+ fun_l5_n396(x)
+ else
+ fun_l5_n355(x)
+ end
+end
+
+def fun_l4_n517(x)
+ if (x < 1)
+ fun_l5_n782(x)
+ else
+ fun_l5_n624(x)
+ end
+end
+
+def fun_l4_n518(x)
+ if (x < 1)
+ fun_l5_n460(x)
+ else
+ fun_l5_n920(x)
+ end
+end
+
+def fun_l4_n519(x)
+ if (x < 1)
+ fun_l5_n258(x)
+ else
+ fun_l5_n296(x)
+ end
+end
+
+def fun_l4_n520(x)
+ if (x < 1)
+ fun_l5_n344(x)
+ else
+ fun_l5_n180(x)
+ end
+end
+
+def fun_l4_n521(x)
+ if (x < 1)
+ fun_l5_n489(x)
+ else
+ fun_l5_n818(x)
+ end
+end
+
+def fun_l4_n522(x)
+ if (x < 1)
+ fun_l5_n246(x)
+ else
+ fun_l5_n25(x)
+ end
+end
+
+def fun_l4_n523(x)
+ if (x < 1)
+ fun_l5_n165(x)
+ else
+ fun_l5_n835(x)
+ end
+end
+
+def fun_l4_n524(x)
+ if (x < 1)
+ fun_l5_n836(x)
+ else
+ fun_l5_n838(x)
+ end
+end
+
+def fun_l4_n525(x)
+ if (x < 1)
+ fun_l5_n781(x)
+ else
+ fun_l5_n183(x)
+ end
+end
+
+def fun_l4_n526(x)
+ if (x < 1)
+ fun_l5_n335(x)
+ else
+ fun_l5_n809(x)
+ end
+end
+
+def fun_l4_n527(x)
+ if (x < 1)
+ fun_l5_n748(x)
+ else
+ fun_l5_n674(x)
+ end
+end
+
+def fun_l4_n528(x)
+ if (x < 1)
+ fun_l5_n612(x)
+ else
+ fun_l5_n933(x)
+ end
+end
+
+def fun_l4_n529(x)
+ if (x < 1)
+ fun_l5_n781(x)
+ else
+ fun_l5_n174(x)
+ end
+end
+
+def fun_l4_n530(x)
+ if (x < 1)
+ fun_l5_n299(x)
+ else
+ fun_l5_n677(x)
+ end
+end
+
+def fun_l4_n531(x)
+ if (x < 1)
+ fun_l5_n455(x)
+ else
+ fun_l5_n211(x)
+ end
+end
+
+def fun_l4_n532(x)
+ if (x < 1)
+ fun_l5_n981(x)
+ else
+ fun_l5_n605(x)
+ end
+end
+
+def fun_l4_n533(x)
+ if (x < 1)
+ fun_l5_n299(x)
+ else
+ fun_l5_n30(x)
+ end
+end
+
+def fun_l4_n534(x)
+ if (x < 1)
+ fun_l5_n642(x)
+ else
+ fun_l5_n652(x)
+ end
+end
+
+def fun_l4_n535(x)
+ if (x < 1)
+ fun_l5_n904(x)
+ else
+ fun_l5_n133(x)
+ end
+end
+
+def fun_l4_n536(x)
+ if (x < 1)
+ fun_l5_n253(x)
+ else
+ fun_l5_n330(x)
+ end
+end
+
+def fun_l4_n537(x)
+ if (x < 1)
+ fun_l5_n357(x)
+ else
+ fun_l5_n533(x)
+ end
+end
+
+def fun_l4_n538(x)
+ if (x < 1)
+ fun_l5_n484(x)
+ else
+ fun_l5_n438(x)
+ end
+end
+
+def fun_l4_n539(x)
+ if (x < 1)
+ fun_l5_n200(x)
+ else
+ fun_l5_n235(x)
+ end
+end
+
+def fun_l4_n540(x)
+ if (x < 1)
+ fun_l5_n929(x)
+ else
+ fun_l5_n875(x)
+ end
+end
+
+def fun_l4_n541(x)
+ if (x < 1)
+ fun_l5_n237(x)
+ else
+ fun_l5_n688(x)
+ end
+end
+
+def fun_l4_n542(x)
+ if (x < 1)
+ fun_l5_n723(x)
+ else
+ fun_l5_n308(x)
+ end
+end
+
+def fun_l4_n543(x)
+ if (x < 1)
+ fun_l5_n591(x)
+ else
+ fun_l5_n93(x)
+ end
+end
+
+def fun_l4_n544(x)
+ if (x < 1)
+ fun_l5_n258(x)
+ else
+ fun_l5_n205(x)
+ end
+end
+
+def fun_l4_n545(x)
+ if (x < 1)
+ fun_l5_n910(x)
+ else
+ fun_l5_n529(x)
+ end
+end
+
+def fun_l4_n546(x)
+ if (x < 1)
+ fun_l5_n255(x)
+ else
+ fun_l5_n262(x)
+ end
+end
+
+def fun_l4_n547(x)
+ if (x < 1)
+ fun_l5_n674(x)
+ else
+ fun_l5_n813(x)
+ end
+end
+
+def fun_l4_n548(x)
+ if (x < 1)
+ fun_l5_n9(x)
+ else
+ fun_l5_n709(x)
+ end
+end
+
+def fun_l4_n549(x)
+ if (x < 1)
+ fun_l5_n146(x)
+ else
+ fun_l5_n801(x)
+ end
+end
+
+def fun_l4_n550(x)
+ if (x < 1)
+ fun_l5_n593(x)
+ else
+ fun_l5_n428(x)
+ end
+end
+
+def fun_l4_n551(x)
+ if (x < 1)
+ fun_l5_n953(x)
+ else
+ fun_l5_n238(x)
+ end
+end
+
+def fun_l4_n552(x)
+ if (x < 1)
+ fun_l5_n900(x)
+ else
+ fun_l5_n246(x)
+ end
+end
+
+def fun_l4_n553(x)
+ if (x < 1)
+ fun_l5_n400(x)
+ else
+ fun_l5_n150(x)
+ end
+end
+
+def fun_l4_n554(x)
+ if (x < 1)
+ fun_l5_n697(x)
+ else
+ fun_l5_n681(x)
+ end
+end
+
+def fun_l4_n555(x)
+ if (x < 1)
+ fun_l5_n487(x)
+ else
+ fun_l5_n784(x)
+ end
+end
+
+def fun_l4_n556(x)
+ if (x < 1)
+ fun_l5_n485(x)
+ else
+ fun_l5_n984(x)
+ end
+end
+
+def fun_l4_n557(x)
+ if (x < 1)
+ fun_l5_n774(x)
+ else
+ fun_l5_n864(x)
+ end
+end
+
+def fun_l4_n558(x)
+ if (x < 1)
+ fun_l5_n823(x)
+ else
+ fun_l5_n527(x)
+ end
+end
+
+def fun_l4_n559(x)
+ if (x < 1)
+ fun_l5_n204(x)
+ else
+ fun_l5_n114(x)
+ end
+end
+
+def fun_l4_n560(x)
+ if (x < 1)
+ fun_l5_n835(x)
+ else
+ fun_l5_n930(x)
+ end
+end
+
+def fun_l4_n561(x)
+ if (x < 1)
+ fun_l5_n481(x)
+ else
+ fun_l5_n471(x)
+ end
+end
+
+def fun_l4_n562(x)
+ if (x < 1)
+ fun_l5_n459(x)
+ else
+ fun_l5_n526(x)
+ end
+end
+
+def fun_l4_n563(x)
+ if (x < 1)
+ fun_l5_n148(x)
+ else
+ fun_l5_n473(x)
+ end
+end
+
+def fun_l4_n564(x)
+ if (x < 1)
+ fun_l5_n448(x)
+ else
+ fun_l5_n389(x)
+ end
+end
+
+def fun_l4_n565(x)
+ if (x < 1)
+ fun_l5_n600(x)
+ else
+ fun_l5_n405(x)
+ end
+end
+
+def fun_l4_n566(x)
+ if (x < 1)
+ fun_l5_n67(x)
+ else
+ fun_l5_n145(x)
+ end
+end
+
+def fun_l4_n567(x)
+ if (x < 1)
+ fun_l5_n63(x)
+ else
+ fun_l5_n206(x)
+ end
+end
+
+def fun_l4_n568(x)
+ if (x < 1)
+ fun_l5_n463(x)
+ else
+ fun_l5_n288(x)
+ end
+end
+
+def fun_l4_n569(x)
+ if (x < 1)
+ fun_l5_n143(x)
+ else
+ fun_l5_n879(x)
+ end
+end
+
+def fun_l4_n570(x)
+ if (x < 1)
+ fun_l5_n552(x)
+ else
+ fun_l5_n517(x)
+ end
+end
+
+def fun_l4_n571(x)
+ if (x < 1)
+ fun_l5_n527(x)
+ else
+ fun_l5_n680(x)
+ end
+end
+
+def fun_l4_n572(x)
+ if (x < 1)
+ fun_l5_n853(x)
+ else
+ fun_l5_n97(x)
+ end
+end
+
+def fun_l4_n573(x)
+ if (x < 1)
+ fun_l5_n277(x)
+ else
+ fun_l5_n329(x)
+ end
+end
+
+def fun_l4_n574(x)
+ if (x < 1)
+ fun_l5_n952(x)
+ else
+ fun_l5_n515(x)
+ end
+end
+
+def fun_l4_n575(x)
+ if (x < 1)
+ fun_l5_n64(x)
+ else
+ fun_l5_n576(x)
+ end
+end
+
+def fun_l4_n576(x)
+ if (x < 1)
+ fun_l5_n57(x)
+ else
+ fun_l5_n499(x)
+ end
+end
+
+def fun_l4_n577(x)
+ if (x < 1)
+ fun_l5_n10(x)
+ else
+ fun_l5_n208(x)
+ end
+end
+
+def fun_l4_n578(x)
+ if (x < 1)
+ fun_l5_n361(x)
+ else
+ fun_l5_n447(x)
+ end
+end
+
+def fun_l4_n579(x)
+ if (x < 1)
+ fun_l5_n788(x)
+ else
+ fun_l5_n299(x)
+ end
+end
+
+def fun_l4_n580(x)
+ if (x < 1)
+ fun_l5_n973(x)
+ else
+ fun_l5_n753(x)
+ end
+end
+
+def fun_l4_n581(x)
+ if (x < 1)
+ fun_l5_n862(x)
+ else
+ fun_l5_n110(x)
+ end
+end
+
+def fun_l4_n582(x)
+ if (x < 1)
+ fun_l5_n760(x)
+ else
+ fun_l5_n256(x)
+ end
+end
+
+def fun_l4_n583(x)
+ if (x < 1)
+ fun_l5_n8(x)
+ else
+ fun_l5_n922(x)
+ end
+end
+
+def fun_l4_n584(x)
+ if (x < 1)
+ fun_l5_n100(x)
+ else
+ fun_l5_n230(x)
+ end
+end
+
+def fun_l4_n585(x)
+ if (x < 1)
+ fun_l5_n183(x)
+ else
+ fun_l5_n350(x)
+ end
+end
+
+def fun_l4_n586(x)
+ if (x < 1)
+ fun_l5_n294(x)
+ else
+ fun_l5_n673(x)
+ end
+end
+
+def fun_l4_n587(x)
+ if (x < 1)
+ fun_l5_n51(x)
+ else
+ fun_l5_n718(x)
+ end
+end
+
+def fun_l4_n588(x)
+ if (x < 1)
+ fun_l5_n257(x)
+ else
+ fun_l5_n111(x)
+ end
+end
+
+def fun_l4_n589(x)
+ if (x < 1)
+ fun_l5_n727(x)
+ else
+ fun_l5_n585(x)
+ end
+end
+
+def fun_l4_n590(x)
+ if (x < 1)
+ fun_l5_n205(x)
+ else
+ fun_l5_n308(x)
+ end
+end
+
+def fun_l4_n591(x)
+ if (x < 1)
+ fun_l5_n618(x)
+ else
+ fun_l5_n970(x)
+ end
+end
+
+def fun_l4_n592(x)
+ if (x < 1)
+ fun_l5_n631(x)
+ else
+ fun_l5_n513(x)
+ end
+end
+
+def fun_l4_n593(x)
+ if (x < 1)
+ fun_l5_n270(x)
+ else
+ fun_l5_n737(x)
+ end
+end
+
+def fun_l4_n594(x)
+ if (x < 1)
+ fun_l5_n464(x)
+ else
+ fun_l5_n217(x)
+ end
+end
+
+def fun_l4_n595(x)
+ if (x < 1)
+ fun_l5_n650(x)
+ else
+ fun_l5_n616(x)
+ end
+end
+
+def fun_l4_n596(x)
+ if (x < 1)
+ fun_l5_n542(x)
+ else
+ fun_l5_n613(x)
+ end
+end
+
+def fun_l4_n597(x)
+ if (x < 1)
+ fun_l5_n312(x)
+ else
+ fun_l5_n7(x)
+ end
+end
+
+def fun_l4_n598(x)
+ if (x < 1)
+ fun_l5_n164(x)
+ else
+ fun_l5_n592(x)
+ end
+end
+
+def fun_l4_n599(x)
+ if (x < 1)
+ fun_l5_n326(x)
+ else
+ fun_l5_n894(x)
+ end
+end
+
+def fun_l4_n600(x)
+ if (x < 1)
+ fun_l5_n243(x)
+ else
+ fun_l5_n233(x)
+ end
+end
+
+def fun_l4_n601(x)
+ if (x < 1)
+ fun_l5_n600(x)
+ else
+ fun_l5_n7(x)
+ end
+end
+
+def fun_l4_n602(x)
+ if (x < 1)
+ fun_l5_n334(x)
+ else
+ fun_l5_n282(x)
+ end
+end
+
+def fun_l4_n603(x)
+ if (x < 1)
+ fun_l5_n202(x)
+ else
+ fun_l5_n252(x)
+ end
+end
+
+def fun_l4_n604(x)
+ if (x < 1)
+ fun_l5_n173(x)
+ else
+ fun_l5_n685(x)
+ end
+end
+
+def fun_l4_n605(x)
+ if (x < 1)
+ fun_l5_n953(x)
+ else
+ fun_l5_n416(x)
+ end
+end
+
+def fun_l4_n606(x)
+ if (x < 1)
+ fun_l5_n775(x)
+ else
+ fun_l5_n372(x)
+ end
+end
+
+def fun_l4_n607(x)
+ if (x < 1)
+ fun_l5_n723(x)
+ else
+ fun_l5_n894(x)
+ end
+end
+
+def fun_l4_n608(x)
+ if (x < 1)
+ fun_l5_n690(x)
+ else
+ fun_l5_n26(x)
+ end
+end
+
+def fun_l4_n609(x)
+ if (x < 1)
+ fun_l5_n555(x)
+ else
+ fun_l5_n717(x)
+ end
+end
+
+def fun_l4_n610(x)
+ if (x < 1)
+ fun_l5_n597(x)
+ else
+ fun_l5_n106(x)
+ end
+end
+
+def fun_l4_n611(x)
+ if (x < 1)
+ fun_l5_n405(x)
+ else
+ fun_l5_n588(x)
+ end
+end
+
+def fun_l4_n612(x)
+ if (x < 1)
+ fun_l5_n41(x)
+ else
+ fun_l5_n558(x)
+ end
+end
+
+def fun_l4_n613(x)
+ if (x < 1)
+ fun_l5_n875(x)
+ else
+ fun_l5_n492(x)
+ end
+end
+
+def fun_l4_n614(x)
+ if (x < 1)
+ fun_l5_n605(x)
+ else
+ fun_l5_n461(x)
+ end
+end
+
+def fun_l4_n615(x)
+ if (x < 1)
+ fun_l5_n726(x)
+ else
+ fun_l5_n534(x)
+ end
+end
+
+def fun_l4_n616(x)
+ if (x < 1)
+ fun_l5_n919(x)
+ else
+ fun_l5_n404(x)
+ end
+end
+
+def fun_l4_n617(x)
+ if (x < 1)
+ fun_l5_n383(x)
+ else
+ fun_l5_n324(x)
+ end
+end
+
+def fun_l4_n618(x)
+ if (x < 1)
+ fun_l5_n286(x)
+ else
+ fun_l5_n157(x)
+ end
+end
+
+def fun_l4_n619(x)
+ if (x < 1)
+ fun_l5_n713(x)
+ else
+ fun_l5_n388(x)
+ end
+end
+
+def fun_l4_n620(x)
+ if (x < 1)
+ fun_l5_n773(x)
+ else
+ fun_l5_n790(x)
+ end
+end
+
+def fun_l4_n621(x)
+ if (x < 1)
+ fun_l5_n797(x)
+ else
+ fun_l5_n720(x)
+ end
+end
+
+def fun_l4_n622(x)
+ if (x < 1)
+ fun_l5_n799(x)
+ else
+ fun_l5_n423(x)
+ end
+end
+
+def fun_l4_n623(x)
+ if (x < 1)
+ fun_l5_n390(x)
+ else
+ fun_l5_n941(x)
+ end
+end
+
+def fun_l4_n624(x)
+ if (x < 1)
+ fun_l5_n584(x)
+ else
+ fun_l5_n191(x)
+ end
+end
+
+def fun_l4_n625(x)
+ if (x < 1)
+ fun_l5_n243(x)
+ else
+ fun_l5_n208(x)
+ end
+end
+
+def fun_l4_n626(x)
+ if (x < 1)
+ fun_l5_n136(x)
+ else
+ fun_l5_n791(x)
+ end
+end
+
+def fun_l4_n627(x)
+ if (x < 1)
+ fun_l5_n684(x)
+ else
+ fun_l5_n341(x)
+ end
+end
+
+def fun_l4_n628(x)
+ if (x < 1)
+ fun_l5_n876(x)
+ else
+ fun_l5_n27(x)
+ end
+end
+
+def fun_l4_n629(x)
+ if (x < 1)
+ fun_l5_n18(x)
+ else
+ fun_l5_n867(x)
+ end
+end
+
+def fun_l4_n630(x)
+ if (x < 1)
+ fun_l5_n4(x)
+ else
+ fun_l5_n871(x)
+ end
+end
+
+def fun_l4_n631(x)
+ if (x < 1)
+ fun_l5_n578(x)
+ else
+ fun_l5_n988(x)
+ end
+end
+
+def fun_l4_n632(x)
+ if (x < 1)
+ fun_l5_n388(x)
+ else
+ fun_l5_n258(x)
+ end
+end
+
+def fun_l4_n633(x)
+ if (x < 1)
+ fun_l5_n680(x)
+ else
+ fun_l5_n814(x)
+ end
+end
+
+def fun_l4_n634(x)
+ if (x < 1)
+ fun_l5_n857(x)
+ else
+ fun_l5_n595(x)
+ end
+end
+
+def fun_l4_n635(x)
+ if (x < 1)
+ fun_l5_n547(x)
+ else
+ fun_l5_n725(x)
+ end
+end
+
+def fun_l4_n636(x)
+ if (x < 1)
+ fun_l5_n644(x)
+ else
+ fun_l5_n485(x)
+ end
+end
+
+def fun_l4_n637(x)
+ if (x < 1)
+ fun_l5_n528(x)
+ else
+ fun_l5_n653(x)
+ end
+end
+
+def fun_l4_n638(x)
+ if (x < 1)
+ fun_l5_n924(x)
+ else
+ fun_l5_n956(x)
+ end
+end
+
+def fun_l4_n639(x)
+ if (x < 1)
+ fun_l5_n654(x)
+ else
+ fun_l5_n979(x)
+ end
+end
+
+def fun_l4_n640(x)
+ if (x < 1)
+ fun_l5_n287(x)
+ else
+ fun_l5_n778(x)
+ end
+end
+
+def fun_l4_n641(x)
+ if (x < 1)
+ fun_l5_n197(x)
+ else
+ fun_l5_n682(x)
+ end
+end
+
+def fun_l4_n642(x)
+ if (x < 1)
+ fun_l5_n559(x)
+ else
+ fun_l5_n812(x)
+ end
+end
+
+def fun_l4_n643(x)
+ if (x < 1)
+ fun_l5_n970(x)
+ else
+ fun_l5_n43(x)
+ end
+end
+
+def fun_l4_n644(x)
+ if (x < 1)
+ fun_l5_n222(x)
+ else
+ fun_l5_n741(x)
+ end
+end
+
+def fun_l4_n645(x)
+ if (x < 1)
+ fun_l5_n788(x)
+ else
+ fun_l5_n72(x)
+ end
+end
+
+def fun_l4_n646(x)
+ if (x < 1)
+ fun_l5_n877(x)
+ else
+ fun_l5_n371(x)
+ end
+end
+
+def fun_l4_n647(x)
+ if (x < 1)
+ fun_l5_n710(x)
+ else
+ fun_l5_n783(x)
+ end
+end
+
+def fun_l4_n648(x)
+ if (x < 1)
+ fun_l5_n957(x)
+ else
+ fun_l5_n801(x)
+ end
+end
+
+def fun_l4_n649(x)
+ if (x < 1)
+ fun_l5_n127(x)
+ else
+ fun_l5_n664(x)
+ end
+end
+
+def fun_l4_n650(x)
+ if (x < 1)
+ fun_l5_n176(x)
+ else
+ fun_l5_n511(x)
+ end
+end
+
+def fun_l4_n651(x)
+ if (x < 1)
+ fun_l5_n830(x)
+ else
+ fun_l5_n107(x)
+ end
+end
+
+def fun_l4_n652(x)
+ if (x < 1)
+ fun_l5_n207(x)
+ else
+ fun_l5_n894(x)
+ end
+end
+
+def fun_l4_n653(x)
+ if (x < 1)
+ fun_l5_n611(x)
+ else
+ fun_l5_n443(x)
+ end
+end
+
+def fun_l4_n654(x)
+ if (x < 1)
+ fun_l5_n953(x)
+ else
+ fun_l5_n214(x)
+ end
+end
+
+def fun_l4_n655(x)
+ if (x < 1)
+ fun_l5_n109(x)
+ else
+ fun_l5_n706(x)
+ end
+end
+
+def fun_l4_n656(x)
+ if (x < 1)
+ fun_l5_n312(x)
+ else
+ fun_l5_n914(x)
+ end
+end
+
+def fun_l4_n657(x)
+ if (x < 1)
+ fun_l5_n774(x)
+ else
+ fun_l5_n530(x)
+ end
+end
+
+def fun_l4_n658(x)
+ if (x < 1)
+ fun_l5_n79(x)
+ else
+ fun_l5_n303(x)
+ end
+end
+
+def fun_l4_n659(x)
+ if (x < 1)
+ fun_l5_n718(x)
+ else
+ fun_l5_n196(x)
+ end
+end
+
+def fun_l4_n660(x)
+ if (x < 1)
+ fun_l5_n196(x)
+ else
+ fun_l5_n661(x)
+ end
+end
+
+def fun_l4_n661(x)
+ if (x < 1)
+ fun_l5_n654(x)
+ else
+ fun_l5_n17(x)
+ end
+end
+
+def fun_l4_n662(x)
+ if (x < 1)
+ fun_l5_n413(x)
+ else
+ fun_l5_n214(x)
+ end
+end
+
+def fun_l4_n663(x)
+ if (x < 1)
+ fun_l5_n491(x)
+ else
+ fun_l5_n927(x)
+ end
+end
+
+def fun_l4_n664(x)
+ if (x < 1)
+ fun_l5_n671(x)
+ else
+ fun_l5_n611(x)
+ end
+end
+
+def fun_l4_n665(x)
+ if (x < 1)
+ fun_l5_n153(x)
+ else
+ fun_l5_n127(x)
+ end
+end
+
+def fun_l4_n666(x)
+ if (x < 1)
+ fun_l5_n661(x)
+ else
+ fun_l5_n590(x)
+ end
+end
+
+def fun_l4_n667(x)
+ if (x < 1)
+ fun_l5_n539(x)
+ else
+ fun_l5_n692(x)
+ end
+end
+
+def fun_l4_n668(x)
+ if (x < 1)
+ fun_l5_n390(x)
+ else
+ fun_l5_n423(x)
+ end
+end
+
+def fun_l4_n669(x)
+ if (x < 1)
+ fun_l5_n615(x)
+ else
+ fun_l5_n449(x)
+ end
+end
+
+def fun_l4_n670(x)
+ if (x < 1)
+ fun_l5_n49(x)
+ else
+ fun_l5_n632(x)
+ end
+end
+
+def fun_l4_n671(x)
+ if (x < 1)
+ fun_l5_n891(x)
+ else
+ fun_l5_n915(x)
+ end
+end
+
+def fun_l4_n672(x)
+ if (x < 1)
+ fun_l5_n257(x)
+ else
+ fun_l5_n719(x)
+ end
+end
+
+def fun_l4_n673(x)
+ if (x < 1)
+ fun_l5_n620(x)
+ else
+ fun_l5_n473(x)
+ end
+end
+
+def fun_l4_n674(x)
+ if (x < 1)
+ fun_l5_n422(x)
+ else
+ fun_l5_n776(x)
+ end
+end
+
+def fun_l4_n675(x)
+ if (x < 1)
+ fun_l5_n973(x)
+ else
+ fun_l5_n32(x)
+ end
+end
+
+def fun_l4_n676(x)
+ if (x < 1)
+ fun_l5_n434(x)
+ else
+ fun_l5_n85(x)
+ end
+end
+
+def fun_l4_n677(x)
+ if (x < 1)
+ fun_l5_n430(x)
+ else
+ fun_l5_n702(x)
+ end
+end
+
+def fun_l4_n678(x)
+ if (x < 1)
+ fun_l5_n698(x)
+ else
+ fun_l5_n482(x)
+ end
+end
+
+def fun_l4_n679(x)
+ if (x < 1)
+ fun_l5_n365(x)
+ else
+ fun_l5_n83(x)
+ end
+end
+
+def fun_l4_n680(x)
+ if (x < 1)
+ fun_l5_n397(x)
+ else
+ fun_l5_n356(x)
+ end
+end
+
+def fun_l4_n681(x)
+ if (x < 1)
+ fun_l5_n48(x)
+ else
+ fun_l5_n458(x)
+ end
+end
+
+def fun_l4_n682(x)
+ if (x < 1)
+ fun_l5_n205(x)
+ else
+ fun_l5_n693(x)
+ end
+end
+
+def fun_l4_n683(x)
+ if (x < 1)
+ fun_l5_n971(x)
+ else
+ fun_l5_n656(x)
+ end
+end
+
+def fun_l4_n684(x)
+ if (x < 1)
+ fun_l5_n147(x)
+ else
+ fun_l5_n314(x)
+ end
+end
+
+def fun_l4_n685(x)
+ if (x < 1)
+ fun_l5_n347(x)
+ else
+ fun_l5_n281(x)
+ end
+end
+
+def fun_l4_n686(x)
+ if (x < 1)
+ fun_l5_n259(x)
+ else
+ fun_l5_n395(x)
+ end
+end
+
+def fun_l4_n687(x)
+ if (x < 1)
+ fun_l5_n899(x)
+ else
+ fun_l5_n319(x)
+ end
+end
+
+def fun_l4_n688(x)
+ if (x < 1)
+ fun_l5_n293(x)
+ else
+ fun_l5_n831(x)
+ end
+end
+
+def fun_l4_n689(x)
+ if (x < 1)
+ fun_l5_n88(x)
+ else
+ fun_l5_n938(x)
+ end
+end
+
+def fun_l4_n690(x)
+ if (x < 1)
+ fun_l5_n72(x)
+ else
+ fun_l5_n172(x)
+ end
+end
+
+def fun_l4_n691(x)
+ if (x < 1)
+ fun_l5_n330(x)
+ else
+ fun_l5_n70(x)
+ end
+end
+
+def fun_l4_n692(x)
+ if (x < 1)
+ fun_l5_n901(x)
+ else
+ fun_l5_n984(x)
+ end
+end
+
+def fun_l4_n693(x)
+ if (x < 1)
+ fun_l5_n528(x)
+ else
+ fun_l5_n659(x)
+ end
+end
+
+def fun_l4_n694(x)
+ if (x < 1)
+ fun_l5_n522(x)
+ else
+ fun_l5_n219(x)
+ end
+end
+
+def fun_l4_n695(x)
+ if (x < 1)
+ fun_l5_n568(x)
+ else
+ fun_l5_n997(x)
+ end
+end
+
+def fun_l4_n696(x)
+ if (x < 1)
+ fun_l5_n248(x)
+ else
+ fun_l5_n508(x)
+ end
+end
+
+def fun_l4_n697(x)
+ if (x < 1)
+ fun_l5_n71(x)
+ else
+ fun_l5_n590(x)
+ end
+end
+
+def fun_l4_n698(x)
+ if (x < 1)
+ fun_l5_n398(x)
+ else
+ fun_l5_n125(x)
+ end
+end
+
+def fun_l4_n699(x)
+ if (x < 1)
+ fun_l5_n405(x)
+ else
+ fun_l5_n129(x)
+ end
+end
+
+def fun_l4_n700(x)
+ if (x < 1)
+ fun_l5_n818(x)
+ else
+ fun_l5_n792(x)
+ end
+end
+
+def fun_l4_n701(x)
+ if (x < 1)
+ fun_l5_n530(x)
+ else
+ fun_l5_n849(x)
+ end
+end
+
+def fun_l4_n702(x)
+ if (x < 1)
+ fun_l5_n640(x)
+ else
+ fun_l5_n558(x)
+ end
+end
+
+def fun_l4_n703(x)
+ if (x < 1)
+ fun_l5_n717(x)
+ else
+ fun_l5_n113(x)
+ end
+end
+
+def fun_l4_n704(x)
+ if (x < 1)
+ fun_l5_n139(x)
+ else
+ fun_l5_n743(x)
+ end
+end
+
+def fun_l4_n705(x)
+ if (x < 1)
+ fun_l5_n720(x)
+ else
+ fun_l5_n713(x)
+ end
+end
+
+def fun_l4_n706(x)
+ if (x < 1)
+ fun_l5_n774(x)
+ else
+ fun_l5_n90(x)
+ end
+end
+
+def fun_l4_n707(x)
+ if (x < 1)
+ fun_l5_n579(x)
+ else
+ fun_l5_n692(x)
+ end
+end
+
+def fun_l4_n708(x)
+ if (x < 1)
+ fun_l5_n448(x)
+ else
+ fun_l5_n146(x)
+ end
+end
+
+def fun_l4_n709(x)
+ if (x < 1)
+ fun_l5_n249(x)
+ else
+ fun_l5_n163(x)
+ end
+end
+
+def fun_l4_n710(x)
+ if (x < 1)
+ fun_l5_n694(x)
+ else
+ fun_l5_n532(x)
+ end
+end
+
+def fun_l4_n711(x)
+ if (x < 1)
+ fun_l5_n739(x)
+ else
+ fun_l5_n424(x)
+ end
+end
+
+def fun_l4_n712(x)
+ if (x < 1)
+ fun_l5_n700(x)
+ else
+ fun_l5_n761(x)
+ end
+end
+
+def fun_l4_n713(x)
+ if (x < 1)
+ fun_l5_n403(x)
+ else
+ fun_l5_n941(x)
+ end
+end
+
+def fun_l4_n714(x)
+ if (x < 1)
+ fun_l5_n724(x)
+ else
+ fun_l5_n863(x)
+ end
+end
+
+def fun_l4_n715(x)
+ if (x < 1)
+ fun_l5_n392(x)
+ else
+ fun_l5_n617(x)
+ end
+end
+
+def fun_l4_n716(x)
+ if (x < 1)
+ fun_l5_n76(x)
+ else
+ fun_l5_n896(x)
+ end
+end
+
+def fun_l4_n717(x)
+ if (x < 1)
+ fun_l5_n355(x)
+ else
+ fun_l5_n533(x)
+ end
+end
+
+def fun_l4_n718(x)
+ if (x < 1)
+ fun_l5_n225(x)
+ else
+ fun_l5_n273(x)
+ end
+end
+
+def fun_l4_n719(x)
+ if (x < 1)
+ fun_l5_n828(x)
+ else
+ fun_l5_n163(x)
+ end
+end
+
+def fun_l4_n720(x)
+ if (x < 1)
+ fun_l5_n309(x)
+ else
+ fun_l5_n702(x)
+ end
+end
+
+def fun_l4_n721(x)
+ if (x < 1)
+ fun_l5_n959(x)
+ else
+ fun_l5_n370(x)
+ end
+end
+
+def fun_l4_n722(x)
+ if (x < 1)
+ fun_l5_n640(x)
+ else
+ fun_l5_n96(x)
+ end
+end
+
+def fun_l4_n723(x)
+ if (x < 1)
+ fun_l5_n590(x)
+ else
+ fun_l5_n332(x)
+ end
+end
+
+def fun_l4_n724(x)
+ if (x < 1)
+ fun_l5_n14(x)
+ else
+ fun_l5_n161(x)
+ end
+end
+
+def fun_l4_n725(x)
+ if (x < 1)
+ fun_l5_n871(x)
+ else
+ fun_l5_n643(x)
+ end
+end
+
+def fun_l4_n726(x)
+ if (x < 1)
+ fun_l5_n885(x)
+ else
+ fun_l5_n142(x)
+ end
+end
+
+def fun_l4_n727(x)
+ if (x < 1)
+ fun_l5_n994(x)
+ else
+ fun_l5_n823(x)
+ end
+end
+
+def fun_l4_n728(x)
+ if (x < 1)
+ fun_l5_n825(x)
+ else
+ fun_l5_n315(x)
+ end
+end
+
+def fun_l4_n729(x)
+ if (x < 1)
+ fun_l5_n312(x)
+ else
+ fun_l5_n28(x)
+ end
+end
+
+def fun_l4_n730(x)
+ if (x < 1)
+ fun_l5_n545(x)
+ else
+ fun_l5_n87(x)
+ end
+end
+
+def fun_l4_n731(x)
+ if (x < 1)
+ fun_l5_n604(x)
+ else
+ fun_l5_n999(x)
+ end
+end
+
+def fun_l4_n732(x)
+ if (x < 1)
+ fun_l5_n21(x)
+ else
+ fun_l5_n170(x)
+ end
+end
+
+def fun_l4_n733(x)
+ if (x < 1)
+ fun_l5_n662(x)
+ else
+ fun_l5_n407(x)
+ end
+end
+
+def fun_l4_n734(x)
+ if (x < 1)
+ fun_l5_n808(x)
+ else
+ fun_l5_n653(x)
+ end
+end
+
+def fun_l4_n735(x)
+ if (x < 1)
+ fun_l5_n665(x)
+ else
+ fun_l5_n925(x)
+ end
+end
+
+def fun_l4_n736(x)
+ if (x < 1)
+ fun_l5_n186(x)
+ else
+ fun_l5_n817(x)
+ end
+end
+
+def fun_l4_n737(x)
+ if (x < 1)
+ fun_l5_n360(x)
+ else
+ fun_l5_n527(x)
+ end
+end
+
+def fun_l4_n738(x)
+ if (x < 1)
+ fun_l5_n278(x)
+ else
+ fun_l5_n759(x)
+ end
+end
+
+def fun_l4_n739(x)
+ if (x < 1)
+ fun_l5_n710(x)
+ else
+ fun_l5_n909(x)
+ end
+end
+
+def fun_l4_n740(x)
+ if (x < 1)
+ fun_l5_n770(x)
+ else
+ fun_l5_n382(x)
+ end
+end
+
+def fun_l4_n741(x)
+ if (x < 1)
+ fun_l5_n969(x)
+ else
+ fun_l5_n583(x)
+ end
+end
+
+def fun_l4_n742(x)
+ if (x < 1)
+ fun_l5_n653(x)
+ else
+ fun_l5_n258(x)
+ end
+end
+
+def fun_l4_n743(x)
+ if (x < 1)
+ fun_l5_n966(x)
+ else
+ fun_l5_n705(x)
+ end
+end
+
+def fun_l4_n744(x)
+ if (x < 1)
+ fun_l5_n454(x)
+ else
+ fun_l5_n748(x)
+ end
+end
+
+def fun_l4_n745(x)
+ if (x < 1)
+ fun_l5_n595(x)
+ else
+ fun_l5_n865(x)
+ end
+end
+
+def fun_l4_n746(x)
+ if (x < 1)
+ fun_l5_n593(x)
+ else
+ fun_l5_n615(x)
+ end
+end
+
+def fun_l4_n747(x)
+ if (x < 1)
+ fun_l5_n638(x)
+ else
+ fun_l5_n651(x)
+ end
+end
+
+def fun_l4_n748(x)
+ if (x < 1)
+ fun_l5_n331(x)
+ else
+ fun_l5_n847(x)
+ end
+end
+
+def fun_l4_n749(x)
+ if (x < 1)
+ fun_l5_n59(x)
+ else
+ fun_l5_n805(x)
+ end
+end
+
+def fun_l4_n750(x)
+ if (x < 1)
+ fun_l5_n269(x)
+ else
+ fun_l5_n904(x)
+ end
+end
+
+def fun_l4_n751(x)
+ if (x < 1)
+ fun_l5_n292(x)
+ else
+ fun_l5_n459(x)
+ end
+end
+
+def fun_l4_n752(x)
+ if (x < 1)
+ fun_l5_n581(x)
+ else
+ fun_l5_n353(x)
+ end
+end
+
+def fun_l4_n753(x)
+ if (x < 1)
+ fun_l5_n785(x)
+ else
+ fun_l5_n745(x)
+ end
+end
+
+def fun_l4_n754(x)
+ if (x < 1)
+ fun_l5_n317(x)
+ else
+ fun_l5_n604(x)
+ end
+end
+
+def fun_l4_n755(x)
+ if (x < 1)
+ fun_l5_n208(x)
+ else
+ fun_l5_n318(x)
+ end
+end
+
+def fun_l4_n756(x)
+ if (x < 1)
+ fun_l5_n986(x)
+ else
+ fun_l5_n83(x)
+ end
+end
+
+def fun_l4_n757(x)
+ if (x < 1)
+ fun_l5_n946(x)
+ else
+ fun_l5_n314(x)
+ end
+end
+
+def fun_l4_n758(x)
+ if (x < 1)
+ fun_l5_n571(x)
+ else
+ fun_l5_n919(x)
+ end
+end
+
+def fun_l4_n759(x)
+ if (x < 1)
+ fun_l5_n129(x)
+ else
+ fun_l5_n191(x)
+ end
+end
+
+def fun_l4_n760(x)
+ if (x < 1)
+ fun_l5_n838(x)
+ else
+ fun_l5_n29(x)
+ end
+end
+
+def fun_l4_n761(x)
+ if (x < 1)
+ fun_l5_n250(x)
+ else
+ fun_l5_n892(x)
+ end
+end
+
+def fun_l4_n762(x)
+ if (x < 1)
+ fun_l5_n588(x)
+ else
+ fun_l5_n59(x)
+ end
+end
+
+def fun_l4_n763(x)
+ if (x < 1)
+ fun_l5_n831(x)
+ else
+ fun_l5_n668(x)
+ end
+end
+
+def fun_l4_n764(x)
+ if (x < 1)
+ fun_l5_n337(x)
+ else
+ fun_l5_n514(x)
+ end
+end
+
+def fun_l4_n765(x)
+ if (x < 1)
+ fun_l5_n56(x)
+ else
+ fun_l5_n718(x)
+ end
+end
+
+def fun_l4_n766(x)
+ if (x < 1)
+ fun_l5_n189(x)
+ else
+ fun_l5_n103(x)
+ end
+end
+
+def fun_l4_n767(x)
+ if (x < 1)
+ fun_l5_n395(x)
+ else
+ fun_l5_n313(x)
+ end
+end
+
+def fun_l4_n768(x)
+ if (x < 1)
+ fun_l5_n388(x)
+ else
+ fun_l5_n757(x)
+ end
+end
+
+def fun_l4_n769(x)
+ if (x < 1)
+ fun_l5_n933(x)
+ else
+ fun_l5_n979(x)
+ end
+end
+
+def fun_l4_n770(x)
+ if (x < 1)
+ fun_l5_n765(x)
+ else
+ fun_l5_n472(x)
+ end
+end
+
+def fun_l4_n771(x)
+ if (x < 1)
+ fun_l5_n381(x)
+ else
+ fun_l5_n527(x)
+ end
+end
+
+def fun_l4_n772(x)
+ if (x < 1)
+ fun_l5_n314(x)
+ else
+ fun_l5_n990(x)
+ end
+end
+
+def fun_l4_n773(x)
+ if (x < 1)
+ fun_l5_n457(x)
+ else
+ fun_l5_n413(x)
+ end
+end
+
+def fun_l4_n774(x)
+ if (x < 1)
+ fun_l5_n245(x)
+ else
+ fun_l5_n85(x)
+ end
+end
+
+def fun_l4_n775(x)
+ if (x < 1)
+ fun_l5_n432(x)
+ else
+ fun_l5_n987(x)
+ end
+end
+
+def fun_l4_n776(x)
+ if (x < 1)
+ fun_l5_n588(x)
+ else
+ fun_l5_n352(x)
+ end
+end
+
+def fun_l4_n777(x)
+ if (x < 1)
+ fun_l5_n414(x)
+ else
+ fun_l5_n586(x)
+ end
+end
+
+def fun_l4_n778(x)
+ if (x < 1)
+ fun_l5_n290(x)
+ else
+ fun_l5_n776(x)
+ end
+end
+
+def fun_l4_n779(x)
+ if (x < 1)
+ fun_l5_n324(x)
+ else
+ fun_l5_n918(x)
+ end
+end
+
+def fun_l4_n780(x)
+ if (x < 1)
+ fun_l5_n928(x)
+ else
+ fun_l5_n107(x)
+ end
+end
+
+def fun_l4_n781(x)
+ if (x < 1)
+ fun_l5_n244(x)
+ else
+ fun_l5_n434(x)
+ end
+end
+
+def fun_l4_n782(x)
+ if (x < 1)
+ fun_l5_n828(x)
+ else
+ fun_l5_n141(x)
+ end
+end
+
+def fun_l4_n783(x)
+ if (x < 1)
+ fun_l5_n634(x)
+ else
+ fun_l5_n206(x)
+ end
+end
+
+def fun_l4_n784(x)
+ if (x < 1)
+ fun_l5_n387(x)
+ else
+ fun_l5_n57(x)
+ end
+end
+
+def fun_l4_n785(x)
+ if (x < 1)
+ fun_l5_n731(x)
+ else
+ fun_l5_n670(x)
+ end
+end
+
+def fun_l4_n786(x)
+ if (x < 1)
+ fun_l5_n173(x)
+ else
+ fun_l5_n657(x)
+ end
+end
+
+def fun_l4_n787(x)
+ if (x < 1)
+ fun_l5_n661(x)
+ else
+ fun_l5_n286(x)
+ end
+end
+
+def fun_l4_n788(x)
+ if (x < 1)
+ fun_l5_n364(x)
+ else
+ fun_l5_n520(x)
+ end
+end
+
+def fun_l4_n789(x)
+ if (x < 1)
+ fun_l5_n545(x)
+ else
+ fun_l5_n417(x)
+ end
+end
+
+def fun_l4_n790(x)
+ if (x < 1)
+ fun_l5_n270(x)
+ else
+ fun_l5_n550(x)
+ end
+end
+
+def fun_l4_n791(x)
+ if (x < 1)
+ fun_l5_n873(x)
+ else
+ fun_l5_n321(x)
+ end
+end
+
+def fun_l4_n792(x)
+ if (x < 1)
+ fun_l5_n243(x)
+ else
+ fun_l5_n406(x)
+ end
+end
+
+def fun_l4_n793(x)
+ if (x < 1)
+ fun_l5_n229(x)
+ else
+ fun_l5_n400(x)
+ end
+end
+
+def fun_l4_n794(x)
+ if (x < 1)
+ fun_l5_n670(x)
+ else
+ fun_l5_n535(x)
+ end
+end
+
+def fun_l4_n795(x)
+ if (x < 1)
+ fun_l5_n911(x)
+ else
+ fun_l5_n786(x)
+ end
+end
+
+def fun_l4_n796(x)
+ if (x < 1)
+ fun_l5_n247(x)
+ else
+ fun_l5_n775(x)
+ end
+end
+
+def fun_l4_n797(x)
+ if (x < 1)
+ fun_l5_n944(x)
+ else
+ fun_l5_n14(x)
+ end
+end
+
+def fun_l4_n798(x)
+ if (x < 1)
+ fun_l5_n57(x)
+ else
+ fun_l5_n644(x)
+ end
+end
+
+def fun_l4_n799(x)
+ if (x < 1)
+ fun_l5_n583(x)
+ else
+ fun_l5_n599(x)
+ end
+end
+
+def fun_l4_n800(x)
+ if (x < 1)
+ fun_l5_n685(x)
+ else
+ fun_l5_n366(x)
+ end
+end
+
+def fun_l4_n801(x)
+ if (x < 1)
+ fun_l5_n671(x)
+ else
+ fun_l5_n386(x)
+ end
+end
+
+def fun_l4_n802(x)
+ if (x < 1)
+ fun_l5_n19(x)
+ else
+ fun_l5_n403(x)
+ end
+end
+
+def fun_l4_n803(x)
+ if (x < 1)
+ fun_l5_n952(x)
+ else
+ fun_l5_n237(x)
+ end
+end
+
+def fun_l4_n804(x)
+ if (x < 1)
+ fun_l5_n929(x)
+ else
+ fun_l5_n737(x)
+ end
+end
+
+def fun_l4_n805(x)
+ if (x < 1)
+ fun_l5_n197(x)
+ else
+ fun_l5_n322(x)
+ end
+end
+
+def fun_l4_n806(x)
+ if (x < 1)
+ fun_l5_n966(x)
+ else
+ fun_l5_n531(x)
+ end
+end
+
+def fun_l4_n807(x)
+ if (x < 1)
+ fun_l5_n928(x)
+ else
+ fun_l5_n802(x)
+ end
+end
+
+def fun_l4_n808(x)
+ if (x < 1)
+ fun_l5_n34(x)
+ else
+ fun_l5_n107(x)
+ end
+end
+
+def fun_l4_n809(x)
+ if (x < 1)
+ fun_l5_n615(x)
+ else
+ fun_l5_n628(x)
+ end
+end
+
+def fun_l4_n810(x)
+ if (x < 1)
+ fun_l5_n187(x)
+ else
+ fun_l5_n424(x)
+ end
+end
+
+def fun_l4_n811(x)
+ if (x < 1)
+ fun_l5_n189(x)
+ else
+ fun_l5_n639(x)
+ end
+end
+
+def fun_l4_n812(x)
+ if (x < 1)
+ fun_l5_n177(x)
+ else
+ fun_l5_n580(x)
+ end
+end
+
+def fun_l4_n813(x)
+ if (x < 1)
+ fun_l5_n699(x)
+ else
+ fun_l5_n595(x)
+ end
+end
+
+def fun_l4_n814(x)
+ if (x < 1)
+ fun_l5_n44(x)
+ else
+ fun_l5_n966(x)
+ end
+end
+
+def fun_l4_n815(x)
+ if (x < 1)
+ fun_l5_n883(x)
+ else
+ fun_l5_n580(x)
+ end
+end
+
+def fun_l4_n816(x)
+ if (x < 1)
+ fun_l5_n306(x)
+ else
+ fun_l5_n564(x)
+ end
+end
+
+def fun_l4_n817(x)
+ if (x < 1)
+ fun_l5_n337(x)
+ else
+ fun_l5_n912(x)
+ end
+end
+
+def fun_l4_n818(x)
+ if (x < 1)
+ fun_l5_n36(x)
+ else
+ fun_l5_n164(x)
+ end
+end
+
+def fun_l4_n819(x)
+ if (x < 1)
+ fun_l5_n987(x)
+ else
+ fun_l5_n38(x)
+ end
+end
+
+def fun_l4_n820(x)
+ if (x < 1)
+ fun_l5_n656(x)
+ else
+ fun_l5_n647(x)
+ end
+end
+
+def fun_l4_n821(x)
+ if (x < 1)
+ fun_l5_n364(x)
+ else
+ fun_l5_n838(x)
+ end
+end
+
+def fun_l4_n822(x)
+ if (x < 1)
+ fun_l5_n301(x)
+ else
+ fun_l5_n850(x)
+ end
+end
+
+def fun_l4_n823(x)
+ if (x < 1)
+ fun_l5_n191(x)
+ else
+ fun_l5_n812(x)
+ end
+end
+
+def fun_l4_n824(x)
+ if (x < 1)
+ fun_l5_n148(x)
+ else
+ fun_l5_n332(x)
+ end
+end
+
+def fun_l4_n825(x)
+ if (x < 1)
+ fun_l5_n315(x)
+ else
+ fun_l5_n763(x)
+ end
+end
+
+def fun_l4_n826(x)
+ if (x < 1)
+ fun_l5_n612(x)
+ else
+ fun_l5_n993(x)
+ end
+end
+
+def fun_l4_n827(x)
+ if (x < 1)
+ fun_l5_n229(x)
+ else
+ fun_l5_n388(x)
+ end
+end
+
+def fun_l4_n828(x)
+ if (x < 1)
+ fun_l5_n131(x)
+ else
+ fun_l5_n283(x)
+ end
+end
+
+def fun_l4_n829(x)
+ if (x < 1)
+ fun_l5_n59(x)
+ else
+ fun_l5_n280(x)
+ end
+end
+
+def fun_l4_n830(x)
+ if (x < 1)
+ fun_l5_n993(x)
+ else
+ fun_l5_n160(x)
+ end
+end
+
+def fun_l4_n831(x)
+ if (x < 1)
+ fun_l5_n394(x)
+ else
+ fun_l5_n528(x)
+ end
+end
+
+def fun_l4_n832(x)
+ if (x < 1)
+ fun_l5_n376(x)
+ else
+ fun_l5_n201(x)
+ end
+end
+
+def fun_l4_n833(x)
+ if (x < 1)
+ fun_l5_n890(x)
+ else
+ fun_l5_n867(x)
+ end
+end
+
+def fun_l4_n834(x)
+ if (x < 1)
+ fun_l5_n320(x)
+ else
+ fun_l5_n237(x)
+ end
+end
+
+def fun_l4_n835(x)
+ if (x < 1)
+ fun_l5_n771(x)
+ else
+ fun_l5_n83(x)
+ end
+end
+
+def fun_l4_n836(x)
+ if (x < 1)
+ fun_l5_n751(x)
+ else
+ fun_l5_n2(x)
+ end
+end
+
+def fun_l4_n837(x)
+ if (x < 1)
+ fun_l5_n825(x)
+ else
+ fun_l5_n930(x)
+ end
+end
+
+def fun_l4_n838(x)
+ if (x < 1)
+ fun_l5_n968(x)
+ else
+ fun_l5_n136(x)
+ end
+end
+
+def fun_l4_n839(x)
+ if (x < 1)
+ fun_l5_n529(x)
+ else
+ fun_l5_n626(x)
+ end
+end
+
+def fun_l4_n840(x)
+ if (x < 1)
+ fun_l5_n228(x)
+ else
+ fun_l5_n915(x)
+ end
+end
+
+def fun_l4_n841(x)
+ if (x < 1)
+ fun_l5_n270(x)
+ else
+ fun_l5_n813(x)
+ end
+end
+
+def fun_l4_n842(x)
+ if (x < 1)
+ fun_l5_n392(x)
+ else
+ fun_l5_n60(x)
+ end
+end
+
+def fun_l4_n843(x)
+ if (x < 1)
+ fun_l5_n470(x)
+ else
+ fun_l5_n699(x)
+ end
+end
+
+def fun_l4_n844(x)
+ if (x < 1)
+ fun_l5_n68(x)
+ else
+ fun_l5_n163(x)
+ end
+end
+
+def fun_l4_n845(x)
+ if (x < 1)
+ fun_l5_n469(x)
+ else
+ fun_l5_n472(x)
+ end
+end
+
+def fun_l4_n846(x)
+ if (x < 1)
+ fun_l5_n640(x)
+ else
+ fun_l5_n311(x)
+ end
+end
+
+def fun_l4_n847(x)
+ if (x < 1)
+ fun_l5_n968(x)
+ else
+ fun_l5_n414(x)
+ end
+end
+
+def fun_l4_n848(x)
+ if (x < 1)
+ fun_l5_n111(x)
+ else
+ fun_l5_n340(x)
+ end
+end
+
+def fun_l4_n849(x)
+ if (x < 1)
+ fun_l5_n906(x)
+ else
+ fun_l5_n278(x)
+ end
+end
+
+def fun_l4_n850(x)
+ if (x < 1)
+ fun_l5_n353(x)
+ else
+ fun_l5_n590(x)
+ end
+end
+
+def fun_l4_n851(x)
+ if (x < 1)
+ fun_l5_n218(x)
+ else
+ fun_l5_n341(x)
+ end
+end
+
+def fun_l4_n852(x)
+ if (x < 1)
+ fun_l5_n527(x)
+ else
+ fun_l5_n273(x)
+ end
+end
+
+def fun_l4_n853(x)
+ if (x < 1)
+ fun_l5_n589(x)
+ else
+ fun_l5_n417(x)
+ end
+end
+
+def fun_l4_n854(x)
+ if (x < 1)
+ fun_l5_n411(x)
+ else
+ fun_l5_n848(x)
+ end
+end
+
+def fun_l4_n855(x)
+ if (x < 1)
+ fun_l5_n607(x)
+ else
+ fun_l5_n65(x)
+ end
+end
+
+def fun_l4_n856(x)
+ if (x < 1)
+ fun_l5_n240(x)
+ else
+ fun_l5_n24(x)
+ end
+end
+
+def fun_l4_n857(x)
+ if (x < 1)
+ fun_l5_n165(x)
+ else
+ fun_l5_n362(x)
+ end
+end
+
+def fun_l4_n858(x)
+ if (x < 1)
+ fun_l5_n559(x)
+ else
+ fun_l5_n253(x)
+ end
+end
+
+def fun_l4_n859(x)
+ if (x < 1)
+ fun_l5_n834(x)
+ else
+ fun_l5_n217(x)
+ end
+end
+
+def fun_l4_n860(x)
+ if (x < 1)
+ fun_l5_n891(x)
+ else
+ fun_l5_n369(x)
+ end
+end
+
+def fun_l4_n861(x)
+ if (x < 1)
+ fun_l5_n676(x)
+ else
+ fun_l5_n614(x)
+ end
+end
+
+def fun_l4_n862(x)
+ if (x < 1)
+ fun_l5_n527(x)
+ else
+ fun_l5_n318(x)
+ end
+end
+
+def fun_l4_n863(x)
+ if (x < 1)
+ fun_l5_n991(x)
+ else
+ fun_l5_n997(x)
+ end
+end
+
+def fun_l4_n864(x)
+ if (x < 1)
+ fun_l5_n842(x)
+ else
+ fun_l5_n370(x)
+ end
+end
+
+def fun_l4_n865(x)
+ if (x < 1)
+ fun_l5_n623(x)
+ else
+ fun_l5_n741(x)
+ end
+end
+
+def fun_l4_n866(x)
+ if (x < 1)
+ fun_l5_n58(x)
+ else
+ fun_l5_n953(x)
+ end
+end
+
+def fun_l4_n867(x)
+ if (x < 1)
+ fun_l5_n269(x)
+ else
+ fun_l5_n341(x)
+ end
+end
+
+def fun_l4_n868(x)
+ if (x < 1)
+ fun_l5_n814(x)
+ else
+ fun_l5_n849(x)
+ end
+end
+
+def fun_l4_n869(x)
+ if (x < 1)
+ fun_l5_n163(x)
+ else
+ fun_l5_n246(x)
+ end
+end
+
+def fun_l4_n870(x)
+ if (x < 1)
+ fun_l5_n739(x)
+ else
+ fun_l5_n524(x)
+ end
+end
+
+def fun_l4_n871(x)
+ if (x < 1)
+ fun_l5_n589(x)
+ else
+ fun_l5_n592(x)
+ end
+end
+
+def fun_l4_n872(x)
+ if (x < 1)
+ fun_l5_n922(x)
+ else
+ fun_l5_n401(x)
+ end
+end
+
+def fun_l4_n873(x)
+ if (x < 1)
+ fun_l5_n600(x)
+ else
+ fun_l5_n184(x)
+ end
+end
+
+def fun_l4_n874(x)
+ if (x < 1)
+ fun_l5_n424(x)
+ else
+ fun_l5_n627(x)
+ end
+end
+
+def fun_l4_n875(x)
+ if (x < 1)
+ fun_l5_n48(x)
+ else
+ fun_l5_n127(x)
+ end
+end
+
+def fun_l4_n876(x)
+ if (x < 1)
+ fun_l5_n687(x)
+ else
+ fun_l5_n451(x)
+ end
+end
+
+def fun_l4_n877(x)
+ if (x < 1)
+ fun_l5_n849(x)
+ else
+ fun_l5_n480(x)
+ end
+end
+
+def fun_l4_n878(x)
+ if (x < 1)
+ fun_l5_n801(x)
+ else
+ fun_l5_n60(x)
+ end
+end
+
+def fun_l4_n879(x)
+ if (x < 1)
+ fun_l5_n964(x)
+ else
+ fun_l5_n790(x)
+ end
+end
+
+def fun_l4_n880(x)
+ if (x < 1)
+ fun_l5_n483(x)
+ else
+ fun_l5_n817(x)
+ end
+end
+
+def fun_l4_n881(x)
+ if (x < 1)
+ fun_l5_n91(x)
+ else
+ fun_l5_n776(x)
+ end
+end
+
+def fun_l4_n882(x)
+ if (x < 1)
+ fun_l5_n8(x)
+ else
+ fun_l5_n726(x)
+ end
+end
+
+def fun_l4_n883(x)
+ if (x < 1)
+ fun_l5_n63(x)
+ else
+ fun_l5_n570(x)
+ end
+end
+
+def fun_l4_n884(x)
+ if (x < 1)
+ fun_l5_n691(x)
+ else
+ fun_l5_n117(x)
+ end
+end
+
+def fun_l4_n885(x)
+ if (x < 1)
+ fun_l5_n262(x)
+ else
+ fun_l5_n38(x)
+ end
+end
+
+def fun_l4_n886(x)
+ if (x < 1)
+ fun_l5_n678(x)
+ else
+ fun_l5_n108(x)
+ end
+end
+
+def fun_l4_n887(x)
+ if (x < 1)
+ fun_l5_n775(x)
+ else
+ fun_l5_n751(x)
+ end
+end
+
+def fun_l4_n888(x)
+ if (x < 1)
+ fun_l5_n917(x)
+ else
+ fun_l5_n769(x)
+ end
+end
+
+def fun_l4_n889(x)
+ if (x < 1)
+ fun_l5_n191(x)
+ else
+ fun_l5_n662(x)
+ end
+end
+
+def fun_l4_n890(x)
+ if (x < 1)
+ fun_l5_n34(x)
+ else
+ fun_l5_n806(x)
+ end
+end
+
+def fun_l4_n891(x)
+ if (x < 1)
+ fun_l5_n705(x)
+ else
+ fun_l5_n198(x)
+ end
+end
+
+def fun_l4_n892(x)
+ if (x < 1)
+ fun_l5_n647(x)
+ else
+ fun_l5_n0(x)
+ end
+end
+
+def fun_l4_n893(x)
+ if (x < 1)
+ fun_l5_n336(x)
+ else
+ fun_l5_n795(x)
+ end
+end
+
+def fun_l4_n894(x)
+ if (x < 1)
+ fun_l5_n984(x)
+ else
+ fun_l5_n751(x)
+ end
+end
+
+def fun_l4_n895(x)
+ if (x < 1)
+ fun_l5_n373(x)
+ else
+ fun_l5_n12(x)
+ end
+end
+
+def fun_l4_n896(x)
+ if (x < 1)
+ fun_l5_n672(x)
+ else
+ fun_l5_n17(x)
+ end
+end
+
+def fun_l4_n897(x)
+ if (x < 1)
+ fun_l5_n67(x)
+ else
+ fun_l5_n183(x)
+ end
+end
+
+def fun_l4_n898(x)
+ if (x < 1)
+ fun_l5_n557(x)
+ else
+ fun_l5_n43(x)
+ end
+end
+
+def fun_l4_n899(x)
+ if (x < 1)
+ fun_l5_n210(x)
+ else
+ fun_l5_n904(x)
+ end
+end
+
+def fun_l4_n900(x)
+ if (x < 1)
+ fun_l5_n665(x)
+ else
+ fun_l5_n173(x)
+ end
+end
+
+def fun_l4_n901(x)
+ if (x < 1)
+ fun_l5_n268(x)
+ else
+ fun_l5_n907(x)
+ end
+end
+
+def fun_l4_n902(x)
+ if (x < 1)
+ fun_l5_n19(x)
+ else
+ fun_l5_n145(x)
+ end
+end
+
+def fun_l4_n903(x)
+ if (x < 1)
+ fun_l5_n158(x)
+ else
+ fun_l5_n261(x)
+ end
+end
+
+def fun_l4_n904(x)
+ if (x < 1)
+ fun_l5_n677(x)
+ else
+ fun_l5_n880(x)
+ end
+end
+
+def fun_l4_n905(x)
+ if (x < 1)
+ fun_l5_n262(x)
+ else
+ fun_l5_n790(x)
+ end
+end
+
+def fun_l4_n906(x)
+ if (x < 1)
+ fun_l5_n775(x)
+ else
+ fun_l5_n785(x)
+ end
+end
+
+def fun_l4_n907(x)
+ if (x < 1)
+ fun_l5_n629(x)
+ else
+ fun_l5_n312(x)
+ end
+end
+
+def fun_l4_n908(x)
+ if (x < 1)
+ fun_l5_n84(x)
+ else
+ fun_l5_n605(x)
+ end
+end
+
+def fun_l4_n909(x)
+ if (x < 1)
+ fun_l5_n346(x)
+ else
+ fun_l5_n245(x)
+ end
+end
+
+def fun_l4_n910(x)
+ if (x < 1)
+ fun_l5_n768(x)
+ else
+ fun_l5_n47(x)
+ end
+end
+
+def fun_l4_n911(x)
+ if (x < 1)
+ fun_l5_n48(x)
+ else
+ fun_l5_n406(x)
+ end
+end
+
+def fun_l4_n912(x)
+ if (x < 1)
+ fun_l5_n493(x)
+ else
+ fun_l5_n608(x)
+ end
+end
+
+def fun_l4_n913(x)
+ if (x < 1)
+ fun_l5_n456(x)
+ else
+ fun_l5_n176(x)
+ end
+end
+
+def fun_l4_n914(x)
+ if (x < 1)
+ fun_l5_n201(x)
+ else
+ fun_l5_n233(x)
+ end
+end
+
+def fun_l4_n915(x)
+ if (x < 1)
+ fun_l5_n910(x)
+ else
+ fun_l5_n790(x)
+ end
+end
+
+def fun_l4_n916(x)
+ if (x < 1)
+ fun_l5_n734(x)
+ else
+ fun_l5_n685(x)
+ end
+end
+
+def fun_l4_n917(x)
+ if (x < 1)
+ fun_l5_n132(x)
+ else
+ fun_l5_n112(x)
+ end
+end
+
+def fun_l4_n918(x)
+ if (x < 1)
+ fun_l5_n897(x)
+ else
+ fun_l5_n196(x)
+ end
+end
+
+def fun_l4_n919(x)
+ if (x < 1)
+ fun_l5_n395(x)
+ else
+ fun_l5_n186(x)
+ end
+end
+
+def fun_l4_n920(x)
+ if (x < 1)
+ fun_l5_n19(x)
+ else
+ fun_l5_n810(x)
+ end
+end
+
+def fun_l4_n921(x)
+ if (x < 1)
+ fun_l5_n546(x)
+ else
+ fun_l5_n34(x)
+ end
+end
+
+def fun_l4_n922(x)
+ if (x < 1)
+ fun_l5_n863(x)
+ else
+ fun_l5_n928(x)
+ end
+end
+
+def fun_l4_n923(x)
+ if (x < 1)
+ fun_l5_n485(x)
+ else
+ fun_l5_n182(x)
+ end
+end
+
+def fun_l4_n924(x)
+ if (x < 1)
+ fun_l5_n303(x)
+ else
+ fun_l5_n17(x)
+ end
+end
+
+def fun_l4_n925(x)
+ if (x < 1)
+ fun_l5_n78(x)
+ else
+ fun_l5_n816(x)
+ end
+end
+
+def fun_l4_n926(x)
+ if (x < 1)
+ fun_l5_n797(x)
+ else
+ fun_l5_n761(x)
+ end
+end
+
+def fun_l4_n927(x)
+ if (x < 1)
+ fun_l5_n975(x)
+ else
+ fun_l5_n220(x)
+ end
+end
+
+def fun_l4_n928(x)
+ if (x < 1)
+ fun_l5_n213(x)
+ else
+ fun_l5_n815(x)
+ end
+end
+
+def fun_l4_n929(x)
+ if (x < 1)
+ fun_l5_n280(x)
+ else
+ fun_l5_n551(x)
+ end
+end
+
+def fun_l4_n930(x)
+ if (x < 1)
+ fun_l5_n631(x)
+ else
+ fun_l5_n629(x)
+ end
+end
+
+def fun_l4_n931(x)
+ if (x < 1)
+ fun_l5_n60(x)
+ else
+ fun_l5_n332(x)
+ end
+end
+
+def fun_l4_n932(x)
+ if (x < 1)
+ fun_l5_n336(x)
+ else
+ fun_l5_n733(x)
+ end
+end
+
+def fun_l4_n933(x)
+ if (x < 1)
+ fun_l5_n94(x)
+ else
+ fun_l5_n48(x)
+ end
+end
+
+def fun_l4_n934(x)
+ if (x < 1)
+ fun_l5_n527(x)
+ else
+ fun_l5_n108(x)
+ end
+end
+
+def fun_l4_n935(x)
+ if (x < 1)
+ fun_l5_n994(x)
+ else
+ fun_l5_n95(x)
+ end
+end
+
+def fun_l4_n936(x)
+ if (x < 1)
+ fun_l5_n631(x)
+ else
+ fun_l5_n310(x)
+ end
+end
+
+def fun_l4_n937(x)
+ if (x < 1)
+ fun_l5_n851(x)
+ else
+ fun_l5_n380(x)
+ end
+end
+
+def fun_l4_n938(x)
+ if (x < 1)
+ fun_l5_n256(x)
+ else
+ fun_l5_n231(x)
+ end
+end
+
+def fun_l4_n939(x)
+ if (x < 1)
+ fun_l5_n686(x)
+ else
+ fun_l5_n557(x)
+ end
+end
+
+def fun_l4_n940(x)
+ if (x < 1)
+ fun_l5_n311(x)
+ else
+ fun_l5_n611(x)
+ end
+end
+
+def fun_l4_n941(x)
+ if (x < 1)
+ fun_l5_n770(x)
+ else
+ fun_l5_n306(x)
+ end
+end
+
+def fun_l4_n942(x)
+ if (x < 1)
+ fun_l5_n649(x)
+ else
+ fun_l5_n60(x)
+ end
+end
+
+def fun_l4_n943(x)
+ if (x < 1)
+ fun_l5_n935(x)
+ else
+ fun_l5_n991(x)
+ end
+end
+
+def fun_l4_n944(x)
+ if (x < 1)
+ fun_l5_n851(x)
+ else
+ fun_l5_n131(x)
+ end
+end
+
+def fun_l4_n945(x)
+ if (x < 1)
+ fun_l5_n422(x)
+ else
+ fun_l5_n309(x)
+ end
+end
+
+def fun_l4_n946(x)
+ if (x < 1)
+ fun_l5_n906(x)
+ else
+ fun_l5_n798(x)
+ end
+end
+
+def fun_l4_n947(x)
+ if (x < 1)
+ fun_l5_n817(x)
+ else
+ fun_l5_n122(x)
+ end
+end
+
+def fun_l4_n948(x)
+ if (x < 1)
+ fun_l5_n233(x)
+ else
+ fun_l5_n600(x)
+ end
+end
+
+def fun_l4_n949(x)
+ if (x < 1)
+ fun_l5_n908(x)
+ else
+ fun_l5_n332(x)
+ end
+end
+
+def fun_l4_n950(x)
+ if (x < 1)
+ fun_l5_n71(x)
+ else
+ fun_l5_n893(x)
+ end
+end
+
+def fun_l4_n951(x)
+ if (x < 1)
+ fun_l5_n281(x)
+ else
+ fun_l5_n281(x)
+ end
+end
+
+def fun_l4_n952(x)
+ if (x < 1)
+ fun_l5_n312(x)
+ else
+ fun_l5_n164(x)
+ end
+end
+
+def fun_l4_n953(x)
+ if (x < 1)
+ fun_l5_n292(x)
+ else
+ fun_l5_n993(x)
+ end
+end
+
+def fun_l4_n954(x)
+ if (x < 1)
+ fun_l5_n271(x)
+ else
+ fun_l5_n635(x)
+ end
+end
+
+def fun_l4_n955(x)
+ if (x < 1)
+ fun_l5_n10(x)
+ else
+ fun_l5_n202(x)
+ end
+end
+
+def fun_l4_n956(x)
+ if (x < 1)
+ fun_l5_n574(x)
+ else
+ fun_l5_n29(x)
+ end
+end
+
+def fun_l4_n957(x)
+ if (x < 1)
+ fun_l5_n154(x)
+ else
+ fun_l5_n96(x)
+ end
+end
+
+def fun_l4_n958(x)
+ if (x < 1)
+ fun_l5_n287(x)
+ else
+ fun_l5_n509(x)
+ end
+end
+
+def fun_l4_n959(x)
+ if (x < 1)
+ fun_l5_n400(x)
+ else
+ fun_l5_n195(x)
+ end
+end
+
+def fun_l4_n960(x)
+ if (x < 1)
+ fun_l5_n94(x)
+ else
+ fun_l5_n165(x)
+ end
+end
+
+def fun_l4_n961(x)
+ if (x < 1)
+ fun_l5_n276(x)
+ else
+ fun_l5_n935(x)
+ end
+end
+
+def fun_l4_n962(x)
+ if (x < 1)
+ fun_l5_n504(x)
+ else
+ fun_l5_n480(x)
+ end
+end
+
+def fun_l4_n963(x)
+ if (x < 1)
+ fun_l5_n152(x)
+ else
+ fun_l5_n397(x)
+ end
+end
+
+def fun_l4_n964(x)
+ if (x < 1)
+ fun_l5_n303(x)
+ else
+ fun_l5_n481(x)
+ end
+end
+
+def fun_l4_n965(x)
+ if (x < 1)
+ fun_l5_n882(x)
+ else
+ fun_l5_n116(x)
+ end
+end
+
+def fun_l4_n966(x)
+ if (x < 1)
+ fun_l5_n329(x)
+ else
+ fun_l5_n484(x)
+ end
+end
+
+def fun_l4_n967(x)
+ if (x < 1)
+ fun_l5_n369(x)
+ else
+ fun_l5_n680(x)
+ end
+end
+
+def fun_l4_n968(x)
+ if (x < 1)
+ fun_l5_n30(x)
+ else
+ fun_l5_n653(x)
+ end
+end
+
+def fun_l4_n969(x)
+ if (x < 1)
+ fun_l5_n292(x)
+ else
+ fun_l5_n793(x)
+ end
+end
+
+def fun_l4_n970(x)
+ if (x < 1)
+ fun_l5_n796(x)
+ else
+ fun_l5_n387(x)
+ end
+end
+
+def fun_l4_n971(x)
+ if (x < 1)
+ fun_l5_n735(x)
+ else
+ fun_l5_n985(x)
+ end
+end
+
+def fun_l4_n972(x)
+ if (x < 1)
+ fun_l5_n925(x)
+ else
+ fun_l5_n689(x)
+ end
+end
+
+def fun_l4_n973(x)
+ if (x < 1)
+ fun_l5_n154(x)
+ else
+ fun_l5_n998(x)
+ end
+end
+
+def fun_l4_n974(x)
+ if (x < 1)
+ fun_l5_n708(x)
+ else
+ fun_l5_n253(x)
+ end
+end
+
+def fun_l4_n975(x)
+ if (x < 1)
+ fun_l5_n716(x)
+ else
+ fun_l5_n958(x)
+ end
+end
+
+def fun_l4_n976(x)
+ if (x < 1)
+ fun_l5_n488(x)
+ else
+ fun_l5_n299(x)
+ end
+end
+
+def fun_l4_n977(x)
+ if (x < 1)
+ fun_l5_n872(x)
+ else
+ fun_l5_n276(x)
+ end
+end
+
+def fun_l4_n978(x)
+ if (x < 1)
+ fun_l5_n219(x)
+ else
+ fun_l5_n76(x)
+ end
+end
+
+def fun_l4_n979(x)
+ if (x < 1)
+ fun_l5_n937(x)
+ else
+ fun_l5_n988(x)
+ end
+end
+
+def fun_l4_n980(x)
+ if (x < 1)
+ fun_l5_n681(x)
+ else
+ fun_l5_n264(x)
+ end
+end
+
+def fun_l4_n981(x)
+ if (x < 1)
+ fun_l5_n630(x)
+ else
+ fun_l5_n18(x)
+ end
+end
+
+def fun_l4_n982(x)
+ if (x < 1)
+ fun_l5_n910(x)
+ else
+ fun_l5_n97(x)
+ end
+end
+
+def fun_l4_n983(x)
+ if (x < 1)
+ fun_l5_n551(x)
+ else
+ fun_l5_n429(x)
+ end
+end
+
+def fun_l4_n984(x)
+ if (x < 1)
+ fun_l5_n304(x)
+ else
+ fun_l5_n192(x)
+ end
+end
+
+def fun_l4_n985(x)
+ if (x < 1)
+ fun_l5_n696(x)
+ else
+ fun_l5_n124(x)
+ end
+end
+
+def fun_l4_n986(x)
+ if (x < 1)
+ fun_l5_n603(x)
+ else
+ fun_l5_n967(x)
+ end
+end
+
+def fun_l4_n987(x)
+ if (x < 1)
+ fun_l5_n317(x)
+ else
+ fun_l5_n785(x)
+ end
+end
+
+def fun_l4_n988(x)
+ if (x < 1)
+ fun_l5_n762(x)
+ else
+ fun_l5_n999(x)
+ end
+end
+
+def fun_l4_n989(x)
+ if (x < 1)
+ fun_l5_n61(x)
+ else
+ fun_l5_n474(x)
+ end
+end
+
+def fun_l4_n990(x)
+ if (x < 1)
+ fun_l5_n465(x)
+ else
+ fun_l5_n878(x)
+ end
+end
+
+def fun_l4_n991(x)
+ if (x < 1)
+ fun_l5_n907(x)
+ else
+ fun_l5_n259(x)
+ end
+end
+
+def fun_l4_n992(x)
+ if (x < 1)
+ fun_l5_n594(x)
+ else
+ fun_l5_n378(x)
+ end
+end
+
+def fun_l4_n993(x)
+ if (x < 1)
+ fun_l5_n202(x)
+ else
+ fun_l5_n490(x)
+ end
+end
+
+def fun_l4_n994(x)
+ if (x < 1)
+ fun_l5_n105(x)
+ else
+ fun_l5_n675(x)
+ end
+end
+
+def fun_l4_n995(x)
+ if (x < 1)
+ fun_l5_n99(x)
+ else
+ fun_l5_n21(x)
+ end
+end
+
+def fun_l4_n996(x)
+ if (x < 1)
+ fun_l5_n831(x)
+ else
+ fun_l5_n692(x)
+ end
+end
+
+def fun_l4_n997(x)
+ if (x < 1)
+ fun_l5_n186(x)
+ else
+ fun_l5_n427(x)
+ end
+end
+
+def fun_l4_n998(x)
+ if (x < 1)
+ fun_l5_n597(x)
+ else
+ fun_l5_n978(x)
+ end
+end
+
+def fun_l4_n999(x)
+ if (x < 1)
+ fun_l5_n537(x)
+ else
+ fun_l5_n952(x)
+ end
+end
+
+def fun_l5_n0(x)
+ if (x < 1)
+ fun_l6_n104(x)
+ else
+ fun_l6_n249(x)
+ end
+end
+
+def fun_l5_n1(x)
+ if (x < 1)
+ fun_l6_n260(x)
+ else
+ fun_l6_n782(x)
+ end
+end
+
+def fun_l5_n2(x)
+ if (x < 1)
+ fun_l6_n974(x)
+ else
+ fun_l6_n301(x)
+ end
+end
+
+def fun_l5_n3(x)
+ if (x < 1)
+ fun_l6_n883(x)
+ else
+ fun_l6_n149(x)
+ end
+end
+
+def fun_l5_n4(x)
+ if (x < 1)
+ fun_l6_n134(x)
+ else
+ fun_l6_n111(x)
+ end
+end
+
+def fun_l5_n5(x)
+ if (x < 1)
+ fun_l6_n573(x)
+ else
+ fun_l6_n273(x)
+ end
+end
+
+def fun_l5_n6(x)
+ if (x < 1)
+ fun_l6_n221(x)
+ else
+ fun_l6_n843(x)
+ end
+end
+
+def fun_l5_n7(x)
+ if (x < 1)
+ fun_l6_n572(x)
+ else
+ fun_l6_n435(x)
+ end
+end
+
+def fun_l5_n8(x)
+ if (x < 1)
+ fun_l6_n43(x)
+ else
+ fun_l6_n598(x)
+ end
+end
+
+def fun_l5_n9(x)
+ if (x < 1)
+ fun_l6_n352(x)
+ else
+ fun_l6_n894(x)
+ end
+end
+
+def fun_l5_n10(x)
+ if (x < 1)
+ fun_l6_n161(x)
+ else
+ fun_l6_n654(x)
+ end
+end
+
+def fun_l5_n11(x)
+ if (x < 1)
+ fun_l6_n432(x)
+ else
+ fun_l6_n825(x)
+ end
+end
+
+def fun_l5_n12(x)
+ if (x < 1)
+ fun_l6_n863(x)
+ else
+ fun_l6_n940(x)
+ end
+end
+
+def fun_l5_n13(x)
+ if (x < 1)
+ fun_l6_n478(x)
+ else
+ fun_l6_n193(x)
+ end
+end
+
+def fun_l5_n14(x)
+ if (x < 1)
+ fun_l6_n327(x)
+ else
+ fun_l6_n808(x)
+ end
+end
+
+def fun_l5_n15(x)
+ if (x < 1)
+ fun_l6_n86(x)
+ else
+ fun_l6_n951(x)
+ end
+end
+
+def fun_l5_n16(x)
+ if (x < 1)
+ fun_l6_n492(x)
+ else
+ fun_l6_n704(x)
+ end
+end
+
+def fun_l5_n17(x)
+ if (x < 1)
+ fun_l6_n196(x)
+ else
+ fun_l6_n970(x)
+ end
+end
+
+def fun_l5_n18(x)
+ if (x < 1)
+ fun_l6_n325(x)
+ else
+ fun_l6_n30(x)
+ end
+end
+
+def fun_l5_n19(x)
+ if (x < 1)
+ fun_l6_n559(x)
+ else
+ fun_l6_n269(x)
+ end
+end
+
+def fun_l5_n20(x)
+ if (x < 1)
+ fun_l6_n716(x)
+ else
+ fun_l6_n783(x)
+ end
+end
+
+def fun_l5_n21(x)
+ if (x < 1)
+ fun_l6_n978(x)
+ else
+ fun_l6_n306(x)
+ end
+end
+
+def fun_l5_n22(x)
+ if (x < 1)
+ fun_l6_n220(x)
+ else
+ fun_l6_n823(x)
+ end
+end
+
+def fun_l5_n23(x)
+ if (x < 1)
+ fun_l6_n675(x)
+ else
+ fun_l6_n684(x)
+ end
+end
+
+def fun_l5_n24(x)
+ if (x < 1)
+ fun_l6_n851(x)
+ else
+ fun_l6_n450(x)
+ end
+end
+
+def fun_l5_n25(x)
+ if (x < 1)
+ fun_l6_n745(x)
+ else
+ fun_l6_n370(x)
+ end
+end
+
+def fun_l5_n26(x)
+ if (x < 1)
+ fun_l6_n431(x)
+ else
+ fun_l6_n130(x)
+ end
+end
+
+def fun_l5_n27(x)
+ if (x < 1)
+ fun_l6_n105(x)
+ else
+ fun_l6_n205(x)
+ end
+end
+
+def fun_l5_n28(x)
+ if (x < 1)
+ fun_l6_n718(x)
+ else
+ fun_l6_n742(x)
+ end
+end
+
+def fun_l5_n29(x)
+ if (x < 1)
+ fun_l6_n977(x)
+ else
+ fun_l6_n63(x)
+ end
+end
+
+def fun_l5_n30(x)
+ if (x < 1)
+ fun_l6_n902(x)
+ else
+ fun_l6_n471(x)
+ end
+end
+
+def fun_l5_n31(x)
+ if (x < 1)
+ fun_l6_n76(x)
+ else
+ fun_l6_n960(x)
+ end
+end
+
+def fun_l5_n32(x)
+ if (x < 1)
+ fun_l6_n75(x)
+ else
+ fun_l6_n640(x)
+ end
+end
+
+def fun_l5_n33(x)
+ if (x < 1)
+ fun_l6_n631(x)
+ else
+ fun_l6_n769(x)
+ end
+end
+
+def fun_l5_n34(x)
+ if (x < 1)
+ fun_l6_n201(x)
+ else
+ fun_l6_n771(x)
+ end
+end
+
+def fun_l5_n35(x)
+ if (x < 1)
+ fun_l6_n734(x)
+ else
+ fun_l6_n370(x)
+ end
+end
+
+def fun_l5_n36(x)
+ if (x < 1)
+ fun_l6_n490(x)
+ else
+ fun_l6_n994(x)
+ end
+end
+
+def fun_l5_n37(x)
+ if (x < 1)
+ fun_l6_n566(x)
+ else
+ fun_l6_n392(x)
+ end
+end
+
+def fun_l5_n38(x)
+ if (x < 1)
+ fun_l6_n120(x)
+ else
+ fun_l6_n774(x)
+ end
+end
+
+def fun_l5_n39(x)
+ if (x < 1)
+ fun_l6_n402(x)
+ else
+ fun_l6_n572(x)
+ end
+end
+
+def fun_l5_n40(x)
+ if (x < 1)
+ fun_l6_n911(x)
+ else
+ fun_l6_n968(x)
+ end
+end
+
+def fun_l5_n41(x)
+ if (x < 1)
+ fun_l6_n200(x)
+ else
+ fun_l6_n656(x)
+ end
+end
+
+def fun_l5_n42(x)
+ if (x < 1)
+ fun_l6_n59(x)
+ else
+ fun_l6_n426(x)
+ end
+end
+
+def fun_l5_n43(x)
+ if (x < 1)
+ fun_l6_n650(x)
+ else
+ fun_l6_n932(x)
+ end
+end
+
+def fun_l5_n44(x)
+ if (x < 1)
+ fun_l6_n365(x)
+ else
+ fun_l6_n755(x)
+ end
+end
+
+def fun_l5_n45(x)
+ if (x < 1)
+ fun_l6_n738(x)
+ else
+ fun_l6_n432(x)
+ end
+end
+
+def fun_l5_n46(x)
+ if (x < 1)
+ fun_l6_n82(x)
+ else
+ fun_l6_n484(x)
+ end
+end
+
+def fun_l5_n47(x)
+ if (x < 1)
+ fun_l6_n64(x)
+ else
+ fun_l6_n610(x)
+ end
+end
+
+def fun_l5_n48(x)
+ if (x < 1)
+ fun_l6_n875(x)
+ else
+ fun_l6_n464(x)
+ end
+end
+
+def fun_l5_n49(x)
+ if (x < 1)
+ fun_l6_n798(x)
+ else
+ fun_l6_n57(x)
+ end
+end
+
+def fun_l5_n50(x)
+ if (x < 1)
+ fun_l6_n76(x)
+ else
+ fun_l6_n462(x)
+ end
+end
+
+def fun_l5_n51(x)
+ if (x < 1)
+ fun_l6_n225(x)
+ else
+ fun_l6_n526(x)
+ end
+end
+
+def fun_l5_n52(x)
+ if (x < 1)
+ fun_l6_n388(x)
+ else
+ fun_l6_n167(x)
+ end
+end
+
+def fun_l5_n53(x)
+ if (x < 1)
+ fun_l6_n270(x)
+ else
+ fun_l6_n821(x)
+ end
+end
+
+def fun_l5_n54(x)
+ if (x < 1)
+ fun_l6_n790(x)
+ else
+ fun_l6_n95(x)
+ end
+end
+
+def fun_l5_n55(x)
+ if (x < 1)
+ fun_l6_n38(x)
+ else
+ fun_l6_n32(x)
+ end
+end
+
+def fun_l5_n56(x)
+ if (x < 1)
+ fun_l6_n400(x)
+ else
+ fun_l6_n513(x)
+ end
+end
+
+def fun_l5_n57(x)
+ if (x < 1)
+ fun_l6_n251(x)
+ else
+ fun_l6_n16(x)
+ end
+end
+
+def fun_l5_n58(x)
+ if (x < 1)
+ fun_l6_n303(x)
+ else
+ fun_l6_n858(x)
+ end
+end
+
+def fun_l5_n59(x)
+ if (x < 1)
+ fun_l6_n173(x)
+ else
+ fun_l6_n38(x)
+ end
+end
+
+def fun_l5_n60(x)
+ if (x < 1)
+ fun_l6_n466(x)
+ else
+ fun_l6_n64(x)
+ end
+end
+
+def fun_l5_n61(x)
+ if (x < 1)
+ fun_l6_n687(x)
+ else
+ fun_l6_n208(x)
+ end
+end
+
+def fun_l5_n62(x)
+ if (x < 1)
+ fun_l6_n891(x)
+ else
+ fun_l6_n789(x)
+ end
+end
+
+def fun_l5_n63(x)
+ if (x < 1)
+ fun_l6_n664(x)
+ else
+ fun_l6_n133(x)
+ end
+end
+
+def fun_l5_n64(x)
+ if (x < 1)
+ fun_l6_n121(x)
+ else
+ fun_l6_n989(x)
+ end
+end
+
+def fun_l5_n65(x)
+ if (x < 1)
+ fun_l6_n411(x)
+ else
+ fun_l6_n264(x)
+ end
+end
+
+def fun_l5_n66(x)
+ if (x < 1)
+ fun_l6_n528(x)
+ else
+ fun_l6_n662(x)
+ end
+end
+
+def fun_l5_n67(x)
+ if (x < 1)
+ fun_l6_n824(x)
+ else
+ fun_l6_n410(x)
+ end
+end
+
+def fun_l5_n68(x)
+ if (x < 1)
+ fun_l6_n29(x)
+ else
+ fun_l6_n946(x)
+ end
+end
+
+def fun_l5_n69(x)
+ if (x < 1)
+ fun_l6_n566(x)
+ else
+ fun_l6_n797(x)
+ end
+end
+
+def fun_l5_n70(x)
+ if (x < 1)
+ fun_l6_n816(x)
+ else
+ fun_l6_n112(x)
+ end
+end
+
+def fun_l5_n71(x)
+ if (x < 1)
+ fun_l6_n920(x)
+ else
+ fun_l6_n44(x)
+ end
+end
+
+def fun_l5_n72(x)
+ if (x < 1)
+ fun_l6_n725(x)
+ else
+ fun_l6_n113(x)
+ end
+end
+
+def fun_l5_n73(x)
+ if (x < 1)
+ fun_l6_n406(x)
+ else
+ fun_l6_n555(x)
+ end
+end
+
+def fun_l5_n74(x)
+ if (x < 1)
+ fun_l6_n527(x)
+ else
+ fun_l6_n991(x)
+ end
+end
+
+def fun_l5_n75(x)
+ if (x < 1)
+ fun_l6_n46(x)
+ else
+ fun_l6_n440(x)
+ end
+end
+
+def fun_l5_n76(x)
+ if (x < 1)
+ fun_l6_n242(x)
+ else
+ fun_l6_n488(x)
+ end
+end
+
+def fun_l5_n77(x)
+ if (x < 1)
+ fun_l6_n211(x)
+ else
+ fun_l6_n760(x)
+ end
+end
+
+def fun_l5_n78(x)
+ if (x < 1)
+ fun_l6_n821(x)
+ else
+ fun_l6_n653(x)
+ end
+end
+
+def fun_l5_n79(x)
+ if (x < 1)
+ fun_l6_n559(x)
+ else
+ fun_l6_n425(x)
+ end
+end
+
+def fun_l5_n80(x)
+ if (x < 1)
+ fun_l6_n792(x)
+ else
+ fun_l6_n813(x)
+ end
+end
+
+def fun_l5_n81(x)
+ if (x < 1)
+ fun_l6_n463(x)
+ else
+ fun_l6_n454(x)
+ end
+end
+
+def fun_l5_n82(x)
+ if (x < 1)
+ fun_l6_n731(x)
+ else
+ fun_l6_n718(x)
+ end
+end
+
+def fun_l5_n83(x)
+ if (x < 1)
+ fun_l6_n377(x)
+ else
+ fun_l6_n137(x)
+ end
+end
+
+def fun_l5_n84(x)
+ if (x < 1)
+ fun_l6_n829(x)
+ else
+ fun_l6_n77(x)
+ end
+end
+
+def fun_l5_n85(x)
+ if (x < 1)
+ fun_l6_n714(x)
+ else
+ fun_l6_n682(x)
+ end
+end
+
+def fun_l5_n86(x)
+ if (x < 1)
+ fun_l6_n47(x)
+ else
+ fun_l6_n527(x)
+ end
+end
+
+def fun_l5_n87(x)
+ if (x < 1)
+ fun_l6_n976(x)
+ else
+ fun_l6_n18(x)
+ end
+end
+
+def fun_l5_n88(x)
+ if (x < 1)
+ fun_l6_n710(x)
+ else
+ fun_l6_n998(x)
+ end
+end
+
+def fun_l5_n89(x)
+ if (x < 1)
+ fun_l6_n222(x)
+ else
+ fun_l6_n97(x)
+ end
+end
+
+def fun_l5_n90(x)
+ if (x < 1)
+ fun_l6_n862(x)
+ else
+ fun_l6_n627(x)
+ end
+end
+
+def fun_l5_n91(x)
+ if (x < 1)
+ fun_l6_n642(x)
+ else
+ fun_l6_n720(x)
+ end
+end
+
+def fun_l5_n92(x)
+ if (x < 1)
+ fun_l6_n444(x)
+ else
+ fun_l6_n506(x)
+ end
+end
+
+def fun_l5_n93(x)
+ if (x < 1)
+ fun_l6_n776(x)
+ else
+ fun_l6_n879(x)
+ end
+end
+
+def fun_l5_n94(x)
+ if (x < 1)
+ fun_l6_n629(x)
+ else
+ fun_l6_n322(x)
+ end
+end
+
+def fun_l5_n95(x)
+ if (x < 1)
+ fun_l6_n119(x)
+ else
+ fun_l6_n134(x)
+ end
+end
+
+def fun_l5_n96(x)
+ if (x < 1)
+ fun_l6_n385(x)
+ else
+ fun_l6_n343(x)
+ end
+end
+
+def fun_l5_n97(x)
+ if (x < 1)
+ fun_l6_n110(x)
+ else
+ fun_l6_n323(x)
+ end
+end
+
+def fun_l5_n98(x)
+ if (x < 1)
+ fun_l6_n547(x)
+ else
+ fun_l6_n762(x)
+ end
+end
+
+def fun_l5_n99(x)
+ if (x < 1)
+ fun_l6_n811(x)
+ else
+ fun_l6_n404(x)
+ end
+end
+
+def fun_l5_n100(x)
+ if (x < 1)
+ fun_l6_n893(x)
+ else
+ fun_l6_n120(x)
+ end
+end
+
+def fun_l5_n101(x)
+ if (x < 1)
+ fun_l6_n755(x)
+ else
+ fun_l6_n611(x)
+ end
+end
+
+def fun_l5_n102(x)
+ if (x < 1)
+ fun_l6_n65(x)
+ else
+ fun_l6_n897(x)
+ end
+end
+
+def fun_l5_n103(x)
+ if (x < 1)
+ fun_l6_n629(x)
+ else
+ fun_l6_n214(x)
+ end
+end
+
+def fun_l5_n104(x)
+ if (x < 1)
+ fun_l6_n211(x)
+ else
+ fun_l6_n179(x)
+ end
+end
+
+def fun_l5_n105(x)
+ if (x < 1)
+ fun_l6_n650(x)
+ else
+ fun_l6_n348(x)
+ end
+end
+
+def fun_l5_n106(x)
+ if (x < 1)
+ fun_l6_n966(x)
+ else
+ fun_l6_n654(x)
+ end
+end
+
+def fun_l5_n107(x)
+ if (x < 1)
+ fun_l6_n596(x)
+ else
+ fun_l6_n668(x)
+ end
+end
+
+def fun_l5_n108(x)
+ if (x < 1)
+ fun_l6_n392(x)
+ else
+ fun_l6_n629(x)
+ end
+end
+
+def fun_l5_n109(x)
+ if (x < 1)
+ fun_l6_n96(x)
+ else
+ fun_l6_n578(x)
+ end
+end
+
+def fun_l5_n110(x)
+ if (x < 1)
+ fun_l6_n88(x)
+ else
+ fun_l6_n214(x)
+ end
+end
+
+def fun_l5_n111(x)
+ if (x < 1)
+ fun_l6_n622(x)
+ else
+ fun_l6_n180(x)
+ end
+end
+
+def fun_l5_n112(x)
+ if (x < 1)
+ fun_l6_n100(x)
+ else
+ fun_l6_n373(x)
+ end
+end
+
+def fun_l5_n113(x)
+ if (x < 1)
+ fun_l6_n464(x)
+ else
+ fun_l6_n30(x)
+ end
+end
+
+def fun_l5_n114(x)
+ if (x < 1)
+ fun_l6_n703(x)
+ else
+ fun_l6_n116(x)
+ end
+end
+
+def fun_l5_n115(x)
+ if (x < 1)
+ fun_l6_n63(x)
+ else
+ fun_l6_n260(x)
+ end
+end
+
+def fun_l5_n116(x)
+ if (x < 1)
+ fun_l6_n935(x)
+ else
+ fun_l6_n951(x)
+ end
+end
+
+def fun_l5_n117(x)
+ if (x < 1)
+ fun_l6_n415(x)
+ else
+ fun_l6_n734(x)
+ end
+end
+
+def fun_l5_n118(x)
+ if (x < 1)
+ fun_l6_n873(x)
+ else
+ fun_l6_n163(x)
+ end
+end
+
+def fun_l5_n119(x)
+ if (x < 1)
+ fun_l6_n134(x)
+ else
+ fun_l6_n586(x)
+ end
+end
+
+def fun_l5_n120(x)
+ if (x < 1)
+ fun_l6_n793(x)
+ else
+ fun_l6_n197(x)
+ end
+end
+
+def fun_l5_n121(x)
+ if (x < 1)
+ fun_l6_n585(x)
+ else
+ fun_l6_n793(x)
+ end
+end
+
+def fun_l5_n122(x)
+ if (x < 1)
+ fun_l6_n10(x)
+ else
+ fun_l6_n12(x)
+ end
+end
+
+def fun_l5_n123(x)
+ if (x < 1)
+ fun_l6_n786(x)
+ else
+ fun_l6_n386(x)
+ end
+end
+
+def fun_l5_n124(x)
+ if (x < 1)
+ fun_l6_n891(x)
+ else
+ fun_l6_n903(x)
+ end
+end
+
+def fun_l5_n125(x)
+ if (x < 1)
+ fun_l6_n389(x)
+ else
+ fun_l6_n154(x)
+ end
+end
+
+def fun_l5_n126(x)
+ if (x < 1)
+ fun_l6_n214(x)
+ else
+ fun_l6_n754(x)
+ end
+end
+
+def fun_l5_n127(x)
+ if (x < 1)
+ fun_l6_n646(x)
+ else
+ fun_l6_n661(x)
+ end
+end
+
+def fun_l5_n128(x)
+ if (x < 1)
+ fun_l6_n662(x)
+ else
+ fun_l6_n527(x)
+ end
+end
+
+def fun_l5_n129(x)
+ if (x < 1)
+ fun_l6_n329(x)
+ else
+ fun_l6_n310(x)
+ end
+end
+
+def fun_l5_n130(x)
+ if (x < 1)
+ fun_l6_n352(x)
+ else
+ fun_l6_n820(x)
+ end
+end
+
+def fun_l5_n131(x)
+ if (x < 1)
+ fun_l6_n551(x)
+ else
+ fun_l6_n454(x)
+ end
+end
+
+def fun_l5_n132(x)
+ if (x < 1)
+ fun_l6_n586(x)
+ else
+ fun_l6_n340(x)
+ end
+end
+
+def fun_l5_n133(x)
+ if (x < 1)
+ fun_l6_n111(x)
+ else
+ fun_l6_n188(x)
+ end
+end
+
+def fun_l5_n134(x)
+ if (x < 1)
+ fun_l6_n261(x)
+ else
+ fun_l6_n464(x)
+ end
+end
+
+def fun_l5_n135(x)
+ if (x < 1)
+ fun_l6_n540(x)
+ else
+ fun_l6_n854(x)
+ end
+end
+
+def fun_l5_n136(x)
+ if (x < 1)
+ fun_l6_n120(x)
+ else
+ fun_l6_n230(x)
+ end
+end
+
+def fun_l5_n137(x)
+ if (x < 1)
+ fun_l6_n837(x)
+ else
+ fun_l6_n606(x)
+ end
+end
+
+def fun_l5_n138(x)
+ if (x < 1)
+ fun_l6_n663(x)
+ else
+ fun_l6_n195(x)
+ end
+end
+
+def fun_l5_n139(x)
+ if (x < 1)
+ fun_l6_n572(x)
+ else
+ fun_l6_n768(x)
+ end
+end
+
+def fun_l5_n140(x)
+ if (x < 1)
+ fun_l6_n747(x)
+ else
+ fun_l6_n304(x)
+ end
+end
+
+def fun_l5_n141(x)
+ if (x < 1)
+ fun_l6_n927(x)
+ else
+ fun_l6_n122(x)
+ end
+end
+
+def fun_l5_n142(x)
+ if (x < 1)
+ fun_l6_n311(x)
+ else
+ fun_l6_n920(x)
+ end
+end
+
+def fun_l5_n143(x)
+ if (x < 1)
+ fun_l6_n887(x)
+ else
+ fun_l6_n598(x)
+ end
+end
+
+def fun_l5_n144(x)
+ if (x < 1)
+ fun_l6_n289(x)
+ else
+ fun_l6_n894(x)
+ end
+end
+
+def fun_l5_n145(x)
+ if (x < 1)
+ fun_l6_n333(x)
+ else
+ fun_l6_n385(x)
+ end
+end
+
+def fun_l5_n146(x)
+ if (x < 1)
+ fun_l6_n926(x)
+ else
+ fun_l6_n310(x)
+ end
+end
+
+def fun_l5_n147(x)
+ if (x < 1)
+ fun_l6_n754(x)
+ else
+ fun_l6_n354(x)
+ end
+end
+
+def fun_l5_n148(x)
+ if (x < 1)
+ fun_l6_n800(x)
+ else
+ fun_l6_n634(x)
+ end
+end
+
+def fun_l5_n149(x)
+ if (x < 1)
+ fun_l6_n5(x)
+ else
+ fun_l6_n67(x)
+ end
+end
+
+def fun_l5_n150(x)
+ if (x < 1)
+ fun_l6_n766(x)
+ else
+ fun_l6_n237(x)
+ end
+end
+
+def fun_l5_n151(x)
+ if (x < 1)
+ fun_l6_n277(x)
+ else
+ fun_l6_n692(x)
+ end
+end
+
+def fun_l5_n152(x)
+ if (x < 1)
+ fun_l6_n328(x)
+ else
+ fun_l6_n216(x)
+ end
+end
+
+def fun_l5_n153(x)
+ if (x < 1)
+ fun_l6_n328(x)
+ else
+ fun_l6_n165(x)
+ end
+end
+
+def fun_l5_n154(x)
+ if (x < 1)
+ fun_l6_n884(x)
+ else
+ fun_l6_n811(x)
+ end
+end
+
+def fun_l5_n155(x)
+ if (x < 1)
+ fun_l6_n376(x)
+ else
+ fun_l6_n188(x)
+ end
+end
+
+def fun_l5_n156(x)
+ if (x < 1)
+ fun_l6_n276(x)
+ else
+ fun_l6_n19(x)
+ end
+end
+
+def fun_l5_n157(x)
+ if (x < 1)
+ fun_l6_n443(x)
+ else
+ fun_l6_n815(x)
+ end
+end
+
+def fun_l5_n158(x)
+ if (x < 1)
+ fun_l6_n610(x)
+ else
+ fun_l6_n341(x)
+ end
+end
+
+def fun_l5_n159(x)
+ if (x < 1)
+ fun_l6_n147(x)
+ else
+ fun_l6_n289(x)
+ end
+end
+
+def fun_l5_n160(x)
+ if (x < 1)
+ fun_l6_n258(x)
+ else
+ fun_l6_n683(x)
+ end
+end
+
+def fun_l5_n161(x)
+ if (x < 1)
+ fun_l6_n544(x)
+ else
+ fun_l6_n621(x)
+ end
+end
+
+def fun_l5_n162(x)
+ if (x < 1)
+ fun_l6_n119(x)
+ else
+ fun_l6_n336(x)
+ end
+end
+
+def fun_l5_n163(x)
+ if (x < 1)
+ fun_l6_n297(x)
+ else
+ fun_l6_n915(x)
+ end
+end
+
+def fun_l5_n164(x)
+ if (x < 1)
+ fun_l6_n782(x)
+ else
+ fun_l6_n499(x)
+ end
+end
+
+def fun_l5_n165(x)
+ if (x < 1)
+ fun_l6_n660(x)
+ else
+ fun_l6_n227(x)
+ end
+end
+
+def fun_l5_n166(x)
+ if (x < 1)
+ fun_l6_n820(x)
+ else
+ fun_l6_n822(x)
+ end
+end
+
+def fun_l5_n167(x)
+ if (x < 1)
+ fun_l6_n878(x)
+ else
+ fun_l6_n642(x)
+ end
+end
+
+def fun_l5_n168(x)
+ if (x < 1)
+ fun_l6_n123(x)
+ else
+ fun_l6_n451(x)
+ end
+end
+
+def fun_l5_n169(x)
+ if (x < 1)
+ fun_l6_n741(x)
+ else
+ fun_l6_n35(x)
+ end
+end
+
+def fun_l5_n170(x)
+ if (x < 1)
+ fun_l6_n522(x)
+ else
+ fun_l6_n546(x)
+ end
+end
+
+def fun_l5_n171(x)
+ if (x < 1)
+ fun_l6_n555(x)
+ else
+ fun_l6_n667(x)
+ end
+end
+
+def fun_l5_n172(x)
+ if (x < 1)
+ fun_l6_n760(x)
+ else
+ fun_l6_n292(x)
+ end
+end
+
+def fun_l5_n173(x)
+ if (x < 1)
+ fun_l6_n375(x)
+ else
+ fun_l6_n907(x)
+ end
+end
+
+def fun_l5_n174(x)
+ if (x < 1)
+ fun_l6_n382(x)
+ else
+ fun_l6_n693(x)
+ end
+end
+
+def fun_l5_n175(x)
+ if (x < 1)
+ fun_l6_n728(x)
+ else
+ fun_l6_n378(x)
+ end
+end
+
+def fun_l5_n176(x)
+ if (x < 1)
+ fun_l6_n79(x)
+ else
+ fun_l6_n402(x)
+ end
+end
+
+def fun_l5_n177(x)
+ if (x < 1)
+ fun_l6_n444(x)
+ else
+ fun_l6_n500(x)
+ end
+end
+
+def fun_l5_n178(x)
+ if (x < 1)
+ fun_l6_n535(x)
+ else
+ fun_l6_n754(x)
+ end
+end
+
+def fun_l5_n179(x)
+ if (x < 1)
+ fun_l6_n831(x)
+ else
+ fun_l6_n206(x)
+ end
+end
+
+def fun_l5_n180(x)
+ if (x < 1)
+ fun_l6_n878(x)
+ else
+ fun_l6_n715(x)
+ end
+end
+
+def fun_l5_n181(x)
+ if (x < 1)
+ fun_l6_n433(x)
+ else
+ fun_l6_n955(x)
+ end
+end
+
+def fun_l5_n182(x)
+ if (x < 1)
+ fun_l6_n611(x)
+ else
+ fun_l6_n258(x)
+ end
+end
+
+def fun_l5_n183(x)
+ if (x < 1)
+ fun_l6_n618(x)
+ else
+ fun_l6_n29(x)
+ end
+end
+
+def fun_l5_n184(x)
+ if (x < 1)
+ fun_l6_n838(x)
+ else
+ fun_l6_n918(x)
+ end
+end
+
+def fun_l5_n185(x)
+ if (x < 1)
+ fun_l6_n307(x)
+ else
+ fun_l6_n811(x)
+ end
+end
+
+def fun_l5_n186(x)
+ if (x < 1)
+ fun_l6_n564(x)
+ else
+ fun_l6_n73(x)
+ end
+end
+
+def fun_l5_n187(x)
+ if (x < 1)
+ fun_l6_n871(x)
+ else
+ fun_l6_n669(x)
+ end
+end
+
+def fun_l5_n188(x)
+ if (x < 1)
+ fun_l6_n546(x)
+ else
+ fun_l6_n41(x)
+ end
+end
+
+def fun_l5_n189(x)
+ if (x < 1)
+ fun_l6_n312(x)
+ else
+ fun_l6_n998(x)
+ end
+end
+
+def fun_l5_n190(x)
+ if (x < 1)
+ fun_l6_n519(x)
+ else
+ fun_l6_n275(x)
+ end
+end
+
+def fun_l5_n191(x)
+ if (x < 1)
+ fun_l6_n525(x)
+ else
+ fun_l6_n228(x)
+ end
+end
+
+def fun_l5_n192(x)
+ if (x < 1)
+ fun_l6_n115(x)
+ else
+ fun_l6_n790(x)
+ end
+end
+
+def fun_l5_n193(x)
+ if (x < 1)
+ fun_l6_n976(x)
+ else
+ fun_l6_n219(x)
+ end
+end
+
+def fun_l5_n194(x)
+ if (x < 1)
+ fun_l6_n785(x)
+ else
+ fun_l6_n834(x)
+ end
+end
+
+def fun_l5_n195(x)
+ if (x < 1)
+ fun_l6_n238(x)
+ else
+ fun_l6_n471(x)
+ end
+end
+
+def fun_l5_n196(x)
+ if (x < 1)
+ fun_l6_n275(x)
+ else
+ fun_l6_n218(x)
+ end
+end
+
+def fun_l5_n197(x)
+ if (x < 1)
+ fun_l6_n590(x)
+ else
+ fun_l6_n458(x)
+ end
+end
+
+def fun_l5_n198(x)
+ if (x < 1)
+ fun_l6_n498(x)
+ else
+ fun_l6_n906(x)
+ end
+end
+
+def fun_l5_n199(x)
+ if (x < 1)
+ fun_l6_n547(x)
+ else
+ fun_l6_n885(x)
+ end
+end
+
+def fun_l5_n200(x)
+ if (x < 1)
+ fun_l6_n631(x)
+ else
+ fun_l6_n0(x)
+ end
+end
+
+def fun_l5_n201(x)
+ if (x < 1)
+ fun_l6_n31(x)
+ else
+ fun_l6_n729(x)
+ end
+end
+
+def fun_l5_n202(x)
+ if (x < 1)
+ fun_l6_n303(x)
+ else
+ fun_l6_n847(x)
+ end
+end
+
+def fun_l5_n203(x)
+ if (x < 1)
+ fun_l6_n912(x)
+ else
+ fun_l6_n346(x)
+ end
+end
+
+def fun_l5_n204(x)
+ if (x < 1)
+ fun_l6_n153(x)
+ else
+ fun_l6_n831(x)
+ end
+end
+
+def fun_l5_n205(x)
+ if (x < 1)
+ fun_l6_n424(x)
+ else
+ fun_l6_n836(x)
+ end
+end
+
+def fun_l5_n206(x)
+ if (x < 1)
+ fun_l6_n214(x)
+ else
+ fun_l6_n207(x)
+ end
+end
+
+def fun_l5_n207(x)
+ if (x < 1)
+ fun_l6_n362(x)
+ else
+ fun_l6_n241(x)
+ end
+end
+
+def fun_l5_n208(x)
+ if (x < 1)
+ fun_l6_n796(x)
+ else
+ fun_l6_n763(x)
+ end
+end
+
+def fun_l5_n209(x)
+ if (x < 1)
+ fun_l6_n523(x)
+ else
+ fun_l6_n998(x)
+ end
+end
+
+def fun_l5_n210(x)
+ if (x < 1)
+ fun_l6_n326(x)
+ else
+ fun_l6_n755(x)
+ end
+end
+
+def fun_l5_n211(x)
+ if (x < 1)
+ fun_l6_n345(x)
+ else
+ fun_l6_n747(x)
+ end
+end
+
+def fun_l5_n212(x)
+ if (x < 1)
+ fun_l6_n631(x)
+ else
+ fun_l6_n576(x)
+ end
+end
+
+def fun_l5_n213(x)
+ if (x < 1)
+ fun_l6_n713(x)
+ else
+ fun_l6_n678(x)
+ end
+end
+
+def fun_l5_n214(x)
+ if (x < 1)
+ fun_l6_n470(x)
+ else
+ fun_l6_n877(x)
+ end
+end
+
+def fun_l5_n215(x)
+ if (x < 1)
+ fun_l6_n78(x)
+ else
+ fun_l6_n315(x)
+ end
+end
+
+def fun_l5_n216(x)
+ if (x < 1)
+ fun_l6_n281(x)
+ else
+ fun_l6_n872(x)
+ end
+end
+
+def fun_l5_n217(x)
+ if (x < 1)
+ fun_l6_n200(x)
+ else
+ fun_l6_n323(x)
+ end
+end
+
+def fun_l5_n218(x)
+ if (x < 1)
+ fun_l6_n483(x)
+ else
+ fun_l6_n938(x)
+ end
+end
+
+def fun_l5_n219(x)
+ if (x < 1)
+ fun_l6_n102(x)
+ else
+ fun_l6_n690(x)
+ end
+end
+
+def fun_l5_n220(x)
+ if (x < 1)
+ fun_l6_n740(x)
+ else
+ fun_l6_n684(x)
+ end
+end
+
+def fun_l5_n221(x)
+ if (x < 1)
+ fun_l6_n366(x)
+ else
+ fun_l6_n138(x)
+ end
+end
+
+def fun_l5_n222(x)
+ if (x < 1)
+ fun_l6_n306(x)
+ else
+ fun_l6_n874(x)
+ end
+end
+
+def fun_l5_n223(x)
+ if (x < 1)
+ fun_l6_n828(x)
+ else
+ fun_l6_n839(x)
+ end
+end
+
+def fun_l5_n224(x)
+ if (x < 1)
+ fun_l6_n739(x)
+ else
+ fun_l6_n569(x)
+ end
+end
+
+def fun_l5_n225(x)
+ if (x < 1)
+ fun_l6_n332(x)
+ else
+ fun_l6_n49(x)
+ end
+end
+
+def fun_l5_n226(x)
+ if (x < 1)
+ fun_l6_n783(x)
+ else
+ fun_l6_n20(x)
+ end
+end
+
+def fun_l5_n227(x)
+ if (x < 1)
+ fun_l6_n774(x)
+ else
+ fun_l6_n346(x)
+ end
+end
+
+def fun_l5_n228(x)
+ if (x < 1)
+ fun_l6_n637(x)
+ else
+ fun_l6_n302(x)
+ end
+end
+
+def fun_l5_n229(x)
+ if (x < 1)
+ fun_l6_n10(x)
+ else
+ fun_l6_n697(x)
+ end
+end
+
+def fun_l5_n230(x)
+ if (x < 1)
+ fun_l6_n804(x)
+ else
+ fun_l6_n141(x)
+ end
+end
+
+def fun_l5_n231(x)
+ if (x < 1)
+ fun_l6_n232(x)
+ else
+ fun_l6_n88(x)
+ end
+end
+
+def fun_l5_n232(x)
+ if (x < 1)
+ fun_l6_n248(x)
+ else
+ fun_l6_n862(x)
+ end
+end
+
+def fun_l5_n233(x)
+ if (x < 1)
+ fun_l6_n287(x)
+ else
+ fun_l6_n315(x)
+ end
+end
+
+def fun_l5_n234(x)
+ if (x < 1)
+ fun_l6_n543(x)
+ else
+ fun_l6_n182(x)
+ end
+end
+
+def fun_l5_n235(x)
+ if (x < 1)
+ fun_l6_n162(x)
+ else
+ fun_l6_n710(x)
+ end
+end
+
+def fun_l5_n236(x)
+ if (x < 1)
+ fun_l6_n879(x)
+ else
+ fun_l6_n262(x)
+ end
+end
+
+def fun_l5_n237(x)
+ if (x < 1)
+ fun_l6_n706(x)
+ else
+ fun_l6_n112(x)
+ end
+end
+
+def fun_l5_n238(x)
+ if (x < 1)
+ fun_l6_n578(x)
+ else
+ fun_l6_n792(x)
+ end
+end
+
+def fun_l5_n239(x)
+ if (x < 1)
+ fun_l6_n685(x)
+ else
+ fun_l6_n441(x)
+ end
+end
+
+def fun_l5_n240(x)
+ if (x < 1)
+ fun_l6_n706(x)
+ else
+ fun_l6_n737(x)
+ end
+end
+
+def fun_l5_n241(x)
+ if (x < 1)
+ fun_l6_n360(x)
+ else
+ fun_l6_n751(x)
+ end
+end
+
+def fun_l5_n242(x)
+ if (x < 1)
+ fun_l6_n823(x)
+ else
+ fun_l6_n888(x)
+ end
+end
+
+def fun_l5_n243(x)
+ if (x < 1)
+ fun_l6_n743(x)
+ else
+ fun_l6_n705(x)
+ end
+end
+
+def fun_l5_n244(x)
+ if (x < 1)
+ fun_l6_n340(x)
+ else
+ fun_l6_n541(x)
+ end
+end
+
+def fun_l5_n245(x)
+ if (x < 1)
+ fun_l6_n324(x)
+ else
+ fun_l6_n121(x)
+ end
+end
+
+def fun_l5_n246(x)
+ if (x < 1)
+ fun_l6_n241(x)
+ else
+ fun_l6_n584(x)
+ end
+end
+
+def fun_l5_n247(x)
+ if (x < 1)
+ fun_l6_n468(x)
+ else
+ fun_l6_n673(x)
+ end
+end
+
+def fun_l5_n248(x)
+ if (x < 1)
+ fun_l6_n595(x)
+ else
+ fun_l6_n537(x)
+ end
+end
+
+def fun_l5_n249(x)
+ if (x < 1)
+ fun_l6_n166(x)
+ else
+ fun_l6_n766(x)
+ end
+end
+
+def fun_l5_n250(x)
+ if (x < 1)
+ fun_l6_n322(x)
+ else
+ fun_l6_n904(x)
+ end
+end
+
+def fun_l5_n251(x)
+ if (x < 1)
+ fun_l6_n273(x)
+ else
+ fun_l6_n353(x)
+ end
+end
+
+def fun_l5_n252(x)
+ if (x < 1)
+ fun_l6_n990(x)
+ else
+ fun_l6_n360(x)
+ end
+end
+
+def fun_l5_n253(x)
+ if (x < 1)
+ fun_l6_n552(x)
+ else
+ fun_l6_n649(x)
+ end
+end
+
+def fun_l5_n254(x)
+ if (x < 1)
+ fun_l6_n917(x)
+ else
+ fun_l6_n803(x)
+ end
+end
+
+def fun_l5_n255(x)
+ if (x < 1)
+ fun_l6_n763(x)
+ else
+ fun_l6_n464(x)
+ end
+end
+
+def fun_l5_n256(x)
+ if (x < 1)
+ fun_l6_n577(x)
+ else
+ fun_l6_n70(x)
+ end
+end
+
+def fun_l5_n257(x)
+ if (x < 1)
+ fun_l6_n618(x)
+ else
+ fun_l6_n943(x)
+ end
+end
+
+def fun_l5_n258(x)
+ if (x < 1)
+ fun_l6_n683(x)
+ else
+ fun_l6_n761(x)
+ end
+end
+
+def fun_l5_n259(x)
+ if (x < 1)
+ fun_l6_n284(x)
+ else
+ fun_l6_n223(x)
+ end
+end
+
+def fun_l5_n260(x)
+ if (x < 1)
+ fun_l6_n189(x)
+ else
+ fun_l6_n455(x)
+ end
+end
+
+def fun_l5_n261(x)
+ if (x < 1)
+ fun_l6_n632(x)
+ else
+ fun_l6_n245(x)
+ end
+end
+
+def fun_l5_n262(x)
+ if (x < 1)
+ fun_l6_n940(x)
+ else
+ fun_l6_n38(x)
+ end
+end
+
+def fun_l5_n263(x)
+ if (x < 1)
+ fun_l6_n289(x)
+ else
+ fun_l6_n356(x)
+ end
+end
+
+def fun_l5_n264(x)
+ if (x < 1)
+ fun_l6_n268(x)
+ else
+ fun_l6_n412(x)
+ end
+end
+
+def fun_l5_n265(x)
+ if (x < 1)
+ fun_l6_n370(x)
+ else
+ fun_l6_n858(x)
+ end
+end
+
+def fun_l5_n266(x)
+ if (x < 1)
+ fun_l6_n523(x)
+ else
+ fun_l6_n809(x)
+ end
+end
+
+def fun_l5_n267(x)
+ if (x < 1)
+ fun_l6_n469(x)
+ else
+ fun_l6_n260(x)
+ end
+end
+
+def fun_l5_n268(x)
+ if (x < 1)
+ fun_l6_n442(x)
+ else
+ fun_l6_n738(x)
+ end
+end
+
+def fun_l5_n269(x)
+ if (x < 1)
+ fun_l6_n663(x)
+ else
+ fun_l6_n109(x)
+ end
+end
+
+def fun_l5_n270(x)
+ if (x < 1)
+ fun_l6_n302(x)
+ else
+ fun_l6_n664(x)
+ end
+end
+
+def fun_l5_n271(x)
+ if (x < 1)
+ fun_l6_n201(x)
+ else
+ fun_l6_n251(x)
+ end
+end
+
+def fun_l5_n272(x)
+ if (x < 1)
+ fun_l6_n776(x)
+ else
+ fun_l6_n903(x)
+ end
+end
+
+def fun_l5_n273(x)
+ if (x < 1)
+ fun_l6_n683(x)
+ else
+ fun_l6_n233(x)
+ end
+end
+
+def fun_l5_n274(x)
+ if (x < 1)
+ fun_l6_n530(x)
+ else
+ fun_l6_n487(x)
+ end
+end
+
+def fun_l5_n275(x)
+ if (x < 1)
+ fun_l6_n848(x)
+ else
+ fun_l6_n99(x)
+ end
+end
+
+def fun_l5_n276(x)
+ if (x < 1)
+ fun_l6_n126(x)
+ else
+ fun_l6_n914(x)
+ end
+end
+
+def fun_l5_n277(x)
+ if (x < 1)
+ fun_l6_n304(x)
+ else
+ fun_l6_n198(x)
+ end
+end
+
+def fun_l5_n278(x)
+ if (x < 1)
+ fun_l6_n47(x)
+ else
+ fun_l6_n179(x)
+ end
+end
+
+def fun_l5_n279(x)
+ if (x < 1)
+ fun_l6_n846(x)
+ else
+ fun_l6_n301(x)
+ end
+end
+
+def fun_l5_n280(x)
+ if (x < 1)
+ fun_l6_n576(x)
+ else
+ fun_l6_n561(x)
+ end
+end
+
+def fun_l5_n281(x)
+ if (x < 1)
+ fun_l6_n307(x)
+ else
+ fun_l6_n545(x)
+ end
+end
+
+def fun_l5_n282(x)
+ if (x < 1)
+ fun_l6_n434(x)
+ else
+ fun_l6_n669(x)
+ end
+end
+
+def fun_l5_n283(x)
+ if (x < 1)
+ fun_l6_n606(x)
+ else
+ fun_l6_n147(x)
+ end
+end
+
+def fun_l5_n284(x)
+ if (x < 1)
+ fun_l6_n212(x)
+ else
+ fun_l6_n504(x)
+ end
+end
+
+def fun_l5_n285(x)
+ if (x < 1)
+ fun_l6_n193(x)
+ else
+ fun_l6_n165(x)
+ end
+end
+
+def fun_l5_n286(x)
+ if (x < 1)
+ fun_l6_n569(x)
+ else
+ fun_l6_n100(x)
+ end
+end
+
+def fun_l5_n287(x)
+ if (x < 1)
+ fun_l6_n59(x)
+ else
+ fun_l6_n998(x)
+ end
+end
+
+def fun_l5_n288(x)
+ if (x < 1)
+ fun_l6_n544(x)
+ else
+ fun_l6_n923(x)
+ end
+end
+
+def fun_l5_n289(x)
+ if (x < 1)
+ fun_l6_n317(x)
+ else
+ fun_l6_n255(x)
+ end
+end
+
+def fun_l5_n290(x)
+ if (x < 1)
+ fun_l6_n476(x)
+ else
+ fun_l6_n789(x)
+ end
+end
+
+def fun_l5_n291(x)
+ if (x < 1)
+ fun_l6_n324(x)
+ else
+ fun_l6_n162(x)
+ end
+end
+
+def fun_l5_n292(x)
+ if (x < 1)
+ fun_l6_n704(x)
+ else
+ fun_l6_n454(x)
+ end
+end
+
+def fun_l5_n293(x)
+ if (x < 1)
+ fun_l6_n994(x)
+ else
+ fun_l6_n637(x)
+ end
+end
+
+def fun_l5_n294(x)
+ if (x < 1)
+ fun_l6_n801(x)
+ else
+ fun_l6_n913(x)
+ end
+end
+
+def fun_l5_n295(x)
+ if (x < 1)
+ fun_l6_n579(x)
+ else
+ fun_l6_n795(x)
+ end
+end
+
+def fun_l5_n296(x)
+ if (x < 1)
+ fun_l6_n689(x)
+ else
+ fun_l6_n546(x)
+ end
+end
+
+def fun_l5_n297(x)
+ if (x < 1)
+ fun_l6_n971(x)
+ else
+ fun_l6_n493(x)
+ end
+end
+
+def fun_l5_n298(x)
+ if (x < 1)
+ fun_l6_n67(x)
+ else
+ fun_l6_n372(x)
+ end
+end
+
+def fun_l5_n299(x)
+ if (x < 1)
+ fun_l6_n654(x)
+ else
+ fun_l6_n921(x)
+ end
+end
+
+def fun_l5_n300(x)
+ if (x < 1)
+ fun_l6_n402(x)
+ else
+ fun_l6_n437(x)
+ end
+end
+
+def fun_l5_n301(x)
+ if (x < 1)
+ fun_l6_n448(x)
+ else
+ fun_l6_n177(x)
+ end
+end
+
+def fun_l5_n302(x)
+ if (x < 1)
+ fun_l6_n308(x)
+ else
+ fun_l6_n869(x)
+ end
+end
+
+def fun_l5_n303(x)
+ if (x < 1)
+ fun_l6_n825(x)
+ else
+ fun_l6_n848(x)
+ end
+end
+
+def fun_l5_n304(x)
+ if (x < 1)
+ fun_l6_n735(x)
+ else
+ fun_l6_n653(x)
+ end
+end
+
+def fun_l5_n305(x)
+ if (x < 1)
+ fun_l6_n393(x)
+ else
+ fun_l6_n113(x)
+ end
+end
+
+def fun_l5_n306(x)
+ if (x < 1)
+ fun_l6_n821(x)
+ else
+ fun_l6_n827(x)
+ end
+end
+
+def fun_l5_n307(x)
+ if (x < 1)
+ fun_l6_n734(x)
+ else
+ fun_l6_n498(x)
+ end
+end
+
+def fun_l5_n308(x)
+ if (x < 1)
+ fun_l6_n833(x)
+ else
+ fun_l6_n302(x)
+ end
+end
+
+def fun_l5_n309(x)
+ if (x < 1)
+ fun_l6_n596(x)
+ else
+ fun_l6_n544(x)
+ end
+end
+
+def fun_l5_n310(x)
+ if (x < 1)
+ fun_l6_n61(x)
+ else
+ fun_l6_n823(x)
+ end
+end
+
+def fun_l5_n311(x)
+ if (x < 1)
+ fun_l6_n596(x)
+ else
+ fun_l6_n98(x)
+ end
+end
+
+def fun_l5_n312(x)
+ if (x < 1)
+ fun_l6_n78(x)
+ else
+ fun_l6_n186(x)
+ end
+end
+
+def fun_l5_n313(x)
+ if (x < 1)
+ fun_l6_n286(x)
+ else
+ fun_l6_n230(x)
+ end
+end
+
+def fun_l5_n314(x)
+ if (x < 1)
+ fun_l6_n613(x)
+ else
+ fun_l6_n412(x)
+ end
+end
+
+def fun_l5_n315(x)
+ if (x < 1)
+ fun_l6_n0(x)
+ else
+ fun_l6_n579(x)
+ end
+end
+
+def fun_l5_n316(x)
+ if (x < 1)
+ fun_l6_n991(x)
+ else
+ fun_l6_n379(x)
+ end
+end
+
+def fun_l5_n317(x)
+ if (x < 1)
+ fun_l6_n825(x)
+ else
+ fun_l6_n964(x)
+ end
+end
+
+def fun_l5_n318(x)
+ if (x < 1)
+ fun_l6_n707(x)
+ else
+ fun_l6_n664(x)
+ end
+end
+
+def fun_l5_n319(x)
+ if (x < 1)
+ fun_l6_n326(x)
+ else
+ fun_l6_n26(x)
+ end
+end
+
+def fun_l5_n320(x)
+ if (x < 1)
+ fun_l6_n684(x)
+ else
+ fun_l6_n333(x)
+ end
+end
+
+def fun_l5_n321(x)
+ if (x < 1)
+ fun_l6_n165(x)
+ else
+ fun_l6_n586(x)
+ end
+end
+
+def fun_l5_n322(x)
+ if (x < 1)
+ fun_l6_n28(x)
+ else
+ fun_l6_n53(x)
+ end
+end
+
+def fun_l5_n323(x)
+ if (x < 1)
+ fun_l6_n693(x)
+ else
+ fun_l6_n84(x)
+ end
+end
+
+def fun_l5_n324(x)
+ if (x < 1)
+ fun_l6_n500(x)
+ else
+ fun_l6_n445(x)
+ end
+end
+
+def fun_l5_n325(x)
+ if (x < 1)
+ fun_l6_n189(x)
+ else
+ fun_l6_n756(x)
+ end
+end
+
+def fun_l5_n326(x)
+ if (x < 1)
+ fun_l6_n147(x)
+ else
+ fun_l6_n709(x)
+ end
+end
+
+def fun_l5_n327(x)
+ if (x < 1)
+ fun_l6_n893(x)
+ else
+ fun_l6_n851(x)
+ end
+end
+
+def fun_l5_n328(x)
+ if (x < 1)
+ fun_l6_n685(x)
+ else
+ fun_l6_n399(x)
+ end
+end
+
+def fun_l5_n329(x)
+ if (x < 1)
+ fun_l6_n281(x)
+ else
+ fun_l6_n284(x)
+ end
+end
+
+def fun_l5_n330(x)
+ if (x < 1)
+ fun_l6_n579(x)
+ else
+ fun_l6_n671(x)
+ end
+end
+
+def fun_l5_n331(x)
+ if (x < 1)
+ fun_l6_n176(x)
+ else
+ fun_l6_n829(x)
+ end
+end
+
+def fun_l5_n332(x)
+ if (x < 1)
+ fun_l6_n209(x)
+ else
+ fun_l6_n91(x)
+ end
+end
+
+def fun_l5_n333(x)
+ if (x < 1)
+ fun_l6_n302(x)
+ else
+ fun_l6_n360(x)
+ end
+end
+
+def fun_l5_n334(x)
+ if (x < 1)
+ fun_l6_n701(x)
+ else
+ fun_l6_n438(x)
+ end
+end
+
+def fun_l5_n335(x)
+ if (x < 1)
+ fun_l6_n923(x)
+ else
+ fun_l6_n613(x)
+ end
+end
+
+def fun_l5_n336(x)
+ if (x < 1)
+ fun_l6_n490(x)
+ else
+ fun_l6_n405(x)
+ end
+end
+
+def fun_l5_n337(x)
+ if (x < 1)
+ fun_l6_n187(x)
+ else
+ fun_l6_n953(x)
+ end
+end
+
+def fun_l5_n338(x)
+ if (x < 1)
+ fun_l6_n396(x)
+ else
+ fun_l6_n917(x)
+ end
+end
+
+def fun_l5_n339(x)
+ if (x < 1)
+ fun_l6_n822(x)
+ else
+ fun_l6_n939(x)
+ end
+end
+
+def fun_l5_n340(x)
+ if (x < 1)
+ fun_l6_n665(x)
+ else
+ fun_l6_n890(x)
+ end
+end
+
+def fun_l5_n341(x)
+ if (x < 1)
+ fun_l6_n125(x)
+ else
+ fun_l6_n817(x)
+ end
+end
+
+def fun_l5_n342(x)
+ if (x < 1)
+ fun_l6_n825(x)
+ else
+ fun_l6_n853(x)
+ end
+end
+
+def fun_l5_n343(x)
+ if (x < 1)
+ fun_l6_n724(x)
+ else
+ fun_l6_n633(x)
+ end
+end
+
+def fun_l5_n344(x)
+ if (x < 1)
+ fun_l6_n901(x)
+ else
+ fun_l6_n26(x)
+ end
+end
+
+def fun_l5_n345(x)
+ if (x < 1)
+ fun_l6_n406(x)
+ else
+ fun_l6_n664(x)
+ end
+end
+
+def fun_l5_n346(x)
+ if (x < 1)
+ fun_l6_n720(x)
+ else
+ fun_l6_n856(x)
+ end
+end
+
+def fun_l5_n347(x)
+ if (x < 1)
+ fun_l6_n617(x)
+ else
+ fun_l6_n116(x)
+ end
+end
+
+def fun_l5_n348(x)
+ if (x < 1)
+ fun_l6_n560(x)
+ else
+ fun_l6_n24(x)
+ end
+end
+
+def fun_l5_n349(x)
+ if (x < 1)
+ fun_l6_n758(x)
+ else
+ fun_l6_n297(x)
+ end
+end
+
+def fun_l5_n350(x)
+ if (x < 1)
+ fun_l6_n583(x)
+ else
+ fun_l6_n673(x)
+ end
+end
+
+def fun_l5_n351(x)
+ if (x < 1)
+ fun_l6_n990(x)
+ else
+ fun_l6_n512(x)
+ end
+end
+
+def fun_l5_n352(x)
+ if (x < 1)
+ fun_l6_n250(x)
+ else
+ fun_l6_n819(x)
+ end
+end
+
+def fun_l5_n353(x)
+ if (x < 1)
+ fun_l6_n186(x)
+ else
+ fun_l6_n787(x)
+ end
+end
+
+def fun_l5_n354(x)
+ if (x < 1)
+ fun_l6_n217(x)
+ else
+ fun_l6_n231(x)
+ end
+end
+
+def fun_l5_n355(x)
+ if (x < 1)
+ fun_l6_n827(x)
+ else
+ fun_l6_n439(x)
+ end
+end
+
+def fun_l5_n356(x)
+ if (x < 1)
+ fun_l6_n456(x)
+ else
+ fun_l6_n352(x)
+ end
+end
+
+def fun_l5_n357(x)
+ if (x < 1)
+ fun_l6_n306(x)
+ else
+ fun_l6_n236(x)
+ end
+end
+
+def fun_l5_n358(x)
+ if (x < 1)
+ fun_l6_n515(x)
+ else
+ fun_l6_n100(x)
+ end
+end
+
+def fun_l5_n359(x)
+ if (x < 1)
+ fun_l6_n28(x)
+ else
+ fun_l6_n820(x)
+ end
+end
+
+def fun_l5_n360(x)
+ if (x < 1)
+ fun_l6_n683(x)
+ else
+ fun_l6_n547(x)
+ end
+end
+
+def fun_l5_n361(x)
+ if (x < 1)
+ fun_l6_n573(x)
+ else
+ fun_l6_n699(x)
+ end
+end
+
+def fun_l5_n362(x)
+ if (x < 1)
+ fun_l6_n807(x)
+ else
+ fun_l6_n548(x)
+ end
+end
+
+def fun_l5_n363(x)
+ if (x < 1)
+ fun_l6_n712(x)
+ else
+ fun_l6_n323(x)
+ end
+end
+
+def fun_l5_n364(x)
+ if (x < 1)
+ fun_l6_n457(x)
+ else
+ fun_l6_n447(x)
+ end
+end
+
+def fun_l5_n365(x)
+ if (x < 1)
+ fun_l6_n684(x)
+ else
+ fun_l6_n782(x)
+ end
+end
+
+def fun_l5_n366(x)
+ if (x < 1)
+ fun_l6_n217(x)
+ else
+ fun_l6_n344(x)
+ end
+end
+
+def fun_l5_n367(x)
+ if (x < 1)
+ fun_l6_n670(x)
+ else
+ fun_l6_n311(x)
+ end
+end
+
+def fun_l5_n368(x)
+ if (x < 1)
+ fun_l6_n224(x)
+ else
+ fun_l6_n198(x)
+ end
+end
+
+def fun_l5_n369(x)
+ if (x < 1)
+ fun_l6_n517(x)
+ else
+ fun_l6_n186(x)
+ end
+end
+
+def fun_l5_n370(x)
+ if (x < 1)
+ fun_l6_n511(x)
+ else
+ fun_l6_n27(x)
+ end
+end
+
+def fun_l5_n371(x)
+ if (x < 1)
+ fun_l6_n485(x)
+ else
+ fun_l6_n288(x)
+ end
+end
+
+def fun_l5_n372(x)
+ if (x < 1)
+ fun_l6_n931(x)
+ else
+ fun_l6_n473(x)
+ end
+end
+
+def fun_l5_n373(x)
+ if (x < 1)
+ fun_l6_n381(x)
+ else
+ fun_l6_n650(x)
+ end
+end
+
+def fun_l5_n374(x)
+ if (x < 1)
+ fun_l6_n132(x)
+ else
+ fun_l6_n243(x)
+ end
+end
+
+def fun_l5_n375(x)
+ if (x < 1)
+ fun_l6_n900(x)
+ else
+ fun_l6_n322(x)
+ end
+end
+
+def fun_l5_n376(x)
+ if (x < 1)
+ fun_l6_n787(x)
+ else
+ fun_l6_n588(x)
+ end
+end
+
+def fun_l5_n377(x)
+ if (x < 1)
+ fun_l6_n233(x)
+ else
+ fun_l6_n358(x)
+ end
+end
+
+def fun_l5_n378(x)
+ if (x < 1)
+ fun_l6_n601(x)
+ else
+ fun_l6_n40(x)
+ end
+end
+
+def fun_l5_n379(x)
+ if (x < 1)
+ fun_l6_n682(x)
+ else
+ fun_l6_n63(x)
+ end
+end
+
+def fun_l5_n380(x)
+ if (x < 1)
+ fun_l6_n279(x)
+ else
+ fun_l6_n393(x)
+ end
+end
+
+def fun_l5_n381(x)
+ if (x < 1)
+ fun_l6_n83(x)
+ else
+ fun_l6_n538(x)
+ end
+end
+
+def fun_l5_n382(x)
+ if (x < 1)
+ fun_l6_n956(x)
+ else
+ fun_l6_n194(x)
+ end
+end
+
+def fun_l5_n383(x)
+ if (x < 1)
+ fun_l6_n55(x)
+ else
+ fun_l6_n140(x)
+ end
+end
+
+def fun_l5_n384(x)
+ if (x < 1)
+ fun_l6_n165(x)
+ else
+ fun_l6_n784(x)
+ end
+end
+
+def fun_l5_n385(x)
+ if (x < 1)
+ fun_l6_n582(x)
+ else
+ fun_l6_n97(x)
+ end
+end
+
+def fun_l5_n386(x)
+ if (x < 1)
+ fun_l6_n362(x)
+ else
+ fun_l6_n935(x)
+ end
+end
+
+def fun_l5_n387(x)
+ if (x < 1)
+ fun_l6_n496(x)
+ else
+ fun_l6_n512(x)
+ end
+end
+
+def fun_l5_n388(x)
+ if (x < 1)
+ fun_l6_n793(x)
+ else
+ fun_l6_n746(x)
+ end
+end
+
+def fun_l5_n389(x)
+ if (x < 1)
+ fun_l6_n172(x)
+ else
+ fun_l6_n731(x)
+ end
+end
+
+def fun_l5_n390(x)
+ if (x < 1)
+ fun_l6_n895(x)
+ else
+ fun_l6_n694(x)
+ end
+end
+
+def fun_l5_n391(x)
+ if (x < 1)
+ fun_l6_n336(x)
+ else
+ fun_l6_n551(x)
+ end
+end
+
+def fun_l5_n392(x)
+ if (x < 1)
+ fun_l6_n453(x)
+ else
+ fun_l6_n83(x)
+ end
+end
+
+def fun_l5_n393(x)
+ if (x < 1)
+ fun_l6_n571(x)
+ else
+ fun_l6_n293(x)
+ end
+end
+
+def fun_l5_n394(x)
+ if (x < 1)
+ fun_l6_n872(x)
+ else
+ fun_l6_n519(x)
+ end
+end
+
+def fun_l5_n395(x)
+ if (x < 1)
+ fun_l6_n912(x)
+ else
+ fun_l6_n955(x)
+ end
+end
+
+def fun_l5_n396(x)
+ if (x < 1)
+ fun_l6_n961(x)
+ else
+ fun_l6_n367(x)
+ end
+end
+
+def fun_l5_n397(x)
+ if (x < 1)
+ fun_l6_n255(x)
+ else
+ fun_l6_n639(x)
+ end
+end
+
+def fun_l5_n398(x)
+ if (x < 1)
+ fun_l6_n970(x)
+ else
+ fun_l6_n465(x)
+ end
+end
+
+def fun_l5_n399(x)
+ if (x < 1)
+ fun_l6_n625(x)
+ else
+ fun_l6_n387(x)
+ end
+end
+
+def fun_l5_n400(x)
+ if (x < 1)
+ fun_l6_n537(x)
+ else
+ fun_l6_n336(x)
+ end
+end
+
+def fun_l5_n401(x)
+ if (x < 1)
+ fun_l6_n61(x)
+ else
+ fun_l6_n0(x)
+ end
+end
+
+def fun_l5_n402(x)
+ if (x < 1)
+ fun_l6_n358(x)
+ else
+ fun_l6_n715(x)
+ end
+end
+
+def fun_l5_n403(x)
+ if (x < 1)
+ fun_l6_n864(x)
+ else
+ fun_l6_n729(x)
+ end
+end
+
+def fun_l5_n404(x)
+ if (x < 1)
+ fun_l6_n393(x)
+ else
+ fun_l6_n962(x)
+ end
+end
+
+def fun_l5_n405(x)
+ if (x < 1)
+ fun_l6_n995(x)
+ else
+ fun_l6_n835(x)
+ end
+end
+
+def fun_l5_n406(x)
+ if (x < 1)
+ fun_l6_n605(x)
+ else
+ fun_l6_n732(x)
+ end
+end
+
+def fun_l5_n407(x)
+ if (x < 1)
+ fun_l6_n132(x)
+ else
+ fun_l6_n112(x)
+ end
+end
+
+def fun_l5_n408(x)
+ if (x < 1)
+ fun_l6_n311(x)
+ else
+ fun_l6_n972(x)
+ end
+end
+
+def fun_l5_n409(x)
+ if (x < 1)
+ fun_l6_n460(x)
+ else
+ fun_l6_n750(x)
+ end
+end
+
+def fun_l5_n410(x)
+ if (x < 1)
+ fun_l6_n237(x)
+ else
+ fun_l6_n463(x)
+ end
+end
+
+def fun_l5_n411(x)
+ if (x < 1)
+ fun_l6_n251(x)
+ else
+ fun_l6_n944(x)
+ end
+end
+
+def fun_l5_n412(x)
+ if (x < 1)
+ fun_l6_n123(x)
+ else
+ fun_l6_n369(x)
+ end
+end
+
+def fun_l5_n413(x)
+ if (x < 1)
+ fun_l6_n371(x)
+ else
+ fun_l6_n589(x)
+ end
+end
+
+def fun_l5_n414(x)
+ if (x < 1)
+ fun_l6_n232(x)
+ else
+ fun_l6_n354(x)
+ end
+end
+
+def fun_l5_n415(x)
+ if (x < 1)
+ fun_l6_n580(x)
+ else
+ fun_l6_n330(x)
+ end
+end
+
+def fun_l5_n416(x)
+ if (x < 1)
+ fun_l6_n483(x)
+ else
+ fun_l6_n446(x)
+ end
+end
+
+def fun_l5_n417(x)
+ if (x < 1)
+ fun_l6_n834(x)
+ else
+ fun_l6_n284(x)
+ end
+end
+
+def fun_l5_n418(x)
+ if (x < 1)
+ fun_l6_n864(x)
+ else
+ fun_l6_n825(x)
+ end
+end
+
+def fun_l5_n419(x)
+ if (x < 1)
+ fun_l6_n406(x)
+ else
+ fun_l6_n680(x)
+ end
+end
+
+def fun_l5_n420(x)
+ if (x < 1)
+ fun_l6_n101(x)
+ else
+ fun_l6_n4(x)
+ end
+end
+
+def fun_l5_n421(x)
+ if (x < 1)
+ fun_l6_n628(x)
+ else
+ fun_l6_n303(x)
+ end
+end
+
+def fun_l5_n422(x)
+ if (x < 1)
+ fun_l6_n521(x)
+ else
+ fun_l6_n547(x)
+ end
+end
+
+def fun_l5_n423(x)
+ if (x < 1)
+ fun_l6_n589(x)
+ else
+ fun_l6_n134(x)
+ end
+end
+
+def fun_l5_n424(x)
+ if (x < 1)
+ fun_l6_n161(x)
+ else
+ fun_l6_n347(x)
+ end
+end
+
+def fun_l5_n425(x)
+ if (x < 1)
+ fun_l6_n59(x)
+ else
+ fun_l6_n988(x)
+ end
+end
+
+def fun_l5_n426(x)
+ if (x < 1)
+ fun_l6_n388(x)
+ else
+ fun_l6_n182(x)
+ end
+end
+
+def fun_l5_n427(x)
+ if (x < 1)
+ fun_l6_n500(x)
+ else
+ fun_l6_n536(x)
+ end
+end
+
+def fun_l5_n428(x)
+ if (x < 1)
+ fun_l6_n792(x)
+ else
+ fun_l6_n253(x)
+ end
+end
+
+def fun_l5_n429(x)
+ if (x < 1)
+ fun_l6_n774(x)
+ else
+ fun_l6_n997(x)
+ end
+end
+
+def fun_l5_n430(x)
+ if (x < 1)
+ fun_l6_n876(x)
+ else
+ fun_l6_n798(x)
+ end
+end
+
+def fun_l5_n431(x)
+ if (x < 1)
+ fun_l6_n606(x)
+ else
+ fun_l6_n403(x)
+ end
+end
+
+def fun_l5_n432(x)
+ if (x < 1)
+ fun_l6_n920(x)
+ else
+ fun_l6_n827(x)
+ end
+end
+
+def fun_l5_n433(x)
+ if (x < 1)
+ fun_l6_n887(x)
+ else
+ fun_l6_n149(x)
+ end
+end
+
+def fun_l5_n434(x)
+ if (x < 1)
+ fun_l6_n413(x)
+ else
+ fun_l6_n44(x)
+ end
+end
+
+def fun_l5_n435(x)
+ if (x < 1)
+ fun_l6_n474(x)
+ else
+ fun_l6_n107(x)
+ end
+end
+
+def fun_l5_n436(x)
+ if (x < 1)
+ fun_l6_n807(x)
+ else
+ fun_l6_n636(x)
+ end
+end
+
+def fun_l5_n437(x)
+ if (x < 1)
+ fun_l6_n616(x)
+ else
+ fun_l6_n615(x)
+ end
+end
+
+def fun_l5_n438(x)
+ if (x < 1)
+ fun_l6_n384(x)
+ else
+ fun_l6_n535(x)
+ end
+end
+
+def fun_l5_n439(x)
+ if (x < 1)
+ fun_l6_n326(x)
+ else
+ fun_l6_n929(x)
+ end
+end
+
+def fun_l5_n440(x)
+ if (x < 1)
+ fun_l6_n852(x)
+ else
+ fun_l6_n597(x)
+ end
+end
+
+def fun_l5_n441(x)
+ if (x < 1)
+ fun_l6_n246(x)
+ else
+ fun_l6_n991(x)
+ end
+end
+
+def fun_l5_n442(x)
+ if (x < 1)
+ fun_l6_n258(x)
+ else
+ fun_l6_n140(x)
+ end
+end
+
+def fun_l5_n443(x)
+ if (x < 1)
+ fun_l6_n815(x)
+ else
+ fun_l6_n600(x)
+ end
+end
+
+def fun_l5_n444(x)
+ if (x < 1)
+ fun_l6_n153(x)
+ else
+ fun_l6_n902(x)
+ end
+end
+
+def fun_l5_n445(x)
+ if (x < 1)
+ fun_l6_n289(x)
+ else
+ fun_l6_n240(x)
+ end
+end
+
+def fun_l5_n446(x)
+ if (x < 1)
+ fun_l6_n398(x)
+ else
+ fun_l6_n273(x)
+ end
+end
+
+def fun_l5_n447(x)
+ if (x < 1)
+ fun_l6_n537(x)
+ else
+ fun_l6_n8(x)
+ end
+end
+
+def fun_l5_n448(x)
+ if (x < 1)
+ fun_l6_n801(x)
+ else
+ fun_l6_n389(x)
+ end
+end
+
+def fun_l5_n449(x)
+ if (x < 1)
+ fun_l6_n363(x)
+ else
+ fun_l6_n17(x)
+ end
+end
+
+def fun_l5_n450(x)
+ if (x < 1)
+ fun_l6_n511(x)
+ else
+ fun_l6_n151(x)
+ end
+end
+
+def fun_l5_n451(x)
+ if (x < 1)
+ fun_l6_n640(x)
+ else
+ fun_l6_n785(x)
+ end
+end
+
+def fun_l5_n452(x)
+ if (x < 1)
+ fun_l6_n961(x)
+ else
+ fun_l6_n567(x)
+ end
+end
+
+def fun_l5_n453(x)
+ if (x < 1)
+ fun_l6_n608(x)
+ else
+ fun_l6_n910(x)
+ end
+end
+
+def fun_l5_n454(x)
+ if (x < 1)
+ fun_l6_n898(x)
+ else
+ fun_l6_n231(x)
+ end
+end
+
+def fun_l5_n455(x)
+ if (x < 1)
+ fun_l6_n205(x)
+ else
+ fun_l6_n162(x)
+ end
+end
+
+def fun_l5_n456(x)
+ if (x < 1)
+ fun_l6_n374(x)
+ else
+ fun_l6_n544(x)
+ end
+end
+
+def fun_l5_n457(x)
+ if (x < 1)
+ fun_l6_n514(x)
+ else
+ fun_l6_n522(x)
+ end
+end
+
+def fun_l5_n458(x)
+ if (x < 1)
+ fun_l6_n385(x)
+ else
+ fun_l6_n808(x)
+ end
+end
+
+def fun_l5_n459(x)
+ if (x < 1)
+ fun_l6_n525(x)
+ else
+ fun_l6_n144(x)
+ end
+end
+
+def fun_l5_n460(x)
+ if (x < 1)
+ fun_l6_n781(x)
+ else
+ fun_l6_n180(x)
+ end
+end
+
+def fun_l5_n461(x)
+ if (x < 1)
+ fun_l6_n681(x)
+ else
+ fun_l6_n273(x)
+ end
+end
+
+def fun_l5_n462(x)
+ if (x < 1)
+ fun_l6_n497(x)
+ else
+ fun_l6_n399(x)
+ end
+end
+
+def fun_l5_n463(x)
+ if (x < 1)
+ fun_l6_n261(x)
+ else
+ fun_l6_n311(x)
+ end
+end
+
+def fun_l5_n464(x)
+ if (x < 1)
+ fun_l6_n64(x)
+ else
+ fun_l6_n253(x)
+ end
+end
+
+def fun_l5_n465(x)
+ if (x < 1)
+ fun_l6_n388(x)
+ else
+ fun_l6_n300(x)
+ end
+end
+
+def fun_l5_n466(x)
+ if (x < 1)
+ fun_l6_n822(x)
+ else
+ fun_l6_n277(x)
+ end
+end
+
+def fun_l5_n467(x)
+ if (x < 1)
+ fun_l6_n909(x)
+ else
+ fun_l6_n605(x)
+ end
+end
+
+def fun_l5_n468(x)
+ if (x < 1)
+ fun_l6_n879(x)
+ else
+ fun_l6_n870(x)
+ end
+end
+
+def fun_l5_n469(x)
+ if (x < 1)
+ fun_l6_n351(x)
+ else
+ fun_l6_n917(x)
+ end
+end
+
+def fun_l5_n470(x)
+ if (x < 1)
+ fun_l6_n202(x)
+ else
+ fun_l6_n40(x)
+ end
+end
+
+def fun_l5_n471(x)
+ if (x < 1)
+ fun_l6_n782(x)
+ else
+ fun_l6_n839(x)
+ end
+end
+
+def fun_l5_n472(x)
+ if (x < 1)
+ fun_l6_n370(x)
+ else
+ fun_l6_n868(x)
+ end
+end
+
+def fun_l5_n473(x)
+ if (x < 1)
+ fun_l6_n556(x)
+ else
+ fun_l6_n722(x)
+ end
+end
+
+def fun_l5_n474(x)
+ if (x < 1)
+ fun_l6_n220(x)
+ else
+ fun_l6_n994(x)
+ end
+end
+
+def fun_l5_n475(x)
+ if (x < 1)
+ fun_l6_n266(x)
+ else
+ fun_l6_n872(x)
+ end
+end
+
+def fun_l5_n476(x)
+ if (x < 1)
+ fun_l6_n720(x)
+ else
+ fun_l6_n412(x)
+ end
+end
+
+def fun_l5_n477(x)
+ if (x < 1)
+ fun_l6_n284(x)
+ else
+ fun_l6_n90(x)
+ end
+end
+
+def fun_l5_n478(x)
+ if (x < 1)
+ fun_l6_n716(x)
+ else
+ fun_l6_n125(x)
+ end
+end
+
+def fun_l5_n479(x)
+ if (x < 1)
+ fun_l6_n136(x)
+ else
+ fun_l6_n232(x)
+ end
+end
+
+def fun_l5_n480(x)
+ if (x < 1)
+ fun_l6_n627(x)
+ else
+ fun_l6_n641(x)
+ end
+end
+
+def fun_l5_n481(x)
+ if (x < 1)
+ fun_l6_n411(x)
+ else
+ fun_l6_n943(x)
+ end
+end
+
+def fun_l5_n482(x)
+ if (x < 1)
+ fun_l6_n86(x)
+ else
+ fun_l6_n839(x)
+ end
+end
+
+def fun_l5_n483(x)
+ if (x < 1)
+ fun_l6_n988(x)
+ else
+ fun_l6_n329(x)
+ end
+end
+
+def fun_l5_n484(x)
+ if (x < 1)
+ fun_l6_n595(x)
+ else
+ fun_l6_n499(x)
+ end
+end
+
+def fun_l5_n485(x)
+ if (x < 1)
+ fun_l6_n912(x)
+ else
+ fun_l6_n267(x)
+ end
+end
+
+def fun_l5_n486(x)
+ if (x < 1)
+ fun_l6_n334(x)
+ else
+ fun_l6_n459(x)
+ end
+end
+
+def fun_l5_n487(x)
+ if (x < 1)
+ fun_l6_n771(x)
+ else
+ fun_l6_n458(x)
+ end
+end
+
+def fun_l5_n488(x)
+ if (x < 1)
+ fun_l6_n609(x)
+ else
+ fun_l6_n280(x)
+ end
+end
+
+def fun_l5_n489(x)
+ if (x < 1)
+ fun_l6_n564(x)
+ else
+ fun_l6_n564(x)
+ end
+end
+
+def fun_l5_n490(x)
+ if (x < 1)
+ fun_l6_n271(x)
+ else
+ fun_l6_n904(x)
+ end
+end
+
+def fun_l5_n491(x)
+ if (x < 1)
+ fun_l6_n338(x)
+ else
+ fun_l6_n694(x)
+ end
+end
+
+def fun_l5_n492(x)
+ if (x < 1)
+ fun_l6_n596(x)
+ else
+ fun_l6_n191(x)
+ end
+end
+
+def fun_l5_n493(x)
+ if (x < 1)
+ fun_l6_n369(x)
+ else
+ fun_l6_n513(x)
+ end
+end
+
+def fun_l5_n494(x)
+ if (x < 1)
+ fun_l6_n972(x)
+ else
+ fun_l6_n945(x)
+ end
+end
+
+def fun_l5_n495(x)
+ if (x < 1)
+ fun_l6_n747(x)
+ else
+ fun_l6_n96(x)
+ end
+end
+
+def fun_l5_n496(x)
+ if (x < 1)
+ fun_l6_n220(x)
+ else
+ fun_l6_n665(x)
+ end
+end
+
+def fun_l5_n497(x)
+ if (x < 1)
+ fun_l6_n793(x)
+ else
+ fun_l6_n468(x)
+ end
+end
+
+def fun_l5_n498(x)
+ if (x < 1)
+ fun_l6_n227(x)
+ else
+ fun_l6_n576(x)
+ end
+end
+
+def fun_l5_n499(x)
+ if (x < 1)
+ fun_l6_n356(x)
+ else
+ fun_l6_n614(x)
+ end
+end
+
+def fun_l5_n500(x)
+ if (x < 1)
+ fun_l6_n256(x)
+ else
+ fun_l6_n237(x)
+ end
+end
+
+def fun_l5_n501(x)
+ if (x < 1)
+ fun_l6_n503(x)
+ else
+ fun_l6_n159(x)
+ end
+end
+
+def fun_l5_n502(x)
+ if (x < 1)
+ fun_l6_n523(x)
+ else
+ fun_l6_n547(x)
+ end
+end
+
+def fun_l5_n503(x)
+ if (x < 1)
+ fun_l6_n778(x)
+ else
+ fun_l6_n600(x)
+ end
+end
+
+def fun_l5_n504(x)
+ if (x < 1)
+ fun_l6_n654(x)
+ else
+ fun_l6_n950(x)
+ end
+end
+
+def fun_l5_n505(x)
+ if (x < 1)
+ fun_l6_n105(x)
+ else
+ fun_l6_n881(x)
+ end
+end
+
+def fun_l5_n506(x)
+ if (x < 1)
+ fun_l6_n956(x)
+ else
+ fun_l6_n71(x)
+ end
+end
+
+def fun_l5_n507(x)
+ if (x < 1)
+ fun_l6_n33(x)
+ else
+ fun_l6_n431(x)
+ end
+end
+
+def fun_l5_n508(x)
+ if (x < 1)
+ fun_l6_n587(x)
+ else
+ fun_l6_n1(x)
+ end
+end
+
+def fun_l5_n509(x)
+ if (x < 1)
+ fun_l6_n258(x)
+ else
+ fun_l6_n547(x)
+ end
+end
+
+def fun_l5_n510(x)
+ if (x < 1)
+ fun_l6_n816(x)
+ else
+ fun_l6_n558(x)
+ end
+end
+
+def fun_l5_n511(x)
+ if (x < 1)
+ fun_l6_n61(x)
+ else
+ fun_l6_n10(x)
+ end
+end
+
+def fun_l5_n512(x)
+ if (x < 1)
+ fun_l6_n674(x)
+ else
+ fun_l6_n466(x)
+ end
+end
+
+def fun_l5_n513(x)
+ if (x < 1)
+ fun_l6_n580(x)
+ else
+ fun_l6_n398(x)
+ end
+end
+
+def fun_l5_n514(x)
+ if (x < 1)
+ fun_l6_n495(x)
+ else
+ fun_l6_n407(x)
+ end
+end
+
+def fun_l5_n515(x)
+ if (x < 1)
+ fun_l6_n639(x)
+ else
+ fun_l6_n214(x)
+ end
+end
+
+def fun_l5_n516(x)
+ if (x < 1)
+ fun_l6_n15(x)
+ else
+ fun_l6_n716(x)
+ end
+end
+
+def fun_l5_n517(x)
+ if (x < 1)
+ fun_l6_n645(x)
+ else
+ fun_l6_n645(x)
+ end
+end
+
+def fun_l5_n518(x)
+ if (x < 1)
+ fun_l6_n615(x)
+ else
+ fun_l6_n44(x)
+ end
+end
+
+def fun_l5_n519(x)
+ if (x < 1)
+ fun_l6_n399(x)
+ else
+ fun_l6_n663(x)
+ end
+end
+
+def fun_l5_n520(x)
+ if (x < 1)
+ fun_l6_n85(x)
+ else
+ fun_l6_n870(x)
+ end
+end
+
+def fun_l5_n521(x)
+ if (x < 1)
+ fun_l6_n14(x)
+ else
+ fun_l6_n965(x)
+ end
+end
+
+def fun_l5_n522(x)
+ if (x < 1)
+ fun_l6_n24(x)
+ else
+ fun_l6_n581(x)
+ end
+end
+
+def fun_l5_n523(x)
+ if (x < 1)
+ fun_l6_n501(x)
+ else
+ fun_l6_n913(x)
+ end
+end
+
+def fun_l5_n524(x)
+ if (x < 1)
+ fun_l6_n772(x)
+ else
+ fun_l6_n999(x)
+ end
+end
+
+def fun_l5_n525(x)
+ if (x < 1)
+ fun_l6_n847(x)
+ else
+ fun_l6_n422(x)
+ end
+end
+
+def fun_l5_n526(x)
+ if (x < 1)
+ fun_l6_n377(x)
+ else
+ fun_l6_n986(x)
+ end
+end
+
+def fun_l5_n527(x)
+ if (x < 1)
+ fun_l6_n548(x)
+ else
+ fun_l6_n40(x)
+ end
+end
+
+def fun_l5_n528(x)
+ if (x < 1)
+ fun_l6_n392(x)
+ else
+ fun_l6_n577(x)
+ end
+end
+
+def fun_l5_n529(x)
+ if (x < 1)
+ fun_l6_n101(x)
+ else
+ fun_l6_n26(x)
+ end
+end
+
+def fun_l5_n530(x)
+ if (x < 1)
+ fun_l6_n800(x)
+ else
+ fun_l6_n958(x)
+ end
+end
+
+def fun_l5_n531(x)
+ if (x < 1)
+ fun_l6_n72(x)
+ else
+ fun_l6_n887(x)
+ end
+end
+
+def fun_l5_n532(x)
+ if (x < 1)
+ fun_l6_n467(x)
+ else
+ fun_l6_n740(x)
+ end
+end
+
+def fun_l5_n533(x)
+ if (x < 1)
+ fun_l6_n264(x)
+ else
+ fun_l6_n683(x)
+ end
+end
+
+def fun_l5_n534(x)
+ if (x < 1)
+ fun_l6_n953(x)
+ else
+ fun_l6_n560(x)
+ end
+end
+
+def fun_l5_n535(x)
+ if (x < 1)
+ fun_l6_n854(x)
+ else
+ fun_l6_n59(x)
+ end
+end
+
+def fun_l5_n536(x)
+ if (x < 1)
+ fun_l6_n940(x)
+ else
+ fun_l6_n832(x)
+ end
+end
+
+def fun_l5_n537(x)
+ if (x < 1)
+ fun_l6_n297(x)
+ else
+ fun_l6_n18(x)
+ end
+end
+
+def fun_l5_n538(x)
+ if (x < 1)
+ fun_l6_n188(x)
+ else
+ fun_l6_n537(x)
+ end
+end
+
+def fun_l5_n539(x)
+ if (x < 1)
+ fun_l6_n992(x)
+ else
+ fun_l6_n825(x)
+ end
+end
+
+def fun_l5_n540(x)
+ if (x < 1)
+ fun_l6_n691(x)
+ else
+ fun_l6_n850(x)
+ end
+end
+
+def fun_l5_n541(x)
+ if (x < 1)
+ fun_l6_n858(x)
+ else
+ fun_l6_n948(x)
+ end
+end
+
+def fun_l5_n542(x)
+ if (x < 1)
+ fun_l6_n923(x)
+ else
+ fun_l6_n112(x)
+ end
+end
+
+def fun_l5_n543(x)
+ if (x < 1)
+ fun_l6_n419(x)
+ else
+ fun_l6_n4(x)
+ end
+end
+
+def fun_l5_n544(x)
+ if (x < 1)
+ fun_l6_n766(x)
+ else
+ fun_l6_n147(x)
+ end
+end
+
+def fun_l5_n545(x)
+ if (x < 1)
+ fun_l6_n327(x)
+ else
+ fun_l6_n327(x)
+ end
+end
+
+def fun_l5_n546(x)
+ if (x < 1)
+ fun_l6_n94(x)
+ else
+ fun_l6_n662(x)
+ end
+end
+
+def fun_l5_n547(x)
+ if (x < 1)
+ fun_l6_n764(x)
+ else
+ fun_l6_n521(x)
+ end
+end
+
+def fun_l5_n548(x)
+ if (x < 1)
+ fun_l6_n509(x)
+ else
+ fun_l6_n231(x)
+ end
+end
+
+def fun_l5_n549(x)
+ if (x < 1)
+ fun_l6_n370(x)
+ else
+ fun_l6_n599(x)
+ end
+end
+
+def fun_l5_n550(x)
+ if (x < 1)
+ fun_l6_n710(x)
+ else
+ fun_l6_n104(x)
+ end
+end
+
+def fun_l5_n551(x)
+ if (x < 1)
+ fun_l6_n677(x)
+ else
+ fun_l6_n184(x)
+ end
+end
+
+def fun_l5_n552(x)
+ if (x < 1)
+ fun_l6_n700(x)
+ else
+ fun_l6_n299(x)
+ end
+end
+
+def fun_l5_n553(x)
+ if (x < 1)
+ fun_l6_n688(x)
+ else
+ fun_l6_n934(x)
+ end
+end
+
+def fun_l5_n554(x)
+ if (x < 1)
+ fun_l6_n771(x)
+ else
+ fun_l6_n989(x)
+ end
+end
+
+def fun_l5_n555(x)
+ if (x < 1)
+ fun_l6_n164(x)
+ else
+ fun_l6_n197(x)
+ end
+end
+
+def fun_l5_n556(x)
+ if (x < 1)
+ fun_l6_n901(x)
+ else
+ fun_l6_n235(x)
+ end
+end
+
+def fun_l5_n557(x)
+ if (x < 1)
+ fun_l6_n990(x)
+ else
+ fun_l6_n490(x)
+ end
+end
+
+def fun_l5_n558(x)
+ if (x < 1)
+ fun_l6_n476(x)
+ else
+ fun_l6_n612(x)
+ end
+end
+
+def fun_l5_n559(x)
+ if (x < 1)
+ fun_l6_n761(x)
+ else
+ fun_l6_n3(x)
+ end
+end
+
+def fun_l5_n560(x)
+ if (x < 1)
+ fun_l6_n461(x)
+ else
+ fun_l6_n832(x)
+ end
+end
+
+def fun_l5_n561(x)
+ if (x < 1)
+ fun_l6_n206(x)
+ else
+ fun_l6_n815(x)
+ end
+end
+
+def fun_l5_n562(x)
+ if (x < 1)
+ fun_l6_n180(x)
+ else
+ fun_l6_n640(x)
+ end
+end
+
+def fun_l5_n563(x)
+ if (x < 1)
+ fun_l6_n612(x)
+ else
+ fun_l6_n900(x)
+ end
+end
+
+def fun_l5_n564(x)
+ if (x < 1)
+ fun_l6_n31(x)
+ else
+ fun_l6_n290(x)
+ end
+end
+
+def fun_l5_n565(x)
+ if (x < 1)
+ fun_l6_n322(x)
+ else
+ fun_l6_n136(x)
+ end
+end
+
+def fun_l5_n566(x)
+ if (x < 1)
+ fun_l6_n657(x)
+ else
+ fun_l6_n649(x)
+ end
+end
+
+def fun_l5_n567(x)
+ if (x < 1)
+ fun_l6_n683(x)
+ else
+ fun_l6_n903(x)
+ end
+end
+
+def fun_l5_n568(x)
+ if (x < 1)
+ fun_l6_n391(x)
+ else
+ fun_l6_n117(x)
+ end
+end
+
+def fun_l5_n569(x)
+ if (x < 1)
+ fun_l6_n868(x)
+ else
+ fun_l6_n644(x)
+ end
+end
+
+def fun_l5_n570(x)
+ if (x < 1)
+ fun_l6_n493(x)
+ else
+ fun_l6_n638(x)
+ end
+end
+
+def fun_l5_n571(x)
+ if (x < 1)
+ fun_l6_n207(x)
+ else
+ fun_l6_n650(x)
+ end
+end
+
+def fun_l5_n572(x)
+ if (x < 1)
+ fun_l6_n575(x)
+ else
+ fun_l6_n421(x)
+ end
+end
+
+def fun_l5_n573(x)
+ if (x < 1)
+ fun_l6_n757(x)
+ else
+ fun_l6_n515(x)
+ end
+end
+
+def fun_l5_n574(x)
+ if (x < 1)
+ fun_l6_n619(x)
+ else
+ fun_l6_n39(x)
+ end
+end
+
+def fun_l5_n575(x)
+ if (x < 1)
+ fun_l6_n91(x)
+ else
+ fun_l6_n79(x)
+ end
+end
+
+def fun_l5_n576(x)
+ if (x < 1)
+ fun_l6_n492(x)
+ else
+ fun_l6_n938(x)
+ end
+end
+
+def fun_l5_n577(x)
+ if (x < 1)
+ fun_l6_n181(x)
+ else
+ fun_l6_n98(x)
+ end
+end
+
+def fun_l5_n578(x)
+ if (x < 1)
+ fun_l6_n823(x)
+ else
+ fun_l6_n528(x)
+ end
+end
+
+def fun_l5_n579(x)
+ if (x < 1)
+ fun_l6_n218(x)
+ else
+ fun_l6_n613(x)
+ end
+end
+
+def fun_l5_n580(x)
+ if (x < 1)
+ fun_l6_n81(x)
+ else
+ fun_l6_n318(x)
+ end
+end
+
+def fun_l5_n581(x)
+ if (x < 1)
+ fun_l6_n953(x)
+ else
+ fun_l6_n162(x)
+ end
+end
+
+def fun_l5_n582(x)
+ if (x < 1)
+ fun_l6_n225(x)
+ else
+ fun_l6_n441(x)
+ end
+end
+
+def fun_l5_n583(x)
+ if (x < 1)
+ fun_l6_n862(x)
+ else
+ fun_l6_n939(x)
+ end
+end
+
+def fun_l5_n584(x)
+ if (x < 1)
+ fun_l6_n401(x)
+ else
+ fun_l6_n149(x)
+ end
+end
+
+def fun_l5_n585(x)
+ if (x < 1)
+ fun_l6_n94(x)
+ else
+ fun_l6_n320(x)
+ end
+end
+
+def fun_l5_n586(x)
+ if (x < 1)
+ fun_l6_n609(x)
+ else
+ fun_l6_n524(x)
+ end
+end
+
+def fun_l5_n587(x)
+ if (x < 1)
+ fun_l6_n412(x)
+ else
+ fun_l6_n350(x)
+ end
+end
+
+def fun_l5_n588(x)
+ if (x < 1)
+ fun_l6_n874(x)
+ else
+ fun_l6_n217(x)
+ end
+end
+
+def fun_l5_n589(x)
+ if (x < 1)
+ fun_l6_n989(x)
+ else
+ fun_l6_n62(x)
+ end
+end
+
+def fun_l5_n590(x)
+ if (x < 1)
+ fun_l6_n91(x)
+ else
+ fun_l6_n360(x)
+ end
+end
+
+def fun_l5_n591(x)
+ if (x < 1)
+ fun_l6_n392(x)
+ else
+ fun_l6_n530(x)
+ end
+end
+
+def fun_l5_n592(x)
+ if (x < 1)
+ fun_l6_n635(x)
+ else
+ fun_l6_n268(x)
+ end
+end
+
+def fun_l5_n593(x)
+ if (x < 1)
+ fun_l6_n747(x)
+ else
+ fun_l6_n345(x)
+ end
+end
+
+def fun_l5_n594(x)
+ if (x < 1)
+ fun_l6_n450(x)
+ else
+ fun_l6_n165(x)
+ end
+end
+
+def fun_l5_n595(x)
+ if (x < 1)
+ fun_l6_n960(x)
+ else
+ fun_l6_n256(x)
+ end
+end
+
+def fun_l5_n596(x)
+ if (x < 1)
+ fun_l6_n724(x)
+ else
+ fun_l6_n506(x)
+ end
+end
+
+def fun_l5_n597(x)
+ if (x < 1)
+ fun_l6_n809(x)
+ else
+ fun_l6_n180(x)
+ end
+end
+
+def fun_l5_n598(x)
+ if (x < 1)
+ fun_l6_n799(x)
+ else
+ fun_l6_n59(x)
+ end
+end
+
+def fun_l5_n599(x)
+ if (x < 1)
+ fun_l6_n412(x)
+ else
+ fun_l6_n868(x)
+ end
+end
+
+def fun_l5_n600(x)
+ if (x < 1)
+ fun_l6_n459(x)
+ else
+ fun_l6_n537(x)
+ end
+end
+
+def fun_l5_n601(x)
+ if (x < 1)
+ fun_l6_n321(x)
+ else
+ fun_l6_n39(x)
+ end
+end
+
+def fun_l5_n602(x)
+ if (x < 1)
+ fun_l6_n561(x)
+ else
+ fun_l6_n413(x)
+ end
+end
+
+def fun_l5_n603(x)
+ if (x < 1)
+ fun_l6_n173(x)
+ else
+ fun_l6_n324(x)
+ end
+end
+
+def fun_l5_n604(x)
+ if (x < 1)
+ fun_l6_n583(x)
+ else
+ fun_l6_n763(x)
+ end
+end
+
+def fun_l5_n605(x)
+ if (x < 1)
+ fun_l6_n200(x)
+ else
+ fun_l6_n996(x)
+ end
+end
+
+def fun_l5_n606(x)
+ if (x < 1)
+ fun_l6_n465(x)
+ else
+ fun_l6_n777(x)
+ end
+end
+
+def fun_l5_n607(x)
+ if (x < 1)
+ fun_l6_n744(x)
+ else
+ fun_l6_n917(x)
+ end
+end
+
+def fun_l5_n608(x)
+ if (x < 1)
+ fun_l6_n825(x)
+ else
+ fun_l6_n751(x)
+ end
+end
+
+def fun_l5_n609(x)
+ if (x < 1)
+ fun_l6_n357(x)
+ else
+ fun_l6_n889(x)
+ end
+end
+
+def fun_l5_n610(x)
+ if (x < 1)
+ fun_l6_n534(x)
+ else
+ fun_l6_n628(x)
+ end
+end
+
+def fun_l5_n611(x)
+ if (x < 1)
+ fun_l6_n242(x)
+ else
+ fun_l6_n57(x)
+ end
+end
+
+def fun_l5_n612(x)
+ if (x < 1)
+ fun_l6_n426(x)
+ else
+ fun_l6_n295(x)
+ end
+end
+
+def fun_l5_n613(x)
+ if (x < 1)
+ fun_l6_n218(x)
+ else
+ fun_l6_n194(x)
+ end
+end
+
+def fun_l5_n614(x)
+ if (x < 1)
+ fun_l6_n618(x)
+ else
+ fun_l6_n757(x)
+ end
+end
+
+def fun_l5_n615(x)
+ if (x < 1)
+ fun_l6_n708(x)
+ else
+ fun_l6_n187(x)
+ end
+end
+
+def fun_l5_n616(x)
+ if (x < 1)
+ fun_l6_n824(x)
+ else
+ fun_l6_n384(x)
+ end
+end
+
+def fun_l5_n617(x)
+ if (x < 1)
+ fun_l6_n149(x)
+ else
+ fun_l6_n741(x)
+ end
+end
+
+def fun_l5_n618(x)
+ if (x < 1)
+ fun_l6_n889(x)
+ else
+ fun_l6_n768(x)
+ end
+end
+
+def fun_l5_n619(x)
+ if (x < 1)
+ fun_l6_n144(x)
+ else
+ fun_l6_n746(x)
+ end
+end
+
+def fun_l5_n620(x)
+ if (x < 1)
+ fun_l6_n338(x)
+ else
+ fun_l6_n757(x)
+ end
+end
+
+def fun_l5_n621(x)
+ if (x < 1)
+ fun_l6_n623(x)
+ else
+ fun_l6_n432(x)
+ end
+end
+
+def fun_l5_n622(x)
+ if (x < 1)
+ fun_l6_n139(x)
+ else
+ fun_l6_n162(x)
+ end
+end
+
+def fun_l5_n623(x)
+ if (x < 1)
+ fun_l6_n371(x)
+ else
+ fun_l6_n297(x)
+ end
+end
+
+def fun_l5_n624(x)
+ if (x < 1)
+ fun_l6_n166(x)
+ else
+ fun_l6_n172(x)
+ end
+end
+
+def fun_l5_n625(x)
+ if (x < 1)
+ fun_l6_n506(x)
+ else
+ fun_l6_n7(x)
+ end
+end
+
+def fun_l5_n626(x)
+ if (x < 1)
+ fun_l6_n334(x)
+ else
+ fun_l6_n631(x)
+ end
+end
+
+def fun_l5_n627(x)
+ if (x < 1)
+ fun_l6_n209(x)
+ else
+ fun_l6_n316(x)
+ end
+end
+
+def fun_l5_n628(x)
+ if (x < 1)
+ fun_l6_n461(x)
+ else
+ fun_l6_n34(x)
+ end
+end
+
+def fun_l5_n629(x)
+ if (x < 1)
+ fun_l6_n6(x)
+ else
+ fun_l6_n818(x)
+ end
+end
+
+def fun_l5_n630(x)
+ if (x < 1)
+ fun_l6_n265(x)
+ else
+ fun_l6_n111(x)
+ end
+end
+
+def fun_l5_n631(x)
+ if (x < 1)
+ fun_l6_n493(x)
+ else
+ fun_l6_n64(x)
+ end
+end
+
+def fun_l5_n632(x)
+ if (x < 1)
+ fun_l6_n513(x)
+ else
+ fun_l6_n890(x)
+ end
+end
+
+def fun_l5_n633(x)
+ if (x < 1)
+ fun_l6_n295(x)
+ else
+ fun_l6_n295(x)
+ end
+end
+
+def fun_l5_n634(x)
+ if (x < 1)
+ fun_l6_n158(x)
+ else
+ fun_l6_n971(x)
+ end
+end
+
+def fun_l5_n635(x)
+ if (x < 1)
+ fun_l6_n171(x)
+ else
+ fun_l6_n337(x)
+ end
+end
+
+def fun_l5_n636(x)
+ if (x < 1)
+ fun_l6_n943(x)
+ else
+ fun_l6_n502(x)
+ end
+end
+
+def fun_l5_n637(x)
+ if (x < 1)
+ fun_l6_n339(x)
+ else
+ fun_l6_n638(x)
+ end
+end
+
+def fun_l5_n638(x)
+ if (x < 1)
+ fun_l6_n155(x)
+ else
+ fun_l6_n393(x)
+ end
+end
+
+def fun_l5_n639(x)
+ if (x < 1)
+ fun_l6_n29(x)
+ else
+ fun_l6_n506(x)
+ end
+end
+
+def fun_l5_n640(x)
+ if (x < 1)
+ fun_l6_n677(x)
+ else
+ fun_l6_n606(x)
+ end
+end
+
+def fun_l5_n641(x)
+ if (x < 1)
+ fun_l6_n306(x)
+ else
+ fun_l6_n571(x)
+ end
+end
+
+def fun_l5_n642(x)
+ if (x < 1)
+ fun_l6_n490(x)
+ else
+ fun_l6_n281(x)
+ end
+end
+
+def fun_l5_n643(x)
+ if (x < 1)
+ fun_l6_n392(x)
+ else
+ fun_l6_n686(x)
+ end
+end
+
+def fun_l5_n644(x)
+ if (x < 1)
+ fun_l6_n257(x)
+ else
+ fun_l6_n358(x)
+ end
+end
+
+def fun_l5_n645(x)
+ if (x < 1)
+ fun_l6_n949(x)
+ else
+ fun_l6_n15(x)
+ end
+end
+
+def fun_l5_n646(x)
+ if (x < 1)
+ fun_l6_n866(x)
+ else
+ fun_l6_n657(x)
+ end
+end
+
+def fun_l5_n647(x)
+ if (x < 1)
+ fun_l6_n624(x)
+ else
+ fun_l6_n390(x)
+ end
+end
+
+def fun_l5_n648(x)
+ if (x < 1)
+ fun_l6_n394(x)
+ else
+ fun_l6_n951(x)
+ end
+end
+
+def fun_l5_n649(x)
+ if (x < 1)
+ fun_l6_n99(x)
+ else
+ fun_l6_n160(x)
+ end
+end
+
+def fun_l5_n650(x)
+ if (x < 1)
+ fun_l6_n84(x)
+ else
+ fun_l6_n111(x)
+ end
+end
+
+def fun_l5_n651(x)
+ if (x < 1)
+ fun_l6_n635(x)
+ else
+ fun_l6_n507(x)
+ end
+end
+
+def fun_l5_n652(x)
+ if (x < 1)
+ fun_l6_n674(x)
+ else
+ fun_l6_n501(x)
+ end
+end
+
+def fun_l5_n653(x)
+ if (x < 1)
+ fun_l6_n171(x)
+ else
+ fun_l6_n645(x)
+ end
+end
+
+def fun_l5_n654(x)
+ if (x < 1)
+ fun_l6_n327(x)
+ else
+ fun_l6_n152(x)
+ end
+end
+
+def fun_l5_n655(x)
+ if (x < 1)
+ fun_l6_n210(x)
+ else
+ fun_l6_n441(x)
+ end
+end
+
+def fun_l5_n656(x)
+ if (x < 1)
+ fun_l6_n691(x)
+ else
+ fun_l6_n813(x)
+ end
+end
+
+def fun_l5_n657(x)
+ if (x < 1)
+ fun_l6_n671(x)
+ else
+ fun_l6_n683(x)
+ end
+end
+
+def fun_l5_n658(x)
+ if (x < 1)
+ fun_l6_n939(x)
+ else
+ fun_l6_n580(x)
+ end
+end
+
+def fun_l5_n659(x)
+ if (x < 1)
+ fun_l6_n152(x)
+ else
+ fun_l6_n159(x)
+ end
+end
+
+def fun_l5_n660(x)
+ if (x < 1)
+ fun_l6_n50(x)
+ else
+ fun_l6_n911(x)
+ end
+end
+
+def fun_l5_n661(x)
+ if (x < 1)
+ fun_l6_n700(x)
+ else
+ fun_l6_n972(x)
+ end
+end
+
+def fun_l5_n662(x)
+ if (x < 1)
+ fun_l6_n22(x)
+ else
+ fun_l6_n82(x)
+ end
+end
+
+def fun_l5_n663(x)
+ if (x < 1)
+ fun_l6_n51(x)
+ else
+ fun_l6_n827(x)
+ end
+end
+
+def fun_l5_n664(x)
+ if (x < 1)
+ fun_l6_n997(x)
+ else
+ fun_l6_n289(x)
+ end
+end
+
+def fun_l5_n665(x)
+ if (x < 1)
+ fun_l6_n666(x)
+ else
+ fun_l6_n775(x)
+ end
+end
+
+def fun_l5_n666(x)
+ if (x < 1)
+ fun_l6_n373(x)
+ else
+ fun_l6_n175(x)
+ end
+end
+
+def fun_l5_n667(x)
+ if (x < 1)
+ fun_l6_n551(x)
+ else
+ fun_l6_n772(x)
+ end
+end
+
+def fun_l5_n668(x)
+ if (x < 1)
+ fun_l6_n107(x)
+ else
+ fun_l6_n197(x)
+ end
+end
+
+def fun_l5_n669(x)
+ if (x < 1)
+ fun_l6_n244(x)
+ else
+ fun_l6_n968(x)
+ end
+end
+
+def fun_l5_n670(x)
+ if (x < 1)
+ fun_l6_n798(x)
+ else
+ fun_l6_n30(x)
+ end
+end
+
+def fun_l5_n671(x)
+ if (x < 1)
+ fun_l6_n48(x)
+ else
+ fun_l6_n186(x)
+ end
+end
+
+def fun_l5_n672(x)
+ if (x < 1)
+ fun_l6_n373(x)
+ else
+ fun_l6_n937(x)
+ end
+end
+
+def fun_l5_n673(x)
+ if (x < 1)
+ fun_l6_n759(x)
+ else
+ fun_l6_n92(x)
+ end
+end
+
+def fun_l5_n674(x)
+ if (x < 1)
+ fun_l6_n380(x)
+ else
+ fun_l6_n772(x)
+ end
+end
+
+def fun_l5_n675(x)
+ if (x < 1)
+ fun_l6_n916(x)
+ else
+ fun_l6_n329(x)
+ end
+end
+
+def fun_l5_n676(x)
+ if (x < 1)
+ fun_l6_n110(x)
+ else
+ fun_l6_n951(x)
+ end
+end
+
+def fun_l5_n677(x)
+ if (x < 1)
+ fun_l6_n363(x)
+ else
+ fun_l6_n35(x)
+ end
+end
+
+def fun_l5_n678(x)
+ if (x < 1)
+ fun_l6_n484(x)
+ else
+ fun_l6_n492(x)
+ end
+end
+
+def fun_l5_n679(x)
+ if (x < 1)
+ fun_l6_n801(x)
+ else
+ fun_l6_n279(x)
+ end
+end
+
+def fun_l5_n680(x)
+ if (x < 1)
+ fun_l6_n499(x)
+ else
+ fun_l6_n561(x)
+ end
+end
+
+def fun_l5_n681(x)
+ if (x < 1)
+ fun_l6_n800(x)
+ else
+ fun_l6_n890(x)
+ end
+end
+
+def fun_l5_n682(x)
+ if (x < 1)
+ fun_l6_n191(x)
+ else
+ fun_l6_n686(x)
+ end
+end
+
+def fun_l5_n683(x)
+ if (x < 1)
+ fun_l6_n318(x)
+ else
+ fun_l6_n683(x)
+ end
+end
+
+def fun_l5_n684(x)
+ if (x < 1)
+ fun_l6_n500(x)
+ else
+ fun_l6_n914(x)
+ end
+end
+
+def fun_l5_n685(x)
+ if (x < 1)
+ fun_l6_n24(x)
+ else
+ fun_l6_n945(x)
+ end
+end
+
+def fun_l5_n686(x)
+ if (x < 1)
+ fun_l6_n602(x)
+ else
+ fun_l6_n291(x)
+ end
+end
+
+def fun_l5_n687(x)
+ if (x < 1)
+ fun_l6_n999(x)
+ else
+ fun_l6_n86(x)
+ end
+end
+
+def fun_l5_n688(x)
+ if (x < 1)
+ fun_l6_n106(x)
+ else
+ fun_l6_n754(x)
+ end
+end
+
+def fun_l5_n689(x)
+ if (x < 1)
+ fun_l6_n452(x)
+ else
+ fun_l6_n229(x)
+ end
+end
+
+def fun_l5_n690(x)
+ if (x < 1)
+ fun_l6_n672(x)
+ else
+ fun_l6_n587(x)
+ end
+end
+
+def fun_l5_n691(x)
+ if (x < 1)
+ fun_l6_n532(x)
+ else
+ fun_l6_n56(x)
+ end
+end
+
+def fun_l5_n692(x)
+ if (x < 1)
+ fun_l6_n606(x)
+ else
+ fun_l6_n552(x)
+ end
+end
+
+def fun_l5_n693(x)
+ if (x < 1)
+ fun_l6_n742(x)
+ else
+ fun_l6_n862(x)
+ end
+end
+
+def fun_l5_n694(x)
+ if (x < 1)
+ fun_l6_n964(x)
+ else
+ fun_l6_n475(x)
+ end
+end
+
+def fun_l5_n695(x)
+ if (x < 1)
+ fun_l6_n179(x)
+ else
+ fun_l6_n947(x)
+ end
+end
+
+def fun_l5_n696(x)
+ if (x < 1)
+ fun_l6_n563(x)
+ else
+ fun_l6_n329(x)
+ end
+end
+
+def fun_l5_n697(x)
+ if (x < 1)
+ fun_l6_n786(x)
+ else
+ fun_l6_n502(x)
+ end
+end
+
+def fun_l5_n698(x)
+ if (x < 1)
+ fun_l6_n178(x)
+ else
+ fun_l6_n757(x)
+ end
+end
+
+def fun_l5_n699(x)
+ if (x < 1)
+ fun_l6_n248(x)
+ else
+ fun_l6_n288(x)
+ end
+end
+
+def fun_l5_n700(x)
+ if (x < 1)
+ fun_l6_n262(x)
+ else
+ fun_l6_n650(x)
+ end
+end
+
+def fun_l5_n701(x)
+ if (x < 1)
+ fun_l6_n90(x)
+ else
+ fun_l6_n964(x)
+ end
+end
+
+def fun_l5_n702(x)
+ if (x < 1)
+ fun_l6_n864(x)
+ else
+ fun_l6_n743(x)
+ end
+end
+
+def fun_l5_n703(x)
+ if (x < 1)
+ fun_l6_n816(x)
+ else
+ fun_l6_n364(x)
+ end
+end
+
+def fun_l5_n704(x)
+ if (x < 1)
+ fun_l6_n590(x)
+ else
+ fun_l6_n434(x)
+ end
+end
+
+def fun_l5_n705(x)
+ if (x < 1)
+ fun_l6_n875(x)
+ else
+ fun_l6_n539(x)
+ end
+end
+
+def fun_l5_n706(x)
+ if (x < 1)
+ fun_l6_n964(x)
+ else
+ fun_l6_n564(x)
+ end
+end
+
+def fun_l5_n707(x)
+ if (x < 1)
+ fun_l6_n315(x)
+ else
+ fun_l6_n460(x)
+ end
+end
+
+def fun_l5_n708(x)
+ if (x < 1)
+ fun_l6_n139(x)
+ else
+ fun_l6_n210(x)
+ end
+end
+
+def fun_l5_n709(x)
+ if (x < 1)
+ fun_l6_n593(x)
+ else
+ fun_l6_n933(x)
+ end
+end
+
+def fun_l5_n710(x)
+ if (x < 1)
+ fun_l6_n37(x)
+ else
+ fun_l6_n459(x)
+ end
+end
+
+def fun_l5_n711(x)
+ if (x < 1)
+ fun_l6_n768(x)
+ else
+ fun_l6_n256(x)
+ end
+end
+
+def fun_l5_n712(x)
+ if (x < 1)
+ fun_l6_n506(x)
+ else
+ fun_l6_n936(x)
+ end
+end
+
+def fun_l5_n713(x)
+ if (x < 1)
+ fun_l6_n971(x)
+ else
+ fun_l6_n575(x)
+ end
+end
+
+def fun_l5_n714(x)
+ if (x < 1)
+ fun_l6_n498(x)
+ else
+ fun_l6_n932(x)
+ end
+end
+
+def fun_l5_n715(x)
+ if (x < 1)
+ fun_l6_n362(x)
+ else
+ fun_l6_n949(x)
+ end
+end
+
+def fun_l5_n716(x)
+ if (x < 1)
+ fun_l6_n706(x)
+ else
+ fun_l6_n788(x)
+ end
+end
+
+def fun_l5_n717(x)
+ if (x < 1)
+ fun_l6_n405(x)
+ else
+ fun_l6_n803(x)
+ end
+end
+
+def fun_l5_n718(x)
+ if (x < 1)
+ fun_l6_n166(x)
+ else
+ fun_l6_n917(x)
+ end
+end
+
+def fun_l5_n719(x)
+ if (x < 1)
+ fun_l6_n899(x)
+ else
+ fun_l6_n836(x)
+ end
+end
+
+def fun_l5_n720(x)
+ if (x < 1)
+ fun_l6_n805(x)
+ else
+ fun_l6_n159(x)
+ end
+end
+
+def fun_l5_n721(x)
+ if (x < 1)
+ fun_l6_n960(x)
+ else
+ fun_l6_n113(x)
+ end
+end
+
+def fun_l5_n722(x)
+ if (x < 1)
+ fun_l6_n807(x)
+ else
+ fun_l6_n533(x)
+ end
+end
+
+def fun_l5_n723(x)
+ if (x < 1)
+ fun_l6_n480(x)
+ else
+ fun_l6_n907(x)
+ end
+end
+
+def fun_l5_n724(x)
+ if (x < 1)
+ fun_l6_n951(x)
+ else
+ fun_l6_n793(x)
+ end
+end
+
+def fun_l5_n725(x)
+ if (x < 1)
+ fun_l6_n951(x)
+ else
+ fun_l6_n771(x)
+ end
+end
+
+def fun_l5_n726(x)
+ if (x < 1)
+ fun_l6_n435(x)
+ else
+ fun_l6_n336(x)
+ end
+end
+
+def fun_l5_n727(x)
+ if (x < 1)
+ fun_l6_n222(x)
+ else
+ fun_l6_n393(x)
+ end
+end
+
+def fun_l5_n728(x)
+ if (x < 1)
+ fun_l6_n988(x)
+ else
+ fun_l6_n402(x)
+ end
+end
+
+def fun_l5_n729(x)
+ if (x < 1)
+ fun_l6_n9(x)
+ else
+ fun_l6_n46(x)
+ end
+end
+
+def fun_l5_n730(x)
+ if (x < 1)
+ fun_l6_n722(x)
+ else
+ fun_l6_n112(x)
+ end
+end
+
+def fun_l5_n731(x)
+ if (x < 1)
+ fun_l6_n543(x)
+ else
+ fun_l6_n813(x)
+ end
+end
+
+def fun_l5_n732(x)
+ if (x < 1)
+ fun_l6_n337(x)
+ else
+ fun_l6_n245(x)
+ end
+end
+
+def fun_l5_n733(x)
+ if (x < 1)
+ fun_l6_n84(x)
+ else
+ fun_l6_n335(x)
+ end
+end
+
+def fun_l5_n734(x)
+ if (x < 1)
+ fun_l6_n697(x)
+ else
+ fun_l6_n498(x)
+ end
+end
+
+def fun_l5_n735(x)
+ if (x < 1)
+ fun_l6_n2(x)
+ else
+ fun_l6_n895(x)
+ end
+end
+
+def fun_l5_n736(x)
+ if (x < 1)
+ fun_l6_n619(x)
+ else
+ fun_l6_n163(x)
+ end
+end
+
+def fun_l5_n737(x)
+ if (x < 1)
+ fun_l6_n237(x)
+ else
+ fun_l6_n6(x)
+ end
+end
+
+def fun_l5_n738(x)
+ if (x < 1)
+ fun_l6_n259(x)
+ else
+ fun_l6_n95(x)
+ end
+end
+
+def fun_l5_n739(x)
+ if (x < 1)
+ fun_l6_n98(x)
+ else
+ fun_l6_n331(x)
+ end
+end
+
+def fun_l5_n740(x)
+ if (x < 1)
+ fun_l6_n489(x)
+ else
+ fun_l6_n305(x)
+ end
+end
+
+def fun_l5_n741(x)
+ if (x < 1)
+ fun_l6_n493(x)
+ else
+ fun_l6_n525(x)
+ end
+end
+
+def fun_l5_n742(x)
+ if (x < 1)
+ fun_l6_n269(x)
+ else
+ fun_l6_n742(x)
+ end
+end
+
+def fun_l5_n743(x)
+ if (x < 1)
+ fun_l6_n194(x)
+ else
+ fun_l6_n254(x)
+ end
+end
+
+def fun_l5_n744(x)
+ if (x < 1)
+ fun_l6_n247(x)
+ else
+ fun_l6_n909(x)
+ end
+end
+
+def fun_l5_n745(x)
+ if (x < 1)
+ fun_l6_n388(x)
+ else
+ fun_l6_n657(x)
+ end
+end
+
+def fun_l5_n746(x)
+ if (x < 1)
+ fun_l6_n27(x)
+ else
+ fun_l6_n286(x)
+ end
+end
+
+def fun_l5_n747(x)
+ if (x < 1)
+ fun_l6_n659(x)
+ else
+ fun_l6_n895(x)
+ end
+end
+
+def fun_l5_n748(x)
+ if (x < 1)
+ fun_l6_n538(x)
+ else
+ fun_l6_n667(x)
+ end
+end
+
+def fun_l5_n749(x)
+ if (x < 1)
+ fun_l6_n208(x)
+ else
+ fun_l6_n493(x)
+ end
+end
+
+def fun_l5_n750(x)
+ if (x < 1)
+ fun_l6_n326(x)
+ else
+ fun_l6_n570(x)
+ end
+end
+
+def fun_l5_n751(x)
+ if (x < 1)
+ fun_l6_n437(x)
+ else
+ fun_l6_n716(x)
+ end
+end
+
+def fun_l5_n752(x)
+ if (x < 1)
+ fun_l6_n957(x)
+ else
+ fun_l6_n231(x)
+ end
+end
+
+def fun_l5_n753(x)
+ if (x < 1)
+ fun_l6_n846(x)
+ else
+ fun_l6_n900(x)
+ end
+end
+
+def fun_l5_n754(x)
+ if (x < 1)
+ fun_l6_n81(x)
+ else
+ fun_l6_n464(x)
+ end
+end
+
+def fun_l5_n755(x)
+ if (x < 1)
+ fun_l6_n735(x)
+ else
+ fun_l6_n232(x)
+ end
+end
+
+def fun_l5_n756(x)
+ if (x < 1)
+ fun_l6_n577(x)
+ else
+ fun_l6_n804(x)
+ end
+end
+
+def fun_l5_n757(x)
+ if (x < 1)
+ fun_l6_n363(x)
+ else
+ fun_l6_n155(x)
+ end
+end
+
+def fun_l5_n758(x)
+ if (x < 1)
+ fun_l6_n534(x)
+ else
+ fun_l6_n827(x)
+ end
+end
+
+def fun_l5_n759(x)
+ if (x < 1)
+ fun_l6_n711(x)
+ else
+ fun_l6_n53(x)
+ end
+end
+
+def fun_l5_n760(x)
+ if (x < 1)
+ fun_l6_n80(x)
+ else
+ fun_l6_n734(x)
+ end
+end
+
+def fun_l5_n761(x)
+ if (x < 1)
+ fun_l6_n811(x)
+ else
+ fun_l6_n691(x)
+ end
+end
+
+def fun_l5_n762(x)
+ if (x < 1)
+ fun_l6_n847(x)
+ else
+ fun_l6_n570(x)
+ end
+end
+
+def fun_l5_n763(x)
+ if (x < 1)
+ fun_l6_n30(x)
+ else
+ fun_l6_n283(x)
+ end
+end
+
+def fun_l5_n764(x)
+ if (x < 1)
+ fun_l6_n270(x)
+ else
+ fun_l6_n965(x)
+ end
+end
+
+def fun_l5_n765(x)
+ if (x < 1)
+ fun_l6_n936(x)
+ else
+ fun_l6_n772(x)
+ end
+end
+
+def fun_l5_n766(x)
+ if (x < 1)
+ fun_l6_n181(x)
+ else
+ fun_l6_n682(x)
+ end
+end
+
+def fun_l5_n767(x)
+ if (x < 1)
+ fun_l6_n327(x)
+ else
+ fun_l6_n5(x)
+ end
+end
+
+def fun_l5_n768(x)
+ if (x < 1)
+ fun_l6_n296(x)
+ else
+ fun_l6_n940(x)
+ end
+end
+
+def fun_l5_n769(x)
+ if (x < 1)
+ fun_l6_n543(x)
+ else
+ fun_l6_n871(x)
+ end
+end
+
+def fun_l5_n770(x)
+ if (x < 1)
+ fun_l6_n670(x)
+ else
+ fun_l6_n128(x)
+ end
+end
+
+def fun_l5_n771(x)
+ if (x < 1)
+ fun_l6_n184(x)
+ else
+ fun_l6_n794(x)
+ end
+end
+
+def fun_l5_n772(x)
+ if (x < 1)
+ fun_l6_n571(x)
+ else
+ fun_l6_n337(x)
+ end
+end
+
+def fun_l5_n773(x)
+ if (x < 1)
+ fun_l6_n674(x)
+ else
+ fun_l6_n712(x)
+ end
+end
+
+def fun_l5_n774(x)
+ if (x < 1)
+ fun_l6_n572(x)
+ else
+ fun_l6_n944(x)
+ end
+end
+
+def fun_l5_n775(x)
+ if (x < 1)
+ fun_l6_n122(x)
+ else
+ fun_l6_n897(x)
+ end
+end
+
+def fun_l5_n776(x)
+ if (x < 1)
+ fun_l6_n461(x)
+ else
+ fun_l6_n202(x)
+ end
+end
+
+def fun_l5_n777(x)
+ if (x < 1)
+ fun_l6_n99(x)
+ else
+ fun_l6_n256(x)
+ end
+end
+
+def fun_l5_n778(x)
+ if (x < 1)
+ fun_l6_n547(x)
+ else
+ fun_l6_n138(x)
+ end
+end
+
+def fun_l5_n779(x)
+ if (x < 1)
+ fun_l6_n56(x)
+ else
+ fun_l6_n682(x)
+ end
+end
+
+def fun_l5_n780(x)
+ if (x < 1)
+ fun_l6_n10(x)
+ else
+ fun_l6_n238(x)
+ end
+end
+
+def fun_l5_n781(x)
+ if (x < 1)
+ fun_l6_n577(x)
+ else
+ fun_l6_n590(x)
+ end
+end
+
+def fun_l5_n782(x)
+ if (x < 1)
+ fun_l6_n415(x)
+ else
+ fun_l6_n522(x)
+ end
+end
+
+def fun_l5_n783(x)
+ if (x < 1)
+ fun_l6_n255(x)
+ else
+ fun_l6_n182(x)
+ end
+end
+
+def fun_l5_n784(x)
+ if (x < 1)
+ fun_l6_n755(x)
+ else
+ fun_l6_n644(x)
+ end
+end
+
+def fun_l5_n785(x)
+ if (x < 1)
+ fun_l6_n39(x)
+ else
+ fun_l6_n296(x)
+ end
+end
+
+def fun_l5_n786(x)
+ if (x < 1)
+ fun_l6_n17(x)
+ else
+ fun_l6_n811(x)
+ end
+end
+
+def fun_l5_n787(x)
+ if (x < 1)
+ fun_l6_n814(x)
+ else
+ fun_l6_n989(x)
+ end
+end
+
+def fun_l5_n788(x)
+ if (x < 1)
+ fun_l6_n791(x)
+ else
+ fun_l6_n688(x)
+ end
+end
+
+def fun_l5_n789(x)
+ if (x < 1)
+ fun_l6_n415(x)
+ else
+ fun_l6_n218(x)
+ end
+end
+
+def fun_l5_n790(x)
+ if (x < 1)
+ fun_l6_n227(x)
+ else
+ fun_l6_n391(x)
+ end
+end
+
+def fun_l5_n791(x)
+ if (x < 1)
+ fun_l6_n232(x)
+ else
+ fun_l6_n644(x)
+ end
+end
+
+def fun_l5_n792(x)
+ if (x < 1)
+ fun_l6_n325(x)
+ else
+ fun_l6_n740(x)
+ end
+end
+
+def fun_l5_n793(x)
+ if (x < 1)
+ fun_l6_n926(x)
+ else
+ fun_l6_n133(x)
+ end
+end
+
+def fun_l5_n794(x)
+ if (x < 1)
+ fun_l6_n114(x)
+ else
+ fun_l6_n76(x)
+ end
+end
+
+def fun_l5_n795(x)
+ if (x < 1)
+ fun_l6_n332(x)
+ else
+ fun_l6_n198(x)
+ end
+end
+
+def fun_l5_n796(x)
+ if (x < 1)
+ fun_l6_n584(x)
+ else
+ fun_l6_n298(x)
+ end
+end
+
+def fun_l5_n797(x)
+ if (x < 1)
+ fun_l6_n966(x)
+ else
+ fun_l6_n956(x)
+ end
+end
+
+def fun_l5_n798(x)
+ if (x < 1)
+ fun_l6_n604(x)
+ else
+ fun_l6_n311(x)
+ end
+end
+
+def fun_l5_n799(x)
+ if (x < 1)
+ fun_l6_n444(x)
+ else
+ fun_l6_n934(x)
+ end
+end
+
+def fun_l5_n800(x)
+ if (x < 1)
+ fun_l6_n805(x)
+ else
+ fun_l6_n83(x)
+ end
+end
+
+def fun_l5_n801(x)
+ if (x < 1)
+ fun_l6_n996(x)
+ else
+ fun_l6_n282(x)
+ end
+end
+
+def fun_l5_n802(x)
+ if (x < 1)
+ fun_l6_n759(x)
+ else
+ fun_l6_n681(x)
+ end
+end
+
+def fun_l5_n803(x)
+ if (x < 1)
+ fun_l6_n480(x)
+ else
+ fun_l6_n536(x)
+ end
+end
+
+def fun_l5_n804(x)
+ if (x < 1)
+ fun_l6_n536(x)
+ else
+ fun_l6_n64(x)
+ end
+end
+
+def fun_l5_n805(x)
+ if (x < 1)
+ fun_l6_n992(x)
+ else
+ fun_l6_n116(x)
+ end
+end
+
+def fun_l5_n806(x)
+ if (x < 1)
+ fun_l6_n446(x)
+ else
+ fun_l6_n553(x)
+ end
+end
+
+def fun_l5_n807(x)
+ if (x < 1)
+ fun_l6_n822(x)
+ else
+ fun_l6_n80(x)
+ end
+end
+
+def fun_l5_n808(x)
+ if (x < 1)
+ fun_l6_n912(x)
+ else
+ fun_l6_n525(x)
+ end
+end
+
+def fun_l5_n809(x)
+ if (x < 1)
+ fun_l6_n824(x)
+ else
+ fun_l6_n356(x)
+ end
+end
+
+def fun_l5_n810(x)
+ if (x < 1)
+ fun_l6_n144(x)
+ else
+ fun_l6_n193(x)
+ end
+end
+
+def fun_l5_n811(x)
+ if (x < 1)
+ fun_l6_n27(x)
+ else
+ fun_l6_n810(x)
+ end
+end
+
+def fun_l5_n812(x)
+ if (x < 1)
+ fun_l6_n244(x)
+ else
+ fun_l6_n696(x)
+ end
+end
+
+def fun_l5_n813(x)
+ if (x < 1)
+ fun_l6_n408(x)
+ else
+ fun_l6_n46(x)
+ end
+end
+
+def fun_l5_n814(x)
+ if (x < 1)
+ fun_l6_n496(x)
+ else
+ fun_l6_n149(x)
+ end
+end
+
+def fun_l5_n815(x)
+ if (x < 1)
+ fun_l6_n100(x)
+ else
+ fun_l6_n765(x)
+ end
+end
+
+def fun_l5_n816(x)
+ if (x < 1)
+ fun_l6_n731(x)
+ else
+ fun_l6_n953(x)
+ end
+end
+
+def fun_l5_n817(x)
+ if (x < 1)
+ fun_l6_n997(x)
+ else
+ fun_l6_n849(x)
+ end
+end
+
+def fun_l5_n818(x)
+ if (x < 1)
+ fun_l6_n570(x)
+ else
+ fun_l6_n918(x)
+ end
+end
+
+def fun_l5_n819(x)
+ if (x < 1)
+ fun_l6_n473(x)
+ else
+ fun_l6_n955(x)
+ end
+end
+
+def fun_l5_n820(x)
+ if (x < 1)
+ fun_l6_n691(x)
+ else
+ fun_l6_n271(x)
+ end
+end
+
+def fun_l5_n821(x)
+ if (x < 1)
+ fun_l6_n935(x)
+ else
+ fun_l6_n386(x)
+ end
+end
+
+def fun_l5_n822(x)
+ if (x < 1)
+ fun_l6_n210(x)
+ else
+ fun_l6_n871(x)
+ end
+end
+
+def fun_l5_n823(x)
+ if (x < 1)
+ fun_l6_n907(x)
+ else
+ fun_l6_n596(x)
+ end
+end
+
+def fun_l5_n824(x)
+ if (x < 1)
+ fun_l6_n701(x)
+ else
+ fun_l6_n541(x)
+ end
+end
+
+def fun_l5_n825(x)
+ if (x < 1)
+ fun_l6_n477(x)
+ else
+ fun_l6_n207(x)
+ end
+end
+
+def fun_l5_n826(x)
+ if (x < 1)
+ fun_l6_n95(x)
+ else
+ fun_l6_n26(x)
+ end
+end
+
+def fun_l5_n827(x)
+ if (x < 1)
+ fun_l6_n908(x)
+ else
+ fun_l6_n369(x)
+ end
+end
+
+def fun_l5_n828(x)
+ if (x < 1)
+ fun_l6_n213(x)
+ else
+ fun_l6_n607(x)
+ end
+end
+
+def fun_l5_n829(x)
+ if (x < 1)
+ fun_l6_n763(x)
+ else
+ fun_l6_n753(x)
+ end
+end
+
+def fun_l5_n830(x)
+ if (x < 1)
+ fun_l6_n24(x)
+ else
+ fun_l6_n157(x)
+ end
+end
+
+def fun_l5_n831(x)
+ if (x < 1)
+ fun_l6_n262(x)
+ else
+ fun_l6_n826(x)
+ end
+end
+
+def fun_l5_n832(x)
+ if (x < 1)
+ fun_l6_n743(x)
+ else
+ fun_l6_n37(x)
+ end
+end
+
+def fun_l5_n833(x)
+ if (x < 1)
+ fun_l6_n532(x)
+ else
+ fun_l6_n31(x)
+ end
+end
+
+def fun_l5_n834(x)
+ if (x < 1)
+ fun_l6_n309(x)
+ else
+ fun_l6_n454(x)
+ end
+end
+
+def fun_l5_n835(x)
+ if (x < 1)
+ fun_l6_n917(x)
+ else
+ fun_l6_n721(x)
+ end
+end
+
+def fun_l5_n836(x)
+ if (x < 1)
+ fun_l6_n376(x)
+ else
+ fun_l6_n758(x)
+ end
+end
+
+def fun_l5_n837(x)
+ if (x < 1)
+ fun_l6_n558(x)
+ else
+ fun_l6_n474(x)
+ end
+end
+
+def fun_l5_n838(x)
+ if (x < 1)
+ fun_l6_n436(x)
+ else
+ fun_l6_n675(x)
+ end
+end
+
+def fun_l5_n839(x)
+ if (x < 1)
+ fun_l6_n720(x)
+ else
+ fun_l6_n12(x)
+ end
+end
+
+def fun_l5_n840(x)
+ if (x < 1)
+ fun_l6_n957(x)
+ else
+ fun_l6_n200(x)
+ end
+end
+
+def fun_l5_n841(x)
+ if (x < 1)
+ fun_l6_n180(x)
+ else
+ fun_l6_n644(x)
+ end
+end
+
+def fun_l5_n842(x)
+ if (x < 1)
+ fun_l6_n510(x)
+ else
+ fun_l6_n73(x)
+ end
+end
+
+def fun_l5_n843(x)
+ if (x < 1)
+ fun_l6_n812(x)
+ else
+ fun_l6_n954(x)
+ end
+end
+
+def fun_l5_n844(x)
+ if (x < 1)
+ fun_l6_n566(x)
+ else
+ fun_l6_n866(x)
+ end
+end
+
+def fun_l5_n845(x)
+ if (x < 1)
+ fun_l6_n641(x)
+ else
+ fun_l6_n11(x)
+ end
+end
+
+def fun_l5_n846(x)
+ if (x < 1)
+ fun_l6_n607(x)
+ else
+ fun_l6_n884(x)
+ end
+end
+
+def fun_l5_n847(x)
+ if (x < 1)
+ fun_l6_n479(x)
+ else
+ fun_l6_n564(x)
+ end
+end
+
+def fun_l5_n848(x)
+ if (x < 1)
+ fun_l6_n365(x)
+ else
+ fun_l6_n285(x)
+ end
+end
+
+def fun_l5_n849(x)
+ if (x < 1)
+ fun_l6_n296(x)
+ else
+ fun_l6_n708(x)
+ end
+end
+
+def fun_l5_n850(x)
+ if (x < 1)
+ fun_l6_n429(x)
+ else
+ fun_l6_n758(x)
+ end
+end
+
+def fun_l5_n851(x)
+ if (x < 1)
+ fun_l6_n881(x)
+ else
+ fun_l6_n848(x)
+ end
+end
+
+def fun_l5_n852(x)
+ if (x < 1)
+ fun_l6_n257(x)
+ else
+ fun_l6_n51(x)
+ end
+end
+
+def fun_l5_n853(x)
+ if (x < 1)
+ fun_l6_n441(x)
+ else
+ fun_l6_n445(x)
+ end
+end
+
+def fun_l5_n854(x)
+ if (x < 1)
+ fun_l6_n80(x)
+ else
+ fun_l6_n767(x)
+ end
+end
+
+def fun_l5_n855(x)
+ if (x < 1)
+ fun_l6_n196(x)
+ else
+ fun_l6_n41(x)
+ end
+end
+
+def fun_l5_n856(x)
+ if (x < 1)
+ fun_l6_n548(x)
+ else
+ fun_l6_n860(x)
+ end
+end
+
+def fun_l5_n857(x)
+ if (x < 1)
+ fun_l6_n380(x)
+ else
+ fun_l6_n601(x)
+ end
+end
+
+def fun_l5_n858(x)
+ if (x < 1)
+ fun_l6_n272(x)
+ else
+ fun_l6_n376(x)
+ end
+end
+
+def fun_l5_n859(x)
+ if (x < 1)
+ fun_l6_n179(x)
+ else
+ fun_l6_n978(x)
+ end
+end
+
+def fun_l5_n860(x)
+ if (x < 1)
+ fun_l6_n52(x)
+ else
+ fun_l6_n827(x)
+ end
+end
+
+def fun_l5_n861(x)
+ if (x < 1)
+ fun_l6_n611(x)
+ else
+ fun_l6_n929(x)
+ end
+end
+
+def fun_l5_n862(x)
+ if (x < 1)
+ fun_l6_n179(x)
+ else
+ fun_l6_n171(x)
+ end
+end
+
+def fun_l5_n863(x)
+ if (x < 1)
+ fun_l6_n611(x)
+ else
+ fun_l6_n663(x)
+ end
+end
+
+def fun_l5_n864(x)
+ if (x < 1)
+ fun_l6_n539(x)
+ else
+ fun_l6_n356(x)
+ end
+end
+
+def fun_l5_n865(x)
+ if (x < 1)
+ fun_l6_n401(x)
+ else
+ fun_l6_n302(x)
+ end
+end
+
+def fun_l5_n866(x)
+ if (x < 1)
+ fun_l6_n94(x)
+ else
+ fun_l6_n761(x)
+ end
+end
+
+def fun_l5_n867(x)
+ if (x < 1)
+ fun_l6_n818(x)
+ else
+ fun_l6_n500(x)
+ end
+end
+
+def fun_l5_n868(x)
+ if (x < 1)
+ fun_l6_n956(x)
+ else
+ fun_l6_n661(x)
+ end
+end
+
+def fun_l5_n869(x)
+ if (x < 1)
+ fun_l6_n628(x)
+ else
+ fun_l6_n765(x)
+ end
+end
+
+def fun_l5_n870(x)
+ if (x < 1)
+ fun_l6_n654(x)
+ else
+ fun_l6_n181(x)
+ end
+end
+
+def fun_l5_n871(x)
+ if (x < 1)
+ fun_l6_n70(x)
+ else
+ fun_l6_n157(x)
+ end
+end
+
+def fun_l5_n872(x)
+ if (x < 1)
+ fun_l6_n133(x)
+ else
+ fun_l6_n806(x)
+ end
+end
+
+def fun_l5_n873(x)
+ if (x < 1)
+ fun_l6_n100(x)
+ else
+ fun_l6_n479(x)
+ end
+end
+
+def fun_l5_n874(x)
+ if (x < 1)
+ fun_l6_n401(x)
+ else
+ fun_l6_n567(x)
+ end
+end
+
+def fun_l5_n875(x)
+ if (x < 1)
+ fun_l6_n399(x)
+ else
+ fun_l6_n369(x)
+ end
+end
+
+def fun_l5_n876(x)
+ if (x < 1)
+ fun_l6_n412(x)
+ else
+ fun_l6_n763(x)
+ end
+end
+
+def fun_l5_n877(x)
+ if (x < 1)
+ fun_l6_n498(x)
+ else
+ fun_l6_n192(x)
+ end
+end
+
+def fun_l5_n878(x)
+ if (x < 1)
+ fun_l6_n798(x)
+ else
+ fun_l6_n473(x)
+ end
+end
+
+def fun_l5_n879(x)
+ if (x < 1)
+ fun_l6_n166(x)
+ else
+ fun_l6_n301(x)
+ end
+end
+
+def fun_l5_n880(x)
+ if (x < 1)
+ fun_l6_n616(x)
+ else
+ fun_l6_n28(x)
+ end
+end
+
+def fun_l5_n881(x)
+ if (x < 1)
+ fun_l6_n315(x)
+ else
+ fun_l6_n92(x)
+ end
+end
+
+def fun_l5_n882(x)
+ if (x < 1)
+ fun_l6_n548(x)
+ else
+ fun_l6_n41(x)
+ end
+end
+
+def fun_l5_n883(x)
+ if (x < 1)
+ fun_l6_n279(x)
+ else
+ fun_l6_n72(x)
+ end
+end
+
+def fun_l5_n884(x)
+ if (x < 1)
+ fun_l6_n206(x)
+ else
+ fun_l6_n384(x)
+ end
+end
+
+def fun_l5_n885(x)
+ if (x < 1)
+ fun_l6_n191(x)
+ else
+ fun_l6_n84(x)
+ end
+end
+
+def fun_l5_n886(x)
+ if (x < 1)
+ fun_l6_n960(x)
+ else
+ fun_l6_n495(x)
+ end
+end
+
+def fun_l5_n887(x)
+ if (x < 1)
+ fun_l6_n459(x)
+ else
+ fun_l6_n757(x)
+ end
+end
+
+def fun_l5_n888(x)
+ if (x < 1)
+ fun_l6_n634(x)
+ else
+ fun_l6_n463(x)
+ end
+end
+
+def fun_l5_n889(x)
+ if (x < 1)
+ fun_l6_n611(x)
+ else
+ fun_l6_n469(x)
+ end
+end
+
+def fun_l5_n890(x)
+ if (x < 1)
+ fun_l6_n787(x)
+ else
+ fun_l6_n707(x)
+ end
+end
+
+def fun_l5_n891(x)
+ if (x < 1)
+ fun_l6_n201(x)
+ else
+ fun_l6_n223(x)
+ end
+end
+
+def fun_l5_n892(x)
+ if (x < 1)
+ fun_l6_n214(x)
+ else
+ fun_l6_n303(x)
+ end
+end
+
+def fun_l5_n893(x)
+ if (x < 1)
+ fun_l6_n85(x)
+ else
+ fun_l6_n802(x)
+ end
+end
+
+def fun_l5_n894(x)
+ if (x < 1)
+ fun_l6_n816(x)
+ else
+ fun_l6_n325(x)
+ end
+end
+
+def fun_l5_n895(x)
+ if (x < 1)
+ fun_l6_n491(x)
+ else
+ fun_l6_n907(x)
+ end
+end
+
+def fun_l5_n896(x)
+ if (x < 1)
+ fun_l6_n809(x)
+ else
+ fun_l6_n364(x)
+ end
+end
+
+def fun_l5_n897(x)
+ if (x < 1)
+ fun_l6_n739(x)
+ else
+ fun_l6_n192(x)
+ end
+end
+
+def fun_l5_n898(x)
+ if (x < 1)
+ fun_l6_n461(x)
+ else
+ fun_l6_n594(x)
+ end
+end
+
+def fun_l5_n899(x)
+ if (x < 1)
+ fun_l6_n256(x)
+ else
+ fun_l6_n521(x)
+ end
+end
+
+def fun_l5_n900(x)
+ if (x < 1)
+ fun_l6_n6(x)
+ else
+ fun_l6_n127(x)
+ end
+end
+
+def fun_l5_n901(x)
+ if (x < 1)
+ fun_l6_n655(x)
+ else
+ fun_l6_n426(x)
+ end
+end
+
+def fun_l5_n902(x)
+ if (x < 1)
+ fun_l6_n936(x)
+ else
+ fun_l6_n275(x)
+ end
+end
+
+def fun_l5_n903(x)
+ if (x < 1)
+ fun_l6_n738(x)
+ else
+ fun_l6_n654(x)
+ end
+end
+
+def fun_l5_n904(x)
+ if (x < 1)
+ fun_l6_n583(x)
+ else
+ fun_l6_n600(x)
+ end
+end
+
+def fun_l5_n905(x)
+ if (x < 1)
+ fun_l6_n228(x)
+ else
+ fun_l6_n591(x)
+ end
+end
+
+def fun_l5_n906(x)
+ if (x < 1)
+ fun_l6_n292(x)
+ else
+ fun_l6_n55(x)
+ end
+end
+
+def fun_l5_n907(x)
+ if (x < 1)
+ fun_l6_n457(x)
+ else
+ fun_l6_n513(x)
+ end
+end
+
+def fun_l5_n908(x)
+ if (x < 1)
+ fun_l6_n542(x)
+ else
+ fun_l6_n593(x)
+ end
+end
+
+def fun_l5_n909(x)
+ if (x < 1)
+ fun_l6_n478(x)
+ else
+ fun_l6_n782(x)
+ end
+end
+
+def fun_l5_n910(x)
+ if (x < 1)
+ fun_l6_n780(x)
+ else
+ fun_l6_n275(x)
+ end
+end
+
+def fun_l5_n911(x)
+ if (x < 1)
+ fun_l6_n631(x)
+ else
+ fun_l6_n811(x)
+ end
+end
+
+def fun_l5_n912(x)
+ if (x < 1)
+ fun_l6_n17(x)
+ else
+ fun_l6_n837(x)
+ end
+end
+
+def fun_l5_n913(x)
+ if (x < 1)
+ fun_l6_n607(x)
+ else
+ fun_l6_n159(x)
+ end
+end
+
+def fun_l5_n914(x)
+ if (x < 1)
+ fun_l6_n23(x)
+ else
+ fun_l6_n70(x)
+ end
+end
+
+def fun_l5_n915(x)
+ if (x < 1)
+ fun_l6_n778(x)
+ else
+ fun_l6_n643(x)
+ end
+end
+
+def fun_l5_n916(x)
+ if (x < 1)
+ fun_l6_n707(x)
+ else
+ fun_l6_n121(x)
+ end
+end
+
+def fun_l5_n917(x)
+ if (x < 1)
+ fun_l6_n816(x)
+ else
+ fun_l6_n67(x)
+ end
+end
+
+def fun_l5_n918(x)
+ if (x < 1)
+ fun_l6_n963(x)
+ else
+ fun_l6_n520(x)
+ end
+end
+
+def fun_l5_n919(x)
+ if (x < 1)
+ fun_l6_n251(x)
+ else
+ fun_l6_n111(x)
+ end
+end
+
+def fun_l5_n920(x)
+ if (x < 1)
+ fun_l6_n198(x)
+ else
+ fun_l6_n872(x)
+ end
+end
+
+def fun_l5_n921(x)
+ if (x < 1)
+ fun_l6_n364(x)
+ else
+ fun_l6_n369(x)
+ end
+end
+
+def fun_l5_n922(x)
+ if (x < 1)
+ fun_l6_n7(x)
+ else
+ fun_l6_n314(x)
+ end
+end
+
+def fun_l5_n923(x)
+ if (x < 1)
+ fun_l6_n578(x)
+ else
+ fun_l6_n502(x)
+ end
+end
+
+def fun_l5_n924(x)
+ if (x < 1)
+ fun_l6_n450(x)
+ else
+ fun_l6_n680(x)
+ end
+end
+
+def fun_l5_n925(x)
+ if (x < 1)
+ fun_l6_n469(x)
+ else
+ fun_l6_n973(x)
+ end
+end
+
+def fun_l5_n926(x)
+ if (x < 1)
+ fun_l6_n114(x)
+ else
+ fun_l6_n862(x)
+ end
+end
+
+def fun_l5_n927(x)
+ if (x < 1)
+ fun_l6_n786(x)
+ else
+ fun_l6_n861(x)
+ end
+end
+
+def fun_l5_n928(x)
+ if (x < 1)
+ fun_l6_n40(x)
+ else
+ fun_l6_n665(x)
+ end
+end
+
+def fun_l5_n929(x)
+ if (x < 1)
+ fun_l6_n997(x)
+ else
+ fun_l6_n137(x)
+ end
+end
+
+def fun_l5_n930(x)
+ if (x < 1)
+ fun_l6_n547(x)
+ else
+ fun_l6_n547(x)
+ end
+end
+
+def fun_l5_n931(x)
+ if (x < 1)
+ fun_l6_n310(x)
+ else
+ fun_l6_n986(x)
+ end
+end
+
+def fun_l5_n932(x)
+ if (x < 1)
+ fun_l6_n339(x)
+ else
+ fun_l6_n18(x)
+ end
+end
+
+def fun_l5_n933(x)
+ if (x < 1)
+ fun_l6_n825(x)
+ else
+ fun_l6_n418(x)
+ end
+end
+
+def fun_l5_n934(x)
+ if (x < 1)
+ fun_l6_n509(x)
+ else
+ fun_l6_n589(x)
+ end
+end
+
+def fun_l5_n935(x)
+ if (x < 1)
+ fun_l6_n936(x)
+ else
+ fun_l6_n113(x)
+ end
+end
+
+def fun_l5_n936(x)
+ if (x < 1)
+ fun_l6_n530(x)
+ else
+ fun_l6_n841(x)
+ end
+end
+
+def fun_l5_n937(x)
+ if (x < 1)
+ fun_l6_n431(x)
+ else
+ fun_l6_n531(x)
+ end
+end
+
+def fun_l5_n938(x)
+ if (x < 1)
+ fun_l6_n791(x)
+ else
+ fun_l6_n41(x)
+ end
+end
+
+def fun_l5_n939(x)
+ if (x < 1)
+ fun_l6_n873(x)
+ else
+ fun_l6_n336(x)
+ end
+end
+
+def fun_l5_n940(x)
+ if (x < 1)
+ fun_l6_n925(x)
+ else
+ fun_l6_n746(x)
+ end
+end
+
+def fun_l5_n941(x)
+ if (x < 1)
+ fun_l6_n995(x)
+ else
+ fun_l6_n29(x)
+ end
+end
+
+def fun_l5_n942(x)
+ if (x < 1)
+ fun_l6_n984(x)
+ else
+ fun_l6_n455(x)
+ end
+end
+
+def fun_l5_n943(x)
+ if (x < 1)
+ fun_l6_n660(x)
+ else
+ fun_l6_n934(x)
+ end
+end
+
+def fun_l5_n944(x)
+ if (x < 1)
+ fun_l6_n625(x)
+ else
+ fun_l6_n523(x)
+ end
+end
+
+def fun_l5_n945(x)
+ if (x < 1)
+ fun_l6_n407(x)
+ else
+ fun_l6_n766(x)
+ end
+end
+
+def fun_l5_n946(x)
+ if (x < 1)
+ fun_l6_n80(x)
+ else
+ fun_l6_n75(x)
+ end
+end
+
+def fun_l5_n947(x)
+ if (x < 1)
+ fun_l6_n465(x)
+ else
+ fun_l6_n57(x)
+ end
+end
+
+def fun_l5_n948(x)
+ if (x < 1)
+ fun_l6_n750(x)
+ else
+ fun_l6_n278(x)
+ end
+end
+
+def fun_l5_n949(x)
+ if (x < 1)
+ fun_l6_n917(x)
+ else
+ fun_l6_n472(x)
+ end
+end
+
+def fun_l5_n950(x)
+ if (x < 1)
+ fun_l6_n917(x)
+ else
+ fun_l6_n71(x)
+ end
+end
+
+def fun_l5_n951(x)
+ if (x < 1)
+ fun_l6_n420(x)
+ else
+ fun_l6_n132(x)
+ end
+end
+
+def fun_l5_n952(x)
+ if (x < 1)
+ fun_l6_n112(x)
+ else
+ fun_l6_n396(x)
+ end
+end
+
+def fun_l5_n953(x)
+ if (x < 1)
+ fun_l6_n705(x)
+ else
+ fun_l6_n978(x)
+ end
+end
+
+def fun_l5_n954(x)
+ if (x < 1)
+ fun_l6_n761(x)
+ else
+ fun_l6_n136(x)
+ end
+end
+
+def fun_l5_n955(x)
+ if (x < 1)
+ fun_l6_n518(x)
+ else
+ fun_l6_n687(x)
+ end
+end
+
+def fun_l5_n956(x)
+ if (x < 1)
+ fun_l6_n308(x)
+ else
+ fun_l6_n34(x)
+ end
+end
+
+def fun_l5_n957(x)
+ if (x < 1)
+ fun_l6_n904(x)
+ else
+ fun_l6_n129(x)
+ end
+end
+
+def fun_l5_n958(x)
+ if (x < 1)
+ fun_l6_n271(x)
+ else
+ fun_l6_n444(x)
+ end
+end
+
+def fun_l5_n959(x)
+ if (x < 1)
+ fun_l6_n690(x)
+ else
+ fun_l6_n118(x)
+ end
+end
+
+def fun_l5_n960(x)
+ if (x < 1)
+ fun_l6_n509(x)
+ else
+ fun_l6_n957(x)
+ end
+end
+
+def fun_l5_n961(x)
+ if (x < 1)
+ fun_l6_n889(x)
+ else
+ fun_l6_n26(x)
+ end
+end
+
+def fun_l5_n962(x)
+ if (x < 1)
+ fun_l6_n802(x)
+ else
+ fun_l6_n364(x)
+ end
+end
+
+def fun_l5_n963(x)
+ if (x < 1)
+ fun_l6_n368(x)
+ else
+ fun_l6_n580(x)
+ end
+end
+
+def fun_l5_n964(x)
+ if (x < 1)
+ fun_l6_n879(x)
+ else
+ fun_l6_n925(x)
+ end
+end
+
+def fun_l5_n965(x)
+ if (x < 1)
+ fun_l6_n114(x)
+ else
+ fun_l6_n882(x)
+ end
+end
+
+def fun_l5_n966(x)
+ if (x < 1)
+ fun_l6_n9(x)
+ else
+ fun_l6_n5(x)
+ end
+end
+
+def fun_l5_n967(x)
+ if (x < 1)
+ fun_l6_n536(x)
+ else
+ fun_l6_n251(x)
+ end
+end
+
+def fun_l5_n968(x)
+ if (x < 1)
+ fun_l6_n804(x)
+ else
+ fun_l6_n179(x)
+ end
+end
+
+def fun_l5_n969(x)
+ if (x < 1)
+ fun_l6_n469(x)
+ else
+ fun_l6_n235(x)
+ end
+end
+
+def fun_l5_n970(x)
+ if (x < 1)
+ fun_l6_n328(x)
+ else
+ fun_l6_n763(x)
+ end
+end
+
+def fun_l5_n971(x)
+ if (x < 1)
+ fun_l6_n342(x)
+ else
+ fun_l6_n126(x)
+ end
+end
+
+def fun_l5_n972(x)
+ if (x < 1)
+ fun_l6_n901(x)
+ else
+ fun_l6_n674(x)
+ end
+end
+
+def fun_l5_n973(x)
+ if (x < 1)
+ fun_l6_n853(x)
+ else
+ fun_l6_n714(x)
+ end
+end
+
+def fun_l5_n974(x)
+ if (x < 1)
+ fun_l6_n670(x)
+ else
+ fun_l6_n905(x)
+ end
+end
+
+def fun_l5_n975(x)
+ if (x < 1)
+ fun_l6_n873(x)
+ else
+ fun_l6_n193(x)
+ end
+end
+
+def fun_l5_n976(x)
+ if (x < 1)
+ fun_l6_n603(x)
+ else
+ fun_l6_n186(x)
+ end
+end
+
+def fun_l5_n977(x)
+ if (x < 1)
+ fun_l6_n474(x)
+ else
+ fun_l6_n108(x)
+ end
+end
+
+def fun_l5_n978(x)
+ if (x < 1)
+ fun_l6_n347(x)
+ else
+ fun_l6_n872(x)
+ end
+end
+
+def fun_l5_n979(x)
+ if (x < 1)
+ fun_l6_n593(x)
+ else
+ fun_l6_n575(x)
+ end
+end
+
+def fun_l5_n980(x)
+ if (x < 1)
+ fun_l6_n692(x)
+ else
+ fun_l6_n262(x)
+ end
+end
+
+def fun_l5_n981(x)
+ if (x < 1)
+ fun_l6_n226(x)
+ else
+ fun_l6_n600(x)
+ end
+end
+
+def fun_l5_n982(x)
+ if (x < 1)
+ fun_l6_n357(x)
+ else
+ fun_l6_n165(x)
+ end
+end
+
+def fun_l5_n983(x)
+ if (x < 1)
+ fun_l6_n784(x)
+ else
+ fun_l6_n584(x)
+ end
+end
+
+def fun_l5_n984(x)
+ if (x < 1)
+ fun_l6_n339(x)
+ else
+ fun_l6_n754(x)
+ end
+end
+
+def fun_l5_n985(x)
+ if (x < 1)
+ fun_l6_n536(x)
+ else
+ fun_l6_n663(x)
+ end
+end
+
+def fun_l5_n986(x)
+ if (x < 1)
+ fun_l6_n239(x)
+ else
+ fun_l6_n723(x)
+ end
+end
+
+def fun_l5_n987(x)
+ if (x < 1)
+ fun_l6_n928(x)
+ else
+ fun_l6_n403(x)
+ end
+end
+
+def fun_l5_n988(x)
+ if (x < 1)
+ fun_l6_n328(x)
+ else
+ fun_l6_n551(x)
+ end
+end
+
+def fun_l5_n989(x)
+ if (x < 1)
+ fun_l6_n847(x)
+ else
+ fun_l6_n475(x)
+ end
+end
+
+def fun_l5_n990(x)
+ if (x < 1)
+ fun_l6_n405(x)
+ else
+ fun_l6_n39(x)
+ end
+end
+
+def fun_l5_n991(x)
+ if (x < 1)
+ fun_l6_n705(x)
+ else
+ fun_l6_n281(x)
+ end
+end
+
+def fun_l5_n992(x)
+ if (x < 1)
+ fun_l6_n59(x)
+ else
+ fun_l6_n465(x)
+ end
+end
+
+def fun_l5_n993(x)
+ if (x < 1)
+ fun_l6_n828(x)
+ else
+ fun_l6_n705(x)
+ end
+end
+
+def fun_l5_n994(x)
+ if (x < 1)
+ fun_l6_n687(x)
+ else
+ fun_l6_n451(x)
+ end
+end
+
+def fun_l5_n995(x)
+ if (x < 1)
+ fun_l6_n600(x)
+ else
+ fun_l6_n9(x)
+ end
+end
+
+def fun_l5_n996(x)
+ if (x < 1)
+ fun_l6_n205(x)
+ else
+ fun_l6_n242(x)
+ end
+end
+
+def fun_l5_n997(x)
+ if (x < 1)
+ fun_l6_n438(x)
+ else
+ fun_l6_n108(x)
+ end
+end
+
+def fun_l5_n998(x)
+ if (x < 1)
+ fun_l6_n164(x)
+ else
+ fun_l6_n302(x)
+ end
+end
+
+def fun_l5_n999(x)
+ if (x < 1)
+ fun_l6_n533(x)
+ else
+ fun_l6_n685(x)
+ end
+end
+
+def fun_l6_n0(x)
+ if (x < 1)
+ fun_l7_n74(x)
+ else
+ fun_l7_n45(x)
+ end
+end
+
+def fun_l6_n1(x)
+ if (x < 1)
+ fun_l7_n685(x)
+ else
+ fun_l7_n163(x)
+ end
+end
+
+def fun_l6_n2(x)
+ if (x < 1)
+ fun_l7_n325(x)
+ else
+ fun_l7_n365(x)
+ end
+end
+
+def fun_l6_n3(x)
+ if (x < 1)
+ fun_l7_n855(x)
+ else
+ fun_l7_n359(x)
+ end
+end
+
+def fun_l6_n4(x)
+ if (x < 1)
+ fun_l7_n939(x)
+ else
+ fun_l7_n690(x)
+ end
+end
+
+def fun_l6_n5(x)
+ if (x < 1)
+ fun_l7_n689(x)
+ else
+ fun_l7_n681(x)
+ end
+end
+
+def fun_l6_n6(x)
+ if (x < 1)
+ fun_l7_n226(x)
+ else
+ fun_l7_n307(x)
+ end
+end
+
+def fun_l6_n7(x)
+ if (x < 1)
+ fun_l7_n779(x)
+ else
+ fun_l7_n765(x)
+ end
+end
+
+def fun_l6_n8(x)
+ if (x < 1)
+ fun_l7_n85(x)
+ else
+ fun_l7_n967(x)
+ end
+end
+
+def fun_l6_n9(x)
+ if (x < 1)
+ fun_l7_n539(x)
+ else
+ fun_l7_n5(x)
+ end
+end
+
+def fun_l6_n10(x)
+ if (x < 1)
+ fun_l7_n503(x)
+ else
+ fun_l7_n163(x)
+ end
+end
+
+def fun_l6_n11(x)
+ if (x < 1)
+ fun_l7_n24(x)
+ else
+ fun_l7_n374(x)
+ end
+end
+
+def fun_l6_n12(x)
+ if (x < 1)
+ fun_l7_n633(x)
+ else
+ fun_l7_n395(x)
+ end
+end
+
+def fun_l6_n13(x)
+ if (x < 1)
+ fun_l7_n250(x)
+ else
+ fun_l7_n7(x)
+ end
+end
+
+def fun_l6_n14(x)
+ if (x < 1)
+ fun_l7_n22(x)
+ else
+ fun_l7_n560(x)
+ end
+end
+
+def fun_l6_n15(x)
+ if (x < 1)
+ fun_l7_n209(x)
+ else
+ fun_l7_n323(x)
+ end
+end
+
+def fun_l6_n16(x)
+ if (x < 1)
+ fun_l7_n770(x)
+ else
+ fun_l7_n155(x)
+ end
+end
+
+def fun_l6_n17(x)
+ if (x < 1)
+ fun_l7_n976(x)
+ else
+ fun_l7_n712(x)
+ end
+end
+
+def fun_l6_n18(x)
+ if (x < 1)
+ fun_l7_n762(x)
+ else
+ fun_l7_n307(x)
+ end
+end
+
+def fun_l6_n19(x)
+ if (x < 1)
+ fun_l7_n911(x)
+ else
+ fun_l7_n788(x)
+ end
+end
+
+def fun_l6_n20(x)
+ if (x < 1)
+ fun_l7_n40(x)
+ else
+ fun_l7_n269(x)
+ end
+end
+
+def fun_l6_n21(x)
+ if (x < 1)
+ fun_l7_n707(x)
+ else
+ fun_l7_n861(x)
+ end
+end
+
+def fun_l6_n22(x)
+ if (x < 1)
+ fun_l7_n591(x)
+ else
+ fun_l7_n557(x)
+ end
+end
+
+def fun_l6_n23(x)
+ if (x < 1)
+ fun_l7_n890(x)
+ else
+ fun_l7_n142(x)
+ end
+end
+
+def fun_l6_n24(x)
+ if (x < 1)
+ fun_l7_n563(x)
+ else
+ fun_l7_n509(x)
+ end
+end
+
+def fun_l6_n25(x)
+ if (x < 1)
+ fun_l7_n630(x)
+ else
+ fun_l7_n784(x)
+ end
+end
+
+def fun_l6_n26(x)
+ if (x < 1)
+ fun_l7_n30(x)
+ else
+ fun_l7_n826(x)
+ end
+end
+
+def fun_l6_n27(x)
+ if (x < 1)
+ fun_l7_n223(x)
+ else
+ fun_l7_n800(x)
+ end
+end
+
+def fun_l6_n28(x)
+ if (x < 1)
+ fun_l7_n51(x)
+ else
+ fun_l7_n963(x)
+ end
+end
+
+def fun_l6_n29(x)
+ if (x < 1)
+ fun_l7_n603(x)
+ else
+ fun_l7_n988(x)
+ end
+end
+
+def fun_l6_n30(x)
+ if (x < 1)
+ fun_l7_n258(x)
+ else
+ fun_l7_n512(x)
+ end
+end
+
+def fun_l6_n31(x)
+ if (x < 1)
+ fun_l7_n649(x)
+ else
+ fun_l7_n772(x)
+ end
+end
+
+def fun_l6_n32(x)
+ if (x < 1)
+ fun_l7_n999(x)
+ else
+ fun_l7_n592(x)
+ end
+end
+
+def fun_l6_n33(x)
+ if (x < 1)
+ fun_l7_n371(x)
+ else
+ fun_l7_n801(x)
+ end
+end
+
+def fun_l6_n34(x)
+ if (x < 1)
+ fun_l7_n905(x)
+ else
+ fun_l7_n701(x)
+ end
+end
+
+def fun_l6_n35(x)
+ if (x < 1)
+ fun_l7_n142(x)
+ else
+ fun_l7_n933(x)
+ end
+end
+
+def fun_l6_n36(x)
+ if (x < 1)
+ fun_l7_n987(x)
+ else
+ fun_l7_n305(x)
+ end
+end
+
+def fun_l6_n37(x)
+ if (x < 1)
+ fun_l7_n360(x)
+ else
+ fun_l7_n266(x)
+ end
+end
+
+def fun_l6_n38(x)
+ if (x < 1)
+ fun_l7_n799(x)
+ else
+ fun_l7_n938(x)
+ end
+end
+
+def fun_l6_n39(x)
+ if (x < 1)
+ fun_l7_n921(x)
+ else
+ fun_l7_n739(x)
+ end
+end
+
+def fun_l6_n40(x)
+ if (x < 1)
+ fun_l7_n553(x)
+ else
+ fun_l7_n236(x)
+ end
+end
+
+def fun_l6_n41(x)
+ if (x < 1)
+ fun_l7_n592(x)
+ else
+ fun_l7_n335(x)
+ end
+end
+
+def fun_l6_n42(x)
+ if (x < 1)
+ fun_l7_n33(x)
+ else
+ fun_l7_n521(x)
+ end
+end
+
+def fun_l6_n43(x)
+ if (x < 1)
+ fun_l7_n278(x)
+ else
+ fun_l7_n866(x)
+ end
+end
+
+def fun_l6_n44(x)
+ if (x < 1)
+ fun_l7_n808(x)
+ else
+ fun_l7_n394(x)
+ end
+end
+
+def fun_l6_n45(x)
+ if (x < 1)
+ fun_l7_n556(x)
+ else
+ fun_l7_n462(x)
+ end
+end
+
+def fun_l6_n46(x)
+ if (x < 1)
+ fun_l7_n72(x)
+ else
+ fun_l7_n558(x)
+ end
+end
+
+def fun_l6_n47(x)
+ if (x < 1)
+ fun_l7_n583(x)
+ else
+ fun_l7_n472(x)
+ end
+end
+
+def fun_l6_n48(x)
+ if (x < 1)
+ fun_l7_n577(x)
+ else
+ fun_l7_n264(x)
+ end
+end
+
+def fun_l6_n49(x)
+ if (x < 1)
+ fun_l7_n167(x)
+ else
+ fun_l7_n570(x)
+ end
+end
+
+def fun_l6_n50(x)
+ if (x < 1)
+ fun_l7_n707(x)
+ else
+ fun_l7_n127(x)
+ end
+end
+
+def fun_l6_n51(x)
+ if (x < 1)
+ fun_l7_n151(x)
+ else
+ fun_l7_n82(x)
+ end
+end
+
+def fun_l6_n52(x)
+ if (x < 1)
+ fun_l7_n729(x)
+ else
+ fun_l7_n785(x)
+ end
+end
+
+def fun_l6_n53(x)
+ if (x < 1)
+ fun_l7_n236(x)
+ else
+ fun_l7_n84(x)
+ end
+end
+
+def fun_l6_n54(x)
+ if (x < 1)
+ fun_l7_n573(x)
+ else
+ fun_l7_n92(x)
+ end
+end
+
+def fun_l6_n55(x)
+ if (x < 1)
+ fun_l7_n601(x)
+ else
+ fun_l7_n531(x)
+ end
+end
+
+def fun_l6_n56(x)
+ if (x < 1)
+ fun_l7_n623(x)
+ else
+ fun_l7_n522(x)
+ end
+end
+
+def fun_l6_n57(x)
+ if (x < 1)
+ fun_l7_n707(x)
+ else
+ fun_l7_n830(x)
+ end
+end
+
+def fun_l6_n58(x)
+ if (x < 1)
+ fun_l7_n240(x)
+ else
+ fun_l7_n868(x)
+ end
+end
+
+def fun_l6_n59(x)
+ if (x < 1)
+ fun_l7_n233(x)
+ else
+ fun_l7_n595(x)
+ end
+end
+
+def fun_l6_n60(x)
+ if (x < 1)
+ fun_l7_n384(x)
+ else
+ fun_l7_n605(x)
+ end
+end
+
+def fun_l6_n61(x)
+ if (x < 1)
+ fun_l7_n721(x)
+ else
+ fun_l7_n509(x)
+ end
+end
+
+def fun_l6_n62(x)
+ if (x < 1)
+ fun_l7_n741(x)
+ else
+ fun_l7_n934(x)
+ end
+end
+
+def fun_l6_n63(x)
+ if (x < 1)
+ fun_l7_n724(x)
+ else
+ fun_l7_n963(x)
+ end
+end
+
+def fun_l6_n64(x)
+ if (x < 1)
+ fun_l7_n144(x)
+ else
+ fun_l7_n417(x)
+ end
+end
+
+def fun_l6_n65(x)
+ if (x < 1)
+ fun_l7_n618(x)
+ else
+ fun_l7_n19(x)
+ end
+end
+
+def fun_l6_n66(x)
+ if (x < 1)
+ fun_l7_n128(x)
+ else
+ fun_l7_n257(x)
+ end
+end
+
+def fun_l6_n67(x)
+ if (x < 1)
+ fun_l7_n623(x)
+ else
+ fun_l7_n182(x)
+ end
+end
+
+def fun_l6_n68(x)
+ if (x < 1)
+ fun_l7_n256(x)
+ else
+ fun_l7_n692(x)
+ end
+end
+
+def fun_l6_n69(x)
+ if (x < 1)
+ fun_l7_n360(x)
+ else
+ fun_l7_n34(x)
+ end
+end
+
+def fun_l6_n70(x)
+ if (x < 1)
+ fun_l7_n690(x)
+ else
+ fun_l7_n813(x)
+ end
+end
+
+def fun_l6_n71(x)
+ if (x < 1)
+ fun_l7_n533(x)
+ else
+ fun_l7_n94(x)
+ end
+end
+
+def fun_l6_n72(x)
+ if (x < 1)
+ fun_l7_n245(x)
+ else
+ fun_l7_n548(x)
+ end
+end
+
+def fun_l6_n73(x)
+ if (x < 1)
+ fun_l7_n919(x)
+ else
+ fun_l7_n295(x)
+ end
+end
+
+def fun_l6_n74(x)
+ if (x < 1)
+ fun_l7_n925(x)
+ else
+ fun_l7_n965(x)
+ end
+end
+
+def fun_l6_n75(x)
+ if (x < 1)
+ fun_l7_n825(x)
+ else
+ fun_l7_n79(x)
+ end
+end
+
+def fun_l6_n76(x)
+ if (x < 1)
+ fun_l7_n812(x)
+ else
+ fun_l7_n96(x)
+ end
+end
+
+def fun_l6_n77(x)
+ if (x < 1)
+ fun_l7_n688(x)
+ else
+ fun_l7_n424(x)
+ end
+end
+
+def fun_l6_n78(x)
+ if (x < 1)
+ fun_l7_n171(x)
+ else
+ fun_l7_n728(x)
+ end
+end
+
+def fun_l6_n79(x)
+ if (x < 1)
+ fun_l7_n786(x)
+ else
+ fun_l7_n135(x)
+ end
+end
+
+def fun_l6_n80(x)
+ if (x < 1)
+ fun_l7_n622(x)
+ else
+ fun_l7_n3(x)
+ end
+end
+
+def fun_l6_n81(x)
+ if (x < 1)
+ fun_l7_n386(x)
+ else
+ fun_l7_n946(x)
+ end
+end
+
+def fun_l6_n82(x)
+ if (x < 1)
+ fun_l7_n192(x)
+ else
+ fun_l7_n253(x)
+ end
+end
+
+def fun_l6_n83(x)
+ if (x < 1)
+ fun_l7_n534(x)
+ else
+ fun_l7_n4(x)
+ end
+end
+
+def fun_l6_n84(x)
+ if (x < 1)
+ fun_l7_n198(x)
+ else
+ fun_l7_n455(x)
+ end
+end
+
+def fun_l6_n85(x)
+ if (x < 1)
+ fun_l7_n471(x)
+ else
+ fun_l7_n26(x)
+ end
+end
+
+def fun_l6_n86(x)
+ if (x < 1)
+ fun_l7_n147(x)
+ else
+ fun_l7_n893(x)
+ end
+end
+
+def fun_l6_n87(x)
+ if (x < 1)
+ fun_l7_n424(x)
+ else
+ fun_l7_n713(x)
+ end
+end
+
+def fun_l6_n88(x)
+ if (x < 1)
+ fun_l7_n867(x)
+ else
+ fun_l7_n561(x)
+ end
+end
+
+def fun_l6_n89(x)
+ if (x < 1)
+ fun_l7_n64(x)
+ else
+ fun_l7_n662(x)
+ end
+end
+
+def fun_l6_n90(x)
+ if (x < 1)
+ fun_l7_n849(x)
+ else
+ fun_l7_n105(x)
+ end
+end
+
+def fun_l6_n91(x)
+ if (x < 1)
+ fun_l7_n564(x)
+ else
+ fun_l7_n758(x)
+ end
+end
+
+def fun_l6_n92(x)
+ if (x < 1)
+ fun_l7_n433(x)
+ else
+ fun_l7_n713(x)
+ end
+end
+
+def fun_l6_n93(x)
+ if (x < 1)
+ fun_l7_n283(x)
+ else
+ fun_l7_n577(x)
+ end
+end
+
+def fun_l6_n94(x)
+ if (x < 1)
+ fun_l7_n951(x)
+ else
+ fun_l7_n48(x)
+ end
+end
+
+def fun_l6_n95(x)
+ if (x < 1)
+ fun_l7_n966(x)
+ else
+ fun_l7_n789(x)
+ end
+end
+
+def fun_l6_n96(x)
+ if (x < 1)
+ fun_l7_n954(x)
+ else
+ fun_l7_n431(x)
+ end
+end
+
+def fun_l6_n97(x)
+ if (x < 1)
+ fun_l7_n720(x)
+ else
+ fun_l7_n847(x)
+ end
+end
+
+def fun_l6_n98(x)
+ if (x < 1)
+ fun_l7_n517(x)
+ else
+ fun_l7_n378(x)
+ end
+end
+
+def fun_l6_n99(x)
+ if (x < 1)
+ fun_l7_n951(x)
+ else
+ fun_l7_n688(x)
+ end
+end
+
+def fun_l6_n100(x)
+ if (x < 1)
+ fun_l7_n163(x)
+ else
+ fun_l7_n540(x)
+ end
+end
+
+def fun_l6_n101(x)
+ if (x < 1)
+ fun_l7_n423(x)
+ else
+ fun_l7_n10(x)
+ end
+end
+
+def fun_l6_n102(x)
+ if (x < 1)
+ fun_l7_n346(x)
+ else
+ fun_l7_n409(x)
+ end
+end
+
+def fun_l6_n103(x)
+ if (x < 1)
+ fun_l7_n686(x)
+ else
+ fun_l7_n432(x)
+ end
+end
+
+def fun_l6_n104(x)
+ if (x < 1)
+ fun_l7_n144(x)
+ else
+ fun_l7_n510(x)
+ end
+end
+
+def fun_l6_n105(x)
+ if (x < 1)
+ fun_l7_n568(x)
+ else
+ fun_l7_n756(x)
+ end
+end
+
+def fun_l6_n106(x)
+ if (x < 1)
+ fun_l7_n708(x)
+ else
+ fun_l7_n199(x)
+ end
+end
+
+def fun_l6_n107(x)
+ if (x < 1)
+ fun_l7_n823(x)
+ else
+ fun_l7_n969(x)
+ end
+end
+
+def fun_l6_n108(x)
+ if (x < 1)
+ fun_l7_n156(x)
+ else
+ fun_l7_n211(x)
+ end
+end
+
+def fun_l6_n109(x)
+ if (x < 1)
+ fun_l7_n527(x)
+ else
+ fun_l7_n791(x)
+ end
+end
+
+def fun_l6_n110(x)
+ if (x < 1)
+ fun_l7_n392(x)
+ else
+ fun_l7_n314(x)
+ end
+end
+
+def fun_l6_n111(x)
+ if (x < 1)
+ fun_l7_n355(x)
+ else
+ fun_l7_n222(x)
+ end
+end
+
+def fun_l6_n112(x)
+ if (x < 1)
+ fun_l7_n683(x)
+ else
+ fun_l7_n735(x)
+ end
+end
+
+def fun_l6_n113(x)
+ if (x < 1)
+ fun_l7_n38(x)
+ else
+ fun_l7_n874(x)
+ end
+end
+
+def fun_l6_n114(x)
+ if (x < 1)
+ fun_l7_n190(x)
+ else
+ fun_l7_n209(x)
+ end
+end
+
+def fun_l6_n115(x)
+ if (x < 1)
+ fun_l7_n544(x)
+ else
+ fun_l7_n267(x)
+ end
+end
+
+def fun_l6_n116(x)
+ if (x < 1)
+ fun_l7_n701(x)
+ else
+ fun_l7_n101(x)
+ end
+end
+
+def fun_l6_n117(x)
+ if (x < 1)
+ fun_l7_n283(x)
+ else
+ fun_l7_n979(x)
+ end
+end
+
+def fun_l6_n118(x)
+ if (x < 1)
+ fun_l7_n902(x)
+ else
+ fun_l7_n897(x)
+ end
+end
+
+def fun_l6_n119(x)
+ if (x < 1)
+ fun_l7_n115(x)
+ else
+ fun_l7_n660(x)
+ end
+end
+
+def fun_l6_n120(x)
+ if (x < 1)
+ fun_l7_n804(x)
+ else
+ fun_l7_n536(x)
+ end
+end
+
+def fun_l6_n121(x)
+ if (x < 1)
+ fun_l7_n278(x)
+ else
+ fun_l7_n666(x)
+ end
+end
+
+def fun_l6_n122(x)
+ if (x < 1)
+ fun_l7_n254(x)
+ else
+ fun_l7_n572(x)
+ end
+end
+
+def fun_l6_n123(x)
+ if (x < 1)
+ fun_l7_n474(x)
+ else
+ fun_l7_n912(x)
+ end
+end
+
+def fun_l6_n124(x)
+ if (x < 1)
+ fun_l7_n0(x)
+ else
+ fun_l7_n514(x)
+ end
+end
+
+def fun_l6_n125(x)
+ if (x < 1)
+ fun_l7_n916(x)
+ else
+ fun_l7_n633(x)
+ end
+end
+
+def fun_l6_n126(x)
+ if (x < 1)
+ fun_l7_n184(x)
+ else
+ fun_l7_n379(x)
+ end
+end
+
+def fun_l6_n127(x)
+ if (x < 1)
+ fun_l7_n316(x)
+ else
+ fun_l7_n422(x)
+ end
+end
+
+def fun_l6_n128(x)
+ if (x < 1)
+ fun_l7_n402(x)
+ else
+ fun_l7_n237(x)
+ end
+end
+
+def fun_l6_n129(x)
+ if (x < 1)
+ fun_l7_n303(x)
+ else
+ fun_l7_n61(x)
+ end
+end
+
+def fun_l6_n130(x)
+ if (x < 1)
+ fun_l7_n918(x)
+ else
+ fun_l7_n732(x)
+ end
+end
+
+def fun_l6_n131(x)
+ if (x < 1)
+ fun_l7_n74(x)
+ else
+ fun_l7_n472(x)
+ end
+end
+
+def fun_l6_n132(x)
+ if (x < 1)
+ fun_l7_n648(x)
+ else
+ fun_l7_n546(x)
+ end
+end
+
+def fun_l6_n133(x)
+ if (x < 1)
+ fun_l7_n388(x)
+ else
+ fun_l7_n359(x)
+ end
+end
+
+def fun_l6_n134(x)
+ if (x < 1)
+ fun_l7_n594(x)
+ else
+ fun_l7_n607(x)
+ end
+end
+
+def fun_l6_n135(x)
+ if (x < 1)
+ fun_l7_n642(x)
+ else
+ fun_l7_n188(x)
+ end
+end
+
+def fun_l6_n136(x)
+ if (x < 1)
+ fun_l7_n879(x)
+ else
+ fun_l7_n521(x)
+ end
+end
+
+def fun_l6_n137(x)
+ if (x < 1)
+ fun_l7_n362(x)
+ else
+ fun_l7_n564(x)
+ end
+end
+
+def fun_l6_n138(x)
+ if (x < 1)
+ fun_l7_n590(x)
+ else
+ fun_l7_n59(x)
+ end
+end
+
+def fun_l6_n139(x)
+ if (x < 1)
+ fun_l7_n464(x)
+ else
+ fun_l7_n563(x)
+ end
+end
+
+def fun_l6_n140(x)
+ if (x < 1)
+ fun_l7_n710(x)
+ else
+ fun_l7_n890(x)
+ end
+end
+
+def fun_l6_n141(x)
+ if (x < 1)
+ fun_l7_n63(x)
+ else
+ fun_l7_n470(x)
+ end
+end
+
+def fun_l6_n142(x)
+ if (x < 1)
+ fun_l7_n160(x)
+ else
+ fun_l7_n153(x)
+ end
+end
+
+def fun_l6_n143(x)
+ if (x < 1)
+ fun_l7_n908(x)
+ else
+ fun_l7_n286(x)
+ end
+end
+
+def fun_l6_n144(x)
+ if (x < 1)
+ fun_l7_n193(x)
+ else
+ fun_l7_n657(x)
+ end
+end
+
+def fun_l6_n145(x)
+ if (x < 1)
+ fun_l7_n885(x)
+ else
+ fun_l7_n981(x)
+ end
+end
+
+def fun_l6_n146(x)
+ if (x < 1)
+ fun_l7_n479(x)
+ else
+ fun_l7_n188(x)
+ end
+end
+
+def fun_l6_n147(x)
+ if (x < 1)
+ fun_l7_n378(x)
+ else
+ fun_l7_n299(x)
+ end
+end
+
+def fun_l6_n148(x)
+ if (x < 1)
+ fun_l7_n239(x)
+ else
+ fun_l7_n70(x)
+ end
+end
+
+def fun_l6_n149(x)
+ if (x < 1)
+ fun_l7_n864(x)
+ else
+ fun_l7_n131(x)
+ end
+end
+
+def fun_l6_n150(x)
+ if (x < 1)
+ fun_l7_n748(x)
+ else
+ fun_l7_n112(x)
+ end
+end
+
+def fun_l6_n151(x)
+ if (x < 1)
+ fun_l7_n466(x)
+ else
+ fun_l7_n672(x)
+ end
+end
+
+def fun_l6_n152(x)
+ if (x < 1)
+ fun_l7_n242(x)
+ else
+ fun_l7_n8(x)
+ end
+end
+
+def fun_l6_n153(x)
+ if (x < 1)
+ fun_l7_n408(x)
+ else
+ fun_l7_n538(x)
+ end
+end
+
+def fun_l6_n154(x)
+ if (x < 1)
+ fun_l7_n984(x)
+ else
+ fun_l7_n134(x)
+ end
+end
+
+def fun_l6_n155(x)
+ if (x < 1)
+ fun_l7_n384(x)
+ else
+ fun_l7_n62(x)
+ end
+end
+
+def fun_l6_n156(x)
+ if (x < 1)
+ fun_l7_n576(x)
+ else
+ fun_l7_n168(x)
+ end
+end
+
+def fun_l6_n157(x)
+ if (x < 1)
+ fun_l7_n371(x)
+ else
+ fun_l7_n470(x)
+ end
+end
+
+def fun_l6_n158(x)
+ if (x < 1)
+ fun_l7_n30(x)
+ else
+ fun_l7_n11(x)
+ end
+end
+
+def fun_l6_n159(x)
+ if (x < 1)
+ fun_l7_n45(x)
+ else
+ fun_l7_n191(x)
+ end
+end
+
+def fun_l6_n160(x)
+ if (x < 1)
+ fun_l7_n659(x)
+ else
+ fun_l7_n981(x)
+ end
+end
+
+def fun_l6_n161(x)
+ if (x < 1)
+ fun_l7_n586(x)
+ else
+ fun_l7_n617(x)
+ end
+end
+
+def fun_l6_n162(x)
+ if (x < 1)
+ fun_l7_n844(x)
+ else
+ fun_l7_n831(x)
+ end
+end
+
+def fun_l6_n163(x)
+ if (x < 1)
+ fun_l7_n14(x)
+ else
+ fun_l7_n631(x)
+ end
+end
+
+def fun_l6_n164(x)
+ if (x < 1)
+ fun_l7_n388(x)
+ else
+ fun_l7_n837(x)
+ end
+end
+
+def fun_l6_n165(x)
+ if (x < 1)
+ fun_l7_n825(x)
+ else
+ fun_l7_n902(x)
+ end
+end
+
+def fun_l6_n166(x)
+ if (x < 1)
+ fun_l7_n490(x)
+ else
+ fun_l7_n724(x)
+ end
+end
+
+def fun_l6_n167(x)
+ if (x < 1)
+ fun_l7_n612(x)
+ else
+ fun_l7_n792(x)
+ end
+end
+
+def fun_l6_n168(x)
+ if (x < 1)
+ fun_l7_n344(x)
+ else
+ fun_l7_n624(x)
+ end
+end
+
+def fun_l6_n169(x)
+ if (x < 1)
+ fun_l7_n255(x)
+ else
+ fun_l7_n456(x)
+ end
+end
+
+def fun_l6_n170(x)
+ if (x < 1)
+ fun_l7_n893(x)
+ else
+ fun_l7_n358(x)
+ end
+end
+
+def fun_l6_n171(x)
+ if (x < 1)
+ fun_l7_n926(x)
+ else
+ fun_l7_n988(x)
+ end
+end
+
+def fun_l6_n172(x)
+ if (x < 1)
+ fun_l7_n856(x)
+ else
+ fun_l7_n553(x)
+ end
+end
+
+def fun_l6_n173(x)
+ if (x < 1)
+ fun_l7_n694(x)
+ else
+ fun_l7_n918(x)
+ end
+end
+
+def fun_l6_n174(x)
+ if (x < 1)
+ fun_l7_n884(x)
+ else
+ fun_l7_n711(x)
+ end
+end
+
+def fun_l6_n175(x)
+ if (x < 1)
+ fun_l7_n884(x)
+ else
+ fun_l7_n28(x)
+ end
+end
+
+def fun_l6_n176(x)
+ if (x < 1)
+ fun_l7_n378(x)
+ else
+ fun_l7_n14(x)
+ end
+end
+
+def fun_l6_n177(x)
+ if (x < 1)
+ fun_l7_n844(x)
+ else
+ fun_l7_n549(x)
+ end
+end
+
+def fun_l6_n178(x)
+ if (x < 1)
+ fun_l7_n396(x)
+ else
+ fun_l7_n802(x)
+ end
+end
+
+def fun_l6_n179(x)
+ if (x < 1)
+ fun_l7_n407(x)
+ else
+ fun_l7_n612(x)
+ end
+end
+
+def fun_l6_n180(x)
+ if (x < 1)
+ fun_l7_n403(x)
+ else
+ fun_l7_n768(x)
+ end
+end
+
+def fun_l6_n181(x)
+ if (x < 1)
+ fun_l7_n412(x)
+ else
+ fun_l7_n301(x)
+ end
+end
+
+def fun_l6_n182(x)
+ if (x < 1)
+ fun_l7_n479(x)
+ else
+ fun_l7_n923(x)
+ end
+end
+
+def fun_l6_n183(x)
+ if (x < 1)
+ fun_l7_n664(x)
+ else
+ fun_l7_n451(x)
+ end
+end
+
+def fun_l6_n184(x)
+ if (x < 1)
+ fun_l7_n854(x)
+ else
+ fun_l7_n438(x)
+ end
+end
+
+def fun_l6_n185(x)
+ if (x < 1)
+ fun_l7_n994(x)
+ else
+ fun_l7_n726(x)
+ end
+end
+
+def fun_l6_n186(x)
+ if (x < 1)
+ fun_l7_n128(x)
+ else
+ fun_l7_n166(x)
+ end
+end
+
+def fun_l6_n187(x)
+ if (x < 1)
+ fun_l7_n515(x)
+ else
+ fun_l7_n21(x)
+ end
+end
+
+def fun_l6_n188(x)
+ if (x < 1)
+ fun_l7_n487(x)
+ else
+ fun_l7_n631(x)
+ end
+end
+
+def fun_l6_n189(x)
+ if (x < 1)
+ fun_l7_n16(x)
+ else
+ fun_l7_n579(x)
+ end
+end
+
+def fun_l6_n190(x)
+ if (x < 1)
+ fun_l7_n30(x)
+ else
+ fun_l7_n417(x)
+ end
+end
+
+def fun_l6_n191(x)
+ if (x < 1)
+ fun_l7_n737(x)
+ else
+ fun_l7_n436(x)
+ end
+end
+
+def fun_l6_n192(x)
+ if (x < 1)
+ fun_l7_n260(x)
+ else
+ fun_l7_n657(x)
+ end
+end
+
+def fun_l6_n193(x)
+ if (x < 1)
+ fun_l7_n722(x)
+ else
+ fun_l7_n489(x)
+ end
+end
+
+def fun_l6_n194(x)
+ if (x < 1)
+ fun_l7_n53(x)
+ else
+ fun_l7_n624(x)
+ end
+end
+
+def fun_l6_n195(x)
+ if (x < 1)
+ fun_l7_n91(x)
+ else
+ fun_l7_n597(x)
+ end
+end
+
+def fun_l6_n196(x)
+ if (x < 1)
+ fun_l7_n980(x)
+ else
+ fun_l7_n498(x)
+ end
+end
+
+def fun_l6_n197(x)
+ if (x < 1)
+ fun_l7_n918(x)
+ else
+ fun_l7_n328(x)
+ end
+end
+
+def fun_l6_n198(x)
+ if (x < 1)
+ fun_l7_n184(x)
+ else
+ fun_l7_n761(x)
+ end
+end
+
+def fun_l6_n199(x)
+ if (x < 1)
+ fun_l7_n692(x)
+ else
+ fun_l7_n610(x)
+ end
+end
+
+def fun_l6_n200(x)
+ if (x < 1)
+ fun_l7_n597(x)
+ else
+ fun_l7_n135(x)
+ end
+end
+
+def fun_l6_n201(x)
+ if (x < 1)
+ fun_l7_n973(x)
+ else
+ fun_l7_n453(x)
+ end
+end
+
+def fun_l6_n202(x)
+ if (x < 1)
+ fun_l7_n433(x)
+ else
+ fun_l7_n794(x)
+ end
+end
+
+def fun_l6_n203(x)
+ if (x < 1)
+ fun_l7_n294(x)
+ else
+ fun_l7_n271(x)
+ end
+end
+
+def fun_l6_n204(x)
+ if (x < 1)
+ fun_l7_n783(x)
+ else
+ fun_l7_n20(x)
+ end
+end
+
+def fun_l6_n205(x)
+ if (x < 1)
+ fun_l7_n941(x)
+ else
+ fun_l7_n517(x)
+ end
+end
+
+def fun_l6_n206(x)
+ if (x < 1)
+ fun_l7_n306(x)
+ else
+ fun_l7_n735(x)
+ end
+end
+
+def fun_l6_n207(x)
+ if (x < 1)
+ fun_l7_n63(x)
+ else
+ fun_l7_n313(x)
+ end
+end
+
+def fun_l6_n208(x)
+ if (x < 1)
+ fun_l7_n766(x)
+ else
+ fun_l7_n868(x)
+ end
+end
+
+def fun_l6_n209(x)
+ if (x < 1)
+ fun_l7_n780(x)
+ else
+ fun_l7_n422(x)
+ end
+end
+
+def fun_l6_n210(x)
+ if (x < 1)
+ fun_l7_n279(x)
+ else
+ fun_l7_n695(x)
+ end
+end
+
+def fun_l6_n211(x)
+ if (x < 1)
+ fun_l7_n981(x)
+ else
+ fun_l7_n946(x)
+ end
+end
+
+def fun_l6_n212(x)
+ if (x < 1)
+ fun_l7_n885(x)
+ else
+ fun_l7_n752(x)
+ end
+end
+
+def fun_l6_n213(x)
+ if (x < 1)
+ fun_l7_n238(x)
+ else
+ fun_l7_n322(x)
+ end
+end
+
+def fun_l6_n214(x)
+ if (x < 1)
+ fun_l7_n832(x)
+ else
+ fun_l7_n714(x)
+ end
+end
+
+def fun_l6_n215(x)
+ if (x < 1)
+ fun_l7_n369(x)
+ else
+ fun_l7_n793(x)
+ end
+end
+
+def fun_l6_n216(x)
+ if (x < 1)
+ fun_l7_n897(x)
+ else
+ fun_l7_n22(x)
+ end
+end
+
+def fun_l6_n217(x)
+ if (x < 1)
+ fun_l7_n94(x)
+ else
+ fun_l7_n93(x)
+ end
+end
+
+def fun_l6_n218(x)
+ if (x < 1)
+ fun_l7_n638(x)
+ else
+ fun_l7_n267(x)
+ end
+end
+
+def fun_l6_n219(x)
+ if (x < 1)
+ fun_l7_n254(x)
+ else
+ fun_l7_n162(x)
+ end
+end
+
+def fun_l6_n220(x)
+ if (x < 1)
+ fun_l7_n969(x)
+ else
+ fun_l7_n371(x)
+ end
+end
+
+def fun_l6_n221(x)
+ if (x < 1)
+ fun_l7_n692(x)
+ else
+ fun_l7_n421(x)
+ end
+end
+
+def fun_l6_n222(x)
+ if (x < 1)
+ fun_l7_n312(x)
+ else
+ fun_l7_n475(x)
+ end
+end
+
+def fun_l6_n223(x)
+ if (x < 1)
+ fun_l7_n566(x)
+ else
+ fun_l7_n497(x)
+ end
+end
+
+def fun_l6_n224(x)
+ if (x < 1)
+ fun_l7_n560(x)
+ else
+ fun_l7_n60(x)
+ end
+end
+
+def fun_l6_n225(x)
+ if (x < 1)
+ fun_l7_n484(x)
+ else
+ fun_l7_n602(x)
+ end
+end
+
+def fun_l6_n226(x)
+ if (x < 1)
+ fun_l7_n399(x)
+ else
+ fun_l7_n614(x)
+ end
+end
+
+def fun_l6_n227(x)
+ if (x < 1)
+ fun_l7_n344(x)
+ else
+ fun_l7_n701(x)
+ end
+end
+
+def fun_l6_n228(x)
+ if (x < 1)
+ fun_l7_n840(x)
+ else
+ fun_l7_n211(x)
+ end
+end
+
+def fun_l6_n229(x)
+ if (x < 1)
+ fun_l7_n423(x)
+ else
+ fun_l7_n51(x)
+ end
+end
+
+def fun_l6_n230(x)
+ if (x < 1)
+ fun_l7_n978(x)
+ else
+ fun_l7_n334(x)
+ end
+end
+
+def fun_l6_n231(x)
+ if (x < 1)
+ fun_l7_n136(x)
+ else
+ fun_l7_n296(x)
+ end
+end
+
+def fun_l6_n232(x)
+ if (x < 1)
+ fun_l7_n24(x)
+ else
+ fun_l7_n282(x)
+ end
+end
+
+def fun_l6_n233(x)
+ if (x < 1)
+ fun_l7_n803(x)
+ else
+ fun_l7_n623(x)
+ end
+end
+
+def fun_l6_n234(x)
+ if (x < 1)
+ fun_l7_n720(x)
+ else
+ fun_l7_n492(x)
+ end
+end
+
+def fun_l6_n235(x)
+ if (x < 1)
+ fun_l7_n627(x)
+ else
+ fun_l7_n260(x)
+ end
+end
+
+def fun_l6_n236(x)
+ if (x < 1)
+ fun_l7_n460(x)
+ else
+ fun_l7_n693(x)
+ end
+end
+
+def fun_l6_n237(x)
+ if (x < 1)
+ fun_l7_n635(x)
+ else
+ fun_l7_n745(x)
+ end
+end
+
+def fun_l6_n238(x)
+ if (x < 1)
+ fun_l7_n76(x)
+ else
+ fun_l7_n283(x)
+ end
+end
+
+def fun_l6_n239(x)
+ if (x < 1)
+ fun_l7_n183(x)
+ else
+ fun_l7_n515(x)
+ end
+end
+
+def fun_l6_n240(x)
+ if (x < 1)
+ fun_l7_n266(x)
+ else
+ fun_l7_n34(x)
+ end
+end
+
+def fun_l6_n241(x)
+ if (x < 1)
+ fun_l7_n480(x)
+ else
+ fun_l7_n868(x)
+ end
+end
+
+def fun_l6_n242(x)
+ if (x < 1)
+ fun_l7_n998(x)
+ else
+ fun_l7_n669(x)
+ end
+end
+
+def fun_l6_n243(x)
+ if (x < 1)
+ fun_l7_n414(x)
+ else
+ fun_l7_n161(x)
+ end
+end
+
+def fun_l6_n244(x)
+ if (x < 1)
+ fun_l7_n104(x)
+ else
+ fun_l7_n659(x)
+ end
+end
+
+def fun_l6_n245(x)
+ if (x < 1)
+ fun_l7_n881(x)
+ else
+ fun_l7_n229(x)
+ end
+end
+
+def fun_l6_n246(x)
+ if (x < 1)
+ fun_l7_n328(x)
+ else
+ fun_l7_n935(x)
+ end
+end
+
+def fun_l6_n247(x)
+ if (x < 1)
+ fun_l7_n433(x)
+ else
+ fun_l7_n565(x)
+ end
+end
+
+def fun_l6_n248(x)
+ if (x < 1)
+ fun_l7_n161(x)
+ else
+ fun_l7_n760(x)
+ end
+end
+
+def fun_l6_n249(x)
+ if (x < 1)
+ fun_l7_n115(x)
+ else
+ fun_l7_n594(x)
+ end
+end
+
+def fun_l6_n250(x)
+ if (x < 1)
+ fun_l7_n350(x)
+ else
+ fun_l7_n310(x)
+ end
+end
+
+def fun_l6_n251(x)
+ if (x < 1)
+ fun_l7_n920(x)
+ else
+ fun_l7_n51(x)
+ end
+end
+
+def fun_l6_n252(x)
+ if (x < 1)
+ fun_l7_n132(x)
+ else
+ fun_l7_n492(x)
+ end
+end
+
+def fun_l6_n253(x)
+ if (x < 1)
+ fun_l7_n618(x)
+ else
+ fun_l7_n325(x)
+ end
+end
+
+def fun_l6_n254(x)
+ if (x < 1)
+ fun_l7_n682(x)
+ else
+ fun_l7_n840(x)
+ end
+end
+
+def fun_l6_n255(x)
+ if (x < 1)
+ fun_l7_n462(x)
+ else
+ fun_l7_n780(x)
+ end
+end
+
+def fun_l6_n256(x)
+ if (x < 1)
+ fun_l7_n712(x)
+ else
+ fun_l7_n995(x)
+ end
+end
+
+def fun_l6_n257(x)
+ if (x < 1)
+ fun_l7_n111(x)
+ else
+ fun_l7_n121(x)
+ end
+end
+
+def fun_l6_n258(x)
+ if (x < 1)
+ fun_l7_n255(x)
+ else
+ fun_l7_n384(x)
+ end
+end
+
+def fun_l6_n259(x)
+ if (x < 1)
+ fun_l7_n610(x)
+ else
+ fun_l7_n555(x)
+ end
+end
+
+def fun_l6_n260(x)
+ if (x < 1)
+ fun_l7_n778(x)
+ else
+ fun_l7_n454(x)
+ end
+end
+
+def fun_l6_n261(x)
+ if (x < 1)
+ fun_l7_n376(x)
+ else
+ fun_l7_n892(x)
+ end
+end
+
+def fun_l6_n262(x)
+ if (x < 1)
+ fun_l7_n454(x)
+ else
+ fun_l7_n468(x)
+ end
+end
+
+def fun_l6_n263(x)
+ if (x < 1)
+ fun_l7_n708(x)
+ else
+ fun_l7_n368(x)
+ end
+end
+
+def fun_l6_n264(x)
+ if (x < 1)
+ fun_l7_n517(x)
+ else
+ fun_l7_n299(x)
+ end
+end
+
+def fun_l6_n265(x)
+ if (x < 1)
+ fun_l7_n987(x)
+ else
+ fun_l7_n318(x)
+ end
+end
+
+def fun_l6_n266(x)
+ if (x < 1)
+ fun_l7_n372(x)
+ else
+ fun_l7_n560(x)
+ end
+end
+
+def fun_l6_n267(x)
+ if (x < 1)
+ fun_l7_n493(x)
+ else
+ fun_l7_n288(x)
+ end
+end
+
+def fun_l6_n268(x)
+ if (x < 1)
+ fun_l7_n278(x)
+ else
+ fun_l7_n10(x)
+ end
+end
+
+def fun_l6_n269(x)
+ if (x < 1)
+ fun_l7_n417(x)
+ else
+ fun_l7_n703(x)
+ end
+end
+
+def fun_l6_n270(x)
+ if (x < 1)
+ fun_l7_n410(x)
+ else
+ fun_l7_n91(x)
+ end
+end
+
+def fun_l6_n271(x)
+ if (x < 1)
+ fun_l7_n281(x)
+ else
+ fun_l7_n447(x)
+ end
+end
+
+def fun_l6_n272(x)
+ if (x < 1)
+ fun_l7_n263(x)
+ else
+ fun_l7_n374(x)
+ end
+end
+
+def fun_l6_n273(x)
+ if (x < 1)
+ fun_l7_n622(x)
+ else
+ fun_l7_n310(x)
+ end
+end
+
+def fun_l6_n274(x)
+ if (x < 1)
+ fun_l7_n176(x)
+ else
+ fun_l7_n227(x)
+ end
+end
+
+def fun_l6_n275(x)
+ if (x < 1)
+ fun_l7_n848(x)
+ else
+ fun_l7_n416(x)
+ end
+end
+
+def fun_l6_n276(x)
+ if (x < 1)
+ fun_l7_n998(x)
+ else
+ fun_l7_n969(x)
+ end
+end
+
+def fun_l6_n277(x)
+ if (x < 1)
+ fun_l7_n356(x)
+ else
+ fun_l7_n722(x)
+ end
+end
+
+def fun_l6_n278(x)
+ if (x < 1)
+ fun_l7_n888(x)
+ else
+ fun_l7_n156(x)
+ end
+end
+
+def fun_l6_n279(x)
+ if (x < 1)
+ fun_l7_n779(x)
+ else
+ fun_l7_n123(x)
+ end
+end
+
+def fun_l6_n280(x)
+ if (x < 1)
+ fun_l7_n393(x)
+ else
+ fun_l7_n765(x)
+ end
+end
+
+def fun_l6_n281(x)
+ if (x < 1)
+ fun_l7_n257(x)
+ else
+ fun_l7_n277(x)
+ end
+end
+
+def fun_l6_n282(x)
+ if (x < 1)
+ fun_l7_n780(x)
+ else
+ fun_l7_n643(x)
+ end
+end
+
+def fun_l6_n283(x)
+ if (x < 1)
+ fun_l7_n63(x)
+ else
+ fun_l7_n449(x)
+ end
+end
+
+def fun_l6_n284(x)
+ if (x < 1)
+ fun_l7_n200(x)
+ else
+ fun_l7_n806(x)
+ end
+end
+
+def fun_l6_n285(x)
+ if (x < 1)
+ fun_l7_n575(x)
+ else
+ fun_l7_n409(x)
+ end
+end
+
+def fun_l6_n286(x)
+ if (x < 1)
+ fun_l7_n377(x)
+ else
+ fun_l7_n456(x)
+ end
+end
+
+def fun_l6_n287(x)
+ if (x < 1)
+ fun_l7_n519(x)
+ else
+ fun_l7_n473(x)
+ end
+end
+
+def fun_l6_n288(x)
+ if (x < 1)
+ fun_l7_n525(x)
+ else
+ fun_l7_n45(x)
+ end
+end
+
+def fun_l6_n289(x)
+ if (x < 1)
+ fun_l7_n678(x)
+ else
+ fun_l7_n910(x)
+ end
+end
+
+def fun_l6_n290(x)
+ if (x < 1)
+ fun_l7_n628(x)
+ else
+ fun_l7_n939(x)
+ end
+end
+
+def fun_l6_n291(x)
+ if (x < 1)
+ fun_l7_n967(x)
+ else
+ fun_l7_n927(x)
+ end
+end
+
+def fun_l6_n292(x)
+ if (x < 1)
+ fun_l7_n634(x)
+ else
+ fun_l7_n184(x)
+ end
+end
+
+def fun_l6_n293(x)
+ if (x < 1)
+ fun_l7_n857(x)
+ else
+ fun_l7_n625(x)
+ end
+end
+
+def fun_l6_n294(x)
+ if (x < 1)
+ fun_l7_n105(x)
+ else
+ fun_l7_n46(x)
+ end
+end
+
+def fun_l6_n295(x)
+ if (x < 1)
+ fun_l7_n642(x)
+ else
+ fun_l7_n566(x)
+ end
+end
+
+def fun_l6_n296(x)
+ if (x < 1)
+ fun_l7_n173(x)
+ else
+ fun_l7_n619(x)
+ end
+end
+
+def fun_l6_n297(x)
+ if (x < 1)
+ fun_l7_n257(x)
+ else
+ fun_l7_n485(x)
+ end
+end
+
+def fun_l6_n298(x)
+ if (x < 1)
+ fun_l7_n447(x)
+ else
+ fun_l7_n808(x)
+ end
+end
+
+def fun_l6_n299(x)
+ if (x < 1)
+ fun_l7_n838(x)
+ else
+ fun_l7_n856(x)
+ end
+end
+
+def fun_l6_n300(x)
+ if (x < 1)
+ fun_l7_n893(x)
+ else
+ fun_l7_n99(x)
+ end
+end
+
+def fun_l6_n301(x)
+ if (x < 1)
+ fun_l7_n180(x)
+ else
+ fun_l7_n126(x)
+ end
+end
+
+def fun_l6_n302(x)
+ if (x < 1)
+ fun_l7_n62(x)
+ else
+ fun_l7_n266(x)
+ end
+end
+
+def fun_l6_n303(x)
+ if (x < 1)
+ fun_l7_n709(x)
+ else
+ fun_l7_n196(x)
+ end
+end
+
+def fun_l6_n304(x)
+ if (x < 1)
+ fun_l7_n408(x)
+ else
+ fun_l7_n730(x)
+ end
+end
+
+def fun_l6_n305(x)
+ if (x < 1)
+ fun_l7_n736(x)
+ else
+ fun_l7_n673(x)
+ end
+end
+
+def fun_l6_n306(x)
+ if (x < 1)
+ fun_l7_n734(x)
+ else
+ fun_l7_n519(x)
+ end
+end
+
+def fun_l6_n307(x)
+ if (x < 1)
+ fun_l7_n629(x)
+ else
+ fun_l7_n670(x)
+ end
+end
+
+def fun_l6_n308(x)
+ if (x < 1)
+ fun_l7_n839(x)
+ else
+ fun_l7_n191(x)
+ end
+end
+
+def fun_l6_n309(x)
+ if (x < 1)
+ fun_l7_n123(x)
+ else
+ fun_l7_n553(x)
+ end
+end
+
+def fun_l6_n310(x)
+ if (x < 1)
+ fun_l7_n812(x)
+ else
+ fun_l7_n815(x)
+ end
+end
+
+def fun_l6_n311(x)
+ if (x < 1)
+ fun_l7_n860(x)
+ else
+ fun_l7_n519(x)
+ end
+end
+
+def fun_l6_n312(x)
+ if (x < 1)
+ fun_l7_n181(x)
+ else
+ fun_l7_n674(x)
+ end
+end
+
+def fun_l6_n313(x)
+ if (x < 1)
+ fun_l7_n654(x)
+ else
+ fun_l7_n500(x)
+ end
+end
+
+def fun_l6_n314(x)
+ if (x < 1)
+ fun_l7_n690(x)
+ else
+ fun_l7_n136(x)
+ end
+end
+
+def fun_l6_n315(x)
+ if (x < 1)
+ fun_l7_n944(x)
+ else
+ fun_l7_n956(x)
+ end
+end
+
+def fun_l6_n316(x)
+ if (x < 1)
+ fun_l7_n471(x)
+ else
+ fun_l7_n245(x)
+ end
+end
+
+def fun_l6_n317(x)
+ if (x < 1)
+ fun_l7_n520(x)
+ else
+ fun_l7_n919(x)
+ end
+end
+
+def fun_l6_n318(x)
+ if (x < 1)
+ fun_l7_n843(x)
+ else
+ fun_l7_n793(x)
+ end
+end
+
+def fun_l6_n319(x)
+ if (x < 1)
+ fun_l7_n765(x)
+ else
+ fun_l7_n363(x)
+ end
+end
+
+def fun_l6_n320(x)
+ if (x < 1)
+ fun_l7_n483(x)
+ else
+ fun_l7_n439(x)
+ end
+end
+
+def fun_l6_n321(x)
+ if (x < 1)
+ fun_l7_n232(x)
+ else
+ fun_l7_n921(x)
+ end
+end
+
+def fun_l6_n322(x)
+ if (x < 1)
+ fun_l7_n398(x)
+ else
+ fun_l7_n862(x)
+ end
+end
+
+def fun_l6_n323(x)
+ if (x < 1)
+ fun_l7_n550(x)
+ else
+ fun_l7_n84(x)
+ end
+end
+
+def fun_l6_n324(x)
+ if (x < 1)
+ fun_l7_n788(x)
+ else
+ fun_l7_n512(x)
+ end
+end
+
+def fun_l6_n325(x)
+ if (x < 1)
+ fun_l7_n512(x)
+ else
+ fun_l7_n303(x)
+ end
+end
+
+def fun_l6_n326(x)
+ if (x < 1)
+ fun_l7_n86(x)
+ else
+ fun_l7_n555(x)
+ end
+end
+
+def fun_l6_n327(x)
+ if (x < 1)
+ fun_l7_n857(x)
+ else
+ fun_l7_n751(x)
+ end
+end
+
+def fun_l6_n328(x)
+ if (x < 1)
+ fun_l7_n341(x)
+ else
+ fun_l7_n208(x)
+ end
+end
+
+def fun_l6_n329(x)
+ if (x < 1)
+ fun_l7_n707(x)
+ else
+ fun_l7_n371(x)
+ end
+end
+
+def fun_l6_n330(x)
+ if (x < 1)
+ fun_l7_n217(x)
+ else
+ fun_l7_n604(x)
+ end
+end
+
+def fun_l6_n331(x)
+ if (x < 1)
+ fun_l7_n607(x)
+ else
+ fun_l7_n471(x)
+ end
+end
+
+def fun_l6_n332(x)
+ if (x < 1)
+ fun_l7_n818(x)
+ else
+ fun_l7_n787(x)
+ end
+end
+
+def fun_l6_n333(x)
+ if (x < 1)
+ fun_l7_n28(x)
+ else
+ fun_l7_n220(x)
+ end
+end
+
+def fun_l6_n334(x)
+ if (x < 1)
+ fun_l7_n666(x)
+ else
+ fun_l7_n995(x)
+ end
+end
+
+def fun_l6_n335(x)
+ if (x < 1)
+ fun_l7_n3(x)
+ else
+ fun_l7_n970(x)
+ end
+end
+
+def fun_l6_n336(x)
+ if (x < 1)
+ fun_l7_n977(x)
+ else
+ fun_l7_n77(x)
+ end
+end
+
+def fun_l6_n337(x)
+ if (x < 1)
+ fun_l7_n54(x)
+ else
+ fun_l7_n814(x)
+ end
+end
+
+def fun_l6_n338(x)
+ if (x < 1)
+ fun_l7_n129(x)
+ else
+ fun_l7_n624(x)
+ end
+end
+
+def fun_l6_n339(x)
+ if (x < 1)
+ fun_l7_n866(x)
+ else
+ fun_l7_n821(x)
+ end
+end
+
+def fun_l6_n340(x)
+ if (x < 1)
+ fun_l7_n912(x)
+ else
+ fun_l7_n24(x)
+ end
+end
+
+def fun_l6_n341(x)
+ if (x < 1)
+ fun_l7_n684(x)
+ else
+ fun_l7_n610(x)
+ end
+end
+
+def fun_l6_n342(x)
+ if (x < 1)
+ fun_l7_n816(x)
+ else
+ fun_l7_n826(x)
+ end
+end
+
+def fun_l6_n343(x)
+ if (x < 1)
+ fun_l7_n479(x)
+ else
+ fun_l7_n444(x)
+ end
+end
+
+def fun_l6_n344(x)
+ if (x < 1)
+ fun_l7_n51(x)
+ else
+ fun_l7_n731(x)
+ end
+end
+
+def fun_l6_n345(x)
+ if (x < 1)
+ fun_l7_n690(x)
+ else
+ fun_l7_n631(x)
+ end
+end
+
+def fun_l6_n346(x)
+ if (x < 1)
+ fun_l7_n254(x)
+ else
+ fun_l7_n353(x)
+ end
+end
+
+def fun_l6_n347(x)
+ if (x < 1)
+ fun_l7_n905(x)
+ else
+ fun_l7_n422(x)
+ end
+end
+
+def fun_l6_n348(x)
+ if (x < 1)
+ fun_l7_n249(x)
+ else
+ fun_l7_n200(x)
+ end
+end
+
+def fun_l6_n349(x)
+ if (x < 1)
+ fun_l7_n11(x)
+ else
+ fun_l7_n550(x)
+ end
+end
+
+def fun_l6_n350(x)
+ if (x < 1)
+ fun_l7_n818(x)
+ else
+ fun_l7_n444(x)
+ end
+end
+
+def fun_l6_n351(x)
+ if (x < 1)
+ fun_l7_n422(x)
+ else
+ fun_l7_n242(x)
+ end
+end
+
+def fun_l6_n352(x)
+ if (x < 1)
+ fun_l7_n653(x)
+ else
+ fun_l7_n912(x)
+ end
+end
+
+def fun_l6_n353(x)
+ if (x < 1)
+ fun_l7_n605(x)
+ else
+ fun_l7_n129(x)
+ end
+end
+
+def fun_l6_n354(x)
+ if (x < 1)
+ fun_l7_n622(x)
+ else
+ fun_l7_n172(x)
+ end
+end
+
+def fun_l6_n355(x)
+ if (x < 1)
+ fun_l7_n568(x)
+ else
+ fun_l7_n26(x)
+ end
+end
+
+def fun_l6_n356(x)
+ if (x < 1)
+ fun_l7_n777(x)
+ else
+ fun_l7_n818(x)
+ end
+end
+
+def fun_l6_n357(x)
+ if (x < 1)
+ fun_l7_n807(x)
+ else
+ fun_l7_n642(x)
+ end
+end
+
+def fun_l6_n358(x)
+ if (x < 1)
+ fun_l7_n745(x)
+ else
+ fun_l7_n866(x)
+ end
+end
+
+def fun_l6_n359(x)
+ if (x < 1)
+ fun_l7_n603(x)
+ else
+ fun_l7_n42(x)
+ end
+end
+
+def fun_l6_n360(x)
+ if (x < 1)
+ fun_l7_n818(x)
+ else
+ fun_l7_n203(x)
+ end
+end
+
+def fun_l6_n361(x)
+ if (x < 1)
+ fun_l7_n451(x)
+ else
+ fun_l7_n719(x)
+ end
+end
+
+def fun_l6_n362(x)
+ if (x < 1)
+ fun_l7_n31(x)
+ else
+ fun_l7_n148(x)
+ end
+end
+
+def fun_l6_n363(x)
+ if (x < 1)
+ fun_l7_n148(x)
+ else
+ fun_l7_n292(x)
+ end
+end
+
+def fun_l6_n364(x)
+ if (x < 1)
+ fun_l7_n175(x)
+ else
+ fun_l7_n849(x)
+ end
+end
+
+def fun_l6_n365(x)
+ if (x < 1)
+ fun_l7_n414(x)
+ else
+ fun_l7_n57(x)
+ end
+end
+
+def fun_l6_n366(x)
+ if (x < 1)
+ fun_l7_n771(x)
+ else
+ fun_l7_n625(x)
+ end
+end
+
+def fun_l6_n367(x)
+ if (x < 1)
+ fun_l7_n232(x)
+ else
+ fun_l7_n108(x)
+ end
+end
+
+def fun_l6_n368(x)
+ if (x < 1)
+ fun_l7_n123(x)
+ else
+ fun_l7_n413(x)
+ end
+end
+
+def fun_l6_n369(x)
+ if (x < 1)
+ fun_l7_n587(x)
+ else
+ fun_l7_n876(x)
+ end
+end
+
+def fun_l6_n370(x)
+ if (x < 1)
+ fun_l7_n750(x)
+ else
+ fun_l7_n531(x)
+ end
+end
+
+def fun_l6_n371(x)
+ if (x < 1)
+ fun_l7_n55(x)
+ else
+ fun_l7_n859(x)
+ end
+end
+
+def fun_l6_n372(x)
+ if (x < 1)
+ fun_l7_n848(x)
+ else
+ fun_l7_n272(x)
+ end
+end
+
+def fun_l6_n373(x)
+ if (x < 1)
+ fun_l7_n480(x)
+ else
+ fun_l7_n976(x)
+ end
+end
+
+def fun_l6_n374(x)
+ if (x < 1)
+ fun_l7_n298(x)
+ else
+ fun_l7_n844(x)
+ end
+end
+
+def fun_l6_n375(x)
+ if (x < 1)
+ fun_l7_n564(x)
+ else
+ fun_l7_n960(x)
+ end
+end
+
+def fun_l6_n376(x)
+ if (x < 1)
+ fun_l7_n684(x)
+ else
+ fun_l7_n181(x)
+ end
+end
+
+def fun_l6_n377(x)
+ if (x < 1)
+ fun_l7_n153(x)
+ else
+ fun_l7_n21(x)
+ end
+end
+
+def fun_l6_n378(x)
+ if (x < 1)
+ fun_l7_n377(x)
+ else
+ fun_l7_n761(x)
+ end
+end
+
+def fun_l6_n379(x)
+ if (x < 1)
+ fun_l7_n737(x)
+ else
+ fun_l7_n605(x)
+ end
+end
+
+def fun_l6_n380(x)
+ if (x < 1)
+ fun_l7_n536(x)
+ else
+ fun_l7_n143(x)
+ end
+end
+
+def fun_l6_n381(x)
+ if (x < 1)
+ fun_l7_n314(x)
+ else
+ fun_l7_n999(x)
+ end
+end
+
+def fun_l6_n382(x)
+ if (x < 1)
+ fun_l7_n995(x)
+ else
+ fun_l7_n564(x)
+ end
+end
+
+def fun_l6_n383(x)
+ if (x < 1)
+ fun_l7_n772(x)
+ else
+ fun_l7_n520(x)
+ end
+end
+
+def fun_l6_n384(x)
+ if (x < 1)
+ fun_l7_n250(x)
+ else
+ fun_l7_n142(x)
+ end
+end
+
+def fun_l6_n385(x)
+ if (x < 1)
+ fun_l7_n993(x)
+ else
+ fun_l7_n987(x)
+ end
+end
+
+def fun_l6_n386(x)
+ if (x < 1)
+ fun_l7_n717(x)
+ else
+ fun_l7_n674(x)
+ end
+end
+
+def fun_l6_n387(x)
+ if (x < 1)
+ fun_l7_n585(x)
+ else
+ fun_l7_n102(x)
+ end
+end
+
+def fun_l6_n388(x)
+ if (x < 1)
+ fun_l7_n779(x)
+ else
+ fun_l7_n110(x)
+ end
+end
+
+def fun_l6_n389(x)
+ if (x < 1)
+ fun_l7_n815(x)
+ else
+ fun_l7_n194(x)
+ end
+end
+
+def fun_l6_n390(x)
+ if (x < 1)
+ fun_l7_n261(x)
+ else
+ fun_l7_n30(x)
+ end
+end
+
+def fun_l6_n391(x)
+ if (x < 1)
+ fun_l7_n746(x)
+ else
+ fun_l7_n207(x)
+ end
+end
+
+def fun_l6_n392(x)
+ if (x < 1)
+ fun_l7_n866(x)
+ else
+ fun_l7_n862(x)
+ end
+end
+
+def fun_l6_n393(x)
+ if (x < 1)
+ fun_l7_n47(x)
+ else
+ fun_l7_n692(x)
+ end
+end
+
+def fun_l6_n394(x)
+ if (x < 1)
+ fun_l7_n140(x)
+ else
+ fun_l7_n104(x)
+ end
+end
+
+def fun_l6_n395(x)
+ if (x < 1)
+ fun_l7_n170(x)
+ else
+ fun_l7_n93(x)
+ end
+end
+
+def fun_l6_n396(x)
+ if (x < 1)
+ fun_l7_n184(x)
+ else
+ fun_l7_n218(x)
+ end
+end
+
+def fun_l6_n397(x)
+ if (x < 1)
+ fun_l7_n268(x)
+ else
+ fun_l7_n278(x)
+ end
+end
+
+def fun_l6_n398(x)
+ if (x < 1)
+ fun_l7_n859(x)
+ else
+ fun_l7_n888(x)
+ end
+end
+
+def fun_l6_n399(x)
+ if (x < 1)
+ fun_l7_n540(x)
+ else
+ fun_l7_n218(x)
+ end
+end
+
+def fun_l6_n400(x)
+ if (x < 1)
+ fun_l7_n148(x)
+ else
+ fun_l7_n220(x)
+ end
+end
+
+def fun_l6_n401(x)
+ if (x < 1)
+ fun_l7_n169(x)
+ else
+ fun_l7_n310(x)
+ end
+end
+
+def fun_l6_n402(x)
+ if (x < 1)
+ fun_l7_n256(x)
+ else
+ fun_l7_n336(x)
+ end
+end
+
+def fun_l6_n403(x)
+ if (x < 1)
+ fun_l7_n257(x)
+ else
+ fun_l7_n536(x)
+ end
+end
+
+def fun_l6_n404(x)
+ if (x < 1)
+ fun_l7_n47(x)
+ else
+ fun_l7_n714(x)
+ end
+end
+
+def fun_l6_n405(x)
+ if (x < 1)
+ fun_l7_n913(x)
+ else
+ fun_l7_n705(x)
+ end
+end
+
+def fun_l6_n406(x)
+ if (x < 1)
+ fun_l7_n816(x)
+ else
+ fun_l7_n168(x)
+ end
+end
+
+def fun_l6_n407(x)
+ if (x < 1)
+ fun_l7_n506(x)
+ else
+ fun_l7_n772(x)
+ end
+end
+
+def fun_l6_n408(x)
+ if (x < 1)
+ fun_l7_n983(x)
+ else
+ fun_l7_n973(x)
+ end
+end
+
+def fun_l6_n409(x)
+ if (x < 1)
+ fun_l7_n752(x)
+ else
+ fun_l7_n543(x)
+ end
+end
+
+def fun_l6_n410(x)
+ if (x < 1)
+ fun_l7_n580(x)
+ else
+ fun_l7_n265(x)
+ end
+end
+
+def fun_l6_n411(x)
+ if (x < 1)
+ fun_l7_n744(x)
+ else
+ fun_l7_n736(x)
+ end
+end
+
+def fun_l6_n412(x)
+ if (x < 1)
+ fun_l7_n950(x)
+ else
+ fun_l7_n5(x)
+ end
+end
+
+def fun_l6_n413(x)
+ if (x < 1)
+ fun_l7_n734(x)
+ else
+ fun_l7_n151(x)
+ end
+end
+
+def fun_l6_n414(x)
+ if (x < 1)
+ fun_l7_n907(x)
+ else
+ fun_l7_n113(x)
+ end
+end
+
+def fun_l6_n415(x)
+ if (x < 1)
+ fun_l7_n621(x)
+ else
+ fun_l7_n272(x)
+ end
+end
+
+def fun_l6_n416(x)
+ if (x < 1)
+ fun_l7_n598(x)
+ else
+ fun_l7_n872(x)
+ end
+end
+
+def fun_l6_n417(x)
+ if (x < 1)
+ fun_l7_n811(x)
+ else
+ fun_l7_n734(x)
+ end
+end
+
+def fun_l6_n418(x)
+ if (x < 1)
+ fun_l7_n18(x)
+ else
+ fun_l7_n405(x)
+ end
+end
+
+def fun_l6_n419(x)
+ if (x < 1)
+ fun_l7_n89(x)
+ else
+ fun_l7_n153(x)
+ end
+end
+
+def fun_l6_n420(x)
+ if (x < 1)
+ fun_l7_n438(x)
+ else
+ fun_l7_n452(x)
+ end
+end
+
+def fun_l6_n421(x)
+ if (x < 1)
+ fun_l7_n601(x)
+ else
+ fun_l7_n777(x)
+ end
+end
+
+def fun_l6_n422(x)
+ if (x < 1)
+ fun_l7_n202(x)
+ else
+ fun_l7_n368(x)
+ end
+end
+
+def fun_l6_n423(x)
+ if (x < 1)
+ fun_l7_n636(x)
+ else
+ fun_l7_n137(x)
+ end
+end
+
+def fun_l6_n424(x)
+ if (x < 1)
+ fun_l7_n222(x)
+ else
+ fun_l7_n838(x)
+ end
+end
+
+def fun_l6_n425(x)
+ if (x < 1)
+ fun_l7_n388(x)
+ else
+ fun_l7_n175(x)
+ end
+end
+
+def fun_l6_n426(x)
+ if (x < 1)
+ fun_l7_n629(x)
+ else
+ fun_l7_n376(x)
+ end
+end
+
+def fun_l6_n427(x)
+ if (x < 1)
+ fun_l7_n293(x)
+ else
+ fun_l7_n517(x)
+ end
+end
+
+def fun_l6_n428(x)
+ if (x < 1)
+ fun_l7_n859(x)
+ else
+ fun_l7_n129(x)
+ end
+end
+
+def fun_l6_n429(x)
+ if (x < 1)
+ fun_l7_n854(x)
+ else
+ fun_l7_n647(x)
+ end
+end
+
+def fun_l6_n430(x)
+ if (x < 1)
+ fun_l7_n117(x)
+ else
+ fun_l7_n291(x)
+ end
+end
+
+def fun_l6_n431(x)
+ if (x < 1)
+ fun_l7_n505(x)
+ else
+ fun_l7_n938(x)
+ end
+end
+
+def fun_l6_n432(x)
+ if (x < 1)
+ fun_l7_n536(x)
+ else
+ fun_l7_n303(x)
+ end
+end
+
+def fun_l6_n433(x)
+ if (x < 1)
+ fun_l7_n443(x)
+ else
+ fun_l7_n945(x)
+ end
+end
+
+def fun_l6_n434(x)
+ if (x < 1)
+ fun_l7_n74(x)
+ else
+ fun_l7_n594(x)
+ end
+end
+
+def fun_l6_n435(x)
+ if (x < 1)
+ fun_l7_n273(x)
+ else
+ fun_l7_n704(x)
+ end
+end
+
+def fun_l6_n436(x)
+ if (x < 1)
+ fun_l7_n451(x)
+ else
+ fun_l7_n371(x)
+ end
+end
+
+def fun_l6_n437(x)
+ if (x < 1)
+ fun_l7_n628(x)
+ else
+ fun_l7_n963(x)
+ end
+end
+
+def fun_l6_n438(x)
+ if (x < 1)
+ fun_l7_n361(x)
+ else
+ fun_l7_n506(x)
+ end
+end
+
+def fun_l6_n439(x)
+ if (x < 1)
+ fun_l7_n8(x)
+ else
+ fun_l7_n721(x)
+ end
+end
+
+def fun_l6_n440(x)
+ if (x < 1)
+ fun_l7_n728(x)
+ else
+ fun_l7_n372(x)
+ end
+end
+
+def fun_l6_n441(x)
+ if (x < 1)
+ fun_l7_n988(x)
+ else
+ fun_l7_n323(x)
+ end
+end
+
+def fun_l6_n442(x)
+ if (x < 1)
+ fun_l7_n5(x)
+ else
+ fun_l7_n56(x)
+ end
+end
+
+def fun_l6_n443(x)
+ if (x < 1)
+ fun_l7_n553(x)
+ else
+ fun_l7_n776(x)
+ end
+end
+
+def fun_l6_n444(x)
+ if (x < 1)
+ fun_l7_n194(x)
+ else
+ fun_l7_n101(x)
+ end
+end
+
+def fun_l6_n445(x)
+ if (x < 1)
+ fun_l7_n613(x)
+ else
+ fun_l7_n977(x)
+ end
+end
+
+def fun_l6_n446(x)
+ if (x < 1)
+ fun_l7_n23(x)
+ else
+ fun_l7_n311(x)
+ end
+end
+
+def fun_l6_n447(x)
+ if (x < 1)
+ fun_l7_n686(x)
+ else
+ fun_l7_n8(x)
+ end
+end
+
+def fun_l6_n448(x)
+ if (x < 1)
+ fun_l7_n812(x)
+ else
+ fun_l7_n229(x)
+ end
+end
+
+def fun_l6_n449(x)
+ if (x < 1)
+ fun_l7_n222(x)
+ else
+ fun_l7_n620(x)
+ end
+end
+
+def fun_l6_n450(x)
+ if (x < 1)
+ fun_l7_n599(x)
+ else
+ fun_l7_n468(x)
+ end
+end
+
+def fun_l6_n451(x)
+ if (x < 1)
+ fun_l7_n347(x)
+ else
+ fun_l7_n504(x)
+ end
+end
+
+def fun_l6_n452(x)
+ if (x < 1)
+ fun_l7_n95(x)
+ else
+ fun_l7_n482(x)
+ end
+end
+
+def fun_l6_n453(x)
+ if (x < 1)
+ fun_l7_n217(x)
+ else
+ fun_l7_n419(x)
+ end
+end
+
+def fun_l6_n454(x)
+ if (x < 1)
+ fun_l7_n28(x)
+ else
+ fun_l7_n609(x)
+ end
+end
+
+def fun_l6_n455(x)
+ if (x < 1)
+ fun_l7_n932(x)
+ else
+ fun_l7_n744(x)
+ end
+end
+
+def fun_l6_n456(x)
+ if (x < 1)
+ fun_l7_n901(x)
+ else
+ fun_l7_n882(x)
+ end
+end
+
+def fun_l6_n457(x)
+ if (x < 1)
+ fun_l7_n368(x)
+ else
+ fun_l7_n441(x)
+ end
+end
+
+def fun_l6_n458(x)
+ if (x < 1)
+ fun_l7_n49(x)
+ else
+ fun_l7_n135(x)
+ end
+end
+
+def fun_l6_n459(x)
+ if (x < 1)
+ fun_l7_n228(x)
+ else
+ fun_l7_n899(x)
+ end
+end
+
+def fun_l6_n460(x)
+ if (x < 1)
+ fun_l7_n90(x)
+ else
+ fun_l7_n190(x)
+ end
+end
+
+def fun_l6_n461(x)
+ if (x < 1)
+ fun_l7_n892(x)
+ else
+ fun_l7_n670(x)
+ end
+end
+
+def fun_l6_n462(x)
+ if (x < 1)
+ fun_l7_n961(x)
+ else
+ fun_l7_n769(x)
+ end
+end
+
+def fun_l6_n463(x)
+ if (x < 1)
+ fun_l7_n828(x)
+ else
+ fun_l7_n219(x)
+ end
+end
+
+def fun_l6_n464(x)
+ if (x < 1)
+ fun_l7_n17(x)
+ else
+ fun_l7_n345(x)
+ end
+end
+
+def fun_l6_n465(x)
+ if (x < 1)
+ fun_l7_n412(x)
+ else
+ fun_l7_n498(x)
+ end
+end
+
+def fun_l6_n466(x)
+ if (x < 1)
+ fun_l7_n659(x)
+ else
+ fun_l7_n781(x)
+ end
+end
+
+def fun_l6_n467(x)
+ if (x < 1)
+ fun_l7_n528(x)
+ else
+ fun_l7_n138(x)
+ end
+end
+
+def fun_l6_n468(x)
+ if (x < 1)
+ fun_l7_n272(x)
+ else
+ fun_l7_n512(x)
+ end
+end
+
+def fun_l6_n469(x)
+ if (x < 1)
+ fun_l7_n458(x)
+ else
+ fun_l7_n430(x)
+ end
+end
+
+def fun_l6_n470(x)
+ if (x < 1)
+ fun_l7_n192(x)
+ else
+ fun_l7_n755(x)
+ end
+end
+
+def fun_l6_n471(x)
+ if (x < 1)
+ fun_l7_n188(x)
+ else
+ fun_l7_n827(x)
+ end
+end
+
+def fun_l6_n472(x)
+ if (x < 1)
+ fun_l7_n467(x)
+ else
+ fun_l7_n167(x)
+ end
+end
+
+def fun_l6_n473(x)
+ if (x < 1)
+ fun_l7_n648(x)
+ else
+ fun_l7_n823(x)
+ end
+end
+
+def fun_l6_n474(x)
+ if (x < 1)
+ fun_l7_n788(x)
+ else
+ fun_l7_n120(x)
+ end
+end
+
+def fun_l6_n475(x)
+ if (x < 1)
+ fun_l7_n199(x)
+ else
+ fun_l7_n25(x)
+ end
+end
+
+def fun_l6_n476(x)
+ if (x < 1)
+ fun_l7_n812(x)
+ else
+ fun_l7_n258(x)
+ end
+end
+
+def fun_l6_n477(x)
+ if (x < 1)
+ fun_l7_n525(x)
+ else
+ fun_l7_n140(x)
+ end
+end
+
+def fun_l6_n478(x)
+ if (x < 1)
+ fun_l7_n728(x)
+ else
+ fun_l7_n157(x)
+ end
+end
+
+def fun_l6_n479(x)
+ if (x < 1)
+ fun_l7_n527(x)
+ else
+ fun_l7_n172(x)
+ end
+end
+
+def fun_l6_n480(x)
+ if (x < 1)
+ fun_l7_n295(x)
+ else
+ fun_l7_n230(x)
+ end
+end
+
+def fun_l6_n481(x)
+ if (x < 1)
+ fun_l7_n619(x)
+ else
+ fun_l7_n495(x)
+ end
+end
+
+def fun_l6_n482(x)
+ if (x < 1)
+ fun_l7_n412(x)
+ else
+ fun_l7_n30(x)
+ end
+end
+
+def fun_l6_n483(x)
+ if (x < 1)
+ fun_l7_n236(x)
+ else
+ fun_l7_n842(x)
+ end
+end
+
+def fun_l6_n484(x)
+ if (x < 1)
+ fun_l7_n59(x)
+ else
+ fun_l7_n341(x)
+ end
+end
+
+def fun_l6_n485(x)
+ if (x < 1)
+ fun_l7_n841(x)
+ else
+ fun_l7_n67(x)
+ end
+end
+
+def fun_l6_n486(x)
+ if (x < 1)
+ fun_l7_n299(x)
+ else
+ fun_l7_n144(x)
+ end
+end
+
+def fun_l6_n487(x)
+ if (x < 1)
+ fun_l7_n672(x)
+ else
+ fun_l7_n682(x)
+ end
+end
+
+def fun_l6_n488(x)
+ if (x < 1)
+ fun_l7_n110(x)
+ else
+ fun_l7_n231(x)
+ end
+end
+
+def fun_l6_n489(x)
+ if (x < 1)
+ fun_l7_n984(x)
+ else
+ fun_l7_n583(x)
+ end
+end
+
+def fun_l6_n490(x)
+ if (x < 1)
+ fun_l7_n354(x)
+ else
+ fun_l7_n90(x)
+ end
+end
+
+def fun_l6_n491(x)
+ if (x < 1)
+ fun_l7_n541(x)
+ else
+ fun_l7_n583(x)
+ end
+end
+
+def fun_l6_n492(x)
+ if (x < 1)
+ fun_l7_n35(x)
+ else
+ fun_l7_n209(x)
+ end
+end
+
+def fun_l6_n493(x)
+ if (x < 1)
+ fun_l7_n547(x)
+ else
+ fun_l7_n982(x)
+ end
+end
+
+def fun_l6_n494(x)
+ if (x < 1)
+ fun_l7_n3(x)
+ else
+ fun_l7_n230(x)
+ end
+end
+
+def fun_l6_n495(x)
+ if (x < 1)
+ fun_l7_n777(x)
+ else
+ fun_l7_n140(x)
+ end
+end
+
+def fun_l6_n496(x)
+ if (x < 1)
+ fun_l7_n927(x)
+ else
+ fun_l7_n934(x)
+ end
+end
+
+def fun_l6_n497(x)
+ if (x < 1)
+ fun_l7_n201(x)
+ else
+ fun_l7_n342(x)
+ end
+end
+
+def fun_l6_n498(x)
+ if (x < 1)
+ fun_l7_n973(x)
+ else
+ fun_l7_n106(x)
+ end
+end
+
+def fun_l6_n499(x)
+ if (x < 1)
+ fun_l7_n373(x)
+ else
+ fun_l7_n558(x)
+ end
+end
+
+def fun_l6_n500(x)
+ if (x < 1)
+ fun_l7_n835(x)
+ else
+ fun_l7_n981(x)
+ end
+end
+
+def fun_l6_n501(x)
+ if (x < 1)
+ fun_l7_n135(x)
+ else
+ fun_l7_n997(x)
+ end
+end
+
+def fun_l6_n502(x)
+ if (x < 1)
+ fun_l7_n129(x)
+ else
+ fun_l7_n530(x)
+ end
+end
+
+def fun_l6_n503(x)
+ if (x < 1)
+ fun_l7_n924(x)
+ else
+ fun_l7_n611(x)
+ end
+end
+
+def fun_l6_n504(x)
+ if (x < 1)
+ fun_l7_n125(x)
+ else
+ fun_l7_n26(x)
+ end
+end
+
+def fun_l6_n505(x)
+ if (x < 1)
+ fun_l7_n592(x)
+ else
+ fun_l7_n50(x)
+ end
+end
+
+def fun_l6_n506(x)
+ if (x < 1)
+ fun_l7_n220(x)
+ else
+ fun_l7_n787(x)
+ end
+end
+
+def fun_l6_n507(x)
+ if (x < 1)
+ fun_l7_n908(x)
+ else
+ fun_l7_n15(x)
+ end
+end
+
+def fun_l6_n508(x)
+ if (x < 1)
+ fun_l7_n475(x)
+ else
+ fun_l7_n899(x)
+ end
+end
+
+def fun_l6_n509(x)
+ if (x < 1)
+ fun_l7_n704(x)
+ else
+ fun_l7_n559(x)
+ end
+end
+
+def fun_l6_n510(x)
+ if (x < 1)
+ fun_l7_n625(x)
+ else
+ fun_l7_n73(x)
+ end
+end
+
+def fun_l6_n511(x)
+ if (x < 1)
+ fun_l7_n234(x)
+ else
+ fun_l7_n166(x)
+ end
+end
+
+def fun_l6_n512(x)
+ if (x < 1)
+ fun_l7_n502(x)
+ else
+ fun_l7_n304(x)
+ end
+end
+
+def fun_l6_n513(x)
+ if (x < 1)
+ fun_l7_n686(x)
+ else
+ fun_l7_n643(x)
+ end
+end
+
+def fun_l6_n514(x)
+ if (x < 1)
+ fun_l7_n804(x)
+ else
+ fun_l7_n116(x)
+ end
+end
+
+def fun_l6_n515(x)
+ if (x < 1)
+ fun_l7_n365(x)
+ else
+ fun_l7_n102(x)
+ end
+end
+
+def fun_l6_n516(x)
+ if (x < 1)
+ fun_l7_n563(x)
+ else
+ fun_l7_n161(x)
+ end
+end
+
+def fun_l6_n517(x)
+ if (x < 1)
+ fun_l7_n695(x)
+ else
+ fun_l7_n936(x)
+ end
+end
+
+def fun_l6_n518(x)
+ if (x < 1)
+ fun_l7_n220(x)
+ else
+ fun_l7_n884(x)
+ end
+end
+
+def fun_l6_n519(x)
+ if (x < 1)
+ fun_l7_n602(x)
+ else
+ fun_l7_n144(x)
+ end
+end
+
+def fun_l6_n520(x)
+ if (x < 1)
+ fun_l7_n409(x)
+ else
+ fun_l7_n861(x)
+ end
+end
+
+def fun_l6_n521(x)
+ if (x < 1)
+ fun_l7_n258(x)
+ else
+ fun_l7_n768(x)
+ end
+end
+
+def fun_l6_n522(x)
+ if (x < 1)
+ fun_l7_n878(x)
+ else
+ fun_l7_n503(x)
+ end
+end
+
+def fun_l6_n523(x)
+ if (x < 1)
+ fun_l7_n570(x)
+ else
+ fun_l7_n675(x)
+ end
+end
+
+def fun_l6_n524(x)
+ if (x < 1)
+ fun_l7_n741(x)
+ else
+ fun_l7_n748(x)
+ end
+end
+
+def fun_l6_n525(x)
+ if (x < 1)
+ fun_l7_n729(x)
+ else
+ fun_l7_n272(x)
+ end
+end
+
+def fun_l6_n526(x)
+ if (x < 1)
+ fun_l7_n734(x)
+ else
+ fun_l7_n70(x)
+ end
+end
+
+def fun_l6_n527(x)
+ if (x < 1)
+ fun_l7_n235(x)
+ else
+ fun_l7_n606(x)
+ end
+end
+
+def fun_l6_n528(x)
+ if (x < 1)
+ fun_l7_n67(x)
+ else
+ fun_l7_n399(x)
+ end
+end
+
+def fun_l6_n529(x)
+ if (x < 1)
+ fun_l7_n706(x)
+ else
+ fun_l7_n150(x)
+ end
+end
+
+def fun_l6_n530(x)
+ if (x < 1)
+ fun_l7_n35(x)
+ else
+ fun_l7_n951(x)
+ end
+end
+
+def fun_l6_n531(x)
+ if (x < 1)
+ fun_l7_n517(x)
+ else
+ fun_l7_n329(x)
+ end
+end
+
+def fun_l6_n532(x)
+ if (x < 1)
+ fun_l7_n392(x)
+ else
+ fun_l7_n970(x)
+ end
+end
+
+def fun_l6_n533(x)
+ if (x < 1)
+ fun_l7_n466(x)
+ else
+ fun_l7_n260(x)
+ end
+end
+
+def fun_l6_n534(x)
+ if (x < 1)
+ fun_l7_n957(x)
+ else
+ fun_l7_n417(x)
+ end
+end
+
+def fun_l6_n535(x)
+ if (x < 1)
+ fun_l7_n184(x)
+ else
+ fun_l7_n188(x)
+ end
+end
+
+def fun_l6_n536(x)
+ if (x < 1)
+ fun_l7_n298(x)
+ else
+ fun_l7_n192(x)
+ end
+end
+
+def fun_l6_n537(x)
+ if (x < 1)
+ fun_l7_n28(x)
+ else
+ fun_l7_n309(x)
+ end
+end
+
+def fun_l6_n538(x)
+ if (x < 1)
+ fun_l7_n801(x)
+ else
+ fun_l7_n84(x)
+ end
+end
+
+def fun_l6_n539(x)
+ if (x < 1)
+ fun_l7_n894(x)
+ else
+ fun_l7_n973(x)
+ end
+end
+
+def fun_l6_n540(x)
+ if (x < 1)
+ fun_l7_n895(x)
+ else
+ fun_l7_n231(x)
+ end
+end
+
+def fun_l6_n541(x)
+ if (x < 1)
+ fun_l7_n728(x)
+ else
+ fun_l7_n28(x)
+ end
+end
+
+def fun_l6_n542(x)
+ if (x < 1)
+ fun_l7_n655(x)
+ else
+ fun_l7_n845(x)
+ end
+end
+
+def fun_l6_n543(x)
+ if (x < 1)
+ fun_l7_n952(x)
+ else
+ fun_l7_n484(x)
+ end
+end
+
+def fun_l6_n544(x)
+ if (x < 1)
+ fun_l7_n835(x)
+ else
+ fun_l7_n187(x)
+ end
+end
+
+def fun_l6_n545(x)
+ if (x < 1)
+ fun_l7_n748(x)
+ else
+ fun_l7_n460(x)
+ end
+end
+
+def fun_l6_n546(x)
+ if (x < 1)
+ fun_l7_n734(x)
+ else
+ fun_l7_n165(x)
+ end
+end
+
+def fun_l6_n547(x)
+ if (x < 1)
+ fun_l7_n499(x)
+ else
+ fun_l7_n133(x)
+ end
+end
+
+def fun_l6_n548(x)
+ if (x < 1)
+ fun_l7_n751(x)
+ else
+ fun_l7_n731(x)
+ end
+end
+
+def fun_l6_n549(x)
+ if (x < 1)
+ fun_l7_n269(x)
+ else
+ fun_l7_n690(x)
+ end
+end
+
+def fun_l6_n550(x)
+ if (x < 1)
+ fun_l7_n320(x)
+ else
+ fun_l7_n308(x)
+ end
+end
+
+def fun_l6_n551(x)
+ if (x < 1)
+ fun_l7_n182(x)
+ else
+ fun_l7_n144(x)
+ end
+end
+
+def fun_l6_n552(x)
+ if (x < 1)
+ fun_l7_n683(x)
+ else
+ fun_l7_n691(x)
+ end
+end
+
+def fun_l6_n553(x)
+ if (x < 1)
+ fun_l7_n502(x)
+ else
+ fun_l7_n520(x)
+ end
+end
+
+def fun_l6_n554(x)
+ if (x < 1)
+ fun_l7_n60(x)
+ else
+ fun_l7_n551(x)
+ end
+end
+
+def fun_l6_n555(x)
+ if (x < 1)
+ fun_l7_n185(x)
+ else
+ fun_l7_n87(x)
+ end
+end
+
+def fun_l6_n556(x)
+ if (x < 1)
+ fun_l7_n140(x)
+ else
+ fun_l7_n725(x)
+ end
+end
+
+def fun_l6_n557(x)
+ if (x < 1)
+ fun_l7_n76(x)
+ else
+ fun_l7_n501(x)
+ end
+end
+
+def fun_l6_n558(x)
+ if (x < 1)
+ fun_l7_n553(x)
+ else
+ fun_l7_n146(x)
+ end
+end
+
+def fun_l6_n559(x)
+ if (x < 1)
+ fun_l7_n535(x)
+ else
+ fun_l7_n17(x)
+ end
+end
+
+def fun_l6_n560(x)
+ if (x < 1)
+ fun_l7_n566(x)
+ else
+ fun_l7_n773(x)
+ end
+end
+
+def fun_l6_n561(x)
+ if (x < 1)
+ fun_l7_n358(x)
+ else
+ fun_l7_n951(x)
+ end
+end
+
+def fun_l6_n562(x)
+ if (x < 1)
+ fun_l7_n492(x)
+ else
+ fun_l7_n478(x)
+ end
+end
+
+def fun_l6_n563(x)
+ if (x < 1)
+ fun_l7_n796(x)
+ else
+ fun_l7_n906(x)
+ end
+end
+
+def fun_l6_n564(x)
+ if (x < 1)
+ fun_l7_n751(x)
+ else
+ fun_l7_n546(x)
+ end
+end
+
+def fun_l6_n565(x)
+ if (x < 1)
+ fun_l7_n179(x)
+ else
+ fun_l7_n49(x)
+ end
+end
+
+def fun_l6_n566(x)
+ if (x < 1)
+ fun_l7_n550(x)
+ else
+ fun_l7_n440(x)
+ end
+end
+
+def fun_l6_n567(x)
+ if (x < 1)
+ fun_l7_n715(x)
+ else
+ fun_l7_n862(x)
+ end
+end
+
+def fun_l6_n568(x)
+ if (x < 1)
+ fun_l7_n124(x)
+ else
+ fun_l7_n191(x)
+ end
+end
+
+def fun_l6_n569(x)
+ if (x < 1)
+ fun_l7_n709(x)
+ else
+ fun_l7_n704(x)
+ end
+end
+
+def fun_l6_n570(x)
+ if (x < 1)
+ fun_l7_n864(x)
+ else
+ fun_l7_n84(x)
+ end
+end
+
+def fun_l6_n571(x)
+ if (x < 1)
+ fun_l7_n302(x)
+ else
+ fun_l7_n424(x)
+ end
+end
+
+def fun_l6_n572(x)
+ if (x < 1)
+ fun_l7_n488(x)
+ else
+ fun_l7_n570(x)
+ end
+end
+
+def fun_l6_n573(x)
+ if (x < 1)
+ fun_l7_n590(x)
+ else
+ fun_l7_n560(x)
+ end
+end
+
+def fun_l6_n574(x)
+ if (x < 1)
+ fun_l7_n38(x)
+ else
+ fun_l7_n444(x)
+ end
+end
+
+def fun_l6_n575(x)
+ if (x < 1)
+ fun_l7_n401(x)
+ else
+ fun_l7_n359(x)
+ end
+end
+
+def fun_l6_n576(x)
+ if (x < 1)
+ fun_l7_n518(x)
+ else
+ fun_l7_n240(x)
+ end
+end
+
+def fun_l6_n577(x)
+ if (x < 1)
+ fun_l7_n247(x)
+ else
+ fun_l7_n540(x)
+ end
+end
+
+def fun_l6_n578(x)
+ if (x < 1)
+ fun_l7_n223(x)
+ else
+ fun_l7_n729(x)
+ end
+end
+
+def fun_l6_n579(x)
+ if (x < 1)
+ fun_l7_n524(x)
+ else
+ fun_l7_n684(x)
+ end
+end
+
+def fun_l6_n580(x)
+ if (x < 1)
+ fun_l7_n113(x)
+ else
+ fun_l7_n865(x)
+ end
+end
+
+def fun_l6_n581(x)
+ if (x < 1)
+ fun_l7_n920(x)
+ else
+ fun_l7_n238(x)
+ end
+end
+
+def fun_l6_n582(x)
+ if (x < 1)
+ fun_l7_n78(x)
+ else
+ fun_l7_n85(x)
+ end
+end
+
+def fun_l6_n583(x)
+ if (x < 1)
+ fun_l7_n713(x)
+ else
+ fun_l7_n110(x)
+ end
+end
+
+def fun_l6_n584(x)
+ if (x < 1)
+ fun_l7_n170(x)
+ else
+ fun_l7_n183(x)
+ end
+end
+
+def fun_l6_n585(x)
+ if (x < 1)
+ fun_l7_n100(x)
+ else
+ fun_l7_n215(x)
+ end
+end
+
+def fun_l6_n586(x)
+ if (x < 1)
+ fun_l7_n126(x)
+ else
+ fun_l7_n136(x)
+ end
+end
+
+def fun_l6_n587(x)
+ if (x < 1)
+ fun_l7_n592(x)
+ else
+ fun_l7_n502(x)
+ end
+end
+
+def fun_l6_n588(x)
+ if (x < 1)
+ fun_l7_n155(x)
+ else
+ fun_l7_n591(x)
+ end
+end
+
+def fun_l6_n589(x)
+ if (x < 1)
+ fun_l7_n840(x)
+ else
+ fun_l7_n91(x)
+ end
+end
+
+def fun_l6_n590(x)
+ if (x < 1)
+ fun_l7_n146(x)
+ else
+ fun_l7_n849(x)
+ end
+end
+
+def fun_l6_n591(x)
+ if (x < 1)
+ fun_l7_n161(x)
+ else
+ fun_l7_n884(x)
+ end
+end
+
+def fun_l6_n592(x)
+ if (x < 1)
+ fun_l7_n994(x)
+ else
+ fun_l7_n341(x)
+ end
+end
+
+def fun_l6_n593(x)
+ if (x < 1)
+ fun_l7_n974(x)
+ else
+ fun_l7_n355(x)
+ end
+end
+
+def fun_l6_n594(x)
+ if (x < 1)
+ fun_l7_n726(x)
+ else
+ fun_l7_n266(x)
+ end
+end
+
+def fun_l6_n595(x)
+ if (x < 1)
+ fun_l7_n142(x)
+ else
+ fun_l7_n154(x)
+ end
+end
+
+def fun_l6_n596(x)
+ if (x < 1)
+ fun_l7_n287(x)
+ else
+ fun_l7_n155(x)
+ end
+end
+
+def fun_l6_n597(x)
+ if (x < 1)
+ fun_l7_n594(x)
+ else
+ fun_l7_n424(x)
+ end
+end
+
+def fun_l6_n598(x)
+ if (x < 1)
+ fun_l7_n227(x)
+ else
+ fun_l7_n445(x)
+ end
+end
+
+def fun_l6_n599(x)
+ if (x < 1)
+ fun_l7_n660(x)
+ else
+ fun_l7_n464(x)
+ end
+end
+
+def fun_l6_n600(x)
+ if (x < 1)
+ fun_l7_n930(x)
+ else
+ fun_l7_n520(x)
+ end
+end
+
+def fun_l6_n601(x)
+ if (x < 1)
+ fun_l7_n714(x)
+ else
+ fun_l7_n984(x)
+ end
+end
+
+def fun_l6_n602(x)
+ if (x < 1)
+ fun_l7_n226(x)
+ else
+ fun_l7_n316(x)
+ end
+end
+
+def fun_l6_n603(x)
+ if (x < 1)
+ fun_l7_n104(x)
+ else
+ fun_l7_n563(x)
+ end
+end
+
+def fun_l6_n604(x)
+ if (x < 1)
+ fun_l7_n149(x)
+ else
+ fun_l7_n921(x)
+ end
+end
+
+def fun_l6_n605(x)
+ if (x < 1)
+ fun_l7_n195(x)
+ else
+ fun_l7_n696(x)
+ end
+end
+
+def fun_l6_n606(x)
+ if (x < 1)
+ fun_l7_n337(x)
+ else
+ fun_l7_n347(x)
+ end
+end
+
+def fun_l6_n607(x)
+ if (x < 1)
+ fun_l7_n657(x)
+ else
+ fun_l7_n67(x)
+ end
+end
+
+def fun_l6_n608(x)
+ if (x < 1)
+ fun_l7_n796(x)
+ else
+ fun_l7_n589(x)
+ end
+end
+
+def fun_l6_n609(x)
+ if (x < 1)
+ fun_l7_n230(x)
+ else
+ fun_l7_n769(x)
+ end
+end
+
+def fun_l6_n610(x)
+ if (x < 1)
+ fun_l7_n427(x)
+ else
+ fun_l7_n892(x)
+ end
+end
+
+def fun_l6_n611(x)
+ if (x < 1)
+ fun_l7_n741(x)
+ else
+ fun_l7_n567(x)
+ end
+end
+
+def fun_l6_n612(x)
+ if (x < 1)
+ fun_l7_n471(x)
+ else
+ fun_l7_n824(x)
+ end
+end
+
+def fun_l6_n613(x)
+ if (x < 1)
+ fun_l7_n522(x)
+ else
+ fun_l7_n694(x)
+ end
+end
+
+def fun_l6_n614(x)
+ if (x < 1)
+ fun_l7_n95(x)
+ else
+ fun_l7_n84(x)
+ end
+end
+
+def fun_l6_n615(x)
+ if (x < 1)
+ fun_l7_n632(x)
+ else
+ fun_l7_n590(x)
+ end
+end
+
+def fun_l6_n616(x)
+ if (x < 1)
+ fun_l7_n372(x)
+ else
+ fun_l7_n659(x)
+ end
+end
+
+def fun_l6_n617(x)
+ if (x < 1)
+ fun_l7_n547(x)
+ else
+ fun_l7_n321(x)
+ end
+end
+
+def fun_l6_n618(x)
+ if (x < 1)
+ fun_l7_n53(x)
+ else
+ fun_l7_n723(x)
+ end
+end
+
+def fun_l6_n619(x)
+ if (x < 1)
+ fun_l7_n653(x)
+ else
+ fun_l7_n740(x)
+ end
+end
+
+def fun_l6_n620(x)
+ if (x < 1)
+ fun_l7_n16(x)
+ else
+ fun_l7_n228(x)
+ end
+end
+
+def fun_l6_n621(x)
+ if (x < 1)
+ fun_l7_n655(x)
+ else
+ fun_l7_n701(x)
+ end
+end
+
+def fun_l6_n622(x)
+ if (x < 1)
+ fun_l7_n212(x)
+ else
+ fun_l7_n692(x)
+ end
+end
+
+def fun_l6_n623(x)
+ if (x < 1)
+ fun_l7_n959(x)
+ else
+ fun_l7_n327(x)
+ end
+end
+
+def fun_l6_n624(x)
+ if (x < 1)
+ fun_l7_n607(x)
+ else
+ fun_l7_n168(x)
+ end
+end
+
+def fun_l6_n625(x)
+ if (x < 1)
+ fun_l7_n229(x)
+ else
+ fun_l7_n584(x)
+ end
+end
+
+def fun_l6_n626(x)
+ if (x < 1)
+ fun_l7_n776(x)
+ else
+ fun_l7_n284(x)
+ end
+end
+
+def fun_l6_n627(x)
+ if (x < 1)
+ fun_l7_n770(x)
+ else
+ fun_l7_n169(x)
+ end
+end
+
+def fun_l6_n628(x)
+ if (x < 1)
+ fun_l7_n550(x)
+ else
+ fun_l7_n918(x)
+ end
+end
+
+def fun_l6_n629(x)
+ if (x < 1)
+ fun_l7_n450(x)
+ else
+ fun_l7_n654(x)
+ end
+end
+
+def fun_l6_n630(x)
+ if (x < 1)
+ fun_l7_n968(x)
+ else
+ fun_l7_n770(x)
+ end
+end
+
+def fun_l6_n631(x)
+ if (x < 1)
+ fun_l7_n118(x)
+ else
+ fun_l7_n411(x)
+ end
+end
+
+def fun_l6_n632(x)
+ if (x < 1)
+ fun_l7_n352(x)
+ else
+ fun_l7_n840(x)
+ end
+end
+
+def fun_l6_n633(x)
+ if (x < 1)
+ fun_l7_n588(x)
+ else
+ fun_l7_n810(x)
+ end
+end
+
+def fun_l6_n634(x)
+ if (x < 1)
+ fun_l7_n955(x)
+ else
+ fun_l7_n709(x)
+ end
+end
+
+def fun_l6_n635(x)
+ if (x < 1)
+ fun_l7_n137(x)
+ else
+ fun_l7_n239(x)
+ end
+end
+
+def fun_l6_n636(x)
+ if (x < 1)
+ fun_l7_n955(x)
+ else
+ fun_l7_n87(x)
+ end
+end
+
+def fun_l6_n637(x)
+ if (x < 1)
+ fun_l7_n479(x)
+ else
+ fun_l7_n91(x)
+ end
+end
+
+def fun_l6_n638(x)
+ if (x < 1)
+ fun_l7_n366(x)
+ else
+ fun_l7_n388(x)
+ end
+end
+
+def fun_l6_n639(x)
+ if (x < 1)
+ fun_l7_n90(x)
+ else
+ fun_l7_n971(x)
+ end
+end
+
+def fun_l6_n640(x)
+ if (x < 1)
+ fun_l7_n330(x)
+ else
+ fun_l7_n415(x)
+ end
+end
+
+def fun_l6_n641(x)
+ if (x < 1)
+ fun_l7_n233(x)
+ else
+ fun_l7_n653(x)
+ end
+end
+
+def fun_l6_n642(x)
+ if (x < 1)
+ fun_l7_n106(x)
+ else
+ fun_l7_n440(x)
+ end
+end
+
+def fun_l6_n643(x)
+ if (x < 1)
+ fun_l7_n156(x)
+ else
+ fun_l7_n818(x)
+ end
+end
+
+def fun_l6_n644(x)
+ if (x < 1)
+ fun_l7_n68(x)
+ else
+ fun_l7_n763(x)
+ end
+end
+
+def fun_l6_n645(x)
+ if (x < 1)
+ fun_l7_n858(x)
+ else
+ fun_l7_n465(x)
+ end
+end
+
+def fun_l6_n646(x)
+ if (x < 1)
+ fun_l7_n886(x)
+ else
+ fun_l7_n86(x)
+ end
+end
+
+def fun_l6_n647(x)
+ if (x < 1)
+ fun_l7_n689(x)
+ else
+ fun_l7_n964(x)
+ end
+end
+
+def fun_l6_n648(x)
+ if (x < 1)
+ fun_l7_n748(x)
+ else
+ fun_l7_n214(x)
+ end
+end
+
+def fun_l6_n649(x)
+ if (x < 1)
+ fun_l7_n830(x)
+ else
+ fun_l7_n567(x)
+ end
+end
+
+def fun_l6_n650(x)
+ if (x < 1)
+ fun_l7_n907(x)
+ else
+ fun_l7_n957(x)
+ end
+end
+
+def fun_l6_n651(x)
+ if (x < 1)
+ fun_l7_n242(x)
+ else
+ fun_l7_n239(x)
+ end
+end
+
+def fun_l6_n652(x)
+ if (x < 1)
+ fun_l7_n347(x)
+ else
+ fun_l7_n63(x)
+ end
+end
+
+def fun_l6_n653(x)
+ if (x < 1)
+ fun_l7_n480(x)
+ else
+ fun_l7_n441(x)
+ end
+end
+
+def fun_l6_n654(x)
+ if (x < 1)
+ fun_l7_n304(x)
+ else
+ fun_l7_n690(x)
+ end
+end
+
+def fun_l6_n655(x)
+ if (x < 1)
+ fun_l7_n511(x)
+ else
+ fun_l7_n710(x)
+ end
+end
+
+def fun_l6_n656(x)
+ if (x < 1)
+ fun_l7_n202(x)
+ else
+ fun_l7_n48(x)
+ end
+end
+
+def fun_l6_n657(x)
+ if (x < 1)
+ fun_l7_n232(x)
+ else
+ fun_l7_n919(x)
+ end
+end
+
+def fun_l6_n658(x)
+ if (x < 1)
+ fun_l7_n627(x)
+ else
+ fun_l7_n620(x)
+ end
+end
+
+def fun_l6_n659(x)
+ if (x < 1)
+ fun_l7_n716(x)
+ else
+ fun_l7_n116(x)
+ end
+end
+
+def fun_l6_n660(x)
+ if (x < 1)
+ fun_l7_n105(x)
+ else
+ fun_l7_n536(x)
+ end
+end
+
+def fun_l6_n661(x)
+ if (x < 1)
+ fun_l7_n376(x)
+ else
+ fun_l7_n965(x)
+ end
+end
+
+def fun_l6_n662(x)
+ if (x < 1)
+ fun_l7_n417(x)
+ else
+ fun_l7_n726(x)
+ end
+end
+
+def fun_l6_n663(x)
+ if (x < 1)
+ fun_l7_n567(x)
+ else
+ fun_l7_n948(x)
+ end
+end
+
+def fun_l6_n664(x)
+ if (x < 1)
+ fun_l7_n640(x)
+ else
+ fun_l7_n910(x)
+ end
+end
+
+def fun_l6_n665(x)
+ if (x < 1)
+ fun_l7_n874(x)
+ else
+ fun_l7_n314(x)
+ end
+end
+
+def fun_l6_n666(x)
+ if (x < 1)
+ fun_l7_n946(x)
+ else
+ fun_l7_n101(x)
+ end
+end
+
+def fun_l6_n667(x)
+ if (x < 1)
+ fun_l7_n960(x)
+ else
+ fun_l7_n199(x)
+ end
+end
+
+def fun_l6_n668(x)
+ if (x < 1)
+ fun_l7_n389(x)
+ else
+ fun_l7_n976(x)
+ end
+end
+
+def fun_l6_n669(x)
+ if (x < 1)
+ fun_l7_n31(x)
+ else
+ fun_l7_n903(x)
+ end
+end
+
+def fun_l6_n670(x)
+ if (x < 1)
+ fun_l7_n288(x)
+ else
+ fun_l7_n115(x)
+ end
+end
+
+def fun_l6_n671(x)
+ if (x < 1)
+ fun_l7_n418(x)
+ else
+ fun_l7_n249(x)
+ end
+end
+
+def fun_l6_n672(x)
+ if (x < 1)
+ fun_l7_n733(x)
+ else
+ fun_l7_n815(x)
+ end
+end
+
+def fun_l6_n673(x)
+ if (x < 1)
+ fun_l7_n227(x)
+ else
+ fun_l7_n23(x)
+ end
+end
+
+def fun_l6_n674(x)
+ if (x < 1)
+ fun_l7_n936(x)
+ else
+ fun_l7_n386(x)
+ end
+end
+
+def fun_l6_n675(x)
+ if (x < 1)
+ fun_l7_n638(x)
+ else
+ fun_l7_n39(x)
+ end
+end
+
+def fun_l6_n676(x)
+ if (x < 1)
+ fun_l7_n304(x)
+ else
+ fun_l7_n35(x)
+ end
+end
+
+def fun_l6_n677(x)
+ if (x < 1)
+ fun_l7_n676(x)
+ else
+ fun_l7_n443(x)
+ end
+end
+
+def fun_l6_n678(x)
+ if (x < 1)
+ fun_l7_n648(x)
+ else
+ fun_l7_n453(x)
+ end
+end
+
+def fun_l6_n679(x)
+ if (x < 1)
+ fun_l7_n15(x)
+ else
+ fun_l7_n220(x)
+ end
+end
+
+def fun_l6_n680(x)
+ if (x < 1)
+ fun_l7_n201(x)
+ else
+ fun_l7_n842(x)
+ end
+end
+
+def fun_l6_n681(x)
+ if (x < 1)
+ fun_l7_n67(x)
+ else
+ fun_l7_n73(x)
+ end
+end
+
+def fun_l6_n682(x)
+ if (x < 1)
+ fun_l7_n280(x)
+ else
+ fun_l7_n370(x)
+ end
+end
+
+def fun_l6_n683(x)
+ if (x < 1)
+ fun_l7_n822(x)
+ else
+ fun_l7_n27(x)
+ end
+end
+
+def fun_l6_n684(x)
+ if (x < 1)
+ fun_l7_n720(x)
+ else
+ fun_l7_n651(x)
+ end
+end
+
+def fun_l6_n685(x)
+ if (x < 1)
+ fun_l7_n414(x)
+ else
+ fun_l7_n257(x)
+ end
+end
+
+def fun_l6_n686(x)
+ if (x < 1)
+ fun_l7_n477(x)
+ else
+ fun_l7_n883(x)
+ end
+end
+
+def fun_l6_n687(x)
+ if (x < 1)
+ fun_l7_n467(x)
+ else
+ fun_l7_n739(x)
+ end
+end
+
+def fun_l6_n688(x)
+ if (x < 1)
+ fun_l7_n421(x)
+ else
+ fun_l7_n271(x)
+ end
+end
+
+def fun_l6_n689(x)
+ if (x < 1)
+ fun_l7_n444(x)
+ else
+ fun_l7_n601(x)
+ end
+end
+
+def fun_l6_n690(x)
+ if (x < 1)
+ fun_l7_n104(x)
+ else
+ fun_l7_n597(x)
+ end
+end
+
+def fun_l6_n691(x)
+ if (x < 1)
+ fun_l7_n273(x)
+ else
+ fun_l7_n79(x)
+ end
+end
+
+def fun_l6_n692(x)
+ if (x < 1)
+ fun_l7_n310(x)
+ else
+ fun_l7_n261(x)
+ end
+end
+
+def fun_l6_n693(x)
+ if (x < 1)
+ fun_l7_n190(x)
+ else
+ fun_l7_n330(x)
+ end
+end
+
+def fun_l6_n694(x)
+ if (x < 1)
+ fun_l7_n103(x)
+ else
+ fun_l7_n258(x)
+ end
+end
+
+def fun_l6_n695(x)
+ if (x < 1)
+ fun_l7_n524(x)
+ else
+ fun_l7_n988(x)
+ end
+end
+
+def fun_l6_n696(x)
+ if (x < 1)
+ fun_l7_n885(x)
+ else
+ fun_l7_n731(x)
+ end
+end
+
+def fun_l6_n697(x)
+ if (x < 1)
+ fun_l7_n212(x)
+ else
+ fun_l7_n98(x)
+ end
+end
+
+def fun_l6_n698(x)
+ if (x < 1)
+ fun_l7_n735(x)
+ else
+ fun_l7_n682(x)
+ end
+end
+
+def fun_l6_n699(x)
+ if (x < 1)
+ fun_l7_n610(x)
+ else
+ fun_l7_n512(x)
+ end
+end
+
+def fun_l6_n700(x)
+ if (x < 1)
+ fun_l7_n828(x)
+ else
+ fun_l7_n651(x)
+ end
+end
+
+def fun_l6_n701(x)
+ if (x < 1)
+ fun_l7_n269(x)
+ else
+ fun_l7_n192(x)
+ end
+end
+
+def fun_l6_n702(x)
+ if (x < 1)
+ fun_l7_n775(x)
+ else
+ fun_l7_n334(x)
+ end
+end
+
+def fun_l6_n703(x)
+ if (x < 1)
+ fun_l7_n880(x)
+ else
+ fun_l7_n396(x)
+ end
+end
+
+def fun_l6_n704(x)
+ if (x < 1)
+ fun_l7_n907(x)
+ else
+ fun_l7_n107(x)
+ end
+end
+
+def fun_l6_n705(x)
+ if (x < 1)
+ fun_l7_n425(x)
+ else
+ fun_l7_n555(x)
+ end
+end
+
+def fun_l6_n706(x)
+ if (x < 1)
+ fun_l7_n458(x)
+ else
+ fun_l7_n610(x)
+ end
+end
+
+def fun_l6_n707(x)
+ if (x < 1)
+ fun_l7_n527(x)
+ else
+ fun_l7_n69(x)
+ end
+end
+
+def fun_l6_n708(x)
+ if (x < 1)
+ fun_l7_n518(x)
+ else
+ fun_l7_n87(x)
+ end
+end
+
+def fun_l6_n709(x)
+ if (x < 1)
+ fun_l7_n441(x)
+ else
+ fun_l7_n533(x)
+ end
+end
+
+def fun_l6_n710(x)
+ if (x < 1)
+ fun_l7_n872(x)
+ else
+ fun_l7_n896(x)
+ end
+end
+
+def fun_l6_n711(x)
+ if (x < 1)
+ fun_l7_n388(x)
+ else
+ fun_l7_n476(x)
+ end
+end
+
+def fun_l6_n712(x)
+ if (x < 1)
+ fun_l7_n913(x)
+ else
+ fun_l7_n304(x)
+ end
+end
+
+def fun_l6_n713(x)
+ if (x < 1)
+ fun_l7_n203(x)
+ else
+ fun_l7_n457(x)
+ end
+end
+
+def fun_l6_n714(x)
+ if (x < 1)
+ fun_l7_n593(x)
+ else
+ fun_l7_n921(x)
+ end
+end
+
+def fun_l6_n715(x)
+ if (x < 1)
+ fun_l7_n452(x)
+ else
+ fun_l7_n68(x)
+ end
+end
+
+def fun_l6_n716(x)
+ if (x < 1)
+ fun_l7_n203(x)
+ else
+ fun_l7_n362(x)
+ end
+end
+
+def fun_l6_n717(x)
+ if (x < 1)
+ fun_l7_n455(x)
+ else
+ fun_l7_n199(x)
+ end
+end
+
+def fun_l6_n718(x)
+ if (x < 1)
+ fun_l7_n500(x)
+ else
+ fun_l7_n272(x)
+ end
+end
+
+def fun_l6_n719(x)
+ if (x < 1)
+ fun_l7_n970(x)
+ else
+ fun_l7_n572(x)
+ end
+end
+
+def fun_l6_n720(x)
+ if (x < 1)
+ fun_l7_n74(x)
+ else
+ fun_l7_n246(x)
+ end
+end
+
+def fun_l6_n721(x)
+ if (x < 1)
+ fun_l7_n943(x)
+ else
+ fun_l7_n640(x)
+ end
+end
+
+def fun_l6_n722(x)
+ if (x < 1)
+ fun_l7_n106(x)
+ else
+ fun_l7_n387(x)
+ end
+end
+
+def fun_l6_n723(x)
+ if (x < 1)
+ fun_l7_n474(x)
+ else
+ fun_l7_n530(x)
+ end
+end
+
+def fun_l6_n724(x)
+ if (x < 1)
+ fun_l7_n246(x)
+ else
+ fun_l7_n387(x)
+ end
+end
+
+def fun_l6_n725(x)
+ if (x < 1)
+ fun_l7_n572(x)
+ else
+ fun_l7_n874(x)
+ end
+end
+
+def fun_l6_n726(x)
+ if (x < 1)
+ fun_l7_n858(x)
+ else
+ fun_l7_n71(x)
+ end
+end
+
+def fun_l6_n727(x)
+ if (x < 1)
+ fun_l7_n615(x)
+ else
+ fun_l7_n14(x)
+ end
+end
+
+def fun_l6_n728(x)
+ if (x < 1)
+ fun_l7_n840(x)
+ else
+ fun_l7_n779(x)
+ end
+end
+
+def fun_l6_n729(x)
+ if (x < 1)
+ fun_l7_n811(x)
+ else
+ fun_l7_n981(x)
+ end
+end
+
+def fun_l6_n730(x)
+ if (x < 1)
+ fun_l7_n196(x)
+ else
+ fun_l7_n731(x)
+ end
+end
+
+def fun_l6_n731(x)
+ if (x < 1)
+ fun_l7_n202(x)
+ else
+ fun_l7_n781(x)
+ end
+end
+
+def fun_l6_n732(x)
+ if (x < 1)
+ fun_l7_n867(x)
+ else
+ fun_l7_n770(x)
+ end
+end
+
+def fun_l6_n733(x)
+ if (x < 1)
+ fun_l7_n90(x)
+ else
+ fun_l7_n937(x)
+ end
+end
+
+def fun_l6_n734(x)
+ if (x < 1)
+ fun_l7_n890(x)
+ else
+ fun_l7_n937(x)
+ end
+end
+
+def fun_l6_n735(x)
+ if (x < 1)
+ fun_l7_n967(x)
+ else
+ fun_l7_n235(x)
+ end
+end
+
+def fun_l6_n736(x)
+ if (x < 1)
+ fun_l7_n612(x)
+ else
+ fun_l7_n140(x)
+ end
+end
+
+def fun_l6_n737(x)
+ if (x < 1)
+ fun_l7_n156(x)
+ else
+ fun_l7_n600(x)
+ end
+end
+
+def fun_l6_n738(x)
+ if (x < 1)
+ fun_l7_n462(x)
+ else
+ fun_l7_n907(x)
+ end
+end
+
+def fun_l6_n739(x)
+ if (x < 1)
+ fun_l7_n226(x)
+ else
+ fun_l7_n83(x)
+ end
+end
+
+def fun_l6_n740(x)
+ if (x < 1)
+ fun_l7_n419(x)
+ else
+ fun_l7_n95(x)
+ end
+end
+
+def fun_l6_n741(x)
+ if (x < 1)
+ fun_l7_n358(x)
+ else
+ fun_l7_n701(x)
+ end
+end
+
+def fun_l6_n742(x)
+ if (x < 1)
+ fun_l7_n451(x)
+ else
+ fun_l7_n700(x)
+ end
+end
+
+def fun_l6_n743(x)
+ if (x < 1)
+ fun_l7_n561(x)
+ else
+ fun_l7_n383(x)
+ end
+end
+
+def fun_l6_n744(x)
+ if (x < 1)
+ fun_l7_n28(x)
+ else
+ fun_l7_n56(x)
+ end
+end
+
+def fun_l6_n745(x)
+ if (x < 1)
+ fun_l7_n127(x)
+ else
+ fun_l7_n113(x)
+ end
+end
+
+def fun_l6_n746(x)
+ if (x < 1)
+ fun_l7_n767(x)
+ else
+ fun_l7_n907(x)
+ end
+end
+
+def fun_l6_n747(x)
+ if (x < 1)
+ fun_l7_n174(x)
+ else
+ fun_l7_n955(x)
+ end
+end
+
+def fun_l6_n748(x)
+ if (x < 1)
+ fun_l7_n51(x)
+ else
+ fun_l7_n951(x)
+ end
+end
+
+def fun_l6_n749(x)
+ if (x < 1)
+ fun_l7_n172(x)
+ else
+ fun_l7_n847(x)
+ end
+end
+
+def fun_l6_n750(x)
+ if (x < 1)
+ fun_l7_n240(x)
+ else
+ fun_l7_n869(x)
+ end
+end
+
+def fun_l6_n751(x)
+ if (x < 1)
+ fun_l7_n435(x)
+ else
+ fun_l7_n747(x)
+ end
+end
+
+def fun_l6_n752(x)
+ if (x < 1)
+ fun_l7_n673(x)
+ else
+ fun_l7_n454(x)
+ end
+end
+
+def fun_l6_n753(x)
+ if (x < 1)
+ fun_l7_n98(x)
+ else
+ fun_l7_n66(x)
+ end
+end
+
+def fun_l6_n754(x)
+ if (x < 1)
+ fun_l7_n63(x)
+ else
+ fun_l7_n943(x)
+ end
+end
+
+def fun_l6_n755(x)
+ if (x < 1)
+ fun_l7_n863(x)
+ else
+ fun_l7_n155(x)
+ end
+end
+
+def fun_l6_n756(x)
+ if (x < 1)
+ fun_l7_n197(x)
+ else
+ fun_l7_n807(x)
+ end
+end
+
+def fun_l6_n757(x)
+ if (x < 1)
+ fun_l7_n992(x)
+ else
+ fun_l7_n615(x)
+ end
+end
+
+def fun_l6_n758(x)
+ if (x < 1)
+ fun_l7_n172(x)
+ else
+ fun_l7_n507(x)
+ end
+end
+
+def fun_l6_n759(x)
+ if (x < 1)
+ fun_l7_n756(x)
+ else
+ fun_l7_n599(x)
+ end
+end
+
+def fun_l6_n760(x)
+ if (x < 1)
+ fun_l7_n876(x)
+ else
+ fun_l7_n185(x)
+ end
+end
+
+def fun_l6_n761(x)
+ if (x < 1)
+ fun_l7_n522(x)
+ else
+ fun_l7_n301(x)
+ end
+end
+
+def fun_l6_n762(x)
+ if (x < 1)
+ fun_l7_n994(x)
+ else
+ fun_l7_n408(x)
+ end
+end
+
+def fun_l6_n763(x)
+ if (x < 1)
+ fun_l7_n201(x)
+ else
+ fun_l7_n304(x)
+ end
+end
+
+def fun_l6_n764(x)
+ if (x < 1)
+ fun_l7_n200(x)
+ else
+ fun_l7_n123(x)
+ end
+end
+
+def fun_l6_n765(x)
+ if (x < 1)
+ fun_l7_n156(x)
+ else
+ fun_l7_n117(x)
+ end
+end
+
+def fun_l6_n766(x)
+ if (x < 1)
+ fun_l7_n918(x)
+ else
+ fun_l7_n416(x)
+ end
+end
+
+def fun_l6_n767(x)
+ if (x < 1)
+ fun_l7_n538(x)
+ else
+ fun_l7_n335(x)
+ end
+end
+
+def fun_l6_n768(x)
+ if (x < 1)
+ fun_l7_n699(x)
+ else
+ fun_l7_n980(x)
+ end
+end
+
+def fun_l6_n769(x)
+ if (x < 1)
+ fun_l7_n903(x)
+ else
+ fun_l7_n195(x)
+ end
+end
+
+def fun_l6_n770(x)
+ if (x < 1)
+ fun_l7_n419(x)
+ else
+ fun_l7_n523(x)
+ end
+end
+
+def fun_l6_n771(x)
+ if (x < 1)
+ fun_l7_n221(x)
+ else
+ fun_l7_n39(x)
+ end
+end
+
+def fun_l6_n772(x)
+ if (x < 1)
+ fun_l7_n369(x)
+ else
+ fun_l7_n227(x)
+ end
+end
+
+def fun_l6_n773(x)
+ if (x < 1)
+ fun_l7_n861(x)
+ else
+ fun_l7_n252(x)
+ end
+end
+
+def fun_l6_n774(x)
+ if (x < 1)
+ fun_l7_n682(x)
+ else
+ fun_l7_n42(x)
+ end
+end
+
+def fun_l6_n775(x)
+ if (x < 1)
+ fun_l7_n777(x)
+ else
+ fun_l7_n594(x)
+ end
+end
+
+def fun_l6_n776(x)
+ if (x < 1)
+ fun_l7_n794(x)
+ else
+ fun_l7_n71(x)
+ end
+end
+
+def fun_l6_n777(x)
+ if (x < 1)
+ fun_l7_n669(x)
+ else
+ fun_l7_n66(x)
+ end
+end
+
+def fun_l6_n778(x)
+ if (x < 1)
+ fun_l7_n824(x)
+ else
+ fun_l7_n95(x)
+ end
+end
+
+def fun_l6_n779(x)
+ if (x < 1)
+ fun_l7_n833(x)
+ else
+ fun_l7_n504(x)
+ end
+end
+
+def fun_l6_n780(x)
+ if (x < 1)
+ fun_l7_n50(x)
+ else
+ fun_l7_n647(x)
+ end
+end
+
+def fun_l6_n781(x)
+ if (x < 1)
+ fun_l7_n139(x)
+ else
+ fun_l7_n214(x)
+ end
+end
+
+def fun_l6_n782(x)
+ if (x < 1)
+ fun_l7_n450(x)
+ else
+ fun_l7_n660(x)
+ end
+end
+
+def fun_l6_n783(x)
+ if (x < 1)
+ fun_l7_n164(x)
+ else
+ fun_l7_n576(x)
+ end
+end
+
+def fun_l6_n784(x)
+ if (x < 1)
+ fun_l7_n722(x)
+ else
+ fun_l7_n714(x)
+ end
+end
+
+def fun_l6_n785(x)
+ if (x < 1)
+ fun_l7_n90(x)
+ else
+ fun_l7_n556(x)
+ end
+end
+
+def fun_l6_n786(x)
+ if (x < 1)
+ fun_l7_n413(x)
+ else
+ fun_l7_n722(x)
+ end
+end
+
+def fun_l6_n787(x)
+ if (x < 1)
+ fun_l7_n141(x)
+ else
+ fun_l7_n90(x)
+ end
+end
+
+def fun_l6_n788(x)
+ if (x < 1)
+ fun_l7_n750(x)
+ else
+ fun_l7_n660(x)
+ end
+end
+
+def fun_l6_n789(x)
+ if (x < 1)
+ fun_l7_n581(x)
+ else
+ fun_l7_n368(x)
+ end
+end
+
+def fun_l6_n790(x)
+ if (x < 1)
+ fun_l7_n922(x)
+ else
+ fun_l7_n551(x)
+ end
+end
+
+def fun_l6_n791(x)
+ if (x < 1)
+ fun_l7_n842(x)
+ else
+ fun_l7_n336(x)
+ end
+end
+
+def fun_l6_n792(x)
+ if (x < 1)
+ fun_l7_n264(x)
+ else
+ fun_l7_n7(x)
+ end
+end
+
+def fun_l6_n793(x)
+ if (x < 1)
+ fun_l7_n902(x)
+ else
+ fun_l7_n184(x)
+ end
+end
+
+def fun_l6_n794(x)
+ if (x < 1)
+ fun_l7_n788(x)
+ else
+ fun_l7_n854(x)
+ end
+end
+
+def fun_l6_n795(x)
+ if (x < 1)
+ fun_l7_n375(x)
+ else
+ fun_l7_n601(x)
+ end
+end
+
+def fun_l6_n796(x)
+ if (x < 1)
+ fun_l7_n674(x)
+ else
+ fun_l7_n136(x)
+ end
+end
+
+def fun_l6_n797(x)
+ if (x < 1)
+ fun_l7_n836(x)
+ else
+ fun_l7_n90(x)
+ end
+end
+
+def fun_l6_n798(x)
+ if (x < 1)
+ fun_l7_n461(x)
+ else
+ fun_l7_n686(x)
+ end
+end
+
+def fun_l6_n799(x)
+ if (x < 1)
+ fun_l7_n308(x)
+ else
+ fun_l7_n179(x)
+ end
+end
+
+def fun_l6_n800(x)
+ if (x < 1)
+ fun_l7_n154(x)
+ else
+ fun_l7_n618(x)
+ end
+end
+
+def fun_l6_n801(x)
+ if (x < 1)
+ fun_l7_n242(x)
+ else
+ fun_l7_n16(x)
+ end
+end
+
+def fun_l6_n802(x)
+ if (x < 1)
+ fun_l7_n880(x)
+ else
+ fun_l7_n143(x)
+ end
+end
+
+def fun_l6_n803(x)
+ if (x < 1)
+ fun_l7_n577(x)
+ else
+ fun_l7_n414(x)
+ end
+end
+
+def fun_l6_n804(x)
+ if (x < 1)
+ fun_l7_n409(x)
+ else
+ fun_l7_n116(x)
+ end
+end
+
+def fun_l6_n805(x)
+ if (x < 1)
+ fun_l7_n259(x)
+ else
+ fun_l7_n820(x)
+ end
+end
+
+def fun_l6_n806(x)
+ if (x < 1)
+ fun_l7_n384(x)
+ else
+ fun_l7_n939(x)
+ end
+end
+
+def fun_l6_n807(x)
+ if (x < 1)
+ fun_l7_n138(x)
+ else
+ fun_l7_n652(x)
+ end
+end
+
+def fun_l6_n808(x)
+ if (x < 1)
+ fun_l7_n455(x)
+ else
+ fun_l7_n693(x)
+ end
+end
+
+def fun_l6_n809(x)
+ if (x < 1)
+ fun_l7_n892(x)
+ else
+ fun_l7_n638(x)
+ end
+end
+
+def fun_l6_n810(x)
+ if (x < 1)
+ fun_l7_n498(x)
+ else
+ fun_l7_n716(x)
+ end
+end
+
+def fun_l6_n811(x)
+ if (x < 1)
+ fun_l7_n713(x)
+ else
+ fun_l7_n9(x)
+ end
+end
+
+def fun_l6_n812(x)
+ if (x < 1)
+ fun_l7_n854(x)
+ else
+ fun_l7_n507(x)
+ end
+end
+
+def fun_l6_n813(x)
+ if (x < 1)
+ fun_l7_n385(x)
+ else
+ fun_l7_n323(x)
+ end
+end
+
+def fun_l6_n814(x)
+ if (x < 1)
+ fun_l7_n437(x)
+ else
+ fun_l7_n954(x)
+ end
+end
+
+def fun_l6_n815(x)
+ if (x < 1)
+ fun_l7_n471(x)
+ else
+ fun_l7_n245(x)
+ end
+end
+
+def fun_l6_n816(x)
+ if (x < 1)
+ fun_l7_n485(x)
+ else
+ fun_l7_n935(x)
+ end
+end
+
+def fun_l6_n817(x)
+ if (x < 1)
+ fun_l7_n319(x)
+ else
+ fun_l7_n566(x)
+ end
+end
+
+def fun_l6_n818(x)
+ if (x < 1)
+ fun_l7_n339(x)
+ else
+ fun_l7_n152(x)
+ end
+end
+
+def fun_l6_n819(x)
+ if (x < 1)
+ fun_l7_n21(x)
+ else
+ fun_l7_n452(x)
+ end
+end
+
+def fun_l6_n820(x)
+ if (x < 1)
+ fun_l7_n16(x)
+ else
+ fun_l7_n502(x)
+ end
+end
+
+def fun_l6_n821(x)
+ if (x < 1)
+ fun_l7_n813(x)
+ else
+ fun_l7_n520(x)
+ end
+end
+
+def fun_l6_n822(x)
+ if (x < 1)
+ fun_l7_n519(x)
+ else
+ fun_l7_n651(x)
+ end
+end
+
+def fun_l6_n823(x)
+ if (x < 1)
+ fun_l7_n890(x)
+ else
+ fun_l7_n227(x)
+ end
+end
+
+def fun_l6_n824(x)
+ if (x < 1)
+ fun_l7_n517(x)
+ else
+ fun_l7_n799(x)
+ end
+end
+
+def fun_l6_n825(x)
+ if (x < 1)
+ fun_l7_n404(x)
+ else
+ fun_l7_n442(x)
+ end
+end
+
+def fun_l6_n826(x)
+ if (x < 1)
+ fun_l7_n308(x)
+ else
+ fun_l7_n328(x)
+ end
+end
+
+def fun_l6_n827(x)
+ if (x < 1)
+ fun_l7_n953(x)
+ else
+ fun_l7_n950(x)
+ end
+end
+
+def fun_l6_n828(x)
+ if (x < 1)
+ fun_l7_n206(x)
+ else
+ fun_l7_n701(x)
+ end
+end
+
+def fun_l6_n829(x)
+ if (x < 1)
+ fun_l7_n499(x)
+ else
+ fun_l7_n972(x)
+ end
+end
+
+def fun_l6_n830(x)
+ if (x < 1)
+ fun_l7_n578(x)
+ else
+ fun_l7_n697(x)
+ end
+end
+
+def fun_l6_n831(x)
+ if (x < 1)
+ fun_l7_n116(x)
+ else
+ fun_l7_n747(x)
+ end
+end
+
+def fun_l6_n832(x)
+ if (x < 1)
+ fun_l7_n423(x)
+ else
+ fun_l7_n653(x)
+ end
+end
+
+def fun_l6_n833(x)
+ if (x < 1)
+ fun_l7_n524(x)
+ else
+ fun_l7_n423(x)
+ end
+end
+
+def fun_l6_n834(x)
+ if (x < 1)
+ fun_l7_n940(x)
+ else
+ fun_l7_n48(x)
+ end
+end
+
+def fun_l6_n835(x)
+ if (x < 1)
+ fun_l7_n336(x)
+ else
+ fun_l7_n584(x)
+ end
+end
+
+def fun_l6_n836(x)
+ if (x < 1)
+ fun_l7_n66(x)
+ else
+ fun_l7_n980(x)
+ end
+end
+
+def fun_l6_n837(x)
+ if (x < 1)
+ fun_l7_n176(x)
+ else
+ fun_l7_n845(x)
+ end
+end
+
+def fun_l6_n838(x)
+ if (x < 1)
+ fun_l7_n736(x)
+ else
+ fun_l7_n385(x)
+ end
+end
+
+def fun_l6_n839(x)
+ if (x < 1)
+ fun_l7_n948(x)
+ else
+ fun_l7_n258(x)
+ end
+end
+
+def fun_l6_n840(x)
+ if (x < 1)
+ fun_l7_n926(x)
+ else
+ fun_l7_n926(x)
+ end
+end
+
+def fun_l6_n841(x)
+ if (x < 1)
+ fun_l7_n334(x)
+ else
+ fun_l7_n134(x)
+ end
+end
+
+def fun_l6_n842(x)
+ if (x < 1)
+ fun_l7_n757(x)
+ else
+ fun_l7_n995(x)
+ end
+end
+
+def fun_l6_n843(x)
+ if (x < 1)
+ fun_l7_n75(x)
+ else
+ fun_l7_n6(x)
+ end
+end
+
+def fun_l6_n844(x)
+ if (x < 1)
+ fun_l7_n632(x)
+ else
+ fun_l7_n683(x)
+ end
+end
+
+def fun_l6_n845(x)
+ if (x < 1)
+ fun_l7_n788(x)
+ else
+ fun_l7_n153(x)
+ end
+end
+
+def fun_l6_n846(x)
+ if (x < 1)
+ fun_l7_n668(x)
+ else
+ fun_l7_n78(x)
+ end
+end
+
+def fun_l6_n847(x)
+ if (x < 1)
+ fun_l7_n509(x)
+ else
+ fun_l7_n210(x)
+ end
+end
+
+def fun_l6_n848(x)
+ if (x < 1)
+ fun_l7_n862(x)
+ else
+ fun_l7_n90(x)
+ end
+end
+
+def fun_l6_n849(x)
+ if (x < 1)
+ fun_l7_n837(x)
+ else
+ fun_l7_n37(x)
+ end
+end
+
+def fun_l6_n850(x)
+ if (x < 1)
+ fun_l7_n792(x)
+ else
+ fun_l7_n171(x)
+ end
+end
+
+def fun_l6_n851(x)
+ if (x < 1)
+ fun_l7_n869(x)
+ else
+ fun_l7_n217(x)
+ end
+end
+
+def fun_l6_n852(x)
+ if (x < 1)
+ fun_l7_n585(x)
+ else
+ fun_l7_n980(x)
+ end
+end
+
+def fun_l6_n853(x)
+ if (x < 1)
+ fun_l7_n752(x)
+ else
+ fun_l7_n214(x)
+ end
+end
+
+def fun_l6_n854(x)
+ if (x < 1)
+ fun_l7_n151(x)
+ else
+ fun_l7_n668(x)
+ end
+end
+
+def fun_l6_n855(x)
+ if (x < 1)
+ fun_l7_n145(x)
+ else
+ fun_l7_n99(x)
+ end
+end
+
+def fun_l6_n856(x)
+ if (x < 1)
+ fun_l7_n114(x)
+ else
+ fun_l7_n852(x)
+ end
+end
+
+def fun_l6_n857(x)
+ if (x < 1)
+ fun_l7_n571(x)
+ else
+ fun_l7_n6(x)
+ end
+end
+
+def fun_l6_n858(x)
+ if (x < 1)
+ fun_l7_n724(x)
+ else
+ fun_l7_n132(x)
+ end
+end
+
+def fun_l6_n859(x)
+ if (x < 1)
+ fun_l7_n146(x)
+ else
+ fun_l7_n627(x)
+ end
+end
+
+def fun_l6_n860(x)
+ if (x < 1)
+ fun_l7_n515(x)
+ else
+ fun_l7_n598(x)
+ end
+end
+
+def fun_l6_n861(x)
+ if (x < 1)
+ fun_l7_n49(x)
+ else
+ fun_l7_n781(x)
+ end
+end
+
+def fun_l6_n862(x)
+ if (x < 1)
+ fun_l7_n378(x)
+ else
+ fun_l7_n295(x)
+ end
+end
+
+def fun_l6_n863(x)
+ if (x < 1)
+ fun_l7_n122(x)
+ else
+ fun_l7_n738(x)
+ end
+end
+
+def fun_l6_n864(x)
+ if (x < 1)
+ fun_l7_n421(x)
+ else
+ fun_l7_n719(x)
+ end
+end
+
+def fun_l6_n865(x)
+ if (x < 1)
+ fun_l7_n965(x)
+ else
+ fun_l7_n963(x)
+ end
+end
+
+def fun_l6_n866(x)
+ if (x < 1)
+ fun_l7_n551(x)
+ else
+ fun_l7_n177(x)
+ end
+end
+
+def fun_l6_n867(x)
+ if (x < 1)
+ fun_l7_n136(x)
+ else
+ fun_l7_n289(x)
+ end
+end
+
+def fun_l6_n868(x)
+ if (x < 1)
+ fun_l7_n26(x)
+ else
+ fun_l7_n376(x)
+ end
+end
+
+def fun_l6_n869(x)
+ if (x < 1)
+ fun_l7_n888(x)
+ else
+ fun_l7_n948(x)
+ end
+end
+
+def fun_l6_n870(x)
+ if (x < 1)
+ fun_l7_n636(x)
+ else
+ fun_l7_n89(x)
+ end
+end
+
+def fun_l6_n871(x)
+ if (x < 1)
+ fun_l7_n953(x)
+ else
+ fun_l7_n634(x)
+ end
+end
+
+def fun_l6_n872(x)
+ if (x < 1)
+ fun_l7_n74(x)
+ else
+ fun_l7_n219(x)
+ end
+end
+
+def fun_l6_n873(x)
+ if (x < 1)
+ fun_l7_n317(x)
+ else
+ fun_l7_n863(x)
+ end
+end
+
+def fun_l6_n874(x)
+ if (x < 1)
+ fun_l7_n495(x)
+ else
+ fun_l7_n478(x)
+ end
+end
+
+def fun_l6_n875(x)
+ if (x < 1)
+ fun_l7_n908(x)
+ else
+ fun_l7_n634(x)
+ end
+end
+
+def fun_l6_n876(x)
+ if (x < 1)
+ fun_l7_n780(x)
+ else
+ fun_l7_n886(x)
+ end
+end
+
+def fun_l6_n877(x)
+ if (x < 1)
+ fun_l7_n768(x)
+ else
+ fun_l7_n820(x)
+ end
+end
+
+def fun_l6_n878(x)
+ if (x < 1)
+ fun_l7_n501(x)
+ else
+ fun_l7_n355(x)
+ end
+end
+
+def fun_l6_n879(x)
+ if (x < 1)
+ fun_l7_n412(x)
+ else
+ fun_l7_n701(x)
+ end
+end
+
+def fun_l6_n880(x)
+ if (x < 1)
+ fun_l7_n541(x)
+ else
+ fun_l7_n148(x)
+ end
+end
+
+def fun_l6_n881(x)
+ if (x < 1)
+ fun_l7_n460(x)
+ else
+ fun_l7_n579(x)
+ end
+end
+
+def fun_l6_n882(x)
+ if (x < 1)
+ fun_l7_n576(x)
+ else
+ fun_l7_n118(x)
+ end
+end
+
+def fun_l6_n883(x)
+ if (x < 1)
+ fun_l7_n218(x)
+ else
+ fun_l7_n197(x)
+ end
+end
+
+def fun_l6_n884(x)
+ if (x < 1)
+ fun_l7_n279(x)
+ else
+ fun_l7_n565(x)
+ end
+end
+
+def fun_l6_n885(x)
+ if (x < 1)
+ fun_l7_n934(x)
+ else
+ fun_l7_n46(x)
+ end
+end
+
+def fun_l6_n886(x)
+ if (x < 1)
+ fun_l7_n894(x)
+ else
+ fun_l7_n864(x)
+ end
+end
+
+def fun_l6_n887(x)
+ if (x < 1)
+ fun_l7_n45(x)
+ else
+ fun_l7_n151(x)
+ end
+end
+
+def fun_l6_n888(x)
+ if (x < 1)
+ fun_l7_n190(x)
+ else
+ fun_l7_n46(x)
+ end
+end
+
+def fun_l6_n889(x)
+ if (x < 1)
+ fun_l7_n56(x)
+ else
+ fun_l7_n451(x)
+ end
+end
+
+def fun_l6_n890(x)
+ if (x < 1)
+ fun_l7_n7(x)
+ else
+ fun_l7_n906(x)
+ end
+end
+
+def fun_l6_n891(x)
+ if (x < 1)
+ fun_l7_n507(x)
+ else
+ fun_l7_n121(x)
+ end
+end
+
+def fun_l6_n892(x)
+ if (x < 1)
+ fun_l7_n757(x)
+ else
+ fun_l7_n485(x)
+ end
+end
+
+def fun_l6_n893(x)
+ if (x < 1)
+ fun_l7_n242(x)
+ else
+ fun_l7_n366(x)
+ end
+end
+
+def fun_l6_n894(x)
+ if (x < 1)
+ fun_l7_n563(x)
+ else
+ fun_l7_n749(x)
+ end
+end
+
+def fun_l6_n895(x)
+ if (x < 1)
+ fun_l7_n976(x)
+ else
+ fun_l7_n907(x)
+ end
+end
+
+def fun_l6_n896(x)
+ if (x < 1)
+ fun_l7_n525(x)
+ else
+ fun_l7_n843(x)
+ end
+end
+
+def fun_l6_n897(x)
+ if (x < 1)
+ fun_l7_n255(x)
+ else
+ fun_l7_n133(x)
+ end
+end
+
+def fun_l6_n898(x)
+ if (x < 1)
+ fun_l7_n796(x)
+ else
+ fun_l7_n275(x)
+ end
+end
+
+def fun_l6_n899(x)
+ if (x < 1)
+ fun_l7_n985(x)
+ else
+ fun_l7_n485(x)
+ end
+end
+
+def fun_l6_n900(x)
+ if (x < 1)
+ fun_l7_n32(x)
+ else
+ fun_l7_n324(x)
+ end
+end
+
+def fun_l6_n901(x)
+ if (x < 1)
+ fun_l7_n334(x)
+ else
+ fun_l7_n618(x)
+ end
+end
+
+def fun_l6_n902(x)
+ if (x < 1)
+ fun_l7_n762(x)
+ else
+ fun_l7_n778(x)
+ end
+end
+
+def fun_l6_n903(x)
+ if (x < 1)
+ fun_l7_n343(x)
+ else
+ fun_l7_n349(x)
+ end
+end
+
+def fun_l6_n904(x)
+ if (x < 1)
+ fun_l7_n310(x)
+ else
+ fun_l7_n215(x)
+ end
+end
+
+def fun_l6_n905(x)
+ if (x < 1)
+ fun_l7_n162(x)
+ else
+ fun_l7_n405(x)
+ end
+end
+
+def fun_l6_n906(x)
+ if (x < 1)
+ fun_l7_n667(x)
+ else
+ fun_l7_n633(x)
+ end
+end
+
+def fun_l6_n907(x)
+ if (x < 1)
+ fun_l7_n652(x)
+ else
+ fun_l7_n771(x)
+ end
+end
+
+def fun_l6_n908(x)
+ if (x < 1)
+ fun_l7_n198(x)
+ else
+ fun_l7_n32(x)
+ end
+end
+
+def fun_l6_n909(x)
+ if (x < 1)
+ fun_l7_n577(x)
+ else
+ fun_l7_n116(x)
+ end
+end
+
+def fun_l6_n910(x)
+ if (x < 1)
+ fun_l7_n63(x)
+ else
+ fun_l7_n57(x)
+ end
+end
+
+def fun_l6_n911(x)
+ if (x < 1)
+ fun_l7_n908(x)
+ else
+ fun_l7_n556(x)
+ end
+end
+
+def fun_l6_n912(x)
+ if (x < 1)
+ fun_l7_n420(x)
+ else
+ fun_l7_n163(x)
+ end
+end
+
+def fun_l6_n913(x)
+ if (x < 1)
+ fun_l7_n795(x)
+ else
+ fun_l7_n225(x)
+ end
+end
+
+def fun_l6_n914(x)
+ if (x < 1)
+ fun_l7_n855(x)
+ else
+ fun_l7_n742(x)
+ end
+end
+
+def fun_l6_n915(x)
+ if (x < 1)
+ fun_l7_n386(x)
+ else
+ fun_l7_n314(x)
+ end
+end
+
+def fun_l6_n916(x)
+ if (x < 1)
+ fun_l7_n587(x)
+ else
+ fun_l7_n371(x)
+ end
+end
+
+def fun_l6_n917(x)
+ if (x < 1)
+ fun_l7_n570(x)
+ else
+ fun_l7_n735(x)
+ end
+end
+
+def fun_l6_n918(x)
+ if (x < 1)
+ fun_l7_n985(x)
+ else
+ fun_l7_n492(x)
+ end
+end
+
+def fun_l6_n919(x)
+ if (x < 1)
+ fun_l7_n991(x)
+ else
+ fun_l7_n454(x)
+ end
+end
+
+def fun_l6_n920(x)
+ if (x < 1)
+ fun_l7_n925(x)
+ else
+ fun_l7_n161(x)
+ end
+end
+
+def fun_l6_n921(x)
+ if (x < 1)
+ fun_l7_n398(x)
+ else
+ fun_l7_n621(x)
+ end
+end
+
+def fun_l6_n922(x)
+ if (x < 1)
+ fun_l7_n732(x)
+ else
+ fun_l7_n509(x)
+ end
+end
+
+def fun_l6_n923(x)
+ if (x < 1)
+ fun_l7_n879(x)
+ else
+ fun_l7_n219(x)
+ end
+end
+
+def fun_l6_n924(x)
+ if (x < 1)
+ fun_l7_n19(x)
+ else
+ fun_l7_n604(x)
+ end
+end
+
+def fun_l6_n925(x)
+ if (x < 1)
+ fun_l7_n474(x)
+ else
+ fun_l7_n439(x)
+ end
+end
+
+def fun_l6_n926(x)
+ if (x < 1)
+ fun_l7_n807(x)
+ else
+ fun_l7_n802(x)
+ end
+end
+
+def fun_l6_n927(x)
+ if (x < 1)
+ fun_l7_n146(x)
+ else
+ fun_l7_n966(x)
+ end
+end
+
+def fun_l6_n928(x)
+ if (x < 1)
+ fun_l7_n72(x)
+ else
+ fun_l7_n835(x)
+ end
+end
+
+def fun_l6_n929(x)
+ if (x < 1)
+ fun_l7_n389(x)
+ else
+ fun_l7_n951(x)
+ end
+end
+
+def fun_l6_n930(x)
+ if (x < 1)
+ fun_l7_n104(x)
+ else
+ fun_l7_n719(x)
+ end
+end
+
+def fun_l6_n931(x)
+ if (x < 1)
+ fun_l7_n250(x)
+ else
+ fun_l7_n13(x)
+ end
+end
+
+def fun_l6_n932(x)
+ if (x < 1)
+ fun_l7_n470(x)
+ else
+ fun_l7_n984(x)
+ end
+end
+
+def fun_l6_n933(x)
+ if (x < 1)
+ fun_l7_n485(x)
+ else
+ fun_l7_n750(x)
+ end
+end
+
+def fun_l6_n934(x)
+ if (x < 1)
+ fun_l7_n371(x)
+ else
+ fun_l7_n912(x)
+ end
+end
+
+def fun_l6_n935(x)
+ if (x < 1)
+ fun_l7_n686(x)
+ else
+ fun_l7_n523(x)
+ end
+end
+
+def fun_l6_n936(x)
+ if (x < 1)
+ fun_l7_n998(x)
+ else
+ fun_l7_n6(x)
+ end
+end
+
+def fun_l6_n937(x)
+ if (x < 1)
+ fun_l7_n753(x)
+ else
+ fun_l7_n781(x)
+ end
+end
+
+def fun_l6_n938(x)
+ if (x < 1)
+ fun_l7_n518(x)
+ else
+ fun_l7_n18(x)
+ end
+end
+
+def fun_l6_n939(x)
+ if (x < 1)
+ fun_l7_n771(x)
+ else
+ fun_l7_n1(x)
+ end
+end
+
+def fun_l6_n940(x)
+ if (x < 1)
+ fun_l7_n43(x)
+ else
+ fun_l7_n727(x)
+ end
+end
+
+def fun_l6_n941(x)
+ if (x < 1)
+ fun_l7_n282(x)
+ else
+ fun_l7_n676(x)
+ end
+end
+
+def fun_l6_n942(x)
+ if (x < 1)
+ fun_l7_n284(x)
+ else
+ fun_l7_n56(x)
+ end
+end
+
+def fun_l6_n943(x)
+ if (x < 1)
+ fun_l7_n304(x)
+ else
+ fun_l7_n287(x)
+ end
+end
+
+def fun_l6_n944(x)
+ if (x < 1)
+ fun_l7_n968(x)
+ else
+ fun_l7_n283(x)
+ end
+end
+
+def fun_l6_n945(x)
+ if (x < 1)
+ fun_l7_n892(x)
+ else
+ fun_l7_n413(x)
+ end
+end
+
+def fun_l6_n946(x)
+ if (x < 1)
+ fun_l7_n750(x)
+ else
+ fun_l7_n244(x)
+ end
+end
+
+def fun_l6_n947(x)
+ if (x < 1)
+ fun_l7_n599(x)
+ else
+ fun_l7_n238(x)
+ end
+end
+
+def fun_l6_n948(x)
+ if (x < 1)
+ fun_l7_n447(x)
+ else
+ fun_l7_n880(x)
+ end
+end
+
+def fun_l6_n949(x)
+ if (x < 1)
+ fun_l7_n242(x)
+ else
+ fun_l7_n210(x)
+ end
+end
+
+def fun_l6_n950(x)
+ if (x < 1)
+ fun_l7_n143(x)
+ else
+ fun_l7_n816(x)
+ end
+end
+
+def fun_l6_n951(x)
+ if (x < 1)
+ fun_l7_n125(x)
+ else
+ fun_l7_n909(x)
+ end
+end
+
+def fun_l6_n952(x)
+ if (x < 1)
+ fun_l7_n134(x)
+ else
+ fun_l7_n946(x)
+ end
+end
+
+def fun_l6_n953(x)
+ if (x < 1)
+ fun_l7_n858(x)
+ else
+ fun_l7_n927(x)
+ end
+end
+
+def fun_l6_n954(x)
+ if (x < 1)
+ fun_l7_n853(x)
+ else
+ fun_l7_n396(x)
+ end
+end
+
+def fun_l6_n955(x)
+ if (x < 1)
+ fun_l7_n725(x)
+ else
+ fun_l7_n42(x)
+ end
+end
+
+def fun_l6_n956(x)
+ if (x < 1)
+ fun_l7_n108(x)
+ else
+ fun_l7_n177(x)
+ end
+end
+
+def fun_l6_n957(x)
+ if (x < 1)
+ fun_l7_n742(x)
+ else
+ fun_l7_n482(x)
+ end
+end
+
+def fun_l6_n958(x)
+ if (x < 1)
+ fun_l7_n385(x)
+ else
+ fun_l7_n292(x)
+ end
+end
+
+def fun_l6_n959(x)
+ if (x < 1)
+ fun_l7_n325(x)
+ else
+ fun_l7_n27(x)
+ end
+end
+
+def fun_l6_n960(x)
+ if (x < 1)
+ fun_l7_n558(x)
+ else
+ fun_l7_n556(x)
+ end
+end
+
+def fun_l6_n961(x)
+ if (x < 1)
+ fun_l7_n472(x)
+ else
+ fun_l7_n322(x)
+ end
+end
+
+def fun_l6_n962(x)
+ if (x < 1)
+ fun_l7_n754(x)
+ else
+ fun_l7_n729(x)
+ end
+end
+
+def fun_l6_n963(x)
+ if (x < 1)
+ fun_l7_n35(x)
+ else
+ fun_l7_n232(x)
+ end
+end
+
+def fun_l6_n964(x)
+ if (x < 1)
+ fun_l7_n113(x)
+ else
+ fun_l7_n878(x)
+ end
+end
+
+def fun_l6_n965(x)
+ if (x < 1)
+ fun_l7_n982(x)
+ else
+ fun_l7_n404(x)
+ end
+end
+
+def fun_l6_n966(x)
+ if (x < 1)
+ fun_l7_n385(x)
+ else
+ fun_l7_n20(x)
+ end
+end
+
+def fun_l6_n967(x)
+ if (x < 1)
+ fun_l7_n394(x)
+ else
+ fun_l7_n873(x)
+ end
+end
+
+def fun_l6_n968(x)
+ if (x < 1)
+ fun_l7_n230(x)
+ else
+ fun_l7_n141(x)
+ end
+end
+
+def fun_l6_n969(x)
+ if (x < 1)
+ fun_l7_n742(x)
+ else
+ fun_l7_n75(x)
+ end
+end
+
+def fun_l6_n970(x)
+ if (x < 1)
+ fun_l7_n583(x)
+ else
+ fun_l7_n809(x)
+ end
+end
+
+def fun_l6_n971(x)
+ if (x < 1)
+ fun_l7_n535(x)
+ else
+ fun_l7_n151(x)
+ end
+end
+
+def fun_l6_n972(x)
+ if (x < 1)
+ fun_l7_n42(x)
+ else
+ fun_l7_n469(x)
+ end
+end
+
+def fun_l6_n973(x)
+ if (x < 1)
+ fun_l7_n964(x)
+ else
+ fun_l7_n759(x)
+ end
+end
+
+def fun_l6_n974(x)
+ if (x < 1)
+ fun_l7_n897(x)
+ else
+ fun_l7_n29(x)
+ end
+end
+
+def fun_l6_n975(x)
+ if (x < 1)
+ fun_l7_n331(x)
+ else
+ fun_l7_n972(x)
+ end
+end
+
+def fun_l6_n976(x)
+ if (x < 1)
+ fun_l7_n825(x)
+ else
+ fun_l7_n221(x)
+ end
+end
+
+def fun_l6_n977(x)
+ if (x < 1)
+ fun_l7_n207(x)
+ else
+ fun_l7_n520(x)
+ end
+end
+
+def fun_l6_n978(x)
+ if (x < 1)
+ fun_l7_n231(x)
+ else
+ fun_l7_n552(x)
+ end
+end
+
+def fun_l6_n979(x)
+ if (x < 1)
+ fun_l7_n621(x)
+ else
+ fun_l7_n766(x)
+ end
+end
+
+def fun_l6_n980(x)
+ if (x < 1)
+ fun_l7_n461(x)
+ else
+ fun_l7_n552(x)
+ end
+end
+
+def fun_l6_n981(x)
+ if (x < 1)
+ fun_l7_n891(x)
+ else
+ fun_l7_n710(x)
+ end
+end
+
+def fun_l6_n982(x)
+ if (x < 1)
+ fun_l7_n253(x)
+ else
+ fun_l7_n104(x)
+ end
+end
+
+def fun_l6_n983(x)
+ if (x < 1)
+ fun_l7_n726(x)
+ else
+ fun_l7_n903(x)
+ end
+end
+
+def fun_l6_n984(x)
+ if (x < 1)
+ fun_l7_n524(x)
+ else
+ fun_l7_n700(x)
+ end
+end
+
+def fun_l6_n985(x)
+ if (x < 1)
+ fun_l7_n234(x)
+ else
+ fun_l7_n454(x)
+ end
+end
+
+def fun_l6_n986(x)
+ if (x < 1)
+ fun_l7_n379(x)
+ else
+ fun_l7_n504(x)
+ end
+end
+
+def fun_l6_n987(x)
+ if (x < 1)
+ fun_l7_n194(x)
+ else
+ fun_l7_n962(x)
+ end
+end
+
+def fun_l6_n988(x)
+ if (x < 1)
+ fun_l7_n60(x)
+ else
+ fun_l7_n699(x)
+ end
+end
+
+def fun_l6_n989(x)
+ if (x < 1)
+ fun_l7_n692(x)
+ else
+ fun_l7_n28(x)
+ end
+end
+
+def fun_l6_n990(x)
+ if (x < 1)
+ fun_l7_n456(x)
+ else
+ fun_l7_n126(x)
+ end
+end
+
+def fun_l6_n991(x)
+ if (x < 1)
+ fun_l7_n104(x)
+ else
+ fun_l7_n156(x)
+ end
+end
+
+def fun_l6_n992(x)
+ if (x < 1)
+ fun_l7_n269(x)
+ else
+ fun_l7_n513(x)
+ end
+end
+
+def fun_l6_n993(x)
+ if (x < 1)
+ fun_l7_n20(x)
+ else
+ fun_l7_n754(x)
+ end
+end
+
+def fun_l6_n994(x)
+ if (x < 1)
+ fun_l7_n445(x)
+ else
+ fun_l7_n422(x)
+ end
+end
+
+def fun_l6_n995(x)
+ if (x < 1)
+ fun_l7_n320(x)
+ else
+ fun_l7_n398(x)
+ end
+end
+
+def fun_l6_n996(x)
+ if (x < 1)
+ fun_l7_n479(x)
+ else
+ fun_l7_n238(x)
+ end
+end
+
+def fun_l6_n997(x)
+ if (x < 1)
+ fun_l7_n254(x)
+ else
+ fun_l7_n692(x)
+ end
+end
+
+def fun_l6_n998(x)
+ if (x < 1)
+ fun_l7_n774(x)
+ else
+ fun_l7_n510(x)
+ end
+end
+
+def fun_l6_n999(x)
+ if (x < 1)
+ fun_l7_n158(x)
+ else
+ fun_l7_n929(x)
+ end
+end
+
+def fun_l7_n0(x)
+ if (x < 1)
+ fun_l8_n414(x)
+ else
+ fun_l8_n327(x)
+ end
+end
+
+def fun_l7_n1(x)
+ if (x < 1)
+ fun_l8_n446(x)
+ else
+ fun_l8_n611(x)
+ end
+end
+
+def fun_l7_n2(x)
+ if (x < 1)
+ fun_l8_n116(x)
+ else
+ fun_l8_n224(x)
+ end
+end
+
+def fun_l7_n3(x)
+ if (x < 1)
+ fun_l8_n809(x)
+ else
+ fun_l8_n962(x)
+ end
+end
+
+def fun_l7_n4(x)
+ if (x < 1)
+ fun_l8_n137(x)
+ else
+ fun_l8_n426(x)
+ end
+end
+
+def fun_l7_n5(x)
+ if (x < 1)
+ fun_l8_n686(x)
+ else
+ fun_l8_n600(x)
+ end
+end
+
+def fun_l7_n6(x)
+ if (x < 1)
+ fun_l8_n430(x)
+ else
+ fun_l8_n302(x)
+ end
+end
+
+def fun_l7_n7(x)
+ if (x < 1)
+ fun_l8_n865(x)
+ else
+ fun_l8_n60(x)
+ end
+end
+
+def fun_l7_n8(x)
+ if (x < 1)
+ fun_l8_n895(x)
+ else
+ fun_l8_n148(x)
+ end
+end
+
+def fun_l7_n9(x)
+ if (x < 1)
+ fun_l8_n740(x)
+ else
+ fun_l8_n982(x)
+ end
+end
+
+def fun_l7_n10(x)
+ if (x < 1)
+ fun_l8_n757(x)
+ else
+ fun_l8_n640(x)
+ end
+end
+
+def fun_l7_n11(x)
+ if (x < 1)
+ fun_l8_n744(x)
+ else
+ fun_l8_n256(x)
+ end
+end
+
+def fun_l7_n12(x)
+ if (x < 1)
+ fun_l8_n272(x)
+ else
+ fun_l8_n828(x)
+ end
+end
+
+def fun_l7_n13(x)
+ if (x < 1)
+ fun_l8_n395(x)
+ else
+ fun_l8_n932(x)
+ end
+end
+
+def fun_l7_n14(x)
+ if (x < 1)
+ fun_l8_n178(x)
+ else
+ fun_l8_n676(x)
+ end
+end
+
+def fun_l7_n15(x)
+ if (x < 1)
+ fun_l8_n40(x)
+ else
+ fun_l8_n548(x)
+ end
+end
+
+def fun_l7_n16(x)
+ if (x < 1)
+ fun_l8_n506(x)
+ else
+ fun_l8_n234(x)
+ end
+end
+
+def fun_l7_n17(x)
+ if (x < 1)
+ fun_l8_n239(x)
+ else
+ fun_l8_n56(x)
+ end
+end
+
+def fun_l7_n18(x)
+ if (x < 1)
+ fun_l8_n381(x)
+ else
+ fun_l8_n720(x)
+ end
+end
+
+def fun_l7_n19(x)
+ if (x < 1)
+ fun_l8_n781(x)
+ else
+ fun_l8_n605(x)
+ end
+end
+
+def fun_l7_n20(x)
+ if (x < 1)
+ fun_l8_n479(x)
+ else
+ fun_l8_n894(x)
+ end
+end
+
+def fun_l7_n21(x)
+ if (x < 1)
+ fun_l8_n437(x)
+ else
+ fun_l8_n123(x)
+ end
+end
+
+def fun_l7_n22(x)
+ if (x < 1)
+ fun_l8_n20(x)
+ else
+ fun_l8_n144(x)
+ end
+end
+
+def fun_l7_n23(x)
+ if (x < 1)
+ fun_l8_n684(x)
+ else
+ fun_l8_n866(x)
+ end
+end
+
+def fun_l7_n24(x)
+ if (x < 1)
+ fun_l8_n527(x)
+ else
+ fun_l8_n629(x)
+ end
+end
+
+def fun_l7_n25(x)
+ if (x < 1)
+ fun_l8_n399(x)
+ else
+ fun_l8_n52(x)
+ end
+end
+
+def fun_l7_n26(x)
+ if (x < 1)
+ fun_l8_n353(x)
+ else
+ fun_l8_n438(x)
+ end
+end
+
+def fun_l7_n27(x)
+ if (x < 1)
+ fun_l8_n916(x)
+ else
+ fun_l8_n488(x)
+ end
+end
+
+def fun_l7_n28(x)
+ if (x < 1)
+ fun_l8_n833(x)
+ else
+ fun_l8_n585(x)
+ end
+end
+
+def fun_l7_n29(x)
+ if (x < 1)
+ fun_l8_n38(x)
+ else
+ fun_l8_n785(x)
+ end
+end
+
+def fun_l7_n30(x)
+ if (x < 1)
+ fun_l8_n157(x)
+ else
+ fun_l8_n332(x)
+ end
+end
+
+def fun_l7_n31(x)
+ if (x < 1)
+ fun_l8_n467(x)
+ else
+ fun_l8_n442(x)
+ end
+end
+
+def fun_l7_n32(x)
+ if (x < 1)
+ fun_l8_n704(x)
+ else
+ fun_l8_n295(x)
+ end
+end
+
+def fun_l7_n33(x)
+ if (x < 1)
+ fun_l8_n829(x)
+ else
+ fun_l8_n307(x)
+ end
+end
+
+def fun_l7_n34(x)
+ if (x < 1)
+ fun_l8_n227(x)
+ else
+ fun_l8_n865(x)
+ end
+end
+
+def fun_l7_n35(x)
+ if (x < 1)
+ fun_l8_n930(x)
+ else
+ fun_l8_n129(x)
+ end
+end
+
+def fun_l7_n36(x)
+ if (x < 1)
+ fun_l8_n519(x)
+ else
+ fun_l8_n587(x)
+ end
+end
+
+def fun_l7_n37(x)
+ if (x < 1)
+ fun_l8_n132(x)
+ else
+ fun_l8_n547(x)
+ end
+end
+
+def fun_l7_n38(x)
+ if (x < 1)
+ fun_l8_n627(x)
+ else
+ fun_l8_n712(x)
+ end
+end
+
+def fun_l7_n39(x)
+ if (x < 1)
+ fun_l8_n752(x)
+ else
+ fun_l8_n357(x)
+ end
+end
+
+def fun_l7_n40(x)
+ if (x < 1)
+ fun_l8_n542(x)
+ else
+ fun_l8_n86(x)
+ end
+end
+
+def fun_l7_n41(x)
+ if (x < 1)
+ fun_l8_n89(x)
+ else
+ fun_l8_n492(x)
+ end
+end
+
+def fun_l7_n42(x)
+ if (x < 1)
+ fun_l8_n508(x)
+ else
+ fun_l8_n288(x)
+ end
+end
+
+def fun_l7_n43(x)
+ if (x < 1)
+ fun_l8_n627(x)
+ else
+ fun_l8_n632(x)
+ end
+end
+
+def fun_l7_n44(x)
+ if (x < 1)
+ fun_l8_n513(x)
+ else
+ fun_l8_n472(x)
+ end
+end
+
+def fun_l7_n45(x)
+ if (x < 1)
+ fun_l8_n599(x)
+ else
+ fun_l8_n581(x)
+ end
+end
+
+def fun_l7_n46(x)
+ if (x < 1)
+ fun_l8_n907(x)
+ else
+ fun_l8_n71(x)
+ end
+end
+
+def fun_l7_n47(x)
+ if (x < 1)
+ fun_l8_n609(x)
+ else
+ fun_l8_n601(x)
+ end
+end
+
+def fun_l7_n48(x)
+ if (x < 1)
+ fun_l8_n985(x)
+ else
+ fun_l8_n536(x)
+ end
+end
+
+def fun_l7_n49(x)
+ if (x < 1)
+ fun_l8_n220(x)
+ else
+ fun_l8_n6(x)
+ end
+end
+
+def fun_l7_n50(x)
+ if (x < 1)
+ fun_l8_n445(x)
+ else
+ fun_l8_n939(x)
+ end
+end
+
+def fun_l7_n51(x)
+ if (x < 1)
+ fun_l8_n973(x)
+ else
+ fun_l8_n600(x)
+ end
+end
+
+def fun_l7_n52(x)
+ if (x < 1)
+ fun_l8_n377(x)
+ else
+ fun_l8_n70(x)
+ end
+end
+
+def fun_l7_n53(x)
+ if (x < 1)
+ fun_l8_n745(x)
+ else
+ fun_l8_n237(x)
+ end
+end
+
+def fun_l7_n54(x)
+ if (x < 1)
+ fun_l8_n782(x)
+ else
+ fun_l8_n756(x)
+ end
+end
+
+def fun_l7_n55(x)
+ if (x < 1)
+ fun_l8_n438(x)
+ else
+ fun_l8_n892(x)
+ end
+end
+
+def fun_l7_n56(x)
+ if (x < 1)
+ fun_l8_n564(x)
+ else
+ fun_l8_n600(x)
+ end
+end
+
+def fun_l7_n57(x)
+ if (x < 1)
+ fun_l8_n480(x)
+ else
+ fun_l8_n832(x)
+ end
+end
+
+def fun_l7_n58(x)
+ if (x < 1)
+ fun_l8_n239(x)
+ else
+ fun_l8_n540(x)
+ end
+end
+
+def fun_l7_n59(x)
+ if (x < 1)
+ fun_l8_n904(x)
+ else
+ fun_l8_n334(x)
+ end
+end
+
+def fun_l7_n60(x)
+ if (x < 1)
+ fun_l8_n407(x)
+ else
+ fun_l8_n573(x)
+ end
+end
+
+def fun_l7_n61(x)
+ if (x < 1)
+ fun_l8_n705(x)
+ else
+ fun_l8_n202(x)
+ end
+end
+
+def fun_l7_n62(x)
+ if (x < 1)
+ fun_l8_n715(x)
+ else
+ fun_l8_n394(x)
+ end
+end
+
+def fun_l7_n63(x)
+ if (x < 1)
+ fun_l8_n935(x)
+ else
+ fun_l8_n855(x)
+ end
+end
+
+def fun_l7_n64(x)
+ if (x < 1)
+ fun_l8_n525(x)
+ else
+ fun_l8_n81(x)
+ end
+end
+
+def fun_l7_n65(x)
+ if (x < 1)
+ fun_l8_n692(x)
+ else
+ fun_l8_n372(x)
+ end
+end
+
+def fun_l7_n66(x)
+ if (x < 1)
+ fun_l8_n286(x)
+ else
+ fun_l8_n568(x)
+ end
+end
+
+def fun_l7_n67(x)
+ if (x < 1)
+ fun_l8_n829(x)
+ else
+ fun_l8_n89(x)
+ end
+end
+
+def fun_l7_n68(x)
+ if (x < 1)
+ fun_l8_n542(x)
+ else
+ fun_l8_n362(x)
+ end
+end
+
+def fun_l7_n69(x)
+ if (x < 1)
+ fun_l8_n673(x)
+ else
+ fun_l8_n302(x)
+ end
+end
+
+def fun_l7_n70(x)
+ if (x < 1)
+ fun_l8_n649(x)
+ else
+ fun_l8_n890(x)
+ end
+end
+
+def fun_l7_n71(x)
+ if (x < 1)
+ fun_l8_n639(x)
+ else
+ fun_l8_n304(x)
+ end
+end
+
+def fun_l7_n72(x)
+ if (x < 1)
+ fun_l8_n797(x)
+ else
+ fun_l8_n132(x)
+ end
+end
+
+def fun_l7_n73(x)
+ if (x < 1)
+ fun_l8_n301(x)
+ else
+ fun_l8_n328(x)
+ end
+end
+
+def fun_l7_n74(x)
+ if (x < 1)
+ fun_l8_n438(x)
+ else
+ fun_l8_n979(x)
+ end
+end
+
+def fun_l7_n75(x)
+ if (x < 1)
+ fun_l8_n679(x)
+ else
+ fun_l8_n621(x)
+ end
+end
+
+def fun_l7_n76(x)
+ if (x < 1)
+ fun_l8_n136(x)
+ else
+ fun_l8_n916(x)
+ end
+end
+
+def fun_l7_n77(x)
+ if (x < 1)
+ fun_l8_n39(x)
+ else
+ fun_l8_n945(x)
+ end
+end
+
+def fun_l7_n78(x)
+ if (x < 1)
+ fun_l8_n365(x)
+ else
+ fun_l8_n883(x)
+ end
+end
+
+def fun_l7_n79(x)
+ if (x < 1)
+ fun_l8_n514(x)
+ else
+ fun_l8_n989(x)
+ end
+end
+
+def fun_l7_n80(x)
+ if (x < 1)
+ fun_l8_n378(x)
+ else
+ fun_l8_n634(x)
+ end
+end
+
+def fun_l7_n81(x)
+ if (x < 1)
+ fun_l8_n200(x)
+ else
+ fun_l8_n73(x)
+ end
+end
+
+def fun_l7_n82(x)
+ if (x < 1)
+ fun_l8_n521(x)
+ else
+ fun_l8_n848(x)
+ end
+end
+
+def fun_l7_n83(x)
+ if (x < 1)
+ fun_l8_n184(x)
+ else
+ fun_l8_n350(x)
+ end
+end
+
+def fun_l7_n84(x)
+ if (x < 1)
+ fun_l8_n215(x)
+ else
+ fun_l8_n615(x)
+ end
+end
+
+def fun_l7_n85(x)
+ if (x < 1)
+ fun_l8_n556(x)
+ else
+ fun_l8_n723(x)
+ end
+end
+
+def fun_l7_n86(x)
+ if (x < 1)
+ fun_l8_n565(x)
+ else
+ fun_l8_n661(x)
+ end
+end
+
+def fun_l7_n87(x)
+ if (x < 1)
+ fun_l8_n22(x)
+ else
+ fun_l8_n623(x)
+ end
+end
+
+def fun_l7_n88(x)
+ if (x < 1)
+ fun_l8_n995(x)
+ else
+ fun_l8_n30(x)
+ end
+end
+
+def fun_l7_n89(x)
+ if (x < 1)
+ fun_l8_n912(x)
+ else
+ fun_l8_n332(x)
+ end
+end
+
+def fun_l7_n90(x)
+ if (x < 1)
+ fun_l8_n776(x)
+ else
+ fun_l8_n362(x)
+ end
+end
+
+def fun_l7_n91(x)
+ if (x < 1)
+ fun_l8_n720(x)
+ else
+ fun_l8_n512(x)
+ end
+end
+
+def fun_l7_n92(x)
+ if (x < 1)
+ fun_l8_n437(x)
+ else
+ fun_l8_n107(x)
+ end
+end
+
+def fun_l7_n93(x)
+ if (x < 1)
+ fun_l8_n143(x)
+ else
+ fun_l8_n493(x)
+ end
+end
+
+def fun_l7_n94(x)
+ if (x < 1)
+ fun_l8_n814(x)
+ else
+ fun_l8_n892(x)
+ end
+end
+
+def fun_l7_n95(x)
+ if (x < 1)
+ fun_l8_n117(x)
+ else
+ fun_l8_n18(x)
+ end
+end
+
+def fun_l7_n96(x)
+ if (x < 1)
+ fun_l8_n769(x)
+ else
+ fun_l8_n37(x)
+ end
+end
+
+def fun_l7_n97(x)
+ if (x < 1)
+ fun_l8_n213(x)
+ else
+ fun_l8_n5(x)
+ end
+end
+
+def fun_l7_n98(x)
+ if (x < 1)
+ fun_l8_n608(x)
+ else
+ fun_l8_n551(x)
+ end
+end
+
+def fun_l7_n99(x)
+ if (x < 1)
+ fun_l8_n130(x)
+ else
+ fun_l8_n879(x)
+ end
+end
+
+def fun_l7_n100(x)
+ if (x < 1)
+ fun_l8_n279(x)
+ else
+ fun_l8_n786(x)
+ end
+end
+
+def fun_l7_n101(x)
+ if (x < 1)
+ fun_l8_n254(x)
+ else
+ fun_l8_n904(x)
+ end
+end
+
+def fun_l7_n102(x)
+ if (x < 1)
+ fun_l8_n661(x)
+ else
+ fun_l8_n873(x)
+ end
+end
+
+def fun_l7_n103(x)
+ if (x < 1)
+ fun_l8_n360(x)
+ else
+ fun_l8_n793(x)
+ end
+end
+
+def fun_l7_n104(x)
+ if (x < 1)
+ fun_l8_n687(x)
+ else
+ fun_l8_n853(x)
+ end
+end
+
+def fun_l7_n105(x)
+ if (x < 1)
+ fun_l8_n168(x)
+ else
+ fun_l8_n280(x)
+ end
+end
+
+def fun_l7_n106(x)
+ if (x < 1)
+ fun_l8_n742(x)
+ else
+ fun_l8_n222(x)
+ end
+end
+
+def fun_l7_n107(x)
+ if (x < 1)
+ fun_l8_n470(x)
+ else
+ fun_l8_n633(x)
+ end
+end
+
+def fun_l7_n108(x)
+ if (x < 1)
+ fun_l8_n495(x)
+ else
+ fun_l8_n253(x)
+ end
+end
+
+def fun_l7_n109(x)
+ if (x < 1)
+ fun_l8_n369(x)
+ else
+ fun_l8_n108(x)
+ end
+end
+
+def fun_l7_n110(x)
+ if (x < 1)
+ fun_l8_n254(x)
+ else
+ fun_l8_n162(x)
+ end
+end
+
+def fun_l7_n111(x)
+ if (x < 1)
+ fun_l8_n835(x)
+ else
+ fun_l8_n295(x)
+ end
+end
+
+def fun_l7_n112(x)
+ if (x < 1)
+ fun_l8_n349(x)
+ else
+ fun_l8_n432(x)
+ end
+end
+
+def fun_l7_n113(x)
+ if (x < 1)
+ fun_l8_n237(x)
+ else
+ fun_l8_n484(x)
+ end
+end
+
+def fun_l7_n114(x)
+ if (x < 1)
+ fun_l8_n755(x)
+ else
+ fun_l8_n819(x)
+ end
+end
+
+def fun_l7_n115(x)
+ if (x < 1)
+ fun_l8_n542(x)
+ else
+ fun_l8_n587(x)
+ end
+end
+
+def fun_l7_n116(x)
+ if (x < 1)
+ fun_l8_n837(x)
+ else
+ fun_l8_n274(x)
+ end
+end
+
+def fun_l7_n117(x)
+ if (x < 1)
+ fun_l8_n742(x)
+ else
+ fun_l8_n104(x)
+ end
+end
+
+def fun_l7_n118(x)
+ if (x < 1)
+ fun_l8_n201(x)
+ else
+ fun_l8_n340(x)
+ end
+end
+
+def fun_l7_n119(x)
+ if (x < 1)
+ fun_l8_n349(x)
+ else
+ fun_l8_n213(x)
+ end
+end
+
+def fun_l7_n120(x)
+ if (x < 1)
+ fun_l8_n563(x)
+ else
+ fun_l8_n193(x)
+ end
+end
+
+def fun_l7_n121(x)
+ if (x < 1)
+ fun_l8_n738(x)
+ else
+ fun_l8_n600(x)
+ end
+end
+
+def fun_l7_n122(x)
+ if (x < 1)
+ fun_l8_n261(x)
+ else
+ fun_l8_n500(x)
+ end
+end
+
+def fun_l7_n123(x)
+ if (x < 1)
+ fun_l8_n870(x)
+ else
+ fun_l8_n423(x)
+ end
+end
+
+def fun_l7_n124(x)
+ if (x < 1)
+ fun_l8_n405(x)
+ else
+ fun_l8_n749(x)
+ end
+end
+
+def fun_l7_n125(x)
+ if (x < 1)
+ fun_l8_n848(x)
+ else
+ fun_l8_n106(x)
+ end
+end
+
+def fun_l7_n126(x)
+ if (x < 1)
+ fun_l8_n382(x)
+ else
+ fun_l8_n379(x)
+ end
+end
+
+def fun_l7_n127(x)
+ if (x < 1)
+ fun_l8_n232(x)
+ else
+ fun_l8_n913(x)
+ end
+end
+
+def fun_l7_n128(x)
+ if (x < 1)
+ fun_l8_n7(x)
+ else
+ fun_l8_n177(x)
+ end
+end
+
+def fun_l7_n129(x)
+ if (x < 1)
+ fun_l8_n86(x)
+ else
+ fun_l8_n670(x)
+ end
+end
+
+def fun_l7_n130(x)
+ if (x < 1)
+ fun_l8_n361(x)
+ else
+ fun_l8_n132(x)
+ end
+end
+
+def fun_l7_n131(x)
+ if (x < 1)
+ fun_l8_n125(x)
+ else
+ fun_l8_n925(x)
+ end
+end
+
+def fun_l7_n132(x)
+ if (x < 1)
+ fun_l8_n529(x)
+ else
+ fun_l8_n996(x)
+ end
+end
+
+def fun_l7_n133(x)
+ if (x < 1)
+ fun_l8_n283(x)
+ else
+ fun_l8_n112(x)
+ end
+end
+
+def fun_l7_n134(x)
+ if (x < 1)
+ fun_l8_n974(x)
+ else
+ fun_l8_n204(x)
+ end
+end
+
+def fun_l7_n135(x)
+ if (x < 1)
+ fun_l8_n903(x)
+ else
+ fun_l8_n196(x)
+ end
+end
+
+def fun_l7_n136(x)
+ if (x < 1)
+ fun_l8_n59(x)
+ else
+ fun_l8_n505(x)
+ end
+end
+
+def fun_l7_n137(x)
+ if (x < 1)
+ fun_l8_n530(x)
+ else
+ fun_l8_n31(x)
+ end
+end
+
+def fun_l7_n138(x)
+ if (x < 1)
+ fun_l8_n990(x)
+ else
+ fun_l8_n924(x)
+ end
+end
+
+def fun_l7_n139(x)
+ if (x < 1)
+ fun_l8_n202(x)
+ else
+ fun_l8_n980(x)
+ end
+end
+
+def fun_l7_n140(x)
+ if (x < 1)
+ fun_l8_n354(x)
+ else
+ fun_l8_n694(x)
+ end
+end
+
+def fun_l7_n141(x)
+ if (x < 1)
+ fun_l8_n306(x)
+ else
+ fun_l8_n307(x)
+ end
+end
+
+def fun_l7_n142(x)
+ if (x < 1)
+ fun_l8_n433(x)
+ else
+ fun_l8_n902(x)
+ end
+end
+
+def fun_l7_n143(x)
+ if (x < 1)
+ fun_l8_n522(x)
+ else
+ fun_l8_n904(x)
+ end
+end
+
+def fun_l7_n144(x)
+ if (x < 1)
+ fun_l8_n86(x)
+ else
+ fun_l8_n11(x)
+ end
+end
+
+def fun_l7_n145(x)
+ if (x < 1)
+ fun_l8_n147(x)
+ else
+ fun_l8_n21(x)
+ end
+end
+
+def fun_l7_n146(x)
+ if (x < 1)
+ fun_l8_n47(x)
+ else
+ fun_l8_n855(x)
+ end
+end
+
+def fun_l7_n147(x)
+ if (x < 1)
+ fun_l8_n306(x)
+ else
+ fun_l8_n309(x)
+ end
+end
+
+def fun_l7_n148(x)
+ if (x < 1)
+ fun_l8_n757(x)
+ else
+ fun_l8_n83(x)
+ end
+end
+
+def fun_l7_n149(x)
+ if (x < 1)
+ fun_l8_n377(x)
+ else
+ fun_l8_n124(x)
+ end
+end
+
+def fun_l7_n150(x)
+ if (x < 1)
+ fun_l8_n276(x)
+ else
+ fun_l8_n838(x)
+ end
+end
+
+def fun_l7_n151(x)
+ if (x < 1)
+ fun_l8_n148(x)
+ else
+ fun_l8_n653(x)
+ end
+end
+
+def fun_l7_n152(x)
+ if (x < 1)
+ fun_l8_n315(x)
+ else
+ fun_l8_n942(x)
+ end
+end
+
+def fun_l7_n153(x)
+ if (x < 1)
+ fun_l8_n794(x)
+ else
+ fun_l8_n680(x)
+ end
+end
+
+def fun_l7_n154(x)
+ if (x < 1)
+ fun_l8_n508(x)
+ else
+ fun_l8_n427(x)
+ end
+end
+
+def fun_l7_n155(x)
+ if (x < 1)
+ fun_l8_n249(x)
+ else
+ fun_l8_n615(x)
+ end
+end
+
+def fun_l7_n156(x)
+ if (x < 1)
+ fun_l8_n339(x)
+ else
+ fun_l8_n559(x)
+ end
+end
+
+def fun_l7_n157(x)
+ if (x < 1)
+ fun_l8_n772(x)
+ else
+ fun_l8_n128(x)
+ end
+end
+
+def fun_l7_n158(x)
+ if (x < 1)
+ fun_l8_n247(x)
+ else
+ fun_l8_n962(x)
+ end
+end
+
+def fun_l7_n159(x)
+ if (x < 1)
+ fun_l8_n648(x)
+ else
+ fun_l8_n338(x)
+ end
+end
+
+def fun_l7_n160(x)
+ if (x < 1)
+ fun_l8_n104(x)
+ else
+ fun_l8_n757(x)
+ end
+end
+
+def fun_l7_n161(x)
+ if (x < 1)
+ fun_l8_n74(x)
+ else
+ fun_l8_n288(x)
+ end
+end
+
+def fun_l7_n162(x)
+ if (x < 1)
+ fun_l8_n849(x)
+ else
+ fun_l8_n982(x)
+ end
+end
+
+def fun_l7_n163(x)
+ if (x < 1)
+ fun_l8_n433(x)
+ else
+ fun_l8_n442(x)
+ end
+end
+
+def fun_l7_n164(x)
+ if (x < 1)
+ fun_l8_n268(x)
+ else
+ fun_l8_n717(x)
+ end
+end
+
+def fun_l7_n165(x)
+ if (x < 1)
+ fun_l8_n185(x)
+ else
+ fun_l8_n628(x)
+ end
+end
+
+def fun_l7_n166(x)
+ if (x < 1)
+ fun_l8_n207(x)
+ else
+ fun_l8_n830(x)
+ end
+end
+
+def fun_l7_n167(x)
+ if (x < 1)
+ fun_l8_n334(x)
+ else
+ fun_l8_n20(x)
+ end
+end
+
+def fun_l7_n168(x)
+ if (x < 1)
+ fun_l8_n835(x)
+ else
+ fun_l8_n493(x)
+ end
+end
+
+def fun_l7_n169(x)
+ if (x < 1)
+ fun_l8_n559(x)
+ else
+ fun_l8_n561(x)
+ end
+end
+
+def fun_l7_n170(x)
+ if (x < 1)
+ fun_l8_n942(x)
+ else
+ fun_l8_n699(x)
+ end
+end
+
+def fun_l7_n171(x)
+ if (x < 1)
+ fun_l8_n833(x)
+ else
+ fun_l8_n928(x)
+ end
+end
+
+def fun_l7_n172(x)
+ if (x < 1)
+ fun_l8_n564(x)
+ else
+ fun_l8_n972(x)
+ end
+end
+
+def fun_l7_n173(x)
+ if (x < 1)
+ fun_l8_n509(x)
+ else
+ fun_l8_n125(x)
+ end
+end
+
+def fun_l7_n174(x)
+ if (x < 1)
+ fun_l8_n643(x)
+ else
+ fun_l8_n813(x)
+ end
+end
+
+def fun_l7_n175(x)
+ if (x < 1)
+ fun_l8_n763(x)
+ else
+ fun_l8_n22(x)
+ end
+end
+
+def fun_l7_n176(x)
+ if (x < 1)
+ fun_l8_n955(x)
+ else
+ fun_l8_n579(x)
+ end
+end
+
+def fun_l7_n177(x)
+ if (x < 1)
+ fun_l8_n577(x)
+ else
+ fun_l8_n675(x)
+ end
+end
+
+def fun_l7_n178(x)
+ if (x < 1)
+ fun_l8_n867(x)
+ else
+ fun_l8_n119(x)
+ end
+end
+
+def fun_l7_n179(x)
+ if (x < 1)
+ fun_l8_n357(x)
+ else
+ fun_l8_n543(x)
+ end
+end
+
+def fun_l7_n180(x)
+ if (x < 1)
+ fun_l8_n67(x)
+ else
+ fun_l8_n643(x)
+ end
+end
+
+def fun_l7_n181(x)
+ if (x < 1)
+ fun_l8_n377(x)
+ else
+ fun_l8_n931(x)
+ end
+end
+
+def fun_l7_n182(x)
+ if (x < 1)
+ fun_l8_n962(x)
+ else
+ fun_l8_n220(x)
+ end
+end
+
+def fun_l7_n183(x)
+ if (x < 1)
+ fun_l8_n568(x)
+ else
+ fun_l8_n702(x)
+ end
+end
+
+def fun_l7_n184(x)
+ if (x < 1)
+ fun_l8_n877(x)
+ else
+ fun_l8_n376(x)
+ end
+end
+
+def fun_l7_n185(x)
+ if (x < 1)
+ fun_l8_n171(x)
+ else
+ fun_l8_n981(x)
+ end
+end
+
+def fun_l7_n186(x)
+ if (x < 1)
+ fun_l8_n283(x)
+ else
+ fun_l8_n81(x)
+ end
+end
+
+def fun_l7_n187(x)
+ if (x < 1)
+ fun_l8_n318(x)
+ else
+ fun_l8_n7(x)
+ end
+end
+
+def fun_l7_n188(x)
+ if (x < 1)
+ fun_l8_n874(x)
+ else
+ fun_l8_n879(x)
+ end
+end
+
+def fun_l7_n189(x)
+ if (x < 1)
+ fun_l8_n944(x)
+ else
+ fun_l8_n881(x)
+ end
+end
+
+def fun_l7_n190(x)
+ if (x < 1)
+ fun_l8_n462(x)
+ else
+ fun_l8_n570(x)
+ end
+end
+
+def fun_l7_n191(x)
+ if (x < 1)
+ fun_l8_n815(x)
+ else
+ fun_l8_n957(x)
+ end
+end
+
+def fun_l7_n192(x)
+ if (x < 1)
+ fun_l8_n217(x)
+ else
+ fun_l8_n93(x)
+ end
+end
+
+def fun_l7_n193(x)
+ if (x < 1)
+ fun_l8_n708(x)
+ else
+ fun_l8_n323(x)
+ end
+end
+
+def fun_l7_n194(x)
+ if (x < 1)
+ fun_l8_n420(x)
+ else
+ fun_l8_n375(x)
+ end
+end
+
+def fun_l7_n195(x)
+ if (x < 1)
+ fun_l8_n307(x)
+ else
+ fun_l8_n427(x)
+ end
+end
+
+def fun_l7_n196(x)
+ if (x < 1)
+ fun_l8_n200(x)
+ else
+ fun_l8_n265(x)
+ end
+end
+
+def fun_l7_n197(x)
+ if (x < 1)
+ fun_l8_n547(x)
+ else
+ fun_l8_n687(x)
+ end
+end
+
+def fun_l7_n198(x)
+ if (x < 1)
+ fun_l8_n693(x)
+ else
+ fun_l8_n420(x)
+ end
+end
+
+def fun_l7_n199(x)
+ if (x < 1)
+ fun_l8_n297(x)
+ else
+ fun_l8_n702(x)
+ end
+end
+
+def fun_l7_n200(x)
+ if (x < 1)
+ fun_l8_n418(x)
+ else
+ fun_l8_n758(x)
+ end
+end
+
+def fun_l7_n201(x)
+ if (x < 1)
+ fun_l8_n204(x)
+ else
+ fun_l8_n572(x)
+ end
+end
+
+def fun_l7_n202(x)
+ if (x < 1)
+ fun_l8_n847(x)
+ else
+ fun_l8_n440(x)
+ end
+end
+
+def fun_l7_n203(x)
+ if (x < 1)
+ fun_l8_n692(x)
+ else
+ fun_l8_n595(x)
+ end
+end
+
+def fun_l7_n204(x)
+ if (x < 1)
+ fun_l8_n922(x)
+ else
+ fun_l8_n618(x)
+ end
+end
+
+def fun_l7_n205(x)
+ if (x < 1)
+ fun_l8_n431(x)
+ else
+ fun_l8_n135(x)
+ end
+end
+
+def fun_l7_n206(x)
+ if (x < 1)
+ fun_l8_n92(x)
+ else
+ fun_l8_n370(x)
+ end
+end
+
+def fun_l7_n207(x)
+ if (x < 1)
+ fun_l8_n141(x)
+ else
+ fun_l8_n845(x)
+ end
+end
+
+def fun_l7_n208(x)
+ if (x < 1)
+ fun_l8_n552(x)
+ else
+ fun_l8_n917(x)
+ end
+end
+
+def fun_l7_n209(x)
+ if (x < 1)
+ fun_l8_n341(x)
+ else
+ fun_l8_n794(x)
+ end
+end
+
+def fun_l7_n210(x)
+ if (x < 1)
+ fun_l8_n772(x)
+ else
+ fun_l8_n515(x)
+ end
+end
+
+def fun_l7_n211(x)
+ if (x < 1)
+ fun_l8_n37(x)
+ else
+ fun_l8_n635(x)
+ end
+end
+
+def fun_l7_n212(x)
+ if (x < 1)
+ fun_l8_n164(x)
+ else
+ fun_l8_n506(x)
+ end
+end
+
+def fun_l7_n213(x)
+ if (x < 1)
+ fun_l8_n621(x)
+ else
+ fun_l8_n722(x)
+ end
+end
+
+def fun_l7_n214(x)
+ if (x < 1)
+ fun_l8_n475(x)
+ else
+ fun_l8_n854(x)
+ end
+end
+
+def fun_l7_n215(x)
+ if (x < 1)
+ fun_l8_n275(x)
+ else
+ fun_l8_n884(x)
+ end
+end
+
+def fun_l7_n216(x)
+ if (x < 1)
+ fun_l8_n894(x)
+ else
+ fun_l8_n424(x)
+ end
+end
+
+def fun_l7_n217(x)
+ if (x < 1)
+ fun_l8_n32(x)
+ else
+ fun_l8_n698(x)
+ end
+end
+
+def fun_l7_n218(x)
+ if (x < 1)
+ fun_l8_n707(x)
+ else
+ fun_l8_n728(x)
+ end
+end
+
+def fun_l7_n219(x)
+ if (x < 1)
+ fun_l8_n294(x)
+ else
+ fun_l8_n583(x)
+ end
+end
+
+def fun_l7_n220(x)
+ if (x < 1)
+ fun_l8_n577(x)
+ else
+ fun_l8_n881(x)
+ end
+end
+
+def fun_l7_n221(x)
+ if (x < 1)
+ fun_l8_n925(x)
+ else
+ fun_l8_n177(x)
+ end
+end
+
+def fun_l7_n222(x)
+ if (x < 1)
+ fun_l8_n495(x)
+ else
+ fun_l8_n407(x)
+ end
+end
+
+def fun_l7_n223(x)
+ if (x < 1)
+ fun_l8_n11(x)
+ else
+ fun_l8_n753(x)
+ end
+end
+
+def fun_l7_n224(x)
+ if (x < 1)
+ fun_l8_n366(x)
+ else
+ fun_l8_n546(x)
+ end
+end
+
+def fun_l7_n225(x)
+ if (x < 1)
+ fun_l8_n644(x)
+ else
+ fun_l8_n728(x)
+ end
+end
+
+def fun_l7_n226(x)
+ if (x < 1)
+ fun_l8_n731(x)
+ else
+ fun_l8_n292(x)
+ end
+end
+
+def fun_l7_n227(x)
+ if (x < 1)
+ fun_l8_n653(x)
+ else
+ fun_l8_n521(x)
+ end
+end
+
+def fun_l7_n228(x)
+ if (x < 1)
+ fun_l8_n479(x)
+ else
+ fun_l8_n329(x)
+ end
+end
+
+def fun_l7_n229(x)
+ if (x < 1)
+ fun_l8_n151(x)
+ else
+ fun_l8_n620(x)
+ end
+end
+
+def fun_l7_n230(x)
+ if (x < 1)
+ fun_l8_n58(x)
+ else
+ fun_l8_n369(x)
+ end
+end
+
+def fun_l7_n231(x)
+ if (x < 1)
+ fun_l8_n301(x)
+ else
+ fun_l8_n869(x)
+ end
+end
+
+def fun_l7_n232(x)
+ if (x < 1)
+ fun_l8_n810(x)
+ else
+ fun_l8_n376(x)
+ end
+end
+
+def fun_l7_n233(x)
+ if (x < 1)
+ fun_l8_n19(x)
+ else
+ fun_l8_n278(x)
+ end
+end
+
+def fun_l7_n234(x)
+ if (x < 1)
+ fun_l8_n509(x)
+ else
+ fun_l8_n826(x)
+ end
+end
+
+def fun_l7_n235(x)
+ if (x < 1)
+ fun_l8_n302(x)
+ else
+ fun_l8_n62(x)
+ end
+end
+
+def fun_l7_n236(x)
+ if (x < 1)
+ fun_l8_n289(x)
+ else
+ fun_l8_n935(x)
+ end
+end
+
+def fun_l7_n237(x)
+ if (x < 1)
+ fun_l8_n217(x)
+ else
+ fun_l8_n652(x)
+ end
+end
+
+def fun_l7_n238(x)
+ if (x < 1)
+ fun_l8_n234(x)
+ else
+ fun_l8_n720(x)
+ end
+end
+
+def fun_l7_n239(x)
+ if (x < 1)
+ fun_l8_n158(x)
+ else
+ fun_l8_n409(x)
+ end
+end
+
+def fun_l7_n240(x)
+ if (x < 1)
+ fun_l8_n478(x)
+ else
+ fun_l8_n855(x)
+ end
+end
+
+def fun_l7_n241(x)
+ if (x < 1)
+ fun_l8_n115(x)
+ else
+ fun_l8_n406(x)
+ end
+end
+
+def fun_l7_n242(x)
+ if (x < 1)
+ fun_l8_n278(x)
+ else
+ fun_l8_n12(x)
+ end
+end
+
+def fun_l7_n243(x)
+ if (x < 1)
+ fun_l8_n273(x)
+ else
+ fun_l8_n243(x)
+ end
+end
+
+def fun_l7_n244(x)
+ if (x < 1)
+ fun_l8_n909(x)
+ else
+ fun_l8_n721(x)
+ end
+end
+
+def fun_l7_n245(x)
+ if (x < 1)
+ fun_l8_n415(x)
+ else
+ fun_l8_n530(x)
+ end
+end
+
+def fun_l7_n246(x)
+ if (x < 1)
+ fun_l8_n770(x)
+ else
+ fun_l8_n413(x)
+ end
+end
+
+def fun_l7_n247(x)
+ if (x < 1)
+ fun_l8_n228(x)
+ else
+ fun_l8_n668(x)
+ end
+end
+
+def fun_l7_n248(x)
+ if (x < 1)
+ fun_l8_n174(x)
+ else
+ fun_l8_n559(x)
+ end
+end
+
+def fun_l7_n249(x)
+ if (x < 1)
+ fun_l8_n740(x)
+ else
+ fun_l8_n314(x)
+ end
+end
+
+def fun_l7_n250(x)
+ if (x < 1)
+ fun_l8_n293(x)
+ else
+ fun_l8_n835(x)
+ end
+end
+
+def fun_l7_n251(x)
+ if (x < 1)
+ fun_l8_n584(x)
+ else
+ fun_l8_n755(x)
+ end
+end
+
+def fun_l7_n252(x)
+ if (x < 1)
+ fun_l8_n792(x)
+ else
+ fun_l8_n135(x)
+ end
+end
+
+def fun_l7_n253(x)
+ if (x < 1)
+ fun_l8_n116(x)
+ else
+ fun_l8_n82(x)
+ end
+end
+
+def fun_l7_n254(x)
+ if (x < 1)
+ fun_l8_n478(x)
+ else
+ fun_l8_n809(x)
+ end
+end
+
+def fun_l7_n255(x)
+ if (x < 1)
+ fun_l8_n924(x)
+ else
+ fun_l8_n708(x)
+ end
+end
+
+def fun_l7_n256(x)
+ if (x < 1)
+ fun_l8_n345(x)
+ else
+ fun_l8_n669(x)
+ end
+end
+
+def fun_l7_n257(x)
+ if (x < 1)
+ fun_l8_n300(x)
+ else
+ fun_l8_n166(x)
+ end
+end
+
+def fun_l7_n258(x)
+ if (x < 1)
+ fun_l8_n979(x)
+ else
+ fun_l8_n894(x)
+ end
+end
+
+def fun_l7_n259(x)
+ if (x < 1)
+ fun_l8_n765(x)
+ else
+ fun_l8_n838(x)
+ end
+end
+
+def fun_l7_n260(x)
+ if (x < 1)
+ fun_l8_n812(x)
+ else
+ fun_l8_n472(x)
+ end
+end
+
+def fun_l7_n261(x)
+ if (x < 1)
+ fun_l8_n220(x)
+ else
+ fun_l8_n106(x)
+ end
+end
+
+def fun_l7_n262(x)
+ if (x < 1)
+ fun_l8_n727(x)
+ else
+ fun_l8_n783(x)
+ end
+end
+
+def fun_l7_n263(x)
+ if (x < 1)
+ fun_l8_n560(x)
+ else
+ fun_l8_n781(x)
+ end
+end
+
+def fun_l7_n264(x)
+ if (x < 1)
+ fun_l8_n709(x)
+ else
+ fun_l8_n33(x)
+ end
+end
+
+def fun_l7_n265(x)
+ if (x < 1)
+ fun_l8_n904(x)
+ else
+ fun_l8_n64(x)
+ end
+end
+
+def fun_l7_n266(x)
+ if (x < 1)
+ fun_l8_n944(x)
+ else
+ fun_l8_n652(x)
+ end
+end
+
+def fun_l7_n267(x)
+ if (x < 1)
+ fun_l8_n154(x)
+ else
+ fun_l8_n103(x)
+ end
+end
+
+def fun_l7_n268(x)
+ if (x < 1)
+ fun_l8_n55(x)
+ else
+ fun_l8_n841(x)
+ end
+end
+
+def fun_l7_n269(x)
+ if (x < 1)
+ fun_l8_n914(x)
+ else
+ fun_l8_n108(x)
+ end
+end
+
+def fun_l7_n270(x)
+ if (x < 1)
+ fun_l8_n733(x)
+ else
+ fun_l8_n398(x)
+ end
+end
+
+def fun_l7_n271(x)
+ if (x < 1)
+ fun_l8_n145(x)
+ else
+ fun_l8_n735(x)
+ end
+end
+
+def fun_l7_n272(x)
+ if (x < 1)
+ fun_l8_n404(x)
+ else
+ fun_l8_n216(x)
+ end
+end
+
+def fun_l7_n273(x)
+ if (x < 1)
+ fun_l8_n380(x)
+ else
+ fun_l8_n798(x)
+ end
+end
+
+def fun_l7_n274(x)
+ if (x < 1)
+ fun_l8_n63(x)
+ else
+ fun_l8_n133(x)
+ end
+end
+
+def fun_l7_n275(x)
+ if (x < 1)
+ fun_l8_n878(x)
+ else
+ fun_l8_n284(x)
+ end
+end
+
+def fun_l7_n276(x)
+ if (x < 1)
+ fun_l8_n718(x)
+ else
+ fun_l8_n97(x)
+ end
+end
+
+def fun_l7_n277(x)
+ if (x < 1)
+ fun_l8_n509(x)
+ else
+ fun_l8_n695(x)
+ end
+end
+
+def fun_l7_n278(x)
+ if (x < 1)
+ fun_l8_n581(x)
+ else
+ fun_l8_n898(x)
+ end
+end
+
+def fun_l7_n279(x)
+ if (x < 1)
+ fun_l8_n247(x)
+ else
+ fun_l8_n526(x)
+ end
+end
+
+def fun_l7_n280(x)
+ if (x < 1)
+ fun_l8_n995(x)
+ else
+ fun_l8_n614(x)
+ end
+end
+
+def fun_l7_n281(x)
+ if (x < 1)
+ fun_l8_n301(x)
+ else
+ fun_l8_n178(x)
+ end
+end
+
+def fun_l7_n282(x)
+ if (x < 1)
+ fun_l8_n575(x)
+ else
+ fun_l8_n712(x)
+ end
+end
+
+def fun_l7_n283(x)
+ if (x < 1)
+ fun_l8_n261(x)
+ else
+ fun_l8_n740(x)
+ end
+end
+
+def fun_l7_n284(x)
+ if (x < 1)
+ fun_l8_n284(x)
+ else
+ fun_l8_n350(x)
+ end
+end
+
+def fun_l7_n285(x)
+ if (x < 1)
+ fun_l8_n521(x)
+ else
+ fun_l8_n305(x)
+ end
+end
+
+def fun_l7_n286(x)
+ if (x < 1)
+ fun_l8_n271(x)
+ else
+ fun_l8_n728(x)
+ end
+end
+
+def fun_l7_n287(x)
+ if (x < 1)
+ fun_l8_n292(x)
+ else
+ fun_l8_n799(x)
+ end
+end
+
+def fun_l7_n288(x)
+ if (x < 1)
+ fun_l8_n852(x)
+ else
+ fun_l8_n606(x)
+ end
+end
+
+def fun_l7_n289(x)
+ if (x < 1)
+ fun_l8_n177(x)
+ else
+ fun_l8_n649(x)
+ end
+end
+
+def fun_l7_n290(x)
+ if (x < 1)
+ fun_l8_n488(x)
+ else
+ fun_l8_n252(x)
+ end
+end
+
+def fun_l7_n291(x)
+ if (x < 1)
+ fun_l8_n815(x)
+ else
+ fun_l8_n690(x)
+ end
+end
+
+def fun_l7_n292(x)
+ if (x < 1)
+ fun_l8_n999(x)
+ else
+ fun_l8_n700(x)
+ end
+end
+
+def fun_l7_n293(x)
+ if (x < 1)
+ fun_l8_n9(x)
+ else
+ fun_l8_n194(x)
+ end
+end
+
+def fun_l7_n294(x)
+ if (x < 1)
+ fun_l8_n390(x)
+ else
+ fun_l8_n772(x)
+ end
+end
+
+def fun_l7_n295(x)
+ if (x < 1)
+ fun_l8_n487(x)
+ else
+ fun_l8_n587(x)
+ end
+end
+
+def fun_l7_n296(x)
+ if (x < 1)
+ fun_l8_n892(x)
+ else
+ fun_l8_n126(x)
+ end
+end
+
+def fun_l7_n297(x)
+ if (x < 1)
+ fun_l8_n199(x)
+ else
+ fun_l8_n312(x)
+ end
+end
+
+def fun_l7_n298(x)
+ if (x < 1)
+ fun_l8_n534(x)
+ else
+ fun_l8_n640(x)
+ end
+end
+
+def fun_l7_n299(x)
+ if (x < 1)
+ fun_l8_n344(x)
+ else
+ fun_l8_n58(x)
+ end
+end
+
+def fun_l7_n300(x)
+ if (x < 1)
+ fun_l8_n11(x)
+ else
+ fun_l8_n922(x)
+ end
+end
+
+def fun_l7_n301(x)
+ if (x < 1)
+ fun_l8_n848(x)
+ else
+ fun_l8_n432(x)
+ end
+end
+
+def fun_l7_n302(x)
+ if (x < 1)
+ fun_l8_n292(x)
+ else
+ fun_l8_n86(x)
+ end
+end
+
+def fun_l7_n303(x)
+ if (x < 1)
+ fun_l8_n239(x)
+ else
+ fun_l8_n296(x)
+ end
+end
+
+def fun_l7_n304(x)
+ if (x < 1)
+ fun_l8_n350(x)
+ else
+ fun_l8_n769(x)
+ end
+end
+
+def fun_l7_n305(x)
+ if (x < 1)
+ fun_l8_n959(x)
+ else
+ fun_l8_n292(x)
+ end
+end
+
+def fun_l7_n306(x)
+ if (x < 1)
+ fun_l8_n371(x)
+ else
+ fun_l8_n507(x)
+ end
+end
+
+def fun_l7_n307(x)
+ if (x < 1)
+ fun_l8_n910(x)
+ else
+ fun_l8_n542(x)
+ end
+end
+
+def fun_l7_n308(x)
+ if (x < 1)
+ fun_l8_n299(x)
+ else
+ fun_l8_n543(x)
+ end
+end
+
+def fun_l7_n309(x)
+ if (x < 1)
+ fun_l8_n145(x)
+ else
+ fun_l8_n393(x)
+ end
+end
+
+def fun_l7_n310(x)
+ if (x < 1)
+ fun_l8_n1(x)
+ else
+ fun_l8_n810(x)
+ end
+end
+
+def fun_l7_n311(x)
+ if (x < 1)
+ fun_l8_n866(x)
+ else
+ fun_l8_n519(x)
+ end
+end
+
+def fun_l7_n312(x)
+ if (x < 1)
+ fun_l8_n730(x)
+ else
+ fun_l8_n912(x)
+ end
+end
+
+def fun_l7_n313(x)
+ if (x < 1)
+ fun_l8_n128(x)
+ else
+ fun_l8_n3(x)
+ end
+end
+
+def fun_l7_n314(x)
+ if (x < 1)
+ fun_l8_n393(x)
+ else
+ fun_l8_n887(x)
+ end
+end
+
+def fun_l7_n315(x)
+ if (x < 1)
+ fun_l8_n165(x)
+ else
+ fun_l8_n540(x)
+ end
+end
+
+def fun_l7_n316(x)
+ if (x < 1)
+ fun_l8_n641(x)
+ else
+ fun_l8_n255(x)
+ end
+end
+
+def fun_l7_n317(x)
+ if (x < 1)
+ fun_l8_n265(x)
+ else
+ fun_l8_n754(x)
+ end
+end
+
+def fun_l7_n318(x)
+ if (x < 1)
+ fun_l8_n433(x)
+ else
+ fun_l8_n163(x)
+ end
+end
+
+def fun_l7_n319(x)
+ if (x < 1)
+ fun_l8_n409(x)
+ else
+ fun_l8_n110(x)
+ end
+end
+
+def fun_l7_n320(x)
+ if (x < 1)
+ fun_l8_n704(x)
+ else
+ fun_l8_n959(x)
+ end
+end
+
+def fun_l7_n321(x)
+ if (x < 1)
+ fun_l8_n334(x)
+ else
+ fun_l8_n280(x)
+ end
+end
+
+def fun_l7_n322(x)
+ if (x < 1)
+ fun_l8_n107(x)
+ else
+ fun_l8_n403(x)
+ end
+end
+
+def fun_l7_n323(x)
+ if (x < 1)
+ fun_l8_n108(x)
+ else
+ fun_l8_n426(x)
+ end
+end
+
+def fun_l7_n324(x)
+ if (x < 1)
+ fun_l8_n310(x)
+ else
+ fun_l8_n968(x)
+ end
+end
+
+def fun_l7_n325(x)
+ if (x < 1)
+ fun_l8_n600(x)
+ else
+ fun_l8_n850(x)
+ end
+end
+
+def fun_l7_n326(x)
+ if (x < 1)
+ fun_l8_n736(x)
+ else
+ fun_l8_n61(x)
+ end
+end
+
+def fun_l7_n327(x)
+ if (x < 1)
+ fun_l8_n86(x)
+ else
+ fun_l8_n948(x)
+ end
+end
+
+def fun_l7_n328(x)
+ if (x < 1)
+ fun_l8_n625(x)
+ else
+ fun_l8_n644(x)
+ end
+end
+
+def fun_l7_n329(x)
+ if (x < 1)
+ fun_l8_n507(x)
+ else
+ fun_l8_n624(x)
+ end
+end
+
+def fun_l7_n330(x)
+ if (x < 1)
+ fun_l8_n956(x)
+ else
+ fun_l8_n281(x)
+ end
+end
+
+def fun_l7_n331(x)
+ if (x < 1)
+ fun_l8_n72(x)
+ else
+ fun_l8_n695(x)
+ end
+end
+
+def fun_l7_n332(x)
+ if (x < 1)
+ fun_l8_n445(x)
+ else
+ fun_l8_n465(x)
+ end
+end
+
+def fun_l7_n333(x)
+ if (x < 1)
+ fun_l8_n438(x)
+ else
+ fun_l8_n698(x)
+ end
+end
+
+def fun_l7_n334(x)
+ if (x < 1)
+ fun_l8_n242(x)
+ else
+ fun_l8_n837(x)
+ end
+end
+
+def fun_l7_n335(x)
+ if (x < 1)
+ fun_l8_n299(x)
+ else
+ fun_l8_n312(x)
+ end
+end
+
+def fun_l7_n336(x)
+ if (x < 1)
+ fun_l8_n264(x)
+ else
+ fun_l8_n330(x)
+ end
+end
+
+def fun_l7_n337(x)
+ if (x < 1)
+ fun_l8_n251(x)
+ else
+ fun_l8_n459(x)
+ end
+end
+
+def fun_l7_n338(x)
+ if (x < 1)
+ fun_l8_n876(x)
+ else
+ fun_l8_n689(x)
+ end
+end
+
+def fun_l7_n339(x)
+ if (x < 1)
+ fun_l8_n547(x)
+ else
+ fun_l8_n745(x)
+ end
+end
+
+def fun_l7_n340(x)
+ if (x < 1)
+ fun_l8_n493(x)
+ else
+ fun_l8_n877(x)
+ end
+end
+
+def fun_l7_n341(x)
+ if (x < 1)
+ fun_l8_n143(x)
+ else
+ fun_l8_n429(x)
+ end
+end
+
+def fun_l7_n342(x)
+ if (x < 1)
+ fun_l8_n650(x)
+ else
+ fun_l8_n384(x)
+ end
+end
+
+def fun_l7_n343(x)
+ if (x < 1)
+ fun_l8_n897(x)
+ else
+ fun_l8_n980(x)
+ end
+end
+
+def fun_l7_n344(x)
+ if (x < 1)
+ fun_l8_n699(x)
+ else
+ fun_l8_n673(x)
+ end
+end
+
+def fun_l7_n345(x)
+ if (x < 1)
+ fun_l8_n25(x)
+ else
+ fun_l8_n342(x)
+ end
+end
+
+def fun_l7_n346(x)
+ if (x < 1)
+ fun_l8_n345(x)
+ else
+ fun_l8_n140(x)
+ end
+end
+
+def fun_l7_n347(x)
+ if (x < 1)
+ fun_l8_n661(x)
+ else
+ fun_l8_n153(x)
+ end
+end
+
+def fun_l7_n348(x)
+ if (x < 1)
+ fun_l8_n477(x)
+ else
+ fun_l8_n460(x)
+ end
+end
+
+def fun_l7_n349(x)
+ if (x < 1)
+ fun_l8_n641(x)
+ else
+ fun_l8_n941(x)
+ end
+end
+
+def fun_l7_n350(x)
+ if (x < 1)
+ fun_l8_n952(x)
+ else
+ fun_l8_n886(x)
+ end
+end
+
+def fun_l7_n351(x)
+ if (x < 1)
+ fun_l8_n338(x)
+ else
+ fun_l8_n655(x)
+ end
+end
+
+def fun_l7_n352(x)
+ if (x < 1)
+ fun_l8_n717(x)
+ else
+ fun_l8_n314(x)
+ end
+end
+
+def fun_l7_n353(x)
+ if (x < 1)
+ fun_l8_n645(x)
+ else
+ fun_l8_n464(x)
+ end
+end
+
+def fun_l7_n354(x)
+ if (x < 1)
+ fun_l8_n114(x)
+ else
+ fun_l8_n816(x)
+ end
+end
+
+def fun_l7_n355(x)
+ if (x < 1)
+ fun_l8_n18(x)
+ else
+ fun_l8_n173(x)
+ end
+end
+
+def fun_l7_n356(x)
+ if (x < 1)
+ fun_l8_n214(x)
+ else
+ fun_l8_n834(x)
+ end
+end
+
+def fun_l7_n357(x)
+ if (x < 1)
+ fun_l8_n6(x)
+ else
+ fun_l8_n161(x)
+ end
+end
+
+def fun_l7_n358(x)
+ if (x < 1)
+ fun_l8_n823(x)
+ else
+ fun_l8_n695(x)
+ end
+end
+
+def fun_l7_n359(x)
+ if (x < 1)
+ fun_l8_n86(x)
+ else
+ fun_l8_n174(x)
+ end
+end
+
+def fun_l7_n360(x)
+ if (x < 1)
+ fun_l8_n904(x)
+ else
+ fun_l8_n240(x)
+ end
+end
+
+def fun_l7_n361(x)
+ if (x < 1)
+ fun_l8_n857(x)
+ else
+ fun_l8_n494(x)
+ end
+end
+
+def fun_l7_n362(x)
+ if (x < 1)
+ fun_l8_n780(x)
+ else
+ fun_l8_n186(x)
+ end
+end
+
+def fun_l7_n363(x)
+ if (x < 1)
+ fun_l8_n575(x)
+ else
+ fun_l8_n510(x)
+ end
+end
+
+def fun_l7_n364(x)
+ if (x < 1)
+ fun_l8_n412(x)
+ else
+ fun_l8_n153(x)
+ end
+end
+
+def fun_l7_n365(x)
+ if (x < 1)
+ fun_l8_n840(x)
+ else
+ fun_l8_n199(x)
+ end
+end
+
+def fun_l7_n366(x)
+ if (x < 1)
+ fun_l8_n162(x)
+ else
+ fun_l8_n971(x)
+ end
+end
+
+def fun_l7_n367(x)
+ if (x < 1)
+ fun_l8_n200(x)
+ else
+ fun_l8_n128(x)
+ end
+end
+
+def fun_l7_n368(x)
+ if (x < 1)
+ fun_l8_n404(x)
+ else
+ fun_l8_n657(x)
+ end
+end
+
+def fun_l7_n369(x)
+ if (x < 1)
+ fun_l8_n631(x)
+ else
+ fun_l8_n714(x)
+ end
+end
+
+def fun_l7_n370(x)
+ if (x < 1)
+ fun_l8_n177(x)
+ else
+ fun_l8_n718(x)
+ end
+end
+
+def fun_l7_n371(x)
+ if (x < 1)
+ fun_l8_n284(x)
+ else
+ fun_l8_n474(x)
+ end
+end
+
+def fun_l7_n372(x)
+ if (x < 1)
+ fun_l8_n138(x)
+ else
+ fun_l8_n461(x)
+ end
+end
+
+def fun_l7_n373(x)
+ if (x < 1)
+ fun_l8_n78(x)
+ else
+ fun_l8_n750(x)
+ end
+end
+
+def fun_l7_n374(x)
+ if (x < 1)
+ fun_l8_n231(x)
+ else
+ fun_l8_n648(x)
+ end
+end
+
+def fun_l7_n375(x)
+ if (x < 1)
+ fun_l8_n238(x)
+ else
+ fun_l8_n644(x)
+ end
+end
+
+def fun_l7_n376(x)
+ if (x < 1)
+ fun_l8_n509(x)
+ else
+ fun_l8_n354(x)
+ end
+end
+
+def fun_l7_n377(x)
+ if (x < 1)
+ fun_l8_n332(x)
+ else
+ fun_l8_n739(x)
+ end
+end
+
+def fun_l7_n378(x)
+ if (x < 1)
+ fun_l8_n76(x)
+ else
+ fun_l8_n505(x)
+ end
+end
+
+def fun_l7_n379(x)
+ if (x < 1)
+ fun_l8_n781(x)
+ else
+ fun_l8_n918(x)
+ end
+end
+
+def fun_l7_n380(x)
+ if (x < 1)
+ fun_l8_n854(x)
+ else
+ fun_l8_n85(x)
+ end
+end
+
+def fun_l7_n381(x)
+ if (x < 1)
+ fun_l8_n67(x)
+ else
+ fun_l8_n690(x)
+ end
+end
+
+def fun_l7_n382(x)
+ if (x < 1)
+ fun_l8_n425(x)
+ else
+ fun_l8_n643(x)
+ end
+end
+
+def fun_l7_n383(x)
+ if (x < 1)
+ fun_l8_n210(x)
+ else
+ fun_l8_n533(x)
+ end
+end
+
+def fun_l7_n384(x)
+ if (x < 1)
+ fun_l8_n469(x)
+ else
+ fun_l8_n274(x)
+ end
+end
+
+def fun_l7_n385(x)
+ if (x < 1)
+ fun_l8_n398(x)
+ else
+ fun_l8_n583(x)
+ end
+end
+
+def fun_l7_n386(x)
+ if (x < 1)
+ fun_l8_n483(x)
+ else
+ fun_l8_n80(x)
+ end
+end
+
+def fun_l7_n387(x)
+ if (x < 1)
+ fun_l8_n557(x)
+ else
+ fun_l8_n165(x)
+ end
+end
+
+def fun_l7_n388(x)
+ if (x < 1)
+ fun_l8_n335(x)
+ else
+ fun_l8_n656(x)
+ end
+end
+
+def fun_l7_n389(x)
+ if (x < 1)
+ fun_l8_n810(x)
+ else
+ fun_l8_n586(x)
+ end
+end
+
+def fun_l7_n390(x)
+ if (x < 1)
+ fun_l8_n90(x)
+ else
+ fun_l8_n788(x)
+ end
+end
+
+def fun_l7_n391(x)
+ if (x < 1)
+ fun_l8_n709(x)
+ else
+ fun_l8_n948(x)
+ end
+end
+
+def fun_l7_n392(x)
+ if (x < 1)
+ fun_l8_n139(x)
+ else
+ fun_l8_n631(x)
+ end
+end
+
+def fun_l7_n393(x)
+ if (x < 1)
+ fun_l8_n341(x)
+ else
+ fun_l8_n506(x)
+ end
+end
+
+def fun_l7_n394(x)
+ if (x < 1)
+ fun_l8_n435(x)
+ else
+ fun_l8_n178(x)
+ end
+end
+
+def fun_l7_n395(x)
+ if (x < 1)
+ fun_l8_n186(x)
+ else
+ fun_l8_n58(x)
+ end
+end
+
+def fun_l7_n396(x)
+ if (x < 1)
+ fun_l8_n512(x)
+ else
+ fun_l8_n87(x)
+ end
+end
+
+def fun_l7_n397(x)
+ if (x < 1)
+ fun_l8_n763(x)
+ else
+ fun_l8_n220(x)
+ end
+end
+
+def fun_l7_n398(x)
+ if (x < 1)
+ fun_l8_n266(x)
+ else
+ fun_l8_n231(x)
+ end
+end
+
+def fun_l7_n399(x)
+ if (x < 1)
+ fun_l8_n308(x)
+ else
+ fun_l8_n512(x)
+ end
+end
+
+def fun_l7_n400(x)
+ if (x < 1)
+ fun_l8_n26(x)
+ else
+ fun_l8_n228(x)
+ end
+end
+
+def fun_l7_n401(x)
+ if (x < 1)
+ fun_l8_n143(x)
+ else
+ fun_l8_n826(x)
+ end
+end
+
+def fun_l7_n402(x)
+ if (x < 1)
+ fun_l8_n893(x)
+ else
+ fun_l8_n334(x)
+ end
+end
+
+def fun_l7_n403(x)
+ if (x < 1)
+ fun_l8_n9(x)
+ else
+ fun_l8_n867(x)
+ end
+end
+
+def fun_l7_n404(x)
+ if (x < 1)
+ fun_l8_n85(x)
+ else
+ fun_l8_n279(x)
+ end
+end
+
+def fun_l7_n405(x)
+ if (x < 1)
+ fun_l8_n30(x)
+ else
+ fun_l8_n85(x)
+ end
+end
+
+def fun_l7_n406(x)
+ if (x < 1)
+ fun_l8_n38(x)
+ else
+ fun_l8_n979(x)
+ end
+end
+
+def fun_l7_n407(x)
+ if (x < 1)
+ fun_l8_n630(x)
+ else
+ fun_l8_n262(x)
+ end
+end
+
+def fun_l7_n408(x)
+ if (x < 1)
+ fun_l8_n430(x)
+ else
+ fun_l8_n178(x)
+ end
+end
+
+def fun_l7_n409(x)
+ if (x < 1)
+ fun_l8_n131(x)
+ else
+ fun_l8_n66(x)
+ end
+end
+
+def fun_l7_n410(x)
+ if (x < 1)
+ fun_l8_n373(x)
+ else
+ fun_l8_n514(x)
+ end
+end
+
+def fun_l7_n411(x)
+ if (x < 1)
+ fun_l8_n334(x)
+ else
+ fun_l8_n591(x)
+ end
+end
+
+def fun_l7_n412(x)
+ if (x < 1)
+ fun_l8_n619(x)
+ else
+ fun_l8_n490(x)
+ end
+end
+
+def fun_l7_n413(x)
+ if (x < 1)
+ fun_l8_n318(x)
+ else
+ fun_l8_n456(x)
+ end
+end
+
+def fun_l7_n414(x)
+ if (x < 1)
+ fun_l8_n576(x)
+ else
+ fun_l8_n638(x)
+ end
+end
+
+def fun_l7_n415(x)
+ if (x < 1)
+ fun_l8_n407(x)
+ else
+ fun_l8_n44(x)
+ end
+end
+
+def fun_l7_n416(x)
+ if (x < 1)
+ fun_l8_n970(x)
+ else
+ fun_l8_n564(x)
+ end
+end
+
+def fun_l7_n417(x)
+ if (x < 1)
+ fun_l8_n163(x)
+ else
+ fun_l8_n533(x)
+ end
+end
+
+def fun_l7_n418(x)
+ if (x < 1)
+ fun_l8_n216(x)
+ else
+ fun_l8_n604(x)
+ end
+end
+
+def fun_l7_n419(x)
+ if (x < 1)
+ fun_l8_n505(x)
+ else
+ fun_l8_n620(x)
+ end
+end
+
+def fun_l7_n420(x)
+ if (x < 1)
+ fun_l8_n746(x)
+ else
+ fun_l8_n998(x)
+ end
+end
+
+def fun_l7_n421(x)
+ if (x < 1)
+ fun_l8_n266(x)
+ else
+ fun_l8_n762(x)
+ end
+end
+
+def fun_l7_n422(x)
+ if (x < 1)
+ fun_l8_n216(x)
+ else
+ fun_l8_n995(x)
+ end
+end
+
+def fun_l7_n423(x)
+ if (x < 1)
+ fun_l8_n824(x)
+ else
+ fun_l8_n349(x)
+ end
+end
+
+def fun_l7_n424(x)
+ if (x < 1)
+ fun_l8_n177(x)
+ else
+ fun_l8_n61(x)
+ end
+end
+
+def fun_l7_n425(x)
+ if (x < 1)
+ fun_l8_n286(x)
+ else
+ fun_l8_n213(x)
+ end
+end
+
+def fun_l7_n426(x)
+ if (x < 1)
+ fun_l8_n794(x)
+ else
+ fun_l8_n428(x)
+ end
+end
+
+def fun_l7_n427(x)
+ if (x < 1)
+ fun_l8_n404(x)
+ else
+ fun_l8_n202(x)
+ end
+end
+
+def fun_l7_n428(x)
+ if (x < 1)
+ fun_l8_n571(x)
+ else
+ fun_l8_n812(x)
+ end
+end
+
+def fun_l7_n429(x)
+ if (x < 1)
+ fun_l8_n165(x)
+ else
+ fun_l8_n277(x)
+ end
+end
+
+def fun_l7_n430(x)
+ if (x < 1)
+ fun_l8_n138(x)
+ else
+ fun_l8_n230(x)
+ end
+end
+
+def fun_l7_n431(x)
+ if (x < 1)
+ fun_l8_n832(x)
+ else
+ fun_l8_n78(x)
+ end
+end
+
+def fun_l7_n432(x)
+ if (x < 1)
+ fun_l8_n866(x)
+ else
+ fun_l8_n137(x)
+ end
+end
+
+def fun_l7_n433(x)
+ if (x < 1)
+ fun_l8_n92(x)
+ else
+ fun_l8_n638(x)
+ end
+end
+
+def fun_l7_n434(x)
+ if (x < 1)
+ fun_l8_n788(x)
+ else
+ fun_l8_n991(x)
+ end
+end
+
+def fun_l7_n435(x)
+ if (x < 1)
+ fun_l8_n584(x)
+ else
+ fun_l8_n783(x)
+ end
+end
+
+def fun_l7_n436(x)
+ if (x < 1)
+ fun_l8_n786(x)
+ else
+ fun_l8_n771(x)
+ end
+end
+
+def fun_l7_n437(x)
+ if (x < 1)
+ fun_l8_n650(x)
+ else
+ fun_l8_n684(x)
+ end
+end
+
+def fun_l7_n438(x)
+ if (x < 1)
+ fun_l8_n964(x)
+ else
+ fun_l8_n738(x)
+ end
+end
+
+def fun_l7_n439(x)
+ if (x < 1)
+ fun_l8_n356(x)
+ else
+ fun_l8_n979(x)
+ end
+end
+
+def fun_l7_n440(x)
+ if (x < 1)
+ fun_l8_n630(x)
+ else
+ fun_l8_n79(x)
+ end
+end
+
+def fun_l7_n441(x)
+ if (x < 1)
+ fun_l8_n326(x)
+ else
+ fun_l8_n667(x)
+ end
+end
+
+def fun_l7_n442(x)
+ if (x < 1)
+ fun_l8_n561(x)
+ else
+ fun_l8_n263(x)
+ end
+end
+
+def fun_l7_n443(x)
+ if (x < 1)
+ fun_l8_n62(x)
+ else
+ fun_l8_n528(x)
+ end
+end
+
+def fun_l7_n444(x)
+ if (x < 1)
+ fun_l8_n156(x)
+ else
+ fun_l8_n496(x)
+ end
+end
+
+def fun_l7_n445(x)
+ if (x < 1)
+ fun_l8_n971(x)
+ else
+ fun_l8_n853(x)
+ end
+end
+
+def fun_l7_n446(x)
+ if (x < 1)
+ fun_l8_n217(x)
+ else
+ fun_l8_n985(x)
+ end
+end
+
+def fun_l7_n447(x)
+ if (x < 1)
+ fun_l8_n569(x)
+ else
+ fun_l8_n320(x)
+ end
+end
+
+def fun_l7_n448(x)
+ if (x < 1)
+ fun_l8_n133(x)
+ else
+ fun_l8_n417(x)
+ end
+end
+
+def fun_l7_n449(x)
+ if (x < 1)
+ fun_l8_n369(x)
+ else
+ fun_l8_n25(x)
+ end
+end
+
+def fun_l7_n450(x)
+ if (x < 1)
+ fun_l8_n735(x)
+ else
+ fun_l8_n573(x)
+ end
+end
+
+def fun_l7_n451(x)
+ if (x < 1)
+ fun_l8_n764(x)
+ else
+ fun_l8_n247(x)
+ end
+end
+
+def fun_l7_n452(x)
+ if (x < 1)
+ fun_l8_n512(x)
+ else
+ fun_l8_n274(x)
+ end
+end
+
+def fun_l7_n453(x)
+ if (x < 1)
+ fun_l8_n388(x)
+ else
+ fun_l8_n438(x)
+ end
+end
+
+def fun_l7_n454(x)
+ if (x < 1)
+ fun_l8_n565(x)
+ else
+ fun_l8_n449(x)
+ end
+end
+
+def fun_l7_n455(x)
+ if (x < 1)
+ fun_l8_n771(x)
+ else
+ fun_l8_n827(x)
+ end
+end
+
+def fun_l7_n456(x)
+ if (x < 1)
+ fun_l8_n538(x)
+ else
+ fun_l8_n270(x)
+ end
+end
+
+def fun_l7_n457(x)
+ if (x < 1)
+ fun_l8_n288(x)
+ else
+ fun_l8_n325(x)
+ end
+end
+
+def fun_l7_n458(x)
+ if (x < 1)
+ fun_l8_n345(x)
+ else
+ fun_l8_n334(x)
+ end
+end
+
+def fun_l7_n459(x)
+ if (x < 1)
+ fun_l8_n696(x)
+ else
+ fun_l8_n440(x)
+ end
+end
+
+def fun_l7_n460(x)
+ if (x < 1)
+ fun_l8_n509(x)
+ else
+ fun_l8_n580(x)
+ end
+end
+
+def fun_l7_n461(x)
+ if (x < 1)
+ fun_l8_n254(x)
+ else
+ fun_l8_n162(x)
+ end
+end
+
+def fun_l7_n462(x)
+ if (x < 1)
+ fun_l8_n541(x)
+ else
+ fun_l8_n493(x)
+ end
+end
+
+def fun_l7_n463(x)
+ if (x < 1)
+ fun_l8_n371(x)
+ else
+ fun_l8_n302(x)
+ end
+end
+
+def fun_l7_n464(x)
+ if (x < 1)
+ fun_l8_n44(x)
+ else
+ fun_l8_n677(x)
+ end
+end
+
+def fun_l7_n465(x)
+ if (x < 1)
+ fun_l8_n312(x)
+ else
+ fun_l8_n716(x)
+ end
+end
+
+def fun_l7_n466(x)
+ if (x < 1)
+ fun_l8_n252(x)
+ else
+ fun_l8_n827(x)
+ end
+end
+
+def fun_l7_n467(x)
+ if (x < 1)
+ fun_l8_n871(x)
+ else
+ fun_l8_n962(x)
+ end
+end
+
+def fun_l7_n468(x)
+ if (x < 1)
+ fun_l8_n323(x)
+ else
+ fun_l8_n813(x)
+ end
+end
+
+def fun_l7_n469(x)
+ if (x < 1)
+ fun_l8_n100(x)
+ else
+ fun_l8_n905(x)
+ end
+end
+
+def fun_l7_n470(x)
+ if (x < 1)
+ fun_l8_n95(x)
+ else
+ fun_l8_n96(x)
+ end
+end
+
+def fun_l7_n471(x)
+ if (x < 1)
+ fun_l8_n398(x)
+ else
+ fun_l8_n40(x)
+ end
+end
+
+def fun_l7_n472(x)
+ if (x < 1)
+ fun_l8_n280(x)
+ else
+ fun_l8_n34(x)
+ end
+end
+
+def fun_l7_n473(x)
+ if (x < 1)
+ fun_l8_n262(x)
+ else
+ fun_l8_n399(x)
+ end
+end
+
+def fun_l7_n474(x)
+ if (x < 1)
+ fun_l8_n126(x)
+ else
+ fun_l8_n208(x)
+ end
+end
+
+def fun_l7_n475(x)
+ if (x < 1)
+ fun_l8_n371(x)
+ else
+ fun_l8_n697(x)
+ end
+end
+
+def fun_l7_n476(x)
+ if (x < 1)
+ fun_l8_n617(x)
+ else
+ fun_l8_n822(x)
+ end
+end
+
+def fun_l7_n477(x)
+ if (x < 1)
+ fun_l8_n24(x)
+ else
+ fun_l8_n300(x)
+ end
+end
+
+def fun_l7_n478(x)
+ if (x < 1)
+ fun_l8_n864(x)
+ else
+ fun_l8_n357(x)
+ end
+end
+
+def fun_l7_n479(x)
+ if (x < 1)
+ fun_l8_n231(x)
+ else
+ fun_l8_n892(x)
+ end
+end
+
+def fun_l7_n480(x)
+ if (x < 1)
+ fun_l8_n200(x)
+ else
+ fun_l8_n49(x)
+ end
+end
+
+def fun_l7_n481(x)
+ if (x < 1)
+ fun_l8_n43(x)
+ else
+ fun_l8_n86(x)
+ end
+end
+
+def fun_l7_n482(x)
+ if (x < 1)
+ fun_l8_n617(x)
+ else
+ fun_l8_n752(x)
+ end
+end
+
+def fun_l7_n483(x)
+ if (x < 1)
+ fun_l8_n65(x)
+ else
+ fun_l8_n796(x)
+ end
+end
+
+def fun_l7_n484(x)
+ if (x < 1)
+ fun_l8_n960(x)
+ else
+ fun_l8_n174(x)
+ end
+end
+
+def fun_l7_n485(x)
+ if (x < 1)
+ fun_l8_n383(x)
+ else
+ fun_l8_n50(x)
+ end
+end
+
+def fun_l7_n486(x)
+ if (x < 1)
+ fun_l8_n269(x)
+ else
+ fun_l8_n413(x)
+ end
+end
+
+def fun_l7_n487(x)
+ if (x < 1)
+ fun_l8_n762(x)
+ else
+ fun_l8_n368(x)
+ end
+end
+
+def fun_l7_n488(x)
+ if (x < 1)
+ fun_l8_n635(x)
+ else
+ fun_l8_n900(x)
+ end
+end
+
+def fun_l7_n489(x)
+ if (x < 1)
+ fun_l8_n707(x)
+ else
+ fun_l8_n376(x)
+ end
+end
+
+def fun_l7_n490(x)
+ if (x < 1)
+ fun_l8_n215(x)
+ else
+ fun_l8_n255(x)
+ end
+end
+
+def fun_l7_n491(x)
+ if (x < 1)
+ fun_l8_n730(x)
+ else
+ fun_l8_n489(x)
+ end
+end
+
+def fun_l7_n492(x)
+ if (x < 1)
+ fun_l8_n896(x)
+ else
+ fun_l8_n501(x)
+ end
+end
+
+def fun_l7_n493(x)
+ if (x < 1)
+ fun_l8_n359(x)
+ else
+ fun_l8_n876(x)
+ end
+end
+
+def fun_l7_n494(x)
+ if (x < 1)
+ fun_l8_n13(x)
+ else
+ fun_l8_n669(x)
+ end
+end
+
+def fun_l7_n495(x)
+ if (x < 1)
+ fun_l8_n442(x)
+ else
+ fun_l8_n975(x)
+ end
+end
+
+def fun_l7_n496(x)
+ if (x < 1)
+ fun_l8_n349(x)
+ else
+ fun_l8_n240(x)
+ end
+end
+
+def fun_l7_n497(x)
+ if (x < 1)
+ fun_l8_n174(x)
+ else
+ fun_l8_n416(x)
+ end
+end
+
+def fun_l7_n498(x)
+ if (x < 1)
+ fun_l8_n141(x)
+ else
+ fun_l8_n231(x)
+ end
+end
+
+def fun_l7_n499(x)
+ if (x < 1)
+ fun_l8_n868(x)
+ else
+ fun_l8_n370(x)
+ end
+end
+
+def fun_l7_n500(x)
+ if (x < 1)
+ fun_l8_n888(x)
+ else
+ fun_l8_n525(x)
+ end
+end
+
+def fun_l7_n501(x)
+ if (x < 1)
+ fun_l8_n418(x)
+ else
+ fun_l8_n402(x)
+ end
+end
+
+def fun_l7_n502(x)
+ if (x < 1)
+ fun_l8_n910(x)
+ else
+ fun_l8_n308(x)
+ end
+end
+
+def fun_l7_n503(x)
+ if (x < 1)
+ fun_l8_n435(x)
+ else
+ fun_l8_n106(x)
+ end
+end
+
+def fun_l7_n504(x)
+ if (x < 1)
+ fun_l8_n716(x)
+ else
+ fun_l8_n768(x)
+ end
+end
+
+def fun_l7_n505(x)
+ if (x < 1)
+ fun_l8_n690(x)
+ else
+ fun_l8_n382(x)
+ end
+end
+
+def fun_l7_n506(x)
+ if (x < 1)
+ fun_l8_n786(x)
+ else
+ fun_l8_n270(x)
+ end
+end
+
+def fun_l7_n507(x)
+ if (x < 1)
+ fun_l8_n317(x)
+ else
+ fun_l8_n802(x)
+ end
+end
+
+def fun_l7_n508(x)
+ if (x < 1)
+ fun_l8_n423(x)
+ else
+ fun_l8_n91(x)
+ end
+end
+
+def fun_l7_n509(x)
+ if (x < 1)
+ fun_l8_n853(x)
+ else
+ fun_l8_n434(x)
+ end
+end
+
+def fun_l7_n510(x)
+ if (x < 1)
+ fun_l8_n691(x)
+ else
+ fun_l8_n458(x)
+ end
+end
+
+def fun_l7_n511(x)
+ if (x < 1)
+ fun_l8_n806(x)
+ else
+ fun_l8_n989(x)
+ end
+end
+
+def fun_l7_n512(x)
+ if (x < 1)
+ fun_l8_n696(x)
+ else
+ fun_l8_n234(x)
+ end
+end
+
+def fun_l7_n513(x)
+ if (x < 1)
+ fun_l8_n370(x)
+ else
+ fun_l8_n440(x)
+ end
+end
+
+def fun_l7_n514(x)
+ if (x < 1)
+ fun_l8_n889(x)
+ else
+ fun_l8_n306(x)
+ end
+end
+
+def fun_l7_n515(x)
+ if (x < 1)
+ fun_l8_n147(x)
+ else
+ fun_l8_n527(x)
+ end
+end
+
+def fun_l7_n516(x)
+ if (x < 1)
+ fun_l8_n18(x)
+ else
+ fun_l8_n120(x)
+ end
+end
+
+def fun_l7_n517(x)
+ if (x < 1)
+ fun_l8_n93(x)
+ else
+ fun_l8_n861(x)
+ end
+end
+
+def fun_l7_n518(x)
+ if (x < 1)
+ fun_l8_n954(x)
+ else
+ fun_l8_n864(x)
+ end
+end
+
+def fun_l7_n519(x)
+ if (x < 1)
+ fun_l8_n886(x)
+ else
+ fun_l8_n227(x)
+ end
+end
+
+def fun_l7_n520(x)
+ if (x < 1)
+ fun_l8_n525(x)
+ else
+ fun_l8_n73(x)
+ end
+end
+
+def fun_l7_n521(x)
+ if (x < 1)
+ fun_l8_n550(x)
+ else
+ fun_l8_n638(x)
+ end
+end
+
+def fun_l7_n522(x)
+ if (x < 1)
+ fun_l8_n36(x)
+ else
+ fun_l8_n511(x)
+ end
+end
+
+def fun_l7_n523(x)
+ if (x < 1)
+ fun_l8_n346(x)
+ else
+ fun_l8_n257(x)
+ end
+end
+
+def fun_l7_n524(x)
+ if (x < 1)
+ fun_l8_n973(x)
+ else
+ fun_l8_n704(x)
+ end
+end
+
+def fun_l7_n525(x)
+ if (x < 1)
+ fun_l8_n21(x)
+ else
+ fun_l8_n354(x)
+ end
+end
+
+def fun_l7_n526(x)
+ if (x < 1)
+ fun_l8_n394(x)
+ else
+ fun_l8_n461(x)
+ end
+end
+
+def fun_l7_n527(x)
+ if (x < 1)
+ fun_l8_n444(x)
+ else
+ fun_l8_n333(x)
+ end
+end
+
+def fun_l7_n528(x)
+ if (x < 1)
+ fun_l8_n743(x)
+ else
+ fun_l8_n579(x)
+ end
+end
+
+def fun_l7_n529(x)
+ if (x < 1)
+ fun_l8_n135(x)
+ else
+ fun_l8_n69(x)
+ end
+end
+
+def fun_l7_n530(x)
+ if (x < 1)
+ fun_l8_n66(x)
+ else
+ fun_l8_n6(x)
+ end
+end
+
+def fun_l7_n531(x)
+ if (x < 1)
+ fun_l8_n84(x)
+ else
+ fun_l8_n392(x)
+ end
+end
+
+def fun_l7_n532(x)
+ if (x < 1)
+ fun_l8_n178(x)
+ else
+ fun_l8_n227(x)
+ end
+end
+
+def fun_l7_n533(x)
+ if (x < 1)
+ fun_l8_n695(x)
+ else
+ fun_l8_n696(x)
+ end
+end
+
+def fun_l7_n534(x)
+ if (x < 1)
+ fun_l8_n266(x)
+ else
+ fun_l8_n8(x)
+ end
+end
+
+def fun_l7_n535(x)
+ if (x < 1)
+ fun_l8_n806(x)
+ else
+ fun_l8_n878(x)
+ end
+end
+
+def fun_l7_n536(x)
+ if (x < 1)
+ fun_l8_n775(x)
+ else
+ fun_l8_n778(x)
+ end
+end
+
+def fun_l7_n537(x)
+ if (x < 1)
+ fun_l8_n133(x)
+ else
+ fun_l8_n552(x)
+ end
+end
+
+def fun_l7_n538(x)
+ if (x < 1)
+ fun_l8_n71(x)
+ else
+ fun_l8_n786(x)
+ end
+end
+
+def fun_l7_n539(x)
+ if (x < 1)
+ fun_l8_n679(x)
+ else
+ fun_l8_n407(x)
+ end
+end
+
+def fun_l7_n540(x)
+ if (x < 1)
+ fun_l8_n532(x)
+ else
+ fun_l8_n75(x)
+ end
+end
+
+def fun_l7_n541(x)
+ if (x < 1)
+ fun_l8_n367(x)
+ else
+ fun_l8_n505(x)
+ end
+end
+
+def fun_l7_n542(x)
+ if (x < 1)
+ fun_l8_n74(x)
+ else
+ fun_l8_n374(x)
+ end
+end
+
+def fun_l7_n543(x)
+ if (x < 1)
+ fun_l8_n207(x)
+ else
+ fun_l8_n874(x)
+ end
+end
+
+def fun_l7_n544(x)
+ if (x < 1)
+ fun_l8_n774(x)
+ else
+ fun_l8_n750(x)
+ end
+end
+
+def fun_l7_n545(x)
+ if (x < 1)
+ fun_l8_n503(x)
+ else
+ fun_l8_n933(x)
+ end
+end
+
+def fun_l7_n546(x)
+ if (x < 1)
+ fun_l8_n125(x)
+ else
+ fun_l8_n570(x)
+ end
+end
+
+def fun_l7_n547(x)
+ if (x < 1)
+ fun_l8_n743(x)
+ else
+ fun_l8_n506(x)
+ end
+end
+
+def fun_l7_n548(x)
+ if (x < 1)
+ fun_l8_n297(x)
+ else
+ fun_l8_n70(x)
+ end
+end
+
+def fun_l7_n549(x)
+ if (x < 1)
+ fun_l8_n692(x)
+ else
+ fun_l8_n652(x)
+ end
+end
+
+def fun_l7_n550(x)
+ if (x < 1)
+ fun_l8_n446(x)
+ else
+ fun_l8_n432(x)
+ end
+end
+
+def fun_l7_n551(x)
+ if (x < 1)
+ fun_l8_n947(x)
+ else
+ fun_l8_n429(x)
+ end
+end
+
+def fun_l7_n552(x)
+ if (x < 1)
+ fun_l8_n417(x)
+ else
+ fun_l8_n136(x)
+ end
+end
+
+def fun_l7_n553(x)
+ if (x < 1)
+ fun_l8_n236(x)
+ else
+ fun_l8_n291(x)
+ end
+end
+
+def fun_l7_n554(x)
+ if (x < 1)
+ fun_l8_n60(x)
+ else
+ fun_l8_n328(x)
+ end
+end
+
+def fun_l7_n555(x)
+ if (x < 1)
+ fun_l8_n97(x)
+ else
+ fun_l8_n461(x)
+ end
+end
+
+def fun_l7_n556(x)
+ if (x < 1)
+ fun_l8_n158(x)
+ else
+ fun_l8_n603(x)
+ end
+end
+
+def fun_l7_n557(x)
+ if (x < 1)
+ fun_l8_n328(x)
+ else
+ fun_l8_n725(x)
+ end
+end
+
+def fun_l7_n558(x)
+ if (x < 1)
+ fun_l8_n25(x)
+ else
+ fun_l8_n933(x)
+ end
+end
+
+def fun_l7_n559(x)
+ if (x < 1)
+ fun_l8_n352(x)
+ else
+ fun_l8_n552(x)
+ end
+end
+
+def fun_l7_n560(x)
+ if (x < 1)
+ fun_l8_n427(x)
+ else
+ fun_l8_n91(x)
+ end
+end
+
+def fun_l7_n561(x)
+ if (x < 1)
+ fun_l8_n293(x)
+ else
+ fun_l8_n749(x)
+ end
+end
+
+def fun_l7_n562(x)
+ if (x < 1)
+ fun_l8_n672(x)
+ else
+ fun_l8_n905(x)
+ end
+end
+
+def fun_l7_n563(x)
+ if (x < 1)
+ fun_l8_n201(x)
+ else
+ fun_l8_n668(x)
+ end
+end
+
+def fun_l7_n564(x)
+ if (x < 1)
+ fun_l8_n109(x)
+ else
+ fun_l8_n401(x)
+ end
+end
+
+def fun_l7_n565(x)
+ if (x < 1)
+ fun_l8_n678(x)
+ else
+ fun_l8_n890(x)
+ end
+end
+
+def fun_l7_n566(x)
+ if (x < 1)
+ fun_l8_n876(x)
+ else
+ fun_l8_n129(x)
+ end
+end
+
+def fun_l7_n567(x)
+ if (x < 1)
+ fun_l8_n908(x)
+ else
+ fun_l8_n979(x)
+ end
+end
+
+def fun_l7_n568(x)
+ if (x < 1)
+ fun_l8_n899(x)
+ else
+ fun_l8_n792(x)
+ end
+end
+
+def fun_l7_n569(x)
+ if (x < 1)
+ fun_l8_n742(x)
+ else
+ fun_l8_n441(x)
+ end
+end
+
+def fun_l7_n570(x)
+ if (x < 1)
+ fun_l8_n338(x)
+ else
+ fun_l8_n877(x)
+ end
+end
+
+def fun_l7_n571(x)
+ if (x < 1)
+ fun_l8_n684(x)
+ else
+ fun_l8_n167(x)
+ end
+end
+
+def fun_l7_n572(x)
+ if (x < 1)
+ fun_l8_n366(x)
+ else
+ fun_l8_n239(x)
+ end
+end
+
+def fun_l7_n573(x)
+ if (x < 1)
+ fun_l8_n925(x)
+ else
+ fun_l8_n648(x)
+ end
+end
+
+def fun_l7_n574(x)
+ if (x < 1)
+ fun_l8_n825(x)
+ else
+ fun_l8_n811(x)
+ end
+end
+
+def fun_l7_n575(x)
+ if (x < 1)
+ fun_l8_n348(x)
+ else
+ fun_l8_n994(x)
+ end
+end
+
+def fun_l7_n576(x)
+ if (x < 1)
+ fun_l8_n721(x)
+ else
+ fun_l8_n584(x)
+ end
+end
+
+def fun_l7_n577(x)
+ if (x < 1)
+ fun_l8_n827(x)
+ else
+ fun_l8_n968(x)
+ end
+end
+
+def fun_l7_n578(x)
+ if (x < 1)
+ fun_l8_n649(x)
+ else
+ fun_l8_n684(x)
+ end
+end
+
+def fun_l7_n579(x)
+ if (x < 1)
+ fun_l8_n43(x)
+ else
+ fun_l8_n183(x)
+ end
+end
+
+def fun_l7_n580(x)
+ if (x < 1)
+ fun_l8_n516(x)
+ else
+ fun_l8_n893(x)
+ end
+end
+
+def fun_l7_n581(x)
+ if (x < 1)
+ fun_l8_n334(x)
+ else
+ fun_l8_n677(x)
+ end
+end
+
+def fun_l7_n582(x)
+ if (x < 1)
+ fun_l8_n731(x)
+ else
+ fun_l8_n922(x)
+ end
+end
+
+def fun_l7_n583(x)
+ if (x < 1)
+ fun_l8_n653(x)
+ else
+ fun_l8_n403(x)
+ end
+end
+
+def fun_l7_n584(x)
+ if (x < 1)
+ fun_l8_n534(x)
+ else
+ fun_l8_n155(x)
+ end
+end
+
+def fun_l7_n585(x)
+ if (x < 1)
+ fun_l8_n1(x)
+ else
+ fun_l8_n903(x)
+ end
+end
+
+def fun_l7_n586(x)
+ if (x < 1)
+ fun_l8_n956(x)
+ else
+ fun_l8_n182(x)
+ end
+end
+
+def fun_l7_n587(x)
+ if (x < 1)
+ fun_l8_n590(x)
+ else
+ fun_l8_n707(x)
+ end
+end
+
+def fun_l7_n588(x)
+ if (x < 1)
+ fun_l8_n74(x)
+ else
+ fun_l8_n612(x)
+ end
+end
+
+def fun_l7_n589(x)
+ if (x < 1)
+ fun_l8_n17(x)
+ else
+ fun_l8_n193(x)
+ end
+end
+
+def fun_l7_n590(x)
+ if (x < 1)
+ fun_l8_n650(x)
+ else
+ fun_l8_n863(x)
+ end
+end
+
+def fun_l7_n591(x)
+ if (x < 1)
+ fun_l8_n974(x)
+ else
+ fun_l8_n932(x)
+ end
+end
+
+def fun_l7_n592(x)
+ if (x < 1)
+ fun_l8_n17(x)
+ else
+ fun_l8_n537(x)
+ end
+end
+
+def fun_l7_n593(x)
+ if (x < 1)
+ fun_l8_n118(x)
+ else
+ fun_l8_n588(x)
+ end
+end
+
+def fun_l7_n594(x)
+ if (x < 1)
+ fun_l8_n241(x)
+ else
+ fun_l8_n929(x)
+ end
+end
+
+def fun_l7_n595(x)
+ if (x < 1)
+ fun_l8_n667(x)
+ else
+ fun_l8_n251(x)
+ end
+end
+
+def fun_l7_n596(x)
+ if (x < 1)
+ fun_l8_n473(x)
+ else
+ fun_l8_n189(x)
+ end
+end
+
+def fun_l7_n597(x)
+ if (x < 1)
+ fun_l8_n687(x)
+ else
+ fun_l8_n666(x)
+ end
+end
+
+def fun_l7_n598(x)
+ if (x < 1)
+ fun_l8_n700(x)
+ else
+ fun_l8_n294(x)
+ end
+end
+
+def fun_l7_n599(x)
+ if (x < 1)
+ fun_l8_n144(x)
+ else
+ fun_l8_n656(x)
+ end
+end
+
+def fun_l7_n600(x)
+ if (x < 1)
+ fun_l8_n375(x)
+ else
+ fun_l8_n571(x)
+ end
+end
+
+def fun_l7_n601(x)
+ if (x < 1)
+ fun_l8_n506(x)
+ else
+ fun_l8_n212(x)
+ end
+end
+
+def fun_l7_n602(x)
+ if (x < 1)
+ fun_l8_n770(x)
+ else
+ fun_l8_n491(x)
+ end
+end
+
+def fun_l7_n603(x)
+ if (x < 1)
+ fun_l8_n325(x)
+ else
+ fun_l8_n660(x)
+ end
+end
+
+def fun_l7_n604(x)
+ if (x < 1)
+ fun_l8_n255(x)
+ else
+ fun_l8_n842(x)
+ end
+end
+
+def fun_l7_n605(x)
+ if (x < 1)
+ fun_l8_n149(x)
+ else
+ fun_l8_n648(x)
+ end
+end
+
+def fun_l7_n606(x)
+ if (x < 1)
+ fun_l8_n845(x)
+ else
+ fun_l8_n248(x)
+ end
+end
+
+def fun_l7_n607(x)
+ if (x < 1)
+ fun_l8_n484(x)
+ else
+ fun_l8_n333(x)
+ end
+end
+
+def fun_l7_n608(x)
+ if (x < 1)
+ fun_l8_n287(x)
+ else
+ fun_l8_n341(x)
+ end
+end
+
+def fun_l7_n609(x)
+ if (x < 1)
+ fun_l8_n873(x)
+ else
+ fun_l8_n113(x)
+ end
+end
+
+def fun_l7_n610(x)
+ if (x < 1)
+ fun_l8_n45(x)
+ else
+ fun_l8_n994(x)
+ end
+end
+
+def fun_l7_n611(x)
+ if (x < 1)
+ fun_l8_n427(x)
+ else
+ fun_l8_n359(x)
+ end
+end
+
+def fun_l7_n612(x)
+ if (x < 1)
+ fun_l8_n433(x)
+ else
+ fun_l8_n189(x)
+ end
+end
+
+def fun_l7_n613(x)
+ if (x < 1)
+ fun_l8_n25(x)
+ else
+ fun_l8_n564(x)
+ end
+end
+
+def fun_l7_n614(x)
+ if (x < 1)
+ fun_l8_n789(x)
+ else
+ fun_l8_n441(x)
+ end
+end
+
+def fun_l7_n615(x)
+ if (x < 1)
+ fun_l8_n353(x)
+ else
+ fun_l8_n217(x)
+ end
+end
+
+def fun_l7_n616(x)
+ if (x < 1)
+ fun_l8_n334(x)
+ else
+ fun_l8_n559(x)
+ end
+end
+
+def fun_l7_n617(x)
+ if (x < 1)
+ fun_l8_n709(x)
+ else
+ fun_l8_n764(x)
+ end
+end
+
+def fun_l7_n618(x)
+ if (x < 1)
+ fun_l8_n710(x)
+ else
+ fun_l8_n307(x)
+ end
+end
+
+def fun_l7_n619(x)
+ if (x < 1)
+ fun_l8_n246(x)
+ else
+ fun_l8_n88(x)
+ end
+end
+
+def fun_l7_n620(x)
+ if (x < 1)
+ fun_l8_n259(x)
+ else
+ fun_l8_n813(x)
+ end
+end
+
+def fun_l7_n621(x)
+ if (x < 1)
+ fun_l8_n250(x)
+ else
+ fun_l8_n566(x)
+ end
+end
+
+def fun_l7_n622(x)
+ if (x < 1)
+ fun_l8_n223(x)
+ else
+ fun_l8_n627(x)
+ end
+end
+
+def fun_l7_n623(x)
+ if (x < 1)
+ fun_l8_n345(x)
+ else
+ fun_l8_n604(x)
+ end
+end
+
+def fun_l7_n624(x)
+ if (x < 1)
+ fun_l8_n405(x)
+ else
+ fun_l8_n850(x)
+ end
+end
+
+def fun_l7_n625(x)
+ if (x < 1)
+ fun_l8_n278(x)
+ else
+ fun_l8_n643(x)
+ end
+end
+
+def fun_l7_n626(x)
+ if (x < 1)
+ fun_l8_n359(x)
+ else
+ fun_l8_n167(x)
+ end
+end
+
+def fun_l7_n627(x)
+ if (x < 1)
+ fun_l8_n589(x)
+ else
+ fun_l8_n656(x)
+ end
+end
+
+def fun_l7_n628(x)
+ if (x < 1)
+ fun_l8_n612(x)
+ else
+ fun_l8_n227(x)
+ end
+end
+
+def fun_l7_n629(x)
+ if (x < 1)
+ fun_l8_n845(x)
+ else
+ fun_l8_n469(x)
+ end
+end
+
+def fun_l7_n630(x)
+ if (x < 1)
+ fun_l8_n833(x)
+ else
+ fun_l8_n92(x)
+ end
+end
+
+def fun_l7_n631(x)
+ if (x < 1)
+ fun_l8_n411(x)
+ else
+ fun_l8_n815(x)
+ end
+end
+
+def fun_l7_n632(x)
+ if (x < 1)
+ fun_l8_n844(x)
+ else
+ fun_l8_n349(x)
+ end
+end
+
+def fun_l7_n633(x)
+ if (x < 1)
+ fun_l8_n333(x)
+ else
+ fun_l8_n468(x)
+ end
+end
+
+def fun_l7_n634(x)
+ if (x < 1)
+ fun_l8_n12(x)
+ else
+ fun_l8_n727(x)
+ end
+end
+
+def fun_l7_n635(x)
+ if (x < 1)
+ fun_l8_n65(x)
+ else
+ fun_l8_n729(x)
+ end
+end
+
+def fun_l7_n636(x)
+ if (x < 1)
+ fun_l8_n54(x)
+ else
+ fun_l8_n524(x)
+ end
+end
+
+def fun_l7_n637(x)
+ if (x < 1)
+ fun_l8_n547(x)
+ else
+ fun_l8_n220(x)
+ end
+end
+
+def fun_l7_n638(x)
+ if (x < 1)
+ fun_l8_n679(x)
+ else
+ fun_l8_n693(x)
+ end
+end
+
+def fun_l7_n639(x)
+ if (x < 1)
+ fun_l8_n428(x)
+ else
+ fun_l8_n938(x)
+ end
+end
+
+def fun_l7_n640(x)
+ if (x < 1)
+ fun_l8_n779(x)
+ else
+ fun_l8_n323(x)
+ end
+end
+
+def fun_l7_n641(x)
+ if (x < 1)
+ fun_l8_n254(x)
+ else
+ fun_l8_n443(x)
+ end
+end
+
+def fun_l7_n642(x)
+ if (x < 1)
+ fun_l8_n623(x)
+ else
+ fun_l8_n361(x)
+ end
+end
+
+def fun_l7_n643(x)
+ if (x < 1)
+ fun_l8_n524(x)
+ else
+ fun_l8_n19(x)
+ end
+end
+
+def fun_l7_n644(x)
+ if (x < 1)
+ fun_l8_n77(x)
+ else
+ fun_l8_n696(x)
+ end
+end
+
+def fun_l7_n645(x)
+ if (x < 1)
+ fun_l8_n468(x)
+ else
+ fun_l8_n819(x)
+ end
+end
+
+def fun_l7_n646(x)
+ if (x < 1)
+ fun_l8_n319(x)
+ else
+ fun_l8_n921(x)
+ end
+end
+
+def fun_l7_n647(x)
+ if (x < 1)
+ fun_l8_n681(x)
+ else
+ fun_l8_n925(x)
+ end
+end
+
+def fun_l7_n648(x)
+ if (x < 1)
+ fun_l8_n146(x)
+ else
+ fun_l8_n451(x)
+ end
+end
+
+def fun_l7_n649(x)
+ if (x < 1)
+ fun_l8_n871(x)
+ else
+ fun_l8_n707(x)
+ end
+end
+
+def fun_l7_n650(x)
+ if (x < 1)
+ fun_l8_n782(x)
+ else
+ fun_l8_n556(x)
+ end
+end
+
+def fun_l7_n651(x)
+ if (x < 1)
+ fun_l8_n721(x)
+ else
+ fun_l8_n80(x)
+ end
+end
+
+def fun_l7_n652(x)
+ if (x < 1)
+ fun_l8_n110(x)
+ else
+ fun_l8_n529(x)
+ end
+end
+
+def fun_l7_n653(x)
+ if (x < 1)
+ fun_l8_n107(x)
+ else
+ fun_l8_n876(x)
+ end
+end
+
+def fun_l7_n654(x)
+ if (x < 1)
+ fun_l8_n902(x)
+ else
+ fun_l8_n45(x)
+ end
+end
+
+def fun_l7_n655(x)
+ if (x < 1)
+ fun_l8_n714(x)
+ else
+ fun_l8_n939(x)
+ end
+end
+
+def fun_l7_n656(x)
+ if (x < 1)
+ fun_l8_n761(x)
+ else
+ fun_l8_n940(x)
+ end
+end
+
+def fun_l7_n657(x)
+ if (x < 1)
+ fun_l8_n165(x)
+ else
+ fun_l8_n779(x)
+ end
+end
+
+def fun_l7_n658(x)
+ if (x < 1)
+ fun_l8_n848(x)
+ else
+ fun_l8_n307(x)
+ end
+end
+
+def fun_l7_n659(x)
+ if (x < 1)
+ fun_l8_n414(x)
+ else
+ fun_l8_n855(x)
+ end
+end
+
+def fun_l7_n660(x)
+ if (x < 1)
+ fun_l8_n963(x)
+ else
+ fun_l8_n887(x)
+ end
+end
+
+def fun_l7_n661(x)
+ if (x < 1)
+ fun_l8_n71(x)
+ else
+ fun_l8_n249(x)
+ end
+end
+
+def fun_l7_n662(x)
+ if (x < 1)
+ fun_l8_n426(x)
+ else
+ fun_l8_n977(x)
+ end
+end
+
+def fun_l7_n663(x)
+ if (x < 1)
+ fun_l8_n924(x)
+ else
+ fun_l8_n606(x)
+ end
+end
+
+def fun_l7_n664(x)
+ if (x < 1)
+ fun_l8_n88(x)
+ else
+ fun_l8_n718(x)
+ end
+end
+
+def fun_l7_n665(x)
+ if (x < 1)
+ fun_l8_n611(x)
+ else
+ fun_l8_n898(x)
+ end
+end
+
+def fun_l7_n666(x)
+ if (x < 1)
+ fun_l8_n656(x)
+ else
+ fun_l8_n116(x)
+ end
+end
+
+def fun_l7_n667(x)
+ if (x < 1)
+ fun_l8_n196(x)
+ else
+ fun_l8_n688(x)
+ end
+end
+
+def fun_l7_n668(x)
+ if (x < 1)
+ fun_l8_n873(x)
+ else
+ fun_l8_n561(x)
+ end
+end
+
+def fun_l7_n669(x)
+ if (x < 1)
+ fun_l8_n947(x)
+ else
+ fun_l8_n64(x)
+ end
+end
+
+def fun_l7_n670(x)
+ if (x < 1)
+ fun_l8_n309(x)
+ else
+ fun_l8_n708(x)
+ end
+end
+
+def fun_l7_n671(x)
+ if (x < 1)
+ fun_l8_n699(x)
+ else
+ fun_l8_n218(x)
+ end
+end
+
+def fun_l7_n672(x)
+ if (x < 1)
+ fun_l8_n930(x)
+ else
+ fun_l8_n387(x)
+ end
+end
+
+def fun_l7_n673(x)
+ if (x < 1)
+ fun_l8_n598(x)
+ else
+ fun_l8_n245(x)
+ end
+end
+
+def fun_l7_n674(x)
+ if (x < 1)
+ fun_l8_n411(x)
+ else
+ fun_l8_n236(x)
+ end
+end
+
+def fun_l7_n675(x)
+ if (x < 1)
+ fun_l8_n193(x)
+ else
+ fun_l8_n61(x)
+ end
+end
+
+def fun_l7_n676(x)
+ if (x < 1)
+ fun_l8_n2(x)
+ else
+ fun_l8_n253(x)
+ end
+end
+
+def fun_l7_n677(x)
+ if (x < 1)
+ fun_l8_n839(x)
+ else
+ fun_l8_n919(x)
+ end
+end
+
+def fun_l7_n678(x)
+ if (x < 1)
+ fun_l8_n643(x)
+ else
+ fun_l8_n245(x)
+ end
+end
+
+def fun_l7_n679(x)
+ if (x < 1)
+ fun_l8_n625(x)
+ else
+ fun_l8_n690(x)
+ end
+end
+
+def fun_l7_n680(x)
+ if (x < 1)
+ fun_l8_n899(x)
+ else
+ fun_l8_n307(x)
+ end
+end
+
+def fun_l7_n681(x)
+ if (x < 1)
+ fun_l8_n894(x)
+ else
+ fun_l8_n92(x)
+ end
+end
+
+def fun_l7_n682(x)
+ if (x < 1)
+ fun_l8_n139(x)
+ else
+ fun_l8_n252(x)
+ end
+end
+
+def fun_l7_n683(x)
+ if (x < 1)
+ fun_l8_n978(x)
+ else
+ fun_l8_n84(x)
+ end
+end
+
+def fun_l7_n684(x)
+ if (x < 1)
+ fun_l8_n426(x)
+ else
+ fun_l8_n329(x)
+ end
+end
+
+def fun_l7_n685(x)
+ if (x < 1)
+ fun_l8_n241(x)
+ else
+ fun_l8_n780(x)
+ end
+end
+
+def fun_l7_n686(x)
+ if (x < 1)
+ fun_l8_n793(x)
+ else
+ fun_l8_n534(x)
+ end
+end
+
+def fun_l7_n687(x)
+ if (x < 1)
+ fun_l8_n409(x)
+ else
+ fun_l8_n152(x)
+ end
+end
+
+def fun_l7_n688(x)
+ if (x < 1)
+ fun_l8_n838(x)
+ else
+ fun_l8_n555(x)
+ end
+end
+
+def fun_l7_n689(x)
+ if (x < 1)
+ fun_l8_n634(x)
+ else
+ fun_l8_n27(x)
+ end
+end
+
+def fun_l7_n690(x)
+ if (x < 1)
+ fun_l8_n629(x)
+ else
+ fun_l8_n448(x)
+ end
+end
+
+def fun_l7_n691(x)
+ if (x < 1)
+ fun_l8_n487(x)
+ else
+ fun_l8_n293(x)
+ end
+end
+
+def fun_l7_n692(x)
+ if (x < 1)
+ fun_l8_n739(x)
+ else
+ fun_l8_n472(x)
+ end
+end
+
+def fun_l7_n693(x)
+ if (x < 1)
+ fun_l8_n619(x)
+ else
+ fun_l8_n642(x)
+ end
+end
+
+def fun_l7_n694(x)
+ if (x < 1)
+ fun_l8_n123(x)
+ else
+ fun_l8_n880(x)
+ end
+end
+
+def fun_l7_n695(x)
+ if (x < 1)
+ fun_l8_n695(x)
+ else
+ fun_l8_n364(x)
+ end
+end
+
+def fun_l7_n696(x)
+ if (x < 1)
+ fun_l8_n610(x)
+ else
+ fun_l8_n798(x)
+ end
+end
+
+def fun_l7_n697(x)
+ if (x < 1)
+ fun_l8_n396(x)
+ else
+ fun_l8_n704(x)
+ end
+end
+
+def fun_l7_n698(x)
+ if (x < 1)
+ fun_l8_n759(x)
+ else
+ fun_l8_n880(x)
+ end
+end
+
+def fun_l7_n699(x)
+ if (x < 1)
+ fun_l8_n885(x)
+ else
+ fun_l8_n139(x)
+ end
+end
+
+def fun_l7_n700(x)
+ if (x < 1)
+ fun_l8_n244(x)
+ else
+ fun_l8_n307(x)
+ end
+end
+
+def fun_l7_n701(x)
+ if (x < 1)
+ fun_l8_n698(x)
+ else
+ fun_l8_n949(x)
+ end
+end
+
+def fun_l7_n702(x)
+ if (x < 1)
+ fun_l8_n585(x)
+ else
+ fun_l8_n211(x)
+ end
+end
+
+def fun_l7_n703(x)
+ if (x < 1)
+ fun_l8_n834(x)
+ else
+ fun_l8_n111(x)
+ end
+end
+
+def fun_l7_n704(x)
+ if (x < 1)
+ fun_l8_n690(x)
+ else
+ fun_l8_n981(x)
+ end
+end
+
+def fun_l7_n705(x)
+ if (x < 1)
+ fun_l8_n377(x)
+ else
+ fun_l8_n392(x)
+ end
+end
+
+def fun_l7_n706(x)
+ if (x < 1)
+ fun_l8_n988(x)
+ else
+ fun_l8_n759(x)
+ end
+end
+
+def fun_l7_n707(x)
+ if (x < 1)
+ fun_l8_n917(x)
+ else
+ fun_l8_n326(x)
+ end
+end
+
+def fun_l7_n708(x)
+ if (x < 1)
+ fun_l8_n662(x)
+ else
+ fun_l8_n488(x)
+ end
+end
+
+def fun_l7_n709(x)
+ if (x < 1)
+ fun_l8_n530(x)
+ else
+ fun_l8_n663(x)
+ end
+end
+
+def fun_l7_n710(x)
+ if (x < 1)
+ fun_l8_n344(x)
+ else
+ fun_l8_n282(x)
+ end
+end
+
+def fun_l7_n711(x)
+ if (x < 1)
+ fun_l8_n908(x)
+ else
+ fun_l8_n405(x)
+ end
+end
+
+def fun_l7_n712(x)
+ if (x < 1)
+ fun_l8_n332(x)
+ else
+ fun_l8_n660(x)
+ end
+end
+
+def fun_l7_n713(x)
+ if (x < 1)
+ fun_l8_n37(x)
+ else
+ fun_l8_n689(x)
+ end
+end
+
+def fun_l7_n714(x)
+ if (x < 1)
+ fun_l8_n642(x)
+ else
+ fun_l8_n996(x)
+ end
+end
+
+def fun_l7_n715(x)
+ if (x < 1)
+ fun_l8_n322(x)
+ else
+ fun_l8_n609(x)
+ end
+end
+
+def fun_l7_n716(x)
+ if (x < 1)
+ fun_l8_n567(x)
+ else
+ fun_l8_n492(x)
+ end
+end
+
+def fun_l7_n717(x)
+ if (x < 1)
+ fun_l8_n934(x)
+ else
+ fun_l8_n353(x)
+ end
+end
+
+def fun_l7_n718(x)
+ if (x < 1)
+ fun_l8_n572(x)
+ else
+ fun_l8_n859(x)
+ end
+end
+
+def fun_l7_n719(x)
+ if (x < 1)
+ fun_l8_n777(x)
+ else
+ fun_l8_n810(x)
+ end
+end
+
+def fun_l7_n720(x)
+ if (x < 1)
+ fun_l8_n121(x)
+ else
+ fun_l8_n847(x)
+ end
+end
+
+def fun_l7_n721(x)
+ if (x < 1)
+ fun_l8_n177(x)
+ else
+ fun_l8_n484(x)
+ end
+end
+
+def fun_l7_n722(x)
+ if (x < 1)
+ fun_l8_n73(x)
+ else
+ fun_l8_n795(x)
+ end
+end
+
+def fun_l7_n723(x)
+ if (x < 1)
+ fun_l8_n950(x)
+ else
+ fun_l8_n382(x)
+ end
+end
+
+def fun_l7_n724(x)
+ if (x < 1)
+ fun_l8_n349(x)
+ else
+ fun_l8_n644(x)
+ end
+end
+
+def fun_l7_n725(x)
+ if (x < 1)
+ fun_l8_n480(x)
+ else
+ fun_l8_n262(x)
+ end
+end
+
+def fun_l7_n726(x)
+ if (x < 1)
+ fun_l8_n39(x)
+ else
+ fun_l8_n543(x)
+ end
+end
+
+def fun_l7_n727(x)
+ if (x < 1)
+ fun_l8_n934(x)
+ else
+ fun_l8_n440(x)
+ end
+end
+
+def fun_l7_n728(x)
+ if (x < 1)
+ fun_l8_n312(x)
+ else
+ fun_l8_n341(x)
+ end
+end
+
+def fun_l7_n729(x)
+ if (x < 1)
+ fun_l8_n284(x)
+ else
+ fun_l8_n884(x)
+ end
+end
+
+def fun_l7_n730(x)
+ if (x < 1)
+ fun_l8_n841(x)
+ else
+ fun_l8_n221(x)
+ end
+end
+
+def fun_l7_n731(x)
+ if (x < 1)
+ fun_l8_n719(x)
+ else
+ fun_l8_n907(x)
+ end
+end
+
+def fun_l7_n732(x)
+ if (x < 1)
+ fun_l8_n422(x)
+ else
+ fun_l8_n65(x)
+ end
+end
+
+def fun_l7_n733(x)
+ if (x < 1)
+ fun_l8_n324(x)
+ else
+ fun_l8_n811(x)
+ end
+end
+
+def fun_l7_n734(x)
+ if (x < 1)
+ fun_l8_n643(x)
+ else
+ fun_l8_n622(x)
+ end
+end
+
+def fun_l7_n735(x)
+ if (x < 1)
+ fun_l8_n20(x)
+ else
+ fun_l8_n205(x)
+ end
+end
+
+def fun_l7_n736(x)
+ if (x < 1)
+ fun_l8_n487(x)
+ else
+ fun_l8_n908(x)
+ end
+end
+
+def fun_l7_n737(x)
+ if (x < 1)
+ fun_l8_n576(x)
+ else
+ fun_l8_n789(x)
+ end
+end
+
+def fun_l7_n738(x)
+ if (x < 1)
+ fun_l8_n353(x)
+ else
+ fun_l8_n286(x)
+ end
+end
+
+def fun_l7_n739(x)
+ if (x < 1)
+ fun_l8_n614(x)
+ else
+ fun_l8_n121(x)
+ end
+end
+
+def fun_l7_n740(x)
+ if (x < 1)
+ fun_l8_n79(x)
+ else
+ fun_l8_n637(x)
+ end
+end
+
+def fun_l7_n741(x)
+ if (x < 1)
+ fun_l8_n808(x)
+ else
+ fun_l8_n461(x)
+ end
+end
+
+def fun_l7_n742(x)
+ if (x < 1)
+ fun_l8_n590(x)
+ else
+ fun_l8_n198(x)
+ end
+end
+
+def fun_l7_n743(x)
+ if (x < 1)
+ fun_l8_n498(x)
+ else
+ fun_l8_n379(x)
+ end
+end
+
+def fun_l7_n744(x)
+ if (x < 1)
+ fun_l8_n599(x)
+ else
+ fun_l8_n359(x)
+ end
+end
+
+def fun_l7_n745(x)
+ if (x < 1)
+ fun_l8_n898(x)
+ else
+ fun_l8_n520(x)
+ end
+end
+
+def fun_l7_n746(x)
+ if (x < 1)
+ fun_l8_n835(x)
+ else
+ fun_l8_n131(x)
+ end
+end
+
+def fun_l7_n747(x)
+ if (x < 1)
+ fun_l8_n648(x)
+ else
+ fun_l8_n248(x)
+ end
+end
+
+def fun_l7_n748(x)
+ if (x < 1)
+ fun_l8_n451(x)
+ else
+ fun_l8_n352(x)
+ end
+end
+
+def fun_l7_n749(x)
+ if (x < 1)
+ fun_l8_n69(x)
+ else
+ fun_l8_n974(x)
+ end
+end
+
+def fun_l7_n750(x)
+ if (x < 1)
+ fun_l8_n108(x)
+ else
+ fun_l8_n744(x)
+ end
+end
+
+def fun_l7_n751(x)
+ if (x < 1)
+ fun_l8_n171(x)
+ else
+ fun_l8_n305(x)
+ end
+end
+
+def fun_l7_n752(x)
+ if (x < 1)
+ fun_l8_n180(x)
+ else
+ fun_l8_n293(x)
+ end
+end
+
+def fun_l7_n753(x)
+ if (x < 1)
+ fun_l8_n555(x)
+ else
+ fun_l8_n882(x)
+ end
+end
+
+def fun_l7_n754(x)
+ if (x < 1)
+ fun_l8_n756(x)
+ else
+ fun_l8_n604(x)
+ end
+end
+
+def fun_l7_n755(x)
+ if (x < 1)
+ fun_l8_n782(x)
+ else
+ fun_l8_n708(x)
+ end
+end
+
+def fun_l7_n756(x)
+ if (x < 1)
+ fun_l8_n65(x)
+ else
+ fun_l8_n474(x)
+ end
+end
+
+def fun_l7_n757(x)
+ if (x < 1)
+ fun_l8_n43(x)
+ else
+ fun_l8_n892(x)
+ end
+end
+
+def fun_l7_n758(x)
+ if (x < 1)
+ fun_l8_n90(x)
+ else
+ fun_l8_n968(x)
+ end
+end
+
+def fun_l7_n759(x)
+ if (x < 1)
+ fun_l8_n599(x)
+ else
+ fun_l8_n505(x)
+ end
+end
+
+def fun_l7_n760(x)
+ if (x < 1)
+ fun_l8_n344(x)
+ else
+ fun_l8_n122(x)
+ end
+end
+
+def fun_l7_n761(x)
+ if (x < 1)
+ fun_l8_n289(x)
+ else
+ fun_l8_n867(x)
+ end
+end
+
+def fun_l7_n762(x)
+ if (x < 1)
+ fun_l8_n107(x)
+ else
+ fun_l8_n950(x)
+ end
+end
+
+def fun_l7_n763(x)
+ if (x < 1)
+ fun_l8_n405(x)
+ else
+ fun_l8_n178(x)
+ end
+end
+
+def fun_l7_n764(x)
+ if (x < 1)
+ fun_l8_n91(x)
+ else
+ fun_l8_n760(x)
+ end
+end
+
+def fun_l7_n765(x)
+ if (x < 1)
+ fun_l8_n268(x)
+ else
+ fun_l8_n451(x)
+ end
+end
+
+def fun_l7_n766(x)
+ if (x < 1)
+ fun_l8_n693(x)
+ else
+ fun_l8_n150(x)
+ end
+end
+
+def fun_l7_n767(x)
+ if (x < 1)
+ fun_l8_n622(x)
+ else
+ fun_l8_n114(x)
+ end
+end
+
+def fun_l7_n768(x)
+ if (x < 1)
+ fun_l8_n662(x)
+ else
+ fun_l8_n514(x)
+ end
+end
+
+def fun_l7_n769(x)
+ if (x < 1)
+ fun_l8_n440(x)
+ else
+ fun_l8_n537(x)
+ end
+end
+
+def fun_l7_n770(x)
+ if (x < 1)
+ fun_l8_n207(x)
+ else
+ fun_l8_n513(x)
+ end
+end
+
+def fun_l7_n771(x)
+ if (x < 1)
+ fun_l8_n394(x)
+ else
+ fun_l8_n857(x)
+ end
+end
+
+def fun_l7_n772(x)
+ if (x < 1)
+ fun_l8_n635(x)
+ else
+ fun_l8_n696(x)
+ end
+end
+
+def fun_l7_n773(x)
+ if (x < 1)
+ fun_l8_n687(x)
+ else
+ fun_l8_n926(x)
+ end
+end
+
+def fun_l7_n774(x)
+ if (x < 1)
+ fun_l8_n54(x)
+ else
+ fun_l8_n410(x)
+ end
+end
+
+def fun_l7_n775(x)
+ if (x < 1)
+ fun_l8_n825(x)
+ else
+ fun_l8_n339(x)
+ end
+end
+
+def fun_l7_n776(x)
+ if (x < 1)
+ fun_l8_n130(x)
+ else
+ fun_l8_n48(x)
+ end
+end
+
+def fun_l7_n777(x)
+ if (x < 1)
+ fun_l8_n627(x)
+ else
+ fun_l8_n191(x)
+ end
+end
+
+def fun_l7_n778(x)
+ if (x < 1)
+ fun_l8_n843(x)
+ else
+ fun_l8_n627(x)
+ end
+end
+
+def fun_l7_n779(x)
+ if (x < 1)
+ fun_l8_n950(x)
+ else
+ fun_l8_n247(x)
+ end
+end
+
+def fun_l7_n780(x)
+ if (x < 1)
+ fun_l8_n865(x)
+ else
+ fun_l8_n540(x)
+ end
+end
+
+def fun_l7_n781(x)
+ if (x < 1)
+ fun_l8_n440(x)
+ else
+ fun_l8_n326(x)
+ end
+end
+
+def fun_l7_n782(x)
+ if (x < 1)
+ fun_l8_n463(x)
+ else
+ fun_l8_n808(x)
+ end
+end
+
+def fun_l7_n783(x)
+ if (x < 1)
+ fun_l8_n44(x)
+ else
+ fun_l8_n888(x)
+ end
+end
+
+def fun_l7_n784(x)
+ if (x < 1)
+ fun_l8_n690(x)
+ else
+ fun_l8_n673(x)
+ end
+end
+
+def fun_l7_n785(x)
+ if (x < 1)
+ fun_l8_n641(x)
+ else
+ fun_l8_n367(x)
+ end
+end
+
+def fun_l7_n786(x)
+ if (x < 1)
+ fun_l8_n193(x)
+ else
+ fun_l8_n764(x)
+ end
+end
+
+def fun_l7_n787(x)
+ if (x < 1)
+ fun_l8_n604(x)
+ else
+ fun_l8_n527(x)
+ end
+end
+
+def fun_l7_n788(x)
+ if (x < 1)
+ fun_l8_n626(x)
+ else
+ fun_l8_n101(x)
+ end
+end
+
+def fun_l7_n789(x)
+ if (x < 1)
+ fun_l8_n152(x)
+ else
+ fun_l8_n9(x)
+ end
+end
+
+def fun_l7_n790(x)
+ if (x < 1)
+ fun_l8_n541(x)
+ else
+ fun_l8_n789(x)
+ end
+end
+
+def fun_l7_n791(x)
+ if (x < 1)
+ fun_l8_n706(x)
+ else
+ fun_l8_n577(x)
+ end
+end
+
+def fun_l7_n792(x)
+ if (x < 1)
+ fun_l8_n480(x)
+ else
+ fun_l8_n284(x)
+ end
+end
+
+def fun_l7_n793(x)
+ if (x < 1)
+ fun_l8_n157(x)
+ else
+ fun_l8_n620(x)
+ end
+end
+
+def fun_l7_n794(x)
+ if (x < 1)
+ fun_l8_n666(x)
+ else
+ fun_l8_n755(x)
+ end
+end
+
+def fun_l7_n795(x)
+ if (x < 1)
+ fun_l8_n487(x)
+ else
+ fun_l8_n20(x)
+ end
+end
+
+def fun_l7_n796(x)
+ if (x < 1)
+ fun_l8_n750(x)
+ else
+ fun_l8_n974(x)
+ end
+end
+
+def fun_l7_n797(x)
+ if (x < 1)
+ fun_l8_n812(x)
+ else
+ fun_l8_n915(x)
+ end
+end
+
+def fun_l7_n798(x)
+ if (x < 1)
+ fun_l8_n297(x)
+ else
+ fun_l8_n8(x)
+ end
+end
+
+def fun_l7_n799(x)
+ if (x < 1)
+ fun_l8_n385(x)
+ else
+ fun_l8_n920(x)
+ end
+end
+
+def fun_l7_n800(x)
+ if (x < 1)
+ fun_l8_n322(x)
+ else
+ fun_l8_n603(x)
+ end
+end
+
+def fun_l7_n801(x)
+ if (x < 1)
+ fun_l8_n825(x)
+ else
+ fun_l8_n702(x)
+ end
+end
+
+def fun_l7_n802(x)
+ if (x < 1)
+ fun_l8_n902(x)
+ else
+ fun_l8_n978(x)
+ end
+end
+
+def fun_l7_n803(x)
+ if (x < 1)
+ fun_l8_n921(x)
+ else
+ fun_l8_n61(x)
+ end
+end
+
+def fun_l7_n804(x)
+ if (x < 1)
+ fun_l8_n171(x)
+ else
+ fun_l8_n630(x)
+ end
+end
+
+def fun_l7_n805(x)
+ if (x < 1)
+ fun_l8_n469(x)
+ else
+ fun_l8_n265(x)
+ end
+end
+
+def fun_l7_n806(x)
+ if (x < 1)
+ fun_l8_n367(x)
+ else
+ fun_l8_n820(x)
+ end
+end
+
+def fun_l7_n807(x)
+ if (x < 1)
+ fun_l8_n242(x)
+ else
+ fun_l8_n707(x)
+ end
+end
+
+def fun_l7_n808(x)
+ if (x < 1)
+ fun_l8_n512(x)
+ else
+ fun_l8_n139(x)
+ end
+end
+
+def fun_l7_n809(x)
+ if (x < 1)
+ fun_l8_n17(x)
+ else
+ fun_l8_n615(x)
+ end
+end
+
+def fun_l7_n810(x)
+ if (x < 1)
+ fun_l8_n600(x)
+ else
+ fun_l8_n6(x)
+ end
+end
+
+def fun_l7_n811(x)
+ if (x < 1)
+ fun_l8_n342(x)
+ else
+ fun_l8_n194(x)
+ end
+end
+
+def fun_l7_n812(x)
+ if (x < 1)
+ fun_l8_n191(x)
+ else
+ fun_l8_n864(x)
+ end
+end
+
+def fun_l7_n813(x)
+ if (x < 1)
+ fun_l8_n577(x)
+ else
+ fun_l8_n113(x)
+ end
+end
+
+def fun_l7_n814(x)
+ if (x < 1)
+ fun_l8_n729(x)
+ else
+ fun_l8_n989(x)
+ end
+end
+
+def fun_l7_n815(x)
+ if (x < 1)
+ fun_l8_n707(x)
+ else
+ fun_l8_n846(x)
+ end
+end
+
+def fun_l7_n816(x)
+ if (x < 1)
+ fun_l8_n874(x)
+ else
+ fun_l8_n790(x)
+ end
+end
+
+def fun_l7_n817(x)
+ if (x < 1)
+ fun_l8_n968(x)
+ else
+ fun_l8_n215(x)
+ end
+end
+
+def fun_l7_n818(x)
+ if (x < 1)
+ fun_l8_n444(x)
+ else
+ fun_l8_n182(x)
+ end
+end
+
+def fun_l7_n819(x)
+ if (x < 1)
+ fun_l8_n103(x)
+ else
+ fun_l8_n134(x)
+ end
+end
+
+def fun_l7_n820(x)
+ if (x < 1)
+ fun_l8_n718(x)
+ else
+ fun_l8_n950(x)
+ end
+end
+
+def fun_l7_n821(x)
+ if (x < 1)
+ fun_l8_n439(x)
+ else
+ fun_l8_n68(x)
+ end
+end
+
+def fun_l7_n822(x)
+ if (x < 1)
+ fun_l8_n961(x)
+ else
+ fun_l8_n593(x)
+ end
+end
+
+def fun_l7_n823(x)
+ if (x < 1)
+ fun_l8_n212(x)
+ else
+ fun_l8_n346(x)
+ end
+end
+
+def fun_l7_n824(x)
+ if (x < 1)
+ fun_l8_n926(x)
+ else
+ fun_l8_n225(x)
+ end
+end
+
+def fun_l7_n825(x)
+ if (x < 1)
+ fun_l8_n880(x)
+ else
+ fun_l8_n527(x)
+ end
+end
+
+def fun_l7_n826(x)
+ if (x < 1)
+ fun_l8_n13(x)
+ else
+ fun_l8_n436(x)
+ end
+end
+
+def fun_l7_n827(x)
+ if (x < 1)
+ fun_l8_n19(x)
+ else
+ fun_l8_n201(x)
+ end
+end
+
+def fun_l7_n828(x)
+ if (x < 1)
+ fun_l8_n568(x)
+ else
+ fun_l8_n450(x)
+ end
+end
+
+def fun_l7_n829(x)
+ if (x < 1)
+ fun_l8_n543(x)
+ else
+ fun_l8_n606(x)
+ end
+end
+
+def fun_l7_n830(x)
+ if (x < 1)
+ fun_l8_n503(x)
+ else
+ fun_l8_n104(x)
+ end
+end
+
+def fun_l7_n831(x)
+ if (x < 1)
+ fun_l8_n185(x)
+ else
+ fun_l8_n1(x)
+ end
+end
+
+def fun_l7_n832(x)
+ if (x < 1)
+ fun_l8_n598(x)
+ else
+ fun_l8_n637(x)
+ end
+end
+
+def fun_l7_n833(x)
+ if (x < 1)
+ fun_l8_n936(x)
+ else
+ fun_l8_n554(x)
+ end
+end
+
+def fun_l7_n834(x)
+ if (x < 1)
+ fun_l8_n864(x)
+ else
+ fun_l8_n766(x)
+ end
+end
+
+def fun_l7_n835(x)
+ if (x < 1)
+ fun_l8_n11(x)
+ else
+ fun_l8_n472(x)
+ end
+end
+
+def fun_l7_n836(x)
+ if (x < 1)
+ fun_l8_n596(x)
+ else
+ fun_l8_n429(x)
+ end
+end
+
+def fun_l7_n837(x)
+ if (x < 1)
+ fun_l8_n643(x)
+ else
+ fun_l8_n61(x)
+ end
+end
+
+def fun_l7_n838(x)
+ if (x < 1)
+ fun_l8_n905(x)
+ else
+ fun_l8_n605(x)
+ end
+end
+
+def fun_l7_n839(x)
+ if (x < 1)
+ fun_l8_n371(x)
+ else
+ fun_l8_n44(x)
+ end
+end
+
+def fun_l7_n840(x)
+ if (x < 1)
+ fun_l8_n195(x)
+ else
+ fun_l8_n368(x)
+ end
+end
+
+def fun_l7_n841(x)
+ if (x < 1)
+ fun_l8_n432(x)
+ else
+ fun_l8_n368(x)
+ end
+end
+
+def fun_l7_n842(x)
+ if (x < 1)
+ fun_l8_n145(x)
+ else
+ fun_l8_n180(x)
+ end
+end
+
+def fun_l7_n843(x)
+ if (x < 1)
+ fun_l8_n124(x)
+ else
+ fun_l8_n358(x)
+ end
+end
+
+def fun_l7_n844(x)
+ if (x < 1)
+ fun_l8_n87(x)
+ else
+ fun_l8_n364(x)
+ end
+end
+
+def fun_l7_n845(x)
+ if (x < 1)
+ fun_l8_n133(x)
+ else
+ fun_l8_n849(x)
+ end
+end
+
+def fun_l7_n846(x)
+ if (x < 1)
+ fun_l8_n686(x)
+ else
+ fun_l8_n556(x)
+ end
+end
+
+def fun_l7_n847(x)
+ if (x < 1)
+ fun_l8_n732(x)
+ else
+ fun_l8_n536(x)
+ end
+end
+
+def fun_l7_n848(x)
+ if (x < 1)
+ fun_l8_n950(x)
+ else
+ fun_l8_n173(x)
+ end
+end
+
+def fun_l7_n849(x)
+ if (x < 1)
+ fun_l8_n694(x)
+ else
+ fun_l8_n747(x)
+ end
+end
+
+def fun_l7_n850(x)
+ if (x < 1)
+ fun_l8_n421(x)
+ else
+ fun_l8_n940(x)
+ end
+end
+
+def fun_l7_n851(x)
+ if (x < 1)
+ fun_l8_n359(x)
+ else
+ fun_l8_n239(x)
+ end
+end
+
+def fun_l7_n852(x)
+ if (x < 1)
+ fun_l8_n731(x)
+ else
+ fun_l8_n530(x)
+ end
+end
+
+def fun_l7_n853(x)
+ if (x < 1)
+ fun_l8_n887(x)
+ else
+ fun_l8_n548(x)
+ end
+end
+
+def fun_l7_n854(x)
+ if (x < 1)
+ fun_l8_n47(x)
+ else
+ fun_l8_n675(x)
+ end
+end
+
+def fun_l7_n855(x)
+ if (x < 1)
+ fun_l8_n534(x)
+ else
+ fun_l8_n34(x)
+ end
+end
+
+def fun_l7_n856(x)
+ if (x < 1)
+ fun_l8_n935(x)
+ else
+ fun_l8_n402(x)
+ end
+end
+
+def fun_l7_n857(x)
+ if (x < 1)
+ fun_l8_n873(x)
+ else
+ fun_l8_n864(x)
+ end
+end
+
+def fun_l7_n858(x)
+ if (x < 1)
+ fun_l8_n325(x)
+ else
+ fun_l8_n495(x)
+ end
+end
+
+def fun_l7_n859(x)
+ if (x < 1)
+ fun_l8_n758(x)
+ else
+ fun_l8_n63(x)
+ end
+end
+
+def fun_l7_n860(x)
+ if (x < 1)
+ fun_l8_n811(x)
+ else
+ fun_l8_n150(x)
+ end
+end
+
+def fun_l7_n861(x)
+ if (x < 1)
+ fun_l8_n666(x)
+ else
+ fun_l8_n282(x)
+ end
+end
+
+def fun_l7_n862(x)
+ if (x < 1)
+ fun_l8_n123(x)
+ else
+ fun_l8_n893(x)
+ end
+end
+
+def fun_l7_n863(x)
+ if (x < 1)
+ fun_l8_n681(x)
+ else
+ fun_l8_n640(x)
+ end
+end
+
+def fun_l7_n864(x)
+ if (x < 1)
+ fun_l8_n861(x)
+ else
+ fun_l8_n922(x)
+ end
+end
+
+def fun_l7_n865(x)
+ if (x < 1)
+ fun_l8_n8(x)
+ else
+ fun_l8_n14(x)
+ end
+end
+
+def fun_l7_n866(x)
+ if (x < 1)
+ fun_l8_n713(x)
+ else
+ fun_l8_n802(x)
+ end
+end
+
+def fun_l7_n867(x)
+ if (x < 1)
+ fun_l8_n654(x)
+ else
+ fun_l8_n540(x)
+ end
+end
+
+def fun_l7_n868(x)
+ if (x < 1)
+ fun_l8_n308(x)
+ else
+ fun_l8_n504(x)
+ end
+end
+
+def fun_l7_n869(x)
+ if (x < 1)
+ fun_l8_n67(x)
+ else
+ fun_l8_n592(x)
+ end
+end
+
+def fun_l7_n870(x)
+ if (x < 1)
+ fun_l8_n295(x)
+ else
+ fun_l8_n585(x)
+ end
+end
+
+def fun_l7_n871(x)
+ if (x < 1)
+ fun_l8_n121(x)
+ else
+ fun_l8_n797(x)
+ end
+end
+
+def fun_l7_n872(x)
+ if (x < 1)
+ fun_l8_n234(x)
+ else
+ fun_l8_n834(x)
+ end
+end
+
+def fun_l7_n873(x)
+ if (x < 1)
+ fun_l8_n287(x)
+ else
+ fun_l8_n731(x)
+ end
+end
+
+def fun_l7_n874(x)
+ if (x < 1)
+ fun_l8_n460(x)
+ else
+ fun_l8_n261(x)
+ end
+end
+
+def fun_l7_n875(x)
+ if (x < 1)
+ fun_l8_n211(x)
+ else
+ fun_l8_n199(x)
+ end
+end
+
+def fun_l7_n876(x)
+ if (x < 1)
+ fun_l8_n256(x)
+ else
+ fun_l8_n547(x)
+ end
+end
+
+def fun_l7_n877(x)
+ if (x < 1)
+ fun_l8_n560(x)
+ else
+ fun_l8_n202(x)
+ end
+end
+
+def fun_l7_n878(x)
+ if (x < 1)
+ fun_l8_n5(x)
+ else
+ fun_l8_n955(x)
+ end
+end
+
+def fun_l7_n879(x)
+ if (x < 1)
+ fun_l8_n908(x)
+ else
+ fun_l8_n677(x)
+ end
+end
+
+def fun_l7_n880(x)
+ if (x < 1)
+ fun_l8_n366(x)
+ else
+ fun_l8_n226(x)
+ end
+end
+
+def fun_l7_n881(x)
+ if (x < 1)
+ fun_l8_n970(x)
+ else
+ fun_l8_n939(x)
+ end
+end
+
+def fun_l7_n882(x)
+ if (x < 1)
+ fun_l8_n562(x)
+ else
+ fun_l8_n420(x)
+ end
+end
+
+def fun_l7_n883(x)
+ if (x < 1)
+ fun_l8_n377(x)
+ else
+ fun_l8_n35(x)
+ end
+end
+
+def fun_l7_n884(x)
+ if (x < 1)
+ fun_l8_n459(x)
+ else
+ fun_l8_n155(x)
+ end
+end
+
+def fun_l7_n885(x)
+ if (x < 1)
+ fun_l8_n257(x)
+ else
+ fun_l8_n704(x)
+ end
+end
+
+def fun_l7_n886(x)
+ if (x < 1)
+ fun_l8_n393(x)
+ else
+ fun_l8_n741(x)
+ end
+end
+
+def fun_l7_n887(x)
+ if (x < 1)
+ fun_l8_n333(x)
+ else
+ fun_l8_n225(x)
+ end
+end
+
+def fun_l7_n888(x)
+ if (x < 1)
+ fun_l8_n614(x)
+ else
+ fun_l8_n106(x)
+ end
+end
+
+def fun_l7_n889(x)
+ if (x < 1)
+ fun_l8_n229(x)
+ else
+ fun_l8_n519(x)
+ end
+end
+
+def fun_l7_n890(x)
+ if (x < 1)
+ fun_l8_n651(x)
+ else
+ fun_l8_n68(x)
+ end
+end
+
+def fun_l7_n891(x)
+ if (x < 1)
+ fun_l8_n987(x)
+ else
+ fun_l8_n905(x)
+ end
+end
+
+def fun_l7_n892(x)
+ if (x < 1)
+ fun_l8_n698(x)
+ else
+ fun_l8_n942(x)
+ end
+end
+
+def fun_l7_n893(x)
+ if (x < 1)
+ fun_l8_n559(x)
+ else
+ fun_l8_n613(x)
+ end
+end
+
+def fun_l7_n894(x)
+ if (x < 1)
+ fun_l8_n402(x)
+ else
+ fun_l8_n966(x)
+ end
+end
+
+def fun_l7_n895(x)
+ if (x < 1)
+ fun_l8_n475(x)
+ else
+ fun_l8_n158(x)
+ end
+end
+
+def fun_l7_n896(x)
+ if (x < 1)
+ fun_l8_n420(x)
+ else
+ fun_l8_n113(x)
+ end
+end
+
+def fun_l7_n897(x)
+ if (x < 1)
+ fun_l8_n366(x)
+ else
+ fun_l8_n272(x)
+ end
+end
+
+def fun_l7_n898(x)
+ if (x < 1)
+ fun_l8_n252(x)
+ else
+ fun_l8_n821(x)
+ end
+end
+
+def fun_l7_n899(x)
+ if (x < 1)
+ fun_l8_n736(x)
+ else
+ fun_l8_n174(x)
+ end
+end
+
+def fun_l7_n900(x)
+ if (x < 1)
+ fun_l8_n151(x)
+ else
+ fun_l8_n611(x)
+ end
+end
+
+def fun_l7_n901(x)
+ if (x < 1)
+ fun_l8_n3(x)
+ else
+ fun_l8_n221(x)
+ end
+end
+
+def fun_l7_n902(x)
+ if (x < 1)
+ fun_l8_n319(x)
+ else
+ fun_l8_n930(x)
+ end
+end
+
+def fun_l7_n903(x)
+ if (x < 1)
+ fun_l8_n619(x)
+ else
+ fun_l8_n491(x)
+ end
+end
+
+def fun_l7_n904(x)
+ if (x < 1)
+ fun_l8_n144(x)
+ else
+ fun_l8_n304(x)
+ end
+end
+
+def fun_l7_n905(x)
+ if (x < 1)
+ fun_l8_n721(x)
+ else
+ fun_l8_n996(x)
+ end
+end
+
+def fun_l7_n906(x)
+ if (x < 1)
+ fun_l8_n700(x)
+ else
+ fun_l8_n688(x)
+ end
+end
+
+def fun_l7_n907(x)
+ if (x < 1)
+ fun_l8_n202(x)
+ else
+ fun_l8_n668(x)
+ end
+end
+
+def fun_l7_n908(x)
+ if (x < 1)
+ fun_l8_n514(x)
+ else
+ fun_l8_n306(x)
+ end
+end
+
+def fun_l7_n909(x)
+ if (x < 1)
+ fun_l8_n592(x)
+ else
+ fun_l8_n43(x)
+ end
+end
+
+def fun_l7_n910(x)
+ if (x < 1)
+ fun_l8_n743(x)
+ else
+ fun_l8_n406(x)
+ end
+end
+
+def fun_l7_n911(x)
+ if (x < 1)
+ fun_l8_n801(x)
+ else
+ fun_l8_n91(x)
+ end
+end
+
+def fun_l7_n912(x)
+ if (x < 1)
+ fun_l8_n392(x)
+ else
+ fun_l8_n461(x)
+ end
+end
+
+def fun_l7_n913(x)
+ if (x < 1)
+ fun_l8_n175(x)
+ else
+ fun_l8_n578(x)
+ end
+end
+
+def fun_l7_n914(x)
+ if (x < 1)
+ fun_l8_n155(x)
+ else
+ fun_l8_n795(x)
+ end
+end
+
+def fun_l7_n915(x)
+ if (x < 1)
+ fun_l8_n394(x)
+ else
+ fun_l8_n89(x)
+ end
+end
+
+def fun_l7_n916(x)
+ if (x < 1)
+ fun_l8_n59(x)
+ else
+ fun_l8_n345(x)
+ end
+end
+
+def fun_l7_n917(x)
+ if (x < 1)
+ fun_l8_n450(x)
+ else
+ fun_l8_n136(x)
+ end
+end
+
+def fun_l7_n918(x)
+ if (x < 1)
+ fun_l8_n530(x)
+ else
+ fun_l8_n233(x)
+ end
+end
+
+def fun_l7_n919(x)
+ if (x < 1)
+ fun_l8_n632(x)
+ else
+ fun_l8_n631(x)
+ end
+end
+
+def fun_l7_n920(x)
+ if (x < 1)
+ fun_l8_n252(x)
+ else
+ fun_l8_n95(x)
+ end
+end
+
+def fun_l7_n921(x)
+ if (x < 1)
+ fun_l8_n980(x)
+ else
+ fun_l8_n819(x)
+ end
+end
+
+def fun_l7_n922(x)
+ if (x < 1)
+ fun_l8_n866(x)
+ else
+ fun_l8_n652(x)
+ end
+end
+
+def fun_l7_n923(x)
+ if (x < 1)
+ fun_l8_n239(x)
+ else
+ fun_l8_n83(x)
+ end
+end
+
+def fun_l7_n924(x)
+ if (x < 1)
+ fun_l8_n162(x)
+ else
+ fun_l8_n193(x)
+ end
+end
+
+def fun_l7_n925(x)
+ if (x < 1)
+ fun_l8_n78(x)
+ else
+ fun_l8_n546(x)
+ end
+end
+
+def fun_l7_n926(x)
+ if (x < 1)
+ fun_l8_n102(x)
+ else
+ fun_l8_n847(x)
+ end
+end
+
+def fun_l7_n927(x)
+ if (x < 1)
+ fun_l8_n84(x)
+ else
+ fun_l8_n257(x)
+ end
+end
+
+def fun_l7_n928(x)
+ if (x < 1)
+ fun_l8_n195(x)
+ else
+ fun_l8_n200(x)
+ end
+end
+
+def fun_l7_n929(x)
+ if (x < 1)
+ fun_l8_n245(x)
+ else
+ fun_l8_n926(x)
+ end
+end
+
+def fun_l7_n930(x)
+ if (x < 1)
+ fun_l8_n651(x)
+ else
+ fun_l8_n308(x)
+ end
+end
+
+def fun_l7_n931(x)
+ if (x < 1)
+ fun_l8_n312(x)
+ else
+ fun_l8_n901(x)
+ end
+end
+
+def fun_l7_n932(x)
+ if (x < 1)
+ fun_l8_n221(x)
+ else
+ fun_l8_n775(x)
+ end
+end
+
+def fun_l7_n933(x)
+ if (x < 1)
+ fun_l8_n397(x)
+ else
+ fun_l8_n973(x)
+ end
+end
+
+def fun_l7_n934(x)
+ if (x < 1)
+ fun_l8_n788(x)
+ else
+ fun_l8_n952(x)
+ end
+end
+
+def fun_l7_n935(x)
+ if (x < 1)
+ fun_l8_n965(x)
+ else
+ fun_l8_n861(x)
+ end
+end
+
+def fun_l7_n936(x)
+ if (x < 1)
+ fun_l8_n969(x)
+ else
+ fun_l8_n773(x)
+ end
+end
+
+def fun_l7_n937(x)
+ if (x < 1)
+ fun_l8_n742(x)
+ else
+ fun_l8_n237(x)
+ end
+end
+
+def fun_l7_n938(x)
+ if (x < 1)
+ fun_l8_n369(x)
+ else
+ fun_l8_n444(x)
+ end
+end
+
+def fun_l7_n939(x)
+ if (x < 1)
+ fun_l8_n863(x)
+ else
+ fun_l8_n515(x)
+ end
+end
+
+def fun_l7_n940(x)
+ if (x < 1)
+ fun_l8_n573(x)
+ else
+ fun_l8_n356(x)
+ end
+end
+
+def fun_l7_n941(x)
+ if (x < 1)
+ fun_l8_n188(x)
+ else
+ fun_l8_n171(x)
+ end
+end
+
+def fun_l7_n942(x)
+ if (x < 1)
+ fun_l8_n483(x)
+ else
+ fun_l8_n556(x)
+ end
+end
+
+def fun_l7_n943(x)
+ if (x < 1)
+ fun_l8_n935(x)
+ else
+ fun_l8_n723(x)
+ end
+end
+
+def fun_l7_n944(x)
+ if (x < 1)
+ fun_l8_n451(x)
+ else
+ fun_l8_n227(x)
+ end
+end
+
+def fun_l7_n945(x)
+ if (x < 1)
+ fun_l8_n651(x)
+ else
+ fun_l8_n891(x)
+ end
+end
+
+def fun_l7_n946(x)
+ if (x < 1)
+ fun_l8_n225(x)
+ else
+ fun_l8_n178(x)
+ end
+end
+
+def fun_l7_n947(x)
+ if (x < 1)
+ fun_l8_n704(x)
+ else
+ fun_l8_n456(x)
+ end
+end
+
+def fun_l7_n948(x)
+ if (x < 1)
+ fun_l8_n995(x)
+ else
+ fun_l8_n816(x)
+ end
+end
+
+def fun_l7_n949(x)
+ if (x < 1)
+ fun_l8_n287(x)
+ else
+ fun_l8_n140(x)
+ end
+end
+
+def fun_l7_n950(x)
+ if (x < 1)
+ fun_l8_n302(x)
+ else
+ fun_l8_n582(x)
+ end
+end
+
+def fun_l7_n951(x)
+ if (x < 1)
+ fun_l8_n58(x)
+ else
+ fun_l8_n759(x)
+ end
+end
+
+def fun_l7_n952(x)
+ if (x < 1)
+ fun_l8_n54(x)
+ else
+ fun_l8_n812(x)
+ end
+end
+
+def fun_l7_n953(x)
+ if (x < 1)
+ fun_l8_n263(x)
+ else
+ fun_l8_n801(x)
+ end
+end
+
+def fun_l7_n954(x)
+ if (x < 1)
+ fun_l8_n507(x)
+ else
+ fun_l8_n781(x)
+ end
+end
+
+def fun_l7_n955(x)
+ if (x < 1)
+ fun_l8_n328(x)
+ else
+ fun_l8_n886(x)
+ end
+end
+
+def fun_l7_n956(x)
+ if (x < 1)
+ fun_l8_n653(x)
+ else
+ fun_l8_n847(x)
+ end
+end
+
+def fun_l7_n957(x)
+ if (x < 1)
+ fun_l8_n594(x)
+ else
+ fun_l8_n9(x)
+ end
+end
+
+def fun_l7_n958(x)
+ if (x < 1)
+ fun_l8_n818(x)
+ else
+ fun_l8_n912(x)
+ end
+end
+
+def fun_l7_n959(x)
+ if (x < 1)
+ fun_l8_n941(x)
+ else
+ fun_l8_n394(x)
+ end
+end
+
+def fun_l7_n960(x)
+ if (x < 1)
+ fun_l8_n151(x)
+ else
+ fun_l8_n426(x)
+ end
+end
+
+def fun_l7_n961(x)
+ if (x < 1)
+ fun_l8_n276(x)
+ else
+ fun_l8_n458(x)
+ end
+end
+
+def fun_l7_n962(x)
+ if (x < 1)
+ fun_l8_n310(x)
+ else
+ fun_l8_n472(x)
+ end
+end
+
+def fun_l7_n963(x)
+ if (x < 1)
+ fun_l8_n103(x)
+ else
+ fun_l8_n995(x)
+ end
+end
+
+def fun_l7_n964(x)
+ if (x < 1)
+ fun_l8_n808(x)
+ else
+ fun_l8_n41(x)
+ end
+end
+
+def fun_l7_n965(x)
+ if (x < 1)
+ fun_l8_n840(x)
+ else
+ fun_l8_n143(x)
+ end
+end
+
+def fun_l7_n966(x)
+ if (x < 1)
+ fun_l8_n855(x)
+ else
+ fun_l8_n38(x)
+ end
+end
+
+def fun_l7_n967(x)
+ if (x < 1)
+ fun_l8_n316(x)
+ else
+ fun_l8_n86(x)
+ end
+end
+
+def fun_l7_n968(x)
+ if (x < 1)
+ fun_l8_n856(x)
+ else
+ fun_l8_n182(x)
+ end
+end
+
+def fun_l7_n969(x)
+ if (x < 1)
+ fun_l8_n436(x)
+ else
+ fun_l8_n460(x)
+ end
+end
+
+def fun_l7_n970(x)
+ if (x < 1)
+ fun_l8_n187(x)
+ else
+ fun_l8_n90(x)
+ end
+end
+
+def fun_l7_n971(x)
+ if (x < 1)
+ fun_l8_n689(x)
+ else
+ fun_l8_n921(x)
+ end
+end
+
+def fun_l7_n972(x)
+ if (x < 1)
+ fun_l8_n207(x)
+ else
+ fun_l8_n79(x)
+ end
+end
+
+def fun_l7_n973(x)
+ if (x < 1)
+ fun_l8_n137(x)
+ else
+ fun_l8_n17(x)
+ end
+end
+
+def fun_l7_n974(x)
+ if (x < 1)
+ fun_l8_n813(x)
+ else
+ fun_l8_n944(x)
+ end
+end
+
+def fun_l7_n975(x)
+ if (x < 1)
+ fun_l8_n589(x)
+ else
+ fun_l8_n416(x)
+ end
+end
+
+def fun_l7_n976(x)
+ if (x < 1)
+ fun_l8_n763(x)
+ else
+ fun_l8_n289(x)
+ end
+end
+
+def fun_l7_n977(x)
+ if (x < 1)
+ fun_l8_n118(x)
+ else
+ fun_l8_n998(x)
+ end
+end
+
+def fun_l7_n978(x)
+ if (x < 1)
+ fun_l8_n382(x)
+ else
+ fun_l8_n216(x)
+ end
+end
+
+def fun_l7_n979(x)
+ if (x < 1)
+ fun_l8_n304(x)
+ else
+ fun_l8_n586(x)
+ end
+end
+
+def fun_l7_n980(x)
+ if (x < 1)
+ fun_l8_n693(x)
+ else
+ fun_l8_n118(x)
+ end
+end
+
+def fun_l7_n981(x)
+ if (x < 1)
+ fun_l8_n669(x)
+ else
+ fun_l8_n796(x)
+ end
+end
+
+def fun_l7_n982(x)
+ if (x < 1)
+ fun_l8_n187(x)
+ else
+ fun_l8_n143(x)
+ end
+end
+
+def fun_l7_n983(x)
+ if (x < 1)
+ fun_l8_n848(x)
+ else
+ fun_l8_n172(x)
+ end
+end
+
+def fun_l7_n984(x)
+ if (x < 1)
+ fun_l8_n782(x)
+ else
+ fun_l8_n670(x)
+ end
+end
+
+def fun_l7_n985(x)
+ if (x < 1)
+ fun_l8_n275(x)
+ else
+ fun_l8_n520(x)
+ end
+end
+
+def fun_l7_n986(x)
+ if (x < 1)
+ fun_l8_n841(x)
+ else
+ fun_l8_n446(x)
+ end
+end
+
+def fun_l7_n987(x)
+ if (x < 1)
+ fun_l8_n712(x)
+ else
+ fun_l8_n976(x)
+ end
+end
+
+def fun_l7_n988(x)
+ if (x < 1)
+ fun_l8_n115(x)
+ else
+ fun_l8_n222(x)
+ end
+end
+
+def fun_l7_n989(x)
+ if (x < 1)
+ fun_l8_n23(x)
+ else
+ fun_l8_n665(x)
+ end
+end
+
+def fun_l7_n990(x)
+ if (x < 1)
+ fun_l8_n416(x)
+ else
+ fun_l8_n455(x)
+ end
+end
+
+def fun_l7_n991(x)
+ if (x < 1)
+ fun_l8_n556(x)
+ else
+ fun_l8_n971(x)
+ end
+end
+
+def fun_l7_n992(x)
+ if (x < 1)
+ fun_l8_n726(x)
+ else
+ fun_l8_n312(x)
+ end
+end
+
+def fun_l7_n993(x)
+ if (x < 1)
+ fun_l8_n354(x)
+ else
+ fun_l8_n860(x)
+ end
+end
+
+def fun_l7_n994(x)
+ if (x < 1)
+ fun_l8_n62(x)
+ else
+ fun_l8_n290(x)
+ end
+end
+
+def fun_l7_n995(x)
+ if (x < 1)
+ fun_l8_n695(x)
+ else
+ fun_l8_n881(x)
+ end
+end
+
+def fun_l7_n996(x)
+ if (x < 1)
+ fun_l8_n938(x)
+ else
+ fun_l8_n566(x)
+ end
+end
+
+def fun_l7_n997(x)
+ if (x < 1)
+ fun_l8_n379(x)
+ else
+ fun_l8_n731(x)
+ end
+end
+
+def fun_l7_n998(x)
+ if (x < 1)
+ fun_l8_n320(x)
+ else
+ fun_l8_n339(x)
+ end
+end
+
+def fun_l7_n999(x)
+ if (x < 1)
+ fun_l8_n219(x)
+ else
+ fun_l8_n320(x)
+ end
+end
+
+def fun_l8_n0(x)
+ if (x < 1)
+ fun_l9_n590(x)
+ else
+ fun_l9_n825(x)
+ end
+end
+
+def fun_l8_n1(x)
+ if (x < 1)
+ fun_l9_n301(x)
+ else
+ fun_l9_n457(x)
+ end
+end
+
+def fun_l8_n2(x)
+ if (x < 1)
+ fun_l9_n637(x)
+ else
+ fun_l9_n909(x)
+ end
+end
+
+def fun_l8_n3(x)
+ if (x < 1)
+ fun_l9_n485(x)
+ else
+ fun_l9_n603(x)
+ end
+end
+
+def fun_l8_n4(x)
+ if (x < 1)
+ fun_l9_n465(x)
+ else
+ fun_l9_n39(x)
+ end
+end
+
+def fun_l8_n5(x)
+ if (x < 1)
+ fun_l9_n861(x)
+ else
+ fun_l9_n153(x)
+ end
+end
+
+def fun_l8_n6(x)
+ if (x < 1)
+ fun_l9_n130(x)
+ else
+ fun_l9_n401(x)
+ end
+end
+
+def fun_l8_n7(x)
+ if (x < 1)
+ fun_l9_n726(x)
+ else
+ fun_l9_n248(x)
+ end
+end
+
+def fun_l8_n8(x)
+ if (x < 1)
+ fun_l9_n607(x)
+ else
+ fun_l9_n681(x)
+ end
+end
+
+def fun_l8_n9(x)
+ if (x < 1)
+ fun_l9_n862(x)
+ else
+ fun_l9_n8(x)
+ end
+end
+
+def fun_l8_n10(x)
+ if (x < 1)
+ fun_l9_n607(x)
+ else
+ fun_l9_n373(x)
+ end
+end
+
+def fun_l8_n11(x)
+ if (x < 1)
+ fun_l9_n692(x)
+ else
+ fun_l9_n31(x)
+ end
+end
+
+def fun_l8_n12(x)
+ if (x < 1)
+ fun_l9_n412(x)
+ else
+ fun_l9_n576(x)
+ end
+end
+
+def fun_l8_n13(x)
+ if (x < 1)
+ fun_l9_n582(x)
+ else
+ fun_l9_n281(x)
+ end
+end
+
+def fun_l8_n14(x)
+ if (x < 1)
+ fun_l9_n986(x)
+ else
+ fun_l9_n99(x)
+ end
+end
+
+def fun_l8_n15(x)
+ if (x < 1)
+ fun_l9_n807(x)
+ else
+ fun_l9_n807(x)
+ end
+end
+
+def fun_l8_n16(x)
+ if (x < 1)
+ fun_l9_n635(x)
+ else
+ fun_l9_n51(x)
+ end
+end
+
+def fun_l8_n17(x)
+ if (x < 1)
+ fun_l9_n193(x)
+ else
+ fun_l9_n673(x)
+ end
+end
+
+def fun_l8_n18(x)
+ if (x < 1)
+ fun_l9_n897(x)
+ else
+ fun_l9_n881(x)
+ end
+end
+
+def fun_l8_n19(x)
+ if (x < 1)
+ fun_l9_n288(x)
+ else
+ fun_l9_n337(x)
+ end
+end
+
+def fun_l8_n20(x)
+ if (x < 1)
+ fun_l9_n754(x)
+ else
+ fun_l9_n581(x)
+ end
+end
+
+def fun_l8_n21(x)
+ if (x < 1)
+ fun_l9_n265(x)
+ else
+ fun_l9_n205(x)
+ end
+end
+
+def fun_l8_n22(x)
+ if (x < 1)
+ fun_l9_n784(x)
+ else
+ fun_l9_n428(x)
+ end
+end
+
+def fun_l8_n23(x)
+ if (x < 1)
+ fun_l9_n821(x)
+ else
+ fun_l9_n930(x)
+ end
+end
+
+def fun_l8_n24(x)
+ if (x < 1)
+ fun_l9_n237(x)
+ else
+ fun_l9_n681(x)
+ end
+end
+
+def fun_l8_n25(x)
+ if (x < 1)
+ fun_l9_n32(x)
+ else
+ fun_l9_n405(x)
+ end
+end
+
+def fun_l8_n26(x)
+ if (x < 1)
+ fun_l9_n370(x)
+ else
+ fun_l9_n802(x)
+ end
+end
+
+def fun_l8_n27(x)
+ if (x < 1)
+ fun_l9_n4(x)
+ else
+ fun_l9_n288(x)
+ end
+end
+
+def fun_l8_n28(x)
+ if (x < 1)
+ fun_l9_n9(x)
+ else
+ fun_l9_n180(x)
+ end
+end
+
+def fun_l8_n29(x)
+ if (x < 1)
+ fun_l9_n585(x)
+ else
+ fun_l9_n396(x)
+ end
+end
+
+def fun_l8_n30(x)
+ if (x < 1)
+ fun_l9_n954(x)
+ else
+ fun_l9_n603(x)
+ end
+end
+
+def fun_l8_n31(x)
+ if (x < 1)
+ fun_l9_n808(x)
+ else
+ fun_l9_n770(x)
+ end
+end
+
+def fun_l8_n32(x)
+ if (x < 1)
+ fun_l9_n442(x)
+ else
+ fun_l9_n243(x)
+ end
+end
+
+def fun_l8_n33(x)
+ if (x < 1)
+ fun_l9_n139(x)
+ else
+ fun_l9_n93(x)
+ end
+end
+
+def fun_l8_n34(x)
+ if (x < 1)
+ fun_l9_n270(x)
+ else
+ fun_l9_n333(x)
+ end
+end
+
+def fun_l8_n35(x)
+ if (x < 1)
+ fun_l9_n542(x)
+ else
+ fun_l9_n38(x)
+ end
+end
+
+def fun_l8_n36(x)
+ if (x < 1)
+ fun_l9_n892(x)
+ else
+ fun_l9_n517(x)
+ end
+end
+
+def fun_l8_n37(x)
+ if (x < 1)
+ fun_l9_n599(x)
+ else
+ fun_l9_n692(x)
+ end
+end
+
+def fun_l8_n38(x)
+ if (x < 1)
+ fun_l9_n0(x)
+ else
+ fun_l9_n879(x)
+ end
+end
+
+def fun_l8_n39(x)
+ if (x < 1)
+ fun_l9_n835(x)
+ else
+ fun_l9_n134(x)
+ end
+end
+
+def fun_l8_n40(x)
+ if (x < 1)
+ fun_l9_n151(x)
+ else
+ fun_l9_n546(x)
+ end
+end
+
+def fun_l8_n41(x)
+ if (x < 1)
+ fun_l9_n214(x)
+ else
+ fun_l9_n652(x)
+ end
+end
+
+def fun_l8_n42(x)
+ if (x < 1)
+ fun_l9_n239(x)
+ else
+ fun_l9_n123(x)
+ end
+end
+
+def fun_l8_n43(x)
+ if (x < 1)
+ fun_l9_n763(x)
+ else
+ fun_l9_n953(x)
+ end
+end
+
+def fun_l8_n44(x)
+ if (x < 1)
+ fun_l9_n449(x)
+ else
+ fun_l9_n343(x)
+ end
+end
+
+def fun_l8_n45(x)
+ if (x < 1)
+ fun_l9_n727(x)
+ else
+ fun_l9_n47(x)
+ end
+end
+
+def fun_l8_n46(x)
+ if (x < 1)
+ fun_l9_n684(x)
+ else
+ fun_l9_n711(x)
+ end
+end
+
+def fun_l8_n47(x)
+ if (x < 1)
+ fun_l9_n895(x)
+ else
+ fun_l9_n864(x)
+ end
+end
+
+def fun_l8_n48(x)
+ if (x < 1)
+ fun_l9_n544(x)
+ else
+ fun_l9_n358(x)
+ end
+end
+
+def fun_l8_n49(x)
+ if (x < 1)
+ fun_l9_n829(x)
+ else
+ fun_l9_n411(x)
+ end
+end
+
+def fun_l8_n50(x)
+ if (x < 1)
+ fun_l9_n673(x)
+ else
+ fun_l9_n640(x)
+ end
+end
+
+def fun_l8_n51(x)
+ if (x < 1)
+ fun_l9_n9(x)
+ else
+ fun_l9_n690(x)
+ end
+end
+
+def fun_l8_n52(x)
+ if (x < 1)
+ fun_l9_n293(x)
+ else
+ fun_l9_n889(x)
+ end
+end
+
+def fun_l8_n53(x)
+ if (x < 1)
+ fun_l9_n141(x)
+ else
+ fun_l9_n554(x)
+ end
+end
+
+def fun_l8_n54(x)
+ if (x < 1)
+ fun_l9_n982(x)
+ else
+ fun_l9_n955(x)
+ end
+end
+
+def fun_l8_n55(x)
+ if (x < 1)
+ fun_l9_n382(x)
+ else
+ fun_l9_n897(x)
+ end
+end
+
+def fun_l8_n56(x)
+ if (x < 1)
+ fun_l9_n332(x)
+ else
+ fun_l9_n550(x)
+ end
+end
+
+def fun_l8_n57(x)
+ if (x < 1)
+ fun_l9_n46(x)
+ else
+ fun_l9_n569(x)
+ end
+end
+
+def fun_l8_n58(x)
+ if (x < 1)
+ fun_l9_n556(x)
+ else
+ fun_l9_n0(x)
+ end
+end
+
+def fun_l8_n59(x)
+ if (x < 1)
+ fun_l9_n931(x)
+ else
+ fun_l9_n995(x)
+ end
+end
+
+def fun_l8_n60(x)
+ if (x < 1)
+ fun_l9_n470(x)
+ else
+ fun_l9_n679(x)
+ end
+end
+
+def fun_l8_n61(x)
+ if (x < 1)
+ fun_l9_n269(x)
+ else
+ fun_l9_n742(x)
+ end
+end
+
+def fun_l8_n62(x)
+ if (x < 1)
+ fun_l9_n542(x)
+ else
+ fun_l9_n556(x)
+ end
+end
+
+def fun_l8_n63(x)
+ if (x < 1)
+ fun_l9_n713(x)
+ else
+ fun_l9_n249(x)
+ end
+end
+
+def fun_l8_n64(x)
+ if (x < 1)
+ fun_l9_n842(x)
+ else
+ fun_l9_n566(x)
+ end
+end
+
+def fun_l8_n65(x)
+ if (x < 1)
+ fun_l9_n708(x)
+ else
+ fun_l9_n3(x)
+ end
+end
+
+def fun_l8_n66(x)
+ if (x < 1)
+ fun_l9_n845(x)
+ else
+ fun_l9_n926(x)
+ end
+end
+
+def fun_l8_n67(x)
+ if (x < 1)
+ fun_l9_n823(x)
+ else
+ fun_l9_n326(x)
+ end
+end
+
+def fun_l8_n68(x)
+ if (x < 1)
+ fun_l9_n457(x)
+ else
+ fun_l9_n888(x)
+ end
+end
+
+def fun_l8_n69(x)
+ if (x < 1)
+ fun_l9_n454(x)
+ else
+ fun_l9_n601(x)
+ end
+end
+
+def fun_l8_n70(x)
+ if (x < 1)
+ fun_l9_n636(x)
+ else
+ fun_l9_n482(x)
+ end
+end
+
+def fun_l8_n71(x)
+ if (x < 1)
+ fun_l9_n778(x)
+ else
+ fun_l9_n678(x)
+ end
+end
+
+def fun_l8_n72(x)
+ if (x < 1)
+ fun_l9_n715(x)
+ else
+ fun_l9_n851(x)
+ end
+end
+
+def fun_l8_n73(x)
+ if (x < 1)
+ fun_l9_n330(x)
+ else
+ fun_l9_n84(x)
+ end
+end
+
+def fun_l8_n74(x)
+ if (x < 1)
+ fun_l9_n524(x)
+ else
+ fun_l9_n109(x)
+ end
+end
+
+def fun_l8_n75(x)
+ if (x < 1)
+ fun_l9_n610(x)
+ else
+ fun_l9_n723(x)
+ end
+end
+
+def fun_l8_n76(x)
+ if (x < 1)
+ fun_l9_n964(x)
+ else
+ fun_l9_n893(x)
+ end
+end
+
+def fun_l8_n77(x)
+ if (x < 1)
+ fun_l9_n653(x)
+ else
+ fun_l9_n451(x)
+ end
+end
+
+def fun_l8_n78(x)
+ if (x < 1)
+ fun_l9_n582(x)
+ else
+ fun_l9_n29(x)
+ end
+end
+
+def fun_l8_n79(x)
+ if (x < 1)
+ fun_l9_n219(x)
+ else
+ fun_l9_n25(x)
+ end
+end
+
+def fun_l8_n80(x)
+ if (x < 1)
+ fun_l9_n471(x)
+ else
+ fun_l9_n217(x)
+ end
+end
+
+def fun_l8_n81(x)
+ if (x < 1)
+ fun_l9_n339(x)
+ else
+ fun_l9_n365(x)
+ end
+end
+
+def fun_l8_n82(x)
+ if (x < 1)
+ fun_l9_n808(x)
+ else
+ fun_l9_n501(x)
+ end
+end
+
+def fun_l8_n83(x)
+ if (x < 1)
+ fun_l9_n23(x)
+ else
+ fun_l9_n252(x)
+ end
+end
+
+def fun_l8_n84(x)
+ if (x < 1)
+ fun_l9_n628(x)
+ else
+ fun_l9_n514(x)
+ end
+end
+
+def fun_l8_n85(x)
+ if (x < 1)
+ fun_l9_n22(x)
+ else
+ fun_l9_n465(x)
+ end
+end
+
+def fun_l8_n86(x)
+ if (x < 1)
+ fun_l9_n553(x)
+ else
+ fun_l9_n788(x)
+ end
+end
+
+def fun_l8_n87(x)
+ if (x < 1)
+ fun_l9_n852(x)
+ else
+ fun_l9_n945(x)
+ end
+end
+
+def fun_l8_n88(x)
+ if (x < 1)
+ fun_l9_n886(x)
+ else
+ fun_l9_n438(x)
+ end
+end
+
+def fun_l8_n89(x)
+ if (x < 1)
+ fun_l9_n821(x)
+ else
+ fun_l9_n697(x)
+ end
+end
+
+def fun_l8_n90(x)
+ if (x < 1)
+ fun_l9_n388(x)
+ else
+ fun_l9_n39(x)
+ end
+end
+
+def fun_l8_n91(x)
+ if (x < 1)
+ fun_l9_n135(x)
+ else
+ fun_l9_n736(x)
+ end
+end
+
+def fun_l8_n92(x)
+ if (x < 1)
+ fun_l9_n225(x)
+ else
+ fun_l9_n125(x)
+ end
+end
+
+def fun_l8_n93(x)
+ if (x < 1)
+ fun_l9_n201(x)
+ else
+ fun_l9_n320(x)
+ end
+end
+
+def fun_l8_n94(x)
+ if (x < 1)
+ fun_l9_n30(x)
+ else
+ fun_l9_n938(x)
+ end
+end
+
+def fun_l8_n95(x)
+ if (x < 1)
+ fun_l9_n987(x)
+ else
+ fun_l9_n141(x)
+ end
+end
+
+def fun_l8_n96(x)
+ if (x < 1)
+ fun_l9_n968(x)
+ else
+ fun_l9_n999(x)
+ end
+end
+
+def fun_l8_n97(x)
+ if (x < 1)
+ fun_l9_n852(x)
+ else
+ fun_l9_n927(x)
+ end
+end
+
+def fun_l8_n98(x)
+ if (x < 1)
+ fun_l9_n720(x)
+ else
+ fun_l9_n717(x)
+ end
+end
+
+def fun_l8_n99(x)
+ if (x < 1)
+ fun_l9_n907(x)
+ else
+ fun_l9_n8(x)
+ end
+end
+
+def fun_l8_n100(x)
+ if (x < 1)
+ fun_l9_n973(x)
+ else
+ fun_l9_n379(x)
+ end
+end
+
+def fun_l8_n101(x)
+ if (x < 1)
+ fun_l9_n370(x)
+ else
+ fun_l9_n530(x)
+ end
+end
+
+def fun_l8_n102(x)
+ if (x < 1)
+ fun_l9_n411(x)
+ else
+ fun_l9_n560(x)
+ end
+end
+
+def fun_l8_n103(x)
+ if (x < 1)
+ fun_l9_n54(x)
+ else
+ fun_l9_n813(x)
+ end
+end
+
+def fun_l8_n104(x)
+ if (x < 1)
+ fun_l9_n217(x)
+ else
+ fun_l9_n455(x)
+ end
+end
+
+def fun_l8_n105(x)
+ if (x < 1)
+ fun_l9_n667(x)
+ else
+ fun_l9_n92(x)
+ end
+end
+
+def fun_l8_n106(x)
+ if (x < 1)
+ fun_l9_n933(x)
+ else
+ fun_l9_n727(x)
+ end
+end
+
+def fun_l8_n107(x)
+ if (x < 1)
+ fun_l9_n578(x)
+ else
+ fun_l9_n350(x)
+ end
+end
+
+def fun_l8_n108(x)
+ if (x < 1)
+ fun_l9_n75(x)
+ else
+ fun_l9_n439(x)
+ end
+end
+
+def fun_l8_n109(x)
+ if (x < 1)
+ fun_l9_n21(x)
+ else
+ fun_l9_n112(x)
+ end
+end
+
+def fun_l8_n110(x)
+ if (x < 1)
+ fun_l9_n57(x)
+ else
+ fun_l9_n553(x)
+ end
+end
+
+def fun_l8_n111(x)
+ if (x < 1)
+ fun_l9_n347(x)
+ else
+ fun_l9_n285(x)
+ end
+end
+
+def fun_l8_n112(x)
+ if (x < 1)
+ fun_l9_n837(x)
+ else
+ fun_l9_n793(x)
+ end
+end
+
+def fun_l8_n113(x)
+ if (x < 1)
+ fun_l9_n517(x)
+ else
+ fun_l9_n668(x)
+ end
+end
+
+def fun_l8_n114(x)
+ if (x < 1)
+ fun_l9_n486(x)
+ else
+ fun_l9_n872(x)
+ end
+end
+
+def fun_l8_n115(x)
+ if (x < 1)
+ fun_l9_n454(x)
+ else
+ fun_l9_n507(x)
+ end
+end
+
+def fun_l8_n116(x)
+ if (x < 1)
+ fun_l9_n110(x)
+ else
+ fun_l9_n263(x)
+ end
+end
+
+def fun_l8_n117(x)
+ if (x < 1)
+ fun_l9_n878(x)
+ else
+ fun_l9_n21(x)
+ end
+end
+
+def fun_l8_n118(x)
+ if (x < 1)
+ fun_l9_n564(x)
+ else
+ fun_l9_n409(x)
+ end
+end
+
+def fun_l8_n119(x)
+ if (x < 1)
+ fun_l9_n511(x)
+ else
+ fun_l9_n800(x)
+ end
+end
+
+def fun_l8_n120(x)
+ if (x < 1)
+ fun_l9_n501(x)
+ else
+ fun_l9_n305(x)
+ end
+end
+
+def fun_l8_n121(x)
+ if (x < 1)
+ fun_l9_n630(x)
+ else
+ fun_l9_n319(x)
+ end
+end
+
+def fun_l8_n122(x)
+ if (x < 1)
+ fun_l9_n271(x)
+ else
+ fun_l9_n639(x)
+ end
+end
+
+def fun_l8_n123(x)
+ if (x < 1)
+ fun_l9_n812(x)
+ else
+ fun_l9_n735(x)
+ end
+end
+
+def fun_l8_n124(x)
+ if (x < 1)
+ fun_l9_n828(x)
+ else
+ fun_l9_n203(x)
+ end
+end
+
+def fun_l8_n125(x)
+ if (x < 1)
+ fun_l9_n228(x)
+ else
+ fun_l9_n536(x)
+ end
+end
+
+def fun_l8_n126(x)
+ if (x < 1)
+ fun_l9_n426(x)
+ else
+ fun_l9_n325(x)
+ end
+end
+
+def fun_l8_n127(x)
+ if (x < 1)
+ fun_l9_n19(x)
+ else
+ fun_l9_n324(x)
+ end
+end
+
+def fun_l8_n128(x)
+ if (x < 1)
+ fun_l9_n419(x)
+ else
+ fun_l9_n425(x)
+ end
+end
+
+def fun_l8_n129(x)
+ if (x < 1)
+ fun_l9_n384(x)
+ else
+ fun_l9_n550(x)
+ end
+end
+
+def fun_l8_n130(x)
+ if (x < 1)
+ fun_l9_n956(x)
+ else
+ fun_l9_n270(x)
+ end
+end
+
+def fun_l8_n131(x)
+ if (x < 1)
+ fun_l9_n20(x)
+ else
+ fun_l9_n949(x)
+ end
+end
+
+def fun_l8_n132(x)
+ if (x < 1)
+ fun_l9_n317(x)
+ else
+ fun_l9_n909(x)
+ end
+end
+
+def fun_l8_n133(x)
+ if (x < 1)
+ fun_l9_n131(x)
+ else
+ fun_l9_n108(x)
+ end
+end
+
+def fun_l8_n134(x)
+ if (x < 1)
+ fun_l9_n648(x)
+ else
+ fun_l9_n473(x)
+ end
+end
+
+def fun_l8_n135(x)
+ if (x < 1)
+ fun_l9_n837(x)
+ else
+ fun_l9_n89(x)
+ end
+end
+
+def fun_l8_n136(x)
+ if (x < 1)
+ fun_l9_n637(x)
+ else
+ fun_l9_n394(x)
+ end
+end
+
+def fun_l8_n137(x)
+ if (x < 1)
+ fun_l9_n608(x)
+ else
+ fun_l9_n283(x)
+ end
+end
+
+def fun_l8_n138(x)
+ if (x < 1)
+ fun_l9_n728(x)
+ else
+ fun_l9_n586(x)
+ end
+end
+
+def fun_l8_n139(x)
+ if (x < 1)
+ fun_l9_n689(x)
+ else
+ fun_l9_n700(x)
+ end
+end
+
+def fun_l8_n140(x)
+ if (x < 1)
+ fun_l9_n532(x)
+ else
+ fun_l9_n98(x)
+ end
+end
+
+def fun_l8_n141(x)
+ if (x < 1)
+ fun_l9_n696(x)
+ else
+ fun_l9_n951(x)
+ end
+end
+
+def fun_l8_n142(x)
+ if (x < 1)
+ fun_l9_n28(x)
+ else
+ fun_l9_n714(x)
+ end
+end
+
+def fun_l8_n143(x)
+ if (x < 1)
+ fun_l9_n438(x)
+ else
+ fun_l9_n934(x)
+ end
+end
+
+def fun_l8_n144(x)
+ if (x < 1)
+ fun_l9_n738(x)
+ else
+ fun_l9_n473(x)
+ end
+end
+
+def fun_l8_n145(x)
+ if (x < 1)
+ fun_l9_n201(x)
+ else
+ fun_l9_n634(x)
+ end
+end
+
+def fun_l8_n146(x)
+ if (x < 1)
+ fun_l9_n391(x)
+ else
+ fun_l9_n703(x)
+ end
+end
+
+def fun_l8_n147(x)
+ if (x < 1)
+ fun_l9_n740(x)
+ else
+ fun_l9_n265(x)
+ end
+end
+
+def fun_l8_n148(x)
+ if (x < 1)
+ fun_l9_n303(x)
+ else
+ fun_l9_n162(x)
+ end
+end
+
+def fun_l8_n149(x)
+ if (x < 1)
+ fun_l9_n518(x)
+ else
+ fun_l9_n899(x)
+ end
+end
+
+def fun_l8_n150(x)
+ if (x < 1)
+ fun_l9_n847(x)
+ else
+ fun_l9_n840(x)
+ end
+end
+
+def fun_l8_n151(x)
+ if (x < 1)
+ fun_l9_n567(x)
+ else
+ fun_l9_n29(x)
+ end
+end
+
+def fun_l8_n152(x)
+ if (x < 1)
+ fun_l9_n426(x)
+ else
+ fun_l9_n578(x)
+ end
+end
+
+def fun_l8_n153(x)
+ if (x < 1)
+ fun_l9_n617(x)
+ else
+ fun_l9_n703(x)
+ end
+end
+
+def fun_l8_n154(x)
+ if (x < 1)
+ fun_l9_n673(x)
+ else
+ fun_l9_n663(x)
+ end
+end
+
+def fun_l8_n155(x)
+ if (x < 1)
+ fun_l9_n456(x)
+ else
+ fun_l9_n406(x)
+ end
+end
+
+def fun_l8_n156(x)
+ if (x < 1)
+ fun_l9_n297(x)
+ else
+ fun_l9_n444(x)
+ end
+end
+
+def fun_l8_n157(x)
+ if (x < 1)
+ fun_l9_n300(x)
+ else
+ fun_l9_n179(x)
+ end
+end
+
+def fun_l8_n158(x)
+ if (x < 1)
+ fun_l9_n646(x)
+ else
+ fun_l9_n9(x)
+ end
+end
+
+def fun_l8_n159(x)
+ if (x < 1)
+ fun_l9_n133(x)
+ else
+ fun_l9_n228(x)
+ end
+end
+
+def fun_l8_n160(x)
+ if (x < 1)
+ fun_l9_n814(x)
+ else
+ fun_l9_n775(x)
+ end
+end
+
+def fun_l8_n161(x)
+ if (x < 1)
+ fun_l9_n748(x)
+ else
+ fun_l9_n849(x)
+ end
+end
+
+def fun_l8_n162(x)
+ if (x < 1)
+ fun_l9_n209(x)
+ else
+ fun_l9_n273(x)
+ end
+end
+
+def fun_l8_n163(x)
+ if (x < 1)
+ fun_l9_n116(x)
+ else
+ fun_l9_n669(x)
+ end
+end
+
+def fun_l8_n164(x)
+ if (x < 1)
+ fun_l9_n714(x)
+ else
+ fun_l9_n621(x)
+ end
+end
+
+def fun_l8_n165(x)
+ if (x < 1)
+ fun_l9_n619(x)
+ else
+ fun_l9_n365(x)
+ end
+end
+
+def fun_l8_n166(x)
+ if (x < 1)
+ fun_l9_n756(x)
+ else
+ fun_l9_n745(x)
+ end
+end
+
+def fun_l8_n167(x)
+ if (x < 1)
+ fun_l9_n12(x)
+ else
+ fun_l9_n633(x)
+ end
+end
+
+def fun_l8_n168(x)
+ if (x < 1)
+ fun_l9_n619(x)
+ else
+ fun_l9_n31(x)
+ end
+end
+
+def fun_l8_n169(x)
+ if (x < 1)
+ fun_l9_n685(x)
+ else
+ fun_l9_n107(x)
+ end
+end
+
+def fun_l8_n170(x)
+ if (x < 1)
+ fun_l9_n636(x)
+ else
+ fun_l9_n280(x)
+ end
+end
+
+def fun_l8_n171(x)
+ if (x < 1)
+ fun_l9_n254(x)
+ else
+ fun_l9_n742(x)
+ end
+end
+
+def fun_l8_n172(x)
+ if (x < 1)
+ fun_l9_n829(x)
+ else
+ fun_l9_n632(x)
+ end
+end
+
+def fun_l8_n173(x)
+ if (x < 1)
+ fun_l9_n341(x)
+ else
+ fun_l9_n186(x)
+ end
+end
+
+def fun_l8_n174(x)
+ if (x < 1)
+ fun_l9_n903(x)
+ else
+ fun_l9_n93(x)
+ end
+end
+
+def fun_l8_n175(x)
+ if (x < 1)
+ fun_l9_n416(x)
+ else
+ fun_l9_n318(x)
+ end
+end
+
+def fun_l8_n176(x)
+ if (x < 1)
+ fun_l9_n893(x)
+ else
+ fun_l9_n438(x)
+ end
+end
+
+def fun_l8_n177(x)
+ if (x < 1)
+ fun_l9_n551(x)
+ else
+ fun_l9_n312(x)
+ end
+end
+
+def fun_l8_n178(x)
+ if (x < 1)
+ fun_l9_n582(x)
+ else
+ fun_l9_n926(x)
+ end
+end
+
+def fun_l8_n179(x)
+ if (x < 1)
+ fun_l9_n547(x)
+ else
+ fun_l9_n804(x)
+ end
+end
+
+def fun_l8_n180(x)
+ if (x < 1)
+ fun_l9_n589(x)
+ else
+ fun_l9_n246(x)
+ end
+end
+
+def fun_l8_n181(x)
+ if (x < 1)
+ fun_l9_n67(x)
+ else
+ fun_l9_n345(x)
+ end
+end
+
+def fun_l8_n182(x)
+ if (x < 1)
+ fun_l9_n958(x)
+ else
+ fun_l9_n923(x)
+ end
+end
+
+def fun_l8_n183(x)
+ if (x < 1)
+ fun_l9_n418(x)
+ else
+ fun_l9_n532(x)
+ end
+end
+
+def fun_l8_n184(x)
+ if (x < 1)
+ fun_l9_n372(x)
+ else
+ fun_l9_n808(x)
+ end
+end
+
+def fun_l8_n185(x)
+ if (x < 1)
+ fun_l9_n849(x)
+ else
+ fun_l9_n755(x)
+ end
+end
+
+def fun_l8_n186(x)
+ if (x < 1)
+ fun_l9_n544(x)
+ else
+ fun_l9_n73(x)
+ end
+end
+
+def fun_l8_n187(x)
+ if (x < 1)
+ fun_l9_n729(x)
+ else
+ fun_l9_n295(x)
+ end
+end
+
+def fun_l8_n188(x)
+ if (x < 1)
+ fun_l9_n782(x)
+ else
+ fun_l9_n117(x)
+ end
+end
+
+def fun_l8_n189(x)
+ if (x < 1)
+ fun_l9_n947(x)
+ else
+ fun_l9_n93(x)
+ end
+end
+
+def fun_l8_n190(x)
+ if (x < 1)
+ fun_l9_n989(x)
+ else
+ fun_l9_n444(x)
+ end
+end
+
+def fun_l8_n191(x)
+ if (x < 1)
+ fun_l9_n394(x)
+ else
+ fun_l9_n741(x)
+ end
+end
+
+def fun_l8_n192(x)
+ if (x < 1)
+ fun_l9_n582(x)
+ else
+ fun_l9_n333(x)
+ end
+end
+
+def fun_l8_n193(x)
+ if (x < 1)
+ fun_l9_n73(x)
+ else
+ fun_l9_n227(x)
+ end
+end
+
+def fun_l8_n194(x)
+ if (x < 1)
+ fun_l9_n946(x)
+ else
+ fun_l9_n354(x)
+ end
+end
+
+def fun_l8_n195(x)
+ if (x < 1)
+ fun_l9_n383(x)
+ else
+ fun_l9_n863(x)
+ end
+end
+
+def fun_l8_n196(x)
+ if (x < 1)
+ fun_l9_n441(x)
+ else
+ fun_l9_n947(x)
+ end
+end
+
+def fun_l8_n197(x)
+ if (x < 1)
+ fun_l9_n506(x)
+ else
+ fun_l9_n45(x)
+ end
+end
+
+def fun_l8_n198(x)
+ if (x < 1)
+ fun_l9_n890(x)
+ else
+ fun_l9_n341(x)
+ end
+end
+
+def fun_l8_n199(x)
+ if (x < 1)
+ fun_l9_n75(x)
+ else
+ fun_l9_n56(x)
+ end
+end
+
+def fun_l8_n200(x)
+ if (x < 1)
+ fun_l9_n386(x)
+ else
+ fun_l9_n280(x)
+ end
+end
+
+def fun_l8_n201(x)
+ if (x < 1)
+ fun_l9_n364(x)
+ else
+ fun_l9_n424(x)
+ end
+end
+
+def fun_l8_n202(x)
+ if (x < 1)
+ fun_l9_n923(x)
+ else
+ fun_l9_n326(x)
+ end
+end
+
+def fun_l8_n203(x)
+ if (x < 1)
+ fun_l9_n692(x)
+ else
+ fun_l9_n502(x)
+ end
+end
+
+def fun_l8_n204(x)
+ if (x < 1)
+ fun_l9_n508(x)
+ else
+ fun_l9_n561(x)
+ end
+end
+
+def fun_l8_n205(x)
+ if (x < 1)
+ fun_l9_n496(x)
+ else
+ fun_l9_n949(x)
+ end
+end
+
+def fun_l8_n206(x)
+ if (x < 1)
+ fun_l9_n852(x)
+ else
+ fun_l9_n447(x)
+ end
+end
+
+def fun_l8_n207(x)
+ if (x < 1)
+ fun_l9_n757(x)
+ else
+ fun_l9_n782(x)
+ end
+end
+
+def fun_l8_n208(x)
+ if (x < 1)
+ fun_l9_n505(x)
+ else
+ fun_l9_n32(x)
+ end
+end
+
+def fun_l8_n209(x)
+ if (x < 1)
+ fun_l9_n591(x)
+ else
+ fun_l9_n967(x)
+ end
+end
+
+def fun_l8_n210(x)
+ if (x < 1)
+ fun_l9_n537(x)
+ else
+ fun_l9_n380(x)
+ end
+end
+
+def fun_l8_n211(x)
+ if (x < 1)
+ fun_l9_n19(x)
+ else
+ fun_l9_n362(x)
+ end
+end
+
+def fun_l8_n212(x)
+ if (x < 1)
+ fun_l9_n759(x)
+ else
+ fun_l9_n835(x)
+ end
+end
+
+def fun_l8_n213(x)
+ if (x < 1)
+ fun_l9_n518(x)
+ else
+ fun_l9_n294(x)
+ end
+end
+
+def fun_l8_n214(x)
+ if (x < 1)
+ fun_l9_n921(x)
+ else
+ fun_l9_n687(x)
+ end
+end
+
+def fun_l8_n215(x)
+ if (x < 1)
+ fun_l9_n114(x)
+ else
+ fun_l9_n130(x)
+ end
+end
+
+def fun_l8_n216(x)
+ if (x < 1)
+ fun_l9_n829(x)
+ else
+ fun_l9_n8(x)
+ end
+end
+
+def fun_l8_n217(x)
+ if (x < 1)
+ fun_l9_n875(x)
+ else
+ fun_l9_n392(x)
+ end
+end
+
+def fun_l8_n218(x)
+ if (x < 1)
+ fun_l9_n13(x)
+ else
+ fun_l9_n907(x)
+ end
+end
+
+def fun_l8_n219(x)
+ if (x < 1)
+ fun_l9_n457(x)
+ else
+ fun_l9_n77(x)
+ end
+end
+
+def fun_l8_n220(x)
+ if (x < 1)
+ fun_l9_n404(x)
+ else
+ fun_l9_n12(x)
+ end
+end
+
+def fun_l8_n221(x)
+ if (x < 1)
+ fun_l9_n244(x)
+ else
+ fun_l9_n231(x)
+ end
+end
+
+def fun_l8_n222(x)
+ if (x < 1)
+ fun_l9_n205(x)
+ else
+ fun_l9_n458(x)
+ end
+end
+
+def fun_l8_n223(x)
+ if (x < 1)
+ fun_l9_n570(x)
+ else
+ fun_l9_n44(x)
+ end
+end
+
+def fun_l8_n224(x)
+ if (x < 1)
+ fun_l9_n651(x)
+ else
+ fun_l9_n836(x)
+ end
+end
+
+def fun_l8_n225(x)
+ if (x < 1)
+ fun_l9_n518(x)
+ else
+ fun_l9_n845(x)
+ end
+end
+
+def fun_l8_n226(x)
+ if (x < 1)
+ fun_l9_n92(x)
+ else
+ fun_l9_n637(x)
+ end
+end
+
+def fun_l8_n227(x)
+ if (x < 1)
+ fun_l9_n990(x)
+ else
+ fun_l9_n926(x)
+ end
+end
+
+def fun_l8_n228(x)
+ if (x < 1)
+ fun_l9_n465(x)
+ else
+ fun_l9_n764(x)
+ end
+end
+
+def fun_l8_n229(x)
+ if (x < 1)
+ fun_l9_n7(x)
+ else
+ fun_l9_n981(x)
+ end
+end
+
+def fun_l8_n230(x)
+ if (x < 1)
+ fun_l9_n250(x)
+ else
+ fun_l9_n690(x)
+ end
+end
+
+def fun_l8_n231(x)
+ if (x < 1)
+ fun_l9_n101(x)
+ else
+ fun_l9_n342(x)
+ end
+end
+
+def fun_l8_n232(x)
+ if (x < 1)
+ fun_l9_n659(x)
+ else
+ fun_l9_n216(x)
+ end
+end
+
+def fun_l8_n233(x)
+ if (x < 1)
+ fun_l9_n129(x)
+ else
+ fun_l9_n439(x)
+ end
+end
+
+def fun_l8_n234(x)
+ if (x < 1)
+ fun_l9_n616(x)
+ else
+ fun_l9_n700(x)
+ end
+end
+
+def fun_l8_n235(x)
+ if (x < 1)
+ fun_l9_n850(x)
+ else
+ fun_l9_n254(x)
+ end
+end
+
+def fun_l8_n236(x)
+ if (x < 1)
+ fun_l9_n186(x)
+ else
+ fun_l9_n592(x)
+ end
+end
+
+def fun_l8_n237(x)
+ if (x < 1)
+ fun_l9_n895(x)
+ else
+ fun_l9_n752(x)
+ end
+end
+
+def fun_l8_n238(x)
+ if (x < 1)
+ fun_l9_n496(x)
+ else
+ fun_l9_n784(x)
+ end
+end
+
+def fun_l8_n239(x)
+ if (x < 1)
+ fun_l9_n579(x)
+ else
+ fun_l9_n144(x)
+ end
+end
+
+def fun_l8_n240(x)
+ if (x < 1)
+ fun_l9_n805(x)
+ else
+ fun_l9_n851(x)
+ end
+end
+
+def fun_l8_n241(x)
+ if (x < 1)
+ fun_l9_n756(x)
+ else
+ fun_l9_n332(x)
+ end
+end
+
+def fun_l8_n242(x)
+ if (x < 1)
+ fun_l9_n898(x)
+ else
+ fun_l9_n285(x)
+ end
+end
+
+def fun_l8_n243(x)
+ if (x < 1)
+ fun_l9_n729(x)
+ else
+ fun_l9_n149(x)
+ end
+end
+
+def fun_l8_n244(x)
+ if (x < 1)
+ fun_l9_n881(x)
+ else
+ fun_l9_n356(x)
+ end
+end
+
+def fun_l8_n245(x)
+ if (x < 1)
+ fun_l9_n351(x)
+ else
+ fun_l9_n805(x)
+ end
+end
+
+def fun_l8_n246(x)
+ if (x < 1)
+ fun_l9_n493(x)
+ else
+ fun_l9_n200(x)
+ end
+end
+
+def fun_l8_n247(x)
+ if (x < 1)
+ fun_l9_n706(x)
+ else
+ fun_l9_n917(x)
+ end
+end
+
+def fun_l8_n248(x)
+ if (x < 1)
+ fun_l9_n370(x)
+ else
+ fun_l9_n755(x)
+ end
+end
+
+def fun_l8_n249(x)
+ if (x < 1)
+ fun_l9_n859(x)
+ else
+ fun_l9_n496(x)
+ end
+end
+
+def fun_l8_n250(x)
+ if (x < 1)
+ fun_l9_n443(x)
+ else
+ fun_l9_n113(x)
+ end
+end
+
+def fun_l8_n251(x)
+ if (x < 1)
+ fun_l9_n631(x)
+ else
+ fun_l9_n195(x)
+ end
+end
+
+def fun_l8_n252(x)
+ if (x < 1)
+ fun_l9_n357(x)
+ else
+ fun_l9_n694(x)
+ end
+end
+
+def fun_l8_n253(x)
+ if (x < 1)
+ fun_l9_n387(x)
+ else
+ fun_l9_n820(x)
+ end
+end
+
+def fun_l8_n254(x)
+ if (x < 1)
+ fun_l9_n909(x)
+ else
+ fun_l9_n559(x)
+ end
+end
+
+def fun_l8_n255(x)
+ if (x < 1)
+ fun_l9_n474(x)
+ else
+ fun_l9_n864(x)
+ end
+end
+
+def fun_l8_n256(x)
+ if (x < 1)
+ fun_l9_n914(x)
+ else
+ fun_l9_n672(x)
+ end
+end
+
+def fun_l8_n257(x)
+ if (x < 1)
+ fun_l9_n915(x)
+ else
+ fun_l9_n177(x)
+ end
+end
+
+def fun_l8_n258(x)
+ if (x < 1)
+ fun_l9_n917(x)
+ else
+ fun_l9_n210(x)
+ end
+end
+
+def fun_l8_n259(x)
+ if (x < 1)
+ fun_l9_n851(x)
+ else
+ fun_l9_n342(x)
+ end
+end
+
+def fun_l8_n260(x)
+ if (x < 1)
+ fun_l9_n205(x)
+ else
+ fun_l9_n412(x)
+ end
+end
+
+def fun_l8_n261(x)
+ if (x < 1)
+ fun_l9_n110(x)
+ else
+ fun_l9_n62(x)
+ end
+end
+
+def fun_l8_n262(x)
+ if (x < 1)
+ fun_l9_n706(x)
+ else
+ fun_l9_n903(x)
+ end
+end
+
+def fun_l8_n263(x)
+ if (x < 1)
+ fun_l9_n403(x)
+ else
+ fun_l9_n994(x)
+ end
+end
+
+def fun_l8_n264(x)
+ if (x < 1)
+ fun_l9_n631(x)
+ else
+ fun_l9_n126(x)
+ end
+end
+
+def fun_l8_n265(x)
+ if (x < 1)
+ fun_l9_n216(x)
+ else
+ fun_l9_n573(x)
+ end
+end
+
+def fun_l8_n266(x)
+ if (x < 1)
+ fun_l9_n174(x)
+ else
+ fun_l9_n495(x)
+ end
+end
+
+def fun_l8_n267(x)
+ if (x < 1)
+ fun_l9_n802(x)
+ else
+ fun_l9_n881(x)
+ end
+end
+
+def fun_l8_n268(x)
+ if (x < 1)
+ fun_l9_n148(x)
+ else
+ fun_l9_n399(x)
+ end
+end
+
+def fun_l8_n269(x)
+ if (x < 1)
+ fun_l9_n675(x)
+ else
+ fun_l9_n596(x)
+ end
+end
+
+def fun_l8_n270(x)
+ if (x < 1)
+ fun_l9_n306(x)
+ else
+ fun_l9_n413(x)
+ end
+end
+
+def fun_l8_n271(x)
+ if (x < 1)
+ fun_l9_n205(x)
+ else
+ fun_l9_n569(x)
+ end
+end
+
+def fun_l8_n272(x)
+ if (x < 1)
+ fun_l9_n6(x)
+ else
+ fun_l9_n531(x)
+ end
+end
+
+def fun_l8_n273(x)
+ if (x < 1)
+ fun_l9_n516(x)
+ else
+ fun_l9_n172(x)
+ end
+end
+
+def fun_l8_n274(x)
+ if (x < 1)
+ fun_l9_n641(x)
+ else
+ fun_l9_n85(x)
+ end
+end
+
+def fun_l8_n275(x)
+ if (x < 1)
+ fun_l9_n258(x)
+ else
+ fun_l9_n773(x)
+ end
+end
+
+def fun_l8_n276(x)
+ if (x < 1)
+ fun_l9_n233(x)
+ else
+ fun_l9_n222(x)
+ end
+end
+
+def fun_l8_n277(x)
+ if (x < 1)
+ fun_l9_n615(x)
+ else
+ fun_l9_n598(x)
+ end
+end
+
+def fun_l8_n278(x)
+ if (x < 1)
+ fun_l9_n126(x)
+ else
+ fun_l9_n270(x)
+ end
+end
+
+def fun_l8_n279(x)
+ if (x < 1)
+ fun_l9_n186(x)
+ else
+ fun_l9_n78(x)
+ end
+end
+
+def fun_l8_n280(x)
+ if (x < 1)
+ fun_l9_n731(x)
+ else
+ fun_l9_n114(x)
+ end
+end
+
+def fun_l8_n281(x)
+ if (x < 1)
+ fun_l9_n513(x)
+ else
+ fun_l9_n69(x)
+ end
+end
+
+def fun_l8_n282(x)
+ if (x < 1)
+ fun_l9_n208(x)
+ else
+ fun_l9_n466(x)
+ end
+end
+
+def fun_l8_n283(x)
+ if (x < 1)
+ fun_l9_n958(x)
+ else
+ fun_l9_n86(x)
+ end
+end
+
+def fun_l8_n284(x)
+ if (x < 1)
+ fun_l9_n663(x)
+ else
+ fun_l9_n619(x)
+ end
+end
+
+def fun_l8_n285(x)
+ if (x < 1)
+ fun_l9_n698(x)
+ else
+ fun_l9_n97(x)
+ end
+end
+
+def fun_l8_n286(x)
+ if (x < 1)
+ fun_l9_n544(x)
+ else
+ fun_l9_n791(x)
+ end
+end
+
+def fun_l8_n287(x)
+ if (x < 1)
+ fun_l9_n51(x)
+ else
+ fun_l9_n415(x)
+ end
+end
+
+def fun_l8_n288(x)
+ if (x < 1)
+ fun_l9_n942(x)
+ else
+ fun_l9_n11(x)
+ end
+end
+
+def fun_l8_n289(x)
+ if (x < 1)
+ fun_l9_n386(x)
+ else
+ fun_l9_n507(x)
+ end
+end
+
+def fun_l8_n290(x)
+ if (x < 1)
+ fun_l9_n983(x)
+ else
+ fun_l9_n81(x)
+ end
+end
+
+def fun_l8_n291(x)
+ if (x < 1)
+ fun_l9_n403(x)
+ else
+ fun_l9_n628(x)
+ end
+end
+
+def fun_l8_n292(x)
+ if (x < 1)
+ fun_l9_n479(x)
+ else
+ fun_l9_n379(x)
+ end
+end
+
+def fun_l8_n293(x)
+ if (x < 1)
+ fun_l9_n284(x)
+ else
+ fun_l9_n109(x)
+ end
+end
+
+def fun_l8_n294(x)
+ if (x < 1)
+ fun_l9_n29(x)
+ else
+ fun_l9_n802(x)
+ end
+end
+
+def fun_l8_n295(x)
+ if (x < 1)
+ fun_l9_n579(x)
+ else
+ fun_l9_n371(x)
+ end
+end
+
+def fun_l8_n296(x)
+ if (x < 1)
+ fun_l9_n96(x)
+ else
+ fun_l9_n632(x)
+ end
+end
+
+def fun_l8_n297(x)
+ if (x < 1)
+ fun_l9_n442(x)
+ else
+ fun_l9_n396(x)
+ end
+end
+
+def fun_l8_n298(x)
+ if (x < 1)
+ fun_l9_n902(x)
+ else
+ fun_l9_n804(x)
+ end
+end
+
+def fun_l8_n299(x)
+ if (x < 1)
+ fun_l9_n561(x)
+ else
+ fun_l9_n952(x)
+ end
+end
+
+def fun_l8_n300(x)
+ if (x < 1)
+ fun_l9_n757(x)
+ else
+ fun_l9_n819(x)
+ end
+end
+
+def fun_l8_n301(x)
+ if (x < 1)
+ fun_l9_n97(x)
+ else
+ fun_l9_n892(x)
+ end
+end
+
+def fun_l8_n302(x)
+ if (x < 1)
+ fun_l9_n67(x)
+ else
+ fun_l9_n765(x)
+ end
+end
+
+def fun_l8_n303(x)
+ if (x < 1)
+ fun_l9_n166(x)
+ else
+ fun_l9_n769(x)
+ end
+end
+
+def fun_l8_n304(x)
+ if (x < 1)
+ fun_l9_n592(x)
+ else
+ fun_l9_n423(x)
+ end
+end
+
+def fun_l8_n305(x)
+ if (x < 1)
+ fun_l9_n354(x)
+ else
+ fun_l9_n953(x)
+ end
+end
+
+def fun_l8_n306(x)
+ if (x < 1)
+ fun_l9_n857(x)
+ else
+ fun_l9_n18(x)
+ end
+end
+
+def fun_l8_n307(x)
+ if (x < 1)
+ fun_l9_n445(x)
+ else
+ fun_l9_n796(x)
+ end
+end
+
+def fun_l8_n308(x)
+ if (x < 1)
+ fun_l9_n293(x)
+ else
+ fun_l9_n304(x)
+ end
+end
+
+def fun_l8_n309(x)
+ if (x < 1)
+ fun_l9_n859(x)
+ else
+ fun_l9_n282(x)
+ end
+end
+
+def fun_l8_n310(x)
+ if (x < 1)
+ fun_l9_n133(x)
+ else
+ fun_l9_n90(x)
+ end
+end
+
+def fun_l8_n311(x)
+ if (x < 1)
+ fun_l9_n997(x)
+ else
+ fun_l9_n201(x)
+ end
+end
+
+def fun_l8_n312(x)
+ if (x < 1)
+ fun_l9_n140(x)
+ else
+ fun_l9_n772(x)
+ end
+end
+
+def fun_l8_n313(x)
+ if (x < 1)
+ fun_l9_n761(x)
+ else
+ fun_l9_n980(x)
+ end
+end
+
+def fun_l8_n314(x)
+ if (x < 1)
+ fun_l9_n357(x)
+ else
+ fun_l9_n552(x)
+ end
+end
+
+def fun_l8_n315(x)
+ if (x < 1)
+ fun_l9_n312(x)
+ else
+ fun_l9_n412(x)
+ end
+end
+
+def fun_l8_n316(x)
+ if (x < 1)
+ fun_l9_n700(x)
+ else
+ fun_l9_n551(x)
+ end
+end
+
+def fun_l8_n317(x)
+ if (x < 1)
+ fun_l9_n623(x)
+ else
+ fun_l9_n219(x)
+ end
+end
+
+def fun_l8_n318(x)
+ if (x < 1)
+ fun_l9_n613(x)
+ else
+ fun_l9_n959(x)
+ end
+end
+
+def fun_l8_n319(x)
+ if (x < 1)
+ fun_l9_n978(x)
+ else
+ fun_l9_n596(x)
+ end
+end
+
+def fun_l8_n320(x)
+ if (x < 1)
+ fun_l9_n542(x)
+ else
+ fun_l9_n527(x)
+ end
+end
+
+def fun_l8_n321(x)
+ if (x < 1)
+ fun_l9_n877(x)
+ else
+ fun_l9_n108(x)
+ end
+end
+
+def fun_l8_n322(x)
+ if (x < 1)
+ fun_l9_n817(x)
+ else
+ fun_l9_n476(x)
+ end
+end
+
+def fun_l8_n323(x)
+ if (x < 1)
+ fun_l9_n765(x)
+ else
+ fun_l9_n672(x)
+ end
+end
+
+def fun_l8_n324(x)
+ if (x < 1)
+ fun_l9_n863(x)
+ else
+ fun_l9_n605(x)
+ end
+end
+
+def fun_l8_n325(x)
+ if (x < 1)
+ fun_l9_n566(x)
+ else
+ fun_l9_n592(x)
+ end
+end
+
+def fun_l8_n326(x)
+ if (x < 1)
+ fun_l9_n607(x)
+ else
+ fun_l9_n968(x)
+ end
+end
+
+def fun_l8_n327(x)
+ if (x < 1)
+ fun_l9_n36(x)
+ else
+ fun_l9_n380(x)
+ end
+end
+
+def fun_l8_n328(x)
+ if (x < 1)
+ fun_l9_n597(x)
+ else
+ fun_l9_n664(x)
+ end
+end
+
+def fun_l8_n329(x)
+ if (x < 1)
+ fun_l9_n195(x)
+ else
+ fun_l9_n268(x)
+ end
+end
+
+def fun_l8_n330(x)
+ if (x < 1)
+ fun_l9_n419(x)
+ else
+ fun_l9_n715(x)
+ end
+end
+
+def fun_l8_n331(x)
+ if (x < 1)
+ fun_l9_n451(x)
+ else
+ fun_l9_n518(x)
+ end
+end
+
+def fun_l8_n332(x)
+ if (x < 1)
+ fun_l9_n106(x)
+ else
+ fun_l9_n236(x)
+ end
+end
+
+def fun_l8_n333(x)
+ if (x < 1)
+ fun_l9_n611(x)
+ else
+ fun_l9_n825(x)
+ end
+end
+
+def fun_l8_n334(x)
+ if (x < 1)
+ fun_l9_n394(x)
+ else
+ fun_l9_n34(x)
+ end
+end
+
+def fun_l8_n335(x)
+ if (x < 1)
+ fun_l9_n63(x)
+ else
+ fun_l9_n58(x)
+ end
+end
+
+def fun_l8_n336(x)
+ if (x < 1)
+ fun_l9_n475(x)
+ else
+ fun_l9_n455(x)
+ end
+end
+
+def fun_l8_n337(x)
+ if (x < 1)
+ fun_l9_n836(x)
+ else
+ fun_l9_n318(x)
+ end
+end
+
+def fun_l8_n338(x)
+ if (x < 1)
+ fun_l9_n844(x)
+ else
+ fun_l9_n21(x)
+ end
+end
+
+def fun_l8_n339(x)
+ if (x < 1)
+ fun_l9_n628(x)
+ else
+ fun_l9_n721(x)
+ end
+end
+
+def fun_l8_n340(x)
+ if (x < 1)
+ fun_l9_n966(x)
+ else
+ fun_l9_n833(x)
+ end
+end
+
+def fun_l8_n341(x)
+ if (x < 1)
+ fun_l9_n267(x)
+ else
+ fun_l9_n28(x)
+ end
+end
+
+def fun_l8_n342(x)
+ if (x < 1)
+ fun_l9_n204(x)
+ else
+ fun_l9_n838(x)
+ end
+end
+
+def fun_l8_n343(x)
+ if (x < 1)
+ fun_l9_n151(x)
+ else
+ fun_l9_n382(x)
+ end
+end
+
+def fun_l8_n344(x)
+ if (x < 1)
+ fun_l9_n409(x)
+ else
+ fun_l9_n591(x)
+ end
+end
+
+def fun_l8_n345(x)
+ if (x < 1)
+ fun_l9_n680(x)
+ else
+ fun_l9_n912(x)
+ end
+end
+
+def fun_l8_n346(x)
+ if (x < 1)
+ fun_l9_n150(x)
+ else
+ fun_l9_n34(x)
+ end
+end
+
+def fun_l8_n347(x)
+ if (x < 1)
+ fun_l9_n538(x)
+ else
+ fun_l9_n441(x)
+ end
+end
+
+def fun_l8_n348(x)
+ if (x < 1)
+ fun_l9_n707(x)
+ else
+ fun_l9_n199(x)
+ end
+end
+
+def fun_l8_n349(x)
+ if (x < 1)
+ fun_l9_n245(x)
+ else
+ fun_l9_n809(x)
+ end
+end
+
+def fun_l8_n350(x)
+ if (x < 1)
+ fun_l9_n16(x)
+ else
+ fun_l9_n565(x)
+ end
+end
+
+def fun_l8_n351(x)
+ if (x < 1)
+ fun_l9_n857(x)
+ else
+ fun_l9_n37(x)
+ end
+end
+
+def fun_l8_n352(x)
+ if (x < 1)
+ fun_l9_n816(x)
+ else
+ fun_l9_n941(x)
+ end
+end
+
+def fun_l8_n353(x)
+ if (x < 1)
+ fun_l9_n605(x)
+ else
+ fun_l9_n476(x)
+ end
+end
+
+def fun_l8_n354(x)
+ if (x < 1)
+ fun_l9_n641(x)
+ else
+ fun_l9_n319(x)
+ end
+end
+
+def fun_l8_n355(x)
+ if (x < 1)
+ fun_l9_n133(x)
+ else
+ fun_l9_n676(x)
+ end
+end
+
+def fun_l8_n356(x)
+ if (x < 1)
+ fun_l9_n826(x)
+ else
+ fun_l9_n926(x)
+ end
+end
+
+def fun_l8_n357(x)
+ if (x < 1)
+ fun_l9_n17(x)
+ else
+ fun_l9_n486(x)
+ end
+end
+
+def fun_l8_n358(x)
+ if (x < 1)
+ fun_l9_n90(x)
+ else
+ fun_l9_n744(x)
+ end
+end
+
+def fun_l8_n359(x)
+ if (x < 1)
+ fun_l9_n745(x)
+ else
+ fun_l9_n158(x)
+ end
+end
+
+def fun_l8_n360(x)
+ if (x < 1)
+ fun_l9_n261(x)
+ else
+ fun_l9_n878(x)
+ end
+end
+
+def fun_l8_n361(x)
+ if (x < 1)
+ fun_l9_n832(x)
+ else
+ fun_l9_n905(x)
+ end
+end
+
+def fun_l8_n362(x)
+ if (x < 1)
+ fun_l9_n879(x)
+ else
+ fun_l9_n475(x)
+ end
+end
+
+def fun_l8_n363(x)
+ if (x < 1)
+ fun_l9_n586(x)
+ else
+ fun_l9_n272(x)
+ end
+end
+
+def fun_l8_n364(x)
+ if (x < 1)
+ fun_l9_n469(x)
+ else
+ fun_l9_n918(x)
+ end
+end
+
+def fun_l8_n365(x)
+ if (x < 1)
+ fun_l9_n568(x)
+ else
+ fun_l9_n777(x)
+ end
+end
+
+def fun_l8_n366(x)
+ if (x < 1)
+ fun_l9_n662(x)
+ else
+ fun_l9_n957(x)
+ end
+end
+
+def fun_l8_n367(x)
+ if (x < 1)
+ fun_l9_n26(x)
+ else
+ fun_l9_n593(x)
+ end
+end
+
+def fun_l8_n368(x)
+ if (x < 1)
+ fun_l9_n766(x)
+ else
+ fun_l9_n598(x)
+ end
+end
+
+def fun_l8_n369(x)
+ if (x < 1)
+ fun_l9_n362(x)
+ else
+ fun_l9_n491(x)
+ end
+end
+
+def fun_l8_n370(x)
+ if (x < 1)
+ fun_l9_n205(x)
+ else
+ fun_l9_n585(x)
+ end
+end
+
+def fun_l8_n371(x)
+ if (x < 1)
+ fun_l9_n301(x)
+ else
+ fun_l9_n796(x)
+ end
+end
+
+def fun_l8_n372(x)
+ if (x < 1)
+ fun_l9_n527(x)
+ else
+ fun_l9_n31(x)
+ end
+end
+
+def fun_l8_n373(x)
+ if (x < 1)
+ fun_l9_n461(x)
+ else
+ fun_l9_n42(x)
+ end
+end
+
+def fun_l8_n374(x)
+ if (x < 1)
+ fun_l9_n288(x)
+ else
+ fun_l9_n534(x)
+ end
+end
+
+def fun_l8_n375(x)
+ if (x < 1)
+ fun_l9_n558(x)
+ else
+ fun_l9_n403(x)
+ end
+end
+
+def fun_l8_n376(x)
+ if (x < 1)
+ fun_l9_n835(x)
+ else
+ fun_l9_n390(x)
+ end
+end
+
+def fun_l8_n377(x)
+ if (x < 1)
+ fun_l9_n658(x)
+ else
+ fun_l9_n768(x)
+ end
+end
+
+def fun_l8_n378(x)
+ if (x < 1)
+ fun_l9_n856(x)
+ else
+ fun_l9_n299(x)
+ end
+end
+
+def fun_l8_n379(x)
+ if (x < 1)
+ fun_l9_n196(x)
+ else
+ fun_l9_n540(x)
+ end
+end
+
+def fun_l8_n380(x)
+ if (x < 1)
+ fun_l9_n70(x)
+ else
+ fun_l9_n627(x)
+ end
+end
+
+def fun_l8_n381(x)
+ if (x < 1)
+ fun_l9_n28(x)
+ else
+ fun_l9_n377(x)
+ end
+end
+
+def fun_l8_n382(x)
+ if (x < 1)
+ fun_l9_n955(x)
+ else
+ fun_l9_n124(x)
+ end
+end
+
+def fun_l8_n383(x)
+ if (x < 1)
+ fun_l9_n16(x)
+ else
+ fun_l9_n783(x)
+ end
+end
+
+def fun_l8_n384(x)
+ if (x < 1)
+ fun_l9_n843(x)
+ else
+ fun_l9_n801(x)
+ end
+end
+
+def fun_l8_n385(x)
+ if (x < 1)
+ fun_l9_n876(x)
+ else
+ fun_l9_n152(x)
+ end
+end
+
+def fun_l8_n386(x)
+ if (x < 1)
+ fun_l9_n6(x)
+ else
+ fun_l9_n432(x)
+ end
+end
+
+def fun_l8_n387(x)
+ if (x < 1)
+ fun_l9_n636(x)
+ else
+ fun_l9_n325(x)
+ end
+end
+
+def fun_l8_n388(x)
+ if (x < 1)
+ fun_l9_n267(x)
+ else
+ fun_l9_n716(x)
+ end
+end
+
+def fun_l8_n389(x)
+ if (x < 1)
+ fun_l9_n898(x)
+ else
+ fun_l9_n776(x)
+ end
+end
+
+def fun_l8_n390(x)
+ if (x < 1)
+ fun_l9_n481(x)
+ else
+ fun_l9_n878(x)
+ end
+end
+
+def fun_l8_n391(x)
+ if (x < 1)
+ fun_l9_n398(x)
+ else
+ fun_l9_n159(x)
+ end
+end
+
+def fun_l8_n392(x)
+ if (x < 1)
+ fun_l9_n889(x)
+ else
+ fun_l9_n517(x)
+ end
+end
+
+def fun_l8_n393(x)
+ if (x < 1)
+ fun_l9_n460(x)
+ else
+ fun_l9_n440(x)
+ end
+end
+
+def fun_l8_n394(x)
+ if (x < 1)
+ fun_l9_n576(x)
+ else
+ fun_l9_n421(x)
+ end
+end
+
+def fun_l8_n395(x)
+ if (x < 1)
+ fun_l9_n310(x)
+ else
+ fun_l9_n646(x)
+ end
+end
+
+def fun_l8_n396(x)
+ if (x < 1)
+ fun_l9_n914(x)
+ else
+ fun_l9_n414(x)
+ end
+end
+
+def fun_l8_n397(x)
+ if (x < 1)
+ fun_l9_n330(x)
+ else
+ fun_l9_n520(x)
+ end
+end
+
+def fun_l8_n398(x)
+ if (x < 1)
+ fun_l9_n175(x)
+ else
+ fun_l9_n975(x)
+ end
+end
+
+def fun_l8_n399(x)
+ if (x < 1)
+ fun_l9_n443(x)
+ else
+ fun_l9_n964(x)
+ end
+end
+
+def fun_l8_n400(x)
+ if (x < 1)
+ fun_l9_n810(x)
+ else
+ fun_l9_n102(x)
+ end
+end
+
+def fun_l8_n401(x)
+ if (x < 1)
+ fun_l9_n352(x)
+ else
+ fun_l9_n295(x)
+ end
+end
+
+def fun_l8_n402(x)
+ if (x < 1)
+ fun_l9_n980(x)
+ else
+ fun_l9_n974(x)
+ end
+end
+
+def fun_l8_n403(x)
+ if (x < 1)
+ fun_l9_n169(x)
+ else
+ fun_l9_n877(x)
+ end
+end
+
+def fun_l8_n404(x)
+ if (x < 1)
+ fun_l9_n333(x)
+ else
+ fun_l9_n306(x)
+ end
+end
+
+def fun_l8_n405(x)
+ if (x < 1)
+ fun_l9_n183(x)
+ else
+ fun_l9_n704(x)
+ end
+end
+
+def fun_l8_n406(x)
+ if (x < 1)
+ fun_l9_n532(x)
+ else
+ fun_l9_n684(x)
+ end
+end
+
+def fun_l8_n407(x)
+ if (x < 1)
+ fun_l9_n526(x)
+ else
+ fun_l9_n287(x)
+ end
+end
+
+def fun_l8_n408(x)
+ if (x < 1)
+ fun_l9_n377(x)
+ else
+ fun_l9_n351(x)
+ end
+end
+
+def fun_l8_n409(x)
+ if (x < 1)
+ fun_l9_n770(x)
+ else
+ fun_l9_n738(x)
+ end
+end
+
+def fun_l8_n410(x)
+ if (x < 1)
+ fun_l9_n61(x)
+ else
+ fun_l9_n499(x)
+ end
+end
+
+def fun_l8_n411(x)
+ if (x < 1)
+ fun_l9_n156(x)
+ else
+ fun_l9_n247(x)
+ end
+end
+
+def fun_l8_n412(x)
+ if (x < 1)
+ fun_l9_n431(x)
+ else
+ fun_l9_n636(x)
+ end
+end
+
+def fun_l8_n413(x)
+ if (x < 1)
+ fun_l9_n540(x)
+ else
+ fun_l9_n329(x)
+ end
+end
+
+def fun_l8_n414(x)
+ if (x < 1)
+ fun_l9_n49(x)
+ else
+ fun_l9_n979(x)
+ end
+end
+
+def fun_l8_n415(x)
+ if (x < 1)
+ fun_l9_n94(x)
+ else
+ fun_l9_n487(x)
+ end
+end
+
+def fun_l8_n416(x)
+ if (x < 1)
+ fun_l9_n679(x)
+ else
+ fun_l9_n644(x)
+ end
+end
+
+def fun_l8_n417(x)
+ if (x < 1)
+ fun_l9_n907(x)
+ else
+ fun_l9_n324(x)
+ end
+end
+
+def fun_l8_n418(x)
+ if (x < 1)
+ fun_l9_n426(x)
+ else
+ fun_l9_n495(x)
+ end
+end
+
+def fun_l8_n419(x)
+ if (x < 1)
+ fun_l9_n273(x)
+ else
+ fun_l9_n838(x)
+ end
+end
+
+def fun_l8_n420(x)
+ if (x < 1)
+ fun_l9_n631(x)
+ else
+ fun_l9_n205(x)
+ end
+end
+
+def fun_l8_n421(x)
+ if (x < 1)
+ fun_l9_n484(x)
+ else
+ fun_l9_n80(x)
+ end
+end
+
+def fun_l8_n422(x)
+ if (x < 1)
+ fun_l9_n908(x)
+ else
+ fun_l9_n514(x)
+ end
+end
+
+def fun_l8_n423(x)
+ if (x < 1)
+ fun_l9_n265(x)
+ else
+ fun_l9_n190(x)
+ end
+end
+
+def fun_l8_n424(x)
+ if (x < 1)
+ fun_l9_n463(x)
+ else
+ fun_l9_n714(x)
+ end
+end
+
+def fun_l8_n425(x)
+ if (x < 1)
+ fun_l9_n780(x)
+ else
+ fun_l9_n444(x)
+ end
+end
+
+def fun_l8_n426(x)
+ if (x < 1)
+ fun_l9_n418(x)
+ else
+ fun_l9_n518(x)
+ end
+end
+
+def fun_l8_n427(x)
+ if (x < 1)
+ fun_l9_n912(x)
+ else
+ fun_l9_n27(x)
+ end
+end
+
+def fun_l8_n428(x)
+ if (x < 1)
+ fun_l9_n157(x)
+ else
+ fun_l9_n547(x)
+ end
+end
+
+def fun_l8_n429(x)
+ if (x < 1)
+ fun_l9_n760(x)
+ else
+ fun_l9_n466(x)
+ end
+end
+
+def fun_l8_n430(x)
+ if (x < 1)
+ fun_l9_n726(x)
+ else
+ fun_l9_n609(x)
+ end
+end
+
+def fun_l8_n431(x)
+ if (x < 1)
+ fun_l9_n206(x)
+ else
+ fun_l9_n65(x)
+ end
+end
+
+def fun_l8_n432(x)
+ if (x < 1)
+ fun_l9_n396(x)
+ else
+ fun_l9_n325(x)
+ end
+end
+
+def fun_l8_n433(x)
+ if (x < 1)
+ fun_l9_n100(x)
+ else
+ fun_l9_n519(x)
+ end
+end
+
+def fun_l8_n434(x)
+ if (x < 1)
+ fun_l9_n923(x)
+ else
+ fun_l9_n387(x)
+ end
+end
+
+def fun_l8_n435(x)
+ if (x < 1)
+ fun_l9_n334(x)
+ else
+ fun_l9_n866(x)
+ end
+end
+
+def fun_l8_n436(x)
+ if (x < 1)
+ fun_l9_n896(x)
+ else
+ fun_l9_n780(x)
+ end
+end
+
+def fun_l8_n437(x)
+ if (x < 1)
+ fun_l9_n328(x)
+ else
+ fun_l9_n776(x)
+ end
+end
+
+def fun_l8_n438(x)
+ if (x < 1)
+ fun_l9_n414(x)
+ else
+ fun_l9_n884(x)
+ end
+end
+
+def fun_l8_n439(x)
+ if (x < 1)
+ fun_l9_n528(x)
+ else
+ fun_l9_n419(x)
+ end
+end
+
+def fun_l8_n440(x)
+ if (x < 1)
+ fun_l9_n192(x)
+ else
+ fun_l9_n973(x)
+ end
+end
+
+def fun_l8_n441(x)
+ if (x < 1)
+ fun_l9_n679(x)
+ else
+ fun_l9_n135(x)
+ end
+end
+
+def fun_l8_n442(x)
+ if (x < 1)
+ fun_l9_n294(x)
+ else
+ fun_l9_n138(x)
+ end
+end
+
+def fun_l8_n443(x)
+ if (x < 1)
+ fun_l9_n784(x)
+ else
+ fun_l9_n940(x)
+ end
+end
+
+def fun_l8_n444(x)
+ if (x < 1)
+ fun_l9_n834(x)
+ else
+ fun_l9_n127(x)
+ end
+end
+
+def fun_l8_n445(x)
+ if (x < 1)
+ fun_l9_n217(x)
+ else
+ fun_l9_n788(x)
+ end
+end
+
+def fun_l8_n446(x)
+ if (x < 1)
+ fun_l9_n173(x)
+ else
+ fun_l9_n49(x)
+ end
+end
+
+def fun_l8_n447(x)
+ if (x < 1)
+ fun_l9_n493(x)
+ else
+ fun_l9_n384(x)
+ end
+end
+
+def fun_l8_n448(x)
+ if (x < 1)
+ fun_l9_n770(x)
+ else
+ fun_l9_n761(x)
+ end
+end
+
+def fun_l8_n449(x)
+ if (x < 1)
+ fun_l9_n633(x)
+ else
+ fun_l9_n555(x)
+ end
+end
+
+def fun_l8_n450(x)
+ if (x < 1)
+ fun_l9_n25(x)
+ else
+ fun_l9_n714(x)
+ end
+end
+
+def fun_l8_n451(x)
+ if (x < 1)
+ fun_l9_n960(x)
+ else
+ fun_l9_n371(x)
+ end
+end
+
+def fun_l8_n452(x)
+ if (x < 1)
+ fun_l9_n733(x)
+ else
+ fun_l9_n996(x)
+ end
+end
+
+def fun_l8_n453(x)
+ if (x < 1)
+ fun_l9_n628(x)
+ else
+ fun_l9_n512(x)
+ end
+end
+
+def fun_l8_n454(x)
+ if (x < 1)
+ fun_l9_n406(x)
+ else
+ fun_l9_n671(x)
+ end
+end
+
+def fun_l8_n455(x)
+ if (x < 1)
+ fun_l9_n47(x)
+ else
+ fun_l9_n102(x)
+ end
+end
+
+def fun_l8_n456(x)
+ if (x < 1)
+ fun_l9_n85(x)
+ else
+ fun_l9_n591(x)
+ end
+end
+
+def fun_l8_n457(x)
+ if (x < 1)
+ fun_l9_n234(x)
+ else
+ fun_l9_n209(x)
+ end
+end
+
+def fun_l8_n458(x)
+ if (x < 1)
+ fun_l9_n186(x)
+ else
+ fun_l9_n928(x)
+ end
+end
+
+def fun_l8_n459(x)
+ if (x < 1)
+ fun_l9_n36(x)
+ else
+ fun_l9_n783(x)
+ end
+end
+
+def fun_l8_n460(x)
+ if (x < 1)
+ fun_l9_n775(x)
+ else
+ fun_l9_n641(x)
+ end
+end
+
+def fun_l8_n461(x)
+ if (x < 1)
+ fun_l9_n426(x)
+ else
+ fun_l9_n740(x)
+ end
+end
+
+def fun_l8_n462(x)
+ if (x < 1)
+ fun_l9_n770(x)
+ else
+ fun_l9_n113(x)
+ end
+end
+
+def fun_l8_n463(x)
+ if (x < 1)
+ fun_l9_n94(x)
+ else
+ fun_l9_n574(x)
+ end
+end
+
+def fun_l8_n464(x)
+ if (x < 1)
+ fun_l9_n457(x)
+ else
+ fun_l9_n776(x)
+ end
+end
+
+def fun_l8_n465(x)
+ if (x < 1)
+ fun_l9_n252(x)
+ else
+ fun_l9_n182(x)
+ end
+end
+
+def fun_l8_n466(x)
+ if (x < 1)
+ fun_l9_n157(x)
+ else
+ fun_l9_n849(x)
+ end
+end
+
+def fun_l8_n467(x)
+ if (x < 1)
+ fun_l9_n393(x)
+ else
+ fun_l9_n437(x)
+ end
+end
+
+def fun_l8_n468(x)
+ if (x < 1)
+ fun_l9_n269(x)
+ else
+ fun_l9_n381(x)
+ end
+end
+
+def fun_l8_n469(x)
+ if (x < 1)
+ fun_l9_n97(x)
+ else
+ fun_l9_n184(x)
+ end
+end
+
+def fun_l8_n470(x)
+ if (x < 1)
+ fun_l9_n157(x)
+ else
+ fun_l9_n621(x)
+ end
+end
+
+def fun_l8_n471(x)
+ if (x < 1)
+ fun_l9_n627(x)
+ else
+ fun_l9_n793(x)
+ end
+end
+
+def fun_l8_n472(x)
+ if (x < 1)
+ fun_l9_n117(x)
+ else
+ fun_l9_n318(x)
+ end
+end
+
+def fun_l8_n473(x)
+ if (x < 1)
+ fun_l9_n238(x)
+ else
+ fun_l9_n559(x)
+ end
+end
+
+def fun_l8_n474(x)
+ if (x < 1)
+ fun_l9_n81(x)
+ else
+ fun_l9_n355(x)
+ end
+end
+
+def fun_l8_n475(x)
+ if (x < 1)
+ fun_l9_n889(x)
+ else
+ fun_l9_n940(x)
+ end
+end
+
+def fun_l8_n476(x)
+ if (x < 1)
+ fun_l9_n358(x)
+ else
+ fun_l9_n52(x)
+ end
+end
+
+def fun_l8_n477(x)
+ if (x < 1)
+ fun_l9_n144(x)
+ else
+ fun_l9_n575(x)
+ end
+end
+
+def fun_l8_n478(x)
+ if (x < 1)
+ fun_l9_n975(x)
+ else
+ fun_l9_n740(x)
+ end
+end
+
+def fun_l8_n479(x)
+ if (x < 1)
+ fun_l9_n552(x)
+ else
+ fun_l9_n169(x)
+ end
+end
+
+def fun_l8_n480(x)
+ if (x < 1)
+ fun_l9_n854(x)
+ else
+ fun_l9_n524(x)
+ end
+end
+
+def fun_l8_n481(x)
+ if (x < 1)
+ fun_l9_n461(x)
+ else
+ fun_l9_n65(x)
+ end
+end
+
+def fun_l8_n482(x)
+ if (x < 1)
+ fun_l9_n809(x)
+ else
+ fun_l9_n844(x)
+ end
+end
+
+def fun_l8_n483(x)
+ if (x < 1)
+ fun_l9_n741(x)
+ else
+ fun_l9_n327(x)
+ end
+end
+
+def fun_l8_n484(x)
+ if (x < 1)
+ fun_l9_n560(x)
+ else
+ fun_l9_n385(x)
+ end
+end
+
+def fun_l8_n485(x)
+ if (x < 1)
+ fun_l9_n798(x)
+ else
+ fun_l9_n154(x)
+ end
+end
+
+def fun_l8_n486(x)
+ if (x < 1)
+ fun_l9_n892(x)
+ else
+ fun_l9_n367(x)
+ end
+end
+
+def fun_l8_n487(x)
+ if (x < 1)
+ fun_l9_n423(x)
+ else
+ fun_l9_n723(x)
+ end
+end
+
+def fun_l8_n488(x)
+ if (x < 1)
+ fun_l9_n618(x)
+ else
+ fun_l9_n649(x)
+ end
+end
+
+def fun_l8_n489(x)
+ if (x < 1)
+ fun_l9_n261(x)
+ else
+ fun_l9_n495(x)
+ end
+end
+
+def fun_l8_n490(x)
+ if (x < 1)
+ fun_l9_n566(x)
+ else
+ fun_l9_n405(x)
+ end
+end
+
+def fun_l8_n491(x)
+ if (x < 1)
+ fun_l9_n718(x)
+ else
+ fun_l9_n674(x)
+ end
+end
+
+def fun_l8_n492(x)
+ if (x < 1)
+ fun_l9_n487(x)
+ else
+ fun_l9_n95(x)
+ end
+end
+
+def fun_l8_n493(x)
+ if (x < 1)
+ fun_l9_n473(x)
+ else
+ fun_l9_n722(x)
+ end
+end
+
+def fun_l8_n494(x)
+ if (x < 1)
+ fun_l9_n460(x)
+ else
+ fun_l9_n157(x)
+ end
+end
+
+def fun_l8_n495(x)
+ if (x < 1)
+ fun_l9_n812(x)
+ else
+ fun_l9_n807(x)
+ end
+end
+
+def fun_l8_n496(x)
+ if (x < 1)
+ fun_l9_n609(x)
+ else
+ fun_l9_n697(x)
+ end
+end
+
+def fun_l8_n497(x)
+ if (x < 1)
+ fun_l9_n894(x)
+ else
+ fun_l9_n580(x)
+ end
+end
+
+def fun_l8_n498(x)
+ if (x < 1)
+ fun_l9_n642(x)
+ else
+ fun_l9_n13(x)
+ end
+end
+
+def fun_l8_n499(x)
+ if (x < 1)
+ fun_l9_n961(x)
+ else
+ fun_l9_n669(x)
+ end
+end
+
+def fun_l8_n500(x)
+ if (x < 1)
+ fun_l9_n587(x)
+ else
+ fun_l9_n828(x)
+ end
+end
+
+def fun_l8_n501(x)
+ if (x < 1)
+ fun_l9_n30(x)
+ else
+ fun_l9_n966(x)
+ end
+end
+
+def fun_l8_n502(x)
+ if (x < 1)
+ fun_l9_n436(x)
+ else
+ fun_l9_n170(x)
+ end
+end
+
+def fun_l8_n503(x)
+ if (x < 1)
+ fun_l9_n20(x)
+ else
+ fun_l9_n927(x)
+ end
+end
+
+def fun_l8_n504(x)
+ if (x < 1)
+ fun_l9_n326(x)
+ else
+ fun_l9_n223(x)
+ end
+end
+
+def fun_l8_n505(x)
+ if (x < 1)
+ fun_l9_n911(x)
+ else
+ fun_l9_n746(x)
+ end
+end
+
+def fun_l8_n506(x)
+ if (x < 1)
+ fun_l9_n333(x)
+ else
+ fun_l9_n773(x)
+ end
+end
+
+def fun_l8_n507(x)
+ if (x < 1)
+ fun_l9_n514(x)
+ else
+ fun_l9_n882(x)
+ end
+end
+
+def fun_l8_n508(x)
+ if (x < 1)
+ fun_l9_n918(x)
+ else
+ fun_l9_n713(x)
+ end
+end
+
+def fun_l8_n509(x)
+ if (x < 1)
+ fun_l9_n694(x)
+ else
+ fun_l9_n950(x)
+ end
+end
+
+def fun_l8_n510(x)
+ if (x < 1)
+ fun_l9_n77(x)
+ else
+ fun_l9_n65(x)
+ end
+end
+
+def fun_l8_n511(x)
+ if (x < 1)
+ fun_l9_n506(x)
+ else
+ fun_l9_n144(x)
+ end
+end
+
+def fun_l8_n512(x)
+ if (x < 1)
+ fun_l9_n69(x)
+ else
+ fun_l9_n308(x)
+ end
+end
+
+def fun_l8_n513(x)
+ if (x < 1)
+ fun_l9_n377(x)
+ else
+ fun_l9_n531(x)
+ end
+end
+
+def fun_l8_n514(x)
+ if (x < 1)
+ fun_l9_n342(x)
+ else
+ fun_l9_n265(x)
+ end
+end
+
+def fun_l8_n515(x)
+ if (x < 1)
+ fun_l9_n453(x)
+ else
+ fun_l9_n227(x)
+ end
+end
+
+def fun_l8_n516(x)
+ if (x < 1)
+ fun_l9_n396(x)
+ else
+ fun_l9_n130(x)
+ end
+end
+
+def fun_l8_n517(x)
+ if (x < 1)
+ fun_l9_n715(x)
+ else
+ fun_l9_n953(x)
+ end
+end
+
+def fun_l8_n518(x)
+ if (x < 1)
+ fun_l9_n326(x)
+ else
+ fun_l9_n949(x)
+ end
+end
+
+def fun_l8_n519(x)
+ if (x < 1)
+ fun_l9_n535(x)
+ else
+ fun_l9_n41(x)
+ end
+end
+
+def fun_l8_n520(x)
+ if (x < 1)
+ fun_l9_n985(x)
+ else
+ fun_l9_n942(x)
+ end
+end
+
+def fun_l8_n521(x)
+ if (x < 1)
+ fun_l9_n347(x)
+ else
+ fun_l9_n263(x)
+ end
+end
+
+def fun_l8_n522(x)
+ if (x < 1)
+ fun_l9_n532(x)
+ else
+ fun_l9_n378(x)
+ end
+end
+
+def fun_l8_n523(x)
+ if (x < 1)
+ fun_l9_n362(x)
+ else
+ fun_l9_n630(x)
+ end
+end
+
+def fun_l8_n524(x)
+ if (x < 1)
+ fun_l9_n382(x)
+ else
+ fun_l9_n256(x)
+ end
+end
+
+def fun_l8_n525(x)
+ if (x < 1)
+ fun_l9_n444(x)
+ else
+ fun_l9_n756(x)
+ end
+end
+
+def fun_l8_n526(x)
+ if (x < 1)
+ fun_l9_n5(x)
+ else
+ fun_l9_n754(x)
+ end
+end
+
+def fun_l8_n527(x)
+ if (x < 1)
+ fun_l9_n157(x)
+ else
+ fun_l9_n279(x)
+ end
+end
+
+def fun_l8_n528(x)
+ if (x < 1)
+ fun_l9_n110(x)
+ else
+ fun_l9_n246(x)
+ end
+end
+
+def fun_l8_n529(x)
+ if (x < 1)
+ fun_l9_n312(x)
+ else
+ fun_l9_n890(x)
+ end
+end
+
+def fun_l8_n530(x)
+ if (x < 1)
+ fun_l9_n624(x)
+ else
+ fun_l9_n548(x)
+ end
+end
+
+def fun_l8_n531(x)
+ if (x < 1)
+ fun_l9_n615(x)
+ else
+ fun_l9_n948(x)
+ end
+end
+
+def fun_l8_n532(x)
+ if (x < 1)
+ fun_l9_n519(x)
+ else
+ fun_l9_n583(x)
+ end
+end
+
+def fun_l8_n533(x)
+ if (x < 1)
+ fun_l9_n292(x)
+ else
+ fun_l9_n827(x)
+ end
+end
+
+def fun_l8_n534(x)
+ if (x < 1)
+ fun_l9_n893(x)
+ else
+ fun_l9_n877(x)
+ end
+end
+
+def fun_l8_n535(x)
+ if (x < 1)
+ fun_l9_n873(x)
+ else
+ fun_l9_n399(x)
+ end
+end
+
+def fun_l8_n536(x)
+ if (x < 1)
+ fun_l9_n497(x)
+ else
+ fun_l9_n507(x)
+ end
+end
+
+def fun_l8_n537(x)
+ if (x < 1)
+ fun_l9_n404(x)
+ else
+ fun_l9_n815(x)
+ end
+end
+
+def fun_l8_n538(x)
+ if (x < 1)
+ fun_l9_n258(x)
+ else
+ fun_l9_n810(x)
+ end
+end
+
+def fun_l8_n539(x)
+ if (x < 1)
+ fun_l9_n907(x)
+ else
+ fun_l9_n720(x)
+ end
+end
+
+def fun_l8_n540(x)
+ if (x < 1)
+ fun_l9_n752(x)
+ else
+ fun_l9_n597(x)
+ end
+end
+
+def fun_l8_n541(x)
+ if (x < 1)
+ fun_l9_n802(x)
+ else
+ fun_l9_n45(x)
+ end
+end
+
+def fun_l8_n542(x)
+ if (x < 1)
+ fun_l9_n635(x)
+ else
+ fun_l9_n470(x)
+ end
+end
+
+def fun_l8_n543(x)
+ if (x < 1)
+ fun_l9_n353(x)
+ else
+ fun_l9_n238(x)
+ end
+end
+
+def fun_l8_n544(x)
+ if (x < 1)
+ fun_l9_n656(x)
+ else
+ fun_l9_n569(x)
+ end
+end
+
+def fun_l8_n545(x)
+ if (x < 1)
+ fun_l9_n494(x)
+ else
+ fun_l9_n566(x)
+ end
+end
+
+def fun_l8_n546(x)
+ if (x < 1)
+ fun_l9_n65(x)
+ else
+ fun_l9_n896(x)
+ end
+end
+
+def fun_l8_n547(x)
+ if (x < 1)
+ fun_l9_n995(x)
+ else
+ fun_l9_n727(x)
+ end
+end
+
+def fun_l8_n548(x)
+ if (x < 1)
+ fun_l9_n562(x)
+ else
+ fun_l9_n759(x)
+ end
+end
+
+def fun_l8_n549(x)
+ if (x < 1)
+ fun_l9_n210(x)
+ else
+ fun_l9_n734(x)
+ end
+end
+
+def fun_l8_n550(x)
+ if (x < 1)
+ fun_l9_n482(x)
+ else
+ fun_l9_n11(x)
+ end
+end
+
+def fun_l8_n551(x)
+ if (x < 1)
+ fun_l9_n86(x)
+ else
+ fun_l9_n867(x)
+ end
+end
+
+def fun_l8_n552(x)
+ if (x < 1)
+ fun_l9_n647(x)
+ else
+ fun_l9_n293(x)
+ end
+end
+
+def fun_l8_n553(x)
+ if (x < 1)
+ fun_l9_n98(x)
+ else
+ fun_l9_n868(x)
+ end
+end
+
+def fun_l8_n554(x)
+ if (x < 1)
+ fun_l9_n380(x)
+ else
+ fun_l9_n2(x)
+ end
+end
+
+def fun_l8_n555(x)
+ if (x < 1)
+ fun_l9_n274(x)
+ else
+ fun_l9_n489(x)
+ end
+end
+
+def fun_l8_n556(x)
+ if (x < 1)
+ fun_l9_n623(x)
+ else
+ fun_l9_n848(x)
+ end
+end
+
+def fun_l8_n557(x)
+ if (x < 1)
+ fun_l9_n642(x)
+ else
+ fun_l9_n890(x)
+ end
+end
+
+def fun_l8_n558(x)
+ if (x < 1)
+ fun_l9_n247(x)
+ else
+ fun_l9_n65(x)
+ end
+end
+
+def fun_l8_n559(x)
+ if (x < 1)
+ fun_l9_n896(x)
+ else
+ fun_l9_n937(x)
+ end
+end
+
+def fun_l8_n560(x)
+ if (x < 1)
+ fun_l9_n592(x)
+ else
+ fun_l9_n211(x)
+ end
+end
+
+def fun_l8_n561(x)
+ if (x < 1)
+ fun_l9_n205(x)
+ else
+ fun_l9_n971(x)
+ end
+end
+
+def fun_l8_n562(x)
+ if (x < 1)
+ fun_l9_n663(x)
+ else
+ fun_l9_n147(x)
+ end
+end
+
+def fun_l8_n563(x)
+ if (x < 1)
+ fun_l9_n722(x)
+ else
+ fun_l9_n649(x)
+ end
+end
+
+def fun_l8_n564(x)
+ if (x < 1)
+ fun_l9_n605(x)
+ else
+ fun_l9_n58(x)
+ end
+end
+
+def fun_l8_n565(x)
+ if (x < 1)
+ fun_l9_n914(x)
+ else
+ fun_l9_n617(x)
+ end
+end
+
+def fun_l8_n566(x)
+ if (x < 1)
+ fun_l9_n772(x)
+ else
+ fun_l9_n428(x)
+ end
+end
+
+def fun_l8_n567(x)
+ if (x < 1)
+ fun_l9_n870(x)
+ else
+ fun_l9_n672(x)
+ end
+end
+
+def fun_l8_n568(x)
+ if (x < 1)
+ fun_l9_n946(x)
+ else
+ fun_l9_n465(x)
+ end
+end
+
+def fun_l8_n569(x)
+ if (x < 1)
+ fun_l9_n507(x)
+ else
+ fun_l9_n346(x)
+ end
+end
+
+def fun_l8_n570(x)
+ if (x < 1)
+ fun_l9_n157(x)
+ else
+ fun_l9_n70(x)
+ end
+end
+
+def fun_l8_n571(x)
+ if (x < 1)
+ fun_l9_n549(x)
+ else
+ fun_l9_n831(x)
+ end
+end
+
+def fun_l8_n572(x)
+ if (x < 1)
+ fun_l9_n463(x)
+ else
+ fun_l9_n843(x)
+ end
+end
+
+def fun_l8_n573(x)
+ if (x < 1)
+ fun_l9_n676(x)
+ else
+ fun_l9_n651(x)
+ end
+end
+
+def fun_l8_n574(x)
+ if (x < 1)
+ fun_l9_n648(x)
+ else
+ fun_l9_n562(x)
+ end
+end
+
+def fun_l8_n575(x)
+ if (x < 1)
+ fun_l9_n771(x)
+ else
+ fun_l9_n549(x)
+ end
+end
+
+def fun_l8_n576(x)
+ if (x < 1)
+ fun_l9_n899(x)
+ else
+ fun_l9_n436(x)
+ end
+end
+
+def fun_l8_n577(x)
+ if (x < 1)
+ fun_l9_n110(x)
+ else
+ fun_l9_n708(x)
+ end
+end
+
+def fun_l8_n578(x)
+ if (x < 1)
+ fun_l9_n49(x)
+ else
+ fun_l9_n559(x)
+ end
+end
+
+def fun_l8_n579(x)
+ if (x < 1)
+ fun_l9_n937(x)
+ else
+ fun_l9_n962(x)
+ end
+end
+
+def fun_l8_n580(x)
+ if (x < 1)
+ fun_l9_n970(x)
+ else
+ fun_l9_n203(x)
+ end
+end
+
+def fun_l8_n581(x)
+ if (x < 1)
+ fun_l9_n901(x)
+ else
+ fun_l9_n666(x)
+ end
+end
+
+def fun_l8_n582(x)
+ if (x < 1)
+ fun_l9_n79(x)
+ else
+ fun_l9_n260(x)
+ end
+end
+
+def fun_l8_n583(x)
+ if (x < 1)
+ fun_l9_n167(x)
+ else
+ fun_l9_n512(x)
+ end
+end
+
+def fun_l8_n584(x)
+ if (x < 1)
+ fun_l9_n750(x)
+ else
+ fun_l9_n406(x)
+ end
+end
+
+def fun_l8_n585(x)
+ if (x < 1)
+ fun_l9_n118(x)
+ else
+ fun_l9_n525(x)
+ end
+end
+
+def fun_l8_n586(x)
+ if (x < 1)
+ fun_l9_n573(x)
+ else
+ fun_l9_n657(x)
+ end
+end
+
+def fun_l8_n587(x)
+ if (x < 1)
+ fun_l9_n228(x)
+ else
+ fun_l9_n903(x)
+ end
+end
+
+def fun_l8_n588(x)
+ if (x < 1)
+ fun_l9_n971(x)
+ else
+ fun_l9_n608(x)
+ end
+end
+
+def fun_l8_n589(x)
+ if (x < 1)
+ fun_l9_n334(x)
+ else
+ fun_l9_n704(x)
+ end
+end
+
+def fun_l8_n590(x)
+ if (x < 1)
+ fun_l9_n398(x)
+ else
+ fun_l9_n500(x)
+ end
+end
+
+def fun_l8_n591(x)
+ if (x < 1)
+ fun_l9_n196(x)
+ else
+ fun_l9_n616(x)
+ end
+end
+
+def fun_l8_n592(x)
+ if (x < 1)
+ fun_l9_n756(x)
+ else
+ fun_l9_n655(x)
+ end
+end
+
+def fun_l8_n593(x)
+ if (x < 1)
+ fun_l9_n864(x)
+ else
+ fun_l9_n697(x)
+ end
+end
+
+def fun_l8_n594(x)
+ if (x < 1)
+ fun_l9_n35(x)
+ else
+ fun_l9_n554(x)
+ end
+end
+
+def fun_l8_n595(x)
+ if (x < 1)
+ fun_l9_n480(x)
+ else
+ fun_l9_n688(x)
+ end
+end
+
+def fun_l8_n596(x)
+ if (x < 1)
+ fun_l9_n846(x)
+ else
+ fun_l9_n403(x)
+ end
+end
+
+def fun_l8_n597(x)
+ if (x < 1)
+ fun_l9_n522(x)
+ else
+ fun_l9_n532(x)
+ end
+end
+
+def fun_l8_n598(x)
+ if (x < 1)
+ fun_l9_n800(x)
+ else
+ fun_l9_n531(x)
+ end
+end
+
+def fun_l8_n599(x)
+ if (x < 1)
+ fun_l9_n64(x)
+ else
+ fun_l9_n876(x)
+ end
+end
+
+def fun_l8_n600(x)
+ if (x < 1)
+ fun_l9_n614(x)
+ else
+ fun_l9_n660(x)
+ end
+end
+
+def fun_l8_n601(x)
+ if (x < 1)
+ fun_l9_n119(x)
+ else
+ fun_l9_n998(x)
+ end
+end
+
+def fun_l8_n602(x)
+ if (x < 1)
+ fun_l9_n549(x)
+ else
+ fun_l9_n911(x)
+ end
+end
+
+def fun_l8_n603(x)
+ if (x < 1)
+ fun_l9_n659(x)
+ else
+ fun_l9_n664(x)
+ end
+end
+
+def fun_l8_n604(x)
+ if (x < 1)
+ fun_l9_n570(x)
+ else
+ fun_l9_n960(x)
+ end
+end
+
+def fun_l8_n605(x)
+ if (x < 1)
+ fun_l9_n82(x)
+ else
+ fun_l9_n658(x)
+ end
+end
+
+def fun_l8_n606(x)
+ if (x < 1)
+ fun_l9_n379(x)
+ else
+ fun_l9_n881(x)
+ end
+end
+
+def fun_l8_n607(x)
+ if (x < 1)
+ fun_l9_n104(x)
+ else
+ fun_l9_n869(x)
+ end
+end
+
+def fun_l8_n608(x)
+ if (x < 1)
+ fun_l9_n874(x)
+ else
+ fun_l9_n63(x)
+ end
+end
+
+def fun_l8_n609(x)
+ if (x < 1)
+ fun_l9_n921(x)
+ else
+ fun_l9_n667(x)
+ end
+end
+
+def fun_l8_n610(x)
+ if (x < 1)
+ fun_l9_n1(x)
+ else
+ fun_l9_n524(x)
+ end
+end
+
+def fun_l8_n611(x)
+ if (x < 1)
+ fun_l9_n523(x)
+ else
+ fun_l9_n230(x)
+ end
+end
+
+def fun_l8_n612(x)
+ if (x < 1)
+ fun_l9_n914(x)
+ else
+ fun_l9_n129(x)
+ end
+end
+
+def fun_l8_n613(x)
+ if (x < 1)
+ fun_l9_n591(x)
+ else
+ fun_l9_n551(x)
+ end
+end
+
+def fun_l8_n614(x)
+ if (x < 1)
+ fun_l9_n860(x)
+ else
+ fun_l9_n358(x)
+ end
+end
+
+def fun_l8_n615(x)
+ if (x < 1)
+ fun_l9_n387(x)
+ else
+ fun_l9_n425(x)
+ end
+end
+
+def fun_l8_n616(x)
+ if (x < 1)
+ fun_l9_n426(x)
+ else
+ fun_l9_n449(x)
+ end
+end
+
+def fun_l8_n617(x)
+ if (x < 1)
+ fun_l9_n627(x)
+ else
+ fun_l9_n399(x)
+ end
+end
+
+def fun_l8_n618(x)
+ if (x < 1)
+ fun_l9_n244(x)
+ else
+ fun_l9_n24(x)
+ end
+end
+
+def fun_l8_n619(x)
+ if (x < 1)
+ fun_l9_n574(x)
+ else
+ fun_l9_n640(x)
+ end
+end
+
+def fun_l8_n620(x)
+ if (x < 1)
+ fun_l9_n20(x)
+ else
+ fun_l9_n6(x)
+ end
+end
+
+def fun_l8_n621(x)
+ if (x < 1)
+ fun_l9_n148(x)
+ else
+ fun_l9_n718(x)
+ end
+end
+
+def fun_l8_n622(x)
+ if (x < 1)
+ fun_l9_n151(x)
+ else
+ fun_l9_n56(x)
+ end
+end
+
+def fun_l8_n623(x)
+ if (x < 1)
+ fun_l9_n672(x)
+ else
+ fun_l9_n93(x)
+ end
+end
+
+def fun_l8_n624(x)
+ if (x < 1)
+ fun_l9_n948(x)
+ else
+ fun_l9_n530(x)
+ end
+end
+
+def fun_l8_n625(x)
+ if (x < 1)
+ fun_l9_n0(x)
+ else
+ fun_l9_n334(x)
+ end
+end
+
+def fun_l8_n626(x)
+ if (x < 1)
+ fun_l9_n736(x)
+ else
+ fun_l9_n922(x)
+ end
+end
+
+def fun_l8_n627(x)
+ if (x < 1)
+ fun_l9_n291(x)
+ else
+ fun_l9_n953(x)
+ end
+end
+
+def fun_l8_n628(x)
+ if (x < 1)
+ fun_l9_n493(x)
+ else
+ fun_l9_n4(x)
+ end
+end
+
+def fun_l8_n629(x)
+ if (x < 1)
+ fun_l9_n890(x)
+ else
+ fun_l9_n297(x)
+ end
+end
+
+def fun_l8_n630(x)
+ if (x < 1)
+ fun_l9_n860(x)
+ else
+ fun_l9_n74(x)
+ end
+end
+
+def fun_l8_n631(x)
+ if (x < 1)
+ fun_l9_n400(x)
+ else
+ fun_l9_n584(x)
+ end
+end
+
+def fun_l8_n632(x)
+ if (x < 1)
+ fun_l9_n714(x)
+ else
+ fun_l9_n200(x)
+ end
+end
+
+def fun_l8_n633(x)
+ if (x < 1)
+ fun_l9_n433(x)
+ else
+ fun_l9_n489(x)
+ end
+end
+
+def fun_l8_n634(x)
+ if (x < 1)
+ fun_l9_n974(x)
+ else
+ fun_l9_n25(x)
+ end
+end
+
+def fun_l8_n635(x)
+ if (x < 1)
+ fun_l9_n739(x)
+ else
+ fun_l9_n695(x)
+ end
+end
+
+def fun_l8_n636(x)
+ if (x < 1)
+ fun_l9_n855(x)
+ else
+ fun_l9_n898(x)
+ end
+end
+
+def fun_l8_n637(x)
+ if (x < 1)
+ fun_l9_n868(x)
+ else
+ fun_l9_n825(x)
+ end
+end
+
+def fun_l8_n638(x)
+ if (x < 1)
+ fun_l9_n537(x)
+ else
+ fun_l9_n417(x)
+ end
+end
+
+def fun_l8_n639(x)
+ if (x < 1)
+ fun_l9_n975(x)
+ else
+ fun_l9_n484(x)
+ end
+end
+
+def fun_l8_n640(x)
+ if (x < 1)
+ fun_l9_n228(x)
+ else
+ fun_l9_n964(x)
+ end
+end
+
+def fun_l8_n641(x)
+ if (x < 1)
+ fun_l9_n743(x)
+ else
+ fun_l9_n986(x)
+ end
+end
+
+def fun_l8_n642(x)
+ if (x < 1)
+ fun_l9_n763(x)
+ else
+ fun_l9_n747(x)
+ end
+end
+
+def fun_l8_n643(x)
+ if (x < 1)
+ fun_l9_n77(x)
+ else
+ fun_l9_n149(x)
+ end
+end
+
+def fun_l8_n644(x)
+ if (x < 1)
+ fun_l9_n393(x)
+ else
+ fun_l9_n694(x)
+ end
+end
+
+def fun_l8_n645(x)
+ if (x < 1)
+ fun_l9_n196(x)
+ else
+ fun_l9_n211(x)
+ end
+end
+
+def fun_l8_n646(x)
+ if (x < 1)
+ fun_l9_n94(x)
+ else
+ fun_l9_n649(x)
+ end
+end
+
+def fun_l8_n647(x)
+ if (x < 1)
+ fun_l9_n669(x)
+ else
+ fun_l9_n12(x)
+ end
+end
+
+def fun_l8_n648(x)
+ if (x < 1)
+ fun_l9_n175(x)
+ else
+ fun_l9_n388(x)
+ end
+end
+
+def fun_l8_n649(x)
+ if (x < 1)
+ fun_l9_n122(x)
+ else
+ fun_l9_n736(x)
+ end
+end
+
+def fun_l8_n650(x)
+ if (x < 1)
+ fun_l9_n817(x)
+ else
+ fun_l9_n274(x)
+ end
+end
+
+def fun_l8_n651(x)
+ if (x < 1)
+ fun_l9_n919(x)
+ else
+ fun_l9_n538(x)
+ end
+end
+
+def fun_l8_n652(x)
+ if (x < 1)
+ fun_l9_n171(x)
+ else
+ fun_l9_n895(x)
+ end
+end
+
+def fun_l8_n653(x)
+ if (x < 1)
+ fun_l9_n417(x)
+ else
+ fun_l9_n618(x)
+ end
+end
+
+def fun_l8_n654(x)
+ if (x < 1)
+ fun_l9_n709(x)
+ else
+ fun_l9_n892(x)
+ end
+end
+
+def fun_l8_n655(x)
+ if (x < 1)
+ fun_l9_n906(x)
+ else
+ fun_l9_n602(x)
+ end
+end
+
+def fun_l8_n656(x)
+ if (x < 1)
+ fun_l9_n769(x)
+ else
+ fun_l9_n554(x)
+ end
+end
+
+def fun_l8_n657(x)
+ if (x < 1)
+ fun_l9_n468(x)
+ else
+ fun_l9_n320(x)
+ end
+end
+
+def fun_l8_n658(x)
+ if (x < 1)
+ fun_l9_n38(x)
+ else
+ fun_l9_n947(x)
+ end
+end
+
+def fun_l8_n659(x)
+ if (x < 1)
+ fun_l9_n635(x)
+ else
+ fun_l9_n327(x)
+ end
+end
+
+def fun_l8_n660(x)
+ if (x < 1)
+ fun_l9_n452(x)
+ else
+ fun_l9_n313(x)
+ end
+end
+
+def fun_l8_n661(x)
+ if (x < 1)
+ fun_l9_n568(x)
+ else
+ fun_l9_n383(x)
+ end
+end
+
+def fun_l8_n662(x)
+ if (x < 1)
+ fun_l9_n795(x)
+ else
+ fun_l9_n680(x)
+ end
+end
+
+def fun_l8_n663(x)
+ if (x < 1)
+ fun_l9_n152(x)
+ else
+ fun_l9_n846(x)
+ end
+end
+
+def fun_l8_n664(x)
+ if (x < 1)
+ fun_l9_n585(x)
+ else
+ fun_l9_n670(x)
+ end
+end
+
+def fun_l8_n665(x)
+ if (x < 1)
+ fun_l9_n133(x)
+ else
+ fun_l9_n799(x)
+ end
+end
+
+def fun_l8_n666(x)
+ if (x < 1)
+ fun_l9_n699(x)
+ else
+ fun_l9_n311(x)
+ end
+end
+
+def fun_l8_n667(x)
+ if (x < 1)
+ fun_l9_n558(x)
+ else
+ fun_l9_n669(x)
+ end
+end
+
+def fun_l8_n668(x)
+ if (x < 1)
+ fun_l9_n825(x)
+ else
+ fun_l9_n534(x)
+ end
+end
+
+def fun_l8_n669(x)
+ if (x < 1)
+ fun_l9_n143(x)
+ else
+ fun_l9_n21(x)
+ end
+end
+
+def fun_l8_n670(x)
+ if (x < 1)
+ fun_l9_n827(x)
+ else
+ fun_l9_n679(x)
+ end
+end
+
+def fun_l8_n671(x)
+ if (x < 1)
+ fun_l9_n704(x)
+ else
+ fun_l9_n922(x)
+ end
+end
+
+def fun_l8_n672(x)
+ if (x < 1)
+ fun_l9_n776(x)
+ else
+ fun_l9_n867(x)
+ end
+end
+
+def fun_l8_n673(x)
+ if (x < 1)
+ fun_l9_n871(x)
+ else
+ fun_l9_n315(x)
+ end
+end
+
+def fun_l8_n674(x)
+ if (x < 1)
+ fun_l9_n860(x)
+ else
+ fun_l9_n945(x)
+ end
+end
+
+def fun_l8_n675(x)
+ if (x < 1)
+ fun_l9_n890(x)
+ else
+ fun_l9_n629(x)
+ end
+end
+
+def fun_l8_n676(x)
+ if (x < 1)
+ fun_l9_n91(x)
+ else
+ fun_l9_n35(x)
+ end
+end
+
+def fun_l8_n677(x)
+ if (x < 1)
+ fun_l9_n825(x)
+ else
+ fun_l9_n922(x)
+ end
+end
+
+def fun_l8_n678(x)
+ if (x < 1)
+ fun_l9_n457(x)
+ else
+ fun_l9_n156(x)
+ end
+end
+
+def fun_l8_n679(x)
+ if (x < 1)
+ fun_l9_n455(x)
+ else
+ fun_l9_n809(x)
+ end
+end
+
+def fun_l8_n680(x)
+ if (x < 1)
+ fun_l9_n213(x)
+ else
+ fun_l9_n944(x)
+ end
+end
+
+def fun_l8_n681(x)
+ if (x < 1)
+ fun_l9_n760(x)
+ else
+ fun_l9_n137(x)
+ end
+end
+
+def fun_l8_n682(x)
+ if (x < 1)
+ fun_l9_n998(x)
+ else
+ fun_l9_n703(x)
+ end
+end
+
+def fun_l8_n683(x)
+ if (x < 1)
+ fun_l9_n988(x)
+ else
+ fun_l9_n952(x)
+ end
+end
+
+def fun_l8_n684(x)
+ if (x < 1)
+ fun_l9_n583(x)
+ else
+ fun_l9_n890(x)
+ end
+end
+
+def fun_l8_n685(x)
+ if (x < 1)
+ fun_l9_n879(x)
+ else
+ fun_l9_n978(x)
+ end
+end
+
+def fun_l8_n686(x)
+ if (x < 1)
+ fun_l9_n6(x)
+ else
+ fun_l9_n978(x)
+ end
+end
+
+def fun_l8_n687(x)
+ if (x < 1)
+ fun_l9_n67(x)
+ else
+ fun_l9_n149(x)
+ end
+end
+
+def fun_l8_n688(x)
+ if (x < 1)
+ fun_l9_n386(x)
+ else
+ fun_l9_n293(x)
+ end
+end
+
+def fun_l8_n689(x)
+ if (x < 1)
+ fun_l9_n493(x)
+ else
+ fun_l9_n140(x)
+ end
+end
+
+def fun_l8_n690(x)
+ if (x < 1)
+ fun_l9_n92(x)
+ else
+ fun_l9_n550(x)
+ end
+end
+
+def fun_l8_n691(x)
+ if (x < 1)
+ fun_l9_n456(x)
+ else
+ fun_l9_n976(x)
+ end
+end
+
+def fun_l8_n692(x)
+ if (x < 1)
+ fun_l9_n516(x)
+ else
+ fun_l9_n145(x)
+ end
+end
+
+def fun_l8_n693(x)
+ if (x < 1)
+ fun_l9_n757(x)
+ else
+ fun_l9_n647(x)
+ end
+end
+
+def fun_l8_n694(x)
+ if (x < 1)
+ fun_l9_n348(x)
+ else
+ fun_l9_n86(x)
+ end
+end
+
+def fun_l8_n695(x)
+ if (x < 1)
+ fun_l9_n546(x)
+ else
+ fun_l9_n320(x)
+ end
+end
+
+def fun_l8_n696(x)
+ if (x < 1)
+ fun_l9_n588(x)
+ else
+ fun_l9_n285(x)
+ end
+end
+
+def fun_l8_n697(x)
+ if (x < 1)
+ fun_l9_n635(x)
+ else
+ fun_l9_n710(x)
+ end
+end
+
+def fun_l8_n698(x)
+ if (x < 1)
+ fun_l9_n312(x)
+ else
+ fun_l9_n217(x)
+ end
+end
+
+def fun_l8_n699(x)
+ if (x < 1)
+ fun_l9_n714(x)
+ else
+ fun_l9_n723(x)
+ end
+end
+
+def fun_l8_n700(x)
+ if (x < 1)
+ fun_l9_n378(x)
+ else
+ fun_l9_n122(x)
+ end
+end
+
+def fun_l8_n701(x)
+ if (x < 1)
+ fun_l9_n696(x)
+ else
+ fun_l9_n577(x)
+ end
+end
+
+def fun_l8_n702(x)
+ if (x < 1)
+ fun_l9_n388(x)
+ else
+ fun_l9_n578(x)
+ end
+end
+
+def fun_l8_n703(x)
+ if (x < 1)
+ fun_l9_n502(x)
+ else
+ fun_l9_n410(x)
+ end
+end
+
+def fun_l8_n704(x)
+ if (x < 1)
+ fun_l9_n532(x)
+ else
+ fun_l9_n532(x)
+ end
+end
+
+def fun_l8_n705(x)
+ if (x < 1)
+ fun_l9_n662(x)
+ else
+ fun_l9_n361(x)
+ end
+end
+
+def fun_l8_n706(x)
+ if (x < 1)
+ fun_l9_n109(x)
+ else
+ fun_l9_n380(x)
+ end
+end
+
+def fun_l8_n707(x)
+ if (x < 1)
+ fun_l9_n653(x)
+ else
+ fun_l9_n566(x)
+ end
+end
+
+def fun_l8_n708(x)
+ if (x < 1)
+ fun_l9_n587(x)
+ else
+ fun_l9_n465(x)
+ end
+end
+
+def fun_l8_n709(x)
+ if (x < 1)
+ fun_l9_n523(x)
+ else
+ fun_l9_n274(x)
+ end
+end
+
+def fun_l8_n710(x)
+ if (x < 1)
+ fun_l9_n459(x)
+ else
+ fun_l9_n872(x)
+ end
+end
+
+def fun_l8_n711(x)
+ if (x < 1)
+ fun_l9_n71(x)
+ else
+ fun_l9_n135(x)
+ end
+end
+
+def fun_l8_n712(x)
+ if (x < 1)
+ fun_l9_n770(x)
+ else
+ fun_l9_n405(x)
+ end
+end
+
+def fun_l8_n713(x)
+ if (x < 1)
+ fun_l9_n233(x)
+ else
+ fun_l9_n232(x)
+ end
+end
+
+def fun_l8_n714(x)
+ if (x < 1)
+ fun_l9_n29(x)
+ else
+ fun_l9_n23(x)
+ end
+end
+
+def fun_l8_n715(x)
+ if (x < 1)
+ fun_l9_n44(x)
+ else
+ fun_l9_n185(x)
+ end
+end
+
+def fun_l8_n716(x)
+ if (x < 1)
+ fun_l9_n980(x)
+ else
+ fun_l9_n967(x)
+ end
+end
+
+def fun_l8_n717(x)
+ if (x < 1)
+ fun_l9_n323(x)
+ else
+ fun_l9_n636(x)
+ end
+end
+
+def fun_l8_n718(x)
+ if (x < 1)
+ fun_l9_n733(x)
+ else
+ fun_l9_n10(x)
+ end
+end
+
+def fun_l8_n719(x)
+ if (x < 1)
+ fun_l9_n100(x)
+ else
+ fun_l9_n123(x)
+ end
+end
+
+def fun_l8_n720(x)
+ if (x < 1)
+ fun_l9_n572(x)
+ else
+ fun_l9_n813(x)
+ end
+end
+
+def fun_l8_n721(x)
+ if (x < 1)
+ fun_l9_n634(x)
+ else
+ fun_l9_n152(x)
+ end
+end
+
+def fun_l8_n722(x)
+ if (x < 1)
+ fun_l9_n527(x)
+ else
+ fun_l9_n881(x)
+ end
+end
+
+def fun_l8_n723(x)
+ if (x < 1)
+ fun_l9_n202(x)
+ else
+ fun_l9_n294(x)
+ end
+end
+
+def fun_l8_n724(x)
+ if (x < 1)
+ fun_l9_n668(x)
+ else
+ fun_l9_n792(x)
+ end
+end
+
+def fun_l8_n725(x)
+ if (x < 1)
+ fun_l9_n309(x)
+ else
+ fun_l9_n612(x)
+ end
+end
+
+def fun_l8_n726(x)
+ if (x < 1)
+ fun_l9_n320(x)
+ else
+ fun_l9_n459(x)
+ end
+end
+
+def fun_l8_n727(x)
+ if (x < 1)
+ fun_l9_n355(x)
+ else
+ fun_l9_n321(x)
+ end
+end
+
+def fun_l8_n728(x)
+ if (x < 1)
+ fun_l9_n237(x)
+ else
+ fun_l9_n135(x)
+ end
+end
+
+def fun_l8_n729(x)
+ if (x < 1)
+ fun_l9_n980(x)
+ else
+ fun_l9_n209(x)
+ end
+end
+
+def fun_l8_n730(x)
+ if (x < 1)
+ fun_l9_n957(x)
+ else
+ fun_l9_n481(x)
+ end
+end
+
+def fun_l8_n731(x)
+ if (x < 1)
+ fun_l9_n594(x)
+ else
+ fun_l9_n396(x)
+ end
+end
+
+def fun_l8_n732(x)
+ if (x < 1)
+ fun_l9_n916(x)
+ else
+ fun_l9_n473(x)
+ end
+end
+
+def fun_l8_n733(x)
+ if (x < 1)
+ fun_l9_n376(x)
+ else
+ fun_l9_n231(x)
+ end
+end
+
+def fun_l8_n734(x)
+ if (x < 1)
+ fun_l9_n758(x)
+ else
+ fun_l9_n111(x)
+ end
+end
+
+def fun_l8_n735(x)
+ if (x < 1)
+ fun_l9_n131(x)
+ else
+ fun_l9_n586(x)
+ end
+end
+
+def fun_l8_n736(x)
+ if (x < 1)
+ fun_l9_n290(x)
+ else
+ fun_l9_n227(x)
+ end
+end
+
+def fun_l8_n737(x)
+ if (x < 1)
+ fun_l9_n784(x)
+ else
+ fun_l9_n869(x)
+ end
+end
+
+def fun_l8_n738(x)
+ if (x < 1)
+ fun_l9_n94(x)
+ else
+ fun_l9_n79(x)
+ end
+end
+
+def fun_l8_n739(x)
+ if (x < 1)
+ fun_l9_n489(x)
+ else
+ fun_l9_n347(x)
+ end
+end
+
+def fun_l8_n740(x)
+ if (x < 1)
+ fun_l9_n4(x)
+ else
+ fun_l9_n587(x)
+ end
+end
+
+def fun_l8_n741(x)
+ if (x < 1)
+ fun_l9_n949(x)
+ else
+ fun_l9_n196(x)
+ end
+end
+
+def fun_l8_n742(x)
+ if (x < 1)
+ fun_l9_n577(x)
+ else
+ fun_l9_n562(x)
+ end
+end
+
+def fun_l8_n743(x)
+ if (x < 1)
+ fun_l9_n656(x)
+ else
+ fun_l9_n685(x)
+ end
+end
+
+def fun_l8_n744(x)
+ if (x < 1)
+ fun_l9_n792(x)
+ else
+ fun_l9_n572(x)
+ end
+end
+
+def fun_l8_n745(x)
+ if (x < 1)
+ fun_l9_n318(x)
+ else
+ fun_l9_n335(x)
+ end
+end
+
+def fun_l8_n746(x)
+ if (x < 1)
+ fun_l9_n59(x)
+ else
+ fun_l9_n109(x)
+ end
+end
+
+def fun_l8_n747(x)
+ if (x < 1)
+ fun_l9_n276(x)
+ else
+ fun_l9_n567(x)
+ end
+end
+
+def fun_l8_n748(x)
+ if (x < 1)
+ fun_l9_n902(x)
+ else
+ fun_l9_n220(x)
+ end
+end
+
+def fun_l8_n749(x)
+ if (x < 1)
+ fun_l9_n208(x)
+ else
+ fun_l9_n108(x)
+ end
+end
+
+def fun_l8_n750(x)
+ if (x < 1)
+ fun_l9_n927(x)
+ else
+ fun_l9_n950(x)
+ end
+end
+
+def fun_l8_n751(x)
+ if (x < 1)
+ fun_l9_n371(x)
+ else
+ fun_l9_n72(x)
+ end
+end
+
+def fun_l8_n752(x)
+ if (x < 1)
+ fun_l9_n455(x)
+ else
+ fun_l9_n467(x)
+ end
+end
+
+def fun_l8_n753(x)
+ if (x < 1)
+ fun_l9_n344(x)
+ else
+ fun_l9_n82(x)
+ end
+end
+
+def fun_l8_n754(x)
+ if (x < 1)
+ fun_l9_n902(x)
+ else
+ fun_l9_n219(x)
+ end
+end
+
+def fun_l8_n755(x)
+ if (x < 1)
+ fun_l9_n101(x)
+ else
+ fun_l9_n523(x)
+ end
+end
+
+def fun_l8_n756(x)
+ if (x < 1)
+ fun_l9_n517(x)
+ else
+ fun_l9_n97(x)
+ end
+end
+
+def fun_l8_n757(x)
+ if (x < 1)
+ fun_l9_n571(x)
+ else
+ fun_l9_n650(x)
+ end
+end
+
+def fun_l8_n758(x)
+ if (x < 1)
+ fun_l9_n701(x)
+ else
+ fun_l9_n962(x)
+ end
+end
+
+def fun_l8_n759(x)
+ if (x < 1)
+ fun_l9_n589(x)
+ else
+ fun_l9_n808(x)
+ end
+end
+
+def fun_l8_n760(x)
+ if (x < 1)
+ fun_l9_n570(x)
+ else
+ fun_l9_n90(x)
+ end
+end
+
+def fun_l8_n761(x)
+ if (x < 1)
+ fun_l9_n792(x)
+ else
+ fun_l9_n673(x)
+ end
+end
+
+def fun_l8_n762(x)
+ if (x < 1)
+ fun_l9_n514(x)
+ else
+ fun_l9_n771(x)
+ end
+end
+
+def fun_l8_n763(x)
+ if (x < 1)
+ fun_l9_n404(x)
+ else
+ fun_l9_n264(x)
+ end
+end
+
+def fun_l8_n764(x)
+ if (x < 1)
+ fun_l9_n270(x)
+ else
+ fun_l9_n738(x)
+ end
+end
+
+def fun_l8_n765(x)
+ if (x < 1)
+ fun_l9_n915(x)
+ else
+ fun_l9_n901(x)
+ end
+end
+
+def fun_l8_n766(x)
+ if (x < 1)
+ fun_l9_n120(x)
+ else
+ fun_l9_n176(x)
+ end
+end
+
+def fun_l8_n767(x)
+ if (x < 1)
+ fun_l9_n470(x)
+ else
+ fun_l9_n837(x)
+ end
+end
+
+def fun_l8_n768(x)
+ if (x < 1)
+ fun_l9_n912(x)
+ else
+ fun_l9_n898(x)
+ end
+end
+
+def fun_l8_n769(x)
+ if (x < 1)
+ fun_l9_n76(x)
+ else
+ fun_l9_n350(x)
+ end
+end
+
+def fun_l8_n770(x)
+ if (x < 1)
+ fun_l9_n487(x)
+ else
+ fun_l9_n572(x)
+ end
+end
+
+def fun_l8_n771(x)
+ if (x < 1)
+ fun_l9_n823(x)
+ else
+ fun_l9_n815(x)
+ end
+end
+
+def fun_l8_n772(x)
+ if (x < 1)
+ fun_l9_n655(x)
+ else
+ fun_l9_n968(x)
+ end
+end
+
+def fun_l8_n773(x)
+ if (x < 1)
+ fun_l9_n824(x)
+ else
+ fun_l9_n741(x)
+ end
+end
+
+def fun_l8_n774(x)
+ if (x < 1)
+ fun_l9_n143(x)
+ else
+ fun_l9_n641(x)
+ end
+end
+
+def fun_l8_n775(x)
+ if (x < 1)
+ fun_l9_n985(x)
+ else
+ fun_l9_n592(x)
+ end
+end
+
+def fun_l8_n776(x)
+ if (x < 1)
+ fun_l9_n686(x)
+ else
+ fun_l9_n234(x)
+ end
+end
+
+def fun_l8_n777(x)
+ if (x < 1)
+ fun_l9_n540(x)
+ else
+ fun_l9_n794(x)
+ end
+end
+
+def fun_l8_n778(x)
+ if (x < 1)
+ fun_l9_n154(x)
+ else
+ fun_l9_n656(x)
+ end
+end
+
+def fun_l8_n779(x)
+ if (x < 1)
+ fun_l9_n836(x)
+ else
+ fun_l9_n756(x)
+ end
+end
+
+def fun_l8_n780(x)
+ if (x < 1)
+ fun_l9_n98(x)
+ else
+ fun_l9_n233(x)
+ end
+end
+
+def fun_l8_n781(x)
+ if (x < 1)
+ fun_l9_n584(x)
+ else
+ fun_l9_n398(x)
+ end
+end
+
+def fun_l8_n782(x)
+ if (x < 1)
+ fun_l9_n851(x)
+ else
+ fun_l9_n967(x)
+ end
+end
+
+def fun_l8_n783(x)
+ if (x < 1)
+ fun_l9_n868(x)
+ else
+ fun_l9_n57(x)
+ end
+end
+
+def fun_l8_n784(x)
+ if (x < 1)
+ fun_l9_n782(x)
+ else
+ fun_l9_n214(x)
+ end
+end
+
+def fun_l8_n785(x)
+ if (x < 1)
+ fun_l9_n79(x)
+ else
+ fun_l9_n489(x)
+ end
+end
+
+def fun_l8_n786(x)
+ if (x < 1)
+ fun_l9_n757(x)
+ else
+ fun_l9_n918(x)
+ end
+end
+
+def fun_l8_n787(x)
+ if (x < 1)
+ fun_l9_n985(x)
+ else
+ fun_l9_n781(x)
+ end
+end
+
+def fun_l8_n788(x)
+ if (x < 1)
+ fun_l9_n921(x)
+ else
+ fun_l9_n542(x)
+ end
+end
+
+def fun_l8_n789(x)
+ if (x < 1)
+ fun_l9_n982(x)
+ else
+ fun_l9_n834(x)
+ end
+end
+
+def fun_l8_n790(x)
+ if (x < 1)
+ fun_l9_n482(x)
+ else
+ fun_l9_n424(x)
+ end
+end
+
+def fun_l8_n791(x)
+ if (x < 1)
+ fun_l9_n289(x)
+ else
+ fun_l9_n487(x)
+ end
+end
+
+def fun_l8_n792(x)
+ if (x < 1)
+ fun_l9_n820(x)
+ else
+ fun_l9_n242(x)
+ end
+end
+
+def fun_l8_n793(x)
+ if (x < 1)
+ fun_l9_n110(x)
+ else
+ fun_l9_n356(x)
+ end
+end
+
+def fun_l8_n794(x)
+ if (x < 1)
+ fun_l9_n142(x)
+ else
+ fun_l9_n607(x)
+ end
+end
+
+def fun_l8_n795(x)
+ if (x < 1)
+ fun_l9_n21(x)
+ else
+ fun_l9_n14(x)
+ end
+end
+
+def fun_l8_n796(x)
+ if (x < 1)
+ fun_l9_n147(x)
+ else
+ fun_l9_n767(x)
+ end
+end
+
+def fun_l8_n797(x)
+ if (x < 1)
+ fun_l9_n427(x)
+ else
+ fun_l9_n854(x)
+ end
+end
+
+def fun_l8_n798(x)
+ if (x < 1)
+ fun_l9_n557(x)
+ else
+ fun_l9_n729(x)
+ end
+end
+
+def fun_l8_n799(x)
+ if (x < 1)
+ fun_l9_n150(x)
+ else
+ fun_l9_n180(x)
+ end
+end
+
+def fun_l8_n800(x)
+ if (x < 1)
+ fun_l9_n888(x)
+ else
+ fun_l9_n612(x)
+ end
+end
+
+def fun_l8_n801(x)
+ if (x < 1)
+ fun_l9_n631(x)
+ else
+ fun_l9_n370(x)
+ end
+end
+
+def fun_l8_n802(x)
+ if (x < 1)
+ fun_l9_n759(x)
+ else
+ fun_l9_n446(x)
+ end
+end
+
+def fun_l8_n803(x)
+ if (x < 1)
+ fun_l9_n182(x)
+ else
+ fun_l9_n705(x)
+ end
+end
+
+def fun_l8_n804(x)
+ if (x < 1)
+ fun_l9_n474(x)
+ else
+ fun_l9_n461(x)
+ end
+end
+
+def fun_l8_n805(x)
+ if (x < 1)
+ fun_l9_n897(x)
+ else
+ fun_l9_n935(x)
+ end
+end
+
+def fun_l8_n806(x)
+ if (x < 1)
+ fun_l9_n793(x)
+ else
+ fun_l9_n981(x)
+ end
+end
+
+def fun_l8_n807(x)
+ if (x < 1)
+ fun_l9_n124(x)
+ else
+ fun_l9_n528(x)
+ end
+end
+
+def fun_l8_n808(x)
+ if (x < 1)
+ fun_l9_n637(x)
+ else
+ fun_l9_n989(x)
+ end
+end
+
+def fun_l8_n809(x)
+ if (x < 1)
+ fun_l9_n388(x)
+ else
+ fun_l9_n196(x)
+ end
+end
+
+def fun_l8_n810(x)
+ if (x < 1)
+ fun_l9_n333(x)
+ else
+ fun_l9_n806(x)
+ end
+end
+
+def fun_l8_n811(x)
+ if (x < 1)
+ fun_l9_n541(x)
+ else
+ fun_l9_n6(x)
+ end
+end
+
+def fun_l8_n812(x)
+ if (x < 1)
+ fun_l9_n346(x)
+ else
+ fun_l9_n496(x)
+ end
+end
+
+def fun_l8_n813(x)
+ if (x < 1)
+ fun_l9_n374(x)
+ else
+ fun_l9_n739(x)
+ end
+end
+
+def fun_l8_n814(x)
+ if (x < 1)
+ fun_l9_n121(x)
+ else
+ fun_l9_n184(x)
+ end
+end
+
+def fun_l8_n815(x)
+ if (x < 1)
+ fun_l9_n460(x)
+ else
+ fun_l9_n759(x)
+ end
+end
+
+def fun_l8_n816(x)
+ if (x < 1)
+ fun_l9_n598(x)
+ else
+ fun_l9_n638(x)
+ end
+end
+
+def fun_l8_n817(x)
+ if (x < 1)
+ fun_l9_n986(x)
+ else
+ fun_l9_n658(x)
+ end
+end
+
+def fun_l8_n818(x)
+ if (x < 1)
+ fun_l9_n169(x)
+ else
+ fun_l9_n696(x)
+ end
+end
+
+def fun_l8_n819(x)
+ if (x < 1)
+ fun_l9_n885(x)
+ else
+ fun_l9_n74(x)
+ end
+end
+
+def fun_l8_n820(x)
+ if (x < 1)
+ fun_l9_n24(x)
+ else
+ fun_l9_n87(x)
+ end
+end
+
+def fun_l8_n821(x)
+ if (x < 1)
+ fun_l9_n59(x)
+ else
+ fun_l9_n843(x)
+ end
+end
+
+def fun_l8_n822(x)
+ if (x < 1)
+ fun_l9_n640(x)
+ else
+ fun_l9_n360(x)
+ end
+end
+
+def fun_l8_n823(x)
+ if (x < 1)
+ fun_l9_n355(x)
+ else
+ fun_l9_n28(x)
+ end
+end
+
+def fun_l8_n824(x)
+ if (x < 1)
+ fun_l9_n553(x)
+ else
+ fun_l9_n475(x)
+ end
+end
+
+def fun_l8_n825(x)
+ if (x < 1)
+ fun_l9_n757(x)
+ else
+ fun_l9_n790(x)
+ end
+end
+
+def fun_l8_n826(x)
+ if (x < 1)
+ fun_l9_n787(x)
+ else
+ fun_l9_n226(x)
+ end
+end
+
+def fun_l8_n827(x)
+ if (x < 1)
+ fun_l9_n622(x)
+ else
+ fun_l9_n711(x)
+ end
+end
+
+def fun_l8_n828(x)
+ if (x < 1)
+ fun_l9_n399(x)
+ else
+ fun_l9_n7(x)
+ end
+end
+
+def fun_l8_n829(x)
+ if (x < 1)
+ fun_l9_n589(x)
+ else
+ fun_l9_n276(x)
+ end
+end
+
+def fun_l8_n830(x)
+ if (x < 1)
+ fun_l9_n83(x)
+ else
+ fun_l9_n126(x)
+ end
+end
+
+def fun_l8_n831(x)
+ if (x < 1)
+ fun_l9_n576(x)
+ else
+ fun_l9_n855(x)
+ end
+end
+
+def fun_l8_n832(x)
+ if (x < 1)
+ fun_l9_n756(x)
+ else
+ fun_l9_n638(x)
+ end
+end
+
+def fun_l8_n833(x)
+ if (x < 1)
+ fun_l9_n388(x)
+ else
+ fun_l9_n154(x)
+ end
+end
+
+def fun_l8_n834(x)
+ if (x < 1)
+ fun_l9_n268(x)
+ else
+ fun_l9_n85(x)
+ end
+end
+
+def fun_l8_n835(x)
+ if (x < 1)
+ fun_l9_n32(x)
+ else
+ fun_l9_n678(x)
+ end
+end
+
+def fun_l8_n836(x)
+ if (x < 1)
+ fun_l9_n537(x)
+ else
+ fun_l9_n984(x)
+ end
+end
+
+def fun_l8_n837(x)
+ if (x < 1)
+ fun_l9_n798(x)
+ else
+ fun_l9_n221(x)
+ end
+end
+
+def fun_l8_n838(x)
+ if (x < 1)
+ fun_l9_n153(x)
+ else
+ fun_l9_n414(x)
+ end
+end
+
+def fun_l8_n839(x)
+ if (x < 1)
+ fun_l9_n904(x)
+ else
+ fun_l9_n847(x)
+ end
+end
+
+def fun_l8_n840(x)
+ if (x < 1)
+ fun_l9_n501(x)
+ else
+ fun_l9_n971(x)
+ end
+end
+
+def fun_l8_n841(x)
+ if (x < 1)
+ fun_l9_n94(x)
+ else
+ fun_l9_n719(x)
+ end
+end
+
+def fun_l8_n842(x)
+ if (x < 1)
+ fun_l9_n343(x)
+ else
+ fun_l9_n365(x)
+ end
+end
+
+def fun_l8_n843(x)
+ if (x < 1)
+ fun_l9_n541(x)
+ else
+ fun_l9_n800(x)
+ end
+end
+
+def fun_l8_n844(x)
+ if (x < 1)
+ fun_l9_n122(x)
+ else
+ fun_l9_n662(x)
+ end
+end
+
+def fun_l8_n845(x)
+ if (x < 1)
+ fun_l9_n312(x)
+ else
+ fun_l9_n688(x)
+ end
+end
+
+def fun_l8_n846(x)
+ if (x < 1)
+ fun_l9_n668(x)
+ else
+ fun_l9_n805(x)
+ end
+end
+
+def fun_l8_n847(x)
+ if (x < 1)
+ fun_l9_n785(x)
+ else
+ fun_l9_n917(x)
+ end
+end
+
+def fun_l8_n848(x)
+ if (x < 1)
+ fun_l9_n333(x)
+ else
+ fun_l9_n782(x)
+ end
+end
+
+def fun_l8_n849(x)
+ if (x < 1)
+ fun_l9_n402(x)
+ else
+ fun_l9_n398(x)
+ end
+end
+
+def fun_l8_n850(x)
+ if (x < 1)
+ fun_l9_n708(x)
+ else
+ fun_l9_n199(x)
+ end
+end
+
+def fun_l8_n851(x)
+ if (x < 1)
+ fun_l9_n950(x)
+ else
+ fun_l9_n308(x)
+ end
+end
+
+def fun_l8_n852(x)
+ if (x < 1)
+ fun_l9_n524(x)
+ else
+ fun_l9_n411(x)
+ end
+end
+
+def fun_l8_n853(x)
+ if (x < 1)
+ fun_l9_n467(x)
+ else
+ fun_l9_n807(x)
+ end
+end
+
+def fun_l8_n854(x)
+ if (x < 1)
+ fun_l9_n316(x)
+ else
+ fun_l9_n717(x)
+ end
+end
+
+def fun_l8_n855(x)
+ if (x < 1)
+ fun_l9_n955(x)
+ else
+ fun_l9_n631(x)
+ end
+end
+
+def fun_l8_n856(x)
+ if (x < 1)
+ fun_l9_n244(x)
+ else
+ fun_l9_n838(x)
+ end
+end
+
+def fun_l8_n857(x)
+ if (x < 1)
+ fun_l9_n574(x)
+ else
+ fun_l9_n720(x)
+ end
+end
+
+def fun_l8_n858(x)
+ if (x < 1)
+ fun_l9_n80(x)
+ else
+ fun_l9_n888(x)
+ end
+end
+
+def fun_l8_n859(x)
+ if (x < 1)
+ fun_l9_n675(x)
+ else
+ fun_l9_n71(x)
+ end
+end
+
+def fun_l8_n860(x)
+ if (x < 1)
+ fun_l9_n375(x)
+ else
+ fun_l9_n507(x)
+ end
+end
+
+def fun_l8_n861(x)
+ if (x < 1)
+ fun_l9_n450(x)
+ else
+ fun_l9_n705(x)
+ end
+end
+
+def fun_l8_n862(x)
+ if (x < 1)
+ fun_l9_n358(x)
+ else
+ fun_l9_n614(x)
+ end
+end
+
+def fun_l8_n863(x)
+ if (x < 1)
+ fun_l9_n690(x)
+ else
+ fun_l9_n73(x)
+ end
+end
+
+def fun_l8_n864(x)
+ if (x < 1)
+ fun_l9_n806(x)
+ else
+ fun_l9_n85(x)
+ end
+end
+
+def fun_l8_n865(x)
+ if (x < 1)
+ fun_l9_n251(x)
+ else
+ fun_l9_n657(x)
+ end
+end
+
+def fun_l8_n866(x)
+ if (x < 1)
+ fun_l9_n921(x)
+ else
+ fun_l9_n199(x)
+ end
+end
+
+def fun_l8_n867(x)
+ if (x < 1)
+ fun_l9_n265(x)
+ else
+ fun_l9_n937(x)
+ end
+end
+
+def fun_l8_n868(x)
+ if (x < 1)
+ fun_l9_n610(x)
+ else
+ fun_l9_n7(x)
+ end
+end
+
+def fun_l8_n869(x)
+ if (x < 1)
+ fun_l9_n703(x)
+ else
+ fun_l9_n74(x)
+ end
+end
+
+def fun_l8_n870(x)
+ if (x < 1)
+ fun_l9_n869(x)
+ else
+ fun_l9_n665(x)
+ end
+end
+
+def fun_l8_n871(x)
+ if (x < 1)
+ fun_l9_n808(x)
+ else
+ fun_l9_n686(x)
+ end
+end
+
+def fun_l8_n872(x)
+ if (x < 1)
+ fun_l9_n678(x)
+ else
+ fun_l9_n709(x)
+ end
+end
+
+def fun_l8_n873(x)
+ if (x < 1)
+ fun_l9_n557(x)
+ else
+ fun_l9_n63(x)
+ end
+end
+
+def fun_l8_n874(x)
+ if (x < 1)
+ fun_l9_n214(x)
+ else
+ fun_l9_n926(x)
+ end
+end
+
+def fun_l8_n875(x)
+ if (x < 1)
+ fun_l9_n218(x)
+ else
+ fun_l9_n810(x)
+ end
+end
+
+def fun_l8_n876(x)
+ if (x < 1)
+ fun_l9_n365(x)
+ else
+ fun_l9_n257(x)
+ end
+end
+
+def fun_l8_n877(x)
+ if (x < 1)
+ fun_l9_n51(x)
+ else
+ fun_l9_n68(x)
+ end
+end
+
+def fun_l8_n878(x)
+ if (x < 1)
+ fun_l9_n20(x)
+ else
+ fun_l9_n55(x)
+ end
+end
+
+def fun_l8_n879(x)
+ if (x < 1)
+ fun_l9_n686(x)
+ else
+ fun_l9_n226(x)
+ end
+end
+
+def fun_l8_n880(x)
+ if (x < 1)
+ fun_l9_n283(x)
+ else
+ fun_l9_n270(x)
+ end
+end
+
+def fun_l8_n881(x)
+ if (x < 1)
+ fun_l9_n811(x)
+ else
+ fun_l9_n820(x)
+ end
+end
+
+def fun_l8_n882(x)
+ if (x < 1)
+ fun_l9_n342(x)
+ else
+ fun_l9_n919(x)
+ end
+end
+
+def fun_l8_n883(x)
+ if (x < 1)
+ fun_l9_n515(x)
+ else
+ fun_l9_n24(x)
+ end
+end
+
+def fun_l8_n884(x)
+ if (x < 1)
+ fun_l9_n0(x)
+ else
+ fun_l9_n150(x)
+ end
+end
+
+def fun_l8_n885(x)
+ if (x < 1)
+ fun_l9_n481(x)
+ else
+ fun_l9_n65(x)
+ end
+end
+
+def fun_l8_n886(x)
+ if (x < 1)
+ fun_l9_n252(x)
+ else
+ fun_l9_n544(x)
+ end
+end
+
+def fun_l8_n887(x)
+ if (x < 1)
+ fun_l9_n635(x)
+ else
+ fun_l9_n402(x)
+ end
+end
+
+def fun_l8_n888(x)
+ if (x < 1)
+ fun_l9_n78(x)
+ else
+ fun_l9_n779(x)
+ end
+end
+
+def fun_l8_n889(x)
+ if (x < 1)
+ fun_l9_n249(x)
+ else
+ fun_l9_n331(x)
+ end
+end
+
+def fun_l8_n890(x)
+ if (x < 1)
+ fun_l9_n914(x)
+ else
+ fun_l9_n115(x)
+ end
+end
+
+def fun_l8_n891(x)
+ if (x < 1)
+ fun_l9_n832(x)
+ else
+ fun_l9_n34(x)
+ end
+end
+
+def fun_l8_n892(x)
+ if (x < 1)
+ fun_l9_n919(x)
+ else
+ fun_l9_n620(x)
+ end
+end
+
+def fun_l8_n893(x)
+ if (x < 1)
+ fun_l9_n360(x)
+ else
+ fun_l9_n189(x)
+ end
+end
+
+def fun_l8_n894(x)
+ if (x < 1)
+ fun_l9_n736(x)
+ else
+ fun_l9_n949(x)
+ end
+end
+
+def fun_l8_n895(x)
+ if (x < 1)
+ fun_l9_n56(x)
+ else
+ fun_l9_n826(x)
+ end
+end
+
+def fun_l8_n896(x)
+ if (x < 1)
+ fun_l9_n159(x)
+ else
+ fun_l9_n523(x)
+ end
+end
+
+def fun_l8_n897(x)
+ if (x < 1)
+ fun_l9_n630(x)
+ else
+ fun_l9_n601(x)
+ end
+end
+
+def fun_l8_n898(x)
+ if (x < 1)
+ fun_l9_n434(x)
+ else
+ fun_l9_n919(x)
+ end
+end
+
+def fun_l8_n899(x)
+ if (x < 1)
+ fun_l9_n872(x)
+ else
+ fun_l9_n652(x)
+ end
+end
+
+def fun_l8_n900(x)
+ if (x < 1)
+ fun_l9_n550(x)
+ else
+ fun_l9_n124(x)
+ end
+end
+
+def fun_l8_n901(x)
+ if (x < 1)
+ fun_l9_n56(x)
+ else
+ fun_l9_n422(x)
+ end
+end
+
+def fun_l8_n902(x)
+ if (x < 1)
+ fun_l9_n297(x)
+ else
+ fun_l9_n281(x)
+ end
+end
+
+def fun_l8_n903(x)
+ if (x < 1)
+ fun_l9_n764(x)
+ else
+ fun_l9_n942(x)
+ end
+end
+
+def fun_l8_n904(x)
+ if (x < 1)
+ fun_l9_n700(x)
+ else
+ fun_l9_n525(x)
+ end
+end
+
+def fun_l8_n905(x)
+ if (x < 1)
+ fun_l9_n370(x)
+ else
+ fun_l9_n871(x)
+ end
+end
+
+def fun_l8_n906(x)
+ if (x < 1)
+ fun_l9_n385(x)
+ else
+ fun_l9_n999(x)
+ end
+end
+
+def fun_l8_n907(x)
+ if (x < 1)
+ fun_l9_n63(x)
+ else
+ fun_l9_n137(x)
+ end
+end
+
+def fun_l8_n908(x)
+ if (x < 1)
+ fun_l9_n793(x)
+ else
+ fun_l9_n720(x)
+ end
+end
+
+def fun_l8_n909(x)
+ if (x < 1)
+ fun_l9_n141(x)
+ else
+ fun_l9_n838(x)
+ end
+end
+
+def fun_l8_n910(x)
+ if (x < 1)
+ fun_l9_n764(x)
+ else
+ fun_l9_n334(x)
+ end
+end
+
+def fun_l8_n911(x)
+ if (x < 1)
+ fun_l9_n429(x)
+ else
+ fun_l9_n309(x)
+ end
+end
+
+def fun_l8_n912(x)
+ if (x < 1)
+ fun_l9_n852(x)
+ else
+ fun_l9_n662(x)
+ end
+end
+
+def fun_l8_n913(x)
+ if (x < 1)
+ fun_l9_n467(x)
+ else
+ fun_l9_n817(x)
+ end
+end
+
+def fun_l8_n914(x)
+ if (x < 1)
+ fun_l9_n318(x)
+ else
+ fun_l9_n942(x)
+ end
+end
+
+def fun_l8_n915(x)
+ if (x < 1)
+ fun_l9_n442(x)
+ else
+ fun_l9_n987(x)
+ end
+end
+
+def fun_l8_n916(x)
+ if (x < 1)
+ fun_l9_n657(x)
+ else
+ fun_l9_n736(x)
+ end
+end
+
+def fun_l8_n917(x)
+ if (x < 1)
+ fun_l9_n839(x)
+ else
+ fun_l9_n897(x)
+ end
+end
+
+def fun_l8_n918(x)
+ if (x < 1)
+ fun_l9_n659(x)
+ else
+ fun_l9_n766(x)
+ end
+end
+
+def fun_l8_n919(x)
+ if (x < 1)
+ fun_l9_n523(x)
+ else
+ fun_l9_n54(x)
+ end
+end
+
+def fun_l8_n920(x)
+ if (x < 1)
+ fun_l9_n298(x)
+ else
+ fun_l9_n169(x)
+ end
+end
+
+def fun_l8_n921(x)
+ if (x < 1)
+ fun_l9_n752(x)
+ else
+ fun_l9_n281(x)
+ end
+end
+
+def fun_l8_n922(x)
+ if (x < 1)
+ fun_l9_n703(x)
+ else
+ fun_l9_n874(x)
+ end
+end
+
+def fun_l8_n923(x)
+ if (x < 1)
+ fun_l9_n218(x)
+ else
+ fun_l9_n880(x)
+ end
+end
+
+def fun_l8_n924(x)
+ if (x < 1)
+ fun_l9_n699(x)
+ else
+ fun_l9_n357(x)
+ end
+end
+
+def fun_l8_n925(x)
+ if (x < 1)
+ fun_l9_n279(x)
+ else
+ fun_l9_n300(x)
+ end
+end
+
+def fun_l8_n926(x)
+ if (x < 1)
+ fun_l9_n955(x)
+ else
+ fun_l9_n522(x)
+ end
+end
+
+def fun_l8_n927(x)
+ if (x < 1)
+ fun_l9_n943(x)
+ else
+ fun_l9_n597(x)
+ end
+end
+
+def fun_l8_n928(x)
+ if (x < 1)
+ fun_l9_n828(x)
+ else
+ fun_l9_n658(x)
+ end
+end
+
+def fun_l8_n929(x)
+ if (x < 1)
+ fun_l9_n160(x)
+ else
+ fun_l9_n536(x)
+ end
+end
+
+def fun_l8_n930(x)
+ if (x < 1)
+ fun_l9_n422(x)
+ else
+ fun_l9_n848(x)
+ end
+end
+
+def fun_l8_n931(x)
+ if (x < 1)
+ fun_l9_n66(x)
+ else
+ fun_l9_n298(x)
+ end
+end
+
+def fun_l8_n932(x)
+ if (x < 1)
+ fun_l9_n871(x)
+ else
+ fun_l9_n443(x)
+ end
+end
+
+def fun_l8_n933(x)
+ if (x < 1)
+ fun_l9_n876(x)
+ else
+ fun_l9_n459(x)
+ end
+end
+
+def fun_l8_n934(x)
+ if (x < 1)
+ fun_l9_n319(x)
+ else
+ fun_l9_n921(x)
+ end
+end
+
+def fun_l8_n935(x)
+ if (x < 1)
+ fun_l9_n741(x)
+ else
+ fun_l9_n659(x)
+ end
+end
+
+def fun_l8_n936(x)
+ if (x < 1)
+ fun_l9_n713(x)
+ else
+ fun_l9_n800(x)
+ end
+end
+
+def fun_l8_n937(x)
+ if (x < 1)
+ fun_l9_n945(x)
+ else
+ fun_l9_n548(x)
+ end
+end
+
+def fun_l8_n938(x)
+ if (x < 1)
+ fun_l9_n732(x)
+ else
+ fun_l9_n10(x)
+ end
+end
+
+def fun_l8_n939(x)
+ if (x < 1)
+ fun_l9_n5(x)
+ else
+ fun_l9_n864(x)
+ end
+end
+
+def fun_l8_n940(x)
+ if (x < 1)
+ fun_l9_n353(x)
+ else
+ fun_l9_n46(x)
+ end
+end
+
+def fun_l8_n941(x)
+ if (x < 1)
+ fun_l9_n622(x)
+ else
+ fun_l9_n430(x)
+ end
+end
+
+def fun_l8_n942(x)
+ if (x < 1)
+ fun_l9_n120(x)
+ else
+ fun_l9_n265(x)
+ end
+end
+
+def fun_l8_n943(x)
+ if (x < 1)
+ fun_l9_n272(x)
+ else
+ fun_l9_n290(x)
+ end
+end
+
+def fun_l8_n944(x)
+ if (x < 1)
+ fun_l9_n527(x)
+ else
+ fun_l9_n830(x)
+ end
+end
+
+def fun_l8_n945(x)
+ if (x < 1)
+ fun_l9_n202(x)
+ else
+ fun_l9_n597(x)
+ end
+end
+
+def fun_l8_n946(x)
+ if (x < 1)
+ fun_l9_n797(x)
+ else
+ fun_l9_n984(x)
+ end
+end
+
+def fun_l8_n947(x)
+ if (x < 1)
+ fun_l9_n3(x)
+ else
+ fun_l9_n173(x)
+ end
+end
+
+def fun_l8_n948(x)
+ if (x < 1)
+ fun_l9_n658(x)
+ else
+ fun_l9_n48(x)
+ end
+end
+
+def fun_l8_n949(x)
+ if (x < 1)
+ fun_l9_n302(x)
+ else
+ fun_l9_n549(x)
+ end
+end
+
+def fun_l8_n950(x)
+ if (x < 1)
+ fun_l9_n450(x)
+ else
+ fun_l9_n442(x)
+ end
+end
+
+def fun_l8_n951(x)
+ if (x < 1)
+ fun_l9_n32(x)
+ else
+ fun_l9_n287(x)
+ end
+end
+
+def fun_l8_n952(x)
+ if (x < 1)
+ fun_l9_n427(x)
+ else
+ fun_l9_n67(x)
+ end
+end
+
+def fun_l8_n953(x)
+ if (x < 1)
+ fun_l9_n153(x)
+ else
+ fun_l9_n748(x)
+ end
+end
+
+def fun_l8_n954(x)
+ if (x < 1)
+ fun_l9_n528(x)
+ else
+ fun_l9_n754(x)
+ end
+end
+
+def fun_l8_n955(x)
+ if (x < 1)
+ fun_l9_n722(x)
+ else
+ fun_l9_n982(x)
+ end
+end
+
+def fun_l8_n956(x)
+ if (x < 1)
+ fun_l9_n857(x)
+ else
+ fun_l9_n286(x)
+ end
+end
+
+def fun_l8_n957(x)
+ if (x < 1)
+ fun_l9_n579(x)
+ else
+ fun_l9_n830(x)
+ end
+end
+
+def fun_l8_n958(x)
+ if (x < 1)
+ fun_l9_n913(x)
+ else
+ fun_l9_n918(x)
+ end
+end
+
+def fun_l8_n959(x)
+ if (x < 1)
+ fun_l9_n575(x)
+ else
+ fun_l9_n441(x)
+ end
+end
+
+def fun_l8_n960(x)
+ if (x < 1)
+ fun_l9_n774(x)
+ else
+ fun_l9_n426(x)
+ end
+end
+
+def fun_l8_n961(x)
+ if (x < 1)
+ fun_l9_n415(x)
+ else
+ fun_l9_n720(x)
+ end
+end
+
+def fun_l8_n962(x)
+ if (x < 1)
+ fun_l9_n187(x)
+ else
+ fun_l9_n101(x)
+ end
+end
+
+def fun_l8_n963(x)
+ if (x < 1)
+ fun_l9_n146(x)
+ else
+ fun_l9_n911(x)
+ end
+end
+
+def fun_l8_n964(x)
+ if (x < 1)
+ fun_l9_n758(x)
+ else
+ fun_l9_n46(x)
+ end
+end
+
+def fun_l8_n965(x)
+ if (x < 1)
+ fun_l9_n316(x)
+ else
+ fun_l9_n874(x)
+ end
+end
+
+def fun_l8_n966(x)
+ if (x < 1)
+ fun_l9_n604(x)
+ else
+ fun_l9_n933(x)
+ end
+end
+
+def fun_l8_n967(x)
+ if (x < 1)
+ fun_l9_n185(x)
+ else
+ fun_l9_n729(x)
+ end
+end
+
+def fun_l8_n968(x)
+ if (x < 1)
+ fun_l9_n735(x)
+ else
+ fun_l9_n347(x)
+ end
+end
+
+def fun_l8_n969(x)
+ if (x < 1)
+ fun_l9_n796(x)
+ else
+ fun_l9_n163(x)
+ end
+end
+
+def fun_l8_n970(x)
+ if (x < 1)
+ fun_l9_n535(x)
+ else
+ fun_l9_n184(x)
+ end
+end
+
+def fun_l8_n971(x)
+ if (x < 1)
+ fun_l9_n919(x)
+ else
+ fun_l9_n838(x)
+ end
+end
+
+def fun_l8_n972(x)
+ if (x < 1)
+ fun_l9_n296(x)
+ else
+ fun_l9_n822(x)
+ end
+end
+
+def fun_l8_n973(x)
+ if (x < 1)
+ fun_l9_n503(x)
+ else
+ fun_l9_n979(x)
+ end
+end
+
+def fun_l8_n974(x)
+ if (x < 1)
+ fun_l9_n598(x)
+ else
+ fun_l9_n916(x)
+ end
+end
+
+def fun_l8_n975(x)
+ if (x < 1)
+ fun_l9_n119(x)
+ else
+ fun_l9_n138(x)
+ end
+end
+
+def fun_l8_n976(x)
+ if (x < 1)
+ fun_l9_n92(x)
+ else
+ fun_l9_n244(x)
+ end
+end
+
+def fun_l8_n977(x)
+ if (x < 1)
+ fun_l9_n250(x)
+ else
+ fun_l9_n496(x)
+ end
+end
+
+def fun_l8_n978(x)
+ if (x < 1)
+ fun_l9_n665(x)
+ else
+ fun_l9_n617(x)
+ end
+end
+
+def fun_l8_n979(x)
+ if (x < 1)
+ fun_l9_n212(x)
+ else
+ fun_l9_n63(x)
+ end
+end
+
+def fun_l8_n980(x)
+ if (x < 1)
+ fun_l9_n304(x)
+ else
+ fun_l9_n373(x)
+ end
+end
+
+def fun_l8_n981(x)
+ if (x < 1)
+ fun_l9_n70(x)
+ else
+ fun_l9_n194(x)
+ end
+end
+
+def fun_l8_n982(x)
+ if (x < 1)
+ fun_l9_n996(x)
+ else
+ fun_l9_n821(x)
+ end
+end
+
+def fun_l8_n983(x)
+ if (x < 1)
+ fun_l9_n109(x)
+ else
+ fun_l9_n926(x)
+ end
+end
+
+def fun_l8_n984(x)
+ if (x < 1)
+ fun_l9_n342(x)
+ else
+ fun_l9_n768(x)
+ end
+end
+
+def fun_l8_n985(x)
+ if (x < 1)
+ fun_l9_n12(x)
+ else
+ fun_l9_n55(x)
+ end
+end
+
+def fun_l8_n986(x)
+ if (x < 1)
+ fun_l9_n663(x)
+ else
+ fun_l9_n963(x)
+ end
+end
+
+def fun_l8_n987(x)
+ if (x < 1)
+ fun_l9_n108(x)
+ else
+ fun_l9_n139(x)
+ end
+end
+
+def fun_l8_n988(x)
+ if (x < 1)
+ fun_l9_n256(x)
+ else
+ fun_l9_n55(x)
+ end
+end
+
+def fun_l8_n989(x)
+ if (x < 1)
+ fun_l9_n261(x)
+ else
+ fun_l9_n962(x)
+ end
+end
+
+def fun_l8_n990(x)
+ if (x < 1)
+ fun_l9_n581(x)
+ else
+ fun_l9_n558(x)
+ end
+end
+
+def fun_l8_n991(x)
+ if (x < 1)
+ fun_l9_n379(x)
+ else
+ fun_l9_n179(x)
+ end
+end
+
+def fun_l8_n992(x)
+ if (x < 1)
+ fun_l9_n256(x)
+ else
+ fun_l9_n158(x)
+ end
+end
+
+def fun_l8_n993(x)
+ if (x < 1)
+ fun_l9_n75(x)
+ else
+ fun_l9_n669(x)
+ end
+end
+
+def fun_l8_n994(x)
+ if (x < 1)
+ fun_l9_n1(x)
+ else
+ fun_l9_n723(x)
+ end
+end
+
+def fun_l8_n995(x)
+ if (x < 1)
+ fun_l9_n52(x)
+ else
+ fun_l9_n706(x)
+ end
+end
+
+def fun_l8_n996(x)
+ if (x < 1)
+ fun_l9_n583(x)
+ else
+ fun_l9_n769(x)
+ end
+end
+
+def fun_l8_n997(x)
+ if (x < 1)
+ fun_l9_n91(x)
+ else
+ fun_l9_n86(x)
+ end
+end
+
+def fun_l8_n998(x)
+ if (x < 1)
+ fun_l9_n605(x)
+ else
+ fun_l9_n106(x)
+ end
+end
+
+def fun_l8_n999(x)
+ if (x < 1)
+ fun_l9_n634(x)
+ else
+ fun_l9_n110(x)
+ end
+end
+
+def fun_l9_n0(x)
+ if (x < 1)
+ fun_l10_n583(x)
+ else
+ fun_l10_n168(x)
+ end
+end
+
+def fun_l9_n1(x)
+ if (x < 1)
+ fun_l10_n523(x)
+ else
+ fun_l10_n486(x)
+ end
+end
+
+def fun_l9_n2(x)
+ if (x < 1)
+ fun_l10_n869(x)
+ else
+ fun_l10_n480(x)
+ end
+end
+
+def fun_l9_n3(x)
+ if (x < 1)
+ fun_l10_n227(x)
+ else
+ fun_l10_n798(x)
+ end
+end
+
+def fun_l9_n4(x)
+ if (x < 1)
+ fun_l10_n8(x)
+ else
+ fun_l10_n984(x)
+ end
+end
+
+def fun_l9_n5(x)
+ if (x < 1)
+ fun_l10_n757(x)
+ else
+ fun_l10_n791(x)
+ end
+end
+
+def fun_l9_n6(x)
+ if (x < 1)
+ fun_l10_n131(x)
+ else
+ fun_l10_n44(x)
+ end
+end
+
+def fun_l9_n7(x)
+ if (x < 1)
+ fun_l10_n292(x)
+ else
+ fun_l10_n793(x)
+ end
+end
+
+def fun_l9_n8(x)
+ if (x < 1)
+ fun_l10_n237(x)
+ else
+ fun_l10_n439(x)
+ end
+end
+
+def fun_l9_n9(x)
+ if (x < 1)
+ fun_l10_n99(x)
+ else
+ fun_l10_n914(x)
+ end
+end
+
+def fun_l9_n10(x)
+ if (x < 1)
+ fun_l10_n702(x)
+ else
+ fun_l10_n983(x)
+ end
+end
+
+def fun_l9_n11(x)
+ if (x < 1)
+ fun_l10_n843(x)
+ else
+ fun_l10_n774(x)
+ end
+end
+
+def fun_l9_n12(x)
+ if (x < 1)
+ fun_l10_n720(x)
+ else
+ fun_l10_n238(x)
+ end
+end
+
+def fun_l9_n13(x)
+ if (x < 1)
+ fun_l10_n339(x)
+ else
+ fun_l10_n613(x)
+ end
+end
+
+def fun_l9_n14(x)
+ if (x < 1)
+ fun_l10_n711(x)
+ else
+ fun_l10_n836(x)
+ end
+end
+
+def fun_l9_n15(x)
+ if (x < 1)
+ fun_l10_n542(x)
+ else
+ fun_l10_n90(x)
+ end
+end
+
+def fun_l9_n16(x)
+ if (x < 1)
+ fun_l10_n931(x)
+ else
+ fun_l10_n1(x)
+ end
+end
+
+def fun_l9_n17(x)
+ if (x < 1)
+ fun_l10_n568(x)
+ else
+ fun_l10_n397(x)
+ end
+end
+
+def fun_l9_n18(x)
+ if (x < 1)
+ fun_l10_n108(x)
+ else
+ fun_l10_n245(x)
+ end
+end
+
+def fun_l9_n19(x)
+ if (x < 1)
+ fun_l10_n413(x)
+ else
+ fun_l10_n644(x)
+ end
+end
+
+def fun_l9_n20(x)
+ if (x < 1)
+ fun_l10_n533(x)
+ else
+ fun_l10_n345(x)
+ end
+end
+
+def fun_l9_n21(x)
+ if (x < 1)
+ fun_l10_n84(x)
+ else
+ fun_l10_n821(x)
+ end
+end
+
+def fun_l9_n22(x)
+ if (x < 1)
+ fun_l10_n391(x)
+ else
+ fun_l10_n154(x)
+ end
+end
+
+def fun_l9_n23(x)
+ if (x < 1)
+ fun_l10_n591(x)
+ else
+ fun_l10_n443(x)
+ end
+end
+
+def fun_l9_n24(x)
+ if (x < 1)
+ fun_l10_n105(x)
+ else
+ fun_l10_n652(x)
+ end
+end
+
+def fun_l9_n25(x)
+ if (x < 1)
+ fun_l10_n495(x)
+ else
+ fun_l10_n185(x)
+ end
+end
+
+def fun_l9_n26(x)
+ if (x < 1)
+ fun_l10_n160(x)
+ else
+ fun_l10_n77(x)
+ end
+end
+
+def fun_l9_n27(x)
+ if (x < 1)
+ fun_l10_n307(x)
+ else
+ fun_l10_n813(x)
+ end
+end
+
+def fun_l9_n28(x)
+ if (x < 1)
+ fun_l10_n651(x)
+ else
+ fun_l10_n198(x)
+ end
+end
+
+def fun_l9_n29(x)
+ if (x < 1)
+ fun_l10_n689(x)
+ else
+ fun_l10_n995(x)
+ end
+end
+
+def fun_l9_n30(x)
+ if (x < 1)
+ fun_l10_n52(x)
+ else
+ fun_l10_n846(x)
+ end
+end
+
+def fun_l9_n31(x)
+ if (x < 1)
+ fun_l10_n151(x)
+ else
+ fun_l10_n758(x)
+ end
+end
+
+def fun_l9_n32(x)
+ if (x < 1)
+ fun_l10_n675(x)
+ else
+ fun_l10_n240(x)
+ end
+end
+
+def fun_l9_n33(x)
+ if (x < 1)
+ fun_l10_n660(x)
+ else
+ fun_l10_n826(x)
+ end
+end
+
+def fun_l9_n34(x)
+ if (x < 1)
+ fun_l10_n858(x)
+ else
+ fun_l10_n120(x)
+ end
+end
+
+def fun_l9_n35(x)
+ if (x < 1)
+ fun_l10_n28(x)
+ else
+ fun_l10_n870(x)
+ end
+end
+
+def fun_l9_n36(x)
+ if (x < 1)
+ fun_l10_n275(x)
+ else
+ fun_l10_n376(x)
+ end
+end
+
+def fun_l9_n37(x)
+ if (x < 1)
+ fun_l10_n852(x)
+ else
+ fun_l10_n413(x)
+ end
+end
+
+def fun_l9_n38(x)
+ if (x < 1)
+ fun_l10_n608(x)
+ else
+ fun_l10_n740(x)
+ end
+end
+
+def fun_l9_n39(x)
+ if (x < 1)
+ fun_l10_n856(x)
+ else
+ fun_l10_n543(x)
+ end
+end
+
+def fun_l9_n40(x)
+ if (x < 1)
+ fun_l10_n979(x)
+ else
+ fun_l10_n37(x)
+ end
+end
+
+def fun_l9_n41(x)
+ if (x < 1)
+ fun_l10_n752(x)
+ else
+ fun_l10_n94(x)
+ end
+end
+
+def fun_l9_n42(x)
+ if (x < 1)
+ fun_l10_n653(x)
+ else
+ fun_l10_n64(x)
+ end
+end
+
+def fun_l9_n43(x)
+ if (x < 1)
+ fun_l10_n860(x)
+ else
+ fun_l10_n658(x)
+ end
+end
+
+def fun_l9_n44(x)
+ if (x < 1)
+ fun_l10_n952(x)
+ else
+ fun_l10_n413(x)
+ end
+end
+
+def fun_l9_n45(x)
+ if (x < 1)
+ fun_l10_n70(x)
+ else
+ fun_l10_n297(x)
+ end
+end
+
+def fun_l9_n46(x)
+ if (x < 1)
+ fun_l10_n100(x)
+ else
+ fun_l10_n154(x)
+ end
+end
+
+def fun_l9_n47(x)
+ if (x < 1)
+ fun_l10_n746(x)
+ else
+ fun_l10_n937(x)
+ end
+end
+
+def fun_l9_n48(x)
+ if (x < 1)
+ fun_l10_n341(x)
+ else
+ fun_l10_n200(x)
+ end
+end
+
+def fun_l9_n49(x)
+ if (x < 1)
+ fun_l10_n317(x)
+ else
+ fun_l10_n607(x)
+ end
+end
+
+def fun_l9_n50(x)
+ if (x < 1)
+ fun_l10_n194(x)
+ else
+ fun_l10_n499(x)
+ end
+end
+
+def fun_l9_n51(x)
+ if (x < 1)
+ fun_l10_n936(x)
+ else
+ fun_l10_n387(x)
+ end
+end
+
+def fun_l9_n52(x)
+ if (x < 1)
+ fun_l10_n660(x)
+ else
+ fun_l10_n734(x)
+ end
+end
+
+def fun_l9_n53(x)
+ if (x < 1)
+ fun_l10_n523(x)
+ else
+ fun_l10_n532(x)
+ end
+end
+
+def fun_l9_n54(x)
+ if (x < 1)
+ fun_l10_n847(x)
+ else
+ fun_l10_n586(x)
+ end
+end
+
+def fun_l9_n55(x)
+ if (x < 1)
+ fun_l10_n378(x)
+ else
+ fun_l10_n226(x)
+ end
+end
+
+def fun_l9_n56(x)
+ if (x < 1)
+ fun_l10_n55(x)
+ else
+ fun_l10_n823(x)
+ end
+end
+
+def fun_l9_n57(x)
+ if (x < 1)
+ fun_l10_n931(x)
+ else
+ fun_l10_n268(x)
+ end
+end
+
+def fun_l9_n58(x)
+ if (x < 1)
+ fun_l10_n603(x)
+ else
+ fun_l10_n746(x)
+ end
+end
+
+def fun_l9_n59(x)
+ if (x < 1)
+ fun_l10_n174(x)
+ else
+ fun_l10_n196(x)
+ end
+end
+
+def fun_l9_n60(x)
+ if (x < 1)
+ fun_l10_n45(x)
+ else
+ fun_l10_n907(x)
+ end
+end
+
+def fun_l9_n61(x)
+ if (x < 1)
+ fun_l10_n282(x)
+ else
+ fun_l10_n478(x)
+ end
+end
+
+def fun_l9_n62(x)
+ if (x < 1)
+ fun_l10_n558(x)
+ else
+ fun_l10_n354(x)
+ end
+end
+
+def fun_l9_n63(x)
+ if (x < 1)
+ fun_l10_n729(x)
+ else
+ fun_l10_n275(x)
+ end
+end
+
+def fun_l9_n64(x)
+ if (x < 1)
+ fun_l10_n804(x)
+ else
+ fun_l10_n873(x)
+ end
+end
+
+def fun_l9_n65(x)
+ if (x < 1)
+ fun_l10_n901(x)
+ else
+ fun_l10_n684(x)
+ end
+end
+
+def fun_l9_n66(x)
+ if (x < 1)
+ fun_l10_n542(x)
+ else
+ fun_l10_n668(x)
+ end
+end
+
+def fun_l9_n67(x)
+ if (x < 1)
+ fun_l10_n112(x)
+ else
+ fun_l10_n48(x)
+ end
+end
+
+def fun_l9_n68(x)
+ if (x < 1)
+ fun_l10_n78(x)
+ else
+ fun_l10_n50(x)
+ end
+end
+
+def fun_l9_n69(x)
+ if (x < 1)
+ fun_l10_n595(x)
+ else
+ fun_l10_n163(x)
+ end
+end
+
+def fun_l9_n70(x)
+ if (x < 1)
+ fun_l10_n839(x)
+ else
+ fun_l10_n811(x)
+ end
+end
+
+def fun_l9_n71(x)
+ if (x < 1)
+ fun_l10_n704(x)
+ else
+ fun_l10_n81(x)
+ end
+end
+
+def fun_l9_n72(x)
+ if (x < 1)
+ fun_l10_n708(x)
+ else
+ fun_l10_n711(x)
+ end
+end
+
+def fun_l9_n73(x)
+ if (x < 1)
+ fun_l10_n948(x)
+ else
+ fun_l10_n397(x)
+ end
+end
+
+def fun_l9_n74(x)
+ if (x < 1)
+ fun_l10_n168(x)
+ else
+ fun_l10_n415(x)
+ end
+end
+
+def fun_l9_n75(x)
+ if (x < 1)
+ fun_l10_n191(x)
+ else
+ fun_l10_n547(x)
+ end
+end
+
+def fun_l9_n76(x)
+ if (x < 1)
+ fun_l10_n163(x)
+ else
+ fun_l10_n246(x)
+ end
+end
+
+def fun_l9_n77(x)
+ if (x < 1)
+ fun_l10_n948(x)
+ else
+ fun_l10_n80(x)
+ end
+end
+
+def fun_l9_n78(x)
+ if (x < 1)
+ fun_l10_n178(x)
+ else
+ fun_l10_n698(x)
+ end
+end
+
+def fun_l9_n79(x)
+ if (x < 1)
+ fun_l10_n150(x)
+ else
+ fun_l10_n545(x)
+ end
+end
+
+def fun_l9_n80(x)
+ if (x < 1)
+ fun_l10_n808(x)
+ else
+ fun_l10_n668(x)
+ end
+end
+
+def fun_l9_n81(x)
+ if (x < 1)
+ fun_l10_n597(x)
+ else
+ fun_l10_n908(x)
+ end
+end
+
+def fun_l9_n82(x)
+ if (x < 1)
+ fun_l10_n868(x)
+ else
+ fun_l10_n831(x)
+ end
+end
+
+def fun_l9_n83(x)
+ if (x < 1)
+ fun_l10_n432(x)
+ else
+ fun_l10_n281(x)
+ end
+end
+
+def fun_l9_n84(x)
+ if (x < 1)
+ fun_l10_n906(x)
+ else
+ fun_l10_n838(x)
+ end
+end
+
+def fun_l9_n85(x)
+ if (x < 1)
+ fun_l10_n828(x)
+ else
+ fun_l10_n876(x)
+ end
+end
+
+def fun_l9_n86(x)
+ if (x < 1)
+ fun_l10_n537(x)
+ else
+ fun_l10_n283(x)
+ end
+end
+
+def fun_l9_n87(x)
+ if (x < 1)
+ fun_l10_n108(x)
+ else
+ fun_l10_n157(x)
+ end
+end
+
+def fun_l9_n88(x)
+ if (x < 1)
+ fun_l10_n576(x)
+ else
+ fun_l10_n448(x)
+ end
+end
+
+def fun_l9_n89(x)
+ if (x < 1)
+ fun_l10_n633(x)
+ else
+ fun_l10_n43(x)
+ end
+end
+
+def fun_l9_n90(x)
+ if (x < 1)
+ fun_l10_n114(x)
+ else
+ fun_l10_n44(x)
+ end
+end
+
+def fun_l9_n91(x)
+ if (x < 1)
+ fun_l10_n656(x)
+ else
+ fun_l10_n586(x)
+ end
+end
+
+def fun_l9_n92(x)
+ if (x < 1)
+ fun_l10_n366(x)
+ else
+ fun_l10_n613(x)
+ end
+end
+
+def fun_l9_n93(x)
+ if (x < 1)
+ fun_l10_n174(x)
+ else
+ fun_l10_n250(x)
+ end
+end
+
+def fun_l9_n94(x)
+ if (x < 1)
+ fun_l10_n911(x)
+ else
+ fun_l10_n877(x)
+ end
+end
+
+def fun_l9_n95(x)
+ if (x < 1)
+ fun_l10_n940(x)
+ else
+ fun_l10_n334(x)
+ end
+end
+
+def fun_l9_n96(x)
+ if (x < 1)
+ fun_l10_n473(x)
+ else
+ fun_l10_n914(x)
+ end
+end
+
+def fun_l9_n97(x)
+ if (x < 1)
+ fun_l10_n550(x)
+ else
+ fun_l10_n80(x)
+ end
+end
+
+def fun_l9_n98(x)
+ if (x < 1)
+ fun_l10_n46(x)
+ else
+ fun_l10_n716(x)
+ end
+end
+
+def fun_l9_n99(x)
+ if (x < 1)
+ fun_l10_n294(x)
+ else
+ fun_l10_n615(x)
+ end
+end
+
+def fun_l9_n100(x)
+ if (x < 1)
+ fun_l10_n286(x)
+ else
+ fun_l10_n120(x)
+ end
+end
+
+def fun_l9_n101(x)
+ if (x < 1)
+ fun_l10_n674(x)
+ else
+ fun_l10_n498(x)
+ end
+end
+
+def fun_l9_n102(x)
+ if (x < 1)
+ fun_l10_n700(x)
+ else
+ fun_l10_n35(x)
+ end
+end
+
+def fun_l9_n103(x)
+ if (x < 1)
+ fun_l10_n225(x)
+ else
+ fun_l10_n206(x)
+ end
+end
+
+def fun_l9_n104(x)
+ if (x < 1)
+ fun_l10_n594(x)
+ else
+ fun_l10_n679(x)
+ end
+end
+
+def fun_l9_n105(x)
+ if (x < 1)
+ fun_l10_n148(x)
+ else
+ fun_l10_n449(x)
+ end
+end
+
+def fun_l9_n106(x)
+ if (x < 1)
+ fun_l10_n125(x)
+ else
+ fun_l10_n761(x)
+ end
+end
+
+def fun_l9_n107(x)
+ if (x < 1)
+ fun_l10_n642(x)
+ else
+ fun_l10_n988(x)
+ end
+end
+
+def fun_l9_n108(x)
+ if (x < 1)
+ fun_l10_n952(x)
+ else
+ fun_l10_n436(x)
+ end
+end
+
+def fun_l9_n109(x)
+ if (x < 1)
+ fun_l10_n955(x)
+ else
+ fun_l10_n380(x)
+ end
+end
+
+def fun_l9_n110(x)
+ if (x < 1)
+ fun_l10_n813(x)
+ else
+ fun_l10_n837(x)
+ end
+end
+
+def fun_l9_n111(x)
+ if (x < 1)
+ fun_l10_n471(x)
+ else
+ fun_l10_n69(x)
+ end
+end
+
+def fun_l9_n112(x)
+ if (x < 1)
+ fun_l10_n614(x)
+ else
+ fun_l10_n665(x)
+ end
+end
+
+def fun_l9_n113(x)
+ if (x < 1)
+ fun_l10_n485(x)
+ else
+ fun_l10_n276(x)
+ end
+end
+
+def fun_l9_n114(x)
+ if (x < 1)
+ fun_l10_n171(x)
+ else
+ fun_l10_n457(x)
+ end
+end
+
+def fun_l9_n115(x)
+ if (x < 1)
+ fun_l10_n106(x)
+ else
+ fun_l10_n555(x)
+ end
+end
+
+def fun_l9_n116(x)
+ if (x < 1)
+ fun_l10_n231(x)
+ else
+ fun_l10_n163(x)
+ end
+end
+
+def fun_l9_n117(x)
+ if (x < 1)
+ fun_l10_n569(x)
+ else
+ fun_l10_n345(x)
+ end
+end
+
+def fun_l9_n118(x)
+ if (x < 1)
+ fun_l10_n946(x)
+ else
+ fun_l10_n174(x)
+ end
+end
+
+def fun_l9_n119(x)
+ if (x < 1)
+ fun_l10_n716(x)
+ else
+ fun_l10_n882(x)
+ end
+end
+
+def fun_l9_n120(x)
+ if (x < 1)
+ fun_l10_n628(x)
+ else
+ fun_l10_n838(x)
+ end
+end
+
+def fun_l9_n121(x)
+ if (x < 1)
+ fun_l10_n699(x)
+ else
+ fun_l10_n384(x)
+ end
+end
+
+def fun_l9_n122(x)
+ if (x < 1)
+ fun_l10_n180(x)
+ else
+ fun_l10_n98(x)
+ end
+end
+
+def fun_l9_n123(x)
+ if (x < 1)
+ fun_l10_n177(x)
+ else
+ fun_l10_n635(x)
+ end
+end
+
+def fun_l9_n124(x)
+ if (x < 1)
+ fun_l10_n398(x)
+ else
+ fun_l10_n121(x)
+ end
+end
+
+def fun_l9_n125(x)
+ if (x < 1)
+ fun_l10_n866(x)
+ else
+ fun_l10_n595(x)
+ end
+end
+
+def fun_l9_n126(x)
+ if (x < 1)
+ fun_l10_n9(x)
+ else
+ fun_l10_n901(x)
+ end
+end
+
+def fun_l9_n127(x)
+ if (x < 1)
+ fun_l10_n579(x)
+ else
+ fun_l10_n640(x)
+ end
+end
+
+def fun_l9_n128(x)
+ if (x < 1)
+ fun_l10_n884(x)
+ else
+ fun_l10_n327(x)
+ end
+end
+
+def fun_l9_n129(x)
+ if (x < 1)
+ fun_l10_n553(x)
+ else
+ fun_l10_n73(x)
+ end
+end
+
+def fun_l9_n130(x)
+ if (x < 1)
+ fun_l10_n372(x)
+ else
+ fun_l10_n450(x)
+ end
+end
+
+def fun_l9_n131(x)
+ if (x < 1)
+ fun_l10_n748(x)
+ else
+ fun_l10_n534(x)
+ end
+end
+
+def fun_l9_n132(x)
+ if (x < 1)
+ fun_l10_n529(x)
+ else
+ fun_l10_n689(x)
+ end
+end
+
+def fun_l9_n133(x)
+ if (x < 1)
+ fun_l10_n24(x)
+ else
+ fun_l10_n677(x)
+ end
+end
+
+def fun_l9_n134(x)
+ if (x < 1)
+ fun_l10_n4(x)
+ else
+ fun_l10_n482(x)
+ end
+end
+
+def fun_l9_n135(x)
+ if (x < 1)
+ fun_l10_n314(x)
+ else
+ fun_l10_n509(x)
+ end
+end
+
+def fun_l9_n136(x)
+ if (x < 1)
+ fun_l10_n160(x)
+ else
+ fun_l10_n342(x)
+ end
+end
+
+def fun_l9_n137(x)
+ if (x < 1)
+ fun_l10_n289(x)
+ else
+ fun_l10_n910(x)
+ end
+end
+
+def fun_l9_n138(x)
+ if (x < 1)
+ fun_l10_n926(x)
+ else
+ fun_l10_n752(x)
+ end
+end
+
+def fun_l9_n139(x)
+ if (x < 1)
+ fun_l10_n303(x)
+ else
+ fun_l10_n399(x)
+ end
+end
+
+def fun_l9_n140(x)
+ if (x < 1)
+ fun_l10_n11(x)
+ else
+ fun_l10_n241(x)
+ end
+end
+
+def fun_l9_n141(x)
+ if (x < 1)
+ fun_l10_n528(x)
+ else
+ fun_l10_n727(x)
+ end
+end
+
+def fun_l9_n142(x)
+ if (x < 1)
+ fun_l10_n572(x)
+ else
+ fun_l10_n132(x)
+ end
+end
+
+def fun_l9_n143(x)
+ if (x < 1)
+ fun_l10_n858(x)
+ else
+ fun_l10_n712(x)
+ end
+end
+
+def fun_l9_n144(x)
+ if (x < 1)
+ fun_l10_n662(x)
+ else
+ fun_l10_n128(x)
+ end
+end
+
+def fun_l9_n145(x)
+ if (x < 1)
+ fun_l10_n668(x)
+ else
+ fun_l10_n483(x)
+ end
+end
+
+def fun_l9_n146(x)
+ if (x < 1)
+ fun_l10_n401(x)
+ else
+ fun_l10_n330(x)
+ end
+end
+
+def fun_l9_n147(x)
+ if (x < 1)
+ fun_l10_n103(x)
+ else
+ fun_l10_n859(x)
+ end
+end
+
+def fun_l9_n148(x)
+ if (x < 1)
+ fun_l10_n129(x)
+ else
+ fun_l10_n628(x)
+ end
+end
+
+def fun_l9_n149(x)
+ if (x < 1)
+ fun_l10_n898(x)
+ else
+ fun_l10_n984(x)
+ end
+end
+
+def fun_l9_n150(x)
+ if (x < 1)
+ fun_l10_n574(x)
+ else
+ fun_l10_n59(x)
+ end
+end
+
+def fun_l9_n151(x)
+ if (x < 1)
+ fun_l10_n759(x)
+ else
+ fun_l10_n782(x)
+ end
+end
+
+def fun_l9_n152(x)
+ if (x < 1)
+ fun_l10_n588(x)
+ else
+ fun_l10_n383(x)
+ end
+end
+
+def fun_l9_n153(x)
+ if (x < 1)
+ fun_l10_n889(x)
+ else
+ fun_l10_n628(x)
+ end
+end
+
+def fun_l9_n154(x)
+ if (x < 1)
+ fun_l10_n376(x)
+ else
+ fun_l10_n442(x)
+ end
+end
+
+def fun_l9_n155(x)
+ if (x < 1)
+ fun_l10_n497(x)
+ else
+ fun_l10_n269(x)
+ end
+end
+
+def fun_l9_n156(x)
+ if (x < 1)
+ fun_l10_n976(x)
+ else
+ fun_l10_n829(x)
+ end
+end
+
+def fun_l9_n157(x)
+ if (x < 1)
+ fun_l10_n366(x)
+ else
+ fun_l10_n416(x)
+ end
+end
+
+def fun_l9_n158(x)
+ if (x < 1)
+ fun_l10_n712(x)
+ else
+ fun_l10_n19(x)
+ end
+end
+
+def fun_l9_n159(x)
+ if (x < 1)
+ fun_l10_n151(x)
+ else
+ fun_l10_n129(x)
+ end
+end
+
+def fun_l9_n160(x)
+ if (x < 1)
+ fun_l10_n621(x)
+ else
+ fun_l10_n385(x)
+ end
+end
+
+def fun_l9_n161(x)
+ if (x < 1)
+ fun_l10_n626(x)
+ else
+ fun_l10_n686(x)
+ end
+end
+
+def fun_l9_n162(x)
+ if (x < 1)
+ fun_l10_n165(x)
+ else
+ fun_l10_n922(x)
+ end
+end
+
+def fun_l9_n163(x)
+ if (x < 1)
+ fun_l10_n670(x)
+ else
+ fun_l10_n889(x)
+ end
+end
+
+def fun_l9_n164(x)
+ if (x < 1)
+ fun_l10_n346(x)
+ else
+ fun_l10_n292(x)
+ end
+end
+
+def fun_l9_n165(x)
+ if (x < 1)
+ fun_l10_n812(x)
+ else
+ fun_l10_n799(x)
+ end
+end
+
+def fun_l9_n166(x)
+ if (x < 1)
+ fun_l10_n21(x)
+ else
+ fun_l10_n641(x)
+ end
+end
+
+def fun_l9_n167(x)
+ if (x < 1)
+ fun_l10_n268(x)
+ else
+ fun_l10_n943(x)
+ end
+end
+
+def fun_l9_n168(x)
+ if (x < 1)
+ fun_l10_n349(x)
+ else
+ fun_l10_n620(x)
+ end
+end
+
+def fun_l9_n169(x)
+ if (x < 1)
+ fun_l10_n894(x)
+ else
+ fun_l10_n625(x)
+ end
+end
+
+def fun_l9_n170(x)
+ if (x < 1)
+ fun_l10_n732(x)
+ else
+ fun_l10_n518(x)
+ end
+end
+
+def fun_l9_n171(x)
+ if (x < 1)
+ fun_l10_n272(x)
+ else
+ fun_l10_n148(x)
+ end
+end
+
+def fun_l9_n172(x)
+ if (x < 1)
+ fun_l10_n701(x)
+ else
+ fun_l10_n227(x)
+ end
+end
+
+def fun_l9_n173(x)
+ if (x < 1)
+ fun_l10_n833(x)
+ else
+ fun_l10_n853(x)
+ end
+end
+
+def fun_l9_n174(x)
+ if (x < 1)
+ fun_l10_n643(x)
+ else
+ fun_l10_n314(x)
+ end
+end
+
+def fun_l9_n175(x)
+ if (x < 1)
+ fun_l10_n217(x)
+ else
+ fun_l10_n64(x)
+ end
+end
+
+def fun_l9_n176(x)
+ if (x < 1)
+ fun_l10_n204(x)
+ else
+ fun_l10_n990(x)
+ end
+end
+
+def fun_l9_n177(x)
+ if (x < 1)
+ fun_l10_n130(x)
+ else
+ fun_l10_n829(x)
+ end
+end
+
+def fun_l9_n178(x)
+ if (x < 1)
+ fun_l10_n260(x)
+ else
+ fun_l10_n881(x)
+ end
+end
+
+def fun_l9_n179(x)
+ if (x < 1)
+ fun_l10_n728(x)
+ else
+ fun_l10_n491(x)
+ end
+end
+
+def fun_l9_n180(x)
+ if (x < 1)
+ fun_l10_n456(x)
+ else
+ fun_l10_n221(x)
+ end
+end
+
+def fun_l9_n181(x)
+ if (x < 1)
+ fun_l10_n634(x)
+ else
+ fun_l10_n166(x)
+ end
+end
+
+def fun_l9_n182(x)
+ if (x < 1)
+ fun_l10_n427(x)
+ else
+ fun_l10_n452(x)
+ end
+end
+
+def fun_l9_n183(x)
+ if (x < 1)
+ fun_l10_n754(x)
+ else
+ fun_l10_n54(x)
+ end
+end
+
+def fun_l9_n184(x)
+ if (x < 1)
+ fun_l10_n137(x)
+ else
+ fun_l10_n155(x)
+ end
+end
+
+def fun_l9_n185(x)
+ if (x < 1)
+ fun_l10_n611(x)
+ else
+ fun_l10_n573(x)
+ end
+end
+
+def fun_l9_n186(x)
+ if (x < 1)
+ fun_l10_n694(x)
+ else
+ fun_l10_n890(x)
+ end
+end
+
+def fun_l9_n187(x)
+ if (x < 1)
+ fun_l10_n533(x)
+ else
+ fun_l10_n102(x)
+ end
+end
+
+def fun_l9_n188(x)
+ if (x < 1)
+ fun_l10_n400(x)
+ else
+ fun_l10_n838(x)
+ end
+end
+
+def fun_l9_n189(x)
+ if (x < 1)
+ fun_l10_n783(x)
+ else
+ fun_l10_n620(x)
+ end
+end
+
+def fun_l9_n190(x)
+ if (x < 1)
+ fun_l10_n209(x)
+ else
+ fun_l10_n777(x)
+ end
+end
+
+def fun_l9_n191(x)
+ if (x < 1)
+ fun_l10_n760(x)
+ else
+ fun_l10_n299(x)
+ end
+end
+
+def fun_l9_n192(x)
+ if (x < 1)
+ fun_l10_n306(x)
+ else
+ fun_l10_n509(x)
+ end
+end
+
+def fun_l9_n193(x)
+ if (x < 1)
+ fun_l10_n842(x)
+ else
+ fun_l10_n0(x)
+ end
+end
+
+def fun_l9_n194(x)
+ if (x < 1)
+ fun_l10_n620(x)
+ else
+ fun_l10_n757(x)
+ end
+end
+
+def fun_l9_n195(x)
+ if (x < 1)
+ fun_l10_n944(x)
+ else
+ fun_l10_n257(x)
+ end
+end
+
+def fun_l9_n196(x)
+ if (x < 1)
+ fun_l10_n222(x)
+ else
+ fun_l10_n11(x)
+ end
+end
+
+def fun_l9_n197(x)
+ if (x < 1)
+ fun_l10_n419(x)
+ else
+ fun_l10_n704(x)
+ end
+end
+
+def fun_l9_n198(x)
+ if (x < 1)
+ fun_l10_n656(x)
+ else
+ fun_l10_n341(x)
+ end
+end
+
+def fun_l9_n199(x)
+ if (x < 1)
+ fun_l10_n129(x)
+ else
+ fun_l10_n815(x)
+ end
+end
+
+def fun_l9_n200(x)
+ if (x < 1)
+ fun_l10_n768(x)
+ else
+ fun_l10_n924(x)
+ end
+end
+
+def fun_l9_n201(x)
+ if (x < 1)
+ fun_l10_n439(x)
+ else
+ fun_l10_n361(x)
+ end
+end
+
+def fun_l9_n202(x)
+ if (x < 1)
+ fun_l10_n421(x)
+ else
+ fun_l10_n937(x)
+ end
+end
+
+def fun_l9_n203(x)
+ if (x < 1)
+ fun_l10_n323(x)
+ else
+ fun_l10_n262(x)
+ end
+end
+
+def fun_l9_n204(x)
+ if (x < 1)
+ fun_l10_n335(x)
+ else
+ fun_l10_n87(x)
+ end
+end
+
+def fun_l9_n205(x)
+ if (x < 1)
+ fun_l10_n973(x)
+ else
+ fun_l10_n262(x)
+ end
+end
+
+def fun_l9_n206(x)
+ if (x < 1)
+ fun_l10_n494(x)
+ else
+ fun_l10_n430(x)
+ end
+end
+
+def fun_l9_n207(x)
+ if (x < 1)
+ fun_l10_n482(x)
+ else
+ fun_l10_n828(x)
+ end
+end
+
+def fun_l9_n208(x)
+ if (x < 1)
+ fun_l10_n820(x)
+ else
+ fun_l10_n646(x)
+ end
+end
+
+def fun_l9_n209(x)
+ if (x < 1)
+ fun_l10_n669(x)
+ else
+ fun_l10_n176(x)
+ end
+end
+
+def fun_l9_n210(x)
+ if (x < 1)
+ fun_l10_n165(x)
+ else
+ fun_l10_n963(x)
+ end
+end
+
+def fun_l9_n211(x)
+ if (x < 1)
+ fun_l10_n772(x)
+ else
+ fun_l10_n49(x)
+ end
+end
+
+def fun_l9_n212(x)
+ if (x < 1)
+ fun_l10_n167(x)
+ else
+ fun_l10_n598(x)
+ end
+end
+
+def fun_l9_n213(x)
+ if (x < 1)
+ fun_l10_n552(x)
+ else
+ fun_l10_n374(x)
+ end
+end
+
+def fun_l9_n214(x)
+ if (x < 1)
+ fun_l10_n39(x)
+ else
+ fun_l10_n470(x)
+ end
+end
+
+def fun_l9_n215(x)
+ if (x < 1)
+ fun_l10_n489(x)
+ else
+ fun_l10_n812(x)
+ end
+end
+
+def fun_l9_n216(x)
+ if (x < 1)
+ fun_l10_n390(x)
+ else
+ fun_l10_n928(x)
+ end
+end
+
+def fun_l9_n217(x)
+ if (x < 1)
+ fun_l10_n909(x)
+ else
+ fun_l10_n876(x)
+ end
+end
+
+def fun_l9_n218(x)
+ if (x < 1)
+ fun_l10_n781(x)
+ else
+ fun_l10_n799(x)
+ end
+end
+
+def fun_l9_n219(x)
+ if (x < 1)
+ fun_l10_n958(x)
+ else
+ fun_l10_n697(x)
+ end
+end
+
+def fun_l9_n220(x)
+ if (x < 1)
+ fun_l10_n577(x)
+ else
+ fun_l10_n667(x)
+ end
+end
+
+def fun_l9_n221(x)
+ if (x < 1)
+ fun_l10_n206(x)
+ else
+ fun_l10_n573(x)
+ end
+end
+
+def fun_l9_n222(x)
+ if (x < 1)
+ fun_l10_n389(x)
+ else
+ fun_l10_n659(x)
+ end
+end
+
+def fun_l9_n223(x)
+ if (x < 1)
+ fun_l10_n951(x)
+ else
+ fun_l10_n572(x)
+ end
+end
+
+def fun_l9_n224(x)
+ if (x < 1)
+ fun_l10_n158(x)
+ else
+ fun_l10_n500(x)
+ end
+end
+
+def fun_l9_n225(x)
+ if (x < 1)
+ fun_l10_n681(x)
+ else
+ fun_l10_n983(x)
+ end
+end
+
+def fun_l9_n226(x)
+ if (x < 1)
+ fun_l10_n987(x)
+ else
+ fun_l10_n116(x)
+ end
+end
+
+def fun_l9_n227(x)
+ if (x < 1)
+ fun_l10_n347(x)
+ else
+ fun_l10_n606(x)
+ end
+end
+
+def fun_l9_n228(x)
+ if (x < 1)
+ fun_l10_n566(x)
+ else
+ fun_l10_n77(x)
+ end
+end
+
+def fun_l9_n229(x)
+ if (x < 1)
+ fun_l10_n879(x)
+ else
+ fun_l10_n66(x)
+ end
+end
+
+def fun_l9_n230(x)
+ if (x < 1)
+ fun_l10_n355(x)
+ else
+ fun_l10_n97(x)
+ end
+end
+
+def fun_l9_n231(x)
+ if (x < 1)
+ fun_l10_n938(x)
+ else
+ fun_l10_n5(x)
+ end
+end
+
+def fun_l9_n232(x)
+ if (x < 1)
+ fun_l10_n684(x)
+ else
+ fun_l10_n528(x)
+ end
+end
+
+def fun_l9_n233(x)
+ if (x < 1)
+ fun_l10_n500(x)
+ else
+ fun_l10_n246(x)
+ end
+end
+
+def fun_l9_n234(x)
+ if (x < 1)
+ fun_l10_n610(x)
+ else
+ fun_l10_n61(x)
+ end
+end
+
+def fun_l9_n235(x)
+ if (x < 1)
+ fun_l10_n199(x)
+ else
+ fun_l10_n838(x)
+ end
+end
+
+def fun_l9_n236(x)
+ if (x < 1)
+ fun_l10_n820(x)
+ else
+ fun_l10_n633(x)
+ end
+end
+
+def fun_l9_n237(x)
+ if (x < 1)
+ fun_l10_n216(x)
+ else
+ fun_l10_n546(x)
+ end
+end
+
+def fun_l9_n238(x)
+ if (x < 1)
+ fun_l10_n555(x)
+ else
+ fun_l10_n153(x)
+ end
+end
+
+def fun_l9_n239(x)
+ if (x < 1)
+ fun_l10_n236(x)
+ else
+ fun_l10_n231(x)
+ end
+end
+
+def fun_l9_n240(x)
+ if (x < 1)
+ fun_l10_n775(x)
+ else
+ fun_l10_n823(x)
+ end
+end
+
+def fun_l9_n241(x)
+ if (x < 1)
+ fun_l10_n275(x)
+ else
+ fun_l10_n519(x)
+ end
+end
+
+def fun_l9_n242(x)
+ if (x < 1)
+ fun_l10_n239(x)
+ else
+ fun_l10_n791(x)
+ end
+end
+
+def fun_l9_n243(x)
+ if (x < 1)
+ fun_l10_n639(x)
+ else
+ fun_l10_n972(x)
+ end
+end
+
+def fun_l9_n244(x)
+ if (x < 1)
+ fun_l10_n5(x)
+ else
+ fun_l10_n510(x)
+ end
+end
+
+def fun_l9_n245(x)
+ if (x < 1)
+ fun_l10_n256(x)
+ else
+ fun_l10_n298(x)
+ end
+end
+
+def fun_l9_n246(x)
+ if (x < 1)
+ fun_l10_n139(x)
+ else
+ fun_l10_n385(x)
+ end
+end
+
+def fun_l9_n247(x)
+ if (x < 1)
+ fun_l10_n4(x)
+ else
+ fun_l10_n323(x)
+ end
+end
+
+def fun_l9_n248(x)
+ if (x < 1)
+ fun_l10_n166(x)
+ else
+ fun_l10_n539(x)
+ end
+end
+
+def fun_l9_n249(x)
+ if (x < 1)
+ fun_l10_n374(x)
+ else
+ fun_l10_n351(x)
+ end
+end
+
+def fun_l9_n250(x)
+ if (x < 1)
+ fun_l10_n650(x)
+ else
+ fun_l10_n791(x)
+ end
+end
+
+def fun_l9_n251(x)
+ if (x < 1)
+ fun_l10_n799(x)
+ else
+ fun_l10_n990(x)
+ end
+end
+
+def fun_l9_n252(x)
+ if (x < 1)
+ fun_l10_n388(x)
+ else
+ fun_l10_n887(x)
+ end
+end
+
+def fun_l9_n253(x)
+ if (x < 1)
+ fun_l10_n564(x)
+ else
+ fun_l10_n997(x)
+ end
+end
+
+def fun_l9_n254(x)
+ if (x < 1)
+ fun_l10_n580(x)
+ else
+ fun_l10_n646(x)
+ end
+end
+
+def fun_l9_n255(x)
+ if (x < 1)
+ fun_l10_n0(x)
+ else
+ fun_l10_n480(x)
+ end
+end
+
+def fun_l9_n256(x)
+ if (x < 1)
+ fun_l10_n606(x)
+ else
+ fun_l10_n48(x)
+ end
+end
+
+def fun_l9_n257(x)
+ if (x < 1)
+ fun_l10_n601(x)
+ else
+ fun_l10_n657(x)
+ end
+end
+
+def fun_l9_n258(x)
+ if (x < 1)
+ fun_l10_n419(x)
+ else
+ fun_l10_n909(x)
+ end
+end
+
+def fun_l9_n259(x)
+ if (x < 1)
+ fun_l10_n194(x)
+ else
+ fun_l10_n961(x)
+ end
+end
+
+def fun_l9_n260(x)
+ if (x < 1)
+ fun_l10_n876(x)
+ else
+ fun_l10_n345(x)
+ end
+end
+
+def fun_l9_n261(x)
+ if (x < 1)
+ fun_l10_n643(x)
+ else
+ fun_l10_n905(x)
+ end
+end
+
+def fun_l9_n262(x)
+ if (x < 1)
+ fun_l10_n105(x)
+ else
+ fun_l10_n61(x)
+ end
+end
+
+def fun_l9_n263(x)
+ if (x < 1)
+ fun_l10_n610(x)
+ else
+ fun_l10_n527(x)
+ end
+end
+
+def fun_l9_n264(x)
+ if (x < 1)
+ fun_l10_n183(x)
+ else
+ fun_l10_n51(x)
+ end
+end
+
+def fun_l9_n265(x)
+ if (x < 1)
+ fun_l10_n800(x)
+ else
+ fun_l10_n103(x)
+ end
+end
+
+def fun_l9_n266(x)
+ if (x < 1)
+ fun_l10_n501(x)
+ else
+ fun_l10_n293(x)
+ end
+end
+
+def fun_l9_n267(x)
+ if (x < 1)
+ fun_l10_n207(x)
+ else
+ fun_l10_n162(x)
+ end
+end
+
+def fun_l9_n268(x)
+ if (x < 1)
+ fun_l10_n901(x)
+ else
+ fun_l10_n914(x)
+ end
+end
+
+def fun_l9_n269(x)
+ if (x < 1)
+ fun_l10_n711(x)
+ else
+ fun_l10_n227(x)
+ end
+end
+
+def fun_l9_n270(x)
+ if (x < 1)
+ fun_l10_n445(x)
+ else
+ fun_l10_n227(x)
+ end
+end
+
+def fun_l9_n271(x)
+ if (x < 1)
+ fun_l10_n585(x)
+ else
+ fun_l10_n371(x)
+ end
+end
+
+def fun_l9_n272(x)
+ if (x < 1)
+ fun_l10_n137(x)
+ else
+ fun_l10_n678(x)
+ end
+end
+
+def fun_l9_n273(x)
+ if (x < 1)
+ fun_l10_n284(x)
+ else
+ fun_l10_n801(x)
+ end
+end
+
+def fun_l9_n274(x)
+ if (x < 1)
+ fun_l10_n829(x)
+ else
+ fun_l10_n456(x)
+ end
+end
+
+def fun_l9_n275(x)
+ if (x < 1)
+ fun_l10_n663(x)
+ else
+ fun_l10_n529(x)
+ end
+end
+
+def fun_l9_n276(x)
+ if (x < 1)
+ fun_l10_n154(x)
+ else
+ fun_l10_n421(x)
+ end
+end
+
+def fun_l9_n277(x)
+ if (x < 1)
+ fun_l10_n778(x)
+ else
+ fun_l10_n325(x)
+ end
+end
+
+def fun_l9_n278(x)
+ if (x < 1)
+ fun_l10_n179(x)
+ else
+ fun_l10_n369(x)
+ end
+end
+
+def fun_l9_n279(x)
+ if (x < 1)
+ fun_l10_n800(x)
+ else
+ fun_l10_n488(x)
+ end
+end
+
+def fun_l9_n280(x)
+ if (x < 1)
+ fun_l10_n617(x)
+ else
+ fun_l10_n1(x)
+ end
+end
+
+def fun_l9_n281(x)
+ if (x < 1)
+ fun_l10_n951(x)
+ else
+ fun_l10_n763(x)
+ end
+end
+
+def fun_l9_n282(x)
+ if (x < 1)
+ fun_l10_n537(x)
+ else
+ fun_l10_n541(x)
+ end
+end
+
+def fun_l9_n283(x)
+ if (x < 1)
+ fun_l10_n664(x)
+ else
+ fun_l10_n854(x)
+ end
+end
+
+def fun_l9_n284(x)
+ if (x < 1)
+ fun_l10_n711(x)
+ else
+ fun_l10_n767(x)
+ end
+end
+
+def fun_l9_n285(x)
+ if (x < 1)
+ fun_l10_n211(x)
+ else
+ fun_l10_n733(x)
+ end
+end
+
+def fun_l9_n286(x)
+ if (x < 1)
+ fun_l10_n524(x)
+ else
+ fun_l10_n850(x)
+ end
+end
+
+def fun_l9_n287(x)
+ if (x < 1)
+ fun_l10_n270(x)
+ else
+ fun_l10_n798(x)
+ end
+end
+
+def fun_l9_n288(x)
+ if (x < 1)
+ fun_l10_n906(x)
+ else
+ fun_l10_n408(x)
+ end
+end
+
+def fun_l9_n289(x)
+ if (x < 1)
+ fun_l10_n873(x)
+ else
+ fun_l10_n298(x)
+ end
+end
+
+def fun_l9_n290(x)
+ if (x < 1)
+ fun_l10_n535(x)
+ else
+ fun_l10_n946(x)
+ end
+end
+
+def fun_l9_n291(x)
+ if (x < 1)
+ fun_l10_n290(x)
+ else
+ fun_l10_n631(x)
+ end
+end
+
+def fun_l9_n292(x)
+ if (x < 1)
+ fun_l10_n223(x)
+ else
+ fun_l10_n165(x)
+ end
+end
+
+def fun_l9_n293(x)
+ if (x < 1)
+ fun_l10_n768(x)
+ else
+ fun_l10_n512(x)
+ end
+end
+
+def fun_l9_n294(x)
+ if (x < 1)
+ fun_l10_n133(x)
+ else
+ fun_l10_n986(x)
+ end
+end
+
+def fun_l9_n295(x)
+ if (x < 1)
+ fun_l10_n657(x)
+ else
+ fun_l10_n914(x)
+ end
+end
+
+def fun_l9_n296(x)
+ if (x < 1)
+ fun_l10_n444(x)
+ else
+ fun_l10_n490(x)
+ end
+end
+
+def fun_l9_n297(x)
+ if (x < 1)
+ fun_l10_n478(x)
+ else
+ fun_l10_n979(x)
+ end
+end
+
+def fun_l9_n298(x)
+ if (x < 1)
+ fun_l10_n994(x)
+ else
+ fun_l10_n945(x)
+ end
+end
+
+def fun_l9_n299(x)
+ if (x < 1)
+ fun_l10_n865(x)
+ else
+ fun_l10_n206(x)
+ end
+end
+
+def fun_l9_n300(x)
+ if (x < 1)
+ fun_l10_n678(x)
+ else
+ fun_l10_n103(x)
+ end
+end
+
+def fun_l9_n301(x)
+ if (x < 1)
+ fun_l10_n115(x)
+ else
+ fun_l10_n742(x)
+ end
+end
+
+def fun_l9_n302(x)
+ if (x < 1)
+ fun_l10_n677(x)
+ else
+ fun_l10_n446(x)
+ end
+end
+
+def fun_l9_n303(x)
+ if (x < 1)
+ fun_l10_n144(x)
+ else
+ fun_l10_n856(x)
+ end
+end
+
+def fun_l9_n304(x)
+ if (x < 1)
+ fun_l10_n751(x)
+ else
+ fun_l10_n989(x)
+ end
+end
+
+def fun_l9_n305(x)
+ if (x < 1)
+ fun_l10_n200(x)
+ else
+ fun_l10_n97(x)
+ end
+end
+
+def fun_l9_n306(x)
+ if (x < 1)
+ fun_l10_n685(x)
+ else
+ fun_l10_n663(x)
+ end
+end
+
+def fun_l9_n307(x)
+ if (x < 1)
+ fun_l10_n831(x)
+ else
+ fun_l10_n770(x)
+ end
+end
+
+def fun_l9_n308(x)
+ if (x < 1)
+ fun_l10_n467(x)
+ else
+ fun_l10_n715(x)
+ end
+end
+
+def fun_l9_n309(x)
+ if (x < 1)
+ fun_l10_n676(x)
+ else
+ fun_l10_n885(x)
+ end
+end
+
+def fun_l9_n310(x)
+ if (x < 1)
+ fun_l10_n382(x)
+ else
+ fun_l10_n687(x)
+ end
+end
+
+def fun_l9_n311(x)
+ if (x < 1)
+ fun_l10_n489(x)
+ else
+ fun_l10_n810(x)
+ end
+end
+
+def fun_l9_n312(x)
+ if (x < 1)
+ fun_l10_n922(x)
+ else
+ fun_l10_n81(x)
+ end
+end
+
+def fun_l9_n313(x)
+ if (x < 1)
+ fun_l10_n788(x)
+ else
+ fun_l10_n519(x)
+ end
+end
+
+def fun_l9_n314(x)
+ if (x < 1)
+ fun_l10_n830(x)
+ else
+ fun_l10_n486(x)
+ end
+end
+
+def fun_l9_n315(x)
+ if (x < 1)
+ fun_l10_n848(x)
+ else
+ fun_l10_n250(x)
+ end
+end
+
+def fun_l9_n316(x)
+ if (x < 1)
+ fun_l10_n933(x)
+ else
+ fun_l10_n268(x)
+ end
+end
+
+def fun_l9_n317(x)
+ if (x < 1)
+ fun_l10_n659(x)
+ else
+ fun_l10_n91(x)
+ end
+end
+
+def fun_l9_n318(x)
+ if (x < 1)
+ fun_l10_n417(x)
+ else
+ fun_l10_n99(x)
+ end
+end
+
+def fun_l9_n319(x)
+ if (x < 1)
+ fun_l10_n781(x)
+ else
+ fun_l10_n770(x)
+ end
+end
+
+def fun_l9_n320(x)
+ if (x < 1)
+ fun_l10_n820(x)
+ else
+ fun_l10_n836(x)
+ end
+end
+
+def fun_l9_n321(x)
+ if (x < 1)
+ fun_l10_n865(x)
+ else
+ fun_l10_n232(x)
+ end
+end
+
+def fun_l9_n322(x)
+ if (x < 1)
+ fun_l10_n793(x)
+ else
+ fun_l10_n856(x)
+ end
+end
+
+def fun_l9_n323(x)
+ if (x < 1)
+ fun_l10_n91(x)
+ else
+ fun_l10_n356(x)
+ end
+end
+
+def fun_l9_n324(x)
+ if (x < 1)
+ fun_l10_n262(x)
+ else
+ fun_l10_n776(x)
+ end
+end
+
+def fun_l9_n325(x)
+ if (x < 1)
+ fun_l10_n733(x)
+ else
+ fun_l10_n866(x)
+ end
+end
+
+def fun_l9_n326(x)
+ if (x < 1)
+ fun_l10_n798(x)
+ else
+ fun_l10_n703(x)
+ end
+end
+
+def fun_l9_n327(x)
+ if (x < 1)
+ fun_l10_n478(x)
+ else
+ fun_l10_n181(x)
+ end
+end
+
+def fun_l9_n328(x)
+ if (x < 1)
+ fun_l10_n66(x)
+ else
+ fun_l10_n736(x)
+ end
+end
+
+def fun_l9_n329(x)
+ if (x < 1)
+ fun_l10_n388(x)
+ else
+ fun_l10_n122(x)
+ end
+end
+
+def fun_l9_n330(x)
+ if (x < 1)
+ fun_l10_n382(x)
+ else
+ fun_l10_n85(x)
+ end
+end
+
+def fun_l9_n331(x)
+ if (x < 1)
+ fun_l10_n987(x)
+ else
+ fun_l10_n769(x)
+ end
+end
+
+def fun_l9_n332(x)
+ if (x < 1)
+ fun_l10_n644(x)
+ else
+ fun_l10_n230(x)
+ end
+end
+
+def fun_l9_n333(x)
+ if (x < 1)
+ fun_l10_n604(x)
+ else
+ fun_l10_n157(x)
+ end
+end
+
+def fun_l9_n334(x)
+ if (x < 1)
+ fun_l10_n50(x)
+ else
+ fun_l10_n417(x)
+ end
+end
+
+def fun_l9_n335(x)
+ if (x < 1)
+ fun_l10_n37(x)
+ else
+ fun_l10_n564(x)
+ end
+end
+
+def fun_l9_n336(x)
+ if (x < 1)
+ fun_l10_n265(x)
+ else
+ fun_l10_n330(x)
+ end
+end
+
+def fun_l9_n337(x)
+ if (x < 1)
+ fun_l10_n816(x)
+ else
+ fun_l10_n723(x)
+ end
+end
+
+def fun_l9_n338(x)
+ if (x < 1)
+ fun_l10_n583(x)
+ else
+ fun_l10_n193(x)
+ end
+end
+
+def fun_l9_n339(x)
+ if (x < 1)
+ fun_l10_n552(x)
+ else
+ fun_l10_n710(x)
+ end
+end
+
+def fun_l9_n340(x)
+ if (x < 1)
+ fun_l10_n594(x)
+ else
+ fun_l10_n323(x)
+ end
+end
+
+def fun_l9_n341(x)
+ if (x < 1)
+ fun_l10_n727(x)
+ else
+ fun_l10_n305(x)
+ end
+end
+
+def fun_l9_n342(x)
+ if (x < 1)
+ fun_l10_n621(x)
+ else
+ fun_l10_n917(x)
+ end
+end
+
+def fun_l9_n343(x)
+ if (x < 1)
+ fun_l10_n634(x)
+ else
+ fun_l10_n36(x)
+ end
+end
+
+def fun_l9_n344(x)
+ if (x < 1)
+ fun_l10_n792(x)
+ else
+ fun_l10_n438(x)
+ end
+end
+
+def fun_l9_n345(x)
+ if (x < 1)
+ fun_l10_n399(x)
+ else
+ fun_l10_n577(x)
+ end
+end
+
+def fun_l9_n346(x)
+ if (x < 1)
+ fun_l10_n110(x)
+ else
+ fun_l10_n120(x)
+ end
+end
+
+def fun_l9_n347(x)
+ if (x < 1)
+ fun_l10_n521(x)
+ else
+ fun_l10_n111(x)
+ end
+end
+
+def fun_l9_n348(x)
+ if (x < 1)
+ fun_l10_n134(x)
+ else
+ fun_l10_n49(x)
+ end
+end
+
+def fun_l9_n349(x)
+ if (x < 1)
+ fun_l10_n353(x)
+ else
+ fun_l10_n156(x)
+ end
+end
+
+def fun_l9_n350(x)
+ if (x < 1)
+ fun_l10_n532(x)
+ else
+ fun_l10_n796(x)
+ end
+end
+
+def fun_l9_n351(x)
+ if (x < 1)
+ fun_l10_n896(x)
+ else
+ fun_l10_n176(x)
+ end
+end
+
+def fun_l9_n352(x)
+ if (x < 1)
+ fun_l10_n857(x)
+ else
+ fun_l10_n798(x)
+ end
+end
+
+def fun_l9_n353(x)
+ if (x < 1)
+ fun_l10_n676(x)
+ else
+ fun_l10_n870(x)
+ end
+end
+
+def fun_l9_n354(x)
+ if (x < 1)
+ fun_l10_n806(x)
+ else
+ fun_l10_n951(x)
+ end
+end
+
+def fun_l9_n355(x)
+ if (x < 1)
+ fun_l10_n841(x)
+ else
+ fun_l10_n918(x)
+ end
+end
+
+def fun_l9_n356(x)
+ if (x < 1)
+ fun_l10_n226(x)
+ else
+ fun_l10_n514(x)
+ end
+end
+
+def fun_l9_n357(x)
+ if (x < 1)
+ fun_l10_n289(x)
+ else
+ fun_l10_n911(x)
+ end
+end
+
+def fun_l9_n358(x)
+ if (x < 1)
+ fun_l10_n697(x)
+ else
+ fun_l10_n138(x)
+ end
+end
+
+def fun_l9_n359(x)
+ if (x < 1)
+ fun_l10_n432(x)
+ else
+ fun_l10_n88(x)
+ end
+end
+
+def fun_l9_n360(x)
+ if (x < 1)
+ fun_l10_n351(x)
+ else
+ fun_l10_n968(x)
+ end
+end
+
+def fun_l9_n361(x)
+ if (x < 1)
+ fun_l10_n782(x)
+ else
+ fun_l10_n506(x)
+ end
+end
+
+def fun_l9_n362(x)
+ if (x < 1)
+ fun_l10_n875(x)
+ else
+ fun_l10_n459(x)
+ end
+end
+
+def fun_l9_n363(x)
+ if (x < 1)
+ fun_l10_n2(x)
+ else
+ fun_l10_n468(x)
+ end
+end
+
+def fun_l9_n364(x)
+ if (x < 1)
+ fun_l10_n151(x)
+ else
+ fun_l10_n538(x)
+ end
+end
+
+def fun_l9_n365(x)
+ if (x < 1)
+ fun_l10_n203(x)
+ else
+ fun_l10_n890(x)
+ end
+end
+
+def fun_l9_n366(x)
+ if (x < 1)
+ fun_l10_n612(x)
+ else
+ fun_l10_n483(x)
+ end
+end
+
+def fun_l9_n367(x)
+ if (x < 1)
+ fun_l10_n465(x)
+ else
+ fun_l10_n724(x)
+ end
+end
+
+def fun_l9_n368(x)
+ if (x < 1)
+ fun_l10_n745(x)
+ else
+ fun_l10_n92(x)
+ end
+end
+
+def fun_l9_n369(x)
+ if (x < 1)
+ fun_l10_n312(x)
+ else
+ fun_l10_n754(x)
+ end
+end
+
+def fun_l9_n370(x)
+ if (x < 1)
+ fun_l10_n432(x)
+ else
+ fun_l10_n234(x)
+ end
+end
+
+def fun_l9_n371(x)
+ if (x < 1)
+ fun_l10_n629(x)
+ else
+ fun_l10_n751(x)
+ end
+end
+
+def fun_l9_n372(x)
+ if (x < 1)
+ fun_l10_n142(x)
+ else
+ fun_l10_n381(x)
+ end
+end
+
+def fun_l9_n373(x)
+ if (x < 1)
+ fun_l10_n490(x)
+ else
+ fun_l10_n405(x)
+ end
+end
+
+def fun_l9_n374(x)
+ if (x < 1)
+ fun_l10_n197(x)
+ else
+ fun_l10_n574(x)
+ end
+end
+
+def fun_l9_n375(x)
+ if (x < 1)
+ fun_l10_n808(x)
+ else
+ fun_l10_n753(x)
+ end
+end
+
+def fun_l9_n376(x)
+ if (x < 1)
+ fun_l10_n195(x)
+ else
+ fun_l10_n186(x)
+ end
+end
+
+def fun_l9_n377(x)
+ if (x < 1)
+ fun_l10_n172(x)
+ else
+ fun_l10_n233(x)
+ end
+end
+
+def fun_l9_n378(x)
+ if (x < 1)
+ fun_l10_n723(x)
+ else
+ fun_l10_n919(x)
+ end
+end
+
+def fun_l9_n379(x)
+ if (x < 1)
+ fun_l10_n937(x)
+ else
+ fun_l10_n149(x)
+ end
+end
+
+def fun_l9_n380(x)
+ if (x < 1)
+ fun_l10_n676(x)
+ else
+ fun_l10_n502(x)
+ end
+end
+
+def fun_l9_n381(x)
+ if (x < 1)
+ fun_l10_n756(x)
+ else
+ fun_l10_n346(x)
+ end
+end
+
+def fun_l9_n382(x)
+ if (x < 1)
+ fun_l10_n477(x)
+ else
+ fun_l10_n179(x)
+ end
+end
+
+def fun_l9_n383(x)
+ if (x < 1)
+ fun_l10_n729(x)
+ else
+ fun_l10_n862(x)
+ end
+end
+
+def fun_l9_n384(x)
+ if (x < 1)
+ fun_l10_n406(x)
+ else
+ fun_l10_n145(x)
+ end
+end
+
+def fun_l9_n385(x)
+ if (x < 1)
+ fun_l10_n239(x)
+ else
+ fun_l10_n169(x)
+ end
+end
+
+def fun_l9_n386(x)
+ if (x < 1)
+ fun_l10_n596(x)
+ else
+ fun_l10_n971(x)
+ end
+end
+
+def fun_l9_n387(x)
+ if (x < 1)
+ fun_l10_n466(x)
+ else
+ fun_l10_n488(x)
+ end
+end
+
+def fun_l9_n388(x)
+ if (x < 1)
+ fun_l10_n711(x)
+ else
+ fun_l10_n553(x)
+ end
+end
+
+def fun_l9_n389(x)
+ if (x < 1)
+ fun_l10_n776(x)
+ else
+ fun_l10_n227(x)
+ end
+end
+
+def fun_l9_n390(x)
+ if (x < 1)
+ fun_l10_n872(x)
+ else
+ fun_l10_n901(x)
+ end
+end
+
+def fun_l9_n391(x)
+ if (x < 1)
+ fun_l10_n579(x)
+ else
+ fun_l10_n906(x)
+ end
+end
+
+def fun_l9_n392(x)
+ if (x < 1)
+ fun_l10_n734(x)
+ else
+ fun_l10_n195(x)
+ end
+end
+
+def fun_l9_n393(x)
+ if (x < 1)
+ fun_l10_n691(x)
+ else
+ fun_l10_n68(x)
+ end
+end
+
+def fun_l9_n394(x)
+ if (x < 1)
+ fun_l10_n446(x)
+ else
+ fun_l10_n838(x)
+ end
+end
+
+def fun_l9_n395(x)
+ if (x < 1)
+ fun_l10_n708(x)
+ else
+ fun_l10_n319(x)
+ end
+end
+
+def fun_l9_n396(x)
+ if (x < 1)
+ fun_l10_n414(x)
+ else
+ fun_l10_n152(x)
+ end
+end
+
+def fun_l9_n397(x)
+ if (x < 1)
+ fun_l10_n637(x)
+ else
+ fun_l10_n662(x)
+ end
+end
+
+def fun_l9_n398(x)
+ if (x < 1)
+ fun_l10_n990(x)
+ else
+ fun_l10_n547(x)
+ end
+end
+
+def fun_l9_n399(x)
+ if (x < 1)
+ fun_l10_n43(x)
+ else
+ fun_l10_n845(x)
+ end
+end
+
+def fun_l9_n400(x)
+ if (x < 1)
+ fun_l10_n843(x)
+ else
+ fun_l10_n876(x)
+ end
+end
+
+def fun_l9_n401(x)
+ if (x < 1)
+ fun_l10_n925(x)
+ else
+ fun_l10_n221(x)
+ end
+end
+
+def fun_l9_n402(x)
+ if (x < 1)
+ fun_l10_n420(x)
+ else
+ fun_l10_n267(x)
+ end
+end
+
+def fun_l9_n403(x)
+ if (x < 1)
+ fun_l10_n78(x)
+ else
+ fun_l10_n316(x)
+ end
+end
+
+def fun_l9_n404(x)
+ if (x < 1)
+ fun_l10_n291(x)
+ else
+ fun_l10_n614(x)
+ end
+end
+
+def fun_l9_n405(x)
+ if (x < 1)
+ fun_l10_n956(x)
+ else
+ fun_l10_n550(x)
+ end
+end
+
+def fun_l9_n406(x)
+ if (x < 1)
+ fun_l10_n232(x)
+ else
+ fun_l10_n53(x)
+ end
+end
+
+def fun_l9_n407(x)
+ if (x < 1)
+ fun_l10_n171(x)
+ else
+ fun_l10_n51(x)
+ end
+end
+
+def fun_l9_n408(x)
+ if (x < 1)
+ fun_l10_n401(x)
+ else
+ fun_l10_n932(x)
+ end
+end
+
+def fun_l9_n409(x)
+ if (x < 1)
+ fun_l10_n863(x)
+ else
+ fun_l10_n951(x)
+ end
+end
+
+def fun_l9_n410(x)
+ if (x < 1)
+ fun_l10_n64(x)
+ else
+ fun_l10_n721(x)
+ end
+end
+
+def fun_l9_n411(x)
+ if (x < 1)
+ fun_l10_n539(x)
+ else
+ fun_l10_n951(x)
+ end
+end
+
+def fun_l9_n412(x)
+ if (x < 1)
+ fun_l10_n660(x)
+ else
+ fun_l10_n852(x)
+ end
+end
+
+def fun_l9_n413(x)
+ if (x < 1)
+ fun_l10_n777(x)
+ else
+ fun_l10_n366(x)
+ end
+end
+
+def fun_l9_n414(x)
+ if (x < 1)
+ fun_l10_n695(x)
+ else
+ fun_l10_n538(x)
+ end
+end
+
+def fun_l9_n415(x)
+ if (x < 1)
+ fun_l10_n208(x)
+ else
+ fun_l10_n892(x)
+ end
+end
+
+def fun_l9_n416(x)
+ if (x < 1)
+ fun_l10_n632(x)
+ else
+ fun_l10_n463(x)
+ end
+end
+
+def fun_l9_n417(x)
+ if (x < 1)
+ fun_l10_n238(x)
+ else
+ fun_l10_n55(x)
+ end
+end
+
+def fun_l9_n418(x)
+ if (x < 1)
+ fun_l10_n276(x)
+ else
+ fun_l10_n526(x)
+ end
+end
+
+def fun_l9_n419(x)
+ if (x < 1)
+ fun_l10_n625(x)
+ else
+ fun_l10_n806(x)
+ end
+end
+
+def fun_l9_n420(x)
+ if (x < 1)
+ fun_l10_n899(x)
+ else
+ fun_l10_n594(x)
+ end
+end
+
+def fun_l9_n421(x)
+ if (x < 1)
+ fun_l10_n725(x)
+ else
+ fun_l10_n461(x)
+ end
+end
+
+def fun_l9_n422(x)
+ if (x < 1)
+ fun_l10_n187(x)
+ else
+ fun_l10_n953(x)
+ end
+end
+
+def fun_l9_n423(x)
+ if (x < 1)
+ fun_l10_n698(x)
+ else
+ fun_l10_n751(x)
+ end
+end
+
+def fun_l9_n424(x)
+ if (x < 1)
+ fun_l10_n115(x)
+ else
+ fun_l10_n321(x)
+ end
+end
+
+def fun_l9_n425(x)
+ if (x < 1)
+ fun_l10_n775(x)
+ else
+ fun_l10_n100(x)
+ end
+end
+
+def fun_l9_n426(x)
+ if (x < 1)
+ fun_l10_n177(x)
+ else
+ fun_l10_n565(x)
+ end
+end
+
+def fun_l9_n427(x)
+ if (x < 1)
+ fun_l10_n568(x)
+ else
+ fun_l10_n234(x)
+ end
+end
+
+def fun_l9_n428(x)
+ if (x < 1)
+ fun_l10_n787(x)
+ else
+ fun_l10_n815(x)
+ end
+end
+
+def fun_l9_n429(x)
+ if (x < 1)
+ fun_l10_n580(x)
+ else
+ fun_l10_n990(x)
+ end
+end
+
+def fun_l9_n430(x)
+ if (x < 1)
+ fun_l10_n680(x)
+ else
+ fun_l10_n696(x)
+ end
+end
+
+def fun_l9_n431(x)
+ if (x < 1)
+ fun_l10_n650(x)
+ else
+ fun_l10_n776(x)
+ end
+end
+
+def fun_l9_n432(x)
+ if (x < 1)
+ fun_l10_n377(x)
+ else
+ fun_l10_n232(x)
+ end
+end
+
+def fun_l9_n433(x)
+ if (x < 1)
+ fun_l10_n621(x)
+ else
+ fun_l10_n948(x)
+ end
+end
+
+def fun_l9_n434(x)
+ if (x < 1)
+ fun_l10_n126(x)
+ else
+ fun_l10_n220(x)
+ end
+end
+
+def fun_l9_n435(x)
+ if (x < 1)
+ fun_l10_n395(x)
+ else
+ fun_l10_n732(x)
+ end
+end
+
+def fun_l9_n436(x)
+ if (x < 1)
+ fun_l10_n194(x)
+ else
+ fun_l10_n159(x)
+ end
+end
+
+def fun_l9_n437(x)
+ if (x < 1)
+ fun_l10_n123(x)
+ else
+ fun_l10_n40(x)
+ end
+end
+
+def fun_l9_n438(x)
+ if (x < 1)
+ fun_l10_n241(x)
+ else
+ fun_l10_n861(x)
+ end
+end
+
+def fun_l9_n439(x)
+ if (x < 1)
+ fun_l10_n333(x)
+ else
+ fun_l10_n950(x)
+ end
+end
+
+def fun_l9_n440(x)
+ if (x < 1)
+ fun_l10_n116(x)
+ else
+ fun_l10_n832(x)
+ end
+end
+
+def fun_l9_n441(x)
+ if (x < 1)
+ fun_l10_n598(x)
+ else
+ fun_l10_n68(x)
+ end
+end
+
+def fun_l9_n442(x)
+ if (x < 1)
+ fun_l10_n234(x)
+ else
+ fun_l10_n277(x)
+ end
+end
+
+def fun_l9_n443(x)
+ if (x < 1)
+ fun_l10_n943(x)
+ else
+ fun_l10_n895(x)
+ end
+end
+
+def fun_l9_n444(x)
+ if (x < 1)
+ fun_l10_n490(x)
+ else
+ fun_l10_n512(x)
+ end
+end
+
+def fun_l9_n445(x)
+ if (x < 1)
+ fun_l10_n54(x)
+ else
+ fun_l10_n74(x)
+ end
+end
+
+def fun_l9_n446(x)
+ if (x < 1)
+ fun_l10_n214(x)
+ else
+ fun_l10_n858(x)
+ end
+end
+
+def fun_l9_n447(x)
+ if (x < 1)
+ fun_l10_n274(x)
+ else
+ fun_l10_n255(x)
+ end
+end
+
+def fun_l9_n448(x)
+ if (x < 1)
+ fun_l10_n307(x)
+ else
+ fun_l10_n197(x)
+ end
+end
+
+def fun_l9_n449(x)
+ if (x < 1)
+ fun_l10_n152(x)
+ else
+ fun_l10_n543(x)
+ end
+end
+
+def fun_l9_n450(x)
+ if (x < 1)
+ fun_l10_n773(x)
+ else
+ fun_l10_n582(x)
+ end
+end
+
+def fun_l9_n451(x)
+ if (x < 1)
+ fun_l10_n757(x)
+ else
+ fun_l10_n298(x)
+ end
+end
+
+def fun_l9_n452(x)
+ if (x < 1)
+ fun_l10_n648(x)
+ else
+ fun_l10_n826(x)
+ end
+end
+
+def fun_l9_n453(x)
+ if (x < 1)
+ fun_l10_n209(x)
+ else
+ fun_l10_n767(x)
+ end
+end
+
+def fun_l9_n454(x)
+ if (x < 1)
+ fun_l10_n448(x)
+ else
+ fun_l10_n374(x)
+ end
+end
+
+def fun_l9_n455(x)
+ if (x < 1)
+ fun_l10_n975(x)
+ else
+ fun_l10_n45(x)
+ end
+end
+
+def fun_l9_n456(x)
+ if (x < 1)
+ fun_l10_n938(x)
+ else
+ fun_l10_n354(x)
+ end
+end
+
+def fun_l9_n457(x)
+ if (x < 1)
+ fun_l10_n723(x)
+ else
+ fun_l10_n511(x)
+ end
+end
+
+def fun_l9_n458(x)
+ if (x < 1)
+ fun_l10_n864(x)
+ else
+ fun_l10_n688(x)
+ end
+end
+
+def fun_l9_n459(x)
+ if (x < 1)
+ fun_l10_n283(x)
+ else
+ fun_l10_n776(x)
+ end
+end
+
+def fun_l9_n460(x)
+ if (x < 1)
+ fun_l10_n167(x)
+ else
+ fun_l10_n639(x)
+ end
+end
+
+def fun_l9_n461(x)
+ if (x < 1)
+ fun_l10_n493(x)
+ else
+ fun_l10_n538(x)
+ end
+end
+
+def fun_l9_n462(x)
+ if (x < 1)
+ fun_l10_n392(x)
+ else
+ fun_l10_n434(x)
+ end
+end
+
+def fun_l9_n463(x)
+ if (x < 1)
+ fun_l10_n958(x)
+ else
+ fun_l10_n305(x)
+ end
+end
+
+def fun_l9_n464(x)
+ if (x < 1)
+ fun_l10_n516(x)
+ else
+ fun_l10_n375(x)
+ end
+end
+
+def fun_l9_n465(x)
+ if (x < 1)
+ fun_l10_n371(x)
+ else
+ fun_l10_n596(x)
+ end
+end
+
+def fun_l9_n466(x)
+ if (x < 1)
+ fun_l10_n435(x)
+ else
+ fun_l10_n176(x)
+ end
+end
+
+def fun_l9_n467(x)
+ if (x < 1)
+ fun_l10_n391(x)
+ else
+ fun_l10_n83(x)
+ end
+end
+
+def fun_l9_n468(x)
+ if (x < 1)
+ fun_l10_n751(x)
+ else
+ fun_l10_n632(x)
+ end
+end
+
+def fun_l9_n469(x)
+ if (x < 1)
+ fun_l10_n14(x)
+ else
+ fun_l10_n530(x)
+ end
+end
+
+def fun_l9_n470(x)
+ if (x < 1)
+ fun_l10_n663(x)
+ else
+ fun_l10_n844(x)
+ end
+end
+
+def fun_l9_n471(x)
+ if (x < 1)
+ fun_l10_n82(x)
+ else
+ fun_l10_n493(x)
+ end
+end
+
+def fun_l9_n472(x)
+ if (x < 1)
+ fun_l10_n614(x)
+ else
+ fun_l10_n277(x)
+ end
+end
+
+def fun_l9_n473(x)
+ if (x < 1)
+ fun_l10_n492(x)
+ else
+ fun_l10_n492(x)
+ end
+end
+
+def fun_l9_n474(x)
+ if (x < 1)
+ fun_l10_n440(x)
+ else
+ fun_l10_n665(x)
+ end
+end
+
+def fun_l9_n475(x)
+ if (x < 1)
+ fun_l10_n284(x)
+ else
+ fun_l10_n513(x)
+ end
+end
+
+def fun_l9_n476(x)
+ if (x < 1)
+ fun_l10_n690(x)
+ else
+ fun_l10_n507(x)
+ end
+end
+
+def fun_l9_n477(x)
+ if (x < 1)
+ fun_l10_n281(x)
+ else
+ fun_l10_n519(x)
+ end
+end
+
+def fun_l9_n478(x)
+ if (x < 1)
+ fun_l10_n903(x)
+ else
+ fun_l10_n866(x)
+ end
+end
+
+def fun_l9_n479(x)
+ if (x < 1)
+ fun_l10_n880(x)
+ else
+ fun_l10_n310(x)
+ end
+end
+
+def fun_l9_n480(x)
+ if (x < 1)
+ fun_l10_n69(x)
+ else
+ fun_l10_n227(x)
+ end
+end
+
+def fun_l9_n481(x)
+ if (x < 1)
+ fun_l10_n316(x)
+ else
+ fun_l10_n692(x)
+ end
+end
+
+def fun_l9_n482(x)
+ if (x < 1)
+ fun_l10_n875(x)
+ else
+ fun_l10_n384(x)
+ end
+end
+
+def fun_l9_n483(x)
+ if (x < 1)
+ fun_l10_n496(x)
+ else
+ fun_l10_n81(x)
+ end
+end
+
+def fun_l9_n484(x)
+ if (x < 1)
+ fun_l10_n336(x)
+ else
+ fun_l10_n193(x)
+ end
+end
+
+def fun_l9_n485(x)
+ if (x < 1)
+ fun_l10_n541(x)
+ else
+ fun_l10_n465(x)
+ end
+end
+
+def fun_l9_n486(x)
+ if (x < 1)
+ fun_l10_n197(x)
+ else
+ fun_l10_n844(x)
+ end
+end
+
+def fun_l9_n487(x)
+ if (x < 1)
+ fun_l10_n652(x)
+ else
+ fun_l10_n736(x)
+ end
+end
+
+def fun_l9_n488(x)
+ if (x < 1)
+ fun_l10_n960(x)
+ else
+ fun_l10_n912(x)
+ end
+end
+
+def fun_l9_n489(x)
+ if (x < 1)
+ fun_l10_n238(x)
+ else
+ fun_l10_n64(x)
+ end
+end
+
+def fun_l9_n490(x)
+ if (x < 1)
+ fun_l10_n505(x)
+ else
+ fun_l10_n204(x)
+ end
+end
+
+def fun_l9_n491(x)
+ if (x < 1)
+ fun_l10_n960(x)
+ else
+ fun_l10_n198(x)
+ end
+end
+
+def fun_l9_n492(x)
+ if (x < 1)
+ fun_l10_n148(x)
+ else
+ fun_l10_n669(x)
+ end
+end
+
+def fun_l9_n493(x)
+ if (x < 1)
+ fun_l10_n860(x)
+ else
+ fun_l10_n676(x)
+ end
+end
+
+def fun_l9_n494(x)
+ if (x < 1)
+ fun_l10_n77(x)
+ else
+ fun_l10_n945(x)
+ end
+end
+
+def fun_l9_n495(x)
+ if (x < 1)
+ fun_l10_n151(x)
+ else
+ fun_l10_n588(x)
+ end
+end
+
+def fun_l9_n496(x)
+ if (x < 1)
+ fun_l10_n159(x)
+ else
+ fun_l10_n727(x)
+ end
+end
+
+def fun_l9_n497(x)
+ if (x < 1)
+ fun_l10_n803(x)
+ else
+ fun_l10_n292(x)
+ end
+end
+
+def fun_l9_n498(x)
+ if (x < 1)
+ fun_l10_n308(x)
+ else
+ fun_l10_n46(x)
+ end
+end
+
+def fun_l9_n499(x)
+ if (x < 1)
+ fun_l10_n511(x)
+ else
+ fun_l10_n766(x)
+ end
+end
+
+def fun_l9_n500(x)
+ if (x < 1)
+ fun_l10_n753(x)
+ else
+ fun_l10_n235(x)
+ end
+end
+
+def fun_l9_n501(x)
+ if (x < 1)
+ fun_l10_n776(x)
+ else
+ fun_l10_n779(x)
+ end
+end
+
+def fun_l9_n502(x)
+ if (x < 1)
+ fun_l10_n114(x)
+ else
+ fun_l10_n969(x)
+ end
+end
+
+def fun_l9_n503(x)
+ if (x < 1)
+ fun_l10_n511(x)
+ else
+ fun_l10_n678(x)
+ end
+end
+
+def fun_l9_n504(x)
+ if (x < 1)
+ fun_l10_n578(x)
+ else
+ fun_l10_n689(x)
+ end
+end
+
+def fun_l9_n505(x)
+ if (x < 1)
+ fun_l10_n434(x)
+ else
+ fun_l10_n0(x)
+ end
+end
+
+def fun_l9_n506(x)
+ if (x < 1)
+ fun_l10_n121(x)
+ else
+ fun_l10_n488(x)
+ end
+end
+
+def fun_l9_n507(x)
+ if (x < 1)
+ fun_l10_n865(x)
+ else
+ fun_l10_n540(x)
+ end
+end
+
+def fun_l9_n508(x)
+ if (x < 1)
+ fun_l10_n809(x)
+ else
+ fun_l10_n764(x)
+ end
+end
+
+def fun_l9_n509(x)
+ if (x < 1)
+ fun_l10_n463(x)
+ else
+ fun_l10_n141(x)
+ end
+end
+
+def fun_l9_n510(x)
+ if (x < 1)
+ fun_l10_n442(x)
+ else
+ fun_l10_n646(x)
+ end
+end
+
+def fun_l9_n511(x)
+ if (x < 1)
+ fun_l10_n971(x)
+ else
+ fun_l10_n57(x)
+ end
+end
+
+def fun_l9_n512(x)
+ if (x < 1)
+ fun_l10_n892(x)
+ else
+ fun_l10_n612(x)
+ end
+end
+
+def fun_l9_n513(x)
+ if (x < 1)
+ fun_l10_n864(x)
+ else
+ fun_l10_n438(x)
+ end
+end
+
+def fun_l9_n514(x)
+ if (x < 1)
+ fun_l10_n504(x)
+ else
+ fun_l10_n100(x)
+ end
+end
+
+def fun_l9_n515(x)
+ if (x < 1)
+ fun_l10_n881(x)
+ else
+ fun_l10_n621(x)
+ end
+end
+
+def fun_l9_n516(x)
+ if (x < 1)
+ fun_l10_n170(x)
+ else
+ fun_l10_n435(x)
+ end
+end
+
+def fun_l9_n517(x)
+ if (x < 1)
+ fun_l10_n712(x)
+ else
+ fun_l10_n537(x)
+ end
+end
+
+def fun_l9_n518(x)
+ if (x < 1)
+ fun_l10_n369(x)
+ else
+ fun_l10_n832(x)
+ end
+end
+
+def fun_l9_n519(x)
+ if (x < 1)
+ fun_l10_n654(x)
+ else
+ fun_l10_n364(x)
+ end
+end
+
+def fun_l9_n520(x)
+ if (x < 1)
+ fun_l10_n410(x)
+ else
+ fun_l10_n10(x)
+ end
+end
+
+def fun_l9_n521(x)
+ if (x < 1)
+ fun_l10_n814(x)
+ else
+ fun_l10_n569(x)
+ end
+end
+
+def fun_l9_n522(x)
+ if (x < 1)
+ fun_l10_n787(x)
+ else
+ fun_l10_n333(x)
+ end
+end
+
+def fun_l9_n523(x)
+ if (x < 1)
+ fun_l10_n336(x)
+ else
+ fun_l10_n720(x)
+ end
+end
+
+def fun_l9_n524(x)
+ if (x < 1)
+ fun_l10_n34(x)
+ else
+ fun_l10_n643(x)
+ end
+end
+
+def fun_l9_n525(x)
+ if (x < 1)
+ fun_l10_n147(x)
+ else
+ fun_l10_n62(x)
+ end
+end
+
+def fun_l9_n526(x)
+ if (x < 1)
+ fun_l10_n431(x)
+ else
+ fun_l10_n452(x)
+ end
+end
+
+def fun_l9_n527(x)
+ if (x < 1)
+ fun_l10_n374(x)
+ else
+ fun_l10_n831(x)
+ end
+end
+
+def fun_l9_n528(x)
+ if (x < 1)
+ fun_l10_n604(x)
+ else
+ fun_l10_n274(x)
+ end
+end
+
+def fun_l9_n529(x)
+ if (x < 1)
+ fun_l10_n470(x)
+ else
+ fun_l10_n764(x)
+ end
+end
+
+def fun_l9_n530(x)
+ if (x < 1)
+ fun_l10_n552(x)
+ else
+ fun_l10_n646(x)
+ end
+end
+
+def fun_l9_n531(x)
+ if (x < 1)
+ fun_l10_n427(x)
+ else
+ fun_l10_n464(x)
+ end
+end
+
+def fun_l9_n532(x)
+ if (x < 1)
+ fun_l10_n553(x)
+ else
+ fun_l10_n605(x)
+ end
+end
+
+def fun_l9_n533(x)
+ if (x < 1)
+ fun_l10_n59(x)
+ else
+ fun_l10_n794(x)
+ end
+end
+
+def fun_l9_n534(x)
+ if (x < 1)
+ fun_l10_n168(x)
+ else
+ fun_l10_n740(x)
+ end
+end
+
+def fun_l9_n535(x)
+ if (x < 1)
+ fun_l10_n227(x)
+ else
+ fun_l10_n651(x)
+ end
+end
+
+def fun_l9_n536(x)
+ if (x < 1)
+ fun_l10_n904(x)
+ else
+ fun_l10_n552(x)
+ end
+end
+
+def fun_l9_n537(x)
+ if (x < 1)
+ fun_l10_n389(x)
+ else
+ fun_l10_n397(x)
+ end
+end
+
+def fun_l9_n538(x)
+ if (x < 1)
+ fun_l10_n622(x)
+ else
+ fun_l10_n100(x)
+ end
+end
+
+def fun_l9_n539(x)
+ if (x < 1)
+ fun_l10_n117(x)
+ else
+ fun_l10_n715(x)
+ end
+end
+
+def fun_l9_n540(x)
+ if (x < 1)
+ fun_l10_n959(x)
+ else
+ fun_l10_n391(x)
+ end
+end
+
+def fun_l9_n541(x)
+ if (x < 1)
+ fun_l10_n733(x)
+ else
+ fun_l10_n393(x)
+ end
+end
+
+def fun_l9_n542(x)
+ if (x < 1)
+ fun_l10_n171(x)
+ else
+ fun_l10_n299(x)
+ end
+end
+
+def fun_l9_n543(x)
+ if (x < 1)
+ fun_l10_n655(x)
+ else
+ fun_l10_n285(x)
+ end
+end
+
+def fun_l9_n544(x)
+ if (x < 1)
+ fun_l10_n819(x)
+ else
+ fun_l10_n817(x)
+ end
+end
+
+def fun_l9_n545(x)
+ if (x < 1)
+ fun_l10_n938(x)
+ else
+ fun_l10_n54(x)
+ end
+end
+
+def fun_l9_n546(x)
+ if (x < 1)
+ fun_l10_n712(x)
+ else
+ fun_l10_n6(x)
+ end
+end
+
+def fun_l9_n547(x)
+ if (x < 1)
+ fun_l10_n203(x)
+ else
+ fun_l10_n626(x)
+ end
+end
+
+def fun_l9_n548(x)
+ if (x < 1)
+ fun_l10_n643(x)
+ else
+ fun_l10_n761(x)
+ end
+end
+
+def fun_l9_n549(x)
+ if (x < 1)
+ fun_l10_n450(x)
+ else
+ fun_l10_n673(x)
+ end
+end
+
+def fun_l9_n550(x)
+ if (x < 1)
+ fun_l10_n496(x)
+ else
+ fun_l10_n568(x)
+ end
+end
+
+def fun_l9_n551(x)
+ if (x < 1)
+ fun_l10_n30(x)
+ else
+ fun_l10_n518(x)
+ end
+end
+
+def fun_l9_n552(x)
+ if (x < 1)
+ fun_l10_n398(x)
+ else
+ fun_l10_n858(x)
+ end
+end
+
+def fun_l9_n553(x)
+ if (x < 1)
+ fun_l10_n253(x)
+ else
+ fun_l10_n831(x)
+ end
+end
+
+def fun_l9_n554(x)
+ if (x < 1)
+ fun_l10_n493(x)
+ else
+ fun_l10_n750(x)
+ end
+end
+
+def fun_l9_n555(x)
+ if (x < 1)
+ fun_l10_n111(x)
+ else
+ fun_l10_n279(x)
+ end
+end
+
+def fun_l9_n556(x)
+ if (x < 1)
+ fun_l10_n927(x)
+ else
+ fun_l10_n793(x)
+ end
+end
+
+def fun_l9_n557(x)
+ if (x < 1)
+ fun_l10_n530(x)
+ else
+ fun_l10_n182(x)
+ end
+end
+
+def fun_l9_n558(x)
+ if (x < 1)
+ fun_l10_n434(x)
+ else
+ fun_l10_n453(x)
+ end
+end
+
+def fun_l9_n559(x)
+ if (x < 1)
+ fun_l10_n480(x)
+ else
+ fun_l10_n590(x)
+ end
+end
+
+def fun_l9_n560(x)
+ if (x < 1)
+ fun_l10_n906(x)
+ else
+ fun_l10_n280(x)
+ end
+end
+
+def fun_l9_n561(x)
+ if (x < 1)
+ fun_l10_n107(x)
+ else
+ fun_l10_n716(x)
+ end
+end
+
+def fun_l9_n562(x)
+ if (x < 1)
+ fun_l10_n15(x)
+ else
+ fun_l10_n671(x)
+ end
+end
+
+def fun_l9_n563(x)
+ if (x < 1)
+ fun_l10_n544(x)
+ else
+ fun_l10_n571(x)
+ end
+end
+
+def fun_l9_n564(x)
+ if (x < 1)
+ fun_l10_n482(x)
+ else
+ fun_l10_n885(x)
+ end
+end
+
+def fun_l9_n565(x)
+ if (x < 1)
+ fun_l10_n465(x)
+ else
+ fun_l10_n916(x)
+ end
+end
+
+def fun_l9_n566(x)
+ if (x < 1)
+ fun_l10_n413(x)
+ else
+ fun_l10_n684(x)
+ end
+end
+
+def fun_l9_n567(x)
+ if (x < 1)
+ fun_l10_n413(x)
+ else
+ fun_l10_n577(x)
+ end
+end
+
+def fun_l9_n568(x)
+ if (x < 1)
+ fun_l10_n604(x)
+ else
+ fun_l10_n899(x)
+ end
+end
+
+def fun_l9_n569(x)
+ if (x < 1)
+ fun_l10_n138(x)
+ else
+ fun_l10_n164(x)
+ end
+end
+
+def fun_l9_n570(x)
+ if (x < 1)
+ fun_l10_n993(x)
+ else
+ fun_l10_n94(x)
+ end
+end
+
+def fun_l9_n571(x)
+ if (x < 1)
+ fun_l10_n602(x)
+ else
+ fun_l10_n989(x)
+ end
+end
+
+def fun_l9_n572(x)
+ if (x < 1)
+ fun_l10_n700(x)
+ else
+ fun_l10_n308(x)
+ end
+end
+
+def fun_l9_n573(x)
+ if (x < 1)
+ fun_l10_n773(x)
+ else
+ fun_l10_n945(x)
+ end
+end
+
+def fun_l9_n574(x)
+ if (x < 1)
+ fun_l10_n148(x)
+ else
+ fun_l10_n608(x)
+ end
+end
+
+def fun_l9_n575(x)
+ if (x < 1)
+ fun_l10_n439(x)
+ else
+ fun_l10_n908(x)
+ end
+end
+
+def fun_l9_n576(x)
+ if (x < 1)
+ fun_l10_n174(x)
+ else
+ fun_l10_n872(x)
+ end
+end
+
+def fun_l9_n577(x)
+ if (x < 1)
+ fun_l10_n291(x)
+ else
+ fun_l10_n265(x)
+ end
+end
+
+def fun_l9_n578(x)
+ if (x < 1)
+ fun_l10_n425(x)
+ else
+ fun_l10_n928(x)
+ end
+end
+
+def fun_l9_n579(x)
+ if (x < 1)
+ fun_l10_n674(x)
+ else
+ fun_l10_n666(x)
+ end
+end
+
+def fun_l9_n580(x)
+ if (x < 1)
+ fun_l10_n784(x)
+ else
+ fun_l10_n147(x)
+ end
+end
+
+def fun_l9_n581(x)
+ if (x < 1)
+ fun_l10_n550(x)
+ else
+ fun_l10_n119(x)
+ end
+end
+
+def fun_l9_n582(x)
+ if (x < 1)
+ fun_l10_n953(x)
+ else
+ fun_l10_n269(x)
+ end
+end
+
+def fun_l9_n583(x)
+ if (x < 1)
+ fun_l10_n575(x)
+ else
+ fun_l10_n79(x)
+ end
+end
+
+def fun_l9_n584(x)
+ if (x < 1)
+ fun_l10_n498(x)
+ else
+ fun_l10_n1(x)
+ end
+end
+
+def fun_l9_n585(x)
+ if (x < 1)
+ fun_l10_n728(x)
+ else
+ fun_l10_n92(x)
+ end
+end
+
+def fun_l9_n586(x)
+ if (x < 1)
+ fun_l10_n344(x)
+ else
+ fun_l10_n753(x)
+ end
+end
+
+def fun_l9_n587(x)
+ if (x < 1)
+ fun_l10_n911(x)
+ else
+ fun_l10_n327(x)
+ end
+end
+
+def fun_l9_n588(x)
+ if (x < 1)
+ fun_l10_n422(x)
+ else
+ fun_l10_n224(x)
+ end
+end
+
+def fun_l9_n589(x)
+ if (x < 1)
+ fun_l10_n311(x)
+ else
+ fun_l10_n457(x)
+ end
+end
+
+def fun_l9_n590(x)
+ if (x < 1)
+ fun_l10_n705(x)
+ else
+ fun_l10_n136(x)
+ end
+end
+
+def fun_l9_n591(x)
+ if (x < 1)
+ fun_l10_n338(x)
+ else
+ fun_l10_n14(x)
+ end
+end
+
+def fun_l9_n592(x)
+ if (x < 1)
+ fun_l10_n144(x)
+ else
+ fun_l10_n671(x)
+ end
+end
+
+def fun_l9_n593(x)
+ if (x < 1)
+ fun_l10_n402(x)
+ else
+ fun_l10_n218(x)
+ end
+end
+
+def fun_l9_n594(x)
+ if (x < 1)
+ fun_l10_n854(x)
+ else
+ fun_l10_n198(x)
+ end
+end
+
+def fun_l9_n595(x)
+ if (x < 1)
+ fun_l10_n932(x)
+ else
+ fun_l10_n181(x)
+ end
+end
+
+def fun_l9_n596(x)
+ if (x < 1)
+ fun_l10_n373(x)
+ else
+ fun_l10_n937(x)
+ end
+end
+
+def fun_l9_n597(x)
+ if (x < 1)
+ fun_l10_n181(x)
+ else
+ fun_l10_n925(x)
+ end
+end
+
+def fun_l9_n598(x)
+ if (x < 1)
+ fun_l10_n263(x)
+ else
+ fun_l10_n98(x)
+ end
+end
+
+def fun_l9_n599(x)
+ if (x < 1)
+ fun_l10_n736(x)
+ else
+ fun_l10_n951(x)
+ end
+end
+
+def fun_l9_n600(x)
+ if (x < 1)
+ fun_l10_n576(x)
+ else
+ fun_l10_n998(x)
+ end
+end
+
+def fun_l9_n601(x)
+ if (x < 1)
+ fun_l10_n577(x)
+ else
+ fun_l10_n559(x)
+ end
+end
+
+def fun_l9_n602(x)
+ if (x < 1)
+ fun_l10_n143(x)
+ else
+ fun_l10_n993(x)
+ end
+end
+
+def fun_l9_n603(x)
+ if (x < 1)
+ fun_l10_n35(x)
+ else
+ fun_l10_n132(x)
+ end
+end
+
+def fun_l9_n604(x)
+ if (x < 1)
+ fun_l10_n715(x)
+ else
+ fun_l10_n933(x)
+ end
+end
+
+def fun_l9_n605(x)
+ if (x < 1)
+ fun_l10_n570(x)
+ else
+ fun_l10_n455(x)
+ end
+end
+
+def fun_l9_n606(x)
+ if (x < 1)
+ fun_l10_n225(x)
+ else
+ fun_l10_n216(x)
+ end
+end
+
+def fun_l9_n607(x)
+ if (x < 1)
+ fun_l10_n431(x)
+ else
+ fun_l10_n285(x)
+ end
+end
+
+def fun_l9_n608(x)
+ if (x < 1)
+ fun_l10_n647(x)
+ else
+ fun_l10_n221(x)
+ end
+end
+
+def fun_l9_n609(x)
+ if (x < 1)
+ fun_l10_n348(x)
+ else
+ fun_l10_n849(x)
+ end
+end
+
+def fun_l9_n610(x)
+ if (x < 1)
+ fun_l10_n301(x)
+ else
+ fun_l10_n500(x)
+ end
+end
+
+def fun_l9_n611(x)
+ if (x < 1)
+ fun_l10_n152(x)
+ else
+ fun_l10_n1(x)
+ end
+end
+
+def fun_l9_n612(x)
+ if (x < 1)
+ fun_l10_n339(x)
+ else
+ fun_l10_n577(x)
+ end
+end
+
+def fun_l9_n613(x)
+ if (x < 1)
+ fun_l10_n427(x)
+ else
+ fun_l10_n779(x)
+ end
+end
+
+def fun_l9_n614(x)
+ if (x < 1)
+ fun_l10_n60(x)
+ else
+ fun_l10_n199(x)
+ end
+end
+
+def fun_l9_n615(x)
+ if (x < 1)
+ fun_l10_n616(x)
+ else
+ fun_l10_n33(x)
+ end
+end
+
+def fun_l9_n616(x)
+ if (x < 1)
+ fun_l10_n940(x)
+ else
+ fun_l10_n981(x)
+ end
+end
+
+def fun_l9_n617(x)
+ if (x < 1)
+ fun_l10_n594(x)
+ else
+ fun_l10_n321(x)
+ end
+end
+
+def fun_l9_n618(x)
+ if (x < 1)
+ fun_l10_n233(x)
+ else
+ fun_l10_n500(x)
+ end
+end
+
+def fun_l9_n619(x)
+ if (x < 1)
+ fun_l10_n169(x)
+ else
+ fun_l10_n883(x)
+ end
+end
+
+def fun_l9_n620(x)
+ if (x < 1)
+ fun_l10_n611(x)
+ else
+ fun_l10_n345(x)
+ end
+end
+
+def fun_l9_n621(x)
+ if (x < 1)
+ fun_l10_n155(x)
+ else
+ fun_l10_n547(x)
+ end
+end
+
+def fun_l9_n622(x)
+ if (x < 1)
+ fun_l10_n244(x)
+ else
+ fun_l10_n760(x)
+ end
+end
+
+def fun_l9_n623(x)
+ if (x < 1)
+ fun_l10_n893(x)
+ else
+ fun_l10_n13(x)
+ end
+end
+
+def fun_l9_n624(x)
+ if (x < 1)
+ fun_l10_n465(x)
+ else
+ fun_l10_n276(x)
+ end
+end
+
+def fun_l9_n625(x)
+ if (x < 1)
+ fun_l10_n196(x)
+ else
+ fun_l10_n920(x)
+ end
+end
+
+def fun_l9_n626(x)
+ if (x < 1)
+ fun_l10_n235(x)
+ else
+ fun_l10_n962(x)
+ end
+end
+
+def fun_l9_n627(x)
+ if (x < 1)
+ fun_l10_n361(x)
+ else
+ fun_l10_n162(x)
+ end
+end
+
+def fun_l9_n628(x)
+ if (x < 1)
+ fun_l10_n110(x)
+ else
+ fun_l10_n574(x)
+ end
+end
+
+def fun_l9_n629(x)
+ if (x < 1)
+ fun_l10_n861(x)
+ else
+ fun_l10_n839(x)
+ end
+end
+
+def fun_l9_n630(x)
+ if (x < 1)
+ fun_l10_n386(x)
+ else
+ fun_l10_n741(x)
+ end
+end
+
+def fun_l9_n631(x)
+ if (x < 1)
+ fun_l10_n426(x)
+ else
+ fun_l10_n430(x)
+ end
+end
+
+def fun_l9_n632(x)
+ if (x < 1)
+ fun_l10_n231(x)
+ else
+ fun_l10_n353(x)
+ end
+end
+
+def fun_l9_n633(x)
+ if (x < 1)
+ fun_l10_n945(x)
+ else
+ fun_l10_n199(x)
+ end
+end
+
+def fun_l9_n634(x)
+ if (x < 1)
+ fun_l10_n607(x)
+ else
+ fun_l10_n388(x)
+ end
+end
+
+def fun_l9_n635(x)
+ if (x < 1)
+ fun_l10_n214(x)
+ else
+ fun_l10_n135(x)
+ end
+end
+
+def fun_l9_n636(x)
+ if (x < 1)
+ fun_l10_n642(x)
+ else
+ fun_l10_n522(x)
+ end
+end
+
+def fun_l9_n637(x)
+ if (x < 1)
+ fun_l10_n605(x)
+ else
+ fun_l10_n336(x)
+ end
+end
+
+def fun_l9_n638(x)
+ if (x < 1)
+ fun_l10_n719(x)
+ else
+ fun_l10_n213(x)
+ end
+end
+
+def fun_l9_n639(x)
+ if (x < 1)
+ fun_l10_n202(x)
+ else
+ fun_l10_n767(x)
+ end
+end
+
+def fun_l9_n640(x)
+ if (x < 1)
+ fun_l10_n242(x)
+ else
+ fun_l10_n596(x)
+ end
+end
+
+def fun_l9_n641(x)
+ if (x < 1)
+ fun_l10_n851(x)
+ else
+ fun_l10_n216(x)
+ end
+end
+
+def fun_l9_n642(x)
+ if (x < 1)
+ fun_l10_n645(x)
+ else
+ fun_l10_n890(x)
+ end
+end
+
+def fun_l9_n643(x)
+ if (x < 1)
+ fun_l10_n241(x)
+ else
+ fun_l10_n350(x)
+ end
+end
+
+def fun_l9_n644(x)
+ if (x < 1)
+ fun_l10_n597(x)
+ else
+ fun_l10_n350(x)
+ end
+end
+
+def fun_l9_n645(x)
+ if (x < 1)
+ fun_l10_n170(x)
+ else
+ fun_l10_n962(x)
+ end
+end
+
+def fun_l9_n646(x)
+ if (x < 1)
+ fun_l10_n229(x)
+ else
+ fun_l10_n543(x)
+ end
+end
+
+def fun_l9_n647(x)
+ if (x < 1)
+ fun_l10_n473(x)
+ else
+ fun_l10_n942(x)
+ end
+end
+
+def fun_l9_n648(x)
+ if (x < 1)
+ fun_l10_n630(x)
+ else
+ fun_l10_n630(x)
+ end
+end
+
+def fun_l9_n649(x)
+ if (x < 1)
+ fun_l10_n183(x)
+ else
+ fun_l10_n313(x)
+ end
+end
+
+def fun_l9_n650(x)
+ if (x < 1)
+ fun_l10_n951(x)
+ else
+ fun_l10_n394(x)
+ end
+end
+
+def fun_l9_n651(x)
+ if (x < 1)
+ fun_l10_n107(x)
+ else
+ fun_l10_n849(x)
+ end
+end
+
+def fun_l9_n652(x)
+ if (x < 1)
+ fun_l10_n678(x)
+ else
+ fun_l10_n768(x)
+ end
+end
+
+def fun_l9_n653(x)
+ if (x < 1)
+ fun_l10_n534(x)
+ else
+ fun_l10_n410(x)
+ end
+end
+
+def fun_l9_n654(x)
+ if (x < 1)
+ fun_l10_n694(x)
+ else
+ fun_l10_n134(x)
+ end
+end
+
+def fun_l9_n655(x)
+ if (x < 1)
+ fun_l10_n751(x)
+ else
+ fun_l10_n923(x)
+ end
+end
+
+def fun_l9_n656(x)
+ if (x < 1)
+ fun_l10_n957(x)
+ else
+ fun_l10_n489(x)
+ end
+end
+
+def fun_l9_n657(x)
+ if (x < 1)
+ fun_l10_n612(x)
+ else
+ fun_l10_n577(x)
+ end
+end
+
+def fun_l9_n658(x)
+ if (x < 1)
+ fun_l10_n814(x)
+ else
+ fun_l10_n801(x)
+ end
+end
+
+def fun_l9_n659(x)
+ if (x < 1)
+ fun_l10_n297(x)
+ else
+ fun_l10_n344(x)
+ end
+end
+
+def fun_l9_n660(x)
+ if (x < 1)
+ fun_l10_n674(x)
+ else
+ fun_l10_n780(x)
+ end
+end
+
+def fun_l9_n661(x)
+ if (x < 1)
+ fun_l10_n302(x)
+ else
+ fun_l10_n184(x)
+ end
+end
+
+def fun_l9_n662(x)
+ if (x < 1)
+ fun_l10_n308(x)
+ else
+ fun_l10_n302(x)
+ end
+end
+
+def fun_l9_n663(x)
+ if (x < 1)
+ fun_l10_n825(x)
+ else
+ fun_l10_n866(x)
+ end
+end
+
+def fun_l9_n664(x)
+ if (x < 1)
+ fun_l10_n771(x)
+ else
+ fun_l10_n141(x)
+ end
+end
+
+def fun_l9_n665(x)
+ if (x < 1)
+ fun_l10_n369(x)
+ else
+ fun_l10_n63(x)
+ end
+end
+
+def fun_l9_n666(x)
+ if (x < 1)
+ fun_l10_n198(x)
+ else
+ fun_l10_n793(x)
+ end
+end
+
+def fun_l9_n667(x)
+ if (x < 1)
+ fun_l10_n800(x)
+ else
+ fun_l10_n748(x)
+ end
+end
+
+def fun_l9_n668(x)
+ if (x < 1)
+ fun_l10_n809(x)
+ else
+ fun_l10_n673(x)
+ end
+end
+
+def fun_l9_n669(x)
+ if (x < 1)
+ fun_l10_n438(x)
+ else
+ fun_l10_n301(x)
+ end
+end
+
+def fun_l9_n670(x)
+ if (x < 1)
+ fun_l10_n802(x)
+ else
+ fun_l10_n882(x)
+ end
+end
+
+def fun_l9_n671(x)
+ if (x < 1)
+ fun_l10_n690(x)
+ else
+ fun_l10_n524(x)
+ end
+end
+
+def fun_l9_n672(x)
+ if (x < 1)
+ fun_l10_n996(x)
+ else
+ fun_l10_n68(x)
+ end
+end
+
+def fun_l9_n673(x)
+ if (x < 1)
+ fun_l10_n773(x)
+ else
+ fun_l10_n26(x)
+ end
+end
+
+def fun_l9_n674(x)
+ if (x < 1)
+ fun_l10_n252(x)
+ else
+ fun_l10_n680(x)
+ end
+end
+
+def fun_l9_n675(x)
+ if (x < 1)
+ fun_l10_n466(x)
+ else
+ fun_l10_n885(x)
+ end
+end
+
+def fun_l9_n676(x)
+ if (x < 1)
+ fun_l10_n746(x)
+ else
+ fun_l10_n968(x)
+ end
+end
+
+def fun_l9_n677(x)
+ if (x < 1)
+ fun_l10_n827(x)
+ else
+ fun_l10_n69(x)
+ end
+end
+
+def fun_l9_n678(x)
+ if (x < 1)
+ fun_l10_n158(x)
+ else
+ fun_l10_n780(x)
+ end
+end
+
+def fun_l9_n679(x)
+ if (x < 1)
+ fun_l10_n267(x)
+ else
+ fun_l10_n907(x)
+ end
+end
+
+def fun_l9_n680(x)
+ if (x < 1)
+ fun_l10_n378(x)
+ else
+ fun_l10_n668(x)
+ end
+end
+
+def fun_l9_n681(x)
+ if (x < 1)
+ fun_l10_n481(x)
+ else
+ fun_l10_n421(x)
+ end
+end
+
+def fun_l9_n682(x)
+ if (x < 1)
+ fun_l10_n501(x)
+ else
+ fun_l10_n277(x)
+ end
+end
+
+def fun_l9_n683(x)
+ if (x < 1)
+ fun_l10_n533(x)
+ else
+ fun_l10_n604(x)
+ end
+end
+
+def fun_l9_n684(x)
+ if (x < 1)
+ fun_l10_n483(x)
+ else
+ fun_l10_n761(x)
+ end
+end
+
+def fun_l9_n685(x)
+ if (x < 1)
+ fun_l10_n41(x)
+ else
+ fun_l10_n374(x)
+ end
+end
+
+def fun_l9_n686(x)
+ if (x < 1)
+ fun_l10_n549(x)
+ else
+ fun_l10_n319(x)
+ end
+end
+
+def fun_l9_n687(x)
+ if (x < 1)
+ fun_l10_n246(x)
+ else
+ fun_l10_n854(x)
+ end
+end
+
+def fun_l9_n688(x)
+ if (x < 1)
+ fun_l10_n634(x)
+ else
+ fun_l10_n43(x)
+ end
+end
+
+def fun_l9_n689(x)
+ if (x < 1)
+ fun_l10_n994(x)
+ else
+ fun_l10_n549(x)
+ end
+end
+
+def fun_l9_n690(x)
+ if (x < 1)
+ fun_l10_n439(x)
+ else
+ fun_l10_n560(x)
+ end
+end
+
+def fun_l9_n691(x)
+ if (x < 1)
+ fun_l10_n227(x)
+ else
+ fun_l10_n877(x)
+ end
+end
+
+def fun_l9_n692(x)
+ if (x < 1)
+ fun_l10_n644(x)
+ else
+ fun_l10_n350(x)
+ end
+end
+
+def fun_l9_n693(x)
+ if (x < 1)
+ fun_l10_n543(x)
+ else
+ fun_l10_n638(x)
+ end
+end
+
+def fun_l9_n694(x)
+ if (x < 1)
+ fun_l10_n537(x)
+ else
+ fun_l10_n187(x)
+ end
+end
+
+def fun_l9_n695(x)
+ if (x < 1)
+ fun_l10_n1(x)
+ else
+ fun_l10_n540(x)
+ end
+end
+
+def fun_l9_n696(x)
+ if (x < 1)
+ fun_l10_n306(x)
+ else
+ fun_l10_n290(x)
+ end
+end
+
+def fun_l9_n697(x)
+ if (x < 1)
+ fun_l10_n140(x)
+ else
+ fun_l10_n72(x)
+ end
+end
+
+def fun_l9_n698(x)
+ if (x < 1)
+ fun_l10_n43(x)
+ else
+ fun_l10_n257(x)
+ end
+end
+
+def fun_l9_n699(x)
+ if (x < 1)
+ fun_l10_n91(x)
+ else
+ fun_l10_n816(x)
+ end
+end
+
+def fun_l9_n700(x)
+ if (x < 1)
+ fun_l10_n420(x)
+ else
+ fun_l10_n763(x)
+ end
+end
+
+def fun_l9_n701(x)
+ if (x < 1)
+ fun_l10_n429(x)
+ else
+ fun_l10_n28(x)
+ end
+end
+
+def fun_l9_n702(x)
+ if (x < 1)
+ fun_l10_n615(x)
+ else
+ fun_l10_n995(x)
+ end
+end
+
+def fun_l9_n703(x)
+ if (x < 1)
+ fun_l10_n776(x)
+ else
+ fun_l10_n85(x)
+ end
+end
+
+def fun_l9_n704(x)
+ if (x < 1)
+ fun_l10_n795(x)
+ else
+ fun_l10_n542(x)
+ end
+end
+
+def fun_l9_n705(x)
+ if (x < 1)
+ fun_l10_n114(x)
+ else
+ fun_l10_n166(x)
+ end
+end
+
+def fun_l9_n706(x)
+ if (x < 1)
+ fun_l10_n99(x)
+ else
+ fun_l10_n400(x)
+ end
+end
+
+def fun_l9_n707(x)
+ if (x < 1)
+ fun_l10_n292(x)
+ else
+ fun_l10_n801(x)
+ end
+end
+
+def fun_l9_n708(x)
+ if (x < 1)
+ fun_l10_n303(x)
+ else
+ fun_l10_n380(x)
+ end
+end
+
+def fun_l9_n709(x)
+ if (x < 1)
+ fun_l10_n369(x)
+ else
+ fun_l10_n938(x)
+ end
+end
+
+def fun_l9_n710(x)
+ if (x < 1)
+ fun_l10_n652(x)
+ else
+ fun_l10_n453(x)
+ end
+end
+
+def fun_l9_n711(x)
+ if (x < 1)
+ fun_l10_n770(x)
+ else
+ fun_l10_n32(x)
+ end
+end
+
+def fun_l9_n712(x)
+ if (x < 1)
+ fun_l10_n261(x)
+ else
+ fun_l10_n485(x)
+ end
+end
+
+def fun_l9_n713(x)
+ if (x < 1)
+ fun_l10_n935(x)
+ else
+ fun_l10_n39(x)
+ end
+end
+
+def fun_l9_n714(x)
+ if (x < 1)
+ fun_l10_n332(x)
+ else
+ fun_l10_n309(x)
+ end
+end
+
+def fun_l9_n715(x)
+ if (x < 1)
+ fun_l10_n824(x)
+ else
+ fun_l10_n614(x)
+ end
+end
+
+def fun_l9_n716(x)
+ if (x < 1)
+ fun_l10_n468(x)
+ else
+ fun_l10_n341(x)
+ end
+end
+
+def fun_l9_n717(x)
+ if (x < 1)
+ fun_l10_n425(x)
+ else
+ fun_l10_n67(x)
+ end
+end
+
+def fun_l9_n718(x)
+ if (x < 1)
+ fun_l10_n199(x)
+ else
+ fun_l10_n423(x)
+ end
+end
+
+def fun_l9_n719(x)
+ if (x < 1)
+ fun_l10_n294(x)
+ else
+ fun_l10_n484(x)
+ end
+end
+
+def fun_l9_n720(x)
+ if (x < 1)
+ fun_l10_n276(x)
+ else
+ fun_l10_n799(x)
+ end
+end
+
+def fun_l9_n721(x)
+ if (x < 1)
+ fun_l10_n98(x)
+ else
+ fun_l10_n278(x)
+ end
+end
+
+def fun_l9_n722(x)
+ if (x < 1)
+ fun_l10_n689(x)
+ else
+ fun_l10_n767(x)
+ end
+end
+
+def fun_l9_n723(x)
+ if (x < 1)
+ fun_l10_n786(x)
+ else
+ fun_l10_n409(x)
+ end
+end
+
+def fun_l9_n724(x)
+ if (x < 1)
+ fun_l10_n874(x)
+ else
+ fun_l10_n413(x)
+ end
+end
+
+def fun_l9_n725(x)
+ if (x < 1)
+ fun_l10_n806(x)
+ else
+ fun_l10_n529(x)
+ end
+end
+
+def fun_l9_n726(x)
+ if (x < 1)
+ fun_l10_n847(x)
+ else
+ fun_l10_n427(x)
+ end
+end
+
+def fun_l9_n727(x)
+ if (x < 1)
+ fun_l10_n285(x)
+ else
+ fun_l10_n878(x)
+ end
+end
+
+def fun_l9_n728(x)
+ if (x < 1)
+ fun_l10_n829(x)
+ else
+ fun_l10_n341(x)
+ end
+end
+
+def fun_l9_n729(x)
+ if (x < 1)
+ fun_l10_n797(x)
+ else
+ fun_l10_n865(x)
+ end
+end
+
+def fun_l9_n730(x)
+ if (x < 1)
+ fun_l10_n689(x)
+ else
+ fun_l10_n241(x)
+ end
+end
+
+def fun_l9_n731(x)
+ if (x < 1)
+ fun_l10_n764(x)
+ else
+ fun_l10_n451(x)
+ end
+end
+
+def fun_l9_n732(x)
+ if (x < 1)
+ fun_l10_n147(x)
+ else
+ fun_l10_n998(x)
+ end
+end
+
+def fun_l9_n733(x)
+ if (x < 1)
+ fun_l10_n192(x)
+ else
+ fun_l10_n293(x)
+ end
+end
+
+def fun_l9_n734(x)
+ if (x < 1)
+ fun_l10_n207(x)
+ else
+ fun_l10_n696(x)
+ end
+end
+
+def fun_l9_n735(x)
+ if (x < 1)
+ fun_l10_n457(x)
+ else
+ fun_l10_n93(x)
+ end
+end
+
+def fun_l9_n736(x)
+ if (x < 1)
+ fun_l10_n130(x)
+ else
+ fun_l10_n512(x)
+ end
+end
+
+def fun_l9_n737(x)
+ if (x < 1)
+ fun_l10_n86(x)
+ else
+ fun_l10_n409(x)
+ end
+end
+
+def fun_l9_n738(x)
+ if (x < 1)
+ fun_l10_n783(x)
+ else
+ fun_l10_n981(x)
+ end
+end
+
+def fun_l9_n739(x)
+ if (x < 1)
+ fun_l10_n927(x)
+ else
+ fun_l10_n831(x)
+ end
+end
+
+def fun_l9_n740(x)
+ if (x < 1)
+ fun_l10_n719(x)
+ else
+ fun_l10_n422(x)
+ end
+end
+
+def fun_l9_n741(x)
+ if (x < 1)
+ fun_l10_n287(x)
+ else
+ fun_l10_n139(x)
+ end
+end
+
+def fun_l9_n742(x)
+ if (x < 1)
+ fun_l10_n533(x)
+ else
+ fun_l10_n687(x)
+ end
+end
+
+def fun_l9_n743(x)
+ if (x < 1)
+ fun_l10_n550(x)
+ else
+ fun_l10_n972(x)
+ end
+end
+
+def fun_l9_n744(x)
+ if (x < 1)
+ fun_l10_n5(x)
+ else
+ fun_l10_n306(x)
+ end
+end
+
+def fun_l9_n745(x)
+ if (x < 1)
+ fun_l10_n755(x)
+ else
+ fun_l10_n849(x)
+ end
+end
+
+def fun_l9_n746(x)
+ if (x < 1)
+ fun_l10_n912(x)
+ else
+ fun_l10_n307(x)
+ end
+end
+
+def fun_l9_n747(x)
+ if (x < 1)
+ fun_l10_n181(x)
+ else
+ fun_l10_n476(x)
+ end
+end
+
+def fun_l9_n748(x)
+ if (x < 1)
+ fun_l10_n353(x)
+ else
+ fun_l10_n412(x)
+ end
+end
+
+def fun_l9_n749(x)
+ if (x < 1)
+ fun_l10_n187(x)
+ else
+ fun_l10_n591(x)
+ end
+end
+
+def fun_l9_n750(x)
+ if (x < 1)
+ fun_l10_n464(x)
+ else
+ fun_l10_n581(x)
+ end
+end
+
+def fun_l9_n751(x)
+ if (x < 1)
+ fun_l10_n1(x)
+ else
+ fun_l10_n557(x)
+ end
+end
+
+def fun_l9_n752(x)
+ if (x < 1)
+ fun_l10_n788(x)
+ else
+ fun_l10_n438(x)
+ end
+end
+
+def fun_l9_n753(x)
+ if (x < 1)
+ fun_l10_n535(x)
+ else
+ fun_l10_n13(x)
+ end
+end
+
+def fun_l9_n754(x)
+ if (x < 1)
+ fun_l10_n811(x)
+ else
+ fun_l10_n543(x)
+ end
+end
+
+def fun_l9_n755(x)
+ if (x < 1)
+ fun_l10_n328(x)
+ else
+ fun_l10_n44(x)
+ end
+end
+
+def fun_l9_n756(x)
+ if (x < 1)
+ fun_l10_n839(x)
+ else
+ fun_l10_n16(x)
+ end
+end
+
+def fun_l9_n757(x)
+ if (x < 1)
+ fun_l10_n778(x)
+ else
+ fun_l10_n865(x)
+ end
+end
+
+def fun_l9_n758(x)
+ if (x < 1)
+ fun_l10_n552(x)
+ else
+ fun_l10_n783(x)
+ end
+end
+
+def fun_l9_n759(x)
+ if (x < 1)
+ fun_l10_n293(x)
+ else
+ fun_l10_n140(x)
+ end
+end
+
+def fun_l9_n760(x)
+ if (x < 1)
+ fun_l10_n814(x)
+ else
+ fun_l10_n497(x)
+ end
+end
+
+def fun_l9_n761(x)
+ if (x < 1)
+ fun_l10_n922(x)
+ else
+ fun_l10_n986(x)
+ end
+end
+
+def fun_l9_n762(x)
+ if (x < 1)
+ fun_l10_n100(x)
+ else
+ fun_l10_n346(x)
+ end
+end
+
+def fun_l9_n763(x)
+ if (x < 1)
+ fun_l10_n738(x)
+ else
+ fun_l10_n223(x)
+ end
+end
+
+def fun_l9_n764(x)
+ if (x < 1)
+ fun_l10_n168(x)
+ else
+ fun_l10_n367(x)
+ end
+end
+
+def fun_l9_n765(x)
+ if (x < 1)
+ fun_l10_n929(x)
+ else
+ fun_l10_n684(x)
+ end
+end
+
+def fun_l9_n766(x)
+ if (x < 1)
+ fun_l10_n940(x)
+ else
+ fun_l10_n497(x)
+ end
+end
+
+def fun_l9_n767(x)
+ if (x < 1)
+ fun_l10_n198(x)
+ else
+ fun_l10_n386(x)
+ end
+end
+
+def fun_l9_n768(x)
+ if (x < 1)
+ fun_l10_n667(x)
+ else
+ fun_l10_n913(x)
+ end
+end
+
+def fun_l9_n769(x)
+ if (x < 1)
+ fun_l10_n210(x)
+ else
+ fun_l10_n233(x)
+ end
+end
+
+def fun_l9_n770(x)
+ if (x < 1)
+ fun_l10_n819(x)
+ else
+ fun_l10_n755(x)
+ end
+end
+
+def fun_l9_n771(x)
+ if (x < 1)
+ fun_l10_n519(x)
+ else
+ fun_l10_n554(x)
+ end
+end
+
+def fun_l9_n772(x)
+ if (x < 1)
+ fun_l10_n880(x)
+ else
+ fun_l10_n280(x)
+ end
+end
+
+def fun_l9_n773(x)
+ if (x < 1)
+ fun_l10_n368(x)
+ else
+ fun_l10_n97(x)
+ end
+end
+
+def fun_l9_n774(x)
+ if (x < 1)
+ fun_l10_n447(x)
+ else
+ fun_l10_n918(x)
+ end
+end
+
+def fun_l9_n775(x)
+ if (x < 1)
+ fun_l10_n222(x)
+ else
+ fun_l10_n35(x)
+ end
+end
+
+def fun_l9_n776(x)
+ if (x < 1)
+ fun_l10_n339(x)
+ else
+ fun_l10_n309(x)
+ end
+end
+
+def fun_l9_n777(x)
+ if (x < 1)
+ fun_l10_n948(x)
+ else
+ fun_l10_n967(x)
+ end
+end
+
+def fun_l9_n778(x)
+ if (x < 1)
+ fun_l10_n561(x)
+ else
+ fun_l10_n202(x)
+ end
+end
+
+def fun_l9_n779(x)
+ if (x < 1)
+ fun_l10_n863(x)
+ else
+ fun_l10_n356(x)
+ end
+end
+
+def fun_l9_n780(x)
+ if (x < 1)
+ fun_l10_n459(x)
+ else
+ fun_l10_n358(x)
+ end
+end
+
+def fun_l9_n781(x)
+ if (x < 1)
+ fun_l10_n543(x)
+ else
+ fun_l10_n229(x)
+ end
+end
+
+def fun_l9_n782(x)
+ if (x < 1)
+ fun_l10_n422(x)
+ else
+ fun_l10_n448(x)
+ end
+end
+
+def fun_l9_n783(x)
+ if (x < 1)
+ fun_l10_n222(x)
+ else
+ fun_l10_n63(x)
+ end
+end
+
+def fun_l9_n784(x)
+ if (x < 1)
+ fun_l10_n520(x)
+ else
+ fun_l10_n443(x)
+ end
+end
+
+def fun_l9_n785(x)
+ if (x < 1)
+ fun_l10_n894(x)
+ else
+ fun_l10_n379(x)
+ end
+end
+
+def fun_l9_n786(x)
+ if (x < 1)
+ fun_l10_n362(x)
+ else
+ fun_l10_n676(x)
+ end
+end
+
+def fun_l9_n787(x)
+ if (x < 1)
+ fun_l10_n545(x)
+ else
+ fun_l10_n263(x)
+ end
+end
+
+def fun_l9_n788(x)
+ if (x < 1)
+ fun_l10_n322(x)
+ else
+ fun_l10_n631(x)
+ end
+end
+
+def fun_l9_n789(x)
+ if (x < 1)
+ fun_l10_n916(x)
+ else
+ fun_l10_n877(x)
+ end
+end
+
+def fun_l9_n790(x)
+ if (x < 1)
+ fun_l10_n443(x)
+ else
+ fun_l10_n640(x)
+ end
+end
+
+def fun_l9_n791(x)
+ if (x < 1)
+ fun_l10_n202(x)
+ else
+ fun_l10_n686(x)
+ end
+end
+
+def fun_l9_n792(x)
+ if (x < 1)
+ fun_l10_n143(x)
+ else
+ fun_l10_n200(x)
+ end
+end
+
+def fun_l9_n793(x)
+ if (x < 1)
+ fun_l10_n939(x)
+ else
+ fun_l10_n374(x)
+ end
+end
+
+def fun_l9_n794(x)
+ if (x < 1)
+ fun_l10_n506(x)
+ else
+ fun_l10_n782(x)
+ end
+end
+
+def fun_l9_n795(x)
+ if (x < 1)
+ fun_l10_n619(x)
+ else
+ fun_l10_n439(x)
+ end
+end
+
+def fun_l9_n796(x)
+ if (x < 1)
+ fun_l10_n748(x)
+ else
+ fun_l10_n61(x)
+ end
+end
+
+def fun_l9_n797(x)
+ if (x < 1)
+ fun_l10_n217(x)
+ else
+ fun_l10_n816(x)
+ end
+end
+
+def fun_l9_n798(x)
+ if (x < 1)
+ fun_l10_n310(x)
+ else
+ fun_l10_n445(x)
+ end
+end
+
+def fun_l9_n799(x)
+ if (x < 1)
+ fun_l10_n262(x)
+ else
+ fun_l10_n776(x)
+ end
+end
+
+def fun_l9_n800(x)
+ if (x < 1)
+ fun_l10_n807(x)
+ else
+ fun_l10_n566(x)
+ end
+end
+
+def fun_l9_n801(x)
+ if (x < 1)
+ fun_l10_n466(x)
+ else
+ fun_l10_n287(x)
+ end
+end
+
+def fun_l9_n802(x)
+ if (x < 1)
+ fun_l10_n205(x)
+ else
+ fun_l10_n415(x)
+ end
+end
+
+def fun_l9_n803(x)
+ if (x < 1)
+ fun_l10_n618(x)
+ else
+ fun_l10_n87(x)
+ end
+end
+
+def fun_l9_n804(x)
+ if (x < 1)
+ fun_l10_n248(x)
+ else
+ fun_l10_n32(x)
+ end
+end
+
+def fun_l9_n805(x)
+ if (x < 1)
+ fun_l10_n537(x)
+ else
+ fun_l10_n844(x)
+ end
+end
+
+def fun_l9_n806(x)
+ if (x < 1)
+ fun_l10_n554(x)
+ else
+ fun_l10_n696(x)
+ end
+end
+
+def fun_l9_n807(x)
+ if (x < 1)
+ fun_l10_n774(x)
+ else
+ fun_l10_n486(x)
+ end
+end
+
+def fun_l9_n808(x)
+ if (x < 1)
+ fun_l10_n500(x)
+ else
+ fun_l10_n926(x)
+ end
+end
+
+def fun_l9_n809(x)
+ if (x < 1)
+ fun_l10_n601(x)
+ else
+ fun_l10_n696(x)
+ end
+end
+
+def fun_l9_n810(x)
+ if (x < 1)
+ fun_l10_n770(x)
+ else
+ fun_l10_n948(x)
+ end
+end
+
+def fun_l9_n811(x)
+ if (x < 1)
+ fun_l10_n658(x)
+ else
+ fun_l10_n840(x)
+ end
+end
+
+def fun_l9_n812(x)
+ if (x < 1)
+ fun_l10_n913(x)
+ else
+ fun_l10_n892(x)
+ end
+end
+
+def fun_l9_n813(x)
+ if (x < 1)
+ fun_l10_n440(x)
+ else
+ fun_l10_n204(x)
+ end
+end
+
+def fun_l9_n814(x)
+ if (x < 1)
+ fun_l10_n273(x)
+ else
+ fun_l10_n816(x)
+ end
+end
+
+def fun_l9_n815(x)
+ if (x < 1)
+ fun_l10_n190(x)
+ else
+ fun_l10_n735(x)
+ end
+end
+
+def fun_l9_n816(x)
+ if (x < 1)
+ fun_l10_n77(x)
+ else
+ fun_l10_n30(x)
+ end
+end
+
+def fun_l9_n817(x)
+ if (x < 1)
+ fun_l10_n697(x)
+ else
+ fun_l10_n985(x)
+ end
+end
+
+def fun_l9_n818(x)
+ if (x < 1)
+ fun_l10_n583(x)
+ else
+ fun_l10_n314(x)
+ end
+end
+
+def fun_l9_n819(x)
+ if (x < 1)
+ fun_l10_n149(x)
+ else
+ fun_l10_n556(x)
+ end
+end
+
+def fun_l9_n820(x)
+ if (x < 1)
+ fun_l10_n634(x)
+ else
+ fun_l10_n392(x)
+ end
+end
+
+def fun_l9_n821(x)
+ if (x < 1)
+ fun_l10_n401(x)
+ else
+ fun_l10_n217(x)
+ end
+end
+
+def fun_l9_n822(x)
+ if (x < 1)
+ fun_l10_n564(x)
+ else
+ fun_l10_n860(x)
+ end
+end
+
+def fun_l9_n823(x)
+ if (x < 1)
+ fun_l10_n901(x)
+ else
+ fun_l10_n836(x)
+ end
+end
+
+def fun_l9_n824(x)
+ if (x < 1)
+ fun_l10_n526(x)
+ else
+ fun_l10_n660(x)
+ end
+end
+
+def fun_l9_n825(x)
+ if (x < 1)
+ fun_l10_n245(x)
+ else
+ fun_l10_n914(x)
+ end
+end
+
+def fun_l9_n826(x)
+ if (x < 1)
+ fun_l10_n712(x)
+ else
+ fun_l10_n928(x)
+ end
+end
+
+def fun_l9_n827(x)
+ if (x < 1)
+ fun_l10_n596(x)
+ else
+ fun_l10_n803(x)
+ end
+end
+
+def fun_l9_n828(x)
+ if (x < 1)
+ fun_l10_n454(x)
+ else
+ fun_l10_n679(x)
+ end
+end
+
+def fun_l9_n829(x)
+ if (x < 1)
+ fun_l10_n104(x)
+ else
+ fun_l10_n137(x)
+ end
+end
+
+def fun_l9_n830(x)
+ if (x < 1)
+ fun_l10_n516(x)
+ else
+ fun_l10_n687(x)
+ end
+end
+
+def fun_l9_n831(x)
+ if (x < 1)
+ fun_l10_n720(x)
+ else
+ fun_l10_n770(x)
+ end
+end
+
+def fun_l9_n832(x)
+ if (x < 1)
+ fun_l10_n939(x)
+ else
+ fun_l10_n119(x)
+ end
+end
+
+def fun_l9_n833(x)
+ if (x < 1)
+ fun_l10_n515(x)
+ else
+ fun_l10_n71(x)
+ end
+end
+
+def fun_l9_n834(x)
+ if (x < 1)
+ fun_l10_n668(x)
+ else
+ fun_l10_n521(x)
+ end
+end
+
+def fun_l9_n835(x)
+ if (x < 1)
+ fun_l10_n405(x)
+ else
+ fun_l10_n69(x)
+ end
+end
+
+def fun_l9_n836(x)
+ if (x < 1)
+ fun_l10_n201(x)
+ else
+ fun_l10_n681(x)
+ end
+end
+
+def fun_l9_n837(x)
+ if (x < 1)
+ fun_l10_n161(x)
+ else
+ fun_l10_n258(x)
+ end
+end
+
+def fun_l9_n838(x)
+ if (x < 1)
+ fun_l10_n913(x)
+ else
+ fun_l10_n586(x)
+ end
+end
+
+def fun_l9_n839(x)
+ if (x < 1)
+ fun_l10_n566(x)
+ else
+ fun_l10_n543(x)
+ end
+end
+
+def fun_l9_n840(x)
+ if (x < 1)
+ fun_l10_n547(x)
+ else
+ fun_l10_n570(x)
+ end
+end
+
+def fun_l9_n841(x)
+ if (x < 1)
+ fun_l10_n888(x)
+ else
+ fun_l10_n843(x)
+ end
+end
+
+def fun_l9_n842(x)
+ if (x < 1)
+ fun_l10_n497(x)
+ else
+ fun_l10_n985(x)
+ end
+end
+
+def fun_l9_n843(x)
+ if (x < 1)
+ fun_l10_n754(x)
+ else
+ fun_l10_n390(x)
+ end
+end
+
+def fun_l9_n844(x)
+ if (x < 1)
+ fun_l10_n625(x)
+ else
+ fun_l10_n562(x)
+ end
+end
+
+def fun_l9_n845(x)
+ if (x < 1)
+ fun_l10_n858(x)
+ else
+ fun_l10_n553(x)
+ end
+end
+
+def fun_l9_n846(x)
+ if (x < 1)
+ fun_l10_n145(x)
+ else
+ fun_l10_n551(x)
+ end
+end
+
+def fun_l9_n847(x)
+ if (x < 1)
+ fun_l10_n916(x)
+ else
+ fun_l10_n851(x)
+ end
+end
+
+def fun_l9_n848(x)
+ if (x < 1)
+ fun_l10_n935(x)
+ else
+ fun_l10_n448(x)
+ end
+end
+
+def fun_l9_n849(x)
+ if (x < 1)
+ fun_l10_n664(x)
+ else
+ fun_l10_n832(x)
+ end
+end
+
+def fun_l9_n850(x)
+ if (x < 1)
+ fun_l10_n904(x)
+ else
+ fun_l10_n203(x)
+ end
+end
+
+def fun_l9_n851(x)
+ if (x < 1)
+ fun_l10_n863(x)
+ else
+ fun_l10_n491(x)
+ end
+end
+
+def fun_l9_n852(x)
+ if (x < 1)
+ fun_l10_n930(x)
+ else
+ fun_l10_n248(x)
+ end
+end
+
+def fun_l9_n853(x)
+ if (x < 1)
+ fun_l10_n637(x)
+ else
+ fun_l10_n716(x)
+ end
+end
+
+def fun_l9_n854(x)
+ if (x < 1)
+ fun_l10_n35(x)
+ else
+ fun_l10_n662(x)
+ end
+end
+
+def fun_l9_n855(x)
+ if (x < 1)
+ fun_l10_n57(x)
+ else
+ fun_l10_n392(x)
+ end
+end
+
+def fun_l9_n856(x)
+ if (x < 1)
+ fun_l10_n480(x)
+ else
+ fun_l10_n447(x)
+ end
+end
+
+def fun_l9_n857(x)
+ if (x < 1)
+ fun_l10_n134(x)
+ else
+ fun_l10_n842(x)
+ end
+end
+
+def fun_l9_n858(x)
+ if (x < 1)
+ fun_l10_n384(x)
+ else
+ fun_l10_n577(x)
+ end
+end
+
+def fun_l9_n859(x)
+ if (x < 1)
+ fun_l10_n718(x)
+ else
+ fun_l10_n571(x)
+ end
+end
+
+def fun_l9_n860(x)
+ if (x < 1)
+ fun_l10_n237(x)
+ else
+ fun_l10_n654(x)
+ end
+end
+
+def fun_l9_n861(x)
+ if (x < 1)
+ fun_l10_n807(x)
+ else
+ fun_l10_n409(x)
+ end
+end
+
+def fun_l9_n862(x)
+ if (x < 1)
+ fun_l10_n801(x)
+ else
+ fun_l10_n575(x)
+ end
+end
+
+def fun_l9_n863(x)
+ if (x < 1)
+ fun_l10_n500(x)
+ else
+ fun_l10_n133(x)
+ end
+end
+
+def fun_l9_n864(x)
+ if (x < 1)
+ fun_l10_n403(x)
+ else
+ fun_l10_n720(x)
+ end
+end
+
+def fun_l9_n865(x)
+ if (x < 1)
+ fun_l10_n648(x)
+ else
+ fun_l10_n677(x)
+ end
+end
+
+def fun_l9_n866(x)
+ if (x < 1)
+ fun_l10_n873(x)
+ else
+ fun_l10_n519(x)
+ end
+end
+
+def fun_l9_n867(x)
+ if (x < 1)
+ fun_l10_n459(x)
+ else
+ fun_l10_n331(x)
+ end
+end
+
+def fun_l9_n868(x)
+ if (x < 1)
+ fun_l10_n159(x)
+ else
+ fun_l10_n64(x)
+ end
+end
+
+def fun_l9_n869(x)
+ if (x < 1)
+ fun_l10_n155(x)
+ else
+ fun_l10_n41(x)
+ end
+end
+
+def fun_l9_n870(x)
+ if (x < 1)
+ fun_l10_n947(x)
+ else
+ fun_l10_n386(x)
+ end
+end
+
+def fun_l9_n871(x)
+ if (x < 1)
+ fun_l10_n868(x)
+ else
+ fun_l10_n794(x)
+ end
+end
+
+def fun_l9_n872(x)
+ if (x < 1)
+ fun_l10_n77(x)
+ else
+ fun_l10_n268(x)
+ end
+end
+
+def fun_l9_n873(x)
+ if (x < 1)
+ fun_l10_n119(x)
+ else
+ fun_l10_n819(x)
+ end
+end
+
+def fun_l9_n874(x)
+ if (x < 1)
+ fun_l10_n143(x)
+ else
+ fun_l10_n434(x)
+ end
+end
+
+def fun_l9_n875(x)
+ if (x < 1)
+ fun_l10_n238(x)
+ else
+ fun_l10_n489(x)
+ end
+end
+
+def fun_l9_n876(x)
+ if (x < 1)
+ fun_l10_n687(x)
+ else
+ fun_l10_n384(x)
+ end
+end
+
+def fun_l9_n877(x)
+ if (x < 1)
+ fun_l10_n27(x)
+ else
+ fun_l10_n138(x)
+ end
+end
+
+def fun_l9_n878(x)
+ if (x < 1)
+ fun_l10_n208(x)
+ else
+ fun_l10_n691(x)
+ end
+end
+
+def fun_l9_n879(x)
+ if (x < 1)
+ fun_l10_n793(x)
+ else
+ fun_l10_n165(x)
+ end
+end
+
+def fun_l9_n880(x)
+ if (x < 1)
+ fun_l10_n358(x)
+ else
+ fun_l10_n455(x)
+ end
+end
+
+def fun_l9_n881(x)
+ if (x < 1)
+ fun_l10_n558(x)
+ else
+ fun_l10_n184(x)
+ end
+end
+
+def fun_l9_n882(x)
+ if (x < 1)
+ fun_l10_n461(x)
+ else
+ fun_l10_n155(x)
+ end
+end
+
+def fun_l9_n883(x)
+ if (x < 1)
+ fun_l10_n503(x)
+ else
+ fun_l10_n225(x)
+ end
+end
+
+def fun_l9_n884(x)
+ if (x < 1)
+ fun_l10_n384(x)
+ else
+ fun_l10_n262(x)
+ end
+end
+
+def fun_l9_n885(x)
+ if (x < 1)
+ fun_l10_n565(x)
+ else
+ fun_l10_n633(x)
+ end
+end
+
+def fun_l9_n886(x)
+ if (x < 1)
+ fun_l10_n626(x)
+ else
+ fun_l10_n796(x)
+ end
+end
+
+def fun_l9_n887(x)
+ if (x < 1)
+ fun_l10_n424(x)
+ else
+ fun_l10_n458(x)
+ end
+end
+
+def fun_l9_n888(x)
+ if (x < 1)
+ fun_l10_n687(x)
+ else
+ fun_l10_n145(x)
+ end
+end
+
+def fun_l9_n889(x)
+ if (x < 1)
+ fun_l10_n787(x)
+ else
+ fun_l10_n225(x)
+ end
+end
+
+def fun_l9_n890(x)
+ if (x < 1)
+ fun_l10_n249(x)
+ else
+ fun_l10_n154(x)
+ end
+end
+
+def fun_l9_n891(x)
+ if (x < 1)
+ fun_l10_n38(x)
+ else
+ fun_l10_n635(x)
+ end
+end
+
+def fun_l9_n892(x)
+ if (x < 1)
+ fun_l10_n920(x)
+ else
+ fun_l10_n478(x)
+ end
+end
+
+def fun_l9_n893(x)
+ if (x < 1)
+ fun_l10_n13(x)
+ else
+ fun_l10_n174(x)
+ end
+end
+
+def fun_l9_n894(x)
+ if (x < 1)
+ fun_l10_n304(x)
+ else
+ fun_l10_n495(x)
+ end
+end
+
+def fun_l9_n895(x)
+ if (x < 1)
+ fun_l10_n226(x)
+ else
+ fun_l10_n420(x)
+ end
+end
+
+def fun_l9_n896(x)
+ if (x < 1)
+ fun_l10_n86(x)
+ else
+ fun_l10_n837(x)
+ end
+end
+
+def fun_l9_n897(x)
+ if (x < 1)
+ fun_l10_n927(x)
+ else
+ fun_l10_n344(x)
+ end
+end
+
+def fun_l9_n898(x)
+ if (x < 1)
+ fun_l10_n87(x)
+ else
+ fun_l10_n270(x)
+ end
+end
+
+def fun_l9_n899(x)
+ if (x < 1)
+ fun_l10_n549(x)
+ else
+ fun_l10_n349(x)
+ end
+end
+
+def fun_l9_n900(x)
+ if (x < 1)
+ fun_l10_n456(x)
+ else
+ fun_l10_n345(x)
+ end
+end
+
+def fun_l9_n901(x)
+ if (x < 1)
+ fun_l10_n154(x)
+ else
+ fun_l10_n764(x)
+ end
+end
+
+def fun_l9_n902(x)
+ if (x < 1)
+ fun_l10_n614(x)
+ else
+ fun_l10_n536(x)
+ end
+end
+
+def fun_l9_n903(x)
+ if (x < 1)
+ fun_l10_n108(x)
+ else
+ fun_l10_n197(x)
+ end
+end
+
+def fun_l9_n904(x)
+ if (x < 1)
+ fun_l10_n616(x)
+ else
+ fun_l10_n777(x)
+ end
+end
+
+def fun_l9_n905(x)
+ if (x < 1)
+ fun_l10_n515(x)
+ else
+ fun_l10_n830(x)
+ end
+end
+
+def fun_l9_n906(x)
+ if (x < 1)
+ fun_l10_n288(x)
+ else
+ fun_l10_n717(x)
+ end
+end
+
+def fun_l9_n907(x)
+ if (x < 1)
+ fun_l10_n704(x)
+ else
+ fun_l10_n962(x)
+ end
+end
+
+def fun_l9_n908(x)
+ if (x < 1)
+ fun_l10_n85(x)
+ else
+ fun_l10_n70(x)
+ end
+end
+
+def fun_l9_n909(x)
+ if (x < 1)
+ fun_l10_n806(x)
+ else
+ fun_l10_n97(x)
+ end
+end
+
+def fun_l9_n910(x)
+ if (x < 1)
+ fun_l10_n567(x)
+ else
+ fun_l10_n47(x)
+ end
+end
+
+def fun_l9_n911(x)
+ if (x < 1)
+ fun_l10_n919(x)
+ else
+ fun_l10_n879(x)
+ end
+end
+
+def fun_l9_n912(x)
+ if (x < 1)
+ fun_l10_n229(x)
+ else
+ fun_l10_n200(x)
+ end
+end
+
+def fun_l9_n913(x)
+ if (x < 1)
+ fun_l10_n502(x)
+ else
+ fun_l10_n820(x)
+ end
+end
+
+def fun_l9_n914(x)
+ if (x < 1)
+ fun_l10_n57(x)
+ else
+ fun_l10_n470(x)
+ end
+end
+
+def fun_l9_n915(x)
+ if (x < 1)
+ fun_l10_n909(x)
+ else
+ fun_l10_n845(x)
+ end
+end
+
+def fun_l9_n916(x)
+ if (x < 1)
+ fun_l10_n658(x)
+ else
+ fun_l10_n925(x)
+ end
+end
+
+def fun_l9_n917(x)
+ if (x < 1)
+ fun_l10_n886(x)
+ else
+ fun_l10_n285(x)
+ end
+end
+
+def fun_l9_n918(x)
+ if (x < 1)
+ fun_l10_n484(x)
+ else
+ fun_l10_n712(x)
+ end
+end
+
+def fun_l9_n919(x)
+ if (x < 1)
+ fun_l10_n299(x)
+ else
+ fun_l10_n144(x)
+ end
+end
+
+def fun_l9_n920(x)
+ if (x < 1)
+ fun_l10_n830(x)
+ else
+ fun_l10_n913(x)
+ end
+end
+
+def fun_l9_n921(x)
+ if (x < 1)
+ fun_l10_n306(x)
+ else
+ fun_l10_n242(x)
+ end
+end
+
+def fun_l9_n922(x)
+ if (x < 1)
+ fun_l10_n442(x)
+ else
+ fun_l10_n497(x)
+ end
+end
+
+def fun_l9_n923(x)
+ if (x < 1)
+ fun_l10_n22(x)
+ else
+ fun_l10_n376(x)
+ end
+end
+
+def fun_l9_n924(x)
+ if (x < 1)
+ fun_l10_n737(x)
+ else
+ fun_l10_n603(x)
+ end
+end
+
+def fun_l9_n925(x)
+ if (x < 1)
+ fun_l10_n300(x)
+ else
+ fun_l10_n974(x)
+ end
+end
+
+def fun_l9_n926(x)
+ if (x < 1)
+ fun_l10_n644(x)
+ else
+ fun_l10_n973(x)
+ end
+end
+
+def fun_l9_n927(x)
+ if (x < 1)
+ fun_l10_n413(x)
+ else
+ fun_l10_n353(x)
+ end
+end
+
+def fun_l9_n928(x)
+ if (x < 1)
+ fun_l10_n438(x)
+ else
+ fun_l10_n518(x)
+ end
+end
+
+def fun_l9_n929(x)
+ if (x < 1)
+ fun_l10_n30(x)
+ else
+ fun_l10_n207(x)
+ end
+end
+
+def fun_l9_n930(x)
+ if (x < 1)
+ fun_l10_n976(x)
+ else
+ fun_l10_n70(x)
+ end
+end
+
+def fun_l9_n931(x)
+ if (x < 1)
+ fun_l10_n5(x)
+ else
+ fun_l10_n37(x)
+ end
+end
+
+def fun_l9_n932(x)
+ if (x < 1)
+ fun_l10_n612(x)
+ else
+ fun_l10_n268(x)
+ end
+end
+
+def fun_l9_n933(x)
+ if (x < 1)
+ fun_l10_n81(x)
+ else
+ fun_l10_n373(x)
+ end
+end
+
+def fun_l9_n934(x)
+ if (x < 1)
+ fun_l10_n30(x)
+ else
+ fun_l10_n504(x)
+ end
+end
+
+def fun_l9_n935(x)
+ if (x < 1)
+ fun_l10_n131(x)
+ else
+ fun_l10_n617(x)
+ end
+end
+
+def fun_l9_n936(x)
+ if (x < 1)
+ fun_l10_n114(x)
+ else
+ fun_l10_n883(x)
+ end
+end
+
+def fun_l9_n937(x)
+ if (x < 1)
+ fun_l10_n859(x)
+ else
+ fun_l10_n788(x)
+ end
+end
+
+def fun_l9_n938(x)
+ if (x < 1)
+ fun_l10_n993(x)
+ else
+ fun_l10_n349(x)
+ end
+end
+
+def fun_l9_n939(x)
+ if (x < 1)
+ fun_l10_n775(x)
+ else
+ fun_l10_n195(x)
+ end
+end
+
+def fun_l9_n940(x)
+ if (x < 1)
+ fun_l10_n829(x)
+ else
+ fun_l10_n398(x)
+ end
+end
+
+def fun_l9_n941(x)
+ if (x < 1)
+ fun_l10_n906(x)
+ else
+ fun_l10_n529(x)
+ end
+end
+
+def fun_l9_n942(x)
+ if (x < 1)
+ fun_l10_n965(x)
+ else
+ fun_l10_n372(x)
+ end
+end
+
+def fun_l9_n943(x)
+ if (x < 1)
+ fun_l10_n953(x)
+ else
+ fun_l10_n397(x)
+ end
+end
+
+def fun_l9_n944(x)
+ if (x < 1)
+ fun_l10_n984(x)
+ else
+ fun_l10_n909(x)
+ end
+end
+
+def fun_l9_n945(x)
+ if (x < 1)
+ fun_l10_n649(x)
+ else
+ fun_l10_n304(x)
+ end
+end
+
+def fun_l9_n946(x)
+ if (x < 1)
+ fun_l10_n0(x)
+ else
+ fun_l10_n922(x)
+ end
+end
+
+def fun_l9_n947(x)
+ if (x < 1)
+ fun_l10_n460(x)
+ else
+ fun_l10_n215(x)
+ end
+end
+
+def fun_l9_n948(x)
+ if (x < 1)
+ fun_l10_n251(x)
+ else
+ fun_l10_n636(x)
+ end
+end
+
+def fun_l9_n949(x)
+ if (x < 1)
+ fun_l10_n14(x)
+ else
+ fun_l10_n494(x)
+ end
+end
+
+def fun_l9_n950(x)
+ if (x < 1)
+ fun_l10_n6(x)
+ else
+ fun_l10_n523(x)
+ end
+end
+
+def fun_l9_n951(x)
+ if (x < 1)
+ fun_l10_n705(x)
+ else
+ fun_l10_n689(x)
+ end
+end
+
+def fun_l9_n952(x)
+ if (x < 1)
+ fun_l10_n830(x)
+ else
+ fun_l10_n374(x)
+ end
+end
+
+def fun_l9_n953(x)
+ if (x < 1)
+ fun_l10_n451(x)
+ else
+ fun_l10_n87(x)
+ end
+end
+
+def fun_l9_n954(x)
+ if (x < 1)
+ fun_l10_n202(x)
+ else
+ fun_l10_n753(x)
+ end
+end
+
+def fun_l9_n955(x)
+ if (x < 1)
+ fun_l10_n384(x)
+ else
+ fun_l10_n216(x)
+ end
+end
+
+def fun_l9_n956(x)
+ if (x < 1)
+ fun_l10_n381(x)
+ else
+ fun_l10_n727(x)
+ end
+end
+
+def fun_l9_n957(x)
+ if (x < 1)
+ fun_l10_n876(x)
+ else
+ fun_l10_n760(x)
+ end
+end
+
+def fun_l9_n958(x)
+ if (x < 1)
+ fun_l10_n975(x)
+ else
+ fun_l10_n372(x)
+ end
+end
+
+def fun_l9_n959(x)
+ if (x < 1)
+ fun_l10_n714(x)
+ else
+ fun_l10_n680(x)
+ end
+end
+
+def fun_l9_n960(x)
+ if (x < 1)
+ fun_l10_n733(x)
+ else
+ fun_l10_n508(x)
+ end
+end
+
+def fun_l9_n961(x)
+ if (x < 1)
+ fun_l10_n319(x)
+ else
+ fun_l10_n90(x)
+ end
+end
+
+def fun_l9_n962(x)
+ if (x < 1)
+ fun_l10_n964(x)
+ else
+ fun_l10_n282(x)
+ end
+end
+
+def fun_l9_n963(x)
+ if (x < 1)
+ fun_l10_n179(x)
+ else
+ fun_l10_n324(x)
+ end
+end
+
+def fun_l9_n964(x)
+ if (x < 1)
+ fun_l10_n627(x)
+ else
+ fun_l10_n627(x)
+ end
+end
+
+def fun_l9_n965(x)
+ if (x < 1)
+ fun_l10_n7(x)
+ else
+ fun_l10_n375(x)
+ end
+end
+
+def fun_l9_n966(x)
+ if (x < 1)
+ fun_l10_n960(x)
+ else
+ fun_l10_n272(x)
+ end
+end
+
+def fun_l9_n967(x)
+ if (x < 1)
+ fun_l10_n869(x)
+ else
+ fun_l10_n737(x)
+ end
+end
+
+def fun_l9_n968(x)
+ if (x < 1)
+ fun_l10_n621(x)
+ else
+ fun_l10_n413(x)
+ end
+end
+
+def fun_l9_n969(x)
+ if (x < 1)
+ fun_l10_n258(x)
+ else
+ fun_l10_n105(x)
+ end
+end
+
+def fun_l9_n970(x)
+ if (x < 1)
+ fun_l10_n817(x)
+ else
+ fun_l10_n519(x)
+ end
+end
+
+def fun_l9_n971(x)
+ if (x < 1)
+ fun_l10_n462(x)
+ else
+ fun_l10_n335(x)
+ end
+end
+
+def fun_l9_n972(x)
+ if (x < 1)
+ fun_l10_n770(x)
+ else
+ fun_l10_n856(x)
+ end
+end
+
+def fun_l9_n973(x)
+ if (x < 1)
+ fun_l10_n607(x)
+ else
+ fun_l10_n142(x)
+ end
+end
+
+def fun_l9_n974(x)
+ if (x < 1)
+ fun_l10_n498(x)
+ else
+ fun_l10_n373(x)
+ end
+end
+
+def fun_l9_n975(x)
+ if (x < 1)
+ fun_l10_n239(x)
+ else
+ fun_l10_n895(x)
+ end
+end
+
+def fun_l9_n976(x)
+ if (x < 1)
+ fun_l10_n312(x)
+ else
+ fun_l10_n489(x)
+ end
+end
+
+def fun_l9_n977(x)
+ if (x < 1)
+ fun_l10_n595(x)
+ else
+ fun_l10_n674(x)
+ end
+end
+
+def fun_l9_n978(x)
+ if (x < 1)
+ fun_l10_n928(x)
+ else
+ fun_l10_n774(x)
+ end
+end
+
+def fun_l9_n979(x)
+ if (x < 1)
+ fun_l10_n964(x)
+ else
+ fun_l10_n860(x)
+ end
+end
+
+def fun_l9_n980(x)
+ if (x < 1)
+ fun_l10_n63(x)
+ else
+ fun_l10_n115(x)
+ end
+end
+
+def fun_l9_n981(x)
+ if (x < 1)
+ fun_l10_n930(x)
+ else
+ fun_l10_n559(x)
+ end
+end
+
+def fun_l9_n982(x)
+ if (x < 1)
+ fun_l10_n391(x)
+ else
+ fun_l10_n411(x)
+ end
+end
+
+def fun_l9_n983(x)
+ if (x < 1)
+ fun_l10_n71(x)
+ else
+ fun_l10_n632(x)
+ end
+end
+
+def fun_l9_n984(x)
+ if (x < 1)
+ fun_l10_n613(x)
+ else
+ fun_l10_n218(x)
+ end
+end
+
+def fun_l9_n985(x)
+ if (x < 1)
+ fun_l10_n263(x)
+ else
+ fun_l10_n858(x)
+ end
+end
+
+def fun_l9_n986(x)
+ if (x < 1)
+ fun_l10_n974(x)
+ else
+ fun_l10_n824(x)
+ end
+end
+
+def fun_l9_n987(x)
+ if (x < 1)
+ fun_l10_n636(x)
+ else
+ fun_l10_n55(x)
+ end
+end
+
+def fun_l9_n988(x)
+ if (x < 1)
+ fun_l10_n367(x)
+ else
+ fun_l10_n878(x)
+ end
+end
+
+def fun_l9_n989(x)
+ if (x < 1)
+ fun_l10_n316(x)
+ else
+ fun_l10_n800(x)
+ end
+end
+
+def fun_l9_n990(x)
+ if (x < 1)
+ fun_l10_n581(x)
+ else
+ fun_l10_n511(x)
+ end
+end
+
+def fun_l9_n991(x)
+ if (x < 1)
+ fun_l10_n667(x)
+ else
+ fun_l10_n566(x)
+ end
+end
+
+def fun_l9_n992(x)
+ if (x < 1)
+ fun_l10_n144(x)
+ else
+ fun_l10_n51(x)
+ end
+end
+
+def fun_l9_n993(x)
+ if (x < 1)
+ fun_l10_n652(x)
+ else
+ fun_l10_n436(x)
+ end
+end
+
+def fun_l9_n994(x)
+ if (x < 1)
+ fun_l10_n283(x)
+ else
+ fun_l10_n480(x)
+ end
+end
+
+def fun_l9_n995(x)
+ if (x < 1)
+ fun_l10_n908(x)
+ else
+ fun_l10_n86(x)
+ end
+end
+
+def fun_l9_n996(x)
+ if (x < 1)
+ fun_l10_n43(x)
+ else
+ fun_l10_n466(x)
+ end
+end
+
+def fun_l9_n997(x)
+ if (x < 1)
+ fun_l10_n968(x)
+ else
+ fun_l10_n97(x)
+ end
+end
+
+def fun_l9_n998(x)
+ if (x < 1)
+ fun_l10_n38(x)
+ else
+ fun_l10_n78(x)
+ end
+end
+
+def fun_l9_n999(x)
+ if (x < 1)
+ fun_l10_n947(x)
+ else
+ fun_l10_n322(x)
+ end
+end
+
+def fun_l10_n0(x)
+ if (x < 1)
+ fun_l11_n424(x)
+ else
+ fun_l11_n206(x)
+ end
+end
+
+def fun_l10_n1(x)
+ if (x < 1)
+ fun_l11_n263(x)
+ else
+ fun_l11_n886(x)
+ end
+end
+
+def fun_l10_n2(x)
+ if (x < 1)
+ fun_l11_n31(x)
+ else
+ fun_l11_n693(x)
+ end
+end
+
+def fun_l10_n3(x)
+ if (x < 1)
+ fun_l11_n924(x)
+ else
+ fun_l11_n585(x)
+ end
+end
+
+def fun_l10_n4(x)
+ if (x < 1)
+ fun_l11_n552(x)
+ else
+ fun_l11_n904(x)
+ end
+end
+
+def fun_l10_n5(x)
+ if (x < 1)
+ fun_l11_n912(x)
+ else
+ fun_l11_n127(x)
+ end
+end
+
+def fun_l10_n6(x)
+ if (x < 1)
+ fun_l11_n284(x)
+ else
+ fun_l11_n952(x)
+ end
+end
+
+def fun_l10_n7(x)
+ if (x < 1)
+ fun_l11_n294(x)
+ else
+ fun_l11_n527(x)
+ end
+end
+
+def fun_l10_n8(x)
+ if (x < 1)
+ fun_l11_n675(x)
+ else
+ fun_l11_n457(x)
+ end
+end
+
+def fun_l10_n9(x)
+ if (x < 1)
+ fun_l11_n908(x)
+ else
+ fun_l11_n963(x)
+ end
+end
+
+def fun_l10_n10(x)
+ if (x < 1)
+ fun_l11_n361(x)
+ else
+ fun_l11_n839(x)
+ end
+end
+
+def fun_l10_n11(x)
+ if (x < 1)
+ fun_l11_n402(x)
+ else
+ fun_l11_n228(x)
+ end
+end
+
+def fun_l10_n12(x)
+ if (x < 1)
+ fun_l11_n871(x)
+ else
+ fun_l11_n194(x)
+ end
+end
+
+def fun_l10_n13(x)
+ if (x < 1)
+ fun_l11_n255(x)
+ else
+ fun_l11_n816(x)
+ end
+end
+
+def fun_l10_n14(x)
+ if (x < 1)
+ fun_l11_n987(x)
+ else
+ fun_l11_n870(x)
+ end
+end
+
+def fun_l10_n15(x)
+ if (x < 1)
+ fun_l11_n538(x)
+ else
+ fun_l11_n837(x)
+ end
+end
+
+def fun_l10_n16(x)
+ if (x < 1)
+ fun_l11_n205(x)
+ else
+ fun_l11_n831(x)
+ end
+end
+
+def fun_l10_n17(x)
+ if (x < 1)
+ fun_l11_n737(x)
+ else
+ fun_l11_n242(x)
+ end
+end
+
+def fun_l10_n18(x)
+ if (x < 1)
+ fun_l11_n18(x)
+ else
+ fun_l11_n103(x)
+ end
+end
+
+def fun_l10_n19(x)
+ if (x < 1)
+ fun_l11_n928(x)
+ else
+ fun_l11_n300(x)
+ end
+end
+
+def fun_l10_n20(x)
+ if (x < 1)
+ fun_l11_n816(x)
+ else
+ fun_l11_n225(x)
+ end
+end
+
+def fun_l10_n21(x)
+ if (x < 1)
+ fun_l11_n52(x)
+ else
+ fun_l11_n901(x)
+ end
+end
+
+def fun_l10_n22(x)
+ if (x < 1)
+ fun_l11_n316(x)
+ else
+ fun_l11_n556(x)
+ end
+end
+
+def fun_l10_n23(x)
+ if (x < 1)
+ fun_l11_n103(x)
+ else
+ fun_l11_n223(x)
+ end
+end
+
+def fun_l10_n24(x)
+ if (x < 1)
+ fun_l11_n14(x)
+ else
+ fun_l11_n875(x)
+ end
+end
+
+def fun_l10_n25(x)
+ if (x < 1)
+ fun_l11_n359(x)
+ else
+ fun_l11_n504(x)
+ end
+end
+
+def fun_l10_n26(x)
+ if (x < 1)
+ fun_l11_n416(x)
+ else
+ fun_l11_n172(x)
+ end
+end
+
+def fun_l10_n27(x)
+ if (x < 1)
+ fun_l11_n142(x)
+ else
+ fun_l11_n238(x)
+ end
+end
+
+def fun_l10_n28(x)
+ if (x < 1)
+ fun_l11_n319(x)
+ else
+ fun_l11_n350(x)
+ end
+end
+
+def fun_l10_n29(x)
+ if (x < 1)
+ fun_l11_n53(x)
+ else
+ fun_l11_n162(x)
+ end
+end
+
+def fun_l10_n30(x)
+ if (x < 1)
+ fun_l11_n121(x)
+ else
+ fun_l11_n39(x)
+ end
+end
+
+def fun_l10_n31(x)
+ if (x < 1)
+ fun_l11_n447(x)
+ else
+ fun_l11_n259(x)
+ end
+end
+
+def fun_l10_n32(x)
+ if (x < 1)
+ fun_l11_n931(x)
+ else
+ fun_l11_n571(x)
+ end
+end
+
+def fun_l10_n33(x)
+ if (x < 1)
+ fun_l11_n705(x)
+ else
+ fun_l11_n544(x)
+ end
+end
+
+def fun_l10_n34(x)
+ if (x < 1)
+ fun_l11_n990(x)
+ else
+ fun_l11_n695(x)
+ end
+end
+
+def fun_l10_n35(x)
+ if (x < 1)
+ fun_l11_n197(x)
+ else
+ fun_l11_n391(x)
+ end
+end
+
+def fun_l10_n36(x)
+ if (x < 1)
+ fun_l11_n686(x)
+ else
+ fun_l11_n785(x)
+ end
+end
+
+def fun_l10_n37(x)
+ if (x < 1)
+ fun_l11_n466(x)
+ else
+ fun_l11_n783(x)
+ end
+end
+
+def fun_l10_n38(x)
+ if (x < 1)
+ fun_l11_n351(x)
+ else
+ fun_l11_n732(x)
+ end
+end
+
+def fun_l10_n39(x)
+ if (x < 1)
+ fun_l11_n22(x)
+ else
+ fun_l11_n995(x)
+ end
+end
+
+def fun_l10_n40(x)
+ if (x < 1)
+ fun_l11_n264(x)
+ else
+ fun_l11_n662(x)
+ end
+end
+
+def fun_l10_n41(x)
+ if (x < 1)
+ fun_l11_n91(x)
+ else
+ fun_l11_n673(x)
+ end
+end
+
+def fun_l10_n42(x)
+ if (x < 1)
+ fun_l11_n993(x)
+ else
+ fun_l11_n839(x)
+ end
+end
+
+def fun_l10_n43(x)
+ if (x < 1)
+ fun_l11_n158(x)
+ else
+ fun_l11_n774(x)
+ end
+end
+
+def fun_l10_n44(x)
+ if (x < 1)
+ fun_l11_n977(x)
+ else
+ fun_l11_n321(x)
+ end
+end
+
+def fun_l10_n45(x)
+ if (x < 1)
+ fun_l11_n121(x)
+ else
+ fun_l11_n184(x)
+ end
+end
+
+def fun_l10_n46(x)
+ if (x < 1)
+ fun_l11_n446(x)
+ else
+ fun_l11_n426(x)
+ end
+end
+
+def fun_l10_n47(x)
+ if (x < 1)
+ fun_l11_n524(x)
+ else
+ fun_l11_n123(x)
+ end
+end
+
+def fun_l10_n48(x)
+ if (x < 1)
+ fun_l11_n435(x)
+ else
+ fun_l11_n560(x)
+ end
+end
+
+def fun_l10_n49(x)
+ if (x < 1)
+ fun_l11_n94(x)
+ else
+ fun_l11_n731(x)
+ end
+end
+
+def fun_l10_n50(x)
+ if (x < 1)
+ fun_l11_n177(x)
+ else
+ fun_l11_n902(x)
+ end
+end
+
+def fun_l10_n51(x)
+ if (x < 1)
+ fun_l11_n648(x)
+ else
+ fun_l11_n807(x)
+ end
+end
+
+def fun_l10_n52(x)
+ if (x < 1)
+ fun_l11_n414(x)
+ else
+ fun_l11_n335(x)
+ end
+end
+
+def fun_l10_n53(x)
+ if (x < 1)
+ fun_l11_n754(x)
+ else
+ fun_l11_n789(x)
+ end
+end
+
+def fun_l10_n54(x)
+ if (x < 1)
+ fun_l11_n843(x)
+ else
+ fun_l11_n980(x)
+ end
+end
+
+def fun_l10_n55(x)
+ if (x < 1)
+ fun_l11_n706(x)
+ else
+ fun_l11_n160(x)
+ end
+end
+
+def fun_l10_n56(x)
+ if (x < 1)
+ fun_l11_n564(x)
+ else
+ fun_l11_n672(x)
+ end
+end
+
+def fun_l10_n57(x)
+ if (x < 1)
+ fun_l11_n158(x)
+ else
+ fun_l11_n154(x)
+ end
+end
+
+def fun_l10_n58(x)
+ if (x < 1)
+ fun_l11_n120(x)
+ else
+ fun_l11_n970(x)
+ end
+end
+
+def fun_l10_n59(x)
+ if (x < 1)
+ fun_l11_n561(x)
+ else
+ fun_l11_n872(x)
+ end
+end
+
+def fun_l10_n60(x)
+ if (x < 1)
+ fun_l11_n93(x)
+ else
+ fun_l11_n723(x)
+ end
+end
+
+def fun_l10_n61(x)
+ if (x < 1)
+ fun_l11_n921(x)
+ else
+ fun_l11_n578(x)
+ end
+end
+
+def fun_l10_n62(x)
+ if (x < 1)
+ fun_l11_n527(x)
+ else
+ fun_l11_n118(x)
+ end
+end
+
+def fun_l10_n63(x)
+ if (x < 1)
+ fun_l11_n225(x)
+ else
+ fun_l11_n937(x)
+ end
+end
+
+def fun_l10_n64(x)
+ if (x < 1)
+ fun_l11_n634(x)
+ else
+ fun_l11_n415(x)
+ end
+end
+
+def fun_l10_n65(x)
+ if (x < 1)
+ fun_l11_n718(x)
+ else
+ fun_l11_n958(x)
+ end
+end
+
+def fun_l10_n66(x)
+ if (x < 1)
+ fun_l11_n972(x)
+ else
+ fun_l11_n453(x)
+ end
+end
+
+def fun_l10_n67(x)
+ if (x < 1)
+ fun_l11_n85(x)
+ else
+ fun_l11_n934(x)
+ end
+end
+
+def fun_l10_n68(x)
+ if (x < 1)
+ fun_l11_n386(x)
+ else
+ fun_l11_n138(x)
+ end
+end
+
+def fun_l10_n69(x)
+ if (x < 1)
+ fun_l11_n312(x)
+ else
+ fun_l11_n62(x)
+ end
+end
+
+def fun_l10_n70(x)
+ if (x < 1)
+ fun_l11_n591(x)
+ else
+ fun_l11_n906(x)
+ end
+end
+
+def fun_l10_n71(x)
+ if (x < 1)
+ fun_l11_n571(x)
+ else
+ fun_l11_n429(x)
+ end
+end
+
+def fun_l10_n72(x)
+ if (x < 1)
+ fun_l11_n996(x)
+ else
+ fun_l11_n932(x)
+ end
+end
+
+def fun_l10_n73(x)
+ if (x < 1)
+ fun_l11_n691(x)
+ else
+ fun_l11_n116(x)
+ end
+end
+
+def fun_l10_n74(x)
+ if (x < 1)
+ fun_l11_n670(x)
+ else
+ fun_l11_n865(x)
+ end
+end
+
+def fun_l10_n75(x)
+ if (x < 1)
+ fun_l11_n568(x)
+ else
+ fun_l11_n164(x)
+ end
+end
+
+def fun_l10_n76(x)
+ if (x < 1)
+ fun_l11_n747(x)
+ else
+ fun_l11_n235(x)
+ end
+end
+
+def fun_l10_n77(x)
+ if (x < 1)
+ fun_l11_n402(x)
+ else
+ fun_l11_n670(x)
+ end
+end
+
+def fun_l10_n78(x)
+ if (x < 1)
+ fun_l11_n525(x)
+ else
+ fun_l11_n675(x)
+ end
+end
+
+def fun_l10_n79(x)
+ if (x < 1)
+ fun_l11_n6(x)
+ else
+ fun_l11_n238(x)
+ end
+end
+
+def fun_l10_n80(x)
+ if (x < 1)
+ fun_l11_n905(x)
+ else
+ fun_l11_n170(x)
+ end
+end
+
+def fun_l10_n81(x)
+ if (x < 1)
+ fun_l11_n610(x)
+ else
+ fun_l11_n527(x)
+ end
+end
+
+def fun_l10_n82(x)
+ if (x < 1)
+ fun_l11_n796(x)
+ else
+ fun_l11_n749(x)
+ end
+end
+
+def fun_l10_n83(x)
+ if (x < 1)
+ fun_l11_n634(x)
+ else
+ fun_l11_n49(x)
+ end
+end
+
+def fun_l10_n84(x)
+ if (x < 1)
+ fun_l11_n376(x)
+ else
+ fun_l11_n852(x)
+ end
+end
+
+def fun_l10_n85(x)
+ if (x < 1)
+ fun_l11_n593(x)
+ else
+ fun_l11_n752(x)
+ end
+end
+
+def fun_l10_n86(x)
+ if (x < 1)
+ fun_l11_n821(x)
+ else
+ fun_l11_n892(x)
+ end
+end
+
+def fun_l10_n87(x)
+ if (x < 1)
+ fun_l11_n950(x)
+ else
+ fun_l11_n309(x)
+ end
+end
+
+def fun_l10_n88(x)
+ if (x < 1)
+ fun_l11_n360(x)
+ else
+ fun_l11_n134(x)
+ end
+end
+
+def fun_l10_n89(x)
+ if (x < 1)
+ fun_l11_n935(x)
+ else
+ fun_l11_n241(x)
+ end
+end
+
+def fun_l10_n90(x)
+ if (x < 1)
+ fun_l11_n524(x)
+ else
+ fun_l11_n120(x)
+ end
+end
+
+def fun_l10_n91(x)
+ if (x < 1)
+ fun_l11_n25(x)
+ else
+ fun_l11_n357(x)
+ end
+end
+
+def fun_l10_n92(x)
+ if (x < 1)
+ fun_l11_n248(x)
+ else
+ fun_l11_n212(x)
+ end
+end
+
+def fun_l10_n93(x)
+ if (x < 1)
+ fun_l11_n860(x)
+ else
+ fun_l11_n733(x)
+ end
+end
+
+def fun_l10_n94(x)
+ if (x < 1)
+ fun_l11_n147(x)
+ else
+ fun_l11_n907(x)
+ end
+end
+
+def fun_l10_n95(x)
+ if (x < 1)
+ fun_l11_n758(x)
+ else
+ fun_l11_n62(x)
+ end
+end
+
+def fun_l10_n96(x)
+ if (x < 1)
+ fun_l11_n205(x)
+ else
+ fun_l11_n54(x)
+ end
+end
+
+def fun_l10_n97(x)
+ if (x < 1)
+ fun_l11_n8(x)
+ else
+ fun_l11_n813(x)
+ end
+end
+
+def fun_l10_n98(x)
+ if (x < 1)
+ fun_l11_n296(x)
+ else
+ fun_l11_n693(x)
+ end
+end
+
+def fun_l10_n99(x)
+ if (x < 1)
+ fun_l11_n181(x)
+ else
+ fun_l11_n709(x)
+ end
+end
+
+def fun_l10_n100(x)
+ if (x < 1)
+ fun_l11_n109(x)
+ else
+ fun_l11_n560(x)
+ end
+end
+
+def fun_l10_n101(x)
+ if (x < 1)
+ fun_l11_n290(x)
+ else
+ fun_l11_n693(x)
+ end
+end
+
+def fun_l10_n102(x)
+ if (x < 1)
+ fun_l11_n826(x)
+ else
+ fun_l11_n813(x)
+ end
+end
+
+def fun_l10_n103(x)
+ if (x < 1)
+ fun_l11_n3(x)
+ else
+ fun_l11_n787(x)
+ end
+end
+
+def fun_l10_n104(x)
+ if (x < 1)
+ fun_l11_n477(x)
+ else
+ fun_l11_n336(x)
+ end
+end
+
+def fun_l10_n105(x)
+ if (x < 1)
+ fun_l11_n131(x)
+ else
+ fun_l11_n500(x)
+ end
+end
+
+def fun_l10_n106(x)
+ if (x < 1)
+ fun_l11_n278(x)
+ else
+ fun_l11_n948(x)
+ end
+end
+
+def fun_l10_n107(x)
+ if (x < 1)
+ fun_l11_n950(x)
+ else
+ fun_l11_n848(x)
+ end
+end
+
+def fun_l10_n108(x)
+ if (x < 1)
+ fun_l11_n490(x)
+ else
+ fun_l11_n1(x)
+ end
+end
+
+def fun_l10_n109(x)
+ if (x < 1)
+ fun_l11_n297(x)
+ else
+ fun_l11_n902(x)
+ end
+end
+
+def fun_l10_n110(x)
+ if (x < 1)
+ fun_l11_n808(x)
+ else
+ fun_l11_n697(x)
+ end
+end
+
+def fun_l10_n111(x)
+ if (x < 1)
+ fun_l11_n330(x)
+ else
+ fun_l11_n424(x)
+ end
+end
+
+def fun_l10_n112(x)
+ if (x < 1)
+ fun_l11_n254(x)
+ else
+ fun_l11_n662(x)
+ end
+end
+
+def fun_l10_n113(x)
+ if (x < 1)
+ fun_l11_n811(x)
+ else
+ fun_l11_n196(x)
+ end
+end
+
+def fun_l10_n114(x)
+ if (x < 1)
+ fun_l11_n868(x)
+ else
+ fun_l11_n812(x)
+ end
+end
+
+def fun_l10_n115(x)
+ if (x < 1)
+ fun_l11_n308(x)
+ else
+ fun_l11_n184(x)
+ end
+end
+
+def fun_l10_n116(x)
+ if (x < 1)
+ fun_l11_n64(x)
+ else
+ fun_l11_n906(x)
+ end
+end
+
+def fun_l10_n117(x)
+ if (x < 1)
+ fun_l11_n339(x)
+ else
+ fun_l11_n338(x)
+ end
+end
+
+def fun_l10_n118(x)
+ if (x < 1)
+ fun_l11_n993(x)
+ else
+ fun_l11_n915(x)
+ end
+end
+
+def fun_l10_n119(x)
+ if (x < 1)
+ fun_l11_n729(x)
+ else
+ fun_l11_n472(x)
+ end
+end
+
+def fun_l10_n120(x)
+ if (x < 1)
+ fun_l11_n646(x)
+ else
+ fun_l11_n83(x)
+ end
+end
+
+def fun_l10_n121(x)
+ if (x < 1)
+ fun_l11_n294(x)
+ else
+ fun_l11_n208(x)
+ end
+end
+
+def fun_l10_n122(x)
+ if (x < 1)
+ fun_l11_n309(x)
+ else
+ fun_l11_n52(x)
+ end
+end
+
+def fun_l10_n123(x)
+ if (x < 1)
+ fun_l11_n5(x)
+ else
+ fun_l11_n124(x)
+ end
+end
+
+def fun_l10_n124(x)
+ if (x < 1)
+ fun_l11_n288(x)
+ else
+ fun_l11_n731(x)
+ end
+end
+
+def fun_l10_n125(x)
+ if (x < 1)
+ fun_l11_n835(x)
+ else
+ fun_l11_n381(x)
+ end
+end
+
+def fun_l10_n126(x)
+ if (x < 1)
+ fun_l11_n269(x)
+ else
+ fun_l11_n104(x)
+ end
+end
+
+def fun_l10_n127(x)
+ if (x < 1)
+ fun_l11_n377(x)
+ else
+ fun_l11_n650(x)
+ end
+end
+
+def fun_l10_n128(x)
+ if (x < 1)
+ fun_l11_n749(x)
+ else
+ fun_l11_n223(x)
+ end
+end
+
+def fun_l10_n129(x)
+ if (x < 1)
+ fun_l11_n493(x)
+ else
+ fun_l11_n834(x)
+ end
+end
+
+def fun_l10_n130(x)
+ if (x < 1)
+ fun_l11_n826(x)
+ else
+ fun_l11_n857(x)
+ end
+end
+
+def fun_l10_n131(x)
+ if (x < 1)
+ fun_l11_n19(x)
+ else
+ fun_l11_n932(x)
+ end
+end
+
+def fun_l10_n132(x)
+ if (x < 1)
+ fun_l11_n585(x)
+ else
+ fun_l11_n13(x)
+ end
+end
+
+def fun_l10_n133(x)
+ if (x < 1)
+ fun_l11_n91(x)
+ else
+ fun_l11_n748(x)
+ end
+end
+
+def fun_l10_n134(x)
+ if (x < 1)
+ fun_l11_n218(x)
+ else
+ fun_l11_n343(x)
+ end
+end
+
+def fun_l10_n135(x)
+ if (x < 1)
+ fun_l11_n386(x)
+ else
+ fun_l11_n655(x)
+ end
+end
+
+def fun_l10_n136(x)
+ if (x < 1)
+ fun_l11_n43(x)
+ else
+ fun_l11_n964(x)
+ end
+end
+
+def fun_l10_n137(x)
+ if (x < 1)
+ fun_l11_n50(x)
+ else
+ fun_l11_n836(x)
+ end
+end
+
+def fun_l10_n138(x)
+ if (x < 1)
+ fun_l11_n915(x)
+ else
+ fun_l11_n262(x)
+ end
+end
+
+def fun_l10_n139(x)
+ if (x < 1)
+ fun_l11_n187(x)
+ else
+ fun_l11_n321(x)
+ end
+end
+
+def fun_l10_n140(x)
+ if (x < 1)
+ fun_l11_n473(x)
+ else
+ fun_l11_n771(x)
+ end
+end
+
+def fun_l10_n141(x)
+ if (x < 1)
+ fun_l11_n125(x)
+ else
+ fun_l11_n118(x)
+ end
+end
+
+def fun_l10_n142(x)
+ if (x < 1)
+ fun_l11_n993(x)
+ else
+ fun_l11_n178(x)
+ end
+end
+
+def fun_l10_n143(x)
+ if (x < 1)
+ fun_l11_n223(x)
+ else
+ fun_l11_n509(x)
+ end
+end
+
+def fun_l10_n144(x)
+ if (x < 1)
+ fun_l11_n62(x)
+ else
+ fun_l11_n455(x)
+ end
+end
+
+def fun_l10_n145(x)
+ if (x < 1)
+ fun_l11_n155(x)
+ else
+ fun_l11_n486(x)
+ end
+end
+
+def fun_l10_n146(x)
+ if (x < 1)
+ fun_l11_n414(x)
+ else
+ fun_l11_n384(x)
+ end
+end
+
+def fun_l10_n147(x)
+ if (x < 1)
+ fun_l11_n840(x)
+ else
+ fun_l11_n287(x)
+ end
+end
+
+def fun_l10_n148(x)
+ if (x < 1)
+ fun_l11_n605(x)
+ else
+ fun_l11_n283(x)
+ end
+end
+
+def fun_l10_n149(x)
+ if (x < 1)
+ fun_l11_n101(x)
+ else
+ fun_l11_n710(x)
+ end
+end
+
+def fun_l10_n150(x)
+ if (x < 1)
+ fun_l11_n51(x)
+ else
+ fun_l11_n304(x)
+ end
+end
+
+def fun_l10_n151(x)
+ if (x < 1)
+ fun_l11_n501(x)
+ else
+ fun_l11_n789(x)
+ end
+end
+
+def fun_l10_n152(x)
+ if (x < 1)
+ fun_l11_n265(x)
+ else
+ fun_l11_n304(x)
+ end
+end
+
+def fun_l10_n153(x)
+ if (x < 1)
+ fun_l11_n500(x)
+ else
+ fun_l11_n32(x)
+ end
+end
+
+def fun_l10_n154(x)
+ if (x < 1)
+ fun_l11_n464(x)
+ else
+ fun_l11_n212(x)
+ end
+end
+
+def fun_l10_n155(x)
+ if (x < 1)
+ fun_l11_n729(x)
+ else
+ fun_l11_n805(x)
+ end
+end
+
+def fun_l10_n156(x)
+ if (x < 1)
+ fun_l11_n225(x)
+ else
+ fun_l11_n241(x)
+ end
+end
+
+def fun_l10_n157(x)
+ if (x < 1)
+ fun_l11_n600(x)
+ else
+ fun_l11_n424(x)
+ end
+end
+
+def fun_l10_n158(x)
+ if (x < 1)
+ fun_l11_n684(x)
+ else
+ fun_l11_n898(x)
+ end
+end
+
+def fun_l10_n159(x)
+ if (x < 1)
+ fun_l11_n354(x)
+ else
+ fun_l11_n909(x)
+ end
+end
+
+def fun_l10_n160(x)
+ if (x < 1)
+ fun_l11_n879(x)
+ else
+ fun_l11_n917(x)
+ end
+end
+
+def fun_l10_n161(x)
+ if (x < 1)
+ fun_l11_n547(x)
+ else
+ fun_l11_n333(x)
+ end
+end
+
+def fun_l10_n162(x)
+ if (x < 1)
+ fun_l11_n769(x)
+ else
+ fun_l11_n846(x)
+ end
+end
+
+def fun_l10_n163(x)
+ if (x < 1)
+ fun_l11_n292(x)
+ else
+ fun_l11_n526(x)
+ end
+end
+
+def fun_l10_n164(x)
+ if (x < 1)
+ fun_l11_n355(x)
+ else
+ fun_l11_n783(x)
+ end
+end
+
+def fun_l10_n165(x)
+ if (x < 1)
+ fun_l11_n413(x)
+ else
+ fun_l11_n570(x)
+ end
+end
+
+def fun_l10_n166(x)
+ if (x < 1)
+ fun_l11_n535(x)
+ else
+ fun_l11_n953(x)
+ end
+end
+
+def fun_l10_n167(x)
+ if (x < 1)
+ fun_l11_n661(x)
+ else
+ fun_l11_n178(x)
+ end
+end
+
+def fun_l10_n168(x)
+ if (x < 1)
+ fun_l11_n749(x)
+ else
+ fun_l11_n853(x)
+ end
+end
+
+def fun_l10_n169(x)
+ if (x < 1)
+ fun_l11_n208(x)
+ else
+ fun_l11_n378(x)
+ end
+end
+
+def fun_l10_n170(x)
+ if (x < 1)
+ fun_l11_n385(x)
+ else
+ fun_l11_n811(x)
+ end
+end
+
+def fun_l10_n171(x)
+ if (x < 1)
+ fun_l11_n960(x)
+ else
+ fun_l11_n631(x)
+ end
+end
+
+def fun_l10_n172(x)
+ if (x < 1)
+ fun_l11_n104(x)
+ else
+ fun_l11_n667(x)
+ end
+end
+
+def fun_l10_n173(x)
+ if (x < 1)
+ fun_l11_n532(x)
+ else
+ fun_l11_n169(x)
+ end
+end
+
+def fun_l10_n174(x)
+ if (x < 1)
+ fun_l11_n794(x)
+ else
+ fun_l11_n761(x)
+ end
+end
+
+def fun_l10_n175(x)
+ if (x < 1)
+ fun_l11_n306(x)
+ else
+ fun_l11_n369(x)
+ end
+end
+
+def fun_l10_n176(x)
+ if (x < 1)
+ fun_l11_n693(x)
+ else
+ fun_l11_n254(x)
+ end
+end
+
+def fun_l10_n177(x)
+ if (x < 1)
+ fun_l11_n927(x)
+ else
+ fun_l11_n194(x)
+ end
+end
+
+def fun_l10_n178(x)
+ if (x < 1)
+ fun_l11_n197(x)
+ else
+ fun_l11_n136(x)
+ end
+end
+
+def fun_l10_n179(x)
+ if (x < 1)
+ fun_l11_n482(x)
+ else
+ fun_l11_n182(x)
+ end
+end
+
+def fun_l10_n180(x)
+ if (x < 1)
+ fun_l11_n20(x)
+ else
+ fun_l11_n978(x)
+ end
+end
+
+def fun_l10_n181(x)
+ if (x < 1)
+ fun_l11_n101(x)
+ else
+ fun_l11_n92(x)
+ end
+end
+
+def fun_l10_n182(x)
+ if (x < 1)
+ fun_l11_n179(x)
+ else
+ fun_l11_n634(x)
+ end
+end
+
+def fun_l10_n183(x)
+ if (x < 1)
+ fun_l11_n221(x)
+ else
+ fun_l11_n275(x)
+ end
+end
+
+def fun_l10_n184(x)
+ if (x < 1)
+ fun_l11_n826(x)
+ else
+ fun_l11_n630(x)
+ end
+end
+
+def fun_l10_n185(x)
+ if (x < 1)
+ fun_l11_n706(x)
+ else
+ fun_l11_n30(x)
+ end
+end
+
+def fun_l10_n186(x)
+ if (x < 1)
+ fun_l11_n491(x)
+ else
+ fun_l11_n182(x)
+ end
+end
+
+def fun_l10_n187(x)
+ if (x < 1)
+ fun_l11_n404(x)
+ else
+ fun_l11_n812(x)
+ end
+end
+
+def fun_l10_n188(x)
+ if (x < 1)
+ fun_l11_n41(x)
+ else
+ fun_l11_n39(x)
+ end
+end
+
+def fun_l10_n189(x)
+ if (x < 1)
+ fun_l11_n620(x)
+ else
+ fun_l11_n659(x)
+ end
+end
+
+def fun_l10_n190(x)
+ if (x < 1)
+ fun_l11_n717(x)
+ else
+ fun_l11_n846(x)
+ end
+end
+
+def fun_l10_n191(x)
+ if (x < 1)
+ fun_l11_n328(x)
+ else
+ fun_l11_n223(x)
+ end
+end
+
+def fun_l10_n192(x)
+ if (x < 1)
+ fun_l11_n280(x)
+ else
+ fun_l11_n360(x)
+ end
+end
+
+def fun_l10_n193(x)
+ if (x < 1)
+ fun_l11_n798(x)
+ else
+ fun_l11_n303(x)
+ end
+end
+
+def fun_l10_n194(x)
+ if (x < 1)
+ fun_l11_n617(x)
+ else
+ fun_l11_n207(x)
+ end
+end
+
+def fun_l10_n195(x)
+ if (x < 1)
+ fun_l11_n918(x)
+ else
+ fun_l11_n398(x)
+ end
+end
+
+def fun_l10_n196(x)
+ if (x < 1)
+ fun_l11_n998(x)
+ else
+ fun_l11_n31(x)
+ end
+end
+
+def fun_l10_n197(x)
+ if (x < 1)
+ fun_l11_n740(x)
+ else
+ fun_l11_n220(x)
+ end
+end
+
+def fun_l10_n198(x)
+ if (x < 1)
+ fun_l11_n847(x)
+ else
+ fun_l11_n915(x)
+ end
+end
+
+def fun_l10_n199(x)
+ if (x < 1)
+ fun_l11_n946(x)
+ else
+ fun_l11_n300(x)
+ end
+end
+
+def fun_l10_n200(x)
+ if (x < 1)
+ fun_l11_n494(x)
+ else
+ fun_l11_n316(x)
+ end
+end
+
+def fun_l10_n201(x)
+ if (x < 1)
+ fun_l11_n871(x)
+ else
+ fun_l11_n310(x)
+ end
+end
+
+def fun_l10_n202(x)
+ if (x < 1)
+ fun_l11_n660(x)
+ else
+ fun_l11_n538(x)
+ end
+end
+
+def fun_l10_n203(x)
+ if (x < 1)
+ fun_l11_n478(x)
+ else
+ fun_l11_n618(x)
+ end
+end
+
+def fun_l10_n204(x)
+ if (x < 1)
+ fun_l11_n134(x)
+ else
+ fun_l11_n224(x)
+ end
+end
+
+def fun_l10_n205(x)
+ if (x < 1)
+ fun_l11_n128(x)
+ else
+ fun_l11_n279(x)
+ end
+end
+
+def fun_l10_n206(x)
+ if (x < 1)
+ fun_l11_n718(x)
+ else
+ fun_l11_n514(x)
+ end
+end
+
+def fun_l10_n207(x)
+ if (x < 1)
+ fun_l11_n466(x)
+ else
+ fun_l11_n979(x)
+ end
+end
+
+def fun_l10_n208(x)
+ if (x < 1)
+ fun_l11_n990(x)
+ else
+ fun_l11_n881(x)
+ end
+end
+
+def fun_l10_n209(x)
+ if (x < 1)
+ fun_l11_n860(x)
+ else
+ fun_l11_n995(x)
+ end
+end
+
+def fun_l10_n210(x)
+ if (x < 1)
+ fun_l11_n269(x)
+ else
+ fun_l11_n846(x)
+ end
+end
+
+def fun_l10_n211(x)
+ if (x < 1)
+ fun_l11_n155(x)
+ else
+ fun_l11_n97(x)
+ end
+end
+
+def fun_l10_n212(x)
+ if (x < 1)
+ fun_l11_n377(x)
+ else
+ fun_l11_n749(x)
+ end
+end
+
+def fun_l10_n213(x)
+ if (x < 1)
+ fun_l11_n647(x)
+ else
+ fun_l11_n276(x)
+ end
+end
+
+def fun_l10_n214(x)
+ if (x < 1)
+ fun_l11_n290(x)
+ else
+ fun_l11_n325(x)
+ end
+end
+
+def fun_l10_n215(x)
+ if (x < 1)
+ fun_l11_n445(x)
+ else
+ fun_l11_n538(x)
+ end
+end
+
+def fun_l10_n216(x)
+ if (x < 1)
+ fun_l11_n433(x)
+ else
+ fun_l11_n937(x)
+ end
+end
+
+def fun_l10_n217(x)
+ if (x < 1)
+ fun_l11_n288(x)
+ else
+ fun_l11_n897(x)
+ end
+end
+
+def fun_l10_n218(x)
+ if (x < 1)
+ fun_l11_n386(x)
+ else
+ fun_l11_n545(x)
+ end
+end
+
+def fun_l10_n219(x)
+ if (x < 1)
+ fun_l11_n678(x)
+ else
+ fun_l11_n433(x)
+ end
+end
+
+def fun_l10_n220(x)
+ if (x < 1)
+ fun_l11_n203(x)
+ else
+ fun_l11_n163(x)
+ end
+end
+
+def fun_l10_n221(x)
+ if (x < 1)
+ fun_l11_n134(x)
+ else
+ fun_l11_n545(x)
+ end
+end
+
+def fun_l10_n222(x)
+ if (x < 1)
+ fun_l11_n556(x)
+ else
+ fun_l11_n537(x)
+ end
+end
+
+def fun_l10_n223(x)
+ if (x < 1)
+ fun_l11_n743(x)
+ else
+ fun_l11_n88(x)
+ end
+end
+
+def fun_l10_n224(x)
+ if (x < 1)
+ fun_l11_n678(x)
+ else
+ fun_l11_n643(x)
+ end
+end
+
+def fun_l10_n225(x)
+ if (x < 1)
+ fun_l11_n13(x)
+ else
+ fun_l11_n219(x)
+ end
+end
+
+def fun_l10_n226(x)
+ if (x < 1)
+ fun_l11_n454(x)
+ else
+ fun_l11_n691(x)
+ end
+end
+
+def fun_l10_n227(x)
+ if (x < 1)
+ fun_l11_n117(x)
+ else
+ fun_l11_n433(x)
+ end
+end
+
+def fun_l10_n228(x)
+ if (x < 1)
+ fun_l11_n559(x)
+ else
+ fun_l11_n884(x)
+ end
+end
+
+def fun_l10_n229(x)
+ if (x < 1)
+ fun_l11_n58(x)
+ else
+ fun_l11_n620(x)
+ end
+end
+
+def fun_l10_n230(x)
+ if (x < 1)
+ fun_l11_n688(x)
+ else
+ fun_l11_n76(x)
+ end
+end
+
+def fun_l10_n231(x)
+ if (x < 1)
+ fun_l11_n726(x)
+ else
+ fun_l11_n644(x)
+ end
+end
+
+def fun_l10_n232(x)
+ if (x < 1)
+ fun_l11_n918(x)
+ else
+ fun_l11_n0(x)
+ end
+end
+
+def fun_l10_n233(x)
+ if (x < 1)
+ fun_l11_n761(x)
+ else
+ fun_l11_n487(x)
+ end
+end
+
+def fun_l10_n234(x)
+ if (x < 1)
+ fun_l11_n657(x)
+ else
+ fun_l11_n107(x)
+ end
+end
+
+def fun_l10_n235(x)
+ if (x < 1)
+ fun_l11_n897(x)
+ else
+ fun_l11_n881(x)
+ end
+end
+
+def fun_l10_n236(x)
+ if (x < 1)
+ fun_l11_n851(x)
+ else
+ fun_l11_n593(x)
+ end
+end
+
+def fun_l10_n237(x)
+ if (x < 1)
+ fun_l11_n319(x)
+ else
+ fun_l11_n983(x)
+ end
+end
+
+def fun_l10_n238(x)
+ if (x < 1)
+ fun_l11_n648(x)
+ else
+ fun_l11_n870(x)
+ end
+end
+
+def fun_l10_n239(x)
+ if (x < 1)
+ fun_l11_n195(x)
+ else
+ fun_l11_n378(x)
+ end
+end
+
+def fun_l10_n240(x)
+ if (x < 1)
+ fun_l11_n398(x)
+ else
+ fun_l11_n284(x)
+ end
+end
+
+def fun_l10_n241(x)
+ if (x < 1)
+ fun_l11_n118(x)
+ else
+ fun_l11_n417(x)
+ end
+end
+
+def fun_l10_n242(x)
+ if (x < 1)
+ fun_l11_n220(x)
+ else
+ fun_l11_n445(x)
+ end
+end
+
+def fun_l10_n243(x)
+ if (x < 1)
+ fun_l11_n783(x)
+ else
+ fun_l11_n989(x)
+ end
+end
+
+def fun_l10_n244(x)
+ if (x < 1)
+ fun_l11_n229(x)
+ else
+ fun_l11_n439(x)
+ end
+end
+
+def fun_l10_n245(x)
+ if (x < 1)
+ fun_l11_n78(x)
+ else
+ fun_l11_n272(x)
+ end
+end
+
+def fun_l10_n246(x)
+ if (x < 1)
+ fun_l11_n481(x)
+ else
+ fun_l11_n499(x)
+ end
+end
+
+def fun_l10_n247(x)
+ if (x < 1)
+ fun_l11_n213(x)
+ else
+ fun_l11_n471(x)
+ end
+end
+
+def fun_l10_n248(x)
+ if (x < 1)
+ fun_l11_n45(x)
+ else
+ fun_l11_n919(x)
+ end
+end
+
+def fun_l10_n249(x)
+ if (x < 1)
+ fun_l11_n13(x)
+ else
+ fun_l11_n526(x)
+ end
+end
+
+def fun_l10_n250(x)
+ if (x < 1)
+ fun_l11_n997(x)
+ else
+ fun_l11_n112(x)
+ end
+end
+
+def fun_l10_n251(x)
+ if (x < 1)
+ fun_l11_n662(x)
+ else
+ fun_l11_n950(x)
+ end
+end
+
+def fun_l10_n252(x)
+ if (x < 1)
+ fun_l11_n272(x)
+ else
+ fun_l11_n345(x)
+ end
+end
+
+def fun_l10_n253(x)
+ if (x < 1)
+ fun_l11_n28(x)
+ else
+ fun_l11_n24(x)
+ end
+end
+
+def fun_l10_n254(x)
+ if (x < 1)
+ fun_l11_n326(x)
+ else
+ fun_l11_n907(x)
+ end
+end
+
+def fun_l10_n255(x)
+ if (x < 1)
+ fun_l11_n612(x)
+ else
+ fun_l11_n790(x)
+ end
+end
+
+def fun_l10_n256(x)
+ if (x < 1)
+ fun_l11_n50(x)
+ else
+ fun_l11_n16(x)
+ end
+end
+
+def fun_l10_n257(x)
+ if (x < 1)
+ fun_l11_n350(x)
+ else
+ fun_l11_n151(x)
+ end
+end
+
+def fun_l10_n258(x)
+ if (x < 1)
+ fun_l11_n290(x)
+ else
+ fun_l11_n113(x)
+ end
+end
+
+def fun_l10_n259(x)
+ if (x < 1)
+ fun_l11_n751(x)
+ else
+ fun_l11_n16(x)
+ end
+end
+
+def fun_l10_n260(x)
+ if (x < 1)
+ fun_l11_n337(x)
+ else
+ fun_l11_n516(x)
+ end
+end
+
+def fun_l10_n261(x)
+ if (x < 1)
+ fun_l11_n530(x)
+ else
+ fun_l11_n19(x)
+ end
+end
+
+def fun_l10_n262(x)
+ if (x < 1)
+ fun_l11_n977(x)
+ else
+ fun_l11_n529(x)
+ end
+end
+
+def fun_l10_n263(x)
+ if (x < 1)
+ fun_l11_n769(x)
+ else
+ fun_l11_n723(x)
+ end
+end
+
+def fun_l10_n264(x)
+ if (x < 1)
+ fun_l11_n673(x)
+ else
+ fun_l11_n771(x)
+ end
+end
+
+def fun_l10_n265(x)
+ if (x < 1)
+ fun_l11_n28(x)
+ else
+ fun_l11_n427(x)
+ end
+end
+
+def fun_l10_n266(x)
+ if (x < 1)
+ fun_l11_n92(x)
+ else
+ fun_l11_n590(x)
+ end
+end
+
+def fun_l10_n267(x)
+ if (x < 1)
+ fun_l11_n762(x)
+ else
+ fun_l11_n352(x)
+ end
+end
+
+def fun_l10_n268(x)
+ if (x < 1)
+ fun_l11_n988(x)
+ else
+ fun_l11_n126(x)
+ end
+end
+
+def fun_l10_n269(x)
+ if (x < 1)
+ fun_l11_n92(x)
+ else
+ fun_l11_n455(x)
+ end
+end
+
+def fun_l10_n270(x)
+ if (x < 1)
+ fun_l11_n233(x)
+ else
+ fun_l11_n404(x)
+ end
+end
+
+def fun_l10_n271(x)
+ if (x < 1)
+ fun_l11_n122(x)
+ else
+ fun_l11_n245(x)
+ end
+end
+
+def fun_l10_n272(x)
+ if (x < 1)
+ fun_l11_n277(x)
+ else
+ fun_l11_n969(x)
+ end
+end
+
+def fun_l10_n273(x)
+ if (x < 1)
+ fun_l11_n848(x)
+ else
+ fun_l11_n56(x)
+ end
+end
+
+def fun_l10_n274(x)
+ if (x < 1)
+ fun_l11_n640(x)
+ else
+ fun_l11_n632(x)
+ end
+end
+
+def fun_l10_n275(x)
+ if (x < 1)
+ fun_l11_n38(x)
+ else
+ fun_l11_n816(x)
+ end
+end
+
+def fun_l10_n276(x)
+ if (x < 1)
+ fun_l11_n706(x)
+ else
+ fun_l11_n126(x)
+ end
+end
+
+def fun_l10_n277(x)
+ if (x < 1)
+ fun_l11_n710(x)
+ else
+ fun_l11_n128(x)
+ end
+end
+
+def fun_l10_n278(x)
+ if (x < 1)
+ fun_l11_n175(x)
+ else
+ fun_l11_n954(x)
+ end
+end
+
+def fun_l10_n279(x)
+ if (x < 1)
+ fun_l11_n379(x)
+ else
+ fun_l11_n58(x)
+ end
+end
+
+def fun_l10_n280(x)
+ if (x < 1)
+ fun_l11_n926(x)
+ else
+ fun_l11_n287(x)
+ end
+end
+
+def fun_l10_n281(x)
+ if (x < 1)
+ fun_l11_n981(x)
+ else
+ fun_l11_n921(x)
+ end
+end
+
+def fun_l10_n282(x)
+ if (x < 1)
+ fun_l11_n222(x)
+ else
+ fun_l11_n954(x)
+ end
+end
+
+def fun_l10_n283(x)
+ if (x < 1)
+ fun_l11_n295(x)
+ else
+ fun_l11_n575(x)
+ end
+end
+
+def fun_l10_n284(x)
+ if (x < 1)
+ fun_l11_n554(x)
+ else
+ fun_l11_n559(x)
+ end
+end
+
+def fun_l10_n285(x)
+ if (x < 1)
+ fun_l11_n476(x)
+ else
+ fun_l11_n487(x)
+ end
+end
+
+def fun_l10_n286(x)
+ if (x < 1)
+ fun_l11_n102(x)
+ else
+ fun_l11_n928(x)
+ end
+end
+
+def fun_l10_n287(x)
+ if (x < 1)
+ fun_l11_n949(x)
+ else
+ fun_l11_n900(x)
+ end
+end
+
+def fun_l10_n288(x)
+ if (x < 1)
+ fun_l11_n717(x)
+ else
+ fun_l11_n762(x)
+ end
+end
+
+def fun_l10_n289(x)
+ if (x < 1)
+ fun_l11_n304(x)
+ else
+ fun_l11_n572(x)
+ end
+end
+
+def fun_l10_n290(x)
+ if (x < 1)
+ fun_l11_n748(x)
+ else
+ fun_l11_n791(x)
+ end
+end
+
+def fun_l10_n291(x)
+ if (x < 1)
+ fun_l11_n557(x)
+ else
+ fun_l11_n79(x)
+ end
+end
+
+def fun_l10_n292(x)
+ if (x < 1)
+ fun_l11_n74(x)
+ else
+ fun_l11_n931(x)
+ end
+end
+
+def fun_l10_n293(x)
+ if (x < 1)
+ fun_l11_n31(x)
+ else
+ fun_l11_n791(x)
+ end
+end
+
+def fun_l10_n294(x)
+ if (x < 1)
+ fun_l11_n29(x)
+ else
+ fun_l11_n377(x)
+ end
+end
+
+def fun_l10_n295(x)
+ if (x < 1)
+ fun_l11_n509(x)
+ else
+ fun_l11_n900(x)
+ end
+end
+
+def fun_l10_n296(x)
+ if (x < 1)
+ fun_l11_n594(x)
+ else
+ fun_l11_n835(x)
+ end
+end
+
+def fun_l10_n297(x)
+ if (x < 1)
+ fun_l11_n943(x)
+ else
+ fun_l11_n621(x)
+ end
+end
+
+def fun_l10_n298(x)
+ if (x < 1)
+ fun_l11_n860(x)
+ else
+ fun_l11_n403(x)
+ end
+end
+
+def fun_l10_n299(x)
+ if (x < 1)
+ fun_l11_n676(x)
+ else
+ fun_l11_n478(x)
+ end
+end
+
+def fun_l10_n300(x)
+ if (x < 1)
+ fun_l11_n513(x)
+ else
+ fun_l11_n661(x)
+ end
+end
+
+def fun_l10_n301(x)
+ if (x < 1)
+ fun_l11_n513(x)
+ else
+ fun_l11_n30(x)
+ end
+end
+
+def fun_l10_n302(x)
+ if (x < 1)
+ fun_l11_n763(x)
+ else
+ fun_l11_n823(x)
+ end
+end
+
+def fun_l10_n303(x)
+ if (x < 1)
+ fun_l11_n469(x)
+ else
+ fun_l11_n250(x)
+ end
+end
+
+def fun_l10_n304(x)
+ if (x < 1)
+ fun_l11_n637(x)
+ else
+ fun_l11_n478(x)
+ end
+end
+
+def fun_l10_n305(x)
+ if (x < 1)
+ fun_l11_n825(x)
+ else
+ fun_l11_n258(x)
+ end
+end
+
+def fun_l10_n306(x)
+ if (x < 1)
+ fun_l11_n264(x)
+ else
+ fun_l11_n698(x)
+ end
+end
+
+def fun_l10_n307(x)
+ if (x < 1)
+ fun_l11_n426(x)
+ else
+ fun_l11_n490(x)
+ end
+end
+
+def fun_l10_n308(x)
+ if (x < 1)
+ fun_l11_n157(x)
+ else
+ fun_l11_n407(x)
+ end
+end
+
+def fun_l10_n309(x)
+ if (x < 1)
+ fun_l11_n68(x)
+ else
+ fun_l11_n424(x)
+ end
+end
+
+def fun_l10_n310(x)
+ if (x < 1)
+ fun_l11_n844(x)
+ else
+ fun_l11_n955(x)
+ end
+end
+
+def fun_l10_n311(x)
+ if (x < 1)
+ fun_l11_n316(x)
+ else
+ fun_l11_n27(x)
+ end
+end
+
+def fun_l10_n312(x)
+ if (x < 1)
+ fun_l11_n771(x)
+ else
+ fun_l11_n604(x)
+ end
+end
+
+def fun_l10_n313(x)
+ if (x < 1)
+ fun_l11_n436(x)
+ else
+ fun_l11_n714(x)
+ end
+end
+
+def fun_l10_n314(x)
+ if (x < 1)
+ fun_l11_n791(x)
+ else
+ fun_l11_n889(x)
+ end
+end
+
+def fun_l10_n315(x)
+ if (x < 1)
+ fun_l11_n621(x)
+ else
+ fun_l11_n694(x)
+ end
+end
+
+def fun_l10_n316(x)
+ if (x < 1)
+ fun_l11_n403(x)
+ else
+ fun_l11_n625(x)
+ end
+end
+
+def fun_l10_n317(x)
+ if (x < 1)
+ fun_l11_n532(x)
+ else
+ fun_l11_n920(x)
+ end
+end
+
+def fun_l10_n318(x)
+ if (x < 1)
+ fun_l11_n895(x)
+ else
+ fun_l11_n781(x)
+ end
+end
+
+def fun_l10_n319(x)
+ if (x < 1)
+ fun_l11_n109(x)
+ else
+ fun_l11_n254(x)
+ end
+end
+
+def fun_l10_n320(x)
+ if (x < 1)
+ fun_l11_n842(x)
+ else
+ fun_l11_n879(x)
+ end
+end
+
+def fun_l10_n321(x)
+ if (x < 1)
+ fun_l11_n655(x)
+ else
+ fun_l11_n518(x)
+ end
+end
+
+def fun_l10_n322(x)
+ if (x < 1)
+ fun_l11_n727(x)
+ else
+ fun_l11_n14(x)
+ end
+end
+
+def fun_l10_n323(x)
+ if (x < 1)
+ fun_l11_n808(x)
+ else
+ fun_l11_n128(x)
+ end
+end
+
+def fun_l10_n324(x)
+ if (x < 1)
+ fun_l11_n412(x)
+ else
+ fun_l11_n940(x)
+ end
+end
+
+def fun_l10_n325(x)
+ if (x < 1)
+ fun_l11_n944(x)
+ else
+ fun_l11_n915(x)
+ end
+end
+
+def fun_l10_n326(x)
+ if (x < 1)
+ fun_l11_n120(x)
+ else
+ fun_l11_n572(x)
+ end
+end
+
+def fun_l10_n327(x)
+ if (x < 1)
+ fun_l11_n444(x)
+ else
+ fun_l11_n793(x)
+ end
+end
+
+def fun_l10_n328(x)
+ if (x < 1)
+ fun_l11_n892(x)
+ else
+ fun_l11_n475(x)
+ end
+end
+
+def fun_l10_n329(x)
+ if (x < 1)
+ fun_l11_n919(x)
+ else
+ fun_l11_n183(x)
+ end
+end
+
+def fun_l10_n330(x)
+ if (x < 1)
+ fun_l11_n80(x)
+ else
+ fun_l11_n357(x)
+ end
+end
+
+def fun_l10_n331(x)
+ if (x < 1)
+ fun_l11_n340(x)
+ else
+ fun_l11_n349(x)
+ end
+end
+
+def fun_l10_n332(x)
+ if (x < 1)
+ fun_l11_n579(x)
+ else
+ fun_l11_n845(x)
+ end
+end
+
+def fun_l10_n333(x)
+ if (x < 1)
+ fun_l11_n169(x)
+ else
+ fun_l11_n583(x)
+ end
+end
+
+def fun_l10_n334(x)
+ if (x < 1)
+ fun_l11_n544(x)
+ else
+ fun_l11_n462(x)
+ end
+end
+
+def fun_l10_n335(x)
+ if (x < 1)
+ fun_l11_n51(x)
+ else
+ fun_l11_n635(x)
+ end
+end
+
+def fun_l10_n336(x)
+ if (x < 1)
+ fun_l11_n965(x)
+ else
+ fun_l11_n538(x)
+ end
+end
+
+def fun_l10_n337(x)
+ if (x < 1)
+ fun_l11_n930(x)
+ else
+ fun_l11_n169(x)
+ end
+end
+
+def fun_l10_n338(x)
+ if (x < 1)
+ fun_l11_n732(x)
+ else
+ fun_l11_n574(x)
+ end
+end
+
+def fun_l10_n339(x)
+ if (x < 1)
+ fun_l11_n51(x)
+ else
+ fun_l11_n141(x)
+ end
+end
+
+def fun_l10_n340(x)
+ if (x < 1)
+ fun_l11_n149(x)
+ else
+ fun_l11_n346(x)
+ end
+end
+
+def fun_l10_n341(x)
+ if (x < 1)
+ fun_l11_n163(x)
+ else
+ fun_l11_n224(x)
+ end
+end
+
+def fun_l10_n342(x)
+ if (x < 1)
+ fun_l11_n175(x)
+ else
+ fun_l11_n40(x)
+ end
+end
+
+def fun_l10_n343(x)
+ if (x < 1)
+ fun_l11_n156(x)
+ else
+ fun_l11_n639(x)
+ end
+end
+
+def fun_l10_n344(x)
+ if (x < 1)
+ fun_l11_n934(x)
+ else
+ fun_l11_n85(x)
+ end
+end
+
+def fun_l10_n345(x)
+ if (x < 1)
+ fun_l11_n182(x)
+ else
+ fun_l11_n190(x)
+ end
+end
+
+def fun_l10_n346(x)
+ if (x < 1)
+ fun_l11_n879(x)
+ else
+ fun_l11_n702(x)
+ end
+end
+
+def fun_l10_n347(x)
+ if (x < 1)
+ fun_l11_n540(x)
+ else
+ fun_l11_n689(x)
+ end
+end
+
+def fun_l10_n348(x)
+ if (x < 1)
+ fun_l11_n945(x)
+ else
+ fun_l11_n39(x)
+ end
+end
+
+def fun_l10_n349(x)
+ if (x < 1)
+ fun_l11_n683(x)
+ else
+ fun_l11_n688(x)
+ end
+end
+
+def fun_l10_n350(x)
+ if (x < 1)
+ fun_l11_n750(x)
+ else
+ fun_l11_n577(x)
+ end
+end
+
+def fun_l10_n351(x)
+ if (x < 1)
+ fun_l11_n74(x)
+ else
+ fun_l11_n672(x)
+ end
+end
+
+def fun_l10_n352(x)
+ if (x < 1)
+ fun_l11_n252(x)
+ else
+ fun_l11_n766(x)
+ end
+end
+
+def fun_l10_n353(x)
+ if (x < 1)
+ fun_l11_n702(x)
+ else
+ fun_l11_n516(x)
+ end
+end
+
+def fun_l10_n354(x)
+ if (x < 1)
+ fun_l11_n19(x)
+ else
+ fun_l11_n290(x)
+ end
+end
+
+def fun_l10_n355(x)
+ if (x < 1)
+ fun_l11_n70(x)
+ else
+ fun_l11_n553(x)
+ end
+end
+
+def fun_l10_n356(x)
+ if (x < 1)
+ fun_l11_n654(x)
+ else
+ fun_l11_n108(x)
+ end
+end
+
+def fun_l10_n357(x)
+ if (x < 1)
+ fun_l11_n820(x)
+ else
+ fun_l11_n320(x)
+ end
+end
+
+def fun_l10_n358(x)
+ if (x < 1)
+ fun_l11_n417(x)
+ else
+ fun_l11_n290(x)
+ end
+end
+
+def fun_l10_n359(x)
+ if (x < 1)
+ fun_l11_n832(x)
+ else
+ fun_l11_n647(x)
+ end
+end
+
+def fun_l10_n360(x)
+ if (x < 1)
+ fun_l11_n625(x)
+ else
+ fun_l11_n723(x)
+ end
+end
+
+def fun_l10_n361(x)
+ if (x < 1)
+ fun_l11_n257(x)
+ else
+ fun_l11_n991(x)
+ end
+end
+
+def fun_l10_n362(x)
+ if (x < 1)
+ fun_l11_n101(x)
+ else
+ fun_l11_n247(x)
+ end
+end
+
+def fun_l10_n363(x)
+ if (x < 1)
+ fun_l11_n484(x)
+ else
+ fun_l11_n953(x)
+ end
+end
+
+def fun_l10_n364(x)
+ if (x < 1)
+ fun_l11_n281(x)
+ else
+ fun_l11_n846(x)
+ end
+end
+
+def fun_l10_n365(x)
+ if (x < 1)
+ fun_l11_n425(x)
+ else
+ fun_l11_n772(x)
+ end
+end
+
+def fun_l10_n366(x)
+ if (x < 1)
+ fun_l11_n724(x)
+ else
+ fun_l11_n725(x)
+ end
+end
+
+def fun_l10_n367(x)
+ if (x < 1)
+ fun_l11_n943(x)
+ else
+ fun_l11_n360(x)
+ end
+end
+
+def fun_l10_n368(x)
+ if (x < 1)
+ fun_l11_n874(x)
+ else
+ fun_l11_n698(x)
+ end
+end
+
+def fun_l10_n369(x)
+ if (x < 1)
+ fun_l11_n419(x)
+ else
+ fun_l11_n672(x)
+ end
+end
+
+def fun_l10_n370(x)
+ if (x < 1)
+ fun_l11_n655(x)
+ else
+ fun_l11_n468(x)
+ end
+end
+
+def fun_l10_n371(x)
+ if (x < 1)
+ fun_l11_n577(x)
+ else
+ fun_l11_n402(x)
+ end
+end
+
+def fun_l10_n372(x)
+ if (x < 1)
+ fun_l11_n531(x)
+ else
+ fun_l11_n319(x)
+ end
+end
+
+def fun_l10_n373(x)
+ if (x < 1)
+ fun_l11_n972(x)
+ else
+ fun_l11_n901(x)
+ end
+end
+
+def fun_l10_n374(x)
+ if (x < 1)
+ fun_l11_n59(x)
+ else
+ fun_l11_n12(x)
+ end
+end
+
+def fun_l10_n375(x)
+ if (x < 1)
+ fun_l11_n409(x)
+ else
+ fun_l11_n214(x)
+ end
+end
+
+def fun_l10_n376(x)
+ if (x < 1)
+ fun_l11_n52(x)
+ else
+ fun_l11_n395(x)
+ end
+end
+
+def fun_l10_n377(x)
+ if (x < 1)
+ fun_l11_n594(x)
+ else
+ fun_l11_n434(x)
+ end
+end
+
+def fun_l10_n378(x)
+ if (x < 1)
+ fun_l11_n795(x)
+ else
+ fun_l11_n820(x)
+ end
+end
+
+def fun_l10_n379(x)
+ if (x < 1)
+ fun_l11_n962(x)
+ else
+ fun_l11_n111(x)
+ end
+end
+
+def fun_l10_n380(x)
+ if (x < 1)
+ fun_l11_n762(x)
+ else
+ fun_l11_n678(x)
+ end
+end
+
+def fun_l10_n381(x)
+ if (x < 1)
+ fun_l11_n324(x)
+ else
+ fun_l11_n478(x)
+ end
+end
+
+def fun_l10_n382(x)
+ if (x < 1)
+ fun_l11_n360(x)
+ else
+ fun_l11_n35(x)
+ end
+end
+
+def fun_l10_n383(x)
+ if (x < 1)
+ fun_l11_n710(x)
+ else
+ fun_l11_n898(x)
+ end
+end
+
+def fun_l10_n384(x)
+ if (x < 1)
+ fun_l11_n584(x)
+ else
+ fun_l11_n260(x)
+ end
+end
+
+def fun_l10_n385(x)
+ if (x < 1)
+ fun_l11_n514(x)
+ else
+ fun_l11_n649(x)
+ end
+end
+
+def fun_l10_n386(x)
+ if (x < 1)
+ fun_l11_n426(x)
+ else
+ fun_l11_n391(x)
+ end
+end
+
+def fun_l10_n387(x)
+ if (x < 1)
+ fun_l11_n713(x)
+ else
+ fun_l11_n734(x)
+ end
+end
+
+def fun_l10_n388(x)
+ if (x < 1)
+ fun_l11_n236(x)
+ else
+ fun_l11_n469(x)
+ end
+end
+
+def fun_l10_n389(x)
+ if (x < 1)
+ fun_l11_n267(x)
+ else
+ fun_l11_n965(x)
+ end
+end
+
+def fun_l10_n390(x)
+ if (x < 1)
+ fun_l11_n943(x)
+ else
+ fun_l11_n181(x)
+ end
+end
+
+def fun_l10_n391(x)
+ if (x < 1)
+ fun_l11_n869(x)
+ else
+ fun_l11_n348(x)
+ end
+end
+
+def fun_l10_n392(x)
+ if (x < 1)
+ fun_l11_n144(x)
+ else
+ fun_l11_n801(x)
+ end
+end
+
+def fun_l10_n393(x)
+ if (x < 1)
+ fun_l11_n907(x)
+ else
+ fun_l11_n609(x)
+ end
+end
+
+def fun_l10_n394(x)
+ if (x < 1)
+ fun_l11_n801(x)
+ else
+ fun_l11_n691(x)
+ end
+end
+
+def fun_l10_n395(x)
+ if (x < 1)
+ fun_l11_n629(x)
+ else
+ fun_l11_n924(x)
+ end
+end
+
+def fun_l10_n396(x)
+ if (x < 1)
+ fun_l11_n191(x)
+ else
+ fun_l11_n248(x)
+ end
+end
+
+def fun_l10_n397(x)
+ if (x < 1)
+ fun_l11_n339(x)
+ else
+ fun_l11_n805(x)
+ end
+end
+
+def fun_l10_n398(x)
+ if (x < 1)
+ fun_l11_n798(x)
+ else
+ fun_l11_n154(x)
+ end
+end
+
+def fun_l10_n399(x)
+ if (x < 1)
+ fun_l11_n847(x)
+ else
+ fun_l11_n14(x)
+ end
+end
+
+def fun_l10_n400(x)
+ if (x < 1)
+ fun_l11_n296(x)
+ else
+ fun_l11_n144(x)
+ end
+end
+
+def fun_l10_n401(x)
+ if (x < 1)
+ fun_l11_n853(x)
+ else
+ fun_l11_n156(x)
+ end
+end
+
+def fun_l10_n402(x)
+ if (x < 1)
+ fun_l11_n54(x)
+ else
+ fun_l11_n944(x)
+ end
+end
+
+def fun_l10_n403(x)
+ if (x < 1)
+ fun_l11_n617(x)
+ else
+ fun_l11_n324(x)
+ end
+end
+
+def fun_l10_n404(x)
+ if (x < 1)
+ fun_l11_n962(x)
+ else
+ fun_l11_n139(x)
+ end
+end
+
+def fun_l10_n405(x)
+ if (x < 1)
+ fun_l11_n466(x)
+ else
+ fun_l11_n388(x)
+ end
+end
+
+def fun_l10_n406(x)
+ if (x < 1)
+ fun_l11_n914(x)
+ else
+ fun_l11_n847(x)
+ end
+end
+
+def fun_l10_n407(x)
+ if (x < 1)
+ fun_l11_n741(x)
+ else
+ fun_l11_n757(x)
+ end
+end
+
+def fun_l10_n408(x)
+ if (x < 1)
+ fun_l11_n36(x)
+ else
+ fun_l11_n312(x)
+ end
+end
+
+def fun_l10_n409(x)
+ if (x < 1)
+ fun_l11_n203(x)
+ else
+ fun_l11_n523(x)
+ end
+end
+
+def fun_l10_n410(x)
+ if (x < 1)
+ fun_l11_n187(x)
+ else
+ fun_l11_n291(x)
+ end
+end
+
+def fun_l10_n411(x)
+ if (x < 1)
+ fun_l11_n13(x)
+ else
+ fun_l11_n107(x)
+ end
+end
+
+def fun_l10_n412(x)
+ if (x < 1)
+ fun_l11_n960(x)
+ else
+ fun_l11_n565(x)
+ end
+end
+
+def fun_l10_n413(x)
+ if (x < 1)
+ fun_l11_n549(x)
+ else
+ fun_l11_n885(x)
+ end
+end
+
+def fun_l10_n414(x)
+ if (x < 1)
+ fun_l11_n559(x)
+ else
+ fun_l11_n612(x)
+ end
+end
+
+def fun_l10_n415(x)
+ if (x < 1)
+ fun_l11_n18(x)
+ else
+ fun_l11_n167(x)
+ end
+end
+
+def fun_l10_n416(x)
+ if (x < 1)
+ fun_l11_n125(x)
+ else
+ fun_l11_n206(x)
+ end
+end
+
+def fun_l10_n417(x)
+ if (x < 1)
+ fun_l11_n980(x)
+ else
+ fun_l11_n497(x)
+ end
+end
+
+def fun_l10_n418(x)
+ if (x < 1)
+ fun_l11_n703(x)
+ else
+ fun_l11_n49(x)
+ end
+end
+
+def fun_l10_n419(x)
+ if (x < 1)
+ fun_l11_n690(x)
+ else
+ fun_l11_n63(x)
+ end
+end
+
+def fun_l10_n420(x)
+ if (x < 1)
+ fun_l11_n219(x)
+ else
+ fun_l11_n814(x)
+ end
+end
+
+def fun_l10_n421(x)
+ if (x < 1)
+ fun_l11_n864(x)
+ else
+ fun_l11_n963(x)
+ end
+end
+
+def fun_l10_n422(x)
+ if (x < 1)
+ fun_l11_n672(x)
+ else
+ fun_l11_n87(x)
+ end
+end
+
+def fun_l10_n423(x)
+ if (x < 1)
+ fun_l11_n41(x)
+ else
+ fun_l11_n461(x)
+ end
+end
+
+def fun_l10_n424(x)
+ if (x < 1)
+ fun_l11_n334(x)
+ else
+ fun_l11_n283(x)
+ end
+end
+
+def fun_l10_n425(x)
+ if (x < 1)
+ fun_l11_n369(x)
+ else
+ fun_l11_n269(x)
+ end
+end
+
+def fun_l10_n426(x)
+ if (x < 1)
+ fun_l11_n832(x)
+ else
+ fun_l11_n625(x)
+ end
+end
+
+def fun_l10_n427(x)
+ if (x < 1)
+ fun_l11_n272(x)
+ else
+ fun_l11_n634(x)
+ end
+end
+
+def fun_l10_n428(x)
+ if (x < 1)
+ fun_l11_n122(x)
+ else
+ fun_l11_n53(x)
+ end
+end
+
+def fun_l10_n429(x)
+ if (x < 1)
+ fun_l11_n895(x)
+ else
+ fun_l11_n292(x)
+ end
+end
+
+def fun_l10_n430(x)
+ if (x < 1)
+ fun_l11_n460(x)
+ else
+ fun_l11_n177(x)
+ end
+end
+
+def fun_l10_n431(x)
+ if (x < 1)
+ fun_l11_n238(x)
+ else
+ fun_l11_n938(x)
+ end
+end
+
+def fun_l10_n432(x)
+ if (x < 1)
+ fun_l11_n302(x)
+ else
+ fun_l11_n492(x)
+ end
+end
+
+def fun_l10_n433(x)
+ if (x < 1)
+ fun_l11_n307(x)
+ else
+ fun_l11_n876(x)
+ end
+end
+
+def fun_l10_n434(x)
+ if (x < 1)
+ fun_l11_n64(x)
+ else
+ fun_l11_n892(x)
+ end
+end
+
+def fun_l10_n435(x)
+ if (x < 1)
+ fun_l11_n940(x)
+ else
+ fun_l11_n718(x)
+ end
+end
+
+def fun_l10_n436(x)
+ if (x < 1)
+ fun_l11_n988(x)
+ else
+ fun_l11_n216(x)
+ end
+end
+
+def fun_l10_n437(x)
+ if (x < 1)
+ fun_l11_n282(x)
+ else
+ fun_l11_n168(x)
+ end
+end
+
+def fun_l10_n438(x)
+ if (x < 1)
+ fun_l11_n529(x)
+ else
+ fun_l11_n66(x)
+ end
+end
+
+def fun_l10_n439(x)
+ if (x < 1)
+ fun_l11_n286(x)
+ else
+ fun_l11_n134(x)
+ end
+end
+
+def fun_l10_n440(x)
+ if (x < 1)
+ fun_l11_n454(x)
+ else
+ fun_l11_n873(x)
+ end
+end
+
+def fun_l10_n441(x)
+ if (x < 1)
+ fun_l11_n871(x)
+ else
+ fun_l11_n13(x)
+ end
+end
+
+def fun_l10_n442(x)
+ if (x < 1)
+ fun_l11_n314(x)
+ else
+ fun_l11_n685(x)
+ end
+end
+
+def fun_l10_n443(x)
+ if (x < 1)
+ fun_l11_n752(x)
+ else
+ fun_l11_n162(x)
+ end
+end
+
+def fun_l10_n444(x)
+ if (x < 1)
+ fun_l11_n686(x)
+ else
+ fun_l11_n585(x)
+ end
+end
+
+def fun_l10_n445(x)
+ if (x < 1)
+ fun_l11_n42(x)
+ else
+ fun_l11_n932(x)
+ end
+end
+
+def fun_l10_n446(x)
+ if (x < 1)
+ fun_l11_n433(x)
+ else
+ fun_l11_n241(x)
+ end
+end
+
+def fun_l10_n447(x)
+ if (x < 1)
+ fun_l11_n99(x)
+ else
+ fun_l11_n563(x)
+ end
+end
+
+def fun_l10_n448(x)
+ if (x < 1)
+ fun_l11_n463(x)
+ else
+ fun_l11_n395(x)
+ end
+end
+
+def fun_l10_n449(x)
+ if (x < 1)
+ fun_l11_n393(x)
+ else
+ fun_l11_n151(x)
+ end
+end
+
+def fun_l10_n450(x)
+ if (x < 1)
+ fun_l11_n255(x)
+ else
+ fun_l11_n834(x)
+ end
+end
+
+def fun_l10_n451(x)
+ if (x < 1)
+ fun_l11_n195(x)
+ else
+ fun_l11_n0(x)
+ end
+end
+
+def fun_l10_n452(x)
+ if (x < 1)
+ fun_l11_n372(x)
+ else
+ fun_l11_n228(x)
+ end
+end
+
+def fun_l10_n453(x)
+ if (x < 1)
+ fun_l11_n526(x)
+ else
+ fun_l11_n54(x)
+ end
+end
+
+def fun_l10_n454(x)
+ if (x < 1)
+ fun_l11_n594(x)
+ else
+ fun_l11_n116(x)
+ end
+end
+
+def fun_l10_n455(x)
+ if (x < 1)
+ fun_l11_n25(x)
+ else
+ fun_l11_n930(x)
+ end
+end
+
+def fun_l10_n456(x)
+ if (x < 1)
+ fun_l11_n83(x)
+ else
+ fun_l11_n30(x)
+ end
+end
+
+def fun_l10_n457(x)
+ if (x < 1)
+ fun_l11_n644(x)
+ else
+ fun_l11_n837(x)
+ end
+end
+
+def fun_l10_n458(x)
+ if (x < 1)
+ fun_l11_n311(x)
+ else
+ fun_l11_n86(x)
+ end
+end
+
+def fun_l10_n459(x)
+ if (x < 1)
+ fun_l11_n139(x)
+ else
+ fun_l11_n735(x)
+ end
+end
+
+def fun_l10_n460(x)
+ if (x < 1)
+ fun_l11_n431(x)
+ else
+ fun_l11_n808(x)
+ end
+end
+
+def fun_l10_n461(x)
+ if (x < 1)
+ fun_l11_n488(x)
+ else
+ fun_l11_n439(x)
+ end
+end
+
+def fun_l10_n462(x)
+ if (x < 1)
+ fun_l11_n448(x)
+ else
+ fun_l11_n882(x)
+ end
+end
+
+def fun_l10_n463(x)
+ if (x < 1)
+ fun_l11_n362(x)
+ else
+ fun_l11_n875(x)
+ end
+end
+
+def fun_l10_n464(x)
+ if (x < 1)
+ fun_l11_n88(x)
+ else
+ fun_l11_n842(x)
+ end
+end
+
+def fun_l10_n465(x)
+ if (x < 1)
+ fun_l11_n651(x)
+ else
+ fun_l11_n368(x)
+ end
+end
+
+def fun_l10_n466(x)
+ if (x < 1)
+ fun_l11_n121(x)
+ else
+ fun_l11_n3(x)
+ end
+end
+
+def fun_l10_n467(x)
+ if (x < 1)
+ fun_l11_n654(x)
+ else
+ fun_l11_n260(x)
+ end
+end
+
+def fun_l10_n468(x)
+ if (x < 1)
+ fun_l11_n765(x)
+ else
+ fun_l11_n604(x)
+ end
+end
+
+def fun_l10_n469(x)
+ if (x < 1)
+ fun_l11_n881(x)
+ else
+ fun_l11_n567(x)
+ end
+end
+
+def fun_l10_n470(x)
+ if (x < 1)
+ fun_l11_n41(x)
+ else
+ fun_l11_n92(x)
+ end
+end
+
+def fun_l10_n471(x)
+ if (x < 1)
+ fun_l11_n905(x)
+ else
+ fun_l11_n731(x)
+ end
+end
+
+def fun_l10_n472(x)
+ if (x < 1)
+ fun_l11_n797(x)
+ else
+ fun_l11_n878(x)
+ end
+end
+
+def fun_l10_n473(x)
+ if (x < 1)
+ fun_l11_n487(x)
+ else
+ fun_l11_n951(x)
+ end
+end
+
+def fun_l10_n474(x)
+ if (x < 1)
+ fun_l11_n998(x)
+ else
+ fun_l11_n900(x)
+ end
+end
+
+def fun_l10_n475(x)
+ if (x < 1)
+ fun_l11_n451(x)
+ else
+ fun_l11_n780(x)
+ end
+end
+
+def fun_l10_n476(x)
+ if (x < 1)
+ fun_l11_n427(x)
+ else
+ fun_l11_n760(x)
+ end
+end
+
+def fun_l10_n477(x)
+ if (x < 1)
+ fun_l11_n758(x)
+ else
+ fun_l11_n177(x)
+ end
+end
+
+def fun_l10_n478(x)
+ if (x < 1)
+ fun_l11_n359(x)
+ else
+ fun_l11_n236(x)
+ end
+end
+
+def fun_l10_n479(x)
+ if (x < 1)
+ fun_l11_n70(x)
+ else
+ fun_l11_n49(x)
+ end
+end
+
+def fun_l10_n480(x)
+ if (x < 1)
+ fun_l11_n338(x)
+ else
+ fun_l11_n714(x)
+ end
+end
+
+def fun_l10_n481(x)
+ if (x < 1)
+ fun_l11_n6(x)
+ else
+ fun_l11_n194(x)
+ end
+end
+
+def fun_l10_n482(x)
+ if (x < 1)
+ fun_l11_n801(x)
+ else
+ fun_l11_n591(x)
+ end
+end
+
+def fun_l10_n483(x)
+ if (x < 1)
+ fun_l11_n362(x)
+ else
+ fun_l11_n687(x)
+ end
+end
+
+def fun_l10_n484(x)
+ if (x < 1)
+ fun_l11_n659(x)
+ else
+ fun_l11_n661(x)
+ end
+end
+
+def fun_l10_n485(x)
+ if (x < 1)
+ fun_l11_n903(x)
+ else
+ fun_l11_n419(x)
+ end
+end
+
+def fun_l10_n486(x)
+ if (x < 1)
+ fun_l11_n279(x)
+ else
+ fun_l11_n995(x)
+ end
+end
+
+def fun_l10_n487(x)
+ if (x < 1)
+ fun_l11_n529(x)
+ else
+ fun_l11_n373(x)
+ end
+end
+
+def fun_l10_n488(x)
+ if (x < 1)
+ fun_l11_n198(x)
+ else
+ fun_l11_n899(x)
+ end
+end
+
+def fun_l10_n489(x)
+ if (x < 1)
+ fun_l11_n174(x)
+ else
+ fun_l11_n485(x)
+ end
+end
+
+def fun_l10_n490(x)
+ if (x < 1)
+ fun_l11_n19(x)
+ else
+ fun_l11_n322(x)
+ end
+end
+
+def fun_l10_n491(x)
+ if (x < 1)
+ fun_l11_n0(x)
+ else
+ fun_l11_n855(x)
+ end
+end
+
+def fun_l10_n492(x)
+ if (x < 1)
+ fun_l11_n126(x)
+ else
+ fun_l11_n986(x)
+ end
+end
+
+def fun_l10_n493(x)
+ if (x < 1)
+ fun_l11_n263(x)
+ else
+ fun_l11_n374(x)
+ end
+end
+
+def fun_l10_n494(x)
+ if (x < 1)
+ fun_l11_n21(x)
+ else
+ fun_l11_n749(x)
+ end
+end
+
+def fun_l10_n495(x)
+ if (x < 1)
+ fun_l11_n618(x)
+ else
+ fun_l11_n955(x)
+ end
+end
+
+def fun_l10_n496(x)
+ if (x < 1)
+ fun_l11_n157(x)
+ else
+ fun_l11_n230(x)
+ end
+end
+
+def fun_l10_n497(x)
+ if (x < 1)
+ fun_l11_n387(x)
+ else
+ fun_l11_n841(x)
+ end
+end
+
+def fun_l10_n498(x)
+ if (x < 1)
+ fun_l11_n496(x)
+ else
+ fun_l11_n30(x)
+ end
+end
+
+def fun_l10_n499(x)
+ if (x < 1)
+ fun_l11_n581(x)
+ else
+ fun_l11_n821(x)
+ end
+end
+
+def fun_l10_n500(x)
+ if (x < 1)
+ fun_l11_n202(x)
+ else
+ fun_l11_n26(x)
+ end
+end
+
+def fun_l10_n501(x)
+ if (x < 1)
+ fun_l11_n273(x)
+ else
+ fun_l11_n294(x)
+ end
+end
+
+def fun_l10_n502(x)
+ if (x < 1)
+ fun_l11_n604(x)
+ else
+ fun_l11_n261(x)
+ end
+end
+
+def fun_l10_n503(x)
+ if (x < 1)
+ fun_l11_n632(x)
+ else
+ fun_l11_n340(x)
+ end
+end
+
+def fun_l10_n504(x)
+ if (x < 1)
+ fun_l11_n162(x)
+ else
+ fun_l11_n483(x)
+ end
+end
+
+def fun_l10_n505(x)
+ if (x < 1)
+ fun_l11_n824(x)
+ else
+ fun_l11_n747(x)
+ end
+end
+
+def fun_l10_n506(x)
+ if (x < 1)
+ fun_l11_n575(x)
+ else
+ fun_l11_n932(x)
+ end
+end
+
+def fun_l10_n507(x)
+ if (x < 1)
+ fun_l11_n645(x)
+ else
+ fun_l11_n486(x)
+ end
+end
+
+def fun_l10_n508(x)
+ if (x < 1)
+ fun_l11_n876(x)
+ else
+ fun_l11_n640(x)
+ end
+end
+
+def fun_l10_n509(x)
+ if (x < 1)
+ fun_l11_n280(x)
+ else
+ fun_l11_n116(x)
+ end
+end
+
+def fun_l10_n510(x)
+ if (x < 1)
+ fun_l11_n570(x)
+ else
+ fun_l11_n640(x)
+ end
+end
+
+def fun_l10_n511(x)
+ if (x < 1)
+ fun_l11_n576(x)
+ else
+ fun_l11_n124(x)
+ end
+end
+
+def fun_l10_n512(x)
+ if (x < 1)
+ fun_l11_n328(x)
+ else
+ fun_l11_n880(x)
+ end
+end
+
+def fun_l10_n513(x)
+ if (x < 1)
+ fun_l11_n185(x)
+ else
+ fun_l11_n977(x)
+ end
+end
+
+def fun_l10_n514(x)
+ if (x < 1)
+ fun_l11_n560(x)
+ else
+ fun_l11_n825(x)
+ end
+end
+
+def fun_l10_n515(x)
+ if (x < 1)
+ fun_l11_n28(x)
+ else
+ fun_l11_n908(x)
+ end
+end
+
+def fun_l10_n516(x)
+ if (x < 1)
+ fun_l11_n902(x)
+ else
+ fun_l11_n882(x)
+ end
+end
+
+def fun_l10_n517(x)
+ if (x < 1)
+ fun_l11_n429(x)
+ else
+ fun_l11_n824(x)
+ end
+end
+
+def fun_l10_n518(x)
+ if (x < 1)
+ fun_l11_n706(x)
+ else
+ fun_l11_n149(x)
+ end
+end
+
+def fun_l10_n519(x)
+ if (x < 1)
+ fun_l11_n837(x)
+ else
+ fun_l11_n573(x)
+ end
+end
+
+def fun_l10_n520(x)
+ if (x < 1)
+ fun_l11_n934(x)
+ else
+ fun_l11_n504(x)
+ end
+end
+
+def fun_l10_n521(x)
+ if (x < 1)
+ fun_l11_n254(x)
+ else
+ fun_l11_n551(x)
+ end
+end
+
+def fun_l10_n522(x)
+ if (x < 1)
+ fun_l11_n53(x)
+ else
+ fun_l11_n297(x)
+ end
+end
+
+def fun_l10_n523(x)
+ if (x < 1)
+ fun_l11_n648(x)
+ else
+ fun_l11_n695(x)
+ end
+end
+
+def fun_l10_n524(x)
+ if (x < 1)
+ fun_l11_n737(x)
+ else
+ fun_l11_n636(x)
+ end
+end
+
+def fun_l10_n525(x)
+ if (x < 1)
+ fun_l11_n249(x)
+ else
+ fun_l11_n757(x)
+ end
+end
+
+def fun_l10_n526(x)
+ if (x < 1)
+ fun_l11_n42(x)
+ else
+ fun_l11_n199(x)
+ end
+end
+
+def fun_l10_n527(x)
+ if (x < 1)
+ fun_l11_n783(x)
+ else
+ fun_l11_n856(x)
+ end
+end
+
+def fun_l10_n528(x)
+ if (x < 1)
+ fun_l11_n708(x)
+ else
+ fun_l11_n550(x)
+ end
+end
+
+def fun_l10_n529(x)
+ if (x < 1)
+ fun_l11_n749(x)
+ else
+ fun_l11_n56(x)
+ end
+end
+
+def fun_l10_n530(x)
+ if (x < 1)
+ fun_l11_n838(x)
+ else
+ fun_l11_n57(x)
+ end
+end
+
+def fun_l10_n531(x)
+ if (x < 1)
+ fun_l11_n293(x)
+ else
+ fun_l11_n938(x)
+ end
+end
+
+def fun_l10_n532(x)
+ if (x < 1)
+ fun_l11_n245(x)
+ else
+ fun_l11_n367(x)
+ end
+end
+
+def fun_l10_n533(x)
+ if (x < 1)
+ fun_l11_n255(x)
+ else
+ fun_l11_n234(x)
+ end
+end
+
+def fun_l10_n534(x)
+ if (x < 1)
+ fun_l11_n315(x)
+ else
+ fun_l11_n982(x)
+ end
+end
+
+def fun_l10_n535(x)
+ if (x < 1)
+ fun_l11_n841(x)
+ else
+ fun_l11_n112(x)
+ end
+end
+
+def fun_l10_n536(x)
+ if (x < 1)
+ fun_l11_n481(x)
+ else
+ fun_l11_n622(x)
+ end
+end
+
+def fun_l10_n537(x)
+ if (x < 1)
+ fun_l11_n666(x)
+ else
+ fun_l11_n775(x)
+ end
+end
+
+def fun_l10_n538(x)
+ if (x < 1)
+ fun_l11_n409(x)
+ else
+ fun_l11_n733(x)
+ end
+end
+
+def fun_l10_n539(x)
+ if (x < 1)
+ fun_l11_n633(x)
+ else
+ fun_l11_n122(x)
+ end
+end
+
+def fun_l10_n540(x)
+ if (x < 1)
+ fun_l11_n453(x)
+ else
+ fun_l11_n938(x)
+ end
+end
+
+def fun_l10_n541(x)
+ if (x < 1)
+ fun_l11_n55(x)
+ else
+ fun_l11_n344(x)
+ end
+end
+
+def fun_l10_n542(x)
+ if (x < 1)
+ fun_l11_n167(x)
+ else
+ fun_l11_n908(x)
+ end
+end
+
+def fun_l10_n543(x)
+ if (x < 1)
+ fun_l11_n318(x)
+ else
+ fun_l11_n893(x)
+ end
+end
+
+def fun_l10_n544(x)
+ if (x < 1)
+ fun_l11_n683(x)
+ else
+ fun_l11_n109(x)
+ end
+end
+
+def fun_l10_n545(x)
+ if (x < 1)
+ fun_l11_n728(x)
+ else
+ fun_l11_n432(x)
+ end
+end
+
+def fun_l10_n546(x)
+ if (x < 1)
+ fun_l11_n343(x)
+ else
+ fun_l11_n674(x)
+ end
+end
+
+def fun_l10_n547(x)
+ if (x < 1)
+ fun_l11_n374(x)
+ else
+ fun_l11_n293(x)
+ end
+end
+
+def fun_l10_n548(x)
+ if (x < 1)
+ fun_l11_n661(x)
+ else
+ fun_l11_n619(x)
+ end
+end
+
+def fun_l10_n549(x)
+ if (x < 1)
+ fun_l11_n984(x)
+ else
+ fun_l11_n261(x)
+ end
+end
+
+def fun_l10_n550(x)
+ if (x < 1)
+ fun_l11_n729(x)
+ else
+ fun_l11_n469(x)
+ end
+end
+
+def fun_l10_n551(x)
+ if (x < 1)
+ fun_l11_n829(x)
+ else
+ fun_l11_n30(x)
+ end
+end
+
+def fun_l10_n552(x)
+ if (x < 1)
+ fun_l11_n457(x)
+ else
+ fun_l11_n440(x)
+ end
+end
+
+def fun_l10_n553(x)
+ if (x < 1)
+ fun_l11_n128(x)
+ else
+ fun_l11_n790(x)
+ end
+end
+
+def fun_l10_n554(x)
+ if (x < 1)
+ fun_l11_n135(x)
+ else
+ fun_l11_n118(x)
+ end
+end
+
+def fun_l10_n555(x)
+ if (x < 1)
+ fun_l11_n558(x)
+ else
+ fun_l11_n415(x)
+ end
+end
+
+def fun_l10_n556(x)
+ if (x < 1)
+ fun_l11_n388(x)
+ else
+ fun_l11_n102(x)
+ end
+end
+
+def fun_l10_n557(x)
+ if (x < 1)
+ fun_l11_n575(x)
+ else
+ fun_l11_n877(x)
+ end
+end
+
+def fun_l10_n558(x)
+ if (x < 1)
+ fun_l11_n266(x)
+ else
+ fun_l11_n216(x)
+ end
+end
+
+def fun_l10_n559(x)
+ if (x < 1)
+ fun_l11_n681(x)
+ else
+ fun_l11_n259(x)
+ end
+end
+
+def fun_l10_n560(x)
+ if (x < 1)
+ fun_l11_n922(x)
+ else
+ fun_l11_n828(x)
+ end
+end
+
+def fun_l10_n561(x)
+ if (x < 1)
+ fun_l11_n135(x)
+ else
+ fun_l11_n971(x)
+ end
+end
+
+def fun_l10_n562(x)
+ if (x < 1)
+ fun_l11_n940(x)
+ else
+ fun_l11_n266(x)
+ end
+end
+
+def fun_l10_n563(x)
+ if (x < 1)
+ fun_l11_n291(x)
+ else
+ fun_l11_n587(x)
+ end
+end
+
+def fun_l10_n564(x)
+ if (x < 1)
+ fun_l11_n143(x)
+ else
+ fun_l11_n289(x)
+ end
+end
+
+def fun_l10_n565(x)
+ if (x < 1)
+ fun_l11_n947(x)
+ else
+ fun_l11_n462(x)
+ end
+end
+
+def fun_l10_n566(x)
+ if (x < 1)
+ fun_l11_n307(x)
+ else
+ fun_l11_n904(x)
+ end
+end
+
+def fun_l10_n567(x)
+ if (x < 1)
+ fun_l11_n945(x)
+ else
+ fun_l11_n814(x)
+ end
+end
+
+def fun_l10_n568(x)
+ if (x < 1)
+ fun_l11_n689(x)
+ else
+ fun_l11_n61(x)
+ end
+end
+
+def fun_l10_n569(x)
+ if (x < 1)
+ fun_l11_n446(x)
+ else
+ fun_l11_n305(x)
+ end
+end
+
+def fun_l10_n570(x)
+ if (x < 1)
+ fun_l11_n209(x)
+ else
+ fun_l11_n768(x)
+ end
+end
+
+def fun_l10_n571(x)
+ if (x < 1)
+ fun_l11_n904(x)
+ else
+ fun_l11_n736(x)
+ end
+end
+
+def fun_l10_n572(x)
+ if (x < 1)
+ fun_l11_n87(x)
+ else
+ fun_l11_n474(x)
+ end
+end
+
+def fun_l10_n573(x)
+ if (x < 1)
+ fun_l11_n886(x)
+ else
+ fun_l11_n75(x)
+ end
+end
+
+def fun_l10_n574(x)
+ if (x < 1)
+ fun_l11_n761(x)
+ else
+ fun_l11_n662(x)
+ end
+end
+
+def fun_l10_n575(x)
+ if (x < 1)
+ fun_l11_n255(x)
+ else
+ fun_l11_n321(x)
+ end
+end
+
+def fun_l10_n576(x)
+ if (x < 1)
+ fun_l11_n154(x)
+ else
+ fun_l11_n356(x)
+ end
+end
+
+def fun_l10_n577(x)
+ if (x < 1)
+ fun_l11_n802(x)
+ else
+ fun_l11_n912(x)
+ end
+end
+
+def fun_l10_n578(x)
+ if (x < 1)
+ fun_l11_n709(x)
+ else
+ fun_l11_n66(x)
+ end
+end
+
+def fun_l10_n579(x)
+ if (x < 1)
+ fun_l11_n994(x)
+ else
+ fun_l11_n424(x)
+ end
+end
+
+def fun_l10_n580(x)
+ if (x < 1)
+ fun_l11_n520(x)
+ else
+ fun_l11_n551(x)
+ end
+end
+
+def fun_l10_n581(x)
+ if (x < 1)
+ fun_l11_n386(x)
+ else
+ fun_l11_n615(x)
+ end
+end
+
+def fun_l10_n582(x)
+ if (x < 1)
+ fun_l11_n125(x)
+ else
+ fun_l11_n667(x)
+ end
+end
+
+def fun_l10_n583(x)
+ if (x < 1)
+ fun_l11_n829(x)
+ else
+ fun_l11_n802(x)
+ end
+end
+
+def fun_l10_n584(x)
+ if (x < 1)
+ fun_l11_n649(x)
+ else
+ fun_l11_n39(x)
+ end
+end
+
+def fun_l10_n585(x)
+ if (x < 1)
+ fun_l11_n454(x)
+ else
+ fun_l11_n541(x)
+ end
+end
+
+def fun_l10_n586(x)
+ if (x < 1)
+ fun_l11_n281(x)
+ else
+ fun_l11_n4(x)
+ end
+end
+
+def fun_l10_n587(x)
+ if (x < 1)
+ fun_l11_n382(x)
+ else
+ fun_l11_n656(x)
+ end
+end
+
+def fun_l10_n588(x)
+ if (x < 1)
+ fun_l11_n177(x)
+ else
+ fun_l11_n38(x)
+ end
+end
+
+def fun_l10_n589(x)
+ if (x < 1)
+ fun_l11_n555(x)
+ else
+ fun_l11_n557(x)
+ end
+end
+
+def fun_l10_n590(x)
+ if (x < 1)
+ fun_l11_n999(x)
+ else
+ fun_l11_n645(x)
+ end
+end
+
+def fun_l10_n591(x)
+ if (x < 1)
+ fun_l11_n714(x)
+ else
+ fun_l11_n872(x)
+ end
+end
+
+def fun_l10_n592(x)
+ if (x < 1)
+ fun_l11_n779(x)
+ else
+ fun_l11_n524(x)
+ end
+end
+
+def fun_l10_n593(x)
+ if (x < 1)
+ fun_l11_n557(x)
+ else
+ fun_l11_n906(x)
+ end
+end
+
+def fun_l10_n594(x)
+ if (x < 1)
+ fun_l11_n379(x)
+ else
+ fun_l11_n550(x)
+ end
+end
+
+def fun_l10_n595(x)
+ if (x < 1)
+ fun_l11_n138(x)
+ else
+ fun_l11_n785(x)
+ end
+end
+
+def fun_l10_n596(x)
+ if (x < 1)
+ fun_l11_n486(x)
+ else
+ fun_l11_n381(x)
+ end
+end
+
+def fun_l10_n597(x)
+ if (x < 1)
+ fun_l11_n374(x)
+ else
+ fun_l11_n477(x)
+ end
+end
+
+def fun_l10_n598(x)
+ if (x < 1)
+ fun_l11_n113(x)
+ else
+ fun_l11_n455(x)
+ end
+end
+
+def fun_l10_n599(x)
+ if (x < 1)
+ fun_l11_n31(x)
+ else
+ fun_l11_n983(x)
+ end
+end
+
+def fun_l10_n600(x)
+ if (x < 1)
+ fun_l11_n758(x)
+ else
+ fun_l11_n187(x)
+ end
+end
+
+def fun_l10_n601(x)
+ if (x < 1)
+ fun_l11_n198(x)
+ else
+ fun_l11_n436(x)
+ end
+end
+
+def fun_l10_n602(x)
+ if (x < 1)
+ fun_l11_n207(x)
+ else
+ fun_l11_n86(x)
+ end
+end
+
+def fun_l10_n603(x)
+ if (x < 1)
+ fun_l11_n827(x)
+ else
+ fun_l11_n267(x)
+ end
+end
+
+def fun_l10_n604(x)
+ if (x < 1)
+ fun_l11_n760(x)
+ else
+ fun_l11_n707(x)
+ end
+end
+
+def fun_l10_n605(x)
+ if (x < 1)
+ fun_l11_n791(x)
+ else
+ fun_l11_n249(x)
+ end
+end
+
+def fun_l10_n606(x)
+ if (x < 1)
+ fun_l11_n634(x)
+ else
+ fun_l11_n607(x)
+ end
+end
+
+def fun_l10_n607(x)
+ if (x < 1)
+ fun_l11_n605(x)
+ else
+ fun_l11_n698(x)
+ end
+end
+
+def fun_l10_n608(x)
+ if (x < 1)
+ fun_l11_n401(x)
+ else
+ fun_l11_n316(x)
+ end
+end
+
+def fun_l10_n609(x)
+ if (x < 1)
+ fun_l11_n928(x)
+ else
+ fun_l11_n313(x)
+ end
+end
+
+def fun_l10_n610(x)
+ if (x < 1)
+ fun_l11_n601(x)
+ else
+ fun_l11_n278(x)
+ end
+end
+
+def fun_l10_n611(x)
+ if (x < 1)
+ fun_l11_n554(x)
+ else
+ fun_l11_n584(x)
+ end
+end
+
+def fun_l10_n612(x)
+ if (x < 1)
+ fun_l11_n906(x)
+ else
+ fun_l11_n58(x)
+ end
+end
+
+def fun_l10_n613(x)
+ if (x < 1)
+ fun_l11_n85(x)
+ else
+ fun_l11_n631(x)
+ end
+end
+
+def fun_l10_n614(x)
+ if (x < 1)
+ fun_l11_n221(x)
+ else
+ fun_l11_n85(x)
+ end
+end
+
+def fun_l10_n615(x)
+ if (x < 1)
+ fun_l11_n88(x)
+ else
+ fun_l11_n367(x)
+ end
+end
+
+def fun_l10_n616(x)
+ if (x < 1)
+ fun_l11_n693(x)
+ else
+ fun_l11_n385(x)
+ end
+end
+
+def fun_l10_n617(x)
+ if (x < 1)
+ fun_l11_n18(x)
+ else
+ fun_l11_n526(x)
+ end
+end
+
+def fun_l10_n618(x)
+ if (x < 1)
+ fun_l11_n307(x)
+ else
+ fun_l11_n303(x)
+ end
+end
+
+def fun_l10_n619(x)
+ if (x < 1)
+ fun_l11_n972(x)
+ else
+ fun_l11_n246(x)
+ end
+end
+
+def fun_l10_n620(x)
+ if (x < 1)
+ fun_l11_n185(x)
+ else
+ fun_l11_n512(x)
+ end
+end
+
+def fun_l10_n621(x)
+ if (x < 1)
+ fun_l11_n87(x)
+ else
+ fun_l11_n131(x)
+ end
+end
+
+def fun_l10_n622(x)
+ if (x < 1)
+ fun_l11_n617(x)
+ else
+ fun_l11_n869(x)
+ end
+end
+
+def fun_l10_n623(x)
+ if (x < 1)
+ fun_l11_n514(x)
+ else
+ fun_l11_n824(x)
+ end
+end
+
+def fun_l10_n624(x)
+ if (x < 1)
+ fun_l11_n551(x)
+ else
+ fun_l11_n430(x)
+ end
+end
+
+def fun_l10_n625(x)
+ if (x < 1)
+ fun_l11_n266(x)
+ else
+ fun_l11_n826(x)
+ end
+end
+
+def fun_l10_n626(x)
+ if (x < 1)
+ fun_l11_n90(x)
+ else
+ fun_l11_n866(x)
+ end
+end
+
+def fun_l10_n627(x)
+ if (x < 1)
+ fun_l11_n34(x)
+ else
+ fun_l11_n694(x)
+ end
+end
+
+def fun_l10_n628(x)
+ if (x < 1)
+ fun_l11_n455(x)
+ else
+ fun_l11_n990(x)
+ end
+end
+
+def fun_l10_n629(x)
+ if (x < 1)
+ fun_l11_n573(x)
+ else
+ fun_l11_n874(x)
+ end
+end
+
+def fun_l10_n630(x)
+ if (x < 1)
+ fun_l11_n836(x)
+ else
+ fun_l11_n87(x)
+ end
+end
+
+def fun_l10_n631(x)
+ if (x < 1)
+ fun_l11_n234(x)
+ else
+ fun_l11_n389(x)
+ end
+end
+
+def fun_l10_n632(x)
+ if (x < 1)
+ fun_l11_n647(x)
+ else
+ fun_l11_n655(x)
+ end
+end
+
+def fun_l10_n633(x)
+ if (x < 1)
+ fun_l11_n649(x)
+ else
+ fun_l11_n426(x)
+ end
+end
+
+def fun_l10_n634(x)
+ if (x < 1)
+ fun_l11_n613(x)
+ else
+ fun_l11_n10(x)
+ end
+end
+
+def fun_l10_n635(x)
+ if (x < 1)
+ fun_l11_n315(x)
+ else
+ fun_l11_n419(x)
+ end
+end
+
+def fun_l10_n636(x)
+ if (x < 1)
+ fun_l11_n575(x)
+ else
+ fun_l11_n29(x)
+ end
+end
+
+def fun_l10_n637(x)
+ if (x < 1)
+ fun_l11_n75(x)
+ else
+ fun_l11_n710(x)
+ end
+end
+
+def fun_l10_n638(x)
+ if (x < 1)
+ fun_l11_n42(x)
+ else
+ fun_l11_n45(x)
+ end
+end
+
+def fun_l10_n639(x)
+ if (x < 1)
+ fun_l11_n629(x)
+ else
+ fun_l11_n850(x)
+ end
+end
+
+def fun_l10_n640(x)
+ if (x < 1)
+ fun_l11_n491(x)
+ else
+ fun_l11_n708(x)
+ end
+end
+
+def fun_l10_n641(x)
+ if (x < 1)
+ fun_l11_n673(x)
+ else
+ fun_l11_n722(x)
+ end
+end
+
+def fun_l10_n642(x)
+ if (x < 1)
+ fun_l11_n922(x)
+ else
+ fun_l11_n783(x)
+ end
+end
+
+def fun_l10_n643(x)
+ if (x < 1)
+ fun_l11_n855(x)
+ else
+ fun_l11_n145(x)
+ end
+end
+
+def fun_l10_n644(x)
+ if (x < 1)
+ fun_l11_n897(x)
+ else
+ fun_l11_n281(x)
+ end
+end
+
+def fun_l10_n645(x)
+ if (x < 1)
+ fun_l11_n148(x)
+ else
+ fun_l11_n786(x)
+ end
+end
+
+def fun_l10_n646(x)
+ if (x < 1)
+ fun_l11_n743(x)
+ else
+ fun_l11_n627(x)
+ end
+end
+
+def fun_l10_n647(x)
+ if (x < 1)
+ fun_l11_n343(x)
+ else
+ fun_l11_n234(x)
+ end
+end
+
+def fun_l10_n648(x)
+ if (x < 1)
+ fun_l11_n995(x)
+ else
+ fun_l11_n199(x)
+ end
+end
+
+def fun_l10_n649(x)
+ if (x < 1)
+ fun_l11_n155(x)
+ else
+ fun_l11_n384(x)
+ end
+end
+
+def fun_l10_n650(x)
+ if (x < 1)
+ fun_l11_n329(x)
+ else
+ fun_l11_n313(x)
+ end
+end
+
+def fun_l10_n651(x)
+ if (x < 1)
+ fun_l11_n254(x)
+ else
+ fun_l11_n763(x)
+ end
+end
+
+def fun_l10_n652(x)
+ if (x < 1)
+ fun_l11_n839(x)
+ else
+ fun_l11_n692(x)
+ end
+end
+
+def fun_l10_n653(x)
+ if (x < 1)
+ fun_l11_n388(x)
+ else
+ fun_l11_n187(x)
+ end
+end
+
+def fun_l10_n654(x)
+ if (x < 1)
+ fun_l11_n919(x)
+ else
+ fun_l11_n198(x)
+ end
+end
+
+def fun_l10_n655(x)
+ if (x < 1)
+ fun_l11_n313(x)
+ else
+ fun_l11_n686(x)
+ end
+end
+
+def fun_l10_n656(x)
+ if (x < 1)
+ fun_l11_n91(x)
+ else
+ fun_l11_n95(x)
+ end
+end
+
+def fun_l10_n657(x)
+ if (x < 1)
+ fun_l11_n801(x)
+ else
+ fun_l11_n700(x)
+ end
+end
+
+def fun_l10_n658(x)
+ if (x < 1)
+ fun_l11_n938(x)
+ else
+ fun_l11_n936(x)
+ end
+end
+
+def fun_l10_n659(x)
+ if (x < 1)
+ fun_l11_n78(x)
+ else
+ fun_l11_n611(x)
+ end
+end
+
+def fun_l10_n660(x)
+ if (x < 1)
+ fun_l11_n261(x)
+ else
+ fun_l11_n396(x)
+ end
+end
+
+def fun_l10_n661(x)
+ if (x < 1)
+ fun_l11_n154(x)
+ else
+ fun_l11_n132(x)
+ end
+end
+
+def fun_l10_n662(x)
+ if (x < 1)
+ fun_l11_n603(x)
+ else
+ fun_l11_n718(x)
+ end
+end
+
+def fun_l10_n663(x)
+ if (x < 1)
+ fun_l11_n512(x)
+ else
+ fun_l11_n158(x)
+ end
+end
+
+def fun_l10_n664(x)
+ if (x < 1)
+ fun_l11_n178(x)
+ else
+ fun_l11_n59(x)
+ end
+end
+
+def fun_l10_n665(x)
+ if (x < 1)
+ fun_l11_n703(x)
+ else
+ fun_l11_n54(x)
+ end
+end
+
+def fun_l10_n666(x)
+ if (x < 1)
+ fun_l11_n623(x)
+ else
+ fun_l11_n793(x)
+ end
+end
+
+def fun_l10_n667(x)
+ if (x < 1)
+ fun_l11_n350(x)
+ else
+ fun_l11_n244(x)
+ end
+end
+
+def fun_l10_n668(x)
+ if (x < 1)
+ fun_l11_n253(x)
+ else
+ fun_l11_n297(x)
+ end
+end
+
+def fun_l10_n669(x)
+ if (x < 1)
+ fun_l11_n137(x)
+ else
+ fun_l11_n76(x)
+ end
+end
+
+def fun_l10_n670(x)
+ if (x < 1)
+ fun_l11_n755(x)
+ else
+ fun_l11_n627(x)
+ end
+end
+
+def fun_l10_n671(x)
+ if (x < 1)
+ fun_l11_n393(x)
+ else
+ fun_l11_n394(x)
+ end
+end
+
+def fun_l10_n672(x)
+ if (x < 1)
+ fun_l11_n858(x)
+ else
+ fun_l11_n823(x)
+ end
+end
+
+def fun_l10_n673(x)
+ if (x < 1)
+ fun_l11_n202(x)
+ else
+ fun_l11_n800(x)
+ end
+end
+
+def fun_l10_n674(x)
+ if (x < 1)
+ fun_l11_n977(x)
+ else
+ fun_l11_n343(x)
+ end
+end
+
+def fun_l10_n675(x)
+ if (x < 1)
+ fun_l11_n352(x)
+ else
+ fun_l11_n690(x)
+ end
+end
+
+def fun_l10_n676(x)
+ if (x < 1)
+ fun_l11_n598(x)
+ else
+ fun_l11_n108(x)
+ end
+end
+
+def fun_l10_n677(x)
+ if (x < 1)
+ fun_l11_n126(x)
+ else
+ fun_l11_n226(x)
+ end
+end
+
+def fun_l10_n678(x)
+ if (x < 1)
+ fun_l11_n937(x)
+ else
+ fun_l11_n846(x)
+ end
+end
+
+def fun_l10_n679(x)
+ if (x < 1)
+ fun_l11_n231(x)
+ else
+ fun_l11_n264(x)
+ end
+end
+
+def fun_l10_n680(x)
+ if (x < 1)
+ fun_l11_n657(x)
+ else
+ fun_l11_n766(x)
+ end
+end
+
+def fun_l10_n681(x)
+ if (x < 1)
+ fun_l11_n444(x)
+ else
+ fun_l11_n405(x)
+ end
+end
+
+def fun_l10_n682(x)
+ if (x < 1)
+ fun_l11_n410(x)
+ else
+ fun_l11_n409(x)
+ end
+end
+
+def fun_l10_n683(x)
+ if (x < 1)
+ fun_l11_n690(x)
+ else
+ fun_l11_n673(x)
+ end
+end
+
+def fun_l10_n684(x)
+ if (x < 1)
+ fun_l11_n737(x)
+ else
+ fun_l11_n61(x)
+ end
+end
+
+def fun_l10_n685(x)
+ if (x < 1)
+ fun_l11_n54(x)
+ else
+ fun_l11_n387(x)
+ end
+end
+
+def fun_l10_n686(x)
+ if (x < 1)
+ fun_l11_n271(x)
+ else
+ fun_l11_n698(x)
+ end
+end
+
+def fun_l10_n687(x)
+ if (x < 1)
+ fun_l11_n988(x)
+ else
+ fun_l11_n816(x)
+ end
+end
+
+def fun_l10_n688(x)
+ if (x < 1)
+ fun_l11_n309(x)
+ else
+ fun_l11_n576(x)
+ end
+end
+
+def fun_l10_n689(x)
+ if (x < 1)
+ fun_l11_n390(x)
+ else
+ fun_l11_n35(x)
+ end
+end
+
+def fun_l10_n690(x)
+ if (x < 1)
+ fun_l11_n93(x)
+ else
+ fun_l11_n712(x)
+ end
+end
+
+def fun_l10_n691(x)
+ if (x < 1)
+ fun_l11_n285(x)
+ else
+ fun_l11_n467(x)
+ end
+end
+
+def fun_l10_n692(x)
+ if (x < 1)
+ fun_l11_n403(x)
+ else
+ fun_l11_n41(x)
+ end
+end
+
+def fun_l10_n693(x)
+ if (x < 1)
+ fun_l11_n228(x)
+ else
+ fun_l11_n689(x)
+ end
+end
+
+def fun_l10_n694(x)
+ if (x < 1)
+ fun_l11_n581(x)
+ else
+ fun_l11_n989(x)
+ end
+end
+
+def fun_l10_n695(x)
+ if (x < 1)
+ fun_l11_n451(x)
+ else
+ fun_l11_n618(x)
+ end
+end
+
+def fun_l10_n696(x)
+ if (x < 1)
+ fun_l11_n640(x)
+ else
+ fun_l11_n13(x)
+ end
+end
+
+def fun_l10_n697(x)
+ if (x < 1)
+ fun_l11_n968(x)
+ else
+ fun_l11_n903(x)
+ end
+end
+
+def fun_l10_n698(x)
+ if (x < 1)
+ fun_l11_n918(x)
+ else
+ fun_l11_n244(x)
+ end
+end
+
+def fun_l10_n699(x)
+ if (x < 1)
+ fun_l11_n17(x)
+ else
+ fun_l11_n17(x)
+ end
+end
+
+def fun_l10_n700(x)
+ if (x < 1)
+ fun_l11_n918(x)
+ else
+ fun_l11_n539(x)
+ end
+end
+
+def fun_l10_n701(x)
+ if (x < 1)
+ fun_l11_n924(x)
+ else
+ fun_l11_n103(x)
+ end
+end
+
+def fun_l10_n702(x)
+ if (x < 1)
+ fun_l11_n908(x)
+ else
+ fun_l11_n328(x)
+ end
+end
+
+def fun_l10_n703(x)
+ if (x < 1)
+ fun_l11_n393(x)
+ else
+ fun_l11_n386(x)
+ end
+end
+
+def fun_l10_n704(x)
+ if (x < 1)
+ fun_l11_n194(x)
+ else
+ fun_l11_n812(x)
+ end
+end
+
+def fun_l10_n705(x)
+ if (x < 1)
+ fun_l11_n382(x)
+ else
+ fun_l11_n276(x)
+ end
+end
+
+def fun_l10_n706(x)
+ if (x < 1)
+ fun_l11_n257(x)
+ else
+ fun_l11_n269(x)
+ end
+end
+
+def fun_l10_n707(x)
+ if (x < 1)
+ fun_l11_n940(x)
+ else
+ fun_l11_n593(x)
+ end
+end
+
+def fun_l10_n708(x)
+ if (x < 1)
+ fun_l11_n710(x)
+ else
+ fun_l11_n992(x)
+ end
+end
+
+def fun_l10_n709(x)
+ if (x < 1)
+ fun_l11_n244(x)
+ else
+ fun_l11_n500(x)
+ end
+end
+
+def fun_l10_n710(x)
+ if (x < 1)
+ fun_l11_n712(x)
+ else
+ fun_l11_n544(x)
+ end
+end
+
+def fun_l10_n711(x)
+ if (x < 1)
+ fun_l11_n701(x)
+ else
+ fun_l11_n104(x)
+ end
+end
+
+def fun_l10_n712(x)
+ if (x < 1)
+ fun_l11_n706(x)
+ else
+ fun_l11_n199(x)
+ end
+end
+
+def fun_l10_n713(x)
+ if (x < 1)
+ fun_l11_n189(x)
+ else
+ fun_l11_n840(x)
+ end
+end
+
+def fun_l10_n714(x)
+ if (x < 1)
+ fun_l11_n122(x)
+ else
+ fun_l11_n318(x)
+ end
+end
+
+def fun_l10_n715(x)
+ if (x < 1)
+ fun_l11_n915(x)
+ else
+ fun_l11_n964(x)
+ end
+end
+
+def fun_l10_n716(x)
+ if (x < 1)
+ fun_l11_n341(x)
+ else
+ fun_l11_n118(x)
+ end
+end
+
+def fun_l10_n717(x)
+ if (x < 1)
+ fun_l11_n846(x)
+ else
+ fun_l11_n235(x)
+ end
+end
+
+def fun_l10_n718(x)
+ if (x < 1)
+ fun_l11_n679(x)
+ else
+ fun_l11_n959(x)
+ end
+end
+
+def fun_l10_n719(x)
+ if (x < 1)
+ fun_l11_n121(x)
+ else
+ fun_l11_n708(x)
+ end
+end
+
+def fun_l10_n720(x)
+ if (x < 1)
+ fun_l11_n651(x)
+ else
+ fun_l11_n480(x)
+ end
+end
+
+def fun_l10_n721(x)
+ if (x < 1)
+ fun_l11_n773(x)
+ else
+ fun_l11_n568(x)
+ end
+end
+
+def fun_l10_n722(x)
+ if (x < 1)
+ fun_l11_n809(x)
+ else
+ fun_l11_n111(x)
+ end
+end
+
+def fun_l10_n723(x)
+ if (x < 1)
+ fun_l11_n586(x)
+ else
+ fun_l11_n553(x)
+ end
+end
+
+def fun_l10_n724(x)
+ if (x < 1)
+ fun_l11_n473(x)
+ else
+ fun_l11_n387(x)
+ end
+end
+
+def fun_l10_n725(x)
+ if (x < 1)
+ fun_l11_n584(x)
+ else
+ fun_l11_n224(x)
+ end
+end
+
+def fun_l10_n726(x)
+ if (x < 1)
+ fun_l11_n110(x)
+ else
+ fun_l11_n827(x)
+ end
+end
+
+def fun_l10_n727(x)
+ if (x < 1)
+ fun_l11_n753(x)
+ else
+ fun_l11_n745(x)
+ end
+end
+
+def fun_l10_n728(x)
+ if (x < 1)
+ fun_l11_n550(x)
+ else
+ fun_l11_n380(x)
+ end
+end
+
+def fun_l10_n729(x)
+ if (x < 1)
+ fun_l11_n844(x)
+ else
+ fun_l11_n401(x)
+ end
+end
+
+def fun_l10_n730(x)
+ if (x < 1)
+ fun_l11_n837(x)
+ else
+ fun_l11_n759(x)
+ end
+end
+
+def fun_l10_n731(x)
+ if (x < 1)
+ fun_l11_n850(x)
+ else
+ fun_l11_n953(x)
+ end
+end
+
+def fun_l10_n732(x)
+ if (x < 1)
+ fun_l11_n872(x)
+ else
+ fun_l11_n864(x)
+ end
+end
+
+def fun_l10_n733(x)
+ if (x < 1)
+ fun_l11_n894(x)
+ else
+ fun_l11_n943(x)
+ end
+end
+
+def fun_l10_n734(x)
+ if (x < 1)
+ fun_l11_n830(x)
+ else
+ fun_l11_n858(x)
+ end
+end
+
+def fun_l10_n735(x)
+ if (x < 1)
+ fun_l11_n661(x)
+ else
+ fun_l11_n366(x)
+ end
+end
+
+def fun_l10_n736(x)
+ if (x < 1)
+ fun_l11_n736(x)
+ else
+ fun_l11_n636(x)
+ end
+end
+
+def fun_l10_n737(x)
+ if (x < 1)
+ fun_l11_n316(x)
+ else
+ fun_l11_n977(x)
+ end
+end
+
+def fun_l10_n738(x)
+ if (x < 1)
+ fun_l11_n54(x)
+ else
+ fun_l11_n386(x)
+ end
+end
+
+def fun_l10_n739(x)
+ if (x < 1)
+ fun_l11_n985(x)
+ else
+ fun_l11_n274(x)
+ end
+end
+
+def fun_l10_n740(x)
+ if (x < 1)
+ fun_l11_n756(x)
+ else
+ fun_l11_n171(x)
+ end
+end
+
+def fun_l10_n741(x)
+ if (x < 1)
+ fun_l11_n457(x)
+ else
+ fun_l11_n251(x)
+ end
+end
+
+def fun_l10_n742(x)
+ if (x < 1)
+ fun_l11_n751(x)
+ else
+ fun_l11_n29(x)
+ end
+end
+
+def fun_l10_n743(x)
+ if (x < 1)
+ fun_l11_n6(x)
+ else
+ fun_l11_n892(x)
+ end
+end
+
+def fun_l10_n744(x)
+ if (x < 1)
+ fun_l11_n85(x)
+ else
+ fun_l11_n205(x)
+ end
+end
+
+def fun_l10_n745(x)
+ if (x < 1)
+ fun_l11_n887(x)
+ else
+ fun_l11_n734(x)
+ end
+end
+
+def fun_l10_n746(x)
+ if (x < 1)
+ fun_l11_n478(x)
+ else
+ fun_l11_n505(x)
+ end
+end
+
+def fun_l10_n747(x)
+ if (x < 1)
+ fun_l11_n760(x)
+ else
+ fun_l11_n286(x)
+ end
+end
+
+def fun_l10_n748(x)
+ if (x < 1)
+ fun_l11_n391(x)
+ else
+ fun_l11_n69(x)
+ end
+end
+
+def fun_l10_n749(x)
+ if (x < 1)
+ fun_l11_n780(x)
+ else
+ fun_l11_n28(x)
+ end
+end
+
+def fun_l10_n750(x)
+ if (x < 1)
+ fun_l11_n337(x)
+ else
+ fun_l11_n647(x)
+ end
+end
+
+def fun_l10_n751(x)
+ if (x < 1)
+ fun_l11_n92(x)
+ else
+ fun_l11_n121(x)
+ end
+end
+
+def fun_l10_n752(x)
+ if (x < 1)
+ fun_l11_n358(x)
+ else
+ fun_l11_n177(x)
+ end
+end
+
+def fun_l10_n753(x)
+ if (x < 1)
+ fun_l11_n642(x)
+ else
+ fun_l11_n426(x)
+ end
+end
+
+def fun_l10_n754(x)
+ if (x < 1)
+ fun_l11_n66(x)
+ else
+ fun_l11_n948(x)
+ end
+end
+
+def fun_l10_n755(x)
+ if (x < 1)
+ fun_l11_n45(x)
+ else
+ fun_l11_n795(x)
+ end
+end
+
+def fun_l10_n756(x)
+ if (x < 1)
+ fun_l11_n734(x)
+ else
+ fun_l11_n162(x)
+ end
+end
+
+def fun_l10_n757(x)
+ if (x < 1)
+ fun_l11_n689(x)
+ else
+ fun_l11_n31(x)
+ end
+end
+
+def fun_l10_n758(x)
+ if (x < 1)
+ fun_l11_n33(x)
+ else
+ fun_l11_n992(x)
+ end
+end
+
+def fun_l10_n759(x)
+ if (x < 1)
+ fun_l11_n210(x)
+ else
+ fun_l11_n330(x)
+ end
+end
+
+def fun_l10_n760(x)
+ if (x < 1)
+ fun_l11_n727(x)
+ else
+ fun_l11_n265(x)
+ end
+end
+
+def fun_l10_n761(x)
+ if (x < 1)
+ fun_l11_n710(x)
+ else
+ fun_l11_n706(x)
+ end
+end
+
+def fun_l10_n762(x)
+ if (x < 1)
+ fun_l11_n791(x)
+ else
+ fun_l11_n325(x)
+ end
+end
+
+def fun_l10_n763(x)
+ if (x < 1)
+ fun_l11_n350(x)
+ else
+ fun_l11_n443(x)
+ end
+end
+
+def fun_l10_n764(x)
+ if (x < 1)
+ fun_l11_n522(x)
+ else
+ fun_l11_n662(x)
+ end
+end
+
+def fun_l10_n765(x)
+ if (x < 1)
+ fun_l11_n786(x)
+ else
+ fun_l11_n969(x)
+ end
+end
+
+def fun_l10_n766(x)
+ if (x < 1)
+ fun_l11_n486(x)
+ else
+ fun_l11_n834(x)
+ end
+end
+
+def fun_l10_n767(x)
+ if (x < 1)
+ fun_l11_n84(x)
+ else
+ fun_l11_n989(x)
+ end
+end
+
+def fun_l10_n768(x)
+ if (x < 1)
+ fun_l11_n511(x)
+ else
+ fun_l11_n0(x)
+ end
+end
+
+def fun_l10_n769(x)
+ if (x < 1)
+ fun_l11_n459(x)
+ else
+ fun_l11_n243(x)
+ end
+end
+
+def fun_l10_n770(x)
+ if (x < 1)
+ fun_l11_n59(x)
+ else
+ fun_l11_n267(x)
+ end
+end
+
+def fun_l10_n771(x)
+ if (x < 1)
+ fun_l11_n435(x)
+ else
+ fun_l11_n132(x)
+ end
+end
+
+def fun_l10_n772(x)
+ if (x < 1)
+ fun_l11_n986(x)
+ else
+ fun_l11_n227(x)
+ end
+end
+
+def fun_l10_n773(x)
+ if (x < 1)
+ fun_l11_n931(x)
+ else
+ fun_l11_n215(x)
+ end
+end
+
+def fun_l10_n774(x)
+ if (x < 1)
+ fun_l11_n586(x)
+ else
+ fun_l11_n170(x)
+ end
+end
+
+def fun_l10_n775(x)
+ if (x < 1)
+ fun_l11_n334(x)
+ else
+ fun_l11_n780(x)
+ end
+end
+
+def fun_l10_n776(x)
+ if (x < 1)
+ fun_l11_n81(x)
+ else
+ fun_l11_n583(x)
+ end
+end
+
+def fun_l10_n777(x)
+ if (x < 1)
+ fun_l11_n118(x)
+ else
+ fun_l11_n451(x)
+ end
+end
+
+def fun_l10_n778(x)
+ if (x < 1)
+ fun_l11_n767(x)
+ else
+ fun_l11_n824(x)
+ end
+end
+
+def fun_l10_n779(x)
+ if (x < 1)
+ fun_l11_n270(x)
+ else
+ fun_l11_n725(x)
+ end
+end
+
+def fun_l10_n780(x)
+ if (x < 1)
+ fun_l11_n146(x)
+ else
+ fun_l11_n407(x)
+ end
+end
+
+def fun_l10_n781(x)
+ if (x < 1)
+ fun_l11_n103(x)
+ else
+ fun_l11_n523(x)
+ end
+end
+
+def fun_l10_n782(x)
+ if (x < 1)
+ fun_l11_n499(x)
+ else
+ fun_l11_n710(x)
+ end
+end
+
+def fun_l10_n783(x)
+ if (x < 1)
+ fun_l11_n971(x)
+ else
+ fun_l11_n426(x)
+ end
+end
+
+def fun_l10_n784(x)
+ if (x < 1)
+ fun_l11_n457(x)
+ else
+ fun_l11_n207(x)
+ end
+end
+
+def fun_l10_n785(x)
+ if (x < 1)
+ fun_l11_n307(x)
+ else
+ fun_l11_n19(x)
+ end
+end
+
+def fun_l10_n786(x)
+ if (x < 1)
+ fun_l11_n458(x)
+ else
+ fun_l11_n243(x)
+ end
+end
+
+def fun_l10_n787(x)
+ if (x < 1)
+ fun_l11_n488(x)
+ else
+ fun_l11_n694(x)
+ end
+end
+
+def fun_l10_n788(x)
+ if (x < 1)
+ fun_l11_n422(x)
+ else
+ fun_l11_n201(x)
+ end
+end
+
+def fun_l10_n789(x)
+ if (x < 1)
+ fun_l11_n45(x)
+ else
+ fun_l11_n993(x)
+ end
+end
+
+def fun_l10_n790(x)
+ if (x < 1)
+ fun_l11_n443(x)
+ else
+ fun_l11_n124(x)
+ end
+end
+
+def fun_l10_n791(x)
+ if (x < 1)
+ fun_l11_n875(x)
+ else
+ fun_l11_n605(x)
+ end
+end
+
+def fun_l10_n792(x)
+ if (x < 1)
+ fun_l11_n836(x)
+ else
+ fun_l11_n776(x)
+ end
+end
+
+def fun_l10_n793(x)
+ if (x < 1)
+ fun_l11_n58(x)
+ else
+ fun_l11_n847(x)
+ end
+end
+
+def fun_l10_n794(x)
+ if (x < 1)
+ fun_l11_n334(x)
+ else
+ fun_l11_n981(x)
+ end
+end
+
+def fun_l10_n795(x)
+ if (x < 1)
+ fun_l11_n187(x)
+ else
+ fun_l11_n599(x)
+ end
+end
+
+def fun_l10_n796(x)
+ if (x < 1)
+ fun_l11_n37(x)
+ else
+ fun_l11_n211(x)
+ end
+end
+
+def fun_l10_n797(x)
+ if (x < 1)
+ fun_l11_n770(x)
+ else
+ fun_l11_n688(x)
+ end
+end
+
+def fun_l10_n798(x)
+ if (x < 1)
+ fun_l11_n728(x)
+ else
+ fun_l11_n150(x)
+ end
+end
+
+def fun_l10_n799(x)
+ if (x < 1)
+ fun_l11_n175(x)
+ else
+ fun_l11_n762(x)
+ end
+end
+
+def fun_l10_n800(x)
+ if (x < 1)
+ fun_l11_n977(x)
+ else
+ fun_l11_n86(x)
+ end
+end
+
+def fun_l10_n801(x)
+ if (x < 1)
+ fun_l11_n669(x)
+ else
+ fun_l11_n120(x)
+ end
+end
+
+def fun_l10_n802(x)
+ if (x < 1)
+ fun_l11_n630(x)
+ else
+ fun_l11_n839(x)
+ end
+end
+
+def fun_l10_n803(x)
+ if (x < 1)
+ fun_l11_n505(x)
+ else
+ fun_l11_n461(x)
+ end
+end
+
+def fun_l10_n804(x)
+ if (x < 1)
+ fun_l11_n757(x)
+ else
+ fun_l11_n620(x)
+ end
+end
+
+def fun_l10_n805(x)
+ if (x < 1)
+ fun_l11_n379(x)
+ else
+ fun_l11_n468(x)
+ end
+end
+
+def fun_l10_n806(x)
+ if (x < 1)
+ fun_l11_n708(x)
+ else
+ fun_l11_n73(x)
+ end
+end
+
+def fun_l10_n807(x)
+ if (x < 1)
+ fun_l11_n340(x)
+ else
+ fun_l11_n671(x)
+ end
+end
+
+def fun_l10_n808(x)
+ if (x < 1)
+ fun_l11_n628(x)
+ else
+ fun_l11_n912(x)
+ end
+end
+
+def fun_l10_n809(x)
+ if (x < 1)
+ fun_l11_n455(x)
+ else
+ fun_l11_n20(x)
+ end
+end
+
+def fun_l10_n810(x)
+ if (x < 1)
+ fun_l11_n211(x)
+ else
+ fun_l11_n724(x)
+ end
+end
+
+def fun_l10_n811(x)
+ if (x < 1)
+ fun_l11_n982(x)
+ else
+ fun_l11_n876(x)
+ end
+end
+
+def fun_l10_n812(x)
+ if (x < 1)
+ fun_l11_n455(x)
+ else
+ fun_l11_n846(x)
+ end
+end
+
+def fun_l10_n813(x)
+ if (x < 1)
+ fun_l11_n317(x)
+ else
+ fun_l11_n909(x)
+ end
+end
+
+def fun_l10_n814(x)
+ if (x < 1)
+ fun_l11_n622(x)
+ else
+ fun_l11_n147(x)
+ end
+end
+
+def fun_l10_n815(x)
+ if (x < 1)
+ fun_l11_n174(x)
+ else
+ fun_l11_n660(x)
+ end
+end
+
+def fun_l10_n816(x)
+ if (x < 1)
+ fun_l11_n63(x)
+ else
+ fun_l11_n427(x)
+ end
+end
+
+def fun_l10_n817(x)
+ if (x < 1)
+ fun_l11_n944(x)
+ else
+ fun_l11_n40(x)
+ end
+end
+
+def fun_l10_n818(x)
+ if (x < 1)
+ fun_l11_n887(x)
+ else
+ fun_l11_n641(x)
+ end
+end
+
+def fun_l10_n819(x)
+ if (x < 1)
+ fun_l11_n237(x)
+ else
+ fun_l11_n568(x)
+ end
+end
+
+def fun_l10_n820(x)
+ if (x < 1)
+ fun_l11_n415(x)
+ else
+ fun_l11_n374(x)
+ end
+end
+
+def fun_l10_n821(x)
+ if (x < 1)
+ fun_l11_n183(x)
+ else
+ fun_l11_n38(x)
+ end
+end
+
+def fun_l10_n822(x)
+ if (x < 1)
+ fun_l11_n835(x)
+ else
+ fun_l11_n406(x)
+ end
+end
+
+def fun_l10_n823(x)
+ if (x < 1)
+ fun_l11_n233(x)
+ else
+ fun_l11_n118(x)
+ end
+end
+
+def fun_l10_n824(x)
+ if (x < 1)
+ fun_l11_n898(x)
+ else
+ fun_l11_n543(x)
+ end
+end
+
+def fun_l10_n825(x)
+ if (x < 1)
+ fun_l11_n971(x)
+ else
+ fun_l11_n750(x)
+ end
+end
+
+def fun_l10_n826(x)
+ if (x < 1)
+ fun_l11_n254(x)
+ else
+ fun_l11_n759(x)
+ end
+end
+
+def fun_l10_n827(x)
+ if (x < 1)
+ fun_l11_n953(x)
+ else
+ fun_l11_n187(x)
+ end
+end
+
+def fun_l10_n828(x)
+ if (x < 1)
+ fun_l11_n403(x)
+ else
+ fun_l11_n170(x)
+ end
+end
+
+def fun_l10_n829(x)
+ if (x < 1)
+ fun_l11_n251(x)
+ else
+ fun_l11_n506(x)
+ end
+end
+
+def fun_l10_n830(x)
+ if (x < 1)
+ fun_l11_n426(x)
+ else
+ fun_l11_n325(x)
+ end
+end
+
+def fun_l10_n831(x)
+ if (x < 1)
+ fun_l11_n958(x)
+ else
+ fun_l11_n4(x)
+ end
+end
+
+def fun_l10_n832(x)
+ if (x < 1)
+ fun_l11_n628(x)
+ else
+ fun_l11_n920(x)
+ end
+end
+
+def fun_l10_n833(x)
+ if (x < 1)
+ fun_l11_n241(x)
+ else
+ fun_l11_n459(x)
+ end
+end
+
+def fun_l10_n834(x)
+ if (x < 1)
+ fun_l11_n559(x)
+ else
+ fun_l11_n671(x)
+ end
+end
+
+def fun_l10_n835(x)
+ if (x < 1)
+ fun_l11_n980(x)
+ else
+ fun_l11_n295(x)
+ end
+end
+
+def fun_l10_n836(x)
+ if (x < 1)
+ fun_l11_n306(x)
+ else
+ fun_l11_n143(x)
+ end
+end
+
+def fun_l10_n837(x)
+ if (x < 1)
+ fun_l11_n176(x)
+ else
+ fun_l11_n341(x)
+ end
+end
+
+def fun_l10_n838(x)
+ if (x < 1)
+ fun_l11_n130(x)
+ else
+ fun_l11_n380(x)
+ end
+end
+
+def fun_l10_n839(x)
+ if (x < 1)
+ fun_l11_n630(x)
+ else
+ fun_l11_n216(x)
+ end
+end
+
+def fun_l10_n840(x)
+ if (x < 1)
+ fun_l11_n500(x)
+ else
+ fun_l11_n579(x)
+ end
+end
+
+def fun_l10_n841(x)
+ if (x < 1)
+ fun_l11_n312(x)
+ else
+ fun_l11_n656(x)
+ end
+end
+
+def fun_l10_n842(x)
+ if (x < 1)
+ fun_l11_n209(x)
+ else
+ fun_l11_n332(x)
+ end
+end
+
+def fun_l10_n843(x)
+ if (x < 1)
+ fun_l11_n555(x)
+ else
+ fun_l11_n179(x)
+ end
+end
+
+def fun_l10_n844(x)
+ if (x < 1)
+ fun_l11_n154(x)
+ else
+ fun_l11_n284(x)
+ end
+end
+
+def fun_l10_n845(x)
+ if (x < 1)
+ fun_l11_n912(x)
+ else
+ fun_l11_n543(x)
+ end
+end
+
+def fun_l10_n846(x)
+ if (x < 1)
+ fun_l11_n346(x)
+ else
+ fun_l11_n553(x)
+ end
+end
+
+def fun_l10_n847(x)
+ if (x < 1)
+ fun_l11_n117(x)
+ else
+ fun_l11_n738(x)
+ end
+end
+
+def fun_l10_n848(x)
+ if (x < 1)
+ fun_l11_n664(x)
+ else
+ fun_l11_n988(x)
+ end
+end
+
+def fun_l10_n849(x)
+ if (x < 1)
+ fun_l11_n298(x)
+ else
+ fun_l11_n753(x)
+ end
+end
+
+def fun_l10_n850(x)
+ if (x < 1)
+ fun_l11_n177(x)
+ else
+ fun_l11_n690(x)
+ end
+end
+
+def fun_l10_n851(x)
+ if (x < 1)
+ fun_l11_n259(x)
+ else
+ fun_l11_n713(x)
+ end
+end
+
+def fun_l10_n852(x)
+ if (x < 1)
+ fun_l11_n151(x)
+ else
+ fun_l11_n580(x)
+ end
+end
+
+def fun_l10_n853(x)
+ if (x < 1)
+ fun_l11_n274(x)
+ else
+ fun_l11_n703(x)
+ end
+end
+
+def fun_l10_n854(x)
+ if (x < 1)
+ fun_l11_n295(x)
+ else
+ fun_l11_n885(x)
+ end
+end
+
+def fun_l10_n855(x)
+ if (x < 1)
+ fun_l11_n513(x)
+ else
+ fun_l11_n963(x)
+ end
+end
+
+def fun_l10_n856(x)
+ if (x < 1)
+ fun_l11_n855(x)
+ else
+ fun_l11_n435(x)
+ end
+end
+
+def fun_l10_n857(x)
+ if (x < 1)
+ fun_l11_n573(x)
+ else
+ fun_l11_n489(x)
+ end
+end
+
+def fun_l10_n858(x)
+ if (x < 1)
+ fun_l11_n880(x)
+ else
+ fun_l11_n91(x)
+ end
+end
+
+def fun_l10_n859(x)
+ if (x < 1)
+ fun_l11_n993(x)
+ else
+ fun_l11_n358(x)
+ end
+end
+
+def fun_l10_n860(x)
+ if (x < 1)
+ fun_l11_n836(x)
+ else
+ fun_l11_n192(x)
+ end
+end
+
+def fun_l10_n861(x)
+ if (x < 1)
+ fun_l11_n493(x)
+ else
+ fun_l11_n564(x)
+ end
+end
+
+def fun_l10_n862(x)
+ if (x < 1)
+ fun_l11_n111(x)
+ else
+ fun_l11_n8(x)
+ end
+end
+
+def fun_l10_n863(x)
+ if (x < 1)
+ fun_l11_n596(x)
+ else
+ fun_l11_n129(x)
+ end
+end
+
+def fun_l10_n864(x)
+ if (x < 1)
+ fun_l11_n0(x)
+ else
+ fun_l11_n687(x)
+ end
+end
+
+def fun_l10_n865(x)
+ if (x < 1)
+ fun_l11_n543(x)
+ else
+ fun_l11_n866(x)
+ end
+end
+
+def fun_l10_n866(x)
+ if (x < 1)
+ fun_l11_n398(x)
+ else
+ fun_l11_n532(x)
+ end
+end
+
+def fun_l10_n867(x)
+ if (x < 1)
+ fun_l11_n15(x)
+ else
+ fun_l11_n991(x)
+ end
+end
+
+def fun_l10_n868(x)
+ if (x < 1)
+ fun_l11_n890(x)
+ else
+ fun_l11_n222(x)
+ end
+end
+
+def fun_l10_n869(x)
+ if (x < 1)
+ fun_l11_n206(x)
+ else
+ fun_l11_n983(x)
+ end
+end
+
+def fun_l10_n870(x)
+ if (x < 1)
+ fun_l11_n869(x)
+ else
+ fun_l11_n619(x)
+ end
+end
+
+def fun_l10_n871(x)
+ if (x < 1)
+ fun_l11_n417(x)
+ else
+ fun_l11_n18(x)
+ end
+end
+
+def fun_l10_n872(x)
+ if (x < 1)
+ fun_l11_n738(x)
+ else
+ fun_l11_n473(x)
+ end
+end
+
+def fun_l10_n873(x)
+ if (x < 1)
+ fun_l11_n773(x)
+ else
+ fun_l11_n128(x)
+ end
+end
+
+def fun_l10_n874(x)
+ if (x < 1)
+ fun_l11_n930(x)
+ else
+ fun_l11_n563(x)
+ end
+end
+
+def fun_l10_n875(x)
+ if (x < 1)
+ fun_l11_n868(x)
+ else
+ fun_l11_n356(x)
+ end
+end
+
+def fun_l10_n876(x)
+ if (x < 1)
+ fun_l11_n533(x)
+ else
+ fun_l11_n402(x)
+ end
+end
+
+def fun_l10_n877(x)
+ if (x < 1)
+ fun_l11_n959(x)
+ else
+ fun_l11_n921(x)
+ end
+end
+
+def fun_l10_n878(x)
+ if (x < 1)
+ fun_l11_n717(x)
+ else
+ fun_l11_n484(x)
+ end
+end
+
+def fun_l10_n879(x)
+ if (x < 1)
+ fun_l11_n583(x)
+ else
+ fun_l11_n801(x)
+ end
+end
+
+def fun_l10_n880(x)
+ if (x < 1)
+ fun_l11_n576(x)
+ else
+ fun_l11_n811(x)
+ end
+end
+
+def fun_l10_n881(x)
+ if (x < 1)
+ fun_l11_n382(x)
+ else
+ fun_l11_n585(x)
+ end
+end
+
+def fun_l10_n882(x)
+ if (x < 1)
+ fun_l11_n648(x)
+ else
+ fun_l11_n930(x)
+ end
+end
+
+def fun_l10_n883(x)
+ if (x < 1)
+ fun_l11_n246(x)
+ else
+ fun_l11_n636(x)
+ end
+end
+
+def fun_l10_n884(x)
+ if (x < 1)
+ fun_l11_n112(x)
+ else
+ fun_l11_n798(x)
+ end
+end
+
+def fun_l10_n885(x)
+ if (x < 1)
+ fun_l11_n517(x)
+ else
+ fun_l11_n633(x)
+ end
+end
+
+def fun_l10_n886(x)
+ if (x < 1)
+ fun_l11_n651(x)
+ else
+ fun_l11_n377(x)
+ end
+end
+
+def fun_l10_n887(x)
+ if (x < 1)
+ fun_l11_n652(x)
+ else
+ fun_l11_n419(x)
+ end
+end
+
+def fun_l10_n888(x)
+ if (x < 1)
+ fun_l11_n346(x)
+ else
+ fun_l11_n294(x)
+ end
+end
+
+def fun_l10_n889(x)
+ if (x < 1)
+ fun_l11_n809(x)
+ else
+ fun_l11_n844(x)
+ end
+end
+
+def fun_l10_n890(x)
+ if (x < 1)
+ fun_l11_n382(x)
+ else
+ fun_l11_n930(x)
+ end
+end
+
+def fun_l10_n891(x)
+ if (x < 1)
+ fun_l11_n900(x)
+ else
+ fun_l11_n293(x)
+ end
+end
+
+def fun_l10_n892(x)
+ if (x < 1)
+ fun_l11_n912(x)
+ else
+ fun_l11_n46(x)
+ end
+end
+
+def fun_l10_n893(x)
+ if (x < 1)
+ fun_l11_n429(x)
+ else
+ fun_l11_n174(x)
+ end
+end
+
+def fun_l10_n894(x)
+ if (x < 1)
+ fun_l11_n142(x)
+ else
+ fun_l11_n739(x)
+ end
+end
+
+def fun_l10_n895(x)
+ if (x < 1)
+ fun_l11_n300(x)
+ else
+ fun_l11_n546(x)
+ end
+end
+
+def fun_l10_n896(x)
+ if (x < 1)
+ fun_l11_n253(x)
+ else
+ fun_l11_n555(x)
+ end
+end
+
+def fun_l10_n897(x)
+ if (x < 1)
+ fun_l11_n29(x)
+ else
+ fun_l11_n913(x)
+ end
+end
+
+def fun_l10_n898(x)
+ if (x < 1)
+ fun_l11_n879(x)
+ else
+ fun_l11_n2(x)
+ end
+end
+
+def fun_l10_n899(x)
+ if (x < 1)
+ fun_l11_n191(x)
+ else
+ fun_l11_n418(x)
+ end
+end
+
+def fun_l10_n900(x)
+ if (x < 1)
+ fun_l11_n239(x)
+ else
+ fun_l11_n904(x)
+ end
+end
+
+def fun_l10_n901(x)
+ if (x < 1)
+ fun_l11_n962(x)
+ else
+ fun_l11_n978(x)
+ end
+end
+
+def fun_l10_n902(x)
+ if (x < 1)
+ fun_l11_n343(x)
+ else
+ fun_l11_n358(x)
+ end
+end
+
+def fun_l10_n903(x)
+ if (x < 1)
+ fun_l11_n334(x)
+ else
+ fun_l11_n806(x)
+ end
+end
+
+def fun_l10_n904(x)
+ if (x < 1)
+ fun_l11_n973(x)
+ else
+ fun_l11_n708(x)
+ end
+end
+
+def fun_l10_n905(x)
+ if (x < 1)
+ fun_l11_n990(x)
+ else
+ fun_l11_n211(x)
+ end
+end
+
+def fun_l10_n906(x)
+ if (x < 1)
+ fun_l11_n499(x)
+ else
+ fun_l11_n605(x)
+ end
+end
+
+def fun_l10_n907(x)
+ if (x < 1)
+ fun_l11_n335(x)
+ else
+ fun_l11_n855(x)
+ end
+end
+
+def fun_l10_n908(x)
+ if (x < 1)
+ fun_l11_n197(x)
+ else
+ fun_l11_n130(x)
+ end
+end
+
+def fun_l10_n909(x)
+ if (x < 1)
+ fun_l11_n462(x)
+ else
+ fun_l11_n838(x)
+ end
+end
+
+def fun_l10_n910(x)
+ if (x < 1)
+ fun_l11_n254(x)
+ else
+ fun_l11_n16(x)
+ end
+end
+
+def fun_l10_n911(x)
+ if (x < 1)
+ fun_l11_n321(x)
+ else
+ fun_l11_n158(x)
+ end
+end
+
+def fun_l10_n912(x)
+ if (x < 1)
+ fun_l11_n113(x)
+ else
+ fun_l11_n50(x)
+ end
+end
+
+def fun_l10_n913(x)
+ if (x < 1)
+ fun_l11_n492(x)
+ else
+ fun_l11_n502(x)
+ end
+end
+
+def fun_l10_n914(x)
+ if (x < 1)
+ fun_l11_n221(x)
+ else
+ fun_l11_n732(x)
+ end
+end
+
+def fun_l10_n915(x)
+ if (x < 1)
+ fun_l11_n853(x)
+ else
+ fun_l11_n625(x)
+ end
+end
+
+def fun_l10_n916(x)
+ if (x < 1)
+ fun_l11_n172(x)
+ else
+ fun_l11_n17(x)
+ end
+end
+
+def fun_l10_n917(x)
+ if (x < 1)
+ fun_l11_n438(x)
+ else
+ fun_l11_n480(x)
+ end
+end
+
+def fun_l10_n918(x)
+ if (x < 1)
+ fun_l11_n646(x)
+ else
+ fun_l11_n833(x)
+ end
+end
+
+def fun_l10_n919(x)
+ if (x < 1)
+ fun_l11_n628(x)
+ else
+ fun_l11_n40(x)
+ end
+end
+
+def fun_l10_n920(x)
+ if (x < 1)
+ fun_l11_n387(x)
+ else
+ fun_l11_n824(x)
+ end
+end
+
+def fun_l10_n921(x)
+ if (x < 1)
+ fun_l11_n641(x)
+ else
+ fun_l11_n669(x)
+ end
+end
+
+def fun_l10_n922(x)
+ if (x < 1)
+ fun_l11_n484(x)
+ else
+ fun_l11_n591(x)
+ end
+end
+
+def fun_l10_n923(x)
+ if (x < 1)
+ fun_l11_n610(x)
+ else
+ fun_l11_n132(x)
+ end
+end
+
+def fun_l10_n924(x)
+ if (x < 1)
+ fun_l11_n90(x)
+ else
+ fun_l11_n727(x)
+ end
+end
+
+def fun_l10_n925(x)
+ if (x < 1)
+ fun_l11_n44(x)
+ else
+ fun_l11_n412(x)
+ end
+end
+
+def fun_l10_n926(x)
+ if (x < 1)
+ fun_l11_n912(x)
+ else
+ fun_l11_n229(x)
+ end
+end
+
+def fun_l10_n927(x)
+ if (x < 1)
+ fun_l11_n647(x)
+ else
+ fun_l11_n13(x)
+ end
+end
+
+def fun_l10_n928(x)
+ if (x < 1)
+ fun_l11_n820(x)
+ else
+ fun_l11_n316(x)
+ end
+end
+
+def fun_l10_n929(x)
+ if (x < 1)
+ fun_l11_n850(x)
+ else
+ fun_l11_n868(x)
+ end
+end
+
+def fun_l10_n930(x)
+ if (x < 1)
+ fun_l11_n373(x)
+ else
+ fun_l11_n183(x)
+ end
+end
+
+def fun_l10_n931(x)
+ if (x < 1)
+ fun_l11_n217(x)
+ else
+ fun_l11_n230(x)
+ end
+end
+
+def fun_l10_n932(x)
+ if (x < 1)
+ fun_l11_n77(x)
+ else
+ fun_l11_n917(x)
+ end
+end
+
+def fun_l10_n933(x)
+ if (x < 1)
+ fun_l11_n264(x)
+ else
+ fun_l11_n992(x)
+ end
+end
+
+def fun_l10_n934(x)
+ if (x < 1)
+ fun_l11_n125(x)
+ else
+ fun_l11_n441(x)
+ end
+end
+
+def fun_l10_n935(x)
+ if (x < 1)
+ fun_l11_n586(x)
+ else
+ fun_l11_n946(x)
+ end
+end
+
+def fun_l10_n936(x)
+ if (x < 1)
+ fun_l11_n186(x)
+ else
+ fun_l11_n595(x)
+ end
+end
+
+def fun_l10_n937(x)
+ if (x < 1)
+ fun_l11_n210(x)
+ else
+ fun_l11_n186(x)
+ end
+end
+
+def fun_l10_n938(x)
+ if (x < 1)
+ fun_l11_n131(x)
+ else
+ fun_l11_n519(x)
+ end
+end
+
+def fun_l10_n939(x)
+ if (x < 1)
+ fun_l11_n863(x)
+ else
+ fun_l11_n486(x)
+ end
+end
+
+def fun_l10_n940(x)
+ if (x < 1)
+ fun_l11_n753(x)
+ else
+ fun_l11_n916(x)
+ end
+end
+
+def fun_l10_n941(x)
+ if (x < 1)
+ fun_l11_n8(x)
+ else
+ fun_l11_n153(x)
+ end
+end
+
+def fun_l10_n942(x)
+ if (x < 1)
+ fun_l11_n798(x)
+ else
+ fun_l11_n873(x)
+ end
+end
+
+def fun_l10_n943(x)
+ if (x < 1)
+ fun_l11_n862(x)
+ else
+ fun_l11_n293(x)
+ end
+end
+
+def fun_l10_n944(x)
+ if (x < 1)
+ fun_l11_n554(x)
+ else
+ fun_l11_n599(x)
+ end
+end
+
+def fun_l10_n945(x)
+ if (x < 1)
+ fun_l11_n134(x)
+ else
+ fun_l11_n535(x)
+ end
+end
+
+def fun_l10_n946(x)
+ if (x < 1)
+ fun_l11_n992(x)
+ else
+ fun_l11_n624(x)
+ end
+end
+
+def fun_l10_n947(x)
+ if (x < 1)
+ fun_l11_n436(x)
+ else
+ fun_l11_n153(x)
+ end
+end
+
+def fun_l10_n948(x)
+ if (x < 1)
+ fun_l11_n72(x)
+ else
+ fun_l11_n154(x)
+ end
+end
+
+def fun_l10_n949(x)
+ if (x < 1)
+ fun_l11_n61(x)
+ else
+ fun_l11_n612(x)
+ end
+end
+
+def fun_l10_n950(x)
+ if (x < 1)
+ fun_l11_n964(x)
+ else
+ fun_l11_n999(x)
+ end
+end
+
+def fun_l10_n951(x)
+ if (x < 1)
+ fun_l11_n689(x)
+ else
+ fun_l11_n165(x)
+ end
+end
+
+def fun_l10_n952(x)
+ if (x < 1)
+ fun_l11_n856(x)
+ else
+ fun_l11_n7(x)
+ end
+end
+
+def fun_l10_n953(x)
+ if (x < 1)
+ fun_l11_n421(x)
+ else
+ fun_l11_n416(x)
+ end
+end
+
+def fun_l10_n954(x)
+ if (x < 1)
+ fun_l11_n969(x)
+ else
+ fun_l11_n260(x)
+ end
+end
+
+def fun_l10_n955(x)
+ if (x < 1)
+ fun_l11_n716(x)
+ else
+ fun_l11_n364(x)
+ end
+end
+
+def fun_l10_n956(x)
+ if (x < 1)
+ fun_l11_n491(x)
+ else
+ fun_l11_n575(x)
+ end
+end
+
+def fun_l10_n957(x)
+ if (x < 1)
+ fun_l11_n522(x)
+ else
+ fun_l11_n430(x)
+ end
+end
+
+def fun_l10_n958(x)
+ if (x < 1)
+ fun_l11_n712(x)
+ else
+ fun_l11_n939(x)
+ end
+end
+
+def fun_l10_n959(x)
+ if (x < 1)
+ fun_l11_n230(x)
+ else
+ fun_l11_n819(x)
+ end
+end
+
+def fun_l10_n960(x)
+ if (x < 1)
+ fun_l11_n790(x)
+ else
+ fun_l11_n939(x)
+ end
+end
+
+def fun_l10_n961(x)
+ if (x < 1)
+ fun_l11_n844(x)
+ else
+ fun_l11_n847(x)
+ end
+end
+
+def fun_l10_n962(x)
+ if (x < 1)
+ fun_l11_n105(x)
+ else
+ fun_l11_n722(x)
+ end
+end
+
+def fun_l10_n963(x)
+ if (x < 1)
+ fun_l11_n166(x)
+ else
+ fun_l11_n990(x)
+ end
+end
+
+def fun_l10_n964(x)
+ if (x < 1)
+ fun_l11_n224(x)
+ else
+ fun_l11_n751(x)
+ end
+end
+
+def fun_l10_n965(x)
+ if (x < 1)
+ fun_l11_n809(x)
+ else
+ fun_l11_n739(x)
+ end
+end
+
+def fun_l10_n966(x)
+ if (x < 1)
+ fun_l11_n840(x)
+ else
+ fun_l11_n795(x)
+ end
+end
+
+def fun_l10_n967(x)
+ if (x < 1)
+ fun_l11_n791(x)
+ else
+ fun_l11_n926(x)
+ end
+end
+
+def fun_l10_n968(x)
+ if (x < 1)
+ fun_l11_n484(x)
+ else
+ fun_l11_n409(x)
+ end
+end
+
+def fun_l10_n969(x)
+ if (x < 1)
+ fun_l11_n22(x)
+ else
+ fun_l11_n284(x)
+ end
+end
+
+def fun_l10_n970(x)
+ if (x < 1)
+ fun_l11_n539(x)
+ else
+ fun_l11_n661(x)
+ end
+end
+
+def fun_l10_n971(x)
+ if (x < 1)
+ fun_l11_n453(x)
+ else
+ fun_l11_n619(x)
+ end
+end
+
+def fun_l10_n972(x)
+ if (x < 1)
+ fun_l11_n80(x)
+ else
+ fun_l11_n848(x)
+ end
+end
+
+def fun_l10_n973(x)
+ if (x < 1)
+ fun_l11_n599(x)
+ else
+ fun_l11_n147(x)
+ end
+end
+
+def fun_l10_n974(x)
+ if (x < 1)
+ fun_l11_n784(x)
+ else
+ fun_l11_n603(x)
+ end
+end
+
+def fun_l10_n975(x)
+ if (x < 1)
+ fun_l11_n166(x)
+ else
+ fun_l11_n12(x)
+ end
+end
+
+def fun_l10_n976(x)
+ if (x < 1)
+ fun_l11_n739(x)
+ else
+ fun_l11_n381(x)
+ end
+end
+
+def fun_l10_n977(x)
+ if (x < 1)
+ fun_l11_n45(x)
+ else
+ fun_l11_n67(x)
+ end
+end
+
+def fun_l10_n978(x)
+ if (x < 1)
+ fun_l11_n243(x)
+ else
+ fun_l11_n51(x)
+ end
+end
+
+def fun_l10_n979(x)
+ if (x < 1)
+ fun_l11_n964(x)
+ else
+ fun_l11_n949(x)
+ end
+end
+
+def fun_l10_n980(x)
+ if (x < 1)
+ fun_l11_n881(x)
+ else
+ fun_l11_n893(x)
+ end
+end
+
+def fun_l10_n981(x)
+ if (x < 1)
+ fun_l11_n808(x)
+ else
+ fun_l11_n205(x)
+ end
+end
+
+def fun_l10_n982(x)
+ if (x < 1)
+ fun_l11_n897(x)
+ else
+ fun_l11_n259(x)
+ end
+end
+
+def fun_l10_n983(x)
+ if (x < 1)
+ fun_l11_n671(x)
+ else
+ fun_l11_n115(x)
+ end
+end
+
+def fun_l10_n984(x)
+ if (x < 1)
+ fun_l11_n9(x)
+ else
+ fun_l11_n994(x)
+ end
+end
+
+def fun_l10_n985(x)
+ if (x < 1)
+ fun_l11_n663(x)
+ else
+ fun_l11_n979(x)
+ end
+end
+
+def fun_l10_n986(x)
+ if (x < 1)
+ fun_l11_n8(x)
+ else
+ fun_l11_n949(x)
+ end
+end
+
+def fun_l10_n987(x)
+ if (x < 1)
+ fun_l11_n230(x)
+ else
+ fun_l11_n822(x)
+ end
+end
+
+def fun_l10_n988(x)
+ if (x < 1)
+ fun_l11_n767(x)
+ else
+ fun_l11_n691(x)
+ end
+end
+
+def fun_l10_n989(x)
+ if (x < 1)
+ fun_l11_n524(x)
+ else
+ fun_l11_n514(x)
+ end
+end
+
+def fun_l10_n990(x)
+ if (x < 1)
+ fun_l11_n556(x)
+ else
+ fun_l11_n34(x)
+ end
+end
+
+def fun_l10_n991(x)
+ if (x < 1)
+ fun_l11_n516(x)
+ else
+ fun_l11_n817(x)
+ end
+end
+
+def fun_l10_n992(x)
+ if (x < 1)
+ fun_l11_n273(x)
+ else
+ fun_l11_n189(x)
+ end
+end
+
+def fun_l10_n993(x)
+ if (x < 1)
+ fun_l11_n726(x)
+ else
+ fun_l11_n260(x)
+ end
+end
+
+def fun_l10_n994(x)
+ if (x < 1)
+ fun_l11_n789(x)
+ else
+ fun_l11_n993(x)
+ end
+end
+
+def fun_l10_n995(x)
+ if (x < 1)
+ fun_l11_n187(x)
+ else
+ fun_l11_n291(x)
+ end
+end
+
+def fun_l10_n996(x)
+ if (x < 1)
+ fun_l11_n145(x)
+ else
+ fun_l11_n500(x)
+ end
+end
+
+def fun_l10_n997(x)
+ if (x < 1)
+ fun_l11_n437(x)
+ else
+ fun_l11_n925(x)
+ end
+end
+
+def fun_l10_n998(x)
+ if (x < 1)
+ fun_l11_n945(x)
+ else
+ fun_l11_n300(x)
+ end
+end
+
+def fun_l10_n999(x)
+ if (x < 1)
+ fun_l11_n703(x)
+ else
+ fun_l11_n542(x)
+ end
+end
+
+def fun_l11_n0(x)
+ if (x < 1)
+ fun_l12_n539(x)
+ else
+ fun_l12_n60(x)
+ end
+end
+
+def fun_l11_n1(x)
+ if (x < 1)
+ fun_l12_n659(x)
+ else
+ fun_l12_n739(x)
+ end
+end
+
+def fun_l11_n2(x)
+ if (x < 1)
+ fun_l12_n273(x)
+ else
+ fun_l12_n392(x)
+ end
+end
+
+def fun_l11_n3(x)
+ if (x < 1)
+ fun_l12_n394(x)
+ else
+ fun_l12_n989(x)
+ end
+end
+
+def fun_l11_n4(x)
+ if (x < 1)
+ fun_l12_n254(x)
+ else
+ fun_l12_n906(x)
+ end
+end
+
+def fun_l11_n5(x)
+ if (x < 1)
+ fun_l12_n229(x)
+ else
+ fun_l12_n345(x)
+ end
+end
+
+def fun_l11_n6(x)
+ if (x < 1)
+ fun_l12_n15(x)
+ else
+ fun_l12_n893(x)
+ end
+end
+
+def fun_l11_n7(x)
+ if (x < 1)
+ fun_l12_n185(x)
+ else
+ fun_l12_n187(x)
+ end
+end
+
+def fun_l11_n8(x)
+ if (x < 1)
+ fun_l12_n761(x)
+ else
+ fun_l12_n850(x)
+ end
+end
+
+def fun_l11_n9(x)
+ if (x < 1)
+ fun_l12_n644(x)
+ else
+ fun_l12_n274(x)
+ end
+end
+
+def fun_l11_n10(x)
+ if (x < 1)
+ fun_l12_n652(x)
+ else
+ fun_l12_n223(x)
+ end
+end
+
+def fun_l11_n11(x)
+ if (x < 1)
+ fun_l12_n905(x)
+ else
+ fun_l12_n215(x)
+ end
+end
+
+def fun_l11_n12(x)
+ if (x < 1)
+ fun_l12_n58(x)
+ else
+ fun_l12_n495(x)
+ end
+end
+
+def fun_l11_n13(x)
+ if (x < 1)
+ fun_l12_n529(x)
+ else
+ fun_l12_n477(x)
+ end
+end
+
+def fun_l11_n14(x)
+ if (x < 1)
+ fun_l12_n743(x)
+ else
+ fun_l12_n847(x)
+ end
+end
+
+def fun_l11_n15(x)
+ if (x < 1)
+ fun_l12_n558(x)
+ else
+ fun_l12_n252(x)
+ end
+end
+
+def fun_l11_n16(x)
+ if (x < 1)
+ fun_l12_n115(x)
+ else
+ fun_l12_n972(x)
+ end
+end
+
+def fun_l11_n17(x)
+ if (x < 1)
+ fun_l12_n374(x)
+ else
+ fun_l12_n637(x)
+ end
+end
+
+def fun_l11_n18(x)
+ if (x < 1)
+ fun_l12_n978(x)
+ else
+ fun_l12_n570(x)
+ end
+end
+
+def fun_l11_n19(x)
+ if (x < 1)
+ fun_l12_n264(x)
+ else
+ fun_l12_n743(x)
+ end
+end
+
+def fun_l11_n20(x)
+ if (x < 1)
+ fun_l12_n936(x)
+ else
+ fun_l12_n986(x)
+ end
+end
+
+def fun_l11_n21(x)
+ if (x < 1)
+ fun_l12_n107(x)
+ else
+ fun_l12_n167(x)
+ end
+end
+
+def fun_l11_n22(x)
+ if (x < 1)
+ fun_l12_n777(x)
+ else
+ fun_l12_n143(x)
+ end
+end
+
+def fun_l11_n23(x)
+ if (x < 1)
+ fun_l12_n510(x)
+ else
+ fun_l12_n236(x)
+ end
+end
+
+def fun_l11_n24(x)
+ if (x < 1)
+ fun_l12_n180(x)
+ else
+ fun_l12_n309(x)
+ end
+end
+
+def fun_l11_n25(x)
+ if (x < 1)
+ fun_l12_n850(x)
+ else
+ fun_l12_n374(x)
+ end
+end
+
+def fun_l11_n26(x)
+ if (x < 1)
+ fun_l12_n573(x)
+ else
+ fun_l12_n722(x)
+ end
+end
+
+def fun_l11_n27(x)
+ if (x < 1)
+ fun_l12_n800(x)
+ else
+ fun_l12_n981(x)
+ end
+end
+
+def fun_l11_n28(x)
+ if (x < 1)
+ fun_l12_n705(x)
+ else
+ fun_l12_n636(x)
+ end
+end
+
+def fun_l11_n29(x)
+ if (x < 1)
+ fun_l12_n618(x)
+ else
+ fun_l12_n429(x)
+ end
+end
+
+def fun_l11_n30(x)
+ if (x < 1)
+ fun_l12_n139(x)
+ else
+ fun_l12_n95(x)
+ end
+end
+
+def fun_l11_n31(x)
+ if (x < 1)
+ fun_l12_n861(x)
+ else
+ fun_l12_n797(x)
+ end
+end
+
+def fun_l11_n32(x)
+ if (x < 1)
+ fun_l12_n576(x)
+ else
+ fun_l12_n719(x)
+ end
+end
+
+def fun_l11_n33(x)
+ if (x < 1)
+ fun_l12_n236(x)
+ else
+ fun_l12_n429(x)
+ end
+end
+
+def fun_l11_n34(x)
+ if (x < 1)
+ fun_l12_n375(x)
+ else
+ fun_l12_n696(x)
+ end
+end
+
+def fun_l11_n35(x)
+ if (x < 1)
+ fun_l12_n291(x)
+ else
+ fun_l12_n799(x)
+ end
+end
+
+def fun_l11_n36(x)
+ if (x < 1)
+ fun_l12_n180(x)
+ else
+ fun_l12_n228(x)
+ end
+end
+
+def fun_l11_n37(x)
+ if (x < 1)
+ fun_l12_n455(x)
+ else
+ fun_l12_n983(x)
+ end
+end
+
+def fun_l11_n38(x)
+ if (x < 1)
+ fun_l12_n486(x)
+ else
+ fun_l12_n871(x)
+ end
+end
+
+def fun_l11_n39(x)
+ if (x < 1)
+ fun_l12_n945(x)
+ else
+ fun_l12_n47(x)
+ end
+end
+
+def fun_l11_n40(x)
+ if (x < 1)
+ fun_l12_n531(x)
+ else
+ fun_l12_n506(x)
+ end
+end
+
+def fun_l11_n41(x)
+ if (x < 1)
+ fun_l12_n248(x)
+ else
+ fun_l12_n142(x)
+ end
+end
+
+def fun_l11_n42(x)
+ if (x < 1)
+ fun_l12_n415(x)
+ else
+ fun_l12_n177(x)
+ end
+end
+
+def fun_l11_n43(x)
+ if (x < 1)
+ fun_l12_n560(x)
+ else
+ fun_l12_n63(x)
+ end
+end
+
+def fun_l11_n44(x)
+ if (x < 1)
+ fun_l12_n180(x)
+ else
+ fun_l12_n268(x)
+ end
+end
+
+def fun_l11_n45(x)
+ if (x < 1)
+ fun_l12_n814(x)
+ else
+ fun_l12_n967(x)
+ end
+end
+
+def fun_l11_n46(x)
+ if (x < 1)
+ fun_l12_n273(x)
+ else
+ fun_l12_n702(x)
+ end
+end
+
+def fun_l11_n47(x)
+ if (x < 1)
+ fun_l12_n626(x)
+ else
+ fun_l12_n615(x)
+ end
+end
+
+def fun_l11_n48(x)
+ if (x < 1)
+ fun_l12_n374(x)
+ else
+ fun_l12_n669(x)
+ end
+end
+
+def fun_l11_n49(x)
+ if (x < 1)
+ fun_l12_n681(x)
+ else
+ fun_l12_n48(x)
+ end
+end
+
+def fun_l11_n50(x)
+ if (x < 1)
+ fun_l12_n3(x)
+ else
+ fun_l12_n455(x)
+ end
+end
+
+def fun_l11_n51(x)
+ if (x < 1)
+ fun_l12_n461(x)
+ else
+ fun_l12_n350(x)
+ end
+end
+
+def fun_l11_n52(x)
+ if (x < 1)
+ fun_l12_n129(x)
+ else
+ fun_l12_n266(x)
+ end
+end
+
+def fun_l11_n53(x)
+ if (x < 1)
+ fun_l12_n802(x)
+ else
+ fun_l12_n863(x)
+ end
+end
+
+def fun_l11_n54(x)
+ if (x < 1)
+ fun_l12_n216(x)
+ else
+ fun_l12_n820(x)
+ end
+end
+
+def fun_l11_n55(x)
+ if (x < 1)
+ fun_l12_n626(x)
+ else
+ fun_l12_n476(x)
+ end
+end
+
+def fun_l11_n56(x)
+ if (x < 1)
+ fun_l12_n162(x)
+ else
+ fun_l12_n796(x)
+ end
+end
+
+def fun_l11_n57(x)
+ if (x < 1)
+ fun_l12_n45(x)
+ else
+ fun_l12_n893(x)
+ end
+end
+
+def fun_l11_n58(x)
+ if (x < 1)
+ fun_l12_n227(x)
+ else
+ fun_l12_n919(x)
+ end
+end
+
+def fun_l11_n59(x)
+ if (x < 1)
+ fun_l12_n300(x)
+ else
+ fun_l12_n360(x)
+ end
+end
+
+def fun_l11_n60(x)
+ if (x < 1)
+ fun_l12_n265(x)
+ else
+ fun_l12_n552(x)
+ end
+end
+
+def fun_l11_n61(x)
+ if (x < 1)
+ fun_l12_n927(x)
+ else
+ fun_l12_n486(x)
+ end
+end
+
+def fun_l11_n62(x)
+ if (x < 1)
+ fun_l12_n94(x)
+ else
+ fun_l12_n28(x)
+ end
+end
+
+def fun_l11_n63(x)
+ if (x < 1)
+ fun_l12_n295(x)
+ else
+ fun_l12_n967(x)
+ end
+end
+
+def fun_l11_n64(x)
+ if (x < 1)
+ fun_l12_n713(x)
+ else
+ fun_l12_n463(x)
+ end
+end
+
+def fun_l11_n65(x)
+ if (x < 1)
+ fun_l12_n454(x)
+ else
+ fun_l12_n616(x)
+ end
+end
+
+def fun_l11_n66(x)
+ if (x < 1)
+ fun_l12_n121(x)
+ else
+ fun_l12_n277(x)
+ end
+end
+
+def fun_l11_n67(x)
+ if (x < 1)
+ fun_l12_n524(x)
+ else
+ fun_l12_n242(x)
+ end
+end
+
+def fun_l11_n68(x)
+ if (x < 1)
+ fun_l12_n570(x)
+ else
+ fun_l12_n658(x)
+ end
+end
+
+def fun_l11_n69(x)
+ if (x < 1)
+ fun_l12_n343(x)
+ else
+ fun_l12_n826(x)
+ end
+end
+
+def fun_l11_n70(x)
+ if (x < 1)
+ fun_l12_n732(x)
+ else
+ fun_l12_n993(x)
+ end
+end
+
+def fun_l11_n71(x)
+ if (x < 1)
+ fun_l12_n380(x)
+ else
+ fun_l12_n486(x)
+ end
+end
+
+def fun_l11_n72(x)
+ if (x < 1)
+ fun_l12_n55(x)
+ else
+ fun_l12_n342(x)
+ end
+end
+
+def fun_l11_n73(x)
+ if (x < 1)
+ fun_l12_n789(x)
+ else
+ fun_l12_n646(x)
+ end
+end
+
+def fun_l11_n74(x)
+ if (x < 1)
+ fun_l12_n106(x)
+ else
+ fun_l12_n557(x)
+ end
+end
+
+def fun_l11_n75(x)
+ if (x < 1)
+ fun_l12_n368(x)
+ else
+ fun_l12_n5(x)
+ end
+end
+
+def fun_l11_n76(x)
+ if (x < 1)
+ fun_l12_n992(x)
+ else
+ fun_l12_n521(x)
+ end
+end
+
+def fun_l11_n77(x)
+ if (x < 1)
+ fun_l12_n754(x)
+ else
+ fun_l12_n259(x)
+ end
+end
+
+def fun_l11_n78(x)
+ if (x < 1)
+ fun_l12_n855(x)
+ else
+ fun_l12_n946(x)
+ end
+end
+
+def fun_l11_n79(x)
+ if (x < 1)
+ fun_l12_n727(x)
+ else
+ fun_l12_n422(x)
+ end
+end
+
+def fun_l11_n80(x)
+ if (x < 1)
+ fun_l12_n649(x)
+ else
+ fun_l12_n593(x)
+ end
+end
+
+def fun_l11_n81(x)
+ if (x < 1)
+ fun_l12_n206(x)
+ else
+ fun_l12_n222(x)
+ end
+end
+
+def fun_l11_n82(x)
+ if (x < 1)
+ fun_l12_n995(x)
+ else
+ fun_l12_n471(x)
+ end
+end
+
+def fun_l11_n83(x)
+ if (x < 1)
+ fun_l12_n926(x)
+ else
+ fun_l12_n165(x)
+ end
+end
+
+def fun_l11_n84(x)
+ if (x < 1)
+ fun_l12_n988(x)
+ else
+ fun_l12_n495(x)
+ end
+end
+
+def fun_l11_n85(x)
+ if (x < 1)
+ fun_l12_n839(x)
+ else
+ fun_l12_n565(x)
+ end
+end
+
+def fun_l11_n86(x)
+ if (x < 1)
+ fun_l12_n381(x)
+ else
+ fun_l12_n981(x)
+ end
+end
+
+def fun_l11_n87(x)
+ if (x < 1)
+ fun_l12_n64(x)
+ else
+ fun_l12_n616(x)
+ end
+end
+
+def fun_l11_n88(x)
+ if (x < 1)
+ fun_l12_n714(x)
+ else
+ fun_l12_n995(x)
+ end
+end
+
+def fun_l11_n89(x)
+ if (x < 1)
+ fun_l12_n796(x)
+ else
+ fun_l12_n534(x)
+ end
+end
+
+def fun_l11_n90(x)
+ if (x < 1)
+ fun_l12_n31(x)
+ else
+ fun_l12_n584(x)
+ end
+end
+
+def fun_l11_n91(x)
+ if (x < 1)
+ fun_l12_n741(x)
+ else
+ fun_l12_n356(x)
+ end
+end
+
+def fun_l11_n92(x)
+ if (x < 1)
+ fun_l12_n418(x)
+ else
+ fun_l12_n184(x)
+ end
+end
+
+def fun_l11_n93(x)
+ if (x < 1)
+ fun_l12_n427(x)
+ else
+ fun_l12_n136(x)
+ end
+end
+
+def fun_l11_n94(x)
+ if (x < 1)
+ fun_l12_n968(x)
+ else
+ fun_l12_n647(x)
+ end
+end
+
+def fun_l11_n95(x)
+ if (x < 1)
+ fun_l12_n641(x)
+ else
+ fun_l12_n675(x)
+ end
+end
+
+def fun_l11_n96(x)
+ if (x < 1)
+ fun_l12_n465(x)
+ else
+ fun_l12_n507(x)
+ end
+end
+
+def fun_l11_n97(x)
+ if (x < 1)
+ fun_l12_n910(x)
+ else
+ fun_l12_n718(x)
+ end
+end
+
+def fun_l11_n98(x)
+ if (x < 1)
+ fun_l12_n686(x)
+ else
+ fun_l12_n94(x)
+ end
+end
+
+def fun_l11_n99(x)
+ if (x < 1)
+ fun_l12_n94(x)
+ else
+ fun_l12_n632(x)
+ end
+end
+
+def fun_l11_n100(x)
+ if (x < 1)
+ fun_l12_n270(x)
+ else
+ fun_l12_n228(x)
+ end
+end
+
+def fun_l11_n101(x)
+ if (x < 1)
+ fun_l12_n569(x)
+ else
+ fun_l12_n492(x)
+ end
+end
+
+def fun_l11_n102(x)
+ if (x < 1)
+ fun_l12_n187(x)
+ else
+ fun_l12_n183(x)
+ end
+end
+
+def fun_l11_n103(x)
+ if (x < 1)
+ fun_l12_n894(x)
+ else
+ fun_l12_n655(x)
+ end
+end
+
+def fun_l11_n104(x)
+ if (x < 1)
+ fun_l12_n347(x)
+ else
+ fun_l12_n467(x)
+ end
+end
+
+def fun_l11_n105(x)
+ if (x < 1)
+ fun_l12_n198(x)
+ else
+ fun_l12_n838(x)
+ end
+end
+
+def fun_l11_n106(x)
+ if (x < 1)
+ fun_l12_n941(x)
+ else
+ fun_l12_n98(x)
+ end
+end
+
+def fun_l11_n107(x)
+ if (x < 1)
+ fun_l12_n406(x)
+ else
+ fun_l12_n773(x)
+ end
+end
+
+def fun_l11_n108(x)
+ if (x < 1)
+ fun_l12_n980(x)
+ else
+ fun_l12_n508(x)
+ end
+end
+
+def fun_l11_n109(x)
+ if (x < 1)
+ fun_l12_n286(x)
+ else
+ fun_l12_n928(x)
+ end
+end
+
+def fun_l11_n110(x)
+ if (x < 1)
+ fun_l12_n710(x)
+ else
+ fun_l12_n20(x)
+ end
+end
+
+def fun_l11_n111(x)
+ if (x < 1)
+ fun_l12_n358(x)
+ else
+ fun_l12_n549(x)
+ end
+end
+
+def fun_l11_n112(x)
+ if (x < 1)
+ fun_l12_n359(x)
+ else
+ fun_l12_n548(x)
+ end
+end
+
+def fun_l11_n113(x)
+ if (x < 1)
+ fun_l12_n665(x)
+ else
+ fun_l12_n802(x)
+ end
+end
+
+def fun_l11_n114(x)
+ if (x < 1)
+ fun_l12_n199(x)
+ else
+ fun_l12_n904(x)
+ end
+end
+
+def fun_l11_n115(x)
+ if (x < 1)
+ fun_l12_n995(x)
+ else
+ fun_l12_n319(x)
+ end
+end
+
+def fun_l11_n116(x)
+ if (x < 1)
+ fun_l12_n873(x)
+ else
+ fun_l12_n786(x)
+ end
+end
+
+def fun_l11_n117(x)
+ if (x < 1)
+ fun_l12_n147(x)
+ else
+ fun_l12_n542(x)
+ end
+end
+
+def fun_l11_n118(x)
+ if (x < 1)
+ fun_l12_n669(x)
+ else
+ fun_l12_n871(x)
+ end
+end
+
+def fun_l11_n119(x)
+ if (x < 1)
+ fun_l12_n552(x)
+ else
+ fun_l12_n998(x)
+ end
+end
+
+def fun_l11_n120(x)
+ if (x < 1)
+ fun_l12_n300(x)
+ else
+ fun_l12_n333(x)
+ end
+end
+
+def fun_l11_n121(x)
+ if (x < 1)
+ fun_l12_n846(x)
+ else
+ fun_l12_n133(x)
+ end
+end
+
+def fun_l11_n122(x)
+ if (x < 1)
+ fun_l12_n946(x)
+ else
+ fun_l12_n540(x)
+ end
+end
+
+def fun_l11_n123(x)
+ if (x < 1)
+ fun_l12_n71(x)
+ else
+ fun_l12_n264(x)
+ end
+end
+
+def fun_l11_n124(x)
+ if (x < 1)
+ fun_l12_n410(x)
+ else
+ fun_l12_n60(x)
+ end
+end
+
+def fun_l11_n125(x)
+ if (x < 1)
+ fun_l12_n62(x)
+ else
+ fun_l12_n211(x)
+ end
+end
+
+def fun_l11_n126(x)
+ if (x < 1)
+ fun_l12_n252(x)
+ else
+ fun_l12_n635(x)
+ end
+end
+
+def fun_l11_n127(x)
+ if (x < 1)
+ fun_l12_n444(x)
+ else
+ fun_l12_n735(x)
+ end
+end
+
+def fun_l11_n128(x)
+ if (x < 1)
+ fun_l12_n3(x)
+ else
+ fun_l12_n403(x)
+ end
+end
+
+def fun_l11_n129(x)
+ if (x < 1)
+ fun_l12_n121(x)
+ else
+ fun_l12_n882(x)
+ end
+end
+
+def fun_l11_n130(x)
+ if (x < 1)
+ fun_l12_n985(x)
+ else
+ fun_l12_n983(x)
+ end
+end
+
+def fun_l11_n131(x)
+ if (x < 1)
+ fun_l12_n413(x)
+ else
+ fun_l12_n117(x)
+ end
+end
+
+def fun_l11_n132(x)
+ if (x < 1)
+ fun_l12_n856(x)
+ else
+ fun_l12_n312(x)
+ end
+end
+
+def fun_l11_n133(x)
+ if (x < 1)
+ fun_l12_n249(x)
+ else
+ fun_l12_n870(x)
+ end
+end
+
+def fun_l11_n134(x)
+ if (x < 1)
+ fun_l12_n367(x)
+ else
+ fun_l12_n748(x)
+ end
+end
+
+def fun_l11_n135(x)
+ if (x < 1)
+ fun_l12_n670(x)
+ else
+ fun_l12_n464(x)
+ end
+end
+
+def fun_l11_n136(x)
+ if (x < 1)
+ fun_l12_n698(x)
+ else
+ fun_l12_n238(x)
+ end
+end
+
+def fun_l11_n137(x)
+ if (x < 1)
+ fun_l12_n856(x)
+ else
+ fun_l12_n660(x)
+ end
+end
+
+def fun_l11_n138(x)
+ if (x < 1)
+ fun_l12_n693(x)
+ else
+ fun_l12_n793(x)
+ end
+end
+
+def fun_l11_n139(x)
+ if (x < 1)
+ fun_l12_n959(x)
+ else
+ fun_l12_n377(x)
+ end
+end
+
+def fun_l11_n140(x)
+ if (x < 1)
+ fun_l12_n24(x)
+ else
+ fun_l12_n250(x)
+ end
+end
+
+def fun_l11_n141(x)
+ if (x < 1)
+ fun_l12_n270(x)
+ else
+ fun_l12_n720(x)
+ end
+end
+
+def fun_l11_n142(x)
+ if (x < 1)
+ fun_l12_n775(x)
+ else
+ fun_l12_n781(x)
+ end
+end
+
+def fun_l11_n143(x)
+ if (x < 1)
+ fun_l12_n266(x)
+ else
+ fun_l12_n608(x)
+ end
+end
+
+def fun_l11_n144(x)
+ if (x < 1)
+ fun_l12_n196(x)
+ else
+ fun_l12_n70(x)
+ end
+end
+
+def fun_l11_n145(x)
+ if (x < 1)
+ fun_l12_n100(x)
+ else
+ fun_l12_n846(x)
+ end
+end
+
+def fun_l11_n146(x)
+ if (x < 1)
+ fun_l12_n406(x)
+ else
+ fun_l12_n966(x)
+ end
+end
+
+def fun_l11_n147(x)
+ if (x < 1)
+ fun_l12_n816(x)
+ else
+ fun_l12_n54(x)
+ end
+end
+
+def fun_l11_n148(x)
+ if (x < 1)
+ fun_l12_n856(x)
+ else
+ fun_l12_n710(x)
+ end
+end
+
+def fun_l11_n149(x)
+ if (x < 1)
+ fun_l12_n597(x)
+ else
+ fun_l12_n596(x)
+ end
+end
+
+def fun_l11_n150(x)
+ if (x < 1)
+ fun_l12_n938(x)
+ else
+ fun_l12_n349(x)
+ end
+end
+
+def fun_l11_n151(x)
+ if (x < 1)
+ fun_l12_n987(x)
+ else
+ fun_l12_n231(x)
+ end
+end
+
+def fun_l11_n152(x)
+ if (x < 1)
+ fun_l12_n67(x)
+ else
+ fun_l12_n685(x)
+ end
+end
+
+def fun_l11_n153(x)
+ if (x < 1)
+ fun_l12_n786(x)
+ else
+ fun_l12_n71(x)
+ end
+end
+
+def fun_l11_n154(x)
+ if (x < 1)
+ fun_l12_n245(x)
+ else
+ fun_l12_n109(x)
+ end
+end
+
+def fun_l11_n155(x)
+ if (x < 1)
+ fun_l12_n665(x)
+ else
+ fun_l12_n565(x)
+ end
+end
+
+def fun_l11_n156(x)
+ if (x < 1)
+ fun_l12_n885(x)
+ else
+ fun_l12_n27(x)
+ end
+end
+
+def fun_l11_n157(x)
+ if (x < 1)
+ fun_l12_n704(x)
+ else
+ fun_l12_n140(x)
+ end
+end
+
+def fun_l11_n158(x)
+ if (x < 1)
+ fun_l12_n505(x)
+ else
+ fun_l12_n428(x)
+ end
+end
+
+def fun_l11_n159(x)
+ if (x < 1)
+ fun_l12_n646(x)
+ else
+ fun_l12_n250(x)
+ end
+end
+
+def fun_l11_n160(x)
+ if (x < 1)
+ fun_l12_n125(x)
+ else
+ fun_l12_n22(x)
+ end
+end
+
+def fun_l11_n161(x)
+ if (x < 1)
+ fun_l12_n49(x)
+ else
+ fun_l12_n852(x)
+ end
+end
+
+def fun_l11_n162(x)
+ if (x < 1)
+ fun_l12_n992(x)
+ else
+ fun_l12_n321(x)
+ end
+end
+
+def fun_l11_n163(x)
+ if (x < 1)
+ fun_l12_n457(x)
+ else
+ fun_l12_n162(x)
+ end
+end
+
+def fun_l11_n164(x)
+ if (x < 1)
+ fun_l12_n612(x)
+ else
+ fun_l12_n107(x)
+ end
+end
+
+def fun_l11_n165(x)
+ if (x < 1)
+ fun_l12_n786(x)
+ else
+ fun_l12_n338(x)
+ end
+end
+
+def fun_l11_n166(x)
+ if (x < 1)
+ fun_l12_n623(x)
+ else
+ fun_l12_n18(x)
+ end
+end
+
+def fun_l11_n167(x)
+ if (x < 1)
+ fun_l12_n605(x)
+ else
+ fun_l12_n963(x)
+ end
+end
+
+def fun_l11_n168(x)
+ if (x < 1)
+ fun_l12_n111(x)
+ else
+ fun_l12_n822(x)
+ end
+end
+
+def fun_l11_n169(x)
+ if (x < 1)
+ fun_l12_n502(x)
+ else
+ fun_l12_n291(x)
+ end
+end
+
+def fun_l11_n170(x)
+ if (x < 1)
+ fun_l12_n984(x)
+ else
+ fun_l12_n950(x)
+ end
+end
+
+def fun_l11_n171(x)
+ if (x < 1)
+ fun_l12_n915(x)
+ else
+ fun_l12_n81(x)
+ end
+end
+
+def fun_l11_n172(x)
+ if (x < 1)
+ fun_l12_n839(x)
+ else
+ fun_l12_n405(x)
+ end
+end
+
+def fun_l11_n173(x)
+ if (x < 1)
+ fun_l12_n331(x)
+ else
+ fun_l12_n906(x)
+ end
+end
+
+def fun_l11_n174(x)
+ if (x < 1)
+ fun_l12_n698(x)
+ else
+ fun_l12_n378(x)
+ end
+end
+
+def fun_l11_n175(x)
+ if (x < 1)
+ fun_l12_n648(x)
+ else
+ fun_l12_n824(x)
+ end
+end
+
+def fun_l11_n176(x)
+ if (x < 1)
+ fun_l12_n876(x)
+ else
+ fun_l12_n873(x)
+ end
+end
+
+def fun_l11_n177(x)
+ if (x < 1)
+ fun_l12_n932(x)
+ else
+ fun_l12_n797(x)
+ end
+end
+
+def fun_l11_n178(x)
+ if (x < 1)
+ fun_l12_n975(x)
+ else
+ fun_l12_n166(x)
+ end
+end
+
+def fun_l11_n179(x)
+ if (x < 1)
+ fun_l12_n66(x)
+ else
+ fun_l12_n624(x)
+ end
+end
+
+def fun_l11_n180(x)
+ if (x < 1)
+ fun_l12_n676(x)
+ else
+ fun_l12_n603(x)
+ end
+end
+
+def fun_l11_n181(x)
+ if (x < 1)
+ fun_l12_n80(x)
+ else
+ fun_l12_n504(x)
+ end
+end
+
+def fun_l11_n182(x)
+ if (x < 1)
+ fun_l12_n465(x)
+ else
+ fun_l12_n319(x)
+ end
+end
+
+def fun_l11_n183(x)
+ if (x < 1)
+ fun_l12_n493(x)
+ else
+ fun_l12_n412(x)
+ end
+end
+
+def fun_l11_n184(x)
+ if (x < 1)
+ fun_l12_n278(x)
+ else
+ fun_l12_n145(x)
+ end
+end
+
+def fun_l11_n185(x)
+ if (x < 1)
+ fun_l12_n328(x)
+ else
+ fun_l12_n777(x)
+ end
+end
+
+def fun_l11_n186(x)
+ if (x < 1)
+ fun_l12_n120(x)
+ else
+ fun_l12_n462(x)
+ end
+end
+
+def fun_l11_n187(x)
+ if (x < 1)
+ fun_l12_n755(x)
+ else
+ fun_l12_n260(x)
+ end
+end
+
+def fun_l11_n188(x)
+ if (x < 1)
+ fun_l12_n378(x)
+ else
+ fun_l12_n174(x)
+ end
+end
+
+def fun_l11_n189(x)
+ if (x < 1)
+ fun_l12_n100(x)
+ else
+ fun_l12_n234(x)
+ end
+end
+
+def fun_l11_n190(x)
+ if (x < 1)
+ fun_l12_n132(x)
+ else
+ fun_l12_n437(x)
+ end
+end
+
+def fun_l11_n191(x)
+ if (x < 1)
+ fun_l12_n399(x)
+ else
+ fun_l12_n989(x)
+ end
+end
+
+def fun_l11_n192(x)
+ if (x < 1)
+ fun_l12_n355(x)
+ else
+ fun_l12_n953(x)
+ end
+end
+
+def fun_l11_n193(x)
+ if (x < 1)
+ fun_l12_n139(x)
+ else
+ fun_l12_n138(x)
+ end
+end
+
+def fun_l11_n194(x)
+ if (x < 1)
+ fun_l12_n294(x)
+ else
+ fun_l12_n676(x)
+ end
+end
+
+def fun_l11_n195(x)
+ if (x < 1)
+ fun_l12_n581(x)
+ else
+ fun_l12_n549(x)
+ end
+end
+
+def fun_l11_n196(x)
+ if (x < 1)
+ fun_l12_n555(x)
+ else
+ fun_l12_n738(x)
+ end
+end
+
+def fun_l11_n197(x)
+ if (x < 1)
+ fun_l12_n170(x)
+ else
+ fun_l12_n476(x)
+ end
+end
+
+def fun_l11_n198(x)
+ if (x < 1)
+ fun_l12_n350(x)
+ else
+ fun_l12_n576(x)
+ end
+end
+
+def fun_l11_n199(x)
+ if (x < 1)
+ fun_l12_n647(x)
+ else
+ fun_l12_n94(x)
+ end
+end
+
+def fun_l11_n200(x)
+ if (x < 1)
+ fun_l12_n28(x)
+ else
+ fun_l12_n706(x)
+ end
+end
+
+def fun_l11_n201(x)
+ if (x < 1)
+ fun_l12_n189(x)
+ else
+ fun_l12_n366(x)
+ end
+end
+
+def fun_l11_n202(x)
+ if (x < 1)
+ fun_l12_n690(x)
+ else
+ fun_l12_n807(x)
+ end
+end
+
+def fun_l11_n203(x)
+ if (x < 1)
+ fun_l12_n402(x)
+ else
+ fun_l12_n685(x)
+ end
+end
+
+def fun_l11_n204(x)
+ if (x < 1)
+ fun_l12_n232(x)
+ else
+ fun_l12_n257(x)
+ end
+end
+
+def fun_l11_n205(x)
+ if (x < 1)
+ fun_l12_n108(x)
+ else
+ fun_l12_n888(x)
+ end
+end
+
+def fun_l11_n206(x)
+ if (x < 1)
+ fun_l12_n494(x)
+ else
+ fun_l12_n372(x)
+ end
+end
+
+def fun_l11_n207(x)
+ if (x < 1)
+ fun_l12_n607(x)
+ else
+ fun_l12_n336(x)
+ end
+end
+
+def fun_l11_n208(x)
+ if (x < 1)
+ fun_l12_n913(x)
+ else
+ fun_l12_n134(x)
+ end
+end
+
+def fun_l11_n209(x)
+ if (x < 1)
+ fun_l12_n996(x)
+ else
+ fun_l12_n918(x)
+ end
+end
+
+def fun_l11_n210(x)
+ if (x < 1)
+ fun_l12_n674(x)
+ else
+ fun_l12_n260(x)
+ end
+end
+
+def fun_l11_n211(x)
+ if (x < 1)
+ fun_l12_n344(x)
+ else
+ fun_l12_n219(x)
+ end
+end
+
+def fun_l11_n212(x)
+ if (x < 1)
+ fun_l12_n161(x)
+ else
+ fun_l12_n415(x)
+ end
+end
+
+def fun_l11_n213(x)
+ if (x < 1)
+ fun_l12_n65(x)
+ else
+ fun_l12_n777(x)
+ end
+end
+
+def fun_l11_n214(x)
+ if (x < 1)
+ fun_l12_n369(x)
+ else
+ fun_l12_n899(x)
+ end
+end
+
+def fun_l11_n215(x)
+ if (x < 1)
+ fun_l12_n310(x)
+ else
+ fun_l12_n118(x)
+ end
+end
+
+def fun_l11_n216(x)
+ if (x < 1)
+ fun_l12_n326(x)
+ else
+ fun_l12_n823(x)
+ end
+end
+
+def fun_l11_n217(x)
+ if (x < 1)
+ fun_l12_n981(x)
+ else
+ fun_l12_n824(x)
+ end
+end
+
+def fun_l11_n218(x)
+ if (x < 1)
+ fun_l12_n425(x)
+ else
+ fun_l12_n483(x)
+ end
+end
+
+def fun_l11_n219(x)
+ if (x < 1)
+ fun_l12_n931(x)
+ else
+ fun_l12_n366(x)
+ end
+end
+
+def fun_l11_n220(x)
+ if (x < 1)
+ fun_l12_n325(x)
+ else
+ fun_l12_n926(x)
+ end
+end
+
+def fun_l11_n221(x)
+ if (x < 1)
+ fun_l12_n924(x)
+ else
+ fun_l12_n764(x)
+ end
+end
+
+def fun_l11_n222(x)
+ if (x < 1)
+ fun_l12_n407(x)
+ else
+ fun_l12_n137(x)
+ end
+end
+
+def fun_l11_n223(x)
+ if (x < 1)
+ fun_l12_n584(x)
+ else
+ fun_l12_n294(x)
+ end
+end
+
+def fun_l11_n224(x)
+ if (x < 1)
+ fun_l12_n39(x)
+ else
+ fun_l12_n795(x)
+ end
+end
+
+def fun_l11_n225(x)
+ if (x < 1)
+ fun_l12_n37(x)
+ else
+ fun_l12_n738(x)
+ end
+end
+
+def fun_l11_n226(x)
+ if (x < 1)
+ fun_l12_n779(x)
+ else
+ fun_l12_n152(x)
+ end
+end
+
+def fun_l11_n227(x)
+ if (x < 1)
+ fun_l12_n116(x)
+ else
+ fun_l12_n589(x)
+ end
+end
+
+def fun_l11_n228(x)
+ if (x < 1)
+ fun_l12_n500(x)
+ else
+ fun_l12_n916(x)
+ end
+end
+
+def fun_l11_n229(x)
+ if (x < 1)
+ fun_l12_n443(x)
+ else
+ fun_l12_n126(x)
+ end
+end
+
+def fun_l11_n230(x)
+ if (x < 1)
+ fun_l12_n311(x)
+ else
+ fun_l12_n184(x)
+ end
+end
+
+def fun_l11_n231(x)
+ if (x < 1)
+ fun_l12_n604(x)
+ else
+ fun_l12_n171(x)
+ end
+end
+
+def fun_l11_n232(x)
+ if (x < 1)
+ fun_l12_n287(x)
+ else
+ fun_l12_n899(x)
+ end
+end
+
+def fun_l11_n233(x)
+ if (x < 1)
+ fun_l12_n834(x)
+ else
+ fun_l12_n435(x)
+ end
+end
+
+def fun_l11_n234(x)
+ if (x < 1)
+ fun_l12_n187(x)
+ else
+ fun_l12_n584(x)
+ end
+end
+
+def fun_l11_n235(x)
+ if (x < 1)
+ fun_l12_n711(x)
+ else
+ fun_l12_n542(x)
+ end
+end
+
+def fun_l11_n236(x)
+ if (x < 1)
+ fun_l12_n797(x)
+ else
+ fun_l12_n702(x)
+ end
+end
+
+def fun_l11_n237(x)
+ if (x < 1)
+ fun_l12_n645(x)
+ else
+ fun_l12_n691(x)
+ end
+end
+
+def fun_l11_n238(x)
+ if (x < 1)
+ fun_l12_n920(x)
+ else
+ fun_l12_n97(x)
+ end
+end
+
+def fun_l11_n239(x)
+ if (x < 1)
+ fun_l12_n491(x)
+ else
+ fun_l12_n172(x)
+ end
+end
+
+def fun_l11_n240(x)
+ if (x < 1)
+ fun_l12_n882(x)
+ else
+ fun_l12_n596(x)
+ end
+end
+
+def fun_l11_n241(x)
+ if (x < 1)
+ fun_l12_n178(x)
+ else
+ fun_l12_n671(x)
+ end
+end
+
+def fun_l11_n242(x)
+ if (x < 1)
+ fun_l12_n483(x)
+ else
+ fun_l12_n143(x)
+ end
+end
+
+def fun_l11_n243(x)
+ if (x < 1)
+ fun_l12_n677(x)
+ else
+ fun_l12_n542(x)
+ end
+end
+
+def fun_l11_n244(x)
+ if (x < 1)
+ fun_l12_n676(x)
+ else
+ fun_l12_n873(x)
+ end
+end
+
+def fun_l11_n245(x)
+ if (x < 1)
+ fun_l12_n393(x)
+ else
+ fun_l12_n532(x)
+ end
+end
+
+def fun_l11_n246(x)
+ if (x < 1)
+ fun_l12_n704(x)
+ else
+ fun_l12_n885(x)
+ end
+end
+
+def fun_l11_n247(x)
+ if (x < 1)
+ fun_l12_n668(x)
+ else
+ fun_l12_n305(x)
+ end
+end
+
+def fun_l11_n248(x)
+ if (x < 1)
+ fun_l12_n925(x)
+ else
+ fun_l12_n342(x)
+ end
+end
+
+def fun_l11_n249(x)
+ if (x < 1)
+ fun_l12_n487(x)
+ else
+ fun_l12_n517(x)
+ end
+end
+
+def fun_l11_n250(x)
+ if (x < 1)
+ fun_l12_n889(x)
+ else
+ fun_l12_n91(x)
+ end
+end
+
+def fun_l11_n251(x)
+ if (x < 1)
+ fun_l12_n908(x)
+ else
+ fun_l12_n645(x)
+ end
+end
+
+def fun_l11_n252(x)
+ if (x < 1)
+ fun_l12_n747(x)
+ else
+ fun_l12_n736(x)
+ end
+end
+
+def fun_l11_n253(x)
+ if (x < 1)
+ fun_l12_n749(x)
+ else
+ fun_l12_n264(x)
+ end
+end
+
+def fun_l11_n254(x)
+ if (x < 1)
+ fun_l12_n353(x)
+ else
+ fun_l12_n120(x)
+ end
+end
+
+def fun_l11_n255(x)
+ if (x < 1)
+ fun_l12_n231(x)
+ else
+ fun_l12_n211(x)
+ end
+end
+
+def fun_l11_n256(x)
+ if (x < 1)
+ fun_l12_n66(x)
+ else
+ fun_l12_n998(x)
+ end
+end
+
+def fun_l11_n257(x)
+ if (x < 1)
+ fun_l12_n818(x)
+ else
+ fun_l12_n37(x)
+ end
+end
+
+def fun_l11_n258(x)
+ if (x < 1)
+ fun_l12_n96(x)
+ else
+ fun_l12_n489(x)
+ end
+end
+
+def fun_l11_n259(x)
+ if (x < 1)
+ fun_l12_n86(x)
+ else
+ fun_l12_n432(x)
+ end
+end
+
+def fun_l11_n260(x)
+ if (x < 1)
+ fun_l12_n774(x)
+ else
+ fun_l12_n203(x)
+ end
+end
+
+def fun_l11_n261(x)
+ if (x < 1)
+ fun_l12_n935(x)
+ else
+ fun_l12_n29(x)
+ end
+end
+
+def fun_l11_n262(x)
+ if (x < 1)
+ fun_l12_n599(x)
+ else
+ fun_l12_n204(x)
+ end
+end
+
+def fun_l11_n263(x)
+ if (x < 1)
+ fun_l12_n63(x)
+ else
+ fun_l12_n866(x)
+ end
+end
+
+def fun_l11_n264(x)
+ if (x < 1)
+ fun_l12_n892(x)
+ else
+ fun_l12_n847(x)
+ end
+end
+
+def fun_l11_n265(x)
+ if (x < 1)
+ fun_l12_n695(x)
+ else
+ fun_l12_n881(x)
+ end
+end
+
+def fun_l11_n266(x)
+ if (x < 1)
+ fun_l12_n176(x)
+ else
+ fun_l12_n39(x)
+ end
+end
+
+def fun_l11_n267(x)
+ if (x < 1)
+ fun_l12_n448(x)
+ else
+ fun_l12_n744(x)
+ end
+end
+
+def fun_l11_n268(x)
+ if (x < 1)
+ fun_l12_n587(x)
+ else
+ fun_l12_n462(x)
+ end
+end
+
+def fun_l11_n269(x)
+ if (x < 1)
+ fun_l12_n670(x)
+ else
+ fun_l12_n797(x)
+ end
+end
+
+def fun_l11_n270(x)
+ if (x < 1)
+ fun_l12_n856(x)
+ else
+ fun_l12_n903(x)
+ end
+end
+
+def fun_l11_n271(x)
+ if (x < 1)
+ fun_l12_n914(x)
+ else
+ fun_l12_n459(x)
+ end
+end
+
+def fun_l11_n272(x)
+ if (x < 1)
+ fun_l12_n932(x)
+ else
+ fun_l12_n10(x)
+ end
+end
+
+def fun_l11_n273(x)
+ if (x < 1)
+ fun_l12_n554(x)
+ else
+ fun_l12_n528(x)
+ end
+end
+
+def fun_l11_n274(x)
+ if (x < 1)
+ fun_l12_n698(x)
+ else
+ fun_l12_n899(x)
+ end
+end
+
+def fun_l11_n275(x)
+ if (x < 1)
+ fun_l12_n907(x)
+ else
+ fun_l12_n945(x)
+ end
+end
+
+def fun_l11_n276(x)
+ if (x < 1)
+ fun_l12_n421(x)
+ else
+ fun_l12_n575(x)
+ end
+end
+
+def fun_l11_n277(x)
+ if (x < 1)
+ fun_l12_n757(x)
+ else
+ fun_l12_n808(x)
+ end
+end
+
+def fun_l11_n278(x)
+ if (x < 1)
+ fun_l12_n527(x)
+ else
+ fun_l12_n438(x)
+ end
+end
+
+def fun_l11_n279(x)
+ if (x < 1)
+ fun_l12_n263(x)
+ else
+ fun_l12_n842(x)
+ end
+end
+
+def fun_l11_n280(x)
+ if (x < 1)
+ fun_l12_n603(x)
+ else
+ fun_l12_n899(x)
+ end
+end
+
+def fun_l11_n281(x)
+ if (x < 1)
+ fun_l12_n486(x)
+ else
+ fun_l12_n380(x)
+ end
+end
+
+def fun_l11_n282(x)
+ if (x < 1)
+ fun_l12_n699(x)
+ else
+ fun_l12_n199(x)
+ end
+end
+
+def fun_l11_n283(x)
+ if (x < 1)
+ fun_l12_n422(x)
+ else
+ fun_l12_n258(x)
+ end
+end
+
+def fun_l11_n284(x)
+ if (x < 1)
+ fun_l12_n255(x)
+ else
+ fun_l12_n752(x)
+ end
+end
+
+def fun_l11_n285(x)
+ if (x < 1)
+ fun_l12_n506(x)
+ else
+ fun_l12_n801(x)
+ end
+end
+
+def fun_l11_n286(x)
+ if (x < 1)
+ fun_l12_n535(x)
+ else
+ fun_l12_n892(x)
+ end
+end
+
+def fun_l11_n287(x)
+ if (x < 1)
+ fun_l12_n547(x)
+ else
+ fun_l12_n74(x)
+ end
+end
+
+def fun_l11_n288(x)
+ if (x < 1)
+ fun_l12_n923(x)
+ else
+ fun_l12_n135(x)
+ end
+end
+
+def fun_l11_n289(x)
+ if (x < 1)
+ fun_l12_n78(x)
+ else
+ fun_l12_n704(x)
+ end
+end
+
+def fun_l11_n290(x)
+ if (x < 1)
+ fun_l12_n771(x)
+ else
+ fun_l12_n915(x)
+ end
+end
+
+def fun_l11_n291(x)
+ if (x < 1)
+ fun_l12_n184(x)
+ else
+ fun_l12_n188(x)
+ end
+end
+
+def fun_l11_n292(x)
+ if (x < 1)
+ fun_l12_n736(x)
+ else
+ fun_l12_n190(x)
+ end
+end
+
+def fun_l11_n293(x)
+ if (x < 1)
+ fun_l12_n301(x)
+ else
+ fun_l12_n745(x)
+ end
+end
+
+def fun_l11_n294(x)
+ if (x < 1)
+ fun_l12_n525(x)
+ else
+ fun_l12_n603(x)
+ end
+end
+
+def fun_l11_n295(x)
+ if (x < 1)
+ fun_l12_n339(x)
+ else
+ fun_l12_n996(x)
+ end
+end
+
+def fun_l11_n296(x)
+ if (x < 1)
+ fun_l12_n491(x)
+ else
+ fun_l12_n601(x)
+ end
+end
+
+def fun_l11_n297(x)
+ if (x < 1)
+ fun_l12_n449(x)
+ else
+ fun_l12_n361(x)
+ end
+end
+
+def fun_l11_n298(x)
+ if (x < 1)
+ fun_l12_n617(x)
+ else
+ fun_l12_n346(x)
+ end
+end
+
+def fun_l11_n299(x)
+ if (x < 1)
+ fun_l12_n578(x)
+ else
+ fun_l12_n147(x)
+ end
+end
+
+def fun_l11_n300(x)
+ if (x < 1)
+ fun_l12_n73(x)
+ else
+ fun_l12_n360(x)
+ end
+end
+
+def fun_l11_n301(x)
+ if (x < 1)
+ fun_l12_n398(x)
+ else
+ fun_l12_n749(x)
+ end
+end
+
+def fun_l11_n302(x)
+ if (x < 1)
+ fun_l12_n871(x)
+ else
+ fun_l12_n745(x)
+ end
+end
+
+def fun_l11_n303(x)
+ if (x < 1)
+ fun_l12_n669(x)
+ else
+ fun_l12_n196(x)
+ end
+end
+
+def fun_l11_n304(x)
+ if (x < 1)
+ fun_l12_n202(x)
+ else
+ fun_l12_n173(x)
+ end
+end
+
+def fun_l11_n305(x)
+ if (x < 1)
+ fun_l12_n975(x)
+ else
+ fun_l12_n859(x)
+ end
+end
+
+def fun_l11_n306(x)
+ if (x < 1)
+ fun_l12_n776(x)
+ else
+ fun_l12_n433(x)
+ end
+end
+
+def fun_l11_n307(x)
+ if (x < 1)
+ fun_l12_n760(x)
+ else
+ fun_l12_n4(x)
+ end
+end
+
+def fun_l11_n308(x)
+ if (x < 1)
+ fun_l12_n834(x)
+ else
+ fun_l12_n276(x)
+ end
+end
+
+def fun_l11_n309(x)
+ if (x < 1)
+ fun_l12_n162(x)
+ else
+ fun_l12_n741(x)
+ end
+end
+
+def fun_l11_n310(x)
+ if (x < 1)
+ fun_l12_n231(x)
+ else
+ fun_l12_n646(x)
+ end
+end
+
+def fun_l11_n311(x)
+ if (x < 1)
+ fun_l12_n90(x)
+ else
+ fun_l12_n46(x)
+ end
+end
+
+def fun_l11_n312(x)
+ if (x < 1)
+ fun_l12_n101(x)
+ else
+ fun_l12_n75(x)
+ end
+end
+
+def fun_l11_n313(x)
+ if (x < 1)
+ fun_l12_n415(x)
+ else
+ fun_l12_n535(x)
+ end
+end
+
+def fun_l11_n314(x)
+ if (x < 1)
+ fun_l12_n550(x)
+ else
+ fun_l12_n305(x)
+ end
+end
+
+def fun_l11_n315(x)
+ if (x < 1)
+ fun_l12_n971(x)
+ else
+ fun_l12_n700(x)
+ end
+end
+
+def fun_l11_n316(x)
+ if (x < 1)
+ fun_l12_n942(x)
+ else
+ fun_l12_n793(x)
+ end
+end
+
+def fun_l11_n317(x)
+ if (x < 1)
+ fun_l12_n771(x)
+ else
+ fun_l12_n383(x)
+ end
+end
+
+def fun_l11_n318(x)
+ if (x < 1)
+ fun_l12_n846(x)
+ else
+ fun_l12_n504(x)
+ end
+end
+
+def fun_l11_n319(x)
+ if (x < 1)
+ fun_l12_n28(x)
+ else
+ fun_l12_n174(x)
+ end
+end
+
+def fun_l11_n320(x)
+ if (x < 1)
+ fun_l12_n437(x)
+ else
+ fun_l12_n355(x)
+ end
+end
+
+def fun_l11_n321(x)
+ if (x < 1)
+ fun_l12_n954(x)
+ else
+ fun_l12_n78(x)
+ end
+end
+
+def fun_l11_n322(x)
+ if (x < 1)
+ fun_l12_n256(x)
+ else
+ fun_l12_n729(x)
+ end
+end
+
+def fun_l11_n323(x)
+ if (x < 1)
+ fun_l12_n41(x)
+ else
+ fun_l12_n88(x)
+ end
+end
+
+def fun_l11_n324(x)
+ if (x < 1)
+ fun_l12_n256(x)
+ else
+ fun_l12_n162(x)
+ end
+end
+
+def fun_l11_n325(x)
+ if (x < 1)
+ fun_l12_n143(x)
+ else
+ fun_l12_n506(x)
+ end
+end
+
+def fun_l11_n326(x)
+ if (x < 1)
+ fun_l12_n686(x)
+ else
+ fun_l12_n968(x)
+ end
+end
+
+def fun_l11_n327(x)
+ if (x < 1)
+ fun_l12_n235(x)
+ else
+ fun_l12_n246(x)
+ end
+end
+
+def fun_l11_n328(x)
+ if (x < 1)
+ fun_l12_n85(x)
+ else
+ fun_l12_n125(x)
+ end
+end
+
+def fun_l11_n329(x)
+ if (x < 1)
+ fun_l12_n877(x)
+ else
+ fun_l12_n193(x)
+ end
+end
+
+def fun_l11_n330(x)
+ if (x < 1)
+ fun_l12_n678(x)
+ else
+ fun_l12_n147(x)
+ end
+end
+
+def fun_l11_n331(x)
+ if (x < 1)
+ fun_l12_n402(x)
+ else
+ fun_l12_n747(x)
+ end
+end
+
+def fun_l11_n332(x)
+ if (x < 1)
+ fun_l12_n390(x)
+ else
+ fun_l12_n852(x)
+ end
+end
+
+def fun_l11_n333(x)
+ if (x < 1)
+ fun_l12_n299(x)
+ else
+ fun_l12_n122(x)
+ end
+end
+
+def fun_l11_n334(x)
+ if (x < 1)
+ fun_l12_n398(x)
+ else
+ fun_l12_n886(x)
+ end
+end
+
+def fun_l11_n335(x)
+ if (x < 1)
+ fun_l12_n128(x)
+ else
+ fun_l12_n729(x)
+ end
+end
+
+def fun_l11_n336(x)
+ if (x < 1)
+ fun_l12_n611(x)
+ else
+ fun_l12_n75(x)
+ end
+end
+
+def fun_l11_n337(x)
+ if (x < 1)
+ fun_l12_n327(x)
+ else
+ fun_l12_n442(x)
+ end
+end
+
+def fun_l11_n338(x)
+ if (x < 1)
+ fun_l12_n353(x)
+ else
+ fun_l12_n259(x)
+ end
+end
+
+def fun_l11_n339(x)
+ if (x < 1)
+ fun_l12_n173(x)
+ else
+ fun_l12_n846(x)
+ end
+end
+
+def fun_l11_n340(x)
+ if (x < 1)
+ fun_l12_n579(x)
+ else
+ fun_l12_n869(x)
+ end
+end
+
+def fun_l11_n341(x)
+ if (x < 1)
+ fun_l12_n336(x)
+ else
+ fun_l12_n364(x)
+ end
+end
+
+def fun_l11_n342(x)
+ if (x < 1)
+ fun_l12_n355(x)
+ else
+ fun_l12_n317(x)
+ end
+end
+
+def fun_l11_n343(x)
+ if (x < 1)
+ fun_l12_n560(x)
+ else
+ fun_l12_n202(x)
+ end
+end
+
+def fun_l11_n344(x)
+ if (x < 1)
+ fun_l12_n232(x)
+ else
+ fun_l12_n447(x)
+ end
+end
+
+def fun_l11_n345(x)
+ if (x < 1)
+ fun_l12_n712(x)
+ else
+ fun_l12_n205(x)
+ end
+end
+
+def fun_l11_n346(x)
+ if (x < 1)
+ fun_l12_n102(x)
+ else
+ fun_l12_n785(x)
+ end
+end
+
+def fun_l11_n347(x)
+ if (x < 1)
+ fun_l12_n545(x)
+ else
+ fun_l12_n186(x)
+ end
+end
+
+def fun_l11_n348(x)
+ if (x < 1)
+ fun_l12_n530(x)
+ else
+ fun_l12_n705(x)
+ end
+end
+
+def fun_l11_n349(x)
+ if (x < 1)
+ fun_l12_n428(x)
+ else
+ fun_l12_n295(x)
+ end
+end
+
+def fun_l11_n350(x)
+ if (x < 1)
+ fun_l12_n855(x)
+ else
+ fun_l12_n232(x)
+ end
+end
+
+def fun_l11_n351(x)
+ if (x < 1)
+ fun_l12_n759(x)
+ else
+ fun_l12_n13(x)
+ end
+end
+
+def fun_l11_n352(x)
+ if (x < 1)
+ fun_l12_n366(x)
+ else
+ fun_l12_n700(x)
+ end
+end
+
+def fun_l11_n353(x)
+ if (x < 1)
+ fun_l12_n814(x)
+ else
+ fun_l12_n101(x)
+ end
+end
+
+def fun_l11_n354(x)
+ if (x < 1)
+ fun_l12_n788(x)
+ else
+ fun_l12_n509(x)
+ end
+end
+
+def fun_l11_n355(x)
+ if (x < 1)
+ fun_l12_n619(x)
+ else
+ fun_l12_n531(x)
+ end
+end
+
+def fun_l11_n356(x)
+ if (x < 1)
+ fun_l12_n142(x)
+ else
+ fun_l12_n586(x)
+ end
+end
+
+def fun_l11_n357(x)
+ if (x < 1)
+ fun_l12_n772(x)
+ else
+ fun_l12_n714(x)
+ end
+end
+
+def fun_l11_n358(x)
+ if (x < 1)
+ fun_l12_n506(x)
+ else
+ fun_l12_n405(x)
+ end
+end
+
+def fun_l11_n359(x)
+ if (x < 1)
+ fun_l12_n991(x)
+ else
+ fun_l12_n141(x)
+ end
+end
+
+def fun_l11_n360(x)
+ if (x < 1)
+ fun_l12_n156(x)
+ else
+ fun_l12_n67(x)
+ end
+end
+
+def fun_l11_n361(x)
+ if (x < 1)
+ fun_l12_n525(x)
+ else
+ fun_l12_n293(x)
+ end
+end
+
+def fun_l11_n362(x)
+ if (x < 1)
+ fun_l12_n396(x)
+ else
+ fun_l12_n931(x)
+ end
+end
+
+def fun_l11_n363(x)
+ if (x < 1)
+ fun_l12_n153(x)
+ else
+ fun_l12_n831(x)
+ end
+end
+
+def fun_l11_n364(x)
+ if (x < 1)
+ fun_l12_n240(x)
+ else
+ fun_l12_n450(x)
+ end
+end
+
+def fun_l11_n365(x)
+ if (x < 1)
+ fun_l12_n514(x)
+ else
+ fun_l12_n312(x)
+ end
+end
+
+def fun_l11_n366(x)
+ if (x < 1)
+ fun_l12_n456(x)
+ else
+ fun_l12_n221(x)
+ end
+end
+
+def fun_l11_n367(x)
+ if (x < 1)
+ fun_l12_n920(x)
+ else
+ fun_l12_n699(x)
+ end
+end
+
+def fun_l11_n368(x)
+ if (x < 1)
+ fun_l12_n738(x)
+ else
+ fun_l12_n793(x)
+ end
+end
+
+def fun_l11_n369(x)
+ if (x < 1)
+ fun_l12_n13(x)
+ else
+ fun_l12_n812(x)
+ end
+end
+
+def fun_l11_n370(x)
+ if (x < 1)
+ fun_l12_n392(x)
+ else
+ fun_l12_n342(x)
+ end
+end
+
+def fun_l11_n371(x)
+ if (x < 1)
+ fun_l12_n470(x)
+ else
+ fun_l12_n623(x)
+ end
+end
+
+def fun_l11_n372(x)
+ if (x < 1)
+ fun_l12_n26(x)
+ else
+ fun_l12_n138(x)
+ end
+end
+
+def fun_l11_n373(x)
+ if (x < 1)
+ fun_l12_n70(x)
+ else
+ fun_l12_n682(x)
+ end
+end
+
+def fun_l11_n374(x)
+ if (x < 1)
+ fun_l12_n413(x)
+ else
+ fun_l12_n900(x)
+ end
+end
+
+def fun_l11_n375(x)
+ if (x < 1)
+ fun_l12_n328(x)
+ else
+ fun_l12_n848(x)
+ end
+end
+
+def fun_l11_n376(x)
+ if (x < 1)
+ fun_l12_n604(x)
+ else
+ fun_l12_n823(x)
+ end
+end
+
+def fun_l11_n377(x)
+ if (x < 1)
+ fun_l12_n469(x)
+ else
+ fun_l12_n868(x)
+ end
+end
+
+def fun_l11_n378(x)
+ if (x < 1)
+ fun_l12_n241(x)
+ else
+ fun_l12_n571(x)
+ end
+end
+
+def fun_l11_n379(x)
+ if (x < 1)
+ fun_l12_n669(x)
+ else
+ fun_l12_n75(x)
+ end
+end
+
+def fun_l11_n380(x)
+ if (x < 1)
+ fun_l12_n587(x)
+ else
+ fun_l12_n454(x)
+ end
+end
+
+def fun_l11_n381(x)
+ if (x < 1)
+ fun_l12_n210(x)
+ else
+ fun_l12_n512(x)
+ end
+end
+
+def fun_l11_n382(x)
+ if (x < 1)
+ fun_l12_n73(x)
+ else
+ fun_l12_n981(x)
+ end
+end
+
+def fun_l11_n383(x)
+ if (x < 1)
+ fun_l12_n59(x)
+ else
+ fun_l12_n274(x)
+ end
+end
+
+def fun_l11_n384(x)
+ if (x < 1)
+ fun_l12_n242(x)
+ else
+ fun_l12_n631(x)
+ end
+end
+
+def fun_l11_n385(x)
+ if (x < 1)
+ fun_l12_n933(x)
+ else
+ fun_l12_n678(x)
+ end
+end
+
+def fun_l11_n386(x)
+ if (x < 1)
+ fun_l12_n695(x)
+ else
+ fun_l12_n766(x)
+ end
+end
+
+def fun_l11_n387(x)
+ if (x < 1)
+ fun_l12_n49(x)
+ else
+ fun_l12_n459(x)
+ end
+end
+
+def fun_l11_n388(x)
+ if (x < 1)
+ fun_l12_n751(x)
+ else
+ fun_l12_n594(x)
+ end
+end
+
+def fun_l11_n389(x)
+ if (x < 1)
+ fun_l12_n149(x)
+ else
+ fun_l12_n290(x)
+ end
+end
+
+def fun_l11_n390(x)
+ if (x < 1)
+ fun_l12_n307(x)
+ else
+ fun_l12_n723(x)
+ end
+end
+
+def fun_l11_n391(x)
+ if (x < 1)
+ fun_l12_n606(x)
+ else
+ fun_l12_n97(x)
+ end
+end
+
+def fun_l11_n392(x)
+ if (x < 1)
+ fun_l12_n31(x)
+ else
+ fun_l12_n610(x)
+ end
+end
+
+def fun_l11_n393(x)
+ if (x < 1)
+ fun_l12_n798(x)
+ else
+ fun_l12_n940(x)
+ end
+end
+
+def fun_l11_n394(x)
+ if (x < 1)
+ fun_l12_n131(x)
+ else
+ fun_l12_n79(x)
+ end
+end
+
+def fun_l11_n395(x)
+ if (x < 1)
+ fun_l12_n230(x)
+ else
+ fun_l12_n72(x)
+ end
+end
+
+def fun_l11_n396(x)
+ if (x < 1)
+ fun_l12_n506(x)
+ else
+ fun_l12_n433(x)
+ end
+end
+
+def fun_l11_n397(x)
+ if (x < 1)
+ fun_l12_n410(x)
+ else
+ fun_l12_n648(x)
+ end
+end
+
+def fun_l11_n398(x)
+ if (x < 1)
+ fun_l12_n946(x)
+ else
+ fun_l12_n630(x)
+ end
+end
+
+def fun_l11_n399(x)
+ if (x < 1)
+ fun_l12_n394(x)
+ else
+ fun_l12_n220(x)
+ end
+end
+
+def fun_l11_n400(x)
+ if (x < 1)
+ fun_l12_n761(x)
+ else
+ fun_l12_n407(x)
+ end
+end
+
+def fun_l11_n401(x)
+ if (x < 1)
+ fun_l12_n386(x)
+ else
+ fun_l12_n411(x)
+ end
+end
+
+def fun_l11_n402(x)
+ if (x < 1)
+ fun_l12_n290(x)
+ else
+ fun_l12_n334(x)
+ end
+end
+
+def fun_l11_n403(x)
+ if (x < 1)
+ fun_l12_n600(x)
+ else
+ fun_l12_n282(x)
+ end
+end
+
+def fun_l11_n404(x)
+ if (x < 1)
+ fun_l12_n334(x)
+ else
+ fun_l12_n214(x)
+ end
+end
+
+def fun_l11_n405(x)
+ if (x < 1)
+ fun_l12_n103(x)
+ else
+ fun_l12_n188(x)
+ end
+end
+
+def fun_l11_n406(x)
+ if (x < 1)
+ fun_l12_n755(x)
+ else
+ fun_l12_n773(x)
+ end
+end
+
+def fun_l11_n407(x)
+ if (x < 1)
+ fun_l12_n269(x)
+ else
+ fun_l12_n579(x)
+ end
+end
+
+def fun_l11_n408(x)
+ if (x < 1)
+ fun_l12_n54(x)
+ else
+ fun_l12_n961(x)
+ end
+end
+
+def fun_l11_n409(x)
+ if (x < 1)
+ fun_l12_n699(x)
+ else
+ fun_l12_n117(x)
+ end
+end
+
+def fun_l11_n410(x)
+ if (x < 1)
+ fun_l12_n802(x)
+ else
+ fun_l12_n65(x)
+ end
+end
+
+def fun_l11_n411(x)
+ if (x < 1)
+ fun_l12_n776(x)
+ else
+ fun_l12_n676(x)
+ end
+end
+
+def fun_l11_n412(x)
+ if (x < 1)
+ fun_l12_n520(x)
+ else
+ fun_l12_n447(x)
+ end
+end
+
+def fun_l11_n413(x)
+ if (x < 1)
+ fun_l12_n856(x)
+ else
+ fun_l12_n285(x)
+ end
+end
+
+def fun_l11_n414(x)
+ if (x < 1)
+ fun_l12_n797(x)
+ else
+ fun_l12_n590(x)
+ end
+end
+
+def fun_l11_n415(x)
+ if (x < 1)
+ fun_l12_n128(x)
+ else
+ fun_l12_n677(x)
+ end
+end
+
+def fun_l11_n416(x)
+ if (x < 1)
+ fun_l12_n422(x)
+ else
+ fun_l12_n632(x)
+ end
+end
+
+def fun_l11_n417(x)
+ if (x < 1)
+ fun_l12_n159(x)
+ else
+ fun_l12_n722(x)
+ end
+end
+
+def fun_l11_n418(x)
+ if (x < 1)
+ fun_l12_n989(x)
+ else
+ fun_l12_n670(x)
+ end
+end
+
+def fun_l11_n419(x)
+ if (x < 1)
+ fun_l12_n952(x)
+ else
+ fun_l12_n65(x)
+ end
+end
+
+def fun_l11_n420(x)
+ if (x < 1)
+ fun_l12_n796(x)
+ else
+ fun_l12_n493(x)
+ end
+end
+
+def fun_l11_n421(x)
+ if (x < 1)
+ fun_l12_n897(x)
+ else
+ fun_l12_n729(x)
+ end
+end
+
+def fun_l11_n422(x)
+ if (x < 1)
+ fun_l12_n557(x)
+ else
+ fun_l12_n545(x)
+ end
+end
+
+def fun_l11_n423(x)
+ if (x < 1)
+ fun_l12_n716(x)
+ else
+ fun_l12_n746(x)
+ end
+end
+
+def fun_l11_n424(x)
+ if (x < 1)
+ fun_l12_n335(x)
+ else
+ fun_l12_n968(x)
+ end
+end
+
+def fun_l11_n425(x)
+ if (x < 1)
+ fun_l12_n891(x)
+ else
+ fun_l12_n115(x)
+ end
+end
+
+def fun_l11_n426(x)
+ if (x < 1)
+ fun_l12_n757(x)
+ else
+ fun_l12_n750(x)
+ end
+end
+
+def fun_l11_n427(x)
+ if (x < 1)
+ fun_l12_n391(x)
+ else
+ fun_l12_n729(x)
+ end
+end
+
+def fun_l11_n428(x)
+ if (x < 1)
+ fun_l12_n249(x)
+ else
+ fun_l12_n347(x)
+ end
+end
+
+def fun_l11_n429(x)
+ if (x < 1)
+ fun_l12_n234(x)
+ else
+ fun_l12_n154(x)
+ end
+end
+
+def fun_l11_n430(x)
+ if (x < 1)
+ fun_l12_n890(x)
+ else
+ fun_l12_n40(x)
+ end
+end
+
+def fun_l11_n431(x)
+ if (x < 1)
+ fun_l12_n804(x)
+ else
+ fun_l12_n90(x)
+ end
+end
+
+def fun_l11_n432(x)
+ if (x < 1)
+ fun_l12_n936(x)
+ else
+ fun_l12_n127(x)
+ end
+end
+
+def fun_l11_n433(x)
+ if (x < 1)
+ fun_l12_n551(x)
+ else
+ fun_l12_n404(x)
+ end
+end
+
+def fun_l11_n434(x)
+ if (x < 1)
+ fun_l12_n246(x)
+ else
+ fun_l12_n759(x)
+ end
+end
+
+def fun_l11_n435(x)
+ if (x < 1)
+ fun_l12_n708(x)
+ else
+ fun_l12_n735(x)
+ end
+end
+
+def fun_l11_n436(x)
+ if (x < 1)
+ fun_l12_n535(x)
+ else
+ fun_l12_n785(x)
+ end
+end
+
+def fun_l11_n437(x)
+ if (x < 1)
+ fun_l12_n322(x)
+ else
+ fun_l12_n629(x)
+ end
+end
+
+def fun_l11_n438(x)
+ if (x < 1)
+ fun_l12_n986(x)
+ else
+ fun_l12_n899(x)
+ end
+end
+
+def fun_l11_n439(x)
+ if (x < 1)
+ fun_l12_n147(x)
+ else
+ fun_l12_n582(x)
+ end
+end
+
+def fun_l11_n440(x)
+ if (x < 1)
+ fun_l12_n375(x)
+ else
+ fun_l12_n667(x)
+ end
+end
+
+def fun_l11_n441(x)
+ if (x < 1)
+ fun_l12_n467(x)
+ else
+ fun_l12_n994(x)
+ end
+end
+
+def fun_l11_n442(x)
+ if (x < 1)
+ fun_l12_n176(x)
+ else
+ fun_l12_n558(x)
+ end
+end
+
+def fun_l11_n443(x)
+ if (x < 1)
+ fun_l12_n452(x)
+ else
+ fun_l12_n37(x)
+ end
+end
+
+def fun_l11_n444(x)
+ if (x < 1)
+ fun_l12_n701(x)
+ else
+ fun_l12_n382(x)
+ end
+end
+
+def fun_l11_n445(x)
+ if (x < 1)
+ fun_l12_n881(x)
+ else
+ fun_l12_n896(x)
+ end
+end
+
+def fun_l11_n446(x)
+ if (x < 1)
+ fun_l12_n901(x)
+ else
+ fun_l12_n822(x)
+ end
+end
+
+def fun_l11_n447(x)
+ if (x < 1)
+ fun_l12_n6(x)
+ else
+ fun_l12_n648(x)
+ end
+end
+
+def fun_l11_n448(x)
+ if (x < 1)
+ fun_l12_n359(x)
+ else
+ fun_l12_n682(x)
+ end
+end
+
+def fun_l11_n449(x)
+ if (x < 1)
+ fun_l12_n675(x)
+ else
+ fun_l12_n379(x)
+ end
+end
+
+def fun_l11_n450(x)
+ if (x < 1)
+ fun_l12_n254(x)
+ else
+ fun_l12_n475(x)
+ end
+end
+
+def fun_l11_n451(x)
+ if (x < 1)
+ fun_l12_n951(x)
+ else
+ fun_l12_n947(x)
+ end
+end
+
+def fun_l11_n452(x)
+ if (x < 1)
+ fun_l12_n909(x)
+ else
+ fun_l12_n703(x)
+ end
+end
+
+def fun_l11_n453(x)
+ if (x < 1)
+ fun_l12_n555(x)
+ else
+ fun_l12_n862(x)
+ end
+end
+
+def fun_l11_n454(x)
+ if (x < 1)
+ fun_l12_n379(x)
+ else
+ fun_l12_n852(x)
+ end
+end
+
+def fun_l11_n455(x)
+ if (x < 1)
+ fun_l12_n85(x)
+ else
+ fun_l12_n219(x)
+ end
+end
+
+def fun_l11_n456(x)
+ if (x < 1)
+ fun_l12_n84(x)
+ else
+ fun_l12_n678(x)
+ end
+end
+
+def fun_l11_n457(x)
+ if (x < 1)
+ fun_l12_n663(x)
+ else
+ fun_l12_n637(x)
+ end
+end
+
+def fun_l11_n458(x)
+ if (x < 1)
+ fun_l12_n627(x)
+ else
+ fun_l12_n764(x)
+ end
+end
+
+def fun_l11_n459(x)
+ if (x < 1)
+ fun_l12_n3(x)
+ else
+ fun_l12_n419(x)
+ end
+end
+
+def fun_l11_n460(x)
+ if (x < 1)
+ fun_l12_n69(x)
+ else
+ fun_l12_n802(x)
+ end
+end
+
+def fun_l11_n461(x)
+ if (x < 1)
+ fun_l12_n708(x)
+ else
+ fun_l12_n304(x)
+ end
+end
+
+def fun_l11_n462(x)
+ if (x < 1)
+ fun_l12_n323(x)
+ else
+ fun_l12_n92(x)
+ end
+end
+
+def fun_l11_n463(x)
+ if (x < 1)
+ fun_l12_n987(x)
+ else
+ fun_l12_n434(x)
+ end
+end
+
+def fun_l11_n464(x)
+ if (x < 1)
+ fun_l12_n532(x)
+ else
+ fun_l12_n82(x)
+ end
+end
+
+def fun_l11_n465(x)
+ if (x < 1)
+ fun_l12_n664(x)
+ else
+ fun_l12_n982(x)
+ end
+end
+
+def fun_l11_n466(x)
+ if (x < 1)
+ fun_l12_n488(x)
+ else
+ fun_l12_n799(x)
+ end
+end
+
+def fun_l11_n467(x)
+ if (x < 1)
+ fun_l12_n882(x)
+ else
+ fun_l12_n948(x)
+ end
+end
+
+def fun_l11_n468(x)
+ if (x < 1)
+ fun_l12_n585(x)
+ else
+ fun_l12_n528(x)
+ end
+end
+
+def fun_l11_n469(x)
+ if (x < 1)
+ fun_l12_n318(x)
+ else
+ fun_l12_n233(x)
+ end
+end
+
+def fun_l11_n470(x)
+ if (x < 1)
+ fun_l12_n537(x)
+ else
+ fun_l12_n803(x)
+ end
+end
+
+def fun_l11_n471(x)
+ if (x < 1)
+ fun_l12_n391(x)
+ else
+ fun_l12_n298(x)
+ end
+end
+
+def fun_l11_n472(x)
+ if (x < 1)
+ fun_l12_n266(x)
+ else
+ fun_l12_n345(x)
+ end
+end
+
+def fun_l11_n473(x)
+ if (x < 1)
+ fun_l12_n355(x)
+ else
+ fun_l12_n713(x)
+ end
+end
+
+def fun_l11_n474(x)
+ if (x < 1)
+ fun_l12_n352(x)
+ else
+ fun_l12_n12(x)
+ end
+end
+
+def fun_l11_n475(x)
+ if (x < 1)
+ fun_l12_n23(x)
+ else
+ fun_l12_n715(x)
+ end
+end
+
+def fun_l11_n476(x)
+ if (x < 1)
+ fun_l12_n342(x)
+ else
+ fun_l12_n323(x)
+ end
+end
+
+def fun_l11_n477(x)
+ if (x < 1)
+ fun_l12_n563(x)
+ else
+ fun_l12_n905(x)
+ end
+end
+
+def fun_l11_n478(x)
+ if (x < 1)
+ fun_l12_n313(x)
+ else
+ fun_l12_n489(x)
+ end
+end
+
+def fun_l11_n479(x)
+ if (x < 1)
+ fun_l12_n75(x)
+ else
+ fun_l12_n291(x)
+ end
+end
+
+def fun_l11_n480(x)
+ if (x < 1)
+ fun_l12_n693(x)
+ else
+ fun_l12_n991(x)
+ end
+end
+
+def fun_l11_n481(x)
+ if (x < 1)
+ fun_l12_n246(x)
+ else
+ fun_l12_n664(x)
+ end
+end
+
+def fun_l11_n482(x)
+ if (x < 1)
+ fun_l12_n524(x)
+ else
+ fun_l12_n1(x)
+ end
+end
+
+def fun_l11_n483(x)
+ if (x < 1)
+ fun_l12_n712(x)
+ else
+ fun_l12_n289(x)
+ end
+end
+
+def fun_l11_n484(x)
+ if (x < 1)
+ fun_l12_n435(x)
+ else
+ fun_l12_n163(x)
+ end
+end
+
+def fun_l11_n485(x)
+ if (x < 1)
+ fun_l12_n338(x)
+ else
+ fun_l12_n883(x)
+ end
+end
+
+def fun_l11_n486(x)
+ if (x < 1)
+ fun_l12_n958(x)
+ else
+ fun_l12_n143(x)
+ end
+end
+
+def fun_l11_n487(x)
+ if (x < 1)
+ fun_l12_n22(x)
+ else
+ fun_l12_n674(x)
+ end
+end
+
+def fun_l11_n488(x)
+ if (x < 1)
+ fun_l12_n905(x)
+ else
+ fun_l12_n955(x)
+ end
+end
+
+def fun_l11_n489(x)
+ if (x < 1)
+ fun_l12_n590(x)
+ else
+ fun_l12_n623(x)
+ end
+end
+
+def fun_l11_n490(x)
+ if (x < 1)
+ fun_l12_n434(x)
+ else
+ fun_l12_n793(x)
+ end
+end
+
+def fun_l11_n491(x)
+ if (x < 1)
+ fun_l12_n274(x)
+ else
+ fun_l12_n879(x)
+ end
+end
+
+def fun_l11_n492(x)
+ if (x < 1)
+ fun_l12_n639(x)
+ else
+ fun_l12_n915(x)
+ end
+end
+
+def fun_l11_n493(x)
+ if (x < 1)
+ fun_l12_n547(x)
+ else
+ fun_l12_n87(x)
+ end
+end
+
+def fun_l11_n494(x)
+ if (x < 1)
+ fun_l12_n364(x)
+ else
+ fun_l12_n133(x)
+ end
+end
+
+def fun_l11_n495(x)
+ if (x < 1)
+ fun_l12_n568(x)
+ else
+ fun_l12_n449(x)
+ end
+end
+
+def fun_l11_n496(x)
+ if (x < 1)
+ fun_l12_n751(x)
+ else
+ fun_l12_n580(x)
+ end
+end
+
+def fun_l11_n497(x)
+ if (x < 1)
+ fun_l12_n864(x)
+ else
+ fun_l12_n505(x)
+ end
+end
+
+def fun_l11_n498(x)
+ if (x < 1)
+ fun_l12_n68(x)
+ else
+ fun_l12_n280(x)
+ end
+end
+
+def fun_l11_n499(x)
+ if (x < 1)
+ fun_l12_n821(x)
+ else
+ fun_l12_n816(x)
+ end
+end
+
+def fun_l11_n500(x)
+ if (x < 1)
+ fun_l12_n793(x)
+ else
+ fun_l12_n558(x)
+ end
+end
+
+def fun_l11_n501(x)
+ if (x < 1)
+ fun_l12_n460(x)
+ else
+ fun_l12_n358(x)
+ end
+end
+
+def fun_l11_n502(x)
+ if (x < 1)
+ fun_l12_n99(x)
+ else
+ fun_l12_n149(x)
+ end
+end
+
+def fun_l11_n503(x)
+ if (x < 1)
+ fun_l12_n599(x)
+ else
+ fun_l12_n257(x)
+ end
+end
+
+def fun_l11_n504(x)
+ if (x < 1)
+ fun_l12_n853(x)
+ else
+ fun_l12_n477(x)
+ end
+end
+
+def fun_l11_n505(x)
+ if (x < 1)
+ fun_l12_n845(x)
+ else
+ fun_l12_n776(x)
+ end
+end
+
+def fun_l11_n506(x)
+ if (x < 1)
+ fun_l12_n533(x)
+ else
+ fun_l12_n6(x)
+ end
+end
+
+def fun_l11_n507(x)
+ if (x < 1)
+ fun_l12_n700(x)
+ else
+ fun_l12_n47(x)
+ end
+end
+
+def fun_l11_n508(x)
+ if (x < 1)
+ fun_l12_n449(x)
+ else
+ fun_l12_n261(x)
+ end
+end
+
+def fun_l11_n509(x)
+ if (x < 1)
+ fun_l12_n773(x)
+ else
+ fun_l12_n583(x)
+ end
+end
+
+def fun_l11_n510(x)
+ if (x < 1)
+ fun_l12_n252(x)
+ else
+ fun_l12_n215(x)
+ end
+end
+
+def fun_l11_n511(x)
+ if (x < 1)
+ fun_l12_n280(x)
+ else
+ fun_l12_n589(x)
+ end
+end
+
+def fun_l11_n512(x)
+ if (x < 1)
+ fun_l12_n796(x)
+ else
+ fun_l12_n803(x)
+ end
+end
+
+def fun_l11_n513(x)
+ if (x < 1)
+ fun_l12_n450(x)
+ else
+ fun_l12_n606(x)
+ end
+end
+
+def fun_l11_n514(x)
+ if (x < 1)
+ fun_l12_n509(x)
+ else
+ fun_l12_n521(x)
+ end
+end
+
+def fun_l11_n515(x)
+ if (x < 1)
+ fun_l12_n658(x)
+ else
+ fun_l12_n912(x)
+ end
+end
+
+def fun_l11_n516(x)
+ if (x < 1)
+ fun_l12_n181(x)
+ else
+ fun_l12_n724(x)
+ end
+end
+
+def fun_l11_n517(x)
+ if (x < 1)
+ fun_l12_n167(x)
+ else
+ fun_l12_n996(x)
+ end
+end
+
+def fun_l11_n518(x)
+ if (x < 1)
+ fun_l12_n654(x)
+ else
+ fun_l12_n67(x)
+ end
+end
+
+def fun_l11_n519(x)
+ if (x < 1)
+ fun_l12_n192(x)
+ else
+ fun_l12_n110(x)
+ end
+end
+
+def fun_l11_n520(x)
+ if (x < 1)
+ fun_l12_n271(x)
+ else
+ fun_l12_n74(x)
+ end
+end
+
+def fun_l11_n521(x)
+ if (x < 1)
+ fun_l12_n324(x)
+ else
+ fun_l12_n224(x)
+ end
+end
+
+def fun_l11_n522(x)
+ if (x < 1)
+ fun_l12_n535(x)
+ else
+ fun_l12_n537(x)
+ end
+end
+
+def fun_l11_n523(x)
+ if (x < 1)
+ fun_l12_n103(x)
+ else
+ fun_l12_n942(x)
+ end
+end
+
+def fun_l11_n524(x)
+ if (x < 1)
+ fun_l12_n755(x)
+ else
+ fun_l12_n431(x)
+ end
+end