From 897f553f3f17e03599e23218cd1f2e34ae1f006e Mon Sep 17 00:00:00 2001 From: Trevor Elliott Date: Wed, 12 Oct 2022 09:56:37 -0700 Subject: [PATCH 01/12] Remove booleans from cranelift Remove the boolean types from cranelift, and the associated instructions "breduce", "bextend", "bconst", and "bint". Standardize on using `1`/`0` for the return value from instructions that produce scalar boolean results, and `-1`/`0` for boolean vector elements. Co-authored-by: Afonso Bordado --- cranelift/codegen/meta/src/cdsl/types.rs | 36 +--- cranelift/codegen/meta/src/cdsl/typevar.rs | 137 +----------- cranelift/codegen/meta/src/gen_inst.rs | 4 - cranelift/codegen/meta/src/shared/formats.rs | 3 - .../codegen/meta/src/shared/immediates.rs | 6 - .../codegen/meta/src/shared/instructions.rs | 181 +++------------- cranelift/codegen/meta/src/shared/types.rs | 56 ----- cranelift/codegen/src/data_value.rs | 41 +--- cranelift/codegen/src/egraph/node.rs | 12 +- cranelift/codegen/src/inst_predicates.rs | 18 +- cranelift/codegen/src/ir/builder.rs | 2 +- cranelift/codegen/src/ir/entities.rs | 1 - cranelift/codegen/src/ir/extfunc.rs | 6 +- cranelift/codegen/src/ir/instructions.rs | 23 +- cranelift/codegen/src/ir/types.rs | 158 +++----------- cranelift/codegen/src/isa/aarch64/inst.isle | 149 +++++++------ .../codegen/src/isa/aarch64/inst/imms.rs | 3 - cranelift/codegen/src/isa/aarch64/inst/mod.rs | 30 +-- cranelift/codegen/src/isa/aarch64/lower.isle | 197 ++++-------------- .../codegen/src/isa/aarch64/lower/isle.rs | 1 - .../codegen/src/isa/aarch64/lower_inst.rs | 8 +- cranelift/codegen/src/isa/riscv64/inst.isle | 51 ++++- .../codegen/src/isa/riscv64/inst/args.rs | 6 +- .../codegen/src/isa/riscv64/inst/emit.rs | 4 +- .../src/isa/riscv64/inst/emit_tests.rs | 31 --- .../codegen/src/isa/riscv64/inst/imms.rs | 2 +- cranelift/codegen/src/isa/riscv64/inst/mod.rs | 19 +- cranelift/codegen/src/isa/riscv64/lower.isle | 70 +------ .../codegen/src/isa/riscv64/lower/isle.rs | 34 +-- cranelift/codegen/src/isa/s390x/abi.rs | 1 - cranelift/codegen/src/isa/s390x/inst.isle | 16 +- cranelift/codegen/src/isa/s390x/inst/mod.rs | 38 ++-- cranelift/codegen/src/isa/s390x/lower.isle | 103 ++------- cranelift/codegen/src/isa/s390x/lower.rs | 4 - cranelift/codegen/src/isa/s390x/lower/isle.rs | 6 +- cranelift/codegen/src/isa/x64/abi.rs | 8 +- cranelift/codegen/src/isa/x64/inst/mod.rs | 13 +- cranelift/codegen/src/isa/x64/lower.isle | 132 ++---------- cranelift/codegen/src/isa/x64/lower.rs | 5 - cranelift/codegen/src/isa/x64/lower/isle.rs | 3 +- cranelift/codegen/src/isle_prelude.rs | 35 +--- cranelift/codegen/src/machinst/helpers.rs | 2 +- cranelift/codegen/src/opts/algebraic.isle | 4 - cranelift/codegen/src/prelude.isle | 46 +--- cranelift/codegen/src/simple_preopt.rs | 43 +--- cranelift/codegen/src/souper_harvest.rs | 23 +- cranelift/codegen/src/verifier/mod.rs | 5 +- cranelift/codegen/src/write.rs | 2 - cranelift/docs/ir.md | 44 +--- cranelift/filetests/src/function_runner.rs | 66 ++---- cranelift/frontend/src/frontend.rs | 30 +-- cranelift/frontend/src/ssa.rs | 8 +- cranelift/fuzzgen/src/function_generator.rs | 20 +- cranelift/fuzzgen/src/lib.rs | 1 - cranelift/interpreter/src/frame.rs | 6 +- cranelift/interpreter/src/interpreter.rs | 12 +- cranelift/interpreter/src/step.rs | 84 ++++---- cranelift/interpreter/src/value.rs | 60 ++++-- cranelift/preopt/src/constant_folding.rs | 10 - cranelift/reader/src/lexer.rs | 10 +- cranelift/reader/src/parser.rs | 71 ++----- cranelift/src/bugpoint.rs | 3 - cranelift/src/interpret.rs | 6 +- 
cranelift/src/run.rs | 4 +- cranelift/wasm/src/code_translator.rs | 24 +-- cranelift/wasm/src/environ/spec.rs | 2 +- crates/cranelift/src/func_environ.rs | 2 +- 67 files changed, 538 insertions(+), 1703 deletions(-) diff --git a/cranelift/codegen/meta/src/cdsl/types.rs b/cranelift/codegen/meta/src/cdsl/types.rs index 1c2ca3f1cc56..b8681e619a8e 100644 --- a/cranelift/codegen/meta/src/cdsl/types.rs +++ b/cranelift/codegen/meta/src/cdsl/types.rs @@ -145,7 +145,6 @@ impl From for ValueType { /// A concrete scalar type that can appear as a vector lane too. #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub(crate) enum LaneType { - Bool(shared_types::Bool), Float(shared_types::Float), Int(shared_types::Int), } @@ -154,7 +153,6 @@ impl LaneType { /// Return a string containing the documentation comment for this lane type. pub fn doc(self) -> String { match self { - LaneType::Bool(_) => format!("A boolean type with {} bits.", self.lane_bits()), LaneType::Float(shared_types::Float::F32) => String::from( "A 32-bit floating point type represented in the IEEE 754-2008 *binary32* interchange format. This corresponds to the :c:type:`float` @@ -178,7 +176,6 @@ impl LaneType { /// Return the number of bits in a lane. pub fn lane_bits(self) -> u64 { match self { - LaneType::Bool(ref b) => *b as u64, LaneType::Float(ref f) => *f as u64, LaneType::Int(ref i) => *i as u64, } @@ -188,12 +185,6 @@ impl LaneType { pub fn number(self) -> u16 { constants::LANE_BASE + match self { - LaneType::Bool(shared_types::Bool::B1) => 0, - LaneType::Bool(shared_types::Bool::B8) => 1, - LaneType::Bool(shared_types::Bool::B16) => 2, - LaneType::Bool(shared_types::Bool::B32) => 3, - LaneType::Bool(shared_types::Bool::B64) => 4, - LaneType::Bool(shared_types::Bool::B128) => 5, LaneType::Int(shared_types::Int::I8) => 6, LaneType::Int(shared_types::Int::I16) => 7, LaneType::Int(shared_types::Int::I32) => 8, @@ -204,18 +195,6 @@ impl LaneType { } } - pub fn bool_from_bits(num_bits: u16) -> LaneType { - LaneType::Bool(match num_bits { - 1 => shared_types::Bool::B1, - 8 => shared_types::Bool::B8, - 16 => shared_types::Bool::B16, - 32 => shared_types::Bool::B32, - 64 => shared_types::Bool::B64, - 128 => shared_types::Bool::B128, - _ => unreachable!("unxpected num bits for bool"), - }) - } - pub fn int_from_bits(num_bits: u16) -> LaneType { LaneType::Int(match num_bits { 8 => shared_types::Int::I8, @@ -251,7 +230,6 @@ impl LaneType { impl fmt::Display for LaneType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - LaneType::Bool(_) => write!(f, "b{}", self.lane_bits()), LaneType::Float(_) => write!(f, "f{}", self.lane_bits()), LaneType::Int(_) => write!(f, "i{}", self.lane_bits()), } @@ -265,7 +243,6 @@ impl fmt::Debug for LaneType { f, "{}", match *self { - LaneType::Bool(_) => format!("BoolType({})", inner_msg), LaneType::Float(_) => format!("FloatType({})", inner_msg), LaneType::Int(_) => format!("IntType({})", inner_msg), } @@ -273,13 +250,6 @@ impl fmt::Debug for LaneType { } } -/// Create a LaneType from a given bool variant. -impl From for LaneType { - fn from(b: shared_types::Bool) -> Self { - LaneType::Bool(b) - } -} - /// Create a LaneType from a given float variant. impl From for LaneType { fn from(f: shared_types::Float) -> Self { @@ -296,7 +266,6 @@ impl From for LaneType { /// An iterator for different lane types. 
pub(crate) struct LaneTypeIterator { - bool_iter: shared_types::BoolIterator, int_iter: shared_types::IntIterator, float_iter: shared_types::FloatIterator, } @@ -305,7 +274,6 @@ impl LaneTypeIterator { /// Create a new lane type iterator. fn new() -> Self { Self { - bool_iter: shared_types::BoolIterator::new(), int_iter: shared_types::IntIterator::new(), float_iter: shared_types::FloatIterator::new(), } @@ -315,9 +283,7 @@ impl LaneTypeIterator { impl Iterator for LaneTypeIterator { type Item = LaneType; fn next(&mut self) -> Option { - if let Some(b) = self.bool_iter.next() { - Some(LaneType::from(b)) - } else if let Some(i) = self.int_iter.next() { + if let Some(i) = self.int_iter.next() { Some(LaneType::from(i)) } else if let Some(f) = self.float_iter.next() { Some(LaneType::from(f)) diff --git a/cranelift/codegen/meta/src/cdsl/typevar.rs b/cranelift/codegen/meta/src/cdsl/typevar.rs index 63c14f861a91..1c307fccdf46 100644 --- a/cranelift/codegen/meta/src/cdsl/typevar.rs +++ b/cranelift/codegen/meta/src/cdsl/typevar.rs @@ -90,10 +90,6 @@ impl TypeVar { let bits = float_type as RangeBound; builder.floats(bits..bits) } - LaneType::Bool(bool_type) => { - let bits = bool_type as RangeBound; - builder.bools(bits..bits) - } }; TypeVar::new(name, doc, builder.build()) } @@ -171,10 +167,6 @@ impl TypeVar { ts.floats.is_empty() || *ts.floats.iter().min().unwrap() > 32, "can't halve all float types" ); - assert!( - ts.bools.is_empty() || *ts.bools.iter().min().unwrap() > 8, - "can't halve all boolean types" - ); } DerivedFunc::DoubleWidth => { assert!( @@ -185,10 +177,6 @@ impl TypeVar { ts.floats.is_empty() || *ts.floats.iter().max().unwrap() < MAX_FLOAT_BITS, "can't double all float types" ); - assert!( - ts.bools.is_empty() || *ts.bools.iter().max().unwrap() < MAX_BITS, - "can't double all boolean types" - ); } DerivedFunc::HalfVector => { assert!( @@ -211,10 +199,6 @@ impl TypeVar { ts.floats.is_empty() || *ts.floats.iter().min().unwrap() > 32, "can't halve all float types" ); - assert!( - ts.bools.is_empty() || *ts.bools.iter().min().unwrap() > 8, - "can't halve all boolean types" - ); assert!( *ts.lanes.iter().max().unwrap() < MAX_LANES, "can't double 256 lanes" @@ -229,10 +213,6 @@ impl TypeVar { ts.floats.is_empty() || *ts.floats.iter().max().unwrap() < MAX_FLOAT_BITS, "can't double all float types" ); - assert!( - ts.bools.is_empty() || *ts.bools.iter().max().unwrap() < MAX_BITS, - "can't double all boolean types" - ); assert!( *ts.lanes.iter().min().unwrap() > 1, "can't halve a scalar type" @@ -404,7 +384,6 @@ pub(crate) struct TypeSet { pub dynamic_lanes: NumSet, pub ints: NumSet, pub floats: NumSet, - pub bools: NumSet, pub refs: NumSet, pub specials: Vec, } @@ -415,7 +394,6 @@ impl TypeSet { dynamic_lanes: NumSet, ints: NumSet, floats: NumSet, - bools: NumSet, refs: NumSet, specials: Vec, ) -> Self { @@ -424,7 +402,6 @@ impl TypeSet { dynamic_lanes, ints, floats, - bools, refs, specials, } @@ -432,10 +409,8 @@ impl TypeSet { /// Return the number of concrete types represented by this typeset. 
pub fn size(&self) -> usize { - self.lanes.len() - * (self.ints.len() + self.floats.len() + self.bools.len() + self.refs.len()) - + self.dynamic_lanes.len() - * (self.ints.len() + self.floats.len() + self.bools.len() + self.refs.len()) + self.lanes.len() * (self.ints.len() + self.floats.len() + self.refs.len()) + + self.dynamic_lanes.len() * (self.ints.len() + self.floats.len() + self.refs.len()) + self.specials.len() } @@ -467,13 +442,6 @@ impl TypeSet { copy.ints = NumSet::new(); copy.floats = NumSet::new(); copy.refs = NumSet::new(); - if !(&self.lanes - &num_set![1]).is_empty() { - copy.bools = &self.ints | &self.floats; - copy.bools = ©.bools | &self.bools; - } - if self.lanes.contains(&1) { - copy.bools.insert(1); - } copy } @@ -482,7 +450,6 @@ impl TypeSet { let mut copy = self.clone(); copy.ints = NumSet::from_iter(self.ints.iter().filter(|&&x| x > 8).map(|&x| x / 2)); copy.floats = NumSet::from_iter(self.floats.iter().filter(|&&x| x > 32).map(|&x| x / 2)); - copy.bools = NumSet::from_iter(self.bools.iter().filter(|&&x| x > 8).map(|&x| x / 2)); copy.specials = Vec::new(); copy } @@ -497,13 +464,6 @@ impl TypeSet { .filter(|&&x| x < MAX_FLOAT_BITS) .map(|&x| x * 2), ); - copy.bools = NumSet::from_iter( - self.bools - .iter() - .filter(|&&x| x < MAX_BITS) - .map(|&x| x * 2) - .filter(|x| legal_bool(*x)), - ); copy.specials = Vec::new(); copy } @@ -551,9 +511,6 @@ impl TypeSet { for &bits in &self.floats { ret.push(LaneType::float_from_bits(bits).by(num_lanes)); } - for &bits in &self.bools { - ret.push(LaneType::bool_from_bits(bits).by(num_lanes)); - } for &bits in &self.refs { ret.push(ReferenceType::ref_from_bits(bits).into()); } @@ -565,9 +522,6 @@ impl TypeSet { for &bits in &self.floats { ret.push(LaneType::float_from_bits(bits).to_dynamic(num_lanes)); } - for &bits in &self.bools { - ret.push(LaneType::bool_from_bits(bits).to_dynamic(num_lanes)); - } } for &special in &self.specials { ret.push(special.into()); @@ -612,12 +566,6 @@ impl fmt::Debug for TypeSet { Vec::from_iter(self.floats.iter().map(|x| x.to_string())).join(", ") )); } - if !self.bools.is_empty() { - subsets.push(format!( - "bools={{{}}}", - Vec::from_iter(self.bools.iter().map(|x| x.to_string())).join(", ") - )); - } if !self.refs.is_empty() { subsets.push(format!( "refs={{{}}}", @@ -639,7 +587,6 @@ impl fmt::Debug for TypeSet { pub(crate) struct TypeSetBuilder { ints: Interval, floats: Interval, - bools: Interval, refs: Interval, includes_scalars: bool, simd_lanes: Interval, @@ -652,7 +599,6 @@ impl TypeSetBuilder { Self { ints: Interval::None, floats: Interval::None, - bools: Interval::None, refs: Interval::None, includes_scalars: true, simd_lanes: Interval::None, @@ -671,11 +617,6 @@ impl TypeSetBuilder { self.floats = interval.into(); self } - pub fn bools(mut self, interval: impl Into) -> Self { - assert!(self.bools == Interval::None); - self.bools = interval.into(); - self - } pub fn refs(mut self, interval: impl Into) -> Self { assert!(self.refs == Interval::None); self.refs = interval.into(); @@ -704,17 +645,11 @@ impl TypeSetBuilder { pub fn build(self) -> TypeSet { let min_lanes = if self.includes_scalars { 1 } else { 2 }; - let bools = range_to_set(self.bools.to_range(1..MAX_BITS, None)) - .into_iter() - .filter(|x| legal_bool(*x)) - .collect(); - TypeSet::new( range_to_set(self.simd_lanes.to_range(min_lanes..MAX_LANES, Some(1))), range_to_set(self.dynamic_simd_lanes.to_range(2..MAX_LANES, None)), range_to_set(self.ints.to_range(8..MAX_BITS, None)), range_to_set(self.floats.to_range(32..64, None)), - 
bools, range_to_set(self.refs.to_range(32..64, None)), self.specials, ) @@ -760,11 +695,6 @@ impl Into for Range { } } -fn legal_bool(bits: RangeBound) -> bool { - // Only allow legal bit widths for bool types. - bits == 1 || (bits >= 8 && bits <= MAX_BITS && bits.is_power_of_two()) -} - /// Generates a set with all the powers of two included in the range. fn range_to_set(range: Option) -> NumSet { let mut set = NumSet::new(); @@ -791,21 +721,12 @@ fn test_typevar_builder() { assert_eq!(type_set.lanes, num_set![1]); assert!(type_set.floats.is_empty()); assert_eq!(type_set.ints, num_set![8, 16, 32, 64, 128]); - assert!(type_set.bools.is_empty()); - assert!(type_set.specials.is_empty()); - - let type_set = TypeSetBuilder::new().bools(Interval::All).build(); - assert_eq!(type_set.lanes, num_set![1]); - assert!(type_set.floats.is_empty()); - assert!(type_set.ints.is_empty()); - assert_eq!(type_set.bools, num_set![1, 8, 16, 32, 64, 128]); assert!(type_set.specials.is_empty()); let type_set = TypeSetBuilder::new().floats(Interval::All).build(); assert_eq!(type_set.lanes, num_set![1]); assert_eq!(type_set.floats, num_set![32, 64]); assert!(type_set.ints.is_empty()); - assert!(type_set.bools.is_empty()); assert!(type_set.specials.is_empty()); let type_set = TypeSetBuilder::new() @@ -816,7 +737,6 @@ fn test_typevar_builder() { assert_eq!(type_set.lanes, num_set![2, 4, 8, 16, 32, 64, 128, 256]); assert_eq!(type_set.floats, num_set![32, 64]); assert!(type_set.ints.is_empty()); - assert!(type_set.bools.is_empty()); assert!(type_set.specials.is_empty()); let type_set = TypeSetBuilder::new() @@ -827,7 +747,6 @@ fn test_typevar_builder() { assert_eq!(type_set.lanes, num_set![1, 2, 4, 8, 16, 32, 64, 128, 256]); assert_eq!(type_set.floats, num_set![32, 64]); assert!(type_set.ints.is_empty()); - assert!(type_set.bools.is_empty()); assert!(type_set.specials.is_empty()); let type_set = TypeSetBuilder::new() @@ -839,12 +758,10 @@ fn test_typevar_builder() { assert_eq!(type_set.floats, num_set![32, 64]); assert!(type_set.dynamic_lanes.is_empty()); assert!(type_set.ints.is_empty()); - assert!(type_set.bools.is_empty()); assert!(type_set.specials.is_empty()); let type_set = TypeSetBuilder::new() .ints(Interval::All) - .bools(Interval::All) .floats(Interval::All) .dynamic_simd_lanes(Interval::All) .includes_scalars(false) @@ -854,7 +771,6 @@ fn test_typevar_builder() { num_set![2, 4, 8, 16, 32, 64, 128, 256] ); assert_eq!(type_set.ints, num_set![8, 16, 32, 64, 128]); - assert_eq!(type_set.bools, num_set![1, 8, 16, 32, 64, 128]); assert_eq!(type_set.floats, num_set![32, 64]); assert_eq!(type_set.lanes, num_set![1]); assert!(type_set.specials.is_empty()); @@ -871,14 +787,12 @@ fn test_typevar_builder() { assert_eq!(type_set.floats, num_set![32, 64]); assert_eq!(type_set.lanes, num_set![1]); assert!(type_set.ints.is_empty()); - assert!(type_set.bools.is_empty()); assert!(type_set.specials.is_empty()); let type_set = TypeSetBuilder::new().ints(16..64).build(); assert_eq!(type_set.lanes, num_set![1]); assert_eq!(type_set.ints, num_set![16, 32, 64]); assert!(type_set.floats.is_empty()); - assert!(type_set.bools.is_empty()); assert!(type_set.specials.is_empty()); } @@ -897,17 +811,6 @@ fn test_dynamic_to_vector() { .ints(Interval::All) .build() ); - assert_eq!( - TypeSetBuilder::new() - .dynamic_simd_lanes(Interval::All) - .bools(Interval::All) - .build() - .dynamic_to_vector(), - TypeSetBuilder::new() - .simd_lanes(2..128) - .bools(Interval::All) - .build() - ); assert_eq!( TypeSetBuilder::new() 
.dynamic_simd_lanes(Interval::All) @@ -944,20 +847,6 @@ fn test_as_bool() { a.lane_of(), TypeSetBuilder::new().ints(8..8).floats(32..32).build() ); - - // Test as_bool with disjoint intervals. - let mut a_as_bool = TypeSetBuilder::new().simd_lanes(2..8).build(); - a_as_bool.bools = num_set![8, 32]; - assert_eq!(a.as_bool(), a_as_bool); - - let b = TypeSetBuilder::new() - .simd_lanes(1..8) - .ints(8..8) - .floats(32..32) - .build(); - let mut b_as_bool = TypeSetBuilder::new().simd_lanes(1..8).build(); - b_as_bool.bools = num_set![1, 8, 32]; - assert_eq!(b.as_bool(), b_as_bool); } #[test] @@ -1002,14 +891,6 @@ fn test_forward_images() { TypeSetBuilder::new().floats(32..64).build().half_width(), TypeSetBuilder::new().floats(32..32).build() ); - assert_eq!( - TypeSetBuilder::new().bools(1..8).build().half_width(), - empty_set - ); - assert_eq!( - TypeSetBuilder::new().bools(1..32).build().half_width(), - TypeSetBuilder::new().bools(8..16).build() - ); // Double width. assert_eq!( @@ -1028,14 +909,6 @@ fn test_forward_images() { TypeSetBuilder::new().floats(32..64).build().double_width(), TypeSetBuilder::new().floats(64..64).build() ); - assert_eq!( - TypeSetBuilder::new().bools(1..16).build().double_width(), - TypeSetBuilder::new().bools(16..32).build() - ); - assert_eq!( - TypeSetBuilder::new().bools(32..64).build().double_width(), - TypeSetBuilder::new().bools(64..128).build() - ); } #[test] @@ -1069,10 +942,6 @@ fn test_typeset_singleton() { TypeSetBuilder::new().floats(64..64).build().get_singleton(), ValueType::Lane(shared_types::Float::F64.into()) ); - assert_eq!( - TypeSetBuilder::new().bools(1..1).build().get_singleton(), - ValueType::Lane(shared_types::Bool::B1.into()) - ); assert_eq!( TypeSetBuilder::new() .simd_lanes(4..4) @@ -1110,7 +979,6 @@ fn test_typevar_singleton() { assert_eq!(typevar.name, "i32"); assert_eq!(typevar.type_set.ints, num_set![32]); assert!(typevar.type_set.floats.is_empty()); - assert!(typevar.type_set.bools.is_empty()); assert!(typevar.type_set.specials.is_empty()); assert_eq!(typevar.type_set.lanes, num_set![1]); @@ -1123,6 +991,5 @@ fn test_typevar_singleton() { assert!(typevar.type_set.ints.is_empty()); assert_eq!(typevar.type_set.floats, num_set![32]); assert_eq!(typevar.type_set.lanes, num_set![4]); - assert!(typevar.type_set.bools.is_empty()); assert!(typevar.type_set.specials.is_empty()); } diff --git a/cranelift/codegen/meta/src/gen_inst.rs b/cranelift/codegen/meta/src/gen_inst.rs index 581297a40eba..ba65cd339e8d 100644 --- a/cranelift/codegen/meta/src/gen_inst.rs +++ b/cranelift/codegen/meta/src/gen_inst.rs @@ -769,9 +769,6 @@ fn typeset_to_string(ts: &TypeSet) -> String { if !ts.floats.is_empty() { result += &format!(", floats={}", iterable_to_string(&ts.floats)); } - if !ts.bools.is_empty() { - result += &format!(", bools={}", iterable_to_string(&ts.bools)); - } if !ts.specials.is_empty() { result += &format!(", specials=[{}]", iterable_to_string(&ts.specials)); } @@ -804,7 +801,6 @@ pub(crate) fn gen_typesets_table(type_sets: &UniqueTable, fmt: &mut For gen_bitset(&ts.dynamic_lanes, "dynamic_lanes", 16, fmt); gen_bitset(&ts.ints, "ints", 8, fmt); gen_bitset(&ts.floats, "floats", 8, fmt); - gen_bitset(&ts.bools, "bools", 8, fmt); gen_bitset(&ts.refs, "refs", 8, fmt); }); fmt.line("},"); diff --git a/cranelift/codegen/meta/src/shared/formats.rs b/cranelift/codegen/meta/src/shared/formats.rs index 84c2a39af735..93a3f781c9c0 100644 --- a/cranelift/codegen/meta/src/shared/formats.rs +++ b/cranelift/codegen/meta/src/shared/formats.rs @@ -43,7 +43,6 @@ 
pub(crate) struct Formats { pub(crate) ternary_imm8: Rc, pub(crate) trap: Rc, pub(crate) unary: Rc, - pub(crate) unary_bool: Rc, pub(crate) unary_const: Rc, pub(crate) unary_global_value: Rc, pub(crate) unary_ieee32: Rc, @@ -62,8 +61,6 @@ impl Formats { unary_ieee64: Builder::new("UnaryIeee64").imm(&imm.ieee64).build(), - unary_bool: Builder::new("UnaryBool").imm(&imm.boolean).build(), - unary_const: Builder::new("UnaryConst").imm(&imm.pool_constant).build(), unary_global_value: Builder::new("UnaryGlobalValue") diff --git a/cranelift/codegen/meta/src/shared/immediates.rs b/cranelift/codegen/meta/src/shared/immediates.rs index 8b5b0d994e9e..6e1e93347320 100644 --- a/cranelift/codegen/meta/src/shared/immediates.rs +++ b/cranelift/codegen/meta/src/shared/immediates.rs @@ -44,11 +44,6 @@ pub(crate) struct Immediates { /// IEEE 754-2008 binary64 interchange format. pub ieee64: OperandKind, - /// An immediate boolean operand. - /// - /// This type of immediate boolean can interact with SSA values with any BoolType type. - pub boolean: OperandKind, - /// A condition code for comparing integer values. /// /// This enumerated operand kind is used for the `icmp` instruction and corresponds to the @@ -142,7 +137,6 @@ impl Immediates { "ir::immediates::Ieee64", "A 64-bit immediate floating point number.", ), - boolean: new_imm("imm", "bool", "An immediate boolean."), intcc: { let mut intcc_values = HashMap::new(); intcc_values.insert("eq", "Equal"); diff --git a/cranelift/codegen/meta/src/shared/instructions.rs b/cranelift/codegen/meta/src/shared/instructions.rs index 7d0b92c5b0f5..ebeaf9d646a9 100644 --- a/cranelift/codegen/meta/src/shared/instructions.rs +++ b/cranelift/codegen/meta/src/shared/instructions.rs @@ -37,17 +37,14 @@ fn define_control_flow( .is_branch(true), ); - let Testable = &TypeVar::new( - "Testable", - "A scalar boolean or integer type", - TypeSetBuilder::new() - .ints(Interval::All) - .bools(Interval::All) - .build(), + let ScalarTruthy = &TypeVar::new( + "ScalarTruthy", + "A scalar truthy type", + TypeSetBuilder::new().ints(Interval::All).build(), ); { - let c = &Operand::new("c", Testable).with_doc("Controlling value to test"); + let c = &Operand::new("c", ScalarTruthy).with_doc("Controlling value to test"); ig.push( Inst::new( @@ -226,7 +223,7 @@ fn define_control_flow( .is_terminator(true), ); - let c = &Operand::new("c", Testable).with_doc("Controlling value to test"); + let c = &Operand::new("c", ScalarTruthy).with_doc("Controlling value to test"); ig.push( Inst::new( "trapz", @@ -255,7 +252,7 @@ fn define_control_flow( .can_trap(true), ); - let c = &Operand::new("c", Testable).with_doc("Controlling value to test"); + let c = &Operand::new("c", ScalarTruthy).with_doc("Controlling value to test"); ig.push( Inst::new( "trapnz", @@ -412,7 +409,6 @@ fn define_simd_lane_access( TypeSetBuilder::new() .ints(Interval::All) .floats(Interval::All) - .bools(Interval::All) .simd_lanes(Interval::All) .dynamic_simd_lanes(Interval::All) .includes_scalars(false) @@ -685,7 +681,7 @@ pub(crate) fn define( let iflags: &TypeVar = &ValueType::Special(types::Flag::IFlags.into()).into(); let fflags: &TypeVar = &ValueType::Special(types::Flag::FFlags.into()).into(); - let b1: &TypeVar = &ValueType::from(LaneType::from(types::Bool::B1)).into(); + let i8: &TypeVar = &ValueType::from(LaneType::from(types::Int::I8)).into(); let f32_: &TypeVar = &ValueType::from(LaneType::from(types::Float::F32)).into(); let f64_: &TypeVar = &ValueType::from(LaneType::from(types::Float::F64)).into(); @@ -700,19 +696,10 
@@ pub(crate) fn define( .build(), ); - let Bool = &TypeVar::new( - "Bool", - "A scalar or vector boolean type", - TypeSetBuilder::new() - .bools(Interval::All) - .simd_lanes(Interval::All) - .build(), - ); - - let ScalarBool = &TypeVar::new( - "ScalarBool", - "A scalar boolean type", - TypeSetBuilder::new().bools(Interval::All).build(), + let ScalarTruthy = &TypeVar::new( + "ScalarTruthy", + "A scalar truthy type", + TypeSetBuilder::new().ints(Interval::All).build(), ); let iB = &TypeVar::new( @@ -733,33 +720,22 @@ pub(crate) fn define( TypeSetBuilder::new().refs(Interval::All).build(), ); - let Testable = &TypeVar::new( - "Testable", - "A scalar boolean or integer type", - TypeSetBuilder::new() - .ints(Interval::All) - .bools(Interval::All) - .build(), - ); - let TxN = &TypeVar::new( "TxN", "A SIMD vector type", TypeSetBuilder::new() .ints(Interval::All) .floats(Interval::All) - .bools(Interval::All) .simd_lanes(Interval::All) .includes_scalars(false) .build(), ); let Any = &TypeVar::new( "Any", - "Any integer, float, boolean, or reference scalar or vector type", + "Any integer, float, or reference scalar or vector type", TypeSetBuilder::new() .ints(Interval::All) .floats(Interval::All) - .bools(Interval::All) .refs(Interval::All) .simd_lanes(Interval::All) .includes_scalars(true) @@ -1419,24 +1395,6 @@ pub(crate) fn define( .operands_out(vec![a]), ); - let N = &Operand::new("N", &imm.boolean); - let a = &Operand::new("a", Bool).with_doc("A constant boolean scalar or vector value"); - - ig.push( - Inst::new( - "bconst", - r#" - Boolean constant. - - Create a scalar boolean SSA value with an immediate constant value, or - a boolean vector where all the lanes have the same value. - "#, - &formats.unary_bool, - ) - .operands_in(vec![N]) - .operands_out(vec![a]), - ); - let N = &Operand::new("N", &imm.pool_constant) .with_doc("The 16 immediate bytes of a 128-bit vector"); let a = &Operand::new("a", TxN).with_doc("A constant vector value"); @@ -1463,7 +1421,6 @@ pub(crate) fn define( lane counts and widths", TypeSetBuilder::new() .ints(8..8) - .bools(8..8) .simd_lanes(16..16) .includes_scalars(false) .build(), @@ -1513,7 +1470,7 @@ pub(crate) fn define( &formats.nullary, )); - let c = &Operand::new("c", Testable).with_doc("Controlling value to test"); + let c = &Operand::new("c", ScalarTruthy).with_doc("Controlling value to test"); let x = &Operand::new("x", Any).with_doc("Value to use when `c` is true"); let y = &Operand::new("y", Any).with_doc("Value to use when `c` is false"); let a = &Operand::new("a", Any); @@ -1640,7 +1597,6 @@ pub(crate) fn define( TypeSetBuilder::new() .ints(Interval::All) .floats(Interval::All) - .bools(Interval::All) .simd_lanes(1..128) .includes_scalars(true) .build(), @@ -1680,7 +1636,7 @@ pub(crate) fn define( r#" Vector lane select. - Select lanes from ``x`` or ``y`` controlled by the lanes of the boolean + Select lanes from ``x`` or ``y`` controlled by the lanes of the truthy vector ``c``. "#, &formats.ternary, @@ -1689,7 +1645,7 @@ pub(crate) fn define( .operands_out(vec![a]), ); - let s = &Operand::new("s", b1); + let s = &Operand::new("s", i8); ig.push( Inst::new( @@ -1760,8 +1716,8 @@ pub(crate) fn define( | sgt | ugt | Greater than | | sle | ule | Less than or equal | - When this instruction compares integer vectors, it returns a boolean - vector of lane-wise comparisons. + When this instruction compares integer vectors, it returns a vector of + lane-wise comparisons. 
"#, &formats.int_compare, ) @@ -1769,7 +1725,7 @@ pub(crate) fn define( .operands_out(vec![a]), ); - let a = &Operand::new("a", b1); + let a = &Operand::new("a", i8); let x = &Operand::new("x", iB); let Y = &Operand::new("Y", &imm.imm64); @@ -2158,10 +2114,10 @@ pub(crate) fn define( let x = &Operand::new("x", iB); let y = &Operand::new("y", iB); - let c_in = &Operand::new("c_in", b1).with_doc("Input carry flag"); - let c_out = &Operand::new("c_out", b1).with_doc("Output carry flag"); - let b_in = &Operand::new("b_in", b1).with_doc("Input borrow flag"); - let b_out = &Operand::new("b_out", b1).with_doc("Output borrow flag"); + let c_in = &Operand::new("c_in", i8).with_doc("Input carry flag"); + let c_out = &Operand::new("c_out", i8).with_doc("Output carry flag"); + let b_in = &Operand::new("b_in", i8).with_doc("Input borrow flag"); + let b_out = &Operand::new("b_out", i8).with_doc("Output borrow flag"); let c_if_in = &Operand::new("c_in", iflags); let c_if_out = &Operand::new("c_out", iflags); @@ -2430,11 +2386,10 @@ pub(crate) fn define( let bits = &TypeVar::new( "bits", - "Any integer, float, or boolean scalar or vector type", + "Any integer, float, or vector type", TypeSetBuilder::new() .ints(Interval::All) .floats(Interval::All) - .bools(Interval::All) .simd_lanes(Interval::All) .includes_scalars(true) .build(), @@ -2916,7 +2871,7 @@ pub(crate) fn define( floating point comparisons of the same name. When this instruction compares floating point vectors, it returns a - boolean vector with the results of lane-wise comparisons. + vector with the results of lane-wise comparisons. "#, &formats.float_compare, ) @@ -3195,7 +3150,7 @@ pub(crate) fn define( .operands_out(vec![a]), ); - let a = &Operand::new("a", b1); + let a = &Operand::new("a", i8); let x = &Operand::new("x", Ref); ig.push( @@ -3213,7 +3168,7 @@ pub(crate) fn define( .operands_out(vec![a]), ); - let a = &Operand::new("a", b1); + let a = &Operand::new("a", i8); let x = &Operand::new("x", Ref); ig.push( @@ -3233,7 +3188,7 @@ pub(crate) fn define( let Cond = &Operand::new("Cond", &imm.intcc); let f = &Operand::new("f", iflags); - let a = &Operand::new("a", b1); + let a = &Operand::new("a", i8); ig.push( Inst::new( @@ -3329,80 +3284,11 @@ pub(crate) fn define( .operands_out(vec![a]), ); - let Bool = &TypeVar::new( - "Bool", - "A scalar boolean type", - TypeSetBuilder::new().bools(Interval::All).build(), - ); - - let BoolTo = &TypeVar::new( - "BoolTo", - "A smaller boolean type", - TypeSetBuilder::new().bools(Interval::All).build(), - ); - - let x = &Operand::new("x", Bool); - let a = &Operand::new("a", BoolTo); - - ig.push( - Inst::new( - "breduce", - r#" - Convert `x` to a smaller boolean type by discarding the most significant bits. - "#, - &formats.unary, - ) - .operands_in(vec![x]) - .operands_out(vec![a]), - ); - - let BoolTo = &TypeVar::new( - "BoolTo", - "A larger boolean type", - TypeSetBuilder::new().bools(Interval::All).build(), - ); - let x = &Operand::new("x", Bool); - let a = &Operand::new("a", BoolTo); - - ig.push( - Inst::new( - "bextend", - r#" - Convert `x` to a larger boolean type - "#, - &formats.unary, - ) - .operands_in(vec![x]) - .operands_out(vec![a]), - ); - - let IntTo = &TypeVar::new( - "IntTo", - "A scalar integer type", - TypeSetBuilder::new().ints(Interval::All).build(), - ); - let x = &Operand::new("x", ScalarBool); - let a = &Operand::new("a", IntTo); - - ig.push( - Inst::new( - "bint", - r#" - Convert `x` to an integer. - - True maps to 1 and false maps to 0. 
- "#, - &formats.unary, - ) - .operands_in(vec![x]) - .operands_out(vec![a]), - ); - - let Bool = &TypeVar::new( - "Bool", - "A scalar or vector boolean type", + let Truthy = &TypeVar::new( + "Truthy", + "A scalar or vector whose values are truthy", TypeSetBuilder::new() - .bools(Interval::All) + .ints(Interval::All) .simd_lanes(Interval::All) .build(), ); @@ -3414,7 +3300,7 @@ pub(crate) fn define( .simd_lanes(Interval::All) .build(), ); - let x = &Operand::new("x", Bool); + let x = &Operand::new("x", Truthy); let a = &Operand::new("a", IntTo); ig.push( @@ -4136,7 +4022,6 @@ pub(crate) fn define( TypeSetBuilder::new() .ints(Interval::All) .floats(Interval::All) - .bools(Interval::All) .dynamic_simd_lanes(Interval::All) .build(), ); diff --git a/cranelift/codegen/meta/src/shared/types.rs b/cranelift/codegen/meta/src/shared/types.rs index 631e5433e953..85ff018538a8 100644 --- a/cranelift/codegen/meta/src/shared/types.rs +++ b/cranelift/codegen/meta/src/shared/types.rs @@ -1,49 +1,5 @@ //! This module predefines all the Cranelift scalar types. -#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] -pub(crate) enum Bool { - /// 1-bit bool. - B1 = 1, - /// 8-bit bool. - B8 = 8, - /// 16-bit bool. - B16 = 16, - /// 32-bit bool. - B32 = 32, - /// 64-bit bool. - B64 = 64, - /// 128-bit bool. - B128 = 128, -} - -/// This provides an iterator through all of the supported bool variants. -pub(crate) struct BoolIterator { - index: u8, -} - -impl BoolIterator { - pub fn new() -> Self { - Self { index: 0 } - } -} - -impl Iterator for BoolIterator { - type Item = Bool; - fn next(&mut self) -> Option { - let res = match self.index { - 0 => Some(Bool::B1), - 1 => Some(Bool::B8), - 2 => Some(Bool::B16), - 3 => Some(Bool::B32), - 4 => Some(Bool::B64), - 5 => Some(Bool::B128), - _ => return None, - }; - self.index += 1; - res - } -} - #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] pub(crate) enum Int { /// 8-bit int. @@ -187,18 +143,6 @@ impl Iterator for ReferenceIterator { mod iter_tests { use super::*; - #[test] - fn bool_iter_works() { - let mut bool_iter = BoolIterator::new(); - assert_eq!(bool_iter.next(), Some(Bool::B1)); - assert_eq!(bool_iter.next(), Some(Bool::B8)); - assert_eq!(bool_iter.next(), Some(Bool::B16)); - assert_eq!(bool_iter.next(), Some(Bool::B32)); - assert_eq!(bool_iter.next(), Some(Bool::B64)); - assert_eq!(bool_iter.next(), Some(Bool::B128)); - assert_eq!(bool_iter.next(), None); - } - #[test] fn int_iter_works() { let mut int_iter = IntIterator::new(); diff --git a/cranelift/codegen/src/data_value.rs b/cranelift/codegen/src/data_value.rs index 86f1882c325b..2da55fc9c7e6 100644 --- a/cranelift/codegen/src/data_value.rs +++ b/cranelift/codegen/src/data_value.rs @@ -12,7 +12,6 @@ use core::fmt::{self, Display, Formatter}; #[allow(missing_docs)] #[derive(Clone, Debug, PartialOrd)] pub enum DataValue { - B(bool), I8(i8), I16(i16), I32(i32), @@ -33,8 +32,6 @@ impl PartialEq for DataValue { fn eq(&self, other: &Self) -> bool { use DataValue::*; match (self, other) { - (B(l), B(r)) => l == r, - (B(_), _) => false, (I8(l), I8(r)) => l == r, (I8(_), _) => false, (I16(l), I16(r)) => l == r, @@ -84,7 +81,6 @@ impl DataValue { /// Return the Cranelift IR [Type] for this [DataValue]. pub fn ty(&self) -> Type { match self { - DataValue::B(_) => types::B8, // A default type. 
DataValue::I8(_) | DataValue::U8(_) => types::I8, DataValue::I16(_) | DataValue::U16(_) => types::I16, DataValue::I32(_) | DataValue::U32(_) => types::I32, @@ -105,14 +101,6 @@ impl DataValue { } } - /// Return true if the value is a bool (i.e. `DataValue::B`). - pub fn is_bool(&self) -> bool { - match self { - DataValue::B(_) => true, - _ => false, - } - } - /// Write a [DataValue] to a slice. /// /// # Panics: @@ -120,8 +108,6 @@ impl DataValue { /// Panics if the slice does not have enough space to accommodate the [DataValue] pub fn write_to_slice(&self, dst: &mut [u8]) { match self { - DataValue::B(true) => dst[..16].copy_from_slice(&[u8::MAX; 16][..]), - DataValue::B(false) => dst[..16].copy_from_slice(&[0; 16][..]), DataValue::I8(i) => dst[..1].copy_from_slice(&i.to_ne_bytes()[..]), DataValue::I16(i) => dst[..2].copy_from_slice(&i.to_ne_bytes()[..]), DataValue::I32(i) => dst[..4].copy_from_slice(&i.to_ne_bytes()[..]), @@ -153,13 +139,6 @@ impl DataValue { types::F64 => DataValue::F64(Ieee64::with_bits(u64::from_ne_bytes( src[..8].try_into().unwrap(), ))), - _ if ty.is_bool() => { - // Only `ty.bytes()` are guaranteed to be written - // so we can only test the first n bytes of `src` - - let size = ty.bytes() as usize; - DataValue::B(src[..size].iter().any(|&i| i != 0)) - } _ if ty.is_vector() => { if ty.bytes() == 16 { DataValue::V128(src[..16].try_into().unwrap()) @@ -175,13 +154,7 @@ impl DataValue { /// Write a [DataValue] to a memory location. pub unsafe fn write_value_to(&self, p: *mut u128) { - // Since `DataValue` does not have type info for bools we always - // write out a full 16 byte slot. - let size = match self.ty() { - ty if ty.is_bool() => 16, - ty => ty.bytes() as usize, - }; - + let size = self.ty().bytes() as usize; self.write_to_slice(std::slice::from_raw_parts_mut(p as *mut u8, size)); } @@ -270,7 +243,6 @@ macro_rules! build_conversion_impl { } }; } -build_conversion_impl!(bool, B, B8); build_conversion_impl!(i8, I8, I8); build_conversion_impl!(i16, I16, I16); build_conversion_impl!(i32, I32, I32); @@ -294,7 +266,6 @@ impl From for DataValue { impl Display for DataValue { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { - DataValue::B(dv) => write!(f, "{}", dv), DataValue::I8(dv) => write!(f, "{}", dv), DataValue::I16(dv) => write!(f, "{}", dv), DataValue::I32(dv) => write!(f, "{}", dv), @@ -354,16 +325,6 @@ mod test { #[test] fn type_conversions() { - assert_eq!(DataValue::B(true).ty(), types::B8); - assert_eq!( - TryInto::::try_into(DataValue::B(false)).unwrap(), - false - ); - assert_eq!( - TryInto::::try_into(DataValue::B(false)).unwrap_err(), - DataValueCastFailure::TryInto(types::B8, types::I32) - ); - assert_eq!(DataValue::V128([0; 16]).ty(), types::I8X16); assert_eq!( TryInto::<[u8; 16]>::try_into(DataValue::V128([0; 16])).unwrap(), diff --git a/cranelift/codegen/src/egraph/node.rs b/cranelift/codegen/src/egraph/node.rs index 2e8ea42b2f94..3fb350224157 100644 --- a/cranelift/codegen/src/egraph/node.rs +++ b/cranelift/codegen/src/egraph/node.rs @@ -322,15 +322,11 @@ impl std::ops::Add for Cost { pub(crate) fn op_cost(op: &InstructionImms) -> Cost { match op.opcode() { // Constants. - Opcode::Iconst | Opcode::F32const | Opcode::F64const | Opcode::Bconst => Cost(0), + Opcode::Iconst | Opcode::F32const | Opcode::F64const => Cost(0), // Extends/reduces. 
- Opcode::Bextend - | Opcode::Breduce - | Opcode::Uextend - | Opcode::Sextend - | Opcode::Ireduce - | Opcode::Iconcat - | Opcode::Isplit => Cost(1), + Opcode::Uextend | Opcode::Sextend | Opcode::Ireduce | Opcode::Iconcat | Opcode::Isplit => { + Cost(1) + } // "Simple" arithmetic. Opcode::Iadd | Opcode::Isub diff --git a/cranelift/codegen/src/inst_predicates.rs b/cranelift/codegen/src/inst_predicates.rs index cc16f9ac6d8b..76245722f5fb 100644 --- a/cranelift/codegen/src/inst_predicates.rs +++ b/cranelift/codegen/src/inst_predicates.rs @@ -2,7 +2,6 @@ use crate::ir::immediates::Offset32; use crate::ir::instructions::BranchInfo; use crate::ir::{Block, DataFlowGraph, Function, Inst, InstructionData, Opcode, Type, Value}; -use crate::machinst::ty_bits; use cranelift_entity::EntityRef; /// Preserve instructions with used result values. @@ -53,7 +52,7 @@ pub fn has_lowering_side_effect(func: &Function, inst: Inst) -> bool { op != Opcode::GetPinnedReg && (has_side_effect(func, inst) || op.can_load()) } -/// Is the given instruction a constant value (`iconst`, `fconst`, `bconst`) that can be +/// Is the given instruction a constant value (`iconst`, `fconst`) that can be /// represented in 64 bits? pub fn is_constant_64bit(func: &Function, inst: Inst) -> Option { let data = &func.dfg[inst]; @@ -64,21 +63,6 @@ pub fn is_constant_64bit(func: &Function, inst: Inst) -> Option { &InstructionData::UnaryImm { imm, .. } => Some(imm.bits() as u64), &InstructionData::UnaryIeee32 { imm, .. } => Some(imm.bits() as u64), &InstructionData::UnaryIeee64 { imm, .. } => Some(imm.bits()), - &InstructionData::UnaryBool { imm, .. } => { - let imm = if imm { - let bits = ty_bits(func.dfg.value_type(func.dfg.inst_results(inst)[0])); - - if bits < 64 { - (1u64 << bits) - 1 - } else { - u64::MAX - } - } else { - 0 - }; - - Some(imm) - } _ => None, } } diff --git a/cranelift/codegen/src/ir/builder.rs b/cranelift/codegen/src/ir/builder.rs index 4fd8b0665f3b..b28bcd1e01cb 100644 --- a/cranelift/codegen/src/ir/builder.rs +++ b/cranelift/codegen/src/ir/builder.rs @@ -238,7 +238,7 @@ mod tests { // Formula. 
let cmp = pos.ins().icmp(IntCC::Equal, arg0, v0); - assert_eq!(pos.func.dfg.value_type(cmp), B1); + assert_eq!(pos.func.dfg.value_type(cmp), I8); } #[test] diff --git a/cranelift/codegen/src/ir/entities.rs b/cranelift/codegen/src/ir/entities.rs index 7418b84460f3..315ea2183a8e 100644 --- a/cranelift/codegen/src/ir/entities.rs +++ b/cranelift/codegen/src/ir/entities.rs @@ -58,7 +58,6 @@ impl Block { /// - [`iconst`](super::InstBuilder::iconst) for integer constants /// - [`f32const`](super::InstBuilder::f32const) for 32-bit float constants /// - [`f64const`](super::InstBuilder::f64const) for 64-bit float constants -/// - [`bconst`](super::InstBuilder::bconst) for boolean constants /// - [`vconst`](super::InstBuilder::vconst) for vector constants /// - [`null`](super::InstBuilder::null) for null reference constants /// diff --git a/cranelift/codegen/src/ir/extfunc.rs b/cranelift/codegen/src/ir/extfunc.rs index 07bc52c8c1fb..6b07315b15a4 100644 --- a/cranelift/codegen/src/ir/extfunc.rs +++ b/cranelift/codegen/src/ir/extfunc.rs @@ -372,7 +372,7 @@ impl<'a> fmt::Display for DisplayableExtFuncData<'a> { #[cfg(test)] mod tests { use super::*; - use crate::ir::types::{B8, F32, I32}; + use crate::ir::types::{F32, I32, I8}; use alloc::string::ToString; #[test] @@ -424,7 +424,7 @@ mod tests { assert_eq!(sig.to_string(), "(i32) -> f32 windows_fastcall"); sig.params.push(AbiParam::new(I32.by(4).unwrap())); assert_eq!(sig.to_string(), "(i32, i32x4) -> f32 windows_fastcall"); - sig.returns.push(AbiParam::new(B8)); - assert_eq!(sig.to_string(), "(i32, i32x4) -> f32, b8 windows_fastcall"); + sig.returns.push(AbiParam::new(I8)); + assert_eq!(sig.to_string(), "(i32, i32x4) -> f32, i8 windows_fastcall"); } } diff --git a/cranelift/codegen/src/ir/instructions.rs b/cranelift/codegen/src/ir/instructions.rs index b6c20a575c8f..d3a4bef04496 100644 --- a/cranelift/codegen/src/ir/instructions.rs +++ b/cranelift/codegen/src/ir/instructions.rs @@ -575,8 +575,6 @@ pub struct ValueTypeSet { pub ints: BitSet8, /// Allowed float widths pub floats: BitSet8, - /// Allowed bool widths - pub bools: BitSet8, /// Allowed ref widths pub refs: BitSet8, /// Allowed dynamic vectors minimum lane sizes @@ -593,8 +591,6 @@ impl ValueTypeSet { self.ints.contains(l2b) } else if scalar.is_float() { self.floats.contains(l2b) - } else if scalar.is_bool() { - self.bools.contains(l2b) } else if scalar.is_ref() { self.refs.contains(l2b) } else { @@ -621,10 +617,8 @@ impl ValueTypeSet { types::I32 } else if self.floats.max().unwrap_or(0) > 5 { types::F32 - } else if self.bools.max().unwrap_or(0) > 5 { - types::B32 } else { - types::B1 + types::I8 }; t.by(1 << self.lanes.min().unwrap()).unwrap() } @@ -860,7 +854,6 @@ mod tests { lanes: BitSet16::from_range(0, 8), ints: BitSet8::from_range(4, 7), floats: BitSet8::from_range(0, 0), - bools: BitSet8::from_range(3, 7), refs: BitSet8::from_range(5, 7), dynamic_lanes: BitSet16::from_range(0, 4), }; @@ -870,9 +863,6 @@ mod tests { assert!(vts.contains(I32X4)); assert!(vts.contains(I32X4XN)); assert!(!vts.contains(F32)); - assert!(!vts.contains(B1)); - assert!(vts.contains(B8)); - assert!(vts.contains(B64)); assert!(vts.contains(R32)); assert!(vts.contains(R64)); assert_eq!(vts.example().to_string(), "i32"); @@ -881,7 +871,6 @@ mod tests { lanes: BitSet16::from_range(0, 8), ints: BitSet8::from_range(0, 0), floats: BitSet8::from_range(5, 7), - bools: BitSet8::from_range(3, 7), refs: BitSet8::from_range(0, 0), dynamic_lanes: BitSet16::from_range(0, 8), }; @@ -891,7 +880,6 @@ mod tests { lanes: 
BitSet16::from_range(1, 8), ints: BitSet8::from_range(0, 0), floats: BitSet8::from_range(5, 7), - bools: BitSet8::from_range(3, 7), refs: BitSet8::from_range(0, 0), dynamic_lanes: BitSet16::from_range(0, 8), }; @@ -899,23 +887,18 @@ mod tests { let vts = ValueTypeSet { lanes: BitSet16::from_range(2, 8), - ints: BitSet8::from_range(0, 0), + ints: BitSet8::from_range(3, 7), floats: BitSet8::from_range(0, 0), - bools: BitSet8::from_range(3, 7), refs: BitSet8::from_range(0, 0), dynamic_lanes: BitSet16::from_range(0, 8), }; - assert!(!vts.contains(B32X2)); - assert!(vts.contains(B32X4)); - assert!(vts.contains(B16X4XN)); - assert_eq!(vts.example().to_string(), "b32x4"); + assert_eq!(vts.example().to_string(), "i32x4"); let vts = ValueTypeSet { // TypeSet(lanes=(1, 256), ints=(8, 64)) lanes: BitSet16::from_range(0, 9), ints: BitSet8::from_range(3, 7), floats: BitSet8::from_range(0, 0), - bools: BitSet8::from_range(0, 0), refs: BitSet8::from_range(0, 0), dynamic_lanes: BitSet16::from_range(0, 8), }; diff --git a/cranelift/codegen/src/ir/types.rs b/cranelift/codegen/src/ir/types.rs index 311addadf7cf..3c5e3b4bc6bd 100644 --- a/cranelift/codegen/src/ir/types.rs +++ b/cranelift/codegen/src/ir/types.rs @@ -17,10 +17,7 @@ use target_lexicon::{PointerWidth, Triple}; /// /// Basic floating point types: `F32` and `F64`. IEEE single and double precision. /// -/// Boolean types: `B1`, `B8`, `B16`, `B32`, `B64`, and `B128`. These all encode 'true' or 'false'. The -/// larger types use redundant bits. -/// -/// SIMD vector types have power-of-two lanes, up to 256. Lanes can be any int/float/bool type. +/// SIMD vector types have power-of-two lanes, up to 256. Lanes can be any int/float type. /// /// Note that this is encoded in a `u16` currently for extensibility, /// but allows only 14 bits to be used due to some bitpacking tricks @@ -59,12 +56,11 @@ impl Type { /// Get log_2 of the number of bits in a lane. pub fn log2_lane_bits(self) -> u32 { match self.lane_type() { - B1 => 0, - B8 | I8 => 3, - B16 | I16 => 4, - B32 | I32 | F32 | R32 => 5, - B64 | I64 | F64 | R64 => 6, - B128 | I128 => 7, + I8 => 3, + I16 => 4, + I32 | F32 | R32 => 5, + I64 | F64 | R64 => 6, + I128 => 7, _ => 0, } } @@ -72,12 +68,11 @@ impl Type { /// Get the number of bits in a lane. pub fn lane_bits(self) -> u32 { match self.lane_type() { - B1 => 1, - B8 | I8 => 8, - B16 | I16 => 16, - B32 | I32 | F32 | R32 => 32, - B64 | I64 | F64 | R64 => 64, - B128 | I128 => 128, + I8 => 8, + I16 => 16, + I32 | F32 | R32 => 32, + I64 | F64 | R64 => 64, + I128 => 128, _ => 0, } } @@ -141,13 +136,13 @@ impl Type { pub fn as_bool_pedantic(self) -> Self { // Replace the low 4 bits with the boolean version, preserve the high 4 bits. self.replace_lanes(match self.lane_type() { - B8 | I8 => B8, - B16 | I16 => B16, - B32 | I32 | F32 => B32, - B64 | I64 | F64 => B64, + I8 => I8, + I16 => I16, + I32 | F32 => I32, + I64 | F64 => I64, R32 | R64 => panic!("Reference types should not convert to bool"), - B128 | I128 => B128, - _ => B1, + I128 => I128, + _ => I8, }) } @@ -157,7 +152,7 @@ impl Type { /// Scalar types are all converted to `b1` which is usually what you want. 
pub fn as_bool(self) -> Self { if !self.is_vector() { - B1 + I8 } else { self.as_bool_pedantic() } @@ -169,11 +164,11 @@ impl Type { /// Scalar types follow this same rule, but `b1` is converted into `i8` pub fn as_int(self) -> Self { self.replace_lanes(match self.lane_type() { - I8 | B1 | B8 => I8, - I16 | B16 => I16, - I32 | B32 | F32 => I32, - I64 | B64 | F64 => I64, - I128 | B128 => I128, + I8 => I8, + I16 => I16, + I32 | F32 => I32, + I64 | F64 => I64, + I128 => I128, _ => unimplemented!(), }) } @@ -187,10 +182,6 @@ impl Type { I64 => I32, I128 => I64, F64 => F32, - B16 => B8, - B32 => B16, - B64 => B32, - B128 => B64, _ => return None, })) } @@ -204,10 +195,6 @@ impl Type { I32 => I64, I64 => I128, F32 => F64, - B8 => B16, - B16 => B32, - B32 => B64, - B64 => B128, _ => return None, })) } @@ -241,19 +228,6 @@ impl Type { self.0 >= constants::DYNAMIC_VECTOR_BASE } - /// Is this a scalar boolean type? - pub fn is_bool(self) -> bool { - match self { - B1 | B8 | B16 | B32 | B64 | B128 => true, - _ => false, - } - } - - /// Is this a vector boolean type? - pub fn is_bool_vector(self) -> bool { - self.is_vector() && self.lane_type().is_bool() - } - /// Is this a scalar integer type? pub fn is_int(self) -> bool { match self { @@ -453,19 +427,6 @@ impl Type { } } - /// Coerces boolean types (scalar and vectors) into their integer counterparts. - /// B1 is converted into I8. - pub fn coerce_bools_to_ints(self) -> Self { - let is_scalar_bool = self.is_bool(); - let is_vector_bool = self.is_vector() && self.lane_type().is_bool(); - - if is_scalar_bool || is_vector_bool { - self.as_int() - } else { - self - } - } - /// Gets a bit-level representation of the type. Used only /// internally for efficiently storing types. pub(crate) fn repr(self) -> u16 { @@ -481,9 +442,7 @@ impl Type { impl Display for Type { fn fmt(&self, f: &mut Formatter) -> fmt::Result { - if self.is_bool() { - write!(f, "b{}", self.lane_bits()) - } else if self.is_int() { + if self.is_int() { write!(f, "i{}", self.lane_bits()) } else if self.is_float() { write!(f, "f{}", self.lane_bits()) @@ -506,9 +465,7 @@ impl Display for Type { impl Debug for Type { fn fmt(&self, f: &mut Formatter) -> fmt::Result { - if self.is_bool() { - write!(f, "types::B{}", self.lane_bits()) - } else if self.is_int() { + if self.is_int() { write!(f, "types::I{}", self.lane_bits()) } else if self.is_float() { write!(f, "types::F{}", self.lane_bits()) @@ -548,12 +505,6 @@ mod tests { assert_eq!(0, IFLAGS.bits()); assert_eq!(FFLAGS, FFLAGS.lane_type()); assert_eq!(0, FFLAGS.bits()); - assert_eq!(B1, B1.lane_type()); - assert_eq!(B8, B8.lane_type()); - assert_eq!(B16, B16.lane_type()); - assert_eq!(B32, B32.lane_type()); - assert_eq!(B64, B64.lane_type()); - assert_eq!(B128, B128.lane_type()); assert_eq!(I8, I8.lane_type()); assert_eq!(I16, I16.lane_type()); assert_eq!(I32, I32.lane_type()); @@ -561,7 +512,6 @@ mod tests { assert_eq!(I128, I128.lane_type()); assert_eq!(F32, F32.lane_type()); assert_eq!(F64, F64.lane_type()); - assert_eq!(B1, B1.by(8).unwrap().lane_type()); assert_eq!(I32, I32X4.lane_type()); assert_eq!(F64, F64X2.lane_type()); assert_eq!(R32, R32.lane_type()); @@ -570,12 +520,6 @@ mod tests { assert_eq!(INVALID.lane_bits(), 0); assert_eq!(IFLAGS.lane_bits(), 0); assert_eq!(FFLAGS.lane_bits(), 0); - assert_eq!(B1.lane_bits(), 1); - assert_eq!(B8.lane_bits(), 8); - assert_eq!(B16.lane_bits(), 16); - assert_eq!(B32.lane_bits(), 32); - assert_eq!(B64.lane_bits(), 64); - assert_eq!(B128.lane_bits(), 128); assert_eq!(I8.lane_bits(), 8); 
assert_eq!(I16.lane_bits(), 16); assert_eq!(I32.lane_bits(), 32); @@ -592,12 +536,6 @@ mod tests { assert_eq!(INVALID.half_width(), None); assert_eq!(INVALID.half_width(), None); assert_eq!(FFLAGS.half_width(), None); - assert_eq!(B1.half_width(), None); - assert_eq!(B8.half_width(), None); - assert_eq!(B16.half_width(), Some(B8)); - assert_eq!(B32.half_width(), Some(B16)); - assert_eq!(B64.half_width(), Some(B32)); - assert_eq!(B128.half_width(), Some(B64)); assert_eq!(I8.half_width(), None); assert_eq!(I16.half_width(), Some(I8)); assert_eq!(I32.half_width(), Some(I16)); @@ -610,12 +548,6 @@ mod tests { assert_eq!(INVALID.double_width(), None); assert_eq!(IFLAGS.double_width(), None); assert_eq!(FFLAGS.double_width(), None); - assert_eq!(B1.double_width(), None); - assert_eq!(B8.double_width(), Some(B16)); - assert_eq!(B16.double_width(), Some(B32)); - assert_eq!(B32.double_width(), Some(B64)); - assert_eq!(B64.double_width(), Some(B128)); - assert_eq!(B128.double_width(), None); assert_eq!(I8.double_width(), Some(I16)); assert_eq!(I16.double_width(), Some(I32)); assert_eq!(I32.double_width(), Some(I64)); @@ -634,7 +566,6 @@ mod tests { assert_eq!(big.bits(), 64 * 256); assert_eq!(big.half_vector().unwrap().to_string(), "f64x128"); - assert_eq!(B1.by(2).unwrap().half_vector().unwrap().to_string(), "b1"); assert_eq!(I32.half_vector(), None); assert_eq!(INVALID.half_vector(), None); @@ -647,7 +578,6 @@ mod tests { fn dynamic_vectors() { // Identification. assert_eq!(I8X16XN.is_dynamic_vector(), true); - assert_eq!(B16X4XN.is_dynamic_vector(), true); assert_eq!(F32X8XN.is_dynamic_vector(), true); assert_eq!(F64X4XN.is_dynamic_vector(), true); assert_eq!(I128X2XN.is_dynamic_vector(), true); @@ -656,28 +586,19 @@ mod tests { assert_eq!(I16X8XN.lane_count(), 0); assert_eq!(I16X8XN.min_lane_count(), 8); - // Size - assert_eq!(B32X2XN.bits(), 0); - assert_eq!(B32X2XN.min_bits(), 64); - // Change lane counts assert_eq!(F64X4XN.half_vector(), None); assert_eq!(I8X8XN.by(2), None); // Conversions to and from vectors. 
- assert_eq!(B8.by(8).unwrap().vector_to_dynamic(), Some(B8X8XN)); assert_eq!(I8.by(16).unwrap().vector_to_dynamic(), Some(I8X16XN)); assert_eq!(I16.by(8).unwrap().vector_to_dynamic(), Some(I16X8XN)); - assert_eq!(B16.by(16).unwrap().vector_to_dynamic(), Some(B16X16XN)); - assert_eq!(B32.by(2).unwrap().vector_to_dynamic(), Some(B32X2XN)); - assert_eq!(B32.by(8).unwrap().vector_to_dynamic(), Some(B32X8XN)); assert_eq!(I32.by(4).unwrap().vector_to_dynamic(), Some(I32X4XN)); assert_eq!(F32.by(4).unwrap().vector_to_dynamic(), Some(F32X4XN)); assert_eq!(F64.by(2).unwrap().vector_to_dynamic(), Some(F64X2XN)); assert_eq!(I128.by(2).unwrap().vector_to_dynamic(), Some(I128X2XN)); assert_eq!(I128X2XN.dynamic_to_vector(), Some(I128X2)); - assert_eq!(B64X2XN.dynamic_to_vector(), Some(B64X2)); assert_eq!(F32X4XN.dynamic_to_vector(), Some(F32X4)); assert_eq!(F64X4XN.dynamic_to_vector(), Some(F64X4)); assert_eq!(I32X2XN.dynamic_to_vector(), Some(I32X2)); @@ -686,7 +607,6 @@ mod tests { assert_eq!(I8X32XN.dynamic_to_vector(), Some(I8X32)); assert_eq!(I8X64.vector_to_dynamic(), None); - assert_eq!(B16X32.vector_to_dynamic(), None); assert_eq!(F32X16.vector_to_dynamic(), None); assert_eq!(I64X8.vector_to_dynamic(), None); assert_eq!(I128X4.vector_to_dynamic(), None); @@ -696,12 +616,6 @@ mod tests { fn format_scalars() { assert_eq!(IFLAGS.to_string(), "iflags"); assert_eq!(FFLAGS.to_string(), "fflags"); - assert_eq!(B1.to_string(), "b1"); - assert_eq!(B8.to_string(), "b8"); - assert_eq!(B16.to_string(), "b16"); - assert_eq!(B32.to_string(), "b32"); - assert_eq!(B64.to_string(), "b64"); - assert_eq!(B128.to_string(), "b128"); assert_eq!(I8.to_string(), "i8"); assert_eq!(I16.to_string(), "i16"); assert_eq!(I32.to_string(), "i32"); @@ -715,11 +629,6 @@ mod tests { #[test] fn format_vectors() { - assert_eq!(B1.by(8).unwrap().to_string(), "b1x8"); - assert_eq!(B8.by(1).unwrap().to_string(), "b8"); - assert_eq!(B16.by(256).unwrap().to_string(), "b16x256"); - assert_eq!(B32.by(4).unwrap().by(2).unwrap().to_string(), "b32x8"); - assert_eq!(B64.by(8).unwrap().to_string(), "b64x8"); assert_eq!(I8.by(64).unwrap().to_string(), "i8x64"); assert_eq!(F64.by(2).unwrap().to_string(), "f64x2"); assert_eq!(I8.by(3), None); @@ -729,19 +638,10 @@ mod tests { #[test] fn as_bool() { - assert_eq!(I32X4.as_bool(), B32X4); - assert_eq!(I32.as_bool(), B1); - assert_eq!(I32X4.as_bool_pedantic(), B32X4); - assert_eq!(I32.as_bool_pedantic(), B32); - } - - #[test] - fn as_int() { - assert_eq!(B32X4.as_int(), I32X4); - assert_eq!(B8X8.as_int(), I8X8); - assert_eq!(B1.as_int(), I8); - assert_eq!(B8.as_int(), I8); - assert_eq!(B128.as_int(), I128); + assert_eq!(I32X4.as_bool(), I32X4); + assert_eq!(I32.as_bool(), I8); + assert_eq!(I32X4.as_bool_pedantic(), I32X4); + assert_eq!(I32.as_bool_pedantic(), I32); } #[test] diff --git a/cranelift/codegen/src/isa/aarch64/inst.isle b/cranelift/codegen/src/isa/aarch64/inst.isle index 0ed2b14e8173..52aa391983f9 100644 --- a/cranelift/codegen/src/isa/aarch64/inst.isle +++ b/cranelift/codegen/src/isa/aarch64/inst.isle @@ -1156,12 +1156,6 @@ (rule (scalar_size $I64) (ScalarSize.Size64)) (rule (scalar_size $I128) (ScalarSize.Size128)) -(rule (scalar_size $B8) (ScalarSize.Size8)) -(rule (scalar_size $B16) (ScalarSize.Size16)) -(rule (scalar_size $B32) (ScalarSize.Size32)) -(rule (scalar_size $B64) (ScalarSize.Size64)) -(rule (scalar_size $B128) (ScalarSize.Size128)) - (rule (scalar_size $F32) (ScalarSize.Size32)) (rule (scalar_size $F64) (ScalarSize.Size64)) @@ -1947,19 +1941,13 @@ ;; Helper for 
materializing a boolean value into a register from ;; flags. -(decl materialize_bool_result (u8 Cond) ConsumesFlags) -(rule (materialize_bool_result 1 cond) +(decl materialize_bool_result (Cond) ConsumesFlags) +(rule (materialize_bool_result cond) (let ((dst WritableReg (temp_writable_reg $I64))) (ConsumesFlags.ConsumesFlagsReturnsReg (MInst.CSet dst cond) dst))) -(rule -1 (materialize_bool_result _ty_bits cond) - (let ((dst WritableReg (temp_writable_reg $I64))) - (ConsumesFlags.ConsumesFlagsReturnsReg - (MInst.CSetm dst cond) - dst))) - (decl cmn_imm (OperandSize Reg Imm12) ProducesFlags) (rule (cmn_imm size src1 src2) (ProducesFlags.ProducesFlagsSideEffect @@ -2224,6 +2212,18 @@ (MInst.CSel dst cond if_true if_false) dst))) +;; Helper for constructing `cset` instructions. +(decl cset (Cond) ConsumesFlags) +(rule (cset cond) + (let ((dst WritableReg (temp_writable_reg $I64))) + (ConsumesFlags.ConsumesFlagsReturnsReg (MInst.CSet dst cond) dst))) + +;; Helper for constructing `csetm` instructions. +(decl csetm (Cond) ConsumesFlags) +(rule (csetm cond) + (let ((dst WritableReg (temp_writable_reg $I64))) + (ConsumesFlags.ConsumesFlagsReturnsReg (MInst.CSetm dst cond) dst))) + ;; Helper for generating a `CSNeg` instruction. ;; ;; Note that this doesn't actually emit anything, instead it produces a @@ -2244,21 +2244,14 @@ (produces_flags_append inst_input (MInst.CCmp size rn rm nzcv cond))) ;; Helper for generating `MInst.CCmpImm` instructions. -(decl ccmp_imm (OperandSize u8 Reg UImm5 NZCV Cond) ConsumesFlags) -(rule 1 (ccmp_imm size 1 rn imm nzcv cond) +(decl ccmp_imm (OperandSize Reg UImm5 NZCV Cond) ConsumesFlags) +(rule 1 (ccmp_imm size rn imm nzcv cond) (let ((dst WritableReg (temp_writable_reg $I64))) (ConsumesFlags.ConsumesFlagsTwiceReturnsValueRegs (MInst.CCmpImm size rn imm nzcv cond) (MInst.CSet dst cond) (value_reg dst)))) -(rule (ccmp_imm size _ty_bits rn imm nzcv cond) - (let ((dst WritableReg (temp_writable_reg $I64))) - (ConsumesFlags.ConsumesFlagsTwiceReturnsValueRegs - (MInst.CCmpImm size rn imm nzcv cond) - (MInst.CSetm dst cond) - (value_reg dst)))) - ;; Helpers for generating `add` instructions. (decl add (Type Reg Reg) Reg) @@ -3381,11 +3374,11 @@ ;; Integers <= 64-bits. (rule -2 (lower_icmp_into_reg cond rn rm in_ty out_ty) - (if (ty_int_bool_ref_scalar_64 in_ty)) + (if (ty_int_ref_scalar_64 in_ty)) (let ((cc Cond (cond_code cond))) (with_flags (lower_icmp cond rn rm in_ty) - (materialize_bool_result (ty_bits out_ty) cc)))) + (materialize_bool_result cc)))) (rule 1 (lower_icmp cond rn rm (fits_in_16 ty)) (if (signed_cond_code cond)) @@ -3398,23 +3391,23 @@ (let ((rn Reg (put_in_reg_zext32 rn))) (cmp_extend (operand_size ty) rn rm (lower_icmp_extend ty $false)))) (rule -3 (lower_icmp cond rn (imm12_from_value rm) ty) - (if (ty_int_bool_ref_scalar_64 ty)) + (if (ty_int_ref_scalar_64 ty)) (cmp_imm (operand_size ty) rn rm)) (rule -4 (lower_icmp cond rn rm ty) - (if (ty_int_bool_ref_scalar_64 ty)) + (if (ty_int_ref_scalar_64 ty)) (cmp (operand_size ty) rn rm)) ;; 128-bit integers. 
-(rule (lower_icmp_into_reg cond @ (IntCC.Equal) rn rm $I128 out_ty)
+(rule (lower_icmp_into_reg cond @ (IntCC.Equal) rn rm $I128 $I8)
  (let ((cc Cond (cond_code cond)))
    (with_flags (lower_icmp cond rn rm $I128)
-               (materialize_bool_result (ty_bits out_ty) cc))))
-(rule (lower_icmp_into_reg cond @ (IntCC.NotEqual) rn rm $I128 out_ty)
+(rule (lower_icmp_into_reg cond @ (IntCC.NotEqual) rn rm $I128 $I8)
  (let ((cc Cond (cond_code cond)))
    (with_flags (lower_icmp cond rn rm $I128)
-               (materialize_bool_result (ty_bits out_ty) cc))))
+               (materialize_bool_result cc))))
 ;; cmp lhs_lo, rhs_lo
 ;; ccmp lhs_hi, rhs_hi, #0, eq
@@ -3440,7 +3433,7 @@
 ;; cmp lhs_hi, rhs_hi
 ;; cset tmp2, cond
 ;; csel dst, tmp1, tmp2, eq
-(rule -1 (lower_icmp_into_reg cond lhs rhs $I128 out_ty)
+(rule -1 (lower_icmp_into_reg cond lhs rhs $I128 $I8)
  (let ((unsigned_cond Cond (cond_code (intcc_unsigned cond)))
        (cond Cond (cond_code cond))
        (lhs ValueRegs (put_in_regs lhs))
@@ -3449,78 +3442,100 @@
        (lhs_hi Reg (value_regs_get lhs 1))
        (rhs_lo Reg (value_regs_get rhs 0))
        (rhs_hi Reg (value_regs_get rhs 1))
-       (tmp1 ValueRegs
-             (with_flags (cmp (OperandSize.Size64) lhs_lo rhs_lo)
-                         (materialize_bool_result
-                          (ty_bits out_ty) unsigned_cond)))
-       (tmp1 Reg (value_regs_get tmp1 0))
-       (dst ValueRegs
-            (with_flags (cmp (OperandSize.Size64) lhs_hi rhs_hi)
-                        (lower_icmp_i128_consumer cond (ty_bits out_ty)
-                                                  tmp1 lhs_hi rhs_hi))))
-   dst))
+       (tmp1 Reg (with_flags_reg (cmp (OperandSize.Size64) lhs_lo rhs_lo)
+                                 (materialize_bool_result unsigned_cond))))
+   (with_flags (cmp (OperandSize.Size64) lhs_hi rhs_hi)
+               (lower_icmp_i128_consumer cond tmp1))))
-(decl lower_icmp_i128_consumer (Cond u8 Reg Reg Reg) ConsumesFlags)
-(rule (lower_icmp_i128_consumer cond 1 tmp1 lhs_hi rhs_hi)
+(decl lower_icmp_i128_consumer (Cond Reg) ConsumesFlags)
+(rule (lower_icmp_i128_consumer cond tmp1)
  (let ((tmp2 WritableReg (temp_writable_reg $I64))
       (dst WritableReg (temp_writable_reg $I64)))
   (ConsumesFlags.ConsumesFlagsTwiceReturnsValueRegs
    (MInst.CSet tmp2 cond)
    (MInst.CSel dst (Cond.Eq) tmp1 tmp2)
    (value_reg dst))))
-(rule (lower_icmp_i128_consumer cond 128 tmp1 lhs_hi rhs_hi)
- (let ((tmp2 WritableReg (temp_writable_reg $I64))
-      (dst WritableReg (temp_writable_reg $I64)))
-  (ConsumesFlags.ConsumesFlagsTwiceReturnsValueRegs
-   (MInst.CSetm tmp2 cond)
-   (MInst.CSel dst (Cond.Eq) tmp1 tmp2)
-   (value_regs dst dst))))
-(rule -1 (lower_icmp_i128_consumer cond _out_ty_bits tmp1 lhs_hi rhs_hi)
- (let ((tmp2 WritableReg (temp_writable_reg $I64))
-      (dst WritableReg (temp_writable_reg $I64)))
-  (ConsumesFlags.ConsumesFlagsTwiceReturnsValueRegs
-   (MInst.CSetm tmp2 cond)
-   (MInst.CSel dst (Cond.Eq) tmp1 tmp2)
-   (value_reg dst))))
+
+(decl lower_bmask (Type Type ValueRegs) ValueRegs)
+
+;; For conversions that fit in a register, we can use csetm.
+;;
+;; cmp val, #0
+;; csetm res, ne
+(rule 0
+  (lower_bmask (fits_in_64 _) (fits_in_64 _) val)
+  (with_flags_reg
+    (cmp64_imm (value_regs_get val 0) (u8_into_imm12 0))
+    (csetm (Cond.Ne))))
+
+;; For conversions from a 128-bit value into a 64-bit or smaller one, we or the
+;; two registers of the 128-bit value together, and then recurse with the
+;; combined value as a 64-bit test.
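+;; (Or-ing the two halves preserves exactly the "is any bit set" property
+;; that the zero test relies on.)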
+;; +;; orr val, lo, hi +;; cmp val, #0 +;; csetm res, ne +(rule 1 + (lower_bmask (fits_in_64 ty) $I128 val) + (let ((lo Reg (value_regs_get val 0)) + (hi Reg (value_regs_get val 1)) + (combined Reg (orr $I64 lo hi))) + (lower_bmask ty $I64 (value_reg combined)))) + +;; For converting from a smaller type into i128, duplicate the result of +;; converting to i64. +(rule 2 + (lower_bmask $I128 (fits_in_64 ty) val) + (let ((res ValueRegs (lower_bmask $I64 ty val)) + (res Reg (value_regs_get res 0))) + (value_regs res res))) + +;; For conversions to a 128-bit mask, we duplicate the result of converting to +;; an I64. +(rule 3 + (lower_bmask $I128 $I128 val) + (let ((res ValueRegs (lower_bmask $I64 $I128 val)) + (res Reg (value_regs_get res 0))) + (value_regs res res))) ;; Exceptional `lower_icmp_into_flags` rules. ;; We need to guarantee that the flags for `cond` are correct, so we ;; compare `dst` with 1. (rule (lower_icmp_into_flags cond @ (IntCC.SignedGreaterThanOrEqual) lhs rhs $I128) - (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $B1)) + (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $I8)) (dst Reg (value_regs_get dst 0)) (tmp Reg (imm $I64 (ImmExtend.Sign) 1))) ;; mov tmp, #1 (cmp (OperandSize.Size64) dst tmp))) (rule (lower_icmp_into_flags cond @ (IntCC.UnsignedGreaterThanOrEqual) lhs rhs $I128) - (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $B1)) + (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $I8)) (dst Reg (value_regs_get dst 0)) (tmp Reg (imm $I64 (ImmExtend.Zero) 1))) (cmp (OperandSize.Size64) dst tmp))) (rule (lower_icmp_into_flags cond @ (IntCC.SignedLessThanOrEqual) lhs rhs $I128) - (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $B1)) + (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $I8)) (dst Reg (value_regs_get dst 0)) (tmp Reg (imm $I64 (ImmExtend.Sign) 1))) (cmp (OperandSize.Size64) tmp dst))) (rule (lower_icmp_into_flags cond @ (IntCC.UnsignedLessThanOrEqual) lhs rhs $I128) - (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $B1)) + (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $I8)) (dst Reg (value_regs_get dst 0)) (tmp Reg (imm $I64 (ImmExtend.Zero) 1))) (cmp (OperandSize.Size64) tmp dst))) ;; For strict comparisons, we compare with 0. 
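;; Since `dst` holds a 0/1 value, `dst cond 0` (or `0 cond dst` for the
;; less-than cases) holds exactly when the original comparison did.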
(rule (lower_icmp_into_flags cond @ (IntCC.SignedGreaterThan) lhs rhs $I128) - (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $B1)) + (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $I8)) (dst Reg (value_regs_get dst 0))) (cmp (OperandSize.Size64) dst (zero_reg)))) (rule (lower_icmp_into_flags cond @ (IntCC.UnsignedGreaterThan) lhs rhs $I128) - (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $B1)) + (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $I8)) (dst Reg (value_regs_get dst 0))) (cmp (OperandSize.Size64) dst (zero_reg)))) (rule (lower_icmp_into_flags cond @ (IntCC.SignedLessThan) lhs rhs $I128) - (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $B1)) + (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $I8)) (dst Reg (value_regs_get dst 0))) (cmp (OperandSize.Size64) (zero_reg) dst))) (rule (lower_icmp_into_flags cond @ (IntCC.UnsignedLessThan) lhs rhs $I128) - (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $B1)) + (let ((dst ValueRegs (lower_icmp_into_reg cond lhs rhs $I128 $I8)) (dst Reg (value_regs_get dst 0))) (cmp (OperandSize.Size64) (zero_reg) dst))) @@ -3548,7 +3563,7 @@ (MInst.CSel dst_hi cond rn_hi rm_hi) (value_regs dst_lo dst_hi))))) (rule 1 (lower_select flags cond ty rn rm) - (if (ty_int_bool_ref_scalar_64 ty)) + (if (ty_int_ref_scalar_64 ty)) (with_flags flags (csel cond rn rm))) ;; Helper for emitting `MInst.Jump` instructions. diff --git a/cranelift/codegen/src/isa/aarch64/inst/imms.rs b/cranelift/codegen/src/isa/aarch64/inst/imms.rs index c18737693b96..47be7a449804 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/imms.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/imms.rs @@ -221,9 +221,6 @@ impl UImm12Scaled { /// Create a UImm12Scaled from a raw offset and the known scale type, if /// possible. pub fn maybe_from_i64(value: i64, scale_ty: Type) -> Option { - // Ensure the type is at least one byte. - let scale_ty = if scale_ty == B1 { B8 } else { scale_ty }; - let scale = scale_ty.bytes(); assert!(scale.is_power_of_two()); let scale = scale as i64; diff --git a/cranelift/codegen/src/isa/aarch64/inst/mod.rs b/cranelift/codegen/src/isa/aarch64/inst/mod.rs index 5e3f4e4a36fb..2effc1fb8bfa 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/mod.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/mod.rs @@ -1,9 +1,7 @@ //! This module defines aarch64-specific machine instruction types. use crate::binemit::{Addend, CodeOffset, Reloc}; -use crate::ir::types::{ - B1, B128, B16, B32, B64, B8, F32, F64, FFLAGS, I128, I16, I32, I64, I8, I8X16, IFLAGS, R32, R64, -}; +use crate::ir::types::{F32, F64, FFLAGS, I128, I16, I32, I64, I8, I8X16, IFLAGS, R32, R64}; use crate::ir::{types, ExternalName, MemFlags, Opcode, Type}; use crate::isa::CallConv; use crate::machinst::*; @@ -440,22 +438,22 @@ impl Inst { /// Generic constructor for a load (zero-extending where appropriate). pub fn gen_load(into_reg: Writable, mem: AMode, ty: Type, flags: MemFlags) -> Inst { match ty { - B1 | B8 | I8 => Inst::ULoad8 { + I8 => Inst::ULoad8 { rd: into_reg, mem, flags, }, - B16 | I16 => Inst::ULoad16 { + I16 => Inst::ULoad16 { rd: into_reg, mem, flags, }, - B32 | I32 | R32 => Inst::ULoad32 { + I32 | R32 => Inst::ULoad32 { rd: into_reg, mem, flags, }, - B64 | I64 | R64 => Inst::ULoad64 { + I64 | R64 => Inst::ULoad64 { rd: into_reg, mem, flags, @@ -491,22 +489,22 @@ impl Inst { /// Generic constructor for a store. 
pub fn gen_store(mem: AMode, from_reg: Reg, ty: Type, flags: MemFlags) -> Inst { match ty { - B1 | B8 | I8 => Inst::Store8 { + I8 => Inst::Store8 { rd: from_reg, mem, flags, }, - B16 | I16 => Inst::Store16 { + I16 => Inst::Store16 { rd: from_reg, mem, flags, }, - B32 | I32 | R32 => Inst::Store32 { + I32 | R32 => Inst::Store32 { rd: from_reg, mem, flags, }, - B64 | I64 | R64 => Inst::Store64 { + I64 | R64 => Inst::Store64 { rd: from_reg, mem, flags, @@ -1209,9 +1207,7 @@ impl MachInst for Inst { match ty { F64 => Inst::load_fp_constant64(to_reg.unwrap(), value as u64, alloc_tmp), F32 => Inst::load_fp_constant32(to_reg.unwrap(), value as u32, alloc_tmp), - B1 | B8 | B16 | B32 | B64 | I8 | I16 | I32 | I64 | R32 | R64 => { - Inst::load_constant(to_reg.unwrap(), value as u64) - } + I8 | I16 | I32 | I64 | R32 | R64 => Inst::load_constant(to_reg.unwrap(), value as u64), I128 => Inst::load_constant128(to_regs, value), _ => panic!("Cannot generate constant for type: {}", ty), } @@ -1236,17 +1232,11 @@ impl MachInst for Inst { I16 => Ok((&[RegClass::Int], &[I16])), I32 => Ok((&[RegClass::Int], &[I32])), I64 => Ok((&[RegClass::Int], &[I64])), - B1 => Ok((&[RegClass::Int], &[B1])), - B8 => Ok((&[RegClass::Int], &[B8])), - B16 => Ok((&[RegClass::Int], &[B16])), - B32 => Ok((&[RegClass::Int], &[B32])), - B64 => Ok((&[RegClass::Int], &[B64])), R32 => panic!("32-bit reftype pointer should never be seen on AArch64"), R64 => Ok((&[RegClass::Int], &[R64])), F32 => Ok((&[RegClass::Float], &[F32])), F64 => Ok((&[RegClass::Float], &[F64])), I128 => Ok((&[RegClass::Int, RegClass::Int], &[I64, I64])), - B128 => Ok((&[RegClass::Int, RegClass::Int], &[B64, B64])), _ if ty.is_vector() => { assert!(ty.bits() <= 128); Ok((&[RegClass::Float], &[I8X16])) diff --git a/cranelift/codegen/src/isa/aarch64/lower.isle b/cranelift/codegen/src/isa/aarch64/lower.isle index dbe56d7a0e1a..32ea9ed357f6 100644 --- a/cranelift/codegen/src/isa/aarch64/lower.isle +++ b/cranelift/codegen/src/isa/aarch64/lower.isle @@ -19,14 +19,6 @@ (rule (lower (has_type ty (iconst (u64_from_imm64 n)))) (imm ty (ImmExtend.Zero) n)) -;;;; Rules for `bconst` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(rule (lower (has_type ty (bconst $false))) - (imm ty (ImmExtend.Zero) 0)) - -(rule (lower (has_type ty (bconst $true))) - (imm ty (ImmExtend.Zero) 1)) - ;;;; Rules for `null` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (has_type ty (null))) @@ -142,10 +134,10 @@ (rule (lower (has_type $F64X2 (scalar_to_vector x))) (fpu_extend x (ScalarSize.Size64))) -(rule -1 (lower (scalar_to_vector x @ (value_type (ty_int_bool_64 _)))) +(rule -1 (lower (scalar_to_vector x @ (value_type $I64))) (mov_to_fpu x (ScalarSize.Size64))) -(rule -2 (lower (scalar_to_vector x @ (value_type (int_bool_fits_in_32 _)))) +(rule -2 (lower (scalar_to_vector x @ (value_type (int_fits_in_32 _)))) (mov_to_fpu (put_in_reg_zext32 x) (ScalarSize.Size32))) ;;;; Rules for `vall_true` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -159,18 +151,17 @@ ;; 0 when all input elements are true, i.e. non-zero, or a NaN otherwise ;; (either -1 or -2 when represented as an integer); NaNs are the only ;; floating-point numbers that compare unequal to themselves. 
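;; (It is the pairwise add of the two compare-against-zero lanes that yields
;; either 0 or one of those NaN bit patterns.)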
-(rule (lower (has_type out_ty (vall_true x @ (value_type (multi_lane 64 2))))) +(rule (lower (vall_true x @ (value_type (multi_lane 64 2)))) (let ((x1 Reg (cmeq0 x (VectorSize.Size64x2))) (x2 Reg (addp x1 x1 (VectorSize.Size64x2)))) (with_flags (fpu_cmp (ScalarSize.Size64) x2 x2) - (materialize_bool_result (ty_bits out_ty) (Cond.Eq))))) + (materialize_bool_result (Cond.Eq))))) -(rule (lower (has_type out_ty (vall_true x @ (value_type (multi_lane 32 2))))) +(rule (lower (vall_true x @ (value_type (multi_lane 32 2)))) (let ((x1 Reg (mov_from_vec x 0 (ScalarSize.Size64)))) (with_flags (cmp_rr_shift (OperandSize.Size64) (zero_reg) x1 32) (ccmp_imm (OperandSize.Size32) - (ty_bits out_ty) x1 (u8_into_uimm5 0) (nzcv $false $true $false $false) @@ -183,18 +174,18 @@ ;; mov xm, vn.d[0] ;; cmp xm, #0 ;; cset xm, ne -(rule -1 (lower (has_type out_ty (vall_true x @ (value_type (lane_fits_in_32 ty))))) +(rule -1 (lower (vall_true x @ (value_type (lane_fits_in_32 ty)))) (if (not_vec32x2 ty)) (let ((x1 Reg (vec_lanes (VecLanesOp.Uminv) x (vector_size ty))) (x2 Reg (mov_from_vec x1 0 (ScalarSize.Size64)))) (with_flags (cmp_imm (OperandSize.Size64) x2 (u8_into_imm12 0)) - (materialize_bool_result (ty_bits out_ty) (Cond.Ne))))) + (materialize_bool_result (Cond.Ne))))) ;;;; Rules for `vany_true` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule (lower (has_type out_ty (vany_true x @ (value_type in_ty)))) +(rule (lower (vany_true x @ (value_type in_ty))) (with_flags (vanytrue x in_ty) - (materialize_bool_result (ty_bits out_ty) (Cond.Ne)))) + (materialize_bool_result (Cond.Ne)))) ;;;; Rules for `iadd_pairwise` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -1536,60 +1527,11 @@ (rule -1 (lower (has_type ty (cls x))) (a64_cls ty x)) -;;;; Rules for `bint` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -;; Booleans are stored as all-zeroes (0) or all-ones (-1). We AND -;; out the LSB to give a 0 / 1-valued integer result. - -(rule 1 (lower (has_type $I128 (bint x))) - (let ((val ValueRegs x) - (in_lo Reg (value_regs_get val 0)) - (dst_lo Reg (and_imm $I32 in_lo (u64_into_imm_logic $I32 1))) - (dst_hi Reg (imm $I64 (ImmExtend.Zero) 0))) - (value_regs dst_lo dst_hi))) - -(rule (lower (bint x)) - (and_imm $I32 x (u64_into_imm_logic $I32 1))) +;;;; Rules for `bmask` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;;; Rules for `bmask`/`bextend` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -;; Bextend and Bmask both simply sign-extend. This works for: -;; - Bextend, because booleans are stored as 0 / -1, so we -;; sign-extend the -1 to a -1 in the wider width. -;; - Bmask, because the resulting integer mask value must be -;; all-ones (-1) if the argument is true. - -;; Use a common helper to type cast bools to either bool or integer types. -(decl cast_bool (Type Type Value) InstOutput) -(rule (lower (has_type out_ty (bextend x @ (value_type in_ty)))) - (cast_bool in_ty out_ty x)) +;; Bmask tests the value against zero, and uses `csetm` to assert the result. (rule (lower (has_type out_ty (bmask x @ (value_type in_ty)))) - (cast_bool in_ty out_ty x)) - - -;; If the target has the same or a smaller size than the source, it's a no-op. 
-(rule (cast_bool $B8 $I8 x) x) -(rule (cast_bool $B16 (fits_in_16 _out) x) x) -(rule (cast_bool $B32 (fits_in_32 _out) x) x) -(rule (cast_bool $B64 (fits_in_64 _out) x) x) - -;; Casting between 128 bits is a noop -(rule -1 (cast_bool (ty_int_bool_128 _in) (ty_int_bool_128 _out) x) - x) - -;; Converting from 128 bits to anything below we just ignore the top register -(rule -2 (cast_bool (ty_int_bool_128 _in) (fits_in_64 _out) x) - (value_regs_get x 0)) - -;; Extend to 64 bits first, then this will be all 0s or all 1s and we can -;; duplicate to both halves of 128 bits -(rule -3 (cast_bool in (ty_int_bool_128 _out) x) - (let ((tmp Reg (extend x $true (ty_bits in) 64))) - (value_regs tmp tmp))) - -;; Values that fit in a single register are sign extended normally -(rule -4 (cast_bool (fits_in_64 in) (fits_in_64 out) x) - (extend x $true (ty_bits in) (ty_bits out))) + (lower_bmask out_ty in_ty x)) ;;;; Rules for `popcnt` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -1648,7 +1590,7 @@ ;;;; Rules for `bitselect` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (has_type ty (bitselect c x y))) - (if (ty_int_bool_ref_scalar_64 ty)) + (if (ty_int_ref_scalar_64 ty)) (let ((tmp1 Reg (and_reg ty x c)) (tmp2 Reg (bic ty y c))) (orr ty tmp1 tmp2))) @@ -1661,22 +1603,15 @@ (rule (lower (has_type (ty_vec128 ty) (vselect c x y))) (bsl ty c x y)) -;;;; Rules for `ireduce` / `breduce` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;; Rules for `ireduce` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; T -> I{64,32,16,8}: We can simply pass through the value: values ;; are always stored with high bits undefined, so we can just leave ;; them be. (rule (lower (has_type ty (ireduce src))) - (if (ty_int_bool_ref_scalar_64 ty)) + (if (ty_int_ref_scalar_64 ty)) (value_regs_get src 0)) -;; Likewise for breduce. - -(rule (lower (has_type ty (breduce src))) - (if (ty_int_bool_ref_scalar_64 ty)) - (value_regs_get src 0)) - - ;;;; Rules for `fcmp` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule 4 (lower (has_type ty @ (multi_lane _ _) (fcmp (fcmp_zero_cond_not_eq cond) x y))) @@ -1706,9 +1641,7 @@ (rule 0 (lower (has_type out_ty (fcmp cond x @ (value_type (ty_scalar_float in_ty)) y))) (with_flags (fpu_cmp (scalar_size in_ty) x y) - (materialize_bool_result - (ty_bits out_ty) - (fp_cond_code cond)))) + (materialize_bool_result (fp_cond_code cond)))) (rule -1 (lower (has_type out_ty (fcmp cond x @ (value_type in_ty) y))) (if (ty_vector_float in_ty)) @@ -1740,8 +1673,8 @@ (vec_size VectorSize (vector_size ty))) (value_reg (int_cmp_zero_swap cond rn vec_size)))) -(rule -1 (lower (has_type out_ty (icmp cond x @ (value_type in_ty) y))) - (lower_icmp_into_reg cond x y in_ty out_ty)) +(rule -1 (lower (icmp cond x @ (value_type in_ty) y)) + (lower_icmp_into_reg cond x y in_ty $I8)) ;;;; Rules for `trap` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -1783,10 +1716,10 @@ ;;;; Rules for `trueff` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Verification ensures the input is always a single-def ffcmp. 
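;; The result is materialized as a 0/1 value via `cset`, independent of the
;; controlling type of the `trueff`.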
-(rule (lower (has_type ty (trueff cc insn @ (ffcmp x @ (value_type in_ty) y)))) +(rule (lower (trueff cc insn @ (ffcmp x @ (value_type in_ty) y))) (with_flags_reg (fpu_cmp (scalar_size in_ty) x y) - (materialize_bool_result (ty_bits ty) (fp_cond_code cc)))) + (materialize_bool_result (fp_cond_code cc)))) ;;;; Rules for `select` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -1797,13 +1730,6 @@ (lower_icmp_into_flags cc x y in_ty) cond ty rn rm))) -(rule (lower (has_type ty - (select _flags @ (bint (icmp cc x @ (value_type in_ty) y)) rn rm))) - (let ((cond Cond (cond_code cc))) - (lower_select - (lower_icmp_into_flags cc x y in_ty) - cond ty rn rm))) - (rule (lower (has_type ty (select _flags @ (fcmp cc x @ (value_type in_ty) y) rn rm))) (let ((cond Cond (fp_cond_code cc))) @@ -1811,13 +1737,6 @@ (fpu_cmp (scalar_size in_ty) x y) cond ty rn rm))) -(rule (lower (has_type ty - (select _flags @ (bint (fcmp cc x @ (value_type in_ty) y)) rn rm))) - (let ((cond Cond (fp_cond_code cc))) - (lower_select - (fpu_cmp (scalar_size in_ty) x y) - cond ty rn rm))) - (rule -1 (lower (has_type ty (select rcond @ (value_type (fits_in_32 _)) rn rm))) (let ((rcond Reg (put_in_reg_zext32 rcond))) (lower_select @@ -1865,18 +1784,12 @@ ;;;; Rules for `splat` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule -1 (lower (has_type ty (splat x @ (value_type in_ty)))) - (if (ty_int_bool_ref_scalar_64 in_ty)) + (if (ty_int_ref_scalar_64 in_ty)) (vec_dup x (vector_size ty))) (rule -2 (lower (has_type ty (splat x @ (value_type (ty_scalar_float _))))) (vec_dup_from_fpu x (vector_size ty))) -(rule (lower (has_type ty (splat (bconst (u64_from_bool n))))) - (splat_const n (vector_size ty))) - -(rule (lower (has_type ty (splat (breduce (bconst (u64_from_bool n)))))) - (splat_const n (vector_size ty))) - (rule (lower (has_type ty (splat (f32const (u64_from_ieee32 n))))) (splat_const n (vector_size ty))) @@ -2089,17 +2002,15 @@ ;;;; Rules for `IsNull` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule (lower (has_type out_ty (is_null x @ (value_type ty)))) +(rule (lower (is_null x @ (value_type ty))) (with_flags (cmp_imm (operand_size ty) x (u8_into_imm12 0)) - (materialize_bool_result - (ty_bits out_ty) (Cond.Eq)))) + (materialize_bool_result (Cond.Eq)))) ;;;; Rules for `IsInvalid` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule (lower (has_type out_ty (is_invalid x @ (value_type ty)))) +(rule (lower (is_invalid x @ (value_type ty))) (with_flags (cmn_imm (operand_size ty) x (u8_into_imm12 1)) - (materialize_bool_result - (ty_bits out_ty) (Cond.Eq)))) + (materialize_bool_result (Cond.Eq)))) ;;;; Rules for `Debugtrap` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -2325,18 +2236,18 @@ ; GPR => SIMD&FP (rule 4 (lower (has_type (ty_float_or_vec _) (bitcast x @ (value_type in_ty)))) - (if (ty_int_bool_ref_scalar_64 in_ty)) + (if (ty_int_ref_scalar_64 in_ty)) (mov_to_fpu x (scalar_size in_ty))) ; SIMD&FP => GPR (rule 3 (lower (has_type out_ty (bitcast x @ (value_type (fits_in_64 (ty_float_or_vec _)))))) - (if (ty_int_bool_ref_scalar_64 out_ty)) + (if (ty_int_ref_scalar_64 out_ty)) (mov_from_vec x 0 (scalar_size out_ty))) ; GPR <=> GPR (rule 2 (lower (has_type out_ty (bitcast x @ (value_type in_ty)))) - (if (ty_int_bool_ref_scalar_64 out_ty)) - (if (ty_int_bool_ref_scalar_64 in_ty)) + (if (ty_int_ref_scalar_64 out_ty)) + (if (ty_int_ref_scalar_64 in_ty)) x) (rule 1 (lower (has_type $I128 (bitcast x @ (value_type $I128)))) x) @@ -2352,7 +2263,7 @@ (rule 2 (lower (has_type 
(ty_scalar_float _) (extractlane val (u8_from_uimm8 0)))) val) -(rule 0 (lower (has_type (ty_int_bool ty) +(rule 0 (lower (has_type (ty_int ty) (extractlane val (u8_from_uimm8 lane)))) (mov_from_vec val lane (scalar_size ty))) @@ -2365,7 +2276,7 @@ ;;; Rules for `insertlane` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule 1 (lower (insertlane vec @ (value_type vty) - val @ (value_type (ty_int_bool _)) + val @ (value_type (ty_int _)) (u8_from_uimm8 lane))) (mov_to_vec vec val lane (vector_size vty))) @@ -2507,7 +2418,7 @@ ;;; Rules for `brz`/`brnz`/`brif`/`brff`/`bricmp` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; `brz` following `icmp`, possibly converted via `bint`. +;; `brz` following `icmp` (rule (lower_branch (brz (icmp cc x @ (value_type ty) y) _ _) targets) (let ((cond Cond (cond_code cc)) (cond Cond (invert_cond cond)) ;; negate for `brz` @@ -2517,16 +2428,7 @@ (with_flags_side_effect (lower_icmp_into_flags cc x y ty) (cond_br taken not_taken (cond_br_cond cond)))))) -(rule (lower_branch (brz (bint (icmp cc x @ (value_type ty) y)) _ _) targets) - (let ((cond Cond (cond_code cc)) - (cond Cond (invert_cond cond)) ;; negate for `brz` - (taken BranchTarget (branch_target targets 0)) - (not_taken BranchTarget (branch_target targets 1))) - (side_effect - (with_flags_side_effect (lower_icmp_into_flags cc x y ty) - (cond_br taken not_taken - (cond_br_cond cond)))))) -;; `brnz` following `icmp`, possibly converted via `bint`. +;; `brnz` following `icmp` (rule (lower_branch (brnz (icmp cc x @ (value_type ty) y) _ _) targets) (let ((cond Cond (cond_code cc)) (taken BranchTarget (branch_target targets 0)) @@ -2535,15 +2437,7 @@ (with_flags_side_effect (lower_icmp_into_flags cc x y ty) (cond_br taken not_taken (cond_br_cond cond)))))) -(rule (lower_branch (brnz (bint (icmp cc x @ (value_type ty) y)) _ _) targets) - (let ((cond Cond (cond_code cc)) - (taken BranchTarget (branch_target targets 0)) - (not_taken BranchTarget (branch_target targets 1))) - (side_effect - (with_flags_side_effect (lower_icmp_into_flags cc x y ty) - (cond_br taken not_taken - (cond_br_cond cond)))))) -;; `brz` following `fcmp`, possibly converted via `bint`. +;; `brz` following `fcmp` (rule (lower_branch (brz (fcmp cc x @ (value_type (ty_scalar_float ty)) y) _ _) targets) (let ((cond Cond (fp_cond_code cc)) (cond Cond (invert_cond cond)) ;; negate for `brz` @@ -2553,16 +2447,7 @@ (with_flags_side_effect (fpu_cmp (scalar_size ty) x y) (cond_br taken not_taken (cond_br_cond cond)))))) -(rule (lower_branch (brz (bint (fcmp cc x @ (value_type (ty_scalar_float ty)) y)) _ _) targets) - (let ((cond Cond (fp_cond_code cc)) - (cond Cond (invert_cond cond)) ;; negate for `brz` - (taken BranchTarget (branch_target targets 0)) - (not_taken BranchTarget (branch_target targets 1))) - (side_effect - (with_flags_side_effect (fpu_cmp (scalar_size ty) x y) - (cond_br taken not_taken - (cond_br_cond cond)))))) -;; `brnz` following `fcmp`, possibly converted via `bint`. 
+;; `brnz` following `fcmp` (rule (lower_branch (brnz (fcmp cc x @ (value_type (ty_scalar_float ty)) y) _ _) targets) (let ((cond Cond (fp_cond_code cc)) (taken BranchTarget (branch_target targets 0)) @@ -2571,14 +2456,6 @@ (with_flags_side_effect (fpu_cmp (scalar_size ty) x y) (cond_br taken not_taken (cond_br_cond cond)))))) -(rule (lower_branch (brnz (bint (fcmp cc x @ (value_type (ty_scalar_float ty)) y)) _ _) targets) - (let ((cond Cond (fp_cond_code cc)) - (taken BranchTarget (branch_target targets 0)) - (not_taken BranchTarget (branch_target targets 1))) - (side_effect - (with_flags_side_effect (fpu_cmp (scalar_size ty) x y) - (cond_br taken not_taken - (cond_br_cond cond)))))) ;; standard `brz` (rule -1 (lower_branch (brz c @ (value_type $I128) _ _) targets) (let ((flags ProducesFlags (flags_to_producesflags c)) @@ -2592,7 +2469,7 @@ (with_flags_side_effect flags (cond_br taken not_taken (cond_br_zero rt)))))) (rule -2 (lower_branch (brz c @ (value_type ty) _ _) targets) - (if (ty_int_bool_ref_scalar_64 ty)) + (if (ty_int_ref_scalar_64 ty)) (let ((flags ProducesFlags (flags_to_producesflags c)) (rt Reg (put_in_reg_zext64 c)) (taken BranchTarget (branch_target targets 0)) @@ -2613,7 +2490,7 @@ (with_flags_side_effect flags (cond_br taken not_taken (cond_br_not_zero rt)))))) (rule -2 (lower_branch (brnz c @ (value_type ty) _ _) targets) - (if (ty_int_bool_ref_scalar_64 ty)) + (if (ty_int_ref_scalar_64 ty)) (let ((flags ProducesFlags (flags_to_producesflags c)) (rt Reg (put_in_reg_zext64 c)) (taken BranchTarget (branch_target targets 0)) diff --git a/cranelift/codegen/src/isa/aarch64/lower/isle.rs b/cranelift/codegen/src/isa/aarch64/lower/isle.rs index ccd7411679ce..1ceb17007b38 100644 --- a/cranelift/codegen/src/isa/aarch64/lower/isle.rs +++ b/cranelift/codegen/src/isa/aarch64/lower/isle.rs @@ -165,7 +165,6 @@ impl Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> { fn integral_ty(&mut self, ty: Type) -> Option { match ty { I8 | I16 | I32 | I64 | R64 => Some(ty), - ty if ty.is_bool() => Some(ty), _ => None, } } diff --git a/cranelift/codegen/src/isa/aarch64/lower_inst.rs b/cranelift/codegen/src/isa/aarch64/lower_inst.rs index 808c79f71691..980f8e1eb746 100644 --- a/cranelift/codegen/src/isa/aarch64/lower_inst.rs +++ b/cranelift/codegen/src/isa/aarch64/lower_inst.rs @@ -39,7 +39,7 @@ pub(crate) fn lower_insn_to_regs( }; match op { - Opcode::Iconst | Opcode::Bconst | Opcode::Null => implemented_in_isle(ctx), + Opcode::Iconst | Opcode::Null => implemented_in_isle(ctx), Opcode::F32const => { let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); @@ -163,11 +163,9 @@ pub(crate) fn lower_insn_to_regs( Opcode::Copy => implemented_in_isle(ctx), - Opcode::Breduce | Opcode::Ireduce => implemented_in_isle(ctx), + Opcode::Ireduce => implemented_in_isle(ctx), - Opcode::Bextend | Opcode::Bmask => implemented_in_isle(ctx), - - Opcode::Bint => implemented_in_isle(ctx), + Opcode::Bmask => implemented_in_isle(ctx), Opcode::Bitcast => implemented_in_isle(ctx), diff --git a/cranelift/codegen/src/isa/riscv64/inst.isle b/cranelift/codegen/src/isa/riscv64/inst.isle index 4e43edc8de55..6c179b1748ec 100644 --- a/cranelift/codegen/src/isa/riscv64/inst.isle +++ b/cranelift/codegen/src/isa/riscv64/inst.isle @@ -1659,11 +1659,6 @@ (result Reg (alu_rrr (AluOPRRR.Or) tmp_x tmp_y))) result)) -(decl gen_bint (Reg) Reg) -(rule - (gen_bint r) - (alu_rr_imm12 (AluOPRRI.Andi) r (imm12_const 1))) - (decl gen_int_select (Type IntSelectOP ValueRegs ValueRegs) ValueRegs) (rule (gen_int_select ty op x y) @@ 
-1729,12 +1724,6 @@ (_ Unit (emit (MInst.FcvtToInt is_sat result tmp rs is_signed in_type out_type)))) result)) -;;;; in_type out_type -;;;; out_type is returned. -(decl pure valid_bextend_ty (Type Type) Type) -(extern constructor valid_bextend_ty valid_bextend_ty) - - ;;; some float binary operation ;;; 1. need move into x reister. ;;; 2. do the operation. @@ -2082,3 +2071,43 @@ (decl umulh (Reg Reg) Reg) (rule (umulh a b) (alu_rrr (AluOPRRR.Mulhu) a b)) + +;;;; Helpers for bmask ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +(decl lower_bmask (Type Type ValueRegs) ValueRegs) + +;; Produces -1 if the 64-bit value is non-zero, and 0 otherwise. +(rule + 0 + (lower_bmask (fits_in_64 _) (fits_in_64 _) val) + (let ((input Reg val) + (zero Reg (zero_reg)) + (ones Reg (load_imm12 -1))) + (value_reg (gen_select_reg (IntCC.Equal) zero input zero ones)))) + +;; Bitwise-or the two registers that make up the 128-bit value, then recurse as +;; though it was a 64-bit value. +(rule + 1 + (lower_bmask (fits_in_64 ty) $I128 val) + (let ((lo Reg (value_regs_get val 0)) + (hi Reg (value_regs_get val 1)) + (combined Reg (alu_rrr (AluOPRRR.Or) lo hi))) + (lower_bmask ty $I64 (value_reg combined)))) + +;; Conversion of one 64-bit value to a 128-bit one. Duplicate the result of the +;; bmask of the 64-bit value into both result registers of the i128. +(rule + 2 + (lower_bmask $I128 (fits_in_64 _) val) + (let ((res ValueRegs (lower_bmask $I64 $I64 val))) + (value_regs (value_regs_get res 0) (value_regs_get res 0)))) + +;; Conversion of one 64-bit value to a 128-bit one. Duplicate the result of +;; bmasking the 128-bit value to a 64-bit value into both registers of the +;; 128-bit result. +(rule + 3 + (lower_bmask $I128 $I128 val) + (let ((res ValueRegs (lower_bmask $I64 $I128 val))) + (value_regs (value_regs_get res 0) (value_regs_get res 0)))) diff --git a/cranelift/codegen/src/isa/riscv64/inst/args.rs b/cranelift/codegen/src/isa/riscv64/inst/args.rs index afb440080c49..2ab4c39bdde1 100644 --- a/cranelift/codegen/src/isa/riscv64/inst/args.rs +++ b/cranelift/codegen/src/isa/riscv64/inst/args.rs @@ -1189,10 +1189,8 @@ impl LoadOP { return if t == F32 { Self::Flw } else { Self::Fld }; } match t { - B1 | B8 => Self::Lbu, - B16 => Self::Lhu, - B32 | R32 => Self::Lwu, - B64 | R64 | I64 => Self::Ld, + R32 => Self::Lwu, + R64 | I64 => Self::Ld, I8 => Self::Lb, I16 => Self::Lh, diff --git a/cranelift/codegen/src/isa/riscv64/inst/emit.rs b/cranelift/codegen/src/isa/riscv64/inst/emit.rs index bbbecb97c282..e3385ead4dec 100644 --- a/cranelift/codegen/src/isa/riscv64/inst/emit.rs +++ b/cranelift/codegen/src/isa/riscv64/inst/emit.rs @@ -1385,13 +1385,13 @@ impl MachInstEmit for Inst { .for_each(|i| i.emit(&[], sink, emit_info, state)); sink.bind_label(label_true); - Inst::load_imm12(rd, Imm12::from_bits(-1)).emit(&[], sink, emit_info, state); + Inst::load_imm12(rd, Imm12::TRUE).emit(&[], sink, emit_info, state); Inst::Jal { dest: BranchTarget::offset(Inst::INSTRUCTION_SIZE * 2), } .emit(&[], sink, emit_info, state); sink.bind_label(label_false); - Inst::load_imm12(rd, Imm12::from_bits(0)).emit(&[], sink, emit_info, state); + Inst::load_imm12(rd, Imm12::FALSE).emit(&[], sink, emit_info, state); } &Inst::AtomicCas { offset, diff --git a/cranelift/codegen/src/isa/riscv64/inst/emit_tests.rs b/cranelift/codegen/src/isa/riscv64/inst/emit_tests.rs index 8992cf694af2..5cac0b211404 100644 --- a/cranelift/codegen/src/isa/riscv64/inst/emit_tests.rs +++ b/cranelift/codegen/src/isa/riscv64/inst/emit_tests.rs @@ -572,16 
+572,6 @@ fn test_riscv64_binemit() { "lb a0,100(a1)", 0x6458503, )); - insns.push(TestUnit::new( - Inst::Load { - rd: writable_a0(), - op: LoadOP::Lbu, - flags: MemFlags::new(), - from: AMode::RegOffset(a1(), 100, B8), - }, - "lbu a0,100(a1)", - 0x645c503, - )); insns.push(TestUnit::new( Inst::Load { rd: writable_a0(), @@ -593,17 +583,6 @@ fn test_riscv64_binemit() { 0x6459503, )); - insns.push(TestUnit::new( - Inst::Load { - rd: writable_a0(), - op: LoadOP::Lhu, - flags: MemFlags::new(), - from: AMode::RegOffset(a1(), 100, B16), - }, - "lhu a0,100(a1)", - 0x645d503, - )); - insns.push(TestUnit::new( Inst::Load { rd: writable_a0(), @@ -615,16 +594,6 @@ fn test_riscv64_binemit() { 0x645a503, )); - insns.push(TestUnit::new( - Inst::Load { - rd: writable_a0(), - op: LoadOP::Lwu, - flags: MemFlags::new(), - from: AMode::RegOffset(a1(), 100, B32), - }, - "lwu a0,100(a1)", - 0x645e503, - )); insns.push(TestUnit::new( Inst::Load { rd: writable_a0(), diff --git a/cranelift/codegen/src/isa/riscv64/inst/imms.rs b/cranelift/codegen/src/isa/riscv64/inst/imms.rs index d53315ade081..bee1971636c8 100644 --- a/cranelift/codegen/src/isa/riscv64/inst/imms.rs +++ b/cranelift/codegen/src/isa/riscv64/inst/imms.rs @@ -12,7 +12,7 @@ pub struct Imm12 { impl Imm12 { pub(crate) const FALSE: Self = Self { bits: 0 }; - pub(crate) const TRUE: Self = Self { bits: -1 }; + pub(crate) const TRUE: Self = Self { bits: 1 }; pub fn maybe_from_u64(val: u64) -> Option { let sign_bit = 1 << 11; if val == 0 { diff --git a/cranelift/codegen/src/isa/riscv64/inst/mod.rs b/cranelift/codegen/src/isa/riscv64/inst/mod.rs index f212f45326fc..1221edd2d9dc 100644 --- a/cranelift/codegen/src/isa/riscv64/inst/mod.rs +++ b/cranelift/codegen/src/isa/riscv64/inst/mod.rs @@ -6,9 +6,7 @@ use crate::binemit::{Addend, CodeOffset, Reloc}; pub use crate::ir::condcodes::IntCC; -use crate::ir::types::{ - B1, B128, B16, B32, B64, B8, F32, F64, FFLAGS, I128, I16, I32, I64, I8, IFLAGS, R32, R64, -}; +use crate::ir::types::{F32, F64, FFLAGS, I128, I16, I32, I64, I8, IFLAGS, R32, R64}; pub use crate::ir::{ExternalName, MemFlags, Opcode, SourceLoc, Type, ValueLabel}; use crate::isa::CallConv; @@ -691,14 +689,11 @@ impl MachInst for Inst { fn gen_constant Writable>( to_regs: ValueRegs>, - mut value: u128, + value: u128, ty: Type, mut alloc_tmp: F, ) -> SmallVec<[Inst; 4]> { - if ty.is_bool() && value != 0 { - value = !0; - } - if (ty.bits() <= 64 && (ty.is_bool() || ty.is_int())) || ty == R32 || ty == R64 { + if (ty.bits() <= 64 && ty.is_int()) || ty == R32 || ty == R64 { return Inst::load_constant_u64(to_regs.only_reg().unwrap(), value as u64); }; match ty { @@ -708,7 +703,7 @@ impl MachInst for Inst { F64 => { Inst::load_fp_constant64(to_regs.only_reg().unwrap(), value as u64, alloc_tmp(I64)) } - I128 | B128 => { + I128 => { let mut insts = SmallInstVec::new(); insts.extend(Inst::load_constant_u64( to_regs.regs()[0], @@ -736,17 +731,11 @@ impl MachInst for Inst { I16 => Ok((&[RegClass::Int], &[I16])), I32 => Ok((&[RegClass::Int], &[I32])), I64 => Ok((&[RegClass::Int], &[I64])), - B1 => Ok((&[RegClass::Int], &[B1])), - B8 => Ok((&[RegClass::Int], &[B8])), - B16 => Ok((&[RegClass::Int], &[B16])), - B32 => Ok((&[RegClass::Int], &[B32])), - B64 => Ok((&[RegClass::Int], &[B64])), R32 => panic!("32-bit reftype pointer should never be seen on riscv64"), R64 => Ok((&[RegClass::Int], &[R64])), F32 => Ok((&[RegClass::Float], &[F32])), F64 => Ok((&[RegClass::Float], &[F64])), I128 => Ok((&[RegClass::Int, RegClass::Int], &[I64, I64])), - B128 => Ok((&[RegClass::Int, 
RegClass::Int], &[B64, B64])), IFLAGS => Ok((&[RegClass::Int], &[IFLAGS])), FFLAGS => Ok((&[RegClass::Int], &[FFLAGS])), _ => Err(CodegenError::Unsupported(format!( diff --git a/cranelift/codegen/src/isa/riscv64/lower.isle b/cranelift/codegen/src/isa/riscv64/lower.isle index 5b5f6ba6ad52..4f5bb1a114ee 100644 --- a/cranelift/codegen/src/isa/riscv64/lower.isle +++ b/cranelift/codegen/src/isa/riscv64/lower.isle @@ -9,15 +9,6 @@ (rule (lower (has_type ty (iconst (u64_from_imm64 n)))) (imm ty n)) -;;;; Rules for `bconst` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(rule (lower (has_type ty (bconst $false))) - (imm ty 0)) - -(rule (lower (has_type ty (bconst $true))) - (imm ty 1)) - - ;;;; Rules for `null` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (has_type ty (null))) @@ -194,8 +185,6 @@ (rule 1 (lower (has_type (fits_in_64 ty) (band (imm12_from_value x) y))) (alu_rr_imm12 (AluOPRRI.Andi) y x)) -(rule (lower (has_type $B128 (band x y))) - (lower_b128_binary (AluOPRRR.And) x y)) (rule (lower (has_type $I128 (band x y))) (lower_b128_binary (AluOPRRR.And) x y)) @@ -215,8 +204,6 @@ (rule 1 (lower (has_type (fits_in_64 ty) (bor (imm12_from_value x) y))) (alu_rr_imm12 (AluOPRRI.Ori) y x)) -(rule (lower (has_type $B128 (bor x y))) - (lower_b128_binary (AluOPRRR.Or) x y)) (rule (lower (has_type $I128 (bor x y))) (lower_b128_binary (AluOPRRR.Or) x y)) (rule (lower (has_type $F32 (bor x y))) @@ -235,8 +222,6 @@ (rule 1 (lower (has_type (fits_in_64 ty) (bxor (imm12_from_value x) y))) (alu_rr_imm12 (AluOPRRI.Xori) y x)) -(rule (lower (has_type $B128 (bxor x y))) - (lower_b128_binary (AluOPRRR.Xor) x y)) (rule (lower (has_type $I128 (bxor x y))) (lower_b128_binary (AluOPRRR.Xor) x y)) (rule (lower (has_type $F32 (bxor x y))) @@ -251,8 +236,6 @@ (rule (lower (has_type $I128 (bnot x))) (bnot_128 x)) -(rule (lower (has_type $B128 (bnot x))) - (bnot_128 x)) (rule (lower (has_type $F32 (bnot x))) (lower_float_bnot x $F32) @@ -556,11 +539,6 @@ (rule (lower (has_type ty (copy x))) (gen_move2 x ty ty)) -;;;;; Rules for `breduce`;;;;;;;;;;;;;;;;; -(rule - (lower (has_type ty (breduce x))) - (gen_move2 (value_regs_get x 0) ty ty)) - ;;;;; Rules for `ireduce`;;;;;;;;;;;;;;;;; (rule (lower (has_type ty (ireduce x))) @@ -633,15 +611,6 @@ (lower (has_type ty (bitselect c x y))) (gen_bitselect ty c x y)) -;;;;; Rules for `bint`;;;;;;;;; -(rule - (lower (has_type (fits_in_64 ty) (bint (valueregs_2_reg x)))) - (gen_bint x)) -(rule 1 - (lower (has_type $I128 (bint (valueregs_2_reg x)))) - (let ((tmp Reg (gen_bint x))) - (value_regs tmp (load_u64_constant 0)))) - ;;;;; Rules for `isplit`;;;;;;;;; (rule (lower (isplit x)) @@ -733,10 +702,6 @@ (rule 1 (lower (has_type $I128 (load flags p offset))) (gen_load_128 p offset flags)) -;;;; for B128 -(rule 1 - (lower (has_type $B128 (load flags p offset))) - (gen_load_128 p offset flags)) ;;;;; Rules for `istore8`;;;;;;;;; (rule @@ -762,11 +727,6 @@ (lower (store flags x @ (value_type $I128 ) p offset)) (gen_store_128 p offset flags x)) -;;; special for B128 -(rule 1 - (lower (store flags x @ (value_type $B128 ) p offset)) - (gen_store_128 p offset flags x)) - (decl gen_icmp (IntCC ValueRegs ValueRegs Type) Reg) (rule (gen_icmp cc x y ty) @@ -923,34 +883,8 @@ ;;;;; Rules for `bmask`;;;;;;;;; (rule - ;; because we encode bool all 1s. - ;; move is just ok. - (lower (has_type (fits_in_64 ty) (bmask x @ (value_type ity)))) - (gen_move2 (value_regs_get x 0) ity ty)) -;;; for i128 -(rule 1 - ;; because we encode bool all 1s. 
- ;; move is just ok. - (lower (has_type $I128 (bmask x @ (value_type ity)))) - (value_regs (gen_move2 (value_regs_get x 0) $I64 $I64) (gen_move2 (value_regs_get x 0) $I64 $I64))) - -;;;;; Rules for `bextend`;;;;;;;;; -(rule - ;; because we encode bool all 1s. - ;; move is just ok. - (lower (has_type ty (bextend x @ (value_type ity)))) - ;;extra checks. - (if-let _ (valid_bextend_ty ity ty)) - (gen_moves x ity ty)) - -;;; for B128 -(rule 1 - ;; because we encode bool all 1s. - ;; move is just ok. - (lower (has_type ty (bextend x @ (value_type ity)))) - ;;extra checks. - (if-let $B128 (valid_bextend_ty ity ty)) - (value_regs (gen_moves x $I64 $I64) (gen_moves x $I64 $I64))) + (lower (has_type oty (bmask x @ (value_type ity)))) + (lower_bmask oty ity x)) ;; N.B.: the Ret itself is generated by the ABI. (rule (lower (return args)) diff --git a/cranelift/codegen/src/isa/riscv64/lower/isle.rs b/cranelift/codegen/src/isa/riscv64/lower/isle.rs index c9038daab75d..ff2b52f6ba8c 100644 --- a/cranelift/codegen/src/isa/riscv64/lower/isle.rs +++ b/cranelift/codegen/src/isa/riscv64/lower/isle.rs @@ -71,13 +71,6 @@ impl generated_code::Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> } } - fn valid_bextend_ty(&mut self, from: Type, to: Type) -> Option { - if from.is_bool() && to.is_bool() && from.bits() < to.bits() { - Some(to) - } else { - None - } - } fn lower_br_fcmp( &mut self, cc: &FloatCC, @@ -155,7 +148,7 @@ impl generated_code::Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> } } fn int_zero_reg(&mut self, ty: Type) -> ValueRegs { - assert!(ty.is_int() || ty.is_bool(), "{:?}", ty); + assert!(ty.is_int(), "{:?}", ty); if ty.bits() == 128 { ValueRegs::two(self.zero_reg(), self.zero_reg()) } else { @@ -190,7 +183,7 @@ impl generated_code::Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> Imm12::from_bits(imm.as_i16() & (x as i16)) } fn alloc_vec_writable(&mut self, ty: Type) -> VecWritableReg { - if ty.is_int() || ty.is_bool() || ty == R32 || ty == R64 { + if ty.is_int() || ty == R32 || ty == R64 { if ty.bits() <= 64 { vec![self.temp_writable_reg(I64)] } else { @@ -203,26 +196,7 @@ impl generated_code::Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> } } - fn imm(&mut self, ty: Type, mut val: u64) -> Reg { - // Boolean types - // Boolean values are either true or false. - - // The b1 type represents an abstract boolean value. It can only exist as an SSA value, and can't be directly stored in memory. It can, however, be converted into an integer with value 0 or 1 by the bint instruction (and converted back with icmp_imm with 0). - - // Several larger boolean types are also defined, primarily to be used as SIMD element types. They can be stored in memory, and are represented as either all zero bits or all one bits. - - // b1 - // b8 - // b16 - // b32 - // b64 - // /////////////////////////////////////////////////////////// - // "represented as either all zero bits or all one bits." 
- // \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ - if ty.is_bool() && val != 0 { - // need all be one - val = !0; - } + fn imm(&mut self, ty: Type, val: u64) -> Reg { let tmp = self.temp_writable_reg(ty); self.emit_list(&MInst::load_constant_u64(tmp, val)); tmp.to_reg() @@ -526,7 +500,7 @@ fn construct_dest WritableReg>( mut alloc: F, ty: Type, ) -> WritableValueRegs { - if ty.is_bool() || ty.is_int() { + if ty.is_int() { if ty.bits() == 128 { WritableValueRegs::two(alloc(I64), alloc(I64)) } else { diff --git a/cranelift/codegen/src/isa/s390x/abi.rs b/cranelift/codegen/src/isa/s390x/abi.rs index c77ed2f90590..8d48e4bc9a2a 100644 --- a/cranelift/codegen/src/isa/s390x/abi.rs +++ b/cranelift/codegen/src/isa/s390x/abi.rs @@ -94,7 +94,6 @@ pub type S390xCallee = Callee; fn in_int_reg(ty: Type) -> bool { match ty { types::I8 | types::I16 | types::I32 | types::I64 | types::R64 => true, - types::B1 | types::B8 | types::B16 | types::B32 | types::B64 => true, _ => false, } } diff --git a/cranelift/codegen/src/isa/s390x/inst.isle b/cranelift/codegen/src/isa/s390x/inst.isle index 70e51f37d9e8..9360d9b3b01c 100644 --- a/cranelift/codegen/src/isa/s390x/inst.isle +++ b/cranelift/codegen/src/isa/s390x/inst.isle @@ -3129,7 +3129,6 @@ dst)) ;; Sign-extend a register from a smaller `Type` into a 32-bit register. -;; This handles both integer and boolean input types (except $B1). (decl sext32_reg (Type Reg) Reg) (rule (sext32_reg ty src) (let ((dst WritableReg (temp_writable_reg $I32)) @@ -3137,7 +3136,6 @@ dst)) ;; Zero-extend a register from a smaller `Type` into a 64-bit register. -;; This handles both integer and boolean input types (except $B1). (decl zext64_reg (Type Reg) Reg) (rule (zext64_reg ty src) (let ((dst WritableReg (temp_writable_reg $I64)) @@ -3145,7 +3143,6 @@ dst)) ;; Sign-extend a register from a smaller `Type` into a 64-bit register. -;; This handles both integer and boolean input types (except $B1). (decl sext64_reg (Type Reg) Reg) (rule (sext64_reg ty src) (let ((dst WritableReg (temp_writable_reg $I64)) @@ -3478,13 +3475,14 @@ dst)) ;; Lower a boolean condition to a boolean type. The value used to represent -;; "true" is -1 for all result types except for $B1, which uses 1. +;; "true" is -1 for all result types except for $I8, which uses 1. (decl lower_bool (Type ProducesBool) Reg) -(rule (lower_bool $B1 cond) (select_bool_imm $B1 cond 1 0)) -(rule (lower_bool $B8 cond) (select_bool_imm $B8 cond -1 0)) -(rule (lower_bool $B16 cond) (select_bool_imm $B16 cond -1 0)) -(rule (lower_bool $B32 cond) (select_bool_imm $B32 cond -1 0)) -(rule (lower_bool $B64 cond) (select_bool_imm $B64 cond -1 0)) +(rule (lower_bool $I8 cond) (select_bool_imm $I8 cond 1 0)) + +;; TODO: do we need these cases anymore if B8..B128 are missing? +(rule (lower_bool $I16 cond) (select_bool_imm $I16 cond -1 0)) +(rule (lower_bool $I32 cond) (select_bool_imm $I32 cond -1 0)) +(rule (lower_bool $I64 cond) (select_bool_imm $I64 cond -1 0)) ;; Emit a conditional branch based on a boolean condition. (decl cond_br_bool (ProducesBool MachLabel MachLabel) SideEffectNoResult) diff --git a/cranelift/codegen/src/isa/s390x/inst/mod.rs b/cranelift/codegen/src/isa/s390x/inst/mod.rs index a28c8913edc7..08d4b85efd52 100644 --- a/cranelift/codegen/src/isa/s390x/inst/mod.rs +++ b/cranelift/codegen/src/isa/s390x/inst/mod.rs @@ -397,10 +397,10 @@ impl Inst { /// Generic constructor for a load (zero-extending where appropriate). 
pub fn gen_load(into_reg: Writable, mem: MemArg, ty: Type) -> Inst { match ty { - types::B1 | types::B8 | types::I8 => Inst::Load64ZExt8 { rd: into_reg, mem }, - types::B16 | types::I16 => Inst::Load64ZExt16 { rd: into_reg, mem }, - types::B32 | types::I32 => Inst::Load64ZExt32 { rd: into_reg, mem }, - types::B64 | types::I64 | types::R64 => Inst::Load64 { rd: into_reg, mem }, + types::I8 => Inst::Load64ZExt8 { rd: into_reg, mem }, + types::I16 => Inst::Load64ZExt16 { rd: into_reg, mem }, + types::I32 => Inst::Load64ZExt32 { rd: into_reg, mem }, + types::I64 | types::R64 => Inst::Load64 { rd: into_reg, mem }, types::F32 => Inst::VecLoadLaneUndef { size: 32, rd: into_reg, @@ -414,7 +414,7 @@ impl Inst { lane_imm: 0, }, _ if ty.is_vector() && ty.bits() == 128 => Inst::VecLoad { rd: into_reg, mem }, - types::B128 | types::I128 => Inst::VecLoad { rd: into_reg, mem }, + types::I128 => Inst::VecLoad { rd: into_reg, mem }, _ => unimplemented!("gen_load({})", ty), } } @@ -422,10 +422,10 @@ impl Inst { /// Generic constructor for a store. pub fn gen_store(mem: MemArg, from_reg: Reg, ty: Type) -> Inst { match ty { - types::B1 | types::B8 | types::I8 => Inst::Store8 { rd: from_reg, mem }, - types::B16 | types::I16 => Inst::Store16 { rd: from_reg, mem }, - types::B32 | types::I32 => Inst::Store32 { rd: from_reg, mem }, - types::B64 | types::I64 | types::R64 => Inst::Store64 { rd: from_reg, mem }, + types::I8 => Inst::Store8 { rd: from_reg, mem }, + types::I16 => Inst::Store16 { rd: from_reg, mem }, + types::I32 => Inst::Store32 { rd: from_reg, mem }, + types::I64 | types::R64 => Inst::Store64 { rd: from_reg, mem }, types::F32 => Inst::VecStoreLane { size: 32, rd: from_reg, @@ -439,7 +439,7 @@ impl Inst { lane_imm: 0, }, _ if ty.is_vector() && ty.bits() == 128 => Inst::VecStore { rd: from_reg, mem }, - types::B128 | types::I128 => Inst::VecStore { rd: from_reg, mem }, + types::I128 => Inst::VecStore { rd: from_reg, mem }, _ => unimplemented!("gen_store({})", ty), } } @@ -1086,7 +1086,7 @@ impl MachInst for Inst { .only_reg() .expect("multi-reg values not supported yet"); match ty { - types::I128 | types::B128 => { + types::I128 => { let mut ret = SmallVec::new(); ret.push(Inst::load_vec_constant(to_reg, value)); ret @@ -1112,14 +1112,8 @@ impl MachInst for Inst { )); ret } - types::I64 | types::B64 | types::R64 => Inst::load_constant64(to_reg, value as u64), - types::B1 - | types::I8 - | types::B8 - | types::I16 - | types::B16 - | types::I32 - | types::B32 => Inst::load_constant32(to_reg, value as u32), + types::I64 | types::R64 => Inst::load_constant64(to_reg, value as u64), + types::I8 | types::I16 | types::I32 => Inst::load_constant32(to_reg, value as u32), _ => unreachable!(), } } @@ -1140,17 +1134,11 @@ impl MachInst for Inst { types::I16 => Ok((&[RegClass::Int], &[types::I16])), types::I32 => Ok((&[RegClass::Int], &[types::I32])), types::I64 => Ok((&[RegClass::Int], &[types::I64])), - types::B1 => Ok((&[RegClass::Int], &[types::B1])), - types::B8 => Ok((&[RegClass::Int], &[types::B8])), - types::B16 => Ok((&[RegClass::Int], &[types::B16])), - types::B32 => Ok((&[RegClass::Int], &[types::B32])), - types::B64 => Ok((&[RegClass::Int], &[types::B64])), types::R32 => panic!("32-bit reftype pointer should never be seen on s390x"), types::R64 => Ok((&[RegClass::Int], &[types::R64])), types::F32 => Ok((&[RegClass::Float], &[types::F32])), types::F64 => Ok((&[RegClass::Float], &[types::F64])), types::I128 => Ok((&[RegClass::Float], &[types::I128])), - types::B128 => Ok((&[RegClass::Float], 
&[types::B128])), _ if ty.is_vector() && ty.bits() == 128 => Ok((&[RegClass::Float], &[types::I8X16])), // FIXME: We don't really have IFLAGS, but need to allow it here // for now to support the SelectifSpectreGuard instruction. diff --git a/cranelift/codegen/src/isa/s390x/lower.isle b/cranelift/codegen/src/isa/s390x/lower.isle index 4c05690651a0..291853c805f8 100644 --- a/cranelift/codegen/src/isa/s390x/lower.isle +++ b/cranelift/codegen/src/isa/s390x/lower.isle @@ -16,14 +16,6 @@ (imm ty n)) -;;;; Rules for `bconst` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(rule (lower (has_type ty (bconst $false))) - (imm ty 0)) -(rule (lower (has_type ty (bconst $true))) - (imm ty 1)) - - ;;;; Rules for `f32const` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (f32const (u64_from_ieee32 x))) @@ -1163,57 +1155,36 @@ (vec_select ty y z x)) -;;;; Rules for `breduce` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -;; Up to 64-bit source type: Always a no-op. -(rule 1 (lower (breduce x @ (value_type (fits_in_64 _ty)))) - x) - -;; 128-bit source type: Extract the low half. -(rule (lower (breduce x @ (value_type (vr128_ty _ty)))) - (vec_extract_lane $I64X2 x 1 (zero_reg))) - - -;;;; Rules for `bextend` and `bmask` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;; Rules for `bmask` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Use a common helper to type cast bools to either bool or integer types. (decl cast_bool (Type Value) Reg) -(rule (lower (has_type ty (bextend x))) - (cast_bool ty x)) (rule (lower (has_type ty (bmask x))) (cast_bool ty x)) ;; If the target has the same or a smaller size than the source, it's a no-op. -(rule 8 (cast_bool $B1 x @ (value_type $B1)) x) -(rule 8 (cast_bool $B1 x @ (value_type $B8)) x) -(rule 8 (cast_bool $B8 x @ (value_type $B8)) x) -(rule 8 (cast_bool $I8 x @ (value_type $B8)) x) -(rule 7 (cast_bool (fits_in_16 _ty) x @ (value_type $B16)) x) -(rule 6 (cast_bool (fits_in_32 _ty) x @ (value_type $B32)) x) -(rule 5 (cast_bool (fits_in_64 _ty) x @ (value_type $B64)) x) -(rule 4 (cast_bool (vr128_ty _ty) x @ (value_type $B128)) x) -(rule 5 (cast_bool (fits_in_64 _ty) x @ (value_type $B128)) +(rule 7 (cast_bool (fits_in_16 _ty) x @ (value_type $I16)) x) +(rule 6 (cast_bool (fits_in_32 _ty) x @ (value_type $I32)) x) +(rule 5 (cast_bool (fits_in_64 _ty) x @ (value_type $I64)) x) +(rule 4 (cast_bool (vr128_ty _ty) x @ (value_type $I128)) x) +(rule 5 (cast_bool (fits_in_64 _ty) x @ (value_type $I128)) (vec_extract_lane $I64X2 x 1 (zero_reg))) ;; Single-bit values are sign-extended via a pair of shifts. -(rule 0 (cast_bool (gpr32_ty ty) x @ (value_type $B1)) +(rule 0 (cast_bool (gpr32_ty ty) x @ (value_type $I8)) (ashr_imm $I32 (lshl_imm $I32 x 31) 31)) -(rule 1 (cast_bool (gpr64_ty ty) x @ (value_type $B1)) +(rule 1 (cast_bool (gpr64_ty ty) x @ (value_type $I8)) (ashr_imm $I64 (lshl_imm $I64 x 63) 63)) -(rule 4 (cast_bool (vr128_ty ty) x @ (value_type $B1)) +(rule 4 (cast_bool (vr128_ty ty) x @ (value_type $I8)) (let ((gpr Reg (ashr_imm $I64 (lshl_imm $I64 x 63) 63))) (mov_to_vec128 ty gpr gpr))) ;; Other values are just sign-extended normally. 
-(rule 0 (cast_bool (gpr32_ty _ty) x @ (value_type $B8)) - (sext32_reg $I8 x)) -(rule 0 (cast_bool (gpr32_ty _ty) x @ (value_type $B16)) +(rule 0 (cast_bool (gpr32_ty _ty) x @ (value_type $I16)) (sext32_reg $I16 x)) -(rule 1(cast_bool (gpr64_ty _ty) x @ (value_type $B8)) - (sext64_reg $I8 x)) -(rule 1(cast_bool (gpr64_ty _ty) x @ (value_type $B16)) +(rule 1(cast_bool (gpr64_ty _ty) x @ (value_type $I16)) (sext64_reg $I16 x)) -(rule 1(cast_bool (gpr64_ty _ty) x @ (value_type $B32)) +(rule 1(cast_bool (gpr64_ty _ty) x @ (value_type $I32)) (sext64_reg $I32 x)) (rule 3 (cast_bool (vr128_ty ty) x @ (value_type (gpr32_ty src_ty))) (let ((x_ext Reg (sext64_reg src_ty x))) @@ -1222,35 +1193,6 @@ (mov_to_vec128 ty x x)) -;;;; Rules for `bint` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -;; Mask with 1 to get a 0/1 result (8- or 16-bit result types). -(rule 5 (lower (has_type (fits_in_16 ty) (bint x @ (value_type (fits_in_64 _))))) - (and_uimm16shifted ty x (uimm16shifted 1 0))) - -;; Mask with 1 to get a 0/1 result (32-bit result types). -(rule 4 (lower (has_type (fits_in_32 ty) (bint x @ (value_type (fits_in_64 _))))) - (and_uimm32shifted ty x (uimm32shifted 1 0))) - -;; Mask with 1 to get a 0/1 result (64-bit result types). -(rule 3 (lower (has_type (fits_in_64 ty) (bint x @ (value_type (fits_in_64 _))))) - (and_reg ty x (imm ty 1))) - -;; Mask with 1 to get a 0/1 result (128-bit result types). -(rule 1 (lower (has_type (vr128_ty ty) (bint x @ (value_type (fits_in_64 _))))) - (let ((x_ext Reg (and_uimm16shifted $I8 x (uimm16shifted 1 0)))) - (vec_insert_lane $I8X16 (vec_imm ty 0) x_ext 15 (zero_reg)))) - -;; Mask with 1 to get a 0/1 result (128-bit source types). -(rule 2 (lower (has_type (fits_in_64 ty) (bint x @ (value_type (vr128_ty _))))) - (let ((x_gpr Reg (vec_extract_lane $I8X16 x 15 (zero_reg)))) - (and_uimm16shifted ty x_gpr (uimm16shifted 1 0)))) - -;; Mask with 1 to get a 0/1 result (128-bit source and result types). -(rule 0 (lower (has_type (vr128_ty ty) (bint x @ (value_type (vr128_ty _))))) - (vec_and ty x (vec_imm ty 1))) - - ;;;; Rules for `bitrev` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (has_type ty (bitrev x))) @@ -1864,7 +1806,7 @@ (rule 1 (lower (insertlane x @ (value_type ty) y @ (value_type in_ty) (u8_from_uimm8 idx))) - (if (ty_int_bool_ref_scalar_64 in_ty)) + (if (ty_int_ref_scalar_64 in_ty)) (vec_insert_lane ty x y (be_lane_idx ty idx) (zero_reg))) ;; Insert vector lane from floating-point register. @@ -1980,7 +1922,7 @@ ;; Extract vector lane to general-purpose register. (rule 1 (lower (has_type out_ty (extractlane x @ (value_type ty) (u8_from_uimm8 idx)))) - (if (ty_int_bool_ref_scalar_64 out_ty)) + (if (ty_int_ref_scalar_64 out_ty)) (vec_extract_lane ty x (be_lane_idx ty idx) (zero_reg))) ;; Extract vector lane to floating-point register. @@ -2037,7 +1979,7 @@ ;; Load replicated value from general-purpose register. (rule 1 (lower (has_type ty (splat x @ (value_type in_ty)))) - (if (ty_int_bool_ref_scalar_64 in_ty)) + (if (ty_int_ref_scalar_64 in_ty)) (vec_replicate_lane ty (vec_insert_lane_undef ty x 0 (zero_reg)) 0)) ;; Load replicated value from floating-point register. @@ -2097,7 +2039,7 @@ ;; Load scalar value from general-purpose register. (rule 1 (lower (has_type ty (scalar_to_vector x @ (value_type in_ty)))) - (if (ty_int_bool_ref_scalar_64 in_ty)) + (if (ty_int_ref_scalar_64 in_ty)) (vec_insert_lane ty (vec_imm ty 0) x (be_lane_idx ty 0) (zero_reg))) ;; Load scalar value from floating-point register. 
@@ -3783,14 +3725,14 @@ ;;;; Rules for `is_null` and `is_invalid` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Null references are represented by the constant value 0. -(rule (lower (has_type $B1 (is_null x @ (value_type $R64)))) - (lower_bool $B1 (bool (icmps_simm16 $I64 x 0) +(rule (lower (has_type $I8 (is_null x @ (value_type $R64)))) + (lower_bool $I8 (bool (icmps_simm16 $I64 x 0) (intcc_as_cond (IntCC.Equal))))) ;; Invalid references are represented by the constant value -1. -(rule (lower (has_type $B1 (is_invalid x @ (value_type $R64)))) - (lower_bool $B1 (bool (icmps_simm16 $I64 x -1) +(rule (lower (has_type $I8 (is_invalid x @ (value_type $R64)))) + (lower_bool $I8 (bool (icmps_simm16 $I64 x -1) (intcc_as_cond (IntCC.Equal))))) @@ -3798,10 +3740,9 @@ ;; Return a `ProducesBool` to capture the fact that the input value is nonzero. ;; In the common case where that input is the result of an `icmp` or `fcmp` -;; instruction (possibly via an intermediate `bint`), directly use that compare. -;; Note that it is not safe to sink memory loads here, see the `icmp` comment. +;; instruction, directly use that compare. Note that it is not safe to sink +;; memory loads here, see the `icmp` comment. (decl value_nonzero (Value) ProducesBool) -(rule (value_nonzero (bint val)) (value_nonzero val)) (rule (value_nonzero (icmp int_cc x y)) (icmp_val $false int_cc x y)) (rule (value_nonzero (fcmp float_cc x y)) (fcmp_val float_cc x y)) (rule -1 (value_nonzero val @ (value_type (gpr32_ty ty))) diff --git a/cranelift/codegen/src/isa/s390x/lower.rs b/cranelift/codegen/src/isa/s390x/lower.rs index a78a099aabb4..6fa2509ebb8d 100644 --- a/cranelift/codegen/src/isa/s390x/lower.rs +++ b/cranelift/codegen/src/isa/s390x/lower.rs @@ -45,7 +45,6 @@ impl LowerBackend for S390xBackend { Opcode::Nop | Opcode::Copy | Opcode::Iconst - | Opcode::Bconst | Opcode::F32const | Opcode::F64const | Opcode::Vconst @@ -100,10 +99,7 @@ impl LowerBackend for S390xBackend { | Opcode::BxorNot | Opcode::Bitselect | Opcode::Vselect - | Opcode::Breduce - | Opcode::Bextend | Opcode::Bmask - | Opcode::Bint | Opcode::Bitrev | Opcode::Clz | Opcode::Cls diff --git a/cranelift/codegen/src/isa/s390x/lower/isle.rs b/cranelift/codegen/src/isa/s390x/lower/isle.rs index b41ae258f0a1..58f8bdba3e82 100644 --- a/cranelift/codegen/src/isa/s390x/lower/isle.rs +++ b/cranelift/codegen/src/isa/s390x/lower/isle.rs @@ -252,7 +252,7 @@ impl generated_code::Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> #[inline] fn gpr32_ty(&mut self, ty: Type) -> Option { match ty { - I8 | I16 | I32 | B1 | B8 | B16 | B32 => Some(ty), + I8 | I16 | I32 => Some(ty), _ => None, } } @@ -260,7 +260,7 @@ impl generated_code::Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> #[inline] fn gpr64_ty(&mut self, ty: Type) -> Option { match ty { - I64 | B64 | R64 => Some(ty), + I64 | R64 => Some(ty), _ => None, } } @@ -268,7 +268,7 @@ impl generated_code::Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> #[inline] fn vr128_ty(&mut self, ty: Type) -> Option { match ty { - I128 | B128 => Some(ty), + I128 => Some(ty), _ if ty.is_vector() && ty.bits() == 128 => Some(ty), _ => None, } diff --git a/cranelift/codegen/src/isa/x64/abi.rs b/cranelift/codegen/src/isa/x64/abi.rs index 7911be775dbb..5aaea546d126 100644 --- a/cranelift/codegen/src/isa/x64/abi.rs +++ b/cranelift/codegen/src/isa/x64/abi.rs @@ -260,13 +260,7 @@ impl ABIMachineSpec for X64ABIMachineSpec { // For integer-typed values, we always load a full 64 bits (and we always spill a full 64 // bits as well -- 
see `Inst::store()`). let ty = match ty { - types::B1 - | types::B8 - | types::I8 - | types::B16 - | types::I16 - | types::B32 - | types::I32 => types::I64, + types::I8 | types::I16 | types::I32 => types::I64, _ => ty, }; Inst::load(ty, mem, into_reg, ExtKind::None) diff --git a/cranelift/codegen/src/isa/x64/inst/mod.rs b/cranelift/codegen/src/isa/x64/inst/mod.rs index ae92b7307ef2..393b3779a839 100644 --- a/cranelift/codegen/src/isa/x64/inst/mod.rs +++ b/cranelift/codegen/src/isa/x64/inst/mod.rs @@ -2217,17 +2217,11 @@ impl MachInst for Inst { types::I16 => Ok((&[RegClass::Int], &[types::I16])), types::I32 => Ok((&[RegClass::Int], &[types::I32])), types::I64 => Ok((&[RegClass::Int], &[types::I64])), - types::B1 => Ok((&[RegClass::Int], &[types::B1])), - types::B8 => Ok((&[RegClass::Int], &[types::B8])), - types::B16 => Ok((&[RegClass::Int], &[types::B16])), - types::B32 => Ok((&[RegClass::Int], &[types::B32])), - types::B64 => Ok((&[RegClass::Int], &[types::B64])), types::R32 => panic!("32-bit reftype pointer should never be seen on x86-64"), types::R64 => Ok((&[RegClass::Int], &[types::R64])), types::F32 => Ok((&[RegClass::Float], &[types::F32])), types::F64 => Ok((&[RegClass::Float], &[types::F64])), types::I128 => Ok((&[RegClass::Int, RegClass::Int], &[types::I64, types::I64])), - types::B128 => Ok((&[RegClass::Int, RegClass::Int], &[types::B64, types::B64])), _ if ty.is_vector() => { assert!(ty.bits() <= 128); Ok((&[RegClass::Float], &[types::I8X16])) @@ -2326,15 +2320,10 @@ impl MachInst for Inst { } else { // Must be an integer type. debug_assert!( - ty == types::B1 - || ty == types::I8 - || ty == types::B8 + ty == types::I8 || ty == types::I16 - || ty == types::B16 || ty == types::I32 - || ty == types::B32 || ty == types::I64 - || ty == types::B64 || ty == types::R32 || ty == types::R64 ); diff --git a/cranelift/codegen/src/isa/x64/lower.isle b/cranelift/codegen/src/isa/x64/lower.isle index c03239b8de74..0873c0f20c6d 100644 --- a/cranelift/codegen/src/isa/x64/lower.isle +++ b/cranelift/codegen/src/isa/x64/lower.isle @@ -22,30 +22,6 @@ (value_regs (imm $I64 x) (imm $I64 0))) -;;;; Rules for `bconst` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -;; `b64` and smaller. - -(rule (lower (has_type (fits_in_64 ty) - (bconst $false))) - (imm ty 0)) - -(rule (lower (has_type (fits_in_64 ty) - (bconst $true))) - (imm ty 1)) - -;; `b128` - -(rule 1 (lower (has_type $B128 - (bconst $false))) - (value_regs (imm $B64 0) - (imm $B64 0))) - -(rule 1 (lower (has_type $B128 - (bconst $true))) - (value_regs (imm $B64 1) - (imm $B64 0))) - ;;;; Rules for `f32const` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (f32const (u64_from_ieee32 x))) @@ -303,7 +279,7 @@ (band x y))) (sse_and ty x y)) -;; `{i,b}128`. +;; `i128`. (rule 6 (lower (has_type $I128 (band x y))) (let ((x_regs ValueRegs x) @@ -315,17 +291,6 @@ (value_gprs (x64_and $I64 x_lo y_lo) (x64_and $I64 x_hi y_hi)))) -(rule 6 (lower (has_type $B128 (band x y))) - ;; Booleans are always `0` or `1`, so we only need to do the `and` on the - ;; low half. The high half is always zero but, rather than generate a new - ;; zero, we just reuse `x`'s high half which is already zero. - (let ((x_regs ValueRegs x) - (x_lo Gpr (value_regs_get_gpr x_regs 0)) - (x_hi Gpr (value_regs_get_gpr x_regs 1)) - (y_lo Gpr (lo_gpr y))) - (value_gprs (x64_and $I64 x_lo y_lo) - x_hi))) - ;;;; Rules for `bor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; `{i,b}64` and smaller. 
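The `$I128` `band` rule above splits each operand into its low and high 64-bit halves, ANDs the halves independently, and reassembles the pair. A minimal Rust sketch of the same decomposition, using plain integer arithmetic in place of the `ValueRegs`/`Gpr` plumbing:

    fn band_i128(x: u128, y: u128) -> u128 {
        // Split into 64-bit halves, mirroring value_regs_get_gpr 0 and 1 above.
        let (x_lo, x_hi) = (x as u64, (x >> 64) as u64);
        let (y_lo, y_hi) = (y as u64, (y >> 64) as u64);
        // AND each half on its own, then recombine.
        let lo = x_lo & y_lo;
        let hi = x_hi & y_hi;
        ((hi as u128) << 64) | lo as u128
    }

    fn main() {
        let x = 0x1234_5678_9abc_def0_0fed_cba9_8765_4321u128;
        let y = 0xffff_0000_ffff_0000_0000_ffff_0000_ffffu128;
        assert_eq!(band_i128(x, y), x & y);
    }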
@@ -381,17 +346,6 @@ (rule 6 (lower (has_type $I128 (bor x y))) (or_i128 x y)) -(rule 6 (lower (has_type $B128 (bor x y))) - ;; Booleans are always `0` or `1`, so we only need to do the `or` on the - ;; low half. The high half is always zero but, rather than generate a new - ;; zero, we just reuse `x`'s high half which is already zero. - (let ((x_regs ValueRegs x) - (x_lo Gpr (value_regs_get_gpr x_regs 0)) - (x_hi Gpr (value_regs_get_gpr x_regs 1)) - (y_lo Gpr (lo_gpr y))) - (value_gprs (x64_or $I64 x_lo y_lo) - x_hi))) - ;;;; Rules for `bxor` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; `{i,b}64` and smaller. @@ -439,17 +393,6 @@ (value_gprs (x64_xor $I64 x_lo y_lo) (x64_xor $I64 x_hi y_hi)))) -(rule 6 (lower (has_type $B128 (bxor x y))) - ;; Booleans are always `0` or `1`, so we only need to do the `xor` on the - ;; low half. The high half is always zero but, rather than generate a new - ;; zero, we just reuse `x`'s high half which is already zero. - (let ((x_regs ValueRegs x) - (x_lo Gpr (value_regs_get_gpr x_regs 0)) - (x_hi Gpr (value_regs_get_gpr x_regs 1)) - (y_lo Gpr (lo_gpr y))) - (value_gprs (x64_xor $I64 x_lo y_lo) - x_hi))) - ;;;; Rules for `ishl` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; `i64` and smaller. @@ -1240,9 +1183,6 @@ (rule (lower (has_type $I128 (bnot x))) (i128_not x)) -(rule (lower (has_type $B128 (bnot x))) - (i128_not x)) - ;; Special case for vector-types where bit-negation is an xor against an ;; all-one value (rule -1 (lower (has_type ty @ (multi_lane _bits _lanes) (bnot x))) @@ -1450,35 +1390,35 @@ (lower_icmp_bool (emit_cmp cc a b))) ;; Peephole optimization for `x < 0`, when x is a signed 64 bit value -(rule 2 (lower (has_type $B1 (icmp (IntCC.SignedLessThan) x @ (value_type $I64) (u64_from_iconst 0)))) +(rule 2 (lower (has_type $I8 (icmp (IntCC.SignedLessThan) x @ (value_type $I64) (u64_from_iconst 0)))) (x64_shr $I64 x (Imm8Reg.Imm8 63))) ;; Peephole optimization for `0 > x`, when x is a signed 64 bit value -(rule 2 (lower (has_type $B1 (icmp (IntCC.SignedGreaterThan) (u64_from_iconst 0) x @ (value_type $I64)))) +(rule 2 (lower (has_type $I8 (icmp (IntCC.SignedGreaterThan) (u64_from_iconst 0) x @ (value_type $I64)))) (x64_shr $I64 x (Imm8Reg.Imm8 63))) ;; Peephole optimization for `0 <= x`, when x is a signed 64 bit value -(rule 2 (lower (has_type $B1 (icmp (IntCC.SignedLessThanOrEqual) (u64_from_iconst 0) x @ (value_type $I64)))) +(rule 2 (lower (has_type $I8 (icmp (IntCC.SignedLessThanOrEqual) (u64_from_iconst 0) x @ (value_type $I64)))) (x64_shr $I64 (x64_not $I64 x) (Imm8Reg.Imm8 63))) ;; Peephole optimization for `x >= 0`, when x is a signed 64 bit value -(rule 2 (lower (has_type $B1 (icmp (IntCC.SignedGreaterThanOrEqual) x @ (value_type $I64) (u64_from_iconst 0)))) +(rule 2 (lower (has_type $I8 (icmp (IntCC.SignedGreaterThanOrEqual) x @ (value_type $I64) (u64_from_iconst 0)))) (x64_shr $I64 (x64_not $I64 x) (Imm8Reg.Imm8 63))) ;; Peephole optimization for `x < 0`, when x is a signed 32 bit value -(rule 2 (lower (has_type $B1 (icmp (IntCC.SignedLessThan) x @ (value_type $I32) (u64_from_iconst 0)))) +(rule 2 (lower (has_type $I8 (icmp (IntCC.SignedLessThan) x @ (value_type $I32) (u64_from_iconst 0)))) (x64_shr $I32 x (Imm8Reg.Imm8 31))) ;; Peephole optimization for `0 > x`, when x is a signed 32 bit value -(rule 2 (lower (has_type $B1 (icmp (IntCC.SignedGreaterThan) (u64_from_iconst 0) x @ (value_type $I32)))) +(rule 2 (lower (has_type $I8 (icmp (IntCC.SignedGreaterThan) (u64_from_iconst 0) x @ (value_type 
$I32)))) (x64_shr $I32 x (Imm8Reg.Imm8 31))) ;; Peephole optimization for `0 <= x`, when x is a signed 32 bit value -(rule 2 (lower (has_type $B1 (icmp (IntCC.SignedLessThanOrEqual) (u64_from_iconst 0) x @ (value_type $I32)))) +(rule 2 (lower (has_type $I8 (icmp (IntCC.SignedLessThanOrEqual) (u64_from_iconst 0) x @ (value_type $I32)))) (x64_shr $I32 (x64_not $I64 x) (Imm8Reg.Imm8 31))) ;; Peephole optimization for `x >= 0`, when x is a signed 32 bit value -(rule 2 (lower (has_type $B1 (icmp (IntCC.SignedGreaterThanOrEqual) x @ (value_type $I32) (u64_from_iconst 0)))) +(rule 2 (lower (has_type $I8 (icmp (IntCC.SignedGreaterThanOrEqual) x @ (value_type $I32) (u64_from_iconst 0)))) (x64_shr $I32 (x64_not $I64 x) (Imm8Reg.Imm8 31))) ;; For XMM-held values, we lower to `PCMP*` instructions, sometimes more than @@ -1710,8 +1650,8 @@ ;; Finally, we lower `select` from a condition value `c`. These rules are meant ;; to be the final, default lowerings if no other patterns matched above. -(rule -1 (lower (has_type ty (select c @ (value_type $B1) x y))) - (let ((size OperandSize (raw_operand_size_of_type $B1)) +(rule -1 (lower (has_type ty (select c @ (value_type $I8) x y))) + (let ((size OperandSize (raw_operand_size_of_type $I8)) ;; N.B.: disallow load-op fusion, see above. TODO: ;; https://github.com/bytecodealliance/wasmtime/issues/3953. (gpr_c Gpr (put_in_gpr c))) @@ -2125,7 +2065,7 @@ (uextend src @ (has_type $I32 (uload32 _ _ _))))) src) -;; Rules for `sextend` / `bextend` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Rules for `sextend` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (decl generic_sextend (Value Type Type) InstOutput) @@ -2140,17 +2080,17 @@ (x64_sar $I64 src (Imm8Reg.Imm8 63))) ;; I64 -> I128. -(rule 3 (generic_sextend src (ty_int_bool_64 _) (ty_int_bool_128 _)) +(rule 3 (generic_sextend src $I64 $I128) (value_regs src (spread_sign_bit src))) ;; I{8,16,32} -> I128. -(rule 2 (generic_sextend src (fits_in_32 src_ty) (ty_int_bool_128 _)) +(rule 2 (generic_sextend src (fits_in_32 src_ty) $I128) (let ((lo Gpr (extend_to_gpr src $I64 (ExtendKind.Sign))) (hi Gpr (spread_sign_bit lo))) (value_regs lo hi))) ;; I{8,16,32} -> I64. -(rule 1 (generic_sextend src (fits_in_32 src_ty) (ty_int_bool_64 _)) +(rule 1 (generic_sextend src (fits_in_32 src_ty) $I64) (extend_to_gpr src $I64 (ExtendKind.Sign))) ;; I8 -> I{16,32}, I16 -> I32. @@ -2162,13 +2102,7 @@ (sextend src @ (value_type src_ty)))) (generic_sextend src src_ty dst_ty)) -;; Bools are stored as 0/-1 so extends must sign-extend as well. -(rule (lower - (has_type dst_ty - (bextend src @ (value_type src_ty)))) - (generic_sextend src src_ty dst_ty)) - -;; Rules for `ireduce` / `breduce` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Rules for `ireduce` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; T -> T is always a no-op, even I128 -> I128. (rule (lower (has_type ty (ireduce src @ (value_type ty)))) @@ -2180,28 +2114,6 @@ (rule 1 (lower (has_type (fits_in_64 ty) (ireduce src))) (value_regs_get_gpr src 0)) -;; Likewise for breduce. - -(rule (lower (has_type ty (breduce src @ (value_type ty)))) - src) - -(rule 1 (lower (has_type (fits_in_64 ty) (breduce src))) - (value_regs_get_gpr src 0)) - -;; Rules for `bint` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -;; Booleans are stored as all-zeroes (0) or all-ones (-1). We AND out -;; the LSB to give a 0 / 1-valued integer result. 
- -(rule (lower (has_type (fits_in_64 ty) - (bint src))) - (x64_and ty src (RegMemImm.Imm 1))) -(rule 1 (lower (has_type $I128 - (bint src))) - (value_regs - (x64_and $I64 src (RegMemImm.Imm 1)) - (imm $I64 0))) - ;; Rules for `debugtrap` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (rule (lower (debugtrap)) @@ -2505,7 +2417,7 @@ (x64_movzx (ext_mode (ty_bits_u16 ty) 64) (to_amode flags address offset))) ;; But if we know that both the `from` and `to` are 64 bits, we simply load with ;; no extension. -(rule -1 (lower (has_type (ty_int_bool_ref_64 ty) (load flags address offset))) +(rule -1 (lower (has_type (ty_int_ref_64 ty) (load flags address offset))) (x64_mov (to_amode flags address offset))) ;; Also, certain scalar loads have a specific `from` width and extension kind ;; (signed -> `sx`, zeroed -> `zx`). We overwrite the high bits of the 64-bit @@ -2538,8 +2450,8 @@ (rule -2 (lower (has_type (ty_vec128 ty) (load flags address offset))) (x64_movdqu (to_amode flags address offset))) -;; We can load an I128/B128 by doing two 64-bit loads. -(rule -3 (lower (has_type (ty_int_bool_128 _) +;; We can load an I128 by doing two 64-bit loads. +(rule -3 (lower (has_type $I128 (load flags address offset))) (let ((addr_lo Amode (to_amode flags address offset)) (addr_hi Amode (amode_offset addr_lo 8)) @@ -2623,9 +2535,9 @@ (side_effect (x64_xmm_movrm (SseOpcode.Movdqu) (to_amode flags address offset) value))) -;; Stores of I128/B128 values: store the two 64-bit halves separately. +;; Stores of I128 values: store the two 64-bit halves separately. (rule 0 (lower (store flags - value @ (value_type (ty_int_bool_128 _)) + value @ (value_type $I128) address offset)) (let ((value_reg ValueRegs value) @@ -2918,7 +2830,7 @@ (decl cmp_zero_int_bool_ref (Value) ProducesFlags) -(rule 1 (cmp_zero_int_bool_ref val @ (value_type $B1)) +(rule 1 (cmp_zero_int_bool_ref val @ (value_type $I8)) (x64_test (OperandSize.Size8) (RegMemImm.Imm 1) val)) (rule (cmp_zero_int_bool_ref val @ (value_type ty)) (let ((size OperandSize (raw_operand_size_of_type ty)) diff --git a/cranelift/codegen/src/isa/x64/lower.rs b/cranelift/codegen/src/isa/x64/lower.rs index 836b2c005663..da8c7ca62fc1 100644 --- a/cranelift/codegen/src/isa/x64/lower.rs +++ b/cranelift/codegen/src/isa/x64/lower.rs @@ -22,7 +22,6 @@ use target_lexicon::Triple; fn is_int_or_ref_ty(ty: Type) -> bool { match ty { types::I8 | types::I16 | types::I32 | types::I64 | types::R64 => true, - types::B1 | types::B8 | types::B16 | types::B32 | types::B64 => true, types::R32 => panic!("shouldn't have 32-bits refs on x64"), _ => false, } @@ -328,7 +327,6 @@ fn lower_insn_to_regs( let op = ctx.data(insn).opcode(); match op { Opcode::Iconst - | Opcode::Bconst | Opcode::F32const | Opcode::F64const | Opcode::Null @@ -369,10 +367,7 @@ fn lower_insn_to_regs( | Opcode::IsInvalid | Opcode::Uextend | Opcode::Sextend - | Opcode::Breduce - | Opcode::Bextend | Opcode::Ireduce - | Opcode::Bint | Opcode::Debugtrap | Opcode::WideningPairwiseDotProductS | Opcode::Fadd diff --git a/cranelift/codegen/src/isa/x64/lower/isle.rs b/cranelift/codegen/src/isa/x64/lower/isle.rs index 2e624b039c2c..b7e59de895da 100644 --- a/cranelift/codegen/src/isa/x64/lower/isle.rs +++ b/cranelift/codegen/src/isa/x64/lower/isle.rs @@ -549,7 +549,7 @@ impl Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> { #[inline] fn type_register_class(&mut self, ty: Type) -> Option { - if is_int_or_ref_ty(ty) || ty == I128 || ty == B128 { + if is_int_or_ref_ty(ty) || ty == I128 { Some(RegisterClass::Gpr { 
single_register: ty != I128, }) @@ -564,7 +564,6 @@ impl Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> { fn ty_int_bool_or_ref(&mut self, ty: Type) -> Option<()> { match ty { types::I8 | types::I16 | types::I32 | types::I64 | types::R64 => Some(()), - types::B1 | types::B8 | types::B16 | types::B32 | types::B64 => Some(()), types::R32 => panic!("shouldn't have 32-bits refs on x64"), _ => None, } diff --git a/cranelift/codegen/src/isle_prelude.rs b/cranelift/codegen/src/isle_prelude.rs index a9e5ba097c13..f5d6ee1b2907 100644 --- a/cranelift/codegen/src/isle_prelude.rs +++ b/cranelift/codegen/src/isle_prelude.rs @@ -171,7 +171,7 @@ macro_rules! isle_common_prelude_methods { } #[inline] - fn ty_int_bool_ref_scalar_64(&mut self, ty: Type) -> Option { + fn ty_int_ref_scalar_64(&mut self, ty: Type) -> Option { if ty.bits() <= 64 && !ty.is_float() && !ty.is_vector() { Some(ty) } else { @@ -216,33 +216,17 @@ macro_rules! isle_common_prelude_methods { } #[inline] - fn int_bool_fits_in_32(&mut self, ty: Type) -> Option { + fn int_fits_in_32(&mut self, ty: Type) -> Option { match ty { - I8 | I16 | I32 | B8 | B16 | B32 => Some(ty), + I8 | I16 | I32 => Some(ty), _ => None, } } #[inline] - fn ty_int_bool_64(&mut self, ty: Type) -> Option { + fn ty_int_ref_64(&mut self, ty: Type) -> Option { match ty { - I64 | B64 => Some(ty), - _ => None, - } - } - - #[inline] - fn ty_int_bool_ref_64(&mut self, ty: Type) -> Option { - match ty { - I64 | B64 | R64 => Some(ty), - _ => None, - } - } - - #[inline] - fn ty_int_bool_128(&mut self, ty: Type) -> Option { - match ty { - I128 | B128 => Some(ty), + I64 | R64 => Some(ty), _ => None, } } @@ -252,15 +236,6 @@ macro_rules! isle_common_prelude_methods { ty.is_int().then(|| ty) } - #[inline] - fn ty_int_bool(&mut self, ty: Type) -> Option { - if ty.is_int() || ty.is_bool() { - Some(ty) - } else { - None - } - } - #[inline] fn ty_scalar_float(&mut self, ty: Type) -> Option { match ty { diff --git a/cranelift/codegen/src/machinst/helpers.rs b/cranelift/codegen/src/machinst/helpers.rs index 8d4e4e23f07a..ff5c1af0c6f6 100644 --- a/cranelift/codegen/src/machinst/helpers.rs +++ b/cranelift/codegen/src/machinst/helpers.rs @@ -12,7 +12,7 @@ pub fn ty_bits(ty: Type) -> usize { /// Is the type represented by an integer (not float) at the machine level? pub(crate) fn ty_has_int_representation(ty: Type) -> bool { - ty.is_int() || ty.is_bool() || ty.is_ref() + ty.is_int() || ty.is_ref() } /// Is the type represented by a float or vector value at the machine level? diff --git a/cranelift/codegen/src/opts/algebraic.isle b/cranelift/codegen/src/opts/algebraic.isle index 5611f40e0019..5a76941bd2bd 100644 --- a/cranelift/codegen/src/opts/algebraic.isle +++ b/cranelift/codegen/src/opts/algebraic.isle @@ -170,10 +170,6 @@ (if (u8_lt lz lx)) (iadd ty (iadd ty y z) x)) -;; Select's selector input doesn't need bint; remove the redundant op. -(rule (simplify (select ty (bint _ b) x y)) - (subsume (select ty b x y))) - ;; Rematerialize ALU-op-with-imm and iconsts in each block where they're ;; used. This is neutral (add-with-imm) or positive (iconst) for ;; register pressure, and these ops are very cheap. diff --git a/cranelift/codegen/src/prelude.isle b/cranelift/codegen/src/prelude.isle index c2029c8f6837..c469f9fbde16 100644 --- a/cranelift/codegen/src/prelude.isle +++ b/cranelift/codegen/src/prelude.isle @@ -12,7 +12,7 @@ (decl unit () Unit) (extern constructor unit unit) -;; `bool` is declared in `clif.isle`. 
+(type bool (primitive bool)) (extern const $true bool) (extern const $false bool) @@ -139,13 +139,6 @@ ;;;; `cranelift_codegen::ir::Type` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(extern const $B1 Type) -(extern const $B8 Type) -(extern const $B16 Type) -(extern const $B32 Type) -(extern const $B64 Type) -(extern const $B128 Type) - (extern const $I8 Type) (extern const $I16 Type) (extern const $I32 Type) @@ -158,11 +151,6 @@ (extern const $F32 Type) (extern const $F64 Type) -(extern const $B8X16 Type) -(extern const $B16X8 Type) -(extern const $B32X4 Type) -(extern const $B64X2 Type) - (extern const $I8X8 Type) (extern const $I8X16 Type) (extern const $I16X4 Type) @@ -232,10 +220,10 @@ (decl ty_64 (Type) Type) (extern extractor ty_64 ty_64) -;; A pure constructor that only matches scalar booleans, integers, and -;; references that can fit in 64 bits. -(decl pure ty_int_bool_ref_scalar_64 (Type) Type) -(extern constructor ty_int_bool_ref_scalar_64 ty_int_bool_ref_scalar_64) +;; A pure constructor that only matches scalar integers, and references that can +;; fit in 64 bits. +(decl pure ty_int_ref_scalar_64 (Type) Type) +(extern constructor ty_int_ref_scalar_64 ty_int_ref_scalar_64) ;; An extractor that matches 32- and 64-bit types only. (decl ty_32_or_64 (Type) Type) @@ -245,25 +233,13 @@ (decl ty_8_or_16 (Type) Type) (extern extractor ty_8_or_16 ty_8_or_16) -;; An extractor that matches int and bool types that fit in 32 bits. -(decl int_bool_fits_in_32 (Type) Type) -(extern extractor int_bool_fits_in_32 int_bool_fits_in_32) - -;; An extractor that matches I64 or B64. -(decl ty_int_bool_64 (Type) Type) -(extern extractor ty_int_bool_64 ty_int_bool_64) - -;; An extractor that matches I64 or B64 or R64. -(decl ty_int_bool_ref_64 (Type) Type) -(extern extractor ty_int_bool_ref_64 ty_int_bool_ref_64) - -;; An extractor that matches I128 or B128. -(decl ty_int_bool_128 (Type) Type) -(extern extractor ty_int_bool_128 ty_int_bool_128) +;; An extractor that matches int types that fit in 32 bits. +(decl int_fits_in_32 (Type) Type) +(extern extractor int_fits_in_32 int_fits_in_32) -;; An extractor that matches any int or bool. -(decl ty_int_bool (Type) Type) -(extern extractor ty_int_bool ty_int_bool) +;; An extractor that matches I64 or R64. +(decl ty_int_ref_64 (Type) Type) +(extern extractor ty_int_ref_64 ty_int_ref_64) ;; An extractor that only matches integers. (decl ty_int (Type) Type) diff --git a/cranelift/codegen/src/simple_preopt.rs b/cranelift/codegen/src/simple_preopt.rs index 80f2937a76ea..850c60f274b3 100644 --- a/cranelift/codegen/src/simple_preopt.rs +++ b/cranelift/codegen/src/simple_preopt.rs @@ -614,7 +614,7 @@ mod simplify { dfg::ValueDef, immediates, instructions::{Opcode, ValueList}, - types::{B8, I16, I32, I8}, + types::{I16, I32, I8}, }; use std::marker::PhantomData; @@ -861,29 +861,6 @@ mod simplify { } } - InstructionData::CondTrap { .. } - | InstructionData::Branch { .. } - | InstructionData::Ternary { - opcode: Opcode::Select, - .. - } => { - // Fold away a redundant `bint`. 
- let condition_def = { - let args = pos.func.dfg.inst_args(inst); - pos.func.dfg.value_def(args[0]) - }; - if let ValueDef::Result(def_inst, _) = condition_def { - if let InstructionData::Unary { - opcode: Opcode::Bint, - arg: bool_val, - } = pos.func.dfg[def_inst] - { - let args = pos.func.dfg.inst_args_mut(inst); - args[0] = bool_val; - } - } - } - InstructionData::Ternary { opcode: Opcode::Bitselect, args, @@ -898,15 +875,13 @@ mod simplify { // while vselect can be encoded using single BLEND instruction. if let ValueDef::Result(def_inst, _) = pos.func.dfg.value_def(args[0]) { let (cond_val, cond_type) = match pos.func.dfg[def_inst] { - InstructionData::Unary { - opcode: Opcode::RawBitcast, - arg, - } => { - // If controlling mask is raw-bitcasted boolean vector then - // we know each lane is either all zeroes or ones, - // so we can use vselect instruction instead. + InstructionData::IntCompare { .. } + | InstructionData::FloatCompare { .. } => { + // If the controlled mask is from a comparison, the value will be all + // zeros or ones in each output lane. + let arg = args[0]; let arg_type = pos.func.dfg.value_type(arg); - if !arg_type.is_vector() || !arg_type.lane_type().is_bool() { + if !arg_type.is_vector() { return; } (arg, arg_type) @@ -916,13 +891,13 @@ mod simplify { constant_handle, } => { // If each byte of controlling mask is 0x00 or 0xFF then - // we will always bitcast our way to vselect(B8x16, I8x16, I8x16). + // we will always bitcast our way to vselect(I8x16, I8x16). // Bitselect operates at bit level, so the lane types don't matter. let const_data = pos.func.dfg.constants.get(constant_handle); if !const_data.iter().all(|&b| b == 0 || b == 0xFF) { return; } - let new_type = B8.by(old_cond_type.bytes()).unwrap(); + let new_type = I8.by(old_cond_type.bytes()).unwrap(); (pos.ins().raw_bitcast(new_type, args[0]), new_type) } _ => return, diff --git a/cranelift/codegen/src/souper_harvest.rs b/cranelift/codegen/src/souper_harvest.rs index c037f03955be..b036ebcea22b 100644 --- a/cranelift/codegen/src/souper_harvest.rs +++ b/cranelift/codegen/src/souper_harvest.rs @@ -150,11 +150,11 @@ fn harvest_candidate_lhs( a.into() } else { // The only arguments we get that we haven't already - // converted into a souper instruction are `iconst`s and - // `bconst`s. This is because souper only allows + // converted into a souper instruction are `iconst`s. + // This is because souper only allows // constants as operands, and it doesn't allow assigning // constants to a variable name. So we lazily convert - // `iconst`s and `bconst`s into souper operands here, + // `iconst`s into souper operands here, // when they are actually used. match func.dfg.value_def(arg) { ir::ValueDef::Result(inst, 0) => match func.dfg[inst] { @@ -166,20 +166,13 @@ fn harvest_candidate_lhs( r#type: souper_type_of(&func.dfg, arg), }) } - ir::InstructionData::UnaryBool { opcode, imm } => { - debug_assert_eq!(opcode, ir::Opcode::Iconst); - ast::Operand::Constant(ast::Constant { - value: imm.into(), - r#type: souper_type_of(&func.dfg, arg), - }) - } _ => unreachable!( - "only iconst and bconst instructions \ + "only iconst instructions \ aren't in `ir_to_souper_val`" ), }, _ => unreachable!( - "only iconst and bconst instructions \ + "only iconst instructions \ aren't in `ir_to_souper_val`" ), } @@ -487,11 +480,11 @@ fn harvest_candidate_lhs( } // Because Souper doesn't allow constants to be on the right // hand side of an assignment (i.e. 
`%0:i32 = 1234` is - // disallowed) we have to ignore `iconst` and `bconst` + // disallowed) we have to ignore `iconst` // instructions until we process them as operands for some // other instruction. See the `arg` closure above for // details. - (ir::Opcode::Iconst, _) | (ir::Opcode::Bconst, _) => return, + (ir::Opcode::Iconst, _) => return, _ => ast::AssignmentRhs::Var, } } @@ -533,7 +526,7 @@ fn harvest_candidate_lhs( fn souper_type_of(dfg: &ir::DataFlowGraph, val: ir::Value) -> Option { let ty = dfg.value_type(val); - assert!(ty.is_int() || ty.is_bool()); + assert!(ty.is_int()); assert_eq!(ty.lane_count(), 1); Some(ast::Type { width: ty.bits().try_into().unwrap(), diff --git a/cranelift/codegen/src/verifier/mod.rs b/cranelift/codegen/src/verifier/mod.rs index 7f7cd88f123f..91dfa3ec60cd 100644 --- a/cranelift/codegen/src/verifier/mod.rs +++ b/cranelift/codegen/src/verifier/mod.rs @@ -768,7 +768,6 @@ impl<'a> Verifier<'a> { | UnaryImm { .. } | UnaryIeee32 { .. } | UnaryIeee64 { .. } - | UnaryBool { .. } | Binary { .. } | BinaryImm8 { .. } | BinaryImm64 { .. } @@ -1514,7 +1513,7 @@ impl<'a> Verifier<'a> { ir::InstructionData::Unary { opcode, arg } => { let arg_type = self.func.dfg.value_type(arg); match opcode { - Opcode::Bextend | Opcode::Uextend | Opcode::Sextend | Opcode::Fpromote => { + Opcode::Uextend | Opcode::Sextend | Opcode::Fpromote => { if arg_type.lane_count() != ctrl_type.lane_count() { return errors.nonfatal(( inst, @@ -1536,7 +1535,7 @@ impl<'a> Verifier<'a> { )); } } - Opcode::Breduce | Opcode::Ireduce | Opcode::Fdemote => { + Opcode::Ireduce | Opcode::Fdemote => { if arg_type.lane_count() != ctrl_type.lane_count() { return errors.nonfatal(( inst, diff --git a/cranelift/codegen/src/write.rs b/cranelift/codegen/src/write.rs index 08a723c16fc6..1e54805c5622 100644 --- a/cranelift/codegen/src/write.rs +++ b/cranelift/codegen/src/write.rs @@ -393,7 +393,6 @@ pub fn write_operands(w: &mut dyn Write, dfg: &DataFlowGraph, inst: Inst) -> fmt UnaryImm { imm, .. } => write!(w, " {}", imm), UnaryIeee32 { imm, .. } => write!(w, " {}", imm), UnaryIeee64 { imm, .. } => write!(w, " {}", imm), - UnaryBool { imm, .. } => write!(w, " {}", imm), UnaryGlobalValue { global_value, .. } => write!(w, " {}", global_value), UnaryConst { constant_handle, .. @@ -539,7 +538,6 @@ pub fn write_operands(w: &mut dyn Write, dfg: &DataFlowGraph, inst: Inst) -> fmt UnaryImm { imm, .. } => imm.to_string(), UnaryIeee32 { imm, .. } => imm.to_string(), UnaryIeee64 { imm, .. } => imm.to_string(), - UnaryBool { imm, .. } => imm.to_string(), UnaryConst { constant_handle, .. } => constant_handle.to_string(), diff --git a/cranelift/docs/ir.md b/cranelift/docs/ir.md index 4f52ee30f614..66d787295a94 100644 --- a/cranelift/docs/ir.md +++ b/cranelift/docs/ir.md @@ -138,25 +138,6 @@ All SSA values have a type which determines the size and shape (for SIMD vectors) of the value. Many instructions are polymorphic -- they can operate on different types. -### Boolean types - -Boolean values are either true or false. - -The `b1` type represents an abstract boolean value. It can only exist as -an SSA value, and can't be directly stored in memory. It can, however, be -converted into an integer with value 0 or 1 by the `bint` instruction (and -converted back with `icmp_imm` with 0). - -Several larger boolean types are also defined, primarily to be used as SIMD -element types. They can be stored in memory, and are represented as either all -zero bits or all one bits. 
- -- b1 -- b8 -- b16 -- b32 -- b64 - ### Integer types Integer values have a fixed size and can be interpreted as either signed or @@ -219,8 +200,8 @@ instructions either. The verifier enforces these rules. ### SIMD vector types A SIMD vector type represents a vector of values from one of the scalar types -(boolean, integer, and floating point). Each scalar value in a SIMD type is -called a *lane*. The number of lanes must be a power of two in the range 2-256. +(integer, and floating point). Each scalar value in a SIMD type is called a +*lane*. The number of lanes must be a power of two in the range 2-256. i%Bx%N A SIMD vector of integers. The lane type `iB` is one of the integer @@ -247,14 +228,6 @@ f64x%N The size of a `f64` vector in memory is :math:`8N` bytes. -b1x%N - A boolean SIMD vector. - - Boolean vectors are used when comparing SIMD vectors. For example, - comparing two `i32x4` values would produce a `b1x4` result. - - Like the `b1` type, a boolean vector cannot be stored in memory. - ### Pseudo-types and type classes These are not concrete types, but convenient names used to refer to real types @@ -314,12 +287,6 @@ ieee64 A 64-bit immediate floating point number in the IEEE 754-2008 binary64 interchange format. All bit patterns are allowed. -bool - A boolean immediate value, either false or true. - - In the textual format, `bool` immediates appear as 'false' - and 'true'. - intcc An integer condition code. See the `icmp` instruction for details. @@ -790,10 +757,9 @@ an instruction is required to load a constant into an SSA value: `iconst`, ### Bitwise operations -The bitwise operations and operate on any value type: Integers, floating point -numbers, and booleans. When operating on integer or floating point types, the -bitwise operations are working on the binary representation of the values. When -operating on boolean values, the bitwise operations work as logical operators. +The bitwise operations and operate on any value type: Integers, and floating +point numbers. When operating on integer or floating point types, the bitwise +operations are working on the binary representation of the values. The shift and rotate operations only work on integer types (scalar and vector). The shift amount does not have to be the same type as the value being shifted. diff --git a/cranelift/filetests/src/function_runner.rs b/cranelift/filetests/src/function_runner.rs index 248a8e8b8eca..51b9dbab0d89 100644 --- a/cranelift/filetests/src/function_runner.rs +++ b/cranelift/filetests/src/function_runner.rs @@ -3,8 +3,7 @@ use anyhow::{anyhow, Result}; use core::mem; use cranelift_codegen::data_value::DataValue; use cranelift_codegen::ir::{ - condcodes::IntCC, ExternalName, Function, InstBuilder, Signature, UserExternalName, - UserFuncName, + ExternalName, Function, InstBuilder, Signature, UserExternalName, UserFuncName, }; use cranelift_codegen::isa::TargetIsa; use cranelift_codegen::{ir, settings, CodegenError, Context}; @@ -357,7 +356,7 @@ impl UnboxedValues { // Store the argument values into `values_vec`. 
for ((arg, slot), param) in arguments.iter().zip(&mut values_vec).zip(&signature.params) { assert!( - arg.ty() == param.value_type || arg.is_vector() || arg.is_bool(), + arg.ty() == param.value_type || arg.is_vector(), "argument type mismatch: {} != {}", arg.ty(), param.value_type @@ -425,9 +424,6 @@ fn make_trampoline(name: UserFuncName, signature: &ir::Signature, isa: &dyn Targ .iter() .enumerate() .map(|(i, param)| { - // Calculate the type to load from memory, using integers for booleans (no encodings). - let ty = param.value_type.coerce_bools_to_ints(); - // We always store vector types in little-endian byte order as DataValue. let mut flags = ir::MemFlags::trusted(); if param.value_type.is_vector() { @@ -435,32 +431,12 @@ fn make_trampoline(name: UserFuncName, signature: &ir::Signature, isa: &dyn Targ } // Load the value. - let loaded = builder.ins().load( - ty, + builder.ins().load( + param.value_type, flags, values_vec_ptr_val, (i * UnboxedValues::SLOT_SIZE) as i32, - ); - - // For booleans, we want to type-convert the loaded integer into a boolean and ensure - // that we are using the architecture's canonical boolean representation (presumably - // comparison will emit this). - if param.value_type.is_bool() { - let b = builder.ins().icmp_imm(IntCC::NotEqual, loaded, 0); - - // icmp_imm always produces a `b1`, `bextend` it if we need a larger bool - if param.value_type.bits() > 1 { - builder.ins().bextend(param.value_type, b) - } else { - b - } - } else if param.value_type.is_bool_vector() { - let zero_constant = builder.func.dfg.constants.insert(vec![0; 16].into()); - let zero_vec = builder.ins().vconst(ty, zero_constant); - builder.ins().icmp(IntCC::NotEqual, loaded, zero_vec) - } else { - loaded - } + ) }) .collect::>(); @@ -473,13 +449,6 @@ fn make_trampoline(name: UserFuncName, signature: &ir::Signature, isa: &dyn Targ // Store the return values into `values_vec`. let results = builder.func.dfg.inst_results(call).to_vec(); for ((i, value), param) in results.iter().enumerate().zip(&signature.returns) { - // Before storing return values, we convert booleans to their integer representation. - let value = if param.value_type.lane_type().is_bool() { - let ty = param.value_type.lane_type().as_int(); - builder.ins().bint(ty, *value) - } else { - *value - }; // We always store vector types in little-endian byte order as DataValue. let mut flags = ir::MemFlags::trusted(); if param.value_type.is_vector() { @@ -488,7 +457,7 @@ fn make_trampoline(name: UserFuncName, signature: &ir::Signature, isa: &dyn Targ // Store the value. 
builder.ins().store( flags, - value, + *value, values_vec_ptr_val, (i * UnboxedValues::SLOT_SIZE) as i32, ); @@ -514,10 +483,10 @@ mod test { let code = String::from( " test run - function %test() -> b8 { + function %test() -> i8 { block0: nop - v1 = bconst.b8 true + v1 = iconst.i8 -1 return v1 }", ); @@ -535,17 +504,17 @@ mod test { let compiled = compiler.compile().unwrap(); let trampoline = compiled.get_trampoline(&function).unwrap(); let returned = trampoline.call(&[]); - assert_eq!(returned, vec![DataValue::B(true)]) + assert_eq!(returned, vec![DataValue::I8(-1)]) } #[test] fn trampolines() { let function = parse( " - function %test(f32, i8, i64x2, b1) -> f32x4, b64 { - block0(v0: f32, v1: i8, v2: i64x2, v3: b1): + function %test(f32, i8, i64x2, i8) -> f32x4, i64 { + block0(v0: f32, v1: i8, v2: i64x2, v3: i8): v4 = vconst.f32x4 [0x0.1 0x0.2 0x0.3 0x0.4] - v5 = bconst.b64 true + v5 = iconst.i64 -1 return v4, v5 }", ); @@ -556,19 +525,18 @@ mod test { &function.signature, compiler.module.isa(), ); + println!("{}", trampoline); assert!(format!("{}", trampoline).ends_with( - "sig0 = (f32, i8, i64x2, b1) -> f32x4, b64 fast + "sig0 = (f32, i8, i64x2, i8) -> f32x4, i64 fast block0(v0: i64, v1: i64): v2 = load.f32 notrap aligned v1 v3 = load.i8 notrap aligned v1+16 v4 = load.i64x2 notrap aligned little v1+32 v5 = load.i8 notrap aligned v1+48 - v6 = icmp_imm ne v5, 0 - v7, v8 = call_indirect sig0, v0(v2, v3, v4, v6) - store notrap aligned little v7, v1 - v9 = bint.i64 v8 - store notrap aligned v9, v1+16 + v6, v7 = call_indirect sig0, v0(v2, v3, v4, v5) + store notrap aligned little v6, v1 + store notrap aligned v7, v1+16 return } " diff --git a/cranelift/frontend/src/frontend.rs b/cranelift/frontend/src/frontend.rs index 5df6b77e6c91..0eb9b606b4cb 100644 --- a/cranelift/frontend/src/frontend.rs +++ b/cranelift/frontend/src/frontend.rs @@ -1012,13 +1012,13 @@ impl<'a> FunctionBuilder<'a> { use IntCC::*; let (zero_cc, empty_imm) = match int_cc { // - Equal => (Equal, true), - NotEqual => (NotEqual, false), + Equal => (Equal, 1), + NotEqual => (NotEqual, 0), - UnsignedLessThan => (SignedLessThan, false), - UnsignedGreaterThanOrEqual => (SignedGreaterThanOrEqual, true), - UnsignedGreaterThan => (SignedGreaterThan, false), - UnsignedLessThanOrEqual => (SignedLessThanOrEqual, true), + UnsignedLessThan => (SignedLessThan, 0), + UnsignedGreaterThanOrEqual => (SignedGreaterThanOrEqual, 1), + UnsignedGreaterThan => (SignedGreaterThan, 0), + UnsignedLessThanOrEqual => (SignedLessThanOrEqual, 1), SignedLessThan | SignedGreaterThanOrEqual @@ -1029,7 +1029,7 @@ impl<'a> FunctionBuilder<'a> { }; if size == 0 { - return self.ins().bconst(types::B1, empty_imm); + return self.ins().iconst(types::I8, empty_imm); } // Future work could consider expanding this to handle more-complex scenarios. 
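When `size == 0`, the comparison above is decided before any code is emitted: two empty memory regions are equal, so the equality-like conditions produce 1 and the strict orderings produce 0, and the result is materialized as a single `iconst.i8`. A small Rust sketch of that table, with a local enum standing in for `IntCC`:

    enum Cc { Equal, NotEqual, ULt, UGe, UGt, ULe }

    // Empty regions compare equal, so each condition collapses to a constant,
    // matching the `empty_imm` values chosen above.
    fn empty_compare(cc: Cc) -> i8 {
        match cc {
            Cc::Equal | Cc::UGe | Cc::ULe => 1,    // "equal" satisfies ==, >=, <=
            Cc::NotEqual | Cc::ULt | Cc::UGt => 0, // but not !=, <, >
        }
    }

    fn main() {
        assert_eq!(empty_compare(Cc::Equal), 1);
        assert_eq!(empty_compare(Cc::UGt), 0);
    }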
@@ -1562,8 +1562,8 @@ block0: v1 -> v4 v3 = iconst.i64 0 v0 -> v3 - v2 = bconst.b1 true - return v2 ; v2 = true", + v2 = iconst.i8 1 + return v2 ; v2 = 1", |builder, target, x, y| { builder.emit_small_memory_compare( target.frontend_config(), @@ -1718,7 +1718,7 @@ block0: .expect("Should be able to create backend with default flags"); let mut sig = Signature::new(target.default_call_conv()); - sig.returns.push(AbiParam::new(B1)); + sig.returns.push(AbiParam::new(I8)); let mut fn_ctx = FunctionBuilderContext::new(); let mut func = Function::with_name_signature(UserFuncName::testcase("sample"), sig); @@ -1744,7 +1744,7 @@ block0: check( &func, - &format!("function %sample() -> b1 system_v {{{}\n}}\n", expected), + &format!("function %sample() -> i8 system_v {{{}\n}}\n", expected), ); } @@ -1752,7 +1752,7 @@ block0: fn undef_vector_vars() { let mut sig = Signature::new(CallConv::SystemV); sig.returns.push(AbiParam::new(I8X16)); - sig.returns.push(AbiParam::new(B8X16)); + sig.returns.push(AbiParam::new(I8X16)); sig.returns.push(AbiParam::new(F32X4)); let mut fn_ctx = FunctionBuilderContext::new(); @@ -1765,7 +1765,7 @@ block0: let b = Variable::new(1); let c = Variable::new(2); builder.declare_var(a, I8X16); - builder.declare_var(b, B8X16); + builder.declare_var(b, I8X16); builder.declare_var(c, F32X4); builder.switch_to_block(block0); @@ -1780,14 +1780,14 @@ block0: check( &func, - "function %sample() -> i8x16, b8x16, f32x4 system_v { + "function %sample() -> i8x16, i8x16, f32x4 system_v { const0 = 0x00000000000000000000000000000000 block0: v5 = f32const 0.0 v6 = splat.f32x4 v5 ; v5 = 0.0 v2 -> v6 - v4 = vconst.b8x16 const0 + v4 = vconst.i8x16 const0 v1 -> v4 v3 = vconst.i8x16 const0 v0 -> v3 diff --git a/cranelift/frontend/src/ssa.rs b/cranelift/frontend/src/ssa.rs index d271fb06eebd..64331a91687c 100644 --- a/cranelift/frontend/src/ssa.rs +++ b/cranelift/frontend/src/ssa.rs @@ -142,8 +142,6 @@ enum Call { fn emit_zero(ty: Type, mut cur: FuncCursor) -> Value { if ty.is_int() { cur.ins().iconst(ty, 0) - } else if ty.is_bool() { - cur.ins().bconst(ty, false) } else if ty == F32 { cur.ins().f32const(Ieee32::with_bits(0)) } else if ty == F64 { @@ -152,7 +150,7 @@ fn emit_zero(ty: Type, mut cur: FuncCursor) -> Value { cur.ins().null(ty) } else if ty.is_vector() { let scalar_ty = ty.lane_type(); - if scalar_ty.is_int() || scalar_ty.is_bool() { + if scalar_ty.is_int() { let zero = cur.func.dfg.constants.insert( core::iter::repeat(0) .take(ty.bytes().try_into().unwrap()) @@ -1167,12 +1165,12 @@ mod tests { let i32_var = Variable::new(0); let f32_var = Variable::new(1); let f64_var = Variable::new(2); - let b1_var = Variable::new(3); + let i8_var = Variable::new(3); let f32x4_var = Variable::new(4); ssa.use_var(&mut func, i32_var, I32, block0); ssa.use_var(&mut func, f32_var, F32, block0); ssa.use_var(&mut func, f64_var, F64, block0); - ssa.use_var(&mut func, b1_var, B1, block0); + ssa.use_var(&mut func, i8_var, I8, block0); ssa.use_var(&mut func, f32x4_var, F32X4, block0); assert_eq!(func.dfg.num_block_params(block0), 0); } diff --git a/cranelift/fuzzgen/src/function_generator.rs b/cranelift/fuzzgen/src/function_generator.rs index 3bd15f00e621..0cf404e8faa1 100644 --- a/cranelift/fuzzgen/src/function_generator.rs +++ b/cranelift/fuzzgen/src/function_generator.rs @@ -714,14 +714,14 @@ const OPCODE_SIGNATURES: &'static [( #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] (Opcode::FcvtFromSint, &[I128], &[F64], insert_opcode), // Fcmp - (Opcode::Fcmp, &[F32, F32], &[B1], insert_cmp), - 
(Opcode::Fcmp, &[F64, F64], &[B1], insert_cmp), + (Opcode::Fcmp, &[F32, F32], &[I8], insert_cmp), + (Opcode::Fcmp, &[F64, F64], &[I8], insert_cmp), // Icmp - (Opcode::Icmp, &[I8, I8], &[B1], insert_cmp), - (Opcode::Icmp, &[I16, I16], &[B1], insert_cmp), - (Opcode::Icmp, &[I32, I32], &[B1], insert_cmp), - (Opcode::Icmp, &[I64, I64], &[B1], insert_cmp), - (Opcode::Icmp, &[I128, I128], &[B1], insert_cmp), + (Opcode::Icmp, &[I8, I8], &[I8], insert_cmp), + (Opcode::Icmp, &[I16, I16], &[I8], insert_cmp), + (Opcode::Icmp, &[I32, I32], &[I8], insert_cmp), + (Opcode::Icmp, &[I64, I64], &[I8], insert_cmp), + (Opcode::Icmp, &[I128, I128], &[I8], insert_cmp), // Stack Access (Opcode::StackStore, &[I8], &[], insert_stack_store), (Opcode::StackStore, &[I16], &[], insert_stack_store), @@ -785,8 +785,6 @@ const OPCODE_SIGNATURES: &'static [( // Float Consts (Opcode::F32const, &[], &[F32], insert_const), (Opcode::F64const, &[], &[F64], insert_const), - // Bool Consts - (Opcode::Bconst, &[], &[B1], insert_const), // Call (Opcode::Call, &[], &[], insert_call), ]; @@ -900,7 +898,6 @@ where // TODO: It would be nice if we could get these directly from cranelift let scalars = [ // IFLAGS, FFLAGS, - B1, // B8, B16, B32, B64, B128, I8, I16, I32, I64, I128, F32, F64, // R32, R64, ]; @@ -1013,7 +1010,6 @@ where }; builder.ins().iconst(ty, imm64) } - ty if ty.is_bool() => builder.ins().bconst(ty, bool::arbitrary(self.u)?), // f{32,64}::arbitrary does not generate a bunch of important values // such as Signaling NaN's / NaN's with payload, so generate floats from integers. F32 => builder @@ -1095,7 +1091,7 @@ where let left_args = self.generate_values_for_block(builder, left)?; let right_args = self.generate_values_for_block(builder, right)?; - let condbr_types = [I8, I16, I32, I64, I128, B1]; + let condbr_types = [I8, I16, I32, I64, I128]; let _type = *self.u.choose(&condbr_types[..])?; let val = builder.use_var(self.get_variable_of_type(_type)?); diff --git a/cranelift/fuzzgen/src/lib.rs b/cranelift/fuzzgen/src/lib.rs index 69355dc05e04..0a44d09c585d 100644 --- a/cranelift/fuzzgen/src/lib.rs +++ b/cranelift/fuzzgen/src/lib.rs @@ -129,7 +129,6 @@ where }; DataValue::from_integer(imm, ty)? } - ty if ty.is_bool() => DataValue::B(bool::arbitrary(self.u)?), // f{32,64}::arbitrary does not generate a bunch of important values // such as Signaling NaN's / NaN's with payload, so generate floats from integers. F32 => DataValue::F32(Ieee32::with_bits(u32::arbitrary(self.u)?)), diff --git a/cranelift/interpreter/src/frame.rs b/cranelift/interpreter/src/frame.rs index ede48e9fefe3..4270225af3de 100644 --- a/cranelift/interpreter/src/frame.rs +++ b/cranelift/interpreter/src/frame.rs @@ -194,7 +194,7 @@ mod tests { ValueRef::from_u32(6), ]; let values = vec![ - DataValue::B(true), + DataValue::I8(1), DataValue::I8(42), DataValue::F32(Ieee32::from(0.42)), ]; @@ -214,7 +214,7 @@ mod tests { let func = function("function %test(i32) -> i32 { block0(v10:i32): return v10 }"); let mut frame = Frame::new(&func); let old_ssa_value_refs = [ValueRef::from_u32(9), ValueRef::from_u32(10)]; - let values = vec![DataValue::B(true), DataValue::F64(Ieee64::from(0.0))]; + let values = vec![DataValue::I8(1), DataValue::F64(Ieee64::from(0.0))]; frame.set_all(&old_ssa_value_refs, values.clone()); // Rename the old SSA values to the new values. 
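The fuzzgen changes above keep producing float constants from raw integer bits because `f32::arbitrary` / `f64::arbitrary` rarely yield interesting NaNs, for example NaNs carrying a payload. A short Rust illustration of the kind of value bit-level generation reaches (the bit pattern is an arbitrary example):

    fn main() {
        // 0x7fc0_1234: exponent all ones, quiet bit set, nonzero payload,
        // i.e. a quiet NaN with a payload. Uniform float generation is very
        // unlikely to produce this, but generating the bits directly is trivial.
        let x = f32::from_bits(0x7fc0_1234);
        assert!(x.is_nan());
        assert_eq!(x.to_bits(), 0x7fc0_1234); // the payload survives the round trip
    }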
@@ -232,7 +232,7 @@ mod tests { let func = function("function %test(i32) -> i32 { block0(v10:i32): return v10 }"); let mut frame = Frame::new(&func); let old_ssa_value_refs = [ValueRef::from_u32(1), ValueRef::from_u32(9)]; - let values = vec![DataValue::B(true), DataValue::F64(Ieee64::from(f64::NAN))]; + let values = vec![DataValue::I8(1), DataValue::F64(Ieee64::from(f64::NAN))]; frame.set_all(&old_ssa_value_refs, values.clone()); // Rename the old SSA values to the new values. diff --git a/cranelift/interpreter/src/interpreter.rs b/cranelift/interpreter/src/interpreter.rs index eb619c5f7187..eb1af7b4e312 100644 --- a/cranelift/interpreter/src/interpreter.rs +++ b/cranelift/interpreter/src/interpreter.rs @@ -510,7 +510,7 @@ impl<'a> State<'a, DataValue> for InterpreterState<'a> { // We start with a sentinel value that will fail if we try to load / add to it // without resolving the base GV First. - let mut current_val = DataValue::B(false); + let mut current_val = DataValue::I8(0); let mut action_stack = vec![ResolveAction::Resolve(gv)]; loop { @@ -639,7 +639,7 @@ mod tests { // filetest infrastructure. #[test] fn sanity() { - let code = "function %test() -> b1 { + let code = "function %test() -> i8 { block0: v0 = iconst.i32 1 v1 = iadd_imm v0, 1 @@ -657,7 +657,7 @@ mod tests { .unwrap() .unwrap_return(); - assert_eq!(result, vec![DataValue::B(true)]) + assert_eq!(result, vec![DataValue::I8(1)]) } // We don't have a way to check for traps with the current filetest infrastructure @@ -750,7 +750,7 @@ mod tests { #[test] fn fuel() { - let code = "function %test() -> b1 { + let code = "function %test() -> i8 { block0: v0 = iconst.i32 1 v1 = iadd_imm v0, 1 @@ -1000,7 +1000,7 @@ mod tests { #[test] fn heap_sanity_test() { let code = " - function %heap_load_store(i64 vmctx) -> b1 { + function %heap_load_store(i64 vmctx) -> i8 { gv0 = vmctx gv1 = load.i64 notrap aligned gv0+0 ; gv2/3 do nothing, but makes sure we understand the iadd_imm mechanism @@ -1039,7 +1039,7 @@ mod tests { .unwrap() .unwrap_return(); - assert_eq!(result, vec![DataValue::B(true)]) + assert_eq!(result, vec![DataValue::I8(1)]) } #[test] diff --git a/cranelift/interpreter/src/step.rs b/cranelift/interpreter/src/step.rs index 8e93fdf1b981..51ea6566a2ff 100644 --- a/cranelift/interpreter/src/step.rs +++ b/cranelift/interpreter/src/step.rs @@ -29,7 +29,6 @@ fn validate_signature_params(sig: &[AbiParam], args: &[impl Value]) -> bool { // but we don't have enough information there either. // // Ideally the user has run the verifier and caught this properly... - (a, b) if a.is_bool() && b.is_bool() => true, (a, b) if a.is_vector() && b.is_vector() => true, (a, b) => a == b, }) @@ -108,13 +107,12 @@ where .get(imm) .unwrap() .as_slice(); - match ctrl_ty.bytes() { + match mask.len() { 16 => DataValue::V128(mask.try_into().expect("a 16-byte vector mask")), 8 => DataValue::V64(mask.try_into().expect("an 8-byte vector mask")), - length => panic!("unexpected Shuffle mask length {}", length), + length => panic!("unexpected Shuffle mask length {}", mask.len()), } } - InstructionData::UnaryBool { imm, .. } => DataValue::from(imm), // 8-bit. InstructionData::BinaryImm8 { imm, .. } | InstructionData::TernaryImm8 { imm, .. } => { DataValue::from(imm as i8) // Note the switch from unsigned to signed. 
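The `imm as i8` cast noted above keeps the immediate's bit pattern and only changes how it is interpreted, which is exactly the unsigned-to-signed switch the comment mentions. A one-line Rust check (the values are arbitrary):

    fn main() {
        let imm: u8 = 0xff;
        assert_eq!(imm as i8, -1);     // same bits, now read as signed
        assert_eq!(0x7fu8 as i8, 127); // values below 0x80 are unchanged
    }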
@@ -552,7 +550,6 @@ where Opcode::Iconst => assign(Value::int(imm().into_int()?, ctrl_ty)?), Opcode::F32const => assign(imm()), Opcode::F64const => assign(imm()), - Opcode::Bconst => assign(imm()), Opcode::Vconst => assign(imm()), Opcode::Null => unimplemented!("Null"), Opcode::Nop => ControlFlow::Continue, @@ -754,7 +751,7 @@ where Opcode::IaddCout => { let sum = Value::add(arg(0)?, arg(1)?)?; let carry = Value::lt(&sum, &arg(0)?)? && Value::lt(&sum, &arg(1)?)?; - assign_multiple(&[sum, Value::bool(carry, types::B1)?]) + assign_multiple(&[sum, Value::bool(carry, false, types::I8)?]) } Opcode::IaddIfcout => unimplemented!("IaddIfcout"), Opcode::IaddCarry => { @@ -763,7 +760,7 @@ where sum = Value::add(sum, Value::int(1, ctrl_ty)?)? } let carry = Value::lt(&sum, &arg(0)?)? && Value::lt(&sum, &arg(1)?)?; - assign_multiple(&[sum, Value::bool(carry, types::B1)?]) + assign_multiple(&[sum, Value::bool(carry, false, types::I8)?]) } Opcode::IaddIfcarry => unimplemented!("IaddIfcarry"), Opcode::IsubBin => choose( @@ -775,7 +772,7 @@ where Opcode::IsubBout => { let sum = Value::sub(arg(0)?, arg(1)?)?; let borrow = Value::lt(&arg(0)?, &arg(1)?)?; - assign_multiple(&[sum, Value::bool(borrow, types::B1)?]) + assign_multiple(&[sum, Value::bool(borrow, false, types::I8)?]) } Opcode::IsubIfbout => unimplemented!("IsubIfbout"), Opcode::IsubBorrow => { @@ -786,7 +783,7 @@ where }; let borrow = Value::lt(&arg(0)?, &rhs)?; let sum = Value::sub(arg(0)?, rhs)?; - assign_multiple(&[sum, Value::bool(borrow, types::B1)?]) + assign_multiple(&[sum, Value::bool(borrow, false, types::I8)?]) } Opcode::IsubIfborrow => unimplemented!("IsubIfborrow"), Opcode::Band => binary(Value::and, arg(0)?, arg(1)?)?, @@ -844,6 +841,7 @@ where .map(|(x, y)| { V::bool( fcmp(inst.fp_cond_code().unwrap(), &x, &y).unwrap(), + ctrl_ty.is_vector(), ctrl_ty.lane_type().as_bool(), ) }) @@ -946,19 +944,15 @@ where // return a 1-bit boolean value. Opcode::Trueif => choose( state.has_iflag(inst.cond_code().unwrap()), - Value::bool(true, types::B1)?, - Value::bool(false, types::B1)?, + Value::bool(true, false, types::I8)?, + Value::bool(false, false, types::I8)?, ), Opcode::Trueff => choose( state.has_fflag(inst.fp_cond_code().unwrap()), - Value::bool(true, types::B1)?, - Value::bool(false, types::B1)?, + Value::bool(true, false, types::I8)?, + Value::bool(false, false, types::I8)?, ), - Opcode::Bitcast - | Opcode::RawBitcast - | Opcode::ScalarToVector - | Opcode::Breduce - | Opcode::Bextend => { + Opcode::Bitcast | Opcode::RawBitcast | Opcode::ScalarToVector => { let input_ty = inst_context.type_of(inst_context.args()[0]).unwrap(); let arg0 = extractlanes(&arg(0)?, input_ty)?; @@ -974,11 +968,6 @@ where arg(0)?, ValueConversionKind::Truncate(ctrl_ty), )?), - Opcode::Bint => { - let bool = arg(0)?.into_bool()?; - let int = if bool { 1 } else { 0 }; - assign(Value::int(int, ctrl_ty)?) - } Opcode::Snarrow | Opcode::Unarrow | Opcode::Uunarrow => { let arg0 = extractlanes(&arg(0)?, ctrl_ty)?; let arg1 = extractlanes(&arg(1)?, ctrl_ty)?; @@ -1014,7 +1003,7 @@ where let bool_ty = ctrl_ty.as_bool_pedantic(); let lanes = extractlanes(&bool, bool_ty)? .into_iter() - .map(|lane| lane.convert(ValueConversionKind::Exact(ctrl_ty.lane_type()))) + .map(|lane| lane.convert(ValueConversionKind::Mask(ctrl_ty.lane_type()))) .collect::>>()?; vectorizelanes(&lanes, ctrl_ty)? }), @@ -1046,7 +1035,7 @@ where new[i] = b[mask[i] as usize - a.len()]; } // else leave as 0. } - assign(Value::vector(new, ctrl_ty)?) + assign(Value::vector(new, types::I8X16)?) 
} Opcode::Swizzle => { let x = Value::into_array(&arg(0)?)?; @@ -1092,18 +1081,18 @@ where Opcode::Vsplit => unimplemented!("Vsplit"), Opcode::Vconcat => unimplemented!("Vconcat"), Opcode::Vselect => assign(vselect(&arg(0)?, &arg(1)?, &arg(2)?, ctrl_ty)?), - Opcode::VanyTrue => assign(fold_vector( - arg(0)?, - ctrl_ty, - V::bool(false, types::B1)?, - |acc, lane| acc.or(lane), - )?), - Opcode::VallTrue => assign(fold_vector( - arg(0)?, - ctrl_ty, - V::bool(true, types::B1)?, - |acc, lane| acc.and(lane), - )?), + Opcode::VanyTrue => { + let lane_ty = ctrl_ty.lane_type(); + let init = V::bool(false, true, lane_ty)?; + let any = fold_vector(arg(0)?, ctrl_ty, init.clone(), |acc, lane| acc.or(lane))?; + assign(V::bool(!V::eq(&any, &init)?, false, types::I8)?) + } + Opcode::VallTrue => { + let lane_ty = ctrl_ty.lane_type(); + let init = V::bool(true, true, lane_ty)?; + let all = fold_vector(arg(0)?, ctrl_ty, init.clone(), |acc, lane| acc.and(lane))?; + assign(V::bool(V::eq(&all, &init)?, false, types::I8)?) + } Opcode::SwidenLow | Opcode::SwidenHigh | Opcode::UwidenLow | Opcode::UwidenHigh => { let new_type = ctrl_ty.merge_lanes().unwrap(); let conv_type = match inst.opcode() { @@ -1426,6 +1415,7 @@ where &right.clone().convert(ValueConversionKind::ToUnsigned)?, )?, }, + ctrl_ty.is_vector(), bool_ty, )?) }; @@ -1489,10 +1479,10 @@ where } let iterations = match lane_type { - types::I8 | types::B1 | types::B8 => 1, - types::I16 | types::B16 => 2, - types::I32 | types::B32 | types::F32 => 4, - types::I64 | types::B64 | types::F64 => 8, + types::I8 => 1, + types::I16 => 2, + types::I32 | types::F32 => 4, + types::I64 | types::F64 => 8, _ => unimplemented!("vectors with lanes wider than 64-bits are currently unsupported."), }; @@ -1503,9 +1493,7 @@ where lane += (x[((i * iterations) + j) as usize] as i128) << (8 * j); } - let lane_val: V = if lane_type.is_bool() { - Value::bool(lane != 0, lane_type)? - } else if lane_type.is_float() { + let lane_val: V = if lane_type.is_float() { Value::float(lane as u64, lane_type)? } else { Value::int(lane, lane_type)? @@ -1528,10 +1516,10 @@ where let lane_type = vector_type.lane_type(); let iterations = match lane_type { - types::I8 | types::B1 | types::B8 => 1, - types::I16 | types::B16 => 2, - types::I32 | types::B32 | types::F32 => 4, - types::I64 | types::B64 | types::F64 => 8, + types::I8 => 1, + types::I16 => 2, + types::I32 | types::F32 => 4, + types::I64 | types::F64 => 8, _ => unimplemented!("vectors with lanes wider than 64-bits are currently unsupported."), }; let mut result: [u8; 16] = [0; 16]; diff --git a/cranelift/interpreter/src/value.rs b/cranelift/interpreter/src/value.rs index 1bbb68a1d77c..0f5358e18424 100644 --- a/cranelift/interpreter/src/value.rs +++ b/cranelift/interpreter/src/value.rs @@ -20,7 +20,7 @@ pub trait Value: Clone + From { fn into_float(self) -> ValueResult; fn is_float(&self) -> bool; fn is_nan(&self) -> ValueResult; - fn bool(b: bool, ty: Type) -> ValueResult; + fn bool(b: bool, vec_elem: bool, ty: Type) -> ValueResult; fn into_bool(self) -> ValueResult; fn vector(v: [u8; 16], ty: Type) -> ValueResult; fn into_array(&self) -> ValueResult<[u8; 16]>; @@ -152,6 +152,8 @@ pub enum ValueConversionKind { /// Converts an integer into a boolean, zero integers are converted into a /// `false`, while other integers are converted into `true`. Booleans are passed through. ToBoolean, + /// Converts an integer into either -1 or zero. + Mask(Type), } /// Helper for creating match expressions over [DataValue]. 
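The `Mask` conversion documented above maps any nonzero integer to all ones and zero to zero, which is the -1/0 encoding used for vector boolean lanes (scalar boolean results instead use 1/0). A minimal Rust sketch of the conversion, with an invented helper name:

    fn mask_i64(x: i64) -> i64 {
        if x != 0 { -1 } else { 0 }
    }

    fn main() {
        assert_eq!(mask_i64(0), 0);
        assert_eq!(mask_i64(7), -1);
        assert_eq!(mask_i64(-1) as u64, u64::MAX); // -1 is all bits set
    }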
@@ -268,14 +270,39 @@ impl Value for DataValue { } } - fn bool(b: bool, ty: Type) -> ValueResult { - assert!(ty.is_bool()); - Ok(DataValue::B(b)) + fn bool(b: bool, vec_elem: bool, ty: Type) -> ValueResult { + assert!(ty.is_int()); + macro_rules! make_bool { + ($ty:ident) => { + Ok(DataValue::$ty(if b { + if vec_elem { + -1 + } else { + 1 + } + } else { + 0 + })) + }; + } + + match ty { + types::I8 => make_bool!(I8), + types::I16 => make_bool!(I16), + types::I32 => make_bool!(I32), + types::I64 => make_bool!(I64), + types::I128 => make_bool!(I128), + _ => Err(ValueError::InvalidType(ValueTypeClass::Integer, ty)), + } } fn into_bool(self) -> ValueResult { match self { - DataValue::B(b) => Ok(b), + DataValue::I8(b) => Ok(b != 0), + DataValue::I16(b) => Ok(b != 0), + DataValue::I32(b) => Ok(b != 0), + DataValue::I64(b) => Ok(b != 0), + DataValue::I128(b) => Ok(b != 0), _ => Err(ValueError::InvalidType(ValueTypeClass::Boolean, self.ty())), } } @@ -316,16 +343,6 @@ impl Value for DataValue { (DataValue::F32(n), types::I32) => DataValue::I32(n.bits() as i32), (DataValue::F64(n), types::I64) => DataValue::I64(n.bits() as i64), (DataValue::F32(n), types::F64) => DataValue::F64((n.as_f32() as f64).into()), - (DataValue::B(b), t) if t.is_bool() => DataValue::B(b), - (DataValue::B(b), t) if t.is_int() => { - // Bools are represented in memory as all 1's - let val = match (b, t) { - (true, types::I128) => -1, - (true, t) => (1i128 << t.bits()) - 1, - _ => 0, - }; - DataValue::int(val, t)? - } (dv, t) if (t.is_int() || t.is_float()) && dv.ty() == t => dv, (dv, _) => unimplemented!("conversion: {} -> {:?}", dv.ty(), kind), }, @@ -432,10 +449,13 @@ impl Value for DataValue { (s, _) => unimplemented!("conversion: {} -> {:?}", s.ty(), kind), }, ValueConversionKind::ToBoolean => match self.ty() { - ty if ty.is_bool() => DataValue::B(self.into_bool()?), - ty if ty.is_int() => DataValue::B(self.into_int()? != 0), + ty if ty.is_int() => DataValue::I8(if self.into_int()? != 0 { 1 } else { 0 }), ty => unimplemented!("conversion: {} -> {:?}", ty, kind), }, + ValueConversionKind::Mask(ty) => { + let b = self.into_bool()?; + Self::bool(b, true, ty).unwrap() + } }) } @@ -662,11 +682,11 @@ impl Value for DataValue { } fn and(self, other: Self) -> ValueResult { - binary_match!(&(self, other); [B, I8, I16, I32, I64, I128, F32, F64]) + binary_match!(&(self, other); [I8, I16, I32, I64, I128, F32, F64]) } fn or(self, other: Self) -> ValueResult { - binary_match!(|(self, other); [B, I8, I16, I32, I64, I128, F32, F64]) + binary_match!(|(self, other); [I8, I16, I32, I64, I128, F32, F64]) } fn xor(self, other: Self) -> ValueResult { @@ -674,7 +694,7 @@ impl Value for DataValue { } fn not(self) -> ValueResult { - unary_match!(!(self); [B, I8, I16, I32, I64, I128, F32, F64]) + unary_match!(!(self); [I8, I16, I32, I64, I128, F32, F64]) } fn count_ones(self) -> ValueResult { diff --git a/cranelift/preopt/src/constant_folding.rs b/cranelift/preopt/src/constant_folding.rs index 40d597eddc96..1faf22e927e5 100644 --- a/cranelift/preopt/src/constant_folding.rs +++ b/cranelift/preopt/src/constant_folding.rs @@ -11,7 +11,6 @@ use cranelift_codegen::{ // }; enum ConstImm { - Bool(bool), I64(i64), Ieee32(f32), // Ieee32 and Ieee64 will be replaced with `Single` and `Double` from the rust_apfloat library eventually. 
Ieee64(f64), @@ -28,7 +27,6 @@ impl ConstImm { fn evaluate_truthiness(self) -> bool { match self { - Self::Bool(b) => b, Self::I64(imm) => imm != 0, _ => panic!( "Only a `ConstImm::Bool` and `ConstImm::I64` can be evaluated for \"truthiness\"" @@ -93,10 +91,6 @@ fn resolve_value_to_imm(dfg: &ir::DataFlowGraph, value: ir::Value) -> Option Some(ConstImm::Bool(imm)), _ => None, } } @@ -183,10 +177,6 @@ fn replace_inst(dfg: &mut ir::DataFlowGraph, inst: ir::Inst, const_imm: ConstImm dfg.replace(inst) .f64const(ir::immediates::Ieee64::with_bits(imm.to_bits())); } - Bool(imm) => { - let typevar = dfg.ctrl_typevar(inst); - dfg.replace(inst).bconst(typevar, imm); - } } } diff --git a/cranelift/reader/src/lexer.rs b/cranelift/reader/src/lexer.rs index ba2b0dc5ebf8..6a23dd32b91a 100644 --- a/cranelift/reader/src/lexer.rs +++ b/cranelift/reader/src/lexer.rs @@ -376,12 +376,6 @@ impl<'a> Lexer<'a> { "i128" => types::I128, "f32" => types::F32, "f64" => types::F64, - "b1" => types::B1, - "b8" => types::B8, - "b16" => types::B16, - "b32" => types::B32, - "b64" => types::B64, - "b128" => types::B128, "r32" => types::R32, "r64" => types::R64, _ => return None, @@ -628,7 +622,7 @@ mod tests { fn lex_identifiers() { let mut lex = Lexer::new( "v0 v00 vx01 block1234567890 block5234567890 v1x vx1 vxvx4 \ - function0 function b1 i32x4 f32x5 \ + function0 function i8 i32x4 f32x5 \ iflags fflags iflagss", ); assert_eq!( @@ -647,7 +641,7 @@ mod tests { assert_eq!(lex.next(), token(Token::Identifier("vxvx4"), 1)); assert_eq!(lex.next(), token(Token::Identifier("function0"), 1)); assert_eq!(lex.next(), token(Token::Identifier("function"), 1)); - assert_eq!(lex.next(), token(Token::Type(types::B1), 1)); + assert_eq!(lex.next(), token(Token::Type(types::I8), 1)); assert_eq!(lex.next(), token(Token::Type(types::I32X4), 1)); assert_eq!(lex.next(), token(Token::Identifier("f32x5"), 1)); assert_eq!(lex.next(), token(Token::Type(types::IFLAGS), 1)); diff --git a/cranelift/reader/src/parser.rs b/cranelift/reader/src/parser.rs index 00e7a5227c30..a4946c5462b6 100644 --- a/cranelift/reader/src/parser.rs +++ b/cranelift/reader/src/parser.rs @@ -975,20 +975,6 @@ impl<'a> Parser<'a> { } } - // Match and consume a boolean immediate. - fn match_bool(&mut self, err_msg: &str) -> ParseResult { - if let Some(Token::Identifier(text)) = self.token() { - self.consume(); - match text { - "true" => Ok(true), - "false" => Ok(false), - _ => err!(self.loc, err_msg), - } - } else { - err!(self.loc, err_msg) - } - } - // Match and consume an enumerated immediate, like one of the condition codes. 
fn match_enum(&mut self, err_msg: &str) -> ParseResult { if let Some(Token::Identifier(text)) = self.token() { @@ -1053,15 +1039,6 @@ impl<'a> Parser<'a> { }}; } - fn boolean_to_vec(value: bool, ty: Type) -> Vec { - let lane_size = ty.bytes() / u32::from(ty.lane_count()); - if lane_size < 1 { - panic!("The boolean lane must have a byte size greater than zero."); - } - let value = if value { 0xFF } else { 0 }; - vec![value; lane_size as usize] - } - if !ty.is_vector() && !ty.is_dynamic_vector() { err!(self.loc, "Expected a controlling vector type, not {}", ty) } else { @@ -1072,10 +1049,6 @@ impl<'a> Parser<'a> { I64 => consume!(ty, self.match_imm64("Expected a 64-bit integer")?), F32 => consume!(ty, self.match_ieee32("Expected a 32-bit float")?), F64 => consume!(ty, self.match_ieee64("Expected a 64-bit float")?), - b if b.is_bool() => consume!( - ty, - boolean_to_vec(self.match_bool("Expected a boolean")?, ty) - ), _ => return err!(self.loc, "Expected a type of: float, int, bool"), }; Ok(constant_data) @@ -2565,14 +2538,14 @@ impl<'a> Parser<'a> { Ok(RunCommand::Run(invocation, comparison, expected)) } else if sig.params.is_empty() && sig.returns.len() == 1 - && sig.returns[0].value_type.is_bool() + && sig.returns[0].value_type.is_int() { // To match the existing run behavior that does not require an explicit - // invocation, we create an invocation from a function like `() -> b*` and - // compare it to `true`. + // invocation, we create an invocation from a function like `() -> i*` and + // compare it to not `false`. let invocation = Invocation::new("default", vec![]); - let expected = vec![DataValue::B(true)]; - let comparison = Comparison::Equals; + let expected = vec![DataValue::I8(0)]; + let comparison = Comparison::NotEquals; Ok(RunCommand::Run(invocation, comparison, expected)) } else { Err(self.error("unable to parse the run command")) @@ -2713,9 +2686,6 @@ impl<'a> Parser<'a> { return Err(self.error("only 128-bit vectors are currently supported")); } } - _ if ty.is_bool() && !ty.is_vector() => { - DataValue::from(self.match_bool("expected a boolean")?) - } _ => return Err(self.error(&format!("don't know how to parse data values of: {}", ty))), }; Ok(dv) @@ -2746,10 +2716,6 @@ impl<'a> Parser<'a> { opcode, imm: self.match_ieee64("expected immediate 64-bit float operand")?, }, - InstructionFormat::UnaryBool => InstructionData::UnaryBool { - opcode, - imm: self.match_bool("expected immediate boolean operand")?, - }, InstructionFormat::UnaryConst => { let constant_handle = if let Some(Token::Constant(_)) = self.token() { // If handed a `const?`, use that. 
@@ -3807,10 +3773,10 @@ mod tests { can_parse_as_constant_data!("1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16", I8X16); can_parse_as_constant_data!("0x1.1 0x2.2 0x3.3 0x4.4", F32X4); can_parse_as_constant_data!("0x0 0x1 0x2 0x3", I32X4); - can_parse_as_constant_data!("true false true false true false true false", B16X8); + can_parse_as_constant_data!("-1 0 -1 0 -1 0 -1 0", I16X8); can_parse_as_constant_data!("0 -1", I64X2); - can_parse_as_constant_data!("true false", B64X2); - can_parse_as_constant_data!("true true true true true", B32X4); // note that parse_literals_to_constant_data will leave extra tokens unconsumed + can_parse_as_constant_data!("-1 0", I64X2); + can_parse_as_constant_data!("-1 -1 -1 -1 -1", I32X4); // note that parse_literals_to_constant_data will leave extra tokens unconsumed cannot_parse_as_constant_data!("1 2 3", I32X4); cannot_parse_as_constant_data!(" ", F32X4); @@ -3818,8 +3784,8 @@ mod tests { #[test] fn parse_constant_from_booleans() { - let c = Parser::new("true false true false") - .parse_literals_to_constant_data(B32X4) + let c = Parser::new("-1 0 -1 0") + .parse_literals_to_constant_data(I32X4) .unwrap(); assert_eq!( c.into_vec(), @@ -3864,18 +3830,18 @@ mod tests { } assert_roundtrip("run: %fn0() == 42", &sig(&[], &[I32])); assert_roundtrip( - "run: %fn0(8, 16, 32, 64) == true", - &sig(&[I8, I16, I32, I64], &[B8]), + "run: %fn0(8, 16, 32, 64) == 1", + &sig(&[I8, I16, I32, I64], &[I8]), ); assert_roundtrip( - "run: %my_func(true) == 0x0f0e0d0c0b0a09080706050403020100", - &sig(&[B32], &[I8X16]), + "run: %my_func(1) == 0x0f0e0d0c0b0a09080706050403020100", + &sig(&[I32], &[I8X16]), ); // Verify that default invocations are created when not specified. assert_eq!( - parse("run", &sig(&[], &[B32])).unwrap().to_string(), - "run: %default() == true" + parse("run", &sig(&[], &[I32])).unwrap().to_string(), + "run: %default() != 0" ); assert_eq!( parse("print", &sig(&[], &[F32X4, I16X8])) @@ -3885,8 +3851,7 @@ mod tests { ); // Demonstrate some unparseable cases. 
- assert!(parse("print", &sig(&[I32], &[B32])).is_err()); - assert!(parse("run", &sig(&[], &[I32])).is_err()); + assert!(parse("print", &sig(&[I32], &[I32])).is_err()); assert!(parse("print:", &sig(&[], &[])).is_err()); assert!(parse("run: ", &sig(&[], &[])).is_err()); } @@ -3947,8 +3912,6 @@ mod tests { assert_eq!(parse("1234567", I128).to_string(), "1234567"); assert_eq!(parse("0x32.32", F32).to_string(), "0x1.919000p5"); assert_eq!(parse("0x64.64", F64).to_string(), "0x1.9190000000000p6"); - assert_eq!(parse("true", B1).to_string(), "true"); - assert_eq!(parse("false", B64).to_string(), "false"); assert_eq!( parse("[0 1 2 3]", I32X4).to_string(), "0x00000003000000020000000100000000" diff --git a/cranelift/src/bugpoint.rs b/cranelift/src/bugpoint.rs index 702f0e8133db..bbd96016abae 100644 --- a/cranelift/src/bugpoint.rs +++ b/cranelift/src/bugpoint.rs @@ -762,9 +762,6 @@ fn const_for_type<'f, T: InstBuilder<'f>>(mut builder: T, ty: ir::Type) -> &'sta } else if ty == F64 { builder.f64const(0.0); "f64const" - } else if ty.is_bool() { - builder.bconst(ty, false); - "bconst" } else if ty.is_ref() { builder.null(ty); "null" diff --git a/cranelift/src/interpret.rs b/cranelift/src/interpret.rs index e2d49db5f1a1..a752d692dac9 100644 --- a/cranelift/src/interpret.rs +++ b/cranelift/src/interpret.rs @@ -156,14 +156,14 @@ mod test { fn nop() { let code = String::from( " - function %test() -> b8 { + function %test() -> i8 { block0: nop - v1 = bconst.b8 true + v1 = iconst.i8 -1 v2 = iconst.i8 42 return v1 } - ; run: %test() == true + ; run: %test() == -1 ", ); FileInterpreter::from_inline_code(code).run().unwrap() diff --git a/cranelift/src/run.rs b/cranelift/src/run.rs index bd10e1762a79..8a5fa2d0f6f3 100644 --- a/cranelift/src/run.rs +++ b/cranelift/src/run.rs @@ -126,10 +126,10 @@ mod test { fn nop() { let code = String::from( " - function %test() -> b8 { + function %test() -> i8 { block0: nop - v1 = bconst.b8 true + v1 = iconst.i8 -1 return v1 } ; run diff --git a/cranelift/wasm/src/code_translator.rs b/cranelift/wasm/src/code_translator.rs index b2e5203922f8..9b9745dafa7a 100644 --- a/cranelift/wasm/src/code_translator.rs +++ b/cranelift/wasm/src/code_translator.rs @@ -1029,7 +1029,7 @@ pub fn translate_operator( Operator::I32Eqz | Operator::I64Eqz => { let arg = state.pop1(); let val = builder.ins().icmp_imm(IntCC::Equal, arg, 0); - state.push1(builder.ins().bint(I32, val)); + state.push1(builder.ins().uextend(I32, val)); } Operator::I32Eq | Operator::I64Eq => translate_icmp(IntCC::Equal, builder, state), Operator::F32Eq | Operator::F64Eq => translate_fcmp(FloatCC::Equal, builder, state), @@ -1653,7 +1653,7 @@ pub fn translate_operator( Operator::V128AnyTrue => { let a = pop1_with_bitcast(state, type_of(op), builder); let bool_result = builder.ins().vany_true(a); - state.push1(builder.ins().bint(I32, bool_result)) + state.push1(builder.ins().uextend(I32, bool_result)) } Operator::I8x16AllTrue | Operator::I16x8AllTrue @@ -1661,7 +1661,7 @@ pub fn translate_operator( | Operator::I64x2AllTrue => { let a = pop1_with_bitcast(state, type_of(op), builder); let bool_result = builder.ins().vall_true(a); - state.push1(builder.ins().bint(I32, bool_result)) + state.push1(builder.ins().uextend(I32, bool_result)) } Operator::I8x16Bitmask | Operator::I16x8Bitmask @@ -2433,16 +2433,8 @@ fn translate_store( state: &mut FuncTranslationState, environ: &mut FE, ) -> WasmResult<()> { - let mut val = state.pop1(); - let mut val_ty = builder.func.dfg.value_type(val); - - // Boolean-vector types don't validate with a 
`store` instruction, so
-    // bitcast them to a vector type which is compatible with the store
-    // instruction.
-    if val_ty.is_vector() && val_ty.lane_type().is_bool() {
-        val = builder.ins().raw_bitcast(I8X16, val);
-        val_ty = I8X16;
-    }
+    let val = state.pop1();
+    let val_ty = builder.func.dfg.value_type(val);
 
     let (flags, base, offset) =
         prepare_addr(memarg, mem_op_size(opcode, val_ty), builder, state, environ)?;
@@ -2465,7 +2457,7 @@ fn mem_op_size(opcode: ir::Opcode, ty: Type) -> u32 {
 fn translate_icmp(cc: IntCC, builder: &mut FunctionBuilder, state: &mut FuncTranslationState) {
     let (arg0, arg1) = state.pop2();
     let val = builder.ins().icmp(cc, arg0, arg1);
-    state.push1(builder.ins().bint(I32, val));
+    state.push1(builder.ins().uextend(I32, val));
 }
 
 fn translate_atomic_rmw(
@@ -2644,7 +2636,7 @@ fn translate_vector_icmp(
 fn translate_fcmp(cc: FloatCC, builder: &mut FunctionBuilder, state: &mut FuncTranslationState) {
     let (arg0, arg1) = state.pop2();
     let val = builder.ins().fcmp(cc, arg0, arg1);
-    state.push1(builder.ins().bint(I32, val));
+    state.push1(builder.ins().uextend(I32, val));
 }
 
 fn translate_vector_fcmp(
@@ -2919,7 +2911,7 @@ fn optionally_bitcast_vector(
 #[inline(always)]
 fn is_non_canonical_v128(ty: ir::Type) -> bool {
     match ty {
-        B8X16 | B16X8 | B32X4 | B64X2 | I64X2 | I32X4 | I16X8 | F32X4 | F64X2 => true,
+        I64X2 | I32X4 | I16X8 | F32X4 | F64X2 => true,
         _ => false,
     }
 }
diff --git a/cranelift/wasm/src/environ/spec.rs b/cranelift/wasm/src/environ/spec.rs
index 2584630f7727..5d579d0a47a2 100644
--- a/cranelift/wasm/src/environ/spec.rs
+++ b/cranelift/wasm/src/environ/spec.rs
@@ -378,7 +378,7 @@ pub trait FuncEnvironment: TargetEnvironment {
         value: ir::Value,
     ) -> WasmResult {
         let is_null = pos.ins().is_null(value);
-        Ok(pos.ins().bint(ir::types::I32, is_null))
+        Ok(pos.ins().uextend(ir::types::I32, is_null))
     }
 
     /// Translate a `ref.func` WebAssembly instruction.
diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs
index 59822f41573f..9d8f788a93f5 100644
--- a/crates/cranelift/src/func_environ.rs
+++ b/crates/cranelift/src/func_environ.rs
@@ -1290,7 +1290,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
             _ => unreachable!(),
         };
 
-        Ok(pos.ins().bint(ir::types::I32, bool_is_null))
+        Ok(pos.ins().uextend(ir::types::I32, bool_is_null))
     }
 
     fn translate_ref_func(

From a48c8841faeb22a4ed779f555ea164bc28453607 Mon Sep 17 00:00:00 2001
From: Trevor Elliott
Date: Wed, 12 Oct 2022 12:18:08 -0700
Subject: [PATCH 02/12] Fix bmask on s390x

---
 cranelift/codegen/src/isa/s390x/inst.isle  | 36 ++++++++++++++++++++++
 cranelift/codegen/src/isa/s390x/lower.isle | 36 ++--------------------
 2 files changed, 38 insertions(+), 34 deletions(-)

diff --git a/cranelift/codegen/src/isa/s390x/inst.isle b/cranelift/codegen/src/isa/s390x/inst.isle
index 9360d9b3b01c..f6e4611a026a 100644
--- a/cranelift/codegen/src/isa/s390x/inst.isle
+++ b/cranelift/codegen/src/isa/s390x/inst.isle
@@ -5096,6 +5096,42 @@
 (decl vec_fcmphes (Type Reg Reg) ProducesFlags)
 (rule (vec_fcmphes (ty_vec128 ty) x y)
   (vec_float_cmps ty (vecop_float_cmphe ty) x y))
+;; Helpers for bmask lowering ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(decl lower_bmask_type (Type) Type)
+
+(rule 1 (lower_bmask_type (fits_in_32 ty)) $I32)
+(rule 0 (lower_bmask_type _) $I64)
+
+;; Lower a bmask call, given the output and input types.
+(decl lower_bmask (Type Type Reg) Reg)
+
+(rule
+  0
+  (lower_bmask (fits_in_64 oty) (fits_in_64 ity) input)
+  (let ((cmp ProducesFlags (icmps_simm16 (lower_bmask_type ity) input 0))
+        (op Cond (intcc_as_cond (IntCC.NotEqual))))
+    (select_bool_imm oty (bool cmp op) -1 0)))
+
+(rule
+  1
+  (lower_bmask $I128 (fits_in_64 ity) input)
+  (let ((res Reg (lower_bmask $I64 ity input)))
+    (mov_to_vec128 $I128 res res)))
+
+(rule
+  2
+  (lower_bmask (fits_in_64 oty) $I128 input)
+  (let ((lo Reg (vec_extract_lane $I64X2 input 0 (zero_reg)))
+        (hi Reg (vec_extract_lane $I64X2 input 1 (zero_reg)))
+        (combined Reg (or_reg $I64 lo hi)))
+    (lower_bmask oty $I64 combined)))
+
+(rule
+  3
+  (lower_bmask $I128 $I128 input)
+  (let ((res Reg (lower_bmask $I64 $I128 input)))
+    (mov_to_vec128 $I128 res res)))
 
 ;; Implicit conversions ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
diff --git a/cranelift/codegen/src/isa/s390x/lower.isle b/cranelift/codegen/src/isa/s390x/lower.isle
index 291853c805f8..dec3dac839b6 100644
--- a/cranelift/codegen/src/isa/s390x/lower.isle
+++ b/cranelift/codegen/src/isa/s390x/lower.isle
@@ -1157,40 +1157,8 @@
 
 ;;;; Rules for `bmask` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-;; Use a common helper to type cast bools to either bool or integer types.
-(decl cast_bool (Type Value) Reg)
-(rule (lower (has_type ty (bmask x)))
-  (cast_bool ty x))
-
-;; If the target has the same or a smaller size than the source, it's a no-op.
-(rule 7 (cast_bool (fits_in_16 _ty) x @ (value_type $I16)) x)
-(rule 6 (cast_bool (fits_in_32 _ty) x @ (value_type $I32)) x)
-(rule 5 (cast_bool (fits_in_64 _ty) x @ (value_type $I64)) x)
-(rule 4 (cast_bool (vr128_ty _ty) x @ (value_type $I128)) x)
-(rule 5 (cast_bool (fits_in_64 _ty) x @ (value_type $I128))
-  (vec_extract_lane $I64X2 x 1 (zero_reg)))
-
-;; Single-bit values are sign-extended via a pair of shifts.
-(rule 0 (cast_bool (gpr32_ty ty) x @ (value_type $I8))
-  (ashr_imm $I32 (lshl_imm $I32 x 31) 31))
-(rule 1 (cast_bool (gpr64_ty ty) x @ (value_type $I8))
-  (ashr_imm $I64 (lshl_imm $I64 x 63) 63))
-(rule 4 (cast_bool (vr128_ty ty) x @ (value_type $I8))
-  (let ((gpr Reg (ashr_imm $I64 (lshl_imm $I64 x 63) 63)))
-    (mov_to_vec128 ty gpr gpr)))
-
-;; Other values are just sign-extended normally.
-(rule 0 (cast_bool (gpr32_ty _ty) x @ (value_type $I16)) - (sext32_reg $I16 x)) -(rule 1(cast_bool (gpr64_ty _ty) x @ (value_type $I16)) - (sext64_reg $I16 x)) -(rule 1(cast_bool (gpr64_ty _ty) x @ (value_type $I32)) - (sext64_reg $I32 x)) -(rule 3 (cast_bool (vr128_ty ty) x @ (value_type (gpr32_ty src_ty))) - (let ((x_ext Reg (sext64_reg src_ty x))) - (mov_to_vec128 ty x_ext x_ext))) -(rule 2 (cast_bool (vr128_ty ty) x @ (value_type (gpr64_ty src_ty))) - (mov_to_vec128 ty x x)) +(rule (lower (has_type oty (bmask x @ (value_type ity)))) + (lower_bmask oty ity x)) ;;;; Rules for `bitrev` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; From 88f04486719291bc12d264d5fc6a3fe63aba45e8 Mon Sep 17 00:00:00 2001 From: Trevor Elliott Date: Thu, 6 Oct 2022 11:48:24 -0700 Subject: [PATCH 03/12] Update filetests --- .../filetests/isa/aarch64/atomic-cas.clif | 6 +- .../filetests/isa/aarch64/bitops.clif | 18 +- .../filetests/isa/aarch64/compare_zero.clif | 96 +- .../filetests/isa/aarch64/condbr.clif | 23 +- .../filetests/isa/aarch64/condops.clif | 14 +- .../filetests/isa/aarch64/constants.clif | 10 +- .../filetests/isa/aarch64/i128-bmask.clif | 112 +++ .../isa/aarch64/iconst-icmp-small.clif | 14 +- .../filetests/isa/aarch64/reftypes.clif | 8 +- .../isa/aarch64/simd-bitwise-compile.clif | 12 +- .../isa/aarch64/simd-comparison-legalize.clif | 8 +- .../isa/aarch64/simd-lane-access-compile.clif | 6 +- .../isa/aarch64/simd-logical-compile.clif | 10 +- .../filetests/isa/aarch64/simd-valltrue.clif | 29 +- .../filetests/filetests/isa/aarch64/simd.clif | 12 - .../filetests/isa/aarch64/stack.clif | 4 +- .../filetests/isa/riscv64/bitops.clif | 22 - .../filetests/isa/riscv64/condbr.clif | 22 +- .../filetests/isa/riscv64/condops.clif | 10 +- .../filetests/isa/riscv64/constants.clif | 8 +- .../filetests/isa/riscv64/i128-bmask.clif | 113 +++ .../isa/riscv64/iconst-icmp-small.clif | 18 +- .../filetests/isa/riscv64/reftypes.clif | 6 +- .../filetests/isa/riscv64/stack.clif | 4 +- .../filetests/filetests/isa/s390x/condbr.clif | 2 +- .../filetests/isa/s390x/condops.clif | 6 +- .../filetests/isa/s390x/constants.clif | 10 +- .../filetests/isa/s390x/conversions.clif | 929 ++++-------------- .../filetests/isa/s390x/icmp-i128.clif | 20 +- .../filetests/filetests/isa/s390x/icmp.clif | 98 +- .../filetests/isa/s390x/reftypes.clif | 8 +- .../filetests/isa/s390x/vec-bitwise.clif | 16 +- .../filetests/isa/s390x/vec-fcmp.clif | 56 +- .../filetests/isa/s390x/vec-icmp.clif | 80 +- .../filetests/isa/s390x/vec-logical.clif | 96 +- .../isa/s390x/vec-permute-le-lane.clif | 94 +- .../filetests/isa/s390x/vec-permute.clif | 94 +- .../filetests/isa/x64/atomic-cas-bug.clif | 27 +- .../isa/x64/atomic_cas_const_addr.clif | 2 +- cranelift/filetests/filetests/isa/x64/b1.clif | 28 +- .../filetests/filetests/isa/x64/branches.clif | 30 +- .../filetests/isa/x64/cmp-mem-bug.clif | 14 +- .../filetests/isa/x64/fcmp-mem-bug.clif | 9 +- .../filetests/filetests/isa/x64/i128.clif | 15 +- .../filetests/filetests/isa/x64/load-op.clif | 6 +- .../filetests/isa/x64/move-elision.clif | 4 +- .../isa/x64/{bextend.clif => sextend.clif} | 6 +- .../isa/x64/simd-bitwise-compile.clif | 12 +- .../isa/x64/simd-comparison-legalize.clif | 8 +- .../isa/x64/simd-lane-access-compile.clif | 8 +- .../isa/x64/simd-logical-compile.clif | 10 +- .../filetests/licm/rewrite-jump-table.clif | 4 +- .../filetests/filetests/parser/call.clif | 8 +- .../filetests/filetests/parser/tiny.clif | 20 +- .../filetests/filetests/preopt/branch.clif | 16 +- 
.../filetests/filetests/runtests/bextend.clif | 89 -- .../filetests/filetests/runtests/bint.clif | 341 ------- .../filetests/runtests/bitcast-ref64.clif | 10 +- .../filetests/filetests/runtests/bitops.clif | 6 +- .../filetests/filetests/runtests/bmask.clif | 160 ++- .../filetests/filetests/runtests/bnot.clif | 48 +- .../filetests/filetests/runtests/br.clif | 138 +-- .../filetests/filetests/runtests/br_icmp.clif | 624 ++++++------ .../filetests/filetests/runtests/breduce.clif | 90 -- .../filetests/filetests/runtests/call.clif | 19 +- .../filetests/filetests/runtests/ceil.clif | 4 +- .../filetests/filetests/runtests/const.clif | 50 +- .../filetests/filetests/runtests/copy.clif | 40 - .../filetests/filetests/runtests/fadd.clif | 4 +- .../filetests/filetests/runtests/fcmp-eq.clif | 524 +++++----- .../filetests/filetests/runtests/fcmp-ge.clif | 524 +++++----- .../filetests/filetests/runtests/fcmp-gt.clif | 524 +++++----- .../filetests/filetests/runtests/fcmp-le.clif | 524 +++++----- .../filetests/filetests/runtests/fcmp-lt.clif | 524 +++++----- .../filetests/filetests/runtests/fcmp-ne.clif | 524 +++++----- .../filetests/runtests/fcmp-one.clif | 524 +++++----- .../filetests/runtests/fcmp-ord.clif | 524 +++++----- .../filetests/runtests/fcmp-ueq.clif | 524 +++++----- .../filetests/runtests/fcmp-uge.clif | 524 +++++----- .../filetests/runtests/fcmp-ugt.clif | 524 +++++----- .../filetests/runtests/fcmp-ule.clif | 524 +++++----- .../filetests/runtests/fcmp-ult.clif | 524 +++++----- .../filetests/runtests/fcmp-uno.clif | 524 +++++----- .../filetests/filetests/runtests/fdiv.clif | 4 +- .../filetests/filetests/runtests/floor.clif | 4 +- .../filetests/filetests/runtests/fma.clif | 6 +- .../filetests/runtests/fmax-pseudo.clif | 4 +- .../filetests/filetests/runtests/fmax.clif | 4 +- .../filetests/runtests/fmin-pseudo.clif | 4 +- .../filetests/filetests/runtests/fmin.clif | 4 +- .../filetests/filetests/runtests/fmul.clif | 4 +- .../filetests/filetests/runtests/fsub.clif | 4 +- .../filetests/runtests/i128-bextend.clif | 46 - .../filetests/runtests/i128-bint.clif | 87 -- .../filetests/runtests/i128-bitrev.clif | 6 +- .../filetests/runtests/i128-bmask.clif | 80 +- .../filetests/runtests/i128-bnot.clif | 10 +- .../filetests/filetests/runtests/i128-br.clif | 28 +- .../filetests/runtests/i128-breduce.clif | 42 - .../filetests/runtests/i128-bricmp.clif | 260 ++--- .../filetests/runtests/i128-icmp.clif | 238 ++--- .../filetests/runtests/i128-load-store.clif | 80 +- .../filetests/runtests/i128-select.clif | 12 +- .../filetests/runtests/iaddcarry.clif | 128 +-- .../filetests/filetests/runtests/iaddcin.clif | 60 +- .../filetests/runtests/iaddcout.clif | 46 +- .../filetests/runtests/icmp-eq-imm.clif | 64 +- .../filetests/filetests/runtests/icmp-eq.clif | 32 +- .../filetests/filetests/runtests/icmp-ne.clif | 32 +- .../filetests/runtests/icmp-sge.clif | 56 +- .../filetests/runtests/icmp-sgt.clif | 56 +- .../filetests/runtests/icmp-sle.clif | 56 +- .../filetests/runtests/icmp-slt.clif | 56 +- .../filetests/runtests/icmp-uge.clif | 56 +- .../filetests/runtests/icmp-ugt.clif | 56 +- .../filetests/runtests/icmp-ule.clif | 56 +- .../filetests/runtests/icmp-ult.clif | 56 +- .../filetests/filetests/runtests/icmp.clif | 6 +- .../filetests/filetests/runtests/isubbin.clif | 64 +- .../filetests/runtests/isubborrow.clif | 128 +-- .../filetests/runtests/isubbout.clif | 46 +- .../filetests/filetests/runtests/nearest.clif | 4 +- .../runtests/ref64-invalid-null.clif | 26 +- .../runtests/riscv64_issue_4996.clif | 6 +- 
.../filetests/filetests/runtests/select.clif | 14 +- .../filetests/runtests/simd-arithmetic.clif | 4 +- .../filetests/runtests/simd-bitselect.clif | 2 +- .../filetests/runtests/simd-bmask.clif | 24 +- .../filetests/runtests/simd-comparison.clif | 40 +- .../filetests/runtests/simd-fma-64bit.clif | 14 +- .../filetests/runtests/simd-fma.clif | 20 +- .../filetests/runtests/simd-icmp-eq.clif | 18 +- .../filetests/runtests/simd-icmp-ne.clif | 18 +- .../filetests/runtests/simd-icmp-sge.clif | 24 +- .../filetests/runtests/simd-icmp-sgt.clif | 24 +- .../filetests/runtests/simd-icmp-sle.clif | 24 +- .../filetests/runtests/simd-icmp-slt.clif | 24 +- .../filetests/runtests/simd-icmp-uge.clif | 24 +- .../filetests/runtests/simd-icmp-ugt.clif | 24 +- .../filetests/runtests/simd-icmp-ule.clif | 24 +- .../filetests/runtests/simd-icmp-ult.clif | 24 +- .../filetests/runtests/simd-lane-access.clif | 24 +- .../filetests/runtests/simd-logical.clif | 44 +- .../filetests/runtests/simd-splat.clif | 3 - .../filetests/runtests/simd-ushr.clif | 2 +- .../runtests/simd-valltrue-64bit.clif | 48 +- .../filetests/runtests/simd-valltrue.clif | 40 +- .../runtests/simd-vanytrue-64bit.clif | 48 +- .../filetests/runtests/simd-vanytrue.clif | 40 +- .../filetests/runtests/simd-vconst.clif | 6 +- .../filetests/runtests/simd-vselect.clif | 38 +- .../filetests/runtests/simd_compare_zero.clif | 74 +- .../filetests/filetests/runtests/sqrt.clif | 4 +- .../filetests/runtests/stack-addr-32.clif | 28 +- .../filetests/runtests/stack-addr-64.clif | 16 +- .../filetests/runtests/trueif-ff.clif | 86 +- .../filetests/filetests/runtests/trunc.clif | 4 +- .../filetests/simple_gvn/reject.clif | 2 +- .../filetests/simple_preopt/bitselect.clif | 13 +- ...ing_instructions_and_cfg_predecessors.clif | 2 +- .../filetests/simple_preopt/sign_extend.clif | 4 +- .../filetests/simple_preopt/simplify32.clif | 4 +- .../filetests/simple_preopt/simplify64.clif | 18 +- .../filetests/verifier/simd-lane-index.clif | 14 +- .../filetests/verifier/type_check.clif | 6 +- .../filetests/wasm/f32-compares.clif | 12 +- .../filetests/wasm/f64-compares.clif | 12 +- .../filetests/wasm/i32-compares.clif | 22 +- .../filetests/wasm/i64-compares.clif | 22 +- .../filetests/wasm/multi-val-mixed.clif | 776 +++++++-------- cranelift/tests/bugpoint_consts.clif | 4 +- cranelift/tests/bugpoint_consts_expected.clif | 12 +- cranelift/tests/bugpoint_test.clif | 99 +- 173 files changed, 6946 insertions(+), 8224 deletions(-) create mode 100644 cranelift/filetests/filetests/isa/aarch64/i128-bmask.clif create mode 100644 cranelift/filetests/filetests/isa/riscv64/i128-bmask.clif rename cranelift/filetests/filetests/isa/x64/{bextend.clif => sextend.clif} (73%) delete mode 100644 cranelift/filetests/filetests/runtests/bextend.clif delete mode 100644 cranelift/filetests/filetests/runtests/bint.clif delete mode 100644 cranelift/filetests/filetests/runtests/breduce.clif delete mode 100644 cranelift/filetests/filetests/runtests/i128-bextend.clif delete mode 100644 cranelift/filetests/filetests/runtests/i128-bint.clif delete mode 100644 cranelift/filetests/filetests/runtests/i128-breduce.clif diff --git a/cranelift/filetests/filetests/isa/aarch64/atomic-cas.clif b/cranelift/filetests/filetests/isa/aarch64/atomic-cas.clif index f3d5c249c01e..0508b6825512 100644 --- a/cranelift/filetests/filetests/isa/aarch64/atomic-cas.clif +++ b/cranelift/filetests/filetests/isa/aarch64/atomic-cas.clif @@ -7,8 +7,7 @@ function u0:0(i64, i32, i32) -> i8 system_v { block0(v0: i64, v1: i32, v2: i32): v6 = 
atomic_cas.i32 v0, v1, v2 v7 = icmp eq v6, v1 - v8 = bint.i8 v7 - return v8 + return v7 } ; stp fp, lr, [sp, #-16]! @@ -22,8 +21,7 @@ block0(v0: i64, v1: i32, v2: i32): ; mov x28, x2 ; atomic_cas_loop_32 addr=x25, expect=x26, replacement=x28, oldval=x27, scratch=x24 ; subs wzr, w27, w26 -; cset x8, eq -; and w0, w8, #1 +; cset x0, eq ; ldp x24, x25, [sp], #16 ; ldp x26, x27, [sp], #16 ; ldr x28, [sp], #16 diff --git a/cranelift/filetests/filetests/isa/aarch64/bitops.clif b/cranelift/filetests/filetests/isa/aarch64/bitops.clif index 306d2f8b94d3..b74e9ef420fa 100644 --- a/cranelift/filetests/filetests/isa/aarch64/bitops.clif +++ b/cranelift/filetests/filetests/isa/aarch64/bitops.clif @@ -304,28 +304,28 @@ block0(v0: i8): ; umov w0, v5.b[0] ; ret -function %bextend_b8() -> b32 { +function %sextend_i8() -> i32 { block0: - v1 = bconst.b8 true - v2 = bextend.b32 v1 + v1 = iconst.i8 -1 + v2 = sextend.i32 v1 return v2 } ; block0: -; movz x1, #255 +; movn x1, #0 ; sxtb w0, w1 ; ret -function %bextend_b1() -> b32 { +function %sextend_i8() -> i32 { block0: - v1 = bconst.b1 true - v2 = bextend.b32 v1 + v1 = iconst.i8 -1 + v2 = sextend.i32 v1 return v2 } ; block0: -; movz x1, #1 -; sbfx w0, w1, #0, #1 +; movn x1, #0 +; sxtb w0, w1 ; ret function %bnot_i32(i32) -> i32 { diff --git a/cranelift/filetests/filetests/isa/aarch64/compare_zero.clif b/cranelift/filetests/filetests/isa/aarch64/compare_zero.clif index 6827b774ca0c..556d7b5f2c3a 100644 --- a/cranelift/filetests/filetests/isa/aarch64/compare_zero.clif +++ b/cranelift/filetests/filetests/isa/aarch64/compare_zero.clif @@ -2,7 +2,7 @@ test compile precise-output set unwind_info=false target aarch64 -function %f0(i8x16) -> b8x16 { +function %f0(i8x16) -> i8x16 { block0(v0: i8x16): v1 = iconst.i8 0 v2 = splat.i8x16 v1 @@ -14,7 +14,7 @@ block0(v0: i8x16): ; cmeq v0.16b, v0.16b, #0 ; ret -function %f0_vconst(i8x16) -> b8x16 { +function %f0_vconst(i8x16) -> i8x16 { block0(v0: i8x16): v1 = vconst.i8x16 0x00 v2 = icmp eq v0, v1 @@ -25,7 +25,7 @@ block0(v0: i8x16): ; cmeq v0.16b, v0.16b, #0 ; ret -function %f1(i16x8) -> b16x8 { +function %f1(i16x8) -> i16x8 { block0(v0: i16x8): v1 = iconst.i16 0 v2 = splat.i16x8 v1 @@ -37,7 +37,7 @@ block0(v0: i16x8): ; cmeq v0.8h, v0.8h, #0 ; ret -function %f1_vconst(i16x8) -> b16x8 { +function %f1_vconst(i16x8) -> i16x8 { block0(v0: i16x8): v1 = vconst.i16x8 0x00 v2 = icmp eq v1, v0 @@ -48,7 +48,7 @@ block0(v0: i16x8): ; cmeq v0.8h, v0.8h, #0 ; ret -function %f2(i32x4) -> b32x4 { +function %f2(i32x4) -> i32x4 { block0(v0: i32x4): v1 = iconst.i32 0 v2 = splat.i32x4 v1 @@ -61,7 +61,7 @@ block0(v0: i32x4): ; mvn v0.16b, v3.16b ; ret -function %f2_vconst(i32x4) -> b32x4 { +function %f2_vconst(i32x4) -> i32x4 { block0(v0: i32x4): v1 = vconst.i32x4 0x00 v2 = icmp ne v0, v1 @@ -73,7 +73,7 @@ block0(v0: i32x4): ; mvn v0.16b, v3.16b ; ret -function %f3(i64x2) -> b64x2 { +function %f3(i64x2) -> i64x2 { block0(v0: i64x2): v1 = iconst.i64 0 v2 = splat.i64x2 v1 @@ -86,7 +86,7 @@ block0(v0: i64x2): ; mvn v0.16b, v3.16b ; ret -function %f3_vconst(i64x2) -> b64x2 { +function %f3_vconst(i64x2) -> i64x2 { block0(v0: i64x2): v1 = vconst.i64x2 0x00 v2 = icmp ne v1, v0 @@ -98,7 +98,7 @@ block0(v0: i64x2): ; mvn v0.16b, v3.16b ; ret -function %f4(i8x16) -> b8x16 { +function %f4(i8x16) -> i8x16 { block0(v0: i8x16): v1 = iconst.i8 0 v2 = splat.i8x16 v1 @@ -110,7 +110,7 @@ block0(v0: i8x16): ; cmle v0.16b, v0.16b, #0 ; ret -function %f4_vconst(i8x16) -> b8x16 { +function %f4_vconst(i8x16) -> i8x16 { block0(v0: i8x16): v1 = vconst.i8x16 0x00 v2 = 
icmp sle v0, v1 @@ -121,7 +121,7 @@ block0(v0: i8x16): ; cmle v0.16b, v0.16b, #0 ; ret -function %f5(i16x8) -> b16x8 { +function %f5(i16x8) -> i16x8 { block0(v0: i16x8): v1 = iconst.i16 0 v2 = splat.i16x8 v1 @@ -133,7 +133,7 @@ block0(v0: i16x8): ; cmge v0.8h, v0.8h, #0 ; ret -function %f5_vconst(i16x8) -> b16x8 { +function %f5_vconst(i16x8) -> i16x8 { block0(v0: i16x8): v1 = vconst.i16x8 0x00 v2 = icmp sle v1, v0 @@ -144,7 +144,7 @@ block0(v0: i16x8): ; cmge v0.8h, v0.8h, #0 ; ret -function %f6(i32x4) -> b32x4 { +function %f6(i32x4) -> i32x4 { block0(v0: i32x4): v1 = iconst.i32 0 v2 = splat.i32x4 v1 @@ -156,7 +156,7 @@ block0(v0: i32x4): ; cmge v0.4s, v0.4s, #0 ; ret -function %f6_vconst(i32x4) -> b32x4 { +function %f6_vconst(i32x4) -> i32x4 { block0(v0: i32x4): v1 = vconst.i32x4 0x00 v2 = icmp sge v0, v1 @@ -167,7 +167,7 @@ block0(v0: i32x4): ; cmge v0.4s, v0.4s, #0 ; ret -function %f7(i64x2) -> b64x2 { +function %f7(i64x2) -> i64x2 { block0(v0: i64x2): v1 = iconst.i64 0 v2 = splat.i64x2 v1 @@ -179,7 +179,7 @@ block0(v0: i64x2): ; cmle v0.2d, v0.2d, #0 ; ret -function %f7_vconst(i64x2) -> b64x2 { +function %f7_vconst(i64x2) -> i64x2 { block0(v0: i64x2): v1 = vconst.i64x2 0x00 v2 = icmp sge v1, v0 @@ -190,7 +190,7 @@ block0(v0: i64x2): ; cmle v0.2d, v0.2d, #0 ; ret -function %f8(i8x16) -> b8x16 { +function %f8(i8x16) -> i8x16 { block0(v0: i8x16): v1 = iconst.i8 0 v2 = splat.i8x16 v1 @@ -202,7 +202,7 @@ block0(v0: i8x16): ; cmlt v0.16b, v0.16b, #0 ; ret -function %f8_vconst(i8x16) -> b8x16 { +function %f8_vconst(i8x16) -> i8x16 { block0(v0: i8x16): v1 = vconst.i8x16 0x00 v2 = icmp slt v0, v1 @@ -213,7 +213,7 @@ block0(v0: i8x16): ; cmlt v0.16b, v0.16b, #0 ; ret -function %f9(i16x8) -> b16x8 { +function %f9(i16x8) -> i16x8 { block0(v0: i16x8): v1 = iconst.i16 0 v2 = splat.i16x8 v1 @@ -225,7 +225,7 @@ block0(v0: i16x8): ; cmgt v0.8h, v0.8h, #0 ; ret -function %f9_vconst(i16x8) -> b16x8 { +function %f9_vconst(i16x8) -> i16x8 { block0(v0: i16x8): v1 = vconst.i16x8 0x00 v2 = icmp slt v1, v0 @@ -236,7 +236,7 @@ block0(v0: i16x8): ; cmgt v0.8h, v0.8h, #0 ; ret -function %f10(i32x4) -> b32x4 { +function %f10(i32x4) -> i32x4 { block0(v0: i32x4): v1 = iconst.i32 0 v2 = splat.i32x4 v1 @@ -248,7 +248,7 @@ block0(v0: i32x4): ; cmgt v0.4s, v0.4s, #0 ; ret -function %f10_vconst(i32x4) -> b32x4 { +function %f10_vconst(i32x4) -> i32x4 { block0(v0: i32x4): v1 = vconst.i32x4 0x00 v2 = icmp sgt v0, v1 @@ -259,7 +259,7 @@ block0(v0: i32x4): ; cmgt v0.4s, v0.4s, #0 ; ret -function %f11(i64x2) -> b64x2 { +function %f11(i64x2) -> i64x2 { block0(v0: i64x2): v1 = iconst.i64 0 v2 = splat.i64x2 v1 @@ -271,7 +271,7 @@ block0(v0: i64x2): ; cmlt v0.2d, v0.2d, #0 ; ret -function %f11_vconst(i64x2) -> b64x2 { +function %f11_vconst(i64x2) -> i64x2 { block0(v0: i64x2): v1 = vconst.i64x2 0x00 v2 = icmp sgt v1, v0 @@ -282,7 +282,7 @@ block0(v0: i64x2): ; cmlt v0.2d, v0.2d, #0 ; ret -function %f12(f32x4) -> b32x4 { +function %f12(f32x4) -> i32x4 { block0(v0: f32x4): v1 = f32const 0.0 v2 = splat.f32x4 v1 @@ -294,7 +294,7 @@ block0(v0: f32x4): ; fcmeq v0.4s, v0.4s, #0.0 ; ret -function %f12_vconst(f32x4) -> b32x4 { +function %f12_vconst(f32x4) -> i32x4 { block0(v0: f32x4): v1 = vconst.f32x4 [0.0 0.0 0.0 0.0] v2 = fcmp eq v0, v1 @@ -305,7 +305,7 @@ block0(v0: f32x4): ; fcmeq v0.4s, v0.4s, #0.0 ; ret -function %f13(f64x2) -> b64x2 { +function %f13(f64x2) -> i64x2 { block0(v0: f64x2): v1 = f64const 0.0 v2 = splat.f64x2 v1 @@ -317,7 +317,7 @@ block0(v0: f64x2): ; fcmeq v0.2d, v0.2d, #0.0 ; ret -function %f13_vconst(f64x2) -> b64x2 { 
+function %f13_vconst(f64x2) -> i64x2 { block0(v0: f64x2): v1 = vconst.f64x2 [0.0 0.0] v2 = fcmp eq v1, v0 @@ -328,7 +328,7 @@ block0(v0: f64x2): ; fcmeq v0.2d, v0.2d, #0.0 ; ret -function %f14(f64x2) -> b64x2 { +function %f14(f64x2) -> i64x2 { block0(v0: f64x2): v1 = f64const 0.0 v2 = splat.f64x2 v1 @@ -341,7 +341,7 @@ block0(v0: f64x2): ; mvn v0.16b, v3.16b ; ret -function %f14_vconst(f64x2) -> b64x2 { +function %f14_vconst(f64x2) -> i64x2 { block0(v0: f64x2): v1 = vconst.f64x2 [0.0 0.0] v2 = fcmp ne v0, v1 @@ -353,7 +353,7 @@ block0(v0: f64x2): ; mvn v0.16b, v3.16b ; ret -function %f15(f32x4) -> b32x4 { +function %f15(f32x4) -> i32x4 { block0(v0: f32x4): v1 = f32const 0.0 v2 = splat.f32x4 v1 @@ -366,7 +366,7 @@ block0(v0: f32x4): ; mvn v0.16b, v3.16b ; ret -function %f15_vconst(f32x4) -> b32x4 { +function %f15_vconst(f32x4) -> i32x4 { block0(v0: f32x4): v1 = vconst.f32x4 [0.0 0.0 0.0 0.0] v2 = fcmp ne v1, v0 @@ -378,7 +378,7 @@ block0(v0: f32x4): ; mvn v0.16b, v3.16b ; ret -function %f16(f32x4) -> b32x4 { +function %f16(f32x4) -> i32x4 { block0(v0: f32x4): v1 = f32const 0.0 v2 = splat.f32x4 v1 @@ -390,7 +390,7 @@ block0(v0: f32x4): ; fcmle v0.4s, v0.4s, #0.0 ; ret -function %f16_vconst(f32x4) -> b32x4 { +function %f16_vconst(f32x4) -> i32x4 { block0(v0: f32x4): v1 = vconst.f32x4 [0.0 0.0 0.0 0.0] v2 = fcmp le v0, v1 @@ -401,7 +401,7 @@ block0(v0: f32x4): ; fcmle v0.4s, v0.4s, #0.0 ; ret -function %f17(f64x2) -> b64x2 { +function %f17(f64x2) -> i64x2 { block0(v0: f64x2): v1 = f64const 0.0 v2 = splat.f64x2 v1 @@ -413,7 +413,7 @@ block0(v0: f64x2): ; fcmge v0.2d, v0.2d, #0.0 ; ret -function %f17_vconst(f64x2) -> b64x2 { +function %f17_vconst(f64x2) -> i64x2 { block0(v0: f64x2): v1 = vconst.f64x2 [0.0 0.0] v2 = fcmp le v1, v0 @@ -424,7 +424,7 @@ block0(v0: f64x2): ; fcmge v0.2d, v0.2d, #0.0 ; ret -function %f18(f64x2) -> b64x2 { +function %f18(f64x2) -> i64x2 { block0(v0: f64x2): v1 = f64const 0.0 v2 = splat.f64x2 v1 @@ -436,7 +436,7 @@ block0(v0: f64x2): ; fcmge v0.2d, v0.2d, #0.0 ; ret -function %f18_vconst(f64x2) -> b64x2 { +function %f18_vconst(f64x2) -> i64x2 { block0(v0: f64x2): v1 = vconst.f64x2 [0.0 0.0] v2 = fcmp ge v0, v1 @@ -447,7 +447,7 @@ block0(v0: f64x2): ; fcmge v0.2d, v0.2d, #0.0 ; ret -function %f19(f32x4) -> b32x4 { +function %f19(f32x4) -> i32x4 { block0(v0: f32x4): v1 = f32const 0.0 v2 = splat.f32x4 v1 @@ -459,7 +459,7 @@ block0(v0: f32x4): ; fcmle v0.4s, v0.4s, #0.0 ; ret -function %f19_vconst(f32x4) -> b32x4 { +function %f19_vconst(f32x4) -> i32x4 { block0(v0: f32x4): v1 = vconst.f32x4 [0.0 0.0 0.0 0.0] v2 = fcmp ge v1, v0 @@ -470,7 +470,7 @@ block0(v0: f32x4): ; fcmle v0.4s, v0.4s, #0.0 ; ret -function %f20(f32x4) -> b32x4 { +function %f20(f32x4) -> i32x4 { block0(v0: f32x4): v1 = f32const 0.0 v2 = splat.f32x4 v1 @@ -482,7 +482,7 @@ block0(v0: f32x4): ; fcmlt v0.4s, v0.4s, #0.0 ; ret -function %f20_vconst(f32x4) -> b32x4 { +function %f20_vconst(f32x4) -> i32x4 { block0(v0: f32x4): v1 = vconst.f32x4 [0.0 0.0 0.0 0.0] v2 = fcmp lt v0, v1 @@ -493,7 +493,7 @@ block0(v0: f32x4): ; fcmlt v0.4s, v0.4s, #0.0 ; ret -function %f21(f64x2) -> b64x2 { +function %f21(f64x2) -> i64x2 { block0(v0: f64x2): v1 = f64const 0.0 v2 = splat.f64x2 v1 @@ -505,7 +505,7 @@ block0(v0: f64x2): ; fcmgt v0.2d, v0.2d, #0.0 ; ret -function %f21_vconst(f64x2) -> b64x2 { +function %f21_vconst(f64x2) -> i64x2 { block0(v0: f64x2): v1 = vconst.f64x2 [0.0 0.0] v2 = fcmp lt v1, v0 @@ -516,7 +516,7 @@ block0(v0: f64x2): ; fcmgt v0.2d, v0.2d, #0.0 ; ret -function %f22(f64x2) -> b64x2 { +function %f22(f64x2) 
-> i64x2 { block0(v0: f64x2): v1 = f64const 0.0 v2 = splat.f64x2 v1 @@ -528,7 +528,7 @@ block0(v0: f64x2): ; fcmgt v0.2d, v0.2d, #0.0 ; ret -function %f22_vconst(f64x2) -> b64x2 { +function %f22_vconst(f64x2) -> i64x2 { block0(v0: f64x2): v1 = vconst.f64x2 [0.0 0.0] v2 = fcmp gt v0, v1 @@ -539,7 +539,7 @@ block0(v0: f64x2): ; fcmgt v0.2d, v0.2d, #0.0 ; ret -function %f23(f32x4) -> b32x4 { +function %f23(f32x4) -> i32x4 { block0(v0: f32x4): v1 = f32const 0.0 v2 = splat.f32x4 v1 @@ -551,7 +551,7 @@ block0(v0: f32x4): ; fcmlt v0.4s, v0.4s, #0.0 ; ret -function %f23_vconst(f32x4) -> b32x4 { +function %f23_vconst(f32x4) -> i32x4 { block0(v0: f32x4): v1 = vconst.f32x4 [0.0 0.0 0.0 0.0] v2 = fcmp gt v1, v0 diff --git a/cranelift/filetests/filetests/isa/aarch64/condbr.clif b/cranelift/filetests/filetests/isa/aarch64/condbr.clif index dd30f7ea8b7a..ee8aefcd57a8 100644 --- a/cranelift/filetests/filetests/isa/aarch64/condbr.clif +++ b/cranelift/filetests/filetests/isa/aarch64/condbr.clif @@ -2,7 +2,7 @@ test compile precise-output set unwind_info=false target aarch64 -function %f(i64, i64) -> b1 { +function %f(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp eq v0, v1 return v2 @@ -13,7 +13,7 @@ block0(v0: i64, v1: i64): ; cset x0, eq ; ret -function %icmp_eq_i128(i128, i128) -> b1 { +function %icmp_eq_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp eq v0, v1 return v2 @@ -25,7 +25,7 @@ block0(v0: i128, v1: i128): ; cset x0, eq ; ret -function %icmp_ne_i128(i128, i128) -> b1 { +function %icmp_ne_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp ne v0, v1 return v2 @@ -37,7 +37,7 @@ block0(v0: i128, v1: i128): ; cset x0, ne ; ret -function %icmp_slt_i128(i128, i128) -> b1 { +function %icmp_slt_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp slt v0, v1 return v2 @@ -51,7 +51,7 @@ block0(v0: i128, v1: i128): ; csel x0, x7, x10, eq ; ret -function %icmp_ult_i128(i128, i128) -> b1 { +function %icmp_ult_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp ult v0, v1 return v2 @@ -65,7 +65,7 @@ block0(v0: i128, v1: i128): ; csel x0, x7, x10, eq ; ret -function %icmp_sle_i128(i128, i128) -> b1 { +function %icmp_sle_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp sle v0, v1 return v2 @@ -79,7 +79,7 @@ block0(v0: i128, v1: i128): ; csel x0, x7, x10, eq ; ret -function %icmp_ule_i128(i128, i128) -> b1 { +function %icmp_ule_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp ule v0, v1 return v2 @@ -93,7 +93,7 @@ block0(v0: i128, v1: i128): ; csel x0, x7, x10, eq ; ret -function %icmp_sgt_i128(i128, i128) -> b1 { +function %icmp_sgt_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp sgt v0, v1 return v2 @@ -107,7 +107,7 @@ block0(v0: i128, v1: i128): ; csel x0, x7, x10, eq ; ret -function %icmp_ugt_i128(i128, i128) -> b1 { +function %icmp_ugt_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp ugt v0, v1 return v2 @@ -121,7 +121,7 @@ block0(v0: i128, v1: i128): ; csel x0, x7, x10, eq ; ret -function %icmp_sge_i128(i128, i128) -> b1 { +function %icmp_sge_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp sge v0, v1 return v2 @@ -135,7 +135,7 @@ block0(v0: i128, v1: i128): ; csel x0, x7, x10, eq ; ret -function %icmp_uge_i128(i128, i128) -> b1 { +function %icmp_uge_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp uge v0, v1 return v2 @@ -471,3 +471,4 @@ block1: ; b label3 ; block3: ; ret + diff --git a/cranelift/filetests/filetests/isa/aarch64/condops.clif 
b/cranelift/filetests/filetests/isa/aarch64/condops.clif index 8e422a0da818..1ffc8c58db15 100644 --- a/cranelift/filetests/filetests/isa/aarch64/condops.clif +++ b/cranelift/filetests/filetests/isa/aarch64/condops.clif @@ -737,7 +737,7 @@ block0(v0: i128, v1: i128, v2: i128): ; csdb ; ret -function %g(i8) -> b1 { +function %g(i8) -> i8 { block0(v0: i8): v3 = iconst.i8 42 v4 = ifcmp v0, v3 @@ -763,14 +763,14 @@ block0(v0: i8, v1: i8, v2: i8): ; orr w0, w5, w7 ; ret -function %i(b1, i8, i8) -> i8 { -block0(v0: b1, v1: i8, v2: i8): +function %i(i8, i8, i8) -> i8 { +block0(v0: i8, v1: i8, v2: i8): v3 = select.i8 v0, v1, v2 return v3 } ; block0: -; and w5, w0, #1 +; uxtb w5, w0 ; subs wzr, w5, wzr ; csel x0, x1, x2, ne ; ret @@ -788,14 +788,14 @@ block0(v0: i32, v1: i8, v2: i8): ; csel x0, x1, x2, eq ; ret -function %i128_select(b1, i128, i128) -> i128 { -block0(v0: b1, v1: i128, v2: i128): +function %i128_select(i8, i128, i128) -> i128 { +block0(v0: i8, v1: i128, v2: i128): v3 = select.i128 v0, v1, v2 return v3 } ; block0: -; and w8, w0, #1 +; uxtb w8, w0 ; subs wzr, w8, wzr ; csel x0, x2, x4, ne ; csel x1, x3, x5, ne diff --git a/cranelift/filetests/filetests/isa/aarch64/constants.clif b/cranelift/filetests/filetests/isa/aarch64/constants.clif index a7fa74698f9d..f40a4c908294 100644 --- a/cranelift/filetests/filetests/isa/aarch64/constants.clif +++ b/cranelift/filetests/filetests/isa/aarch64/constants.clif @@ -2,19 +2,19 @@ test compile precise-output set unwind_info=false target aarch64 -function %f() -> b8 { +function %f() -> i8 { block0: - v0 = bconst.b8 true + v0 = iconst.i8 -1 return v0 } ; block0: -; movz x0, #255 +; movn x0, #0 ; ret -function %f() -> b16 { +function %f() -> i16 { block0: - v0 = bconst.b16 false + v0 = iconst.i16 0 return v0 } diff --git a/cranelift/filetests/filetests/isa/aarch64/i128-bmask.clif b/cranelift/filetests/filetests/isa/aarch64/i128-bmask.clif new file mode 100644 index 000000000000..869d4b71e05e --- /dev/null +++ b/cranelift/filetests/filetests/isa/aarch64/i128-bmask.clif @@ -0,0 +1,112 @@ +test compile precise-output +target aarch64 + +function %bmask_i128_i128(i128) -> i128 { +block0(v0: i128): + v1 = bmask.i128 v0 + return v1 +} + +; block0: +; orr x5, x0, x1 +; subs xzr, x5, #0 +; csetm x1, ne +; mov x0, x1 +; ret + +function %bmask_i128_i64(i128) -> i64 { +block0(v0: i128): + v1 = bmask.i64 v0 + return v1 +} + +; block0: +; orr x4, x0, x1 +; subs xzr, x4, #0 +; csetm x0, ne +; ret + +function %bmask_i128_i32(i128) -> i32 { +block0(v0: i128): + v1 = bmask.i32 v0 + return v1 +} + +; block0: +; orr x4, x0, x1 +; subs xzr, x4, #0 +; csetm x0, ne +; ret + +function %bmask_i128_i16(i128) -> i16 { +block0(v0: i128): + v1 = bmask.i16 v0 + return v1 +} + +; block0: +; orr x4, x0, x1 +; subs xzr, x4, #0 +; csetm x0, ne +; ret + +function %bmask_i128_i8(i128) -> i8 { +block0(v0: i128): + v1 = bmask.i8 v0 + return v1 +} + +; block0: +; orr x4, x0, x1 +; subs xzr, x4, #0 +; csetm x0, ne +; ret + +function %bmask_i64_i128(i64) -> i128 { +block0(v0: i64): + v1 = bmask.i128 v0 + return v1 +} + +; block0: +; subs xzr, x0, #0 +; csetm x1, ne +; mov x0, x1 +; ret + +function %bmask_i32_i128(i32) -> i128 { +block0(v0: i32): + v1 = bmask.i128 v0 + return v1 +} + +; block0: +; subs xzr, x0, #0 +; csetm x1, ne +; mov x0, x1 +; ret + +function %bmask_i16_i128(i16) -> i128 { +block0(v0: i16): + v1 = bmask.i128 v0 + return v1 +} + +; block0: +; subs xzr, x0, #0 +; csetm x1, ne +; mov x0, x1 +; ret + +function %bmask_i8_i128(i8) -> i128 { +block0(v0: i8): + v1 = bmask.i128 v0 + 
return v1 +} + +; block0: +; subs xzr, x0, #0 +; csetm x1, ne +; mov x0, x1 +; ret + diff --git a/cranelift/filetests/filetests/isa/aarch64/iconst-icmp-small.clif b/cranelift/filetests/filetests/isa/aarch64/iconst-icmp-small.clif index bac108aadb19..c66f4c196ae0 100644 --- a/cranelift/filetests/filetests/isa/aarch64/iconst-icmp-small.clif +++ b/cranelift/filetests/filetests/isa/aarch64/iconst-icmp-small.clif @@ -10,16 +10,14 @@ function u0:0() -> i8 system_v { block0: v0 = iconst.i16 0xddcc v1 = icmp.i16 ne v0, v0 - v2 = bint.i8 v1 - return v2 + return v1 } ; block0: -; movz x2, #56780 -; uxth w4, w2 -; movz x6, #56780 -; subs wzr, w4, w6, UXTH -; cset x9, ne -; and w0, w9, #1 +; movz x1, #56780 +; uxth w3, w1 +; movz x5, #56780 +; subs wzr, w3, w5, UXTH +; cset x0, ne ; ret diff --git a/cranelift/filetests/filetests/isa/aarch64/reftypes.clif b/cranelift/filetests/filetests/isa/aarch64/reftypes.clif index 49520a0cf611..cc2dc49b48ae 100644 --- a/cranelift/filetests/filetests/isa/aarch64/reftypes.clif +++ b/cranelift/filetests/filetests/isa/aarch64/reftypes.clif @@ -10,7 +10,7 @@ block0(v0: r64): ; block0: ; ret -function %f1(r64) -> b1 { +function %f1(r64) -> i8 { block0(v0: r64): v1 = is_null v0 return v1 @@ -21,7 +21,7 @@ block0(v0: r64): ; cset x0, eq ; ret -function %f2(r64) -> b1 { +function %f2(r64) -> i8 { block0(v0: r64): v1 = is_invalid v0 return v1 @@ -43,7 +43,7 @@ block0: ; ret function %f4(r64, r64) -> r64, r64, r64 { - fn0 = %f(r64) -> b1 + fn0 = %f(r64) -> i8 ss0 = explicit_slot 8 block0(v0: r64, v1: r64): @@ -74,7 +74,7 @@ block3(v7: r64, v8: r64): ; mov x2, sp ; ldr x9, [sp, #8] ; str x9, [x2] -; and w3, w0, #1 +; uxtb w3, w0 ; cbz x3, label1 ; b label3 ; block1: ; b label2 diff --git a/cranelift/filetests/filetests/isa/aarch64/simd-bitwise-compile.clif b/cranelift/filetests/filetests/isa/aarch64/simd-bitwise-compile.clif index d9d0b23e1285..49f36e18a6e8 100644 --- a/cranelift/filetests/filetests/isa/aarch64/simd-bitwise-compile.clif +++ b/cranelift/filetests/filetests/isa/aarch64/simd-bitwise-compile.clif @@ -108,8 +108,8 @@ block0: ; bsl v0.16b, v0.16b, v4.16b, v5.16b ; ret -function %vselect_i16x8(b16x8, i16x8, i16x8) -> i16x8 { -block0(v0: b16x8, v1: i16x8, v2: i16x8): +function %vselect_i16x8(i16x8, i16x8, i16x8) -> i16x8 { +block0(v0: i16x8, v1: i16x8, v2: i16x8): v3 = vselect v0, v1, v2 return v3 } @@ -118,8 +118,8 @@ block0(v0: b16x8, v1: i16x8, v2: i16x8): ; bsl v0.16b, v0.16b, v1.16b, v2.16b ; ret -function %vselect_f32x4(b32x4, f32x4, f32x4) -> f32x4 { -block0(v0: b32x4, v1: f32x4, v2: f32x4): +function %vselect_f32x4(i32x4, f32x4, f32x4) -> f32x4 { +block0(v0: i32x4, v1: f32x4, v2: f32x4): v3 = vselect v0, v1, v2 return v3 } @@ -128,8 +128,8 @@ block0(v0: b32x4, v1: f32x4, v2: f32x4): ; bsl v0.16b, v0.16b, v1.16b, v2.16b ; ret -function %vselect_f64x2(b64x2, f64x2, f64x2) -> f64x2 { -block0(v0: b64x2, v1: f64x2, v2: f64x2): +function %vselect_f64x2(i64x2, f64x2, f64x2) -> f64x2 { +block0(v0: i64x2, v1: f64x2, v2: f64x2): v3 = vselect v0, v1, v2 return v3 } diff --git a/cranelift/filetests/filetests/isa/aarch64/simd-comparison-legalize.clif b/cranelift/filetests/filetests/isa/aarch64/simd-comparison-legalize.clif index 5f724d03fc37..05b98bb6431d 100644 --- a/cranelift/filetests/filetests/isa/aarch64/simd-comparison-legalize.clif +++ b/cranelift/filetests/filetests/isa/aarch64/simd-comparison-legalize.clif @@ -2,7 +2,7 @@ test compile precise-output set enable_simd target aarch64 -function %icmp_ne_32x4(i32x4, i32x4) -> b32x4 { +function %icmp_ne_32x4(i32x4, 
i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp ne v0, v1 return v2 @@ -13,7 +13,7 @@ block0(v0: i32x4, v1: i32x4): ; mvn v0.16b, v4.16b ; ret -function %icmp_ugt_i32x4(i32x4, i32x4) -> b32x4 { +function %icmp_ugt_i32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp ugt v0, v1 return v2 @@ -23,7 +23,7 @@ block0(v0: i32x4, v1: i32x4): ; cmhi v0.4s, v0.4s, v1.4s ; ret -function %icmp_sge_i16x8(i16x8, i16x8) -> b16x8 { +function %icmp_sge_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp sge v0, v1 return v2 @@ -33,7 +33,7 @@ block0(v0: i16x8, v1: i16x8): ; cmge v0.8h, v0.8h, v1.8h ; ret -function %icmp_uge_i8x16(i8x16, i8x16) -> b8x16 { +function %icmp_uge_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp uge v0, v1 return v2 diff --git a/cranelift/filetests/filetests/isa/aarch64/simd-lane-access-compile.clif b/cranelift/filetests/filetests/isa/aarch64/simd-lane-access-compile.clif index a6968ab206f7..f1a8e1a0bfa0 100644 --- a/cranelift/filetests/filetests/isa/aarch64/simd-lane-access-compile.clif +++ b/cranelift/filetests/filetests/isa/aarch64/simd-lane-access-compile.clif @@ -59,10 +59,10 @@ block0(v0: i8): ; dup v0.16b, w0 ; ret -function %splat_b16() -> b16x8 { +function %splat_i16() -> i16x8 { block0: - v0 = bconst.b16 true - v1 = splat.b16x8 v0 + v0 = iconst.i16 -1 + v1 = splat.i16x8 v0 return v1 } diff --git a/cranelift/filetests/filetests/isa/aarch64/simd-logical-compile.clif b/cranelift/filetests/filetests/isa/aarch64/simd-logical-compile.clif index 34a41ea5413b..654fcb2bef9b 100644 --- a/cranelift/filetests/filetests/isa/aarch64/simd-logical-compile.clif +++ b/cranelift/filetests/filetests/isa/aarch64/simd-logical-compile.clif @@ -2,8 +2,8 @@ test compile precise-output set enable_simd target aarch64 -function %bnot_b32x4(b32x4) -> b32x4 { -block0(v0: b32x4): +function %bnot_i32x4(i32x4) -> i32x4 { +block0(v0: i32x4): v1 = bnot v0 return v1 } @@ -12,8 +12,8 @@ block0(v0: b32x4): ; mvn v0.16b, v0.16b ; ret -function %vany_true_b32x4(b32x4) -> b1 { -block0(v0: b32x4): +function %vany_true_i32x4(i32x4) -> i8 { +block0(v0: i32x4): v1 = vany_true v0 return v1 } @@ -25,7 +25,7 @@ block0(v0: b32x4): ; cset x0, ne ; ret -function %vall_true_i64x2(i64x2) -> b1 { +function %vall_true_i64x2(i64x2) -> i8 { block0(v0: i64x2): v1 = vall_true v0 return v1 diff --git a/cranelift/filetests/filetests/isa/aarch64/simd-valltrue.clif b/cranelift/filetests/filetests/isa/aarch64/simd-valltrue.clif index c969b1e9be86..19a9f0455d49 100644 --- a/cranelift/filetests/filetests/isa/aarch64/simd-valltrue.clif +++ b/cranelift/filetests/filetests/isa/aarch64/simd-valltrue.clif @@ -2,8 +2,8 @@ test compile precise-output set unwind_info=false target aarch64 -function %fn0(b8x8) -> b1 { -block0(v0: b8x8): +function %fn0(i8x8) -> i8 { +block0(v0: i8x8): v1 = vall_true v0 return v1 } @@ -15,8 +15,8 @@ block0(v0: b8x8): ; cset x0, ne ; ret -function %fn1(b8x16) -> b1 { -block0(v0: b8x16): +function %fn1(i8x16) -> i8 { +block0(v0: i8x16): v1 = vall_true v0 return v1 } @@ -28,8 +28,8 @@ block0(v0: b8x16): ; cset x0, ne ; ret -function %fn2(b16x4) -> b1 { -block0(v0: b16x4): +function %fn2(i16x4) -> i8 { +block0(v0: i16x4): v1 = vall_true v0 return v1 } @@ -41,8 +41,8 @@ block0(v0: b16x4): ; cset x0, ne ; ret -function %fn3(b16x8) -> b1 { -block0(v0: b16x8): +function %fn3(i16x8) -> i8 { +block0(v0: i16x8): v1 = vall_true v0 return v1 } @@ -54,8 +54,8 @@ block0(v0: b16x8): ; cset x0, ne ; ret -function %fn4(b32x2) -> b1 { -block0(v0: b32x2): +function 
%fn4(i32x2) -> i8 { +block0(v0: i32x2): v1 = vall_true v0 return v1 } @@ -67,8 +67,8 @@ block0(v0: b32x2): ; cset x0, ne ; ret -function %fn5(b32x4) -> b1 { -block0(v0: b32x4): +function %fn5(i32x4) -> i8 { +block0(v0: i32x4): v1 = vall_true v0 return v1 } @@ -80,8 +80,8 @@ block0(v0: b32x4): ; cset x0, ne ; ret -function %fn6(b64x2) -> b1 { -block0(v0: b64x2): +function %fn6(i64x2) -> i8 { +block0(v0: i64x2): v1 = vall_true v0 return v1 } @@ -92,3 +92,4 @@ block0(v0: b64x2): ; fcmp d5, d5 ; cset x0, eq ; ret + diff --git a/cranelift/filetests/filetests/isa/aarch64/simd.clif b/cranelift/filetests/filetests/isa/aarch64/simd.clif index f4b357f73732..087a26269a35 100644 --- a/cranelift/filetests/filetests/isa/aarch64/simd.clif +++ b/cranelift/filetests/filetests/isa/aarch64/simd.clif @@ -28,18 +28,6 @@ block0: ; dup v0.8h, w2 ; ret -function %f3() -> b8x16 { -block0: - v0 = bconst.b32 true - v1 = breduce.b8 v0 - v2 = splat.b8x16 v1 - return v2 -} - -; block0: -; movi v0.16b, #255 -; ret - function %f4(i32, i8x16, i8x16) -> i8x16 { block0(v0: i32, v1: i8x16, v2: i8x16): v3 = select v0, v1, v2 diff --git a/cranelift/filetests/filetests/isa/aarch64/stack.clif b/cranelift/filetests/filetests/isa/aarch64/stack.clif index a70b543473d9..0969956a8f5c 100644 --- a/cranelift/filetests/filetests/isa/aarch64/stack.clif +++ b/cranelift/filetests/filetests/isa/aarch64/stack.clif @@ -123,10 +123,10 @@ block0(v0: i64): ; ldp fp, lr, [sp], #16 ; ret -function %b1_spill_slot(b1) -> b1, i64 { +function %i8_spill_slot(i8) -> i8, i64 { ss0 = explicit_slot 1000 -block0(v0: b1): +block0(v0: i8): v1 = iconst.i64 1 v2 = iconst.i64 2 v3 = iconst.i64 3 diff --git a/cranelift/filetests/filetests/isa/riscv64/bitops.clif b/cranelift/filetests/filetests/isa/riscv64/bitops.clif index 59ec0be9c302..650e23e88fe1 100644 --- a/cranelift/filetests/filetests/isa/riscv64/bitops.clif +++ b/cranelift/filetests/filetests/isa/riscv64/bitops.clif @@ -315,28 +315,6 @@ block0(v0: i8): ; mv a0,a3 ; ret -function %bextend_b8() -> b32 { -block0: - v1 = bconst.b8 true - v2 = bextend.b32 v1 - return v2 -} - -; block0: -; li a0,-1 -; ret - -function %bextend_b1() -> b32 { -block0: - v1 = bconst.b1 true - v2 = bextend.b32 v1 - return v2 -} - -; block0: -; li a0,-1 -; ret - function %bnot_i32(i32) -> i32 { block0(v0: i32): v1 = bnot v0 diff --git a/cranelift/filetests/filetests/isa/riscv64/condbr.clif b/cranelift/filetests/filetests/isa/riscv64/condbr.clif index 0142b3842371..2d547c39edbb 100644 --- a/cranelift/filetests/filetests/isa/riscv64/condbr.clif +++ b/cranelift/filetests/filetests/isa/riscv64/condbr.clif @@ -2,7 +2,7 @@ test compile precise-output set unwind_info=false target riscv64 -function %f(i64, i64) -> b1 { +function %f(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp eq v0, v1 return v2 @@ -12,7 +12,7 @@ block0(v0: i64, v1: i64): ; eq a0,a0,a1##ty=i64 ; ret -function %icmp_eq_i128(i128, i128) -> b1 { +function %icmp_eq_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp eq v0, v1 return v2 @@ -22,7 +22,7 @@ block0(v0: i128, v1: i128): ; eq a0,[a0,a1],[a2,a3]##ty=i128 ; ret -function %icmp_ne_i128(i128, i128) -> b1 { +function %icmp_ne_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp ne v0, v1 return v2 @@ -32,7 +32,7 @@ block0(v0: i128, v1: i128): ; ne a0,[a0,a1],[a2,a3]##ty=i128 ; ret -function %icmp_slt_i128(i128, i128) -> b1 { +function %icmp_slt_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp slt v0, v1 return v2 @@ -42,7 +42,7 @@ block0(v0: i128, v1: i128): ; slt 
a0,[a0,a1],[a2,a3]##ty=i128 ; ret -function %icmp_ult_i128(i128, i128) -> b1 { +function %icmp_ult_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp ult v0, v1 return v2 @@ -52,7 +52,7 @@ block0(v0: i128, v1: i128): ; ult a0,[a0,a1],[a2,a3]##ty=i128 ; ret -function %icmp_sle_i128(i128, i128) -> b1 { +function %icmp_sle_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp sle v0, v1 return v2 @@ -62,7 +62,7 @@ block0(v0: i128, v1: i128): ; sle a0,[a0,a1],[a2,a3]##ty=i128 ; ret -function %icmp_ule_i128(i128, i128) -> b1 { +function %icmp_ule_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp ule v0, v1 return v2 @@ -72,7 +72,7 @@ block0(v0: i128, v1: i128): ; ule a0,[a0,a1],[a2,a3]##ty=i128 ; ret -function %icmp_sgt_i128(i128, i128) -> b1 { +function %icmp_sgt_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp sgt v0, v1 return v2 @@ -82,7 +82,7 @@ block0(v0: i128, v1: i128): ; sgt a0,[a0,a1],[a2,a3]##ty=i128 ; ret -function %icmp_ugt_i128(i128, i128) -> b1 { +function %icmp_ugt_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp ugt v0, v1 return v2 @@ -92,7 +92,7 @@ block0(v0: i128, v1: i128): ; ugt a0,[a0,a1],[a2,a3]##ty=i128 ; ret -function %icmp_sge_i128(i128, i128) -> b1 { +function %icmp_sge_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp sge v0, v1 return v2 @@ -102,7 +102,7 @@ block0(v0: i128, v1: i128): ; sge a0,[a0,a1],[a2,a3]##ty=i128 ; ret -function %icmp_uge_i128(i128, i128) -> b1 { +function %icmp_uge_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp uge v0, v1 return v2 diff --git a/cranelift/filetests/filetests/isa/riscv64/condops.clif b/cranelift/filetests/filetests/isa/riscv64/condops.clif index b1594909bfe5..eecac50e3859 100644 --- a/cranelift/filetests/filetests/isa/riscv64/condops.clif +++ b/cranelift/filetests/filetests/isa/riscv64/condops.clif @@ -18,7 +18,7 @@ block0(v0: i8, v1: i64, v2: i64): ; selectif a0,a1,a2##test=t4 ; ret -function %g(i8) -> b1 { +function %g(i8) -> i8 { block0(v0: i8): v3 = iconst.i8 42 v4 = ifcmp v0, v3 @@ -48,8 +48,8 @@ block0(v0: i8, v1: i8, v2: i8): ; or a0,a2,a6 ; ret -function %i(b1, i8, i8) -> i8 { -block0(v0: b1, v1: i8, v2: i8): +function %i(i8, i8, i8) -> i8 { +block0(v0: i8, v1: i8, v2: i8): v3 = select.i8 v0, v1, v2 return v3 } @@ -74,8 +74,8 @@ block0(v0: i32, v1: i8, v2: i8): ; select_i8 a0,a1,a2##condition=t4 ; ret -function %i128_select(b1, i128, i128) -> i128 { -block0(v0: b1, v1: i128, v2: i128): +function %i128_select(i8, i128, i128) -> i128 { +block0(v0: i8, v1: i128, v2: i128): v3 = select.i128 v0, v1, v2 return v3 } diff --git a/cranelift/filetests/filetests/isa/riscv64/constants.clif b/cranelift/filetests/filetests/isa/riscv64/constants.clif index 39f0a095b6cd..84be21c1f276 100644 --- a/cranelift/filetests/filetests/isa/riscv64/constants.clif +++ b/cranelift/filetests/filetests/isa/riscv64/constants.clif @@ -2,9 +2,9 @@ test compile precise-output set unwind_info=false target riscv64 -function %f() -> b8 { +function %f() -> i8 { block0: - v0 = bconst.b8 true + v0 = iconst.i8 -1 return v0 } @@ -12,9 +12,9 @@ block0: ; li a0,-1 ; ret -function %f() -> b16 { +function %f() -> i16 { block0: - v0 = bconst.b16 false + v0 = iconst.i16 0 return v0 } diff --git a/cranelift/filetests/filetests/isa/riscv64/i128-bmask.clif b/cranelift/filetests/filetests/isa/riscv64/i128-bmask.clif new file mode 100644 index 000000000000..558b8b369762 --- /dev/null +++ b/cranelift/filetests/filetests/isa/riscv64/i128-bmask.clif @@ -0,0 +1,113 @@ +test compile 
precise-output +set unwind_info=false +target riscv64 + +function %bmask_i128_i128(i128) -> i128 { +block0(v0: i128): + v1 = bmask.i128 v0 + return v1 +} + +; block0: +; or a2,a0,a1 +; li a4,-1 +; select_reg a1,zero,a4##condition=(zero eq a2) +; mv a0,a1 +; ret + +function %bmask_i128_i64(i128) -> i64 { +block0(v0: i128): + v1 = bmask.i64 v0 + return v1 +} + +; block0: +; or a1,a0,a1 +; li a3,-1 +; select_reg a0,zero,a3##condition=(zero eq a1) +; ret + +function %bmask_i128_i32(i128) -> i32 { +block0(v0: i128): + v1 = bmask.i32 v0 + return v1 +} + +; block0: +; or a1,a0,a1 +; li a3,-1 +; select_reg a0,zero,a3##condition=(zero eq a1) +; ret + +function %bmask_i128_i16(i128) -> i16 { +block0(v0: i128): + v1 = bmask.i16 v0 + return v1 +} + +; block0: +; or a1,a0,a1 +; li a3,-1 +; select_reg a0,zero,a3##condition=(zero eq a1) +; ret + +function %bmask_i128_i8(i128) -> i8 { +block0(v0: i128): + v1 = bmask.i8 v0 + return v1 +} + +; block0: +; or a1,a0,a1 +; li a3,-1 +; select_reg a0,zero,a3##condition=(zero eq a1) +; ret + +function %bmask_i64_i128(i64) -> i128 { +block0(v0: i64): + v1 = bmask.i128 v0 + return v1 +} + +; block0: +; li a1,-1 +; select_reg a1,zero,a1##condition=(zero eq a0) +; mv a0,a1 +; ret + +function %bmask_i32_i128(i32) -> i128 { +block0(v0: i32): + v1 = bmask.i128 v0 + return v1 +} + +; block0: +; li a1,-1 +; select_reg a1,zero,a1##condition=(zero eq a0) +; mv a0,a1 +; ret + +function %bmask_i16_i128(i16) -> i128 { +block0(v0: i16): + v1 = bmask.i128 v0 + return v1 +} + +; block0: +; li a1,-1 +; select_reg a1,zero,a1##condition=(zero eq a0) +; mv a0,a1 +; ret + +function %bmask_i8_i128(i8) -> i128 { +block0(v0: i8): + v1 = bmask.i128 v0 + return v1 +} + +; block0: +; li a1,-1 +; select_reg a1,zero,a1##condition=(zero eq a0) +; mv a0,a1 +; ret + diff --git a/cranelift/filetests/filetests/isa/riscv64/iconst-icmp-small.clif b/cranelift/filetests/filetests/isa/riscv64/iconst-icmp-small.clif index e5a546f7e1e1..9cad68ae6f80 100644 --- a/cranelift/filetests/filetests/isa/riscv64/iconst-icmp-small.clif +++ b/cranelift/filetests/filetests/isa/riscv64/iconst-icmp-small.clif @@ -7,18 +7,16 @@ function u0:0() -> i8 system_v { block0: v0 = iconst.i16 0xddcc v1 = icmp.i16 ne v0, v0 - v2 = bint.i8 v1 - return v2 + return v1 } ; block0: -; lui t2,14 -; addi t2,t2,3532 -; lui a2,14 -; addi a2,a2,3532 -; uext.h a5,t2 -; uext.h a7,a2 -; ne t4,a5,a7##ty=i16 -; andi a0,t4,1 +; lui t1,14 +; addi t1,t1,3532 +; lui a1,14 +; addi a1,a1,3532 +; uext.h a4,t1 +; uext.h a6,a1 +; ne a0,a4,a6##ty=i16 ; ret diff --git a/cranelift/filetests/filetests/isa/riscv64/reftypes.clif b/cranelift/filetests/filetests/isa/riscv64/reftypes.clif index 4ce8c491a0c4..c497aec11513 100644 --- a/cranelift/filetests/filetests/isa/riscv64/reftypes.clif +++ b/cranelift/filetests/filetests/isa/riscv64/reftypes.clif @@ -10,7 +10,7 @@ block0(v0: r64): ; block0: ; ret -function %f1(r64) -> b1 { +function %f1(r64) -> i8 { block0(v0: r64): v1 = is_null v0 return v1 @@ -20,7 +20,7 @@ block0(v0: r64): ; is_null a0,a0 ; ret -function %f2(r64) -> b1 { +function %f2(r64) -> i8 { block0(v0: r64): v1 = is_invalid v0 return v1 @@ -41,7 +41,7 @@ block0: ; ret function %f4(r64, r64) -> r64, r64, r64 { - fn0 = %f(r64) -> b1 + fn0 = %f(r64) -> i8 ss0 = explicit_slot 8 block0(v0: r64, v1: r64): diff --git a/cranelift/filetests/filetests/isa/riscv64/stack.clif b/cranelift/filetests/filetests/isa/riscv64/stack.clif index 5e3aaca467e6..1045b4380b43 100644 --- a/cranelift/filetests/filetests/isa/riscv64/stack.clif +++ 
b/cranelift/filetests/filetests/isa/riscv64/stack.clif @@ -144,10 +144,10 @@ block0(v0: i64): ; add sp,+16 ; ret -function %b1_spill_slot(b1) -> b1, i64 { +function %i8_spill_slot(i8) -> i8, i64 { ss0 = explicit_slot 1000 -block0(v0: b1): +block0(v0: i8): v1 = iconst.i64 1 v2 = iconst.i64 2 v3 = iconst.i64 3 diff --git a/cranelift/filetests/filetests/isa/s390x/condbr.clif b/cranelift/filetests/filetests/isa/s390x/condbr.clif index 9aa2bf41978d..f971f7b356a4 100644 --- a/cranelift/filetests/filetests/isa/s390x/condbr.clif +++ b/cranelift/filetests/filetests/isa/s390x/condbr.clif @@ -1,7 +1,7 @@ test compile precise-output target s390x -function %f(i64, i64) -> b1 { +function %f(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp eq v0, v1 return v2 diff --git a/cranelift/filetests/filetests/isa/s390x/condops.clif b/cranelift/filetests/filetests/isa/s390x/condops.clif index 30e95dd61074..d097e44e5d25 100644 --- a/cranelift/filetests/filetests/isa/s390x/condops.clif +++ b/cranelift/filetests/filetests/isa/s390x/condops.clif @@ -16,14 +16,14 @@ block0(v0: i8, v1: i64, v2: i64): ; locgre %r2, %r3 ; br %r14 -function %g(b1, i8, i8) -> i8 { -block0(v0: b1, v1: i8, v2: i8): +function %g(i8, i8, i8) -> i8 { +block0(v0: i8, v1: i8, v2: i8): v3 = select.i8 v0, v1, v2 return v3 } ; block0: -; llcr %r5, %r2 +; lbr %r5, %r2 ; chi %r5, 0 ; lgr %r2, %r4 ; locrlh %r2, %r3 diff --git a/cranelift/filetests/filetests/isa/s390x/constants.clif b/cranelift/filetests/filetests/isa/s390x/constants.clif index 9a9025873b20..2f6c0966fa45 100644 --- a/cranelift/filetests/filetests/isa/s390x/constants.clif +++ b/cranelift/filetests/filetests/isa/s390x/constants.clif @@ -1,19 +1,19 @@ test compile precise-output target s390x -function %f() -> b8 { +function %f() -> i8 { block0: - v0 = bconst.b8 true + v0 = iconst.i8 -1 return v0 } ; block0: -; lhi %r2, 255 +; lhi %r2, -1 ; br %r14 -function %f() -> b16 { +function %f() -> i16 { block0: - v0 = bconst.b16 false + v0 = iconst.i16 0 return v0 } diff --git a/cranelift/filetests/filetests/isa/s390x/conversions.clif b/cranelift/filetests/filetests/isa/s390x/conversions.clif index b9b36fdbf6b0..0498db26760b 100644 --- a/cranelift/filetests/filetests/isa/s390x/conversions.clif +++ b/cranelift/filetests/filetests/isa/s390x/conversions.clif @@ -324,972 +324,395 @@ block0(v0: i16, v1: i16): ; lgr %r2, %r3 ; br %r14 -function %bextend_b64_b128(b64) -> b128 { -block0(v0: b64): - v1 = bextend.b128 v0 - return v1 -} - -; block0: -; vlvgp %v4, %r3, %r3 -; vst %v4, 0(%r2) -; br %r14 - -function %bextend_b32_b128(b32) -> b128 { -block0(v0: b32): - v1 = bextend.b128 v0 - return v1 -} - -; block0: -; lgfr %r3, %r3 -; vlvgp %v6, %r3, %r3 -; vst %v6, 0(%r2) -; br %r14 - -function %bextend_b32_b64(b32) -> b64 { -block0(v0: b32): - v1 = bextend.b64 v0 - return v1 -} - -; block0: -; lgfr %r2, %r2 -; br %r14 - -function %bextend_b16_b128(b16) -> b128 { -block0(v0: b16): - v1 = bextend.b128 v0 - return v1 -} - -; block0: -; lghr %r3, %r3 -; vlvgp %v6, %r3, %r3 -; vst %v6, 0(%r2) -; br %r14 - -function %bextend_b16_b64(b16) -> b64 { -block0(v0: b16): - v1 = bextend.b64 v0 - return v1 -} - -; block0: -; lghr %r2, %r2 -; br %r14 - -function %bextend_b16_b32(b16) -> b32 { -block0(v0: b16): - v1 = bextend.b32 v0 - return v1 -} - -; block0: -; lhr %r2, %r2 -; br %r14 - -function %bextend_b8_b128(b8) -> b128 { -block0(v0: b8): - v1 = bextend.b128 v0 - return v1 -} - -; block0: -; lgbr %r3, %r3 -; vlvgp %v6, %r3, %r3 -; vst %v6, 0(%r2) -; br %r14 - -function %bextend_b8_b64(b8) -> b64 { -block0(v0: b8): 
- v1 = bextend.b64 v0 - return v1 -} - -; block0: -; lgbr %r2, %r2 -; br %r14 - -function %bextend_b8_b32(b8) -> b32 { -block0(v0: b8): - v1 = bextend.b32 v0 - return v1 -} - -; block0: -; lbr %r2, %r2 -; br %r14 - -function %bextend_b8_b16(b8) -> b16 { -block0(v0: b8): - v1 = bextend.b16 v0 - return v1 -} - -; block0: -; lbr %r2, %r2 -; br %r14 - -function %bextend_b1_b128(b1) -> b128 { -block0(v0: b1): - v1 = bextend.b128 v0 - return v1 -} - -; block0: -; sllg %r3, %r3, 63 -; srag %r4, %r3, 63 -; vlvgp %v16, %r4, %r4 -; vst %v16, 0(%r2) -; br %r14 - -function %bextend_b1_b64(b1) -> b64 { -block0(v0: b1): - v1 = bextend.b64 v0 - return v1 -} - -; block0: -; sllg %r5, %r2, 63 -; srag %r2, %r5, 63 -; br %r14 - -function %bextend_b1_b32(b1) -> b32 { -block0(v0: b1): - v1 = bextend.b32 v0 - return v1 -} - -; block0: -; sllk %r5, %r2, 31 -; srak %r2, %r5, 31 -; br %r14 - -function %bextend_b1_b16(b1) -> b16 { -block0(v0: b1): - v1 = bextend.b16 v0 - return v1 -} - -; block0: -; sllk %r5, %r2, 31 -; srak %r2, %r5, 31 -; br %r14 - -function %bextend_b1_b8(b1) -> b8 { -block0(v0: b1): - v1 = bextend.b8 v0 - return v1 -} - -; block0: -; sllk %r5, %r2, 31 -; srak %r2, %r5, 31 -; br %r14 - -function %breduce_b128_b64(b128) -> b64 { -block0(v0: b128): - v1 = breduce.b64 v0 - return v1 -} - -; block0: -; vl %v0, 0(%r2) -; vlgvg %r2, %v0, 1 -; br %r14 - -function %breduce_b128_b32(b128) -> b32 { -block0(v0: b128): - v1 = breduce.b32 v0 - return v1 -} - -; block0: -; vl %v0, 0(%r2) -; vlgvg %r2, %v0, 1 -; br %r14 - -function %breduce_b128_b16(b128) -> b16 { -block0(v0: b128): - v1 = breduce.b16 v0 - return v1 -} - -; block0: -; vl %v0, 0(%r2) -; vlgvg %r2, %v0, 1 -; br %r14 - -function %breduce_b128_b8(b128) -> b8 { -block0(v0: b128): - v1 = breduce.b8 v0 - return v1 -} - -; block0: -; vl %v0, 0(%r2) -; vlgvg %r2, %v0, 1 -; br %r14 - -function %breduce_b128_b1(b128) -> b1 { -block0(v0: b128): - v1 = breduce.b1 v0 - return v1 -} - -; block0: -; vl %v0, 0(%r2) -; vlgvg %r2, %v0, 1 -; br %r14 - -function %breduce_b64_b32(b64, b64) -> b32 { -block0(v0: b64, v1: b64): - v2 = breduce.b32 v1 - return v2 -} - -; block0: -; lgr %r2, %r3 -; br %r14 - -function %breduce_b64_b16(b64, b64) -> b16 { -block0(v0: b64, v1: b64): - v2 = breduce.b16 v1 - return v2 -} - -; block0: -; lgr %r2, %r3 -; br %r14 - -function %breduce_b64_b8(b64, b64) -> b8 { -block0(v0: b64, v1: b64): - v2 = breduce.b8 v1 - return v2 -} - -; block0: -; lgr %r2, %r3 -; br %r14 - -function %breduce_b64_b1(b64, b64) -> b1 { -block0(v0: b64, v1: b64): - v2 = breduce.b1 v1 - return v2 -} - -; block0: -; lgr %r2, %r3 -; br %r14 - -function %breduce_b32_b16(b32, b32) -> b16 { -block0(v0: b32, v1: b32): - v2 = breduce.b16 v1 - return v2 -} - -; block0: -; lgr %r2, %r3 -; br %r14 - -function %breduce_b32_b8(b32, b32) -> b8 { -block0(v0: b32, v1: b32): - v2 = breduce.b8 v1 - return v2 -} - -; block0: -; lgr %r2, %r3 -; br %r14 - -function %breduce_b32_b1(b32, b32) -> b1 { -block0(v0: b32, v1: b32): - v2 = breduce.b1 v1 - return v2 -} - -; block0: -; lgr %r2, %r3 -; br %r14 - -function %breduce_b16_b8(b16, b16) -> b8 { -block0(v0: b16, v1: b16): - v2 = breduce.b8 v1 - return v2 -} - -; block0: -; lgr %r2, %r3 -; br %r14 - -function %breduce_b16_b1(b16, b16) -> b1 { -block0(v0: b16, v1: b16): - v2 = breduce.b1 v1 - return v2 -} - -; block0: -; lgr %r2, %r3 -; br %r14 - -function %breduce_b8_b1(b8, b8) -> b1 { -block0(v0: b8, v1: b8): - v2 = breduce.b1 v1 - return v2 -} - -; block0: -; lgr %r2, %r3 -; br %r14 - -function %bmask_b128_i128(b128) -> i128 { 
-block0(v0: b128): +function %bmask_i128_i128(i128) -> i128 { +block0(v0: i128): v1 = bmask.i128 v0 return v1 } ; block0: ; vl %v0, 0(%r3) -; vst %v0, 0(%r2) +; lgdr %r3, %f0 +; vlgvg %r5, %v0, 1 +; ogr %r3, %r5 +; cghi %r3, 0 +; lghi %r3, 0 +; locghilh %r3, -1 +; vlvgp %v23, %r3, %r3 +; vst %v23, 0(%r2) ; br %r14 -function %bmask_b128_i64(b128) -> i64 { -block0(v0: b128): +function %bmask_i128_i64(i128) -> i64 { +block0(v0: i128): v1 = bmask.i64 v0 return v1 } ; block0: ; vl %v0, 0(%r2) -; vlgvg %r2, %v0, 1 +; lgdr %r5, %f0 +; vlgvg %r3, %v0, 1 +; ogr %r5, %r3 +; cghi %r5, 0 +; lghi %r2, 0 +; locghilh %r2, -1 ; br %r14 -function %bmask_b128_i32(b128) -> i32 { -block0(v0: b128): +function %bmask_i128_i32(i128) -> i32 { +block0(v0: i128): v1 = bmask.i32 v0 return v1 } ; block0: ; vl %v0, 0(%r2) -; vlgvg %r2, %v0, 1 +; lgdr %r5, %f0 +; vlgvg %r3, %v0, 1 +; ogr %r5, %r3 +; cghi %r5, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b128_i16(b128) -> i16 { -block0(v0: b128): +function %bmask_i128_i16(i128) -> i16 { +block0(v0: i128): v1 = bmask.i16 v0 return v1 } ; block0: ; vl %v0, 0(%r2) -; vlgvg %r2, %v0, 1 +; lgdr %r5, %f0 +; vlgvg %r3, %v0, 1 +; ogr %r5, %r3 +; cghi %r5, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b128_i8(b128) -> i8 { -block0(v0: b128): +function %bmask_i128_i8(i128) -> i8 { +block0(v0: i128): v1 = bmask.i8 v0 return v1 } ; block0: ; vl %v0, 0(%r2) -; vlgvg %r2, %v0, 1 +; lgdr %r5, %f0 +; vlgvg %r3, %v0, 1 +; ogr %r5, %r3 +; cghi %r5, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b64_i128(b64, b64) -> i128 { -block0(v0: b64, v1: b64): +function %bmask_i64_i128(i64, i64) -> i128 { +block0(v0: i64, v1: i64): v2 = bmask.i128 v1 return v2 } ; block0: -; vlvgp %v5, %r4, %r4 -; vst %v5, 0(%r2) +; cghi %r4, 0 +; lghi %r4, 0 +; locghilh %r4, -1 +; vlvgp %v17, %r4, %r4 +; vst %v17, 0(%r2) ; br %r14 -function %bmask_b64_i64(b64, b64) -> i64 { -block0(v0: b64, v1: b64): +function %bmask_i64_i64(i64, i64) -> i64 { +block0(v0: i64, v1: i64): v2 = bmask.i64 v1 return v2 } ; block0: -; lgr %r2, %r3 +; cghi %r3, 0 +; lghi %r2, 0 +; locghilh %r2, -1 ; br %r14 -function %bmask_b64_i32(b64, b64) -> i32 { -block0(v0: b64, v1: b64): +function %bmask_i64_i32(i64, i64) -> i32 { +block0(v0: i64, v1: i64): v2 = bmask.i32 v1 return v2 } ; block0: -; lgr %r2, %r3 +; cghi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b64_i16(b64, b64) -> i16 { -block0(v0: b64, v1: b64): +function %bmask_i64_i16(i64, i64) -> i16 { +block0(v0: i64, v1: i64): v2 = bmask.i16 v1 return v2 } ; block0: -; lgr %r2, %r3 +; cghi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b64_i8(b64, b64) -> i8 { -block0(v0: b64, v1: b64): +function %bmask_i64_i8(i64, i64) -> i8 { +block0(v0: i64, v1: i64): v2 = bmask.i8 v1 return v2 } ; block0: -; lgr %r2, %r3 +; cghi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b32_i128(b32, b32) -> i128 { -block0(v0: b32, v1: b32): +function %bmask_i32_i128(i32, i32) -> i128 { +block0(v0: i32, v1: i32): v2 = bmask.i128 v1 return v2 } ; block0: -; lgfr %r3, %r4 -; vlvgp %v7, %r3, %r3 -; vst %v7, 0(%r2) +; chi %r4, 0 +; lghi %r4, 0 +; locghilh %r4, -1 +; vlvgp %v17, %r4, %r4 +; vst %v17, 0(%r2) ; br %r14 -function %bmask_b32_i64(b32, b32) -> i64 { -block0(v0: b32, v1: b32): +function %bmask_i32_i64(i32, i32) -> i64 { +block0(v0: i32, v1: i32): v2 = bmask.i64 v1 return v2 } ; block0: -; lgfr %r2, %r3 +; chi %r3, 0 +; lghi %r2, 0 +; locghilh %r2, -1 ; br %r14 -function %bmask_b32_i32(b32, b32) -> 
i32 { -block0(v0: b32, v1: b32): +function %bmask_i32_i32(i32, i32) -> i32 { +block0(v0: i32, v1: i32): v2 = bmask.i32 v1 return v2 } ; block0: -; lgr %r2, %r3 +; chi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b32_i16(b32, b32) -> i16 { -block0(v0: b32, v1: b32): +function %bmask_i32_i16(i32, i32) -> i16 { +block0(v0: i32, v1: i32): v2 = bmask.i16 v1 return v2 } ; block0: -; lgr %r2, %r3 +; chi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b32_i8(b32, b32) -> i8 { -block0(v0: b32, v1: b32): +function %bmask_i32_i8(i32, i32) -> i8 { +block0(v0: i32, v1: i32): v2 = bmask.i8 v1 return v2 } ; block0: -; lgr %r2, %r3 +; chi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b16_i128(b16, b16) -> i128 { -block0(v0: b16, v1: b16): +function %bmask_i16_i128(i16, i16) -> i128 { +block0(v0: i16, v1: i16): v2 = bmask.i128 v1 return v2 } ; block0: -; lghr %r3, %r4 -; vlvgp %v7, %r3, %r3 -; vst %v7, 0(%r2) +; chi %r4, 0 +; lghi %r4, 0 +; locghilh %r4, -1 +; vlvgp %v17, %r4, %r4 +; vst %v17, 0(%r2) ; br %r14 -function %bmask_b16_i64(b16, b16) -> i64 { -block0(v0: b16, v1: b16): +function %bmask_i16_i64(i16, i16) -> i64 { +block0(v0: i16, v1: i16): v2 = bmask.i64 v1 return v2 } ; block0: -; lghr %r2, %r3 +; chi %r3, 0 +; lghi %r2, 0 +; locghilh %r2, -1 ; br %r14 -function %bmask_b16_i32(b16, b16) -> i32 { -block0(v0: b16, v1: b16): +function %bmask_i16_i32(i16, i16) -> i32 { +block0(v0: i16, v1: i16): v2 = bmask.i32 v1 return v2 } ; block0: -; lhr %r2, %r3 +; chi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b16_i16(b16, b16) -> i16 { -block0(v0: b16, v1: b16): +function %bmask_i16_i16(i16, i16) -> i16 { +block0(v0: i16, v1: i16): v2 = bmask.i16 v1 return v2 } ; block0: -; lgr %r2, %r3 +; chi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b16_i8(b16, b16) -> i8 { -block0(v0: b16, v1: b16): +function %bmask_i16_i8(i16, i16) -> i8 { +block0(v0: i16, v1: i16): v2 = bmask.i8 v1 return v2 } ; block0: -; lgr %r2, %r3 +; chi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b8_i128(b8, b8) -> i128 { -block0(v0: b8, v1: b8): +function %bmask_i8_i128(i8, i8) -> i128 { +block0(v0: i8, v1: i8): v2 = bmask.i128 v1 return v2 } ; block0: -; lgbr %r3, %r4 -; vlvgp %v7, %r3, %r3 -; vst %v7, 0(%r2) +; chi %r4, 0 +; lghi %r4, 0 +; locghilh %r4, -1 +; vlvgp %v17, %r4, %r4 +; vst %v17, 0(%r2) ; br %r14 -function %bmask_b8_i64(b8, b8) -> i64 { -block0(v0: b8, v1: b8): +function %bmask_i8_i64(i8, i8) -> i64 { +block0(v0: i8, v1: i8): v2 = bmask.i64 v1 return v2 } ; block0: -; lgbr %r2, %r3 +; chi %r3, 0 +; lghi %r2, 0 +; locghilh %r2, -1 ; br %r14 -function %bmask_b8_i32(b8, b8) -> i32 { -block0(v0: b8, v1: b8): +function %bmask_i8_i32(i8, i8) -> i32 { +block0(v0: i8, v1: i8): v2 = bmask.i32 v1 return v2 } ; block0: -; lbr %r2, %r3 +; chi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b8_i16(b8, b8) -> i16 { -block0(v0: b8, v1: b8): +function %bmask_i8_i16(i8, i8) -> i16 { +block0(v0: i8, v1: i8): v2 = bmask.i16 v1 return v2 } ; block0: -; lbr %r2, %r3 +; chi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b8_i8(b8, b8) -> i8 { -block0(v0: b8, v1: b8): +function %bmask_i8_i8(i8, i8) -> i8 { +block0(v0: i8, v1: i8): v2 = bmask.i8 v1 return v2 } ; block0: -; lgr %r2, %r3 +; chi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b1_i128(b1, b1) -> i128 { -block0(v0: b1, v1: b1): +function %bmask_i8_i128(i8, i8) -> i128 { +block0(v0: i8, v1: i8): v2 = 
bmask.i128 v1 return v2 } ; block0: -; sllg %r3, %r4, 63 -; srag %r5, %r3, 63 -; vlvgp %v17, %r5, %r5 +; chi %r4, 0 +; lghi %r4, 0 +; locghilh %r4, -1 +; vlvgp %v17, %r4, %r4 ; vst %v17, 0(%r2) ; br %r14 -function %bmask_b1_i64(b1, b1) -> i64 { -block0(v0: b1, v1: b1): +function %bmask_i8_i64(i8, i8) -> i64 { +block0(v0: i8, v1: i8): v2 = bmask.i64 v1 return v2 } ; block0: -; sllg %r2, %r3, 63 -; srag %r2, %r2, 63 +; chi %r3, 0 +; lghi %r2, 0 +; locghilh %r2, -1 ; br %r14 -function %bmask_b1_i32(b1, b1) -> i32 { -block0(v0: b1, v1: b1): +function %bmask_i8_i32(i8, i8) -> i32 { +block0(v0: i8, v1: i8): v2 = bmask.i32 v1 return v2 } ; block0: -; sllk %r2, %r3, 31 -; srak %r2, %r2, 31 +; chi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b1_i16(b1, b1) -> i16 { -block0(v0: b1, v1: b1): +function %bmask_i8_i16(i8, i8) -> i16 { +block0(v0: i8, v1: i8): v2 = bmask.i16 v1 return v2 } ; block0: -; sllk %r2, %r3, 31 -; srak %r2, %r2, 31 +; chi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 -function %bmask_b1_i8(b1, b1) -> i8 { -block0(v0: b1, v1: b1): +function %bmask_i8_i8(i8, i8) -> i8 { +block0(v0: i8, v1: i8): v2 = bmask.i8 v1 return v2 } ; block0: -; sllk %r2, %r3, 31 -; srak %r2, %r2, 31 -; br %r14 - -function %bint_b128_i128(b128) -> i128 { -block0(v0: b128): - v1 = bint.i128 v0 - return v1 -} - -; block0: -; vl %v0, 0(%r3) -; bras %r1, 20 ; data.u128 0x00000000000000000000000000000001 ; vl %v5, 0(%r1) -; vn %v7, %v0, %v5 -; vst %v7, 0(%r2) -; br %r14 - -function %bint_b128_i64(b128) -> i64 { -block0(v0: b128): - v1 = bint.i64 v0 - return v1 -} - -; block0: -; vl %v0, 0(%r2) -; vlgvb %r2, %v0, 15 -; nill %r2, 1 -; br %r14 - -function %bint_b128_i32(b128) -> i32 { -block0(v0: b128): - v1 = bint.i32 v0 - return v1 -} - -; block0: -; vl %v0, 0(%r2) -; vlgvb %r2, %v0, 15 -; nill %r2, 1 -; br %r14 - -function %bint_b128_i16(b128) -> i16 { -block0(v0: b128): - v1 = bint.i16 v0 - return v1 -} - -; block0: -; vl %v0, 0(%r2) -; vlgvb %r2, %v0, 15 -; nill %r2, 1 -; br %r14 - -function %bint_b128_i8(b128) -> i8 { -block0(v0: b128): - v1 = bint.i8 v0 - return v1 -} - -; block0: -; vl %v0, 0(%r2) -; vlgvb %r2, %v0, 15 -; nill %r2, 1 -; br %r14 - -function %bint_b64_i128(b64) -> i128 { -block0(v0: b64): - v1 = bint.i128 v0 - return v1 -} - -; block0: -; nill %r3, 1 -; vgbm %v6, 0 -; vlvgb %v6, %r3, 15 -; vst %v6, 0(%r2) -; br %r14 - -function %bint_b64_i64(b64) -> i64 { -block0(v0: b64): - v1 = bint.i64 v0 - return v1 -} - -; block0: -; lghi %r5, 1 -; ngr %r2, %r5 -; br %r14 - -function %bint_b64_i32(b64) -> i32 { -block0(v0: b64): - v1 = bint.i32 v0 - return v1 -} - -; block0: -; nilf %r2, 1 -; br %r14 - -function %bint_b64_i16(b64) -> i16 { -block0(v0: b64): - v1 = bint.i16 v0 - return v1 -} - -; block0: -; nill %r2, 1 -; br %r14 - -function %bint_b64_i8(b64) -> i8 { -block0(v0: b64): - v1 = bint.i8 v0 - return v1 -} - -; block0: -; nill %r2, 1 -; br %r14 - -function %bint_b32_i128(b32) -> i128 { -block0(v0: b32): - v1 = bint.i128 v0 - return v1 -} - -; block0: -; nill %r3, 1 -; vgbm %v6, 0 -; vlvgb %v6, %r3, 15 -; vst %v6, 0(%r2) -; br %r14 - -function %bint_b32_i64(b32) -> i64 { -block0(v0: b32): - v1 = bint.i64 v0 - return v1 -} - -; block0: -; lghi %r5, 1 -; ngr %r2, %r5 -; br %r14 - -function %bint_b32_i32(b32) -> i32 { -block0(v0: b32): - v1 = bint.i32 v0 - return v1 -} - -; block0: -; nilf %r2, 1 -; br %r14 - -function %bint_b32_i16(b32) -> i16 { -block0(v0: b32): - v1 = bint.i16 v0 - return v1 -} - -; block0: -; nill %r2, 1 -; br %r14 - -function %bint_b32_i8(b32) -> i8 { 
-block0(v0: b32): - v1 = bint.i8 v0 - return v1 -} - -; block0: -; nill %r2, 1 -; br %r14 - -function %bint_b16_i128(b16) -> i128 { -block0(v0: b16): - v1 = bint.i128 v0 - return v1 -} - -; block0: -; nill %r3, 1 -; vgbm %v6, 0 -; vlvgb %v6, %r3, 15 -; vst %v6, 0(%r2) -; br %r14 - -function %bint_b16_i64(b16) -> i64 { -block0(v0: b16): - v1 = bint.i64 v0 - return v1 -} - -; block0: -; lghi %r5, 1 -; ngr %r2, %r5 -; br %r14 - -function %bint_b16_i32(b16) -> i32 { -block0(v0: b16): - v1 = bint.i32 v0 - return v1 -} - -; block0: -; nilf %r2, 1 -; br %r14 - -function %bint_b16_i16(b16) -> i16 { -block0(v0: b16): - v1 = bint.i16 v0 - return v1 -} - -; block0: -; nill %r2, 1 -; br %r14 - -function %bint_b16_i8(b16) -> i8 { -block0(v0: b16): - v1 = bint.i8 v0 - return v1 -} - -; block0: -; nill %r2, 1 -; br %r14 - -function %bint_b8_i128(b8) -> i128 { -block0(v0: b8): - v1 = bint.i128 v0 - return v1 -} - -; block0: -; nill %r3, 1 -; vgbm %v6, 0 -; vlvgb %v6, %r3, 15 -; vst %v6, 0(%r2) -; br %r14 - -function %bint_b8_i64(b8) -> i64 { -block0(v0: b8): - v1 = bint.i64 v0 - return v1 -} - -; block0: -; lghi %r5, 1 -; ngr %r2, %r5 -; br %r14 - -function %bint_b8_i32(b8) -> i32 { -block0(v0: b8): - v1 = bint.i32 v0 - return v1 -} - -; block0: -; nilf %r2, 1 -; br %r14 - -function %bint_b8_i16(b8) -> i16 { -block0(v0: b8): - v1 = bint.i16 v0 - return v1 -} - -; block0: -; nill %r2, 1 -; br %r14 - -function %bint_b8_i8(b8) -> i8 { -block0(v0: b8): - v1 = bint.i8 v0 - return v1 -} - -; block0: -; nill %r2, 1 -; br %r14 - -function %bint_b1_i128(b1) -> i128 { -block0(v0: b1): - v1 = bint.i128 v0 - return v1 -} - -; block0: -; nill %r3, 1 -; vgbm %v6, 0 -; vlvgb %v6, %r3, 15 -; vst %v6, 0(%r2) -; br %r14 - -function %bint_b1_i64(b1) -> i64 { -block0(v0: b1): - v1 = bint.i64 v0 - return v1 -} - -; block0: -; lghi %r5, 1 -; ngr %r2, %r5 -; br %r14 - -function %bint_b1_i32(b1) -> i32 { -block0(v0: b1): - v1 = bint.i32 v0 - return v1 -} - -; block0: -; nilf %r2, 1 -; br %r14 - -function %bint_b1_i16(b1) -> i16 { -block0(v0: b1): - v1 = bint.i16 v0 - return v1 -} - -; block0: -; nill %r2, 1 -; br %r14 - -function %bint_b1_i8(b1) -> i8 { -block0(v0: b1): - v1 = bint.i8 v0 - return v1 -} - -; block0: -; nill %r2, 1 +; chi %r3, 0 +; lhi %r2, 0 +; lochilh %r2, -1 ; br %r14 diff --git a/cranelift/filetests/filetests/isa/s390x/icmp-i128.clif b/cranelift/filetests/filetests/isa/s390x/icmp-i128.clif index efb90b119ef4..91e75492e852 100644 --- a/cranelift/filetests/filetests/isa/s390x/icmp-i128.clif +++ b/cranelift/filetests/filetests/isa/s390x/icmp-i128.clif @@ -1,7 +1,7 @@ test compile precise-output target s390x -function %icmp_eq_i128(i128, i128) -> b1 { +function %icmp_eq_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 eq v0, v1 return v2 @@ -15,7 +15,7 @@ block0(v0: i128, v1: i128): ; lochie %r2, 1 ; br %r14 -function %icmp_ne_i128(i128, i128) -> b1 { +function %icmp_ne_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 ne v0, v1 return v2 @@ -29,7 +29,7 @@ block0(v0: i128, v1: i128): ; lochine %r2, 1 ; br %r14 -function %icmp_slt_i128(i128, i128) -> b1 { +function %icmp_slt_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 slt v0, v1 return v2 @@ -43,7 +43,7 @@ block0(v0: i128, v1: i128): ; lochil %r2, 1 ; br %r14 -function %icmp_sgt_i128(i128, i128) -> b1 { +function %icmp_sgt_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 sgt v0, v1 return v2 @@ -57,7 +57,7 @@ block0(v0: i128, v1: i128): ; lochil %r2, 1 ; br %r14 -function 
%icmp_sle_i128(i128, i128) -> b1 { +function %icmp_sle_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 sle v0, v1 return v2 @@ -71,7 +71,7 @@ block0(v0: i128, v1: i128): ; lochinl %r2, 1 ; br %r14 -function %icmp_sge_i128(i128, i128) -> b1 { +function %icmp_sge_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 sge v0, v1 return v2 @@ -85,7 +85,7 @@ block0(v0: i128, v1: i128): ; lochinl %r2, 1 ; br %r14 -function %icmp_ult_i128(i128, i128) -> b1 { +function %icmp_ult_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 ult v0, v1 return v2 @@ -99,7 +99,7 @@ block0(v0: i128, v1: i128): ; lochil %r2, 1 ; br %r14 -function %icmp_ugt_i128(i128, i128) -> b1 { +function %icmp_ugt_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 ugt v0, v1 return v2 @@ -113,7 +113,7 @@ block0(v0: i128, v1: i128): ; lochil %r2, 1 ; br %r14 -function %icmp_ule_i128(i128, i128) -> b1 { +function %icmp_ule_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 ule v0, v1 return v2 @@ -127,7 +127,7 @@ block0(v0: i128, v1: i128): ; lochinl %r2, 1 ; br %r14 -function %icmp_uge_i128(i128, i128) -> b1 { +function %icmp_uge_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 uge v0, v1 return v2 diff --git a/cranelift/filetests/filetests/isa/s390x/icmp.clif b/cranelift/filetests/filetests/isa/s390x/icmp.clif index 99a7ea1d1e0a..a1b8146f26dd 100644 --- a/cranelift/filetests/filetests/isa/s390x/icmp.clif +++ b/cranelift/filetests/filetests/isa/s390x/icmp.clif @@ -1,7 +1,7 @@ test compile precise-output target s390x -function %icmp_slt_i64(i64, i64) -> b1 { +function %icmp_slt_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp.i64 slt v0, v1 return v2 @@ -13,7 +13,7 @@ block0(v0: i64, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i64_ext32(i64, i32) -> b1 { +function %icmp_slt_i64_ext32(i64, i32) -> i8 { block0(v0: i64, v1: i32): v2 = sextend.i64 v1 v3 = icmp.i64 slt v0, v2 @@ -26,7 +26,7 @@ block0(v0: i64, v1: i32): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i64_imm16(i64) -> b1 { +function %icmp_slt_i64_imm16(i64) -> i8 { block0(v0: i64): v1 = iconst.i64 1 v2 = icmp.i64 slt v0, v1 @@ -39,7 +39,7 @@ block0(v0: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i64_imm32(i64) -> b1 { +function %icmp_slt_i64_imm32(i64) -> i8 { block0(v0: i64): v1 = iconst.i64 32768 v2 = icmp.i64 slt v0, v1 @@ -52,7 +52,7 @@ block0(v0: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i64_mem(i64, i64) -> b1 { +function %icmp_slt_i64_mem(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = load.i64 v1 v3 = icmp.i64 slt v0, v2 @@ -65,7 +65,7 @@ block0(v0: i64, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i64_sym(i64) -> b1 { +function %icmp_slt_i64_sym(i64) -> i8 { gv0 = symbol colocated %sym block0(v0: i64): v1 = symbol_value.i64 gv0 @@ -80,7 +80,7 @@ block0(v0: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i64_mem_ext16(i64, i64) -> b1 { +function %icmp_slt_i64_mem_ext16(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = sload16.i64 v1 v3 = icmp.i64 slt v0, v2 @@ -93,7 +93,7 @@ block0(v0: i64, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i64_sym_ext16(i64) -> b1 { +function %icmp_slt_i64_sym_ext16(i64) -> i8 { gv0 = symbol colocated %sym block0(v0: i64): v1 = symbol_value.i64 gv0 @@ -108,7 +108,7 @@ block0(v0: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i64_mem_ext32(i64, i64) -> b1 { +function %icmp_slt_i64_mem_ext32(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = sload32.i64 v1 v3 = 
icmp.i64 slt v0, v2 @@ -121,7 +121,7 @@ block0(v0: i64, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i64_sym_ext32(i64) -> b1 { +function %icmp_slt_i64_sym_ext32(i64) -> i8 { gv0 = symbol colocated %sym block0(v0: i64): v1 = symbol_value.i64 gv0 @@ -136,7 +136,7 @@ block0(v0: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i32(i32, i32) -> b1 { +function %icmp_slt_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = icmp.i32 slt v0, v1 return v2 @@ -148,7 +148,7 @@ block0(v0: i32, v1: i32): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i32_imm16(i32) -> b1 { +function %icmp_slt_i32_imm16(i32) -> i8 { block0(v0: i32): v1 = iconst.i32 1 v2 = icmp.i32 slt v0, v1 @@ -161,7 +161,7 @@ block0(v0: i32): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i32_imm(i32) -> b1 { +function %icmp_slt_i32_imm(i32) -> i8 { block0(v0: i32): v1 = iconst.i32 32768 v2 = icmp.i32 slt v0, v1 @@ -174,7 +174,7 @@ block0(v0: i32): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i32_mem(i32, i64) -> b1 { +function %icmp_slt_i32_mem(i32, i64) -> i8 { block0(v0: i32, v1: i64): v2 = load.i32 v1 v3 = icmp.i32 slt v0, v2 @@ -187,7 +187,7 @@ block0(v0: i32, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i32_memoff(i32, i64) -> b1 { +function %icmp_slt_i32_memoff(i32, i64) -> i8 { block0(v0: i32, v1: i64): v2 = load.i32 v1+4096 v3 = icmp.i32 slt v0, v2 @@ -200,7 +200,7 @@ block0(v0: i32, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i32_sym(i32) -> b1 { +function %icmp_slt_i32_sym(i32) -> i8 { gv0 = symbol colocated %sym block0(v0: i32): v1 = symbol_value.i64 gv0 @@ -215,7 +215,7 @@ block0(v0: i32): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i32_mem_ext16(i32, i64) -> b1 { +function %icmp_slt_i32_mem_ext16(i32, i64) -> i8 { block0(v0: i32, v1: i64): v2 = sload16.i32 v1 v3 = icmp.i32 slt v0, v2 @@ -228,7 +228,7 @@ block0(v0: i32, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i32_memoff_ext16(i32, i64) -> b1 { +function %icmp_slt_i32_memoff_ext16(i32, i64) -> i8 { block0(v0: i32, v1: i64): v2 = sload16.i32 v1+4096 v3 = icmp.i32 slt v0, v2 @@ -241,7 +241,7 @@ block0(v0: i32, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i32_sym_ext16(i32) -> b1 { +function %icmp_slt_i32_sym_ext16(i32) -> i8 { gv0 = symbol colocated %sym block0(v0: i32): v1 = symbol_value.i64 gv0 @@ -256,7 +256,7 @@ block0(v0: i32): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i16(i16, i16) -> b1 { +function %icmp_slt_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = icmp.i16 slt v0, v1 return v2 @@ -270,7 +270,7 @@ block0(v0: i16, v1: i16): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i16_imm(i16) -> b1 { +function %icmp_slt_i16_imm(i16) -> i8 { block0(v0: i16): v1 = iconst.i16 1 v2 = icmp.i16 slt v0, v1 @@ -284,7 +284,7 @@ block0(v0: i16): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i16_mem(i16, i64) -> b1 { +function %icmp_slt_i16_mem(i16, i64) -> i8 { block0(v0: i16, v1: i64): v2 = load.i16 v1 v3 = icmp.i16 slt v0, v2 @@ -298,7 +298,7 @@ block0(v0: i16, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i16_sym(i16) -> b1 { +function %icmp_slt_i16_sym(i16) -> i8 { gv0 = symbol colocated %sym block0(v0: i16): v1 = symbol_value.i64 gv0 @@ -314,7 +314,7 @@ block0(v0: i16): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i8(i8, i8) -> b1 { +function %icmp_slt_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = icmp.i8 slt v0, v1 return v2 @@ -328,7 +328,7 @@ block0(v0: i8, v1: i8): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i8_imm(i8) -> b1 { +function %icmp_slt_i8_imm(i8) -> i8 { block0(v0: i8): v1 = 
iconst.i8 1 v2 = icmp.i8 slt v0, v1 @@ -342,7 +342,7 @@ block0(v0: i8): ; lochil %r2, 1 ; br %r14 -function %icmp_slt_i8_mem(i8, i64) -> b1 { +function %icmp_slt_i8_mem(i8, i64) -> i8 { block0(v0: i8, v1: i64): v2 = load.i8 v1 v3 = icmp.i8 slt v0, v2 @@ -357,7 +357,7 @@ block0(v0: i8, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i64(i64, i64) -> b1 { +function %icmp_ult_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp.i64 ult v0, v1 return v2 @@ -369,7 +369,7 @@ block0(v0: i64, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i64_ext32(i64, i32) -> b1 { +function %icmp_ult_i64_ext32(i64, i32) -> i8 { block0(v0: i64, v1: i32): v2 = uextend.i64 v1 v3 = icmp.i64 ult v0, v2 @@ -382,7 +382,7 @@ block0(v0: i64, v1: i32): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i64_imm(i64) -> b1 { +function %icmp_ult_i64_imm(i64) -> i8 { block0(v0: i64): v1 = iconst.i64 1 v2 = icmp.i64 ult v0, v1 @@ -395,7 +395,7 @@ block0(v0: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i64_mem(i64, i64) -> b1 { +function %icmp_ult_i64_mem(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = load.i64 v1 v3 = icmp.i64 ult v0, v2 @@ -408,7 +408,7 @@ block0(v0: i64, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i64_sym(i64) -> b1 { +function %icmp_ult_i64_sym(i64) -> i8 { gv0 = symbol colocated %sym block0(v0: i64): v1 = symbol_value.i64 gv0 @@ -423,7 +423,7 @@ block0(v0: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i64_mem_ext32(i64, i64) -> b1 { +function %icmp_ult_i64_mem_ext32(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = uload32.i64 v1 v3 = icmp.i64 ult v0, v2 @@ -436,7 +436,7 @@ block0(v0: i64, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i64_sym_ext32(i64) -> b1 { +function %icmp_ult_i64_sym_ext32(i64) -> i8 { gv0 = symbol colocated %sym block0(v0: i64): v1 = symbol_value.i64 gv0 @@ -451,7 +451,7 @@ block0(v0: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i64_mem_ext16(i64, i64) -> b1 { +function %icmp_ult_i64_mem_ext16(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = uload16.i64 v1 v3 = icmp.i64 ult v0, v2 @@ -465,7 +465,7 @@ block0(v0: i64, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i64_sym_ext16(i64) -> b1 { +function %icmp_ult_i64_sym_ext16(i64) -> i8 { gv0 = symbol colocated %sym block0(v0: i64): v1 = symbol_value.i64 gv0 @@ -480,7 +480,7 @@ block0(v0: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i32(i32, i32) -> b1 { +function %icmp_ult_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = icmp.i32 ult v0, v1 return v2 @@ -492,7 +492,7 @@ block0(v0: i32, v1: i32): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i32_imm(i32) -> b1 { +function %icmp_ult_i32_imm(i32) -> i8 { block0(v0: i32): v1 = iconst.i32 1 v2 = icmp.i32 ult v0, v1 @@ -505,7 +505,7 @@ block0(v0: i32): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i32_mem(i32, i64) -> b1 { +function %icmp_ult_i32_mem(i32, i64) -> i8 { block0(v0: i32, v1: i64): v2 = load.i32 v1 v3 = icmp.i32 ult v0, v2 @@ -518,7 +518,7 @@ block0(v0: i32, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i32_memoff(i32, i64) -> b1 { +function %icmp_ult_i32_memoff(i32, i64) -> i8 { block0(v0: i32, v1: i64): v2 = load.i32 v1+4096 v3 = icmp.i32 ult v0, v2 @@ -531,7 +531,7 @@ block0(v0: i32, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i32_sym(i32) -> b1 { +function %icmp_ult_i32_sym(i32) -> i8 { gv0 = symbol colocated %sym block0(v0: i32): v1 = symbol_value.i64 gv0 @@ -546,7 +546,7 @@ block0(v0: i32): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i32_mem_ext16(i32, i64) -> b1 { 
+function %icmp_ult_i32_mem_ext16(i32, i64) -> i8 { block0(v0: i32, v1: i64): v2 = uload16.i32 v1 v3 = icmp.i32 ult v0, v2 @@ -560,7 +560,7 @@ block0(v0: i32, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i32_sym_ext16(i32) -> b1 { +function %icmp_ult_i32_sym_ext16(i32) -> i8 { gv0 = symbol colocated %sym block0(v0: i32): v1 = symbol_value.i64 gv0 @@ -575,7 +575,7 @@ block0(v0: i32): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i16(i16, i16) -> b1 { +function %icmp_ult_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = icmp.i16 ult v0, v1 return v2 @@ -589,7 +589,7 @@ block0(v0: i16, v1: i16): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i16_imm(i16) -> b1 { +function %icmp_ult_i16_imm(i16) -> i8 { block0(v0: i16): v1 = iconst.i16 1 v2 = icmp.i16 ult v0, v1 @@ -603,7 +603,7 @@ block0(v0: i16): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i16_mem(i16, i64) -> b1 { +function %icmp_ult_i16_mem(i16, i64) -> i8 { block0(v0: i16, v1: i64): v2 = load.i16 v1 v3 = icmp.i16 ult v0, v2 @@ -618,7 +618,7 @@ block0(v0: i16, v1: i64): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i16_mem(i16) -> b1 { +function %icmp_ult_i16_mem(i16) -> i8 { gv0 = symbol colocated %sym block0(v0: i16): v1 = symbol_value.i64 gv0 @@ -634,7 +634,7 @@ block0(v0: i16): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i8(i8, i8) -> b1 { +function %icmp_ult_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = icmp.i8 ult v0, v1 return v2 @@ -648,7 +648,7 @@ block0(v0: i8, v1: i8): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i8_imm(i8) -> b1 { +function %icmp_ult_i8_imm(i8) -> i8 { block0(v0: i8): v1 = iconst.i8 1 v2 = icmp.i8 ult v0, v1 @@ -662,7 +662,7 @@ block0(v0: i8): ; lochil %r2, 1 ; br %r14 -function %icmp_ult_i8_mem(i8, i64) -> b1 { +function %icmp_ult_i8_mem(i8, i64) -> i8 { block0(v0: i8, v1: i64): v2 = load.i8 v1 v3 = icmp.i8 ult v0, v2 diff --git a/cranelift/filetests/filetests/isa/s390x/reftypes.clif b/cranelift/filetests/filetests/isa/s390x/reftypes.clif index 036fd218d56d..c3178c935be4 100644 --- a/cranelift/filetests/filetests/isa/s390x/reftypes.clif +++ b/cranelift/filetests/filetests/isa/s390x/reftypes.clif @@ -10,7 +10,7 @@ block0(v0: r64, v1: r64): ; lgr %r2, %r3 ; br %r14 -function %f1(r64) -> b1 { +function %f1(r64) -> i8 { block0(v0: r64): v1 = is_null v0 return v1 @@ -22,7 +22,7 @@ block0(v0: r64): ; lochie %r2, 1 ; br %r14 -function %f2(r64) -> b1 { +function %f2(r64) -> i8 { block0(v0: r64): v1 = is_invalid v0 return v1 @@ -45,7 +45,7 @@ block0: ; br %r14 function %f4(r64, r64) -> r64, r64, r64 { - fn0 = %f(r64) -> b1 + fn0 = %f(r64) -> i8 ss0 = explicit_slot 8 block0(v0: r64, v1: r64): @@ -76,7 +76,7 @@ block3(v7: r64, v8: r64): ; la %r5, 160(%r15) ; lg %r3, 168(%r15) ; stg %r3, 0(%r5) -; llcr %r2, %r2 +; lbr %r2, %r2 ; chi %r2, 0 ; jgnlh label1 ; jg label3 ; block1: diff --git a/cranelift/filetests/filetests/isa/s390x/vec-bitwise.clif b/cranelift/filetests/filetests/isa/s390x/vec-bitwise.clif index 8722a78703b3..8b92db5881e3 100644 --- a/cranelift/filetests/filetests/isa/s390x/vec-bitwise.clif +++ b/cranelift/filetests/filetests/isa/s390x/vec-bitwise.clif @@ -322,8 +322,8 @@ block0(v0: i8x16, v1: i8x16, v2: i8x16): ; vsel %v24, %v25, %v26, %v24 ; br %r14 -function %vselect_i64x2(b64x2, i64x2, i64x2) -> i64x2 { -block0(v0: b64x2, v1: i64x2, v2: i64x2): +function %vselect_i64x2(i64x2, i64x2, i64x2) -> i64x2 { +block0(v0: i64x2, v1: i64x2, v2: i64x2): v3 = vselect.i64x2 v0, v1, v2 return v3 } @@ -332,8 +332,8 @@ block0(v0: b64x2, v1: i64x2, v2: i64x2): ; vsel %v24, %v25, %v26, %v24 ; br %r14 
-function %vselect_i32x4(b32x4, i32x4, i32x4) -> i32x4 { -block0(v0: b32x4, v1: i32x4, v2: i32x4): +function %vselect_i32x4(i32x4, i32x4, i32x4) -> i32x4 { +block0(v0: i32x4, v1: i32x4, v2: i32x4): v3 = vselect.i32x4 v0, v1, v2 return v3 } @@ -342,8 +342,8 @@ block0(v0: b32x4, v1: i32x4, v2: i32x4): ; vsel %v24, %v25, %v26, %v24 ; br %r14 -function %vselect_i16x8(b16x8, i16x8, i16x8) -> i16x8 { -block0(v0: b16x8, v1: i16x8, v2: i16x8): +function %vselect_i16x8(i16x8, i16x8, i16x8) -> i16x8 { +block0(v0: i16x8, v1: i16x8, v2: i16x8): v3 = vselect.i16x8 v0, v1, v2 return v3 } @@ -352,8 +352,8 @@ block0(v0: b16x8, v1: i16x8, v2: i16x8): ; vsel %v24, %v25, %v26, %v24 ; br %r14 -function %vselect_i8x16(b8x16, i8x16, i8x16) -> i8x16 { -block0(v0: b8x16, v1: i8x16, v2: i8x16): +function %vselect_i8x16(i8x16, i8x16, i8x16) -> i8x16 { +block0(v0: i8x16, v1: i8x16, v2: i8x16): v3 = vselect.i8x16 v0, v1, v2 return v3 } diff --git a/cranelift/filetests/filetests/isa/s390x/vec-fcmp.clif b/cranelift/filetests/filetests/isa/s390x/vec-fcmp.clif index 32c4f472d195..d85d53b0bb7b 100644 --- a/cranelift/filetests/filetests/isa/s390x/vec-fcmp.clif +++ b/cranelift/filetests/filetests/isa/s390x/vec-fcmp.clif @@ -1,7 +1,7 @@ test compile precise-output target s390x -function %fcmp_eq_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_eq_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 eq v0, v1 return v2 @@ -11,7 +11,7 @@ block0(v0: f64x2, v1: f64x2): ; vfcedb %v24, %v24, %v25 ; br %r14 -function %fcmp_ne_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_ne_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 ne v0, v1 return v2 @@ -22,7 +22,7 @@ block0(v0: f64x2, v1: f64x2): ; vno %v24, %v4, %v4 ; br %r14 -function %fcmp_gt_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_gt_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 gt v0, v1 return v2 @@ -32,7 +32,7 @@ block0(v0: f64x2, v1: f64x2): ; vfchdb %v24, %v24, %v25 ; br %r14 -function %fcmp_lt_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_lt_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 lt v0, v1 return v2 @@ -42,7 +42,7 @@ block0(v0: f64x2, v1: f64x2): ; vfchdb %v24, %v25, %v24 ; br %r14 -function %fcmp_ge_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_ge_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 ge v0, v1 return v2 @@ -52,7 +52,7 @@ block0(v0: f64x2, v1: f64x2): ; vfchedb %v24, %v24, %v25 ; br %r14 -function %fcmp_le_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_le_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 le v0, v1 return v2 @@ -62,7 +62,7 @@ block0(v0: f64x2, v1: f64x2): ; vfchedb %v24, %v25, %v24 ; br %r14 -function %fcmp_ueq_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_ueq_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 ueq v0, v1 return v2 @@ -74,7 +74,7 @@ block0(v0: f64x2, v1: f64x2): ; vno %v24, %v4, %v6 ; br %r14 -function %fcmp_one_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_one_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 one v0, v1 return v2 @@ -86,7 +86,7 @@ block0(v0: f64x2, v1: f64x2): ; vo %v24, %v4, %v6 ; br %r14 -function %fcmp_ugt_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_ugt_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 ugt v0, v1 return v2 @@ -97,7 +97,7 @@ block0(v0: f64x2, v1: f64x2): ; vno %v24, %v4, %v4 ; br %r14 -function %fcmp_ult_f64x2(f64x2, f64x2) -> b64x2 { 
+function %fcmp_ult_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 ult v0, v1 return v2 @@ -108,7 +108,7 @@ block0(v0: f64x2, v1: f64x2): ; vno %v24, %v4, %v4 ; br %r14 -function %fcmp_uge_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_uge_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 uge v0, v1 return v2 @@ -119,7 +119,7 @@ block0(v0: f64x2, v1: f64x2): ; vno %v24, %v4, %v4 ; br %r14 -function %fcmp_ule_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_ule_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 ule v0, v1 return v2 @@ -130,7 +130,7 @@ block0(v0: f64x2, v1: f64x2): ; vno %v24, %v4, %v4 ; br %r14 -function %fcmp_ord_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_ord_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 ord v0, v1 return v2 @@ -142,7 +142,7 @@ block0(v0: f64x2, v1: f64x2): ; vo %v24, %v4, %v6 ; br %r14 -function %fcmp_uno_f64x2(f64x2, f64x2) -> b64x2 { +function %fcmp_uno_f64x2(f64x2, f64x2) -> i64x2 { block0(v0: f64x2, v1: f64x2): v2 = fcmp.f64x2 uno v0, v1 return v2 @@ -154,7 +154,7 @@ block0(v0: f64x2, v1: f64x2): ; vno %v24, %v4, %v6 ; br %r14 -function %fcmp_eq_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_eq_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 eq v0, v1 return v2 @@ -164,7 +164,7 @@ block0(v0: f32x4, v1: f32x4): ; vfcesb %v24, %v24, %v25 ; br %r14 -function %fcmp_ne_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_ne_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 ne v0, v1 return v2 @@ -175,7 +175,7 @@ block0(v0: f32x4, v1: f32x4): ; vno %v24, %v4, %v4 ; br %r14 -function %fcmp_gt_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_gt_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 gt v0, v1 return v2 @@ -185,7 +185,7 @@ block0(v0: f32x4, v1: f32x4): ; vfchsb %v24, %v24, %v25 ; br %r14 -function %fcmp_lt_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_lt_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 lt v0, v1 return v2 @@ -195,7 +195,7 @@ block0(v0: f32x4, v1: f32x4): ; vfchsb %v24, %v25, %v24 ; br %r14 -function %fcmp_ge_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_ge_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 ge v0, v1 return v2 @@ -205,7 +205,7 @@ block0(v0: f32x4, v1: f32x4): ; vfchesb %v24, %v24, %v25 ; br %r14 -function %fcmp_le_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_le_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 le v0, v1 return v2 @@ -215,7 +215,7 @@ block0(v0: f32x4, v1: f32x4): ; vfchesb %v24, %v25, %v24 ; br %r14 -function %fcmp_ueq_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_ueq_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 ueq v0, v1 return v2 @@ -227,7 +227,7 @@ block0(v0: f32x4, v1: f32x4): ; vno %v24, %v4, %v6 ; br %r14 -function %fcmp_one_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_one_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 one v0, v1 return v2 @@ -239,7 +239,7 @@ block0(v0: f32x4, v1: f32x4): ; vo %v24, %v4, %v6 ; br %r14 -function %fcmp_ugt_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_ugt_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 ugt v0, v1 return v2 @@ -250,7 +250,7 @@ block0(v0: f32x4, v1: f32x4): ; vno %v24, %v4, %v4 ; br %r14 -function %fcmp_ult_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_ult_f32x4(f32x4, f32x4) -> 
i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 ult v0, v1 return v2 @@ -261,7 +261,7 @@ block0(v0: f32x4, v1: f32x4): ; vno %v24, %v4, %v4 ; br %r14 -function %fcmp_uge_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_uge_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 uge v0, v1 return v2 @@ -272,7 +272,7 @@ block0(v0: f32x4, v1: f32x4): ; vno %v24, %v4, %v4 ; br %r14 -function %fcmp_ule_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_ule_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 ule v0, v1 return v2 @@ -283,7 +283,7 @@ block0(v0: f32x4, v1: f32x4): ; vno %v24, %v4, %v4 ; br %r14 -function %fcmp_ord_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_ord_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 ord v0, v1 return v2 @@ -295,7 +295,7 @@ block0(v0: f32x4, v1: f32x4): ; vo %v24, %v4, %v6 ; br %r14 -function %fcmp_uno_f32x4(f32x4, f32x4) -> b32x4 { +function %fcmp_uno_f32x4(f32x4, f32x4) -> i32x4 { block0(v0: f32x4, v1: f32x4): v2 = fcmp.f32x4 uno v0, v1 return v2 diff --git a/cranelift/filetests/filetests/isa/s390x/vec-icmp.clif b/cranelift/filetests/filetests/isa/s390x/vec-icmp.clif index 6fead120581f..530629372de6 100644 --- a/cranelift/filetests/filetests/isa/s390x/vec-icmp.clif +++ b/cranelift/filetests/filetests/isa/s390x/vec-icmp.clif @@ -1,7 +1,7 @@ test compile precise-output target s390x -function %icmp_eq_i64x2(i64x2, i64x2) -> b64x2 { +function %icmp_eq_i64x2(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp.i64x2 eq v0, v1 return v2 @@ -11,7 +11,7 @@ block0(v0: i64x2, v1: i64x2): ; vceqg %v24, %v24, %v25 ; br %r14 -function %icmp_ne_i64x2(i64x2, i64x2) -> b64x2 { +function %icmp_ne_i64x2(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp.i64x2 ne v0, v1 return v2 @@ -22,7 +22,7 @@ block0(v0: i64x2, v1: i64x2): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_sgt_i64x2(i64x2, i64x2) -> b64x2 { +function %icmp_sgt_i64x2(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp.i64x2 sgt v0, v1 return v2 @@ -32,7 +32,7 @@ block0(v0: i64x2, v1: i64x2): ; vchg %v24, %v24, %v25 ; br %r14 -function %icmp_slt_i64x2(i64x2, i64x2) -> b64x2 { +function %icmp_slt_i64x2(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp.i64x2 slt v0, v1 return v2 @@ -42,7 +42,7 @@ block0(v0: i64x2, v1: i64x2): ; vchg %v24, %v25, %v24 ; br %r14 -function %icmp_sge_i64x2(i64x2, i64x2) -> b64x2 { +function %icmp_sge_i64x2(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp.i64x2 sge v0, v1 return v2 @@ -53,7 +53,7 @@ block0(v0: i64x2, v1: i64x2): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_sle_i64x2(i64x2, i64x2) -> b64x2 { +function %icmp_sle_i64x2(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp.i64x2 sle v0, v1 return v2 @@ -64,7 +64,7 @@ block0(v0: i64x2, v1: i64x2): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_ugt_i64x2(i64x2, i64x2) -> b64x2 { +function %icmp_ugt_i64x2(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp.i64x2 ugt v0, v1 return v2 @@ -74,7 +74,7 @@ block0(v0: i64x2, v1: i64x2): ; vchlg %v24, %v24, %v25 ; br %r14 -function %icmp_ult_i64x2(i64x2, i64x2) -> b64x2 { +function %icmp_ult_i64x2(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp.i64x2 ult v0, v1 return v2 @@ -84,7 +84,7 @@ block0(v0: i64x2, v1: i64x2): ; vchlg %v24, %v25, %v24 ; br %r14 -function %icmp_uge_i64x2(i64x2, i64x2) -> b64x2 { +function %icmp_uge_i64x2(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = 
icmp.i64x2 uge v0, v1 return v2 @@ -95,7 +95,7 @@ block0(v0: i64x2, v1: i64x2): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_ule_i64x2(i64x2, i64x2) -> b64x2 { +function %icmp_ule_i64x2(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp.i64x2 ule v0, v1 return v2 @@ -106,7 +106,7 @@ block0(v0: i64x2, v1: i64x2): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_eq_i32x4(i32x4, i32x4) -> b32x4 { +function %icmp_eq_i32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp.i32x4 eq v0, v1 return v2 @@ -116,7 +116,7 @@ block0(v0: i32x4, v1: i32x4): ; vceqf %v24, %v24, %v25 ; br %r14 -function %icmp_ne_i32x4(i32x4, i32x4) -> b32x4 { +function %icmp_ne_i32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp.i32x4 ne v0, v1 return v2 @@ -127,7 +127,7 @@ block0(v0: i32x4, v1: i32x4): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_sgt_i32x4(i32x4, i32x4) -> b32x4 { +function %icmp_sgt_i32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp.i32x4 sgt v0, v1 return v2 @@ -137,7 +137,7 @@ block0(v0: i32x4, v1: i32x4): ; vchf %v24, %v24, %v25 ; br %r14 -function %icmp_slt_i32x4(i32x4, i32x4) -> b32x4 { +function %icmp_slt_i32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp.i32x4 slt v0, v1 return v2 @@ -147,7 +147,7 @@ block0(v0: i32x4, v1: i32x4): ; vchf %v24, %v25, %v24 ; br %r14 -function %icmp_sge_i32x4(i32x4, i32x4) -> b32x4 { +function %icmp_sge_i32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp.i32x4 sge v0, v1 return v2 @@ -158,7 +158,7 @@ block0(v0: i32x4, v1: i32x4): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_sle_i32x4(i32x4, i32x4) -> b32x4 { +function %icmp_sle_i32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp.i32x4 sle v0, v1 return v2 @@ -169,7 +169,7 @@ block0(v0: i32x4, v1: i32x4): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_ugt_i32x4(i32x4, i32x4) -> b32x4 { +function %icmp_ugt_i32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp.i32x4 ugt v0, v1 return v2 @@ -179,7 +179,7 @@ block0(v0: i32x4, v1: i32x4): ; vchlf %v24, %v24, %v25 ; br %r14 -function %icmp_ult_i32x4(i32x4, i32x4) -> b32x4 { +function %icmp_ult_i32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp.i32x4 ult v0, v1 return v2 @@ -189,7 +189,7 @@ block0(v0: i32x4, v1: i32x4): ; vchlf %v24, %v25, %v24 ; br %r14 -function %icmp_uge_i32x4(i32x4, i32x4) -> b32x4 { +function %icmp_uge_i32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp.i32x4 uge v0, v1 return v2 @@ -200,7 +200,7 @@ block0(v0: i32x4, v1: i32x4): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_ule_i32x4(i32x4, i32x4) -> b32x4 { +function %icmp_ule_i32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp.i32x4 ule v0, v1 return v2 @@ -211,7 +211,7 @@ block0(v0: i32x4, v1: i32x4): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_eq_i16x8(i16x8, i16x8) -> b16x8 { +function %icmp_eq_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp.i16x8 eq v0, v1 return v2 @@ -221,7 +221,7 @@ block0(v0: i16x8, v1: i16x8): ; vceqh %v24, %v24, %v25 ; br %r14 -function %icmp_ne_i16x8(i16x8, i16x8) -> b16x8 { +function %icmp_ne_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp.i16x8 ne v0, v1 return v2 @@ -232,7 +232,7 @@ block0(v0: i16x8, v1: i16x8): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_sgt_i16x8(i16x8, i16x8) -> b16x8 { +function %icmp_sgt_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp.i16x8 sgt v0, v1 return v2 @@ -242,7 
+242,7 @@ block0(v0: i16x8, v1: i16x8): ; vchh %v24, %v24, %v25 ; br %r14 -function %icmp_slt_i16x8(i16x8, i16x8) -> b16x8 { +function %icmp_slt_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp.i16x8 slt v0, v1 return v2 @@ -252,7 +252,7 @@ block0(v0: i16x8, v1: i16x8): ; vchh %v24, %v25, %v24 ; br %r14 -function %icmp_sge_i16x8(i16x8, i16x8) -> b16x8 { +function %icmp_sge_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp.i16x8 sge v0, v1 return v2 @@ -263,7 +263,7 @@ block0(v0: i16x8, v1: i16x8): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_sle_i16x8(i16x8, i16x8) -> b16x8 { +function %icmp_sle_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp.i16x8 sle v0, v1 return v2 @@ -274,7 +274,7 @@ block0(v0: i16x8, v1: i16x8): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_ugt_i16x8(i16x8, i16x8) -> b16x8 { +function %icmp_ugt_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp.i16x8 ugt v0, v1 return v2 @@ -284,7 +284,7 @@ block0(v0: i16x8, v1: i16x8): ; vchlh %v24, %v24, %v25 ; br %r14 -function %icmp_ult_i16x8(i16x8, i16x8) -> b16x8 { +function %icmp_ult_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp.i16x8 ult v0, v1 return v2 @@ -294,7 +294,7 @@ block0(v0: i16x8, v1: i16x8): ; vchlh %v24, %v25, %v24 ; br %r14 -function %icmp_uge_i16x8(i16x8, i16x8) -> b16x8 { +function %icmp_uge_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp.i16x8 uge v0, v1 return v2 @@ -305,7 +305,7 @@ block0(v0: i16x8, v1: i16x8): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_ule_i16x8(i16x8, i16x8) -> b16x8 { +function %icmp_ule_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp.i16x8 ule v0, v1 return v2 @@ -316,7 +316,7 @@ block0(v0: i16x8, v1: i16x8): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_eq_i8x16(i8x16, i8x16) -> b8x16 { +function %icmp_eq_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp.i8x16 eq v0, v1 return v2 @@ -326,7 +326,7 @@ block0(v0: i8x16, v1: i8x16): ; vceqb %v24, %v24, %v25 ; br %r14 -function %icmp_ne_i8x16(i8x16, i8x16) -> b8x16 { +function %icmp_ne_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp.i8x16 ne v0, v1 return v2 @@ -337,7 +337,7 @@ block0(v0: i8x16, v1: i8x16): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_sgt_i8x16(i8x16, i8x16) -> b8x16 { +function %icmp_sgt_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp.i8x16 sgt v0, v1 return v2 @@ -347,7 +347,7 @@ block0(v0: i8x16, v1: i8x16): ; vchb %v24, %v24, %v25 ; br %r14 -function %icmp_slt_i8x16(i8x16, i8x16) -> b8x16 { +function %icmp_slt_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp.i8x16 slt v0, v1 return v2 @@ -357,7 +357,7 @@ block0(v0: i8x16, v1: i8x16): ; vchb %v24, %v25, %v24 ; br %r14 -function %icmp_sge_i8x16(i8x16, i8x16) -> b8x16 { +function %icmp_sge_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp.i8x16 sge v0, v1 return v2 @@ -368,7 +368,7 @@ block0(v0: i8x16, v1: i8x16): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_sle_i8x16(i8x16, i8x16) -> b8x16 { +function %icmp_sle_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp.i8x16 sle v0, v1 return v2 @@ -379,7 +379,7 @@ block0(v0: i8x16, v1: i8x16): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_ugt_i8x16(i8x16, i8x16) -> b8x16 { +function %icmp_ugt_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp.i8x16 ugt v0, v1 return v2 @@ -389,7 +389,7 @@ block0(v0: i8x16, v1: 
i8x16): ; vchlb %v24, %v24, %v25 ; br %r14 -function %icmp_ult_i8x16(i8x16, i8x16) -> b8x16 { +function %icmp_ult_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp.i8x16 ult v0, v1 return v2 @@ -399,7 +399,7 @@ block0(v0: i8x16, v1: i8x16): ; vchlb %v24, %v25, %v24 ; br %r14 -function %icmp_uge_i8x16(i8x16, i8x16) -> b8x16 { +function %icmp_uge_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp.i8x16 uge v0, v1 return v2 @@ -410,7 +410,7 @@ block0(v0: i8x16, v1: i8x16): ; vno %v24, %v4, %v4 ; br %r14 -function %icmp_ule_i8x16(i8x16, i8x16) -> b8x16 { +function %icmp_ule_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp.i8x16 ule v0, v1 return v2 diff --git a/cranelift/filetests/filetests/isa/s390x/vec-logical.clif b/cranelift/filetests/filetests/isa/s390x/vec-logical.clif index d9e34af56134..0d4b63019d32 100644 --- a/cranelift/filetests/filetests/isa/s390x/vec-logical.clif +++ b/cranelift/filetests/filetests/isa/s390x/vec-logical.clif @@ -1,7 +1,7 @@ test compile precise-output target s390x -function %vany_true_i64x2(i64x2) -> b1 { +function %vany_true_i64x2(i64x2) -> i8 { block0(v0: i64x2): v1 = vany_true v0 return v1 @@ -14,7 +14,7 @@ block0(v0: i64x2): ; lochine %r2, 1 ; br %r14 -function %vany_true_i32x4(i32x4) -> b1 { +function %vany_true_i32x4(i32x4) -> i8 { block0(v0: i32x4): v1 = vany_true v0 return v1 @@ -27,7 +27,7 @@ block0(v0: i32x4): ; lochine %r2, 1 ; br %r14 -function %vany_true_i16x8(i16x8) -> b1 { +function %vany_true_i16x8(i16x8) -> i8 { block0(v0: i16x8): v1 = vany_true v0 return v1 @@ -40,7 +40,7 @@ block0(v0: i16x8): ; lochine %r2, 1 ; br %r14 -function %vany_true_i8x16(i8x16) -> b1 { +function %vany_true_i8x16(i8x16) -> i8 { block0(v0: i8x16): v1 = vany_true v0 return v1 @@ -53,7 +53,7 @@ block0(v0: i8x16): ; lochine %r2, 1 ; br %r14 -function %vall_true_i64x2(i64x2) -> b1 { +function %vall_true_i64x2(i64x2) -> i8 { block0(v0: i64x2): v1 = vall_true v0 return v1 @@ -66,7 +66,7 @@ block0(v0: i64x2): ; lochio %r2, 1 ; br %r14 -function %vall_true_i32x4(i32x4) -> b1 { +function %vall_true_i32x4(i32x4) -> i8 { block0(v0: i32x4): v1 = vall_true v0 return v1 @@ -79,7 +79,7 @@ block0(v0: i32x4): ; lochio %r2, 1 ; br %r14 -function %vall_true_i16x8(i16x8) -> b1 { +function %vall_true_i16x8(i16x8) -> i8 { block0(v0: i16x8): v1 = vall_true v0 return v1 @@ -92,7 +92,7 @@ block0(v0: i16x8): ; lochio %r2, 1 ; br %r14 -function %vall_true_i8x16(i8x16) -> b1 { +function %vall_true_i8x16(i8x16) -> i8 { block0(v0: i8x16): v1 = vall_true v0 return v1 @@ -105,7 +105,7 @@ block0(v0: i8x16): ; lochio %r2, 1 ; br %r14 -function %vany_true_icmp_eq_i64x2(i64x2, i64x2) -> b1 { +function %vany_true_icmp_eq_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp eq v0, v1 v3 = vany_true v2 @@ -118,7 +118,7 @@ block0(v0: i64x2, v1: i64x2): ; lochino %r2, 1 ; br %r14 -function %vany_true_icmp_ne_i64x2(i64x2, i64x2) -> b1 { +function %vany_true_icmp_ne_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp ne v0, v1 v3 = vany_true v2 @@ -131,7 +131,7 @@ block0(v0: i64x2, v1: i64x2): ; lochine %r2, 1 ; br %r14 -function %vany_true_icmp_sgt_i64x2(i64x2, i64x2) -> b1 { +function %vany_true_icmp_sgt_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp sgt v0, v1 v3 = vany_true v2 @@ -144,7 +144,7 @@ block0(v0: i64x2, v1: i64x2): ; lochino %r2, 1 ; br %r14 -function %vany_true_icmp_sle_i64x2(i64x2, i64x2) -> b1 { +function %vany_true_icmp_sle_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 
= icmp sle v0, v1 v3 = vany_true v2 @@ -157,7 +157,7 @@ block0(v0: i64x2, v1: i64x2): ; lochine %r2, 1 ; br %r14 -function %vany_true_icmp_slt_i64x2(i64x2, i64x2) -> b1 { +function %vany_true_icmp_slt_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp slt v0, v1 v3 = vany_true v2 @@ -170,7 +170,7 @@ block0(v0: i64x2, v1: i64x2): ; lochino %r2, 1 ; br %r14 -function %vany_true_icmp_sge_i64x2(i64x2, i64x2) -> b1 { +function %vany_true_icmp_sge_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp sge v0, v1 v3 = vany_true v2 @@ -183,7 +183,7 @@ block0(v0: i64x2, v1: i64x2): ; lochine %r2, 1 ; br %r14 -function %vany_true_icmp_ugt_i64x2(i64x2, i64x2) -> b1 { +function %vany_true_icmp_ugt_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp ugt v0, v1 v3 = vany_true v2 @@ -196,7 +196,7 @@ block0(v0: i64x2, v1: i64x2): ; lochino %r2, 1 ; br %r14 -function %vany_true_icmp_ule_i64x2(i64x2, i64x2) -> b1 { +function %vany_true_icmp_ule_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp ule v0, v1 v3 = vany_true v2 @@ -209,7 +209,7 @@ block0(v0: i64x2, v1: i64x2): ; lochine %r2, 1 ; br %r14 -function %vany_true_icmp_ult_i64x2(i64x2, i64x2) -> b1 { +function %vany_true_icmp_ult_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp ult v0, v1 v3 = vany_true v2 @@ -222,7 +222,7 @@ block0(v0: i64x2, v1: i64x2): ; lochino %r2, 1 ; br %r14 -function %vany_true_icmp_uge_i64x2(i64x2, i64x2) -> b1 { +function %vany_true_icmp_uge_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp uge v0, v1 v3 = vany_true v2 @@ -235,7 +235,7 @@ block0(v0: i64x2, v1: i64x2): ; lochine %r2, 1 ; br %r14 -function %vany_true_fcmp_eq_f64x2(f64x2, f64x2) -> b1 { +function %vany_true_fcmp_eq_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp eq v0, v1 v3 = vany_true v2 @@ -248,7 +248,7 @@ block0(v0: f64x2, v1: f64x2): ; lochino %r2, 1 ; br %r14 -function %vany_true_fcmp_ne_f64x2(f64x2, f64x2) -> b1 { +function %vany_true_fcmp_ne_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp ne v0, v1 v3 = vany_true v2 @@ -261,7 +261,7 @@ block0(v0: f64x2, v1: f64x2): ; lochine %r2, 1 ; br %r14 -function %vany_true_fcmp_gt_f64x2(f64x2, f64x2) -> b1 { +function %vany_true_fcmp_gt_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp gt v0, v1 v3 = vany_true v2 @@ -274,7 +274,7 @@ block0(v0: f64x2, v1: f64x2): ; lochino %r2, 1 ; br %r14 -function %vany_true_fcmp_ule_f64x2(f64x2, f64x2) -> b1 { +function %vany_true_fcmp_ule_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp ule v0, v1 v3 = vany_true v2 @@ -287,7 +287,7 @@ block0(v0: f64x2, v1: f64x2): ; lochine %r2, 1 ; br %r14 -function %vany_true_fcmp_ge_f64x2(f64x2, f64x2) -> b1 { +function %vany_true_fcmp_ge_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp ge v0, v1 v3 = vany_true v2 @@ -300,7 +300,7 @@ block0(v0: f64x2, v1: f64x2): ; lochino %r2, 1 ; br %r14 -function %vany_true_fcmp_ult_f64x2(f64x2, f64x2) -> b1 { +function %vany_true_fcmp_ult_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp ult v0, v1 v3 = vany_true v2 @@ -313,7 +313,7 @@ block0(v0: f64x2, v1: f64x2): ; lochine %r2, 1 ; br %r14 -function %vany_true_fcmp_lt_f64x2(f64x2, f64x2) -> b1 { +function %vany_true_fcmp_lt_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp lt v0, v1 v3 = vany_true v2 @@ -326,7 +326,7 @@ block0(v0: f64x2, v1: f64x2): ; lochino %r2, 1 ; br %r14 -function %vany_true_fcmp_uge_f64x2(f64x2, f64x2) 
-> b1 { +function %vany_true_fcmp_uge_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp uge v0, v1 v3 = vany_true v2 @@ -339,7 +339,7 @@ block0(v0: f64x2, v1: f64x2): ; lochine %r2, 1 ; br %r14 -function %vany_true_fcmp_le_f64x2(f64x2, f64x2) -> b1 { +function %vany_true_fcmp_le_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp le v0, v1 v3 = vany_true v2 @@ -352,7 +352,7 @@ block0(v0: f64x2, v1: f64x2): ; lochino %r2, 1 ; br %r14 -function %vany_true_fcmp_ugt_f64x2(f64x2, f64x2) -> b1 { +function %vany_true_fcmp_ugt_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp ugt v0, v1 v3 = vany_true v2 @@ -365,7 +365,7 @@ block0(v0: f64x2, v1: f64x2): ; lochine %r2, 1 ; br %r14 -function %vall_true_icmp_eq_i64x2(i64x2, i64x2) -> b1 { +function %vall_true_icmp_eq_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp eq v0, v1 v3 = vall_true v2 @@ -378,7 +378,7 @@ block0(v0: i64x2, v1: i64x2): ; lochie %r2, 1 ; br %r14 -function %vall_true_icmp_ne_i64x2(i64x2, i64x2) -> b1 { +function %vall_true_icmp_ne_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp ne v0, v1 v3 = vall_true v2 @@ -391,7 +391,7 @@ block0(v0: i64x2, v1: i64x2): ; lochio %r2, 1 ; br %r14 -function %vall_true_icmp_sgt_i64x2(i64x2, i64x2) -> b1 { +function %vall_true_icmp_sgt_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp sgt v0, v1 v3 = vall_true v2 @@ -404,7 +404,7 @@ block0(v0: i64x2, v1: i64x2): ; lochie %r2, 1 ; br %r14 -function %vall_true_icmp_sle_i64x2(i64x2, i64x2) -> b1 { +function %vall_true_icmp_sle_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp sle v0, v1 v3 = vall_true v2 @@ -417,7 +417,7 @@ block0(v0: i64x2, v1: i64x2): ; lochio %r2, 1 ; br %r14 -function %vall_true_icmp_slt_i64x2(i64x2, i64x2) -> b1 { +function %vall_true_icmp_slt_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp slt v0, v1 v3 = vall_true v2 @@ -430,7 +430,7 @@ block0(v0: i64x2, v1: i64x2): ; lochie %r2, 1 ; br %r14 -function %vall_true_icmp_sge_i64x2(i64x2, i64x2) -> b1 { +function %vall_true_icmp_sge_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp sge v0, v1 v3 = vall_true v2 @@ -443,7 +443,7 @@ block0(v0: i64x2, v1: i64x2): ; lochio %r2, 1 ; br %r14 -function %vall_true_icmp_ugt_i64x2(i64x2, i64x2) -> b1 { +function %vall_true_icmp_ugt_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp ugt v0, v1 v3 = vall_true v2 @@ -456,7 +456,7 @@ block0(v0: i64x2, v1: i64x2): ; lochie %r2, 1 ; br %r14 -function %vall_true_icmp_ule_i64x2(i64x2, i64x2) -> b1 { +function %vall_true_icmp_ule_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp ule v0, v1 v3 = vall_true v2 @@ -469,7 +469,7 @@ block0(v0: i64x2, v1: i64x2): ; lochio %r2, 1 ; br %r14 -function %vall_true_icmp_ult_i64x2(i64x2, i64x2) -> b1 { +function %vall_true_icmp_ult_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp ult v0, v1 v3 = vall_true v2 @@ -482,7 +482,7 @@ block0(v0: i64x2, v1: i64x2): ; lochie %r2, 1 ; br %r14 -function %vall_true_icmp_uge_i64x2(i64x2, i64x2) -> b1 { +function %vall_true_icmp_uge_i64x2(i64x2, i64x2) -> i8 { block0(v0: i64x2, v1: i64x2): v2 = icmp uge v0, v1 v3 = vall_true v2 @@ -495,7 +495,7 @@ block0(v0: i64x2, v1: i64x2): ; lochio %r2, 1 ; br %r14 -function %vall_true_fcmp_eq_f64x2(f64x2, f64x2) -> b1 { +function %vall_true_fcmp_eq_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp eq v0, v1 v3 = vall_true v2 @@ -508,7 +508,7 @@ block0(v0: 
f64x2, v1: f64x2): ; lochie %r2, 1 ; br %r14 -function %vall_true_fcmp_ne_f64x2(f64x2, f64x2) -> b1 { +function %vall_true_fcmp_ne_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp ne v0, v1 v3 = vall_true v2 @@ -521,7 +521,7 @@ block0(v0: f64x2, v1: f64x2): ; lochio %r2, 1 ; br %r14 -function %vall_true_fcmp_gt_f64x2(f64x2, f64x2) -> b1 { +function %vall_true_fcmp_gt_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp gt v0, v1 v3 = vall_true v2 @@ -534,7 +534,7 @@ block0(v0: f64x2, v1: f64x2): ; lochie %r2, 1 ; br %r14 -function %vall_true_fcmp_ule_f64x2(f64x2, f64x2) -> b1 { +function %vall_true_fcmp_ule_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp ule v0, v1 v3 = vall_true v2 @@ -547,7 +547,7 @@ block0(v0: f64x2, v1: f64x2): ; lochio %r2, 1 ; br %r14 -function %vall_true_fcmp_ge_f64x2(f64x2, f64x2) -> b1 { +function %vall_true_fcmp_ge_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp ge v0, v1 v3 = vall_true v2 @@ -560,7 +560,7 @@ block0(v0: f64x2, v1: f64x2): ; lochie %r2, 1 ; br %r14 -function %vall_true_fcmp_ult_f64x2(f64x2, f64x2) -> b1 { +function %vall_true_fcmp_ult_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp ult v0, v1 v3 = vall_true v2 @@ -573,7 +573,7 @@ block0(v0: f64x2, v1: f64x2): ; lochio %r2, 1 ; br %r14 -function %vall_true_fcmp_lt_f64x2(f64x2, f64x2) -> b1 { +function %vall_true_fcmp_lt_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp lt v0, v1 v3 = vall_true v2 @@ -586,7 +586,7 @@ block0(v0: f64x2, v1: f64x2): ; lochie %r2, 1 ; br %r14 -function %vall_true_fcmp_uge_f64x2(f64x2, f64x2) -> b1 { +function %vall_true_fcmp_uge_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp uge v0, v1 v3 = vall_true v2 @@ -599,7 +599,7 @@ block0(v0: f64x2, v1: f64x2): ; lochio %r2, 1 ; br %r14 -function %vall_true_fcmp_le_f64x2(f64x2, f64x2) -> b1 { +function %vall_true_fcmp_le_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp le v0, v1 v3 = vall_true v2 @@ -612,7 +612,7 @@ block0(v0: f64x2, v1: f64x2): ; lochie %r2, 1 ; br %r14 -function %vall_true_fcmp_ugt_f64x2(f64x2, f64x2) -> b1 { +function %vall_true_fcmp_ugt_f64x2(f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2): v2 = fcmp ugt v0, v1 v3 = vall_true v2 diff --git a/cranelift/filetests/filetests/isa/s390x/vec-permute-le-lane.clif b/cranelift/filetests/filetests/isa/s390x/vec-permute-le-lane.clif index 129a11764038..0280857b2b7c 100644 --- a/cranelift/filetests/filetests/isa/s390x/vec-permute-le-lane.clif +++ b/cranelift/filetests/filetests/isa/s390x/vec-permute-le-lane.clif @@ -17,7 +17,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_0(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] + v2 = shuffle v0, v1, [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] return v2 } @@ -28,7 +28,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_1(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [3 0 31 26 4 6 12 11 23 13 24 4 2 15 17 5] + v2 = shuffle v0, v1, [3 0 31 26 4 6 12 11 23 13 24 4 2 15 17 5] return v2 } @@ -39,7 +39,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_2(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47] + v2 = shuffle v0, v1, [0 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47] return v2 } @@ -52,7 +52,7 @@ block0(v0: i8x16, v1: i8x16): function 
%shuffle_vmrhg_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 25 26 27 28 29 30 31 8 9 10 11 12 13 14 15] + v2 = shuffle v0, v1, [24 25 26 27 28 29 30 31 8 9 10 11 12 13 14 15] return v2 } @@ -62,7 +62,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhf_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 25 26 27 8 9 10 11 28 29 30 31 12 13 14 15] + v2 = shuffle v0, v1, [24 25 26 27 8 9 10 11 28 29 30 31 12 13 14 15] return v2 } @@ -72,7 +72,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhh_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 25 8 9 26 27 10 11 28 29 12 13 30 31 14 15] + v2 = shuffle v0, v1, [24 25 8 9 26 27 10 11 28 29 12 13 30 31 14 15] return v2 } @@ -82,7 +82,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhb_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15] + v2 = shuffle v0, v1, [24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15] return v2 } @@ -92,7 +92,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhg_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31] + v2 = shuffle v0, v1, [8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31] return v2 } @@ -102,7 +102,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhf_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 9 10 11 24 25 26 27 12 13 14 15 28 29 30 31] + v2 = shuffle v0, v1, [8 9 10 11 24 25 26 27 12 13 14 15 28 29 30 31] return v2 } @@ -112,7 +112,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhh_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 9 24 25 10 11 26 27 12 13 28 29 14 15 30 31] + v2 = shuffle v0, v1, [8 9 24 25 10 11 26 27 12 13 28 29 14 15 30 31] return v2 } @@ -122,7 +122,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhb_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 24 9 25 10 26 11 27 12 28 13 29 14 30 15 31] + v2 = shuffle v0, v1, [8 24 9 25 10 26 11 27 12 28 13 29 14 30 15 31] return v2 } @@ -132,7 +132,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhg_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15] + v2 = shuffle v0, v1, [8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15] return v2 } @@ -142,7 +142,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhf_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 9 10 11 8 9 10 11 12 13 14 15 12 13 14 15] + v2 = shuffle v0, v1, [8 9 10 11 8 9 10 11 12 13 14 15 12 13 14 15] return v2 } @@ -152,7 +152,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhh_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 9 8 9 10 11 10 11 12 13 12 13 14 15 14 15] + v2 = shuffle v0, v1, [8 9 8 9 10 11 10 11 12 13 12 13 14 15 14 15] return v2 } @@ -162,7 +162,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhb_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 8 9 9 10 10 11 11 12 12 13 13 14 14 15 
15] + v2 = shuffle v0, v1, [8 8 9 9 10 10 11 11 12 12 13 13 14 14 15 15] return v2 } @@ -172,7 +172,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhg_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 25 26 27 28 29 30 31 24 25 26 27 28 29 30 31] + v2 = shuffle v0, v1, [24 25 26 27 28 29 30 31 24 25 26 27 28 29 30 31] return v2 } @@ -182,7 +182,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhf_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 25 26 27 24 25 26 27 28 29 30 31 28 29 30 31] + v2 = shuffle v0, v1, [24 25 26 27 24 25 26 27 28 29 30 31 28 29 30 31] return v2 } @@ -192,7 +192,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhh_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 25 24 25 26 27 26 27 28 29 28 29 30 31 30 31] + v2 = shuffle v0, v1, [24 25 24 25 26 27 26 27 28 29 28 29 30 31 30 31] return v2 } @@ -202,7 +202,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhb_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 24 25 25 26 26 27 27 28 28 29 29 30 30 31 31] + v2 = shuffle v0, v1, [24 24 25 25 26 26 27 27 28 28 29 29 30 30 31 31] return v2 } @@ -212,7 +212,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlg_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 18 19 20 21 22 23 0 1 2 3 4 5 6 7] + v2 = shuffle v0, v1, [16 17 18 19 20 21 22 23 0 1 2 3 4 5 6 7] return v2 } @@ -222,7 +222,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlf_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 18 19 0 1 2 3 20 21 22 23 4 5 6 7] + v2 = shuffle v0, v1, [16 17 18 19 0 1 2 3 20 21 22 23 4 5 6 7] return v2 } @@ -232,7 +232,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlh_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 0 1 18 19 2 3 20 21 4 5 22 23 6 7] + v2 = shuffle v0, v1, [16 17 0 1 18 19 2 3 20 21 4 5 22 23 6 7] return v2 } @@ -242,7 +242,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlb_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 0 17 1 18 2 19 3 20 4 21 5 22 6 23 7] + v2 = shuffle v0, v1, [16 0 17 1 18 2 19 3 20 4 21 5 22 6 23 7] return v2 } @@ -252,7 +252,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlg_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23] + v2 = shuffle v0, v1, [0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23] return v2 } @@ -262,7 +262,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlf_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 2 3 16 17 18 19 4 5 6 7 20 21 22 23] + v2 = shuffle v0, v1, [0 1 2 3 16 17 18 19 4 5 6 7 20 21 22 23] return v2 } @@ -272,7 +272,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlh_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 16 17 2 3 18 19 4 5 20 21 6 7 22 23] + v2 = shuffle v0, v1, [0 1 16 17 2 3 18 19 4 5 20 21 6 7 22 23] return v2 } @@ -282,7 +282,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlb_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v { 
block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 16 1 17 2 18 3 19 4 20 5 21 6 22 7 23] + v2 = shuffle v0, v1, [0 16 1 17 2 18 3 19 4 20 5 21 6 22 7 23] return v2 } @@ -292,7 +292,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlg_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7] + v2 = shuffle v0, v1, [0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7] return v2 } @@ -302,7 +302,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlf_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 2 3 0 1 2 3 4 5 6 7 4 5 6 7] + v2 = shuffle v0, v1, [0 1 2 3 0 1 2 3 4 5 6 7 4 5 6 7] return v2 } @@ -312,7 +312,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlh_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 0 1 2 3 2 3 4 5 4 5 6 7 6 7] + v2 = shuffle v0, v1, [0 1 0 1 2 3 2 3 4 5 4 5 6 7 6 7] return v2 } @@ -322,7 +322,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlb_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7] + v2 = shuffle v0, v1, [0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7] return v2 } @@ -332,7 +332,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlg_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 18 19 20 21 22 23 16 17 18 19 20 21 22 23] + v2 = shuffle v0, v1, [16 17 18 19 20 21 22 23 16 17 18 19 20 21 22 23] return v2 } @@ -342,7 +342,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlf_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 18 19 16 17 18 19 20 21 22 23 20 21 22 23] + v2 = shuffle v0, v1, [16 17 18 19 16 17 18 19 20 21 22 23 20 21 22 23] return v2 } @@ -352,7 +352,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlh_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 16 17 18 19 18 19 20 21 20 21 22 23 22 23] + v2 = shuffle v0, v1, [16 17 16 17 18 19 18 19 20 21 20 21 22 23 22 23] return v2 } @@ -362,7 +362,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlb_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 16 17 17 18 18 19 19 20 20 21 21 22 22 23 23] + v2 = shuffle v0, v1, [16 16 17 17 18 18 19 19 20 20 21 21 22 22 23 23] return v2 } @@ -373,7 +373,7 @@ block0(v0: i8x16, v1: i8x16): ;; Special patterns that can be implemented via PACK. 
function %shuffle_vpkg_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 18 19 24 25 26 27 0 1 2 3 8 9 10 11] + v2 = shuffle v0, v1, [16 17 18 19 24 25 26 27 0 1 2 3 8 9 10 11] return v2 } @@ -383,7 +383,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkf_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 20 21 24 25 28 29 0 1 4 5 8 9 12 13] + v2 = shuffle v0, v1, [16 17 20 21 24 25 28 29 0 1 4 5 8 9 12 13] return v2 } @@ -393,7 +393,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkh_xy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 18 20 22 24 26 28 30 0 2 4 6 8 10 12 14] + v2 = shuffle v0, v1, [16 18 20 22 24 26 28 30 0 2 4 6 8 10 12 14] return v2 } @@ -403,7 +403,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkg_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 2 3 8 9 10 11 16 17 18 19 24 25 26 27] + v2 = shuffle v0, v1, [0 1 2 3 8 9 10 11 16 17 18 19 24 25 26 27] return v2 } @@ -413,7 +413,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkf_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 4 5 8 9 12 13 16 17 20 21 24 25 28 29] + v2 = shuffle v0, v1, [0 1 4 5 8 9 12 13 16 17 20 21 24 25 28 29] return v2 } @@ -423,7 +423,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkh_yx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30] + v2 = shuffle v0, v1, [0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30] return v2 } @@ -433,7 +433,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkg_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 2 3 8 9 10 11 0 1 2 3 8 9 10 11] + v2 = shuffle v0, v1, [0 1 2 3 8 9 10 11 0 1 2 3 8 9 10 11] return v2 } @@ -443,7 +443,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkf_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 4 5 8 9 12 13 0 1 4 5 8 9 12 13] + v2 = shuffle v0, v1, [0 1 4 5 8 9 12 13 0 1 4 5 8 9 12 13] return v2 } @@ -453,7 +453,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkh_xx(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 2 4 6 8 10 12 14 0 2 4 6 8 10 12 14] + v2 = shuffle v0, v1, [0 2 4 6 8 10 12 14 0 2 4 6 8 10 12 14] return v2 } @@ -463,7 +463,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkg_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 18 19 24 25 26 27 16 17 18 19 24 25 26 27] + v2 = shuffle v0, v1, [16 17 18 19 24 25 26 27 16 17 18 19 24 25 26 27] return v2 } @@ -473,7 +473,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkf_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 20 21 24 25 28 29 16 17 20 21 24 25 28 29] + v2 = shuffle v0, v1, [16 17 20 21 24 25 28 29 16 17 20 21 24 25 28 29] return v2 } @@ -483,7 +483,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkh_yy(i8x16, i8x16) -> i8x16 wasmtime_system_v { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 18 20 22 24 26 28 30 16 18 20 22 24 26 28 30] + v2 = shuffle v0, v1, [16 18 20 22 24 26 28 30 16 18 20 22 24 26 28 
30] return v2 } diff --git a/cranelift/filetests/filetests/isa/s390x/vec-permute.clif b/cranelift/filetests/filetests/isa/s390x/vec-permute.clif index 19725cdd97f5..52df85b78bb3 100644 --- a/cranelift/filetests/filetests/isa/s390x/vec-permute.clif +++ b/cranelift/filetests/filetests/isa/s390x/vec-permute.clif @@ -16,7 +16,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_0(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] + v2 = shuffle v0, v1, [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] return v2 } @@ -27,7 +27,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_1(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [3 0 31 26 4 6 12 11 23 13 24 4 2 15 17 5] + v2 = shuffle v0, v1, [3 0 31 26 4 6 12 11 23 13 24 4 2 15 17 5] return v2 } @@ -38,7 +38,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_2(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47] + v2 = shuffle v0, v1, [0 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47] return v2 } @@ -51,7 +51,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhg_xy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23] + v2 = shuffle v0, v1, [0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23] return v2 } @@ -61,7 +61,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhf_xy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 2 3 16 17 18 19 4 5 6 7 20 21 22 23] + v2 = shuffle v0, v1, [0 1 2 3 16 17 18 19 4 5 6 7 20 21 22 23] return v2 } @@ -71,7 +71,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhh_xy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 16 17 2 3 18 19 4 5 20 21 6 7 22 23] + v2 = shuffle v0, v1, [0 1 16 17 2 3 18 19 4 5 20 21 6 7 22 23] return v2 } @@ -81,7 +81,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhb_xy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 16 1 17 2 18 3 19 4 20 5 21 6 22 7 23] + v2 = shuffle v0, v1, [0 16 1 17 2 18 3 19 4 20 5 21 6 22 7 23] return v2 } @@ -91,7 +91,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhg_yx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 18 19 20 21 22 23 0 1 2 3 4 5 6 7] + v2 = shuffle v0, v1, [16 17 18 19 20 21 22 23 0 1 2 3 4 5 6 7] return v2 } @@ -101,7 +101,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhf_yx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 18 19 0 1 2 3 20 21 22 23 4 5 6 7] + v2 = shuffle v0, v1, [16 17 18 19 0 1 2 3 20 21 22 23 4 5 6 7] return v2 } @@ -111,7 +111,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhh_yx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 0 1 18 19 2 3 20 21 4 5 22 23 6 7] + v2 = shuffle v0, v1, [16 17 0 1 18 19 2 3 20 21 4 5 22 23 6 7] return v2 } @@ -121,7 +121,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhb_yx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 0 17 1 18 2 19 3 20 4 21 5 22 6 23 7] + v2 = shuffle v0, v1, [16 0 17 1 18 2 19 3 20 4 21 5 22 6 23 7] return v2 } @@ -131,7 +131,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhg_xx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7] + v2 
= shuffle v0, v1, [0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7] return v2 } @@ -141,7 +141,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhf_xx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 2 3 0 1 2 3 4 5 6 7 4 5 6 7] + v2 = shuffle v0, v1, [0 1 2 3 0 1 2 3 4 5 6 7 4 5 6 7] return v2 } @@ -151,7 +151,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhh_xx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 1 0 1 2 3 2 3 4 5 4 5 6 7 6 7] + v2 = shuffle v0, v1, [0 1 0 1 2 3 2 3 4 5 4 5 6 7 6 7] return v2 } @@ -161,7 +161,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhb_xx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7] + v2 = shuffle v0, v1, [0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7] return v2 } @@ -171,7 +171,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhg_yy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 18 19 20 21 22 23 16 17 18 19 20 21 22 23] + v2 = shuffle v0, v1, [16 17 18 19 20 21 22 23 16 17 18 19 20 21 22 23] return v2 } @@ -181,7 +181,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhf_yy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 18 19 16 17 18 19 20 21 22 23 20 21 22 23] + v2 = shuffle v0, v1, [16 17 18 19 16 17 18 19 20 21 22 23 20 21 22 23] return v2 } @@ -191,7 +191,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhh_yy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 17 16 17 18 19 18 19 20 21 20 21 22 23 22 23] + v2 = shuffle v0, v1, [16 17 16 17 18 19 18 19 20 21 20 21 22 23 22 23] return v2 } @@ -201,7 +201,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrhb_yy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [16 16 17 17 18 18 19 19 20 20 21 21 22 22 23 23] + v2 = shuffle v0, v1, [16 16 17 17 18 18 19 19 20 20 21 21 22 22 23 23] return v2 } @@ -211,7 +211,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlg_xy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31] + v2 = shuffle v0, v1, [8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31] return v2 } @@ -221,7 +221,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlf_xy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 9 10 11 24 25 26 27 12 13 14 15 28 29 30 31] + v2 = shuffle v0, v1, [8 9 10 11 24 25 26 27 12 13 14 15 28 29 30 31] return v2 } @@ -231,7 +231,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlh_xy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 9 24 25 10 11 26 27 12 13 28 29 14 15 30 31] + v2 = shuffle v0, v1, [8 9 24 25 10 11 26 27 12 13 28 29 14 15 30 31] return v2 } @@ -241,7 +241,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlb_xy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 24 9 25 10 26 11 27 12 28 13 29 14 30 15 31] + v2 = shuffle v0, v1, [8 24 9 25 10 26 11 27 12 28 13 29 14 30 15 31] return v2 } @@ -251,7 +251,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlg_yx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 25 26 27 28 29 30 31 8 9 10 11 12 13 14 15] + v2 = shuffle v0, v1, [24 25 26 27 28 29 30 31 8 9 10 11 12 13 14 15] return v2 } @@ -261,7 +261,7 @@ block0(v0: i8x16, v1: i8x16): 
function %shuffle_vmrlf_yx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 25 26 27 8 9 10 11 28 29 30 31 12 13 14 15] + v2 = shuffle v0, v1, [24 25 26 27 8 9 10 11 28 29 30 31 12 13 14 15] return v2 } @@ -271,7 +271,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlh_yx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 25 8 9 26 27 10 11 28 29 12 13 30 31 14 15] + v2 = shuffle v0, v1, [24 25 8 9 26 27 10 11 28 29 12 13 30 31 14 15] return v2 } @@ -281,7 +281,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlb_yx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15] + v2 = shuffle v0, v1, [24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15] return v2 } @@ -291,7 +291,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlg_xx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15] + v2 = shuffle v0, v1, [8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15] return v2 } @@ -301,7 +301,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlf_xx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 9 10 11 8 9 10 11 12 13 14 15 12 13 14 15] + v2 = shuffle v0, v1, [8 9 10 11 8 9 10 11 12 13 14 15 12 13 14 15] return v2 } @@ -311,7 +311,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlh_xx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 9 8 9 10 11 10 11 12 13 12 13 14 15 14 15] + v2 = shuffle v0, v1, [8 9 8 9 10 11 10 11 12 13 12 13 14 15 14 15] return v2 } @@ -321,7 +321,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlb_xx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [8 8 9 9 10 10 11 11 12 12 13 13 14 14 15 15] + v2 = shuffle v0, v1, [8 8 9 9 10 10 11 11 12 12 13 13 14 14 15 15] return v2 } @@ -331,7 +331,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlg_yy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 25 26 27 28 29 30 31 24 25 26 27 28 29 30 31] + v2 = shuffle v0, v1, [24 25 26 27 28 29 30 31 24 25 26 27 28 29 30 31] return v2 } @@ -341,7 +341,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlf_yy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 25 26 27 24 25 26 27 28 29 30 31 28 29 30 31] + v2 = shuffle v0, v1, [24 25 26 27 24 25 26 27 28 29 30 31 28 29 30 31] return v2 } @@ -351,7 +351,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlh_yy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 25 24 25 26 27 26 27 28 29 28 29 30 31 30 31] + v2 = shuffle v0, v1, [24 25 24 25 26 27 26 27 28 29 28 29 30 31 30 31] return v2 } @@ -361,7 +361,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vmrlb_yy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [24 24 25 25 26 26 27 27 28 28 29 29 30 30 31 31] + v2 = shuffle v0, v1, [24 24 25 25 26 26 27 27 28 28 29 29 30 30 31 31] return v2 } @@ -372,7 +372,7 @@ block0(v0: i8x16, v1: i8x16): ;; Special patterns that can be implemented via PACK. 
function %shuffle_vpkg_xy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [4 5 6 7 12 13 14 15 20 21 22 23 28 29 30 31] + v2 = shuffle v0, v1, [4 5 6 7 12 13 14 15 20 21 22 23 28 29 30 31] return v2 } @@ -382,7 +382,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkf_xy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [2 3 6 7 10 11 14 15 18 19 22 23 26 27 30 31] + v2 = shuffle v0, v1, [2 3 6 7 10 11 14 15 18 19 22 23 26 27 30 31] return v2 } @@ -392,7 +392,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkh_xy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31] + v2 = shuffle v0, v1, [1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31] return v2 } @@ -402,7 +402,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkg_yx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [20 21 22 23 28 29 30 31 4 5 6 7 12 13 14 15] + v2 = shuffle v0, v1, [20 21 22 23 28 29 30 31 4 5 6 7 12 13 14 15] return v2 } @@ -412,7 +412,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkf_yx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [18 19 22 23 26 27 30 31 2 3 6 7 10 11 14 15] + v2 = shuffle v0, v1, [18 19 22 23 26 27 30 31 2 3 6 7 10 11 14 15] return v2 } @@ -422,7 +422,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkh_yx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [17 19 21 23 25 27 29 31 1 3 5 7 9 11 13 15] + v2 = shuffle v0, v1, [17 19 21 23 25 27 29 31 1 3 5 7 9 11 13 15] return v2 } @@ -432,7 +432,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkg_xx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [4 5 6 7 12 13 14 15 4 5 6 7 12 13 14 15] + v2 = shuffle v0, v1, [4 5 6 7 12 13 14 15 4 5 6 7 12 13 14 15] return v2 } @@ -442,7 +442,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkf_xx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [2 3 6 7 10 11 14 15 2 3 6 7 10 11 14 15] + v2 = shuffle v0, v1, [2 3 6 7 10 11 14 15 2 3 6 7 10 11 14 15] return v2 } @@ -452,7 +452,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkh_xx(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [1 3 5 7 9 11 13 15 1 3 5 7 9 11 13 15] + v2 = shuffle v0, v1, [1 3 5 7 9 11 13 15 1 3 5 7 9 11 13 15] return v2 } @@ -462,7 +462,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkg_yy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [20 21 22 23 28 29 30 31 20 21 22 23 28 29 30 31] + v2 = shuffle v0, v1, [20 21 22 23 28 29 30 31 20 21 22 23 28 29 30 31] return v2 } @@ -472,7 +472,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkf_yy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [18 19 22 23 26 27 30 31 18 19 22 23 26 27 30 31] + v2 = shuffle v0, v1, [18 19 22 23 26 27 30 31 18 19 22 23 26 27 30 31] return v2 } @@ -482,7 +482,7 @@ block0(v0: i8x16, v1: i8x16): function %shuffle_vpkh_yy(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): - v2 = shuffle.i8x16 v0, v1, [17 19 21 23 25 27 29 31 17 19 21 23 25 27 29 31] + v2 = shuffle v0, v1, [17 19 21 23 25 27 29 31 17 19 21 23 25 27 29 31] return v2 } diff --git a/cranelift/filetests/filetests/isa/x64/atomic-cas-bug.clif b/cranelift/filetests/filetests/isa/x64/atomic-cas-bug.clif index 196bde2b111c..830b3b4c353f 
100644 --- a/cranelift/filetests/filetests/isa/x64/atomic-cas-bug.clif +++ b/cranelift/filetests/filetests/isa/x64/atomic-cas-bug.clif @@ -158,83 +158,74 @@ function u0:31(i64, i32, i32, i8, i8) -> i32, i32 system_v { block9: @000d v37 = atomic_cas.i32 v34, v35, v36 @000d v38 = icmp eq v37, v35 -@000d v39 = bint.i8 v38 @000d jump block10 block10: -@000e jump block32(v37, v39) +@000e jump block32(v37, v38) block11: @0012 v43 = atomic_cas.i32 v40, v41, v42 @0012 v44 = icmp eq v43, v41 -@0012 v45 = bint.i8 v44 @0012 jump block12 block12: -@0013 jump block32(v43, v45) +@0013 jump block32(v43, v44) block13: @0017 v49 = atomic_cas.i32 v46, v47, v48 @0017 v50 = icmp eq v49, v47 -@0017 v51 = bint.i8 v50 @0017 jump block14 block14: -@0018 jump block32(v49, v51) +@0018 jump block32(v49, v50) block15: @001c v55 = atomic_cas.i32 v52, v53, v54 @001c v56 = icmp eq v55, v53 -@001c v57 = bint.i8 v56 @001c jump block16 block16: -@001d jump block32(v55, v57) +@001d jump block32(v55, v56) block17: @0021 v61 = atomic_cas.i32 v58, v59, v60 @0021 v62 = icmp eq v61, v59 -@0021 v63 = bint.i8 v62 @0021 jump block18 block18: -@0022 jump block32(v61, v63) +@0022 jump block32(v61, v62) block19: @0026 v67 = atomic_cas.i32 v64, v65, v66 @0026 v68 = icmp eq v67, v65 -@0026 v69 = bint.i8 v68 @0026 jump block20 block20: -@0027 jump block32(v67, v69) +@0027 jump block32(v67, v68) block21: @002b v73 = atomic_cas.i32 v70, v71, v72 @002b v74 = icmp eq v73, v71 -@002b v75 = bint.i8 v74 @002b jump block22 block22: -@002c jump block32(v73, v75) +@002c jump block32(v73, v74) block23: @0030 v79 = atomic_cas.i32 v76, v77, v78 @0030 v80 = icmp eq v79, v77 -@0030 v81 = bint.i8 v80 @0030 jump block24 block24: -@0031 jump block32(v79, v81) +@0031 jump block32(v79, v80) block25: @0035 v85 = atomic_cas.i32 v82, v83, v84 @0035 v86 = icmp eq v85, v83 -@0035 v87 = bint.i8 v86 @0035 jump block26 block26: -@0036 jump block32(v85, v87) +@0036 jump block32(v85, v86) block27: @0038 v88 = global_value.i64 gv2 diff --git a/cranelift/filetests/filetests/isa/x64/atomic_cas_const_addr.clif b/cranelift/filetests/filetests/isa/x64/atomic_cas_const_addr.clif index 00d0b715c383..f0000f4cae8a 100644 --- a/cranelift/filetests/filetests/isa/x64/atomic_cas_const_addr.clif +++ b/cranelift/filetests/filetests/isa/x64/atomic_cas_const_addr.clif @@ -10,7 +10,7 @@ function u0:31() -> i32, i32 system_v { v0 = iconst.i64 0 v1 = iconst.i32 0 v2 = iconst.i32 0 -@0004 v28 = bconst.b1 false +@0004 v28 = iconst.i8 0 @0005 brnz v28, block25 jump block1 diff --git a/cranelift/filetests/filetests/isa/x64/b1.clif b/cranelift/filetests/filetests/isa/x64/b1.clif index 65cf14c407a8..4b0c1af26516 100644 --- a/cranelift/filetests/filetests/isa/x64/b1.clif +++ b/cranelift/filetests/filetests/isa/x64/b1.clif @@ -1,8 +1,8 @@ test compile precise-output target x86_64 -function %f0(b1, i32, i32) -> i32 { -block0(v0: b1, v1: i32, v2: i32): +function %f0(i8, i32, i32) -> i32 { +block0(v0: i8, v1: i32, v2: i32): v3 = select.i32 v0, v1, v2 return v3 } @@ -17,8 +17,8 @@ block0(v0: b1, v1: i32, v2: i32): ; popq %rbp ; ret -function %f1(b1) -> i32 { -block0(v0: b1): +function %f1(i8) -> i32 { +block0(v0: i8): brnz v0, block1 jump block2 block1: @@ -45,8 +45,8 @@ block2: ; popq %rbp ; ret -function %f2(b1) -> i32 { -block0(v0: b1): +function %f2(i8) -> i32 { +block0(v0: i8): brz v0, block1 jump block2 block1: @@ -137,7 +137,7 @@ block2: ; popq %rbp ; ret -function %test_x_slt_0_i64(i64) -> b1 { +function %test_x_slt_0_i64(i64) -> i8 { block0(v0: i64): v1 = iconst.i64 0 v2 = icmp slt v0, v1 
@@ -153,7 +153,7 @@ block0(v0: i64): ; popq %rbp ; ret -function %test_x_slt_0_i32f4(i32) -> b1 { +function %test_x_slt_0_i32f4(i32) -> i8 { block0(v0: i32): v1 = iconst.i32 0 v2 = icmp slt v0, v1 @@ -169,7 +169,7 @@ block0(v0: i32): ; popq %rbp ; ret -function %test_0_sgt_x_i64(i64) -> b1 { +function %test_0_sgt_x_i64(i64) -> i8 { block0(v0: i64): v1 = iconst.i64 0 v2 = icmp sgt v1, v0 @@ -185,7 +185,7 @@ block0(v0: i64): ; popq %rbp ; ret -function %test_0_sgt_x_i32f4(i32) -> b1 { +function %test_0_sgt_x_i32f4(i32) -> i8 { block0(v0: i32): v1 = iconst.i32 0 v2 = icmp sgt v1, v0 @@ -201,7 +201,7 @@ block0(v0: i32): ; popq %rbp ; ret -function %test_0_sle_x_i64(i64) -> b1 { +function %test_0_sle_x_i64(i64) -> i8 { block0(v0: i64): v1 = iconst.i64 0 v2 = icmp sle v1, v0 @@ -218,7 +218,7 @@ block0(v0: i64): ; popq %rbp ; ret -function %test_0_sle_x_i32f4(i32) -> b1 { +function %test_0_sle_x_i32f4(i32) -> i8 { block0(v0: i32): v1 = iconst.i32 0 v2 = icmp sle v1, v0 @@ -235,7 +235,7 @@ block0(v0: i32): ; popq %rbp ; ret -function %test_x_sge_x_i64(i64) -> b1 { +function %test_x_sge_x_i64(i64) -> i8 { block0(v0: i64): v1 = iconst.i64 0 v2 = icmp sge v0, v1 @@ -252,7 +252,7 @@ block0(v0: i64): ; popq %rbp ; ret -function %test_x_sge_x_i32f4(i32) -> b1 { +function %test_x_sge_x_i32f4(i32) -> i8 { block0(v0: i32): v1 = iconst.i32 0 v2 = icmp sge v0, v1 diff --git a/cranelift/filetests/filetests/isa/x64/branches.clif b/cranelift/filetests/filetests/isa/x64/branches.clif index 9bdd14e2b7cb..433f09a4334a 100644 --- a/cranelift/filetests/filetests/isa/x64/branches.clif +++ b/cranelift/filetests/filetests/isa/x64/branches.clif @@ -126,16 +126,16 @@ block2: ; popq %rbp ; ret -function %f4(f32, f32) -> b1 { +function %f4(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp eq v0, v1 brz v2, block1 jump block2 block1: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 block2: - v4 = bconst.b1 false + v4 = iconst.i8 0 return v4 } @@ -156,16 +156,16 @@ block2: ; popq %rbp ; ret -function %f4(f32, f32) -> b1 { +function %f4(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp ne v0, v1 brz v2, block1 jump block2 block1: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 block2: - v4 = bconst.b1 false + v4 = iconst.i8 0 return v4 } @@ -187,18 +187,18 @@ block2: ; ret -function %f5(i32) -> b1 { +function %f5(i32) -> i8 { jt0 = jump_table [block1, block2] block0(v0: i32): br_table v0, block1, jt0 block1: - v1 = bconst.b1 true + v1 = iconst.i8 1 return v1 block2: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 } @@ -222,17 +222,17 @@ block2: ; popq %rbp ; ret -function %f6(i64) -> b1 { +function %f6(i64) -> i8 { block0(v0: i64): v1 = iconst.i64 0 v2 = icmp slt v0, v1 brnz v2, block1 jump block2 block1: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 block2: - v4 = bconst.b1 false + v4 = iconst.i8 0 return v4 } @@ -252,17 +252,17 @@ block2: ; popq %rbp ; ret -function %f7(i32) -> b1 { +function %f7(i32) -> i8 { block0(v0: i32): v1 = iconst.i32 0 v2 = icmp slt v0, v1 brnz v2, block1 jump block2 block1: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 block2: - v4 = bconst.b1 false + v4 = iconst.i8 0 return v4 } diff --git a/cranelift/filetests/filetests/isa/x64/cmp-mem-bug.clif b/cranelift/filetests/filetests/isa/x64/cmp-mem-bug.clif index 0553a26d0a26..db699e341c43 100644 --- a/cranelift/filetests/filetests/isa/x64/cmp-mem-bug.clif +++ b/cranelift/filetests/filetests/isa/x64/cmp-mem-bug.clif @@ -5,7 +5,7 @@ function %f0(i64, i64) -> i64, i64 { block0(v0: i64, v1: i64): v2 = load.i64 v1 v3 = icmp eq v0, 
v2 - v4 = bint.i64 v3 + v4 = uextend.i64 v3 v5 = select.i64 v3, v0, v1 return v4, v5 } @@ -16,7 +16,7 @@ block0(v0: i64, v1: i64): ; movq 0(%rsi), %r11 ; cmpq %r11, %rdi ; setz %al -; andq %rax, $1, %rax +; movzbq %al, %rax ; cmpq %r11, %rdi ; movq %rsi, %rdx ; cmovzq %rdi, %rdx, %rdx @@ -28,7 +28,7 @@ function %f1(f64, i64) -> i64, f64 { block0(v0: f64, v1: i64): v2 = load.f64 v1 v3 = fcmp eq v0, v2 - v4 = bint.i64 v3 + v4 = uextend.i64 v3 v5 = select.f64 v3, v0, v0 return v4, v5 } @@ -38,10 +38,10 @@ block0(v0: f64, v1: i64): ; block0: ; movsd 0(%rdi), %xmm11 ; ucomisd %xmm11, %xmm0 -; setnp %al -; setz %cl -; andl %eax, %ecx, %eax -; andq %rax, $1, %rax +; setnp %cl +; setz %dl +; andl %ecx, %edx, %ecx +; movzbq %cl, %rax ; ucomisd %xmm0, %xmm11 ; movdqa %xmm0, %xmm12 ; mov z, sd; j%xmm0 $next; mov%xmm12 %xmm12, %xmm12; $next: diff --git a/cranelift/filetests/filetests/isa/x64/fcmp-mem-bug.clif b/cranelift/filetests/filetests/isa/x64/fcmp-mem-bug.clif index a53d075ed8b2..7c72e89dee99 100644 --- a/cranelift/filetests/filetests/isa/x64/fcmp-mem-bug.clif +++ b/cranelift/filetests/filetests/isa/x64/fcmp-mem-bug.clif @@ -223,9 +223,9 @@ function u0:11335(i64 vmctx, i64, i32, i32, i32, i32, i32, i32, i32, i32) fast { v394 -> v99 v395 -> v356 @4b666c v16 = icmp sle v14, v15 -@4b666c v17 = bint.i32 v16 +@4b666c v17 = uextend.i32 v16 @4b6671 v19 = icmp sle v18, v15 -@4b6671 v20 = bint.i32 v19 +@4b6671 v20 = uextend.i32 v19 @4b6672 v21 = bor v17, v20 @4b6674 brnz v21, block9 @4b6674 jump block10 @@ -272,8 +272,7 @@ function u0:11335(i64 vmctx, i64, i32, i32, i32, i32, i32, i32, i32, i32) fast { @4b6695 v37 = iadd.i64 v438, v443 @4b6695 v38 = load.f32 little v37+68 @4b6698 v39 = fcmp.f32 gt v32, v38 -@4b6698 v40 = bint.i32 v39 -@4b669a brnz v40, block14 +@4b669a brnz v39, block14 @4b669a jump block15 block15: @@ -685,7 +684,7 @@ function u0:11335(i64 vmctx, i64, i32, i32, i32, i32, i32, i32, i32, i32) fast { @4b69f3 v366 = iadd.i64 v438, v534 @4b69f3 v367 = load.f32 little v366+68 @4b69f6 v368 = fcmp gt v362, v367 -@4b69f6 v369 = bint.i32 v368 +@4b69f6 v369 = uextend.i32 v368 @4b69f9 v371 = bxor v369, v468 @4b69fb brnz v371, block71 @4b69fb jump block72 diff --git a/cranelift/filetests/filetests/isa/x64/i128.clif b/cranelift/filetests/filetests/isa/x64/i128.clif index 504cb1cc40e0..0d4afcd6f76b 100644 --- a/cranelift/filetests/filetests/isa/x64/i128.clif +++ b/cranelift/filetests/filetests/isa/x64/i128.clif @@ -159,7 +159,7 @@ block0(v0: i128): ; popq %rbp ; ret -function %f9(i128, i128) -> b1 { +function %f9(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp eq v0, v1 v3 = icmp ne v0, v1 @@ -457,17 +457,16 @@ block0(v0: i128): ; popq %rbp ; ret -function %f18(b1) -> i128 { -block0(v0: b1): - v1 = bint.i128 v0 +function %f18(i8) -> i128 { +block0(v0: i8): + v1 = uextend.i128 v0 return v1 } ; pushq %rbp ; movq %rsp, %rbp ; block0: -; movq %rdi, %rax -; andq %rax, $1, %rax +; movzbq %dil, %rax ; xorq %rdx, %rdx, %rdx ; movq %rbp, %rsp ; popq %rbp @@ -655,8 +654,8 @@ block0(v0: i64): ; popq %rbp ; ret -function %f23(i128, b1) -> i128 { -block0(v0: i128, v1: b1): +function %f23(i128, i8) -> i128 { +block0(v0: i128, v1: i8): v2 = iconst.i128 0 brnz v1, block1(v2) jump block2(v2) diff --git a/cranelift/filetests/filetests/isa/x64/load-op.clif b/cranelift/filetests/filetests/isa/x64/load-op.clif index dff0e567b02e..69a7d4feaaaf 100644 --- a/cranelift/filetests/filetests/isa/x64/load-op.clif +++ b/cranelift/filetests/filetests/isa/x64/load-op.clif @@ -126,7 +126,7 @@ function %cmp_mem(i64) -> i64 { 
block0(v0: i64): v1 = load.i64 v0 v2 = icmp eq v0, v1 - v3 = bint.i64 v2 + v3 = uextend.i64 v2 return v3 } @@ -134,8 +134,8 @@ block0(v0: i64): ; movq %rsp, %rbp ; block0: ; cmpq 0(%rdi), %rdi -; setz %al -; andq %rax, $1, %rax +; setz %r8b +; movzbq %r8b, %rax ; movq %rbp, %rsp ; popq %rbp ; ret diff --git a/cranelift/filetests/filetests/isa/x64/move-elision.clif b/cranelift/filetests/filetests/isa/x64/move-elision.clif index af16a95c83b3..f570f7005103 100644 --- a/cranelift/filetests/filetests/isa/x64/move-elision.clif +++ b/cranelift/filetests/filetests/isa/x64/move-elision.clif @@ -2,14 +2,14 @@ test compile precise-output set enable_simd target x86_64 skylake -function %move_registers(i32x4) -> b8x16 { +function %move_registers(i32x4) -> i8x16 { block0(v0: i32x4): ;; In the x64 backend, all of these pseudo-instructions are lowered to moves between registers (e.g. MOVAPD, MOVDQA, ;; etc.). Because these have been marked as moves, no instructions are emitted by this function besides the prologue ;; and epilogue. v1 = raw_bitcast.f32x4 v0 v2 = raw_bitcast.f64x2 v1 - v3 = raw_bitcast.b8x16 v2 + v3 = raw_bitcast.i8x16 v2 return v3 } diff --git a/cranelift/filetests/filetests/isa/x64/bextend.clif b/cranelift/filetests/filetests/isa/x64/sextend.clif similarity index 73% rename from cranelift/filetests/filetests/isa/x64/bextend.clif rename to cranelift/filetests/filetests/isa/x64/sextend.clif index 8c79762d5604..6b56614e86d3 100644 --- a/cranelift/filetests/filetests/isa/x64/bextend.clif +++ b/cranelift/filetests/filetests/isa/x64/sextend.clif @@ -1,9 +1,9 @@ test compile precise-output target x86_64 -function %f0(b8) -> b64 { -block0(v0: b8): - v1 = bextend.b64 v0 +function %f0(i8) -> i64 { +block0(v0: i8): + v1 = sextend.i64 v0 return v1 } diff --git a/cranelift/filetests/filetests/isa/x64/simd-bitwise-compile.clif b/cranelift/filetests/filetests/isa/x64/simd-bitwise-compile.clif index 69141f475054..231c2fc9e4e6 100644 --- a/cranelift/filetests/filetests/isa/x64/simd-bitwise-compile.clif +++ b/cranelift/filetests/filetests/isa/x64/simd-bitwise-compile.clif @@ -150,8 +150,8 @@ block0: ; popq %rbp ; ret -function %vselect_i16x8(b16x8, i16x8, i16x8) -> i16x8 { -block0(v0: b16x8, v1: i16x8, v2: i16x8): +function %vselect_i16x8(i16x8, i16x8, i16x8) -> i16x8 { +block0(v0: i16x8, v1: i16x8, v2: i16x8): v3 = vselect v0, v1, v2 return v3 } @@ -166,8 +166,8 @@ block0(v0: b16x8, v1: i16x8, v2: i16x8): ; popq %rbp ; ret -function %vselect_f32x4(b32x4, f32x4, f32x4) -> f32x4 { -block0(v0: b32x4, v1: f32x4, v2: f32x4): +function %vselect_f32x4(i32x4, f32x4, f32x4) -> f32x4 { +block0(v0: i32x4, v1: f32x4, v2: f32x4): v3 = vselect v0, v1, v2 return v3 } @@ -182,8 +182,8 @@ block0(v0: b32x4, v1: f32x4, v2: f32x4): ; popq %rbp ; ret -function %vselect_f64x2(b64x2, f64x2, f64x2) -> f64x2 { -block0(v0: b64x2, v1: f64x2, v2: f64x2): +function %vselect_f64x2(i64x2, f64x2, f64x2) -> f64x2 { +block0(v0: i64x2, v1: f64x2, v2: f64x2): v3 = vselect v0, v1, v2 return v3 } diff --git a/cranelift/filetests/filetests/isa/x64/simd-comparison-legalize.clif b/cranelift/filetests/filetests/isa/x64/simd-comparison-legalize.clif index 31f0bf6a9bae..f1e049735bd1 100644 --- a/cranelift/filetests/filetests/isa/x64/simd-comparison-legalize.clif +++ b/cranelift/filetests/filetests/isa/x64/simd-comparison-legalize.clif @@ -2,7 +2,7 @@ test compile precise-output set enable_simd target x86_64 skylake -function %icmp_ne_32x4(i32x4, i32x4) -> b32x4 { +function %icmp_ne_32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = 
icmp ne v0, v1 return v2 @@ -18,7 +18,7 @@ block0(v0: i32x4, v1: i32x4): ; popq %rbp ; ret -function %icmp_ugt_i32x4(i32x4, i32x4) -> b32x4 { +function %icmp_ugt_i32x4(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp ugt v0, v1 return v2 @@ -35,7 +35,7 @@ block0(v0: i32x4, v1: i32x4): ; popq %rbp ; ret -function %icmp_sge_i16x8(i16x8, i16x8) -> b16x8 { +function %icmp_sge_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp sge v0, v1 return v2 @@ -51,7 +51,7 @@ block0(v0: i16x8, v1: i16x8): ; popq %rbp ; ret -function %icmp_uge_i8x16(i8x16, i8x16) -> b8x16 { +function %icmp_uge_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp uge v0, v1 return v2 diff --git a/cranelift/filetests/filetests/isa/x64/simd-lane-access-compile.clif b/cranelift/filetests/filetests/isa/x64/simd-lane-access-compile.clif index 46621034b449..6022d99b083d 100644 --- a/cranelift/filetests/filetests/isa/x64/simd-lane-access-compile.clif +++ b/cranelift/filetests/filetests/isa/x64/simd-lane-access-compile.clif @@ -80,17 +80,17 @@ block0(v0: i8): ; popq %rbp ; ret -function %splat_b16() -> b16x8 { +function %splat_i16() -> i16x8 { block0: - v0 = bconst.b16 true - v1 = splat.b16x8 v0 + v0 = iconst.i16 -1 + v1 = splat.i16x8 v0 return v1 } ; pushq %rbp ; movq %rsp, %rbp ; block0: -; movl $65535, %edi +; movl $-1, %edi ; uninit %xmm5 ; pinsrw $0, %xmm5, %rdi, %xmm5 ; pinsrw $1, %xmm5, %rdi, %xmm5 diff --git a/cranelift/filetests/filetests/isa/x64/simd-logical-compile.clif b/cranelift/filetests/filetests/isa/x64/simd-logical-compile.clif index 40ab8cc76b0c..acb12c809a37 100644 --- a/cranelift/filetests/filetests/isa/x64/simd-logical-compile.clif +++ b/cranelift/filetests/filetests/isa/x64/simd-logical-compile.clif @@ -2,8 +2,8 @@ test compile precise-output set enable_simd target x86_64 skylake -function %bnot_b32x4(b32x4) -> b32x4 { -block0(v0: b32x4): +function %bnot_i32x4(i32x4) -> i32x4 { +block0(v0: i32x4): v1 = bnot v0 return v1 } @@ -17,8 +17,8 @@ block0(v0: b32x4): ; popq %rbp ; ret -function %vany_true_b32x4(b32x4) -> b1 { -block0(v0: b32x4): +function %vany_true_i32x4(i32x4) -> i8 { +block0(v0: i32x4): v1 = vany_true v0 return v1 } @@ -32,7 +32,7 @@ block0(v0: b32x4): ; popq %rbp ; ret -function %vall_true_i64x2(i64x2) -> b1 { +function %vall_true_i64x2(i64x2) -> i8 { block0(v0: i64x2): v1 = vall_true v0 return v1 diff --git a/cranelift/filetests/filetests/licm/rewrite-jump-table.clif b/cranelift/filetests/filetests/licm/rewrite-jump-table.clif index 485e11983a53..7899f8975251 100644 --- a/cranelift/filetests/filetests/licm/rewrite-jump-table.clif +++ b/cranelift/filetests/filetests/licm/rewrite-jump-table.clif @@ -12,14 +12,14 @@ function %rewrite_jump_table() { return block2: - v4 = bconst.b1 false + v4 = iconst.i8 0 jump block2 } ; sameln: function ; nextln: jt0 = jump_table [block1, block3] ; check: block3: -; nextln: v4 = bconst.b1 false +; nextln: v4 = iconst.i8 0 ; nextln: jump block2 ; check: block2: ; nextln: jump block2 diff --git a/cranelift/filetests/filetests/parser/call.clif b/cranelift/filetests/filetests/parser/call.clif index 871963d15300..1334b07afbdd 100644 --- a/cranelift/filetests/filetests/parser/call.clif +++ b/cranelift/filetests/filetests/parser/call.clif @@ -25,14 +25,14 @@ block1: function %signatures() { sig10 = () - sig11 = (i32, f64) -> i32, b1 + sig11 = (i32, f64) -> i32, i8 fn5 = %foo sig11 - fn8 = %bar(i32) -> b1 + fn8 = %bar(i32) -> i8 } ; sameln: function %signatures() fast { ; check: sig10 = () fast -; check: sig11 = (i32, 
f64) -> i32, b1 -; check: sig12 = (i32) -> b1 fast +; check: sig11 = (i32, f64) -> i32, i8 +; check: sig12 = (i32) -> i8 fast ; not: fn0 ; check: fn5 = %foo sig11 ; check: fn8 = %bar sig12 diff --git a/cranelift/filetests/filetests/parser/tiny.clif b/cranelift/filetests/filetests/parser/tiny.clif index 60d19508f161..da6c66f77a67 100644 --- a/cranelift/filetests/filetests/parser/tiny.clif +++ b/cranelift/filetests/filetests/parser/tiny.clif @@ -29,36 +29,36 @@ block0: ; Polymorphic instructions with type suffix. function %bvalues() { block0: - v0 = bconst.b32 true - v1 = bconst.b8 false - v2 = bextend.b32 v1 + v0 = iconst.i32 -1 + v1 = iconst.i8 0 + v2 = sextend.i32 v1 v3 = bxor v0, v2 } ; sameln: function %bvalues() fast { ; nextln: block0: -; nextln: v0 = bconst.b32 true -; nextln: v1 = bconst.b8 false -; nextln: v2 = bextend.b32 v1 +; nextln: v0 = iconst.i32 -1 +; nextln: v1 = iconst.i8 0 +; nextln: v2 = sextend.i32 v1 ; nextln: v3 = bxor v0, v2 ; nextln: } ; Polymorphic instruction controlled by second operand. function %select() { -block0(v90: i32, v91: i32, v92: b1): +block0(v90: i32, v91: i32, v92: i8): v0 = select v92, v90, v91 } ; sameln: function %select() fast { -; nextln: block0(v90: i32, v91: i32, v92: b1): +; nextln: block0(v90: i32, v91: i32, v92: i8): ; nextln: v0 = select v92, v90, v91 ; nextln: } ; Polymorphic instruction controlled by third operand. function %selectif() system_v { -block0(v95: i32, v96: i32, v97: b1): +block0(v95: i32, v96: i32, v97: i8): v98 = selectif.i32 eq v97, v95, v96 } ; sameln: function %selectif() system_v { -; nextln: block0(v95: i32, v96: i32, v97: b1): +; nextln: block0(v95: i32, v96: i32, v97: i8): ; nextln: v98 = selectif.i32 eq v97, v95, v96 ; nextln: } diff --git a/cranelift/filetests/filetests/preopt/branch.clif b/cranelift/filetests/filetests/preopt/branch.clif index dc6f0acee2e9..a39810973c94 100644 --- a/cranelift/filetests/filetests/preopt/branch.clif +++ b/cranelift/filetests/filetests/preopt/branch.clif @@ -4,7 +4,7 @@ target x86_64 function %brz_fold() -> i32 { block0: - v0 = bconst.b1 false + v0 = iconst.i8 0 brz v0, block2 jump block1 block1: @@ -16,7 +16,7 @@ block2: } ; sameln: function %brz_fold ; nextln: block0: -; nextln: v0 = bconst.b1 false +; nextln: v0 = iconst.i8 0 ; nextln: jump block2 ; nextln: ; nextln: block1: @@ -30,7 +30,7 @@ block2: function %brnz_fold() -> i32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 brnz v0, block2 jump block1 block1: @@ -42,7 +42,7 @@ block2: } ; sameln: function %brnz_fold ; nextln: block0: -; nextln: v0 = bconst.b1 true +; nextln: v0 = iconst.i8 1 ; nextln: jump block2 ; nextln: ; nextln: block1: @@ -54,8 +54,8 @@ block2: ; nextln: return v2 ; nextln: } -function %brz_fold_param(b1) -> i32 { -block0(v0: b1): +function %brz_fold_param(i8) -> i32 { +block0(v0: i8): brz v0, block2 jump block1 block1: @@ -65,8 +65,8 @@ block2: v2 = iconst.i32 24 return v2 } -; sameln: function %brz_fold_param(b1) -> i32 fast { -; nextln: block0(v0: b1): +; sameln: function %brz_fold_param(i8) -> i32 fast { +; nextln: block0(v0: i8): ; nextln: brz v0, block2 ; nextln: jump block1 ; nextln: diff --git a/cranelift/filetests/filetests/runtests/bextend.clif b/cranelift/filetests/filetests/runtests/bextend.clif deleted file mode 100644 index d7bccf50bd23..000000000000 --- a/cranelift/filetests/filetests/runtests/bextend.clif +++ /dev/null @@ -1,89 +0,0 @@ -test interpret -test run -target aarch64 -target x86_64 -target s390x -target riscv64 - -function %bextend_b1_b8(b1) -> b8 { -block0(v0: b1): - v1 = 
bextend.b8 v0 - return v1 -} -; run: %bextend_b1_b8(true) == true -; run: %bextend_b1_b8(false) == false - -function %bextend_b1_b16(b1) -> b16 { -block0(v0: b1): - v1 = bextend.b16 v0 - return v1 -} -; run: %bextend_b1_b16(true) == true -; run: %bextend_b1_b16(false) == false - -function %bextend_b1_b32(b1) -> b32 { -block0(v0: b1): - v1 = bextend.b32 v0 - return v1 -} -; run: %bextend_b1_b32(true) == true -; run: %bextend_b1_b32(false) == false - -function %bextend_b1_b64(b1) -> b64 { -block0(v0: b1): - v1 = bextend.b64 v0 - return v1 -} -; run: %bextend_b1_b64(true) == true -; run: %bextend_b1_b64(false) == false - - -function %bextend_b8_b16(b8) -> b16 { -block0(v0: b8): - v1 = bextend.b16 v0 - return v1 -} -; run: %bextend_b8_b16(true) == true -; run: %bextend_b8_b16(false) == false - -function %bextend_b8_b32(b8) -> b32 { -block0(v0: b8): - v1 = bextend.b32 v0 - return v1 -} -; run: %bextend_b8_b32(true) == true -; run: %bextend_b8_b32(false) == false - -function %bextend_b8_b64(b8) -> b64 { -block0(v0: b8): - v1 = bextend.b64 v0 - return v1 -} -; run: %bextend_b8_b64(true) == true -; run: %bextend_b8_b64(false) == false - - -function %bextend_b16_b32(b16) -> b32 { -block0(v0: b16): - v1 = bextend.b32 v0 - return v1 -} -; run: %bextend_b16_b32(true) == true -; run: %bextend_b16_b32(false) == false - -function %bextend_b16_b64(b16) -> b64 { -block0(v0: b16): - v1 = bextend.b64 v0 - return v1 -} -; run: %bextend_b16_b64(true) == true -; run: %bextend_b16_b64(false) == false - - -function %bextend_b32_b64(b32) -> b64 { -block0(v0: b32): - v1 = bextend.b64 v0 - return v1 -} -; run: %bextend_b32_b64(true) == true -; run: %bextend_b32_b64(false) == false diff --git a/cranelift/filetests/filetests/runtests/bint.clif b/cranelift/filetests/filetests/runtests/bint.clif deleted file mode 100644 index 44dc94ba4cc7..000000000000 --- a/cranelift/filetests/filetests/runtests/bint.clif +++ /dev/null @@ -1,341 +0,0 @@ -test interpret -test run -target aarch64 -target s390x -target x86_64 -target riscv64 - -function %bint_b1_i8_true() -> i8 { -block0: - v0 = bconst.b1 true - v1 = bint.i8 v0 - return v1 -} -; run: %bint_b1_i8_true() == 1 - -function %bint_b1_i8_false() -> i8 { -block0: - v0 = bconst.b1 false - v1 = bint.i8 v0 - return v1 -} -; run: %bint_b1_i8_false() == 0 - -function %bint_b1_i16_true() -> i16 { -block0: - v0 = bconst.b1 true - v1 = bint.i16 v0 - return v1 -} -; run: %bint_b1_i16_true() == 1 - -function %bint_b1_i16_false() -> i16 { -block0: - v0 = bconst.b1 false - v1 = bint.i16 v0 - return v1 -} -; run: %bint_b1_i16_false() == 0 - -function %bint_b1_i32_true() -> i32 { -block0: - v0 = bconst.b1 true - v1 = bint.i32 v0 - return v1 -} -; run: %bint_b1_i32_true() == 1 - -function %bint_b1_i32_false() -> i32 { -block0: - v0 = bconst.b1 false - v1 = bint.i32 v0 - return v1 -} -; run: %bint_b1_i32_false() == 0 - -function %bint_b1_i64_true() -> i64 { -block0: - v0 = bconst.b1 true - v1 = bint.i64 v0 - return v1 -} -; run: %bint_b1_i64_true() == 1 - -function %bint_b1_i64_false() -> i64 { -block0: - v0 = bconst.b1 false - v1 = bint.i64 v0 - return v1 -} -; run: %bint_b1_i64_false() == 0 - - - - -function %bint_b8_i8_true() -> i8 { -block0: - v0 = bconst.b8 true - v1 = bint.i8 v0 - return v1 -} -; run: %bint_b8_i8_true() == 1 - -function %bint_b8_i8_false() -> i8 { -block0: - v0 = bconst.b8 false - v1 = bint.i8 v0 - return v1 -} -; run: %bint_b8_i8_false() == 0 - -function %bint_b8_i16_true() -> i16 { -block0: - v0 = bconst.b8 true - v1 = bint.i16 v0 - return v1 -} -; run: 
%bint_b8_i16_true() == 1 - -function %bint_b8_i16_false() -> i16 { -block0: - v0 = bconst.b8 false - v1 = bint.i16 v0 - return v1 -} -; run: %bint_b8_i16_false() == 0 - -function %bint_b8_i32_true() -> i32 { -block0: - v0 = bconst.b8 true - v1 = bint.i32 v0 - return v1 -} -; run: %bint_b8_i32_true() == 1 - -function %bint_b8_i32_false() -> i32 { -block0: - v0 = bconst.b8 false - v1 = bint.i32 v0 - return v1 -} -; run: %bint_b8_i32_false() == 0 - -function %bint_b8_i64_true() -> i64 { -block0: - v0 = bconst.b8 true - v1 = bint.i64 v0 - return v1 -} -; run: %bint_b8_i64_true() == 1 - -function %bint_b8_i64_false() -> i64 { -block0: - v0 = bconst.b8 false - v1 = bint.i64 v0 - return v1 -} -; run: %bint_b8_i64_false() == 0 - - - - - -function %bint_b16_i8_true() -> i8 { -block0: - v0 = bconst.b16 true - v1 = bint.i8 v0 - return v1 -} -; run: %bint_b16_i8_true() == 1 - -function %bint_b16_i8_false() -> i8 { -block0: - v0 = bconst.b16 false - v1 = bint.i8 v0 - return v1 -} -; run: %bint_b16_i8_false() == 0 - -function %bint_b16_i16_true() -> i16 { -block0: - v0 = bconst.b16 true - v1 = bint.i16 v0 - return v1 -} -; run: %bint_b16_i16_true() == 1 - -function %bint_b16_i16_false() -> i16 { -block0: - v0 = bconst.b16 false - v1 = bint.i16 v0 - return v1 -} -; run: %bint_b16_i16_false() == 0 - -function %bint_b16_i32_true() -> i32 { -block0: - v0 = bconst.b16 true - v1 = bint.i32 v0 - return v1 -} -; run: %bint_b16_i32_true() == 1 - -function %bint_b16_i32_false() -> i32 { -block0: - v0 = bconst.b16 false - v1 = bint.i32 v0 - return v1 -} -; run: %bint_b16_i32_false() == 0 - -function %bint_b16_i64_true() -> i64 { -block0: - v0 = bconst.b16 true - v1 = bint.i64 v0 - return v1 -} -; run: %bint_b16_i64_true() == 1 - -function %bint_b16_i64_false() -> i64 { -block0: - v0 = bconst.b16 false - v1 = bint.i64 v0 - return v1 -} -; run: %bint_b16_i64_false() == 0 - - - - -function %bint_b32_i8_true() -> i8 { -block0: - v0 = bconst.b32 true - v1 = bint.i8 v0 - return v1 -} -; run: %bint_b32_i8_true() == 1 - -function %bint_b32_i8_false() -> i8 { -block0: - v0 = bconst.b32 false - v1 = bint.i8 v0 - return v1 -} -; run: %bint_b32_i8_false() == 0 - -function %bint_b32_i16_true() -> i16 { -block0: - v0 = bconst.b32 true - v1 = bint.i16 v0 - return v1 -} -; run: %bint_b32_i16_true() == 1 - -function %bint_b32_i16_false() -> i16 { -block0: - v0 = bconst.b32 false - v1 = bint.i16 v0 - return v1 -} -; run: %bint_b32_i16_false() == 0 - -function %bint_b32_i32_true() -> i32 { -block0: - v0 = bconst.b32 true - v1 = bint.i32 v0 - return v1 -} -; run: %bint_b32_i32_true() == 1 - -function %bint_b32_i32_false() -> i32 { -block0: - v0 = bconst.b32 false - v1 = bint.i32 v0 - return v1 -} -; run: %bint_b32_i32_false() == 0 - -function %bint_b32_i64_true() -> i64 { -block0: - v0 = bconst.b32 true - v1 = bint.i64 v0 - return v1 -} -; run: %bint_b32_i64_true() == 1 - -function %bint_b32_i64_false() -> i64 { -block0: - v0 = bconst.b32 false - v1 = bint.i64 v0 - return v1 -} -; run: %bint_b32_i64_false() == 0 - - - - - - -function %bint_b64_i8_true() -> i8 { -block0: - v0 = bconst.b64 true - v1 = bint.i8 v0 - return v1 -} -; run: %bint_b64_i8_true() == 1 - -function %bint_b64_i8_false() -> i8 { -block0: - v0 = bconst.b64 false - v1 = bint.i8 v0 - return v1 -} -; run: %bint_b64_i8_false() == 0 - -function %bint_b64_i16_true() -> i16 { -block0: - v0 = bconst.b64 true - v1 = bint.i16 v0 - return v1 -} -; run: %bint_b64_i16_true() == 1 - -function %bint_b64_i16_false() -> i16 { -block0: - v0 = bconst.b64 false - v1 = bint.i16 v0 - 
return v1 -} -; run: %bint_b64_i16_false() == 0 - -function %bint_b64_i32_true() -> i32 { -block0: - v0 = bconst.b64 true - v1 = bint.i32 v0 - return v1 -} -; run: %bint_b64_i32_true() == 1 - -function %bint_b64_i32_false() -> i32 { -block0: - v0 = bconst.b64 false - v1 = bint.i32 v0 - return v1 -} -; run: %bint_b64_i32_false() == 0 - -function %bint_b64_i64_true() -> i64 { -block0: - v0 = bconst.b64 true - v1 = bint.i64 v0 - return v1 -} -; run: %bint_b64_i64_true() == 1 - -function %bint_b64_i64_false() -> i64 { -block0: - v0 = bconst.b64 false - v1 = bint.i64 v0 - return v1 -} -; run: %bint_b64_i64_false() == 0 diff --git a/cranelift/filetests/filetests/runtests/bitcast-ref64.clif b/cranelift/filetests/filetests/runtests/bitcast-ref64.clif index b104efc77b41..73d9a177437a 100644 --- a/cranelift/filetests/filetests/runtests/bitcast-ref64.clif +++ b/cranelift/filetests/filetests/runtests/bitcast-ref64.clif @@ -3,16 +3,16 @@ target aarch64 ; the interpreter, x86_64, and s390x do not support bitcasting to/from ; references -function %bitcast_ir64(i64) -> b1 { +function %bitcast_ir64(i64) -> i8 { block0(v0: i64): v1 = bitcast.r64 v0 v2 = is_null v1 return v2 } -; run: %bitcast_ir64(0) == true -; run: %bitcast_ir64(18446744073709551615) == false -; run: %bitcast_ir64(-1) == false -; run: %bitcast_ir64(127) == false +; run: %bitcast_ir64(0) == 1 +; run: %bitcast_ir64(18446744073709551615) == 0 +; run: %bitcast_ir64(-1) == 0 +; run: %bitcast_ir64(127) == 0 function %bitcast_ri64(i64) -> i64 { block0(v0: i64): diff --git a/cranelift/filetests/filetests/runtests/bitops.clif b/cranelift/filetests/filetests/runtests/bitops.clif index c72962da607b..fe7615afb4a6 100644 --- a/cranelift/filetests/filetests/runtests/bitops.clif +++ b/cranelift/filetests/filetests/runtests/bitops.clif @@ -5,10 +5,10 @@ target riscv64 target s390x has_mie2 ; target x86_64 TODO: Not yet implemented on x86_64 -function %bnot_band() -> b1 { +function %bnot_band() -> i8 { block0: - v1 = bconst.b1 false - v2 = bconst.b1 true + v1 = iconst.i8 0 + v2 = iconst.i8 1 v3 = bnot v1 v4 = band v3, v2 return v4 diff --git a/cranelift/filetests/filetests/runtests/bmask.clif b/cranelift/filetests/filetests/runtests/bmask.clif index 9cd9da682d2b..5e004108cf39 100644 --- a/cranelift/filetests/filetests/runtests/bmask.clif +++ b/cranelift/filetests/filetests/runtests/bmask.clif @@ -4,162 +4,130 @@ target aarch64 target s390x target riscv64 -function %bmask_b64_i64(b64) -> i64 { -block0(v0: b64): +function %bmask_i64_i64(i64) -> i64 { +block0(v0: i64): v1 = bmask.i64 v0 return v1 } -; run: %bmask_b64_i64(true) == -1 -; run: %bmask_b64_i64(false) == 0 +; run: %bmask_i64_i64(1) == -1 +; run: %bmask_i64_i64(0) == 0 -function %bmask_b64_i32(b64) -> i32 { -block0(v0: b64): +function %bmask_i64_i32(i64) -> i32 { +block0(v0: i64): v1 = bmask.i32 v0 return v1 } -; run: %bmask_b64_i32(true) == -1 -; run: %bmask_b64_i32(false) == 0 +; run: %bmask_i64_i32(1) == -1 +; run: %bmask_i64_i32(0) == 0 -function %bmask_b64_i16(b64) -> i16 { -block0(v0: b64): +function %bmask_i64_i16(i64) -> i16 { +block0(v0: i64): v1 = bmask.i16 v0 return v1 } -; run: %bmask_b64_i16(true) == -1 -; run: %bmask_b64_i16(false) == 0 +; run: %bmask_i64_i16(1) == -1 +; run: %bmask_i64_i16(0) == 0 -function %bmask_b64_i8(b64) -> i8 { -block0(v0: b64): +function %bmask_i64_i8(i64) -> i8 { +block0(v0: i64): v1 = bmask.i8 v0 return v1 } -; run: %bmask_b64_i8(true) == -1 -; run: %bmask_b64_i8(false) == 0 +; run: %bmask_i64_i8(1) == -1 +; run: %bmask_i64_i8(0) == 0 -function 
%bmask_b32_i64(b32) -> i64 { -block0(v0: b32): +function %bmask_i32_i64(i32) -> i64 { +block0(v0: i32): v1 = bmask.i64 v0 return v1 } -; run: %bmask_b32_i64(true) == -1 -; run: %bmask_b32_i64(false) == 0 +; run: %bmask_i32_i64(1) == -1 +; run: %bmask_i32_i64(0) == 0 -function %bmask_b32_i32(b32) -> i32 { -block0(v0: b32): +function %bmask_i32_i32(i32) -> i32 { +block0(v0: i32): v1 = bmask.i32 v0 return v1 } -; run: %bmask_b32_i32(true) == -1 -; run: %bmask_b32_i32(false) == 0 +; run: %bmask_i32_i32(1) == -1 +; run: %bmask_i32_i32(0) == 0 -function %bmask_b32_i16(b32) -> i16 { -block0(v0: b32): +function %bmask_i32_i16(i32) -> i16 { +block0(v0: i32): v1 = bmask.i16 v0 return v1 } -; run: %bmask_b32_i16(true) == -1 -; run: %bmask_b32_i16(false) == 0 +; run: %bmask_i32_i16(1) == -1 +; run: %bmask_i32_i16(0) == 0 -function %bmask_b32_i8(b32) -> i8 { -block0(v0: b32): +function %bmask_i32_i8(i32) -> i8 { +block0(v0: i32): v1 = bmask.i8 v0 return v1 } -; run: %bmask_b32_i8(true) == -1 -; run: %bmask_b32_i8(false) == 0 +; run: %bmask_i32_i8(1) == -1 +; run: %bmask_i32_i8(0) == 0 -function %bmask_b16_i64(b16) -> i64 { -block0(v0: b16): +function %bmask_i16_i64(i16) -> i64 { +block0(v0: i16): v1 = bmask.i64 v0 return v1 } -; run: %bmask_b16_i64(true) == -1 -; run: %bmask_b16_i64(false) == 0 +; run: %bmask_i16_i64(1) == -1 +; run: %bmask_i16_i64(0) == 0 -function %bmask_b16_i32(b16) -> i32 { -block0(v0: b16): +function %bmask_i16_i32(i16) -> i32 { +block0(v0: i16): v1 = bmask.i32 v0 return v1 } -; run: %bmask_b16_i32(true) == -1 -; run: %bmask_b16_i32(false) == 0 +; run: %bmask_i16_i32(1) == -1 +; run: %bmask_i16_i32(0) == 0 -function %bmask_b16_i16(b16) -> i16 { -block0(v0: b16): +function %bmask_i16_i16(i16) -> i16 { +block0(v0: i16): v1 = bmask.i16 v0 return v1 } -; run: %bmask_b16_i16(true) == -1 -; run: %bmask_b16_i16(false) == 0 +; run: %bmask_i16_i16(1) == -1 +; run: %bmask_i16_i16(0) == 0 -function %bmask_b16_i8(b16) -> i8 { -block0(v0: b16): +function %bmask_i16_i8(i16) -> i8 { +block0(v0: i16): v1 = bmask.i8 v0 return v1 } -; run: %bmask_b16_i8(true) == -1 -; run: %bmask_b16_i8(false) == 0 +; run: %bmask_i16_i8(1) == -1 +; run: %bmask_i16_i8(0) == 0 -function %bmask_b8_i64(b8) -> i64 { -block0(v0: b8): +function %bmask_i8_i64(i8) -> i64 { +block0(v0: i8): v1 = bmask.i64 v0 return v1 } -; run: %bmask_b8_i64(true) == -1 -; run: %bmask_b8_i64(false) == 0 +; run: %bmask_i8_i64(1) == -1 +; run: %bmask_i8_i64(0) == 0 -function %bmask_b8_i32(b8) -> i32 { -block0(v0: b8): +function %bmask_i8_i32(i8) -> i32 { +block0(v0: i8): v1 = bmask.i32 v0 return v1 } -; run: %bmask_b8_i32(true) == -1 -; run: %bmask_b8_i32(false) == 0 +; run: %bmask_i8_i32(1) == -1 +; run: %bmask_i8_i32(0) == 0 -function %bmask_b8_i16(b8) -> i16 { -block0(v0: b8): +function %bmask_i8_i16(i8) -> i16 { +block0(v0: i8): v1 = bmask.i16 v0 return v1 } -; run: %bmask_b8_i16(true) == -1 -; run: %bmask_b8_i16(false) == 0 +; run: %bmask_i8_i16(1) == -1 +; run: %bmask_i8_i16(0) == 0 -function %bmask_b8_i8(b8) -> i8 { -block0(v0: b8): +function %bmask_i8_i8(i8) -> i8 { +block0(v0: i8): v1 = bmask.i8 v0 return v1 } -; run: %bmask_b8_i8(true) == -1 -; run: %bmask_b8_i8(false) == 0 - -function %bmask_b1_i64(b1) -> i64 { -block0(v0: b1): - v1 = bmask.i64 v0 - return v1 -} -; run: %bmask_b1_i64(true) == -1 -; run: %bmask_b1_i64(false) == 0 - -function %bmask_b1_i32(b1) -> i32 { -block0(v0: b1): - v1 = bmask.i32 v0 - return v1 -} -; run: %bmask_b1_i32(true) == -1 -; run: %bmask_b1_i32(false) == 0 - -function %bmask_b1_i16(b1) -> i16 { 
-block0(v0: b1): - v1 = bmask.i16 v0 - return v1 -} -; run: %bmask_b1_i16(true) == -1 -; run: %bmask_b1_i16(false) == 0 - -function %bmask_b1_i8(b1) -> i8 { -block0(v0: b1): - v1 = bmask.i8 v0 - return v1 -} -; run: %bmask_b1_i8(true) == -1 -; run: %bmask_b1_i8(false) == 0 +; run: %bmask_i8_i8(1) == -1 +; run: %bmask_i8_i8(0) == 0 diff --git a/cranelift/filetests/filetests/runtests/bnot.clif b/cranelift/filetests/filetests/runtests/bnot.clif index e6e6a8674e21..a169bcd6bc3d 100644 --- a/cranelift/filetests/filetests/runtests/bnot.clif +++ b/cranelift/filetests/filetests/runtests/bnot.clif @@ -4,42 +4,34 @@ target x86_64 target aarch64 target s390x -function %bnot_b1(b1) -> b1 { -block0(v0: b1): - v1 = bnot.b1 v0 +function %bnot_i8(i8) -> i8 { +block0(v0: i8): + v1 = bnot.i8 v0 return v1 } -; run: %bnot_b1(false) == true -; run: %bnot_b1(true) == false +; run: %bnot_i8(0) == -1 +; run: %bnot_i8(1) == -2 -function %bnot_b8(b8) -> b8 { -block0(v0: b8): - v1 = bnot.b8 v0 +function %bnot_i16(i16) -> i16 { +block0(v0: i16): + v1 = bnot.i16 v0 return v1 } -; run: %bnot_b8(false) == true -; run: %bnot_b8(true) == false +; run: %bnot_i16(0) == -1 +; run: %bnot_i16(1) == -2 -function %bnot_b16(b16) -> b16 { -block0(v0: b16): - v1 = bnot.b16 v0 +function %bnot_i32(i32) -> i32 { +block0(v0: i32): + v1 = bnot.i32 v0 return v1 } -; run: %bnot_b16(false) == true -; run: %bnot_b16(true) == false +; run: %bnot_i32(0) == -1 +; run: %bnot_i32(1) == -2 -function %bnot_b32(b32) -> b32 { -block0(v0: b32): - v1 = bnot.b32 v0 +function %bnot_i64(i64) -> i64 { +block0(v0: i64): + v1 = bnot.i64 v0 return v1 } -; run: %bnot_b32(false) == true -; run: %bnot_b32(true) == false - -function %bnot_b64(b64) -> b64 { -block0(v0: b64): - v1 = bnot.b64 v0 - return v1 -} -; run: %bnot_b64(false) == true -; run: %bnot_b64(true) == false +; run: %bnot_i64(0) == -1 +; run: %bnot_i64(1) == -2 diff --git a/cranelift/filetests/filetests/runtests/br.clif b/cranelift/filetests/filetests/runtests/br.clif index 2fac5b5ff7cb..4fcb360f96b6 100644 --- a/cranelift/filetests/filetests/runtests/br.clif +++ b/cranelift/filetests/filetests/runtests/br.clif @@ -5,188 +5,154 @@ target s390x target x86_64 target riscv64 -function %jump() -> b1 { +function %jump() -> i8 { block0: jump block2 block1: - v0 = bconst.b1 false + v0 = iconst.i8 0 return v0 block2: - v1 = bconst.b1 true + v1 = iconst.i8 1 return v1 } -; run: %jump() == true +; run: %jump() == 1 -function %brz_i64(i64) -> b1 { +function %brz_i64(i64) -> i8 { block0(v0: i64): brz v0, block1 jump block2 block1: - v1 = bconst.b1 true + v1 = iconst.i8 1 return v1 block2: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 } -; run: %brz_i64(0) == true -; run: %brz_i64(1) == false -; run: %brz_i64(-1) == false +; run: %brz_i64(0) == 1 +; run: %brz_i64(1) == 0 +; run: %brz_i64(-1) == 0 -function %brz_i32(i32) -> b1 { +function %brz_i32(i32) -> i8 { block0(v0: i32): brz v0, block1 jump block2 block1: - v1 = bconst.b1 true + v1 = iconst.i8 1 return v1 block2: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 } -; run: %brz_i32(0) == true -; run: %brz_i32(1) == false -; run: %brz_i32(-1) == false +; run: %brz_i32(0) == 1 +; run: %brz_i32(1) == 0 +; run: %brz_i32(-1) == 0 -function %brz_i16(i16) -> b1 { +function %brz_i16(i16) -> i8 { block0(v0: i16): brz v0, block1 jump block2 block1: - v1 = bconst.b1 true + v1 = iconst.i8 1 return v1 block2: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 } -; run: %brz_i16(0) == true -; run: %brz_i16(1) == false -; run: %brz_i16(-1) == false - -function 
%brz_i8(i8) -> b1 { -block0(v0: i8): - brz v0, block1 - jump block2 - -block1: - v1 = bconst.b1 true - return v1 - -block2: - v2 = bconst.b1 false - return v2 -} -; run: %brz_i8(0) == true -; run: %brz_i8(1) == false -; run: %brz_i8(-1) == false +; run: %brz_i16(0) == 1 +; run: %brz_i16(1) == 0 +; run: %brz_i16(-1) == 0 -function %brz_b1(b1) -> b1 { -block0(v1: b1): +function %brz_i8(i8) -> i8 { +block0(v1: i8): brz v1, block1 jump block2 block1: - v2 = bconst.b1 true + v2 = iconst.i8 1 return v2 block2: - v3 = bconst.b1 false + v3 = iconst.i8 0 return v3 } -; run: %brz_b1(true) == false -; run: %brz_b1(false) == true +; run: %brz_i8(1) == 0 +; run: %brz_i8(0) == 1 -function %brnz_i64(i64) -> b1 { +function %brnz_i64(i64) -> i8 { block0(v0: i64): brnz v0, block1 jump block2 block1: - v1 = bconst.b1 true + v1 = iconst.i8 1 return v1 block2: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 } -; run: %brnz_i64(0) == false -; run: %brnz_i64(1) == true -; run: %brnz_i64(-1) == true +; run: %brnz_i64(0) == 0 +; run: %brnz_i64(1) == 1 +; run: %brnz_i64(-1) == 1 -function %brnz_i32(i32) -> b1 { +function %brnz_i32(i32) -> i8 { block0(v0: i32): brnz v0, block1 jump block2 block1: - v1 = bconst.b1 true + v1 = iconst.i8 1 return v1 block2: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 } -; run: %brnz_i32(0) == false -; run: %brnz_i32(1) == true -; run: %brnz_i32(-1) == true +; run: %brnz_i32(0) == 0 +; run: %brnz_i32(1) == 1 +; run: %brnz_i32(-1) == 1 -function %brnz_i16(i16) -> b1 { +function %brnz_i16(i16) -> i8 { block0(v0: i16): brnz v0, block1 jump block2 block1: - v1 = bconst.b1 true + v1 = iconst.i8 1 return v1 block2: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 } -; run: %brnz_i16(0) == false -; run: %brnz_i16(1) == true -; run: %brnz_i16(-1) == true +; run: %brnz_i16(0) == 0 +; run: %brnz_i16(1) == 1 +; run: %brnz_i16(-1) == 1 -function %brnz_i8(i8) -> b1 { +function %brnz_i8(i8) -> i8 { block0(v0: i8): brnz v0, block1 jump block2 block1: - v1 = bconst.b1 true + v1 = iconst.i8 1 return v1 block2: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 } -; run: %brnz_i8(0) == false -; run: %brnz_i8(1) == true -; run: %brnz_i8(-1) == true - - -function %brnz_b1(b1) -> b1 { -block0(v1: b1): - brnz v1, block1 - jump block2 - -block1: - v2 = bconst.b1 true - return v2 - -block2: - v3 = bconst.b1 false - return v3 -} -; run: %brnz_b1(true) == true -; run: %brnz_b1(false) == false +; run: %brnz_i8(0) == 0 +; run: %brnz_i8(1) == 1 +; run: %brnz_i8(-1) == 1 diff --git a/cranelift/filetests/filetests/runtests/br_icmp.clif b/cranelift/filetests/filetests/runtests/br_icmp.clif index 5443833b355f..bb1dd7b563a3 100644 --- a/cranelift/filetests/filetests/runtests/br_icmp.clif +++ b/cranelift/filetests/filetests/runtests/br_icmp.clif @@ -5,763 +5,763 @@ target s390x target x86_64 target riscv64 -function %bricmp_eq_i64(i64, i64) -> b1 { +function %bricmp_eq_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): br_icmp.i64 eq v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_eq_i64(0, 0) == true -; run: %bricmp_eq_i64(0, 1) == false -; run: %bricmp_eq_i64(1, 0) == false -; run: %bricmp_eq_i64(0xC0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE) == false +; run: %bricmp_eq_i64(0, 0) == 1 +; run: %bricmp_eq_i64(0, 1) == 0 +; run: %bricmp_eq_i64(1, 0) == 0 +; run: %bricmp_eq_i64(0xC0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE) == 0 -function %bricmp_eq_i32(i32, i32) -> b1 { +function %bricmp_eq_i32(i32, i32) 
-> i8 { block0(v0: i32, v1: i32): br_icmp.i32 eq v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_eq_i32(0, 0) == true -; run: %bricmp_eq_i32(0, 1) == false -; run: %bricmp_eq_i32(1, 0) == false -; run: %bricmp_eq_i32(0xC0FFEEEE, 0xDECAFFFF) == false +; run: %bricmp_eq_i32(0, 0) == 1 +; run: %bricmp_eq_i32(0, 1) == 0 +; run: %bricmp_eq_i32(1, 0) == 0 +; run: %bricmp_eq_i32(0xC0FFEEEE, 0xDECAFFFF) == 0 -function %bricmp_eq_i16(i16, i16) -> b1 { +function %bricmp_eq_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): br_icmp.i16 eq v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_eq_i16(0, 0) == true -; run: %bricmp_eq_i16(0, 1) == false -; run: %bricmp_eq_i16(1, 0) == false -; run: %bricmp_eq_i16(0xC0FF, 0xDECA) == false +; run: %bricmp_eq_i16(0, 0) == 1 +; run: %bricmp_eq_i16(0, 1) == 0 +; run: %bricmp_eq_i16(1, 0) == 0 +; run: %bricmp_eq_i16(0xC0FF, 0xDECA) == 0 -function %bricmp_eq_i8(i8, i8) -> b1 { +function %bricmp_eq_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): br_icmp.i8 eq v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_eq_i8(0, 0) == true -; run: %bricmp_eq_i8(0, 1) == false -; run: %bricmp_eq_i8(1, 0) == false -; run: %bricmp_eq_i8(0xC0, 0xDE) == false +; run: %bricmp_eq_i8(0, 0) == 1 +; run: %bricmp_eq_i8(0, 1) == 0 +; run: %bricmp_eq_i8(1, 0) == 0 +; run: %bricmp_eq_i8(0xC0, 0xDE) == 0 -function %bricmp_ne_i64(i64, i64) -> b1 { +function %bricmp_ne_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): br_icmp.i64 ne v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ne_i64(0, 0) == false -; run: %bricmp_ne_i64(0, 1) == true -; run: %bricmp_ne_i64(1, 0) == true -; run: %bricmp_ne_i64(0xC0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE) == true +; run: %bricmp_ne_i64(0, 0) == 0 +; run: %bricmp_ne_i64(0, 1) == 1 +; run: %bricmp_ne_i64(1, 0) == 1 +; run: %bricmp_ne_i64(0xC0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE) == 1 -function %bricmp_ne_i32(i32, i32) -> b1 { +function %bricmp_ne_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): br_icmp.i32 ne v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ne_i32(0, 0) == false -; run: %bricmp_ne_i32(0, 1) == true -; run: %bricmp_ne_i32(1, 0) == true -; run: %bricmp_ne_i32(0xC0FFEEEE, 0xDECAFFFF) == true +; run: %bricmp_ne_i32(0, 0) == 0 +; run: %bricmp_ne_i32(0, 1) == 1 +; run: %bricmp_ne_i32(1, 0) == 1 +; run: %bricmp_ne_i32(0xC0FFEEEE, 0xDECAFFFF) == 1 -function %bricmp_ne_i16(i16, i16) -> b1 { +function %bricmp_ne_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): br_icmp.i16 ne v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ne_i16(0, 0) == false -; run: %bricmp_ne_i16(0, 1) == true -; run: %bricmp_ne_i16(1, 0) == true -; run: %bricmp_ne_i16(0xC0FF, 0xDECA) == true +; run: %bricmp_ne_i16(0, 0) == 0 +; run: %bricmp_ne_i16(0, 1) == 1 +; run: %bricmp_ne_i16(1, 0) == 1 +; run: %bricmp_ne_i16(0xC0FF, 0xDECA) == 1 -function %bricmp_ne_i8(i8, i8) -> b1 { +function %bricmp_ne_i8(i8, i8) -> i8 { 
block0(v0: i8, v1: i8): br_icmp.i8 ne v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ne_i8(0, 0) == false -; run: %bricmp_ne_i8(0, 1) == true -; run: %bricmp_ne_i8(1, 0) == true -; run: %bricmp_ne_i8(0xC0, 0xDE) == true +; run: %bricmp_ne_i8(0, 0) == 0 +; run: %bricmp_ne_i8(0, 1) == 1 +; run: %bricmp_ne_i8(1, 0) == 1 +; run: %bricmp_ne_i8(0xC0, 0xDE) == 1 -function %bricmp_slt_i64(i64, i64) -> b1 { +function %bricmp_slt_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): br_icmp.i64 slt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_slt_i64(0, 0) == false -; run: %bricmp_slt_i64(0, 1) == true -; run: %bricmp_slt_i64(1, 0) == false -; run: %bricmp_slt_i64(0, -1) == false -; run: %bricmp_slt_i64(-1, 0) == true +; run: %bricmp_slt_i64(0, 0) == 0 +; run: %bricmp_slt_i64(0, 1) == 1 +; run: %bricmp_slt_i64(1, 0) == 0 +; run: %bricmp_slt_i64(0, -1) == 0 +; run: %bricmp_slt_i64(-1, 0) == 1 -function %bricmp_slt_i32(i32, i32) -> b1 { +function %bricmp_slt_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): br_icmp.i32 slt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_slt_i32(0, 0) == false -; run: %bricmp_slt_i32(0, 1) == true -; run: %bricmp_slt_i32(1, 0) == false -; run: %bricmp_slt_i32(0, -1) == false -; run: %bricmp_slt_i32(-1, 0) == true +; run: %bricmp_slt_i32(0, 0) == 0 +; run: %bricmp_slt_i32(0, 1) == 1 +; run: %bricmp_slt_i32(1, 0) == 0 +; run: %bricmp_slt_i32(0, -1) == 0 +; run: %bricmp_slt_i32(-1, 0) == 1 -function %bricmp_slt_i16(i16, i16) -> b1 { +function %bricmp_slt_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): br_icmp.i16 slt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_slt_i16(0, 0) == false -; run: %bricmp_slt_i16(0, 1) == true -; run: %bricmp_slt_i16(1, 0) == false -; run: %bricmp_slt_i16(0, -1) == false -; run: %bricmp_slt_i16(-1, 0) == true +; run: %bricmp_slt_i16(0, 0) == 0 +; run: %bricmp_slt_i16(0, 1) == 1 +; run: %bricmp_slt_i16(1, 0) == 0 +; run: %bricmp_slt_i16(0, -1) == 0 +; run: %bricmp_slt_i16(-1, 0) == 1 -function %bricmp_slt_i8(i8, i8) -> b1 { +function %bricmp_slt_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): br_icmp.i8 slt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_slt_i8(0, 0) == false -; run: %bricmp_slt_i8(0, 1) == true -; run: %bricmp_slt_i8(1, 0) == false -; run: %bricmp_slt_i8(0, -1) == false -; run: %bricmp_slt_i8(-1, 0) == true +; run: %bricmp_slt_i8(0, 0) == 0 +; run: %bricmp_slt_i8(0, 1) == 1 +; run: %bricmp_slt_i8(1, 0) == 0 +; run: %bricmp_slt_i8(0, -1) == 0 +; run: %bricmp_slt_i8(-1, 0) == 1 -function %bricmp_ult_i64(i64, i64) -> b1 { +function %bricmp_ult_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): br_icmp.i64 ult v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ult_i64(0, 0) == false -; run: %bricmp_ult_i64(0, 1) == true -; run: %bricmp_ult_i64(1, 0) == false -; run: %bricmp_ult_i64(0, -1) == true -; run: %bricmp_ult_i64(-1, 0) == false +; run: 
%bricmp_ult_i64(0, 0) == 0 +; run: %bricmp_ult_i64(0, 1) == 1 +; run: %bricmp_ult_i64(1, 0) == 0 +; run: %bricmp_ult_i64(0, -1) == 1 +; run: %bricmp_ult_i64(-1, 0) == 0 -function %bricmp_ult_i32(i32, i32) -> b1 { +function %bricmp_ult_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): br_icmp.i32 ult v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ult_i32(0, 0) == false -; run: %bricmp_ult_i32(0, 1) == true -; run: %bricmp_ult_i32(1, 0) == false -; run: %bricmp_ult_i32(0, -1) == true -; run: %bricmp_ult_i32(-1, 0) == false +; run: %bricmp_ult_i32(0, 0) == 0 +; run: %bricmp_ult_i32(0, 1) == 1 +; run: %bricmp_ult_i32(1, 0) == 0 +; run: %bricmp_ult_i32(0, -1) == 1 +; run: %bricmp_ult_i32(-1, 0) == 0 -function %bricmp_ult_i16(i16, i16) -> b1 { +function %bricmp_ult_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): br_icmp.i16 ult v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ult_i16(0, 0) == false -; run: %bricmp_ult_i16(0, 1) == true -; run: %bricmp_ult_i16(1, 0) == false -; run: %bricmp_ult_i16(0, -1) == true -; run: %bricmp_ult_i16(-1, 0) == false +; run: %bricmp_ult_i16(0, 0) == 0 +; run: %bricmp_ult_i16(0, 1) == 1 +; run: %bricmp_ult_i16(1, 0) == 0 +; run: %bricmp_ult_i16(0, -1) == 1 +; run: %bricmp_ult_i16(-1, 0) == 0 -function %bricmp_ult_i8(i8, i8) -> b1 { +function %bricmp_ult_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): br_icmp.i8 ult v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ult_i8(0, 0) == false -; run: %bricmp_ult_i8(0, 1) == true -; run: %bricmp_ult_i8(1, 0) == false -; run: %bricmp_ult_i8(0, -1) == true -; run: %bricmp_ult_i8(-1, 0) == false +; run: %bricmp_ult_i8(0, 0) == 0 +; run: %bricmp_ult_i8(0, 1) == 1 +; run: %bricmp_ult_i8(1, 0) == 0 +; run: %bricmp_ult_i8(0, -1) == 1 +; run: %bricmp_ult_i8(-1, 0) == 0 -function %bricmp_sle_i64(i64, i64) -> b1 { +function %bricmp_sle_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): br_icmp.i64 sle v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_sle_i64(0, 0) == true -; run: %bricmp_sle_i64(0, 1) == true -; run: %bricmp_sle_i64(1, 0) == false -; run: %bricmp_sle_i64(0, -1) == false -; run: %bricmp_sle_i64(-1, 0) == true +; run: %bricmp_sle_i64(0, 0) == 1 +; run: %bricmp_sle_i64(0, 1) == 1 +; run: %bricmp_sle_i64(1, 0) == 0 +; run: %bricmp_sle_i64(0, -1) == 0 +; run: %bricmp_sle_i64(-1, 0) == 1 -function %bricmp_sle_i32(i32, i32) -> b1 { +function %bricmp_sle_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): br_icmp.i32 sle v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_sle_i32(0, 0) == true -; run: %bricmp_sle_i32(0, 1) == true -; run: %bricmp_sle_i32(1, 0) == false -; run: %bricmp_sle_i32(0, -1) == false -; run: %bricmp_sle_i32(-1, 0) == true +; run: %bricmp_sle_i32(0, 0) == 1 +; run: %bricmp_sle_i32(0, 1) == 1 +; run: %bricmp_sle_i32(1, 0) == 0 +; run: %bricmp_sle_i32(0, -1) == 0 +; run: %bricmp_sle_i32(-1, 0) == 1 -function %bricmp_sle_i16(i16, i16) -> b1 { +function %bricmp_sle_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): br_icmp.i16 sle v0, v1, block2 jump block1 
block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_sle_i16(0, 0) == true -; run: %bricmp_sle_i16(0, 1) == true -; run: %bricmp_sle_i16(1, 0) == false -; run: %bricmp_sle_i16(0, -1) == false -; run: %bricmp_sle_i16(-1, 0) == true +; run: %bricmp_sle_i16(0, 0) == 1 +; run: %bricmp_sle_i16(0, 1) == 1 +; run: %bricmp_sle_i16(1, 0) == 0 +; run: %bricmp_sle_i16(0, -1) == 0 +; run: %bricmp_sle_i16(-1, 0) == 1 -function %bricmp_sle_i8(i8, i8) -> b1 { +function %bricmp_sle_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): br_icmp.i8 sle v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_sle_i8(0, 0) == true -; run: %bricmp_sle_i8(0, 1) == true -; run: %bricmp_sle_i8(1, 0) == false -; run: %bricmp_sle_i8(0, -1) == false -; run: %bricmp_sle_i8(-1, 0) == true +; run: %bricmp_sle_i8(0, 0) == 1 +; run: %bricmp_sle_i8(0, 1) == 1 +; run: %bricmp_sle_i8(1, 0) == 0 +; run: %bricmp_sle_i8(0, -1) == 0 +; run: %bricmp_sle_i8(-1, 0) == 1 -function %bricmp_ule_i64(i64, i64) -> b1 { +function %bricmp_ule_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): br_icmp.i64 ule v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ule_i64(0, 0) == true -; run: %bricmp_ule_i64(0, 1) == true -; run: %bricmp_ule_i64(1, 0) == false -; run: %bricmp_ule_i64(0, -1) == true -; run: %bricmp_ule_i64(-1, 0) == false +; run: %bricmp_ule_i64(0, 0) == 1 +; run: %bricmp_ule_i64(0, 1) == 1 +; run: %bricmp_ule_i64(1, 0) == 0 +; run: %bricmp_ule_i64(0, -1) == 1 +; run: %bricmp_ule_i64(-1, 0) == 0 -function %bricmp_ule_i32(i32, i32) -> b1 { +function %bricmp_ule_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): br_icmp.i32 ule v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ule_i32(0, 0) == true -; run: %bricmp_ule_i32(0, 1) == true -; run: %bricmp_ule_i32(1, 0) == false -; run: %bricmp_ule_i32(0, -1) == true -; run: %bricmp_ule_i32(-1, 0) == false +; run: %bricmp_ule_i32(0, 0) == 1 +; run: %bricmp_ule_i32(0, 1) == 1 +; run: %bricmp_ule_i32(1, 0) == 0 +; run: %bricmp_ule_i32(0, -1) == 1 +; run: %bricmp_ule_i32(-1, 0) == 0 -function %bricmp_ule_i16(i16, i16) -> b1 { +function %bricmp_ule_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): br_icmp.i16 ule v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ule_i16(0, 0) == true -; run: %bricmp_ule_i16(0, 1) == true -; run: %bricmp_ule_i16(1, 0) == false -; run: %bricmp_ule_i16(0, -1) == true -; run: %bricmp_ule_i16(-1, 0) == false +; run: %bricmp_ule_i16(0, 0) == 1 +; run: %bricmp_ule_i16(0, 1) == 1 +; run: %bricmp_ule_i16(1, 0) == 0 +; run: %bricmp_ule_i16(0, -1) == 1 +; run: %bricmp_ule_i16(-1, 0) == 0 -function %bricmp_ule_i8(i8, i8) -> b1 { +function %bricmp_ule_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): br_icmp.i8 ule v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ule_i8(0, 0) == true -; run: %bricmp_ule_i8(0, 1) == true -; run: %bricmp_ule_i8(1, 0) == false -; run: %bricmp_ule_i8(0, -1) == true -; run: %bricmp_ule_i8(-1, 0) == false +; run: %bricmp_ule_i8(0, 
0) == 1 +; run: %bricmp_ule_i8(0, 1) == 1 +; run: %bricmp_ule_i8(1, 0) == 0 +; run: %bricmp_ule_i8(0, -1) == 1 +; run: %bricmp_ule_i8(-1, 0) == 0 -function %bricmp_sgt_i64(i64, i64) -> b1 { +function %bricmp_sgt_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): br_icmp.i64 sgt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_sgt_i64(0, 0) == false -; run: %bricmp_sgt_i64(0, 1) == false -; run: %bricmp_sgt_i64(1, 0) == true -; run: %bricmp_sgt_i64(0, -1) == true -; run: %bricmp_sgt_i64(-1, 0) == false +; run: %bricmp_sgt_i64(0, 0) == 0 +; run: %bricmp_sgt_i64(0, 1) == 0 +; run: %bricmp_sgt_i64(1, 0) == 1 +; run: %bricmp_sgt_i64(0, -1) == 1 +; run: %bricmp_sgt_i64(-1, 0) == 0 -function %bricmp_sgt_i32(i32, i32) -> b1 { +function %bricmp_sgt_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): br_icmp.i32 sgt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_sgt_i32(0, 0) == false -; run: %bricmp_sgt_i32(0, 1) == false -; run: %bricmp_sgt_i32(1, 0) == true -; run: %bricmp_sgt_i32(0, -1) == true -; run: %bricmp_sgt_i32(-1, 0) == false +; run: %bricmp_sgt_i32(0, 0) == 0 +; run: %bricmp_sgt_i32(0, 1) == 0 +; run: %bricmp_sgt_i32(1, 0) == 1 +; run: %bricmp_sgt_i32(0, -1) == 1 +; run: %bricmp_sgt_i32(-1, 0) == 0 -function %bricmp_sgt_i16(i16, i16) -> b1 { +function %bricmp_sgt_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): br_icmp.i16 sgt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_sgt_i16(0, 0) == false -; run: %bricmp_sgt_i16(0, 1) == false -; run: %bricmp_sgt_i16(1, 0) == true -; run: %bricmp_sgt_i16(0, -1) == true -; run: %bricmp_sgt_i16(-1, 0) == false +; run: %bricmp_sgt_i16(0, 0) == 0 +; run: %bricmp_sgt_i16(0, 1) == 0 +; run: %bricmp_sgt_i16(1, 0) == 1 +; run: %bricmp_sgt_i16(0, -1) == 1 +; run: %bricmp_sgt_i16(-1, 0) == 0 -function %bricmp_sgt_i8(i8, i8) -> b1 { +function %bricmp_sgt_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): br_icmp.i8 sgt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_sgt_i8(0, 0) == false -; run: %bricmp_sgt_i8(0, 1) == false -; run: %bricmp_sgt_i8(1, 0) == true -; run: %bricmp_sgt_i8(0, -1) == true -; run: %bricmp_sgt_i8(-1, 0) == false +; run: %bricmp_sgt_i8(0, 0) == 0 +; run: %bricmp_sgt_i8(0, 1) == 0 +; run: %bricmp_sgt_i8(1, 0) == 1 +; run: %bricmp_sgt_i8(0, -1) == 1 +; run: %bricmp_sgt_i8(-1, 0) == 0 -function %bricmp_ugt_i64(i64, i64) -> b1 { +function %bricmp_ugt_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): br_icmp.i64 ugt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ugt_i64(0, 0) == false -; run: %bricmp_ugt_i64(0, 1) == false -; run: %bricmp_ugt_i64(1, 0) == true -; run: %bricmp_ugt_i64(0, -1) == false -; run: %bricmp_ugt_i64(-1, 0) == true +; run: %bricmp_ugt_i64(0, 0) == 0 +; run: %bricmp_ugt_i64(0, 1) == 0 +; run: %bricmp_ugt_i64(1, 0) == 1 +; run: %bricmp_ugt_i64(0, -1) == 0 +; run: %bricmp_ugt_i64(-1, 0) == 1 -function %bricmp_ugt_i32(i32, i32) -> b1 { +function %bricmp_ugt_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): br_icmp.i32 ugt v0, v1, block2 jump block1 block1: - v2 = 
bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ugt_i32(0, 0) == false -; run: %bricmp_ugt_i32(0, 1) == false -; run: %bricmp_ugt_i32(1, 0) == true -; run: %bricmp_ugt_i32(0, -1) == false -; run: %bricmp_ugt_i32(-1, 0) == true +; run: %bricmp_ugt_i32(0, 0) == 0 +; run: %bricmp_ugt_i32(0, 1) == 0 +; run: %bricmp_ugt_i32(1, 0) == 1 +; run: %bricmp_ugt_i32(0, -1) == 0 +; run: %bricmp_ugt_i32(-1, 0) == 1 -function %bricmp_ugt_i16(i16, i16) -> b1 { +function %bricmp_ugt_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): br_icmp.i16 ugt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ugt_i16(0, 0) == false -; run: %bricmp_ugt_i16(0, 1) == false -; run: %bricmp_ugt_i16(1, 0) == true -; run: %bricmp_ugt_i16(0, -1) == false -; run: %bricmp_ugt_i16(-1, 0) == true +; run: %bricmp_ugt_i16(0, 0) == 0 +; run: %bricmp_ugt_i16(0, 1) == 0 +; run: %bricmp_ugt_i16(1, 0) == 1 +; run: %bricmp_ugt_i16(0, -1) == 0 +; run: %bricmp_ugt_i16(-1, 0) == 1 -function %bricmp_ugt_i8(i8, i8) -> b1 { +function %bricmp_ugt_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): br_icmp.i8 ugt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_ugt_i8(0, 0) == false -; run: %bricmp_ugt_i8(0, 1) == false -; run: %bricmp_ugt_i8(1, 0) == true -; run: %bricmp_ugt_i8(0, -1) == false -; run: %bricmp_ugt_i8(-1, 0) == true +; run: %bricmp_ugt_i8(0, 0) == 0 +; run: %bricmp_ugt_i8(0, 1) == 0 +; run: %bricmp_ugt_i8(1, 0) == 1 +; run: %bricmp_ugt_i8(0, -1) == 0 +; run: %bricmp_ugt_i8(-1, 0) == 1 -function %bricmp_sge_i64(i64, i64) -> b1 { +function %bricmp_sge_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): br_icmp.i64 sge v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_sge_i64(0, 0) == true -; run: %bricmp_sge_i64(0, 1) == false -; run: %bricmp_sge_i64(1, 0) == true -; run: %bricmp_sge_i64(0, -1) == true -; run: %bricmp_sge_i64(-1, 0) == false +; run: %bricmp_sge_i64(0, 0) == 1 +; run: %bricmp_sge_i64(0, 1) == 0 +; run: %bricmp_sge_i64(1, 0) == 1 +; run: %bricmp_sge_i64(0, -1) == 1 +; run: %bricmp_sge_i64(-1, 0) == 0 -function %bricmp_sge_i32(i32, i32) -> b1 { +function %bricmp_sge_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): br_icmp.i32 sge v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_sge_i32(0, 0) == true -; run: %bricmp_sge_i32(0, 1) == false -; run: %bricmp_sge_i32(1, 0) == true -; run: %bricmp_sge_i32(0, -1) == true -; run: %bricmp_sge_i32(-1, 0) == false +; run: %bricmp_sge_i32(0, 0) == 1 +; run: %bricmp_sge_i32(0, 1) == 0 +; run: %bricmp_sge_i32(1, 0) == 1 +; run: %bricmp_sge_i32(0, -1) == 1 +; run: %bricmp_sge_i32(-1, 0) == 0 -function %bricmp_sge_i16(i16, i16) -> b1 { +function %bricmp_sge_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): br_icmp.i16 sge v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_sge_i16(0, 0) == true -; run: %bricmp_sge_i16(0, 1) == false -; run: %bricmp_sge_i16(1, 0) == true -; run: %bricmp_sge_i16(0, -1) == true -; run: %bricmp_sge_i16(-1, 0) == false +; run: 
%bricmp_sge_i16(0, 0) == 1 +; run: %bricmp_sge_i16(0, 1) == 0 +; run: %bricmp_sge_i16(1, 0) == 1 +; run: %bricmp_sge_i16(0, -1) == 1 +; run: %bricmp_sge_i16(-1, 0) == 0 -function %bricmp_sge_i8(i8, i8) -> b1 { +function %bricmp_sge_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): br_icmp.i8 sge v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_sge_i8(0, 0) == true -; run: %bricmp_sge_i8(0, 1) == false -; run: %bricmp_sge_i8(1, 0) == true -; run: %bricmp_sge_i8(0, -1) == true -; run: %bricmp_sge_i8(-1, 0) == false +; run: %bricmp_sge_i8(0, 0) == 1 +; run: %bricmp_sge_i8(0, 1) == 0 +; run: %bricmp_sge_i8(1, 0) == 1 +; run: %bricmp_sge_i8(0, -1) == 1 +; run: %bricmp_sge_i8(-1, 0) == 0 -function %bricmp_uge_i64(i64, i64) -> b1 { +function %bricmp_uge_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): br_icmp.i64 uge v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_uge_i64(0, 0) == true -; run: %bricmp_uge_i64(0, 1) == false -; run: %bricmp_uge_i64(1, 0) == true -; run: %bricmp_uge_i64(0, -1) == false -; run: %bricmp_uge_i64(-1, 0) == true +; run: %bricmp_uge_i64(0, 0) == 1 +; run: %bricmp_uge_i64(0, 1) == 0 +; run: %bricmp_uge_i64(1, 0) == 1 +; run: %bricmp_uge_i64(0, -1) == 0 +; run: %bricmp_uge_i64(-1, 0) == 1 -function %bricmp_uge_i32(i32, i32) -> b1 { +function %bricmp_uge_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): br_icmp.i32 uge v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_uge_i32(0, 0) == true -; run: %bricmp_uge_i32(0, 1) == false -; run: %bricmp_uge_i32(1, 0) == true -; run: %bricmp_uge_i32(0, -1) == false -; run: %bricmp_uge_i32(-1, 0) == true +; run: %bricmp_uge_i32(0, 0) == 1 +; run: %bricmp_uge_i32(0, 1) == 0 +; run: %bricmp_uge_i32(1, 0) == 1 +; run: %bricmp_uge_i32(0, -1) == 0 +; run: %bricmp_uge_i32(-1, 0) == 1 -function %bricmp_uge_i16(i16, i16) -> b1 { +function %bricmp_uge_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): br_icmp.i16 uge v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_uge_i16(0, 0) == true -; run: %bricmp_uge_i16(0, 1) == false -; run: %bricmp_uge_i16(1, 0) == true -; run: %bricmp_uge_i16(0, -1) == false -; run: %bricmp_uge_i16(-1, 0) == true +; run: %bricmp_uge_i16(0, 0) == 1 +; run: %bricmp_uge_i16(0, 1) == 0 +; run: %bricmp_uge_i16(1, 0) == 1 +; run: %bricmp_uge_i16(0, -1) == 0 +; run: %bricmp_uge_i16(-1, 0) == 1 -function %bricmp_uge_i8(i8, i8) -> b1 { +function %bricmp_uge_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): br_icmp.i8 uge v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %bricmp_uge_i8(0, 0) == true -; run: %bricmp_uge_i8(0, 1) == false -; run: %bricmp_uge_i8(1, 0) == true -; run: %bricmp_uge_i8(0, -1) == false -; run: %bricmp_uge_i8(-1, 0) == true +; run: %bricmp_uge_i8(0, 0) == 1 +; run: %bricmp_uge_i8(0, 1) == 0 +; run: %bricmp_uge_i8(1, 0) == 1 +; run: %bricmp_uge_i8(0, -1) == 0 +; run: %bricmp_uge_i8(-1, 0) == 1 diff --git a/cranelift/filetests/filetests/runtests/breduce.clif b/cranelift/filetests/filetests/runtests/breduce.clif deleted file mode 100644 index 08dfdde743a6..000000000000 
--- a/cranelift/filetests/filetests/runtests/breduce.clif +++ /dev/null @@ -1,90 +0,0 @@ -test interpret -test run -target aarch64 -target x86_64 -target s390x -target riscv64 - -function %breduce_b8_b1(b8) -> b1 { -block0(v0: b8): - v1 = breduce.b1 v0 - return v1 -} -; run: %breduce_b8_b1(true) == true -; run: %breduce_b8_b1(false) == false - - -function %breduce_b16_b1(b16) -> b1 { -block0(v0: b16): - v1 = breduce.b1 v0 - return v1 -} -; run: %breduce_b16_b1(true) == true -; run: %breduce_b16_b1(false) == false - -function %breduce_b16_b8(b16) -> b8 { -block0(v0: b16): - v1 = breduce.b8 v0 - return v1 -} -; run: %breduce_b16_b8(true) == true -; run: %breduce_b16_b8(false) == false - - -function %breduce_b32_b1(b32) -> b1 { -block0(v0: b32): - v1 = breduce.b1 v0 - return v1 -} -; run: %breduce_b32_b1(true) == true -; run: %breduce_b32_b1(false) == false - -function %breduce_b32_b8(b32) -> b8 { -block0(v0: b32): - v1 = breduce.b8 v0 - return v1 -} -; run: %breduce_b32_b8(true) == true -; run: %breduce_b32_b8(false) == false - -function %breduce_b32_b16(b32) -> b16 { -block0(v0: b32): - v1 = breduce.b16 v0 - return v1 -} -; run: %breduce_b32_b16(true) == true -; run: %breduce_b32_b16(false) == false - - - -function %breduce_b64_b1(b64) -> b1 { -block0(v0: b64): - v1 = breduce.b1 v0 - return v1 -} -; run: %breduce_b64_b1(true) == true -; run: %breduce_b64_b1(false) == false - -function %breduce_b64_b8(b64) -> b8 { -block0(v0: b64): - v1 = breduce.b8 v0 - return v1 -} -; run: %breduce_b64_b8(true) == true -; run: %breduce_b64_b8(false) == false - -function %breduce_b64_b16(b64) -> b16 { -block0(v0: b64): - v1 = breduce.b16 v0 - return v1 -} -; run: %breduce_b64_b16(true) == true -; run: %breduce_b64_b16(false) == false - -function %breduce_b64_b32(b64) -> b32 { -block0(v0: b64): - v1 = breduce.b32 v0 - return v1 -} -; run: %breduce_b64_b32(true) == true -; run: %breduce_b64_b32(false) == false diff --git a/cranelift/filetests/filetests/runtests/call.clif b/cranelift/filetests/filetests/runtests/call.clif index 02a2856515ba..7df6586e3b7b 100644 --- a/cranelift/filetests/filetests/runtests/call.clif +++ b/cranelift/filetests/filetests/runtests/call.clif @@ -52,21 +52,22 @@ block0(v0: f64): -function %callee_b1(b1) -> b1 { -block0(v0: b1): - v1 = bnot.b1 v0 - return v1 +function %callee_i8(i8) -> i8 { +block0(v0: i8): + v1 = iconst.i8 0 + v2 = icmp eq v0, v1 + return v2 } -function %call_b1(b1) -> b1 { - fn0 = %callee_b1(b1) -> b1 +function %call_i8(i8) -> i8 { + fn0 = %callee_i8(i8) -> i8 -block0(v0: b1): +block0(v0: i8): v1 = call fn0(v0) return v1 } -; run: %call_b1(true) == false -; run: %call_b1(false) == true +; run: %call_i8(1) == 0 +; run: %call_i8(0) == 1 diff --git a/cranelift/filetests/filetests/runtests/ceil.clif b/cranelift/filetests/filetests/runtests/ceil.clif index 2bc6998f4347..9cd68c63a867 100644 --- a/cranelift/filetests/filetests/runtests/ceil.clif +++ b/cranelift/filetests/filetests/runtests/ceil.clif @@ -59,7 +59,7 @@ function %ceil_is_nan_f32(f32) -> i32 { block0(v0: f32): v1 = ceil v0 v2 = fcmp ne v1, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } ; run: %ceil_is_nan_f32(+NaN) == 1 @@ -132,7 +132,7 @@ function %ceil_is_nan_f64(f64) -> i32 { block0(v0: f64): v1 = ceil v0 v2 = fcmp ne v1, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } ; run: %ceil_is_nan_f64(+NaN) == 1 diff --git a/cranelift/filetests/filetests/runtests/const.clif b/cranelift/filetests/filetests/runtests/const.clif index 36c4143879c1..9ef10fee36cf 100644 --- 
a/cranelift/filetests/filetests/runtests/const.clif +++ b/cranelift/filetests/filetests/runtests/const.clif @@ -93,62 +93,62 @@ block0: -function %b8_bconst_false() -> b8 { +function %i8_iconst_false() -> i8 { block0: - v1 = bconst.b8 false + v1 = iconst.i8 0 return v1 } -; run: %b8_bconst_false() == false +; run: %i8_iconst_false() == 0 -function %b8_bconst_true() -> b8 { +function %i8_iconst_true() -> i8 { block0: - v1 = bconst.b8 true + v1 = iconst.i8 1 return v1 } -; run: %b8_bconst_true() == true +; run: %i8_iconst_true() == 1 -function %b16_bconst_false() -> b16 { +function %i16_iconst_false() -> i16 { block0: - v1 = bconst.b16 false + v1 = iconst.i16 0 return v1 } -; run: %b16_bconst_false() == false +; run: %i16_iconst_false() == 0 -function %b16_bconst_true() -> b16 { +function %i16_iconst_true() -> i16 { block0: - v1 = bconst.b16 true + v1 = iconst.i16 1 return v1 } -; run: %b16_bconst_true() == true +; run: %i16_iconst_true() == 1 -function %b32_bconst_false() -> b32 { +function %i32_iconst_false() -> i32 { block0: - v1 = bconst.b32 false + v1 = iconst.i32 0 return v1 } -; run: %b32_bconst_false() == false +; run: %i32_iconst_false() == 0 -function %b32_bconst_true() -> b32 { +function %i32_iconst_true() -> i32 { block0: - v1 = bconst.b32 true + v1 = iconst.i32 1 return v1 } -; run: %b32_bconst_true() == true +; run: %i32_iconst_true() == 1 -function %b64_bconst_false() -> b64 { +function %i64_iconst_false() -> i64 { block0: - v1 = bconst.b64 false + v1 = iconst.i64 0 return v1 } -; run: %b64_bconst_false() == false +; run: %i64_iconst_false() == 0 -; this verifies that returning b64 immediates does not result in a segmentation fault, see https://github.com/bytecodealliance/cranelift/issues/911 -function %b64_bconst_true() -> b64 { +; this verifies that returning i64 immediates does not result in a segmentation fault, see https://github.com/bytecodealliance/cranelift/issues/911 +function %i64_iconst_true() -> i64 { block0: - v1 = bconst.b64 true + v1 = iconst.i64 1 return v1 } -; run: %b64_bconst_true() == true +; run: %i64_iconst_true() == 1 diff --git a/cranelift/filetests/filetests/runtests/copy.clif b/cranelift/filetests/filetests/runtests/copy.clif index ef586782f123..d9cb3ee7826a 100644 --- a/cranelift/filetests/filetests/runtests/copy.clif +++ b/cranelift/filetests/filetests/runtests/copy.clif @@ -44,46 +44,6 @@ block0(v0: i64): ; run: %copy_i64(-1) == -1 ; run: %copy_i64(127) == 127 -function %copy_b1(b1) -> b1 { -block0(v0: b1): - v1 = copy v0 - return v1 -} -; run: %copy_b1(false) == false -; run: %copy_b1(true) == true - -function %copy_b8(b8) -> b8 { -block0(v0: b8): - v1 = copy v0 - return v1 -} -; run: %copy_b8(false) == false -; run: %copy_b8(true) == true - -function %copy_b16(b16) -> b16 { -block0(v0: b16): - v1 = copy v0 - return v1 -} -; run: %copy_b16(false) == false -; run: %copy_b16(true) == true - -function %copy_b32(b32) -> b32 { -block0(v0: b32): - v1 = copy v0 - return v1 -} -; run: %copy_b32(false) == false -; run: %copy_b32(true) == true - -function %copy_b64(b64) -> b64 { -block0(v0: b64): - v1 = copy v0 - return v1 -} -; run: %copy_b64(false) == false -; run: %copy_b64(true) == true - function %copy_f32(f32) -> f32 { block0(v0: f32): v1 = copy v0 diff --git a/cranelift/filetests/filetests/runtests/fadd.clif b/cranelift/filetests/filetests/runtests/fadd.clif index 0d7debb7de83..88861f1243a2 100644 --- a/cranelift/filetests/filetests/runtests/fadd.clif +++ b/cranelift/filetests/filetests/runtests/fadd.clif @@ -49,7 +49,7 @@ function 
%fadd_is_nan_f32(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fadd v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fadd_is_nan_f32(+Inf, -Inf) == 1 @@ -114,7 +114,7 @@ function %fadd_is_nan_f64(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fadd v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fadd_is_nan_f64(+Inf, -Inf) == 1 diff --git a/cranelift/filetests/filetests/runtests/fcmp-eq.clif b/cranelift/filetests/filetests/runtests/fcmp-eq.clif index 01d2d512c754..206b4dfe6300 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-eq.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-eq.clif @@ -5,316 +5,316 @@ target aarch64 target s390x target riscv64 -function %fcmp_eq_f32(f32, f32) -> b1 { +function %fcmp_eq_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp eq v0, v1 return v2 } -; run: %fcmp_eq_f32(0x0.5, 0x0.5) == true -; run: %fcmp_eq_f32(0x1.0, 0x1.0) == true -; run: %fcmp_eq_f32(-0x1.0, 0x1.0) == false -; run: %fcmp_eq_f32(0x1.0, -0x1.0) == false -; run: %fcmp_eq_f32(0x0.5, 0x1.0) == false -; run: %fcmp_eq_f32(0x1.5, 0x2.9) == false -; run: %fcmp_eq_f32(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_eq_f32(0x1.4cccccp0, 0x1.8p0) == false -; run: %fcmp_eq_f32(0x1.b33334p0, 0x1.99999ap-2) == false -; run: %fcmp_eq_f32(0x1.333334p-1, 0x1.666666p1) == false -; run: %fcmp_eq_f32(-0x0.5, -0x1.0) == false -; run: %fcmp_eq_f32(-0x1.5, -0x2.9) == false -; run: %fcmp_eq_f32(-0x1.1p10, -0x1.333334p-1) == false -; run: %fcmp_eq_f32(-0x1.99999ap-2, -0x1.4cccccp0) == false -; run: %fcmp_eq_f32(-0x1.8p0, -0x1.b33334p0) == false -; run: %fcmp_eq_f32(-0x1.4p1, -0x1.666666p1) == false -; run: %fcmp_eq_f32(0x0.5, -0x1.0) == false -; run: %fcmp_eq_f32(0x1.b33334p0, -0x1.b33334p0) == false +; run: %fcmp_eq_f32(0x0.5, 0x0.5) == 1 +; run: %fcmp_eq_f32(0x1.0, 0x1.0) == 1 +; run: %fcmp_eq_f32(-0x1.0, 0x1.0) == 0 +; run: %fcmp_eq_f32(0x1.0, -0x1.0) == 0 +; run: %fcmp_eq_f32(0x0.5, 0x1.0) == 0 +; run: %fcmp_eq_f32(0x1.5, 0x2.9) == 0 +; run: %fcmp_eq_f32(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_eq_f32(0x1.4cccccp0, 0x1.8p0) == 0 +; run: %fcmp_eq_f32(0x1.b33334p0, 0x1.99999ap-2) == 0 +; run: %fcmp_eq_f32(0x1.333334p-1, 0x1.666666p1) == 0 +; run: %fcmp_eq_f32(-0x0.5, -0x1.0) == 0 +; run: %fcmp_eq_f32(-0x1.5, -0x2.9) == 0 +; run: %fcmp_eq_f32(-0x1.1p10, -0x1.333334p-1) == 0 +; run: %fcmp_eq_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 0 +; run: %fcmp_eq_f32(-0x1.8p0, -0x1.b33334p0) == 0 +; run: %fcmp_eq_f32(-0x1.4p1, -0x1.666666p1) == 0 +; run: %fcmp_eq_f32(0x0.5, -0x1.0) == 0 +; run: %fcmp_eq_f32(0x1.b33334p0, -0x1.b33334p0) == 0 ; Zeroes -; run: %fcmp_eq_f32(0x0.0, 0x0.0) == true -; run: %fcmp_eq_f32(-0x0.0, -0x0.0) == true -; run: %fcmp_eq_f32(0x0.0, -0x0.0) == true -; run: %fcmp_eq_f32(-0x0.0, 0x0.0) == true +; run: %fcmp_eq_f32(0x0.0, 0x0.0) == 1 +; run: %fcmp_eq_f32(-0x0.0, -0x0.0) == 1 +; run: %fcmp_eq_f32(0x0.0, -0x0.0) == 1 +; run: %fcmp_eq_f32(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_eq_f32(Inf, Inf) == true -; run: %fcmp_eq_f32(-Inf, -Inf) == true -; run: %fcmp_eq_f32(Inf, -Inf) == false -; run: %fcmp_eq_f32(-Inf, Inf) == false +; run: %fcmp_eq_f32(Inf, Inf) == 1 +; run: %fcmp_eq_f32(-Inf, -Inf) == 1 +; run: %fcmp_eq_f32(Inf, -Inf) == 0 +; run: %fcmp_eq_f32(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_eq_f32(0x0.0, Inf) == false -; run: %fcmp_eq_f32(-0x0.0, Inf) == false -; run: %fcmp_eq_f32(0x0.0, -Inf) == false -; run: %fcmp_eq_f32(-0x0.0, -Inf) == false -; run: %fcmp_eq_f32(Inf, 0x0.0) == false -; run: 
%fcmp_eq_f32(Inf, -0x0.0) == false -; run: %fcmp_eq_f32(-Inf, 0x0.0) == false -; run: %fcmp_eq_f32(-Inf, -0x0.0) == false +; run: %fcmp_eq_f32(0x0.0, Inf) == 0 +; run: %fcmp_eq_f32(-0x0.0, Inf) == 0 +; run: %fcmp_eq_f32(0x0.0, -Inf) == 0 +; run: %fcmp_eq_f32(-0x0.0, -Inf) == 0 +; run: %fcmp_eq_f32(Inf, 0x0.0) == 0 +; run: %fcmp_eq_f32(Inf, -0x0.0) == 0 +; run: %fcmp_eq_f32(-Inf, 0x0.0) == 0 +; run: %fcmp_eq_f32(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_eq_f32(0x1.0p-23, 0x1.0p-23) == true -; run: %fcmp_eq_f32(0x1.fffffep127, 0x1.fffffep127) == true -; run: %fcmp_eq_f32(0x1.0p-126, 0x1.0p-126) == true -; run: %fcmp_eq_f32(0x1.0p-23, 0x1.fffffep127) == false -; run: %fcmp_eq_f32(0x1.0p-23, 0x1.0p-126) == false -; run: %fcmp_eq_f32(0x1.0p-126, 0x1.fffffep127) == false +; run: %fcmp_eq_f32(0x1.0p-23, 0x1.0p-23) == 1 +; run: %fcmp_eq_f32(0x1.fffffep127, 0x1.fffffep127) == 1 +; run: %fcmp_eq_f32(0x1.0p-126, 0x1.0p-126) == 1 +; run: %fcmp_eq_f32(0x1.0p-23, 0x1.fffffep127) == 0 +; run: %fcmp_eq_f32(0x1.0p-23, 0x1.0p-126) == 0 +; run: %fcmp_eq_f32(0x1.0p-126, 0x1.fffffep127) == 0 ; Subnormals -; run: %fcmp_eq_f32(0x0.800002p-126, -0x0.800002p-126) == false -; run: %fcmp_eq_f32(-0x0.800002p-126, 0x0.800002p-126) == false -; run: %fcmp_eq_f32(0x0.800002p-126, 0x0.0) == false -; run: %fcmp_eq_f32(-0x0.800002p-126, 0x0.0) == false -; run: %fcmp_eq_f32(0x0.800002p-126, -0x0.0) == false -; run: %fcmp_eq_f32(-0x0.800002p-126, -0x0.0) == false -; run: %fcmp_eq_f32(0x0.0, 0x0.800002p-126) == false -; run: %fcmp_eq_f32(0x0.0, -0x0.800002p-126) == false -; run: %fcmp_eq_f32(-0x0.0, 0x0.800002p-126) == false -; run: %fcmp_eq_f32(-0x0.0, -0x0.800002p-126) == false +; run: %fcmp_eq_f32(0x0.800002p-126, -0x0.800002p-126) == 0 +; run: %fcmp_eq_f32(-0x0.800002p-126, 0x0.800002p-126) == 0 +; run: %fcmp_eq_f32(0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_eq_f32(-0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_eq_f32(0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_eq_f32(-0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_eq_f32(0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_eq_f32(0x0.0, -0x0.800002p-126) == 0 +; run: %fcmp_eq_f32(-0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_eq_f32(-0x0.0, -0x0.800002p-126) == 0 ; NaN's -; run: %fcmp_eq_f32(+NaN, +NaN) == false -; run: %fcmp_eq_f32(-NaN, -NaN) == false -; run: %fcmp_eq_f32(+NaN, -NaN) == false -; run: %fcmp_eq_f32(-NaN, +NaN) == false +; run: %fcmp_eq_f32(+NaN, +NaN) == 0 +; run: %fcmp_eq_f32(-NaN, -NaN) == 0 +; run: %fcmp_eq_f32(+NaN, -NaN) == 0 +; run: %fcmp_eq_f32(-NaN, +NaN) == 0 -; run: %fcmp_eq_f32(+NaN, -0x1.0) == false -; run: %fcmp_eq_f32(-NaN, -0x1.0) == false -; run: %fcmp_eq_f32(+NaN, 0x1.0) == false -; run: %fcmp_eq_f32(-NaN, 0x1.0) == false -; run: %fcmp_eq_f32(+NaN, -0x0.0) == false -; run: %fcmp_eq_f32(-NaN, -0x0.0) == false -; run: %fcmp_eq_f32(+NaN, 0x0.0) == false -; run: %fcmp_eq_f32(-NaN, 0x0.0) == false -; run: %fcmp_eq_f32(+NaN, -Inf) == false -; run: %fcmp_eq_f32(-NaN, -Inf) == false -; run: %fcmp_eq_f32(+NaN, Inf) == false -; run: %fcmp_eq_f32(-NaN, Inf) == false -; run: %fcmp_eq_f32(-0x0.0, +NaN) == false -; run: %fcmp_eq_f32(-0x0.0, -NaN) == false -; run: %fcmp_eq_f32(0x0.0, +NaN) == false -; run: %fcmp_eq_f32(0x0.0, -NaN) == false -; run: %fcmp_eq_f32(-Inf, +NaN) == false -; run: %fcmp_eq_f32(-Inf, -NaN) == false -; run: %fcmp_eq_f32(Inf, +NaN) == false -; run: %fcmp_eq_f32(Inf, -NaN) == false +; run: %fcmp_eq_f32(+NaN, -0x1.0) == 0 +; run: %fcmp_eq_f32(-NaN, -0x1.0) == 0 +; run: %fcmp_eq_f32(+NaN, 0x1.0) == 0 +; run: %fcmp_eq_f32(-NaN, 
0x1.0) == 0 +; run: %fcmp_eq_f32(+NaN, -0x0.0) == 0 +; run: %fcmp_eq_f32(-NaN, -0x0.0) == 0 +; run: %fcmp_eq_f32(+NaN, 0x0.0) == 0 +; run: %fcmp_eq_f32(-NaN, 0x0.0) == 0 +; run: %fcmp_eq_f32(+NaN, -Inf) == 0 +; run: %fcmp_eq_f32(-NaN, -Inf) == 0 +; run: %fcmp_eq_f32(+NaN, Inf) == 0 +; run: %fcmp_eq_f32(-NaN, Inf) == 0 +; run: %fcmp_eq_f32(-0x0.0, +NaN) == 0 +; run: %fcmp_eq_f32(-0x0.0, -NaN) == 0 +; run: %fcmp_eq_f32(0x0.0, +NaN) == 0 +; run: %fcmp_eq_f32(0x0.0, -NaN) == 0 +; run: %fcmp_eq_f32(-Inf, +NaN) == 0 +; run: %fcmp_eq_f32(-Inf, -NaN) == 0 +; run: %fcmp_eq_f32(Inf, +NaN) == 0 +; run: %fcmp_eq_f32(Inf, -NaN) == 0 -; run: %fcmp_eq_f32(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_eq_f32(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_eq_f32(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_eq_f32(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_eq_f32(+NaN:0x1, +NaN) == false -; run: %fcmp_eq_f32(+NaN:0x1, -NaN) == false -; run: %fcmp_eq_f32(-NaN:0x1, -NaN) == false -; run: %fcmp_eq_f32(-NaN:0x1, +NaN) == false +; run: %fcmp_eq_f32(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_eq_f32(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_eq_f32(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_eq_f32(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_eq_f32(+NaN:0x1, +NaN) == 0 +; run: %fcmp_eq_f32(+NaN:0x1, -NaN) == 0 +; run: %fcmp_eq_f32(-NaN:0x1, -NaN) == 0 +; run: %fcmp_eq_f32(-NaN:0x1, +NaN) == 0 -; run: %fcmp_eq_f32(+NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_eq_f32(-NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_eq_f32(+NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_eq_f32(-NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_eq_f32(+NaN:0x80001, +NaN) == false -; run: %fcmp_eq_f32(+NaN:0x80001, -NaN) == false -; run: %fcmp_eq_f32(-NaN:0x80001, -NaN) == false -; run: %fcmp_eq_f32(-NaN:0x80001, +NaN) == false +; run: %fcmp_eq_f32(+NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_eq_f32(-NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_eq_f32(+NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_eq_f32(-NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_eq_f32(+NaN:0x80001, +NaN) == 0 +; run: %fcmp_eq_f32(+NaN:0x80001, -NaN) == 0 +; run: %fcmp_eq_f32(-NaN:0x80001, -NaN) == 0 +; run: %fcmp_eq_f32(-NaN:0x80001, +NaN) == 0 ; sNaN's -; run: %fcmp_eq_f32(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_eq_f32(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_eq_f32(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_eq_f32(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_eq_f32(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_eq_f32(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_eq_f32(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_eq_f32(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_eq_f32(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_eq_f32(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_eq_f32(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_eq_f32(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_eq_f32(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_eq_f32(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_eq_f32(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_eq_f32(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_eq_f32(+sNaN:0x1, -Inf) == false -; run: %fcmp_eq_f32(-sNaN:0x1, -Inf) == false -; run: %fcmp_eq_f32(+sNaN:0x1, Inf) == false -; run: %fcmp_eq_f32(-sNaN:0x1, Inf) == false -; run: %fcmp_eq_f32(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_eq_f32(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_eq_f32(0x0.0, +sNaN:0x1) == false -; run: %fcmp_eq_f32(0x0.0, -sNaN:0x1) == false -; run: %fcmp_eq_f32(-Inf, +sNaN:0x1) == false -; run: %fcmp_eq_f32(-Inf, -sNaN:0x1) == false -; run: %fcmp_eq_f32(Inf, +sNaN:0x1) == false -; run: %fcmp_eq_f32(Inf, -sNaN:0x1) == 
false +; run: %fcmp_eq_f32(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_eq_f32(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_eq_f32(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_eq_f32(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_eq_f32(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_eq_f32(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_eq_f32(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_eq_f32(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_eq_f32(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_eq_f32(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_eq_f32(+sNaN:0x1, Inf) == 0 +; run: %fcmp_eq_f32(-sNaN:0x1, Inf) == 0 +; run: %fcmp_eq_f32(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_eq_f32(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_eq_f32(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_eq_f32(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_eq_f32(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_eq_f32(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_eq_f32(Inf, +sNaN:0x1) == 0 +; run: %fcmp_eq_f32(Inf, -sNaN:0x1) == 0 -; run: %fcmp_eq_f32(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_eq_f32(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_eq_f32(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_eq_f32(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_eq_f32(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_eq_f32(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_eq_f32(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_eq_f32(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_eq_f32(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_eq_f32(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_eq_f32(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_eq_f32(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_eq_f32(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_eq_f32(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_eq_f32(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_eq_f32(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_eq_f32(+sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_eq_f32(-sNaN:0x80001, -sNaN:0x80001) == false -; run: %fcmp_eq_f32(+sNaN:0x80001, -sNaN:0x80001) == false -; run: %fcmp_eq_f32(-sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_eq_f32(+sNaN:0x80001, +sNaN:0x1) == false -; run: %fcmp_eq_f32(+sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_eq_f32(-sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_eq_f32(-sNaN:0x80001, +sNaN:0x1) == false +; run: %fcmp_eq_f32(+sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_eq_f32(-sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_eq_f32(+sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_eq_f32(-sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_eq_f32(+sNaN:0x80001, +sNaN:0x1) == 0 +; run: %fcmp_eq_f32(+sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_eq_f32(-sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_eq_f32(-sNaN:0x80001, +sNaN:0x1) == 0 -function %fcmp_eq_f64(f64, f64) -> b1 { +function %fcmp_eq_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp eq v0, v1 return v2 } -; run: %fcmp_eq_f64(0x0.5, 0x0.5) == true -; run: %fcmp_eq_f64(0x1.0, 0x1.0) == true -; run: %fcmp_eq_f64(-0x1.0, 0x1.0) == false -; run: %fcmp_eq_f64(0x1.0, -0x1.0) == false -; run: %fcmp_eq_f64(0x0.5, 0x1.0) == false -; run: %fcmp_eq_f64(0x1.5, 0x2.9) == false -; run: %fcmp_eq_f64(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_eq_f64(0x1.4cccccccccccdp0, 0x1.8p0) == false -; run: %fcmp_eq_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == false -; run: %fcmp_eq_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == false -; run: %fcmp_eq_f64(-0x0.5, -0x1.0) == false -; run: %fcmp_eq_f64(-0x1.5, -0x2.9) == false -; run: %fcmp_eq_f64(-0x1.1p10, -0x1.3333333333333p-1) == false -; run: %fcmp_eq_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == false -; run: %fcmp_eq_f64(-0x1.8p0, -0x1.b333333333333p0) == false -; run: %fcmp_eq_f64(-0x1.4p1, 
-0x1.6666666666666p1) == false -; run: %fcmp_eq_f64(0x0.5, -0x1.0) == false -; run: %fcmp_eq_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == false +; run: %fcmp_eq_f64(0x0.5, 0x0.5) == 1 +; run: %fcmp_eq_f64(0x1.0, 0x1.0) == 1 +; run: %fcmp_eq_f64(-0x1.0, 0x1.0) == 0 +; run: %fcmp_eq_f64(0x1.0, -0x1.0) == 0 +; run: %fcmp_eq_f64(0x0.5, 0x1.0) == 0 +; run: %fcmp_eq_f64(0x1.5, 0x2.9) == 0 +; run: %fcmp_eq_f64(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_eq_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 0 +; run: %fcmp_eq_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 0 +; run: %fcmp_eq_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 0 +; run: %fcmp_eq_f64(-0x0.5, -0x1.0) == 0 +; run: %fcmp_eq_f64(-0x1.5, -0x2.9) == 0 +; run: %fcmp_eq_f64(-0x1.1p10, -0x1.3333333333333p-1) == 0 +; run: %fcmp_eq_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 0 +; run: %fcmp_eq_f64(-0x1.8p0, -0x1.b333333333333p0) == 0 +; run: %fcmp_eq_f64(-0x1.4p1, -0x1.6666666666666p1) == 0 +; run: %fcmp_eq_f64(0x0.5, -0x1.0) == 0 +; run: %fcmp_eq_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 0 ; Zeroes -; run: %fcmp_eq_f64(0x0.0, 0x0.0) == true -; run: %fcmp_eq_f64(-0x0.0, -0x0.0) == true -; run: %fcmp_eq_f64(0x0.0, -0x0.0) == true -; run: %fcmp_eq_f64(-0x0.0, 0x0.0) == true +; run: %fcmp_eq_f64(0x0.0, 0x0.0) == 1 +; run: %fcmp_eq_f64(-0x0.0, -0x0.0) == 1 +; run: %fcmp_eq_f64(0x0.0, -0x0.0) == 1 +; run: %fcmp_eq_f64(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_eq_f64(Inf, Inf) == true -; run: %fcmp_eq_f64(-Inf, -Inf) == true -; run: %fcmp_eq_f64(Inf, -Inf) == false -; run: %fcmp_eq_f64(-Inf, Inf) == false +; run: %fcmp_eq_f64(Inf, Inf) == 1 +; run: %fcmp_eq_f64(-Inf, -Inf) == 1 +; run: %fcmp_eq_f64(Inf, -Inf) == 0 +; run: %fcmp_eq_f64(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_eq_f64(0x0.0, Inf) == false -; run: %fcmp_eq_f64(-0x0.0, Inf) == false -; run: %fcmp_eq_f64(0x0.0, -Inf) == false -; run: %fcmp_eq_f64(-0x0.0, -Inf) == false -; run: %fcmp_eq_f64(Inf, 0x0.0) == false -; run: %fcmp_eq_f64(Inf, -0x0.0) == false -; run: %fcmp_eq_f64(-Inf, 0x0.0) == false -; run: %fcmp_eq_f64(-Inf, -0x0.0) == false +; run: %fcmp_eq_f64(0x0.0, Inf) == 0 +; run: %fcmp_eq_f64(-0x0.0, Inf) == 0 +; run: %fcmp_eq_f64(0x0.0, -Inf) == 0 +; run: %fcmp_eq_f64(-0x0.0, -Inf) == 0 +; run: %fcmp_eq_f64(Inf, 0x0.0) == 0 +; run: %fcmp_eq_f64(Inf, -0x0.0) == 0 +; run: %fcmp_eq_f64(-Inf, 0x0.0) == 0 +; run: %fcmp_eq_f64(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_eq_f64(0x1.0p-52, 0x1.0p-52) == true -; run: %fcmp_eq_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == true -; run: %fcmp_eq_f64(0x1.0p-1022, 0x1.0p-1022) == true -; run: %fcmp_eq_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == false -; run: %fcmp_eq_f64(0x1.0p-52, 0x1.0p-1022) == false -; run: %fcmp_eq_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == false +; run: %fcmp_eq_f64(0x1.0p-52, 0x1.0p-52) == 1 +; run: %fcmp_eq_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_eq_f64(0x1.0p-1022, 0x1.0p-1022) == 1 +; run: %fcmp_eq_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_eq_f64(0x1.0p-52, 0x1.0p-1022) == 0 +; run: %fcmp_eq_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 0 ; Subnormals -; run: %fcmp_eq_f64(0x0.8p-1022, -0x0.8p-1022) == false -; run: %fcmp_eq_f64(-0x0.8p-1022, 0x0.8p-1022) == false -; run: %fcmp_eq_f64(0x0.8p-1022, 0x0.0) == false -; run: %fcmp_eq_f64(-0x0.8p-1022, 0x0.0) == false -; run: %fcmp_eq_f64(0x0.8p-1022, -0x0.0) == false -; run: %fcmp_eq_f64(-0x0.8p-1022, -0x0.0) == false -; run: %fcmp_eq_f64(0x0.0, 0x0.8p-1022) == false -; run: 
%fcmp_eq_f64(0x0.0, -0x0.8p-1022) == false -; run: %fcmp_eq_f64(-0x0.0, 0x0.8p-1022) == false -; run: %fcmp_eq_f64(-0x0.0, -0x0.8p-1022) == false +; run: %fcmp_eq_f64(0x0.8p-1022, -0x0.8p-1022) == 0 +; run: %fcmp_eq_f64(-0x0.8p-1022, 0x0.8p-1022) == 0 +; run: %fcmp_eq_f64(0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_eq_f64(-0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_eq_f64(0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_eq_f64(-0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_eq_f64(0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_eq_f64(0x0.0, -0x0.8p-1022) == 0 +; run: %fcmp_eq_f64(-0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_eq_f64(-0x0.0, -0x0.8p-1022) == 0 ; NaN's -; run: %fcmp_eq_f64(+NaN, +NaN) == false -; run: %fcmp_eq_f64(-NaN, -NaN) == false -; run: %fcmp_eq_f64(+NaN, -NaN) == false -; run: %fcmp_eq_f64(-NaN, +NaN) == false +; run: %fcmp_eq_f64(+NaN, +NaN) == 0 +; run: %fcmp_eq_f64(-NaN, -NaN) == 0 +; run: %fcmp_eq_f64(+NaN, -NaN) == 0 +; run: %fcmp_eq_f64(-NaN, +NaN) == 0 -; run: %fcmp_eq_f64(+NaN, -0x1.0) == false -; run: %fcmp_eq_f64(-NaN, -0x1.0) == false -; run: %fcmp_eq_f64(+NaN, 0x1.0) == false -; run: %fcmp_eq_f64(-NaN, 0x1.0) == false -; run: %fcmp_eq_f64(+NaN, -0x0.0) == false -; run: %fcmp_eq_f64(-NaN, -0x0.0) == false -; run: %fcmp_eq_f64(+NaN, 0x0.0) == false -; run: %fcmp_eq_f64(-NaN, 0x0.0) == false -; run: %fcmp_eq_f64(+NaN, -Inf) == false -; run: %fcmp_eq_f64(-NaN, -Inf) == false -; run: %fcmp_eq_f64(+NaN, Inf) == false -; run: %fcmp_eq_f64(-NaN, Inf) == false -; run: %fcmp_eq_f64(-0x0.0, +NaN) == false -; run: %fcmp_eq_f64(-0x0.0, -NaN) == false -; run: %fcmp_eq_f64(0x0.0, +NaN) == false -; run: %fcmp_eq_f64(0x0.0, -NaN) == false -; run: %fcmp_eq_f64(-Inf, +NaN) == false -; run: %fcmp_eq_f64(-Inf, -NaN) == false -; run: %fcmp_eq_f64(Inf, +NaN) == false -; run: %fcmp_eq_f64(Inf, -NaN) == false +; run: %fcmp_eq_f64(+NaN, -0x1.0) == 0 +; run: %fcmp_eq_f64(-NaN, -0x1.0) == 0 +; run: %fcmp_eq_f64(+NaN, 0x1.0) == 0 +; run: %fcmp_eq_f64(-NaN, 0x1.0) == 0 +; run: %fcmp_eq_f64(+NaN, -0x0.0) == 0 +; run: %fcmp_eq_f64(-NaN, -0x0.0) == 0 +; run: %fcmp_eq_f64(+NaN, 0x0.0) == 0 +; run: %fcmp_eq_f64(-NaN, 0x0.0) == 0 +; run: %fcmp_eq_f64(+NaN, -Inf) == 0 +; run: %fcmp_eq_f64(-NaN, -Inf) == 0 +; run: %fcmp_eq_f64(+NaN, Inf) == 0 +; run: %fcmp_eq_f64(-NaN, Inf) == 0 +; run: %fcmp_eq_f64(-0x0.0, +NaN) == 0 +; run: %fcmp_eq_f64(-0x0.0, -NaN) == 0 +; run: %fcmp_eq_f64(0x0.0, +NaN) == 0 +; run: %fcmp_eq_f64(0x0.0, -NaN) == 0 +; run: %fcmp_eq_f64(-Inf, +NaN) == 0 +; run: %fcmp_eq_f64(-Inf, -NaN) == 0 +; run: %fcmp_eq_f64(Inf, +NaN) == 0 +; run: %fcmp_eq_f64(Inf, -NaN) == 0 -; run: %fcmp_eq_f64(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_eq_f64(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_eq_f64(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_eq_f64(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_eq_f64(+NaN:0x1, +NaN) == false -; run: %fcmp_eq_f64(+NaN:0x1, -NaN) == false -; run: %fcmp_eq_f64(-NaN:0x1, -NaN) == false -; run: %fcmp_eq_f64(-NaN:0x1, +NaN) == false +; run: %fcmp_eq_f64(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_eq_f64(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_eq_f64(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_eq_f64(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_eq_f64(+NaN:0x1, +NaN) == 0 +; run: %fcmp_eq_f64(+NaN:0x1, -NaN) == 0 +; run: %fcmp_eq_f64(-NaN:0x1, -NaN) == 0 +; run: %fcmp_eq_f64(-NaN:0x1, +NaN) == 0 -; run: %fcmp_eq_f64(+NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_eq_f64(-NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_eq_f64(+NaN:0x800000000001, -NaN:0x800000000001) == false -; run: 
%fcmp_eq_f64(-NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_eq_f64(+NaN:0x800000000001, +NaN) == false -; run: %fcmp_eq_f64(+NaN:0x800000000001, -NaN) == false -; run: %fcmp_eq_f64(-NaN:0x800000000001, -NaN) == false -; run: %fcmp_eq_f64(-NaN:0x800000000001, +NaN) == false +; run: %fcmp_eq_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_eq_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_eq_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_eq_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_eq_f64(+NaN:0x800000000001, +NaN) == 0 +; run: %fcmp_eq_f64(+NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_eq_f64(-NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_eq_f64(-NaN:0x800000000001, +NaN) == 0 ; sNaN's -; run: %fcmp_eq_f64(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_eq_f64(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_eq_f64(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_eq_f64(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_eq_f64(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_eq_f64(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_eq_f64(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_eq_f64(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_eq_f64(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_eq_f64(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_eq_f64(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_eq_f64(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_eq_f64(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_eq_f64(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_eq_f64(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_eq_f64(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_eq_f64(+sNaN:0x1, -Inf) == false -; run: %fcmp_eq_f64(-sNaN:0x1, -Inf) == false -; run: %fcmp_eq_f64(+sNaN:0x1, Inf) == false -; run: %fcmp_eq_f64(-sNaN:0x1, Inf) == false -; run: %fcmp_eq_f64(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_eq_f64(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_eq_f64(0x0.0, +sNaN:0x1) == false -; run: %fcmp_eq_f64(0x0.0, -sNaN:0x1) == false -; run: %fcmp_eq_f64(-Inf, +sNaN:0x1) == false -; run: %fcmp_eq_f64(-Inf, -sNaN:0x1) == false -; run: %fcmp_eq_f64(Inf, +sNaN:0x1) == false -; run: %fcmp_eq_f64(Inf, -sNaN:0x1) == false +; run: %fcmp_eq_f64(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_eq_f64(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_eq_f64(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_eq_f64(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_eq_f64(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_eq_f64(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_eq_f64(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_eq_f64(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_eq_f64(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_eq_f64(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_eq_f64(+sNaN:0x1, Inf) == 0 +; run: %fcmp_eq_f64(-sNaN:0x1, Inf) == 0 +; run: %fcmp_eq_f64(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_eq_f64(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_eq_f64(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_eq_f64(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_eq_f64(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_eq_f64(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_eq_f64(Inf, +sNaN:0x1) == 0 +; run: %fcmp_eq_f64(Inf, -sNaN:0x1) == 0 -; run: %fcmp_eq_f64(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_eq_f64(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_eq_f64(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_eq_f64(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_eq_f64(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_eq_f64(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_eq_f64(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_eq_f64(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_eq_f64(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_eq_f64(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_eq_f64(+sNaN:0x1, 
-NaN:0x1) == 0 +; run: %fcmp_eq_f64(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_eq_f64(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_eq_f64(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_eq_f64(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_eq_f64(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_eq_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_eq_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_eq_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_eq_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_eq_f64(+sNaN:0x800000000001, +sNaN:0x1) == false -; run: %fcmp_eq_f64(+sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_eq_f64(-sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_eq_f64(-sNaN:0x800000000001, +sNaN:0x1) == false +; run: %fcmp_eq_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_eq_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_eq_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_eq_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_eq_f64(+sNaN:0x800000000001, +sNaN:0x1) == 0 +; run: %fcmp_eq_f64(+sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_eq_f64(-sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_eq_f64(-sNaN:0x800000000001, +sNaN:0x1) == 0 diff --git a/cranelift/filetests/filetests/runtests/fcmp-ge.clif b/cranelift/filetests/filetests/runtests/fcmp-ge.clif index aace5b5dc3d2..d05f800c3de6 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-ge.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-ge.clif @@ -5,316 +5,316 @@ target aarch64 target s390x target riscv64 -function %fcmp_ge_f32(f32, f32) -> b1 { +function %fcmp_ge_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp ge v0, v1 return v2 } -; run: %fcmp_ge_f32(0x0.5, 0x0.5) == true -; run: %fcmp_ge_f32(0x1.0, 0x1.0) == true -; run: %fcmp_ge_f32(-0x1.0, 0x1.0) == false -; run: %fcmp_ge_f32(0x1.0, -0x1.0) == true -; run: %fcmp_ge_f32(0x0.5, 0x1.0) == false -; run: %fcmp_ge_f32(0x1.5, 0x2.9) == false -; run: %fcmp_ge_f32(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_ge_f32(0x1.4cccccp0, 0x1.8p0) == false -; run: %fcmp_ge_f32(0x1.b33334p0, 0x1.99999ap-2) == true -; run: %fcmp_ge_f32(0x1.333334p-1, 0x1.666666p1) == false -; run: %fcmp_ge_f32(-0x0.5, -0x1.0) == true -; run: %fcmp_ge_f32(-0x1.5, -0x2.9) == true -; run: %fcmp_ge_f32(-0x1.1p10, -0x1.333334p-1) == false -; run: %fcmp_ge_f32(-0x1.99999ap-2, -0x1.4cccccp0) == true -; run: %fcmp_ge_f32(-0x1.8p0, -0x1.b33334p0) == true -; run: %fcmp_ge_f32(-0x1.4p1, -0x1.666666p1) == true -; run: %fcmp_ge_f32(0x0.5, -0x1.0) == true -; run: %fcmp_ge_f32(0x1.b33334p0, -0x1.b33334p0) == true +; run: %fcmp_ge_f32(0x0.5, 0x0.5) == 1 +; run: %fcmp_ge_f32(0x1.0, 0x1.0) == 1 +; run: %fcmp_ge_f32(-0x1.0, 0x1.0) == 0 +; run: %fcmp_ge_f32(0x1.0, -0x1.0) == 1 +; run: %fcmp_ge_f32(0x0.5, 0x1.0) == 0 +; run: %fcmp_ge_f32(0x1.5, 0x2.9) == 0 +; run: %fcmp_ge_f32(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_ge_f32(0x1.4cccccp0, 0x1.8p0) == 0 +; run: %fcmp_ge_f32(0x1.b33334p0, 0x1.99999ap-2) == 1 +; run: %fcmp_ge_f32(0x1.333334p-1, 0x1.666666p1) == 0 +; run: %fcmp_ge_f32(-0x0.5, -0x1.0) == 1 +; run: %fcmp_ge_f32(-0x1.5, -0x2.9) == 1 +; run: %fcmp_ge_f32(-0x1.1p10, -0x1.333334p-1) == 0 +; run: %fcmp_ge_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 1 +; run: %fcmp_ge_f32(-0x1.8p0, -0x1.b33334p0) == 1 +; run: %fcmp_ge_f32(-0x1.4p1, -0x1.666666p1) == 1 +; run: %fcmp_ge_f32(0x0.5, -0x1.0) == 1 +; run: %fcmp_ge_f32(0x1.b33334p0, -0x1.b33334p0) == 1 ; Zeroes -; run: 
%fcmp_ge_f32(0x0.0, 0x0.0) == true -; run: %fcmp_ge_f32(-0x0.0, -0x0.0) == true -; run: %fcmp_ge_f32(0x0.0, -0x0.0) == true -; run: %fcmp_ge_f32(-0x0.0, 0x0.0) == true +; run: %fcmp_ge_f32(0x0.0, 0x0.0) == 1 +; run: %fcmp_ge_f32(-0x0.0, -0x0.0) == 1 +; run: %fcmp_ge_f32(0x0.0, -0x0.0) == 1 +; run: %fcmp_ge_f32(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_ge_f32(Inf, Inf) == true -; run: %fcmp_ge_f32(-Inf, -Inf) == true -; run: %fcmp_ge_f32(Inf, -Inf) == true -; run: %fcmp_ge_f32(-Inf, Inf) == false +; run: %fcmp_ge_f32(Inf, Inf) == 1 +; run: %fcmp_ge_f32(-Inf, -Inf) == 1 +; run: %fcmp_ge_f32(Inf, -Inf) == 1 +; run: %fcmp_ge_f32(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_ge_f32(0x0.0, Inf) == false -; run: %fcmp_ge_f32(-0x0.0, Inf) == false -; run: %fcmp_ge_f32(0x0.0, -Inf) == true -; run: %fcmp_ge_f32(-0x0.0, -Inf) == true -; run: %fcmp_ge_f32(Inf, 0x0.0) == true -; run: %fcmp_ge_f32(Inf, -0x0.0) == true -; run: %fcmp_ge_f32(-Inf, 0x0.0) == false -; run: %fcmp_ge_f32(-Inf, -0x0.0) == false +; run: %fcmp_ge_f32(0x0.0, Inf) == 0 +; run: %fcmp_ge_f32(-0x0.0, Inf) == 0 +; run: %fcmp_ge_f32(0x0.0, -Inf) == 1 +; run: %fcmp_ge_f32(-0x0.0, -Inf) == 1 +; run: %fcmp_ge_f32(Inf, 0x0.0) == 1 +; run: %fcmp_ge_f32(Inf, -0x0.0) == 1 +; run: %fcmp_ge_f32(-Inf, 0x0.0) == 0 +; run: %fcmp_ge_f32(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_ge_f32(0x1.0p-23, 0x1.0p-23) == true -; run: %fcmp_ge_f32(0x1.fffffep127, 0x1.fffffep127) == true -; run: %fcmp_ge_f32(0x1.0p-126, 0x1.0p-126) == true -; run: %fcmp_ge_f32(0x1.0p-23, 0x1.fffffep127) == false -; run: %fcmp_ge_f32(0x1.0p-23, 0x1.0p-126) == true -; run: %fcmp_ge_f32(0x1.0p-126, 0x1.fffffep127) == false +; run: %fcmp_ge_f32(0x1.0p-23, 0x1.0p-23) == 1 +; run: %fcmp_ge_f32(0x1.fffffep127, 0x1.fffffep127) == 1 +; run: %fcmp_ge_f32(0x1.0p-126, 0x1.0p-126) == 1 +; run: %fcmp_ge_f32(0x1.0p-23, 0x1.fffffep127) == 0 +; run: %fcmp_ge_f32(0x1.0p-23, 0x1.0p-126) == 1 +; run: %fcmp_ge_f32(0x1.0p-126, 0x1.fffffep127) == 0 ; Subnormals -; run: %fcmp_ge_f32(0x0.800002p-126, -0x0.800002p-126) == true -; run: %fcmp_ge_f32(-0x0.800002p-126, 0x0.800002p-126) == false -; run: %fcmp_ge_f32(0x0.800002p-126, 0x0.0) == true -; run: %fcmp_ge_f32(-0x0.800002p-126, 0x0.0) == false -; run: %fcmp_ge_f32(0x0.800002p-126, -0x0.0) == true -; run: %fcmp_ge_f32(-0x0.800002p-126, -0x0.0) == false -; run: %fcmp_ge_f32(0x0.0, 0x0.800002p-126) == false -; run: %fcmp_ge_f32(0x0.0, -0x0.800002p-126) == true -; run: %fcmp_ge_f32(-0x0.0, 0x0.800002p-126) == false -; run: %fcmp_ge_f32(-0x0.0, -0x0.800002p-126) == true +; run: %fcmp_ge_f32(0x0.800002p-126, -0x0.800002p-126) == 1 +; run: %fcmp_ge_f32(-0x0.800002p-126, 0x0.800002p-126) == 0 +; run: %fcmp_ge_f32(0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_ge_f32(-0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_ge_f32(0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_ge_f32(-0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_ge_f32(0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_ge_f32(0x0.0, -0x0.800002p-126) == 1 +; run: %fcmp_ge_f32(-0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_ge_f32(-0x0.0, -0x0.800002p-126) == 1 ; NaN's -; run: %fcmp_ge_f32(+NaN, +NaN) == false -; run: %fcmp_ge_f32(-NaN, -NaN) == false -; run: %fcmp_ge_f32(+NaN, -NaN) == false -; run: %fcmp_ge_f32(-NaN, +NaN) == false +; run: %fcmp_ge_f32(+NaN, +NaN) == 0 +; run: %fcmp_ge_f32(-NaN, -NaN) == 0 +; run: %fcmp_ge_f32(+NaN, -NaN) == 0 +; run: %fcmp_ge_f32(-NaN, +NaN) == 0 -; run: %fcmp_ge_f32(+NaN, -0x1.0) == false -; run: %fcmp_ge_f32(-NaN, -0x1.0) == false -; run: %fcmp_ge_f32(+NaN, 0x1.0) == 
false -; run: %fcmp_ge_f32(-NaN, 0x1.0) == false -; run: %fcmp_ge_f32(+NaN, -0x0.0) == false -; run: %fcmp_ge_f32(-NaN, -0x0.0) == false -; run: %fcmp_ge_f32(+NaN, 0x0.0) == false -; run: %fcmp_ge_f32(-NaN, 0x0.0) == false -; run: %fcmp_ge_f32(+NaN, -Inf) == false -; run: %fcmp_ge_f32(-NaN, -Inf) == false -; run: %fcmp_ge_f32(+NaN, Inf) == false -; run: %fcmp_ge_f32(-NaN, Inf) == false -; run: %fcmp_ge_f32(-0x0.0, +NaN) == false -; run: %fcmp_ge_f32(-0x0.0, -NaN) == false -; run: %fcmp_ge_f32(0x0.0, +NaN) == false -; run: %fcmp_ge_f32(0x0.0, -NaN) == false -; run: %fcmp_ge_f32(-Inf, +NaN) == false -; run: %fcmp_ge_f32(-Inf, -NaN) == false -; run: %fcmp_ge_f32(Inf, +NaN) == false -; run: %fcmp_ge_f32(Inf, -NaN) == false +; run: %fcmp_ge_f32(+NaN, -0x1.0) == 0 +; run: %fcmp_ge_f32(-NaN, -0x1.0) == 0 +; run: %fcmp_ge_f32(+NaN, 0x1.0) == 0 +; run: %fcmp_ge_f32(-NaN, 0x1.0) == 0 +; run: %fcmp_ge_f32(+NaN, -0x0.0) == 0 +; run: %fcmp_ge_f32(-NaN, -0x0.0) == 0 +; run: %fcmp_ge_f32(+NaN, 0x0.0) == 0 +; run: %fcmp_ge_f32(-NaN, 0x0.0) == 0 +; run: %fcmp_ge_f32(+NaN, -Inf) == 0 +; run: %fcmp_ge_f32(-NaN, -Inf) == 0 +; run: %fcmp_ge_f32(+NaN, Inf) == 0 +; run: %fcmp_ge_f32(-NaN, Inf) == 0 +; run: %fcmp_ge_f32(-0x0.0, +NaN) == 0 +; run: %fcmp_ge_f32(-0x0.0, -NaN) == 0 +; run: %fcmp_ge_f32(0x0.0, +NaN) == 0 +; run: %fcmp_ge_f32(0x0.0, -NaN) == 0 +; run: %fcmp_ge_f32(-Inf, +NaN) == 0 +; run: %fcmp_ge_f32(-Inf, -NaN) == 0 +; run: %fcmp_ge_f32(Inf, +NaN) == 0 +; run: %fcmp_ge_f32(Inf, -NaN) == 0 -; run: %fcmp_ge_f32(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_ge_f32(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_ge_f32(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_ge_f32(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_ge_f32(+NaN:0x1, +NaN) == false -; run: %fcmp_ge_f32(+NaN:0x1, -NaN) == false -; run: %fcmp_ge_f32(-NaN:0x1, -NaN) == false -; run: %fcmp_ge_f32(-NaN:0x1, +NaN) == false +; run: %fcmp_ge_f32(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ge_f32(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ge_f32(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ge_f32(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ge_f32(+NaN:0x1, +NaN) == 0 +; run: %fcmp_ge_f32(+NaN:0x1, -NaN) == 0 +; run: %fcmp_ge_f32(-NaN:0x1, -NaN) == 0 +; run: %fcmp_ge_f32(-NaN:0x1, +NaN) == 0 -; run: %fcmp_ge_f32(+NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_ge_f32(-NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_ge_f32(+NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_ge_f32(-NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_ge_f32(+NaN:0x80001, +NaN) == false -; run: %fcmp_ge_f32(+NaN:0x80001, -NaN) == false -; run: %fcmp_ge_f32(-NaN:0x80001, -NaN) == false -; run: %fcmp_ge_f32(-NaN:0x80001, +NaN) == false +; run: %fcmp_ge_f32(+NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_ge_f32(-NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_ge_f32(+NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_ge_f32(-NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_ge_f32(+NaN:0x80001, +NaN) == 0 +; run: %fcmp_ge_f32(+NaN:0x80001, -NaN) == 0 +; run: %fcmp_ge_f32(-NaN:0x80001, -NaN) == 0 +; run: %fcmp_ge_f32(-NaN:0x80001, +NaN) == 0 ; sNaN's -; run: %fcmp_ge_f32(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_ge_f32(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_ge_f32(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_ge_f32(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_ge_f32(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_ge_f32(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_ge_f32(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_ge_f32(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_ge_f32(+sNaN:0x1, -0x1.0) == false -; run: 
%fcmp_ge_f32(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_ge_f32(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_ge_f32(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_ge_f32(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_ge_f32(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_ge_f32(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_ge_f32(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_ge_f32(+sNaN:0x1, -Inf) == false -; run: %fcmp_ge_f32(-sNaN:0x1, -Inf) == false -; run: %fcmp_ge_f32(+sNaN:0x1, Inf) == false -; run: %fcmp_ge_f32(-sNaN:0x1, Inf) == false -; run: %fcmp_ge_f32(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_ge_f32(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_ge_f32(0x0.0, +sNaN:0x1) == false -; run: %fcmp_ge_f32(0x0.0, -sNaN:0x1) == false -; run: %fcmp_ge_f32(-Inf, +sNaN:0x1) == false -; run: %fcmp_ge_f32(-Inf, -sNaN:0x1) == false -; run: %fcmp_ge_f32(Inf, +sNaN:0x1) == false -; run: %fcmp_ge_f32(Inf, -sNaN:0x1) == false +; run: %fcmp_ge_f32(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_ge_f32(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_ge_f32(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_ge_f32(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_ge_f32(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_ge_f32(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_ge_f32(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_ge_f32(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_ge_f32(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_ge_f32(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_ge_f32(+sNaN:0x1, Inf) == 0 +; run: %fcmp_ge_f32(-sNaN:0x1, Inf) == 0 +; run: %fcmp_ge_f32(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_ge_f32(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_ge_f32(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_ge_f32(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_ge_f32(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_ge_f32(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_ge_f32(Inf, +sNaN:0x1) == 0 +; run: %fcmp_ge_f32(Inf, -sNaN:0x1) == 0 -; run: %fcmp_ge_f32(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_ge_f32(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_ge_f32(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_ge_f32(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_ge_f32(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_ge_f32(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_ge_f32(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_ge_f32(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_ge_f32(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ge_f32(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ge_f32(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ge_f32(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ge_f32(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_ge_f32(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_ge_f32(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_ge_f32(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_ge_f32(+sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_ge_f32(-sNaN:0x80001, -sNaN:0x80001) == false -; run: %fcmp_ge_f32(+sNaN:0x80001, -sNaN:0x80001) == false -; run: %fcmp_ge_f32(-sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_ge_f32(+sNaN:0x80001, +sNaN:0x1) == false -; run: %fcmp_ge_f32(+sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_ge_f32(-sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_ge_f32(-sNaN:0x80001, +sNaN:0x1) == false +; run: %fcmp_ge_f32(+sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_ge_f32(-sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_ge_f32(+sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_ge_f32(-sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_ge_f32(+sNaN:0x80001, +sNaN:0x1) == 0 +; run: %fcmp_ge_f32(+sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_ge_f32(-sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_ge_f32(-sNaN:0x80001, +sNaN:0x1) == 0 -function %fcmp_ge_f64(f64, f64) -> b1 { +function %fcmp_ge_f64(f64, f64) -> i8 { block0(v0: f64, v1: 
f64): v2 = fcmp ge v0, v1 return v2 } -; run: %fcmp_ge_f64(0x0.5, 0x0.5) == true -; run: %fcmp_ge_f64(0x1.0, 0x1.0) == true -; run: %fcmp_ge_f64(-0x1.0, 0x1.0) == false -; run: %fcmp_ge_f64(0x1.0, -0x1.0) == true -; run: %fcmp_ge_f64(0x0.5, 0x1.0) == false -; run: %fcmp_ge_f64(0x1.5, 0x2.9) == false -; run: %fcmp_ge_f64(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_ge_f64(0x1.4cccccccccccdp0, 0x1.8p0) == false -; run: %fcmp_ge_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == true -; run: %fcmp_ge_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == false -; run: %fcmp_ge_f64(-0x0.5, -0x1.0) == true -; run: %fcmp_ge_f64(-0x1.5, -0x2.9) == true -; run: %fcmp_ge_f64(-0x1.1p10, -0x1.3333333333333p-1) == false -; run: %fcmp_ge_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == true -; run: %fcmp_ge_f64(-0x1.8p0, -0x1.b333333333333p0) == true -; run: %fcmp_ge_f64(-0x1.4p1, -0x1.6666666666666p1) == true -; run: %fcmp_ge_f64(0x0.5, -0x1.0) == true -; run: %fcmp_ge_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == true +; run: %fcmp_ge_f64(0x0.5, 0x0.5) == 1 +; run: %fcmp_ge_f64(0x1.0, 0x1.0) == 1 +; run: %fcmp_ge_f64(-0x1.0, 0x1.0) == 0 +; run: %fcmp_ge_f64(0x1.0, -0x1.0) == 1 +; run: %fcmp_ge_f64(0x0.5, 0x1.0) == 0 +; run: %fcmp_ge_f64(0x1.5, 0x2.9) == 0 +; run: %fcmp_ge_f64(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_ge_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 0 +; run: %fcmp_ge_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 1 +; run: %fcmp_ge_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 0 +; run: %fcmp_ge_f64(-0x0.5, -0x1.0) == 1 +; run: %fcmp_ge_f64(-0x1.5, -0x2.9) == 1 +; run: %fcmp_ge_f64(-0x1.1p10, -0x1.3333333333333p-1) == 0 +; run: %fcmp_ge_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 1 +; run: %fcmp_ge_f64(-0x1.8p0, -0x1.b333333333333p0) == 1 +; run: %fcmp_ge_f64(-0x1.4p1, -0x1.6666666666666p1) == 1 +; run: %fcmp_ge_f64(0x0.5, -0x1.0) == 1 +; run: %fcmp_ge_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 1 ; Zeroes -; run: %fcmp_ge_f64(0x0.0, 0x0.0) == true -; run: %fcmp_ge_f64(-0x0.0, -0x0.0) == true -; run: %fcmp_ge_f64(0x0.0, -0x0.0) == true -; run: %fcmp_ge_f64(-0x0.0, 0x0.0) == true +; run: %fcmp_ge_f64(0x0.0, 0x0.0) == 1 +; run: %fcmp_ge_f64(-0x0.0, -0x0.0) == 1 +; run: %fcmp_ge_f64(0x0.0, -0x0.0) == 1 +; run: %fcmp_ge_f64(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_ge_f64(Inf, Inf) == true -; run: %fcmp_ge_f64(-Inf, -Inf) == true -; run: %fcmp_ge_f64(Inf, -Inf) == true -; run: %fcmp_ge_f64(-Inf, Inf) == false +; run: %fcmp_ge_f64(Inf, Inf) == 1 +; run: %fcmp_ge_f64(-Inf, -Inf) == 1 +; run: %fcmp_ge_f64(Inf, -Inf) == 1 +; run: %fcmp_ge_f64(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_ge_f64(0x0.0, Inf) == false -; run: %fcmp_ge_f64(-0x0.0, Inf) == false -; run: %fcmp_ge_f64(0x0.0, -Inf) == true -; run: %fcmp_ge_f64(-0x0.0, -Inf) == true -; run: %fcmp_ge_f64(Inf, 0x0.0) == true -; run: %fcmp_ge_f64(Inf, -0x0.0) == true -; run: %fcmp_ge_f64(-Inf, 0x0.0) == false -; run: %fcmp_ge_f64(-Inf, -0x0.0) == false +; run: %fcmp_ge_f64(0x0.0, Inf) == 0 +; run: %fcmp_ge_f64(-0x0.0, Inf) == 0 +; run: %fcmp_ge_f64(0x0.0, -Inf) == 1 +; run: %fcmp_ge_f64(-0x0.0, -Inf) == 1 +; run: %fcmp_ge_f64(Inf, 0x0.0) == 1 +; run: %fcmp_ge_f64(Inf, -0x0.0) == 1 +; run: %fcmp_ge_f64(-Inf, 0x0.0) == 0 +; run: %fcmp_ge_f64(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_ge_f64(0x1.0p-52, 0x1.0p-52) == true -; run: %fcmp_ge_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == true -; run: %fcmp_ge_f64(0x1.0p-1022, 0x1.0p-1022) == true -; run: %fcmp_ge_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == false 
-; run: %fcmp_ge_f64(0x1.0p-52, 0x1.0p-1022) == true -; run: %fcmp_ge_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == false +; run: %fcmp_ge_f64(0x1.0p-52, 0x1.0p-52) == 1 +; run: %fcmp_ge_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_ge_f64(0x1.0p-1022, 0x1.0p-1022) == 1 +; run: %fcmp_ge_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_ge_f64(0x1.0p-52, 0x1.0p-1022) == 1 +; run: %fcmp_ge_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 0 ; Subnormals -; run: %fcmp_ge_f64(0x0.8p-1022, -0x0.8p-1022) == true -; run: %fcmp_ge_f64(-0x0.8p-1022, 0x0.8p-1022) == false -; run: %fcmp_ge_f64(0x0.8p-1022, 0x0.0) == true -; run: %fcmp_ge_f64(-0x0.8p-1022, 0x0.0) == false -; run: %fcmp_ge_f64(0x0.8p-1022, -0x0.0) == true -; run: %fcmp_ge_f64(-0x0.8p-1022, -0x0.0) == false -; run: %fcmp_ge_f64(0x0.0, 0x0.8p-1022) == false -; run: %fcmp_ge_f64(0x0.0, -0x0.8p-1022) == true -; run: %fcmp_ge_f64(-0x0.0, 0x0.8p-1022) == false -; run: %fcmp_ge_f64(-0x0.0, -0x0.8p-1022) == true +; run: %fcmp_ge_f64(0x0.8p-1022, -0x0.8p-1022) == 1 +; run: %fcmp_ge_f64(-0x0.8p-1022, 0x0.8p-1022) == 0 +; run: %fcmp_ge_f64(0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_ge_f64(-0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_ge_f64(0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_ge_f64(-0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_ge_f64(0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_ge_f64(0x0.0, -0x0.8p-1022) == 1 +; run: %fcmp_ge_f64(-0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_ge_f64(-0x0.0, -0x0.8p-1022) == 1 ; NaN's -; run: %fcmp_ge_f64(+NaN, +NaN) == false -; run: %fcmp_ge_f64(-NaN, -NaN) == false -; run: %fcmp_ge_f64(+NaN, -NaN) == false -; run: %fcmp_ge_f64(-NaN, +NaN) == false +; run: %fcmp_ge_f64(+NaN, +NaN) == 0 +; run: %fcmp_ge_f64(-NaN, -NaN) == 0 +; run: %fcmp_ge_f64(+NaN, -NaN) == 0 +; run: %fcmp_ge_f64(-NaN, +NaN) == 0 -; run: %fcmp_ge_f64(+NaN, -0x1.0) == false -; run: %fcmp_ge_f64(-NaN, -0x1.0) == false -; run: %fcmp_ge_f64(+NaN, 0x1.0) == false -; run: %fcmp_ge_f64(-NaN, 0x1.0) == false -; run: %fcmp_ge_f64(+NaN, -0x0.0) == false -; run: %fcmp_ge_f64(-NaN, -0x0.0) == false -; run: %fcmp_ge_f64(+NaN, 0x0.0) == false -; run: %fcmp_ge_f64(-NaN, 0x0.0) == false -; run: %fcmp_ge_f64(+NaN, -Inf) == false -; run: %fcmp_ge_f64(-NaN, -Inf) == false -; run: %fcmp_ge_f64(+NaN, Inf) == false -; run: %fcmp_ge_f64(-NaN, Inf) == false -; run: %fcmp_ge_f64(-0x0.0, +NaN) == false -; run: %fcmp_ge_f64(-0x0.0, -NaN) == false -; run: %fcmp_ge_f64(0x0.0, +NaN) == false -; run: %fcmp_ge_f64(0x0.0, -NaN) == false -; run: %fcmp_ge_f64(-Inf, +NaN) == false -; run: %fcmp_ge_f64(-Inf, -NaN) == false -; run: %fcmp_ge_f64(Inf, +NaN) == false -; run: %fcmp_ge_f64(Inf, -NaN) == false +; run: %fcmp_ge_f64(+NaN, -0x1.0) == 0 +; run: %fcmp_ge_f64(-NaN, -0x1.0) == 0 +; run: %fcmp_ge_f64(+NaN, 0x1.0) == 0 +; run: %fcmp_ge_f64(-NaN, 0x1.0) == 0 +; run: %fcmp_ge_f64(+NaN, -0x0.0) == 0 +; run: %fcmp_ge_f64(-NaN, -0x0.0) == 0 +; run: %fcmp_ge_f64(+NaN, 0x0.0) == 0 +; run: %fcmp_ge_f64(-NaN, 0x0.0) == 0 +; run: %fcmp_ge_f64(+NaN, -Inf) == 0 +; run: %fcmp_ge_f64(-NaN, -Inf) == 0 +; run: %fcmp_ge_f64(+NaN, Inf) == 0 +; run: %fcmp_ge_f64(-NaN, Inf) == 0 +; run: %fcmp_ge_f64(-0x0.0, +NaN) == 0 +; run: %fcmp_ge_f64(-0x0.0, -NaN) == 0 +; run: %fcmp_ge_f64(0x0.0, +NaN) == 0 +; run: %fcmp_ge_f64(0x0.0, -NaN) == 0 +; run: %fcmp_ge_f64(-Inf, +NaN) == 0 +; run: %fcmp_ge_f64(-Inf, -NaN) == 0 +; run: %fcmp_ge_f64(Inf, +NaN) == 0 +; run: %fcmp_ge_f64(Inf, -NaN) == 0 -; run: %fcmp_ge_f64(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_ge_f64(-NaN:0x1, -NaN:0x1) == false -; run: 
%fcmp_ge_f64(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_ge_f64(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_ge_f64(+NaN:0x1, +NaN) == false -; run: %fcmp_ge_f64(+NaN:0x1, -NaN) == false -; run: %fcmp_ge_f64(-NaN:0x1, -NaN) == false -; run: %fcmp_ge_f64(-NaN:0x1, +NaN) == false +; run: %fcmp_ge_f64(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ge_f64(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ge_f64(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ge_f64(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ge_f64(+NaN:0x1, +NaN) == 0 +; run: %fcmp_ge_f64(+NaN:0x1, -NaN) == 0 +; run: %fcmp_ge_f64(-NaN:0x1, -NaN) == 0 +; run: %fcmp_ge_f64(-NaN:0x1, +NaN) == 0 -; run: %fcmp_ge_f64(+NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_ge_f64(-NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_ge_f64(+NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_ge_f64(-NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_ge_f64(+NaN:0x800000000001, +NaN) == false -; run: %fcmp_ge_f64(+NaN:0x800000000001, -NaN) == false -; run: %fcmp_ge_f64(-NaN:0x800000000001, -NaN) == false -; run: %fcmp_ge_f64(-NaN:0x800000000001, +NaN) == false +; run: %fcmp_ge_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_ge_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_ge_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_ge_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_ge_f64(+NaN:0x800000000001, +NaN) == 0 +; run: %fcmp_ge_f64(+NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_ge_f64(-NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_ge_f64(-NaN:0x800000000001, +NaN) == 0 ; sNaN's -; run: %fcmp_ge_f64(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_ge_f64(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_ge_f64(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_ge_f64(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_ge_f64(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_ge_f64(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_ge_f64(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_ge_f64(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_ge_f64(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_ge_f64(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_ge_f64(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_ge_f64(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_ge_f64(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_ge_f64(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_ge_f64(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_ge_f64(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_ge_f64(+sNaN:0x1, -Inf) == false -; run: %fcmp_ge_f64(-sNaN:0x1, -Inf) == false -; run: %fcmp_ge_f64(+sNaN:0x1, Inf) == false -; run: %fcmp_ge_f64(-sNaN:0x1, Inf) == false -; run: %fcmp_ge_f64(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_ge_f64(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_ge_f64(0x0.0, +sNaN:0x1) == false -; run: %fcmp_ge_f64(0x0.0, -sNaN:0x1) == false -; run: %fcmp_ge_f64(-Inf, +sNaN:0x1) == false -; run: %fcmp_ge_f64(-Inf, -sNaN:0x1) == false -; run: %fcmp_ge_f64(Inf, +sNaN:0x1) == false -; run: %fcmp_ge_f64(Inf, -sNaN:0x1) == false +; run: %fcmp_ge_f64(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_ge_f64(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_ge_f64(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_ge_f64(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_ge_f64(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_ge_f64(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_ge_f64(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_ge_f64(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_ge_f64(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_ge_f64(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_ge_f64(+sNaN:0x1, Inf) == 0 +; run: %fcmp_ge_f64(-sNaN:0x1, Inf) == 0 +; run: %fcmp_ge_f64(-0x0.0, 
+sNaN:0x1) == 0 +; run: %fcmp_ge_f64(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_ge_f64(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_ge_f64(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_ge_f64(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_ge_f64(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_ge_f64(Inf, +sNaN:0x1) == 0 +; run: %fcmp_ge_f64(Inf, -sNaN:0x1) == 0 -; run: %fcmp_ge_f64(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_ge_f64(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_ge_f64(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_ge_f64(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_ge_f64(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_ge_f64(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_ge_f64(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_ge_f64(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_ge_f64(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ge_f64(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ge_f64(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ge_f64(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ge_f64(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_ge_f64(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_ge_f64(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_ge_f64(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_ge_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_ge_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_ge_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_ge_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_ge_f64(+sNaN:0x800000000001, +sNaN:0x1) == false -; run: %fcmp_ge_f64(+sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_ge_f64(-sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_ge_f64(-sNaN:0x800000000001, +sNaN:0x1) == false +; run: %fcmp_ge_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_ge_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_ge_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_ge_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_ge_f64(+sNaN:0x800000000001, +sNaN:0x1) == 0 +; run: %fcmp_ge_f64(+sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_ge_f64(-sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_ge_f64(-sNaN:0x800000000001, +sNaN:0x1) == 0 diff --git a/cranelift/filetests/filetests/runtests/fcmp-gt.clif b/cranelift/filetests/filetests/runtests/fcmp-gt.clif index f29e0c99d598..199b1173c7ee 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-gt.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-gt.clif @@ -5,316 +5,316 @@ target aarch64 target s390x target riscv64 -function %fcmp_gt_f32(f32, f32) -> b1 { +function %fcmp_gt_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp gt v0, v1 return v2 } -; run: %fcmp_gt_f32(0x0.5, 0x0.5) == false -; run: %fcmp_gt_f32(0x1.0, 0x1.0) == false -; run: %fcmp_gt_f32(-0x1.0, 0x1.0) == false -; run: %fcmp_gt_f32(0x1.0, -0x1.0) == true -; run: %fcmp_gt_f32(0x0.5, 0x1.0) == false -; run: %fcmp_gt_f32(0x1.5, 0x2.9) == false -; run: %fcmp_gt_f32(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_gt_f32(0x1.4cccccp0, 0x1.8p0) == false -; run: %fcmp_gt_f32(0x1.b33334p0, 0x1.99999ap-2) == true -; run: %fcmp_gt_f32(0x1.333334p-1, 0x1.666666p1) == false -; run: %fcmp_gt_f32(-0x0.5, -0x1.0) == true -; run: %fcmp_gt_f32(-0x1.5, -0x2.9) == true -; run: %fcmp_gt_f32(-0x1.1p10, -0x1.333334p-1) == false -; run: %fcmp_gt_f32(-0x1.99999ap-2, -0x1.4cccccp0) == true -; run: %fcmp_gt_f32(-0x1.8p0, -0x1.b33334p0) == true -; run: %fcmp_gt_f32(-0x1.4p1, -0x1.666666p1) == true -; run: %fcmp_gt_f32(0x0.5, -0x1.0) == true -; run: %fcmp_gt_f32(0x1.b33334p0, -0x1.b33334p0) == true +; run: 
%fcmp_gt_f32(0x0.5, 0x0.5) == 0 +; run: %fcmp_gt_f32(0x1.0, 0x1.0) == 0 +; run: %fcmp_gt_f32(-0x1.0, 0x1.0) == 0 +; run: %fcmp_gt_f32(0x1.0, -0x1.0) == 1 +; run: %fcmp_gt_f32(0x0.5, 0x1.0) == 0 +; run: %fcmp_gt_f32(0x1.5, 0x2.9) == 0 +; run: %fcmp_gt_f32(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_gt_f32(0x1.4cccccp0, 0x1.8p0) == 0 +; run: %fcmp_gt_f32(0x1.b33334p0, 0x1.99999ap-2) == 1 +; run: %fcmp_gt_f32(0x1.333334p-1, 0x1.666666p1) == 0 +; run: %fcmp_gt_f32(-0x0.5, -0x1.0) == 1 +; run: %fcmp_gt_f32(-0x1.5, -0x2.9) == 1 +; run: %fcmp_gt_f32(-0x1.1p10, -0x1.333334p-1) == 0 +; run: %fcmp_gt_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 1 +; run: %fcmp_gt_f32(-0x1.8p0, -0x1.b33334p0) == 1 +; run: %fcmp_gt_f32(-0x1.4p1, -0x1.666666p1) == 1 +; run: %fcmp_gt_f32(0x0.5, -0x1.0) == 1 +; run: %fcmp_gt_f32(0x1.b33334p0, -0x1.b33334p0) == 1 ; Zeroes -; run: %fcmp_gt_f32(0x0.0, 0x0.0) == false -; run: %fcmp_gt_f32(-0x0.0, -0x0.0) == false -; run: %fcmp_gt_f32(0x0.0, -0x0.0) == false -; run: %fcmp_gt_f32(-0x0.0, 0x0.0) == false +; run: %fcmp_gt_f32(0x0.0, 0x0.0) == 0 +; run: %fcmp_gt_f32(-0x0.0, -0x0.0) == 0 +; run: %fcmp_gt_f32(0x0.0, -0x0.0) == 0 +; run: %fcmp_gt_f32(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_gt_f32(Inf, Inf) == false -; run: %fcmp_gt_f32(-Inf, -Inf) == false -; run: %fcmp_gt_f32(Inf, -Inf) == true -; run: %fcmp_gt_f32(-Inf, Inf) == false +; run: %fcmp_gt_f32(Inf, Inf) == 0 +; run: %fcmp_gt_f32(-Inf, -Inf) == 0 +; run: %fcmp_gt_f32(Inf, -Inf) == 1 +; run: %fcmp_gt_f32(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_gt_f32(0x0.0, Inf) == false -; run: %fcmp_gt_f32(-0x0.0, Inf) == false -; run: %fcmp_gt_f32(0x0.0, -Inf) == true -; run: %fcmp_gt_f32(-0x0.0, -Inf) == true -; run: %fcmp_gt_f32(Inf, 0x0.0) == true -; run: %fcmp_gt_f32(Inf, -0x0.0) == true -; run: %fcmp_gt_f32(-Inf, 0x0.0) == false -; run: %fcmp_gt_f32(-Inf, -0x0.0) == false +; run: %fcmp_gt_f32(0x0.0, Inf) == 0 +; run: %fcmp_gt_f32(-0x0.0, Inf) == 0 +; run: %fcmp_gt_f32(0x0.0, -Inf) == 1 +; run: %fcmp_gt_f32(-0x0.0, -Inf) == 1 +; run: %fcmp_gt_f32(Inf, 0x0.0) == 1 +; run: %fcmp_gt_f32(Inf, -0x0.0) == 1 +; run: %fcmp_gt_f32(-Inf, 0x0.0) == 0 +; run: %fcmp_gt_f32(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_gt_f32(0x1.0p-23, 0x1.0p-23) == false -; run: %fcmp_gt_f32(0x1.fffffep127, 0x1.fffffep127) == false -; run: %fcmp_gt_f32(0x1.0p-126, 0x1.0p-126) == false -; run: %fcmp_gt_f32(0x1.0p-23, 0x1.fffffep127) == false -; run: %fcmp_gt_f32(0x1.0p-23, 0x1.0p-126) == true -; run: %fcmp_gt_f32(0x1.0p-126, 0x1.fffffep127) == false +; run: %fcmp_gt_f32(0x1.0p-23, 0x1.0p-23) == 0 +; run: %fcmp_gt_f32(0x1.fffffep127, 0x1.fffffep127) == 0 +; run: %fcmp_gt_f32(0x1.0p-126, 0x1.0p-126) == 0 +; run: %fcmp_gt_f32(0x1.0p-23, 0x1.fffffep127) == 0 +; run: %fcmp_gt_f32(0x1.0p-23, 0x1.0p-126) == 1 +; run: %fcmp_gt_f32(0x1.0p-126, 0x1.fffffep127) == 0 ; Subnormals -; run: %fcmp_gt_f32(0x0.800002p-126, -0x0.800002p-126) == true -; run: %fcmp_gt_f32(-0x0.800002p-126, 0x0.800002p-126) == false -; run: %fcmp_gt_f32(0x0.800002p-126, 0x0.0) == true -; run: %fcmp_gt_f32(-0x0.800002p-126, 0x0.0) == false -; run: %fcmp_gt_f32(0x0.800002p-126, -0x0.0) == true -; run: %fcmp_gt_f32(-0x0.800002p-126, -0x0.0) == false -; run: %fcmp_gt_f32(0x0.0, 0x0.800002p-126) == false -; run: %fcmp_gt_f32(0x0.0, -0x0.800002p-126) == true -; run: %fcmp_gt_f32(-0x0.0, 0x0.800002p-126) == false -; run: %fcmp_gt_f32(-0x0.0, -0x0.800002p-126) == true +; run: %fcmp_gt_f32(0x0.800002p-126, -0x0.800002p-126) == 1 +; run: %fcmp_gt_f32(-0x0.800002p-126, 0x0.800002p-126) == 0 +; 
run: %fcmp_gt_f32(0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_gt_f32(-0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_gt_f32(0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_gt_f32(-0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_gt_f32(0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_gt_f32(0x0.0, -0x0.800002p-126) == 1 +; run: %fcmp_gt_f32(-0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_gt_f32(-0x0.0, -0x0.800002p-126) == 1 ; NaN's -; run: %fcmp_gt_f32(+NaN, +NaN) == false -; run: %fcmp_gt_f32(-NaN, -NaN) == false -; run: %fcmp_gt_f32(+NaN, -NaN) == false -; run: %fcmp_gt_f32(-NaN, +NaN) == false +; run: %fcmp_gt_f32(+NaN, +NaN) == 0 +; run: %fcmp_gt_f32(-NaN, -NaN) == 0 +; run: %fcmp_gt_f32(+NaN, -NaN) == 0 +; run: %fcmp_gt_f32(-NaN, +NaN) == 0 -; run: %fcmp_gt_f32(+NaN, -0x1.0) == false -; run: %fcmp_gt_f32(-NaN, -0x1.0) == false -; run: %fcmp_gt_f32(+NaN, 0x1.0) == false -; run: %fcmp_gt_f32(-NaN, 0x1.0) == false -; run: %fcmp_gt_f32(+NaN, -0x0.0) == false -; run: %fcmp_gt_f32(-NaN, -0x0.0) == false -; run: %fcmp_gt_f32(+NaN, 0x0.0) == false -; run: %fcmp_gt_f32(-NaN, 0x0.0) == false -; run: %fcmp_gt_f32(+NaN, -Inf) == false -; run: %fcmp_gt_f32(-NaN, -Inf) == false -; run: %fcmp_gt_f32(+NaN, Inf) == false -; run: %fcmp_gt_f32(-NaN, Inf) == false -; run: %fcmp_gt_f32(-0x0.0, +NaN) == false -; run: %fcmp_gt_f32(-0x0.0, -NaN) == false -; run: %fcmp_gt_f32(0x0.0, +NaN) == false -; run: %fcmp_gt_f32(0x0.0, -NaN) == false -; run: %fcmp_gt_f32(-Inf, +NaN) == false -; run: %fcmp_gt_f32(-Inf, -NaN) == false -; run: %fcmp_gt_f32(Inf, +NaN) == false -; run: %fcmp_gt_f32(Inf, -NaN) == false +; run: %fcmp_gt_f32(+NaN, -0x1.0) == 0 +; run: %fcmp_gt_f32(-NaN, -0x1.0) == 0 +; run: %fcmp_gt_f32(+NaN, 0x1.0) == 0 +; run: %fcmp_gt_f32(-NaN, 0x1.0) == 0 +; run: %fcmp_gt_f32(+NaN, -0x0.0) == 0 +; run: %fcmp_gt_f32(-NaN, -0x0.0) == 0 +; run: %fcmp_gt_f32(+NaN, 0x0.0) == 0 +; run: %fcmp_gt_f32(-NaN, 0x0.0) == 0 +; run: %fcmp_gt_f32(+NaN, -Inf) == 0 +; run: %fcmp_gt_f32(-NaN, -Inf) == 0 +; run: %fcmp_gt_f32(+NaN, Inf) == 0 +; run: %fcmp_gt_f32(-NaN, Inf) == 0 +; run: %fcmp_gt_f32(-0x0.0, +NaN) == 0 +; run: %fcmp_gt_f32(-0x0.0, -NaN) == 0 +; run: %fcmp_gt_f32(0x0.0, +NaN) == 0 +; run: %fcmp_gt_f32(0x0.0, -NaN) == 0 +; run: %fcmp_gt_f32(-Inf, +NaN) == 0 +; run: %fcmp_gt_f32(-Inf, -NaN) == 0 +; run: %fcmp_gt_f32(Inf, +NaN) == 0 +; run: %fcmp_gt_f32(Inf, -NaN) == 0 -; run: %fcmp_gt_f32(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_gt_f32(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_gt_f32(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_gt_f32(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_gt_f32(+NaN:0x1, +NaN) == false -; run: %fcmp_gt_f32(+NaN:0x1, -NaN) == false -; run: %fcmp_gt_f32(-NaN:0x1, -NaN) == false -; run: %fcmp_gt_f32(-NaN:0x1, +NaN) == false +; run: %fcmp_gt_f32(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_gt_f32(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_gt_f32(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_gt_f32(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_gt_f32(+NaN:0x1, +NaN) == 0 +; run: %fcmp_gt_f32(+NaN:0x1, -NaN) == 0 +; run: %fcmp_gt_f32(-NaN:0x1, -NaN) == 0 +; run: %fcmp_gt_f32(-NaN:0x1, +NaN) == 0 -; run: %fcmp_gt_f32(+NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_gt_f32(-NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_gt_f32(+NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_gt_f32(-NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_gt_f32(+NaN:0x80001, +NaN) == false -; run: %fcmp_gt_f32(+NaN:0x80001, -NaN) == false -; run: %fcmp_gt_f32(-NaN:0x80001, -NaN) == false -; run: %fcmp_gt_f32(-NaN:0x80001, +NaN) == false +; run: 
%fcmp_gt_f32(+NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_gt_f32(-NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_gt_f32(+NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_gt_f32(-NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_gt_f32(+NaN:0x80001, +NaN) == 0 +; run: %fcmp_gt_f32(+NaN:0x80001, -NaN) == 0 +; run: %fcmp_gt_f32(-NaN:0x80001, -NaN) == 0 +; run: %fcmp_gt_f32(-NaN:0x80001, +NaN) == 0 ; sNaN's -; run: %fcmp_gt_f32(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_gt_f32(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_gt_f32(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_gt_f32(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_gt_f32(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_gt_f32(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_gt_f32(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_gt_f32(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_gt_f32(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_gt_f32(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_gt_f32(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_gt_f32(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_gt_f32(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_gt_f32(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_gt_f32(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_gt_f32(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_gt_f32(+sNaN:0x1, -Inf) == false -; run: %fcmp_gt_f32(-sNaN:0x1, -Inf) == false -; run: %fcmp_gt_f32(+sNaN:0x1, Inf) == false -; run: %fcmp_gt_f32(-sNaN:0x1, Inf) == false -; run: %fcmp_gt_f32(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_gt_f32(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_gt_f32(0x0.0, +sNaN:0x1) == false -; run: %fcmp_gt_f32(0x0.0, -sNaN:0x1) == false -; run: %fcmp_gt_f32(-Inf, +sNaN:0x1) == false -; run: %fcmp_gt_f32(-Inf, -sNaN:0x1) == false -; run: %fcmp_gt_f32(Inf, +sNaN:0x1) == false -; run: %fcmp_gt_f32(Inf, -sNaN:0x1) == false +; run: %fcmp_gt_f32(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_gt_f32(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_gt_f32(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_gt_f32(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_gt_f32(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_gt_f32(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_gt_f32(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_gt_f32(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_gt_f32(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_gt_f32(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_gt_f32(+sNaN:0x1, Inf) == 0 +; run: %fcmp_gt_f32(-sNaN:0x1, Inf) == 0 +; run: %fcmp_gt_f32(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_gt_f32(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_gt_f32(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_gt_f32(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_gt_f32(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_gt_f32(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_gt_f32(Inf, +sNaN:0x1) == 0 +; run: %fcmp_gt_f32(Inf, -sNaN:0x1) == 0 -; run: %fcmp_gt_f32(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_gt_f32(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_gt_f32(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_gt_f32(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_gt_f32(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_gt_f32(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_gt_f32(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_gt_f32(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_gt_f32(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_gt_f32(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_gt_f32(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_gt_f32(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_gt_f32(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_gt_f32(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_gt_f32(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_gt_f32(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_gt_f32(+sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_gt_f32(-sNaN:0x80001, -sNaN:0x80001) == false -; run: 
%fcmp_gt_f32(+sNaN:0x80001, -sNaN:0x80001) == false -; run: %fcmp_gt_f32(-sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_gt_f32(+sNaN:0x80001, +sNaN:0x1) == false -; run: %fcmp_gt_f32(+sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_gt_f32(-sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_gt_f32(-sNaN:0x80001, +sNaN:0x1) == false +; run: %fcmp_gt_f32(+sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_gt_f32(-sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_gt_f32(+sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_gt_f32(-sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_gt_f32(+sNaN:0x80001, +sNaN:0x1) == 0 +; run: %fcmp_gt_f32(+sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_gt_f32(-sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_gt_f32(-sNaN:0x80001, +sNaN:0x1) == 0 -function %fcmp_gt_f64(f64, f64) -> b1 { +function %fcmp_gt_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp gt v0, v1 return v2 } -; run: %fcmp_gt_f64(0x0.5, 0x0.5) == false -; run: %fcmp_gt_f64(0x1.0, 0x1.0) == false -; run: %fcmp_gt_f64(-0x1.0, 0x1.0) == false -; run: %fcmp_gt_f64(0x1.0, -0x1.0) == true -; run: %fcmp_gt_f64(0x0.5, 0x1.0) == false -; run: %fcmp_gt_f64(0x1.5, 0x2.9) == false -; run: %fcmp_gt_f64(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_gt_f64(0x1.4cccccccccccdp0, 0x1.8p0) == false -; run: %fcmp_gt_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == true -; run: %fcmp_gt_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == false -; run: %fcmp_gt_f64(-0x0.5, -0x1.0) == true -; run: %fcmp_gt_f64(-0x1.5, -0x2.9) == true -; run: %fcmp_gt_f64(-0x1.1p10, -0x1.3333333333333p-1) == false -; run: %fcmp_gt_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == true -; run: %fcmp_gt_f64(-0x1.8p0, -0x1.b333333333333p0) == true -; run: %fcmp_gt_f64(-0x1.4p1, -0x1.6666666666666p1) == true -; run: %fcmp_gt_f64(0x0.5, -0x1.0) == true -; run: %fcmp_gt_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == true +; run: %fcmp_gt_f64(0x0.5, 0x0.5) == 0 +; run: %fcmp_gt_f64(0x1.0, 0x1.0) == 0 +; run: %fcmp_gt_f64(-0x1.0, 0x1.0) == 0 +; run: %fcmp_gt_f64(0x1.0, -0x1.0) == 1 +; run: %fcmp_gt_f64(0x0.5, 0x1.0) == 0 +; run: %fcmp_gt_f64(0x1.5, 0x2.9) == 0 +; run: %fcmp_gt_f64(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_gt_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 0 +; run: %fcmp_gt_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 1 +; run: %fcmp_gt_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 0 +; run: %fcmp_gt_f64(-0x0.5, -0x1.0) == 1 +; run: %fcmp_gt_f64(-0x1.5, -0x2.9) == 1 +; run: %fcmp_gt_f64(-0x1.1p10, -0x1.3333333333333p-1) == 0 +; run: %fcmp_gt_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 1 +; run: %fcmp_gt_f64(-0x1.8p0, -0x1.b333333333333p0) == 1 +; run: %fcmp_gt_f64(-0x1.4p1, -0x1.6666666666666p1) == 1 +; run: %fcmp_gt_f64(0x0.5, -0x1.0) == 1 +; run: %fcmp_gt_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 1 ; Zeroes -; run: %fcmp_gt_f64(0x0.0, 0x0.0) == false -; run: %fcmp_gt_f64(-0x0.0, -0x0.0) == false -; run: %fcmp_gt_f64(0x0.0, -0x0.0) == false -; run: %fcmp_gt_f64(-0x0.0, 0x0.0) == false +; run: %fcmp_gt_f64(0x0.0, 0x0.0) == 0 +; run: %fcmp_gt_f64(-0x0.0, -0x0.0) == 0 +; run: %fcmp_gt_f64(0x0.0, -0x0.0) == 0 +; run: %fcmp_gt_f64(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_gt_f64(Inf, Inf) == false -; run: %fcmp_gt_f64(-Inf, -Inf) == false -; run: %fcmp_gt_f64(Inf, -Inf) == true -; run: %fcmp_gt_f64(-Inf, Inf) == false +; run: %fcmp_gt_f64(Inf, Inf) == 0 +; run: %fcmp_gt_f64(-Inf, -Inf) == 0 +; run: %fcmp_gt_f64(Inf, -Inf) == 1 +; run: %fcmp_gt_f64(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_gt_f64(0x0.0, Inf) == false -; run: 
%fcmp_gt_f64(-0x0.0, Inf) == false -; run: %fcmp_gt_f64(0x0.0, -Inf) == true -; run: %fcmp_gt_f64(-0x0.0, -Inf) == true -; run: %fcmp_gt_f64(Inf, 0x0.0) == true -; run: %fcmp_gt_f64(Inf, -0x0.0) == true -; run: %fcmp_gt_f64(-Inf, 0x0.0) == false -; run: %fcmp_gt_f64(-Inf, -0x0.0) == false +; run: %fcmp_gt_f64(0x0.0, Inf) == 0 +; run: %fcmp_gt_f64(-0x0.0, Inf) == 0 +; run: %fcmp_gt_f64(0x0.0, -Inf) == 1 +; run: %fcmp_gt_f64(-0x0.0, -Inf) == 1 +; run: %fcmp_gt_f64(Inf, 0x0.0) == 1 +; run: %fcmp_gt_f64(Inf, -0x0.0) == 1 +; run: %fcmp_gt_f64(-Inf, 0x0.0) == 0 +; run: %fcmp_gt_f64(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_gt_f64(0x1.0p-52, 0x1.0p-52) == false -; run: %fcmp_gt_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == false -; run: %fcmp_gt_f64(0x1.0p-1022, 0x1.0p-1022) == false -; run: %fcmp_gt_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == false -; run: %fcmp_gt_f64(0x1.0p-52, 0x1.0p-1022) == true -; run: %fcmp_gt_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == false +; run: %fcmp_gt_f64(0x1.0p-52, 0x1.0p-52) == 0 +; run: %fcmp_gt_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_gt_f64(0x1.0p-1022, 0x1.0p-1022) == 0 +; run: %fcmp_gt_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_gt_f64(0x1.0p-52, 0x1.0p-1022) == 1 +; run: %fcmp_gt_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 0 ; Subnormals -; run: %fcmp_gt_f64(0x0.8p-1022, -0x0.8p-1022) == true -; run: %fcmp_gt_f64(-0x0.8p-1022, 0x0.8p-1022) == false -; run: %fcmp_gt_f64(0x0.8p-1022, 0x0.0) == true -; run: %fcmp_gt_f64(-0x0.8p-1022, 0x0.0) == false -; run: %fcmp_gt_f64(0x0.8p-1022, -0x0.0) == true -; run: %fcmp_gt_f64(-0x0.8p-1022, -0x0.0) == false -; run: %fcmp_gt_f64(0x0.0, 0x0.8p-1022) == false -; run: %fcmp_gt_f64(0x0.0, -0x0.8p-1022) == true -; run: %fcmp_gt_f64(-0x0.0, 0x0.8p-1022) == false -; run: %fcmp_gt_f64(-0x0.0, -0x0.8p-1022) == true +; run: %fcmp_gt_f64(0x0.8p-1022, -0x0.8p-1022) == 1 +; run: %fcmp_gt_f64(-0x0.8p-1022, 0x0.8p-1022) == 0 +; run: %fcmp_gt_f64(0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_gt_f64(-0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_gt_f64(0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_gt_f64(-0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_gt_f64(0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_gt_f64(0x0.0, -0x0.8p-1022) == 1 +; run: %fcmp_gt_f64(-0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_gt_f64(-0x0.0, -0x0.8p-1022) == 1 ; NaN's -; run: %fcmp_gt_f64(+NaN, +NaN) == false -; run: %fcmp_gt_f64(-NaN, -NaN) == false -; run: %fcmp_gt_f64(+NaN, -NaN) == false -; run: %fcmp_gt_f64(-NaN, +NaN) == false +; run: %fcmp_gt_f64(+NaN, +NaN) == 0 +; run: %fcmp_gt_f64(-NaN, -NaN) == 0 +; run: %fcmp_gt_f64(+NaN, -NaN) == 0 +; run: %fcmp_gt_f64(-NaN, +NaN) == 0 -; run: %fcmp_gt_f64(+NaN, -0x1.0) == false -; run: %fcmp_gt_f64(-NaN, -0x1.0) == false -; run: %fcmp_gt_f64(+NaN, 0x1.0) == false -; run: %fcmp_gt_f64(-NaN, 0x1.0) == false -; run: %fcmp_gt_f64(+NaN, -0x0.0) == false -; run: %fcmp_gt_f64(-NaN, -0x0.0) == false -; run: %fcmp_gt_f64(+NaN, 0x0.0) == false -; run: %fcmp_gt_f64(-NaN, 0x0.0) == false -; run: %fcmp_gt_f64(+NaN, -Inf) == false -; run: %fcmp_gt_f64(-NaN, -Inf) == false -; run: %fcmp_gt_f64(+NaN, Inf) == false -; run: %fcmp_gt_f64(-NaN, Inf) == false -; run: %fcmp_gt_f64(-0x0.0, +NaN) == false -; run: %fcmp_gt_f64(-0x0.0, -NaN) == false -; run: %fcmp_gt_f64(0x0.0, +NaN) == false -; run: %fcmp_gt_f64(0x0.0, -NaN) == false -; run: %fcmp_gt_f64(-Inf, +NaN) == false -; run: %fcmp_gt_f64(-Inf, -NaN) == false -; run: %fcmp_gt_f64(Inf, +NaN) == false -; run: %fcmp_gt_f64(Inf, -NaN) == false +; run: 
%fcmp_gt_f64(+NaN, -0x1.0) == 0 +; run: %fcmp_gt_f64(-NaN, -0x1.0) == 0 +; run: %fcmp_gt_f64(+NaN, 0x1.0) == 0 +; run: %fcmp_gt_f64(-NaN, 0x1.0) == 0 +; run: %fcmp_gt_f64(+NaN, -0x0.0) == 0 +; run: %fcmp_gt_f64(-NaN, -0x0.0) == 0 +; run: %fcmp_gt_f64(+NaN, 0x0.0) == 0 +; run: %fcmp_gt_f64(-NaN, 0x0.0) == 0 +; run: %fcmp_gt_f64(+NaN, -Inf) == 0 +; run: %fcmp_gt_f64(-NaN, -Inf) == 0 +; run: %fcmp_gt_f64(+NaN, Inf) == 0 +; run: %fcmp_gt_f64(-NaN, Inf) == 0 +; run: %fcmp_gt_f64(-0x0.0, +NaN) == 0 +; run: %fcmp_gt_f64(-0x0.0, -NaN) == 0 +; run: %fcmp_gt_f64(0x0.0, +NaN) == 0 +; run: %fcmp_gt_f64(0x0.0, -NaN) == 0 +; run: %fcmp_gt_f64(-Inf, +NaN) == 0 +; run: %fcmp_gt_f64(-Inf, -NaN) == 0 +; run: %fcmp_gt_f64(Inf, +NaN) == 0 +; run: %fcmp_gt_f64(Inf, -NaN) == 0 -; run: %fcmp_gt_f64(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_gt_f64(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_gt_f64(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_gt_f64(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_gt_f64(+NaN:0x1, +NaN) == false -; run: %fcmp_gt_f64(+NaN:0x1, -NaN) == false -; run: %fcmp_gt_f64(-NaN:0x1, -NaN) == false -; run: %fcmp_gt_f64(-NaN:0x1, +NaN) == false +; run: %fcmp_gt_f64(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_gt_f64(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_gt_f64(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_gt_f64(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_gt_f64(+NaN:0x1, +NaN) == 0 +; run: %fcmp_gt_f64(+NaN:0x1, -NaN) == 0 +; run: %fcmp_gt_f64(-NaN:0x1, -NaN) == 0 +; run: %fcmp_gt_f64(-NaN:0x1, +NaN) == 0 -; run: %fcmp_gt_f64(+NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_gt_f64(-NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_gt_f64(+NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_gt_f64(-NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_gt_f64(+NaN:0x800000000001, +NaN) == false -; run: %fcmp_gt_f64(+NaN:0x800000000001, -NaN) == false -; run: %fcmp_gt_f64(-NaN:0x800000000001, -NaN) == false -; run: %fcmp_gt_f64(-NaN:0x800000000001, +NaN) == false +; run: %fcmp_gt_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_gt_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_gt_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_gt_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_gt_f64(+NaN:0x800000000001, +NaN) == 0 +; run: %fcmp_gt_f64(+NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_gt_f64(-NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_gt_f64(-NaN:0x800000000001, +NaN) == 0 ; sNaN's -; run: %fcmp_gt_f64(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_gt_f64(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_gt_f64(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_gt_f64(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_gt_f64(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_gt_f64(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_gt_f64(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_gt_f64(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_gt_f64(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_gt_f64(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_gt_f64(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_gt_f64(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_gt_f64(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_gt_f64(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_gt_f64(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_gt_f64(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_gt_f64(+sNaN:0x1, -Inf) == false -; run: %fcmp_gt_f64(-sNaN:0x1, -Inf) == false -; run: %fcmp_gt_f64(+sNaN:0x1, Inf) == false -; run: %fcmp_gt_f64(-sNaN:0x1, Inf) == false -; run: %fcmp_gt_f64(-0x0.0, +sNaN:0x1) == false -; run: 
%fcmp_gt_f64(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_gt_f64(0x0.0, +sNaN:0x1) == false -; run: %fcmp_gt_f64(0x0.0, -sNaN:0x1) == false -; run: %fcmp_gt_f64(-Inf, +sNaN:0x1) == false -; run: %fcmp_gt_f64(-Inf, -sNaN:0x1) == false -; run: %fcmp_gt_f64(Inf, +sNaN:0x1) == false -; run: %fcmp_gt_f64(Inf, -sNaN:0x1) == false +; run: %fcmp_gt_f64(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_gt_f64(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_gt_f64(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_gt_f64(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_gt_f64(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_gt_f64(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_gt_f64(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_gt_f64(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_gt_f64(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_gt_f64(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_gt_f64(+sNaN:0x1, Inf) == 0 +; run: %fcmp_gt_f64(-sNaN:0x1, Inf) == 0 +; run: %fcmp_gt_f64(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_gt_f64(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_gt_f64(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_gt_f64(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_gt_f64(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_gt_f64(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_gt_f64(Inf, +sNaN:0x1) == 0 +; run: %fcmp_gt_f64(Inf, -sNaN:0x1) == 0 -; run: %fcmp_gt_f64(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_gt_f64(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_gt_f64(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_gt_f64(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_gt_f64(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_gt_f64(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_gt_f64(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_gt_f64(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_gt_f64(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_gt_f64(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_gt_f64(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_gt_f64(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_gt_f64(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_gt_f64(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_gt_f64(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_gt_f64(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_gt_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_gt_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_gt_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_gt_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_gt_f64(+sNaN:0x800000000001, +sNaN:0x1) == false -; run: %fcmp_gt_f64(+sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_gt_f64(-sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_gt_f64(-sNaN:0x800000000001, +sNaN:0x1) == false +; run: %fcmp_gt_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_gt_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_gt_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_gt_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_gt_f64(+sNaN:0x800000000001, +sNaN:0x1) == 0 +; run: %fcmp_gt_f64(+sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_gt_f64(-sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_gt_f64(-sNaN:0x800000000001, +sNaN:0x1) == 0 diff --git a/cranelift/filetests/filetests/runtests/fcmp-le.clif b/cranelift/filetests/filetests/runtests/fcmp-le.clif index f5ea4e17fde0..755b018c1b9d 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-le.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-le.clif @@ -5,316 +5,316 @@ target aarch64 target s390x target riscv64 -function %fcmp_le_f32(f32, f32) -> b1 { +function %fcmp_le_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp le v0, v1 return v2 } -; run: %fcmp_le_f32(0x0.5, 
0x0.5) == true -; run: %fcmp_le_f32(0x1.0, 0x1.0) == true -; run: %fcmp_le_f32(-0x1.0, 0x1.0) == true -; run: %fcmp_le_f32(0x1.0, -0x1.0) == false -; run: %fcmp_le_f32(0x0.5, 0x1.0) == true -; run: %fcmp_le_f32(0x1.5, 0x2.9) == true -; run: %fcmp_le_f32(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_le_f32(0x1.4cccccp0, 0x1.8p0) == true -; run: %fcmp_le_f32(0x1.b33334p0, 0x1.99999ap-2) == false -; run: %fcmp_le_f32(0x1.333334p-1, 0x1.666666p1) == true -; run: %fcmp_le_f32(-0x0.5, -0x1.0) == false -; run: %fcmp_le_f32(-0x1.5, -0x2.9) == false -; run: %fcmp_le_f32(-0x1.1p10, -0x1.333334p-1) == true -; run: %fcmp_le_f32(-0x1.99999ap-2, -0x1.4cccccp0) == false -; run: %fcmp_le_f32(-0x1.8p0, -0x1.b33334p0) == false -; run: %fcmp_le_f32(-0x1.4p1, -0x1.666666p1) == false -; run: %fcmp_le_f32(0x0.5, -0x1.0) == false -; run: %fcmp_le_f32(0x1.b33334p0, -0x1.b33334p0) == false +; run: %fcmp_le_f32(0x0.5, 0x0.5) == 1 +; run: %fcmp_le_f32(0x1.0, 0x1.0) == 1 +; run: %fcmp_le_f32(-0x1.0, 0x1.0) == 1 +; run: %fcmp_le_f32(0x1.0, -0x1.0) == 0 +; run: %fcmp_le_f32(0x0.5, 0x1.0) == 1 +; run: %fcmp_le_f32(0x1.5, 0x2.9) == 1 +; run: %fcmp_le_f32(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_le_f32(0x1.4cccccp0, 0x1.8p0) == 1 +; run: %fcmp_le_f32(0x1.b33334p0, 0x1.99999ap-2) == 0 +; run: %fcmp_le_f32(0x1.333334p-1, 0x1.666666p1) == 1 +; run: %fcmp_le_f32(-0x0.5, -0x1.0) == 0 +; run: %fcmp_le_f32(-0x1.5, -0x2.9) == 0 +; run: %fcmp_le_f32(-0x1.1p10, -0x1.333334p-1) == 1 +; run: %fcmp_le_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 0 +; run: %fcmp_le_f32(-0x1.8p0, -0x1.b33334p0) == 0 +; run: %fcmp_le_f32(-0x1.4p1, -0x1.666666p1) == 0 +; run: %fcmp_le_f32(0x0.5, -0x1.0) == 0 +; run: %fcmp_le_f32(0x1.b33334p0, -0x1.b33334p0) == 0 ; Zeroes -; run: %fcmp_le_f32(0x0.0, 0x0.0) == true -; run: %fcmp_le_f32(-0x0.0, -0x0.0) == true -; run: %fcmp_le_f32(0x0.0, -0x0.0) == true -; run: %fcmp_le_f32(-0x0.0, 0x0.0) == true +; run: %fcmp_le_f32(0x0.0, 0x0.0) == 1 +; run: %fcmp_le_f32(-0x0.0, -0x0.0) == 1 +; run: %fcmp_le_f32(0x0.0, -0x0.0) == 1 +; run: %fcmp_le_f32(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_le_f32(Inf, Inf) == true -; run: %fcmp_le_f32(-Inf, -Inf) == true -; run: %fcmp_le_f32(Inf, -Inf) == false -; run: %fcmp_le_f32(-Inf, Inf) == true +; run: %fcmp_le_f32(Inf, Inf) == 1 +; run: %fcmp_le_f32(-Inf, -Inf) == 1 +; run: %fcmp_le_f32(Inf, -Inf) == 0 +; run: %fcmp_le_f32(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_le_f32(0x0.0, Inf) == true -; run: %fcmp_le_f32(-0x0.0, Inf) == true -; run: %fcmp_le_f32(0x0.0, -Inf) == false -; run: %fcmp_le_f32(-0x0.0, -Inf) == false -; run: %fcmp_le_f32(Inf, 0x0.0) == false -; run: %fcmp_le_f32(Inf, -0x0.0) == false -; run: %fcmp_le_f32(-Inf, 0x0.0) == true -; run: %fcmp_le_f32(-Inf, -0x0.0) == true +; run: %fcmp_le_f32(0x0.0, Inf) == 1 +; run: %fcmp_le_f32(-0x0.0, Inf) == 1 +; run: %fcmp_le_f32(0x0.0, -Inf) == 0 +; run: %fcmp_le_f32(-0x0.0, -Inf) == 0 +; run: %fcmp_le_f32(Inf, 0x0.0) == 0 +; run: %fcmp_le_f32(Inf, -0x0.0) == 0 +; run: %fcmp_le_f32(-Inf, 0x0.0) == 1 +; run: %fcmp_le_f32(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_le_f32(0x1.0p-23, 0x1.0p-23) == true -; run: %fcmp_le_f32(0x1.fffffep127, 0x1.fffffep127) == true -; run: %fcmp_le_f32(0x1.0p-126, 0x1.0p-126) == true -; run: %fcmp_le_f32(0x1.0p-23, 0x1.fffffep127) == true -; run: %fcmp_le_f32(0x1.0p-23, 0x1.0p-126) == false -; run: %fcmp_le_f32(0x1.0p-126, 0x1.fffffep127) == true +; run: %fcmp_le_f32(0x1.0p-23, 0x1.0p-23) == 1 +; run: %fcmp_le_f32(0x1.fffffep127, 0x1.fffffep127) == 1 +; run: %fcmp_le_f32(0x1.0p-126, 
0x1.0p-126) == 1 +; run: %fcmp_le_f32(0x1.0p-23, 0x1.fffffep127) == 1 +; run: %fcmp_le_f32(0x1.0p-23, 0x1.0p-126) == 0 +; run: %fcmp_le_f32(0x1.0p-126, 0x1.fffffep127) == 1 ; Subnormals -; run: %fcmp_le_f32(0x0.800002p-126, -0x0.800002p-126) == false -; run: %fcmp_le_f32(-0x0.800002p-126, 0x0.800002p-126) == true -; run: %fcmp_le_f32(0x0.800002p-126, 0x0.0) == false -; run: %fcmp_le_f32(-0x0.800002p-126, 0x0.0) == true -; run: %fcmp_le_f32(0x0.800002p-126, -0x0.0) == false -; run: %fcmp_le_f32(-0x0.800002p-126, -0x0.0) == true -; run: %fcmp_le_f32(0x0.0, 0x0.800002p-126) == true -; run: %fcmp_le_f32(0x0.0, -0x0.800002p-126) == false -; run: %fcmp_le_f32(-0x0.0, 0x0.800002p-126) == true -; run: %fcmp_le_f32(-0x0.0, -0x0.800002p-126) == false +; run: %fcmp_le_f32(0x0.800002p-126, -0x0.800002p-126) == 0 +; run: %fcmp_le_f32(-0x0.800002p-126, 0x0.800002p-126) == 1 +; run: %fcmp_le_f32(0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_le_f32(-0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_le_f32(0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_le_f32(-0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_le_f32(0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_le_f32(0x0.0, -0x0.800002p-126) == 0 +; run: %fcmp_le_f32(-0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_le_f32(-0x0.0, -0x0.800002p-126) == 0 ; NaN's -; run: %fcmp_le_f32(+NaN, +NaN) == false -; run: %fcmp_le_f32(-NaN, -NaN) == false -; run: %fcmp_le_f32(+NaN, -NaN) == false -; run: %fcmp_le_f32(-NaN, +NaN) == false +; run: %fcmp_le_f32(+NaN, +NaN) == 0 +; run: %fcmp_le_f32(-NaN, -NaN) == 0 +; run: %fcmp_le_f32(+NaN, -NaN) == 0 +; run: %fcmp_le_f32(-NaN, +NaN) == 0 -; run: %fcmp_le_f32(+NaN, -0x1.0) == false -; run: %fcmp_le_f32(-NaN, -0x1.0) == false -; run: %fcmp_le_f32(+NaN, 0x1.0) == false -; run: %fcmp_le_f32(-NaN, 0x1.0) == false -; run: %fcmp_le_f32(+NaN, -0x0.0) == false -; run: %fcmp_le_f32(-NaN, -0x0.0) == false -; run: %fcmp_le_f32(+NaN, 0x0.0) == false -; run: %fcmp_le_f32(-NaN, 0x0.0) == false -; run: %fcmp_le_f32(+NaN, -Inf) == false -; run: %fcmp_le_f32(-NaN, -Inf) == false -; run: %fcmp_le_f32(+NaN, Inf) == false -; run: %fcmp_le_f32(-NaN, Inf) == false -; run: %fcmp_le_f32(-0x0.0, +NaN) == false -; run: %fcmp_le_f32(-0x0.0, -NaN) == false -; run: %fcmp_le_f32(0x0.0, +NaN) == false -; run: %fcmp_le_f32(0x0.0, -NaN) == false -; run: %fcmp_le_f32(-Inf, +NaN) == false -; run: %fcmp_le_f32(-Inf, -NaN) == false -; run: %fcmp_le_f32(Inf, +NaN) == false -; run: %fcmp_le_f32(Inf, -NaN) == false +; run: %fcmp_le_f32(+NaN, -0x1.0) == 0 +; run: %fcmp_le_f32(-NaN, -0x1.0) == 0 +; run: %fcmp_le_f32(+NaN, 0x1.0) == 0 +; run: %fcmp_le_f32(-NaN, 0x1.0) == 0 +; run: %fcmp_le_f32(+NaN, -0x0.0) == 0 +; run: %fcmp_le_f32(-NaN, -0x0.0) == 0 +; run: %fcmp_le_f32(+NaN, 0x0.0) == 0 +; run: %fcmp_le_f32(-NaN, 0x0.0) == 0 +; run: %fcmp_le_f32(+NaN, -Inf) == 0 +; run: %fcmp_le_f32(-NaN, -Inf) == 0 +; run: %fcmp_le_f32(+NaN, Inf) == 0 +; run: %fcmp_le_f32(-NaN, Inf) == 0 +; run: %fcmp_le_f32(-0x0.0, +NaN) == 0 +; run: %fcmp_le_f32(-0x0.0, -NaN) == 0 +; run: %fcmp_le_f32(0x0.0, +NaN) == 0 +; run: %fcmp_le_f32(0x0.0, -NaN) == 0 +; run: %fcmp_le_f32(-Inf, +NaN) == 0 +; run: %fcmp_le_f32(-Inf, -NaN) == 0 +; run: %fcmp_le_f32(Inf, +NaN) == 0 +; run: %fcmp_le_f32(Inf, -NaN) == 0 -; run: %fcmp_le_f32(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_le_f32(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_le_f32(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_le_f32(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_le_f32(+NaN:0x1, +NaN) == false -; run: %fcmp_le_f32(+NaN:0x1, -NaN) == false -; run: 
%fcmp_le_f32(-NaN:0x1, -NaN) == false -; run: %fcmp_le_f32(-NaN:0x1, +NaN) == false +; run: %fcmp_le_f32(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_le_f32(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_le_f32(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_le_f32(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_le_f32(+NaN:0x1, +NaN) == 0 +; run: %fcmp_le_f32(+NaN:0x1, -NaN) == 0 +; run: %fcmp_le_f32(-NaN:0x1, -NaN) == 0 +; run: %fcmp_le_f32(-NaN:0x1, +NaN) == 0 -; run: %fcmp_le_f32(+NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_le_f32(-NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_le_f32(+NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_le_f32(-NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_le_f32(+NaN:0x80001, +NaN) == false -; run: %fcmp_le_f32(+NaN:0x80001, -NaN) == false -; run: %fcmp_le_f32(-NaN:0x80001, -NaN) == false -; run: %fcmp_le_f32(-NaN:0x80001, +NaN) == false +; run: %fcmp_le_f32(+NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_le_f32(-NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_le_f32(+NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_le_f32(-NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_le_f32(+NaN:0x80001, +NaN) == 0 +; run: %fcmp_le_f32(+NaN:0x80001, -NaN) == 0 +; run: %fcmp_le_f32(-NaN:0x80001, -NaN) == 0 +; run: %fcmp_le_f32(-NaN:0x80001, +NaN) == 0 ; sNaN's -; run: %fcmp_le_f32(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_le_f32(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_le_f32(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_le_f32(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_le_f32(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_le_f32(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_le_f32(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_le_f32(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_le_f32(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_le_f32(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_le_f32(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_le_f32(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_le_f32(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_le_f32(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_le_f32(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_le_f32(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_le_f32(+sNaN:0x1, -Inf) == false -; run: %fcmp_le_f32(-sNaN:0x1, -Inf) == false -; run: %fcmp_le_f32(+sNaN:0x1, Inf) == false -; run: %fcmp_le_f32(-sNaN:0x1, Inf) == false -; run: %fcmp_le_f32(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_le_f32(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_le_f32(0x0.0, +sNaN:0x1) == false -; run: %fcmp_le_f32(0x0.0, -sNaN:0x1) == false -; run: %fcmp_le_f32(-Inf, +sNaN:0x1) == false -; run: %fcmp_le_f32(-Inf, -sNaN:0x1) == false -; run: %fcmp_le_f32(Inf, +sNaN:0x1) == false -; run: %fcmp_le_f32(Inf, -sNaN:0x1) == false +; run: %fcmp_le_f32(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_le_f32(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_le_f32(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_le_f32(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_le_f32(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_le_f32(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_le_f32(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_le_f32(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_le_f32(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_le_f32(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_le_f32(+sNaN:0x1, Inf) == 0 +; run: %fcmp_le_f32(-sNaN:0x1, Inf) == 0 +; run: %fcmp_le_f32(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_le_f32(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_le_f32(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_le_f32(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_le_f32(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_le_f32(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_le_f32(Inf, +sNaN:0x1) == 0 +; run: %fcmp_le_f32(Inf, -sNaN:0x1) == 0 -; run: %fcmp_le_f32(+sNaN:0x1, +NaN:0x1) == 
false -; run: %fcmp_le_f32(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_le_f32(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_le_f32(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_le_f32(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_le_f32(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_le_f32(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_le_f32(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_le_f32(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_le_f32(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_le_f32(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_le_f32(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_le_f32(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_le_f32(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_le_f32(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_le_f32(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_le_f32(+sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_le_f32(-sNaN:0x80001, -sNaN:0x80001) == false -; run: %fcmp_le_f32(+sNaN:0x80001, -sNaN:0x80001) == false -; run: %fcmp_le_f32(-sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_le_f32(+sNaN:0x80001, +sNaN:0x1) == false -; run: %fcmp_le_f32(+sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_le_f32(-sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_le_f32(-sNaN:0x80001, +sNaN:0x1) == false +; run: %fcmp_le_f32(+sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_le_f32(-sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_le_f32(+sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_le_f32(-sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_le_f32(+sNaN:0x80001, +sNaN:0x1) == 0 +; run: %fcmp_le_f32(+sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_le_f32(-sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_le_f32(-sNaN:0x80001, +sNaN:0x1) == 0 -function %fcmp_le_f64(f64, f64) -> b1 { +function %fcmp_le_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp le v0, v1 return v2 } -; run: %fcmp_le_f64(0x0.5, 0x0.5) == true -; run: %fcmp_le_f64(0x1.0, 0x1.0) == true -; run: %fcmp_le_f64(-0x1.0, 0x1.0) == true -; run: %fcmp_le_f64(0x1.0, -0x1.0) == false -; run: %fcmp_le_f64(0x0.5, 0x1.0) == true -; run: %fcmp_le_f64(0x1.5, 0x2.9) == true -; run: %fcmp_le_f64(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_le_f64(0x1.4cccccccccccdp0, 0x1.8p0) == true -; run: %fcmp_le_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == false -; run: %fcmp_le_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == true -; run: %fcmp_le_f64(-0x0.5, -0x1.0) == false -; run: %fcmp_le_f64(-0x1.5, -0x2.9) == false -; run: %fcmp_le_f64(-0x1.1p10, -0x1.3333333333333p-1) == true -; run: %fcmp_le_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == false -; run: %fcmp_le_f64(-0x1.8p0, -0x1.b333333333333p0) == false -; run: %fcmp_le_f64(-0x1.4p1, -0x1.6666666666666p1) == false -; run: %fcmp_le_f64(0x0.5, -0x1.0) == false -; run: %fcmp_le_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == false +; run: %fcmp_le_f64(0x0.5, 0x0.5) == 1 +; run: %fcmp_le_f64(0x1.0, 0x1.0) == 1 +; run: %fcmp_le_f64(-0x1.0, 0x1.0) == 1 +; run: %fcmp_le_f64(0x1.0, -0x1.0) == 0 +; run: %fcmp_le_f64(0x0.5, 0x1.0) == 1 +; run: %fcmp_le_f64(0x1.5, 0x2.9) == 1 +; run: %fcmp_le_f64(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_le_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 1 +; run: %fcmp_le_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 0 +; run: %fcmp_le_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 1 +; run: %fcmp_le_f64(-0x0.5, -0x1.0) == 0 +; run: %fcmp_le_f64(-0x1.5, -0x2.9) == 0 +; run: %fcmp_le_f64(-0x1.1p10, -0x1.3333333333333p-1) == 1 +; run: %fcmp_le_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 0 +; run: %fcmp_le_f64(-0x1.8p0, -0x1.b333333333333p0) == 0 +; run: %fcmp_le_f64(-0x1.4p1, 
-0x1.6666666666666p1) == 0 +; run: %fcmp_le_f64(0x0.5, -0x1.0) == 0 +; run: %fcmp_le_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 0 ; Zeroes -; run: %fcmp_le_f64(0x0.0, 0x0.0) == true -; run: %fcmp_le_f64(-0x0.0, -0x0.0) == true -; run: %fcmp_le_f64(0x0.0, -0x0.0) == true -; run: %fcmp_le_f64(-0x0.0, 0x0.0) == true +; run: %fcmp_le_f64(0x0.0, 0x0.0) == 1 +; run: %fcmp_le_f64(-0x0.0, -0x0.0) == 1 +; run: %fcmp_le_f64(0x0.0, -0x0.0) == 1 +; run: %fcmp_le_f64(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_le_f64(Inf, Inf) == true -; run: %fcmp_le_f64(-Inf, -Inf) == true -; run: %fcmp_le_f64(Inf, -Inf) == false -; run: %fcmp_le_f64(-Inf, Inf) == true +; run: %fcmp_le_f64(Inf, Inf) == 1 +; run: %fcmp_le_f64(-Inf, -Inf) == 1 +; run: %fcmp_le_f64(Inf, -Inf) == 0 +; run: %fcmp_le_f64(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_le_f64(0x0.0, Inf) == true -; run: %fcmp_le_f64(-0x0.0, Inf) == true -; run: %fcmp_le_f64(0x0.0, -Inf) == false -; run: %fcmp_le_f64(-0x0.0, -Inf) == false -; run: %fcmp_le_f64(Inf, 0x0.0) == false -; run: %fcmp_le_f64(Inf, -0x0.0) == false -; run: %fcmp_le_f64(-Inf, 0x0.0) == true -; run: %fcmp_le_f64(-Inf, -0x0.0) == true +; run: %fcmp_le_f64(0x0.0, Inf) == 1 +; run: %fcmp_le_f64(-0x0.0, Inf) == 1 +; run: %fcmp_le_f64(0x0.0, -Inf) == 0 +; run: %fcmp_le_f64(-0x0.0, -Inf) == 0 +; run: %fcmp_le_f64(Inf, 0x0.0) == 0 +; run: %fcmp_le_f64(Inf, -0x0.0) == 0 +; run: %fcmp_le_f64(-Inf, 0x0.0) == 1 +; run: %fcmp_le_f64(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_le_f64(0x1.0p-52, 0x1.0p-52) == true -; run: %fcmp_le_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == true -; run: %fcmp_le_f64(0x1.0p-1022, 0x1.0p-1022) == true -; run: %fcmp_le_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == true -; run: %fcmp_le_f64(0x1.0p-52, 0x1.0p-1022) == false -; run: %fcmp_le_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == true +; run: %fcmp_le_f64(0x1.0p-52, 0x1.0p-52) == 1 +; run: %fcmp_le_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_le_f64(0x1.0p-1022, 0x1.0p-1022) == 1 +; run: %fcmp_le_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_le_f64(0x1.0p-52, 0x1.0p-1022) == 0 +; run: %fcmp_le_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 1 ; Subnormals -; run: %fcmp_le_f64(0x0.8p-1022, -0x0.8p-1022) == false -; run: %fcmp_le_f64(-0x0.8p-1022, 0x0.8p-1022) == true -; run: %fcmp_le_f64(0x0.8p-1022, 0x0.0) == false -; run: %fcmp_le_f64(-0x0.8p-1022, 0x0.0) == true -; run: %fcmp_le_f64(0x0.8p-1022, -0x0.0) == false -; run: %fcmp_le_f64(-0x0.8p-1022, -0x0.0) == true -; run: %fcmp_le_f64(0x0.0, 0x0.8p-1022) == true -; run: %fcmp_le_f64(0x0.0, -0x0.8p-1022) == false -; run: %fcmp_le_f64(-0x0.0, 0x0.8p-1022) == true -; run: %fcmp_le_f64(-0x0.0, -0x0.8p-1022) == false +; run: %fcmp_le_f64(0x0.8p-1022, -0x0.8p-1022) == 0 +; run: %fcmp_le_f64(-0x0.8p-1022, 0x0.8p-1022) == 1 +; run: %fcmp_le_f64(0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_le_f64(-0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_le_f64(0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_le_f64(-0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_le_f64(0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_le_f64(0x0.0, -0x0.8p-1022) == 0 +; run: %fcmp_le_f64(-0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_le_f64(-0x0.0, -0x0.8p-1022) == 0 ; NaN's -; run: %fcmp_le_f64(+NaN, +NaN) == false -; run: %fcmp_le_f64(-NaN, -NaN) == false -; run: %fcmp_le_f64(+NaN, -NaN) == false -; run: %fcmp_le_f64(-NaN, +NaN) == false +; run: %fcmp_le_f64(+NaN, +NaN) == 0 +; run: %fcmp_le_f64(-NaN, -NaN) == 0 +; run: %fcmp_le_f64(+NaN, -NaN) == 0 +; run: %fcmp_le_f64(-NaN, +NaN) == 
0 -; run: %fcmp_le_f64(+NaN, -0x1.0) == false -; run: %fcmp_le_f64(-NaN, -0x1.0) == false -; run: %fcmp_le_f64(+NaN, 0x1.0) == false -; run: %fcmp_le_f64(-NaN, 0x1.0) == false -; run: %fcmp_le_f64(+NaN, -0x0.0) == false -; run: %fcmp_le_f64(-NaN, -0x0.0) == false -; run: %fcmp_le_f64(+NaN, 0x0.0) == false -; run: %fcmp_le_f64(-NaN, 0x0.0) == false -; run: %fcmp_le_f64(+NaN, -Inf) == false -; run: %fcmp_le_f64(-NaN, -Inf) == false -; run: %fcmp_le_f64(+NaN, Inf) == false -; run: %fcmp_le_f64(-NaN, Inf) == false -; run: %fcmp_le_f64(-0x0.0, +NaN) == false -; run: %fcmp_le_f64(-0x0.0, -NaN) == false -; run: %fcmp_le_f64(0x0.0, +NaN) == false -; run: %fcmp_le_f64(0x0.0, -NaN) == false -; run: %fcmp_le_f64(-Inf, +NaN) == false -; run: %fcmp_le_f64(-Inf, -NaN) == false -; run: %fcmp_le_f64(Inf, +NaN) == false -; run: %fcmp_le_f64(Inf, -NaN) == false +; run: %fcmp_le_f64(+NaN, -0x1.0) == 0 +; run: %fcmp_le_f64(-NaN, -0x1.0) == 0 +; run: %fcmp_le_f64(+NaN, 0x1.0) == 0 +; run: %fcmp_le_f64(-NaN, 0x1.0) == 0 +; run: %fcmp_le_f64(+NaN, -0x0.0) == 0 +; run: %fcmp_le_f64(-NaN, -0x0.0) == 0 +; run: %fcmp_le_f64(+NaN, 0x0.0) == 0 +; run: %fcmp_le_f64(-NaN, 0x0.0) == 0 +; run: %fcmp_le_f64(+NaN, -Inf) == 0 +; run: %fcmp_le_f64(-NaN, -Inf) == 0 +; run: %fcmp_le_f64(+NaN, Inf) == 0 +; run: %fcmp_le_f64(-NaN, Inf) == 0 +; run: %fcmp_le_f64(-0x0.0, +NaN) == 0 +; run: %fcmp_le_f64(-0x0.0, -NaN) == 0 +; run: %fcmp_le_f64(0x0.0, +NaN) == 0 +; run: %fcmp_le_f64(0x0.0, -NaN) == 0 +; run: %fcmp_le_f64(-Inf, +NaN) == 0 +; run: %fcmp_le_f64(-Inf, -NaN) == 0 +; run: %fcmp_le_f64(Inf, +NaN) == 0 +; run: %fcmp_le_f64(Inf, -NaN) == 0 -; run: %fcmp_le_f64(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_le_f64(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_le_f64(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_le_f64(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_le_f64(+NaN:0x1, +NaN) == false -; run: %fcmp_le_f64(+NaN:0x1, -NaN) == false -; run: %fcmp_le_f64(-NaN:0x1, -NaN) == false -; run: %fcmp_le_f64(-NaN:0x1, +NaN) == false +; run: %fcmp_le_f64(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_le_f64(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_le_f64(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_le_f64(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_le_f64(+NaN:0x1, +NaN) == 0 +; run: %fcmp_le_f64(+NaN:0x1, -NaN) == 0 +; run: %fcmp_le_f64(-NaN:0x1, -NaN) == 0 +; run: %fcmp_le_f64(-NaN:0x1, +NaN) == 0 -; run: %fcmp_le_f64(+NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_le_f64(-NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_le_f64(+NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_le_f64(-NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_le_f64(+NaN:0x800000000001, +NaN) == false -; run: %fcmp_le_f64(+NaN:0x800000000001, -NaN) == false -; run: %fcmp_le_f64(-NaN:0x800000000001, -NaN) == false -; run: %fcmp_le_f64(-NaN:0x800000000001, +NaN) == false +; run: %fcmp_le_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_le_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_le_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_le_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_le_f64(+NaN:0x800000000001, +NaN) == 0 +; run: %fcmp_le_f64(+NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_le_f64(-NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_le_f64(-NaN:0x800000000001, +NaN) == 0 ; sNaN's -; run: %fcmp_le_f64(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_le_f64(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_le_f64(+sNaN:0x1, -sNaN:0x1) == false -; run: 
%fcmp_le_f64(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_le_f64(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_le_f64(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_le_f64(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_le_f64(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_le_f64(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_le_f64(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_le_f64(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_le_f64(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_le_f64(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_le_f64(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_le_f64(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_le_f64(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_le_f64(+sNaN:0x1, -Inf) == false -; run: %fcmp_le_f64(-sNaN:0x1, -Inf) == false -; run: %fcmp_le_f64(+sNaN:0x1, Inf) == false -; run: %fcmp_le_f64(-sNaN:0x1, Inf) == false -; run: %fcmp_le_f64(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_le_f64(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_le_f64(0x0.0, +sNaN:0x1) == false -; run: %fcmp_le_f64(0x0.0, -sNaN:0x1) == false -; run: %fcmp_le_f64(-Inf, +sNaN:0x1) == false -; run: %fcmp_le_f64(-Inf, -sNaN:0x1) == false -; run: %fcmp_le_f64(Inf, +sNaN:0x1) == false -; run: %fcmp_le_f64(Inf, -sNaN:0x1) == false +; run: %fcmp_le_f64(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_le_f64(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_le_f64(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_le_f64(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_le_f64(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_le_f64(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_le_f64(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_le_f64(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_le_f64(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_le_f64(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_le_f64(+sNaN:0x1, Inf) == 0 +; run: %fcmp_le_f64(-sNaN:0x1, Inf) == 0 +; run: %fcmp_le_f64(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_le_f64(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_le_f64(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_le_f64(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_le_f64(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_le_f64(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_le_f64(Inf, +sNaN:0x1) == 0 +; run: %fcmp_le_f64(Inf, -sNaN:0x1) == 0 -; run: %fcmp_le_f64(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_le_f64(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_le_f64(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_le_f64(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_le_f64(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_le_f64(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_le_f64(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_le_f64(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_le_f64(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_le_f64(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_le_f64(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_le_f64(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_le_f64(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_le_f64(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_le_f64(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_le_f64(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_le_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_le_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_le_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_le_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_le_f64(+sNaN:0x800000000001, +sNaN:0x1) == false -; run: %fcmp_le_f64(+sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_le_f64(-sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_le_f64(-sNaN:0x800000000001, +sNaN:0x1) == false +; run: %fcmp_le_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_le_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: 
%fcmp_le_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_le_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_le_f64(+sNaN:0x800000000001, +sNaN:0x1) == 0 +; run: %fcmp_le_f64(+sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_le_f64(-sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_le_f64(-sNaN:0x800000000001, +sNaN:0x1) == 0 diff --git a/cranelift/filetests/filetests/runtests/fcmp-lt.clif b/cranelift/filetests/filetests/runtests/fcmp-lt.clif index 4c57a5338dfc..0d5d63afd8f5 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-lt.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-lt.clif @@ -5,316 +5,316 @@ target aarch64 target s390x target riscv64 -function %fcmp_lt_f32(f32, f32) -> b1 { +function %fcmp_lt_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp lt v0, v1 return v2 } -; run: %fcmp_lt_f32(0x0.5, 0x0.5) == false -; run: %fcmp_lt_f32(0x1.0, 0x1.0) == false -; run: %fcmp_lt_f32(-0x1.0, 0x1.0) == true -; run: %fcmp_lt_f32(0x1.0, -0x1.0) == false -; run: %fcmp_lt_f32(0x0.5, 0x1.0) == true -; run: %fcmp_lt_f32(0x1.5, 0x2.9) == true -; run: %fcmp_lt_f32(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_lt_f32(0x1.4cccccp0, 0x1.8p0) == true -; run: %fcmp_lt_f32(0x1.b33334p0, 0x1.99999ap-2) == false -; run: %fcmp_lt_f32(0x1.333334p-1, 0x1.666666p1) == true -; run: %fcmp_lt_f32(-0x0.5, -0x1.0) == false -; run: %fcmp_lt_f32(-0x1.5, -0x2.9) == false -; run: %fcmp_lt_f32(-0x1.1p10, -0x1.333334p-1) == true -; run: %fcmp_lt_f32(-0x1.99999ap-2, -0x1.4cccccp0) == false -; run: %fcmp_lt_f32(-0x1.8p0, -0x1.b33334p0) == false -; run: %fcmp_lt_f32(-0x1.4p1, -0x1.666666p1) == false -; run: %fcmp_lt_f32(0x0.5, -0x1.0) == false -; run: %fcmp_lt_f32(0x1.b33334p0, -0x1.b33334p0) == false +; run: %fcmp_lt_f32(0x0.5, 0x0.5) == 0 +; run: %fcmp_lt_f32(0x1.0, 0x1.0) == 0 +; run: %fcmp_lt_f32(-0x1.0, 0x1.0) == 1 +; run: %fcmp_lt_f32(0x1.0, -0x1.0) == 0 +; run: %fcmp_lt_f32(0x0.5, 0x1.0) == 1 +; run: %fcmp_lt_f32(0x1.5, 0x2.9) == 1 +; run: %fcmp_lt_f32(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_lt_f32(0x1.4cccccp0, 0x1.8p0) == 1 +; run: %fcmp_lt_f32(0x1.b33334p0, 0x1.99999ap-2) == 0 +; run: %fcmp_lt_f32(0x1.333334p-1, 0x1.666666p1) == 1 +; run: %fcmp_lt_f32(-0x0.5, -0x1.0) == 0 +; run: %fcmp_lt_f32(-0x1.5, -0x2.9) == 0 +; run: %fcmp_lt_f32(-0x1.1p10, -0x1.333334p-1) == 1 +; run: %fcmp_lt_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 0 +; run: %fcmp_lt_f32(-0x1.8p0, -0x1.b33334p0) == 0 +; run: %fcmp_lt_f32(-0x1.4p1, -0x1.666666p1) == 0 +; run: %fcmp_lt_f32(0x0.5, -0x1.0) == 0 +; run: %fcmp_lt_f32(0x1.b33334p0, -0x1.b33334p0) == 0 ; Zeroes -; run: %fcmp_lt_f32(0x0.0, 0x0.0) == false -; run: %fcmp_lt_f32(-0x0.0, -0x0.0) == false -; run: %fcmp_lt_f32(0x0.0, -0x0.0) == false -; run: %fcmp_lt_f32(-0x0.0, 0x0.0) == false +; run: %fcmp_lt_f32(0x0.0, 0x0.0) == 0 +; run: %fcmp_lt_f32(-0x0.0, -0x0.0) == 0 +; run: %fcmp_lt_f32(0x0.0, -0x0.0) == 0 +; run: %fcmp_lt_f32(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_lt_f32(Inf, Inf) == false -; run: %fcmp_lt_f32(-Inf, -Inf) == false -; run: %fcmp_lt_f32(Inf, -Inf) == false -; run: %fcmp_lt_f32(-Inf, Inf) == true +; run: %fcmp_lt_f32(Inf, Inf) == 0 +; run: %fcmp_lt_f32(-Inf, -Inf) == 0 +; run: %fcmp_lt_f32(Inf, -Inf) == 0 +; run: %fcmp_lt_f32(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_lt_f32(0x0.0, Inf) == true -; run: %fcmp_lt_f32(-0x0.0, Inf) == true -; run: %fcmp_lt_f32(0x0.0, -Inf) == false -; run: %fcmp_lt_f32(-0x0.0, -Inf) == false -; run: %fcmp_lt_f32(Inf, 0x0.0) == false -; run: %fcmp_lt_f32(Inf, -0x0.0) == false -; run: 
%fcmp_lt_f32(-Inf, 0x0.0) == true -; run: %fcmp_lt_f32(-Inf, -0x0.0) == true +; run: %fcmp_lt_f32(0x0.0, Inf) == 1 +; run: %fcmp_lt_f32(-0x0.0, Inf) == 1 +; run: %fcmp_lt_f32(0x0.0, -Inf) == 0 +; run: %fcmp_lt_f32(-0x0.0, -Inf) == 0 +; run: %fcmp_lt_f32(Inf, 0x0.0) == 0 +; run: %fcmp_lt_f32(Inf, -0x0.0) == 0 +; run: %fcmp_lt_f32(-Inf, 0x0.0) == 1 +; run: %fcmp_lt_f32(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_lt_f32(0x1.0p-23, 0x1.0p-23) == false -; run: %fcmp_lt_f32(0x1.fffffep127, 0x1.fffffep127) == false -; run: %fcmp_lt_f32(0x1.0p-126, 0x1.0p-126) == false -; run: %fcmp_lt_f32(0x1.0p-23, 0x1.fffffep127) == true -; run: %fcmp_lt_f32(0x1.0p-23, 0x1.0p-126) == false -; run: %fcmp_lt_f32(0x1.0p-126, 0x1.fffffep127) == true +; run: %fcmp_lt_f32(0x1.0p-23, 0x1.0p-23) == 0 +; run: %fcmp_lt_f32(0x1.fffffep127, 0x1.fffffep127) == 0 +; run: %fcmp_lt_f32(0x1.0p-126, 0x1.0p-126) == 0 +; run: %fcmp_lt_f32(0x1.0p-23, 0x1.fffffep127) == 1 +; run: %fcmp_lt_f32(0x1.0p-23, 0x1.0p-126) == 0 +; run: %fcmp_lt_f32(0x1.0p-126, 0x1.fffffep127) == 1 ; Subnormals -; run: %fcmp_lt_f32(0x0.800002p-126, -0x0.800002p-126) == false -; run: %fcmp_lt_f32(-0x0.800002p-126, 0x0.800002p-126) == true -; run: %fcmp_lt_f32(0x0.800002p-126, 0x0.0) == false -; run: %fcmp_lt_f32(-0x0.800002p-126, 0x0.0) == true -; run: %fcmp_lt_f32(0x0.800002p-126, -0x0.0) == false -; run: %fcmp_lt_f32(-0x0.800002p-126, -0x0.0) == true -; run: %fcmp_lt_f32(0x0.0, 0x0.800002p-126) == true -; run: %fcmp_lt_f32(0x0.0, -0x0.800002p-126) == false -; run: %fcmp_lt_f32(-0x0.0, 0x0.800002p-126) == true -; run: %fcmp_lt_f32(-0x0.0, -0x0.800002p-126) == false +; run: %fcmp_lt_f32(0x0.800002p-126, -0x0.800002p-126) == 0 +; run: %fcmp_lt_f32(-0x0.800002p-126, 0x0.800002p-126) == 1 +; run: %fcmp_lt_f32(0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_lt_f32(-0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_lt_f32(0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_lt_f32(-0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_lt_f32(0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_lt_f32(0x0.0, -0x0.800002p-126) == 0 +; run: %fcmp_lt_f32(-0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_lt_f32(-0x0.0, -0x0.800002p-126) == 0 ; NaN's -; run: %fcmp_lt_f32(+NaN, +NaN) == false -; run: %fcmp_lt_f32(-NaN, -NaN) == false -; run: %fcmp_lt_f32(+NaN, -NaN) == false -; run: %fcmp_lt_f32(-NaN, +NaN) == false +; run: %fcmp_lt_f32(+NaN, +NaN) == 0 +; run: %fcmp_lt_f32(-NaN, -NaN) == 0 +; run: %fcmp_lt_f32(+NaN, -NaN) == 0 +; run: %fcmp_lt_f32(-NaN, +NaN) == 0 -; run: %fcmp_lt_f32(+NaN, -0x1.0) == false -; run: %fcmp_lt_f32(-NaN, -0x1.0) == false -; run: %fcmp_lt_f32(+NaN, 0x1.0) == false -; run: %fcmp_lt_f32(-NaN, 0x1.0) == false -; run: %fcmp_lt_f32(+NaN, -0x0.0) == false -; run: %fcmp_lt_f32(-NaN, -0x0.0) == false -; run: %fcmp_lt_f32(+NaN, 0x0.0) == false -; run: %fcmp_lt_f32(-NaN, 0x0.0) == false -; run: %fcmp_lt_f32(+NaN, -Inf) == false -; run: %fcmp_lt_f32(-NaN, -Inf) == false -; run: %fcmp_lt_f32(+NaN, Inf) == false -; run: %fcmp_lt_f32(-NaN, Inf) == false -; run: %fcmp_lt_f32(-0x0.0, +NaN) == false -; run: %fcmp_lt_f32(-0x0.0, -NaN) == false -; run: %fcmp_lt_f32(0x0.0, +NaN) == false -; run: %fcmp_lt_f32(0x0.0, -NaN) == false -; run: %fcmp_lt_f32(-Inf, +NaN) == false -; run: %fcmp_lt_f32(-Inf, -NaN) == false -; run: %fcmp_lt_f32(Inf, +NaN) == false -; run: %fcmp_lt_f32(Inf, -NaN) == false +; run: %fcmp_lt_f32(+NaN, -0x1.0) == 0 +; run: %fcmp_lt_f32(-NaN, -0x1.0) == 0 +; run: %fcmp_lt_f32(+NaN, 0x1.0) == 0 +; run: %fcmp_lt_f32(-NaN, 0x1.0) == 0 +; run: %fcmp_lt_f32(+NaN, -0x0.0) == 0 
+; run: %fcmp_lt_f32(-NaN, -0x0.0) == 0 +; run: %fcmp_lt_f32(+NaN, 0x0.0) == 0 +; run: %fcmp_lt_f32(-NaN, 0x0.0) == 0 +; run: %fcmp_lt_f32(+NaN, -Inf) == 0 +; run: %fcmp_lt_f32(-NaN, -Inf) == 0 +; run: %fcmp_lt_f32(+NaN, Inf) == 0 +; run: %fcmp_lt_f32(-NaN, Inf) == 0 +; run: %fcmp_lt_f32(-0x0.0, +NaN) == 0 +; run: %fcmp_lt_f32(-0x0.0, -NaN) == 0 +; run: %fcmp_lt_f32(0x0.0, +NaN) == 0 +; run: %fcmp_lt_f32(0x0.0, -NaN) == 0 +; run: %fcmp_lt_f32(-Inf, +NaN) == 0 +; run: %fcmp_lt_f32(-Inf, -NaN) == 0 +; run: %fcmp_lt_f32(Inf, +NaN) == 0 +; run: %fcmp_lt_f32(Inf, -NaN) == 0 -; run: %fcmp_lt_f32(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_lt_f32(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_lt_f32(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_lt_f32(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_lt_f32(+NaN:0x1, +NaN) == false -; run: %fcmp_lt_f32(+NaN:0x1, -NaN) == false -; run: %fcmp_lt_f32(-NaN:0x1, -NaN) == false -; run: %fcmp_lt_f32(-NaN:0x1, +NaN) == false +; run: %fcmp_lt_f32(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_lt_f32(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_lt_f32(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_lt_f32(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_lt_f32(+NaN:0x1, +NaN) == 0 +; run: %fcmp_lt_f32(+NaN:0x1, -NaN) == 0 +; run: %fcmp_lt_f32(-NaN:0x1, -NaN) == 0 +; run: %fcmp_lt_f32(-NaN:0x1, +NaN) == 0 -; run: %fcmp_lt_f32(+NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_lt_f32(-NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_lt_f32(+NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_lt_f32(-NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_lt_f32(+NaN:0x80001, +NaN) == false -; run: %fcmp_lt_f32(+NaN:0x80001, -NaN) == false -; run: %fcmp_lt_f32(-NaN:0x80001, -NaN) == false -; run: %fcmp_lt_f32(-NaN:0x80001, +NaN) == false +; run: %fcmp_lt_f32(+NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_lt_f32(-NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_lt_f32(+NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_lt_f32(-NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_lt_f32(+NaN:0x80001, +NaN) == 0 +; run: %fcmp_lt_f32(+NaN:0x80001, -NaN) == 0 +; run: %fcmp_lt_f32(-NaN:0x80001, -NaN) == 0 +; run: %fcmp_lt_f32(-NaN:0x80001, +NaN) == 0 ; sNaN's -; run: %fcmp_lt_f32(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_lt_f32(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_lt_f32(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_lt_f32(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_lt_f32(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_lt_f32(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_lt_f32(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_lt_f32(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_lt_f32(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_lt_f32(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_lt_f32(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_lt_f32(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_lt_f32(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_lt_f32(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_lt_f32(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_lt_f32(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_lt_f32(+sNaN:0x1, -Inf) == false -; run: %fcmp_lt_f32(-sNaN:0x1, -Inf) == false -; run: %fcmp_lt_f32(+sNaN:0x1, Inf) == false -; run: %fcmp_lt_f32(-sNaN:0x1, Inf) == false -; run: %fcmp_lt_f32(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_lt_f32(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_lt_f32(0x0.0, +sNaN:0x1) == false -; run: %fcmp_lt_f32(0x0.0, -sNaN:0x1) == false -; run: %fcmp_lt_f32(-Inf, +sNaN:0x1) == false -; run: %fcmp_lt_f32(-Inf, -sNaN:0x1) == false -; run: %fcmp_lt_f32(Inf, +sNaN:0x1) == false -; run: %fcmp_lt_f32(Inf, -sNaN:0x1) == false +; run: %fcmp_lt_f32(+sNaN:0x1, -0x1.0) == 0 +; 
run: %fcmp_lt_f32(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_lt_f32(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_lt_f32(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_lt_f32(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_lt_f32(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_lt_f32(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_lt_f32(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_lt_f32(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_lt_f32(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_lt_f32(+sNaN:0x1, Inf) == 0 +; run: %fcmp_lt_f32(-sNaN:0x1, Inf) == 0 +; run: %fcmp_lt_f32(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_lt_f32(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_lt_f32(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_lt_f32(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_lt_f32(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_lt_f32(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_lt_f32(Inf, +sNaN:0x1) == 0 +; run: %fcmp_lt_f32(Inf, -sNaN:0x1) == 0 -; run: %fcmp_lt_f32(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_lt_f32(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_lt_f32(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_lt_f32(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_lt_f32(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_lt_f32(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_lt_f32(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_lt_f32(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_lt_f32(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_lt_f32(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_lt_f32(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_lt_f32(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_lt_f32(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_lt_f32(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_lt_f32(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_lt_f32(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_lt_f32(+sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_lt_f32(-sNaN:0x80001, -sNaN:0x80001) == false -; run: %fcmp_lt_f32(+sNaN:0x80001, -sNaN:0x80001) == false -; run: %fcmp_lt_f32(-sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_lt_f32(+sNaN:0x80001, +sNaN:0x1) == false -; run: %fcmp_lt_f32(+sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_lt_f32(-sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_lt_f32(-sNaN:0x80001, +sNaN:0x1) == false +; run: %fcmp_lt_f32(+sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_lt_f32(-sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_lt_f32(+sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_lt_f32(-sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_lt_f32(+sNaN:0x80001, +sNaN:0x1) == 0 +; run: %fcmp_lt_f32(+sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_lt_f32(-sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_lt_f32(-sNaN:0x80001, +sNaN:0x1) == 0 -function %fcmp_lt_f64(f64, f64) -> b1 { +function %fcmp_lt_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp lt v0, v1 return v2 } -; run: %fcmp_lt_f64(0x0.5, 0x0.5) == false -; run: %fcmp_lt_f64(0x1.0, 0x1.0) == false -; run: %fcmp_lt_f64(-0x1.0, 0x1.0) == true -; run: %fcmp_lt_f64(0x1.0, -0x1.0) == false -; run: %fcmp_lt_f64(0x0.5, 0x1.0) == true -; run: %fcmp_lt_f64(0x1.5, 0x2.9) == true -; run: %fcmp_lt_f64(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_lt_f64(0x1.4cccccccccccdp0, 0x1.8p0) == true -; run: %fcmp_lt_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == false -; run: %fcmp_lt_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == true -; run: %fcmp_lt_f64(-0x0.5, -0x1.0) == false -; run: %fcmp_lt_f64(-0x1.5, -0x2.9) == false -; run: %fcmp_lt_f64(-0x1.1p10, -0x1.3333333333333p-1) == true -; run: %fcmp_lt_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == false -; run: %fcmp_lt_f64(-0x1.8p0, -0x1.b333333333333p0) == false -; run: %fcmp_lt_f64(-0x1.4p1, -0x1.6666666666666p1) == false -; run: %fcmp_lt_f64(0x0.5, -0x1.0) == 
false -; run: %fcmp_lt_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == false +; run: %fcmp_lt_f64(0x0.5, 0x0.5) == 0 +; run: %fcmp_lt_f64(0x1.0, 0x1.0) == 0 +; run: %fcmp_lt_f64(-0x1.0, 0x1.0) == 1 +; run: %fcmp_lt_f64(0x1.0, -0x1.0) == 0 +; run: %fcmp_lt_f64(0x0.5, 0x1.0) == 1 +; run: %fcmp_lt_f64(0x1.5, 0x2.9) == 1 +; run: %fcmp_lt_f64(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_lt_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 1 +; run: %fcmp_lt_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 0 +; run: %fcmp_lt_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 1 +; run: %fcmp_lt_f64(-0x0.5, -0x1.0) == 0 +; run: %fcmp_lt_f64(-0x1.5, -0x2.9) == 0 +; run: %fcmp_lt_f64(-0x1.1p10, -0x1.3333333333333p-1) == 1 +; run: %fcmp_lt_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 0 +; run: %fcmp_lt_f64(-0x1.8p0, -0x1.b333333333333p0) == 0 +; run: %fcmp_lt_f64(-0x1.4p1, -0x1.6666666666666p1) == 0 +; run: %fcmp_lt_f64(0x0.5, -0x1.0) == 0 +; run: %fcmp_lt_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 0 ; Zeroes -; run: %fcmp_lt_f64(0x0.0, 0x0.0) == false -; run: %fcmp_lt_f64(-0x0.0, -0x0.0) == false -; run: %fcmp_lt_f64(0x0.0, -0x0.0) == false -; run: %fcmp_lt_f64(-0x0.0, 0x0.0) == false +; run: %fcmp_lt_f64(0x0.0, 0x0.0) == 0 +; run: %fcmp_lt_f64(-0x0.0, -0x0.0) == 0 +; run: %fcmp_lt_f64(0x0.0, -0x0.0) == 0 +; run: %fcmp_lt_f64(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_lt_f64(Inf, Inf) == false -; run: %fcmp_lt_f64(-Inf, -Inf) == false -; run: %fcmp_lt_f64(Inf, -Inf) == false -; run: %fcmp_lt_f64(-Inf, Inf) == true +; run: %fcmp_lt_f64(Inf, Inf) == 0 +; run: %fcmp_lt_f64(-Inf, -Inf) == 0 +; run: %fcmp_lt_f64(Inf, -Inf) == 0 +; run: %fcmp_lt_f64(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_lt_f64(0x0.0, Inf) == true -; run: %fcmp_lt_f64(-0x0.0, Inf) == true -; run: %fcmp_lt_f64(0x0.0, -Inf) == false -; run: %fcmp_lt_f64(-0x0.0, -Inf) == false -; run: %fcmp_lt_f64(Inf, 0x0.0) == false -; run: %fcmp_lt_f64(Inf, -0x0.0) == false -; run: %fcmp_lt_f64(-Inf, 0x0.0) == true -; run: %fcmp_lt_f64(-Inf, -0x0.0) == true +; run: %fcmp_lt_f64(0x0.0, Inf) == 1 +; run: %fcmp_lt_f64(-0x0.0, Inf) == 1 +; run: %fcmp_lt_f64(0x0.0, -Inf) == 0 +; run: %fcmp_lt_f64(-0x0.0, -Inf) == 0 +; run: %fcmp_lt_f64(Inf, 0x0.0) == 0 +; run: %fcmp_lt_f64(Inf, -0x0.0) == 0 +; run: %fcmp_lt_f64(-Inf, 0x0.0) == 1 +; run: %fcmp_lt_f64(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_lt_f64(0x1.0p-52, 0x1.0p-52) == false -; run: %fcmp_lt_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == false -; run: %fcmp_lt_f64(0x1.0p-1022, 0x1.0p-1022) == false -; run: %fcmp_lt_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == true -; run: %fcmp_lt_f64(0x1.0p-52, 0x1.0p-1022) == false -; run: %fcmp_lt_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == true +; run: %fcmp_lt_f64(0x1.0p-52, 0x1.0p-52) == 0 +; run: %fcmp_lt_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_lt_f64(0x1.0p-1022, 0x1.0p-1022) == 0 +; run: %fcmp_lt_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_lt_f64(0x1.0p-52, 0x1.0p-1022) == 0 +; run: %fcmp_lt_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 1 ; Subnormals -; run: %fcmp_lt_f64(0x0.8p-1022, -0x0.8p-1022) == false -; run: %fcmp_lt_f64(-0x0.8p-1022, 0x0.8p-1022) == true -; run: %fcmp_lt_f64(0x0.8p-1022, 0x0.0) == false -; run: %fcmp_lt_f64(-0x0.8p-1022, 0x0.0) == true -; run: %fcmp_lt_f64(0x0.8p-1022, -0x0.0) == false -; run: %fcmp_lt_f64(-0x0.8p-1022, -0x0.0) == true -; run: %fcmp_lt_f64(0x0.0, 0x0.8p-1022) == true -; run: %fcmp_lt_f64(0x0.0, -0x0.8p-1022) == false -; run: %fcmp_lt_f64(-0x0.0, 
0x0.8p-1022) == true -; run: %fcmp_lt_f64(-0x0.0, -0x0.8p-1022) == false +; run: %fcmp_lt_f64(0x0.8p-1022, -0x0.8p-1022) == 0 +; run: %fcmp_lt_f64(-0x0.8p-1022, 0x0.8p-1022) == 1 +; run: %fcmp_lt_f64(0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_lt_f64(-0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_lt_f64(0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_lt_f64(-0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_lt_f64(0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_lt_f64(0x0.0, -0x0.8p-1022) == 0 +; run: %fcmp_lt_f64(-0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_lt_f64(-0x0.0, -0x0.8p-1022) == 0 ; NaN's -; run: %fcmp_lt_f64(+NaN, +NaN) == false -; run: %fcmp_lt_f64(-NaN, -NaN) == false -; run: %fcmp_lt_f64(+NaN, -NaN) == false -; run: %fcmp_lt_f64(-NaN, +NaN) == false +; run: %fcmp_lt_f64(+NaN, +NaN) == 0 +; run: %fcmp_lt_f64(-NaN, -NaN) == 0 +; run: %fcmp_lt_f64(+NaN, -NaN) == 0 +; run: %fcmp_lt_f64(-NaN, +NaN) == 0 -; run: %fcmp_lt_f64(+NaN, -0x1.0) == false -; run: %fcmp_lt_f64(-NaN, -0x1.0) == false -; run: %fcmp_lt_f64(+NaN, 0x1.0) == false -; run: %fcmp_lt_f64(-NaN, 0x1.0) == false -; run: %fcmp_lt_f64(+NaN, -0x0.0) == false -; run: %fcmp_lt_f64(-NaN, -0x0.0) == false -; run: %fcmp_lt_f64(+NaN, 0x0.0) == false -; run: %fcmp_lt_f64(-NaN, 0x0.0) == false -; run: %fcmp_lt_f64(+NaN, -Inf) == false -; run: %fcmp_lt_f64(-NaN, -Inf) == false -; run: %fcmp_lt_f64(+NaN, Inf) == false -; run: %fcmp_lt_f64(-NaN, Inf) == false -; run: %fcmp_lt_f64(-0x0.0, +NaN) == false -; run: %fcmp_lt_f64(-0x0.0, -NaN) == false -; run: %fcmp_lt_f64(0x0.0, +NaN) == false -; run: %fcmp_lt_f64(0x0.0, -NaN) == false -; run: %fcmp_lt_f64(-Inf, +NaN) == false -; run: %fcmp_lt_f64(-Inf, -NaN) == false -; run: %fcmp_lt_f64(Inf, +NaN) == false -; run: %fcmp_lt_f64(Inf, -NaN) == false +; run: %fcmp_lt_f64(+NaN, -0x1.0) == 0 +; run: %fcmp_lt_f64(-NaN, -0x1.0) == 0 +; run: %fcmp_lt_f64(+NaN, 0x1.0) == 0 +; run: %fcmp_lt_f64(-NaN, 0x1.0) == 0 +; run: %fcmp_lt_f64(+NaN, -0x0.0) == 0 +; run: %fcmp_lt_f64(-NaN, -0x0.0) == 0 +; run: %fcmp_lt_f64(+NaN, 0x0.0) == 0 +; run: %fcmp_lt_f64(-NaN, 0x0.0) == 0 +; run: %fcmp_lt_f64(+NaN, -Inf) == 0 +; run: %fcmp_lt_f64(-NaN, -Inf) == 0 +; run: %fcmp_lt_f64(+NaN, Inf) == 0 +; run: %fcmp_lt_f64(-NaN, Inf) == 0 +; run: %fcmp_lt_f64(-0x0.0, +NaN) == 0 +; run: %fcmp_lt_f64(-0x0.0, -NaN) == 0 +; run: %fcmp_lt_f64(0x0.0, +NaN) == 0 +; run: %fcmp_lt_f64(0x0.0, -NaN) == 0 +; run: %fcmp_lt_f64(-Inf, +NaN) == 0 +; run: %fcmp_lt_f64(-Inf, -NaN) == 0 +; run: %fcmp_lt_f64(Inf, +NaN) == 0 +; run: %fcmp_lt_f64(Inf, -NaN) == 0 -; run: %fcmp_lt_f64(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_lt_f64(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_lt_f64(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_lt_f64(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_lt_f64(+NaN:0x1, +NaN) == false -; run: %fcmp_lt_f64(+NaN:0x1, -NaN) == false -; run: %fcmp_lt_f64(-NaN:0x1, -NaN) == false -; run: %fcmp_lt_f64(-NaN:0x1, +NaN) == false +; run: %fcmp_lt_f64(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_lt_f64(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_lt_f64(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_lt_f64(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_lt_f64(+NaN:0x1, +NaN) == 0 +; run: %fcmp_lt_f64(+NaN:0x1, -NaN) == 0 +; run: %fcmp_lt_f64(-NaN:0x1, -NaN) == 0 +; run: %fcmp_lt_f64(-NaN:0x1, +NaN) == 0 -; run: %fcmp_lt_f64(+NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_lt_f64(-NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_lt_f64(+NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_lt_f64(-NaN:0x800000000001, +NaN:0x800000000001) == false -; run: 
%fcmp_lt_f64(+NaN:0x800000000001, +NaN) == false -; run: %fcmp_lt_f64(+NaN:0x800000000001, -NaN) == false -; run: %fcmp_lt_f64(-NaN:0x800000000001, -NaN) == false -; run: %fcmp_lt_f64(-NaN:0x800000000001, +NaN) == false +; run: %fcmp_lt_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_lt_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_lt_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_lt_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_lt_f64(+NaN:0x800000000001, +NaN) == 0 +; run: %fcmp_lt_f64(+NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_lt_f64(-NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_lt_f64(-NaN:0x800000000001, +NaN) == 0 ; sNaN's -; run: %fcmp_lt_f64(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_lt_f64(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_lt_f64(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_lt_f64(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_lt_f64(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_lt_f64(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_lt_f64(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_lt_f64(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_lt_f64(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_lt_f64(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_lt_f64(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_lt_f64(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_lt_f64(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_lt_f64(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_lt_f64(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_lt_f64(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_lt_f64(+sNaN:0x1, -Inf) == false -; run: %fcmp_lt_f64(-sNaN:0x1, -Inf) == false -; run: %fcmp_lt_f64(+sNaN:0x1, Inf) == false -; run: %fcmp_lt_f64(-sNaN:0x1, Inf) == false -; run: %fcmp_lt_f64(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_lt_f64(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_lt_f64(0x0.0, +sNaN:0x1) == false -; run: %fcmp_lt_f64(0x0.0, -sNaN:0x1) == false -; run: %fcmp_lt_f64(-Inf, +sNaN:0x1) == false -; run: %fcmp_lt_f64(-Inf, -sNaN:0x1) == false -; run: %fcmp_lt_f64(Inf, +sNaN:0x1) == false -; run: %fcmp_lt_f64(Inf, -sNaN:0x1) == false +; run: %fcmp_lt_f64(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_lt_f64(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_lt_f64(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_lt_f64(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_lt_f64(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_lt_f64(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_lt_f64(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_lt_f64(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_lt_f64(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_lt_f64(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_lt_f64(+sNaN:0x1, Inf) == 0 +; run: %fcmp_lt_f64(-sNaN:0x1, Inf) == 0 +; run: %fcmp_lt_f64(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_lt_f64(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_lt_f64(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_lt_f64(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_lt_f64(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_lt_f64(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_lt_f64(Inf, +sNaN:0x1) == 0 +; run: %fcmp_lt_f64(Inf, -sNaN:0x1) == 0 -; run: %fcmp_lt_f64(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_lt_f64(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_lt_f64(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_lt_f64(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_lt_f64(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_lt_f64(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_lt_f64(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_lt_f64(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_lt_f64(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_lt_f64(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_lt_f64(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_lt_f64(-sNaN:0x1, +NaN:0x1) == 0 +; run: 
%fcmp_lt_f64(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_lt_f64(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_lt_f64(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_lt_f64(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_lt_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_lt_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_lt_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_lt_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_lt_f64(+sNaN:0x800000000001, +sNaN:0x1) == false -; run: %fcmp_lt_f64(+sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_lt_f64(-sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_lt_f64(-sNaN:0x800000000001, +sNaN:0x1) == false +; run: %fcmp_lt_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_lt_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_lt_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_lt_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_lt_f64(+sNaN:0x800000000001, +sNaN:0x1) == 0 +; run: %fcmp_lt_f64(+sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_lt_f64(-sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_lt_f64(-sNaN:0x800000000001, +sNaN:0x1) == 0 diff --git a/cranelift/filetests/filetests/runtests/fcmp-ne.clif b/cranelift/filetests/filetests/runtests/fcmp-ne.clif index 570ab86f01cd..7102d1a3d369 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-ne.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-ne.clif @@ -5,316 +5,316 @@ target aarch64 target s390x target riscv64 -function %fcmp_ne_f32(f32, f32) -> b1 { +function %fcmp_ne_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp ne v0, v1 return v2 } -; run: %fcmp_ne_f32(0x0.5, 0x0.5) == false -; run: %fcmp_ne_f32(0x1.0, 0x1.0) == false -; run: %fcmp_ne_f32(-0x1.0, 0x1.0) == true -; run: %fcmp_ne_f32(0x1.0, -0x1.0) == true -; run: %fcmp_ne_f32(0x0.5, 0x1.0) == true -; run: %fcmp_ne_f32(0x1.5, 0x2.9) == true -; run: %fcmp_ne_f32(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_ne_f32(0x1.4cccccp0, 0x1.8p0) == true -; run: %fcmp_ne_f32(0x1.b33334p0, 0x1.99999ap-2) == true -; run: %fcmp_ne_f32(0x1.333334p-1, 0x1.666666p1) == true -; run: %fcmp_ne_f32(-0x0.5, -0x1.0) == true -; run: %fcmp_ne_f32(-0x1.5, -0x2.9) == true -; run: %fcmp_ne_f32(-0x1.1p10, -0x1.333334p-1) == true -; run: %fcmp_ne_f32(-0x1.99999ap-2, -0x1.4cccccp0) == true -; run: %fcmp_ne_f32(-0x1.8p0, -0x1.b33334p0) == true -; run: %fcmp_ne_f32(-0x1.4p1, -0x1.666666p1) == true -; run: %fcmp_ne_f32(0x0.5, -0x1.0) == true -; run: %fcmp_ne_f32(0x1.b33334p0, -0x1.b33334p0) == true +; run: %fcmp_ne_f32(0x0.5, 0x0.5) == 0 +; run: %fcmp_ne_f32(0x1.0, 0x1.0) == 0 +; run: %fcmp_ne_f32(-0x1.0, 0x1.0) == 1 +; run: %fcmp_ne_f32(0x1.0, -0x1.0) == 1 +; run: %fcmp_ne_f32(0x0.5, 0x1.0) == 1 +; run: %fcmp_ne_f32(0x1.5, 0x2.9) == 1 +; run: %fcmp_ne_f32(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_ne_f32(0x1.4cccccp0, 0x1.8p0) == 1 +; run: %fcmp_ne_f32(0x1.b33334p0, 0x1.99999ap-2) == 1 +; run: %fcmp_ne_f32(0x1.333334p-1, 0x1.666666p1) == 1 +; run: %fcmp_ne_f32(-0x0.5, -0x1.0) == 1 +; run: %fcmp_ne_f32(-0x1.5, -0x2.9) == 1 +; run: %fcmp_ne_f32(-0x1.1p10, -0x1.333334p-1) == 1 +; run: %fcmp_ne_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 1 +; run: %fcmp_ne_f32(-0x1.8p0, -0x1.b33334p0) == 1 +; run: %fcmp_ne_f32(-0x1.4p1, -0x1.666666p1) == 1 +; run: %fcmp_ne_f32(0x0.5, -0x1.0) == 1 +; run: %fcmp_ne_f32(0x1.b33334p0, -0x1.b33334p0) == 1 ; Zeroes -; run: %fcmp_ne_f32(0x0.0, 0x0.0) == false -; run: %fcmp_ne_f32(-0x0.0, -0x0.0) == false 
-; run: %fcmp_ne_f32(0x0.0, -0x0.0) == false -; run: %fcmp_ne_f32(-0x0.0, 0x0.0) == false +; run: %fcmp_ne_f32(0x0.0, 0x0.0) == 0 +; run: %fcmp_ne_f32(-0x0.0, -0x0.0) == 0 +; run: %fcmp_ne_f32(0x0.0, -0x0.0) == 0 +; run: %fcmp_ne_f32(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_ne_f32(Inf, Inf) == false -; run: %fcmp_ne_f32(-Inf, -Inf) == false -; run: %fcmp_ne_f32(Inf, -Inf) == true -; run: %fcmp_ne_f32(-Inf, Inf) == true +; run: %fcmp_ne_f32(Inf, Inf) == 0 +; run: %fcmp_ne_f32(-Inf, -Inf) == 0 +; run: %fcmp_ne_f32(Inf, -Inf) == 1 +; run: %fcmp_ne_f32(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_ne_f32(0x0.0, Inf) == true -; run: %fcmp_ne_f32(-0x0.0, Inf) == true -; run: %fcmp_ne_f32(0x0.0, -Inf) == true -; run: %fcmp_ne_f32(-0x0.0, -Inf) == true -; run: %fcmp_ne_f32(Inf, 0x0.0) == true -; run: %fcmp_ne_f32(Inf, -0x0.0) == true -; run: %fcmp_ne_f32(-Inf, 0x0.0) == true -; run: %fcmp_ne_f32(-Inf, -0x0.0) == true +; run: %fcmp_ne_f32(0x0.0, Inf) == 1 +; run: %fcmp_ne_f32(-0x0.0, Inf) == 1 +; run: %fcmp_ne_f32(0x0.0, -Inf) == 1 +; run: %fcmp_ne_f32(-0x0.0, -Inf) == 1 +; run: %fcmp_ne_f32(Inf, 0x0.0) == 1 +; run: %fcmp_ne_f32(Inf, -0x0.0) == 1 +; run: %fcmp_ne_f32(-Inf, 0x0.0) == 1 +; run: %fcmp_ne_f32(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_ne_f32(0x1.0p-23, 0x1.0p-23) == false -; run: %fcmp_ne_f32(0x1.fffffep127, 0x1.fffffep127) == false -; run: %fcmp_ne_f32(0x1.0p-126, 0x1.0p-126) == false -; run: %fcmp_ne_f32(0x1.0p-23, 0x1.fffffep127) == true -; run: %fcmp_ne_f32(0x1.0p-23, 0x1.0p-126) == true -; run: %fcmp_ne_f32(0x1.0p-126, 0x1.fffffep127) == true +; run: %fcmp_ne_f32(0x1.0p-23, 0x1.0p-23) == 0 +; run: %fcmp_ne_f32(0x1.fffffep127, 0x1.fffffep127) == 0 +; run: %fcmp_ne_f32(0x1.0p-126, 0x1.0p-126) == 0 +; run: %fcmp_ne_f32(0x1.0p-23, 0x1.fffffep127) == 1 +; run: %fcmp_ne_f32(0x1.0p-23, 0x1.0p-126) == 1 +; run: %fcmp_ne_f32(0x1.0p-126, 0x1.fffffep127) == 1 ; Subnormals -; run: %fcmp_ne_f32(0x0.800002p-126, -0x0.800002p-126) == true -; run: %fcmp_ne_f32(-0x0.800002p-126, 0x0.800002p-126) == true -; run: %fcmp_ne_f32(0x0.800002p-126, 0x0.0) == true -; run: %fcmp_ne_f32(-0x0.800002p-126, 0x0.0) == true -; run: %fcmp_ne_f32(0x0.800002p-126, -0x0.0) == true -; run: %fcmp_ne_f32(-0x0.800002p-126, -0x0.0) == true -; run: %fcmp_ne_f32(0x0.0, 0x0.800002p-126) == true -; run: %fcmp_ne_f32(0x0.0, -0x0.800002p-126) == true -; run: %fcmp_ne_f32(-0x0.0, 0x0.800002p-126) == true -; run: %fcmp_ne_f32(-0x0.0, -0x0.800002p-126) == true +; run: %fcmp_ne_f32(0x0.800002p-126, -0x0.800002p-126) == 1 +; run: %fcmp_ne_f32(-0x0.800002p-126, 0x0.800002p-126) == 1 +; run: %fcmp_ne_f32(0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_ne_f32(-0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_ne_f32(0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_ne_f32(-0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_ne_f32(0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_ne_f32(0x0.0, -0x0.800002p-126) == 1 +; run: %fcmp_ne_f32(-0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_ne_f32(-0x0.0, -0x0.800002p-126) == 1 ; NaN's -; run: %fcmp_ne_f32(+NaN, +NaN) == true -; run: %fcmp_ne_f32(-NaN, -NaN) == true -; run: %fcmp_ne_f32(+NaN, -NaN) == true -; run: %fcmp_ne_f32(-NaN, +NaN) == true +; run: %fcmp_ne_f32(+NaN, +NaN) == 1 +; run: %fcmp_ne_f32(-NaN, -NaN) == 1 +; run: %fcmp_ne_f32(+NaN, -NaN) == 1 +; run: %fcmp_ne_f32(-NaN, +NaN) == 1 -; run: %fcmp_ne_f32(+NaN, -0x1.0) == true -; run: %fcmp_ne_f32(-NaN, -0x1.0) == true -; run: %fcmp_ne_f32(+NaN, 0x1.0) == true -; run: %fcmp_ne_f32(-NaN, 0x1.0) == true -; run: %fcmp_ne_f32(+NaN, -0x0.0) == true -; 
run: %fcmp_ne_f32(-NaN, -0x0.0) == true -; run: %fcmp_ne_f32(+NaN, 0x0.0) == true -; run: %fcmp_ne_f32(-NaN, 0x0.0) == true -; run: %fcmp_ne_f32(+NaN, -Inf) == true -; run: %fcmp_ne_f32(-NaN, -Inf) == true -; run: %fcmp_ne_f32(+NaN, Inf) == true -; run: %fcmp_ne_f32(-NaN, Inf) == true -; run: %fcmp_ne_f32(-0x0.0, +NaN) == true -; run: %fcmp_ne_f32(-0x0.0, -NaN) == true -; run: %fcmp_ne_f32(0x0.0, +NaN) == true -; run: %fcmp_ne_f32(0x0.0, -NaN) == true -; run: %fcmp_ne_f32(-Inf, +NaN) == true -; run: %fcmp_ne_f32(-Inf, -NaN) == true -; run: %fcmp_ne_f32(Inf, +NaN) == true -; run: %fcmp_ne_f32(Inf, -NaN) == true +; run: %fcmp_ne_f32(+NaN, -0x1.0) == 1 +; run: %fcmp_ne_f32(-NaN, -0x1.0) == 1 +; run: %fcmp_ne_f32(+NaN, 0x1.0) == 1 +; run: %fcmp_ne_f32(-NaN, 0x1.0) == 1 +; run: %fcmp_ne_f32(+NaN, -0x0.0) == 1 +; run: %fcmp_ne_f32(-NaN, -0x0.0) == 1 +; run: %fcmp_ne_f32(+NaN, 0x0.0) == 1 +; run: %fcmp_ne_f32(-NaN, 0x0.0) == 1 +; run: %fcmp_ne_f32(+NaN, -Inf) == 1 +; run: %fcmp_ne_f32(-NaN, -Inf) == 1 +; run: %fcmp_ne_f32(+NaN, Inf) == 1 +; run: %fcmp_ne_f32(-NaN, Inf) == 1 +; run: %fcmp_ne_f32(-0x0.0, +NaN) == 1 +; run: %fcmp_ne_f32(-0x0.0, -NaN) == 1 +; run: %fcmp_ne_f32(0x0.0, +NaN) == 1 +; run: %fcmp_ne_f32(0x0.0, -NaN) == 1 +; run: %fcmp_ne_f32(-Inf, +NaN) == 1 +; run: %fcmp_ne_f32(-Inf, -NaN) == 1 +; run: %fcmp_ne_f32(Inf, +NaN) == 1 +; run: %fcmp_ne_f32(Inf, -NaN) == 1 -; run: %fcmp_ne_f32(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ne_f32(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ne_f32(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ne_f32(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ne_f32(+NaN:0x1, +NaN) == true -; run: %fcmp_ne_f32(+NaN:0x1, -NaN) == true -; run: %fcmp_ne_f32(-NaN:0x1, -NaN) == true -; run: %fcmp_ne_f32(-NaN:0x1, +NaN) == true +; run: %fcmp_ne_f32(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ne_f32(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ne_f32(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ne_f32(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ne_f32(+NaN:0x1, +NaN) == 1 +; run: %fcmp_ne_f32(+NaN:0x1, -NaN) == 1 +; run: %fcmp_ne_f32(-NaN:0x1, -NaN) == 1 +; run: %fcmp_ne_f32(-NaN:0x1, +NaN) == 1 -; run: %fcmp_ne_f32(+NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_ne_f32(-NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_ne_f32(+NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_ne_f32(-NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_ne_f32(+NaN:0x80001, +NaN) == true -; run: %fcmp_ne_f32(+NaN:0x80001, -NaN) == true -; run: %fcmp_ne_f32(-NaN:0x80001, -NaN) == true -; run: %fcmp_ne_f32(-NaN:0x80001, +NaN) == true +; run: %fcmp_ne_f32(+NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_ne_f32(-NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_ne_f32(+NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_ne_f32(-NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_ne_f32(+NaN:0x80001, +NaN) == 1 +; run: %fcmp_ne_f32(+NaN:0x80001, -NaN) == 1 +; run: %fcmp_ne_f32(-NaN:0x80001, -NaN) == 1 +; run: %fcmp_ne_f32(-NaN:0x80001, +NaN) == 1 ; sNaN's -; run: %fcmp_ne_f32(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ne_f32(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ne_f32(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ne_f32(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_ne_f32(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ne_f32(-sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ne_f32(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ne_f32(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_ne_f32(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_ne_f32(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_ne_f32(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_ne_f32(-sNaN:0x1, 0x1.0) == true -; run: 
%fcmp_ne_f32(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_ne_f32(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_ne_f32(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_ne_f32(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_ne_f32(+sNaN:0x1, -Inf) == true -; run: %fcmp_ne_f32(-sNaN:0x1, -Inf) == true -; run: %fcmp_ne_f32(+sNaN:0x1, Inf) == true -; run: %fcmp_ne_f32(-sNaN:0x1, Inf) == true -; run: %fcmp_ne_f32(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_ne_f32(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_ne_f32(0x0.0, +sNaN:0x1) == true -; run: %fcmp_ne_f32(0x0.0, -sNaN:0x1) == true -; run: %fcmp_ne_f32(-Inf, +sNaN:0x1) == true -; run: %fcmp_ne_f32(-Inf, -sNaN:0x1) == true -; run: %fcmp_ne_f32(Inf, +sNaN:0x1) == true -; run: %fcmp_ne_f32(Inf, -sNaN:0x1) == true +; run: %fcmp_ne_f32(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ne_f32(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ne_f32(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ne_f32(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ne_f32(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ne_f32(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ne_f32(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ne_f32(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ne_f32(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_ne_f32(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_ne_f32(+sNaN:0x1, Inf) == 1 +; run: %fcmp_ne_f32(-sNaN:0x1, Inf) == 1 +; run: %fcmp_ne_f32(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ne_f32(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ne_f32(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ne_f32(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ne_f32(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_ne_f32(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_ne_f32(Inf, +sNaN:0x1) == 1 +; run: %fcmp_ne_f32(Inf, -sNaN:0x1) == 1 -; run: %fcmp_ne_f32(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ne_f32(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ne_f32(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ne_f32(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ne_f32(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ne_f32(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ne_f32(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ne_f32(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_ne_f32(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ne_f32(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ne_f32(+sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ne_f32(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ne_f32(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ne_f32(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ne_f32(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ne_f32(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_ne_f32(+sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_ne_f32(-sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_ne_f32(+sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_ne_f32(-sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_ne_f32(+sNaN:0x80001, +sNaN:0x1) == true -; run: %fcmp_ne_f32(+sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_ne_f32(-sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_ne_f32(-sNaN:0x80001, +sNaN:0x1) == true +; run: %fcmp_ne_f32(+sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_ne_f32(-sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_ne_f32(+sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_ne_f32(-sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_ne_f32(+sNaN:0x80001, +sNaN:0x1) == 1 +; run: %fcmp_ne_f32(+sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_ne_f32(-sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_ne_f32(-sNaN:0x80001, +sNaN:0x1) == 1 -function %fcmp_ne_f64(f64, f64) -> b1 { +function %fcmp_ne_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp ne v0, v1 return v2 } -; run: %fcmp_ne_f64(0x0.5, 0x0.5) == false -; run: %fcmp_ne_f64(0x1.0, 0x1.0) == false -; run: %fcmp_ne_f64(-0x1.0, 0x1.0) == true -; run: 
%fcmp_ne_f64(0x1.0, -0x1.0) == true -; run: %fcmp_ne_f64(0x0.5, 0x1.0) == true -; run: %fcmp_ne_f64(0x1.5, 0x2.9) == true -; run: %fcmp_ne_f64(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_ne_f64(0x1.4cccccccccccdp0, 0x1.8p0) == true -; run: %fcmp_ne_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == true -; run: %fcmp_ne_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == true -; run: %fcmp_ne_f64(-0x0.5, -0x1.0) == true -; run: %fcmp_ne_f64(-0x1.5, -0x2.9) == true -; run: %fcmp_ne_f64(-0x1.1p10, -0x1.3333333333333p-1) == true -; run: %fcmp_ne_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == true -; run: %fcmp_ne_f64(-0x1.8p0, -0x1.b333333333333p0) == true -; run: %fcmp_ne_f64(-0x1.4p1, -0x1.6666666666666p1) == true -; run: %fcmp_ne_f64(0x0.5, -0x1.0) == true -; run: %fcmp_ne_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == true +; run: %fcmp_ne_f64(0x0.5, 0x0.5) == 0 +; run: %fcmp_ne_f64(0x1.0, 0x1.0) == 0 +; run: %fcmp_ne_f64(-0x1.0, 0x1.0) == 1 +; run: %fcmp_ne_f64(0x1.0, -0x1.0) == 1 +; run: %fcmp_ne_f64(0x0.5, 0x1.0) == 1 +; run: %fcmp_ne_f64(0x1.5, 0x2.9) == 1 +; run: %fcmp_ne_f64(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_ne_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 1 +; run: %fcmp_ne_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 1 +; run: %fcmp_ne_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 1 +; run: %fcmp_ne_f64(-0x0.5, -0x1.0) == 1 +; run: %fcmp_ne_f64(-0x1.5, -0x2.9) == 1 +; run: %fcmp_ne_f64(-0x1.1p10, -0x1.3333333333333p-1) == 1 +; run: %fcmp_ne_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 1 +; run: %fcmp_ne_f64(-0x1.8p0, -0x1.b333333333333p0) == 1 +; run: %fcmp_ne_f64(-0x1.4p1, -0x1.6666666666666p1) == 1 +; run: %fcmp_ne_f64(0x0.5, -0x1.0) == 1 +; run: %fcmp_ne_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 1 ; Zeroes -; run: %fcmp_ne_f64(0x0.0, 0x0.0) == false -; run: %fcmp_ne_f64(-0x0.0, -0x0.0) == false -; run: %fcmp_ne_f64(0x0.0, -0x0.0) == false -; run: %fcmp_ne_f64(-0x0.0, 0x0.0) == false +; run: %fcmp_ne_f64(0x0.0, 0x0.0) == 0 +; run: %fcmp_ne_f64(-0x0.0, -0x0.0) == 0 +; run: %fcmp_ne_f64(0x0.0, -0x0.0) == 0 +; run: %fcmp_ne_f64(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_ne_f64(Inf, Inf) == false -; run: %fcmp_ne_f64(-Inf, -Inf) == false -; run: %fcmp_ne_f64(Inf, -Inf) == true -; run: %fcmp_ne_f64(-Inf, Inf) == true +; run: %fcmp_ne_f64(Inf, Inf) == 0 +; run: %fcmp_ne_f64(-Inf, -Inf) == 0 +; run: %fcmp_ne_f64(Inf, -Inf) == 1 +; run: %fcmp_ne_f64(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_ne_f64(0x0.0, Inf) == true -; run: %fcmp_ne_f64(-0x0.0, Inf) == true -; run: %fcmp_ne_f64(0x0.0, -Inf) == true -; run: %fcmp_ne_f64(-0x0.0, -Inf) == true -; run: %fcmp_ne_f64(Inf, 0x0.0) == true -; run: %fcmp_ne_f64(Inf, -0x0.0) == true -; run: %fcmp_ne_f64(-Inf, 0x0.0) == true -; run: %fcmp_ne_f64(-Inf, -0x0.0) == true +; run: %fcmp_ne_f64(0x0.0, Inf) == 1 +; run: %fcmp_ne_f64(-0x0.0, Inf) == 1 +; run: %fcmp_ne_f64(0x0.0, -Inf) == 1 +; run: %fcmp_ne_f64(-0x0.0, -Inf) == 1 +; run: %fcmp_ne_f64(Inf, 0x0.0) == 1 +; run: %fcmp_ne_f64(Inf, -0x0.0) == 1 +; run: %fcmp_ne_f64(-Inf, 0x0.0) == 1 +; run: %fcmp_ne_f64(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_ne_f64(0x1.0p-52, 0x1.0p-52) == false -; run: %fcmp_ne_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == false -; run: %fcmp_ne_f64(0x1.0p-1022, 0x1.0p-1022) == false -; run: %fcmp_ne_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == true -; run: %fcmp_ne_f64(0x1.0p-52, 0x1.0p-1022) == true -; run: %fcmp_ne_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == true +; run: %fcmp_ne_f64(0x1.0p-52, 0x1.0p-52) == 0 +; run: 
%fcmp_ne_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_ne_f64(0x1.0p-1022, 0x1.0p-1022) == 0 +; run: %fcmp_ne_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_ne_f64(0x1.0p-52, 0x1.0p-1022) == 1 +; run: %fcmp_ne_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 1 ; Subnormals -; run: %fcmp_ne_f64(0x0.8p-1022, -0x0.8p-1022) == true -; run: %fcmp_ne_f64(-0x0.8p-1022, 0x0.8p-1022) == true -; run: %fcmp_ne_f64(0x0.8p-1022, 0x0.0) == true -; run: %fcmp_ne_f64(-0x0.8p-1022, 0x0.0) == true -; run: %fcmp_ne_f64(0x0.8p-1022, -0x0.0) == true -; run: %fcmp_ne_f64(-0x0.8p-1022, -0x0.0) == true -; run: %fcmp_ne_f64(0x0.0, 0x0.8p-1022) == true -; run: %fcmp_ne_f64(0x0.0, -0x0.8p-1022) == true -; run: %fcmp_ne_f64(-0x0.0, 0x0.8p-1022) == true -; run: %fcmp_ne_f64(-0x0.0, -0x0.8p-1022) == true +; run: %fcmp_ne_f64(0x0.8p-1022, -0x0.8p-1022) == 1 +; run: %fcmp_ne_f64(-0x0.8p-1022, 0x0.8p-1022) == 1 +; run: %fcmp_ne_f64(0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_ne_f64(-0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_ne_f64(0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_ne_f64(-0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_ne_f64(0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_ne_f64(0x0.0, -0x0.8p-1022) == 1 +; run: %fcmp_ne_f64(-0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_ne_f64(-0x0.0, -0x0.8p-1022) == 1 ; NaN's -; run: %fcmp_ne_f64(+NaN, +NaN) == true -; run: %fcmp_ne_f64(-NaN, -NaN) == true -; run: %fcmp_ne_f64(+NaN, -NaN) == true -; run: %fcmp_ne_f64(-NaN, +NaN) == true +; run: %fcmp_ne_f64(+NaN, +NaN) == 1 +; run: %fcmp_ne_f64(-NaN, -NaN) == 1 +; run: %fcmp_ne_f64(+NaN, -NaN) == 1 +; run: %fcmp_ne_f64(-NaN, +NaN) == 1 -; run: %fcmp_ne_f64(+NaN, -0x1.0) == true -; run: %fcmp_ne_f64(-NaN, -0x1.0) == true -; run: %fcmp_ne_f64(+NaN, 0x1.0) == true -; run: %fcmp_ne_f64(-NaN, 0x1.0) == true -; run: %fcmp_ne_f64(+NaN, -0x0.0) == true -; run: %fcmp_ne_f64(-NaN, -0x0.0) == true -; run: %fcmp_ne_f64(+NaN, 0x0.0) == true -; run: %fcmp_ne_f64(-NaN, 0x0.0) == true -; run: %fcmp_ne_f64(+NaN, -Inf) == true -; run: %fcmp_ne_f64(-NaN, -Inf) == true -; run: %fcmp_ne_f64(+NaN, Inf) == true -; run: %fcmp_ne_f64(-NaN, Inf) == true -; run: %fcmp_ne_f64(-0x0.0, +NaN) == true -; run: %fcmp_ne_f64(-0x0.0, -NaN) == true -; run: %fcmp_ne_f64(0x0.0, +NaN) == true -; run: %fcmp_ne_f64(0x0.0, -NaN) == true -; run: %fcmp_ne_f64(-Inf, +NaN) == true -; run: %fcmp_ne_f64(-Inf, -NaN) == true -; run: %fcmp_ne_f64(Inf, +NaN) == true -; run: %fcmp_ne_f64(Inf, -NaN) == true +; run: %fcmp_ne_f64(+NaN, -0x1.0) == 1 +; run: %fcmp_ne_f64(-NaN, -0x1.0) == 1 +; run: %fcmp_ne_f64(+NaN, 0x1.0) == 1 +; run: %fcmp_ne_f64(-NaN, 0x1.0) == 1 +; run: %fcmp_ne_f64(+NaN, -0x0.0) == 1 +; run: %fcmp_ne_f64(-NaN, -0x0.0) == 1 +; run: %fcmp_ne_f64(+NaN, 0x0.0) == 1 +; run: %fcmp_ne_f64(-NaN, 0x0.0) == 1 +; run: %fcmp_ne_f64(+NaN, -Inf) == 1 +; run: %fcmp_ne_f64(-NaN, -Inf) == 1 +; run: %fcmp_ne_f64(+NaN, Inf) == 1 +; run: %fcmp_ne_f64(-NaN, Inf) == 1 +; run: %fcmp_ne_f64(-0x0.0, +NaN) == 1 +; run: %fcmp_ne_f64(-0x0.0, -NaN) == 1 +; run: %fcmp_ne_f64(0x0.0, +NaN) == 1 +; run: %fcmp_ne_f64(0x0.0, -NaN) == 1 +; run: %fcmp_ne_f64(-Inf, +NaN) == 1 +; run: %fcmp_ne_f64(-Inf, -NaN) == 1 +; run: %fcmp_ne_f64(Inf, +NaN) == 1 +; run: %fcmp_ne_f64(Inf, -NaN) == 1 -; run: %fcmp_ne_f64(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ne_f64(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ne_f64(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ne_f64(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ne_f64(+NaN:0x1, +NaN) == true -; run: %fcmp_ne_f64(+NaN:0x1, -NaN) == true -; run: %fcmp_ne_f64(-NaN:0x1, -NaN) 
== true -; run: %fcmp_ne_f64(-NaN:0x1, +NaN) == true +; run: %fcmp_ne_f64(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ne_f64(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ne_f64(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ne_f64(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ne_f64(+NaN:0x1, +NaN) == 1 +; run: %fcmp_ne_f64(+NaN:0x1, -NaN) == 1 +; run: %fcmp_ne_f64(-NaN:0x1, -NaN) == 1 +; run: %fcmp_ne_f64(-NaN:0x1, +NaN) == 1 -; run: %fcmp_ne_f64(+NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_ne_f64(-NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_ne_f64(+NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_ne_f64(-NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_ne_f64(+NaN:0x800000000001, +NaN) == true -; run: %fcmp_ne_f64(+NaN:0x800000000001, -NaN) == true -; run: %fcmp_ne_f64(-NaN:0x800000000001, -NaN) == true -; run: %fcmp_ne_f64(-NaN:0x800000000001, +NaN) == true +; run: %fcmp_ne_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_ne_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_ne_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_ne_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_ne_f64(+NaN:0x800000000001, +NaN) == 1 +; run: %fcmp_ne_f64(+NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_ne_f64(-NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_ne_f64(-NaN:0x800000000001, +NaN) == 1 ; sNaN's -; run: %fcmp_ne_f64(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ne_f64(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ne_f64(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ne_f64(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_ne_f64(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ne_f64(-sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ne_f64(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ne_f64(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_ne_f64(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_ne_f64(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_ne_f64(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_ne_f64(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_ne_f64(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_ne_f64(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_ne_f64(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_ne_f64(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_ne_f64(+sNaN:0x1, -Inf) == true -; run: %fcmp_ne_f64(-sNaN:0x1, -Inf) == true -; run: %fcmp_ne_f64(+sNaN:0x1, Inf) == true -; run: %fcmp_ne_f64(-sNaN:0x1, Inf) == true -; run: %fcmp_ne_f64(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_ne_f64(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_ne_f64(0x0.0, +sNaN:0x1) == true -; run: %fcmp_ne_f64(0x0.0, -sNaN:0x1) == true -; run: %fcmp_ne_f64(-Inf, +sNaN:0x1) == true -; run: %fcmp_ne_f64(-Inf, -sNaN:0x1) == true -; run: %fcmp_ne_f64(Inf, +sNaN:0x1) == true -; run: %fcmp_ne_f64(Inf, -sNaN:0x1) == true +; run: %fcmp_ne_f64(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ne_f64(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ne_f64(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ne_f64(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ne_f64(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ne_f64(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ne_f64(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ne_f64(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ne_f64(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_ne_f64(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_ne_f64(+sNaN:0x1, Inf) == 1 +; run: %fcmp_ne_f64(-sNaN:0x1, Inf) == 1 +; run: %fcmp_ne_f64(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ne_f64(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ne_f64(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ne_f64(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ne_f64(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_ne_f64(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_ne_f64(Inf, 
+sNaN:0x1) == 1 +; run: %fcmp_ne_f64(Inf, -sNaN:0x1) == 1 -; run: %fcmp_ne_f64(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ne_f64(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ne_f64(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ne_f64(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ne_f64(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ne_f64(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ne_f64(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ne_f64(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_ne_f64(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ne_f64(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ne_f64(+sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ne_f64(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ne_f64(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ne_f64(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ne_f64(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ne_f64(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_ne_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_ne_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_ne_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_ne_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_ne_f64(+sNaN:0x800000000001, +sNaN:0x1) == true -; run: %fcmp_ne_f64(+sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_ne_f64(-sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_ne_f64(-sNaN:0x800000000001, +sNaN:0x1) == true +; run: %fcmp_ne_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_ne_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_ne_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_ne_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_ne_f64(+sNaN:0x800000000001, +sNaN:0x1) == 1 +; run: %fcmp_ne_f64(+sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_ne_f64(-sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_ne_f64(-sNaN:0x800000000001, +sNaN:0x1) == 1 diff --git a/cranelift/filetests/filetests/runtests/fcmp-one.clif b/cranelift/filetests/filetests/runtests/fcmp-one.clif index 8c48d5dfc33b..ff17c5f841c0 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-one.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-one.clif @@ -4,316 +4,316 @@ target x86_64 target s390x target riscv64 -function %fcmp_one_f32(f32, f32) -> b1 { +function %fcmp_one_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp one v0, v1 return v2 } -; run: %fcmp_one_f32(0x0.5, 0x0.5) == false -; run: %fcmp_one_f32(0x1.0, 0x1.0) == false -; run: %fcmp_one_f32(-0x1.0, 0x1.0) == true -; run: %fcmp_one_f32(0x1.0, -0x1.0) == true -; run: %fcmp_one_f32(0x0.5, 0x1.0) == true -; run: %fcmp_one_f32(0x1.5, 0x2.9) == true -; run: %fcmp_one_f32(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_one_f32(0x1.4cccccp0, 0x1.8p0) == true -; run: %fcmp_one_f32(0x1.b33334p0, 0x1.99999ap-2) == true -; run: %fcmp_one_f32(0x1.333334p-1, 0x1.666666p1) == true -; run: %fcmp_one_f32(-0x0.5, -0x1.0) == true -; run: %fcmp_one_f32(-0x1.5, -0x2.9) == true -; run: %fcmp_one_f32(-0x1.1p10, -0x1.333334p-1) == true -; run: %fcmp_one_f32(-0x1.99999ap-2, -0x1.4cccccp0) == true -; run: %fcmp_one_f32(-0x1.8p0, -0x1.b33334p0) == true -; run: %fcmp_one_f32(-0x1.4p1, -0x1.666666p1) == true -; run: %fcmp_one_f32(0x0.5, -0x1.0) == true -; run: %fcmp_one_f32(0x1.b33334p0, -0x1.b33334p0) == true +; run: %fcmp_one_f32(0x0.5, 0x0.5) == 0 +; run: %fcmp_one_f32(0x1.0, 0x1.0) == 0 +; run: %fcmp_one_f32(-0x1.0, 0x1.0) == 1 +; run: %fcmp_one_f32(0x1.0, -0x1.0) == 1 +; run: %fcmp_one_f32(0x0.5, 0x1.0) == 1 +; run: %fcmp_one_f32(0x1.5, 0x2.9) == 1 +; run: 
%fcmp_one_f32(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_one_f32(0x1.4cccccp0, 0x1.8p0) == 1 +; run: %fcmp_one_f32(0x1.b33334p0, 0x1.99999ap-2) == 1 +; run: %fcmp_one_f32(0x1.333334p-1, 0x1.666666p1) == 1 +; run: %fcmp_one_f32(-0x0.5, -0x1.0) == 1 +; run: %fcmp_one_f32(-0x1.5, -0x2.9) == 1 +; run: %fcmp_one_f32(-0x1.1p10, -0x1.333334p-1) == 1 +; run: %fcmp_one_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 1 +; run: %fcmp_one_f32(-0x1.8p0, -0x1.b33334p0) == 1 +; run: %fcmp_one_f32(-0x1.4p1, -0x1.666666p1) == 1 +; run: %fcmp_one_f32(0x0.5, -0x1.0) == 1 +; run: %fcmp_one_f32(0x1.b33334p0, -0x1.b33334p0) == 1 ; Zeroes -; run: %fcmp_one_f32(0x0.0, 0x0.0) == false -; run: %fcmp_one_f32(-0x0.0, -0x0.0) == false -; run: %fcmp_one_f32(0x0.0, -0x0.0) == false -; run: %fcmp_one_f32(-0x0.0, 0x0.0) == false +; run: %fcmp_one_f32(0x0.0, 0x0.0) == 0 +; run: %fcmp_one_f32(-0x0.0, -0x0.0) == 0 +; run: %fcmp_one_f32(0x0.0, -0x0.0) == 0 +; run: %fcmp_one_f32(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_one_f32(Inf, Inf) == false -; run: %fcmp_one_f32(-Inf, -Inf) == false -; run: %fcmp_one_f32(Inf, -Inf) == true -; run: %fcmp_one_f32(-Inf, Inf) == true +; run: %fcmp_one_f32(Inf, Inf) == 0 +; run: %fcmp_one_f32(-Inf, -Inf) == 0 +; run: %fcmp_one_f32(Inf, -Inf) == 1 +; run: %fcmp_one_f32(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_one_f32(0x0.0, Inf) == true -; run: %fcmp_one_f32(-0x0.0, Inf) == true -; run: %fcmp_one_f32(0x0.0, -Inf) == true -; run: %fcmp_one_f32(-0x0.0, -Inf) == true -; run: %fcmp_one_f32(Inf, 0x0.0) == true -; run: %fcmp_one_f32(Inf, -0x0.0) == true -; run: %fcmp_one_f32(-Inf, 0x0.0) == true -; run: %fcmp_one_f32(-Inf, -0x0.0) == true +; run: %fcmp_one_f32(0x0.0, Inf) == 1 +; run: %fcmp_one_f32(-0x0.0, Inf) == 1 +; run: %fcmp_one_f32(0x0.0, -Inf) == 1 +; run: %fcmp_one_f32(-0x0.0, -Inf) == 1 +; run: %fcmp_one_f32(Inf, 0x0.0) == 1 +; run: %fcmp_one_f32(Inf, -0x0.0) == 1 +; run: %fcmp_one_f32(-Inf, 0x0.0) == 1 +; run: %fcmp_one_f32(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_one_f32(0x1.0p-23, 0x1.0p-23) == false -; run: %fcmp_one_f32(0x1.fffffep127, 0x1.fffffep127) == false -; run: %fcmp_one_f32(0x1.0p-126, 0x1.0p-126) == false -; run: %fcmp_one_f32(0x1.0p-23, 0x1.fffffep127) == true -; run: %fcmp_one_f32(0x1.0p-23, 0x1.0p-126) == true -; run: %fcmp_one_f32(0x1.0p-126, 0x1.fffffep127) == true +; run: %fcmp_one_f32(0x1.0p-23, 0x1.0p-23) == 0 +; run: %fcmp_one_f32(0x1.fffffep127, 0x1.fffffep127) == 0 +; run: %fcmp_one_f32(0x1.0p-126, 0x1.0p-126) == 0 +; run: %fcmp_one_f32(0x1.0p-23, 0x1.fffffep127) == 1 +; run: %fcmp_one_f32(0x1.0p-23, 0x1.0p-126) == 1 +; run: %fcmp_one_f32(0x1.0p-126, 0x1.fffffep127) == 1 ; Subnormals -; run: %fcmp_one_f32(0x0.800002p-126, -0x0.800002p-126) == true -; run: %fcmp_one_f32(-0x0.800002p-126, 0x0.800002p-126) == true -; run: %fcmp_one_f32(0x0.800002p-126, 0x0.0) == true -; run: %fcmp_one_f32(-0x0.800002p-126, 0x0.0) == true -; run: %fcmp_one_f32(0x0.800002p-126, -0x0.0) == true -; run: %fcmp_one_f32(-0x0.800002p-126, -0x0.0) == true -; run: %fcmp_one_f32(0x0.0, 0x0.800002p-126) == true -; run: %fcmp_one_f32(0x0.0, -0x0.800002p-126) == true -; run: %fcmp_one_f32(-0x0.0, 0x0.800002p-126) == true -; run: %fcmp_one_f32(-0x0.0, -0x0.800002p-126) == true +; run: %fcmp_one_f32(0x0.800002p-126, -0x0.800002p-126) == 1 +; run: %fcmp_one_f32(-0x0.800002p-126, 0x0.800002p-126) == 1 +; run: %fcmp_one_f32(0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_one_f32(-0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_one_f32(0x0.800002p-126, -0x0.0) == 1 +; run: 
%fcmp_one_f32(-0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_one_f32(0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_one_f32(0x0.0, -0x0.800002p-126) == 1 +; run: %fcmp_one_f32(-0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_one_f32(-0x0.0, -0x0.800002p-126) == 1 ; NaN's -; run: %fcmp_one_f32(+NaN, +NaN) == false -; run: %fcmp_one_f32(-NaN, -NaN) == false -; run: %fcmp_one_f32(+NaN, -NaN) == false -; run: %fcmp_one_f32(-NaN, +NaN) == false +; run: %fcmp_one_f32(+NaN, +NaN) == 0 +; run: %fcmp_one_f32(-NaN, -NaN) == 0 +; run: %fcmp_one_f32(+NaN, -NaN) == 0 +; run: %fcmp_one_f32(-NaN, +NaN) == 0 -; run: %fcmp_one_f32(+NaN, -0x1.0) == false -; run: %fcmp_one_f32(-NaN, -0x1.0) == false -; run: %fcmp_one_f32(+NaN, 0x1.0) == false -; run: %fcmp_one_f32(-NaN, 0x1.0) == false -; run: %fcmp_one_f32(+NaN, -0x0.0) == false -; run: %fcmp_one_f32(-NaN, -0x0.0) == false -; run: %fcmp_one_f32(+NaN, 0x0.0) == false -; run: %fcmp_one_f32(-NaN, 0x0.0) == false -; run: %fcmp_one_f32(+NaN, -Inf) == false -; run: %fcmp_one_f32(-NaN, -Inf) == false -; run: %fcmp_one_f32(+NaN, Inf) == false -; run: %fcmp_one_f32(-NaN, Inf) == false -; run: %fcmp_one_f32(-0x0.0, +NaN) == false -; run: %fcmp_one_f32(-0x0.0, -NaN) == false -; run: %fcmp_one_f32(0x0.0, +NaN) == false -; run: %fcmp_one_f32(0x0.0, -NaN) == false -; run: %fcmp_one_f32(-Inf, +NaN) == false -; run: %fcmp_one_f32(-Inf, -NaN) == false -; run: %fcmp_one_f32(Inf, +NaN) == false -; run: %fcmp_one_f32(Inf, -NaN) == false +; run: %fcmp_one_f32(+NaN, -0x1.0) == 0 +; run: %fcmp_one_f32(-NaN, -0x1.0) == 0 +; run: %fcmp_one_f32(+NaN, 0x1.0) == 0 +; run: %fcmp_one_f32(-NaN, 0x1.0) == 0 +; run: %fcmp_one_f32(+NaN, -0x0.0) == 0 +; run: %fcmp_one_f32(-NaN, -0x0.0) == 0 +; run: %fcmp_one_f32(+NaN, 0x0.0) == 0 +; run: %fcmp_one_f32(-NaN, 0x0.0) == 0 +; run: %fcmp_one_f32(+NaN, -Inf) == 0 +; run: %fcmp_one_f32(-NaN, -Inf) == 0 +; run: %fcmp_one_f32(+NaN, Inf) == 0 +; run: %fcmp_one_f32(-NaN, Inf) == 0 +; run: %fcmp_one_f32(-0x0.0, +NaN) == 0 +; run: %fcmp_one_f32(-0x0.0, -NaN) == 0 +; run: %fcmp_one_f32(0x0.0, +NaN) == 0 +; run: %fcmp_one_f32(0x0.0, -NaN) == 0 +; run: %fcmp_one_f32(-Inf, +NaN) == 0 +; run: %fcmp_one_f32(-Inf, -NaN) == 0 +; run: %fcmp_one_f32(Inf, +NaN) == 0 +; run: %fcmp_one_f32(Inf, -NaN) == 0 -; run: %fcmp_one_f32(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_one_f32(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_one_f32(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_one_f32(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_one_f32(+NaN:0x1, +NaN) == false -; run: %fcmp_one_f32(+NaN:0x1, -NaN) == false -; run: %fcmp_one_f32(-NaN:0x1, -NaN) == false -; run: %fcmp_one_f32(-NaN:0x1, +NaN) == false +; run: %fcmp_one_f32(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_one_f32(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_one_f32(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_one_f32(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_one_f32(+NaN:0x1, +NaN) == 0 +; run: %fcmp_one_f32(+NaN:0x1, -NaN) == 0 +; run: %fcmp_one_f32(-NaN:0x1, -NaN) == 0 +; run: %fcmp_one_f32(-NaN:0x1, +NaN) == 0 -; run: %fcmp_one_f32(+NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_one_f32(-NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_one_f32(+NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_one_f32(-NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_one_f32(+NaN:0x80001, +NaN) == false -; run: %fcmp_one_f32(+NaN:0x80001, -NaN) == false -; run: %fcmp_one_f32(-NaN:0x80001, -NaN) == false -; run: %fcmp_one_f32(-NaN:0x80001, +NaN) == false +; run: %fcmp_one_f32(+NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_one_f32(-NaN:0x80001, -NaN:0x80001) 
== 0 +; run: %fcmp_one_f32(+NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_one_f32(-NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_one_f32(+NaN:0x80001, +NaN) == 0 +; run: %fcmp_one_f32(+NaN:0x80001, -NaN) == 0 +; run: %fcmp_one_f32(-NaN:0x80001, -NaN) == 0 +; run: %fcmp_one_f32(-NaN:0x80001, +NaN) == 0 ; sNaN's -; run: %fcmp_one_f32(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_one_f32(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_one_f32(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_one_f32(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_one_f32(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_one_f32(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_one_f32(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_one_f32(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_one_f32(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_one_f32(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_one_f32(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_one_f32(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_one_f32(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_one_f32(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_one_f32(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_one_f32(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_one_f32(+sNaN:0x1, -Inf) == false -; run: %fcmp_one_f32(-sNaN:0x1, -Inf) == false -; run: %fcmp_one_f32(+sNaN:0x1, Inf) == false -; run: %fcmp_one_f32(-sNaN:0x1, Inf) == false -; run: %fcmp_one_f32(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_one_f32(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_one_f32(0x0.0, +sNaN:0x1) == false -; run: %fcmp_one_f32(0x0.0, -sNaN:0x1) == false -; run: %fcmp_one_f32(-Inf, +sNaN:0x1) == false -; run: %fcmp_one_f32(-Inf, -sNaN:0x1) == false -; run: %fcmp_one_f32(Inf, +sNaN:0x1) == false -; run: %fcmp_one_f32(Inf, -sNaN:0x1) == false +; run: %fcmp_one_f32(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_one_f32(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_one_f32(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_one_f32(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_one_f32(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_one_f32(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_one_f32(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_one_f32(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_one_f32(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_one_f32(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_one_f32(+sNaN:0x1, Inf) == 0 +; run: %fcmp_one_f32(-sNaN:0x1, Inf) == 0 +; run: %fcmp_one_f32(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_one_f32(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_one_f32(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_one_f32(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_one_f32(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_one_f32(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_one_f32(Inf, +sNaN:0x1) == 0 +; run: %fcmp_one_f32(Inf, -sNaN:0x1) == 0 -; run: %fcmp_one_f32(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_one_f32(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_one_f32(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_one_f32(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_one_f32(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_one_f32(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_one_f32(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_one_f32(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_one_f32(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_one_f32(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_one_f32(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_one_f32(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_one_f32(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_one_f32(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_one_f32(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_one_f32(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_one_f32(+sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_one_f32(-sNaN:0x80001, -sNaN:0x80001) == false -; run: %fcmp_one_f32(+sNaN:0x80001, 
-sNaN:0x80001) == false -; run: %fcmp_one_f32(-sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_one_f32(+sNaN:0x80001, +sNaN:0x1) == false -; run: %fcmp_one_f32(+sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_one_f32(-sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_one_f32(-sNaN:0x80001, +sNaN:0x1) == false +; run: %fcmp_one_f32(+sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_one_f32(-sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_one_f32(+sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_one_f32(-sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_one_f32(+sNaN:0x80001, +sNaN:0x1) == 0 +; run: %fcmp_one_f32(+sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_one_f32(-sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_one_f32(-sNaN:0x80001, +sNaN:0x1) == 0 -function %fcmp_one_f64(f64, f64) -> b1 { +function %fcmp_one_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp one v0, v1 return v2 } -; run: %fcmp_one_f64(0x0.5, 0x0.5) == false -; run: %fcmp_one_f64(0x1.0, 0x1.0) == false -; run: %fcmp_one_f64(-0x1.0, 0x1.0) == true -; run: %fcmp_one_f64(0x1.0, -0x1.0) == true -; run: %fcmp_one_f64(0x0.5, 0x1.0) == true -; run: %fcmp_one_f64(0x1.5, 0x2.9) == true -; run: %fcmp_one_f64(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_one_f64(0x1.4cccccccccccdp0, 0x1.8p0) == true -; run: %fcmp_one_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == true -; run: %fcmp_one_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == true -; run: %fcmp_one_f64(-0x0.5, -0x1.0) == true -; run: %fcmp_one_f64(-0x1.5, -0x2.9) == true -; run: %fcmp_one_f64(-0x1.1p10, -0x1.3333333333333p-1) == true -; run: %fcmp_one_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == true -; run: %fcmp_one_f64(-0x1.8p0, -0x1.b333333333333p0) == true -; run: %fcmp_one_f64(-0x1.4p1, -0x1.6666666666666p1) == true -; run: %fcmp_one_f64(0x0.5, -0x1.0) == true -; run: %fcmp_one_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == true +; run: %fcmp_one_f64(0x0.5, 0x0.5) == 0 +; run: %fcmp_one_f64(0x1.0, 0x1.0) == 0 +; run: %fcmp_one_f64(-0x1.0, 0x1.0) == 1 +; run: %fcmp_one_f64(0x1.0, -0x1.0) == 1 +; run: %fcmp_one_f64(0x0.5, 0x1.0) == 1 +; run: %fcmp_one_f64(0x1.5, 0x2.9) == 1 +; run: %fcmp_one_f64(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_one_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 1 +; run: %fcmp_one_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 1 +; run: %fcmp_one_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 1 +; run: %fcmp_one_f64(-0x0.5, -0x1.0) == 1 +; run: %fcmp_one_f64(-0x1.5, -0x2.9) == 1 +; run: %fcmp_one_f64(-0x1.1p10, -0x1.3333333333333p-1) == 1 +; run: %fcmp_one_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 1 +; run: %fcmp_one_f64(-0x1.8p0, -0x1.b333333333333p0) == 1 +; run: %fcmp_one_f64(-0x1.4p1, -0x1.6666666666666p1) == 1 +; run: %fcmp_one_f64(0x0.5, -0x1.0) == 1 +; run: %fcmp_one_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 1 ; Zeroes -; run: %fcmp_one_f64(0x0.0, 0x0.0) == false -; run: %fcmp_one_f64(-0x0.0, -0x0.0) == false -; run: %fcmp_one_f64(0x0.0, -0x0.0) == false -; run: %fcmp_one_f64(-0x0.0, 0x0.0) == false +; run: %fcmp_one_f64(0x0.0, 0x0.0) == 0 +; run: %fcmp_one_f64(-0x0.0, -0x0.0) == 0 +; run: %fcmp_one_f64(0x0.0, -0x0.0) == 0 +; run: %fcmp_one_f64(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_one_f64(Inf, Inf) == false -; run: %fcmp_one_f64(-Inf, -Inf) == false -; run: %fcmp_one_f64(Inf, -Inf) == true -; run: %fcmp_one_f64(-Inf, Inf) == true +; run: %fcmp_one_f64(Inf, Inf) == 0 +; run: %fcmp_one_f64(-Inf, -Inf) == 0 +; run: %fcmp_one_f64(Inf, -Inf) == 1 +; run: %fcmp_one_f64(-Inf, Inf) == 1 ; Inf/Zero -; run: 
%fcmp_one_f64(0x0.0, Inf) == true -; run: %fcmp_one_f64(-0x0.0, Inf) == true -; run: %fcmp_one_f64(0x0.0, -Inf) == true -; run: %fcmp_one_f64(-0x0.0, -Inf) == true -; run: %fcmp_one_f64(Inf, 0x0.0) == true -; run: %fcmp_one_f64(Inf, -0x0.0) == true -; run: %fcmp_one_f64(-Inf, 0x0.0) == true -; run: %fcmp_one_f64(-Inf, -0x0.0) == true +; run: %fcmp_one_f64(0x0.0, Inf) == 1 +; run: %fcmp_one_f64(-0x0.0, Inf) == 1 +; run: %fcmp_one_f64(0x0.0, -Inf) == 1 +; run: %fcmp_one_f64(-0x0.0, -Inf) == 1 +; run: %fcmp_one_f64(Inf, 0x0.0) == 1 +; run: %fcmp_one_f64(Inf, -0x0.0) == 1 +; run: %fcmp_one_f64(-Inf, 0x0.0) == 1 +; run: %fcmp_one_f64(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_one_f64(0x1.0p-52, 0x1.0p-52) == false -; run: %fcmp_one_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == false -; run: %fcmp_one_f64(0x1.0p-1022, 0x1.0p-1022) == false -; run: %fcmp_one_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == true -; run: %fcmp_one_f64(0x1.0p-52, 0x1.0p-1022) == true -; run: %fcmp_one_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == true +; run: %fcmp_one_f64(0x1.0p-52, 0x1.0p-52) == 0 +; run: %fcmp_one_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_one_f64(0x1.0p-1022, 0x1.0p-1022) == 0 +; run: %fcmp_one_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_one_f64(0x1.0p-52, 0x1.0p-1022) == 1 +; run: %fcmp_one_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 1 ; Subnormals -; run: %fcmp_one_f64(0x0.8p-1022, -0x0.8p-1022) == true -; run: %fcmp_one_f64(-0x0.8p-1022, 0x0.8p-1022) == true -; run: %fcmp_one_f64(0x0.8p-1022, 0x0.0) == true -; run: %fcmp_one_f64(-0x0.8p-1022, 0x0.0) == true -; run: %fcmp_one_f64(0x0.8p-1022, -0x0.0) == true -; run: %fcmp_one_f64(-0x0.8p-1022, -0x0.0) == true -; run: %fcmp_one_f64(0x0.0, 0x0.8p-1022) == true -; run: %fcmp_one_f64(0x0.0, -0x0.8p-1022) == true -; run: %fcmp_one_f64(-0x0.0, 0x0.8p-1022) == true -; run: %fcmp_one_f64(-0x0.0, -0x0.8p-1022) == true +; run: %fcmp_one_f64(0x0.8p-1022, -0x0.8p-1022) == 1 +; run: %fcmp_one_f64(-0x0.8p-1022, 0x0.8p-1022) == 1 +; run: %fcmp_one_f64(0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_one_f64(-0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_one_f64(0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_one_f64(-0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_one_f64(0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_one_f64(0x0.0, -0x0.8p-1022) == 1 +; run: %fcmp_one_f64(-0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_one_f64(-0x0.0, -0x0.8p-1022) == 1 ; NaN's -; run: %fcmp_one_f64(+NaN, +NaN) == false -; run: %fcmp_one_f64(-NaN, -NaN) == false -; run: %fcmp_one_f64(+NaN, -NaN) == false -; run: %fcmp_one_f64(-NaN, +NaN) == false +; run: %fcmp_one_f64(+NaN, +NaN) == 0 +; run: %fcmp_one_f64(-NaN, -NaN) == 0 +; run: %fcmp_one_f64(+NaN, -NaN) == 0 +; run: %fcmp_one_f64(-NaN, +NaN) == 0 -; run: %fcmp_one_f64(+NaN, -0x1.0) == false -; run: %fcmp_one_f64(-NaN, -0x1.0) == false -; run: %fcmp_one_f64(+NaN, 0x1.0) == false -; run: %fcmp_one_f64(-NaN, 0x1.0) == false -; run: %fcmp_one_f64(+NaN, -0x0.0) == false -; run: %fcmp_one_f64(-NaN, -0x0.0) == false -; run: %fcmp_one_f64(+NaN, 0x0.0) == false -; run: %fcmp_one_f64(-NaN, 0x0.0) == false -; run: %fcmp_one_f64(+NaN, -Inf) == false -; run: %fcmp_one_f64(-NaN, -Inf) == false -; run: %fcmp_one_f64(+NaN, Inf) == false -; run: %fcmp_one_f64(-NaN, Inf) == false -; run: %fcmp_one_f64(-0x0.0, +NaN) == false -; run: %fcmp_one_f64(-0x0.0, -NaN) == false -; run: %fcmp_one_f64(0x0.0, +NaN) == false -; run: %fcmp_one_f64(0x0.0, -NaN) == false -; run: %fcmp_one_f64(-Inf, +NaN) == false -; run: %fcmp_one_f64(-Inf, 
-NaN) == false -; run: %fcmp_one_f64(Inf, +NaN) == false -; run: %fcmp_one_f64(Inf, -NaN) == false +; run: %fcmp_one_f64(+NaN, -0x1.0) == 0 +; run: %fcmp_one_f64(-NaN, -0x1.0) == 0 +; run: %fcmp_one_f64(+NaN, 0x1.0) == 0 +; run: %fcmp_one_f64(-NaN, 0x1.0) == 0 +; run: %fcmp_one_f64(+NaN, -0x0.0) == 0 +; run: %fcmp_one_f64(-NaN, -0x0.0) == 0 +; run: %fcmp_one_f64(+NaN, 0x0.0) == 0 +; run: %fcmp_one_f64(-NaN, 0x0.0) == 0 +; run: %fcmp_one_f64(+NaN, -Inf) == 0 +; run: %fcmp_one_f64(-NaN, -Inf) == 0 +; run: %fcmp_one_f64(+NaN, Inf) == 0 +; run: %fcmp_one_f64(-NaN, Inf) == 0 +; run: %fcmp_one_f64(-0x0.0, +NaN) == 0 +; run: %fcmp_one_f64(-0x0.0, -NaN) == 0 +; run: %fcmp_one_f64(0x0.0, +NaN) == 0 +; run: %fcmp_one_f64(0x0.0, -NaN) == 0 +; run: %fcmp_one_f64(-Inf, +NaN) == 0 +; run: %fcmp_one_f64(-Inf, -NaN) == 0 +; run: %fcmp_one_f64(Inf, +NaN) == 0 +; run: %fcmp_one_f64(Inf, -NaN) == 0 -; run: %fcmp_one_f64(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_one_f64(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_one_f64(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_one_f64(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_one_f64(+NaN:0x1, +NaN) == false -; run: %fcmp_one_f64(+NaN:0x1, -NaN) == false -; run: %fcmp_one_f64(-NaN:0x1, -NaN) == false -; run: %fcmp_one_f64(-NaN:0x1, +NaN) == false +; run: %fcmp_one_f64(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_one_f64(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_one_f64(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_one_f64(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_one_f64(+NaN:0x1, +NaN) == 0 +; run: %fcmp_one_f64(+NaN:0x1, -NaN) == 0 +; run: %fcmp_one_f64(-NaN:0x1, -NaN) == 0 +; run: %fcmp_one_f64(-NaN:0x1, +NaN) == 0 -; run: %fcmp_one_f64(+NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_one_f64(-NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_one_f64(+NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_one_f64(-NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_one_f64(+NaN:0x800000000001, +NaN) == false -; run: %fcmp_one_f64(+NaN:0x800000000001, -NaN) == false -; run: %fcmp_one_f64(-NaN:0x800000000001, -NaN) == false -; run: %fcmp_one_f64(-NaN:0x800000000001, +NaN) == false +; run: %fcmp_one_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_one_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_one_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_one_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_one_f64(+NaN:0x800000000001, +NaN) == 0 +; run: %fcmp_one_f64(+NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_one_f64(-NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_one_f64(-NaN:0x800000000001, +NaN) == 0 ; sNaN's -; run: %fcmp_one_f64(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_one_f64(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_one_f64(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_one_f64(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_one_f64(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_one_f64(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_one_f64(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_one_f64(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_one_f64(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_one_f64(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_one_f64(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_one_f64(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_one_f64(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_one_f64(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_one_f64(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_one_f64(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_one_f64(+sNaN:0x1, -Inf) == false -; run: %fcmp_one_f64(-sNaN:0x1, 
-Inf) == false -; run: %fcmp_one_f64(+sNaN:0x1, Inf) == false -; run: %fcmp_one_f64(-sNaN:0x1, Inf) == false -; run: %fcmp_one_f64(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_one_f64(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_one_f64(0x0.0, +sNaN:0x1) == false -; run: %fcmp_one_f64(0x0.0, -sNaN:0x1) == false -; run: %fcmp_one_f64(-Inf, +sNaN:0x1) == false -; run: %fcmp_one_f64(-Inf, -sNaN:0x1) == false -; run: %fcmp_one_f64(Inf, +sNaN:0x1) == false -; run: %fcmp_one_f64(Inf, -sNaN:0x1) == false +; run: %fcmp_one_f64(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_one_f64(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_one_f64(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_one_f64(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_one_f64(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_one_f64(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_one_f64(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_one_f64(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_one_f64(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_one_f64(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_one_f64(+sNaN:0x1, Inf) == 0 +; run: %fcmp_one_f64(-sNaN:0x1, Inf) == 0 +; run: %fcmp_one_f64(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_one_f64(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_one_f64(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_one_f64(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_one_f64(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_one_f64(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_one_f64(Inf, +sNaN:0x1) == 0 +; run: %fcmp_one_f64(Inf, -sNaN:0x1) == 0 -; run: %fcmp_one_f64(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_one_f64(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_one_f64(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_one_f64(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_one_f64(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_one_f64(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_one_f64(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_one_f64(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_one_f64(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_one_f64(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_one_f64(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_one_f64(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_one_f64(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_one_f64(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_one_f64(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_one_f64(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_one_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_one_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_one_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_one_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_one_f64(+sNaN:0x800000000001, +sNaN:0x1) == false -; run: %fcmp_one_f64(+sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_one_f64(-sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_one_f64(-sNaN:0x800000000001, +sNaN:0x1) == false +; run: %fcmp_one_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_one_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_one_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_one_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_one_f64(+sNaN:0x800000000001, +sNaN:0x1) == 0 +; run: %fcmp_one_f64(+sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_one_f64(-sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_one_f64(-sNaN:0x800000000001, +sNaN:0x1) == 0 diff --git a/cranelift/filetests/filetests/runtests/fcmp-ord.clif b/cranelift/filetests/filetests/runtests/fcmp-ord.clif index 514eadaf2927..b1b1ad47eb2c 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-ord.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-ord.clif @@ 
-4,316 +4,316 @@ target x86_64 target s390x target riscv64 -function %fcmp_ord_f32(f32, f32) -> b1 { +function %fcmp_ord_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp ord v0, v1 return v2 } -; run: %fcmp_ord_f32(0x0.5, 0x0.5) == true -; run: %fcmp_ord_f32(0x1.0, 0x1.0) == true -; run: %fcmp_ord_f32(-0x1.0, 0x1.0) == true -; run: %fcmp_ord_f32(0x1.0, -0x1.0) == true -; run: %fcmp_ord_f32(0x0.5, 0x1.0) == true -; run: %fcmp_ord_f32(0x1.5, 0x2.9) == true -; run: %fcmp_ord_f32(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_ord_f32(0x1.4cccccp0, 0x1.8p0) == true -; run: %fcmp_ord_f32(0x1.b33334p0, 0x1.99999ap-2) == true -; run: %fcmp_ord_f32(0x1.333334p-1, 0x1.666666p1) == true -; run: %fcmp_ord_f32(-0x0.5, -0x1.0) == true -; run: %fcmp_ord_f32(-0x1.5, -0x2.9) == true -; run: %fcmp_ord_f32(-0x1.1p10, -0x1.333334p-1) == true -; run: %fcmp_ord_f32(-0x1.99999ap-2, -0x1.4cccccp0) == true -; run: %fcmp_ord_f32(-0x1.8p0, -0x1.b33334p0) == true -; run: %fcmp_ord_f32(-0x1.4p1, -0x1.666666p1) == true -; run: %fcmp_ord_f32(0x0.5, -0x1.0) == true -; run: %fcmp_ord_f32(0x1.b33334p0, -0x1.b33334p0) == true +; run: %fcmp_ord_f32(0x0.5, 0x0.5) == 1 +; run: %fcmp_ord_f32(0x1.0, 0x1.0) == 1 +; run: %fcmp_ord_f32(-0x1.0, 0x1.0) == 1 +; run: %fcmp_ord_f32(0x1.0, -0x1.0) == 1 +; run: %fcmp_ord_f32(0x0.5, 0x1.0) == 1 +; run: %fcmp_ord_f32(0x1.5, 0x2.9) == 1 +; run: %fcmp_ord_f32(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_ord_f32(0x1.4cccccp0, 0x1.8p0) == 1 +; run: %fcmp_ord_f32(0x1.b33334p0, 0x1.99999ap-2) == 1 +; run: %fcmp_ord_f32(0x1.333334p-1, 0x1.666666p1) == 1 +; run: %fcmp_ord_f32(-0x0.5, -0x1.0) == 1 +; run: %fcmp_ord_f32(-0x1.5, -0x2.9) == 1 +; run: %fcmp_ord_f32(-0x1.1p10, -0x1.333334p-1) == 1 +; run: %fcmp_ord_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 1 +; run: %fcmp_ord_f32(-0x1.8p0, -0x1.b33334p0) == 1 +; run: %fcmp_ord_f32(-0x1.4p1, -0x1.666666p1) == 1 +; run: %fcmp_ord_f32(0x0.5, -0x1.0) == 1 +; run: %fcmp_ord_f32(0x1.b33334p0, -0x1.b33334p0) == 1 ; Zeroes -; run: %fcmp_ord_f32(0x0.0, 0x0.0) == true -; run: %fcmp_ord_f32(-0x0.0, -0x0.0) == true -; run: %fcmp_ord_f32(0x0.0, -0x0.0) == true -; run: %fcmp_ord_f32(-0x0.0, 0x0.0) == true +; run: %fcmp_ord_f32(0x0.0, 0x0.0) == 1 +; run: %fcmp_ord_f32(-0x0.0, -0x0.0) == 1 +; run: %fcmp_ord_f32(0x0.0, -0x0.0) == 1 +; run: %fcmp_ord_f32(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_ord_f32(Inf, Inf) == true -; run: %fcmp_ord_f32(-Inf, -Inf) == true -; run: %fcmp_ord_f32(Inf, -Inf) == true -; run: %fcmp_ord_f32(-Inf, Inf) == true +; run: %fcmp_ord_f32(Inf, Inf) == 1 +; run: %fcmp_ord_f32(-Inf, -Inf) == 1 +; run: %fcmp_ord_f32(Inf, -Inf) == 1 +; run: %fcmp_ord_f32(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_ord_f32(0x0.0, Inf) == true -; run: %fcmp_ord_f32(-0x0.0, Inf) == true -; run: %fcmp_ord_f32(0x0.0, -Inf) == true -; run: %fcmp_ord_f32(-0x0.0, -Inf) == true -; run: %fcmp_ord_f32(Inf, 0x0.0) == true -; run: %fcmp_ord_f32(Inf, -0x0.0) == true -; run: %fcmp_ord_f32(-Inf, 0x0.0) == true -; run: %fcmp_ord_f32(-Inf, -0x0.0) == true +; run: %fcmp_ord_f32(0x0.0, Inf) == 1 +; run: %fcmp_ord_f32(-0x0.0, Inf) == 1 +; run: %fcmp_ord_f32(0x0.0, -Inf) == 1 +; run: %fcmp_ord_f32(-0x0.0, -Inf) == 1 +; run: %fcmp_ord_f32(Inf, 0x0.0) == 1 +; run: %fcmp_ord_f32(Inf, -0x0.0) == 1 +; run: %fcmp_ord_f32(-Inf, 0x0.0) == 1 +; run: %fcmp_ord_f32(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_ord_f32(0x1.0p-23, 0x1.0p-23) == true -; run: %fcmp_ord_f32(0x1.fffffep127, 0x1.fffffep127) == true -; run: %fcmp_ord_f32(0x1.0p-126, 0x1.0p-126) == true -; run: 
%fcmp_ord_f32(0x1.0p-23, 0x1.fffffep127) == true -; run: %fcmp_ord_f32(0x1.0p-23, 0x1.0p-126) == true -; run: %fcmp_ord_f32(0x1.0p-126, 0x1.fffffep127) == true +; run: %fcmp_ord_f32(0x1.0p-23, 0x1.0p-23) == 1 +; run: %fcmp_ord_f32(0x1.fffffep127, 0x1.fffffep127) == 1 +; run: %fcmp_ord_f32(0x1.0p-126, 0x1.0p-126) == 1 +; run: %fcmp_ord_f32(0x1.0p-23, 0x1.fffffep127) == 1 +; run: %fcmp_ord_f32(0x1.0p-23, 0x1.0p-126) == 1 +; run: %fcmp_ord_f32(0x1.0p-126, 0x1.fffffep127) == 1 ; Subnormals -; run: %fcmp_ord_f32(0x0.800002p-126, -0x0.800002p-126) == true -; run: %fcmp_ord_f32(-0x0.800002p-126, 0x0.800002p-126) == true -; run: %fcmp_ord_f32(0x0.800002p-126, 0x0.0) == true -; run: %fcmp_ord_f32(-0x0.800002p-126, 0x0.0) == true -; run: %fcmp_ord_f32(0x0.800002p-126, -0x0.0) == true -; run: %fcmp_ord_f32(-0x0.800002p-126, -0x0.0) == true -; run: %fcmp_ord_f32(0x0.0, 0x0.800002p-126) == true -; run: %fcmp_ord_f32(0x0.0, -0x0.800002p-126) == true -; run: %fcmp_ord_f32(-0x0.0, 0x0.800002p-126) == true -; run: %fcmp_ord_f32(-0x0.0, -0x0.800002p-126) == true +; run: %fcmp_ord_f32(0x0.800002p-126, -0x0.800002p-126) == 1 +; run: %fcmp_ord_f32(-0x0.800002p-126, 0x0.800002p-126) == 1 +; run: %fcmp_ord_f32(0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_ord_f32(-0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_ord_f32(0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_ord_f32(-0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_ord_f32(0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_ord_f32(0x0.0, -0x0.800002p-126) == 1 +; run: %fcmp_ord_f32(-0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_ord_f32(-0x0.0, -0x0.800002p-126) == 1 ; NaN's -; run: %fcmp_ord_f32(+NaN, +NaN) == false -; run: %fcmp_ord_f32(-NaN, -NaN) == false -; run: %fcmp_ord_f32(+NaN, -NaN) == false -; run: %fcmp_ord_f32(-NaN, +NaN) == false +; run: %fcmp_ord_f32(+NaN, +NaN) == 0 +; run: %fcmp_ord_f32(-NaN, -NaN) == 0 +; run: %fcmp_ord_f32(+NaN, -NaN) == 0 +; run: %fcmp_ord_f32(-NaN, +NaN) == 0 -; run: %fcmp_ord_f32(+NaN, -0x1.0) == false -; run: %fcmp_ord_f32(-NaN, -0x1.0) == false -; run: %fcmp_ord_f32(+NaN, 0x1.0) == false -; run: %fcmp_ord_f32(-NaN, 0x1.0) == false -; run: %fcmp_ord_f32(+NaN, -0x0.0) == false -; run: %fcmp_ord_f32(-NaN, -0x0.0) == false -; run: %fcmp_ord_f32(+NaN, 0x0.0) == false -; run: %fcmp_ord_f32(-NaN, 0x0.0) == false -; run: %fcmp_ord_f32(+NaN, -Inf) == false -; run: %fcmp_ord_f32(-NaN, -Inf) == false -; run: %fcmp_ord_f32(+NaN, Inf) == false -; run: %fcmp_ord_f32(-NaN, Inf) == false -; run: %fcmp_ord_f32(-0x0.0, +NaN) == false -; run: %fcmp_ord_f32(-0x0.0, -NaN) == false -; run: %fcmp_ord_f32(0x0.0, +NaN) == false -; run: %fcmp_ord_f32(0x0.0, -NaN) == false -; run: %fcmp_ord_f32(-Inf, +NaN) == false -; run: %fcmp_ord_f32(-Inf, -NaN) == false -; run: %fcmp_ord_f32(Inf, +NaN) == false -; run: %fcmp_ord_f32(Inf, -NaN) == false +; run: %fcmp_ord_f32(+NaN, -0x1.0) == 0 +; run: %fcmp_ord_f32(-NaN, -0x1.0) == 0 +; run: %fcmp_ord_f32(+NaN, 0x1.0) == 0 +; run: %fcmp_ord_f32(-NaN, 0x1.0) == 0 +; run: %fcmp_ord_f32(+NaN, -0x0.0) == 0 +; run: %fcmp_ord_f32(-NaN, -0x0.0) == 0 +; run: %fcmp_ord_f32(+NaN, 0x0.0) == 0 +; run: %fcmp_ord_f32(-NaN, 0x0.0) == 0 +; run: %fcmp_ord_f32(+NaN, -Inf) == 0 +; run: %fcmp_ord_f32(-NaN, -Inf) == 0 +; run: %fcmp_ord_f32(+NaN, Inf) == 0 +; run: %fcmp_ord_f32(-NaN, Inf) == 0 +; run: %fcmp_ord_f32(-0x0.0, +NaN) == 0 +; run: %fcmp_ord_f32(-0x0.0, -NaN) == 0 +; run: %fcmp_ord_f32(0x0.0, +NaN) == 0 +; run: %fcmp_ord_f32(0x0.0, -NaN) == 0 +; run: %fcmp_ord_f32(-Inf, +NaN) == 0 +; run: %fcmp_ord_f32(-Inf, -NaN) == 0 +; run: 
%fcmp_ord_f32(Inf, +NaN) == 0 +; run: %fcmp_ord_f32(Inf, -NaN) == 0 -; run: %fcmp_ord_f32(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_ord_f32(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_ord_f32(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_ord_f32(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_ord_f32(+NaN:0x1, +NaN) == false -; run: %fcmp_ord_f32(+NaN:0x1, -NaN) == false -; run: %fcmp_ord_f32(-NaN:0x1, -NaN) == false -; run: %fcmp_ord_f32(-NaN:0x1, +NaN) == false +; run: %fcmp_ord_f32(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ord_f32(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ord_f32(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ord_f32(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ord_f32(+NaN:0x1, +NaN) == 0 +; run: %fcmp_ord_f32(+NaN:0x1, -NaN) == 0 +; run: %fcmp_ord_f32(-NaN:0x1, -NaN) == 0 +; run: %fcmp_ord_f32(-NaN:0x1, +NaN) == 0 -; run: %fcmp_ord_f32(+NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_ord_f32(-NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_ord_f32(+NaN:0x80001, -NaN:0x80001) == false -; run: %fcmp_ord_f32(-NaN:0x80001, +NaN:0x80001) == false -; run: %fcmp_ord_f32(+NaN:0x80001, +NaN) == false -; run: %fcmp_ord_f32(+NaN:0x80001, -NaN) == false -; run: %fcmp_ord_f32(-NaN:0x80001, -NaN) == false -; run: %fcmp_ord_f32(-NaN:0x80001, +NaN) == false +; run: %fcmp_ord_f32(+NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_ord_f32(-NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_ord_f32(+NaN:0x80001, -NaN:0x80001) == 0 +; run: %fcmp_ord_f32(-NaN:0x80001, +NaN:0x80001) == 0 +; run: %fcmp_ord_f32(+NaN:0x80001, +NaN) == 0 +; run: %fcmp_ord_f32(+NaN:0x80001, -NaN) == 0 +; run: %fcmp_ord_f32(-NaN:0x80001, -NaN) == 0 +; run: %fcmp_ord_f32(-NaN:0x80001, +NaN) == 0 ; sNaN's -; run: %fcmp_ord_f32(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_ord_f32(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_ord_f32(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_ord_f32(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_ord_f32(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_ord_f32(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_ord_f32(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_ord_f32(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_ord_f32(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_ord_f32(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_ord_f32(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_ord_f32(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_ord_f32(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_ord_f32(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_ord_f32(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_ord_f32(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_ord_f32(+sNaN:0x1, -Inf) == false -; run: %fcmp_ord_f32(-sNaN:0x1, -Inf) == false -; run: %fcmp_ord_f32(+sNaN:0x1, Inf) == false -; run: %fcmp_ord_f32(-sNaN:0x1, Inf) == false -; run: %fcmp_ord_f32(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_ord_f32(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_ord_f32(0x0.0, +sNaN:0x1) == false -; run: %fcmp_ord_f32(0x0.0, -sNaN:0x1) == false -; run: %fcmp_ord_f32(-Inf, +sNaN:0x1) == false -; run: %fcmp_ord_f32(-Inf, -sNaN:0x1) == false -; run: %fcmp_ord_f32(Inf, +sNaN:0x1) == false -; run: %fcmp_ord_f32(Inf, -sNaN:0x1) == false +; run: %fcmp_ord_f32(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_ord_f32(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_ord_f32(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_ord_f32(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_ord_f32(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_ord_f32(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_ord_f32(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_ord_f32(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_ord_f32(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_ord_f32(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_ord_f32(+sNaN:0x1, Inf) == 0 
+; run: %fcmp_ord_f32(-sNaN:0x1, Inf) == 0 +; run: %fcmp_ord_f32(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_ord_f32(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_ord_f32(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_ord_f32(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_ord_f32(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_ord_f32(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_ord_f32(Inf, +sNaN:0x1) == 0 +; run: %fcmp_ord_f32(Inf, -sNaN:0x1) == 0 -; run: %fcmp_ord_f32(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_ord_f32(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_ord_f32(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_ord_f32(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_ord_f32(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_ord_f32(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_ord_f32(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_ord_f32(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_ord_f32(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ord_f32(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ord_f32(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ord_f32(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ord_f32(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_ord_f32(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_ord_f32(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_ord_f32(+NaN:0x1, -sNaN:0x1) == 0 -; run: %fcmp_ord_f32(+sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_ord_f32(-sNaN:0x80001, -sNaN:0x80001) == false -; run: %fcmp_ord_f32(+sNaN:0x80001, -sNaN:0x80001) == false -; run: %fcmp_ord_f32(-sNaN:0x80001, +sNaN:0x80001) == false -; run: %fcmp_ord_f32(+sNaN:0x80001, +sNaN:0x1) == false -; run: %fcmp_ord_f32(+sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_ord_f32(-sNaN:0x80001, -sNaN:0x1) == false -; run: %fcmp_ord_f32(-sNaN:0x80001, +sNaN:0x1) == false +; run: %fcmp_ord_f32(+sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_ord_f32(-sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_ord_f32(+sNaN:0x80001, -sNaN:0x80001) == 0 +; run: %fcmp_ord_f32(-sNaN:0x80001, +sNaN:0x80001) == 0 +; run: %fcmp_ord_f32(+sNaN:0x80001, +sNaN:0x1) == 0 +; run: %fcmp_ord_f32(+sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_ord_f32(-sNaN:0x80001, -sNaN:0x1) == 0 +; run: %fcmp_ord_f32(-sNaN:0x80001, +sNaN:0x1) == 0 -function %fcmp_ord_f64(f64, f64) -> b1 { +function %fcmp_ord_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp ord v0, v1 return v2 } -; run: %fcmp_ord_f64(0x0.5, 0x0.5) == true -; run: %fcmp_ord_f64(0x1.0, 0x1.0) == true -; run: %fcmp_ord_f64(-0x1.0, 0x1.0) == true -; run: %fcmp_ord_f64(0x1.0, -0x1.0) == true -; run: %fcmp_ord_f64(0x0.5, 0x1.0) == true -; run: %fcmp_ord_f64(0x1.5, 0x2.9) == true -; run: %fcmp_ord_f64(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_ord_f64(0x1.4cccccccccccdp0, 0x1.8p0) == true -; run: %fcmp_ord_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == true -; run: %fcmp_ord_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == true -; run: %fcmp_ord_f64(-0x0.5, -0x1.0) == true -; run: %fcmp_ord_f64(-0x1.5, -0x2.9) == true -; run: %fcmp_ord_f64(-0x1.1p10, -0x1.3333333333333p-1) == true -; run: %fcmp_ord_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == true -; run: %fcmp_ord_f64(-0x1.8p0, -0x1.b333333333333p0) == true -; run: %fcmp_ord_f64(-0x1.4p1, -0x1.6666666666666p1) == true -; run: %fcmp_ord_f64(0x0.5, -0x1.0) == true -; run: %fcmp_ord_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == true +; run: %fcmp_ord_f64(0x0.5, 0x0.5) == 1 +; run: %fcmp_ord_f64(0x1.0, 0x1.0) == 1 +; run: %fcmp_ord_f64(-0x1.0, 0x1.0) == 1 +; run: %fcmp_ord_f64(0x1.0, -0x1.0) == 1 +; run: %fcmp_ord_f64(0x0.5, 0x1.0) == 1 +; run: %fcmp_ord_f64(0x1.5, 0x2.9) == 1 +; run: %fcmp_ord_f64(0x1.1p10, 0x1.4p1) == 1 +; run: 
%fcmp_ord_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 1 +; run: %fcmp_ord_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 1 +; run: %fcmp_ord_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 1 +; run: %fcmp_ord_f64(-0x0.5, -0x1.0) == 1 +; run: %fcmp_ord_f64(-0x1.5, -0x2.9) == 1 +; run: %fcmp_ord_f64(-0x1.1p10, -0x1.3333333333333p-1) == 1 +; run: %fcmp_ord_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 1 +; run: %fcmp_ord_f64(-0x1.8p0, -0x1.b333333333333p0) == 1 +; run: %fcmp_ord_f64(-0x1.4p1, -0x1.6666666666666p1) == 1 +; run: %fcmp_ord_f64(0x0.5, -0x1.0) == 1 +; run: %fcmp_ord_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 1 ; Zeroes -; run: %fcmp_ord_f64(0x0.0, 0x0.0) == true -; run: %fcmp_ord_f64(-0x0.0, -0x0.0) == true -; run: %fcmp_ord_f64(0x0.0, -0x0.0) == true -; run: %fcmp_ord_f64(-0x0.0, 0x0.0) == true +; run: %fcmp_ord_f64(0x0.0, 0x0.0) == 1 +; run: %fcmp_ord_f64(-0x0.0, -0x0.0) == 1 +; run: %fcmp_ord_f64(0x0.0, -0x0.0) == 1 +; run: %fcmp_ord_f64(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_ord_f64(Inf, Inf) == true -; run: %fcmp_ord_f64(-Inf, -Inf) == true -; run: %fcmp_ord_f64(Inf, -Inf) == true -; run: %fcmp_ord_f64(-Inf, Inf) == true +; run: %fcmp_ord_f64(Inf, Inf) == 1 +; run: %fcmp_ord_f64(-Inf, -Inf) == 1 +; run: %fcmp_ord_f64(Inf, -Inf) == 1 +; run: %fcmp_ord_f64(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_ord_f64(0x0.0, Inf) == true -; run: %fcmp_ord_f64(-0x0.0, Inf) == true -; run: %fcmp_ord_f64(0x0.0, -Inf) == true -; run: %fcmp_ord_f64(-0x0.0, -Inf) == true -; run: %fcmp_ord_f64(Inf, 0x0.0) == true -; run: %fcmp_ord_f64(Inf, -0x0.0) == true -; run: %fcmp_ord_f64(-Inf, 0x0.0) == true -; run: %fcmp_ord_f64(-Inf, -0x0.0) == true +; run: %fcmp_ord_f64(0x0.0, Inf) == 1 +; run: %fcmp_ord_f64(-0x0.0, Inf) == 1 +; run: %fcmp_ord_f64(0x0.0, -Inf) == 1 +; run: %fcmp_ord_f64(-0x0.0, -Inf) == 1 +; run: %fcmp_ord_f64(Inf, 0x0.0) == 1 +; run: %fcmp_ord_f64(Inf, -0x0.0) == 1 +; run: %fcmp_ord_f64(-Inf, 0x0.0) == 1 +; run: %fcmp_ord_f64(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_ord_f64(0x1.0p-52, 0x1.0p-52) == true -; run: %fcmp_ord_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == true -; run: %fcmp_ord_f64(0x1.0p-1022, 0x1.0p-1022) == true -; run: %fcmp_ord_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == true -; run: %fcmp_ord_f64(0x1.0p-52, 0x1.0p-1022) == true -; run: %fcmp_ord_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == true +; run: %fcmp_ord_f64(0x1.0p-52, 0x1.0p-52) == 1 +; run: %fcmp_ord_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_ord_f64(0x1.0p-1022, 0x1.0p-1022) == 1 +; run: %fcmp_ord_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_ord_f64(0x1.0p-52, 0x1.0p-1022) == 1 +; run: %fcmp_ord_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 1 ; Subnormals -; run: %fcmp_ord_f64(0x0.8p-1022, -0x0.8p-1022) == true -; run: %fcmp_ord_f64(-0x0.8p-1022, 0x0.8p-1022) == true -; run: %fcmp_ord_f64(0x0.8p-1022, 0x0.0) == true -; run: %fcmp_ord_f64(-0x0.8p-1022, 0x0.0) == true -; run: %fcmp_ord_f64(0x0.8p-1022, -0x0.0) == true -; run: %fcmp_ord_f64(-0x0.8p-1022, -0x0.0) == true -; run: %fcmp_ord_f64(0x0.0, 0x0.8p-1022) == true -; run: %fcmp_ord_f64(0x0.0, -0x0.8p-1022) == true -; run: %fcmp_ord_f64(-0x0.0, 0x0.8p-1022) == true -; run: %fcmp_ord_f64(-0x0.0, -0x0.8p-1022) == true +; run: %fcmp_ord_f64(0x0.8p-1022, -0x0.8p-1022) == 1 +; run: %fcmp_ord_f64(-0x0.8p-1022, 0x0.8p-1022) == 1 +; run: %fcmp_ord_f64(0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_ord_f64(-0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_ord_f64(0x0.8p-1022, -0x0.0) == 1 +; run: 
%fcmp_ord_f64(-0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_ord_f64(0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_ord_f64(0x0.0, -0x0.8p-1022) == 1 +; run: %fcmp_ord_f64(-0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_ord_f64(-0x0.0, -0x0.8p-1022) == 1 ; NaN's -; run: %fcmp_ord_f64(+NaN, +NaN) == false -; run: %fcmp_ord_f64(-NaN, -NaN) == false -; run: %fcmp_ord_f64(+NaN, -NaN) == false -; run: %fcmp_ord_f64(-NaN, +NaN) == false +; run: %fcmp_ord_f64(+NaN, +NaN) == 0 +; run: %fcmp_ord_f64(-NaN, -NaN) == 0 +; run: %fcmp_ord_f64(+NaN, -NaN) == 0 +; run: %fcmp_ord_f64(-NaN, +NaN) == 0 -; run: %fcmp_ord_f64(+NaN, -0x1.0) == false -; run: %fcmp_ord_f64(-NaN, -0x1.0) == false -; run: %fcmp_ord_f64(+NaN, 0x1.0) == false -; run: %fcmp_ord_f64(-NaN, 0x1.0) == false -; run: %fcmp_ord_f64(+NaN, -0x0.0) == false -; run: %fcmp_ord_f64(-NaN, -0x0.0) == false -; run: %fcmp_ord_f64(+NaN, 0x0.0) == false -; run: %fcmp_ord_f64(-NaN, 0x0.0) == false -; run: %fcmp_ord_f64(+NaN, -Inf) == false -; run: %fcmp_ord_f64(-NaN, -Inf) == false -; run: %fcmp_ord_f64(+NaN, Inf) == false -; run: %fcmp_ord_f64(-NaN, Inf) == false -; run: %fcmp_ord_f64(-0x0.0, +NaN) == false -; run: %fcmp_ord_f64(-0x0.0, -NaN) == false -; run: %fcmp_ord_f64(0x0.0, +NaN) == false -; run: %fcmp_ord_f64(0x0.0, -NaN) == false -; run: %fcmp_ord_f64(-Inf, +NaN) == false -; run: %fcmp_ord_f64(-Inf, -NaN) == false -; run: %fcmp_ord_f64(Inf, +NaN) == false -; run: %fcmp_ord_f64(Inf, -NaN) == false +; run: %fcmp_ord_f64(+NaN, -0x1.0) == 0 +; run: %fcmp_ord_f64(-NaN, -0x1.0) == 0 +; run: %fcmp_ord_f64(+NaN, 0x1.0) == 0 +; run: %fcmp_ord_f64(-NaN, 0x1.0) == 0 +; run: %fcmp_ord_f64(+NaN, -0x0.0) == 0 +; run: %fcmp_ord_f64(-NaN, -0x0.0) == 0 +; run: %fcmp_ord_f64(+NaN, 0x0.0) == 0 +; run: %fcmp_ord_f64(-NaN, 0x0.0) == 0 +; run: %fcmp_ord_f64(+NaN, -Inf) == 0 +; run: %fcmp_ord_f64(-NaN, -Inf) == 0 +; run: %fcmp_ord_f64(+NaN, Inf) == 0 +; run: %fcmp_ord_f64(-NaN, Inf) == 0 +; run: %fcmp_ord_f64(-0x0.0, +NaN) == 0 +; run: %fcmp_ord_f64(-0x0.0, -NaN) == 0 +; run: %fcmp_ord_f64(0x0.0, +NaN) == 0 +; run: %fcmp_ord_f64(0x0.0, -NaN) == 0 +; run: %fcmp_ord_f64(-Inf, +NaN) == 0 +; run: %fcmp_ord_f64(-Inf, -NaN) == 0 +; run: %fcmp_ord_f64(Inf, +NaN) == 0 +; run: %fcmp_ord_f64(Inf, -NaN) == 0 -; run: %fcmp_ord_f64(+NaN:0x1, +NaN:0x1) == false -; run: %fcmp_ord_f64(-NaN:0x1, -NaN:0x1) == false -; run: %fcmp_ord_f64(+NaN:0x1, -NaN:0x1) == false -; run: %fcmp_ord_f64(-NaN:0x1, +NaN:0x1) == false -; run: %fcmp_ord_f64(+NaN:0x1, +NaN) == false -; run: %fcmp_ord_f64(+NaN:0x1, -NaN) == false -; run: %fcmp_ord_f64(-NaN:0x1, -NaN) == false -; run: %fcmp_ord_f64(-NaN:0x1, +NaN) == false +; run: %fcmp_ord_f64(+NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ord_f64(-NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ord_f64(+NaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ord_f64(-NaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ord_f64(+NaN:0x1, +NaN) == 0 +; run: %fcmp_ord_f64(+NaN:0x1, -NaN) == 0 +; run: %fcmp_ord_f64(-NaN:0x1, -NaN) == 0 +; run: %fcmp_ord_f64(-NaN:0x1, +NaN) == 0 -; run: %fcmp_ord_f64(+NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_ord_f64(-NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_ord_f64(+NaN:0x800000000001, -NaN:0x800000000001) == false -; run: %fcmp_ord_f64(-NaN:0x800000000001, +NaN:0x800000000001) == false -; run: %fcmp_ord_f64(+NaN:0x800000000001, +NaN) == false -; run: %fcmp_ord_f64(+NaN:0x800000000001, -NaN) == false -; run: %fcmp_ord_f64(-NaN:0x800000000001, -NaN) == false -; run: %fcmp_ord_f64(-NaN:0x800000000001, +NaN) == false +; run: 
%fcmp_ord_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_ord_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_ord_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 0 +; run: %fcmp_ord_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 0 +; run: %fcmp_ord_f64(+NaN:0x800000000001, +NaN) == 0 +; run: %fcmp_ord_f64(+NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_ord_f64(-NaN:0x800000000001, -NaN) == 0 +; run: %fcmp_ord_f64(-NaN:0x800000000001, +NaN) == 0 ; sNaN's -; run: %fcmp_ord_f64(+sNaN:0x1, +sNaN:0x1) == false -; run: %fcmp_ord_f64(-sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_ord_f64(+sNaN:0x1, -sNaN:0x1) == false -; run: %fcmp_ord_f64(-sNaN:0x1, +sNaN:0x1) == false +; run: %fcmp_ord_f64(+sNaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_ord_f64(-sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_ord_f64(+sNaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_ord_f64(-sNaN:0x1, +sNaN:0x1) == 0 -; run: %fcmp_ord_f64(+sNaN:0x1, -0x1.0) == false -; run: %fcmp_ord_f64(-sNaN:0x1, -0x1.0) == false -; run: %fcmp_ord_f64(+sNaN:0x1, 0x1.0) == false -; run: %fcmp_ord_f64(-sNaN:0x1, 0x1.0) == false -; run: %fcmp_ord_f64(+sNaN:0x1, -0x0.0) == false -; run: %fcmp_ord_f64(-sNaN:0x1, -0x0.0) == false -; run: %fcmp_ord_f64(+sNaN:0x1, 0x0.0) == false -; run: %fcmp_ord_f64(-sNaN:0x1, 0x0.0) == false -; run: %fcmp_ord_f64(+sNaN:0x1, -Inf) == false -; run: %fcmp_ord_f64(-sNaN:0x1, -Inf) == false -; run: %fcmp_ord_f64(+sNaN:0x1, Inf) == false -; run: %fcmp_ord_f64(-sNaN:0x1, Inf) == false -; run: %fcmp_ord_f64(-0x0.0, +sNaN:0x1) == false -; run: %fcmp_ord_f64(-0x0.0, -sNaN:0x1) == false -; run: %fcmp_ord_f64(0x0.0, +sNaN:0x1) == false -; run: %fcmp_ord_f64(0x0.0, -sNaN:0x1) == false -; run: %fcmp_ord_f64(-Inf, +sNaN:0x1) == false -; run: %fcmp_ord_f64(-Inf, -sNaN:0x1) == false -; run: %fcmp_ord_f64(Inf, +sNaN:0x1) == false -; run: %fcmp_ord_f64(Inf, -sNaN:0x1) == false +; run: %fcmp_ord_f64(+sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_ord_f64(-sNaN:0x1, -0x1.0) == 0 +; run: %fcmp_ord_f64(+sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_ord_f64(-sNaN:0x1, 0x1.0) == 0 +; run: %fcmp_ord_f64(+sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_ord_f64(-sNaN:0x1, -0x0.0) == 0 +; run: %fcmp_ord_f64(+sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_ord_f64(-sNaN:0x1, 0x0.0) == 0 +; run: %fcmp_ord_f64(+sNaN:0x1, -Inf) == 0 +; run: %fcmp_ord_f64(-sNaN:0x1, -Inf) == 0 +; run: %fcmp_ord_f64(+sNaN:0x1, Inf) == 0 +; run: %fcmp_ord_f64(-sNaN:0x1, Inf) == 0 +; run: %fcmp_ord_f64(-0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_ord_f64(-0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_ord_f64(0x0.0, +sNaN:0x1) == 0 +; run: %fcmp_ord_f64(0x0.0, -sNaN:0x1) == 0 +; run: %fcmp_ord_f64(-Inf, +sNaN:0x1) == 0 +; run: %fcmp_ord_f64(-Inf, -sNaN:0x1) == 0 +; run: %fcmp_ord_f64(Inf, +sNaN:0x1) == 0 +; run: %fcmp_ord_f64(Inf, -sNaN:0x1) == 0 -; run: %fcmp_ord_f64(+sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_ord_f64(-sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_ord_f64(+sNaN:0x1, -NaN:0x1) == false -; run: %fcmp_ord_f64(-sNaN:0x1, +NaN:0x1) == false -; run: %fcmp_ord_f64(+NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_ord_f64(-NaN:0x1, -sNaN:0x1) == false -; run: %fcmp_ord_f64(-NaN:0x1, +sNaN:0x1) == false -; run: %fcmp_ord_f64(+NaN:0x1, -sNaN:0x1) == false +; run: %fcmp_ord_f64(+sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ord_f64(-sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ord_f64(+sNaN:0x1, -NaN:0x1) == 0 +; run: %fcmp_ord_f64(-sNaN:0x1, +NaN:0x1) == 0 +; run: %fcmp_ord_f64(+NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_ord_f64(-NaN:0x1, -sNaN:0x1) == 0 +; run: %fcmp_ord_f64(-NaN:0x1, +sNaN:0x1) == 0 +; run: %fcmp_ord_f64(+NaN:0x1, 
-sNaN:0x1) == 0 -; run: %fcmp_ord_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_ord_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_ord_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == false -; run: %fcmp_ord_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == false -; run: %fcmp_ord_f64(+sNaN:0x800000000001, +sNaN:0x1) == false -; run: %fcmp_ord_f64(+sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_ord_f64(-sNaN:0x800000000001, -sNaN:0x1) == false -; run: %fcmp_ord_f64(-sNaN:0x800000000001, +sNaN:0x1) == false +; run: %fcmp_ord_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_ord_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_ord_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 0 +; run: %fcmp_ord_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 0 +; run: %fcmp_ord_f64(+sNaN:0x800000000001, +sNaN:0x1) == 0 +; run: %fcmp_ord_f64(+sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_ord_f64(-sNaN:0x800000000001, -sNaN:0x1) == 0 +; run: %fcmp_ord_f64(-sNaN:0x800000000001, +sNaN:0x1) == 0 diff --git a/cranelift/filetests/filetests/runtests/fcmp-ueq.clif b/cranelift/filetests/filetests/runtests/fcmp-ueq.clif index f20aae820229..665f1a705aa0 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-ueq.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-ueq.clif @@ -4,316 +4,316 @@ target x86_64 target s390x target riscv64 -function %fcmp_ueq_f32(f32, f32) -> b1 { +function %fcmp_ueq_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp ueq v0, v1 return v2 } -; run: %fcmp_ueq_f32(0x0.5, 0x0.5) == true -; run: %fcmp_ueq_f32(0x1.0, 0x1.0) == true -; run: %fcmp_ueq_f32(-0x1.0, 0x1.0) == false -; run: %fcmp_ueq_f32(0x1.0, -0x1.0) == false -; run: %fcmp_ueq_f32(0x0.5, 0x1.0) == false -; run: %fcmp_ueq_f32(0x1.5, 0x2.9) == false -; run: %fcmp_ueq_f32(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_ueq_f32(0x1.4cccccp0, 0x1.8p0) == false -; run: %fcmp_ueq_f32(0x1.b33334p0, 0x1.99999ap-2) == false -; run: %fcmp_ueq_f32(0x1.333334p-1, 0x1.666666p1) == false -; run: %fcmp_ueq_f32(-0x0.5, -0x1.0) == false -; run: %fcmp_ueq_f32(-0x1.5, -0x2.9) == false -; run: %fcmp_ueq_f32(-0x1.1p10, -0x1.333334p-1) == false -; run: %fcmp_ueq_f32(-0x1.99999ap-2, -0x1.4cccccp0) == false -; run: %fcmp_ueq_f32(-0x1.8p0, -0x1.b33334p0) == false -; run: %fcmp_ueq_f32(-0x1.4p1, -0x1.666666p1) == false -; run: %fcmp_ueq_f32(0x0.5, -0x1.0) == false -; run: %fcmp_ueq_f32(0x1.b33334p0, -0x1.b33334p0) == false +; run: %fcmp_ueq_f32(0x0.5, 0x0.5) == 1 +; run: %fcmp_ueq_f32(0x1.0, 0x1.0) == 1 +; run: %fcmp_ueq_f32(-0x1.0, 0x1.0) == 0 +; run: %fcmp_ueq_f32(0x1.0, -0x1.0) == 0 +; run: %fcmp_ueq_f32(0x0.5, 0x1.0) == 0 +; run: %fcmp_ueq_f32(0x1.5, 0x2.9) == 0 +; run: %fcmp_ueq_f32(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_ueq_f32(0x1.4cccccp0, 0x1.8p0) == 0 +; run: %fcmp_ueq_f32(0x1.b33334p0, 0x1.99999ap-2) == 0 +; run: %fcmp_ueq_f32(0x1.333334p-1, 0x1.666666p1) == 0 +; run: %fcmp_ueq_f32(-0x0.5, -0x1.0) == 0 +; run: %fcmp_ueq_f32(-0x1.5, -0x2.9) == 0 +; run: %fcmp_ueq_f32(-0x1.1p10, -0x1.333334p-1) == 0 +; run: %fcmp_ueq_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 0 +; run: %fcmp_ueq_f32(-0x1.8p0, -0x1.b33334p0) == 0 +; run: %fcmp_ueq_f32(-0x1.4p1, -0x1.666666p1) == 0 +; run: %fcmp_ueq_f32(0x0.5, -0x1.0) == 0 +; run: %fcmp_ueq_f32(0x1.b33334p0, -0x1.b33334p0) == 0 ; Zeroes -; run: %fcmp_ueq_f32(0x0.0, 0x0.0) == true -; run: %fcmp_ueq_f32(-0x0.0, -0x0.0) == true -; run: %fcmp_ueq_f32(0x0.0, -0x0.0) == true -; run: %fcmp_ueq_f32(-0x0.0, 0x0.0) == true +; 
run: %fcmp_ueq_f32(0x0.0, 0x0.0) == 1 +; run: %fcmp_ueq_f32(-0x0.0, -0x0.0) == 1 +; run: %fcmp_ueq_f32(0x0.0, -0x0.0) == 1 +; run: %fcmp_ueq_f32(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_ueq_f32(Inf, Inf) == true -; run: %fcmp_ueq_f32(-Inf, -Inf) == true -; run: %fcmp_ueq_f32(Inf, -Inf) == false -; run: %fcmp_ueq_f32(-Inf, Inf) == false +; run: %fcmp_ueq_f32(Inf, Inf) == 1 +; run: %fcmp_ueq_f32(-Inf, -Inf) == 1 +; run: %fcmp_ueq_f32(Inf, -Inf) == 0 +; run: %fcmp_ueq_f32(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_ueq_f32(0x0.0, Inf) == false -; run: %fcmp_ueq_f32(-0x0.0, Inf) == false -; run: %fcmp_ueq_f32(0x0.0, -Inf) == false -; run: %fcmp_ueq_f32(-0x0.0, -Inf) == false -; run: %fcmp_ueq_f32(Inf, 0x0.0) == false -; run: %fcmp_ueq_f32(Inf, -0x0.0) == false -; run: %fcmp_ueq_f32(-Inf, 0x0.0) == false -; run: %fcmp_ueq_f32(-Inf, -0x0.0) == false +; run: %fcmp_ueq_f32(0x0.0, Inf) == 0 +; run: %fcmp_ueq_f32(-0x0.0, Inf) == 0 +; run: %fcmp_ueq_f32(0x0.0, -Inf) == 0 +; run: %fcmp_ueq_f32(-0x0.0, -Inf) == 0 +; run: %fcmp_ueq_f32(Inf, 0x0.0) == 0 +; run: %fcmp_ueq_f32(Inf, -0x0.0) == 0 +; run: %fcmp_ueq_f32(-Inf, 0x0.0) == 0 +; run: %fcmp_ueq_f32(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_ueq_f32(0x1.0p-23, 0x1.0p-23) == true -; run: %fcmp_ueq_f32(0x1.fffffep127, 0x1.fffffep127) == true -; run: %fcmp_ueq_f32(0x1.0p-126, 0x1.0p-126) == true -; run: %fcmp_ueq_f32(0x1.0p-23, 0x1.fffffep127) == false -; run: %fcmp_ueq_f32(0x1.0p-23, 0x1.0p-126) == false -; run: %fcmp_ueq_f32(0x1.0p-126, 0x1.fffffep127) == false +; run: %fcmp_ueq_f32(0x1.0p-23, 0x1.0p-23) == 1 +; run: %fcmp_ueq_f32(0x1.fffffep127, 0x1.fffffep127) == 1 +; run: %fcmp_ueq_f32(0x1.0p-126, 0x1.0p-126) == 1 +; run: %fcmp_ueq_f32(0x1.0p-23, 0x1.fffffep127) == 0 +; run: %fcmp_ueq_f32(0x1.0p-23, 0x1.0p-126) == 0 +; run: %fcmp_ueq_f32(0x1.0p-126, 0x1.fffffep127) == 0 ; Subnormals -; run: %fcmp_ueq_f32(0x0.800002p-126, -0x0.800002p-126) == false -; run: %fcmp_ueq_f32(-0x0.800002p-126, 0x0.800002p-126) == false -; run: %fcmp_ueq_f32(0x0.800002p-126, 0x0.0) == false -; run: %fcmp_ueq_f32(-0x0.800002p-126, 0x0.0) == false -; run: %fcmp_ueq_f32(0x0.800002p-126, -0x0.0) == false -; run: %fcmp_ueq_f32(-0x0.800002p-126, -0x0.0) == false -; run: %fcmp_ueq_f32(0x0.0, 0x0.800002p-126) == false -; run: %fcmp_ueq_f32(0x0.0, -0x0.800002p-126) == false -; run: %fcmp_ueq_f32(-0x0.0, 0x0.800002p-126) == false -; run: %fcmp_ueq_f32(-0x0.0, -0x0.800002p-126) == false +; run: %fcmp_ueq_f32(0x0.800002p-126, -0x0.800002p-126) == 0 +; run: %fcmp_ueq_f32(-0x0.800002p-126, 0x0.800002p-126) == 0 +; run: %fcmp_ueq_f32(0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_ueq_f32(-0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_ueq_f32(0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_ueq_f32(-0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_ueq_f32(0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_ueq_f32(0x0.0, -0x0.800002p-126) == 0 +; run: %fcmp_ueq_f32(-0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_ueq_f32(-0x0.0, -0x0.800002p-126) == 0 ; NaN's -; run: %fcmp_ueq_f32(+NaN, +NaN) == true -; run: %fcmp_ueq_f32(-NaN, -NaN) == true -; run: %fcmp_ueq_f32(+NaN, -NaN) == true -; run: %fcmp_ueq_f32(-NaN, +NaN) == true +; run: %fcmp_ueq_f32(+NaN, +NaN) == 1 +; run: %fcmp_ueq_f32(-NaN, -NaN) == 1 +; run: %fcmp_ueq_f32(+NaN, -NaN) == 1 +; run: %fcmp_ueq_f32(-NaN, +NaN) == 1 -; run: %fcmp_ueq_f32(+NaN, -0x1.0) == true -; run: %fcmp_ueq_f32(-NaN, -0x1.0) == true -; run: %fcmp_ueq_f32(+NaN, 0x1.0) == true -; run: %fcmp_ueq_f32(-NaN, 0x1.0) == true -; run: %fcmp_ueq_f32(+NaN, -0x0.0) == true -; 
run: %fcmp_ueq_f32(-NaN, -0x0.0) == true -; run: %fcmp_ueq_f32(+NaN, 0x0.0) == true -; run: %fcmp_ueq_f32(-NaN, 0x0.0) == true -; run: %fcmp_ueq_f32(+NaN, -Inf) == true -; run: %fcmp_ueq_f32(-NaN, -Inf) == true -; run: %fcmp_ueq_f32(+NaN, Inf) == true -; run: %fcmp_ueq_f32(-NaN, Inf) == true -; run: %fcmp_ueq_f32(-0x0.0, +NaN) == true -; run: %fcmp_ueq_f32(-0x0.0, -NaN) == true -; run: %fcmp_ueq_f32(0x0.0, +NaN) == true -; run: %fcmp_ueq_f32(0x0.0, -NaN) == true -; run: %fcmp_ueq_f32(-Inf, +NaN) == true -; run: %fcmp_ueq_f32(-Inf, -NaN) == true -; run: %fcmp_ueq_f32(Inf, +NaN) == true -; run: %fcmp_ueq_f32(Inf, -NaN) == true +; run: %fcmp_ueq_f32(+NaN, -0x1.0) == 1 +; run: %fcmp_ueq_f32(-NaN, -0x1.0) == 1 +; run: %fcmp_ueq_f32(+NaN, 0x1.0) == 1 +; run: %fcmp_ueq_f32(-NaN, 0x1.0) == 1 +; run: %fcmp_ueq_f32(+NaN, -0x0.0) == 1 +; run: %fcmp_ueq_f32(-NaN, -0x0.0) == 1 +; run: %fcmp_ueq_f32(+NaN, 0x0.0) == 1 +; run: %fcmp_ueq_f32(-NaN, 0x0.0) == 1 +; run: %fcmp_ueq_f32(+NaN, -Inf) == 1 +; run: %fcmp_ueq_f32(-NaN, -Inf) == 1 +; run: %fcmp_ueq_f32(+NaN, Inf) == 1 +; run: %fcmp_ueq_f32(-NaN, Inf) == 1 +; run: %fcmp_ueq_f32(-0x0.0, +NaN) == 1 +; run: %fcmp_ueq_f32(-0x0.0, -NaN) == 1 +; run: %fcmp_ueq_f32(0x0.0, +NaN) == 1 +; run: %fcmp_ueq_f32(0x0.0, -NaN) == 1 +; run: %fcmp_ueq_f32(-Inf, +NaN) == 1 +; run: %fcmp_ueq_f32(-Inf, -NaN) == 1 +; run: %fcmp_ueq_f32(Inf, +NaN) == 1 +; run: %fcmp_ueq_f32(Inf, -NaN) == 1 -; run: %fcmp_ueq_f32(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ueq_f32(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ueq_f32(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ueq_f32(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ueq_f32(+NaN:0x1, +NaN) == true -; run: %fcmp_ueq_f32(+NaN:0x1, -NaN) == true -; run: %fcmp_ueq_f32(-NaN:0x1, -NaN) == true -; run: %fcmp_ueq_f32(-NaN:0x1, +NaN) == true +; run: %fcmp_ueq_f32(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ueq_f32(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ueq_f32(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ueq_f32(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ueq_f32(+NaN:0x1, +NaN) == 1 +; run: %fcmp_ueq_f32(+NaN:0x1, -NaN) == 1 +; run: %fcmp_ueq_f32(-NaN:0x1, -NaN) == 1 +; run: %fcmp_ueq_f32(-NaN:0x1, +NaN) == 1 -; run: %fcmp_ueq_f32(+NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_ueq_f32(-NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_ueq_f32(+NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_ueq_f32(-NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_ueq_f32(+NaN:0x80001, +NaN) == true -; run: %fcmp_ueq_f32(+NaN:0x80001, -NaN) == true -; run: %fcmp_ueq_f32(-NaN:0x80001, -NaN) == true -; run: %fcmp_ueq_f32(-NaN:0x80001, +NaN) == true +; run: %fcmp_ueq_f32(+NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_ueq_f32(-NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_ueq_f32(+NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_ueq_f32(-NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_ueq_f32(+NaN:0x80001, +NaN) == 1 +; run: %fcmp_ueq_f32(+NaN:0x80001, -NaN) == 1 +; run: %fcmp_ueq_f32(-NaN:0x80001, -NaN) == 1 +; run: %fcmp_ueq_f32(-NaN:0x80001, +NaN) == 1 ; sNaN's -; run: %fcmp_ueq_f32(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ueq_f32(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ueq_f32(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ueq_f32(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_ueq_f32(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_ueq_f32(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_ueq_f32(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_ueq_f32(+sNaN:0x1, 
0x1.0) == true -; run: %fcmp_ueq_f32(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_ueq_f32(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_ueq_f32(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_ueq_f32(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_ueq_f32(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_ueq_f32(+sNaN:0x1, -Inf) == true -; run: %fcmp_ueq_f32(-sNaN:0x1, -Inf) == true -; run: %fcmp_ueq_f32(+sNaN:0x1, Inf) == true -; run: %fcmp_ueq_f32(-sNaN:0x1, Inf) == true -; run: %fcmp_ueq_f32(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_ueq_f32(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_ueq_f32(0x0.0, +sNaN:0x1) == true -; run: %fcmp_ueq_f32(0x0.0, -sNaN:0x1) == true -; run: %fcmp_ueq_f32(-Inf, +sNaN:0x1) == true -; run: %fcmp_ueq_f32(-Inf, -sNaN:0x1) == true -; run: %fcmp_ueq_f32(Inf, +sNaN:0x1) == true -; run: %fcmp_ueq_f32(Inf, -sNaN:0x1) == true +; run: %fcmp_ueq_f32(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ueq_f32(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ueq_f32(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ueq_f32(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ueq_f32(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_ueq_f32(+sNaN:0x1, Inf) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x1, Inf) == 1 +; run: %fcmp_ueq_f32(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(Inf, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(Inf, -sNaN:0x1) == 1 -; run: %fcmp_ueq_f32(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ueq_f32(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ueq_f32(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ueq_f32(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ueq_f32(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ueq_f32(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ueq_f32(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ueq_f32(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_ueq_f32(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ueq_f32(+sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ueq_f32(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_ueq_f32(+sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_ueq_f32(-sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_ueq_f32(+sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_ueq_f32(-sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_ueq_f32(+sNaN:0x80001, +sNaN:0x1) == true -; run: %fcmp_ueq_f32(+sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_ueq_f32(-sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_ueq_f32(-sNaN:0x80001, +sNaN:0x1) == true +; run: %fcmp_ueq_f32(+sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_ueq_f32(+sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_ueq_f32(+sNaN:0x80001, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(+sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f32(-sNaN:0x80001, +sNaN:0x1) == 1 -function %fcmp_ueq_f64(f64, f64) -> b1 { +function %fcmp_ueq_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp ueq v0, v1 return v2 
} -; run: %fcmp_ueq_f64(0x0.5, 0x0.5) == true -; run: %fcmp_ueq_f64(0x1.0, 0x1.0) == true -; run: %fcmp_ueq_f64(-0x1.0, 0x1.0) == false -; run: %fcmp_ueq_f64(0x1.0, -0x1.0) == false -; run: %fcmp_ueq_f64(0x0.5, 0x1.0) == false -; run: %fcmp_ueq_f64(0x1.5, 0x2.9) == false -; run: %fcmp_ueq_f64(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_ueq_f64(0x1.4cccccccccccdp0, 0x1.8p0) == false -; run: %fcmp_ueq_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == false -; run: %fcmp_ueq_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == false -; run: %fcmp_ueq_f64(-0x0.5, -0x1.0) == false -; run: %fcmp_ueq_f64(-0x1.5, -0x2.9) == false -; run: %fcmp_ueq_f64(-0x1.1p10, -0x1.3333333333333p-1) == false -; run: %fcmp_ueq_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == false -; run: %fcmp_ueq_f64(-0x1.8p0, -0x1.b333333333333p0) == false -; run: %fcmp_ueq_f64(-0x1.4p1, -0x1.6666666666666p1) == false -; run: %fcmp_ueq_f64(0x0.5, -0x1.0) == false -; run: %fcmp_ueq_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == false +; run: %fcmp_ueq_f64(0x0.5, 0x0.5) == 1 +; run: %fcmp_ueq_f64(0x1.0, 0x1.0) == 1 +; run: %fcmp_ueq_f64(-0x1.0, 0x1.0) == 0 +; run: %fcmp_ueq_f64(0x1.0, -0x1.0) == 0 +; run: %fcmp_ueq_f64(0x0.5, 0x1.0) == 0 +; run: %fcmp_ueq_f64(0x1.5, 0x2.9) == 0 +; run: %fcmp_ueq_f64(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_ueq_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 0 +; run: %fcmp_ueq_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 0 +; run: %fcmp_ueq_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 0 +; run: %fcmp_ueq_f64(-0x0.5, -0x1.0) == 0 +; run: %fcmp_ueq_f64(-0x1.5, -0x2.9) == 0 +; run: %fcmp_ueq_f64(-0x1.1p10, -0x1.3333333333333p-1) == 0 +; run: %fcmp_ueq_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 0 +; run: %fcmp_ueq_f64(-0x1.8p0, -0x1.b333333333333p0) == 0 +; run: %fcmp_ueq_f64(-0x1.4p1, -0x1.6666666666666p1) == 0 +; run: %fcmp_ueq_f64(0x0.5, -0x1.0) == 0 +; run: %fcmp_ueq_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 0 ; Zeroes -; run: %fcmp_ueq_f64(0x0.0, 0x0.0) == true -; run: %fcmp_ueq_f64(-0x0.0, -0x0.0) == true -; run: %fcmp_ueq_f64(0x0.0, -0x0.0) == true -; run: %fcmp_ueq_f64(-0x0.0, 0x0.0) == true +; run: %fcmp_ueq_f64(0x0.0, 0x0.0) == 1 +; run: %fcmp_ueq_f64(-0x0.0, -0x0.0) == 1 +; run: %fcmp_ueq_f64(0x0.0, -0x0.0) == 1 +; run: %fcmp_ueq_f64(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_ueq_f64(Inf, Inf) == true -; run: %fcmp_ueq_f64(-Inf, -Inf) == true -; run: %fcmp_ueq_f64(Inf, -Inf) == false -; run: %fcmp_ueq_f64(-Inf, Inf) == false +; run: %fcmp_ueq_f64(Inf, Inf) == 1 +; run: %fcmp_ueq_f64(-Inf, -Inf) == 1 +; run: %fcmp_ueq_f64(Inf, -Inf) == 0 +; run: %fcmp_ueq_f64(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_ueq_f64(0x0.0, Inf) == false -; run: %fcmp_ueq_f64(-0x0.0, Inf) == false -; run: %fcmp_ueq_f64(0x0.0, -Inf) == false -; run: %fcmp_ueq_f64(-0x0.0, -Inf) == false -; run: %fcmp_ueq_f64(Inf, 0x0.0) == false -; run: %fcmp_ueq_f64(Inf, -0x0.0) == false -; run: %fcmp_ueq_f64(-Inf, 0x0.0) == false -; run: %fcmp_ueq_f64(-Inf, -0x0.0) == false +; run: %fcmp_ueq_f64(0x0.0, Inf) == 0 +; run: %fcmp_ueq_f64(-0x0.0, Inf) == 0 +; run: %fcmp_ueq_f64(0x0.0, -Inf) == 0 +; run: %fcmp_ueq_f64(-0x0.0, -Inf) == 0 +; run: %fcmp_ueq_f64(Inf, 0x0.0) == 0 +; run: %fcmp_ueq_f64(Inf, -0x0.0) == 0 +; run: %fcmp_ueq_f64(-Inf, 0x0.0) == 0 +; run: %fcmp_ueq_f64(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_ueq_f64(0x1.0p-52, 0x1.0p-52) == true -; run: %fcmp_ueq_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == true -; run: %fcmp_ueq_f64(0x1.0p-1022, 0x1.0p-1022) == true -; run: 
%fcmp_ueq_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == false -; run: %fcmp_ueq_f64(0x1.0p-52, 0x1.0p-1022) == false -; run: %fcmp_ueq_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == false +; run: %fcmp_ueq_f64(0x1.0p-52, 0x1.0p-52) == 1 +; run: %fcmp_ueq_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_ueq_f64(0x1.0p-1022, 0x1.0p-1022) == 1 +; run: %fcmp_ueq_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_ueq_f64(0x1.0p-52, 0x1.0p-1022) == 0 +; run: %fcmp_ueq_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 0 ; Subnormals -; run: %fcmp_ueq_f64(0x0.8p-1022, -0x0.8p-1022) == false -; run: %fcmp_ueq_f64(-0x0.8p-1022, 0x0.8p-1022) == false -; run: %fcmp_ueq_f64(0x0.8p-1022, 0x0.0) == false -; run: %fcmp_ueq_f64(-0x0.8p-1022, 0x0.0) == false -; run: %fcmp_ueq_f64(0x0.8p-1022, -0x0.0) == false -; run: %fcmp_ueq_f64(-0x0.8p-1022, -0x0.0) == false -; run: %fcmp_ueq_f64(0x0.0, 0x0.8p-1022) == false -; run: %fcmp_ueq_f64(0x0.0, -0x0.8p-1022) == false -; run: %fcmp_ueq_f64(-0x0.0, 0x0.8p-1022) == false -; run: %fcmp_ueq_f64(-0x0.0, -0x0.8p-1022) == false +; run: %fcmp_ueq_f64(0x0.8p-1022, -0x0.8p-1022) == 0 +; run: %fcmp_ueq_f64(-0x0.8p-1022, 0x0.8p-1022) == 0 +; run: %fcmp_ueq_f64(0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_ueq_f64(-0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_ueq_f64(0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_ueq_f64(-0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_ueq_f64(0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_ueq_f64(0x0.0, -0x0.8p-1022) == 0 +; run: %fcmp_ueq_f64(-0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_ueq_f64(-0x0.0, -0x0.8p-1022) == 0 ; NaN's -; run: %fcmp_ueq_f64(+NaN, +NaN) == true -; run: %fcmp_ueq_f64(-NaN, -NaN) == true -; run: %fcmp_ueq_f64(+NaN, -NaN) == true -; run: %fcmp_ueq_f64(-NaN, +NaN) == true +; run: %fcmp_ueq_f64(+NaN, +NaN) == 1 +; run: %fcmp_ueq_f64(-NaN, -NaN) == 1 +; run: %fcmp_ueq_f64(+NaN, -NaN) == 1 +; run: %fcmp_ueq_f64(-NaN, +NaN) == 1 -; run: %fcmp_ueq_f64(+NaN, -0x1.0) == true -; run: %fcmp_ueq_f64(-NaN, -0x1.0) == true -; run: %fcmp_ueq_f64(+NaN, 0x1.0) == true -; run: %fcmp_ueq_f64(-NaN, 0x1.0) == true -; run: %fcmp_ueq_f64(+NaN, -0x0.0) == true -; run: %fcmp_ueq_f64(-NaN, -0x0.0) == true -; run: %fcmp_ueq_f64(+NaN, 0x0.0) == true -; run: %fcmp_ueq_f64(-NaN, 0x0.0) == true -; run: %fcmp_ueq_f64(+NaN, -Inf) == true -; run: %fcmp_ueq_f64(-NaN, -Inf) == true -; run: %fcmp_ueq_f64(+NaN, Inf) == true -; run: %fcmp_ueq_f64(-NaN, Inf) == true -; run: %fcmp_ueq_f64(-0x0.0, +NaN) == true -; run: %fcmp_ueq_f64(-0x0.0, -NaN) == true -; run: %fcmp_ueq_f64(0x0.0, +NaN) == true -; run: %fcmp_ueq_f64(0x0.0, -NaN) == true -; run: %fcmp_ueq_f64(-Inf, +NaN) == true -; run: %fcmp_ueq_f64(-Inf, -NaN) == true -; run: %fcmp_ueq_f64(Inf, +NaN) == true -; run: %fcmp_ueq_f64(Inf, -NaN) == true +; run: %fcmp_ueq_f64(+NaN, -0x1.0) == 1 +; run: %fcmp_ueq_f64(-NaN, -0x1.0) == 1 +; run: %fcmp_ueq_f64(+NaN, 0x1.0) == 1 +; run: %fcmp_ueq_f64(-NaN, 0x1.0) == 1 +; run: %fcmp_ueq_f64(+NaN, -0x0.0) == 1 +; run: %fcmp_ueq_f64(-NaN, -0x0.0) == 1 +; run: %fcmp_ueq_f64(+NaN, 0x0.0) == 1 +; run: %fcmp_ueq_f64(-NaN, 0x0.0) == 1 +; run: %fcmp_ueq_f64(+NaN, -Inf) == 1 +; run: %fcmp_ueq_f64(-NaN, -Inf) == 1 +; run: %fcmp_ueq_f64(+NaN, Inf) == 1 +; run: %fcmp_ueq_f64(-NaN, Inf) == 1 +; run: %fcmp_ueq_f64(-0x0.0, +NaN) == 1 +; run: %fcmp_ueq_f64(-0x0.0, -NaN) == 1 +; run: %fcmp_ueq_f64(0x0.0, +NaN) == 1 +; run: %fcmp_ueq_f64(0x0.0, -NaN) == 1 +; run: %fcmp_ueq_f64(-Inf, +NaN) == 1 +; run: %fcmp_ueq_f64(-Inf, -NaN) == 1 +; run: %fcmp_ueq_f64(Inf, +NaN) == 1 +; run: %fcmp_ueq_f64(Inf, -NaN) == 1 -; 
run: %fcmp_ueq_f64(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ueq_f64(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ueq_f64(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ueq_f64(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ueq_f64(+NaN:0x1, +NaN) == true -; run: %fcmp_ueq_f64(+NaN:0x1, -NaN) == true -; run: %fcmp_ueq_f64(-NaN:0x1, -NaN) == true -; run: %fcmp_ueq_f64(-NaN:0x1, +NaN) == true +; run: %fcmp_ueq_f64(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ueq_f64(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ueq_f64(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ueq_f64(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ueq_f64(+NaN:0x1, +NaN) == 1 +; run: %fcmp_ueq_f64(+NaN:0x1, -NaN) == 1 +; run: %fcmp_ueq_f64(-NaN:0x1, -NaN) == 1 +; run: %fcmp_ueq_f64(-NaN:0x1, +NaN) == 1 -; run: %fcmp_ueq_f64(+NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_ueq_f64(-NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_ueq_f64(+NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_ueq_f64(-NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_ueq_f64(+NaN:0x800000000001, +NaN) == true -; run: %fcmp_ueq_f64(+NaN:0x800000000001, -NaN) == true -; run: %fcmp_ueq_f64(-NaN:0x800000000001, -NaN) == true -; run: %fcmp_ueq_f64(-NaN:0x800000000001, +NaN) == true +; run: %fcmp_ueq_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_ueq_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_ueq_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_ueq_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_ueq_f64(+NaN:0x800000000001, +NaN) == 1 +; run: %fcmp_ueq_f64(+NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_ueq_f64(-NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_ueq_f64(-NaN:0x800000000001, +NaN) == 1 ; sNaN's -; run: %fcmp_ueq_f64(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ueq_f64(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ueq_f64(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ueq_f64(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_ueq_f64(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_ueq_f64(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_ueq_f64(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_ueq_f64(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_ueq_f64(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_ueq_f64(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_ueq_f64(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_ueq_f64(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_ueq_f64(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_ueq_f64(+sNaN:0x1, -Inf) == true -; run: %fcmp_ueq_f64(-sNaN:0x1, -Inf) == true -; run: %fcmp_ueq_f64(+sNaN:0x1, Inf) == true -; run: %fcmp_ueq_f64(-sNaN:0x1, Inf) == true -; run: %fcmp_ueq_f64(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_ueq_f64(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_ueq_f64(0x0.0, +sNaN:0x1) == true -; run: %fcmp_ueq_f64(0x0.0, -sNaN:0x1) == true -; run: %fcmp_ueq_f64(-Inf, +sNaN:0x1) == true -; run: %fcmp_ueq_f64(-Inf, -sNaN:0x1) == true -; run: %fcmp_ueq_f64(Inf, +sNaN:0x1) == true -; run: %fcmp_ueq_f64(Inf, -sNaN:0x1) == true +; run: %fcmp_ueq_f64(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ueq_f64(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ueq_f64(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ueq_f64(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ueq_f64(+sNaN:0x1, -Inf) == 1 +; run: 
%fcmp_ueq_f64(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_ueq_f64(+sNaN:0x1, Inf) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x1, Inf) == 1 +; run: %fcmp_ueq_f64(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(Inf, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(Inf, -sNaN:0x1) == 1 -; run: %fcmp_ueq_f64(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ueq_f64(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ueq_f64(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ueq_f64(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ueq_f64(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ueq_f64(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ueq_f64(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ueq_f64(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_ueq_f64(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ueq_f64(+sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ueq_f64(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_ueq_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_ueq_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_ueq_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_ueq_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_ueq_f64(+sNaN:0x800000000001, +sNaN:0x1) == true -; run: %fcmp_ueq_f64(+sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_ueq_f64(-sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_ueq_f64(-sNaN:0x800000000001, +sNaN:0x1) == true +; run: %fcmp_ueq_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_ueq_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_ueq_f64(+sNaN:0x800000000001, +sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(+sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_ueq_f64(-sNaN:0x800000000001, +sNaN:0x1) == 1 diff --git a/cranelift/filetests/filetests/runtests/fcmp-uge.clif b/cranelift/filetests/filetests/runtests/fcmp-uge.clif index d5d6e7398ec7..f5012b71e38a 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-uge.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-uge.clif @@ -4,316 +4,316 @@ target x86_64 target s390x target riscv64 -function %fcmp_uge_f32(f32, f32) -> b1 { +function %fcmp_uge_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp uge v0, v1 return v2 } -; run: %fcmp_uge_f32(0x0.5, 0x0.5) == true -; run: %fcmp_uge_f32(0x1.0, 0x1.0) == true -; run: %fcmp_uge_f32(-0x1.0, 0x1.0) == false -; run: %fcmp_uge_f32(0x1.0, -0x1.0) == true -; run: %fcmp_uge_f32(0x0.5, 0x1.0) == false -; run: %fcmp_uge_f32(0x1.5, 0x2.9) == false -; run: %fcmp_uge_f32(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_uge_f32(0x1.4cccccp0, 0x1.8p0) == false -; run: %fcmp_uge_f32(0x1.b33334p0, 0x1.99999ap-2) == true -; run: %fcmp_uge_f32(0x1.333334p-1, 0x1.666666p1) == false -; run: %fcmp_uge_f32(-0x0.5, -0x1.0) == true -; run: %fcmp_uge_f32(-0x1.5, -0x2.9) == true -; run: %fcmp_uge_f32(-0x1.1p10, -0x1.333334p-1) == false -; run: %fcmp_uge_f32(-0x1.99999ap-2, -0x1.4cccccp0) == true -; run: 
%fcmp_uge_f32(-0x1.8p0, -0x1.b33334p0) == true -; run: %fcmp_uge_f32(-0x1.4p1, -0x1.666666p1) == true -; run: %fcmp_uge_f32(0x0.5, -0x1.0) == true -; run: %fcmp_uge_f32(0x1.b33334p0, -0x1.b33334p0) == true +; run: %fcmp_uge_f32(0x0.5, 0x0.5) == 1 +; run: %fcmp_uge_f32(0x1.0, 0x1.0) == 1 +; run: %fcmp_uge_f32(-0x1.0, 0x1.0) == 0 +; run: %fcmp_uge_f32(0x1.0, -0x1.0) == 1 +; run: %fcmp_uge_f32(0x0.5, 0x1.0) == 0 +; run: %fcmp_uge_f32(0x1.5, 0x2.9) == 0 +; run: %fcmp_uge_f32(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_uge_f32(0x1.4cccccp0, 0x1.8p0) == 0 +; run: %fcmp_uge_f32(0x1.b33334p0, 0x1.99999ap-2) == 1 +; run: %fcmp_uge_f32(0x1.333334p-1, 0x1.666666p1) == 0 +; run: %fcmp_uge_f32(-0x0.5, -0x1.0) == 1 +; run: %fcmp_uge_f32(-0x1.5, -0x2.9) == 1 +; run: %fcmp_uge_f32(-0x1.1p10, -0x1.333334p-1) == 0 +; run: %fcmp_uge_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 1 +; run: %fcmp_uge_f32(-0x1.8p0, -0x1.b33334p0) == 1 +; run: %fcmp_uge_f32(-0x1.4p1, -0x1.666666p1) == 1 +; run: %fcmp_uge_f32(0x0.5, -0x1.0) == 1 +; run: %fcmp_uge_f32(0x1.b33334p0, -0x1.b33334p0) == 1 ; Zeroes -; run: %fcmp_uge_f32(0x0.0, 0x0.0) == true -; run: %fcmp_uge_f32(-0x0.0, -0x0.0) == true -; run: %fcmp_uge_f32(0x0.0, -0x0.0) == true -; run: %fcmp_uge_f32(-0x0.0, 0x0.0) == true +; run: %fcmp_uge_f32(0x0.0, 0x0.0) == 1 +; run: %fcmp_uge_f32(-0x0.0, -0x0.0) == 1 +; run: %fcmp_uge_f32(0x0.0, -0x0.0) == 1 +; run: %fcmp_uge_f32(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_uge_f32(Inf, Inf) == true -; run: %fcmp_uge_f32(-Inf, -Inf) == true -; run: %fcmp_uge_f32(Inf, -Inf) == true -; run: %fcmp_uge_f32(-Inf, Inf) == false +; run: %fcmp_uge_f32(Inf, Inf) == 1 +; run: %fcmp_uge_f32(-Inf, -Inf) == 1 +; run: %fcmp_uge_f32(Inf, -Inf) == 1 +; run: %fcmp_uge_f32(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_uge_f32(0x0.0, Inf) == false -; run: %fcmp_uge_f32(-0x0.0, Inf) == false -; run: %fcmp_uge_f32(0x0.0, -Inf) == true -; run: %fcmp_uge_f32(-0x0.0, -Inf) == true -; run: %fcmp_uge_f32(Inf, 0x0.0) == true -; run: %fcmp_uge_f32(Inf, -0x0.0) == true -; run: %fcmp_uge_f32(-Inf, 0x0.0) == false -; run: %fcmp_uge_f32(-Inf, -0x0.0) == false +; run: %fcmp_uge_f32(0x0.0, Inf) == 0 +; run: %fcmp_uge_f32(-0x0.0, Inf) == 0 +; run: %fcmp_uge_f32(0x0.0, -Inf) == 1 +; run: %fcmp_uge_f32(-0x0.0, -Inf) == 1 +; run: %fcmp_uge_f32(Inf, 0x0.0) == 1 +; run: %fcmp_uge_f32(Inf, -0x0.0) == 1 +; run: %fcmp_uge_f32(-Inf, 0x0.0) == 0 +; run: %fcmp_uge_f32(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_uge_f32(0x1.0p-23, 0x1.0p-23) == true -; run: %fcmp_uge_f32(0x1.fffffep127, 0x1.fffffep127) == true -; run: %fcmp_uge_f32(0x1.0p-126, 0x1.0p-126) == true -; run: %fcmp_uge_f32(0x1.0p-23, 0x1.fffffep127) == false -; run: %fcmp_uge_f32(0x1.0p-23, 0x1.0p-126) == true -; run: %fcmp_uge_f32(0x1.0p-126, 0x1.fffffep127) == false +; run: %fcmp_uge_f32(0x1.0p-23, 0x1.0p-23) == 1 +; run: %fcmp_uge_f32(0x1.fffffep127, 0x1.fffffep127) == 1 +; run: %fcmp_uge_f32(0x1.0p-126, 0x1.0p-126) == 1 +; run: %fcmp_uge_f32(0x1.0p-23, 0x1.fffffep127) == 0 +; run: %fcmp_uge_f32(0x1.0p-23, 0x1.0p-126) == 1 +; run: %fcmp_uge_f32(0x1.0p-126, 0x1.fffffep127) == 0 ; Subnormals -; run: %fcmp_uge_f32(0x0.800002p-126, -0x0.800002p-126) == true -; run: %fcmp_uge_f32(-0x0.800002p-126, 0x0.800002p-126) == false -; run: %fcmp_uge_f32(0x0.800002p-126, 0x0.0) == true -; run: %fcmp_uge_f32(-0x0.800002p-126, 0x0.0) == false -; run: %fcmp_uge_f32(0x0.800002p-126, -0x0.0) == true -; run: %fcmp_uge_f32(-0x0.800002p-126, -0x0.0) == false -; run: %fcmp_uge_f32(0x0.0, 0x0.800002p-126) == false -; run: 
%fcmp_uge_f32(0x0.0, -0x0.800002p-126) == true -; run: %fcmp_uge_f32(-0x0.0, 0x0.800002p-126) == false -; run: %fcmp_uge_f32(-0x0.0, -0x0.800002p-126) == true +; run: %fcmp_uge_f32(0x0.800002p-126, -0x0.800002p-126) == 1 +; run: %fcmp_uge_f32(-0x0.800002p-126, 0x0.800002p-126) == 0 +; run: %fcmp_uge_f32(0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_uge_f32(-0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_uge_f32(0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_uge_f32(-0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_uge_f32(0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_uge_f32(0x0.0, -0x0.800002p-126) == 1 +; run: %fcmp_uge_f32(-0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_uge_f32(-0x0.0, -0x0.800002p-126) == 1 ; NaN's -; run: %fcmp_uge_f32(+NaN, +NaN) == true -; run: %fcmp_uge_f32(-NaN, -NaN) == true -; run: %fcmp_uge_f32(+NaN, -NaN) == true -; run: %fcmp_uge_f32(-NaN, +NaN) == true +; run: %fcmp_uge_f32(+NaN, +NaN) == 1 +; run: %fcmp_uge_f32(-NaN, -NaN) == 1 +; run: %fcmp_uge_f32(+NaN, -NaN) == 1 +; run: %fcmp_uge_f32(-NaN, +NaN) == 1 -; run: %fcmp_uge_f32(+NaN, -0x1.0) == true -; run: %fcmp_uge_f32(-NaN, -0x1.0) == true -; run: %fcmp_uge_f32(+NaN, 0x1.0) == true -; run: %fcmp_uge_f32(-NaN, 0x1.0) == true -; run: %fcmp_uge_f32(+NaN, -0x0.0) == true -; run: %fcmp_uge_f32(-NaN, -0x0.0) == true -; run: %fcmp_uge_f32(+NaN, 0x0.0) == true -; run: %fcmp_uge_f32(-NaN, 0x0.0) == true -; run: %fcmp_uge_f32(+NaN, -Inf) == true -; run: %fcmp_uge_f32(-NaN, -Inf) == true -; run: %fcmp_uge_f32(+NaN, Inf) == true -; run: %fcmp_uge_f32(-NaN, Inf) == true -; run: %fcmp_uge_f32(-0x0.0, +NaN) == true -; run: %fcmp_uge_f32(-0x0.0, -NaN) == true -; run: %fcmp_uge_f32(0x0.0, +NaN) == true -; run: %fcmp_uge_f32(0x0.0, -NaN) == true -; run: %fcmp_uge_f32(-Inf, +NaN) == true -; run: %fcmp_uge_f32(-Inf, -NaN) == true -; run: %fcmp_uge_f32(Inf, +NaN) == true -; run: %fcmp_uge_f32(Inf, -NaN) == true +; run: %fcmp_uge_f32(+NaN, -0x1.0) == 1 +; run: %fcmp_uge_f32(-NaN, -0x1.0) == 1 +; run: %fcmp_uge_f32(+NaN, 0x1.0) == 1 +; run: %fcmp_uge_f32(-NaN, 0x1.0) == 1 +; run: %fcmp_uge_f32(+NaN, -0x0.0) == 1 +; run: %fcmp_uge_f32(-NaN, -0x0.0) == 1 +; run: %fcmp_uge_f32(+NaN, 0x0.0) == 1 +; run: %fcmp_uge_f32(-NaN, 0x0.0) == 1 +; run: %fcmp_uge_f32(+NaN, -Inf) == 1 +; run: %fcmp_uge_f32(-NaN, -Inf) == 1 +; run: %fcmp_uge_f32(+NaN, Inf) == 1 +; run: %fcmp_uge_f32(-NaN, Inf) == 1 +; run: %fcmp_uge_f32(-0x0.0, +NaN) == 1 +; run: %fcmp_uge_f32(-0x0.0, -NaN) == 1 +; run: %fcmp_uge_f32(0x0.0, +NaN) == 1 +; run: %fcmp_uge_f32(0x0.0, -NaN) == 1 +; run: %fcmp_uge_f32(-Inf, +NaN) == 1 +; run: %fcmp_uge_f32(-Inf, -NaN) == 1 +; run: %fcmp_uge_f32(Inf, +NaN) == 1 +; run: %fcmp_uge_f32(Inf, -NaN) == 1 -; run: %fcmp_uge_f32(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_uge_f32(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_uge_f32(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_uge_f32(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_uge_f32(+NaN:0x1, +NaN) == true -; run: %fcmp_uge_f32(+NaN:0x1, -NaN) == true -; run: %fcmp_uge_f32(-NaN:0x1, -NaN) == true -; run: %fcmp_uge_f32(-NaN:0x1, +NaN) == true +; run: %fcmp_uge_f32(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uge_f32(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uge_f32(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uge_f32(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uge_f32(+NaN:0x1, +NaN) == 1 +; run: %fcmp_uge_f32(+NaN:0x1, -NaN) == 1 +; run: %fcmp_uge_f32(-NaN:0x1, -NaN) == 1 +; run: %fcmp_uge_f32(-NaN:0x1, +NaN) == 1 -; run: %fcmp_uge_f32(+NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_uge_f32(-NaN:0x80001, -NaN:0x80001) == true -; run: 
%fcmp_uge_f32(+NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_uge_f32(-NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_uge_f32(+NaN:0x80001, +NaN) == true -; run: %fcmp_uge_f32(+NaN:0x80001, -NaN) == true -; run: %fcmp_uge_f32(-NaN:0x80001, -NaN) == true -; run: %fcmp_uge_f32(-NaN:0x80001, +NaN) == true +; run: %fcmp_uge_f32(+NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_uge_f32(-NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_uge_f32(+NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_uge_f32(-NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_uge_f32(+NaN:0x80001, +NaN) == 1 +; run: %fcmp_uge_f32(+NaN:0x80001, -NaN) == 1 +; run: %fcmp_uge_f32(-NaN:0x80001, -NaN) == 1 +; run: %fcmp_uge_f32(-NaN:0x80001, +NaN) == 1 ; sNaN's -; run: %fcmp_uge_f32(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_uge_f32(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_uge_f32(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_uge_f32(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_uge_f32(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_uge_f32(-sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_uge_f32(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_uge_f32(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_uge_f32(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_uge_f32(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_uge_f32(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_uge_f32(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_uge_f32(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_uge_f32(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_uge_f32(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_uge_f32(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_uge_f32(+sNaN:0x1, -Inf) == true -; run: %fcmp_uge_f32(-sNaN:0x1, -Inf) == true -; run: %fcmp_uge_f32(+sNaN:0x1, Inf) == true -; run: %fcmp_uge_f32(-sNaN:0x1, Inf) == true -; run: %fcmp_uge_f32(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_uge_f32(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_uge_f32(0x0.0, +sNaN:0x1) == true -; run: %fcmp_uge_f32(0x0.0, -sNaN:0x1) == true -; run: %fcmp_uge_f32(-Inf, +sNaN:0x1) == true -; run: %fcmp_uge_f32(-Inf, -sNaN:0x1) == true -; run: %fcmp_uge_f32(Inf, +sNaN:0x1) == true -; run: %fcmp_uge_f32(Inf, -sNaN:0x1) == true +; run: %fcmp_uge_f32(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_uge_f32(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_uge_f32(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_uge_f32(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_uge_f32(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_uge_f32(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_uge_f32(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_uge_f32(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_uge_f32(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_uge_f32(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_uge_f32(+sNaN:0x1, Inf) == 1 +; run: %fcmp_uge_f32(-sNaN:0x1, Inf) == 1 +; run: %fcmp_uge_f32(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_uge_f32(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_uge_f32(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_uge_f32(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_uge_f32(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_uge_f32(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_uge_f32(Inf, +sNaN:0x1) == 1 +; run: %fcmp_uge_f32(Inf, -sNaN:0x1) == 1 -; run: %fcmp_uge_f32(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_uge_f32(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_uge_f32(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_uge_f32(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_uge_f32(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_uge_f32(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_uge_f32(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_uge_f32(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_uge_f32(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uge_f32(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uge_f32(+sNaN:0x1, -NaN:0x1) == 1 +; run: 
%fcmp_uge_f32(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uge_f32(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_uge_f32(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_uge_f32(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_uge_f32(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_uge_f32(+sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_uge_f32(-sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_uge_f32(+sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_uge_f32(-sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_uge_f32(+sNaN:0x80001, +sNaN:0x1) == true -; run: %fcmp_uge_f32(+sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_uge_f32(-sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_uge_f32(-sNaN:0x80001, +sNaN:0x1) == true +; run: %fcmp_uge_f32(+sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_uge_f32(-sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_uge_f32(+sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_uge_f32(-sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_uge_f32(+sNaN:0x80001, +sNaN:0x1) == 1 +; run: %fcmp_uge_f32(+sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_uge_f32(-sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_uge_f32(-sNaN:0x80001, +sNaN:0x1) == 1 -function %fcmp_uge_f64(f64, f64) -> b1 { +function %fcmp_uge_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp uge v0, v1 return v2 } -; run: %fcmp_uge_f64(0x0.5, 0x0.5) == true -; run: %fcmp_uge_f64(0x1.0, 0x1.0) == true -; run: %fcmp_uge_f64(-0x1.0, 0x1.0) == false -; run: %fcmp_uge_f64(0x1.0, -0x1.0) == true -; run: %fcmp_uge_f64(0x0.5, 0x1.0) == false -; run: %fcmp_uge_f64(0x1.5, 0x2.9) == false -; run: %fcmp_uge_f64(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_uge_f64(0x1.4cccccccccccdp0, 0x1.8p0) == false -; run: %fcmp_uge_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == true -; run: %fcmp_uge_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == false -; run: %fcmp_uge_f64(-0x0.5, -0x1.0) == true -; run: %fcmp_uge_f64(-0x1.5, -0x2.9) == true -; run: %fcmp_uge_f64(-0x1.1p10, -0x1.3333333333333p-1) == false -; run: %fcmp_uge_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == true -; run: %fcmp_uge_f64(-0x1.8p0, -0x1.b333333333333p0) == true -; run: %fcmp_uge_f64(-0x1.4p1, -0x1.6666666666666p1) == true -; run: %fcmp_uge_f64(0x0.5, -0x1.0) == true -; run: %fcmp_uge_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == true +; run: %fcmp_uge_f64(0x0.5, 0x0.5) == 1 +; run: %fcmp_uge_f64(0x1.0, 0x1.0) == 1 +; run: %fcmp_uge_f64(-0x1.0, 0x1.0) == 0 +; run: %fcmp_uge_f64(0x1.0, -0x1.0) == 1 +; run: %fcmp_uge_f64(0x0.5, 0x1.0) == 0 +; run: %fcmp_uge_f64(0x1.5, 0x2.9) == 0 +; run: %fcmp_uge_f64(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_uge_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 0 +; run: %fcmp_uge_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 1 +; run: %fcmp_uge_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 0 +; run: %fcmp_uge_f64(-0x0.5, -0x1.0) == 1 +; run: %fcmp_uge_f64(-0x1.5, -0x2.9) == 1 +; run: %fcmp_uge_f64(-0x1.1p10, -0x1.3333333333333p-1) == 0 +; run: %fcmp_uge_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 1 +; run: %fcmp_uge_f64(-0x1.8p0, -0x1.b333333333333p0) == 1 +; run: %fcmp_uge_f64(-0x1.4p1, -0x1.6666666666666p1) == 1 +; run: %fcmp_uge_f64(0x0.5, -0x1.0) == 1 +; run: %fcmp_uge_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 1 ; Zeroes -; run: %fcmp_uge_f64(0x0.0, 0x0.0) == true -; run: %fcmp_uge_f64(-0x0.0, -0x0.0) == true -; run: %fcmp_uge_f64(0x0.0, -0x0.0) == true -; run: %fcmp_uge_f64(-0x0.0, 0x0.0) == true +; run: %fcmp_uge_f64(0x0.0, 0x0.0) == 1 +; run: %fcmp_uge_f64(-0x0.0, -0x0.0) == 1 +; run: %fcmp_uge_f64(0x0.0, -0x0.0) == 1 +; run: 
%fcmp_uge_f64(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_uge_f64(Inf, Inf) == true -; run: %fcmp_uge_f64(-Inf, -Inf) == true -; run: %fcmp_uge_f64(Inf, -Inf) == true -; run: %fcmp_uge_f64(-Inf, Inf) == false +; run: %fcmp_uge_f64(Inf, Inf) == 1 +; run: %fcmp_uge_f64(-Inf, -Inf) == 1 +; run: %fcmp_uge_f64(Inf, -Inf) == 1 +; run: %fcmp_uge_f64(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_uge_f64(0x0.0, Inf) == false -; run: %fcmp_uge_f64(-0x0.0, Inf) == false -; run: %fcmp_uge_f64(0x0.0, -Inf) == true -; run: %fcmp_uge_f64(-0x0.0, -Inf) == true -; run: %fcmp_uge_f64(Inf, 0x0.0) == true -; run: %fcmp_uge_f64(Inf, -0x0.0) == true -; run: %fcmp_uge_f64(-Inf, 0x0.0) == false -; run: %fcmp_uge_f64(-Inf, -0x0.0) == false +; run: %fcmp_uge_f64(0x0.0, Inf) == 0 +; run: %fcmp_uge_f64(-0x0.0, Inf) == 0 +; run: %fcmp_uge_f64(0x0.0, -Inf) == 1 +; run: %fcmp_uge_f64(-0x0.0, -Inf) == 1 +; run: %fcmp_uge_f64(Inf, 0x0.0) == 1 +; run: %fcmp_uge_f64(Inf, -0x0.0) == 1 +; run: %fcmp_uge_f64(-Inf, 0x0.0) == 0 +; run: %fcmp_uge_f64(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_uge_f64(0x1.0p-52, 0x1.0p-52) == true -; run: %fcmp_uge_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == true -; run: %fcmp_uge_f64(0x1.0p-1022, 0x1.0p-1022) == true -; run: %fcmp_uge_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == false -; run: %fcmp_uge_f64(0x1.0p-52, 0x1.0p-1022) == true -; run: %fcmp_uge_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == false +; run: %fcmp_uge_f64(0x1.0p-52, 0x1.0p-52) == 1 +; run: %fcmp_uge_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_uge_f64(0x1.0p-1022, 0x1.0p-1022) == 1 +; run: %fcmp_uge_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_uge_f64(0x1.0p-52, 0x1.0p-1022) == 1 +; run: %fcmp_uge_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 0 ; Subnormals -; run: %fcmp_uge_f64(0x0.8p-1022, -0x0.8p-1022) == true -; run: %fcmp_uge_f64(-0x0.8p-1022, 0x0.8p-1022) == false -; run: %fcmp_uge_f64(0x0.8p-1022, 0x0.0) == true -; run: %fcmp_uge_f64(-0x0.8p-1022, 0x0.0) == false -; run: %fcmp_uge_f64(0x0.8p-1022, -0x0.0) == true -; run: %fcmp_uge_f64(-0x0.8p-1022, -0x0.0) == false -; run: %fcmp_uge_f64(0x0.0, 0x0.8p-1022) == false -; run: %fcmp_uge_f64(0x0.0, -0x0.8p-1022) == true -; run: %fcmp_uge_f64(-0x0.0, 0x0.8p-1022) == false -; run: %fcmp_uge_f64(-0x0.0, -0x0.8p-1022) == true +; run: %fcmp_uge_f64(0x0.8p-1022, -0x0.8p-1022) == 1 +; run: %fcmp_uge_f64(-0x0.8p-1022, 0x0.8p-1022) == 0 +; run: %fcmp_uge_f64(0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_uge_f64(-0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_uge_f64(0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_uge_f64(-0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_uge_f64(0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_uge_f64(0x0.0, -0x0.8p-1022) == 1 +; run: %fcmp_uge_f64(-0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_uge_f64(-0x0.0, -0x0.8p-1022) == 1 ; NaN's -; run: %fcmp_uge_f64(+NaN, +NaN) == true -; run: %fcmp_uge_f64(-NaN, -NaN) == true -; run: %fcmp_uge_f64(+NaN, -NaN) == true -; run: %fcmp_uge_f64(-NaN, +NaN) == true +; run: %fcmp_uge_f64(+NaN, +NaN) == 1 +; run: %fcmp_uge_f64(-NaN, -NaN) == 1 +; run: %fcmp_uge_f64(+NaN, -NaN) == 1 +; run: %fcmp_uge_f64(-NaN, +NaN) == 1 -; run: %fcmp_uge_f64(+NaN, -0x1.0) == true -; run: %fcmp_uge_f64(-NaN, -0x1.0) == true -; run: %fcmp_uge_f64(+NaN, 0x1.0) == true -; run: %fcmp_uge_f64(-NaN, 0x1.0) == true -; run: %fcmp_uge_f64(+NaN, -0x0.0) == true -; run: %fcmp_uge_f64(-NaN, -0x0.0) == true -; run: %fcmp_uge_f64(+NaN, 0x0.0) == true -; run: %fcmp_uge_f64(-NaN, 0x0.0) == true -; run: %fcmp_uge_f64(+NaN, -Inf) == 
true -; run: %fcmp_uge_f64(-NaN, -Inf) == true -; run: %fcmp_uge_f64(+NaN, Inf) == true -; run: %fcmp_uge_f64(-NaN, Inf) == true -; run: %fcmp_uge_f64(-0x0.0, +NaN) == true -; run: %fcmp_uge_f64(-0x0.0, -NaN) == true -; run: %fcmp_uge_f64(0x0.0, +NaN) == true -; run: %fcmp_uge_f64(0x0.0, -NaN) == true -; run: %fcmp_uge_f64(-Inf, +NaN) == true -; run: %fcmp_uge_f64(-Inf, -NaN) == true -; run: %fcmp_uge_f64(Inf, +NaN) == true -; run: %fcmp_uge_f64(Inf, -NaN) == true +; run: %fcmp_uge_f64(+NaN, -0x1.0) == 1 +; run: %fcmp_uge_f64(-NaN, -0x1.0) == 1 +; run: %fcmp_uge_f64(+NaN, 0x1.0) == 1 +; run: %fcmp_uge_f64(-NaN, 0x1.0) == 1 +; run: %fcmp_uge_f64(+NaN, -0x0.0) == 1 +; run: %fcmp_uge_f64(-NaN, -0x0.0) == 1 +; run: %fcmp_uge_f64(+NaN, 0x0.0) == 1 +; run: %fcmp_uge_f64(-NaN, 0x0.0) == 1 +; run: %fcmp_uge_f64(+NaN, -Inf) == 1 +; run: %fcmp_uge_f64(-NaN, -Inf) == 1 +; run: %fcmp_uge_f64(+NaN, Inf) == 1 +; run: %fcmp_uge_f64(-NaN, Inf) == 1 +; run: %fcmp_uge_f64(-0x0.0, +NaN) == 1 +; run: %fcmp_uge_f64(-0x0.0, -NaN) == 1 +; run: %fcmp_uge_f64(0x0.0, +NaN) == 1 +; run: %fcmp_uge_f64(0x0.0, -NaN) == 1 +; run: %fcmp_uge_f64(-Inf, +NaN) == 1 +; run: %fcmp_uge_f64(-Inf, -NaN) == 1 +; run: %fcmp_uge_f64(Inf, +NaN) == 1 +; run: %fcmp_uge_f64(Inf, -NaN) == 1 -; run: %fcmp_uge_f64(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_uge_f64(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_uge_f64(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_uge_f64(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_uge_f64(+NaN:0x1, +NaN) == true -; run: %fcmp_uge_f64(+NaN:0x1, -NaN) == true -; run: %fcmp_uge_f64(-NaN:0x1, -NaN) == true -; run: %fcmp_uge_f64(-NaN:0x1, +NaN) == true +; run: %fcmp_uge_f64(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uge_f64(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uge_f64(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uge_f64(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uge_f64(+NaN:0x1, +NaN) == 1 +; run: %fcmp_uge_f64(+NaN:0x1, -NaN) == 1 +; run: %fcmp_uge_f64(-NaN:0x1, -NaN) == 1 +; run: %fcmp_uge_f64(-NaN:0x1, +NaN) == 1 -; run: %fcmp_uge_f64(+NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_uge_f64(-NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_uge_f64(+NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_uge_f64(-NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_uge_f64(+NaN:0x800000000001, +NaN) == true -; run: %fcmp_uge_f64(+NaN:0x800000000001, -NaN) == true -; run: %fcmp_uge_f64(-NaN:0x800000000001, -NaN) == true -; run: %fcmp_uge_f64(-NaN:0x800000000001, +NaN) == true +; run: %fcmp_uge_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_uge_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_uge_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_uge_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_uge_f64(+NaN:0x800000000001, +NaN) == 1 +; run: %fcmp_uge_f64(+NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_uge_f64(-NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_uge_f64(-NaN:0x800000000001, +NaN) == 1 ; sNaN's -; run: %fcmp_uge_f64(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_uge_f64(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_uge_f64(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_uge_f64(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_uge_f64(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_uge_f64(-sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_uge_f64(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_uge_f64(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_uge_f64(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_uge_f64(-sNaN:0x1, -0x1.0) == true -; run: 
%fcmp_uge_f64(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_uge_f64(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_uge_f64(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_uge_f64(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_uge_f64(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_uge_f64(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_uge_f64(+sNaN:0x1, -Inf) == true -; run: %fcmp_uge_f64(-sNaN:0x1, -Inf) == true -; run: %fcmp_uge_f64(+sNaN:0x1, Inf) == true -; run: %fcmp_uge_f64(-sNaN:0x1, Inf) == true -; run: %fcmp_uge_f64(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_uge_f64(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_uge_f64(0x0.0, +sNaN:0x1) == true -; run: %fcmp_uge_f64(0x0.0, -sNaN:0x1) == true -; run: %fcmp_uge_f64(-Inf, +sNaN:0x1) == true -; run: %fcmp_uge_f64(-Inf, -sNaN:0x1) == true -; run: %fcmp_uge_f64(Inf, +sNaN:0x1) == true -; run: %fcmp_uge_f64(Inf, -sNaN:0x1) == true +; run: %fcmp_uge_f64(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_uge_f64(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_uge_f64(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_uge_f64(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_uge_f64(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_uge_f64(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_uge_f64(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_uge_f64(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_uge_f64(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_uge_f64(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_uge_f64(+sNaN:0x1, Inf) == 1 +; run: %fcmp_uge_f64(-sNaN:0x1, Inf) == 1 +; run: %fcmp_uge_f64(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_uge_f64(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_uge_f64(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_uge_f64(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_uge_f64(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_uge_f64(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_uge_f64(Inf, +sNaN:0x1) == 1 +; run: %fcmp_uge_f64(Inf, -sNaN:0x1) == 1 -; run: %fcmp_uge_f64(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_uge_f64(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_uge_f64(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_uge_f64(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_uge_f64(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_uge_f64(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_uge_f64(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_uge_f64(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_uge_f64(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uge_f64(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uge_f64(+sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uge_f64(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uge_f64(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_uge_f64(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_uge_f64(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_uge_f64(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_uge_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_uge_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_uge_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_uge_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_uge_f64(+sNaN:0x800000000001, +sNaN:0x1) == true -; run: %fcmp_uge_f64(+sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_uge_f64(-sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_uge_f64(-sNaN:0x800000000001, +sNaN:0x1) == true +; run: %fcmp_uge_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_uge_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_uge_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_uge_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_uge_f64(+sNaN:0x800000000001, +sNaN:0x1) == 1 +; run: %fcmp_uge_f64(+sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_uge_f64(-sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: 
%fcmp_uge_f64(-sNaN:0x800000000001, +sNaN:0x1) == 1 diff --git a/cranelift/filetests/filetests/runtests/fcmp-ugt.clif b/cranelift/filetests/filetests/runtests/fcmp-ugt.clif index af3b09dec883..100071217725 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-ugt.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-ugt.clif @@ -4,316 +4,316 @@ target x86_64 target s390x target riscv64 -function %fcmp_ugt_f32(f32, f32) -> b1 { +function %fcmp_ugt_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp ugt v0, v1 return v2 } -; run: %fcmp_ugt_f32(0x0.5, 0x0.5) == false -; run: %fcmp_ugt_f32(0x1.0, 0x1.0) == false -; run: %fcmp_ugt_f32(-0x1.0, 0x1.0) == false -; run: %fcmp_ugt_f32(0x1.0, -0x1.0) == true -; run: %fcmp_ugt_f32(0x0.5, 0x1.0) == false -; run: %fcmp_ugt_f32(0x1.5, 0x2.9) == false -; run: %fcmp_ugt_f32(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_ugt_f32(0x1.4cccccp0, 0x1.8p0) == false -; run: %fcmp_ugt_f32(0x1.b33334p0, 0x1.99999ap-2) == true -; run: %fcmp_ugt_f32(0x1.333334p-1, 0x1.666666p1) == false -; run: %fcmp_ugt_f32(-0x0.5, -0x1.0) == true -; run: %fcmp_ugt_f32(-0x1.5, -0x2.9) == true -; run: %fcmp_ugt_f32(-0x1.1p10, -0x1.333334p-1) == false -; run: %fcmp_ugt_f32(-0x1.99999ap-2, -0x1.4cccccp0) == true -; run: %fcmp_ugt_f32(-0x1.8p0, -0x1.b33334p0) == true -; run: %fcmp_ugt_f32(-0x1.4p1, -0x1.666666p1) == true -; run: %fcmp_ugt_f32(0x0.5, -0x1.0) == true -; run: %fcmp_ugt_f32(0x1.b33334p0, -0x1.b33334p0) == true +; run: %fcmp_ugt_f32(0x0.5, 0x0.5) == 0 +; run: %fcmp_ugt_f32(0x1.0, 0x1.0) == 0 +; run: %fcmp_ugt_f32(-0x1.0, 0x1.0) == 0 +; run: %fcmp_ugt_f32(0x1.0, -0x1.0) == 1 +; run: %fcmp_ugt_f32(0x0.5, 0x1.0) == 0 +; run: %fcmp_ugt_f32(0x1.5, 0x2.9) == 0 +; run: %fcmp_ugt_f32(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_ugt_f32(0x1.4cccccp0, 0x1.8p0) == 0 +; run: %fcmp_ugt_f32(0x1.b33334p0, 0x1.99999ap-2) == 1 +; run: %fcmp_ugt_f32(0x1.333334p-1, 0x1.666666p1) == 0 +; run: %fcmp_ugt_f32(-0x0.5, -0x1.0) == 1 +; run: %fcmp_ugt_f32(-0x1.5, -0x2.9) == 1 +; run: %fcmp_ugt_f32(-0x1.1p10, -0x1.333334p-1) == 0 +; run: %fcmp_ugt_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 1 +; run: %fcmp_ugt_f32(-0x1.8p0, -0x1.b33334p0) == 1 +; run: %fcmp_ugt_f32(-0x1.4p1, -0x1.666666p1) == 1 +; run: %fcmp_ugt_f32(0x0.5, -0x1.0) == 1 +; run: %fcmp_ugt_f32(0x1.b33334p0, -0x1.b33334p0) == 1 ; Zeroes -; run: %fcmp_ugt_f32(0x0.0, 0x0.0) == false -; run: %fcmp_ugt_f32(-0x0.0, -0x0.0) == false -; run: %fcmp_ugt_f32(0x0.0, -0x0.0) == false -; run: %fcmp_ugt_f32(-0x0.0, 0x0.0) == false +; run: %fcmp_ugt_f32(0x0.0, 0x0.0) == 0 +; run: %fcmp_ugt_f32(-0x0.0, -0x0.0) == 0 +; run: %fcmp_ugt_f32(0x0.0, -0x0.0) == 0 +; run: %fcmp_ugt_f32(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_ugt_f32(Inf, Inf) == false -; run: %fcmp_ugt_f32(-Inf, -Inf) == false -; run: %fcmp_ugt_f32(Inf, -Inf) == true -; run: %fcmp_ugt_f32(-Inf, Inf) == false +; run: %fcmp_ugt_f32(Inf, Inf) == 0 +; run: %fcmp_ugt_f32(-Inf, -Inf) == 0 +; run: %fcmp_ugt_f32(Inf, -Inf) == 1 +; run: %fcmp_ugt_f32(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_ugt_f32(0x0.0, Inf) == false -; run: %fcmp_ugt_f32(-0x0.0, Inf) == false -; run: %fcmp_ugt_f32(0x0.0, -Inf) == true -; run: %fcmp_ugt_f32(-0x0.0, -Inf) == true -; run: %fcmp_ugt_f32(Inf, 0x0.0) == true -; run: %fcmp_ugt_f32(Inf, -0x0.0) == true -; run: %fcmp_ugt_f32(-Inf, 0x0.0) == false -; run: %fcmp_ugt_f32(-Inf, -0x0.0) == false +; run: %fcmp_ugt_f32(0x0.0, Inf) == 0 +; run: %fcmp_ugt_f32(-0x0.0, Inf) == 0 +; run: %fcmp_ugt_f32(0x0.0, -Inf) == 1 +; run: %fcmp_ugt_f32(-0x0.0, -Inf) == 1 +; run: %fcmp_ugt_f32(Inf, 
0x0.0) == 1 +; run: %fcmp_ugt_f32(Inf, -0x0.0) == 1 +; run: %fcmp_ugt_f32(-Inf, 0x0.0) == 0 +; run: %fcmp_ugt_f32(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_ugt_f32(0x1.0p-23, 0x1.0p-23) == false -; run: %fcmp_ugt_f32(0x1.fffffep127, 0x1.fffffep127) == false -; run: %fcmp_ugt_f32(0x1.0p-126, 0x1.0p-126) == false -; run: %fcmp_ugt_f32(0x1.0p-23, 0x1.fffffep127) == false -; run: %fcmp_ugt_f32(0x1.0p-23, 0x1.0p-126) == true -; run: %fcmp_ugt_f32(0x1.0p-126, 0x1.fffffep127) == false +; run: %fcmp_ugt_f32(0x1.0p-23, 0x1.0p-23) == 0 +; run: %fcmp_ugt_f32(0x1.fffffep127, 0x1.fffffep127) == 0 +; run: %fcmp_ugt_f32(0x1.0p-126, 0x1.0p-126) == 0 +; run: %fcmp_ugt_f32(0x1.0p-23, 0x1.fffffep127) == 0 +; run: %fcmp_ugt_f32(0x1.0p-23, 0x1.0p-126) == 1 +; run: %fcmp_ugt_f32(0x1.0p-126, 0x1.fffffep127) == 0 ; Subnormals -; run: %fcmp_ugt_f32(0x0.800002p-126, -0x0.800002p-126) == true -; run: %fcmp_ugt_f32(-0x0.800002p-126, 0x0.800002p-126) == false -; run: %fcmp_ugt_f32(0x0.800002p-126, 0x0.0) == true -; run: %fcmp_ugt_f32(-0x0.800002p-126, 0x0.0) == false -; run: %fcmp_ugt_f32(0x0.800002p-126, -0x0.0) == true -; run: %fcmp_ugt_f32(-0x0.800002p-126, -0x0.0) == false -; run: %fcmp_ugt_f32(0x0.0, 0x0.800002p-126) == false -; run: %fcmp_ugt_f32(0x0.0, -0x0.800002p-126) == true -; run: %fcmp_ugt_f32(-0x0.0, 0x0.800002p-126) == false -; run: %fcmp_ugt_f32(-0x0.0, -0x0.800002p-126) == true +; run: %fcmp_ugt_f32(0x0.800002p-126, -0x0.800002p-126) == 1 +; run: %fcmp_ugt_f32(-0x0.800002p-126, 0x0.800002p-126) == 0 +; run: %fcmp_ugt_f32(0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_ugt_f32(-0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_ugt_f32(0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_ugt_f32(-0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_ugt_f32(0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_ugt_f32(0x0.0, -0x0.800002p-126) == 1 +; run: %fcmp_ugt_f32(-0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_ugt_f32(-0x0.0, -0x0.800002p-126) == 1 ; NaN's -; run: %fcmp_ugt_f32(+NaN, +NaN) == true -; run: %fcmp_ugt_f32(-NaN, -NaN) == true -; run: %fcmp_ugt_f32(+NaN, -NaN) == true -; run: %fcmp_ugt_f32(-NaN, +NaN) == true +; run: %fcmp_ugt_f32(+NaN, +NaN) == 1 +; run: %fcmp_ugt_f32(-NaN, -NaN) == 1 +; run: %fcmp_ugt_f32(+NaN, -NaN) == 1 +; run: %fcmp_ugt_f32(-NaN, +NaN) == 1 -; run: %fcmp_ugt_f32(+NaN, -0x1.0) == true -; run: %fcmp_ugt_f32(-NaN, -0x1.0) == true -; run: %fcmp_ugt_f32(+NaN, 0x1.0) == true -; run: %fcmp_ugt_f32(-NaN, 0x1.0) == true -; run: %fcmp_ugt_f32(+NaN, -0x0.0) == true -; run: %fcmp_ugt_f32(-NaN, -0x0.0) == true -; run: %fcmp_ugt_f32(+NaN, 0x0.0) == true -; run: %fcmp_ugt_f32(-NaN, 0x0.0) == true -; run: %fcmp_ugt_f32(+NaN, -Inf) == true -; run: %fcmp_ugt_f32(-NaN, -Inf) == true -; run: %fcmp_ugt_f32(+NaN, Inf) == true -; run: %fcmp_ugt_f32(-NaN, Inf) == true -; run: %fcmp_ugt_f32(-0x0.0, +NaN) == true -; run: %fcmp_ugt_f32(-0x0.0, -NaN) == true -; run: %fcmp_ugt_f32(0x0.0, +NaN) == true -; run: %fcmp_ugt_f32(0x0.0, -NaN) == true -; run: %fcmp_ugt_f32(-Inf, +NaN) == true -; run: %fcmp_ugt_f32(-Inf, -NaN) == true -; run: %fcmp_ugt_f32(Inf, +NaN) == true -; run: %fcmp_ugt_f32(Inf, -NaN) == true +; run: %fcmp_ugt_f32(+NaN, -0x1.0) == 1 +; run: %fcmp_ugt_f32(-NaN, -0x1.0) == 1 +; run: %fcmp_ugt_f32(+NaN, 0x1.0) == 1 +; run: %fcmp_ugt_f32(-NaN, 0x1.0) == 1 +; run: %fcmp_ugt_f32(+NaN, -0x0.0) == 1 +; run: %fcmp_ugt_f32(-NaN, -0x0.0) == 1 +; run: %fcmp_ugt_f32(+NaN, 0x0.0) == 1 +; run: %fcmp_ugt_f32(-NaN, 0x0.0) == 1 +; run: %fcmp_ugt_f32(+NaN, -Inf) == 1 +; run: %fcmp_ugt_f32(-NaN, -Inf) == 1 +; run: 
%fcmp_ugt_f32(+NaN, Inf) == 1 +; run: %fcmp_ugt_f32(-NaN, Inf) == 1 +; run: %fcmp_ugt_f32(-0x0.0, +NaN) == 1 +; run: %fcmp_ugt_f32(-0x0.0, -NaN) == 1 +; run: %fcmp_ugt_f32(0x0.0, +NaN) == 1 +; run: %fcmp_ugt_f32(0x0.0, -NaN) == 1 +; run: %fcmp_ugt_f32(-Inf, +NaN) == 1 +; run: %fcmp_ugt_f32(-Inf, -NaN) == 1 +; run: %fcmp_ugt_f32(Inf, +NaN) == 1 +; run: %fcmp_ugt_f32(Inf, -NaN) == 1 -; run: %fcmp_ugt_f32(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ugt_f32(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ugt_f32(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ugt_f32(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ugt_f32(+NaN:0x1, +NaN) == true -; run: %fcmp_ugt_f32(+NaN:0x1, -NaN) == true -; run: %fcmp_ugt_f32(-NaN:0x1, -NaN) == true -; run: %fcmp_ugt_f32(-NaN:0x1, +NaN) == true +; run: %fcmp_ugt_f32(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ugt_f32(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ugt_f32(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ugt_f32(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ugt_f32(+NaN:0x1, +NaN) == 1 +; run: %fcmp_ugt_f32(+NaN:0x1, -NaN) == 1 +; run: %fcmp_ugt_f32(-NaN:0x1, -NaN) == 1 +; run: %fcmp_ugt_f32(-NaN:0x1, +NaN) == 1 -; run: %fcmp_ugt_f32(+NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_ugt_f32(-NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_ugt_f32(+NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_ugt_f32(-NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_ugt_f32(+NaN:0x80001, +NaN) == true -; run: %fcmp_ugt_f32(+NaN:0x80001, -NaN) == true -; run: %fcmp_ugt_f32(-NaN:0x80001, -NaN) == true -; run: %fcmp_ugt_f32(-NaN:0x80001, +NaN) == true +; run: %fcmp_ugt_f32(+NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_ugt_f32(-NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_ugt_f32(+NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_ugt_f32(-NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_ugt_f32(+NaN:0x80001, +NaN) == 1 +; run: %fcmp_ugt_f32(+NaN:0x80001, -NaN) == 1 +; run: %fcmp_ugt_f32(-NaN:0x80001, -NaN) == 1 +; run: %fcmp_ugt_f32(-NaN:0x80001, +NaN) == 1 ; sNaN's -; run: %fcmp_ugt_f32(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ugt_f32(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ugt_f32(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ugt_f32(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_ugt_f32(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_ugt_f32(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_ugt_f32(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_ugt_f32(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_ugt_f32(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_ugt_f32(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_ugt_f32(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_ugt_f32(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_ugt_f32(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_ugt_f32(+sNaN:0x1, -Inf) == true -; run: %fcmp_ugt_f32(-sNaN:0x1, -Inf) == true -; run: %fcmp_ugt_f32(+sNaN:0x1, Inf) == true -; run: %fcmp_ugt_f32(-sNaN:0x1, Inf) == true -; run: %fcmp_ugt_f32(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_ugt_f32(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_ugt_f32(0x0.0, +sNaN:0x1) == true -; run: %fcmp_ugt_f32(0x0.0, -sNaN:0x1) == true -; run: %fcmp_ugt_f32(-Inf, +sNaN:0x1) == true -; run: %fcmp_ugt_f32(-Inf, -sNaN:0x1) == true -; run: %fcmp_ugt_f32(Inf, +sNaN:0x1) == true -; run: %fcmp_ugt_f32(Inf, -sNaN:0x1) == true +; run: %fcmp_ugt_f32(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ugt_f32(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ugt_f32(+sNaN:0x1, 
-0x0.0) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ugt_f32(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ugt_f32(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_ugt_f32(+sNaN:0x1, Inf) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x1, Inf) == 1 +; run: %fcmp_ugt_f32(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(Inf, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(Inf, -sNaN:0x1) == 1 -; run: %fcmp_ugt_f32(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ugt_f32(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ugt_f32(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ugt_f32(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ugt_f32(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ugt_f32(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ugt_f32(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ugt_f32(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_ugt_f32(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ugt_f32(+sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ugt_f32(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_ugt_f32(+sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_ugt_f32(-sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_ugt_f32(+sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_ugt_f32(-sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_ugt_f32(+sNaN:0x80001, +sNaN:0x1) == true -; run: %fcmp_ugt_f32(+sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_ugt_f32(-sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_ugt_f32(-sNaN:0x80001, +sNaN:0x1) == true +; run: %fcmp_ugt_f32(+sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_ugt_f32(+sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_ugt_f32(+sNaN:0x80001, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(+sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f32(-sNaN:0x80001, +sNaN:0x1) == 1 -function %fcmp_ugt_f64(f64, f64) -> b1 { +function %fcmp_ugt_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp ugt v0, v1 return v2 } -; run: %fcmp_ugt_f64(0x0.5, 0x0.5) == false -; run: %fcmp_ugt_f64(0x1.0, 0x1.0) == false -; run: %fcmp_ugt_f64(-0x1.0, 0x1.0) == false -; run: %fcmp_ugt_f64(0x1.0, -0x1.0) == true -; run: %fcmp_ugt_f64(0x0.5, 0x1.0) == false -; run: %fcmp_ugt_f64(0x1.5, 0x2.9) == false -; run: %fcmp_ugt_f64(0x1.1p10, 0x1.4p1) == true -; run: %fcmp_ugt_f64(0x1.4cccccccccccdp0, 0x1.8p0) == false -; run: %fcmp_ugt_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == true -; run: %fcmp_ugt_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == false -; run: %fcmp_ugt_f64(-0x0.5, -0x1.0) == true -; run: %fcmp_ugt_f64(-0x1.5, -0x2.9) == true -; run: %fcmp_ugt_f64(-0x1.1p10, -0x1.3333333333333p-1) == false -; run: %fcmp_ugt_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == true -; run: %fcmp_ugt_f64(-0x1.8p0, -0x1.b333333333333p0) == true -; run: %fcmp_ugt_f64(-0x1.4p1, -0x1.6666666666666p1) == true -; run: %fcmp_ugt_f64(0x0.5, -0x1.0) == true -; run: %fcmp_ugt_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == true +; run: %fcmp_ugt_f64(0x0.5, 
0x0.5) == 0 +; run: %fcmp_ugt_f64(0x1.0, 0x1.0) == 0 +; run: %fcmp_ugt_f64(-0x1.0, 0x1.0) == 0 +; run: %fcmp_ugt_f64(0x1.0, -0x1.0) == 1 +; run: %fcmp_ugt_f64(0x0.5, 0x1.0) == 0 +; run: %fcmp_ugt_f64(0x1.5, 0x2.9) == 0 +; run: %fcmp_ugt_f64(0x1.1p10, 0x1.4p1) == 1 +; run: %fcmp_ugt_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 0 +; run: %fcmp_ugt_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 1 +; run: %fcmp_ugt_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 0 +; run: %fcmp_ugt_f64(-0x0.5, -0x1.0) == 1 +; run: %fcmp_ugt_f64(-0x1.5, -0x2.9) == 1 +; run: %fcmp_ugt_f64(-0x1.1p10, -0x1.3333333333333p-1) == 0 +; run: %fcmp_ugt_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 1 +; run: %fcmp_ugt_f64(-0x1.8p0, -0x1.b333333333333p0) == 1 +; run: %fcmp_ugt_f64(-0x1.4p1, -0x1.6666666666666p1) == 1 +; run: %fcmp_ugt_f64(0x0.5, -0x1.0) == 1 +; run: %fcmp_ugt_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 1 ; Zeroes -; run: %fcmp_ugt_f64(0x0.0, 0x0.0) == false -; run: %fcmp_ugt_f64(-0x0.0, -0x0.0) == false -; run: %fcmp_ugt_f64(0x0.0, -0x0.0) == false -; run: %fcmp_ugt_f64(-0x0.0, 0x0.0) == false +; run: %fcmp_ugt_f64(0x0.0, 0x0.0) == 0 +; run: %fcmp_ugt_f64(-0x0.0, -0x0.0) == 0 +; run: %fcmp_ugt_f64(0x0.0, -0x0.0) == 0 +; run: %fcmp_ugt_f64(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_ugt_f64(Inf, Inf) == false -; run: %fcmp_ugt_f64(-Inf, -Inf) == false -; run: %fcmp_ugt_f64(Inf, -Inf) == true -; run: %fcmp_ugt_f64(-Inf, Inf) == false +; run: %fcmp_ugt_f64(Inf, Inf) == 0 +; run: %fcmp_ugt_f64(-Inf, -Inf) == 0 +; run: %fcmp_ugt_f64(Inf, -Inf) == 1 +; run: %fcmp_ugt_f64(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_ugt_f64(0x0.0, Inf) == false -; run: %fcmp_ugt_f64(-0x0.0, Inf) == false -; run: %fcmp_ugt_f64(0x0.0, -Inf) == true -; run: %fcmp_ugt_f64(-0x0.0, -Inf) == true -; run: %fcmp_ugt_f64(Inf, 0x0.0) == true -; run: %fcmp_ugt_f64(Inf, -0x0.0) == true -; run: %fcmp_ugt_f64(-Inf, 0x0.0) == false -; run: %fcmp_ugt_f64(-Inf, -0x0.0) == false +; run: %fcmp_ugt_f64(0x0.0, Inf) == 0 +; run: %fcmp_ugt_f64(-0x0.0, Inf) == 0 +; run: %fcmp_ugt_f64(0x0.0, -Inf) == 1 +; run: %fcmp_ugt_f64(-0x0.0, -Inf) == 1 +; run: %fcmp_ugt_f64(Inf, 0x0.0) == 1 +; run: %fcmp_ugt_f64(Inf, -0x0.0) == 1 +; run: %fcmp_ugt_f64(-Inf, 0x0.0) == 0 +; run: %fcmp_ugt_f64(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_ugt_f64(0x1.0p-52, 0x1.0p-52) == false -; run: %fcmp_ugt_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == false -; run: %fcmp_ugt_f64(0x1.0p-1022, 0x1.0p-1022) == false -; run: %fcmp_ugt_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == false -; run: %fcmp_ugt_f64(0x1.0p-52, 0x1.0p-1022) == true -; run: %fcmp_ugt_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == false +; run: %fcmp_ugt_f64(0x1.0p-52, 0x1.0p-52) == 0 +; run: %fcmp_ugt_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_ugt_f64(0x1.0p-1022, 0x1.0p-1022) == 0 +; run: %fcmp_ugt_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_ugt_f64(0x1.0p-52, 0x1.0p-1022) == 1 +; run: %fcmp_ugt_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 0 ; Subnormals -; run: %fcmp_ugt_f64(0x0.8p-1022, -0x0.8p-1022) == true -; run: %fcmp_ugt_f64(-0x0.8p-1022, 0x0.8p-1022) == false -; run: %fcmp_ugt_f64(0x0.8p-1022, 0x0.0) == true -; run: %fcmp_ugt_f64(-0x0.8p-1022, 0x0.0) == false -; run: %fcmp_ugt_f64(0x0.8p-1022, -0x0.0) == true -; run: %fcmp_ugt_f64(-0x0.8p-1022, -0x0.0) == false -; run: %fcmp_ugt_f64(0x0.0, 0x0.8p-1022) == false -; run: %fcmp_ugt_f64(0x0.0, -0x0.8p-1022) == true -; run: %fcmp_ugt_f64(-0x0.0, 0x0.8p-1022) == false -; run: 
%fcmp_ugt_f64(-0x0.0, -0x0.8p-1022) == true +; run: %fcmp_ugt_f64(0x0.8p-1022, -0x0.8p-1022) == 1 +; run: %fcmp_ugt_f64(-0x0.8p-1022, 0x0.8p-1022) == 0 +; run: %fcmp_ugt_f64(0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_ugt_f64(-0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_ugt_f64(0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_ugt_f64(-0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_ugt_f64(0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_ugt_f64(0x0.0, -0x0.8p-1022) == 1 +; run: %fcmp_ugt_f64(-0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_ugt_f64(-0x0.0, -0x0.8p-1022) == 1 ; NaN's -; run: %fcmp_ugt_f64(+NaN, +NaN) == true -; run: %fcmp_ugt_f64(-NaN, -NaN) == true -; run: %fcmp_ugt_f64(+NaN, -NaN) == true -; run: %fcmp_ugt_f64(-NaN, +NaN) == true +; run: %fcmp_ugt_f64(+NaN, +NaN) == 1 +; run: %fcmp_ugt_f64(-NaN, -NaN) == 1 +; run: %fcmp_ugt_f64(+NaN, -NaN) == 1 +; run: %fcmp_ugt_f64(-NaN, +NaN) == 1 -; run: %fcmp_ugt_f64(+NaN, -0x1.0) == true -; run: %fcmp_ugt_f64(-NaN, -0x1.0) == true -; run: %fcmp_ugt_f64(+NaN, 0x1.0) == true -; run: %fcmp_ugt_f64(-NaN, 0x1.0) == true -; run: %fcmp_ugt_f64(+NaN, -0x0.0) == true -; run: %fcmp_ugt_f64(-NaN, -0x0.0) == true -; run: %fcmp_ugt_f64(+NaN, 0x0.0) == true -; run: %fcmp_ugt_f64(-NaN, 0x0.0) == true -; run: %fcmp_ugt_f64(+NaN, -Inf) == true -; run: %fcmp_ugt_f64(-NaN, -Inf) == true -; run: %fcmp_ugt_f64(+NaN, Inf) == true -; run: %fcmp_ugt_f64(-NaN, Inf) == true -; run: %fcmp_ugt_f64(-0x0.0, +NaN) == true -; run: %fcmp_ugt_f64(-0x0.0, -NaN) == true -; run: %fcmp_ugt_f64(0x0.0, +NaN) == true -; run: %fcmp_ugt_f64(0x0.0, -NaN) == true -; run: %fcmp_ugt_f64(-Inf, +NaN) == true -; run: %fcmp_ugt_f64(-Inf, -NaN) == true -; run: %fcmp_ugt_f64(Inf, +NaN) == true -; run: %fcmp_ugt_f64(Inf, -NaN) == true +; run: %fcmp_ugt_f64(+NaN, -0x1.0) == 1 +; run: %fcmp_ugt_f64(-NaN, -0x1.0) == 1 +; run: %fcmp_ugt_f64(+NaN, 0x1.0) == 1 +; run: %fcmp_ugt_f64(-NaN, 0x1.0) == 1 +; run: %fcmp_ugt_f64(+NaN, -0x0.0) == 1 +; run: %fcmp_ugt_f64(-NaN, -0x0.0) == 1 +; run: %fcmp_ugt_f64(+NaN, 0x0.0) == 1 +; run: %fcmp_ugt_f64(-NaN, 0x0.0) == 1 +; run: %fcmp_ugt_f64(+NaN, -Inf) == 1 +; run: %fcmp_ugt_f64(-NaN, -Inf) == 1 +; run: %fcmp_ugt_f64(+NaN, Inf) == 1 +; run: %fcmp_ugt_f64(-NaN, Inf) == 1 +; run: %fcmp_ugt_f64(-0x0.0, +NaN) == 1 +; run: %fcmp_ugt_f64(-0x0.0, -NaN) == 1 +; run: %fcmp_ugt_f64(0x0.0, +NaN) == 1 +; run: %fcmp_ugt_f64(0x0.0, -NaN) == 1 +; run: %fcmp_ugt_f64(-Inf, +NaN) == 1 +; run: %fcmp_ugt_f64(-Inf, -NaN) == 1 +; run: %fcmp_ugt_f64(Inf, +NaN) == 1 +; run: %fcmp_ugt_f64(Inf, -NaN) == 1 -; run: %fcmp_ugt_f64(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ugt_f64(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ugt_f64(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ugt_f64(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ugt_f64(+NaN:0x1, +NaN) == true -; run: %fcmp_ugt_f64(+NaN:0x1, -NaN) == true -; run: %fcmp_ugt_f64(-NaN:0x1, -NaN) == true -; run: %fcmp_ugt_f64(-NaN:0x1, +NaN) == true +; run: %fcmp_ugt_f64(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ugt_f64(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ugt_f64(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ugt_f64(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ugt_f64(+NaN:0x1, +NaN) == 1 +; run: %fcmp_ugt_f64(+NaN:0x1, -NaN) == 1 +; run: %fcmp_ugt_f64(-NaN:0x1, -NaN) == 1 +; run: %fcmp_ugt_f64(-NaN:0x1, +NaN) == 1 -; run: %fcmp_ugt_f64(+NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_ugt_f64(-NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_ugt_f64(+NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_ugt_f64(-NaN:0x800000000001, +NaN:0x800000000001) == true -; 
run: %fcmp_ugt_f64(+NaN:0x800000000001, +NaN) == true -; run: %fcmp_ugt_f64(+NaN:0x800000000001, -NaN) == true -; run: %fcmp_ugt_f64(-NaN:0x800000000001, -NaN) == true -; run: %fcmp_ugt_f64(-NaN:0x800000000001, +NaN) == true +; run: %fcmp_ugt_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_ugt_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_ugt_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_ugt_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_ugt_f64(+NaN:0x800000000001, +NaN) == 1 +; run: %fcmp_ugt_f64(+NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_ugt_f64(-NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_ugt_f64(-NaN:0x800000000001, +NaN) == 1 ; sNaN's -; run: %fcmp_ugt_f64(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ugt_f64(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ugt_f64(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ugt_f64(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_ugt_f64(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_ugt_f64(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_ugt_f64(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_ugt_f64(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_ugt_f64(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_ugt_f64(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_ugt_f64(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_ugt_f64(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_ugt_f64(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_ugt_f64(+sNaN:0x1, -Inf) == true -; run: %fcmp_ugt_f64(-sNaN:0x1, -Inf) == true -; run: %fcmp_ugt_f64(+sNaN:0x1, Inf) == true -; run: %fcmp_ugt_f64(-sNaN:0x1, Inf) == true -; run: %fcmp_ugt_f64(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_ugt_f64(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_ugt_f64(0x0.0, +sNaN:0x1) == true -; run: %fcmp_ugt_f64(0x0.0, -sNaN:0x1) == true -; run: %fcmp_ugt_f64(-Inf, +sNaN:0x1) == true -; run: %fcmp_ugt_f64(-Inf, -sNaN:0x1) == true -; run: %fcmp_ugt_f64(Inf, +sNaN:0x1) == true -; run: %fcmp_ugt_f64(Inf, -sNaN:0x1) == true +; run: %fcmp_ugt_f64(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ugt_f64(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ugt_f64(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ugt_f64(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ugt_f64(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_ugt_f64(+sNaN:0x1, Inf) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x1, Inf) == 1 +; run: %fcmp_ugt_f64(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(Inf, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(Inf, -sNaN:0x1) == 1 -; run: %fcmp_ugt_f64(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ugt_f64(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ugt_f64(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ugt_f64(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ugt_f64(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ugt_f64(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ugt_f64(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ugt_f64(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_ugt_f64(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ugt_f64(+sNaN:0x1, -NaN:0x1) == 1 +; run: 
%fcmp_ugt_f64(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ugt_f64(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_ugt_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_ugt_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_ugt_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_ugt_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_ugt_f64(+sNaN:0x800000000001, +sNaN:0x1) == true -; run: %fcmp_ugt_f64(+sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_ugt_f64(-sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_ugt_f64(-sNaN:0x800000000001, +sNaN:0x1) == true +; run: %fcmp_ugt_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_ugt_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_ugt_f64(+sNaN:0x800000000001, +sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(+sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_ugt_f64(-sNaN:0x800000000001, +sNaN:0x1) == 1 diff --git a/cranelift/filetests/filetests/runtests/fcmp-ule.clif b/cranelift/filetests/filetests/runtests/fcmp-ule.clif index 9b84f0670255..88c508d6b072 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-ule.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-ule.clif @@ -4,316 +4,316 @@ target x86_64 target s390x target riscv64 -function %fcmp_ule_f32(f32, f32) -> b1 { +function %fcmp_ule_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp ule v0, v1 return v2 } -; run: %fcmp_ule_f32(0x0.5, 0x0.5) == true -; run: %fcmp_ule_f32(0x1.0, 0x1.0) == true -; run: %fcmp_ule_f32(-0x1.0, 0x1.0) == true -; run: %fcmp_ule_f32(0x1.0, -0x1.0) == false -; run: %fcmp_ule_f32(0x0.5, 0x1.0) == true -; run: %fcmp_ule_f32(0x1.5, 0x2.9) == true -; run: %fcmp_ule_f32(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_ule_f32(0x1.4cccccp0, 0x1.8p0) == true -; run: %fcmp_ule_f32(0x1.b33334p0, 0x1.99999ap-2) == false -; run: %fcmp_ule_f32(0x1.333334p-1, 0x1.666666p1) == true -; run: %fcmp_ule_f32(-0x0.5, -0x1.0) == false -; run: %fcmp_ule_f32(-0x1.5, -0x2.9) == false -; run: %fcmp_ule_f32(-0x1.1p10, -0x1.333334p-1) == true -; run: %fcmp_ule_f32(-0x1.99999ap-2, -0x1.4cccccp0) == false -; run: %fcmp_ule_f32(-0x1.8p0, -0x1.b33334p0) == false -; run: %fcmp_ule_f32(-0x1.4p1, -0x1.666666p1) == false -; run: %fcmp_ule_f32(0x0.5, -0x1.0) == false -; run: %fcmp_ule_f32(0x1.b33334p0, -0x1.b33334p0) == false +; run: %fcmp_ule_f32(0x0.5, 0x0.5) == 1 +; run: %fcmp_ule_f32(0x1.0, 0x1.0) == 1 +; run: %fcmp_ule_f32(-0x1.0, 0x1.0) == 1 +; run: %fcmp_ule_f32(0x1.0, -0x1.0) == 0 +; run: %fcmp_ule_f32(0x0.5, 0x1.0) == 1 +; run: %fcmp_ule_f32(0x1.5, 0x2.9) == 1 +; run: %fcmp_ule_f32(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_ule_f32(0x1.4cccccp0, 0x1.8p0) == 1 +; run: %fcmp_ule_f32(0x1.b33334p0, 0x1.99999ap-2) == 0 +; run: %fcmp_ule_f32(0x1.333334p-1, 0x1.666666p1) == 1 +; run: %fcmp_ule_f32(-0x0.5, -0x1.0) == 0 +; run: %fcmp_ule_f32(-0x1.5, -0x2.9) == 0 +; run: %fcmp_ule_f32(-0x1.1p10, -0x1.333334p-1) == 1 +; run: %fcmp_ule_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 0 +; run: %fcmp_ule_f32(-0x1.8p0, -0x1.b33334p0) == 0 +; run: %fcmp_ule_f32(-0x1.4p1, -0x1.666666p1) == 0 +; run: %fcmp_ule_f32(0x0.5, -0x1.0) == 0 +; run: %fcmp_ule_f32(0x1.b33334p0, 
-0x1.b33334p0) == 0 ; Zeroes -; run: %fcmp_ule_f32(0x0.0, 0x0.0) == true -; run: %fcmp_ule_f32(-0x0.0, -0x0.0) == true -; run: %fcmp_ule_f32(0x0.0, -0x0.0) == true -; run: %fcmp_ule_f32(-0x0.0, 0x0.0) == true +; run: %fcmp_ule_f32(0x0.0, 0x0.0) == 1 +; run: %fcmp_ule_f32(-0x0.0, -0x0.0) == 1 +; run: %fcmp_ule_f32(0x0.0, -0x0.0) == 1 +; run: %fcmp_ule_f32(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_ule_f32(Inf, Inf) == true -; run: %fcmp_ule_f32(-Inf, -Inf) == true -; run: %fcmp_ule_f32(Inf, -Inf) == false -; run: %fcmp_ule_f32(-Inf, Inf) == true +; run: %fcmp_ule_f32(Inf, Inf) == 1 +; run: %fcmp_ule_f32(-Inf, -Inf) == 1 +; run: %fcmp_ule_f32(Inf, -Inf) == 0 +; run: %fcmp_ule_f32(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_ule_f32(0x0.0, Inf) == true -; run: %fcmp_ule_f32(-0x0.0, Inf) == true -; run: %fcmp_ule_f32(0x0.0, -Inf) == false -; run: %fcmp_ule_f32(-0x0.0, -Inf) == false -; run: %fcmp_ule_f32(Inf, 0x0.0) == false -; run: %fcmp_ule_f32(Inf, -0x0.0) == false -; run: %fcmp_ule_f32(-Inf, 0x0.0) == true -; run: %fcmp_ule_f32(-Inf, -0x0.0) == true +; run: %fcmp_ule_f32(0x0.0, Inf) == 1 +; run: %fcmp_ule_f32(-0x0.0, Inf) == 1 +; run: %fcmp_ule_f32(0x0.0, -Inf) == 0 +; run: %fcmp_ule_f32(-0x0.0, -Inf) == 0 +; run: %fcmp_ule_f32(Inf, 0x0.0) == 0 +; run: %fcmp_ule_f32(Inf, -0x0.0) == 0 +; run: %fcmp_ule_f32(-Inf, 0x0.0) == 1 +; run: %fcmp_ule_f32(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_ule_f32(0x1.0p-23, 0x1.0p-23) == true -; run: %fcmp_ule_f32(0x1.fffffep127, 0x1.fffffep127) == true -; run: %fcmp_ule_f32(0x1.0p-126, 0x1.0p-126) == true -; run: %fcmp_ule_f32(0x1.0p-23, 0x1.fffffep127) == true -; run: %fcmp_ule_f32(0x1.0p-23, 0x1.0p-126) == false -; run: %fcmp_ule_f32(0x1.0p-126, 0x1.fffffep127) == true +; run: %fcmp_ule_f32(0x1.0p-23, 0x1.0p-23) == 1 +; run: %fcmp_ule_f32(0x1.fffffep127, 0x1.fffffep127) == 1 +; run: %fcmp_ule_f32(0x1.0p-126, 0x1.0p-126) == 1 +; run: %fcmp_ule_f32(0x1.0p-23, 0x1.fffffep127) == 1 +; run: %fcmp_ule_f32(0x1.0p-23, 0x1.0p-126) == 0 +; run: %fcmp_ule_f32(0x1.0p-126, 0x1.fffffep127) == 1 ; Subnormals -; run: %fcmp_ule_f32(0x0.800002p-126, -0x0.800002p-126) == false -; run: %fcmp_ule_f32(-0x0.800002p-126, 0x0.800002p-126) == true -; run: %fcmp_ule_f32(0x0.800002p-126, 0x0.0) == false -; run: %fcmp_ule_f32(-0x0.800002p-126, 0x0.0) == true -; run: %fcmp_ule_f32(0x0.800002p-126, -0x0.0) == false -; run: %fcmp_ule_f32(-0x0.800002p-126, -0x0.0) == true -; run: %fcmp_ule_f32(0x0.0, 0x0.800002p-126) == true -; run: %fcmp_ule_f32(0x0.0, -0x0.800002p-126) == false -; run: %fcmp_ule_f32(-0x0.0, 0x0.800002p-126) == true -; run: %fcmp_ule_f32(-0x0.0, -0x0.800002p-126) == false +; run: %fcmp_ule_f32(0x0.800002p-126, -0x0.800002p-126) == 0 +; run: %fcmp_ule_f32(-0x0.800002p-126, 0x0.800002p-126) == 1 +; run: %fcmp_ule_f32(0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_ule_f32(-0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_ule_f32(0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_ule_f32(-0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_ule_f32(0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_ule_f32(0x0.0, -0x0.800002p-126) == 0 +; run: %fcmp_ule_f32(-0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_ule_f32(-0x0.0, -0x0.800002p-126) == 0 ; NaN's -; run: %fcmp_ule_f32(+NaN, +NaN) == true -; run: %fcmp_ule_f32(-NaN, -NaN) == true -; run: %fcmp_ule_f32(+NaN, -NaN) == true -; run: %fcmp_ule_f32(-NaN, +NaN) == true +; run: %fcmp_ule_f32(+NaN, +NaN) == 1 +; run: %fcmp_ule_f32(-NaN, -NaN) == 1 +; run: %fcmp_ule_f32(+NaN, -NaN) == 1 +; run: %fcmp_ule_f32(-NaN, +NaN) == 1 -; run: 
%fcmp_ule_f32(+NaN, -0x1.0) == true -; run: %fcmp_ule_f32(-NaN, -0x1.0) == true -; run: %fcmp_ule_f32(+NaN, 0x1.0) == true -; run: %fcmp_ule_f32(-NaN, 0x1.0) == true -; run: %fcmp_ule_f32(+NaN, -0x0.0) == true -; run: %fcmp_ule_f32(-NaN, -0x0.0) == true -; run: %fcmp_ule_f32(+NaN, 0x0.0) == true -; run: %fcmp_ule_f32(-NaN, 0x0.0) == true -; run: %fcmp_ule_f32(+NaN, -Inf) == true -; run: %fcmp_ule_f32(-NaN, -Inf) == true -; run: %fcmp_ule_f32(+NaN, Inf) == true -; run: %fcmp_ule_f32(-NaN, Inf) == true -; run: %fcmp_ule_f32(-0x0.0, +NaN) == true -; run: %fcmp_ule_f32(-0x0.0, -NaN) == true -; run: %fcmp_ule_f32(0x0.0, +NaN) == true -; run: %fcmp_ule_f32(0x0.0, -NaN) == true -; run: %fcmp_ule_f32(-Inf, +NaN) == true -; run: %fcmp_ule_f32(-Inf, -NaN) == true -; run: %fcmp_ule_f32(Inf, +NaN) == true -; run: %fcmp_ule_f32(Inf, -NaN) == true +; run: %fcmp_ule_f32(+NaN, -0x1.0) == 1 +; run: %fcmp_ule_f32(-NaN, -0x1.0) == 1 +; run: %fcmp_ule_f32(+NaN, 0x1.0) == 1 +; run: %fcmp_ule_f32(-NaN, 0x1.0) == 1 +; run: %fcmp_ule_f32(+NaN, -0x0.0) == 1 +; run: %fcmp_ule_f32(-NaN, -0x0.0) == 1 +; run: %fcmp_ule_f32(+NaN, 0x0.0) == 1 +; run: %fcmp_ule_f32(-NaN, 0x0.0) == 1 +; run: %fcmp_ule_f32(+NaN, -Inf) == 1 +; run: %fcmp_ule_f32(-NaN, -Inf) == 1 +; run: %fcmp_ule_f32(+NaN, Inf) == 1 +; run: %fcmp_ule_f32(-NaN, Inf) == 1 +; run: %fcmp_ule_f32(-0x0.0, +NaN) == 1 +; run: %fcmp_ule_f32(-0x0.0, -NaN) == 1 +; run: %fcmp_ule_f32(0x0.0, +NaN) == 1 +; run: %fcmp_ule_f32(0x0.0, -NaN) == 1 +; run: %fcmp_ule_f32(-Inf, +NaN) == 1 +; run: %fcmp_ule_f32(-Inf, -NaN) == 1 +; run: %fcmp_ule_f32(Inf, +NaN) == 1 +; run: %fcmp_ule_f32(Inf, -NaN) == 1 -; run: %fcmp_ule_f32(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ule_f32(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ule_f32(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ule_f32(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ule_f32(+NaN:0x1, +NaN) == true -; run: %fcmp_ule_f32(+NaN:0x1, -NaN) == true -; run: %fcmp_ule_f32(-NaN:0x1, -NaN) == true -; run: %fcmp_ule_f32(-NaN:0x1, +NaN) == true +; run: %fcmp_ule_f32(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ule_f32(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ule_f32(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ule_f32(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ule_f32(+NaN:0x1, +NaN) == 1 +; run: %fcmp_ule_f32(+NaN:0x1, -NaN) == 1 +; run: %fcmp_ule_f32(-NaN:0x1, -NaN) == 1 +; run: %fcmp_ule_f32(-NaN:0x1, +NaN) == 1 -; run: %fcmp_ule_f32(+NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_ule_f32(-NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_ule_f32(+NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_ule_f32(-NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_ule_f32(+NaN:0x80001, +NaN) == true -; run: %fcmp_ule_f32(+NaN:0x80001, -NaN) == true -; run: %fcmp_ule_f32(-NaN:0x80001, -NaN) == true -; run: %fcmp_ule_f32(-NaN:0x80001, +NaN) == true +; run: %fcmp_ule_f32(+NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_ule_f32(-NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_ule_f32(+NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_ule_f32(-NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_ule_f32(+NaN:0x80001, +NaN) == 1 +; run: %fcmp_ule_f32(+NaN:0x80001, -NaN) == 1 +; run: %fcmp_ule_f32(-NaN:0x80001, -NaN) == 1 +; run: %fcmp_ule_f32(-NaN:0x80001, +NaN) == 1 ; sNaN's -; run: %fcmp_ule_f32(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ule_f32(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ule_f32(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ule_f32(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_ule_f32(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ule_f32(-sNaN:0x1, -sNaN:0x1) == 1 +; run: 
%fcmp_ule_f32(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ule_f32(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_ule_f32(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_ule_f32(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_ule_f32(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_ule_f32(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_ule_f32(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_ule_f32(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_ule_f32(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_ule_f32(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_ule_f32(+sNaN:0x1, -Inf) == true -; run: %fcmp_ule_f32(-sNaN:0x1, -Inf) == true -; run: %fcmp_ule_f32(+sNaN:0x1, Inf) == true -; run: %fcmp_ule_f32(-sNaN:0x1, Inf) == true -; run: %fcmp_ule_f32(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_ule_f32(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_ule_f32(0x0.0, +sNaN:0x1) == true -; run: %fcmp_ule_f32(0x0.0, -sNaN:0x1) == true -; run: %fcmp_ule_f32(-Inf, +sNaN:0x1) == true -; run: %fcmp_ule_f32(-Inf, -sNaN:0x1) == true -; run: %fcmp_ule_f32(Inf, +sNaN:0x1) == true -; run: %fcmp_ule_f32(Inf, -sNaN:0x1) == true +; run: %fcmp_ule_f32(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ule_f32(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ule_f32(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ule_f32(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ule_f32(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ule_f32(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ule_f32(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ule_f32(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ule_f32(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_ule_f32(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_ule_f32(+sNaN:0x1, Inf) == 1 +; run: %fcmp_ule_f32(-sNaN:0x1, Inf) == 1 +; run: %fcmp_ule_f32(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ule_f32(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ule_f32(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ule_f32(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ule_f32(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_ule_f32(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_ule_f32(Inf, +sNaN:0x1) == 1 +; run: %fcmp_ule_f32(Inf, -sNaN:0x1) == 1 -; run: %fcmp_ule_f32(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ule_f32(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ule_f32(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ule_f32(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ule_f32(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ule_f32(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ule_f32(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ule_f32(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_ule_f32(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ule_f32(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ule_f32(+sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ule_f32(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ule_f32(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ule_f32(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ule_f32(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ule_f32(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_ule_f32(+sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_ule_f32(-sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_ule_f32(+sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_ule_f32(-sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_ule_f32(+sNaN:0x80001, +sNaN:0x1) == true -; run: %fcmp_ule_f32(+sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_ule_f32(-sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_ule_f32(-sNaN:0x80001, +sNaN:0x1) == true +; run: %fcmp_ule_f32(+sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_ule_f32(-sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_ule_f32(+sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_ule_f32(-sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_ule_f32(+sNaN:0x80001, +sNaN:0x1) == 1 +; run: %fcmp_ule_f32(+sNaN:0x80001, -sNaN:0x1) == 1 +; run: 
%fcmp_ule_f32(-sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_ule_f32(-sNaN:0x80001, +sNaN:0x1) == 1 -function %fcmp_ule_f64(f64, f64) -> b1 { +function %fcmp_ule_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp ule v0, v1 return v2 } -; run: %fcmp_ule_f64(0x0.5, 0x0.5) == true -; run: %fcmp_ule_f64(0x1.0, 0x1.0) == true -; run: %fcmp_ule_f64(-0x1.0, 0x1.0) == true -; run: %fcmp_ule_f64(0x1.0, -0x1.0) == false -; run: %fcmp_ule_f64(0x0.5, 0x1.0) == true -; run: %fcmp_ule_f64(0x1.5, 0x2.9) == true -; run: %fcmp_ule_f64(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_ule_f64(0x1.4cccccccccccdp0, 0x1.8p0) == true -; run: %fcmp_ule_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == false -; run: %fcmp_ule_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == true -; run: %fcmp_ule_f64(-0x0.5, -0x1.0) == false -; run: %fcmp_ule_f64(-0x1.5, -0x2.9) == false -; run: %fcmp_ule_f64(-0x1.1p10, -0x1.3333333333333p-1) == true -; run: %fcmp_ule_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == false -; run: %fcmp_ule_f64(-0x1.8p0, -0x1.b333333333333p0) == false -; run: %fcmp_ule_f64(-0x1.4p1, -0x1.6666666666666p1) == false -; run: %fcmp_ule_f64(0x0.5, -0x1.0) == false -; run: %fcmp_ule_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == false +; run: %fcmp_ule_f64(0x0.5, 0x0.5) == 1 +; run: %fcmp_ule_f64(0x1.0, 0x1.0) == 1 +; run: %fcmp_ule_f64(-0x1.0, 0x1.0) == 1 +; run: %fcmp_ule_f64(0x1.0, -0x1.0) == 0 +; run: %fcmp_ule_f64(0x0.5, 0x1.0) == 1 +; run: %fcmp_ule_f64(0x1.5, 0x2.9) == 1 +; run: %fcmp_ule_f64(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_ule_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 1 +; run: %fcmp_ule_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 0 +; run: %fcmp_ule_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 1 +; run: %fcmp_ule_f64(-0x0.5, -0x1.0) == 0 +; run: %fcmp_ule_f64(-0x1.5, -0x2.9) == 0 +; run: %fcmp_ule_f64(-0x1.1p10, -0x1.3333333333333p-1) == 1 +; run: %fcmp_ule_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 0 +; run: %fcmp_ule_f64(-0x1.8p0, -0x1.b333333333333p0) == 0 +; run: %fcmp_ule_f64(-0x1.4p1, -0x1.6666666666666p1) == 0 +; run: %fcmp_ule_f64(0x0.5, -0x1.0) == 0 +; run: %fcmp_ule_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 0 ; Zeroes -; run: %fcmp_ule_f64(0x0.0, 0x0.0) == true -; run: %fcmp_ule_f64(-0x0.0, -0x0.0) == true -; run: %fcmp_ule_f64(0x0.0, -0x0.0) == true -; run: %fcmp_ule_f64(-0x0.0, 0x0.0) == true +; run: %fcmp_ule_f64(0x0.0, 0x0.0) == 1 +; run: %fcmp_ule_f64(-0x0.0, -0x0.0) == 1 +; run: %fcmp_ule_f64(0x0.0, -0x0.0) == 1 +; run: %fcmp_ule_f64(-0x0.0, 0x0.0) == 1 ; Infinities -; run: %fcmp_ule_f64(Inf, Inf) == true -; run: %fcmp_ule_f64(-Inf, -Inf) == true -; run: %fcmp_ule_f64(Inf, -Inf) == false -; run: %fcmp_ule_f64(-Inf, Inf) == true +; run: %fcmp_ule_f64(Inf, Inf) == 1 +; run: %fcmp_ule_f64(-Inf, -Inf) == 1 +; run: %fcmp_ule_f64(Inf, -Inf) == 0 +; run: %fcmp_ule_f64(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_ule_f64(0x0.0, Inf) == true -; run: %fcmp_ule_f64(-0x0.0, Inf) == true -; run: %fcmp_ule_f64(0x0.0, -Inf) == false -; run: %fcmp_ule_f64(-0x0.0, -Inf) == false -; run: %fcmp_ule_f64(Inf, 0x0.0) == false -; run: %fcmp_ule_f64(Inf, -0x0.0) == false -; run: %fcmp_ule_f64(-Inf, 0x0.0) == true -; run: %fcmp_ule_f64(-Inf, -0x0.0) == true +; run: %fcmp_ule_f64(0x0.0, Inf) == 1 +; run: %fcmp_ule_f64(-0x0.0, Inf) == 1 +; run: %fcmp_ule_f64(0x0.0, -Inf) == 0 +; run: %fcmp_ule_f64(-0x0.0, -Inf) == 0 +; run: %fcmp_ule_f64(Inf, 0x0.0) == 0 +; run: %fcmp_ule_f64(Inf, -0x0.0) == 0 +; run: %fcmp_ule_f64(-Inf, 0x0.0) == 1 +; run: %fcmp_ule_f64(-Inf, -0x0.0) == 1 ; 
Epsilon / Max / Min Positive -; run: %fcmp_ule_f64(0x1.0p-52, 0x1.0p-52) == true -; run: %fcmp_ule_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == true -; run: %fcmp_ule_f64(0x1.0p-1022, 0x1.0p-1022) == true -; run: %fcmp_ule_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == true -; run: %fcmp_ule_f64(0x1.0p-52, 0x1.0p-1022) == false -; run: %fcmp_ule_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == true +; run: %fcmp_ule_f64(0x1.0p-52, 0x1.0p-52) == 1 +; run: %fcmp_ule_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_ule_f64(0x1.0p-1022, 0x1.0p-1022) == 1 +; run: %fcmp_ule_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_ule_f64(0x1.0p-52, 0x1.0p-1022) == 0 +; run: %fcmp_ule_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 1 ; Subnormals -; run: %fcmp_ule_f64(0x0.8p-1022, -0x0.8p-1022) == false -; run: %fcmp_ule_f64(-0x0.8p-1022, 0x0.8p-1022) == true -; run: %fcmp_ule_f64(0x0.8p-1022, 0x0.0) == false -; run: %fcmp_ule_f64(-0x0.8p-1022, 0x0.0) == true -; run: %fcmp_ule_f64(0x0.8p-1022, -0x0.0) == false -; run: %fcmp_ule_f64(-0x0.8p-1022, -0x0.0) == true -; run: %fcmp_ule_f64(0x0.0, 0x0.8p-1022) == true -; run: %fcmp_ule_f64(0x0.0, -0x0.8p-1022) == false -; run: %fcmp_ule_f64(-0x0.0, 0x0.8p-1022) == true -; run: %fcmp_ule_f64(-0x0.0, -0x0.8p-1022) == false +; run: %fcmp_ule_f64(0x0.8p-1022, -0x0.8p-1022) == 0 +; run: %fcmp_ule_f64(-0x0.8p-1022, 0x0.8p-1022) == 1 +; run: %fcmp_ule_f64(0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_ule_f64(-0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_ule_f64(0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_ule_f64(-0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_ule_f64(0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_ule_f64(0x0.0, -0x0.8p-1022) == 0 +; run: %fcmp_ule_f64(-0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_ule_f64(-0x0.0, -0x0.8p-1022) == 0 ; NaN's -; run: %fcmp_ule_f64(+NaN, +NaN) == true -; run: %fcmp_ule_f64(-NaN, -NaN) == true -; run: %fcmp_ule_f64(+NaN, -NaN) == true -; run: %fcmp_ule_f64(-NaN, +NaN) == true +; run: %fcmp_ule_f64(+NaN, +NaN) == 1 +; run: %fcmp_ule_f64(-NaN, -NaN) == 1 +; run: %fcmp_ule_f64(+NaN, -NaN) == 1 +; run: %fcmp_ule_f64(-NaN, +NaN) == 1 -; run: %fcmp_ule_f64(+NaN, -0x1.0) == true -; run: %fcmp_ule_f64(-NaN, -0x1.0) == true -; run: %fcmp_ule_f64(+NaN, 0x1.0) == true -; run: %fcmp_ule_f64(-NaN, 0x1.0) == true -; run: %fcmp_ule_f64(+NaN, -0x0.0) == true -; run: %fcmp_ule_f64(-NaN, -0x0.0) == true -; run: %fcmp_ule_f64(+NaN, 0x0.0) == true -; run: %fcmp_ule_f64(-NaN, 0x0.0) == true -; run: %fcmp_ule_f64(+NaN, -Inf) == true -; run: %fcmp_ule_f64(-NaN, -Inf) == true -; run: %fcmp_ule_f64(+NaN, Inf) == true -; run: %fcmp_ule_f64(-NaN, Inf) == true -; run: %fcmp_ule_f64(-0x0.0, +NaN) == true -; run: %fcmp_ule_f64(-0x0.0, -NaN) == true -; run: %fcmp_ule_f64(0x0.0, +NaN) == true -; run: %fcmp_ule_f64(0x0.0, -NaN) == true -; run: %fcmp_ule_f64(-Inf, +NaN) == true -; run: %fcmp_ule_f64(-Inf, -NaN) == true -; run: %fcmp_ule_f64(Inf, +NaN) == true -; run: %fcmp_ule_f64(Inf, -NaN) == true +; run: %fcmp_ule_f64(+NaN, -0x1.0) == 1 +; run: %fcmp_ule_f64(-NaN, -0x1.0) == 1 +; run: %fcmp_ule_f64(+NaN, 0x1.0) == 1 +; run: %fcmp_ule_f64(-NaN, 0x1.0) == 1 +; run: %fcmp_ule_f64(+NaN, -0x0.0) == 1 +; run: %fcmp_ule_f64(-NaN, -0x0.0) == 1 +; run: %fcmp_ule_f64(+NaN, 0x0.0) == 1 +; run: %fcmp_ule_f64(-NaN, 0x0.0) == 1 +; run: %fcmp_ule_f64(+NaN, -Inf) == 1 +; run: %fcmp_ule_f64(-NaN, -Inf) == 1 +; run: %fcmp_ule_f64(+NaN, Inf) == 1 +; run: %fcmp_ule_f64(-NaN, Inf) == 1 +; run: %fcmp_ule_f64(-0x0.0, +NaN) == 1 +; run: %fcmp_ule_f64(-0x0.0, -NaN) == 1 +; run: 
%fcmp_ule_f64(0x0.0, +NaN) == 1 +; run: %fcmp_ule_f64(0x0.0, -NaN) == 1 +; run: %fcmp_ule_f64(-Inf, +NaN) == 1 +; run: %fcmp_ule_f64(-Inf, -NaN) == 1 +; run: %fcmp_ule_f64(Inf, +NaN) == 1 +; run: %fcmp_ule_f64(Inf, -NaN) == 1 -; run: %fcmp_ule_f64(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ule_f64(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ule_f64(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ule_f64(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ule_f64(+NaN:0x1, +NaN) == true -; run: %fcmp_ule_f64(+NaN:0x1, -NaN) == true -; run: %fcmp_ule_f64(-NaN:0x1, -NaN) == true -; run: %fcmp_ule_f64(-NaN:0x1, +NaN) == true +; run: %fcmp_ule_f64(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ule_f64(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ule_f64(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ule_f64(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ule_f64(+NaN:0x1, +NaN) == 1 +; run: %fcmp_ule_f64(+NaN:0x1, -NaN) == 1 +; run: %fcmp_ule_f64(-NaN:0x1, -NaN) == 1 +; run: %fcmp_ule_f64(-NaN:0x1, +NaN) == 1 -; run: %fcmp_ule_f64(+NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_ule_f64(-NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_ule_f64(+NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_ule_f64(-NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_ule_f64(+NaN:0x800000000001, +NaN) == true -; run: %fcmp_ule_f64(+NaN:0x800000000001, -NaN) == true -; run: %fcmp_ule_f64(-NaN:0x800000000001, -NaN) == true -; run: %fcmp_ule_f64(-NaN:0x800000000001, +NaN) == true +; run: %fcmp_ule_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_ule_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_ule_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_ule_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_ule_f64(+NaN:0x800000000001, +NaN) == 1 +; run: %fcmp_ule_f64(+NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_ule_f64(-NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_ule_f64(-NaN:0x800000000001, +NaN) == 1 ; sNaN's -; run: %fcmp_ule_f64(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ule_f64(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ule_f64(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ule_f64(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_ule_f64(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ule_f64(-sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ule_f64(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ule_f64(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_ule_f64(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_ule_f64(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_ule_f64(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_ule_f64(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_ule_f64(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_ule_f64(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_ule_f64(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_ule_f64(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_ule_f64(+sNaN:0x1, -Inf) == true -; run: %fcmp_ule_f64(-sNaN:0x1, -Inf) == true -; run: %fcmp_ule_f64(+sNaN:0x1, Inf) == true -; run: %fcmp_ule_f64(-sNaN:0x1, Inf) == true -; run: %fcmp_ule_f64(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_ule_f64(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_ule_f64(0x0.0, +sNaN:0x1) == true -; run: %fcmp_ule_f64(0x0.0, -sNaN:0x1) == true -; run: %fcmp_ule_f64(-Inf, +sNaN:0x1) == true -; run: %fcmp_ule_f64(-Inf, -sNaN:0x1) == true -; run: %fcmp_ule_f64(Inf, +sNaN:0x1) == true -; run: %fcmp_ule_f64(Inf, -sNaN:0x1) == true +; run: %fcmp_ule_f64(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ule_f64(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ule_f64(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ule_f64(-sNaN:0x1, 0x1.0) == 1 +; run: 
%fcmp_ule_f64(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ule_f64(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ule_f64(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ule_f64(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ule_f64(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_ule_f64(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_ule_f64(+sNaN:0x1, Inf) == 1 +; run: %fcmp_ule_f64(-sNaN:0x1, Inf) == 1 +; run: %fcmp_ule_f64(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ule_f64(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ule_f64(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ule_f64(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ule_f64(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_ule_f64(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_ule_f64(Inf, +sNaN:0x1) == 1 +; run: %fcmp_ule_f64(Inf, -sNaN:0x1) == 1 -; run: %fcmp_ule_f64(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ule_f64(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ule_f64(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ule_f64(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ule_f64(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ule_f64(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ule_f64(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ule_f64(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_ule_f64(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ule_f64(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ule_f64(+sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ule_f64(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ule_f64(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ule_f64(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ule_f64(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ule_f64(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_ule_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_ule_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_ule_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_ule_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_ule_f64(+sNaN:0x800000000001, +sNaN:0x1) == true -; run: %fcmp_ule_f64(+sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_ule_f64(-sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_ule_f64(-sNaN:0x800000000001, +sNaN:0x1) == true +; run: %fcmp_ule_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_ule_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_ule_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_ule_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_ule_f64(+sNaN:0x800000000001, +sNaN:0x1) == 1 +; run: %fcmp_ule_f64(+sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_ule_f64(-sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_ule_f64(-sNaN:0x800000000001, +sNaN:0x1) == 1 diff --git a/cranelift/filetests/filetests/runtests/fcmp-ult.clif b/cranelift/filetests/filetests/runtests/fcmp-ult.clif index 8982e235d9f3..9378cb792e47 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-ult.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-ult.clif @@ -4,316 +4,316 @@ target x86_64 target s390x target riscv64 -function %fcmp_ult_f32(f32, f32) -> b1 { +function %fcmp_ult_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp ult v0, v1 return v2 } -; run: %fcmp_ult_f32(0x0.5, 0x0.5) == false -; run: %fcmp_ult_f32(0x1.0, 0x1.0) == false -; run: %fcmp_ult_f32(-0x1.0, 0x1.0) == true -; run: %fcmp_ult_f32(0x1.0, -0x1.0) == false -; run: %fcmp_ult_f32(0x0.5, 0x1.0) == true -; run: %fcmp_ult_f32(0x1.5, 0x2.9) == true -; run: %fcmp_ult_f32(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_ult_f32(0x1.4cccccp0, 0x1.8p0) == true -; run: %fcmp_ult_f32(0x1.b33334p0, 0x1.99999ap-2) == false -; run: %fcmp_ult_f32(0x1.333334p-1, 0x1.666666p1) == true -; run: 
%fcmp_ult_f32(-0x0.5, -0x1.0) == false -; run: %fcmp_ult_f32(-0x1.5, -0x2.9) == false -; run: %fcmp_ult_f32(-0x1.1p10, -0x1.333334p-1) == true -; run: %fcmp_ult_f32(-0x1.99999ap-2, -0x1.4cccccp0) == false -; run: %fcmp_ult_f32(-0x1.8p0, -0x1.b33334p0) == false -; run: %fcmp_ult_f32(-0x1.4p1, -0x1.666666p1) == false -; run: %fcmp_ult_f32(0x0.5, -0x1.0) == false -; run: %fcmp_ult_f32(0x1.b33334p0, -0x1.b33334p0) == false +; run: %fcmp_ult_f32(0x0.5, 0x0.5) == 0 +; run: %fcmp_ult_f32(0x1.0, 0x1.0) == 0 +; run: %fcmp_ult_f32(-0x1.0, 0x1.0) == 1 +; run: %fcmp_ult_f32(0x1.0, -0x1.0) == 0 +; run: %fcmp_ult_f32(0x0.5, 0x1.0) == 1 +; run: %fcmp_ult_f32(0x1.5, 0x2.9) == 1 +; run: %fcmp_ult_f32(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_ult_f32(0x1.4cccccp0, 0x1.8p0) == 1 +; run: %fcmp_ult_f32(0x1.b33334p0, 0x1.99999ap-2) == 0 +; run: %fcmp_ult_f32(0x1.333334p-1, 0x1.666666p1) == 1 +; run: %fcmp_ult_f32(-0x0.5, -0x1.0) == 0 +; run: %fcmp_ult_f32(-0x1.5, -0x2.9) == 0 +; run: %fcmp_ult_f32(-0x1.1p10, -0x1.333334p-1) == 1 +; run: %fcmp_ult_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 0 +; run: %fcmp_ult_f32(-0x1.8p0, -0x1.b33334p0) == 0 +; run: %fcmp_ult_f32(-0x1.4p1, -0x1.666666p1) == 0 +; run: %fcmp_ult_f32(0x0.5, -0x1.0) == 0 +; run: %fcmp_ult_f32(0x1.b33334p0, -0x1.b33334p0) == 0 ; Zeroes -; run: %fcmp_ult_f32(0x0.0, 0x0.0) == false -; run: %fcmp_ult_f32(-0x0.0, -0x0.0) == false -; run: %fcmp_ult_f32(0x0.0, -0x0.0) == false -; run: %fcmp_ult_f32(-0x0.0, 0x0.0) == false +; run: %fcmp_ult_f32(0x0.0, 0x0.0) == 0 +; run: %fcmp_ult_f32(-0x0.0, -0x0.0) == 0 +; run: %fcmp_ult_f32(0x0.0, -0x0.0) == 0 +; run: %fcmp_ult_f32(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_ult_f32(Inf, Inf) == false -; run: %fcmp_ult_f32(-Inf, -Inf) == false -; run: %fcmp_ult_f32(Inf, -Inf) == false -; run: %fcmp_ult_f32(-Inf, Inf) == true +; run: %fcmp_ult_f32(Inf, Inf) == 0 +; run: %fcmp_ult_f32(-Inf, -Inf) == 0 +; run: %fcmp_ult_f32(Inf, -Inf) == 0 +; run: %fcmp_ult_f32(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_ult_f32(0x0.0, Inf) == true -; run: %fcmp_ult_f32(-0x0.0, Inf) == true -; run: %fcmp_ult_f32(0x0.0, -Inf) == false -; run: %fcmp_ult_f32(-0x0.0, -Inf) == false -; run: %fcmp_ult_f32(Inf, 0x0.0) == false -; run: %fcmp_ult_f32(Inf, -0x0.0) == false -; run: %fcmp_ult_f32(-Inf, 0x0.0) == true -; run: %fcmp_ult_f32(-Inf, -0x0.0) == true +; run: %fcmp_ult_f32(0x0.0, Inf) == 1 +; run: %fcmp_ult_f32(-0x0.0, Inf) == 1 +; run: %fcmp_ult_f32(0x0.0, -Inf) == 0 +; run: %fcmp_ult_f32(-0x0.0, -Inf) == 0 +; run: %fcmp_ult_f32(Inf, 0x0.0) == 0 +; run: %fcmp_ult_f32(Inf, -0x0.0) == 0 +; run: %fcmp_ult_f32(-Inf, 0x0.0) == 1 +; run: %fcmp_ult_f32(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_ult_f32(0x1.0p-23, 0x1.0p-23) == false -; run: %fcmp_ult_f32(0x1.fffffep127, 0x1.fffffep127) == false -; run: %fcmp_ult_f32(0x1.0p-126, 0x1.0p-126) == false -; run: %fcmp_ult_f32(0x1.0p-23, 0x1.fffffep127) == true -; run: %fcmp_ult_f32(0x1.0p-23, 0x1.0p-126) == false -; run: %fcmp_ult_f32(0x1.0p-126, 0x1.fffffep127) == true +; run: %fcmp_ult_f32(0x1.0p-23, 0x1.0p-23) == 0 +; run: %fcmp_ult_f32(0x1.fffffep127, 0x1.fffffep127) == 0 +; run: %fcmp_ult_f32(0x1.0p-126, 0x1.0p-126) == 0 +; run: %fcmp_ult_f32(0x1.0p-23, 0x1.fffffep127) == 1 +; run: %fcmp_ult_f32(0x1.0p-23, 0x1.0p-126) == 0 +; run: %fcmp_ult_f32(0x1.0p-126, 0x1.fffffep127) == 1 ; Subnormals -; run: %fcmp_ult_f32(0x0.800002p-126, -0x0.800002p-126) == false -; run: %fcmp_ult_f32(-0x0.800002p-126, 0x0.800002p-126) == true -; run: %fcmp_ult_f32(0x0.800002p-126, 0x0.0) == false -; run: 
%fcmp_ult_f32(-0x0.800002p-126, 0x0.0) == true -; run: %fcmp_ult_f32(0x0.800002p-126, -0x0.0) == false -; run: %fcmp_ult_f32(-0x0.800002p-126, -0x0.0) == true -; run: %fcmp_ult_f32(0x0.0, 0x0.800002p-126) == true -; run: %fcmp_ult_f32(0x0.0, -0x0.800002p-126) == false -; run: %fcmp_ult_f32(-0x0.0, 0x0.800002p-126) == true -; run: %fcmp_ult_f32(-0x0.0, -0x0.800002p-126) == false +; run: %fcmp_ult_f32(0x0.800002p-126, -0x0.800002p-126) == 0 +; run: %fcmp_ult_f32(-0x0.800002p-126, 0x0.800002p-126) == 1 +; run: %fcmp_ult_f32(0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_ult_f32(-0x0.800002p-126, 0x0.0) == 1 +; run: %fcmp_ult_f32(0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_ult_f32(-0x0.800002p-126, -0x0.0) == 1 +; run: %fcmp_ult_f32(0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_ult_f32(0x0.0, -0x0.800002p-126) == 0 +; run: %fcmp_ult_f32(-0x0.0, 0x0.800002p-126) == 1 +; run: %fcmp_ult_f32(-0x0.0, -0x0.800002p-126) == 0 ; NaN's -; run: %fcmp_ult_f32(+NaN, +NaN) == true -; run: %fcmp_ult_f32(-NaN, -NaN) == true -; run: %fcmp_ult_f32(+NaN, -NaN) == true -; run: %fcmp_ult_f32(-NaN, +NaN) == true +; run: %fcmp_ult_f32(+NaN, +NaN) == 1 +; run: %fcmp_ult_f32(-NaN, -NaN) == 1 +; run: %fcmp_ult_f32(+NaN, -NaN) == 1 +; run: %fcmp_ult_f32(-NaN, +NaN) == 1 -; run: %fcmp_ult_f32(+NaN, -0x1.0) == true -; run: %fcmp_ult_f32(-NaN, -0x1.0) == true -; run: %fcmp_ult_f32(+NaN, 0x1.0) == true -; run: %fcmp_ult_f32(-NaN, 0x1.0) == true -; run: %fcmp_ult_f32(+NaN, -0x0.0) == true -; run: %fcmp_ult_f32(-NaN, -0x0.0) == true -; run: %fcmp_ult_f32(+NaN, 0x0.0) == true -; run: %fcmp_ult_f32(-NaN, 0x0.0) == true -; run: %fcmp_ult_f32(+NaN, -Inf) == true -; run: %fcmp_ult_f32(-NaN, -Inf) == true -; run: %fcmp_ult_f32(+NaN, Inf) == true -; run: %fcmp_ult_f32(-NaN, Inf) == true -; run: %fcmp_ult_f32(-0x0.0, +NaN) == true -; run: %fcmp_ult_f32(-0x0.0, -NaN) == true -; run: %fcmp_ult_f32(0x0.0, +NaN) == true -; run: %fcmp_ult_f32(0x0.0, -NaN) == true -; run: %fcmp_ult_f32(-Inf, +NaN) == true -; run: %fcmp_ult_f32(-Inf, -NaN) == true -; run: %fcmp_ult_f32(Inf, +NaN) == true -; run: %fcmp_ult_f32(Inf, -NaN) == true +; run: %fcmp_ult_f32(+NaN, -0x1.0) == 1 +; run: %fcmp_ult_f32(-NaN, -0x1.0) == 1 +; run: %fcmp_ult_f32(+NaN, 0x1.0) == 1 +; run: %fcmp_ult_f32(-NaN, 0x1.0) == 1 +; run: %fcmp_ult_f32(+NaN, -0x0.0) == 1 +; run: %fcmp_ult_f32(-NaN, -0x0.0) == 1 +; run: %fcmp_ult_f32(+NaN, 0x0.0) == 1 +; run: %fcmp_ult_f32(-NaN, 0x0.0) == 1 +; run: %fcmp_ult_f32(+NaN, -Inf) == 1 +; run: %fcmp_ult_f32(-NaN, -Inf) == 1 +; run: %fcmp_ult_f32(+NaN, Inf) == 1 +; run: %fcmp_ult_f32(-NaN, Inf) == 1 +; run: %fcmp_ult_f32(-0x0.0, +NaN) == 1 +; run: %fcmp_ult_f32(-0x0.0, -NaN) == 1 +; run: %fcmp_ult_f32(0x0.0, +NaN) == 1 +; run: %fcmp_ult_f32(0x0.0, -NaN) == 1 +; run: %fcmp_ult_f32(-Inf, +NaN) == 1 +; run: %fcmp_ult_f32(-Inf, -NaN) == 1 +; run: %fcmp_ult_f32(Inf, +NaN) == 1 +; run: %fcmp_ult_f32(Inf, -NaN) == 1 -; run: %fcmp_ult_f32(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ult_f32(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ult_f32(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ult_f32(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ult_f32(+NaN:0x1, +NaN) == true -; run: %fcmp_ult_f32(+NaN:0x1, -NaN) == true -; run: %fcmp_ult_f32(-NaN:0x1, -NaN) == true -; run: %fcmp_ult_f32(-NaN:0x1, +NaN) == true +; run: %fcmp_ult_f32(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ult_f32(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ult_f32(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ult_f32(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ult_f32(+NaN:0x1, +NaN) == 1 +; run: %fcmp_ult_f32(+NaN:0x1, -NaN) 
== 1 +; run: %fcmp_ult_f32(-NaN:0x1, -NaN) == 1 +; run: %fcmp_ult_f32(-NaN:0x1, +NaN) == 1 -; run: %fcmp_ult_f32(+NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_ult_f32(-NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_ult_f32(+NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_ult_f32(-NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_ult_f32(+NaN:0x80001, +NaN) == true -; run: %fcmp_ult_f32(+NaN:0x80001, -NaN) == true -; run: %fcmp_ult_f32(-NaN:0x80001, -NaN) == true -; run: %fcmp_ult_f32(-NaN:0x80001, +NaN) == true +; run: %fcmp_ult_f32(+NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_ult_f32(-NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_ult_f32(+NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_ult_f32(-NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_ult_f32(+NaN:0x80001, +NaN) == 1 +; run: %fcmp_ult_f32(+NaN:0x80001, -NaN) == 1 +; run: %fcmp_ult_f32(-NaN:0x80001, -NaN) == 1 +; run: %fcmp_ult_f32(-NaN:0x80001, +NaN) == 1 ; sNaN's -; run: %fcmp_ult_f32(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ult_f32(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ult_f32(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ult_f32(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_ult_f32(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ult_f32(-sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ult_f32(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ult_f32(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_ult_f32(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_ult_f32(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_ult_f32(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_ult_f32(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_ult_f32(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_ult_f32(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_ult_f32(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_ult_f32(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_ult_f32(+sNaN:0x1, -Inf) == true -; run: %fcmp_ult_f32(-sNaN:0x1, -Inf) == true -; run: %fcmp_ult_f32(+sNaN:0x1, Inf) == true -; run: %fcmp_ult_f32(-sNaN:0x1, Inf) == true -; run: %fcmp_ult_f32(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_ult_f32(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_ult_f32(0x0.0, +sNaN:0x1) == true -; run: %fcmp_ult_f32(0x0.0, -sNaN:0x1) == true -; run: %fcmp_ult_f32(-Inf, +sNaN:0x1) == true -; run: %fcmp_ult_f32(-Inf, -sNaN:0x1) == true -; run: %fcmp_ult_f32(Inf, +sNaN:0x1) == true -; run: %fcmp_ult_f32(Inf, -sNaN:0x1) == true +; run: %fcmp_ult_f32(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ult_f32(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ult_f32(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ult_f32(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ult_f32(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ult_f32(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ult_f32(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ult_f32(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ult_f32(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_ult_f32(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_ult_f32(+sNaN:0x1, Inf) == 1 +; run: %fcmp_ult_f32(-sNaN:0x1, Inf) == 1 +; run: %fcmp_ult_f32(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ult_f32(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ult_f32(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ult_f32(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ult_f32(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_ult_f32(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_ult_f32(Inf, +sNaN:0x1) == 1 +; run: %fcmp_ult_f32(Inf, -sNaN:0x1) == 1 -; run: %fcmp_ult_f32(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ult_f32(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ult_f32(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ult_f32(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ult_f32(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ult_f32(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ult_f32(-NaN:0x1, +sNaN:0x1) == true -; 
run: %fcmp_ult_f32(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_ult_f32(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ult_f32(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ult_f32(+sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ult_f32(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ult_f32(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ult_f32(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ult_f32(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ult_f32(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_ult_f32(+sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_ult_f32(-sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_ult_f32(+sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_ult_f32(-sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_ult_f32(+sNaN:0x80001, +sNaN:0x1) == true -; run: %fcmp_ult_f32(+sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_ult_f32(-sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_ult_f32(-sNaN:0x80001, +sNaN:0x1) == true +; run: %fcmp_ult_f32(+sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_ult_f32(-sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_ult_f32(+sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_ult_f32(-sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_ult_f32(+sNaN:0x80001, +sNaN:0x1) == 1 +; run: %fcmp_ult_f32(+sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_ult_f32(-sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_ult_f32(-sNaN:0x80001, +sNaN:0x1) == 1 -function %fcmp_ult_f64(f64, f64) -> b1 { +function %fcmp_ult_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp ult v0, v1 return v2 } -; run: %fcmp_ult_f64(0x0.5, 0x0.5) == false -; run: %fcmp_ult_f64(0x1.0, 0x1.0) == false -; run: %fcmp_ult_f64(-0x1.0, 0x1.0) == true -; run: %fcmp_ult_f64(0x1.0, -0x1.0) == false -; run: %fcmp_ult_f64(0x0.5, 0x1.0) == true -; run: %fcmp_ult_f64(0x1.5, 0x2.9) == true -; run: %fcmp_ult_f64(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_ult_f64(0x1.4cccccccccccdp0, 0x1.8p0) == true -; run: %fcmp_ult_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == false -; run: %fcmp_ult_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == true -; run: %fcmp_ult_f64(-0x0.5, -0x1.0) == false -; run: %fcmp_ult_f64(-0x1.5, -0x2.9) == false -; run: %fcmp_ult_f64(-0x1.1p10, -0x1.3333333333333p-1) == true -; run: %fcmp_ult_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == false -; run: %fcmp_ult_f64(-0x1.8p0, -0x1.b333333333333p0) == false -; run: %fcmp_ult_f64(-0x1.4p1, -0x1.6666666666666p1) == false -; run: %fcmp_ult_f64(0x0.5, -0x1.0) == false -; run: %fcmp_ult_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == false +; run: %fcmp_ult_f64(0x0.5, 0x0.5) == 0 +; run: %fcmp_ult_f64(0x1.0, 0x1.0) == 0 +; run: %fcmp_ult_f64(-0x1.0, 0x1.0) == 1 +; run: %fcmp_ult_f64(0x1.0, -0x1.0) == 0 +; run: %fcmp_ult_f64(0x0.5, 0x1.0) == 1 +; run: %fcmp_ult_f64(0x1.5, 0x2.9) == 1 +; run: %fcmp_ult_f64(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_ult_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 1 +; run: %fcmp_ult_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 0 +; run: %fcmp_ult_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 1 +; run: %fcmp_ult_f64(-0x0.5, -0x1.0) == 0 +; run: %fcmp_ult_f64(-0x1.5, -0x2.9) == 0 +; run: %fcmp_ult_f64(-0x1.1p10, -0x1.3333333333333p-1) == 1 +; run: %fcmp_ult_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 0 +; run: %fcmp_ult_f64(-0x1.8p0, -0x1.b333333333333p0) == 0 +; run: %fcmp_ult_f64(-0x1.4p1, -0x1.6666666666666p1) == 0 +; run: %fcmp_ult_f64(0x0.5, -0x1.0) == 0 +; run: %fcmp_ult_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 0 ; Zeroes -; run: %fcmp_ult_f64(0x0.0, 0x0.0) == false -; run: %fcmp_ult_f64(-0x0.0, -0x0.0) == false -; run: %fcmp_ult_f64(0x0.0, 
-0x0.0) == false -; run: %fcmp_ult_f64(-0x0.0, 0x0.0) == false +; run: %fcmp_ult_f64(0x0.0, 0x0.0) == 0 +; run: %fcmp_ult_f64(-0x0.0, -0x0.0) == 0 +; run: %fcmp_ult_f64(0x0.0, -0x0.0) == 0 +; run: %fcmp_ult_f64(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_ult_f64(Inf, Inf) == false -; run: %fcmp_ult_f64(-Inf, -Inf) == false -; run: %fcmp_ult_f64(Inf, -Inf) == false -; run: %fcmp_ult_f64(-Inf, Inf) == true +; run: %fcmp_ult_f64(Inf, Inf) == 0 +; run: %fcmp_ult_f64(-Inf, -Inf) == 0 +; run: %fcmp_ult_f64(Inf, -Inf) == 0 +; run: %fcmp_ult_f64(-Inf, Inf) == 1 ; Inf/Zero -; run: %fcmp_ult_f64(0x0.0, Inf) == true -; run: %fcmp_ult_f64(-0x0.0, Inf) == true -; run: %fcmp_ult_f64(0x0.0, -Inf) == false -; run: %fcmp_ult_f64(-0x0.0, -Inf) == false -; run: %fcmp_ult_f64(Inf, 0x0.0) == false -; run: %fcmp_ult_f64(Inf, -0x0.0) == false -; run: %fcmp_ult_f64(-Inf, 0x0.0) == true -; run: %fcmp_ult_f64(-Inf, -0x0.0) == true +; run: %fcmp_ult_f64(0x0.0, Inf) == 1 +; run: %fcmp_ult_f64(-0x0.0, Inf) == 1 +; run: %fcmp_ult_f64(0x0.0, -Inf) == 0 +; run: %fcmp_ult_f64(-0x0.0, -Inf) == 0 +; run: %fcmp_ult_f64(Inf, 0x0.0) == 0 +; run: %fcmp_ult_f64(Inf, -0x0.0) == 0 +; run: %fcmp_ult_f64(-Inf, 0x0.0) == 1 +; run: %fcmp_ult_f64(-Inf, -0x0.0) == 1 ; Epsilon / Max / Min Positive -; run: %fcmp_ult_f64(0x1.0p-52, 0x1.0p-52) == false -; run: %fcmp_ult_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == false -; run: %fcmp_ult_f64(0x1.0p-1022, 0x1.0p-1022) == false -; run: %fcmp_ult_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == true -; run: %fcmp_ult_f64(0x1.0p-52, 0x1.0p-1022) == false -; run: %fcmp_ult_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == true +; run: %fcmp_ult_f64(0x1.0p-52, 0x1.0p-52) == 0 +; run: %fcmp_ult_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_ult_f64(0x1.0p-1022, 0x1.0p-1022) == 0 +; run: %fcmp_ult_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 1 +; run: %fcmp_ult_f64(0x1.0p-52, 0x1.0p-1022) == 0 +; run: %fcmp_ult_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 1 ; Subnormals -; run: %fcmp_ult_f64(0x0.8p-1022, -0x0.8p-1022) == false -; run: %fcmp_ult_f64(-0x0.8p-1022, 0x0.8p-1022) == true -; run: %fcmp_ult_f64(0x0.8p-1022, 0x0.0) == false -; run: %fcmp_ult_f64(-0x0.8p-1022, 0x0.0) == true -; run: %fcmp_ult_f64(0x0.8p-1022, -0x0.0) == false -; run: %fcmp_ult_f64(-0x0.8p-1022, -0x0.0) == true -; run: %fcmp_ult_f64(0x0.0, 0x0.8p-1022) == true -; run: %fcmp_ult_f64(0x0.0, -0x0.8p-1022) == false -; run: %fcmp_ult_f64(-0x0.0, 0x0.8p-1022) == true -; run: %fcmp_ult_f64(-0x0.0, -0x0.8p-1022) == false +; run: %fcmp_ult_f64(0x0.8p-1022, -0x0.8p-1022) == 0 +; run: %fcmp_ult_f64(-0x0.8p-1022, 0x0.8p-1022) == 1 +; run: %fcmp_ult_f64(0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_ult_f64(-0x0.8p-1022, 0x0.0) == 1 +; run: %fcmp_ult_f64(0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_ult_f64(-0x0.8p-1022, -0x0.0) == 1 +; run: %fcmp_ult_f64(0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_ult_f64(0x0.0, -0x0.8p-1022) == 0 +; run: %fcmp_ult_f64(-0x0.0, 0x0.8p-1022) == 1 +; run: %fcmp_ult_f64(-0x0.0, -0x0.8p-1022) == 0 ; NaN's -; run: %fcmp_ult_f64(+NaN, +NaN) == true -; run: %fcmp_ult_f64(-NaN, -NaN) == true -; run: %fcmp_ult_f64(+NaN, -NaN) == true -; run: %fcmp_ult_f64(-NaN, +NaN) == true +; run: %fcmp_ult_f64(+NaN, +NaN) == 1 +; run: %fcmp_ult_f64(-NaN, -NaN) == 1 +; run: %fcmp_ult_f64(+NaN, -NaN) == 1 +; run: %fcmp_ult_f64(-NaN, +NaN) == 1 -; run: %fcmp_ult_f64(+NaN, -0x1.0) == true -; run: %fcmp_ult_f64(-NaN, -0x1.0) == true -; run: %fcmp_ult_f64(+NaN, 0x1.0) == true -; run: %fcmp_ult_f64(-NaN, 0x1.0) == true -; run: 
%fcmp_ult_f64(+NaN, -0x0.0) == true -; run: %fcmp_ult_f64(-NaN, -0x0.0) == true -; run: %fcmp_ult_f64(+NaN, 0x0.0) == true -; run: %fcmp_ult_f64(-NaN, 0x0.0) == true -; run: %fcmp_ult_f64(+NaN, -Inf) == true -; run: %fcmp_ult_f64(-NaN, -Inf) == true -; run: %fcmp_ult_f64(+NaN, Inf) == true -; run: %fcmp_ult_f64(-NaN, Inf) == true -; run: %fcmp_ult_f64(-0x0.0, +NaN) == true -; run: %fcmp_ult_f64(-0x0.0, -NaN) == true -; run: %fcmp_ult_f64(0x0.0, +NaN) == true -; run: %fcmp_ult_f64(0x0.0, -NaN) == true -; run: %fcmp_ult_f64(-Inf, +NaN) == true -; run: %fcmp_ult_f64(-Inf, -NaN) == true -; run: %fcmp_ult_f64(Inf, +NaN) == true -; run: %fcmp_ult_f64(Inf, -NaN) == true +; run: %fcmp_ult_f64(+NaN, -0x1.0) == 1 +; run: %fcmp_ult_f64(-NaN, -0x1.0) == 1 +; run: %fcmp_ult_f64(+NaN, 0x1.0) == 1 +; run: %fcmp_ult_f64(-NaN, 0x1.0) == 1 +; run: %fcmp_ult_f64(+NaN, -0x0.0) == 1 +; run: %fcmp_ult_f64(-NaN, -0x0.0) == 1 +; run: %fcmp_ult_f64(+NaN, 0x0.0) == 1 +; run: %fcmp_ult_f64(-NaN, 0x0.0) == 1 +; run: %fcmp_ult_f64(+NaN, -Inf) == 1 +; run: %fcmp_ult_f64(-NaN, -Inf) == 1 +; run: %fcmp_ult_f64(+NaN, Inf) == 1 +; run: %fcmp_ult_f64(-NaN, Inf) == 1 +; run: %fcmp_ult_f64(-0x0.0, +NaN) == 1 +; run: %fcmp_ult_f64(-0x0.0, -NaN) == 1 +; run: %fcmp_ult_f64(0x0.0, +NaN) == 1 +; run: %fcmp_ult_f64(0x0.0, -NaN) == 1 +; run: %fcmp_ult_f64(-Inf, +NaN) == 1 +; run: %fcmp_ult_f64(-Inf, -NaN) == 1 +; run: %fcmp_ult_f64(Inf, +NaN) == 1 +; run: %fcmp_ult_f64(Inf, -NaN) == 1 -; run: %fcmp_ult_f64(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ult_f64(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ult_f64(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_ult_f64(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_ult_f64(+NaN:0x1, +NaN) == true -; run: %fcmp_ult_f64(+NaN:0x1, -NaN) == true -; run: %fcmp_ult_f64(-NaN:0x1, -NaN) == true -; run: %fcmp_ult_f64(-NaN:0x1, +NaN) == true +; run: %fcmp_ult_f64(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ult_f64(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ult_f64(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ult_f64(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ult_f64(+NaN:0x1, +NaN) == 1 +; run: %fcmp_ult_f64(+NaN:0x1, -NaN) == 1 +; run: %fcmp_ult_f64(-NaN:0x1, -NaN) == 1 +; run: %fcmp_ult_f64(-NaN:0x1, +NaN) == 1 -; run: %fcmp_ult_f64(+NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_ult_f64(-NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_ult_f64(+NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_ult_f64(-NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_ult_f64(+NaN:0x800000000001, +NaN) == true -; run: %fcmp_ult_f64(+NaN:0x800000000001, -NaN) == true -; run: %fcmp_ult_f64(-NaN:0x800000000001, -NaN) == true -; run: %fcmp_ult_f64(-NaN:0x800000000001, +NaN) == true +; run: %fcmp_ult_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_ult_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_ult_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_ult_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_ult_f64(+NaN:0x800000000001, +NaN) == 1 +; run: %fcmp_ult_f64(+NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_ult_f64(-NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_ult_f64(-NaN:0x800000000001, +NaN) == 1 ; sNaN's -; run: %fcmp_ult_f64(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ult_f64(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ult_f64(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ult_f64(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_ult_f64(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ult_f64(-sNaN:0x1, -sNaN:0x1) == 1 +; run: 
%fcmp_ult_f64(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ult_f64(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_ult_f64(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_ult_f64(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_ult_f64(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_ult_f64(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_ult_f64(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_ult_f64(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_ult_f64(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_ult_f64(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_ult_f64(+sNaN:0x1, -Inf) == true -; run: %fcmp_ult_f64(-sNaN:0x1, -Inf) == true -; run: %fcmp_ult_f64(+sNaN:0x1, Inf) == true -; run: %fcmp_ult_f64(-sNaN:0x1, Inf) == true -; run: %fcmp_ult_f64(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_ult_f64(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_ult_f64(0x0.0, +sNaN:0x1) == true -; run: %fcmp_ult_f64(0x0.0, -sNaN:0x1) == true -; run: %fcmp_ult_f64(-Inf, +sNaN:0x1) == true -; run: %fcmp_ult_f64(-Inf, -sNaN:0x1) == true -; run: %fcmp_ult_f64(Inf, +sNaN:0x1) == true -; run: %fcmp_ult_f64(Inf, -sNaN:0x1) == true +; run: %fcmp_ult_f64(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ult_f64(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_ult_f64(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ult_f64(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_ult_f64(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ult_f64(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_ult_f64(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ult_f64(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_ult_f64(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_ult_f64(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_ult_f64(+sNaN:0x1, Inf) == 1 +; run: %fcmp_ult_f64(-sNaN:0x1, Inf) == 1 +; run: %fcmp_ult_f64(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ult_f64(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ult_f64(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_ult_f64(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_ult_f64(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_ult_f64(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_ult_f64(Inf, +sNaN:0x1) == 1 +; run: %fcmp_ult_f64(Inf, -sNaN:0x1) == 1 -; run: %fcmp_ult_f64(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ult_f64(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ult_f64(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_ult_f64(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_ult_f64(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ult_f64(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_ult_f64(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_ult_f64(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_ult_f64(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ult_f64(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ult_f64(+sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_ult_f64(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_ult_f64(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ult_f64(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_ult_f64(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_ult_f64(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_ult_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_ult_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_ult_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_ult_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_ult_f64(+sNaN:0x800000000001, +sNaN:0x1) == true -; run: %fcmp_ult_f64(+sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_ult_f64(-sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_ult_f64(-sNaN:0x800000000001, +sNaN:0x1) == true +; run: %fcmp_ult_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_ult_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_ult_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_ult_f64(-sNaN:0x800000000001, 
+sNaN:0x800000000001) == 1 +; run: %fcmp_ult_f64(+sNaN:0x800000000001, +sNaN:0x1) == 1 +; run: %fcmp_ult_f64(+sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_ult_f64(-sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_ult_f64(-sNaN:0x800000000001, +sNaN:0x1) == 1 diff --git a/cranelift/filetests/filetests/runtests/fcmp-uno.clif b/cranelift/filetests/filetests/runtests/fcmp-uno.clif index 0fc354df259e..ecd19a904d82 100644 --- a/cranelift/filetests/filetests/runtests/fcmp-uno.clif +++ b/cranelift/filetests/filetests/runtests/fcmp-uno.clif @@ -5,316 +5,316 @@ target s390x target riscv64 -function %fcmp_uno_f32(f32, f32) -> b1 { +function %fcmp_uno_f32(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = fcmp uno v0, v1 return v2 } -; run: %fcmp_uno_f32(0x0.5, 0x0.5) == false -; run: %fcmp_uno_f32(0x1.0, 0x1.0) == false -; run: %fcmp_uno_f32(-0x1.0, 0x1.0) == false -; run: %fcmp_uno_f32(0x1.0, -0x1.0) == false -; run: %fcmp_uno_f32(0x0.5, 0x1.0) == false -; run: %fcmp_uno_f32(0x1.5, 0x2.9) == false -; run: %fcmp_uno_f32(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_uno_f32(0x1.4cccccp0, 0x1.8p0) == false -; run: %fcmp_uno_f32(0x1.b33334p0, 0x1.99999ap-2) == false -; run: %fcmp_uno_f32(0x1.333334p-1, 0x1.666666p1) == false -; run: %fcmp_uno_f32(-0x0.5, -0x1.0) == false -; run: %fcmp_uno_f32(-0x1.5, -0x2.9) == false -; run: %fcmp_uno_f32(-0x1.1p10, -0x1.333334p-1) == false -; run: %fcmp_uno_f32(-0x1.99999ap-2, -0x1.4cccccp0) == false -; run: %fcmp_uno_f32(-0x1.8p0, -0x1.b33334p0) == false -; run: %fcmp_uno_f32(-0x1.4p1, -0x1.666666p1) == false -; run: %fcmp_uno_f32(0x0.5, -0x1.0) == false -; run: %fcmp_uno_f32(0x1.b33334p0, -0x1.b33334p0) == false +; run: %fcmp_uno_f32(0x0.5, 0x0.5) == 0 +; run: %fcmp_uno_f32(0x1.0, 0x1.0) == 0 +; run: %fcmp_uno_f32(-0x1.0, 0x1.0) == 0 +; run: %fcmp_uno_f32(0x1.0, -0x1.0) == 0 +; run: %fcmp_uno_f32(0x0.5, 0x1.0) == 0 +; run: %fcmp_uno_f32(0x1.5, 0x2.9) == 0 +; run: %fcmp_uno_f32(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_uno_f32(0x1.4cccccp0, 0x1.8p0) == 0 +; run: %fcmp_uno_f32(0x1.b33334p0, 0x1.99999ap-2) == 0 +; run: %fcmp_uno_f32(0x1.333334p-1, 0x1.666666p1) == 0 +; run: %fcmp_uno_f32(-0x0.5, -0x1.0) == 0 +; run: %fcmp_uno_f32(-0x1.5, -0x2.9) == 0 +; run: %fcmp_uno_f32(-0x1.1p10, -0x1.333334p-1) == 0 +; run: %fcmp_uno_f32(-0x1.99999ap-2, -0x1.4cccccp0) == 0 +; run: %fcmp_uno_f32(-0x1.8p0, -0x1.b33334p0) == 0 +; run: %fcmp_uno_f32(-0x1.4p1, -0x1.666666p1) == 0 +; run: %fcmp_uno_f32(0x0.5, -0x1.0) == 0 +; run: %fcmp_uno_f32(0x1.b33334p0, -0x1.b33334p0) == 0 ; Zeroes -; run: %fcmp_uno_f32(0x0.0, 0x0.0) == false -; run: %fcmp_uno_f32(-0x0.0, -0x0.0) == false -; run: %fcmp_uno_f32(0x0.0, -0x0.0) == false -; run: %fcmp_uno_f32(-0x0.0, 0x0.0) == false +; run: %fcmp_uno_f32(0x0.0, 0x0.0) == 0 +; run: %fcmp_uno_f32(-0x0.0, -0x0.0) == 0 +; run: %fcmp_uno_f32(0x0.0, -0x0.0) == 0 +; run: %fcmp_uno_f32(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_uno_f32(Inf, Inf) == false -; run: %fcmp_uno_f32(-Inf, -Inf) == false -; run: %fcmp_uno_f32(Inf, -Inf) == false -; run: %fcmp_uno_f32(-Inf, Inf) == false +; run: %fcmp_uno_f32(Inf, Inf) == 0 +; run: %fcmp_uno_f32(-Inf, -Inf) == 0 +; run: %fcmp_uno_f32(Inf, -Inf) == 0 +; run: %fcmp_uno_f32(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_uno_f32(0x0.0, Inf) == false -; run: %fcmp_uno_f32(-0x0.0, Inf) == false -; run: %fcmp_uno_f32(0x0.0, -Inf) == false -; run: %fcmp_uno_f32(-0x0.0, -Inf) == false -; run: %fcmp_uno_f32(Inf, 0x0.0) == false -; run: %fcmp_uno_f32(Inf, -0x0.0) == false -; run: %fcmp_uno_f32(-Inf, 0x0.0) == false -; run: 
%fcmp_uno_f32(-Inf, -0x0.0) == false +; run: %fcmp_uno_f32(0x0.0, Inf) == 0 +; run: %fcmp_uno_f32(-0x0.0, Inf) == 0 +; run: %fcmp_uno_f32(0x0.0, -Inf) == 0 +; run: %fcmp_uno_f32(-0x0.0, -Inf) == 0 +; run: %fcmp_uno_f32(Inf, 0x0.0) == 0 +; run: %fcmp_uno_f32(Inf, -0x0.0) == 0 +; run: %fcmp_uno_f32(-Inf, 0x0.0) == 0 +; run: %fcmp_uno_f32(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_uno_f32(0x1.0p-23, 0x1.0p-23) == false -; run: %fcmp_uno_f32(0x1.fffffep127, 0x1.fffffep127) == false -; run: %fcmp_uno_f32(0x1.0p-126, 0x1.0p-126) == false -; run: %fcmp_uno_f32(0x1.0p-23, 0x1.fffffep127) == false -; run: %fcmp_uno_f32(0x1.0p-23, 0x1.0p-126) == false -; run: %fcmp_uno_f32(0x1.0p-126, 0x1.fffffep127) == false +; run: %fcmp_uno_f32(0x1.0p-23, 0x1.0p-23) == 0 +; run: %fcmp_uno_f32(0x1.fffffep127, 0x1.fffffep127) == 0 +; run: %fcmp_uno_f32(0x1.0p-126, 0x1.0p-126) == 0 +; run: %fcmp_uno_f32(0x1.0p-23, 0x1.fffffep127) == 0 +; run: %fcmp_uno_f32(0x1.0p-23, 0x1.0p-126) == 0 +; run: %fcmp_uno_f32(0x1.0p-126, 0x1.fffffep127) == 0 ; Subnormals -; run: %fcmp_uno_f32(0x0.800002p-126, -0x0.800002p-126) == false -; run: %fcmp_uno_f32(-0x0.800002p-126, 0x0.800002p-126) == false -; run: %fcmp_uno_f32(0x0.800002p-126, 0x0.0) == false -; run: %fcmp_uno_f32(-0x0.800002p-126, 0x0.0) == false -; run: %fcmp_uno_f32(0x0.800002p-126, -0x0.0) == false -; run: %fcmp_uno_f32(-0x0.800002p-126, -0x0.0) == false -; run: %fcmp_uno_f32(0x0.0, 0x0.800002p-126) == false -; run: %fcmp_uno_f32(0x0.0, -0x0.800002p-126) == false -; run: %fcmp_uno_f32(-0x0.0, 0x0.800002p-126) == false -; run: %fcmp_uno_f32(-0x0.0, -0x0.800002p-126) == false +; run: %fcmp_uno_f32(0x0.800002p-126, -0x0.800002p-126) == 0 +; run: %fcmp_uno_f32(-0x0.800002p-126, 0x0.800002p-126) == 0 +; run: %fcmp_uno_f32(0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_uno_f32(-0x0.800002p-126, 0x0.0) == 0 +; run: %fcmp_uno_f32(0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_uno_f32(-0x0.800002p-126, -0x0.0) == 0 +; run: %fcmp_uno_f32(0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_uno_f32(0x0.0, -0x0.800002p-126) == 0 +; run: %fcmp_uno_f32(-0x0.0, 0x0.800002p-126) == 0 +; run: %fcmp_uno_f32(-0x0.0, -0x0.800002p-126) == 0 ; NaN's -; run: %fcmp_uno_f32(+NaN, +NaN) == true -; run: %fcmp_uno_f32(-NaN, -NaN) == true -; run: %fcmp_uno_f32(+NaN, -NaN) == true -; run: %fcmp_uno_f32(-NaN, +NaN) == true +; run: %fcmp_uno_f32(+NaN, +NaN) == 1 +; run: %fcmp_uno_f32(-NaN, -NaN) == 1 +; run: %fcmp_uno_f32(+NaN, -NaN) == 1 +; run: %fcmp_uno_f32(-NaN, +NaN) == 1 -; run: %fcmp_uno_f32(+NaN, -0x1.0) == true -; run: %fcmp_uno_f32(-NaN, -0x1.0) == true -; run: %fcmp_uno_f32(+NaN, 0x1.0) == true -; run: %fcmp_uno_f32(-NaN, 0x1.0) == true -; run: %fcmp_uno_f32(+NaN, -0x0.0) == true -; run: %fcmp_uno_f32(-NaN, -0x0.0) == true -; run: %fcmp_uno_f32(+NaN, 0x0.0) == true -; run: %fcmp_uno_f32(-NaN, 0x0.0) == true -; run: %fcmp_uno_f32(+NaN, -Inf) == true -; run: %fcmp_uno_f32(-NaN, -Inf) == true -; run: %fcmp_uno_f32(+NaN, Inf) == true -; run: %fcmp_uno_f32(-NaN, Inf) == true -; run: %fcmp_uno_f32(-0x0.0, +NaN) == true -; run: %fcmp_uno_f32(-0x0.0, -NaN) == true -; run: %fcmp_uno_f32(0x0.0, +NaN) == true -; run: %fcmp_uno_f32(0x0.0, -NaN) == true -; run: %fcmp_uno_f32(-Inf, +NaN) == true -; run: %fcmp_uno_f32(-Inf, -NaN) == true -; run: %fcmp_uno_f32(Inf, +NaN) == true -; run: %fcmp_uno_f32(Inf, -NaN) == true +; run: %fcmp_uno_f32(+NaN, -0x1.0) == 1 +; run: %fcmp_uno_f32(-NaN, -0x1.0) == 1 +; run: %fcmp_uno_f32(+NaN, 0x1.0) == 1 +; run: %fcmp_uno_f32(-NaN, 0x1.0) == 1 +; run: 
%fcmp_uno_f32(+NaN, -0x0.0) == 1 +; run: %fcmp_uno_f32(-NaN, -0x0.0) == 1 +; run: %fcmp_uno_f32(+NaN, 0x0.0) == 1 +; run: %fcmp_uno_f32(-NaN, 0x0.0) == 1 +; run: %fcmp_uno_f32(+NaN, -Inf) == 1 +; run: %fcmp_uno_f32(-NaN, -Inf) == 1 +; run: %fcmp_uno_f32(+NaN, Inf) == 1 +; run: %fcmp_uno_f32(-NaN, Inf) == 1 +; run: %fcmp_uno_f32(-0x0.0, +NaN) == 1 +; run: %fcmp_uno_f32(-0x0.0, -NaN) == 1 +; run: %fcmp_uno_f32(0x0.0, +NaN) == 1 +; run: %fcmp_uno_f32(0x0.0, -NaN) == 1 +; run: %fcmp_uno_f32(-Inf, +NaN) == 1 +; run: %fcmp_uno_f32(-Inf, -NaN) == 1 +; run: %fcmp_uno_f32(Inf, +NaN) == 1 +; run: %fcmp_uno_f32(Inf, -NaN) == 1 -; run: %fcmp_uno_f32(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_uno_f32(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_uno_f32(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_uno_f32(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_uno_f32(+NaN:0x1, +NaN) == true -; run: %fcmp_uno_f32(+NaN:0x1, -NaN) == true -; run: %fcmp_uno_f32(-NaN:0x1, -NaN) == true -; run: %fcmp_uno_f32(-NaN:0x1, +NaN) == true +; run: %fcmp_uno_f32(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uno_f32(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uno_f32(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uno_f32(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uno_f32(+NaN:0x1, +NaN) == 1 +; run: %fcmp_uno_f32(+NaN:0x1, -NaN) == 1 +; run: %fcmp_uno_f32(-NaN:0x1, -NaN) == 1 +; run: %fcmp_uno_f32(-NaN:0x1, +NaN) == 1 -; run: %fcmp_uno_f32(+NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_uno_f32(-NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_uno_f32(+NaN:0x80001, -NaN:0x80001) == true -; run: %fcmp_uno_f32(-NaN:0x80001, +NaN:0x80001) == true -; run: %fcmp_uno_f32(+NaN:0x80001, +NaN) == true -; run: %fcmp_uno_f32(+NaN:0x80001, -NaN) == true -; run: %fcmp_uno_f32(-NaN:0x80001, -NaN) == true -; run: %fcmp_uno_f32(-NaN:0x80001, +NaN) == true +; run: %fcmp_uno_f32(+NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_uno_f32(-NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_uno_f32(+NaN:0x80001, -NaN:0x80001) == 1 +; run: %fcmp_uno_f32(-NaN:0x80001, +NaN:0x80001) == 1 +; run: %fcmp_uno_f32(+NaN:0x80001, +NaN) == 1 +; run: %fcmp_uno_f32(+NaN:0x80001, -NaN) == 1 +; run: %fcmp_uno_f32(-NaN:0x80001, -NaN) == 1 +; run: %fcmp_uno_f32(-NaN:0x80001, +NaN) == 1 ; sNaN's -; run: %fcmp_uno_f32(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_uno_f32(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_uno_f32(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_uno_f32(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_uno_f32(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_uno_f32(-sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_uno_f32(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_uno_f32(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_uno_f32(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_uno_f32(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_uno_f32(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_uno_f32(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_uno_f32(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_uno_f32(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_uno_f32(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_uno_f32(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_uno_f32(+sNaN:0x1, -Inf) == true -; run: %fcmp_uno_f32(-sNaN:0x1, -Inf) == true -; run: %fcmp_uno_f32(+sNaN:0x1, Inf) == true -; run: %fcmp_uno_f32(-sNaN:0x1, Inf) == true -; run: %fcmp_uno_f32(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_uno_f32(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_uno_f32(0x0.0, +sNaN:0x1) == true -; run: %fcmp_uno_f32(0x0.0, -sNaN:0x1) == true -; run: %fcmp_uno_f32(-Inf, +sNaN:0x1) == true -; run: %fcmp_uno_f32(-Inf, -sNaN:0x1) == true -; run: %fcmp_uno_f32(Inf, +sNaN:0x1) == true -; run: 
%fcmp_uno_f32(Inf, -sNaN:0x1) == true +; run: %fcmp_uno_f32(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_uno_f32(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_uno_f32(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_uno_f32(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_uno_f32(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_uno_f32(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_uno_f32(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_uno_f32(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_uno_f32(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_uno_f32(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_uno_f32(+sNaN:0x1, Inf) == 1 +; run: %fcmp_uno_f32(-sNaN:0x1, Inf) == 1 +; run: %fcmp_uno_f32(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_uno_f32(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_uno_f32(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_uno_f32(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_uno_f32(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_uno_f32(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_uno_f32(Inf, +sNaN:0x1) == 1 +; run: %fcmp_uno_f32(Inf, -sNaN:0x1) == 1 -; run: %fcmp_uno_f32(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_uno_f32(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_uno_f32(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_uno_f32(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_uno_f32(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_uno_f32(-NaN:0x1, -sNaN:0x1) == true -; run: %fcmp_uno_f32(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_uno_f32(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_uno_f32(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uno_f32(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uno_f32(+sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uno_f32(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uno_f32(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_uno_f32(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_uno_f32(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_uno_f32(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_uno_f32(+sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_uno_f32(-sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_uno_f32(+sNaN:0x80001, -sNaN:0x80001) == true -; run: %fcmp_uno_f32(-sNaN:0x80001, +sNaN:0x80001) == true -; run: %fcmp_uno_f32(+sNaN:0x80001, +sNaN:0x1) == true -; run: %fcmp_uno_f32(+sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_uno_f32(-sNaN:0x80001, -sNaN:0x1) == true -; run: %fcmp_uno_f32(-sNaN:0x80001, +sNaN:0x1) == true +; run: %fcmp_uno_f32(+sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_uno_f32(-sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_uno_f32(+sNaN:0x80001, -sNaN:0x80001) == 1 +; run: %fcmp_uno_f32(-sNaN:0x80001, +sNaN:0x80001) == 1 +; run: %fcmp_uno_f32(+sNaN:0x80001, +sNaN:0x1) == 1 +; run: %fcmp_uno_f32(+sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_uno_f32(-sNaN:0x80001, -sNaN:0x1) == 1 +; run: %fcmp_uno_f32(-sNaN:0x80001, +sNaN:0x1) == 1 -function %fcmp_uno_f64(f64, f64) -> b1 { +function %fcmp_uno_f64(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = fcmp uno v0, v1 return v2 } -; run: %fcmp_uno_f64(0x0.5, 0x0.5) == false -; run: %fcmp_uno_f64(0x1.0, 0x1.0) == false -; run: %fcmp_uno_f64(-0x1.0, 0x1.0) == false -; run: %fcmp_uno_f64(0x1.0, -0x1.0) == false -; run: %fcmp_uno_f64(0x0.5, 0x1.0) == false -; run: %fcmp_uno_f64(0x1.5, 0x2.9) == false -; run: %fcmp_uno_f64(0x1.1p10, 0x1.4p1) == false -; run: %fcmp_uno_f64(0x1.4cccccccccccdp0, 0x1.8p0) == false -; run: %fcmp_uno_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == false -; run: %fcmp_uno_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == false -; run: %fcmp_uno_f64(-0x0.5, -0x1.0) == false -; run: %fcmp_uno_f64(-0x1.5, -0x2.9) == false -; run: %fcmp_uno_f64(-0x1.1p10, -0x1.3333333333333p-1) == false -; run: %fcmp_uno_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == false -; run: 
%fcmp_uno_f64(-0x1.8p0, -0x1.b333333333333p0) == false -; run: %fcmp_uno_f64(-0x1.4p1, -0x1.6666666666666p1) == false -; run: %fcmp_uno_f64(0x0.5, -0x1.0) == false -; run: %fcmp_uno_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == false +; run: %fcmp_uno_f64(0x0.5, 0x0.5) == 0 +; run: %fcmp_uno_f64(0x1.0, 0x1.0) == 0 +; run: %fcmp_uno_f64(-0x1.0, 0x1.0) == 0 +; run: %fcmp_uno_f64(0x1.0, -0x1.0) == 0 +; run: %fcmp_uno_f64(0x0.5, 0x1.0) == 0 +; run: %fcmp_uno_f64(0x1.5, 0x2.9) == 0 +; run: %fcmp_uno_f64(0x1.1p10, 0x1.4p1) == 0 +; run: %fcmp_uno_f64(0x1.4cccccccccccdp0, 0x1.8p0) == 0 +; run: %fcmp_uno_f64(0x1.b333333333333p0, 0x1.999999999999ap-2) == 0 +; run: %fcmp_uno_f64(0x1.3333333333333p-1, 0x1.6666666666666p1) == 0 +; run: %fcmp_uno_f64(-0x0.5, -0x1.0) == 0 +; run: %fcmp_uno_f64(-0x1.5, -0x2.9) == 0 +; run: %fcmp_uno_f64(-0x1.1p10, -0x1.3333333333333p-1) == 0 +; run: %fcmp_uno_f64(-0x1.999999999999ap-2, -0x1.4cccccccccccdp0) == 0 +; run: %fcmp_uno_f64(-0x1.8p0, -0x1.b333333333333p0) == 0 +; run: %fcmp_uno_f64(-0x1.4p1, -0x1.6666666666666p1) == 0 +; run: %fcmp_uno_f64(0x0.5, -0x1.0) == 0 +; run: %fcmp_uno_f64(0x1.b333333333333p0, -0x1.b333333333333p0) == 0 ; Zeroes -; run: %fcmp_uno_f64(0x0.0, 0x0.0) == false -; run: %fcmp_uno_f64(-0x0.0, -0x0.0) == false -; run: %fcmp_uno_f64(0x0.0, -0x0.0) == false -; run: %fcmp_uno_f64(-0x0.0, 0x0.0) == false +; run: %fcmp_uno_f64(0x0.0, 0x0.0) == 0 +; run: %fcmp_uno_f64(-0x0.0, -0x0.0) == 0 +; run: %fcmp_uno_f64(0x0.0, -0x0.0) == 0 +; run: %fcmp_uno_f64(-0x0.0, 0x0.0) == 0 ; Infinities -; run: %fcmp_uno_f64(Inf, Inf) == false -; run: %fcmp_uno_f64(-Inf, -Inf) == false -; run: %fcmp_uno_f64(Inf, -Inf) == false -; run: %fcmp_uno_f64(-Inf, Inf) == false +; run: %fcmp_uno_f64(Inf, Inf) == 0 +; run: %fcmp_uno_f64(-Inf, -Inf) == 0 +; run: %fcmp_uno_f64(Inf, -Inf) == 0 +; run: %fcmp_uno_f64(-Inf, Inf) == 0 ; Inf/Zero -; run: %fcmp_uno_f64(0x0.0, Inf) == false -; run: %fcmp_uno_f64(-0x0.0, Inf) == false -; run: %fcmp_uno_f64(0x0.0, -Inf) == false -; run: %fcmp_uno_f64(-0x0.0, -Inf) == false -; run: %fcmp_uno_f64(Inf, 0x0.0) == false -; run: %fcmp_uno_f64(Inf, -0x0.0) == false -; run: %fcmp_uno_f64(-Inf, 0x0.0) == false -; run: %fcmp_uno_f64(-Inf, -0x0.0) == false +; run: %fcmp_uno_f64(0x0.0, Inf) == 0 +; run: %fcmp_uno_f64(-0x0.0, Inf) == 0 +; run: %fcmp_uno_f64(0x0.0, -Inf) == 0 +; run: %fcmp_uno_f64(-0x0.0, -Inf) == 0 +; run: %fcmp_uno_f64(Inf, 0x0.0) == 0 +; run: %fcmp_uno_f64(Inf, -0x0.0) == 0 +; run: %fcmp_uno_f64(-Inf, 0x0.0) == 0 +; run: %fcmp_uno_f64(-Inf, -0x0.0) == 0 ; Epsilon / Max / Min Positive -; run: %fcmp_uno_f64(0x1.0p-52, 0x1.0p-52) == false -; run: %fcmp_uno_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == false -; run: %fcmp_uno_f64(0x1.0p-1022, 0x1.0p-1022) == false -; run: %fcmp_uno_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == false -; run: %fcmp_uno_f64(0x1.0p-52, 0x1.0p-1022) == false -; run: %fcmp_uno_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == false +; run: %fcmp_uno_f64(0x1.0p-52, 0x1.0p-52) == 0 +; run: %fcmp_uno_f64(0x1.fffffffffffffp1023, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_uno_f64(0x1.0p-1022, 0x1.0p-1022) == 0 +; run: %fcmp_uno_f64(0x1.0p-52, 0x1.fffffffffffffp1023) == 0 +; run: %fcmp_uno_f64(0x1.0p-52, 0x1.0p-1022) == 0 +; run: %fcmp_uno_f64(0x1.0p-1022, 0x1.fffffffffffffp1023) == 0 ; Subnormals -; run: %fcmp_uno_f64(0x0.8p-1022, -0x0.8p-1022) == false -; run: %fcmp_uno_f64(-0x0.8p-1022, 0x0.8p-1022) == false -; run: %fcmp_uno_f64(0x0.8p-1022, 0x0.0) == false -; run: %fcmp_uno_f64(-0x0.8p-1022, 0x0.0) == false -; 
run: %fcmp_uno_f64(0x0.8p-1022, -0x0.0) == false -; run: %fcmp_uno_f64(-0x0.8p-1022, -0x0.0) == false -; run: %fcmp_uno_f64(0x0.0, 0x0.8p-1022) == false -; run: %fcmp_uno_f64(0x0.0, -0x0.8p-1022) == false -; run: %fcmp_uno_f64(-0x0.0, 0x0.8p-1022) == false -; run: %fcmp_uno_f64(-0x0.0, -0x0.8p-1022) == false +; run: %fcmp_uno_f64(0x0.8p-1022, -0x0.8p-1022) == 0 +; run: %fcmp_uno_f64(-0x0.8p-1022, 0x0.8p-1022) == 0 +; run: %fcmp_uno_f64(0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_uno_f64(-0x0.8p-1022, 0x0.0) == 0 +; run: %fcmp_uno_f64(0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_uno_f64(-0x0.8p-1022, -0x0.0) == 0 +; run: %fcmp_uno_f64(0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_uno_f64(0x0.0, -0x0.8p-1022) == 0 +; run: %fcmp_uno_f64(-0x0.0, 0x0.8p-1022) == 0 +; run: %fcmp_uno_f64(-0x0.0, -0x0.8p-1022) == 0 ; NaN's -; run: %fcmp_uno_f64(+NaN, +NaN) == true -; run: %fcmp_uno_f64(-NaN, -NaN) == true -; run: %fcmp_uno_f64(+NaN, -NaN) == true -; run: %fcmp_uno_f64(-NaN, +NaN) == true +; run: %fcmp_uno_f64(+NaN, +NaN) == 1 +; run: %fcmp_uno_f64(-NaN, -NaN) == 1 +; run: %fcmp_uno_f64(+NaN, -NaN) == 1 +; run: %fcmp_uno_f64(-NaN, +NaN) == 1 -; run: %fcmp_uno_f64(+NaN, -0x1.0) == true -; run: %fcmp_uno_f64(-NaN, -0x1.0) == true -; run: %fcmp_uno_f64(+NaN, 0x1.0) == true -; run: %fcmp_uno_f64(-NaN, 0x1.0) == true -; run: %fcmp_uno_f64(+NaN, -0x0.0) == true -; run: %fcmp_uno_f64(-NaN, -0x0.0) == true -; run: %fcmp_uno_f64(+NaN, 0x0.0) == true -; run: %fcmp_uno_f64(-NaN, 0x0.0) == true -; run: %fcmp_uno_f64(+NaN, -Inf) == true -; run: %fcmp_uno_f64(-NaN, -Inf) == true -; run: %fcmp_uno_f64(+NaN, Inf) == true -; run: %fcmp_uno_f64(-NaN, Inf) == true -; run: %fcmp_uno_f64(-0x0.0, +NaN) == true -; run: %fcmp_uno_f64(-0x0.0, -NaN) == true -; run: %fcmp_uno_f64(0x0.0, +NaN) == true -; run: %fcmp_uno_f64(0x0.0, -NaN) == true -; run: %fcmp_uno_f64(-Inf, +NaN) == true -; run: %fcmp_uno_f64(-Inf, -NaN) == true -; run: %fcmp_uno_f64(Inf, +NaN) == true -; run: %fcmp_uno_f64(Inf, -NaN) == true +; run: %fcmp_uno_f64(+NaN, -0x1.0) == 1 +; run: %fcmp_uno_f64(-NaN, -0x1.0) == 1 +; run: %fcmp_uno_f64(+NaN, 0x1.0) == 1 +; run: %fcmp_uno_f64(-NaN, 0x1.0) == 1 +; run: %fcmp_uno_f64(+NaN, -0x0.0) == 1 +; run: %fcmp_uno_f64(-NaN, -0x0.0) == 1 +; run: %fcmp_uno_f64(+NaN, 0x0.0) == 1 +; run: %fcmp_uno_f64(-NaN, 0x0.0) == 1 +; run: %fcmp_uno_f64(+NaN, -Inf) == 1 +; run: %fcmp_uno_f64(-NaN, -Inf) == 1 +; run: %fcmp_uno_f64(+NaN, Inf) == 1 +; run: %fcmp_uno_f64(-NaN, Inf) == 1 +; run: %fcmp_uno_f64(-0x0.0, +NaN) == 1 +; run: %fcmp_uno_f64(-0x0.0, -NaN) == 1 +; run: %fcmp_uno_f64(0x0.0, +NaN) == 1 +; run: %fcmp_uno_f64(0x0.0, -NaN) == 1 +; run: %fcmp_uno_f64(-Inf, +NaN) == 1 +; run: %fcmp_uno_f64(-Inf, -NaN) == 1 +; run: %fcmp_uno_f64(Inf, +NaN) == 1 +; run: %fcmp_uno_f64(Inf, -NaN) == 1 -; run: %fcmp_uno_f64(+NaN:0x1, +NaN:0x1) == true -; run: %fcmp_uno_f64(-NaN:0x1, -NaN:0x1) == true -; run: %fcmp_uno_f64(+NaN:0x1, -NaN:0x1) == true -; run: %fcmp_uno_f64(-NaN:0x1, +NaN:0x1) == true -; run: %fcmp_uno_f64(+NaN:0x1, +NaN) == true -; run: %fcmp_uno_f64(+NaN:0x1, -NaN) == true -; run: %fcmp_uno_f64(-NaN:0x1, -NaN) == true -; run: %fcmp_uno_f64(-NaN:0x1, +NaN) == true +; run: %fcmp_uno_f64(+NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uno_f64(-NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uno_f64(+NaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uno_f64(-NaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uno_f64(+NaN:0x1, +NaN) == 1 +; run: %fcmp_uno_f64(+NaN:0x1, -NaN) == 1 +; run: %fcmp_uno_f64(-NaN:0x1, -NaN) == 1 +; run: %fcmp_uno_f64(-NaN:0x1, +NaN) == 1 -; run: 
%fcmp_uno_f64(+NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_uno_f64(-NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_uno_f64(+NaN:0x800000000001, -NaN:0x800000000001) == true -; run: %fcmp_uno_f64(-NaN:0x800000000001, +NaN:0x800000000001) == true -; run: %fcmp_uno_f64(+NaN:0x800000000001, +NaN) == true -; run: %fcmp_uno_f64(+NaN:0x800000000001, -NaN) == true -; run: %fcmp_uno_f64(-NaN:0x800000000001, -NaN) == true -; run: %fcmp_uno_f64(-NaN:0x800000000001, +NaN) == true +; run: %fcmp_uno_f64(+NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_uno_f64(-NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_uno_f64(+NaN:0x800000000001, -NaN:0x800000000001) == 1 +; run: %fcmp_uno_f64(-NaN:0x800000000001, +NaN:0x800000000001) == 1 +; run: %fcmp_uno_f64(+NaN:0x800000000001, +NaN) == 1 +; run: %fcmp_uno_f64(+NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_uno_f64(-NaN:0x800000000001, -NaN) == 1 +; run: %fcmp_uno_f64(-NaN:0x800000000001, +NaN) == 1 ; sNaN's -; run: %fcmp_uno_f64(+sNaN:0x1, +sNaN:0x1) == true -; run: %fcmp_uno_f64(-sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_uno_f64(+sNaN:0x1, -sNaN:0x1) == true -; run: %fcmp_uno_f64(-sNaN:0x1, +sNaN:0x1) == true +; run: %fcmp_uno_f64(+sNaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_uno_f64(-sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_uno_f64(+sNaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_uno_f64(-sNaN:0x1, +sNaN:0x1) == 1 -; run: %fcmp_uno_f64(+sNaN:0x1, -0x1.0) == true -; run: %fcmp_uno_f64(-sNaN:0x1, -0x1.0) == true -; run: %fcmp_uno_f64(+sNaN:0x1, 0x1.0) == true -; run: %fcmp_uno_f64(-sNaN:0x1, 0x1.0) == true -; run: %fcmp_uno_f64(+sNaN:0x1, -0x0.0) == true -; run: %fcmp_uno_f64(-sNaN:0x1, -0x0.0) == true -; run: %fcmp_uno_f64(+sNaN:0x1, 0x0.0) == true -; run: %fcmp_uno_f64(-sNaN:0x1, 0x0.0) == true -; run: %fcmp_uno_f64(+sNaN:0x1, -Inf) == true -; run: %fcmp_uno_f64(-sNaN:0x1, -Inf) == true -; run: %fcmp_uno_f64(+sNaN:0x1, Inf) == true -; run: %fcmp_uno_f64(-sNaN:0x1, Inf) == true -; run: %fcmp_uno_f64(-0x0.0, +sNaN:0x1) == true -; run: %fcmp_uno_f64(-0x0.0, -sNaN:0x1) == true -; run: %fcmp_uno_f64(0x0.0, +sNaN:0x1) == true -; run: %fcmp_uno_f64(0x0.0, -sNaN:0x1) == true -; run: %fcmp_uno_f64(-Inf, +sNaN:0x1) == true -; run: %fcmp_uno_f64(-Inf, -sNaN:0x1) == true -; run: %fcmp_uno_f64(Inf, +sNaN:0x1) == true -; run: %fcmp_uno_f64(Inf, -sNaN:0x1) == true +; run: %fcmp_uno_f64(+sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_uno_f64(-sNaN:0x1, -0x1.0) == 1 +; run: %fcmp_uno_f64(+sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_uno_f64(-sNaN:0x1, 0x1.0) == 1 +; run: %fcmp_uno_f64(+sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_uno_f64(-sNaN:0x1, -0x0.0) == 1 +; run: %fcmp_uno_f64(+sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_uno_f64(-sNaN:0x1, 0x0.0) == 1 +; run: %fcmp_uno_f64(+sNaN:0x1, -Inf) == 1 +; run: %fcmp_uno_f64(-sNaN:0x1, -Inf) == 1 +; run: %fcmp_uno_f64(+sNaN:0x1, Inf) == 1 +; run: %fcmp_uno_f64(-sNaN:0x1, Inf) == 1 +; run: %fcmp_uno_f64(-0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_uno_f64(-0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_uno_f64(0x0.0, +sNaN:0x1) == 1 +; run: %fcmp_uno_f64(0x0.0, -sNaN:0x1) == 1 +; run: %fcmp_uno_f64(-Inf, +sNaN:0x1) == 1 +; run: %fcmp_uno_f64(-Inf, -sNaN:0x1) == 1 +; run: %fcmp_uno_f64(Inf, +sNaN:0x1) == 1 +; run: %fcmp_uno_f64(Inf, -sNaN:0x1) == 1 -; run: %fcmp_uno_f64(+sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_uno_f64(-sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_uno_f64(+sNaN:0x1, -NaN:0x1) == true -; run: %fcmp_uno_f64(-sNaN:0x1, +NaN:0x1) == true -; run: %fcmp_uno_f64(+NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_uno_f64(-NaN:0x1, 
-sNaN:0x1) == true -; run: %fcmp_uno_f64(-NaN:0x1, +sNaN:0x1) == true -; run: %fcmp_uno_f64(+NaN:0x1, -sNaN:0x1) == true +; run: %fcmp_uno_f64(+sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uno_f64(-sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uno_f64(+sNaN:0x1, -NaN:0x1) == 1 +; run: %fcmp_uno_f64(-sNaN:0x1, +NaN:0x1) == 1 +; run: %fcmp_uno_f64(+NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_uno_f64(-NaN:0x1, -sNaN:0x1) == 1 +; run: %fcmp_uno_f64(-NaN:0x1, +sNaN:0x1) == 1 +; run: %fcmp_uno_f64(+NaN:0x1, -sNaN:0x1) == 1 -; run: %fcmp_uno_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_uno_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_uno_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == true -; run: %fcmp_uno_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == true -; run: %fcmp_uno_f64(+sNaN:0x800000000001, +sNaN:0x1) == true -; run: %fcmp_uno_f64(+sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_uno_f64(-sNaN:0x800000000001, -sNaN:0x1) == true -; run: %fcmp_uno_f64(-sNaN:0x800000000001, +sNaN:0x1) == true +; run: %fcmp_uno_f64(+sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_uno_f64(-sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_uno_f64(+sNaN:0x800000000001, -sNaN:0x800000000001) == 1 +; run: %fcmp_uno_f64(-sNaN:0x800000000001, +sNaN:0x800000000001) == 1 +; run: %fcmp_uno_f64(+sNaN:0x800000000001, +sNaN:0x1) == 1 +; run: %fcmp_uno_f64(+sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_uno_f64(-sNaN:0x800000000001, -sNaN:0x1) == 1 +; run: %fcmp_uno_f64(-sNaN:0x800000000001, +sNaN:0x1) == 1 diff --git a/cranelift/filetests/filetests/runtests/fdiv.clif b/cranelift/filetests/filetests/runtests/fdiv.clif index 325648e3a404..01f3404b683a 100644 --- a/cranelift/filetests/filetests/runtests/fdiv.clif +++ b/cranelift/filetests/filetests/runtests/fdiv.clif @@ -64,7 +64,7 @@ function %fdiv_is_nan_f32(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fdiv v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fdiv_is_nan_f32(0x0.0, -0x0.0) == 1 @@ -148,7 +148,7 @@ function %fdiv_is_nan_f64(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fdiv v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fdiv_is_nan_f64(0x0.0, -0x0.0) == 1 diff --git a/cranelift/filetests/filetests/runtests/floor.clif b/cranelift/filetests/filetests/runtests/floor.clif index ff3ffe1789df..9be6c6d21e7e 100644 --- a/cranelift/filetests/filetests/runtests/floor.clif +++ b/cranelift/filetests/filetests/runtests/floor.clif @@ -59,7 +59,7 @@ function %floor_is_nan_f32(f32) -> i32 { block0(v0: f32): v1 = floor v0 v2 = fcmp ne v1, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } ; run: %floor_is_nan_f32(+NaN) == 1 @@ -132,7 +132,7 @@ function %floor_is_nan_f64(f64) -> i32 { block0(v0: f64): v1 = floor v0 v2 = fcmp ne v1, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } ; run: %floor_is_nan_f64(+NaN) == 1 diff --git a/cranelift/filetests/filetests/runtests/fma.clif b/cranelift/filetests/filetests/runtests/fma.clif index f47aa0b11500..7423e57a0f04 100644 --- a/cranelift/filetests/filetests/runtests/fma.clif +++ b/cranelift/filetests/filetests/runtests/fma.clif @@ -58,7 +58,7 @@ function %fma_is_nan_f32(f32, f32, f32) -> i32 { block0(v0: f32, v1: f32, v2: f32): v3 = fma v0, v1, v2 v4 = fcmp ne v3, v3 - v5 = bint.i32 v4 + v5 = uextend.i32 v4 return v5 } ; run: %fma_is_nan_f32(Inf, -Inf, Inf) == 1 @@ -124,7 +124,7 @@ function %fma_is_nan_f64(f64, f64, f64) -> i32 { block0(v0: f64, v1: f64, v2: 
f64): v3 = fma v0, v1, v2 v4 = fcmp ne v3, v3 - v5 = bint.i32 v4 + v5 = uextend.i32 v4 return v5 } ; run: %fma_is_nan_f64(Inf, -Inf, Inf) == 1 @@ -149,4 +149,4 @@ block0(v0: f32, v1: f32, v2: f32): v4 = fma v0, v1, v3 return v4 } -; run: %fma_load_f32(0x9.0, 0x9.0, 0x9.0) == 0x1.680000p6 \ No newline at end of file +; run: %fma_load_f32(0x9.0, 0x9.0, 0x9.0) == 0x1.680000p6 diff --git a/cranelift/filetests/filetests/runtests/fmax-pseudo.clif b/cranelift/filetests/filetests/runtests/fmax-pseudo.clif index 274992afd7f8..733b4b9f808b 100644 --- a/cranelift/filetests/filetests/runtests/fmax-pseudo.clif +++ b/cranelift/filetests/filetests/runtests/fmax-pseudo.clif @@ -45,7 +45,7 @@ function %fmax_is_nan_f32(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fmax_pseudo v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fmax_is_nan_f32(-NaN, 0x0.0) == 1 @@ -98,7 +98,7 @@ function %fmax_is_nan_f64(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fmax_pseudo v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fmax_is_nan_f64(-NaN, 0x0.0) == 1 diff --git a/cranelift/filetests/filetests/runtests/fmax.clif b/cranelift/filetests/filetests/runtests/fmax.clif index 31de6e052210..050b91208f21 100644 --- a/cranelift/filetests/filetests/runtests/fmax.clif +++ b/cranelift/filetests/filetests/runtests/fmax.clif @@ -37,7 +37,7 @@ function %fmax_is_nan_f32(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fmax v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fmax_is_nan_f32(0x0.0, +NaN) == 1 @@ -89,7 +89,7 @@ function %fmax_is_nan_f64(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fmax v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fmax_is_nan_f64(0x0.0, +NaN) == 1 diff --git a/cranelift/filetests/filetests/runtests/fmin-pseudo.clif b/cranelift/filetests/filetests/runtests/fmin-pseudo.clif index fb9b6e8402eb..6d8a0f4018b4 100644 --- a/cranelift/filetests/filetests/runtests/fmin-pseudo.clif +++ b/cranelift/filetests/filetests/runtests/fmin-pseudo.clif @@ -45,7 +45,7 @@ function %fmin_is_nan_f32(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fmin_pseudo v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fmin_is_nan_f32(-NaN, 0x0.0) == 1 @@ -98,7 +98,7 @@ function %fmin_is_nan_f64(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fmin_pseudo v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fmin_is_nan_f64(-NaN, 0x0.0) == 1 diff --git a/cranelift/filetests/filetests/runtests/fmin.clif b/cranelift/filetests/filetests/runtests/fmin.clif index 9693f632d45a..b589c9f1082f 100644 --- a/cranelift/filetests/filetests/runtests/fmin.clif +++ b/cranelift/filetests/filetests/runtests/fmin.clif @@ -37,7 +37,7 @@ function %fmin_is_nan_f32(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fmin v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fmin_is_nan_f32(0x0.0, +NaN) == 1 @@ -89,7 +89,7 @@ function %fmin_is_nan_f64(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fmin v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fmin_is_nan_f64(0x0.0, +NaN) == 1 diff --git a/cranelift/filetests/filetests/runtests/fmul.clif b/cranelift/filetests/filetests/runtests/fmul.clif index c7dede8759ad..d423c0fa78f0 100644 --- a/cranelift/filetests/filetests/runtests/fmul.clif +++ b/cranelift/filetests/filetests/runtests/fmul.clif @@ -49,7 
+49,7 @@ function %fmul_is_nan_f32(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fmul v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fmul_is_nan_f32(-0x0.0, +Inf) == 1 @@ -114,7 +114,7 @@ function %fmul_is_nan_f64(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fmul v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fmul_is_nan_f64(-0x0.0, +Inf) == 1 diff --git a/cranelift/filetests/filetests/runtests/fsub.clif b/cranelift/filetests/filetests/runtests/fsub.clif index 0d719e873f8c..0cfb739d69d8 100644 --- a/cranelift/filetests/filetests/runtests/fsub.clif +++ b/cranelift/filetests/filetests/runtests/fsub.clif @@ -51,7 +51,7 @@ function %fsub_is_nan_f32(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fsub v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fsub_is_nan_f32(0x0.0, +NaN) == 1 @@ -116,7 +116,7 @@ function %fsub_is_nan_f64(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fsub v0, v1 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %fsub_is_nan_f64(0x0.0, +NaN) == 1 diff --git a/cranelift/filetests/filetests/runtests/i128-bextend.clif b/cranelift/filetests/filetests/runtests/i128-bextend.clif deleted file mode 100644 index c49ac91578be..000000000000 --- a/cranelift/filetests/filetests/runtests/i128-bextend.clif +++ /dev/null @@ -1,46 +0,0 @@ -test interpret -test run -target aarch64 -target s390x -target riscv64 - -function %bextend_b1_b128(b1) -> b128 { -block0(v0: b1): - v1 = bextend.b128 v0 - return v1 -} -; run: %bextend_b1_b128(true) == true -; run: %bextend_b1_b128(false) == false - -function %bextend_b8_b128(b8) -> b128 { -block0(v0: b8): - v1 = bextend.b128 v0 - return v1 -} -; run: %bextend_b8_b128(true) == true -; run: %bextend_b8_b128(false) == false - -function %bextend_b16_b128(b16) -> b128 { -block0(v0: b16): - v1 = bextend.b128 v0 - return v1 -} -; run: %bextend_b16_b128(true) == true -; run: %bextend_b16_b128(false) == false - -function %bextend_b32_b128(b32) -> b128 { -block0(v0: b32): - v1 = bextend.b128 v0 - return v1 -} -; run: %bextend_b32_b128(true) == true -; run: %bextend_b32_b128(false) == false - - -function %bextend_b64_b128(b64) -> b128 { -block0(v0: b64): - v1 = bextend.b128 v0 - return v1 -} -; run: %bextend_b64_b128(true) == true -; run: %bextend_b64_b128(false) == false diff --git a/cranelift/filetests/filetests/runtests/i128-bint.clif b/cranelift/filetests/filetests/runtests/i128-bint.clif deleted file mode 100644 index d16a2822aefd..000000000000 --- a/cranelift/filetests/filetests/runtests/i128-bint.clif +++ /dev/null @@ -1,87 +0,0 @@ -test interpret -test run -set enable_llvm_abi_extensions=true -target aarch64 -target s390x -target x86_64 -target riscv64 - -function %bint_b1_i128_true() -> i128 { -block0: - v0 = bconst.b1 true - v1 = bint.i128 v0 - return v1 -} -; run: %bint_b1_i128_true() == 1 - -function %bint_b1_i128_false() -> i128 { -block0: - v0 = bconst.b1 false - v1 = bint.i128 v0 - return v1 -} -; run: %bint_b1_i128_false() == 0 - -function %bint_b8_i128_true() -> i128 { -block0: - v0 = bconst.b8 true - v1 = bint.i128 v0 - return v1 -} -; run: %bint_b8_i128_true() == 1 - -function %bint_b8_i128_false() -> i128 { -block0: - v0 = bconst.b8 false - v1 = bint.i128 v0 - return v1 -} -; run: %bint_b8_i128_false() == 0 - -function %bint_b16_i128_true() -> i128 { -block0: - v0 = bconst.b16 true - v1 = bint.i128 v0 - return v1 -} -; run: %bint_b16_i128_true() == 1 - -function 
%bint_b16_i128_false() -> i128 { -block0: - v0 = bconst.b16 false - v1 = bint.i128 v0 - return v1 -} -; run: %bint_b16_i128_false() == 0 - -function %bint_b32_i128_true() -> i128 { -block0: - v0 = bconst.b32 true - v1 = bint.i128 v0 - return v1 -} -; run: %bint_b32_i128_true() == 1 - -function %bint_b32_i128_false() -> i128 { -block0: - v0 = bconst.b32 false - v1 = bint.i128 v0 - return v1 -} -; run: %bint_b32_i128_false() == 0 - -function %bint_b64_i128_true() -> i128 { -block0: - v0 = bconst.b64 true - v1 = bint.i128 v0 - return v1 -} -; run: %bint_b64_i128_true() == 1 - -function %bint_b64_i128_false() -> i128 { -block0: - v0 = bconst.b64 false - v1 = bint.i128 v0 - return v1 -} -; run: %bint_b64_i128_false() == 0 diff --git a/cranelift/filetests/filetests/runtests/i128-bitrev.clif b/cranelift/filetests/filetests/runtests/i128-bitrev.clif index ed07bc259c88..c89fe9b423c1 100644 --- a/cranelift/filetests/filetests/runtests/i128-bitrev.clif +++ b/cranelift/filetests/filetests/runtests/i128-bitrev.clif @@ -5,7 +5,7 @@ target s390x target x86_64 target riscv64 -function %reverse_bits_zero() -> b1 { +function %reverse_bits_zero() -> i8 { block0: v0 = iconst.i64 0 v1 = iconcat v0, v0 @@ -15,7 +15,7 @@ block0: } ; run -function %reverse_bits_one() -> b1 { +function %reverse_bits_one() -> i8 { block0: v0 = iconst.i64 0 v1 = iconst.i64 1 @@ -32,7 +32,7 @@ block0: } ; run -function %reverse_bits() -> b1 { +function %reverse_bits() -> i8 { block0: v0 = iconst.i64 0x06AD_8667_69EC_41BA v1 = iconst.i64 0x6C83_D81A_6E28_83AB diff --git a/cranelift/filetests/filetests/runtests/i128-bmask.clif b/cranelift/filetests/filetests/runtests/i128-bmask.clif index 37d03426e067..9336742b2761 100644 --- a/cranelift/filetests/filetests/runtests/i128-bmask.clif +++ b/cranelift/filetests/filetests/runtests/i128-bmask.clif @@ -4,83 +4,75 @@ target aarch64 target riscv64 target s390x -function %bmask_b128_i128(b128) -> i128 { -block0(v0: b128): +function %bmask_i128_i128(i128) -> i128 { +block0(v0: i128): v1 = bmask.i128 v0 return v1 } -; run: %bmask_b128_i128(true) == -1 -; run: %bmask_b128_i128(false) == 0 +; run: %bmask_i128_i128(1) == -1 +; run: %bmask_i128_i128(0) == 0 -function %bmask_b128_i64(b128) -> i64 { -block0(v0: b128): +function %bmask_i128_i64(i128) -> i64 { +block0(v0: i128): v1 = bmask.i64 v0 return v1 } -; run: %bmask_b128_i64(true) == -1 -; run: %bmask_b128_i64(false) == 0 +; run: %bmask_i128_i64(1) == -1 +; run: %bmask_i128_i64(0) == 0 -function %bmask_b128_i32(b128) -> i32 { -block0(v0: b128): +function %bmask_i128_i32(i128) -> i32 { +block0(v0: i128): v1 = bmask.i32 v0 return v1 } -; run: %bmask_b128_i32(true) == -1 -; run: %bmask_b128_i32(false) == 0 +; run: %bmask_i128_i32(1) == -1 +; run: %bmask_i128_i32(0) == 0 -function %bmask_b128_i16(b128) -> i16 { -block0(v0: b128): +function %bmask_i128_i16(i128) -> i16 { +block0(v0: i128): v1 = bmask.i16 v0 return v1 } -; run: %bmask_b128_i16(true) == -1 -; run: %bmask_b128_i16(false) == 0 +; run: %bmask_i128_i16(1) == -1 +; run: %bmask_i128_i16(0) == 0 -function %bmask_b128_i8(b128) -> i8 { -block0(v0: b128): +function %bmask_i128_i8(i128) -> i8 { +block0(v0: i128): v1 = bmask.i8 v0 return v1 } -; run: %bmask_b128_i8(true) == -1 -; run: %bmask_b128_i8(false) == 0 +; run: %bmask_i128_i8(1) == -1 +; run: %bmask_i128_i8(0) == 0 -function %bmask_b64_i128(b64) -> i128 { -block0(v0: b64): +function %bmask_i64_i128(i64) -> i128 { +block0(v0: i64): v1 = bmask.i128 v0 return v1 } -; run: %bmask_b64_i128(true) == -1 -; run: %bmask_b64_i128(false) == 0 +; run: 
%bmask_i64_i128(1) == -1 +; run: %bmask_i64_i128(0) == 0 -function %bmask_b32_i128(b32) -> i128 { -block0(v0: b32): +function %bmask_i32_i128(i32) -> i128 { +block0(v0: i32): v1 = bmask.i128 v0 return v1 } -; run: %bmask_b32_i128(true) == -1 -; run: %bmask_b32_i128(false) == 0 +; run: %bmask_i32_i128(1) == -1 +; run: %bmask_i32_i128(0) == 0 -function %bmask_b16_i128(b16) -> i128 { -block0(v0: b16): +function %bmask_i16_i128(i16) -> i128 { +block0(v0: i16): v1 = bmask.i128 v0 return v1 } -; run: %bmask_b16_i128(true) == -1 -; run: %bmask_b16_i128(false) == 0 +; run: %bmask_i16_i128(1) == -1 +; run: %bmask_i16_i128(0) == 0 -function %bmask_b8_i128(b8) -> i128 { -block0(v0: b8): +function %bmask_i8_i128(i8) -> i128 { +block0(v0: i8): v1 = bmask.i128 v0 return v1 } -; run: %bmask_b8_i128(true) == -1 -; run: %bmask_b8_i128(false) == 0 - -function %bmask_b1_i128(b1) -> i128 { -block0(v0: b1): - v1 = bmask.i128 v0 - return v1 -} -; run: %bmask_b1_i128(true) == -1 -; run: %bmask_b1_i128(false) == 0 +; run: %bmask_i8_i128(1) == -1 +; run: %bmask_i8_i128(0) == 0 diff --git a/cranelift/filetests/filetests/runtests/i128-bnot.clif b/cranelift/filetests/filetests/runtests/i128-bnot.clif index 60e693ba9ae3..0031921c37f5 100644 --- a/cranelift/filetests/filetests/runtests/i128-bnot.clif +++ b/cranelift/filetests/filetests/runtests/i128-bnot.clif @@ -2,10 +2,10 @@ test interpret test run target s390x -function %bnot_b128(b128) -> b128 { -block0(v0: b128): - v1 = bnot.b128 v0 +function %bnot_i128(i128) -> i128 { +block0(v0: i128): + v1 = bnot.i128 v0 return v1 } -; run: %bnot_b128(false) == true -; run: %bnot_b128(true) == false +; run: %bnot_i128(0) == -1 +; run: %bnot_i128(1) == -2 diff --git a/cranelift/filetests/filetests/runtests/i128-br.clif b/cranelift/filetests/filetests/runtests/i128-br.clif index 098cf581465a..a73db4f79077 100644 --- a/cranelift/filetests/filetests/runtests/i128-br.clif +++ b/cranelift/filetests/filetests/runtests/i128-br.clif @@ -5,39 +5,39 @@ target s390x target x86_64 target riscv64 -function %i128_brz(i128) -> b1 { +function %i128_brz(i128) -> i8 { block0(v0: i128): brz v0, block2 jump block1 block1: - v1 = bconst.b1 false + v1 = iconst.i8 0 return v1 block2: - v2 = bconst.b1 true + v2 = iconst.i8 1 return v2 } -; run: %i128_brz(0) == true -; run: %i128_brz(-1) == false -; run: %i128_brz(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == false -; run: %i128_brz(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == false +; run: %i128_brz(0) == 1 +; run: %i128_brz(-1) == 0 +; run: %i128_brz(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == 0 +; run: %i128_brz(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == 0 -function %i128_brnz(i128) -> b1 { +function %i128_brnz(i128) -> i8 { block0(v0: i128): brnz v0, block2 jump block1 block1: - v1 = bconst.b1 false + v1 = iconst.i8 0 return v1 block2: - v2 = bconst.b1 true + v2 = iconst.i8 1 return v2 } -; run: %i128_brnz(0) == false -; run: %i128_brnz(-1) == true -; run: %i128_brnz(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == true -; run: %i128_brnz(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == true +; run: %i128_brnz(0) == 0 +; run: %i128_brnz(-1) == 1 +; run: %i128_brnz(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == 1 +; run: %i128_brnz(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == 1 diff --git a/cranelift/filetests/filetests/runtests/i128-breduce.clif b/cranelift/filetests/filetests/runtests/i128-breduce.clif deleted file mode 100644 index de33346cb11c..000000000000 --- a/cranelift/filetests/filetests/runtests/i128-breduce.clif +++ /dev/null @@ -1,42 +0,0 @@ -test interpret -target 
riscv64 - -function %breduce_b128_b1(b128) -> b1 { -block0(v0: b128): - v1 = breduce.b1 v0 - return v1 -} -; run: %breduce_b128_b1(true) == true -; run: %breduce_b128_b1(false) == false - -function %breduce_b128_b8(b128) -> b8 { -block0(v0: b128): - v1 = breduce.b8 v0 - return v1 -} -; run: %breduce_b128_b8(true) == true -; run: %breduce_b128_b8(false) == false - -function %breduce_b128_b16(b128) -> b16 { -block0(v0: b128): - v1 = breduce.b16 v0 - return v1 -} -; run: %breduce_b128_b16(true) == true -; run: %breduce_b128_b16(false) == false - -function %breduce_b128_b32(b128) -> b32 { -block0(v0: b128): - v1 = breduce.b32 v0 - return v1 -} -; run: %breduce_b128_b32(true) == true -; run: %breduce_b128_b32(false) == false - -function %breduce_b128_b64(b128) -> b64 { -block0(v0: b128): - v1 = breduce.b64 v0 - return v1 -} -; run: %breduce_b128_b64(true) == true -; run: %breduce_b128_b64(false) == false diff --git a/cranelift/filetests/filetests/runtests/i128-bricmp.clif b/cranelift/filetests/filetests/runtests/i128-bricmp.clif index 14c58e90dd19..38499fe6cf2c 100644 --- a/cranelift/filetests/filetests/runtests/i128-bricmp.clif +++ b/cranelift/filetests/filetests/runtests/i128-bricmp.clif @@ -3,233 +3,233 @@ target aarch64 target riscv64 target s390x -function %i128_bricmp_eq(i128, i128) -> b1 { +function %i128_bricmp_eq(i128, i128) -> i8 { block0(v0: i128, v1: i128): br_icmp.i128 eq v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %i128_bricmp_eq(0, 0) == true -; run: %i128_bricmp_eq(-1, -1) == true -; run: %i128_bricmp_eq(-1, 0) == false -; run: %i128_bricmp_eq(-1, 0xFFFFFFFF_FFFFFFFF_00000000_00000000) == false -; run: %i128_bricmp_eq(0x00000000_00000000_FFFFFFFF_FFFFFFFF, -1) == false -; run: %i128_bricmp_eq(0xFFFFFFFF_FFFFFFFF_00000000_00000000, -1) == false -; run: %i128_bricmp_eq(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == true -; run: %i128_bricmp_eq(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == false -; run: %i128_bricmp_eq(0x00000000_00000001_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == false - -function %i128_bricmp_ne(i128, i128) -> b1 { +; run: %i128_bricmp_eq(0, 0) == 1 +; run: %i128_bricmp_eq(-1, -1) == 1 +; run: %i128_bricmp_eq(-1, 0) == 0 +; run: %i128_bricmp_eq(-1, 0xFFFFFFFF_FFFFFFFF_00000000_00000000) == 0 +; run: %i128_bricmp_eq(0x00000000_00000000_FFFFFFFF_FFFFFFFF, -1) == 0 +; run: %i128_bricmp_eq(0xFFFFFFFF_FFFFFFFF_00000000_00000000, -1) == 0 +; run: %i128_bricmp_eq(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == 1 +; run: %i128_bricmp_eq(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == 0 +; run: %i128_bricmp_eq(0x00000000_00000001_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == 0 + +function %i128_bricmp_ne(i128, i128) -> i8 { block0(v0: i128,v1: i128): br_icmp.i128 ne v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %i128_bricmp_ne(0, 0) == false -; run: %i128_bricmp_ne(-1, -1) == false -; run: %i128_bricmp_ne(-1, 0) == true -; run: %i128_bricmp_ne(-1, 0xFFFFFFFF_FFFFFFFF_00000000_00000000) == true -; run: %i128_bricmp_ne(0x00000000_00000000_FFFFFFFF_FFFFFFFF, -1) == true -; run: %i128_bricmp_ne(0xFFFFFFFF_FFFFFFFF_00000000_00000000, -1) == true -; run: 
%i128_bricmp_ne(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == false -; run: %i128_bricmp_ne(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == true -; run: %i128_bricmp_ne(0x00000000_00000001_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == true - - -function %i128_bricmp_slt(i128, i128) -> b1 { +; run: %i128_bricmp_ne(0, 0) == 0 +; run: %i128_bricmp_ne(-1, -1) == 0 +; run: %i128_bricmp_ne(-1, 0) == 1 +; run: %i128_bricmp_ne(-1, 0xFFFFFFFF_FFFFFFFF_00000000_00000000) == 1 +; run: %i128_bricmp_ne(0x00000000_00000000_FFFFFFFF_FFFFFFFF, -1) == 1 +; run: %i128_bricmp_ne(0xFFFFFFFF_FFFFFFFF_00000000_00000000, -1) == 1 +; run: %i128_bricmp_ne(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == 0 +; run: %i128_bricmp_ne(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == 1 +; run: %i128_bricmp_ne(0x00000000_00000001_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == 1 + + +function %i128_bricmp_slt(i128, i128) -> i8 { block0(v0: i128,v1: i128): br_icmp.i128 slt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %i128_bricmp_slt(0, 0) == false -; run: %i128_bricmp_slt(1, 1) == false -; run: %i128_bricmp_slt(0, 1) == true -; run: %i128_bricmp_slt(-1, 0) == true -; run: %i128_bricmp_slt(0, -1) == false -; run: %i128_bricmp_slt(-1, -1) == false -; run: %i128_bricmp_slt(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == true -; run: %i128_bricmp_slt(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == true -; run: %i128_bricmp_slt(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == false - -function %i128_bricmp_ult(i128, i128) -> b1 { +; run: %i128_bricmp_slt(0, 0) == 0 +; run: %i128_bricmp_slt(1, 1) == 0 +; run: %i128_bricmp_slt(0, 1) == 1 +; run: %i128_bricmp_slt(-1, 0) == 1 +; run: %i128_bricmp_slt(0, -1) == 0 +; run: %i128_bricmp_slt(-1, -1) == 0 +; run: %i128_bricmp_slt(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 1 +; run: %i128_bricmp_slt(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 1 +; run: %i128_bricmp_slt(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 0 + +function %i128_bricmp_ult(i128, i128) -> i8 { block0(v0: i128,v1: i128): br_icmp.i128 ult v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %i128_bricmp_ult(0, 0) == false -; run: %i128_bricmp_ult(1, 1) == false -; run: %i128_bricmp_ult(0, 1) == true -; run: %i128_bricmp_ult(-1, 0) == false -; run: %i128_bricmp_ult(0, -1) == true -; run: %i128_bricmp_ult(-1, -1) == false -; run: %i128_bricmp_ult(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == true -; run: %i128_bricmp_ult(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == true -; run: %i128_bricmp_ult(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == false - -function %i128_bricmp_sle(i128, i128) -> b1 { +; run: %i128_bricmp_ult(0, 0) == 0 +; run: %i128_bricmp_ult(1, 1) == 0 +; run: %i128_bricmp_ult(0, 1) == 1 +; run: %i128_bricmp_ult(-1, 0) == 0 +; run: %i128_bricmp_ult(0, -1) == 1 +; run: %i128_bricmp_ult(-1, -1) == 0 +; run: 
%i128_bricmp_ult(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 1 +; run: %i128_bricmp_ult(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 1 +; run: %i128_bricmp_ult(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 0 + +function %i128_bricmp_sle(i128, i128) -> i8 { block0(v0: i128,v1: i128): br_icmp.i128 sle v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %i128_bricmp_sle(0, 0) == true -; run: %i128_bricmp_sle(1, 1) == true -; run: %i128_bricmp_sle(0, 1) == true -; run: %i128_bricmp_sle(-1, 0) == true -; run: %i128_bricmp_sle(0, -1) == false -; run: %i128_bricmp_sle(-1, -1) == true -; run: %i128_bricmp_sle(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == true -; run: %i128_bricmp_sle(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == true -; run: %i128_bricmp_sle(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == false - -function %i128_bricmp_ule(i128, i128) -> b1 { +; run: %i128_bricmp_sle(0, 0) == 1 +; run: %i128_bricmp_sle(1, 1) == 1 +; run: %i128_bricmp_sle(0, 1) == 1 +; run: %i128_bricmp_sle(-1, 0) == 1 +; run: %i128_bricmp_sle(0, -1) == 0 +; run: %i128_bricmp_sle(-1, -1) == 1 +; run: %i128_bricmp_sle(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 1 +; run: %i128_bricmp_sle(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 1 +; run: %i128_bricmp_sle(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 0 + +function %i128_bricmp_ule(i128, i128) -> i8 { block0(v0: i128,v1: i128): br_icmp.i128 ule v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %i128_bricmp_ule(0, 0) == true -; run: %i128_bricmp_ule(1, 1) == true -; run: %i128_bricmp_ule(0, 1) == true -; run: %i128_bricmp_ule(-1, 0) == false -; run: %i128_bricmp_ule(0, -1) == true -; run: %i128_bricmp_ule(-1, -1) == true -; run: %i128_bricmp_ule(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == true -; run: %i128_bricmp_ule(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == true -; run: %i128_bricmp_ule(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == false - -function %i128_bricmp_sgt(i128, i128) -> b1 { +; run: %i128_bricmp_ule(0, 0) == 1 +; run: %i128_bricmp_ule(1, 1) == 1 +; run: %i128_bricmp_ule(0, 1) == 1 +; run: %i128_bricmp_ule(-1, 0) == 0 +; run: %i128_bricmp_ule(0, -1) == 1 +; run: %i128_bricmp_ule(-1, -1) == 1 +; run: %i128_bricmp_ule(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 1 +; run: %i128_bricmp_ule(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 1 +; run: %i128_bricmp_ule(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 0 + +function %i128_bricmp_sgt(i128, i128) -> i8 { block0(v0: i128,v1: i128): br_icmp.i128 sgt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %i128_bricmp_sgt(0, 0) == false -; run: %i128_bricmp_sgt(1, 1) == false -; run: %i128_bricmp_sgt(0, 1) == false -; run: %i128_bricmp_sgt(-1, 0) == false -; run: 
%i128_bricmp_sgt(0, -1) == true -; run: %i128_bricmp_sgt(-1, -1) == false -; run: %i128_bricmp_sgt(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == false -; run: %i128_bricmp_sgt(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == false -; run: %i128_bricmp_sgt(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == true - -function %i128_bricmp_ugt(i128, i128) -> b1 { +; run: %i128_bricmp_sgt(0, 0) == 0 +; run: %i128_bricmp_sgt(1, 1) == 0 +; run: %i128_bricmp_sgt(0, 1) == 0 +; run: %i128_bricmp_sgt(-1, 0) == 0 +; run: %i128_bricmp_sgt(0, -1) == 1 +; run: %i128_bricmp_sgt(-1, -1) == 0 +; run: %i128_bricmp_sgt(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 0 +; run: %i128_bricmp_sgt(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 0 +; run: %i128_bricmp_sgt(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 1 + +function %i128_bricmp_ugt(i128, i128) -> i8 { block0(v0: i128,v1: i128): br_icmp.i128 ugt v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %i128_bricmp_ugt(0, 0) == false -; run: %i128_bricmp_ugt(1, 1) == false -; run: %i128_bricmp_ugt(0, 1) == false -; run: %i128_bricmp_ugt(-1, 0) == true -; run: %i128_bricmp_ugt(0, -1) == false -; run: %i128_bricmp_ugt(-1, -1) == false -; run: %i128_bricmp_ugt(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == false -; run: %i128_bricmp_ugt(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == false -; run: %i128_bricmp_ugt(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == true - -function %i128_bricmp_sge(i128, i128) -> b1 { +; run: %i128_bricmp_ugt(0, 0) == 0 +; run: %i128_bricmp_ugt(1, 1) == 0 +; run: %i128_bricmp_ugt(0, 1) == 0 +; run: %i128_bricmp_ugt(-1, 0) == 1 +; run: %i128_bricmp_ugt(0, -1) == 0 +; run: %i128_bricmp_ugt(-1, -1) == 0 +; run: %i128_bricmp_ugt(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 0 +; run: %i128_bricmp_ugt(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 0 +; run: %i128_bricmp_ugt(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 1 + +function %i128_bricmp_sge(i128, i128) -> i8 { block0(v0: i128,v1: i128): br_icmp.i128 sge v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %i128_bricmp_sge(0, 0) == true -; run: %i128_bricmp_sge(1, 1) == true -; run: %i128_bricmp_sge(0, 1) == false -; run: %i128_bricmp_sge(-1, 0) == false -; run: %i128_bricmp_sge(0, -1) == true -; run: %i128_bricmp_sge(-1, -1) == true -; run: %i128_bricmp_sge(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == false -; run: %i128_bricmp_sge(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == false -; run: %i128_bricmp_sge(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == true - -function %i128_bricmp_uge(i128, i128) -> b1 { +; run: %i128_bricmp_sge(0, 0) == 1 +; run: %i128_bricmp_sge(1, 1) == 1 +; run: %i128_bricmp_sge(0, 1) == 0 +; run: %i128_bricmp_sge(-1, 0) == 0 +; run: %i128_bricmp_sge(0, -1) == 1 +; run: %i128_bricmp_sge(-1, -1) == 1 +; run: 
%i128_bricmp_sge(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 0 +; run: %i128_bricmp_sge(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 0 +; run: %i128_bricmp_sge(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 1 + +function %i128_bricmp_uge(i128, i128) -> i8 { block0(v0: i128,v1: i128): br_icmp.i128 uge v0, v1, block2 jump block1 block1: - v2 = bconst.b1 false + v2 = iconst.i8 0 return v2 block2: - v3 = bconst.b1 true + v3 = iconst.i8 1 return v3 } -; run: %i128_bricmp_uge(0, 0) == true -; run: %i128_bricmp_uge(1, 1) == true -; run: %i128_bricmp_uge(0, 1) == false -; run: %i128_bricmp_uge(-1, 0) == true -; run: %i128_bricmp_uge(0, -1) == false -; run: %i128_bricmp_uge(-1, -1) == true -; run: %i128_bricmp_uge(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == false -; run: %i128_bricmp_uge(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == false -; run: %i128_bricmp_uge(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == true +; run: %i128_bricmp_uge(0, 0) == 1 +; run: %i128_bricmp_uge(1, 1) == 1 +; run: %i128_bricmp_uge(0, 1) == 0 +; run: %i128_bricmp_uge(-1, 0) == 1 +; run: %i128_bricmp_uge(0, -1) == 0 +; run: %i128_bricmp_uge(-1, -1) == 1 +; run: %i128_bricmp_uge(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 0 +; run: %i128_bricmp_uge(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 0 +; run: %i128_bricmp_uge(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 1 diff --git a/cranelift/filetests/filetests/runtests/i128-icmp.clif b/cranelift/filetests/filetests/runtests/i128-icmp.clif index 8b8c679d68d1..6469eaeee344 100644 --- a/cranelift/filetests/filetests/runtests/i128-icmp.clif +++ b/cranelift/filetests/filetests/runtests/i128-icmp.clif @@ -6,172 +6,172 @@ target s390x target x86_64 target riscv64 -function %icmp_eq_i128(i128, i128) -> b1 { +function %icmp_eq_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 eq v0, v1 return v2 } -; run: %icmp_eq_i128(0, 0) == true -; run: %icmp_eq_i128(-1, -1) == true -; run: %icmp_eq_i128(-1, 0) == false -; run: %icmp_eq_i128(-1, 0x00000000_00000000_FFFFFFFF_FFFFFFFF) == false -; run: %icmp_eq_i128(0x00000000_00000000_FFFFFFFF_FFFFFFFF, -1) == false -; run: %icmp_eq_i128(0xFFFFFFFF_FFFFFFFF_00000000_00000000, -1) == false -; run: %icmp_eq_i128(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == true -; run: %icmp_eq_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == false -; run: %icmp_eq_i128(0x00000000_00000001_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == false +; run: %icmp_eq_i128(0, 0) == 1 +; run: %icmp_eq_i128(-1, -1) == 1 +; run: %icmp_eq_i128(-1, 0) == 0 +; run: %icmp_eq_i128(-1, 0x00000000_00000000_FFFFFFFF_FFFFFFFF) == 0 +; run: %icmp_eq_i128(0x00000000_00000000_FFFFFFFF_FFFFFFFF, -1) == 0 +; run: %icmp_eq_i128(0xFFFFFFFF_FFFFFFFF_00000000_00000000, -1) == 0 +; run: %icmp_eq_i128(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == 1 +; run: %icmp_eq_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == 0 +; run: %icmp_eq_i128(0x00000000_00000001_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == 0 ; This is a regression test for aarch64, see: 
https://github.com/bytecodealliance/wasmtime/issues/4705 -; run: %icmp_eq_i128(36893488147419103231, 0) == false +; run: %icmp_eq_i128(36893488147419103231, 0) == 0 -function %icmp_ne_i128(i128, i128) -> b1 { +function %icmp_ne_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 ne v0, v1 return v2 } -; run: %icmp_ne_i128(0, 0) == false -; run: %icmp_ne_i128(-1, -1) == false -; run: %icmp_ne_i128(-1, 0) == true -; run: %icmp_ne_i128(-1, 0x00000000_00000000_FFFFFFFF_FFFFFF) == true -; run: %icmp_ne_i128(0x00000000_00000000_FFFFFFFF_FFFFFFFF, -1) == true -; run: %icmp_ne_i128(0xFFFFFFFF_FFFFFFFF_00000000_00000000, -1) == true -; run: %icmp_ne_i128(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == false -; run: %icmp_ne_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == true -; run: %icmp_ne_i128(0x00000000_00000001_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == true +; run: %icmp_ne_i128(0, 0) == 0 +; run: %icmp_ne_i128(-1, -1) == 0 +; run: %icmp_ne_i128(-1, 0) == 1 +; run: %icmp_ne_i128(-1, 0x00000000_00000000_FFFFFFFF_FFFFFF) == 1 +; run: %icmp_ne_i128(0x00000000_00000000_FFFFFFFF_FFFFFFFF, -1) == 1 +; run: %icmp_ne_i128(0xFFFFFFFF_FFFFFFFF_00000000_00000000, -1) == 1 +; run: %icmp_ne_i128(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == 0 +; run: %icmp_ne_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == 1 +; run: %icmp_ne_i128(0x00000000_00000001_FFFFFFFF_FFFFFFFF, 0x00000000_00000001_00000000_00000001) == 1 -function %icmp_slt_i128(i128, i128) -> b1 { +function %icmp_slt_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 slt v0, v1 return v2 } -; run: %icmp_slt_i128(0, 0) == false -; run: %icmp_slt_i128(1, 1) == false -; run: %icmp_slt_i128(0, 1) == true -; run: %icmp_slt_i128(-1, 0) == true -; run: %icmp_slt_i128(0, -1) == false -; run: %icmp_slt_i128(-1, -1) == false -; run: %icmp_slt_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == true -; run: %icmp_slt_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == true -; run: %icmp_slt_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == false - - -function %icmp_ult_i128(i128, i128) -> b1 { +; run: %icmp_slt_i128(0, 0) == 0 +; run: %icmp_slt_i128(1, 1) == 0 +; run: %icmp_slt_i128(0, 1) == 1 +; run: %icmp_slt_i128(-1, 0) == 1 +; run: %icmp_slt_i128(0, -1) == 0 +; run: %icmp_slt_i128(-1, -1) == 0 +; run: %icmp_slt_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 1 +; run: %icmp_slt_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 1 +; run: %icmp_slt_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 0 + + +function %icmp_ult_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 ult v0, v1 return v2 } -; run: %icmp_ult_i128(0, 0) == false -; run: %icmp_ult_i128(1, 1) == false -; run: %icmp_ult_i128(0, 1) == true -; run: %icmp_ult_i128(-1, 0) == false -; run: %icmp_ult_i128(0, -1) == true -; run: %icmp_ult_i128(-1, -1) == false -; run: %icmp_ult_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == true -; run: %icmp_ult_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == true -; run: %icmp_ult_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 
false - - -function %icmp_sle_i128(i128, i128) -> b1 { +; run: %icmp_ult_i128(0, 0) == 0 +; run: %icmp_ult_i128(1, 1) == 0 +; run: %icmp_ult_i128(0, 1) == 1 +; run: %icmp_ult_i128(-1, 0) == 0 +; run: %icmp_ult_i128(0, -1) == 1 +; run: %icmp_ult_i128(-1, -1) == 0 +; run: %icmp_ult_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 1 +; run: %icmp_ult_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 1 +; run: %icmp_ult_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 0 + + +function %icmp_sle_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 sle v0, v1 return v2 } -; run: %icmp_sle_i128(0, 0) == true -; run: %icmp_sle_i128(1, 1) == true -; run: %icmp_sle_i128(0, 1) == true -; run: %icmp_sle_i128(-1, 0) == true -; run: %icmp_sle_i128(0, -1) == false -; run: %icmp_sle_i128(-1, -1) == true -; run: %icmp_sle_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == true -; run: %icmp_sle_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == true -; run: %icmp_sle_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == false - - -function %icmp_ule_i128(i128, i128) -> b1 { +; run: %icmp_sle_i128(0, 0) == 1 +; run: %icmp_sle_i128(1, 1) == 1 +; run: %icmp_sle_i128(0, 1) == 1 +; run: %icmp_sle_i128(-1, 0) == 1 +; run: %icmp_sle_i128(0, -1) == 0 +; run: %icmp_sle_i128(-1, -1) == 1 +; run: %icmp_sle_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 1 +; run: %icmp_sle_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 1 +; run: %icmp_sle_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 0 + + +function %icmp_ule_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 ule v0, v1 return v2 } -; run: %icmp_ule_i128(0, 0) == true -; run: %icmp_ule_i128(1, 1) == true -; run: %icmp_ule_i128(0, 1) == true -; run: %icmp_ule_i128(-1, 0) == false -; run: %icmp_ule_i128(0, -1) == true -; run: %icmp_ule_i128(-1, -1) == true -; run: %icmp_ule_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == true -; run: %icmp_ule_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == true -; run: %icmp_ule_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == false - - -function %icmp_sgt_i128(i128, i128) -> b1 { +; run: %icmp_ule_i128(0, 0) == 1 +; run: %icmp_ule_i128(1, 1) == 1 +; run: %icmp_ule_i128(0, 1) == 1 +; run: %icmp_ule_i128(-1, 0) == 0 +; run: %icmp_ule_i128(0, -1) == 1 +; run: %icmp_ule_i128(-1, -1) == 1 +; run: %icmp_ule_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 1 +; run: %icmp_ule_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 1 +; run: %icmp_ule_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 0 + + +function %icmp_sgt_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 sgt v0, v1 return v2 } -; run: %icmp_sgt_i128(0, 0) == false -; run: %icmp_sgt_i128(1, 1) == false -; run: %icmp_sgt_i128(0, 1) == false -; run: %icmp_sgt_i128(-1, 0) == false -; run: %icmp_sgt_i128(0, -1) == true -; run: %icmp_sgt_i128(-1, -1) == false -; run: %icmp_sgt_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == false -; run: 
%icmp_sgt_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == false -; run: %icmp_sgt_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == true - - -function %icmp_ugt_i128(i128, i128) -> b1 { +; run: %icmp_sgt_i128(0, 0) == 0 +; run: %icmp_sgt_i128(1, 1) == 0 +; run: %icmp_sgt_i128(0, 1) == 0 +; run: %icmp_sgt_i128(-1, 0) == 0 +; run: %icmp_sgt_i128(0, -1) == 1 +; run: %icmp_sgt_i128(-1, -1) == 0 +; run: %icmp_sgt_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 0 +; run: %icmp_sgt_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 0 +; run: %icmp_sgt_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 1 + + +function %icmp_ugt_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 ugt v0, v1 return v2 } -; run: %icmp_ugt_i128(0, 0) == false -; run: %icmp_ugt_i128(1, 1) == false -; run: %icmp_ugt_i128(0, 1) == false -; run: %icmp_ugt_i128(-1, 0) == true -; run: %icmp_ugt_i128(0, -1) == false -; run: %icmp_ugt_i128(-1, -1) == false -; run: %icmp_ugt_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == false -; run: %icmp_ugt_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == false -; run: %icmp_ugt_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == true - - -function %icmp_sge_i128(i128, i128) -> b1 { +; run: %icmp_ugt_i128(0, 0) == 0 +; run: %icmp_ugt_i128(1, 1) == 0 +; run: %icmp_ugt_i128(0, 1) == 0 +; run: %icmp_ugt_i128(-1, 0) == 1 +; run: %icmp_ugt_i128(0, -1) == 0 +; run: %icmp_ugt_i128(-1, -1) == 0 +; run: %icmp_ugt_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 0 +; run: %icmp_ugt_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 0 +; run: %icmp_ugt_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 1 + + +function %icmp_sge_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 sge v0, v1 return v2 } -; run: %icmp_sge_i128(0, 0) == true -; run: %icmp_sge_i128(1, 1) == true -; run: %icmp_sge_i128(0, 1) == false -; run: %icmp_sge_i128(-1, 0) == false -; run: %icmp_sge_i128(0, -1) == true -; run: %icmp_sge_i128(-1, -1) == true -; run: %icmp_sge_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == false -; run: %icmp_sge_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == false -; run: %icmp_sge_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == true - - -function %icmp_uge_i128(i128, i128) -> b1 { +; run: %icmp_sge_i128(0, 0) == 1 +; run: %icmp_sge_i128(1, 1) == 1 +; run: %icmp_sge_i128(0, 1) == 0 +; run: %icmp_sge_i128(-1, 0) == 0 +; run: %icmp_sge_i128(0, -1) == 1 +; run: %icmp_sge_i128(-1, -1) == 1 +; run: %icmp_sge_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 0 +; run: %icmp_sge_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 0 +; run: %icmp_sge_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 1 + + +function %icmp_uge_i128(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = icmp.i128 uge v0, v1 return v2 } -; run: %icmp_uge_i128(0, 0) == true -; run: %icmp_uge_i128(1, 1) == true -; run: %icmp_uge_i128(0, 1) == false -; run: %icmp_uge_i128(-1, 0) == true -; 
run: %icmp_uge_i128(0, -1) == false -; run: %icmp_uge_i128(-1, -1) == true -; run: %icmp_uge_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == false -; run: %icmp_uge_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == false -; run: %icmp_uge_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == true +; run: %icmp_uge_i128(0, 0) == 1 +; run: %icmp_uge_i128(1, 1) == 1 +; run: %icmp_uge_i128(0, 1) == 0 +; run: %icmp_uge_i128(-1, 0) == 1 +; run: %icmp_uge_i128(0, -1) == 0 +; run: %icmp_uge_i128(-1, -1) == 1 +; run: %icmp_uge_i128(0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFD, 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF) == 0 +; run: %icmp_uge_i128(0xC0FFEEEE_C0FFEEEE_00000000_00000000, 0xDECAFFFF_DECAFFFF_00000000_00000000) == 0 +; run: %icmp_uge_i128(0xDECAFFFF_DECAFFFF_00000000_00000000, 0xC0FFEEEE_C0FFEEEE_00000000_00000000) == 1 ; Icmp Imm Tests -function %icmp_imm_eq_i128() -> b1 { +function %icmp_imm_eq_i128() -> i8 { block0: v11 = iconst.i64 0x0 v12 = iconst.i64 0x0 @@ -180,9 +180,9 @@ block0: return v10 } -; run: %icmp_imm_eq_i128() == true +; run: %icmp_imm_eq_i128() == 1 -function %icmp_imm_ne_i128() -> b1 { +function %icmp_imm_ne_i128() -> i8 { block0: v11 = iconst.i64 0x0 v12 = iconst.i64 0x0 @@ -191,4 +191,4 @@ block0: return v10 } -; run: %icmp_imm_ne_i128() == true +; run: %icmp_imm_ne_i128() == 1 diff --git a/cranelift/filetests/filetests/runtests/i128-load-store.clif b/cranelift/filetests/filetests/runtests/i128-load-store.clif index 70607485eef6..a5cd79a58d53 100644 --- a/cranelift/filetests/filetests/runtests/i128-load-store.clif +++ b/cranelift/filetests/filetests/runtests/i128-load-store.clif @@ -7,7 +7,7 @@ target aarch64 target riscv64 target s390x -function %i128_stack_store_load(i128) -> b1 { +function %i128_stack_store_load(i128) -> i8 { ss0 = explicit_slot 16 block0(v0: i128): @@ -17,16 +17,16 @@ block0(v0: i128): v2 = icmp.i128 eq v0, v1 return v2 } -; run: %i128_stack_store_load(0) == true -; run: %i128_stack_store_load(-1) == true -; run: %i128_stack_store_load(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == true -; run: %i128_stack_store_load(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == true -; run: %i128_stack_store_load(0xFEDCBA98_76543210_01234567_89ABCDEF) == true -; run: %i128_stack_store_load(0xA00A00A0_0A00A00A_06060606_06060606) == true -; run: %i128_stack_store_load(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == true +; run: %i128_stack_store_load(0) == 1 +; run: %i128_stack_store_load(-1) == 1 +; run: %i128_stack_store_load(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == 1 +; run: %i128_stack_store_load(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == 1 +; run: %i128_stack_store_load(0xFEDCBA98_76543210_01234567_89ABCDEF) == 1 +; run: %i128_stack_store_load(0xA00A00A0_0A00A00A_06060606_06060606) == 1 +; run: %i128_stack_store_load(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == 1 -function %i128_stack_store_load_inst_offset(i128) -> b1 { +function %i128_stack_store_load_inst_offset(i128) -> i8 { ss0 = explicit_slot 16 ss1 = explicit_slot 16 ss2 = explicit_slot 16 @@ -38,18 +38,18 @@ block0(v0: i128): v2 = icmp.i128 eq v0, v1 return v2 } -; run: %i128_stack_store_load_inst_offset(0) == true -; run: %i128_stack_store_load_inst_offset(-1) == true -; run: %i128_stack_store_load_inst_offset(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == true -; run: %i128_stack_store_load_inst_offset(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == true -; run: 
%i128_stack_store_load_inst_offset(0xFEDCBA98_76543210_01234567_89ABCDEF) == true -; run: %i128_stack_store_load_inst_offset(0xA00A00A0_0A00A00A_06060606_06060606) == true -; run: %i128_stack_store_load_inst_offset(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == true +; run: %i128_stack_store_load_inst_offset(0) == 1 +; run: %i128_stack_store_load_inst_offset(-1) == 1 +; run: %i128_stack_store_load_inst_offset(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == 1 +; run: %i128_stack_store_load_inst_offset(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == 1 +; run: %i128_stack_store_load_inst_offset(0xFEDCBA98_76543210_01234567_89ABCDEF) == 1 +; run: %i128_stack_store_load_inst_offset(0xA00A00A0_0A00A00A_06060606_06060606) == 1 +; run: %i128_stack_store_load_inst_offset(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == 1 ; Some arches (aarch64) try to encode the offset into the load/store instructions ; test that we spill if the offset is too large and doesn't fit in the instruction -function %i128_stack_store_load_big_offset(i128) -> b1 { +function %i128_stack_store_load_big_offset(i128) -> i8 { ss0 = explicit_slot 100000 ss1 = explicit_slot 8 @@ -60,17 +60,17 @@ block0(v0: i128): v2 = icmp.i128 eq v0, v1 return v2 } -; run: %i128_stack_store_load_big_offset(0) == true -; run: %i128_stack_store_load_big_offset(-1) == true -; run: %i128_stack_store_load_big_offset(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == true -; run: %i128_stack_store_load_big_offset(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == true -; run: %i128_stack_store_load_big_offset(0xFEDCBA98_76543210_01234567_89ABCDEF) == true -; run: %i128_stack_store_load_big_offset(0xA00A00A0_0A00A00A_06060606_06060606) == true -; run: %i128_stack_store_load_big_offset(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == true +; run: %i128_stack_store_load_big_offset(0) == 1 +; run: %i128_stack_store_load_big_offset(-1) == 1 +; run: %i128_stack_store_load_big_offset(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == 1 +; run: %i128_stack_store_load_big_offset(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == 1 +; run: %i128_stack_store_load_big_offset(0xFEDCBA98_76543210_01234567_89ABCDEF) == 1 +; run: %i128_stack_store_load_big_offset(0xA00A00A0_0A00A00A_06060606_06060606) == 1 +; run: %i128_stack_store_load_big_offset(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == 1 -function %i128_store_load(i128) -> b1 { +function %i128_store_load(i128) -> i8 { ss0 = explicit_slot 16 block0(v0: i128): @@ -81,16 +81,16 @@ block0(v0: i128): v3 = icmp.i128 eq v0, v2 return v3 } -; run: %i128_store_load(0) == true -; run: %i128_store_load(-1) == true -; run: %i128_store_load(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == true -; run: %i128_store_load(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == true -; run: %i128_store_load(0xFEDCBA98_76543210_01234567_89ABCDEF) == true -; run: %i128_store_load(0xA00A00A0_0A00A00A_06060606_06060606) == true -; run: %i128_store_load(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == true +; run: %i128_store_load(0) == 1 +; run: %i128_store_load(-1) == 1 +; run: %i128_store_load(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == 1 +; run: %i128_store_load(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == 1 +; run: %i128_store_load(0xFEDCBA98_76543210_01234567_89ABCDEF) == 1 +; run: %i128_store_load(0xA00A00A0_0A00A00A_06060606_06060606) == 1 +; run: %i128_store_load(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == 1 -function %i128_store_load_offset(i128) -> b1 { +function %i128_store_load_offset(i128) -> i8 { ss0 = explicit_slot 32 block0(v0: i128): @@ -101,10 +101,10 @@ block0(v0: i128): v3 = icmp.i128 eq v0, v2 return v3 } -; run: 
%i128_store_load_offset(0) == true -; run: %i128_store_load_offset(-1) == true -; run: %i128_store_load_offset(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == true -; run: %i128_store_load_offset(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == true -; run: %i128_store_load_offset(0xFEDCBA98_76543210_01234567_89ABCDEF) == true -; run: %i128_store_load_offset(0xA00A00A0_0A00A00A_06060606_06060606) == true -; run: %i128_store_load_offset(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == true +; run: %i128_store_load_offset(0) == 1 +; run: %i128_store_load_offset(-1) == 1 +; run: %i128_store_load_offset(0x00000000_00000000_FFFFFFFF_FFFFFFFF) == 1 +; run: %i128_store_load_offset(0xFFFFFFFF_FFFFFFFF_00000000_00000000) == 1 +; run: %i128_store_load_offset(0xFEDCBA98_76543210_01234567_89ABCDEF) == 1 +; run: %i128_store_load_offset(0xA00A00A0_0A00A00A_06060606_06060606) == 1 +; run: %i128_store_load_offset(0xDECAFFFF_C0FFEEEE_C0FFEEEE_DECAFFFF) == 1 diff --git a/cranelift/filetests/filetests/runtests/i128-select.clif b/cranelift/filetests/filetests/runtests/i128-select.clif index 4a049f03beb5..e29cb8119f51 100644 --- a/cranelift/filetests/filetests/runtests/i128-select.clif +++ b/cranelift/filetests/filetests/runtests/i128-select.clif @@ -5,15 +5,15 @@ target s390x target x86_64 target riscv64 -function %i128_select(b1, i128, i128) -> i128 { -block0(v0: b1, v1: i128, v2: i128): +function %i128_select(i8, i128, i128) -> i128 { +block0(v0: i8, v1: i128, v2: i128): v3 = select.i128 v0, v1, v2 return v3 } -; run: %i128_select(true, 0, 1) == 0 -; run: %i128_select(false, 0, 1) == 1 -; run: %i128_select(true, 0x00000000_00000000_DECAFFFF_C0FFEEEE, 0xFFFFFFFF_FFFFFFFF_C0FFEEEE_DECAFFFF) == 0x00000000_00000000_DECAFFFF_C0FFEEEE -; run: %i128_select(false, 0x00000000_00000000_DECAFFFF_C0FFEEEE, 0xFFFFFFFF_FFFFFFFF_C0FFEEEE_DECAFFFF) == 0xFFFFFFFF_FFFFFFFF_C0FFEEEE_DECAFFFF +; run: %i128_select(1, 0, 1) == 0 +; run: %i128_select(0, 0, 1) == 1 +; run: %i128_select(1, 0x00000000_00000000_DECAFFFF_C0FFEEEE, 0xFFFFFFFF_FFFFFFFF_C0FFEEEE_DECAFFFF) == 0x00000000_00000000_DECAFFFF_C0FFEEEE +; run: %i128_select(0, 0x00000000_00000000_DECAFFFF_C0FFEEEE, 0xFFFFFFFF_FFFFFFFF_C0FFEEEE_DECAFFFF) == 0xFFFFFFFF_FFFFFFFF_C0FFEEEE_DECAFFFF ;; Test for issue: https://github.com/bytecodealliance/wasmtime/issues/3963. 
function %i128_fcmp_eq_select(f32, i128, i128) -> i128 { diff --git a/cranelift/filetests/filetests/runtests/iaddcarry.clif b/cranelift/filetests/filetests/runtests/iaddcarry.clif index 51389ed60c6f..18b061ebfaa4 100644 --- a/cranelift/filetests/filetests/runtests/iaddcarry.clif +++ b/cranelift/filetests/filetests/runtests/iaddcarry.clif @@ -1,97 +1,97 @@ test interpret -function %iaddcarry_i8_v(i8, i8, b1) -> i8 { -block0(v0: i8, v1: i8, v2: b1): +function %iaddcarry_i8_v(i8, i8, i8) -> i8 { +block0(v0: i8, v1: i8, v2: i8): v3, v4 = iadd_carry v0, v1, v2 return v3 } -; run: %iaddcarry_i8_v(0, 1, true) == 2 -; run: %iaddcarry_i8_v(0, 1, false) == 1 -; run: %iaddcarry_i8_v(100, 27, true) == -128 -; run: %iaddcarry_i8_v(100, 27, false) == 127 -; run: %iaddcarry_i8_v(127, 127, true) == -1 -; run: %iaddcarry_i8_v(127, 127, false) == -2 +; run: %iaddcarry_i8_v(0, 1, 1) == 2 +; run: %iaddcarry_i8_v(0, 1, 0) == 1 +; run: %iaddcarry_i8_v(100, 27, 1) == -128 +; run: %iaddcarry_i8_v(100, 27, 0) == 127 +; run: %iaddcarry_i8_v(127, 127, 1) == -1 +; run: %iaddcarry_i8_v(127, 127, 0) == -2 -function %iaddcarry_i8_c(i8, i8, b1) -> b1 { -block0(v0: i8, v1: i8, v2: b1): +function %iaddcarry_i8_c(i8, i8, i8) -> i8 { +block0(v0: i8, v1: i8, v2: i8): v3, v4 = iadd_carry v0, v1, v2 return v4 } -; run: %iaddcarry_i8_c(0, 1, true) == false -; run: %iaddcarry_i8_c(0, 1, false) == false -; run: %iaddcarry_i8_c(100, 27, true) == true -; run: %iaddcarry_i8_c(100, 27, false) == false -; run: %iaddcarry_i8_c(127, 127, true) == true -; run: %iaddcarry_i8_c(127, 127, false) == true +; run: %iaddcarry_i8_c(0, 1, 1) == 0 +; run: %iaddcarry_i8_c(0, 1, 0) == 0 +; run: %iaddcarry_i8_c(100, 27, 1) == 1 +; run: %iaddcarry_i8_c(100, 27, 0) == 0 +; run: %iaddcarry_i8_c(127, 127, 1) == 1 +; run: %iaddcarry_i8_c(127, 127, 0) == 1 -function %iaddcarry_i16_v(i16, i16, b1) -> i16 { -block0(v0: i16, v1: i16, v2: b1): +function %iaddcarry_i16_v(i16, i16, i8) -> i16 { +block0(v0: i16, v1: i16, v2: i8): v3, v4 = iadd_carry v0, v1, v2 return v3 } -; run: %iaddcarry_i16_v(0, 1, true) == 2 -; run: %iaddcarry_i16_v(0, 1, false) == 1 -; run: %iaddcarry_i16_v(100, 27, true) == 128 -; run: %iaddcarry_i16_v(100, 27, false) == 127 -; run: %iaddcarry_i16_v(32000, 767, true) == -32768 -; run: %iaddcarry_i16_v(32000, 767, false) == 32767 +; run: %iaddcarry_i16_v(0, 1, 1) == 2 +; run: %iaddcarry_i16_v(0, 1, 0) == 1 +; run: %iaddcarry_i16_v(100, 27, 1) == 128 +; run: %iaddcarry_i16_v(100, 27, 0) == 127 +; run: %iaddcarry_i16_v(32000, 767, 1) == -32768 +; run: %iaddcarry_i16_v(32000, 767, 0) == 32767 -function %iaddcarry_i16_c(i16, i16, b1) -> b1 { -block0(v0: i16, v1: i16, v2: b1): +function %iaddcarry_i16_c(i16, i16, i8) -> i8 { +block0(v0: i16, v1: i16, v2: i8): v3, v4 = iadd_carry v0, v1, v2 return v4 } -; run: %iaddcarry_i16_c(0, 1, true) == false -; run: %iaddcarry_i16_c(0, 1, false) == false -; run: %iaddcarry_i16_c(100, 27, true) == false -; run: %iaddcarry_i16_c(100, 27, false) == false -; run: %iaddcarry_i16_c(32000, 767, true) == true -; run: %iaddcarry_i16_c(32000, 767, false) == false +; run: %iaddcarry_i16_c(0, 1, 1) == 0 +; run: %iaddcarry_i16_c(0, 1, 0) == 0 +; run: %iaddcarry_i16_c(100, 27, 1) == 0 +; run: %iaddcarry_i16_c(100, 27, 0) == 0 +; run: %iaddcarry_i16_c(32000, 767, 1) == 1 +; run: %iaddcarry_i16_c(32000, 767, 0) == 0 -function %iaddcarry_i32_v(i32, i32, b1) -> i32 { -block0(v0: i32, v1: i32, v2: b1): +function %iaddcarry_i32_v(i32, i32, i8) -> i32 { +block0(v0: i32, v1: i32, v2: i8): v3, v4 = iadd_carry v0, v1, v2 return v3 
} -; run: %iaddcarry_i32_v(0, 1, true) == 2 -; run: %iaddcarry_i32_v(0, 1, false) == 1 -; run: %iaddcarry_i32_v(100, 27, true) == 128 -; run: %iaddcarry_i32_v(100, 27, false) == 127 -; run: %iaddcarry_i32_v(2000000000, 147483647, true) == -2147483648 -; run: %iaddcarry_i32_v(2000000000, 147483647, false) == 2147483647 +; run: %iaddcarry_i32_v(0, 1, 1) == 2 +; run: %iaddcarry_i32_v(0, 1, 0) == 1 +; run: %iaddcarry_i32_v(100, 27, 1) == 128 +; run: %iaddcarry_i32_v(100, 27, 0) == 127 +; run: %iaddcarry_i32_v(2000000000, 147483647, 1) == -2147483648 +; run: %iaddcarry_i32_v(2000000000, 147483647, 0) == 2147483647 -function %iaddcarry_i32_c(i32, i32, b1) -> b1 { -block0(v0: i32, v1: i32, v2: b1): +function %iaddcarry_i32_c(i32, i32, i8) -> i8 { +block0(v0: i32, v1: i32, v2: i8): v3, v4 = iadd_carry v0, v1, v2 return v4 } -; run: %iaddcarry_i32_c(0, 1, true) == false -; run: %iaddcarry_i32_c(0, 1, false) == false -; run: %iaddcarry_i32_c(100, 27, true) == false -; run: %iaddcarry_i32_c(100, 27, false) == false -; run: %iaddcarry_i32_c(2000000000, 147483647, true) == true -; run: %iaddcarry_i32_c(2000000000, 147483647, false) == false +; run: %iaddcarry_i32_c(0, 1, 1) == 0 +; run: %iaddcarry_i32_c(0, 1, 0) == 0 +; run: %iaddcarry_i32_c(100, 27, 1) == 0 +; run: %iaddcarry_i32_c(100, 27, 0) == 0 +; run: %iaddcarry_i32_c(2000000000, 147483647, 1) == 1 +; run: %iaddcarry_i32_c(2000000000, 147483647, 0) == 0 -function %iaddcarry_i64_v(i64, i64, b1) -> i64 { -block0(v0: i64, v1: i64, v2: b1): +function %iaddcarry_i64_v(i64, i64, i8) -> i64 { +block0(v0: i64, v1: i64, v2: i8): v3, v4 = iadd_carry v0, v1, v2 return v3 } -; run: %iaddcarry_i64_v(0, 1, true) == 2 -; run: %iaddcarry_i64_v(0, 1, false) == 1 -; run: %iaddcarry_i64_v(100, 27, true) == 128 -; run: %iaddcarry_i64_v(100, 27, false) == 127 -; run: %iaddcarry_i64_v(9000000000000000000, 223372036854775807, true) == -9223372036854775808 -; run: %iaddcarry_i64_v(9000000000000000000, 223372036854775807, false) == 9223372036854775807 +; run: %iaddcarry_i64_v(0, 1, 1) == 2 +; run: %iaddcarry_i64_v(0, 1, 0) == 1 +; run: %iaddcarry_i64_v(100, 27, 1) == 128 +; run: %iaddcarry_i64_v(100, 27, 0) == 127 +; run: %iaddcarry_i64_v(9000000000000000000, 223372036854775807, 1) == -9223372036854775808 +; run: %iaddcarry_i64_v(9000000000000000000, 223372036854775807, 0) == 9223372036854775807 -function %iaddcarry_i64_c(i64, i64, b1) -> b1 { -block0(v0: i64, v1: i64, v2: b1): +function %iaddcarry_i64_c(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): v3, v4 = iadd_carry v0, v1, v2 return v4 } -; run: %iaddcarry_i64_c(0, 1, true) == false -; run: %iaddcarry_i64_c(0, 1, false) == false -; run: %iaddcarry_i64_c(100, 27, true) == false -; run: %iaddcarry_i64_c(100, 27, false) == false -; run: %iaddcarry_i64_c(9000000000000000000, 223372036854775807, true) == true -; run: %iaddcarry_i64_c(9000000000000000000, 223372036854775807, false) == false +; run: %iaddcarry_i64_c(0, 1, 1) == 0 +; run: %iaddcarry_i64_c(0, 1, 0) == 0 +; run: %iaddcarry_i64_c(100, 27, 1) == 0 +; run: %iaddcarry_i64_c(100, 27, 0) == 0 +; run: %iaddcarry_i64_c(9000000000000000000, 223372036854775807, 1) == 1 +; run: %iaddcarry_i64_c(9000000000000000000, 223372036854775807, 0) == 0 diff --git a/cranelift/filetests/filetests/runtests/iaddcin.clif b/cranelift/filetests/filetests/runtests/iaddcin.clif index 8f36ee0d7001..5b185af2a88f 100644 --- a/cranelift/filetests/filetests/runtests/iaddcin.clif +++ b/cranelift/filetests/filetests/runtests/iaddcin.clif @@ -1,48 +1,48 @@ test interpret -function 
%iaddcin_i8(i8, i8, b1) -> i8 { -block0(v0: i8, v1: i8, v2: b1): +function %iaddcin_i8(i8, i8, i8) -> i8 { +block0(v0: i8, v1: i8, v2: i8): v3 = iadd_cin v0, v1, v2 return v3 } -; run: %iaddcin_i8(0, 1, true) == 2 -; run: %iaddcin_i8(0, 1, false) == 1 -; run: %iaddcin_i8(100, 27, true) == -128 -; run: %iaddcin_i8(100, 27, false) == 127 +; run: %iaddcin_i8(0, 1, 1) == 2 +; run: %iaddcin_i8(0, 1, 0) == 1 +; run: %iaddcin_i8(100, 27, 1) == -128 +; run: %iaddcin_i8(100, 27, 0) == 127 -function %iaddcin_i16(i16, i16, b1) -> i16 { -block0(v0: i16, v1: i16, v2: b1): +function %iaddcin_i16(i16, i16, i8) -> i16 { +block0(v0: i16, v1: i16, v2: i8): v3 = iadd_cin v0, v1, v2 return v3 } -; run: %iaddcin_i16(0, 1, true) == 2 -; run: %iaddcin_i16(0, 1, false) == 1 -; run: %iaddcin_i16(100, 27, true) == 128 -; run: %iaddcin_i16(100, 27, false) == 127 -; run: %iaddcin_i16(32000, 767, true) == -32768 -; run: %iaddcin_i16(32000, 767, false) == 32767 +; run: %iaddcin_i16(0, 1, 1) == 2 +; run: %iaddcin_i16(0, 1, 0) == 1 +; run: %iaddcin_i16(100, 27, 1) == 128 +; run: %iaddcin_i16(100, 27, 0) == 127 +; run: %iaddcin_i16(32000, 767, 1) == -32768 +; run: %iaddcin_i16(32000, 767, 0) == 32767 -function %iaddcin_i32(i32, i32, b1) -> i32 { -block0(v0: i32, v1: i32, v2: b1): +function %iaddcin_i32(i32, i32, i8) -> i32 { +block0(v0: i32, v1: i32, v2: i8): v3 = iadd_cin v0, v1, v2 return v3 } -; run: %iaddcin_i32(0, 1, true) == 2 -; run: %iaddcin_i32(0, 1, false) == 1 -; run: %iaddcin_i32(100, 27, true) == 128 -; run: %iaddcin_i32(100, 27, false) == 127 -; run: %iaddcin_i32(2000000000, 147483647, true) == -2147483648 -; run: %iaddcin_i32(2000000000, 147483647, false) == 2147483647 +; run: %iaddcin_i32(0, 1, 1) == 2 +; run: %iaddcin_i32(0, 1, 0) == 1 +; run: %iaddcin_i32(100, 27, 1) == 128 +; run: %iaddcin_i32(100, 27, 0) == 127 +; run: %iaddcin_i32(2000000000, 147483647, 1) == -2147483648 +; run: %iaddcin_i32(2000000000, 147483647, 0) == 2147483647 -function %iaddcin_i64(i64, i64, b1) -> i64 { -block0(v0: i64, v1: i64, v2: b1): +function %iaddcin_i64(i64, i64, i8) -> i64 { +block0(v0: i64, v1: i64, v2: i8): v3 = iadd_cin v0, v1, v2 return v3 } -; run: %iaddcin_i64(0, 1, true) == 2 -; run: %iaddcin_i64(0, 1, false) == 1 -; run: %iaddcin_i64(100, 27, true) == 128 -; run: %iaddcin_i64(100, 27, false) == 127 -; run: %iaddcin_i64(2000000000, 147483647, true) == 2147483648 -; run: %iaddcin_i64(2000000000, 147483647, false) == 2147483647 +; run: %iaddcin_i64(0, 1, 1) == 2 +; run: %iaddcin_i64(0, 1, 0) == 1 +; run: %iaddcin_i64(100, 27, 1) == 128 +; run: %iaddcin_i64(100, 27, 0) == 127 +; run: %iaddcin_i64(2000000000, 147483647, 1) == 2147483648 +; run: %iaddcin_i64(2000000000, 147483647, 0) == 2147483647 diff --git a/cranelift/filetests/filetests/runtests/iaddcout.clif b/cranelift/filetests/filetests/runtests/iaddcout.clif index 6f497b61e51d..4d8ff774b2cd 100644 --- a/cranelift/filetests/filetests/runtests/iaddcout.clif +++ b/cranelift/filetests/filetests/runtests/iaddcout.clif @@ -10,15 +10,15 @@ block0(v0: i8, v1: i8): ; run: %iaddcout_i8_v(100, -20) == 80 ; run: %iaddcout_i8_v(100, 28) == -128 -function %iaddcout_i8_c(i8, i8) -> b1 { +function %iaddcout_i8_c(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2, v3 = iadd_cout v0, v1 return v3 } -; run: %iaddcout_i8_c(0, 1) == false -; run: %iaddcout_i8_c(100, 27) == false -; run: %iaddcout_i8_c(100, -20) == false -; run: %iaddcout_i8_c(100, 28) == true +; run: %iaddcout_i8_c(0, 1) == 0 +; run: %iaddcout_i8_c(100, 27) == 0 +; run: %iaddcout_i8_c(100, -20) == 0 +; run: 
%iaddcout_i8_c(100, 28) == 1 function %iaddcout_i16_v(i16, i16) -> i16 { block0(v0: i16, v1: i16): @@ -31,16 +31,16 @@ block0(v0: i16, v1: i16): ; run: %iaddcout_i16_v(32000, 767) == 32767 ; run: %iaddcout_i16_v(32000, 768) == -32768 -function %iaddcout_i16_c(i16, i16) -> b1 { +function %iaddcout_i16_c(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2, v3 = iadd_cout v0, v1 return v3 } -; run: %iaddcout_i16_c(0, 1) == false -; run: %iaddcout_i16_c(100, 27) == false -; run: %iaddcout_i16_c(100, 28) == false -; run: %iaddcout_i16_c(32000, 767) == false -; run: %iaddcout_i16_c(32000, 768) == true +; run: %iaddcout_i16_c(0, 1) == 0 +; run: %iaddcout_i16_c(100, 27) == 0 +; run: %iaddcout_i16_c(100, 28) == 0 +; run: %iaddcout_i16_c(32000, 767) == 0 +; run: %iaddcout_i16_c(32000, 768) == 1 function %iaddcout_i32_v(i32, i32) -> i32 { block0(v0: i32, v1: i32): @@ -53,16 +53,16 @@ block0(v0: i32, v1: i32): ; run: %iaddcout_i32_v(2000000000, 147483647) == 2147483647 ; run: %iaddcout_i32_v(2000000000, 147483648) == -2147483648 -function %iaddcout_i32_c(i32, i32) -> b1 { +function %iaddcout_i32_c(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2, v3 = iadd_cout v0, v1 return v3 } -; run: %iaddcout_i32_c(0, 1) == false -; run: %iaddcout_i32_c(100, 27) == false -; run: %iaddcout_i32_c(100, 28) == false -; run: %iaddcout_i32_c(2000000000, 147483647) == false -; run: %iaddcout_i32_c(2000000000, 147483648) == true +; run: %iaddcout_i32_c(0, 1) == 0 +; run: %iaddcout_i32_c(100, 27) == 0 +; run: %iaddcout_i32_c(100, 28) == 0 +; run: %iaddcout_i32_c(2000000000, 147483647) == 0 +; run: %iaddcout_i32_c(2000000000, 147483648) == 1 function %iaddcout_i64_v(i64, i64) -> i64 { block0(v0: i64, v1: i64): @@ -75,13 +75,13 @@ block0(v0: i64, v1: i64): ; run: %iaddcout_i64_v(2000000000, 147483647) == 2147483647 ; run: %iaddcout_i64_v(2000000000, 147483648) == 2147483648 -function %iaddcout_i64_c(i64, i64) -> b1 { +function %iaddcout_i64_c(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2, v3 = iadd_cout v0, v1 return v3 } -; run: %iaddcout_i64_c(0, 1) == false -; run: %iaddcout_i64_c(100, 27) == false -; run: %iaddcout_i64_c(100, 28) == false -; run: %iaddcout_i64_c(2000000000, 147483647) == false -; run: %iaddcout_i64_c(2000000000, 147483648) == false +; run: %iaddcout_i64_c(0, 1) == 0 +; run: %iaddcout_i64_c(100, 27) == 0 +; run: %iaddcout_i64_c(100, 28) == 0 +; run: %iaddcout_i64_c(2000000000, 147483647) == 0 +; run: %iaddcout_i64_c(2000000000, 147483648) == 0 diff --git a/cranelift/filetests/filetests/runtests/icmp-eq-imm.clif b/cranelift/filetests/filetests/runtests/icmp-eq-imm.clif index 07edcd03c9cd..64795acedb73 100644 --- a/cranelift/filetests/filetests/runtests/icmp-eq-imm.clif +++ b/cranelift/filetests/filetests/runtests/icmp-eq-imm.clif @@ -5,74 +5,74 @@ target x86_64 target s390x target riscv64 -function %icmp_imm_eq_i8(i8) -> b1 { +function %icmp_imm_eq_i8(i8) -> i8 { block0(v0: i8): v1 = icmp_imm eq v0, 0x44 return v1 } -; run: %icmp_imm_eq_i8(0) == false -; run: %icmp_imm_eq_i8(-1) == false -; run: %icmp_imm_eq_i8(0x44) == true +; run: %icmp_imm_eq_i8(0) == 0 +; run: %icmp_imm_eq_i8(-1) == 0 +; run: %icmp_imm_eq_i8(0x44) == 1 -function %icmp_neg_eq_i8(i8) -> b1 { +function %icmp_neg_eq_i8(i8) -> i8 { block0(v0: i8): v1 = icmp_imm eq v0, 0xf4 return v1 } -; run: %icmp_neg_eq_i8(0) == false -; run: %icmp_neg_eq_i8(-1) == false -; run: %icmp_neg_eq_i8(0xf4) == true +; run: %icmp_neg_eq_i8(0) == 0 +; run: %icmp_neg_eq_i8(-1) == 0 +; run: %icmp_neg_eq_i8(0xf4) == 1 -function %icmp_imm_eq_i16(i16) -> b1 { +function 
%icmp_imm_eq_i16(i16) -> i8 { block0(v0: i16): v1 = icmp_imm eq v0, 0x4444 return v1 } -; run: %icmp_imm_eq_i16(0) == false -; run: %icmp_imm_eq_i16(-1) == false -; run: %icmp_imm_eq_i16(0x4444) == true +; run: %icmp_imm_eq_i16(0) == 0 +; run: %icmp_imm_eq_i16(-1) == 0 +; run: %icmp_imm_eq_i16(0x4444) == 1 -function %icmp_neg_eq_i16(i16) -> b1 { +function %icmp_neg_eq_i16(i16) -> i8 { block0(v0: i16): v1 = icmp_imm eq v0, 0xff44 return v1 } -; run: %icmp_neg_eq_i16(0) == false -; run: %icmp_neg_eq_i16(-1) == false -; run: %icmp_neg_eq_i16(0xff44) == true +; run: %icmp_neg_eq_i16(0) == 0 +; run: %icmp_neg_eq_i16(-1) == 0 +; run: %icmp_neg_eq_i16(0xff44) == 1 -function %icmp_imm_eq_i32(i32) -> b1 { +function %icmp_imm_eq_i32(i32) -> i8 { block0(v0: i32): v1 = icmp_imm eq v0, 0x4444_4444 return v1 } -; run: %icmp_imm_eq_i32(0) == false -; run: %icmp_imm_eq_i32(-1) == false -; run: %icmp_imm_eq_i32(0x4444_4444) == true +; run: %icmp_imm_eq_i32(0) == 0 +; run: %icmp_imm_eq_i32(-1) == 0 +; run: %icmp_imm_eq_i32(0x4444_4444) == 1 -function %icmp_neg_eq_i32(i32) -> b1 { +function %icmp_neg_eq_i32(i32) -> i8 { block0(v0: i32): v1 = icmp_imm eq v0, 0xffff_4444 return v1 } -; run: %icmp_neg_eq_i32(0) == false -; run: %icmp_neg_eq_i32(-1) == false -; run: %icmp_neg_eq_i32(0xffff_4444) == true +; run: %icmp_neg_eq_i32(0) == 0 +; run: %icmp_neg_eq_i32(-1) == 0 +; run: %icmp_neg_eq_i32(0xffff_4444) == 1 -function %icmp_imm_eq_i64(i64) -> b1 { +function %icmp_imm_eq_i64(i64) -> i8 { block0(v0: i64): v1 = icmp_imm eq v0, 0x4444_4444_4444_4444 return v1 } -; run: %icmp_imm_eq_i64(0) == false -; run: %icmp_imm_eq_i64(-1) == false -; run: %icmp_imm_eq_i64(0x4444_4444_4444_4444) == true +; run: %icmp_imm_eq_i64(0) == 0 +; run: %icmp_imm_eq_i64(-1) == 0 +; run: %icmp_imm_eq_i64(0x4444_4444_4444_4444) == 1 -function %icmp_neg_eq_i64(i64) -> b1 { +function %icmp_neg_eq_i64(i64) -> i8 { block0(v0: i64): v1 = icmp_imm eq v0, 0xffff_ffff_4444_4444 return v1 } -; run: %icmp_neg_eq_i64(0) == false -; run: %icmp_neg_eq_i64(-1) == false -; run: %icmp_neg_eq_i64(0xffff_ffff_4444_4444) == true +; run: %icmp_neg_eq_i64(0) == 0 +; run: %icmp_neg_eq_i64(-1) == 0 +; run: %icmp_neg_eq_i64(0xffff_ffff_4444_4444) == 1 diff --git a/cranelift/filetests/filetests/runtests/icmp-eq.clif b/cranelift/filetests/filetests/runtests/icmp-eq.clif index 1eb3fc36625c..ef8abeefe73d 100644 --- a/cranelift/filetests/filetests/runtests/icmp-eq.clif +++ b/cranelift/filetests/filetests/runtests/icmp-eq.clif @@ -5,38 +5,38 @@ target x86_64 target riscv64 target s390x -function %icmp_eq_i8(i8, i8) -> b1 { +function %icmp_eq_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = icmp eq v0, v1 return v2 } -; run: %icmp_eq_i8(0, 0) == true -; run: %icmp_eq_i8(1, 0) == false -; run: %icmp_eq_i8(-1, -1) == true +; run: %icmp_eq_i8(0, 0) == 1 +; run: %icmp_eq_i8(1, 0) == 0 +; run: %icmp_eq_i8(-1, -1) == 1 -function %icmp_eq_i16(i16, i16) -> b1 { +function %icmp_eq_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = icmp eq v0, v1 return v2 } -; run: %icmp_eq_i16(0, 0) == true -; run: %icmp_eq_i16(1, 0) == false -; run: %icmp_eq_i16(-1, -1) == true +; run: %icmp_eq_i16(0, 0) == 1 +; run: %icmp_eq_i16(1, 0) == 0 +; run: %icmp_eq_i16(-1, -1) == 1 -function %icmp_eq_i32(i32, i32) -> b1 { +function %icmp_eq_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = icmp eq v0, v1 return v2 } -; run: %icmp_eq_i32(0, 0) == true -; run: %icmp_eq_i32(1, 0) == false -; run: %icmp_eq_i32(-1, -1) == true +; run: %icmp_eq_i32(0, 0) == 1 +; run: %icmp_eq_i32(1, 0) == 0 +; run: 
%icmp_eq_i32(-1, -1) == 1 -function %icmp_eq_i64(i64, i64) -> b1 { +function %icmp_eq_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp eq v0, v1 return v2 } -; run: %icmp_eq_i64(0, 0) == true -; run: %icmp_eq_i64(1, 0) == false -; run: %icmp_eq_i64(-1, -1) == true +; run: %icmp_eq_i64(0, 0) == 1 +; run: %icmp_eq_i64(1, 0) == 0 +; run: %icmp_eq_i64(-1, -1) == 1 diff --git a/cranelift/filetests/filetests/runtests/icmp-ne.clif b/cranelift/filetests/filetests/runtests/icmp-ne.clif index fa643a2371b4..916fcf2be1e1 100644 --- a/cranelift/filetests/filetests/runtests/icmp-ne.clif +++ b/cranelift/filetests/filetests/runtests/icmp-ne.clif @@ -5,38 +5,38 @@ target x86_64 target riscv64 target s390x -function %icmp_ne_i8(i8, i8) -> b1 { +function %icmp_ne_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = icmp ne v0, v1 return v2 } -; run: %icmp_ne_i8(0, 0) == false -; run: %icmp_ne_i8(1, 0) == true -; run: %icmp_ne_i8(-1, -1) == false +; run: %icmp_ne_i8(0, 0) == 0 +; run: %icmp_ne_i8(1, 0) == 1 +; run: %icmp_ne_i8(-1, -1) == 0 -function %icmp_ne_i16(i16, i16) -> b1 { +function %icmp_ne_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = icmp ne v0, v1 return v2 } -; run: %icmp_ne_i16(0, 0) == false -; run: %icmp_ne_i16(1, 0) == true -; run: %icmp_ne_i16(-1, -1) == false +; run: %icmp_ne_i16(0, 0) == 0 +; run: %icmp_ne_i16(1, 0) == 1 +; run: %icmp_ne_i16(-1, -1) == 0 -function %icmp_ne_i32(i32, i32) -> b1 { +function %icmp_ne_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = icmp ne v0, v1 return v2 } -; run: %icmp_ne_i32(0, 0) == false -; run: %icmp_ne_i32(1, 0) == true -; run: %icmp_ne_i32(-1, -1) == false +; run: %icmp_ne_i32(0, 0) == 0 +; run: %icmp_ne_i32(1, 0) == 1 +; run: %icmp_ne_i32(-1, -1) == 0 -function %icmp_ne_i64(i64, i64) -> b1 { +function %icmp_ne_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp ne v0, v1 return v2 } -; run: %icmp_ne_i64(0, 0) == false -; run: %icmp_ne_i64(1, 0) == true -; run: %icmp_ne_i64(-1, -1) == false +; run: %icmp_ne_i64(0, 0) == 0 +; run: %icmp_ne_i64(1, 0) == 1 +; run: %icmp_ne_i64(-1, -1) == 0 diff --git a/cranelift/filetests/filetests/runtests/icmp-sge.clif b/cranelift/filetests/filetests/runtests/icmp-sge.clif index cb8eb2a4f940..f02bf9b2fce9 100644 --- a/cranelift/filetests/filetests/runtests/icmp-sge.clif +++ b/cranelift/filetests/filetests/runtests/icmp-sge.clif @@ -6,50 +6,50 @@ target riscv64 target s390x -function %icmp_sge_i8(i8, i8) -> b1 { +function %icmp_sge_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = icmp sge v0, v1 return v2 } -; run: %icmp_sge_i8(0, 0) == true -; run: %icmp_sge_i8(1, 0) == true -; run: %icmp_sge_i8(-1, -1) == true -; run: %icmp_sge_i8(0, 1) == false -; run: %icmp_sge_i8(-5, -1) == false -; run: %icmp_sge_i8(1, -1) == true +; run: %icmp_sge_i8(0, 0) == 1 +; run: %icmp_sge_i8(1, 0) == 1 +; run: %icmp_sge_i8(-1, -1) == 1 +; run: %icmp_sge_i8(0, 1) == 0 +; run: %icmp_sge_i8(-5, -1) == 0 +; run: %icmp_sge_i8(1, -1) == 1 -function %icmp_sge_i16(i16, i16) -> b1 { +function %icmp_sge_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = icmp sge v0, v1 return v2 } -; run: %icmp_sge_i16(0, 0) == true -; run: %icmp_sge_i16(1, 0) == true -; run: %icmp_sge_i16(-1, -1) == true -; run: %icmp_sge_i16(0, 1) == false -; run: %icmp_sge_i16(-5, -1) == false -; run: %icmp_sge_i16(1, -1) == true +; run: %icmp_sge_i16(0, 0) == 1 +; run: %icmp_sge_i16(1, 0) == 1 +; run: %icmp_sge_i16(-1, -1) == 1 +; run: %icmp_sge_i16(0, 1) == 0 +; run: %icmp_sge_i16(-5, -1) == 0 +; run: %icmp_sge_i16(1, -1) == 1 -function %icmp_sge_i32(i32, 
i32) -> b1 { +function %icmp_sge_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = icmp sge v0, v1 return v2 } -; run: %icmp_sge_i32(0, 0) == true -; run: %icmp_sge_i32(1, 0) == true -; run: %icmp_sge_i32(-1, -1) == true -; run: %icmp_sge_i32(0, 1) == false -; run: %icmp_sge_i32(-5, -1) == false -; run: %icmp_sge_i32(1, -1) == true +; run: %icmp_sge_i32(0, 0) == 1 +; run: %icmp_sge_i32(1, 0) == 1 +; run: %icmp_sge_i32(-1, -1) == 1 +; run: %icmp_sge_i32(0, 1) == 0 +; run: %icmp_sge_i32(-5, -1) == 0 +; run: %icmp_sge_i32(1, -1) == 1 -function %icmp_sge_i64(i64, i64) -> b1 { +function %icmp_sge_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp sge v0, v1 return v2 } -; run: %icmp_sge_i64(0, 0) == true -; run: %icmp_sge_i64(1, 0) == true -; run: %icmp_sge_i64(-1, -1) == true -; run: %icmp_sge_i64(0, 1) == false -; run: %icmp_sge_i64(-5, -1) == false -; run: %icmp_sge_i64(1, -1) == true +; run: %icmp_sge_i64(0, 0) == 1 +; run: %icmp_sge_i64(1, 0) == 1 +; run: %icmp_sge_i64(-1, -1) == 1 +; run: %icmp_sge_i64(0, 1) == 0 +; run: %icmp_sge_i64(-5, -1) == 0 +; run: %icmp_sge_i64(1, -1) == 1 diff --git a/cranelift/filetests/filetests/runtests/icmp-sgt.clif b/cranelift/filetests/filetests/runtests/icmp-sgt.clif index 4c39f598d901..e52e9c2b19a1 100644 --- a/cranelift/filetests/filetests/runtests/icmp-sgt.clif +++ b/cranelift/filetests/filetests/runtests/icmp-sgt.clif @@ -6,50 +6,50 @@ target riscv64 target s390x -function %icmp_sgt_i8(i8, i8) -> b1 { +function %icmp_sgt_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = icmp sgt v0, v1 return v2 } -; run: %icmp_sgt_i8(0, 0) == false -; run: %icmp_sgt_i8(1, 0) == true -; run: %icmp_sgt_i8(-1, -1) == false -; run: %icmp_sgt_i8(0, 1) == false -; run: %icmp_sgt_i8(-5, -1) == false -; run: %icmp_sgt_i8(1, -1) == true +; run: %icmp_sgt_i8(0, 0) == 0 +; run: %icmp_sgt_i8(1, 0) == 1 +; run: %icmp_sgt_i8(-1, -1) == 0 +; run: %icmp_sgt_i8(0, 1) == 0 +; run: %icmp_sgt_i8(-5, -1) == 0 +; run: %icmp_sgt_i8(1, -1) == 1 -function %icmp_sgt_i16(i16, i16) -> b1 { +function %icmp_sgt_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = icmp sgt v0, v1 return v2 } -; run: %icmp_sgt_i16(0, 0) == false -; run: %icmp_sgt_i16(1, 0) == true -; run: %icmp_sgt_i16(-1, -1) == false -; run: %icmp_sgt_i16(0, 1) == false -; run: %icmp_sgt_i16(-5, -1) == false -; run: %icmp_sgt_i16(1, -1) == true +; run: %icmp_sgt_i16(0, 0) == 0 +; run: %icmp_sgt_i16(1, 0) == 1 +; run: %icmp_sgt_i16(-1, -1) == 0 +; run: %icmp_sgt_i16(0, 1) == 0 +; run: %icmp_sgt_i16(-5, -1) == 0 +; run: %icmp_sgt_i16(1, -1) == 1 -function %icmp_sgt_i32(i32, i32) -> b1 { +function %icmp_sgt_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = icmp sgt v0, v1 return v2 } -; run: %icmp_sgt_i32(0, 0) == false -; run: %icmp_sgt_i32(1, 0) == true -; run: %icmp_sgt_i32(-1, -1) == false -; run: %icmp_sgt_i32(0, 1) == false -; run: %icmp_sgt_i32(-5, -1) == false -; run: %icmp_sgt_i32(1, -1) == true +; run: %icmp_sgt_i32(0, 0) == 0 +; run: %icmp_sgt_i32(1, 0) == 1 +; run: %icmp_sgt_i32(-1, -1) == 0 +; run: %icmp_sgt_i32(0, 1) == 0 +; run: %icmp_sgt_i32(-5, -1) == 0 +; run: %icmp_sgt_i32(1, -1) == 1 -function %icmp_sgt_i64(i64, i64) -> b1 { +function %icmp_sgt_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp sgt v0, v1 return v2 } -; run: %icmp_sgt_i64(0, 0) == false -; run: %icmp_sgt_i64(1, 0) == true -; run: %icmp_sgt_i64(-1, -1) == false -; run: %icmp_sgt_i64(0, 1) == false -; run: %icmp_sgt_i64(-5, -1) == false -; run: %icmp_sgt_i64(1, -1) == true +; run: %icmp_sgt_i64(0, 0) == 0 +; run: %icmp_sgt_i64(1, 
0) == 1 +; run: %icmp_sgt_i64(-1, -1) == 0 +; run: %icmp_sgt_i64(0, 1) == 0 +; run: %icmp_sgt_i64(-5, -1) == 0 +; run: %icmp_sgt_i64(1, -1) == 1 diff --git a/cranelift/filetests/filetests/runtests/icmp-sle.clif b/cranelift/filetests/filetests/runtests/icmp-sle.clif index 706d815d2791..0a3a2db73d00 100644 --- a/cranelift/filetests/filetests/runtests/icmp-sle.clif +++ b/cranelift/filetests/filetests/runtests/icmp-sle.clif @@ -6,50 +6,50 @@ target riscv64 target s390x -function %icmp_sle_i8(i8, i8) -> b1 { +function %icmp_sle_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = icmp sle v0, v1 return v2 } -; run: %icmp_sle_i8(0, 0) == true -; run: %icmp_sle_i8(1, 0) == false -; run: %icmp_sle_i8(-1, -1) == true -; run: %icmp_sle_i8(0, 1) == true -; run: %icmp_sle_i8(-5, -1) == true -; run: %icmp_sle_i8(1, -1) == false +; run: %icmp_sle_i8(0, 0) == 1 +; run: %icmp_sle_i8(1, 0) == 0 +; run: %icmp_sle_i8(-1, -1) == 1 +; run: %icmp_sle_i8(0, 1) == 1 +; run: %icmp_sle_i8(-5, -1) == 1 +; run: %icmp_sle_i8(1, -1) == 0 -function %icmp_sle_i16(i16, i16) -> b1 { +function %icmp_sle_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = icmp sle v0, v1 return v2 } -; run: %icmp_sle_i16(0, 0) == true -; run: %icmp_sle_i16(1, 0) == false -; run: %icmp_sle_i16(-1, -1) == true -; run: %icmp_sle_i16(0, 1) == true -; run: %icmp_sle_i16(-5, -1) == true -; run: %icmp_sle_i16(1, -1) == false +; run: %icmp_sle_i16(0, 0) == 1 +; run: %icmp_sle_i16(1, 0) == 0 +; run: %icmp_sle_i16(-1, -1) == 1 +; run: %icmp_sle_i16(0, 1) == 1 +; run: %icmp_sle_i16(-5, -1) == 1 +; run: %icmp_sle_i16(1, -1) == 0 -function %icmp_sle_i32(i32, i32) -> b1 { +function %icmp_sle_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = icmp sle v0, v1 return v2 } -; run: %icmp_sle_i32(0, 0) == true -; run: %icmp_sle_i32(1, 0) == false -; run: %icmp_sle_i32(-1, -1) == true -; run: %icmp_sle_i32(0, 1) == true -; run: %icmp_sle_i32(-5, -1) == true -; run: %icmp_sle_i32(1, -1) == false +; run: %icmp_sle_i32(0, 0) == 1 +; run: %icmp_sle_i32(1, 0) == 0 +; run: %icmp_sle_i32(-1, -1) == 1 +; run: %icmp_sle_i32(0, 1) == 1 +; run: %icmp_sle_i32(-5, -1) == 1 +; run: %icmp_sle_i32(1, -1) == 0 -function %icmp_sle_i64(i64, i64) -> b1 { +function %icmp_sle_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp sle v0, v1 return v2 } -; run: %icmp_sle_i64(0, 0) == true -; run: %icmp_sle_i64(1, 0) == false -; run: %icmp_sle_i64(-1, -1) == true -; run: %icmp_sle_i64(0, 1) == true -; run: %icmp_sle_i64(-5, -1) == true -; run: %icmp_sle_i64(1, -1) == false +; run: %icmp_sle_i64(0, 0) == 1 +; run: %icmp_sle_i64(1, 0) == 0 +; run: %icmp_sle_i64(-1, -1) == 1 +; run: %icmp_sle_i64(0, 1) == 1 +; run: %icmp_sle_i64(-5, -1) == 1 +; run: %icmp_sle_i64(1, -1) == 0 diff --git a/cranelift/filetests/filetests/runtests/icmp-slt.clif b/cranelift/filetests/filetests/runtests/icmp-slt.clif index 29c5d5ddc4c1..9333d80ee764 100644 --- a/cranelift/filetests/filetests/runtests/icmp-slt.clif +++ b/cranelift/filetests/filetests/runtests/icmp-slt.clif @@ -5,50 +5,50 @@ target x86_64 target riscv64 target s390x -function %icmp_slt_i8(i8, i8) -> b1 { +function %icmp_slt_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = icmp slt v0, v1 return v2 } -; run: %icmp_slt_i8(0, 0) == false -; run: %icmp_slt_i8(1, 0) == false -; run: %icmp_slt_i8(-1, -1) == false -; run: %icmp_slt_i8(0, 1) == true -; run: %icmp_slt_i8(-5, -1) == true -; run: %icmp_slt_i8(1, -1) == false +; run: %icmp_slt_i8(0, 0) == 0 +; run: %icmp_slt_i8(1, 0) == 0 +; run: %icmp_slt_i8(-1, -1) == 0 +; run: %icmp_slt_i8(0, 1) == 1 +; 
run: %icmp_slt_i8(-5, -1) == 1 +; run: %icmp_slt_i8(1, -1) == 0 -function %icmp_slt_i16(i16, i16) -> b1 { +function %icmp_slt_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = icmp slt v0, v1 return v2 } -; run: %icmp_slt_i16(0, 0) == false -; run: %icmp_slt_i16(1, 0) == false -; run: %icmp_slt_i16(-1, -1) == false -; run: %icmp_slt_i16(0, 1) == true -; run: %icmp_slt_i16(-5, -1) == true -; run: %icmp_slt_i16(1, -1) == false +; run: %icmp_slt_i16(0, 0) == 0 +; run: %icmp_slt_i16(1, 0) == 0 +; run: %icmp_slt_i16(-1, -1) == 0 +; run: %icmp_slt_i16(0, 1) == 1 +; run: %icmp_slt_i16(-5, -1) == 1 +; run: %icmp_slt_i16(1, -1) == 0 -function %icmp_slt_i32(i32, i32) -> b1 { +function %icmp_slt_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = icmp slt v0, v1 return v2 } -; run: %icmp_slt_i32(0, 0) == false -; run: %icmp_slt_i32(1, 0) == false -; run: %icmp_slt_i32(-1, -1) == false -; run: %icmp_slt_i32(0, 1) == true -; run: %icmp_slt_i32(-5, -1) == true -; run: %icmp_slt_i32(1, -1) == false +; run: %icmp_slt_i32(0, 0) == 0 +; run: %icmp_slt_i32(1, 0) == 0 +; run: %icmp_slt_i32(-1, -1) == 0 +; run: %icmp_slt_i32(0, 1) == 1 +; run: %icmp_slt_i32(-5, -1) == 1 +; run: %icmp_slt_i32(1, -1) == 0 -function %icmp_slt_i64(i64, i64) -> b1 { +function %icmp_slt_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp slt v0, v1 return v2 } -; run: %icmp_slt_i64(0, 0) == false -; run: %icmp_slt_i64(1, 0) == false -; run: %icmp_slt_i64(-1, -1) == false -; run: %icmp_slt_i64(0, 1) == true -; run: %icmp_slt_i64(-5, -1) == true -; run: %icmp_slt_i64(1, -1) == false +; run: %icmp_slt_i64(0, 0) == 0 +; run: %icmp_slt_i64(1, 0) == 0 +; run: %icmp_slt_i64(-1, -1) == 0 +; run: %icmp_slt_i64(0, 1) == 1 +; run: %icmp_slt_i64(-5, -1) == 1 +; run: %icmp_slt_i64(1, -1) == 0 diff --git a/cranelift/filetests/filetests/runtests/icmp-uge.clif b/cranelift/filetests/filetests/runtests/icmp-uge.clif index bb59e0f60454..7d373e745f63 100644 --- a/cranelift/filetests/filetests/runtests/icmp-uge.clif +++ b/cranelift/filetests/filetests/runtests/icmp-uge.clif @@ -5,50 +5,50 @@ target x86_64 target riscv64 target s390x -function %icmp_uge_i8(i8, i8) -> b1 { +function %icmp_uge_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = icmp uge v0, v1 return v2 } -; run: %icmp_uge_i8(0, 0) == true -; run: %icmp_uge_i8(1, 0) == true -; run: %icmp_uge_i8(-1, -1) == true -; run: %icmp_uge_i8(0, 1) == false -; run: %icmp_uge_i8(-5, -1) == false -; run: %icmp_uge_i8(1, -1) == false +; run: %icmp_uge_i8(0, 0) == 1 +; run: %icmp_uge_i8(1, 0) == 1 +; run: %icmp_uge_i8(-1, -1) == 1 +; run: %icmp_uge_i8(0, 1) == 0 +; run: %icmp_uge_i8(-5, -1) == 0 +; run: %icmp_uge_i8(1, -1) == 0 -function %icmp_uge_i16(i16, i16) -> b1 { +function %icmp_uge_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = icmp uge v0, v1 return v2 } -; run: %icmp_uge_i16(0, 0) == true -; run: %icmp_uge_i16(1, 0) == true -; run: %icmp_uge_i16(-1, -1) == true -; run: %icmp_uge_i16(0, 1) == false -; run: %icmp_uge_i16(-5, -1) == false -; run: %icmp_uge_i16(1, -1) == false +; run: %icmp_uge_i16(0, 0) == 1 +; run: %icmp_uge_i16(1, 0) == 1 +; run: %icmp_uge_i16(-1, -1) == 1 +; run: %icmp_uge_i16(0, 1) == 0 +; run: %icmp_uge_i16(-5, -1) == 0 +; run: %icmp_uge_i16(1, -1) == 0 -function %icmp_uge_i32(i32, i32) -> b1 { +function %icmp_uge_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = icmp uge v0, v1 return v2 } -; run: %icmp_uge_i32(0, 0) == true -; run: %icmp_uge_i32(1, 0) == true -; run: %icmp_uge_i32(-1, -1) == true -; run: %icmp_uge_i32(0, 1) == false -; run: %icmp_uge_i32(-5, 
-1) == false -; run: %icmp_uge_i32(1, -1) == false +; run: %icmp_uge_i32(0, 0) == 1 +; run: %icmp_uge_i32(1, 0) == 1 +; run: %icmp_uge_i32(-1, -1) == 1 +; run: %icmp_uge_i32(0, 1) == 0 +; run: %icmp_uge_i32(-5, -1) == 0 +; run: %icmp_uge_i32(1, -1) == 0 -function %icmp_uge_i64(i64, i64) -> b1 { +function %icmp_uge_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp uge v0, v1 return v2 } -; run: %icmp_uge_i64(0, 0) == true -; run: %icmp_uge_i64(1, 0) == true -; run: %icmp_uge_i64(-1, -1) == true -; run: %icmp_uge_i64(0, 1) == false -; run: %icmp_uge_i64(-5, -1) == false -; run: %icmp_uge_i64(1, -1) == false +; run: %icmp_uge_i64(0, 0) == 1 +; run: %icmp_uge_i64(1, 0) == 1 +; run: %icmp_uge_i64(-1, -1) == 1 +; run: %icmp_uge_i64(0, 1) == 0 +; run: %icmp_uge_i64(-5, -1) == 0 +; run: %icmp_uge_i64(1, -1) == 0 diff --git a/cranelift/filetests/filetests/runtests/icmp-ugt.clif b/cranelift/filetests/filetests/runtests/icmp-ugt.clif index 2acad5013da8..29be0ede6dd0 100644 --- a/cranelift/filetests/filetests/runtests/icmp-ugt.clif +++ b/cranelift/filetests/filetests/runtests/icmp-ugt.clif @@ -5,50 +5,50 @@ target s390x target x86_64 target riscv64 -function %icmp_ugt_i8(i8, i8) -> b1 { +function %icmp_ugt_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = icmp ugt v0, v1 return v2 } -; run: %icmp_ugt_i8(0, 0) == false -; run: %icmp_ugt_i8(1, 0) == true -; run: %icmp_ugt_i8(-1, -1) == false -; run: %icmp_ugt_i8(0, 1) == false -; run: %icmp_ugt_i8(-5, -1) == false -; run: %icmp_ugt_i8(1, -1) == false +; run: %icmp_ugt_i8(0, 0) == 0 +; run: %icmp_ugt_i8(1, 0) == 1 +; run: %icmp_ugt_i8(-1, -1) == 0 +; run: %icmp_ugt_i8(0, 1) == 0 +; run: %icmp_ugt_i8(-5, -1) == 0 +; run: %icmp_ugt_i8(1, -1) == 0 -function %icmp_ugt_i16(i16, i16) -> b1 { +function %icmp_ugt_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = icmp ugt v0, v1 return v2 } -; run: %icmp_ugt_i16(0, 0) == false -; run: %icmp_ugt_i16(1, 0) == true -; run: %icmp_ugt_i16(-1, -1) == false -; run: %icmp_ugt_i16(0, 1) == false -; run: %icmp_ugt_i16(-5, -1) == false -; run: %icmp_ugt_i16(1, -1) == false +; run: %icmp_ugt_i16(0, 0) == 0 +; run: %icmp_ugt_i16(1, 0) == 1 +; run: %icmp_ugt_i16(-1, -1) == 0 +; run: %icmp_ugt_i16(0, 1) == 0 +; run: %icmp_ugt_i16(-5, -1) == 0 +; run: %icmp_ugt_i16(1, -1) == 0 -function %icmp_ugt_i32(i32, i32) -> b1 { +function %icmp_ugt_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = icmp ugt v0, v1 return v2 } -; run: %icmp_ugt_i32(0, 0) == false -; run: %icmp_ugt_i32(1, 0) == true -; run: %icmp_ugt_i32(-1, -1) == false -; run: %icmp_ugt_i32(0, 1) == false -; run: %icmp_ugt_i32(-5, -1) == false -; run: %icmp_ugt_i32(1, -1) == false +; run: %icmp_ugt_i32(0, 0) == 0 +; run: %icmp_ugt_i32(1, 0) == 1 +; run: %icmp_ugt_i32(-1, -1) == 0 +; run: %icmp_ugt_i32(0, 1) == 0 +; run: %icmp_ugt_i32(-5, -1) == 0 +; run: %icmp_ugt_i32(1, -1) == 0 -function %icmp_ugt_i64(i64, i64) -> b1 { +function %icmp_ugt_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp ugt v0, v1 return v2 } -; run: %icmp_ugt_i64(0, 0) == false -; run: %icmp_ugt_i64(1, 0) == true -; run: %icmp_ugt_i64(-1, -1) == false -; run: %icmp_ugt_i64(0, 1) == false -; run: %icmp_ugt_i64(-5, -1) == false -; run: %icmp_ugt_i64(1, -1) == false +; run: %icmp_ugt_i64(0, 0) == 0 +; run: %icmp_ugt_i64(1, 0) == 1 +; run: %icmp_ugt_i64(-1, -1) == 0 +; run: %icmp_ugt_i64(0, 1) == 0 +; run: %icmp_ugt_i64(-5, -1) == 0 +; run: %icmp_ugt_i64(1, -1) == 0 diff --git a/cranelift/filetests/filetests/runtests/icmp-ule.clif b/cranelift/filetests/filetests/runtests/icmp-ule.clif 
index 8bf76573dcde..37d60c9aee73 100644 --- a/cranelift/filetests/filetests/runtests/icmp-ule.clif +++ b/cranelift/filetests/filetests/runtests/icmp-ule.clif @@ -5,50 +5,50 @@ target x86_64 target riscv64 target s390x -function %icmp_ule_i8(i8, i8) -> b1 { +function %icmp_ule_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = icmp ule v0, v1 return v2 } -; run: %icmp_ule_i8(0, 0) == true -; run: %icmp_ule_i8(1, 0) == false -; run: %icmp_ule_i8(-1, -1) == true -; run: %icmp_ule_i8(0, 1) == true -; run: %icmp_ule_i8(-5, -1) == true -; run: %icmp_ule_i8(1, -1) == true +; run: %icmp_ule_i8(0, 0) == 1 +; run: %icmp_ule_i8(1, 0) == 0 +; run: %icmp_ule_i8(-1, -1) == 1 +; run: %icmp_ule_i8(0, 1) == 1 +; run: %icmp_ule_i8(-5, -1) == 1 +; run: %icmp_ule_i8(1, -1) == 1 -function %icmp_ule_i16(i16, i16) -> b1 { +function %icmp_ule_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = icmp ule v0, v1 return v2 } -; run: %icmp_ule_i16(0, 0) == true -; run: %icmp_ule_i16(1, 0) == false -; run: %icmp_ule_i16(-1, -1) == true -; run: %icmp_ule_i16(0, 1) == true -; run: %icmp_ule_i16(-5, -1) == true -; run: %icmp_ule_i16(1, -1) == true +; run: %icmp_ule_i16(0, 0) == 1 +; run: %icmp_ule_i16(1, 0) == 0 +; run: %icmp_ule_i16(-1, -1) == 1 +; run: %icmp_ule_i16(0, 1) == 1 +; run: %icmp_ule_i16(-5, -1) == 1 +; run: %icmp_ule_i16(1, -1) == 1 -function %icmp_ule_i32(i32, i32) -> b1 { +function %icmp_ule_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = icmp ule v0, v1 return v2 } -; run: %icmp_ule_i32(0, 0) == true -; run: %icmp_ule_i32(1, 0) == false -; run: %icmp_ule_i32(-1, -1) == true -; run: %icmp_ule_i32(0, 1) == true -; run: %icmp_ule_i32(-5, -1) == true -; run: %icmp_ule_i32(1, -1) == true +; run: %icmp_ule_i32(0, 0) == 1 +; run: %icmp_ule_i32(1, 0) == 0 +; run: %icmp_ule_i32(-1, -1) == 1 +; run: %icmp_ule_i32(0, 1) == 1 +; run: %icmp_ule_i32(-5, -1) == 1 +; run: %icmp_ule_i32(1, -1) == 1 -function %icmp_ule_i64(i64, i64) -> b1 { +function %icmp_ule_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp ule v0, v1 return v2 } -; run: %icmp_ule_i64(0, 0) == true -; run: %icmp_ule_i64(1, 0) == false -; run: %icmp_ule_i64(-1, -1) == true -; run: %icmp_ule_i64(0, 1) == true -; run: %icmp_ule_i64(-5, -1) == true -; run: %icmp_ule_i64(1, -1) == true +; run: %icmp_ule_i64(0, 0) == 1 +; run: %icmp_ule_i64(1, 0) == 0 +; run: %icmp_ule_i64(-1, -1) == 1 +; run: %icmp_ule_i64(0, 1) == 1 +; run: %icmp_ule_i64(-5, -1) == 1 +; run: %icmp_ule_i64(1, -1) == 1 diff --git a/cranelift/filetests/filetests/runtests/icmp-ult.clif b/cranelift/filetests/filetests/runtests/icmp-ult.clif index e201814f4e7b..a7c04e30e4ad 100644 --- a/cranelift/filetests/filetests/runtests/icmp-ult.clif +++ b/cranelift/filetests/filetests/runtests/icmp-ult.clif @@ -4,50 +4,50 @@ target aarch64 target x86_64 target s390x -function %icmp_ult_i8(i8, i8) -> b1 { +function %icmp_ult_i8(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = icmp ult v0, v1 return v2 } -; run: %icmp_ult_i8(0, 0) == false -; run: %icmp_ult_i8(1, 0) == false -; run: %icmp_ult_i8(-1, -1) == false -; run: %icmp_ult_i8(0, 1) == true -; run: %icmp_ult_i8(-5, -1) == true -; run: %icmp_ult_i8(1, -1) == true +; run: %icmp_ult_i8(0, 0) == 0 +; run: %icmp_ult_i8(1, 0) == 0 +; run: %icmp_ult_i8(-1, -1) == 0 +; run: %icmp_ult_i8(0, 1) == 1 +; run: %icmp_ult_i8(-5, -1) == 1 +; run: %icmp_ult_i8(1, -1) == 1 -function %icmp_ult_i16(i16, i16) -> b1 { +function %icmp_ult_i16(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = icmp ult v0, v1 return v2 } -; run: %icmp_ult_i16(0, 0) == false -; run: 
%icmp_ult_i16(1, 0) == false -; run: %icmp_ult_i16(-1, -1) == false -; run: %icmp_ult_i16(0, 1) == true -; run: %icmp_ult_i16(-5, -1) == true -; run: %icmp_ult_i16(1, -1) == true +; run: %icmp_ult_i16(0, 0) == 0 +; run: %icmp_ult_i16(1, 0) == 0 +; run: %icmp_ult_i16(-1, -1) == 0 +; run: %icmp_ult_i16(0, 1) == 1 +; run: %icmp_ult_i16(-5, -1) == 1 +; run: %icmp_ult_i16(1, -1) == 1 -function %icmp_ult_i32(i32, i32) -> b1 { +function %icmp_ult_i32(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = icmp ult v0, v1 return v2 } -; run: %icmp_ult_i32(0, 0) == false -; run: %icmp_ult_i32(1, 0) == false -; run: %icmp_ult_i32(-1, -1) == false -; run: %icmp_ult_i32(0, 1) == true -; run: %icmp_ult_i32(-5, -1) == true -; run: %icmp_ult_i32(1, -1) == true +; run: %icmp_ult_i32(0, 0) == 0 +; run: %icmp_ult_i32(1, 0) == 0 +; run: %icmp_ult_i32(-1, -1) == 0 +; run: %icmp_ult_i32(0, 1) == 1 +; run: %icmp_ult_i32(-5, -1) == 1 +; run: %icmp_ult_i32(1, -1) == 1 -function %icmp_ult_i64(i64, i64) -> b1 { +function %icmp_ult_i64(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = icmp ult v0, v1 return v2 } -; run: %icmp_ult_i64(0, 0) == false -; run: %icmp_ult_i64(1, 0) == false -; run: %icmp_ult_i64(-1, -1) == false -; run: %icmp_ult_i64(0, 1) == true -; run: %icmp_ult_i64(-5, -1) == true -; run: %icmp_ult_i64(1, -1) == true +; run: %icmp_ult_i64(0, 0) == 0 +; run: %icmp_ult_i64(1, 0) == 0 +; run: %icmp_ult_i64(-1, -1) == 0 +; run: %icmp_ult_i64(0, 1) == 1 +; run: %icmp_ult_i64(-5, -1) == 1 +; run: %icmp_ult_i64(1, -1) == 1 diff --git a/cranelift/filetests/filetests/runtests/icmp.clif b/cranelift/filetests/filetests/runtests/icmp.clif index 37ed9f61fbd6..3ed5576d8c44 100644 --- a/cranelift/filetests/filetests/runtests/icmp.clif +++ b/cranelift/filetests/filetests/runtests/icmp.clif @@ -9,11 +9,11 @@ target riscv64 ; We were not correctly handling the fact that the rhs constant value ; overflows its type when viewed as a signed value, and thus encoding the wrong ; value into the resulting instruction. 
-function %overflow_rhs_const(i8) -> b1 { +function %overflow_rhs_const(i8) -> i8 { block0(v0: i8): v1 = iconst.i8 192 v2 = icmp sge v0, v1 return v2 } -; run: %overflow_rhs_const(49) == true -; run: %overflow_rhs_const(-65) == false +; run: %overflow_rhs_const(49) == 1 +; run: %overflow_rhs_const(-65) == 0 diff --git a/cranelift/filetests/filetests/runtests/isubbin.clif b/cranelift/filetests/filetests/runtests/isubbin.clif index 304d118d6a54..53ebcf116a92 100644 --- a/cranelift/filetests/filetests/runtests/isubbin.clif +++ b/cranelift/filetests/filetests/runtests/isubbin.clif @@ -1,49 +1,49 @@ test interpret -function %isubbin_i8(i8, i8, b1) -> i8 { -block0(v0: i8, v1: i8, v2: b1): +function %isubbin_i8(i8, i8, i8) -> i8 { +block0(v0: i8, v1: i8, v2: i8): v3 = isub_bin v0, v1, v2 return v3 } -; run: %isubbin_i8(0, 1, true) == -2 -; run: %isubbin_i8(0, 1, false) == -1 -; run: %isubbin_i8(100, 20, true) == 79 -; run: %isubbin_i8(100, 20, false) == 80 -; run: %isubbin_i8(-128, 1, true) == 126 -; run: %isubbin_i8(-128, 1, false) == 127 +; run: %isubbin_i8(0, 1, 1) == -2 +; run: %isubbin_i8(0, 1, 0) == -1 +; run: %isubbin_i8(100, 20, 1) == 79 +; run: %isubbin_i8(100, 20, 0) == 80 +; run: %isubbin_i8(-128, 1, 1) == 126 +; run: %isubbin_i8(-128, 1, 0) == 127 -function %isubbin_i16(i16, i16, b1) -> i16 { -block0(v0: i16, v1: i16, v2: b1): +function %isubbin_i16(i16, i16, i8) -> i16 { +block0(v0: i16, v1: i16, v2: i8): v3 = isub_bin v0, v1, v2 return v3 } -; run: %isubbin_i16(0, 1, true) == -2 -; run: %isubbin_i16(0, 1, false) == -1 -; run: %isubbin_i16(100, 20, true) == 79 -; run: %isubbin_i16(100, 20, false) == 80 -; run: %isubbin_i16(-32768, 1, true) == 32766 -; run: %isubbin_i16(-32768, 1, false) == 32767 +; run: %isubbin_i16(0, 1, 1) == -2 +; run: %isubbin_i16(0, 1, 0) == -1 +; run: %isubbin_i16(100, 20, 1) == 79 +; run: %isubbin_i16(100, 20, 0) == 80 +; run: %isubbin_i16(-32768, 1, 1) == 32766 +; run: %isubbin_i16(-32768, 1, 0) == 32767 -function %isubbin_i32(i32, i32, b1) -> i32 { -block0(v0: i32, v1: i32, v2: b1): +function %isubbin_i32(i32, i32, i8) -> i32 { +block0(v0: i32, v1: i32, v2: i8): v3 = isub_bin v0, v1, v2 return v3 } -; run: %isubbin_i32(0, 1, true) == -2 -; run: %isubbin_i32(0, 1, false) == -1 -; run: %isubbin_i32(100, 20, true) == 79 -; run: %isubbin_i32(100, 20, false) == 80 -; run: %isubbin_i32(-2147483648, 1, true) == 2147483646 -; run: %isubbin_i32(-2147483648, 1, false) == 2147483647 +; run: %isubbin_i32(0, 1, 1) == -2 +; run: %isubbin_i32(0, 1, 0) == -1 +; run: %isubbin_i32(100, 20, 1) == 79 +; run: %isubbin_i32(100, 20, 0) == 80 +; run: %isubbin_i32(-2147483648, 1, 1) == 2147483646 +; run: %isubbin_i32(-2147483648, 1, 0) == 2147483647 -function %isubbin_i64(i64, i64, b1) -> i64 { -block0(v0: i64, v1: i64, v2: b1): +function %isubbin_i64(i64, i64, i8) -> i64 { +block0(v0: i64, v1: i64, v2: i8): v3 = isub_bin v0, v1, v2 return v3 } -; run: %isubbin_i64(0, 1, true) == -2 -; run: %isubbin_i64(0, 1, false) == -1 -; run: %isubbin_i64(100, 20, true) == 79 -; run: %isubbin_i64(100, 20, false) == 80 -; run: %isubbin_i64(-2147483648, 1, true) == -2147483650 -; run: %isubbin_i64(-2147483648, 1, false) == -2147483649 \ No newline at end of file +; run: %isubbin_i64(0, 1, 1) == -2 +; run: %isubbin_i64(0, 1, 0) == -1 +; run: %isubbin_i64(100, 20, 1) == 79 +; run: %isubbin_i64(100, 20, 0) == 80 +; run: %isubbin_i64(-2147483648, 1, 1) == -2147483650 +; run: %isubbin_i64(-2147483648, 1, 0) == -2147483649 diff --git a/cranelift/filetests/filetests/runtests/isubborrow.clif 
b/cranelift/filetests/filetests/runtests/isubborrow.clif index cf1f2fd5a348..90dd04c53bcc 100644 --- a/cranelift/filetests/filetests/runtests/isubborrow.clif +++ b/cranelift/filetests/filetests/runtests/isubborrow.clif @@ -1,98 +1,98 @@ test interpret -function %isubborrow_i8_v(i8, i8, b1) -> i8 { -block0(v0: i8, v1: i8, v2: b1): +function %isubborrow_i8_v(i8, i8, i8) -> i8 { +block0(v0: i8, v1: i8, v2: i8): v3, v4 = isub_borrow v0, v1, v2 return v3 } -; run: %isubborrow_i8_v(0, 1, true) == -2 -; run: %isubborrow_i8_v(0, 1, false) == -1 -; run: %isubborrow_i8_v(100, 20, true) == 79 -; run: %isubborrow_i8_v(100, 20, false) == 80 -; run: %isubborrow_i8_v(127, 127, true) == -1 -; run: %isubborrow_i8_v(127, 127, false) == 0 +; run: %isubborrow_i8_v(0, 1, 1) == -2 +; run: %isubborrow_i8_v(0, 1, 0) == -1 +; run: %isubborrow_i8_v(100, 20, 1) == 79 +; run: %isubborrow_i8_v(100, 20, 0) == 80 +; run: %isubborrow_i8_v(127, 127, 1) == -1 +; run: %isubborrow_i8_v(127, 127, 0) == 0 -function %isubborrow_i8_c(i8, i8, b1) -> b1 { -block0(v0: i8, v1: i8, v2: b1): +function %isubborrow_i8_c(i8, i8, i8) -> i8 { +block0(v0: i8, v1: i8, v2: i8): v3, v4 = isub_borrow v0, v1, v2 return v4 } -; run: %isubborrow_i8_c(0, 1, true) == true -; run: %isubborrow_i8_c(0, 1, false) == true -; run: %isubborrow_i8_c(100, 20, true) == false -; run: %isubborrow_i8_c(100, 20, false) == false -; run: %isubborrow_i8_c(127, 127, true) == false -; run: %isubborrow_i8_c(127, 127, false) == false +; run: %isubborrow_i8_c(0, 1, 1) == 1 +; run: %isubborrow_i8_c(0, 1, 0) == 1 +; run: %isubborrow_i8_c(100, 20, 1) == 0 +; run: %isubborrow_i8_c(100, 20, 0) == 0 +; run: %isubborrow_i8_c(127, 127, 1) == 0 +; run: %isubborrow_i8_c(127, 127, 0) == 0 -function %isubborrow_i16_v(i16, i16, b1) -> i16 { -block0(v0: i16, v1: i16, v2: b1): +function %isubborrow_i16_v(i16, i16, i8) -> i16 { +block0(v0: i16, v1: i16, v2: i8): v3, v4 = isub_borrow v0, v1, v2 return v3 } -; run: %isubborrow_i16_v(0, 1, true) == -2 -; run: %isubborrow_i16_v(0, 1, false) == -1 -; run: %isubborrow_i16_v(100, 20, true) == 79 -; run: %isubborrow_i16_v(100, 20, false) == 80 -; run: %isubborrow_i16_v(-32000, 768, true) == 32767 -; run: %isubborrow_i16_v(-32000, 768, false) == -32768 +; run: %isubborrow_i16_v(0, 1, 1) == -2 +; run: %isubborrow_i16_v(0, 1, 0) == -1 +; run: %isubborrow_i16_v(100, 20, 1) == 79 +; run: %isubborrow_i16_v(100, 20, 0) == 80 +; run: %isubborrow_i16_v(-32000, 768, 1) == 32767 +; run: %isubborrow_i16_v(-32000, 768, 0) == -32768 -function %isubborrow_i16_c(i16, i16, b1) -> b1 { -block0(v0: i16, v1: i16, v2: b1): +function %isubborrow_i16_c(i16, i16, i8) -> i8 { +block0(v0: i16, v1: i16, v2: i8): v3, v4 = isub_borrow v0, v1, v2 return v4 } -; run: %isubborrow_i16_c(0, 1, true) == true -; run: %isubborrow_i16_c(0, 1, false) == true -; run: %isubborrow_i16_c(100, 20, true) == false -; run: %isubborrow_i16_c(100, 20, false) == false -; run: %isubborrow_i16_c(-32000, 768, true) == true -; run: %isubborrow_i16_c(-32000, 768, false) == true +; run: %isubborrow_i16_c(0, 1, 1) == 1 +; run: %isubborrow_i16_c(0, 1, 0) == 1 +; run: %isubborrow_i16_c(100, 20, 1) == 0 +; run: %isubborrow_i16_c(100, 20, 0) == 0 +; run: %isubborrow_i16_c(-32000, 768, 1) == 1 +; run: %isubborrow_i16_c(-32000, 768, 0) == 1 -function %isubborrow_i32_v(i32, i32, b1) -> i32 { -block0(v0: i32, v1: i32, v2: b1): +function %isubborrow_i32_v(i32, i32, i8) -> i32 { +block0(v0: i32, v1: i32, v2: i8): v3, v4 = isub_borrow v0, v1, v2 return v3 } -; run: %isubborrow_i32_v(0, 1, true) == -2 -; run: 
%isubborrow_i32_v(0, 1, false) == -1 -; run: %isubborrow_i32_v(100, 20, true) == 79 -; run: %isubborrow_i32_v(100, 20, false) == 80 -; run: %isubborrow_i32_v(-2147483640, 8, true) == 2147483647 -; run: %isubborrow_i32_v(-2147483640, 8, false) == -2147483648 +; run: %isubborrow_i32_v(0, 1, 1) == -2 +; run: %isubborrow_i32_v(0, 1, 0) == -1 +; run: %isubborrow_i32_v(100, 20, 1) == 79 +; run: %isubborrow_i32_v(100, 20, 0) == 80 +; run: %isubborrow_i32_v(-2147483640, 8, 1) == 2147483647 +; run: %isubborrow_i32_v(-2147483640, 8, 0) == -2147483648 -function %isubborrow_i32_c(i32, i32, b1) -> b1 { -block0(v0: i32, v1: i32, v2: b1): +function %isubborrow_i32_c(i32, i32, i8) -> i8 { +block0(v0: i32, v1: i32, v2: i8): v3, v4 = isub_borrow v0, v1, v2 return v4 } -; run: %isubborrow_i32_c(0, 1, true) == true -; run: %isubborrow_i32_c(0, 1, false) == true -; run: %isubborrow_i32_c(100, 20, true) == false -; run: %isubborrow_i32_c(100, 20, false) == false -; run: %isubborrow_i32_c(-2147483640, 8, true) == true -; run: %isubborrow_i32_c(-2147483640, 8, false) == true +; run: %isubborrow_i32_c(0, 1, 1) == 1 +; run: %isubborrow_i32_c(0, 1, 0) == 1 +; run: %isubborrow_i32_c(100, 20, 1) == 0 +; run: %isubborrow_i32_c(100, 20, 0) == 0 +; run: %isubborrow_i32_c(-2147483640, 8, 1) == 1 +; run: %isubborrow_i32_c(-2147483640, 8, 0) == 1 -function %isubborrow_i64_v(i64, i64, b1) -> i64 { -block0(v0: i64, v1: i64, v2: b1): +function %isubborrow_i64_v(i64, i64, i8) -> i64 { +block0(v0: i64, v1: i64, v2: i8): v3, v4 = isub_borrow v0, v1, v2 return v3 } -; run: %isubborrow_i64_v(0, 1, true) == -2 -; run: %isubborrow_i64_v(0, 1, false) == -1 -; run: %isubborrow_i64_v(100, 20, true) == 79 -; run: %isubborrow_i64_v(100, 20, false) == 80 -; run: %isubborrow_i64_v(-9223372036854775800, 8, true) == 9223372036854775807 -; run: %isubborrow_i64_v(-9223372036854775800, 8, false) == -9223372036854775808 +; run: %isubborrow_i64_v(0, 1, 1) == -2 +; run: %isubborrow_i64_v(0, 1, 0) == -1 +; run: %isubborrow_i64_v(100, 20, 1) == 79 +; run: %isubborrow_i64_v(100, 20, 0) == 80 +; run: %isubborrow_i64_v(-9223372036854775800, 8, 1) == 9223372036854775807 +; run: %isubborrow_i64_v(-9223372036854775800, 8, 0) == -9223372036854775808 -function %isubborrow_i64_c(i64, i64, b1) -> b1 { -block0(v0: i64, v1: i64, v2: b1): +function %isubborrow_i64_c(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): v3, v4 = isub_borrow v0, v1, v2 return v4 } -; run: %isubborrow_i64_c(0, 1, true) == true -; run: %isubborrow_i64_c(0, 1, false) == true -; run: %isubborrow_i64_c(100, 20, true) == false -; run: %isubborrow_i64_c(100, 20, false) == false -; run: %isubborrow_i64_c(-9223372036854775800, 8, true) == true -; run: %isubborrow_i64_c(-9223372036854775800, 8, false) == true +; run: %isubborrow_i64_c(0, 1, 1) == 1 +; run: %isubborrow_i64_c(0, 1, 0) == 1 +; run: %isubborrow_i64_c(100, 20, 1) == 0 +; run: %isubborrow_i64_c(100, 20, 0) == 0 +; run: %isubborrow_i64_c(-9223372036854775800, 8, 1) == 1 +; run: %isubborrow_i64_c(-9223372036854775800, 8, 0) == 1 diff --git a/cranelift/filetests/filetests/runtests/isubbout.clif b/cranelift/filetests/filetests/runtests/isubbout.clif index db07b1a6f3b4..9c43770d38d4 100644 --- a/cranelift/filetests/filetests/runtests/isubbout.clif +++ b/cranelift/filetests/filetests/runtests/isubbout.clif @@ -10,15 +10,15 @@ block0(v0: i8, v1: i8): ; run: %isubbout_i8_v(100, -20) == 120 ; run: %isubbout_i8_v(-128, 1) == 127 -function %isubbout_i8_c(i8, i8) -> b1 { +function %isubbout_i8_c(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2, 
v3 = isub_bout v0, v1 return v3 } -; run: %isubbout_i8_c(0, 1) == true -; run: %isubbout_i8_c(100, 20) == false -; run: %isubbout_i8_c(100, -20) == false -; run: %isubbout_i8_c(-128, 1) == true +; run: %isubbout_i8_c(0, 1) == 1 +; run: %isubbout_i8_c(100, 20) == 0 +; run: %isubbout_i8_c(100, -20) == 0 +; run: %isubbout_i8_c(-128, 1) == 1 function %isubbout_i16_v(i16, i16) -> i16 { block0(v0: i16, v1: i16): @@ -31,16 +31,16 @@ block0(v0: i16, v1: i16): ; run: %isubbout_i16_v(-32000, 768) == -32768 ; run: %isubbout_i16_v(-32000, 769) == 32767 -function %isubbout_i16_c(i16, i16) -> b1 { +function %isubbout_i16_c(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2, v3 = isub_bout v0, v1 return v3 } -; run: %isubbout_i16_c(0, 1) == true -; run: %isubbout_i16_c(100, 20) == false -; run: %isubbout_i16_c(100, -28) == false -; run: %isubbout_i16_c(-32000, 768) == true -; run: %isubbout_i16_c(-32000, 769) == true +; run: %isubbout_i16_c(0, 1) == 1 +; run: %isubbout_i16_c(100, 20) == 0 +; run: %isubbout_i16_c(100, -28) == 0 +; run: %isubbout_i16_c(-32000, 768) == 1 +; run: %isubbout_i16_c(-32000, 769) == 1 function %isubbout_i32_v(i32, i32) -> i32 { block0(v0: i32, v1: i32): @@ -53,16 +53,16 @@ block0(v0: i32, v1: i32): ; run: %isubbout_i32_v(-2147483640, 8) == -2147483648 ; run: %isubbout_i32_v(-2147483640, 9) == 2147483647 -function %isubbout_i32_c(i32, i32) -> b1 { +function %isubbout_i32_c(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2, v3 = isub_bout v0, v1 return v3 } -; run: %isubbout_i32_c(0, 1) == true -; run: %isubbout_i32_c(100, 20) == false -; run: %isubbout_i32_c(100, -28) == false -; run: %isubbout_i32_c(-2147483640, 8) == true -; run: %isubbout_i32_c(-2147483640, 9) == true +; run: %isubbout_i32_c(0, 1) == 1 +; run: %isubbout_i32_c(100, 20) == 0 +; run: %isubbout_i32_c(100, -28) == 0 +; run: %isubbout_i32_c(-2147483640, 8) == 1 +; run: %isubbout_i32_c(-2147483640, 9) == 1 function %isubbout_i64_v(i64, i64) -> i64 { block0(v0: i64, v1: i64): @@ -75,13 +75,13 @@ block0(v0: i64, v1: i64): ; run: %isubbout_i64_v(-2147483640, 8) == -2147483648 ; run: %isubbout_i64_v(-2147483640, 9) == -2147483649 -function %isubbout_i64_c(i64, i64) -> b1 { +function %isubbout_i64_c(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2, v3 = isub_bout v0, v1 return v3 } -; run: %isubbout_i64_c(0, 1) == true -; run: %isubbout_i64_c(100, 20) == false -; run: %isubbout_i64_c(100, -28) == false -; run: %isubbout_i64_c(-2147483640, 8) == true -; run: %isubbout_i64_c(-2147483640, 9) == true \ No newline at end of file +; run: %isubbout_i64_c(0, 1) == 1 +; run: %isubbout_i64_c(100, 20) == 0 +; run: %isubbout_i64_c(100, -28) == 0 +; run: %isubbout_i64_c(-2147483640, 8) == 1 +; run: %isubbout_i64_c(-2147483640, 9) == 1 diff --git a/cranelift/filetests/filetests/runtests/nearest.clif b/cranelift/filetests/filetests/runtests/nearest.clif index d363d8730a63..63aeab0d3778 100644 --- a/cranelift/filetests/filetests/runtests/nearest.clif +++ b/cranelift/filetests/filetests/runtests/nearest.clif @@ -59,7 +59,7 @@ function %near_is_nan_f32(f32) -> i32 { block0(v0: f32): v1 = nearest v0 v2 = fcmp ne v1, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } ; run: %near_is_nan_f32(+NaN) == 1 @@ -132,7 +132,7 @@ function %near_is_nan_f64(f64) -> i32 { block0(v0: f64): v1 = nearest v0 v2 = fcmp ne v1, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } ; run: %near_is_nan_f64(+NaN) == 1 diff --git a/cranelift/filetests/filetests/runtests/ref64-invalid-null.clif b/cranelift/filetests/filetests/runtests/ref64-invalid-null.clif index 
f052b5ac5e45..39b44d0ce162 100644
--- a/cranelift/filetests/filetests/runtests/ref64-invalid-null.clif
+++ b/cranelift/filetests/filetests/runtests/ref64-invalid-null.clif
@@ -4,39 +4,39 @@ target aarch64
 target x86_64
 target s390x

-function %is_null_true_r64() -> b1 {
+function %is_null_true_r64() -> i8 {
 block0:
     v0 = null.r64
     v1 = is_null v0
     return v1
 }
-; run: %is_null_true_r64() == true
+; run: %is_null_true_r64() == 1

-function %is_null_r64(i64) -> b1 {
+function %is_null_r64(i64) -> i8 {
 block0(v0: i64):
     v1 = raw_bitcast.r64 v0
     v2 = is_null v1
     return v2
 }
-; run: %is_null_r64(256347) == false
-; run: %is_null_r64(-1) == false
-; run: %is_null_r64(0) == true
+; run: %is_null_r64(256347) == 0
+; run: %is_null_r64(-1) == 0
+; run: %is_null_r64(0) == 1

-function %is_invalid_r64(i64) -> b1 {
+function %is_invalid_r64(i64) -> i8 {
 block0(v0: i64):
     v1 = raw_bitcast.r64 v0
     v2 = is_invalid v1
     return v2
 }
-; run: %is_invalid_r64(0xffffffffffffffff) == true
-; run: %is_invalid_r64(-1) == true
-; run: %is_invalid_r64(256347) == false
-; run: %is_invalid_r64(0) == false
+; run: %is_invalid_r64(0xffffffffffffffff) == 1
+; run: %is_invalid_r64(-1) == 1
+; run: %is_invalid_r64(256347) == 0
+; run: %is_invalid_r64(0) == 0

-function %is_invalid_null_r64() -> b1 {
+function %is_invalid_null_r64() -> i8 {
 block0:
     v0 = null.r64
     v1 = is_invalid v0
     return v1
 }
-; run: %is_invalid_null_r64() == false
+; run: %is_invalid_null_r64() == 0
diff --git a/cranelift/filetests/filetests/runtests/riscv64_issue_4996.clif b/cranelift/filetests/filetests/runtests/riscv64_issue_4996.clif
index f019b9026132..6ef2f5268de4 100644
--- a/cranelift/filetests/filetests/runtests/riscv64_issue_4996.clif
+++ b/cranelift/filetests/filetests/runtests/riscv64_issue_4996.clif
@@ -6,8 +6,8 @@ target riscv64
 ; This is a regression test for https://github.com/bytecodealliance/wasmtime/issues/4996.
 function %issue4996() -> i128, i64 system_v {
 block0:
-    v5 = bconst.b1 false
-    brz v5, block3 ; v5 = false
+    v5 = iconst.i8 0
+    brz v5, block3 ; v5 = 0
     jump block1
 block1:
     v12 = iconst.i64 0
@@ -21,4 +21,4 @@ function %issue4996() -> i128, i64 system_v {
     v29 = iconst.i64 0
     return v23, v29 ; v29 = 0
 }
-; run: %issue4996() == [0,0]
\ No newline at end of file
+; run: %issue4996() == [0,0]
diff --git a/cranelift/filetests/filetests/runtests/select.clif b/cranelift/filetests/filetests/runtests/select.clif
index 46cdb79abb2e..c83e8408e80c 100644
--- a/cranelift/filetests/filetests/runtests/select.clif
+++ b/cranelift/filetests/filetests/runtests/select.clif
@@ -29,18 +29,18 @@ block0(v0: f64, v1: f64):
 ; run: %select_ne_f64(0x42.42, 0.0) == 1
 ; run: %select_ne_f64(NaN, NaN) == 1

-function %select_gt_f64(f64, f64) -> b1 {
+function %select_gt_f64(f64, f64) -> i8 {
 block0(v0: f64, v1: f64):
     v2 = fcmp gt v0, v1
-    v3 = bconst.b1 true
-    v4 = bconst.b1 false
+    v3 = iconst.i8 1
+    v4 = iconst.i8 0
     v5 = select v2, v3, v4
     return v5
 }
-; run: %select_gt_f64(0x42.42, 0.0) == true
-; run: %select_gt_f64(0.0, 0.0) == false
-; run: %select_gt_f64(0x0.0, 0x42.42) == false
-; run: %select_gt_f64(NaN, 0x42.42) == false
+; run: %select_gt_f64(0x42.42, 0.0) == 1
+; run: %select_gt_f64(0.0, 0.0) == 0
+; run: %select_gt_f64(0x0.0, 0x42.42) == 0
+; run: %select_gt_f64(NaN, 0x42.42) == 0

 function %select_ge_f64(f64, f64) -> i64 {
 block0(v0: f64, v1: f64):
diff --git a/cranelift/filetests/filetests/runtests/simd-arithmetic.clif b/cranelift/filetests/filetests/runtests/simd-arithmetic.clif
index ebcfa3246eab..4bc1ac828a7b 100644
--- a/cranelift/filetests/filetests/runtests/simd-arithmetic.clif
+++ b/cranelift/filetests/filetests/runtests/simd-arithmetic.clif
@@ -88,7 +88,7 @@ block0(v0: i8x16, v1: i8x16):
 }
 ; run: %usub_sat_i8x16([0x80 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0], [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]) == [0x7f 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]

-function %add_sub_f32x4() -> b1 {
+function %add_sub_f32x4() -> i8 {
 block0:
     v0 = vconst.f32x4 [0x4.2 0.0 0.0 0.0]
     v1 = vconst.f32x4 [0x1.0 0x1.0 0x1.0 0x1.0]
@@ -106,7 +106,7 @@ block0:
 }
 ; run

-function %mul_div_f32x4() -> b1 {
+function %mul_div_f32x4() -> i8 {
 block0:
     v0 = vconst.f32x4 [0x4.2 -0x2.1 0x2.0 0.0]
     v1 = vconst.f32x4 [0x3.4 0x6.7 0x8.9 0xa.b]
diff --git a/cranelift/filetests/filetests/runtests/simd-bitselect.clif b/cranelift/filetests/filetests/runtests/simd-bitselect.clif
index 3febd9868529..981d86375e78 100644
--- a/cranelift/filetests/filetests/runtests/simd-bitselect.clif
+++ b/cranelift/filetests/filetests/runtests/simd-bitselect.clif
@@ -22,7 +22,7 @@ block0(v0: i8x16, v1: i8x16, v2: i8x16):
 ; Remember that bitselect accepts: 1) the selector vector, 2) the "if true" vector, and 3) the "if false" vector.
 ; run: %bitselect_i8x16([0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 255], [127 0 0 0 0 0 0 0 0 0 0 0 0 0 0 42], [42 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127]) == [42 0 0 0 0 0 0 0 0 0 0 0 0 0 0 42]

-function %bitselect_i8x16_1() -> b1 {
+function %bitselect_i8x16_1() -> i8 {
 block0:
     v0 = vconst.i8x16 [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 255] ; the selector vector
     v1 = vconst.i8x16 [127 0 0 0 0 0 0 0 0 0 0 0 0 0 0 42] ; for each 1-bit in v0 the bit of v1 is selected
diff --git a/cranelift/filetests/filetests/runtests/simd-bmask.clif b/cranelift/filetests/filetests/runtests/simd-bmask.clif
index ba504f786821..1ef244f0a588 100644
--- a/cranelift/filetests/filetests/runtests/simd-bmask.clif
+++ b/cranelift/filetests/filetests/runtests/simd-bmask.clif
@@ -1,30 +1,30 @@
 test interpret

-function %bmask_i8x16(b8x16) -> i8x16 {
-block0(v0: b8x16):
+function %bmask_i8x16(i8x16) -> i8x16 {
+block0(v0: i8x16):
     v1 = bmask.i8x16 v0
     return v1
 }
-; run: %bmask_i8x16([true false true false true false true false true false true false true false true false]) == [-1 0 -1 0 -1 0 -1 0 -1 0 -1 0 -1 0 -1 0]
+; run: %bmask_i8x16([-1 0 -1 0 -1 0 -1 0 -1 0 -1 0 -1 0 -1 0]) == [-1 0 -1 0 -1 0 -1 0 -1 0 -1 0 -1 0 -1 0]

-function %bmask_i16x8(b16x8) -> i16x8 {
-block0(v0: b16x8):
+function %bmask_i16x8(i16x8) -> i16x8 {
+block0(v0: i16x8):
     v1 = bmask.i16x8 v0
     return v1
 }
-; run: %bmask_i16x8([true false true false true false true false]) == [-1 0 -1 0 -1 0 -1 0]
+; run: %bmask_i16x8([-1 0 -1 0 -1 0 -1 0]) == [-1 0 -1 0 -1 0 -1 0]

-function %bmask_i32x4(b32x4) -> i32x4 {
-block0(v0: b32x4):
+function %bmask_i32x4(i32x4) -> i32x4 {
+block0(v0: i32x4):
     v1 = bmask.i32x4 v0
     return v1
 }
-; run: %bmask_i32x4([true false true false]) == [-1 0 -1 0]
+; run: %bmask_i32x4([-1 0 -1 0]) == [-1 0 -1 0]

-function %bmask_i64x2(b64x2) -> i64x2 {
-block0(v0: b64x2):
+function %bmask_i64x2(i64x2) -> i64x2 {
+block0(v0: i64x2):
     v1 = bmask.i64x2 v0
     return v1
 }
-; run: %bmask_i64x2([true false]) == [-1 0]
+; run: %bmask_i64x2([-1 0]) == [-1 0]
diff --git a/cranelift/filetests/filetests/runtests/simd-comparison.clif b/cranelift/filetests/filetests/runtests/simd-comparison.clif
index dd8c6a80b2ef..3ed38f9a71ed 100644
--- a/cranelift/filetests/filetests/runtests/simd-comparison.clif
+++ b/cranelift/filetests/filetests/runtests/simd-comparison.clif
@@ -4,7 +4,7 @@ target s390x
 set enable_simd
 target x86_64 has_sse3 has_ssse3 has_sse41

-function %icmp_eq_i8x16() -> b8 {
+function %icmp_eq_i8x16() -> i8 {
 block0:
     v0 = vconst.i8x16 0x00
     v1 = vconst.i8x16 0x00
@@ -14,7 +14,7 @@ block0:
 }
 ; run

-function %icmp_eq_i64x2() -> b64 {
+function %icmp_eq_i64x2() -> i64 {
 block0:
     v0 = vconst.i64x2 0xffffffffffffffffffffffffffffffff
     v1 = vconst.i64x2 0xffffffffffffffffffffffffffffffff
@@ -24,7 +24,7 @@ block0:
 }
 ; run

-function %icmp_ne_i32x4() -> b1 {
+function %icmp_ne_i32x4() -> i8 {
 block0:
     v0 = vconst.i32x4 [0 1 2 3]
     v1 = vconst.i32x4 [7 7 7 7]
@@ -34,19 +34,19 @@ block0:
 }
 ; run

-function %icmp_ne_i16x8() -> b1 {
+function %icmp_ne_i16x8() -> i8 {
 block0:
     v0 = vconst.i16x8 [0 1 2 3 4 5 6 7]
     v1 = vconst.i16x8 [0 1 2 3 4 5 6 7]
     v2 = icmp ne v0, v1
     v3 = vall_true v2
-    v4 = bint.i32 v3
+    v4 = uextend.i32 v3
     v5 = icmp_imm eq v4, 0
     return v5
 }
 ; run

-function %icmp_sgt_i8x16() -> b1 {
+function %icmp_sgt_i8x16() -> i8 {
 block0:
     v0 = vconst.i8x16 [0 1 2 0 0 0 0 0 0 0 0 0 0 0 0 0]
     v1 = vconst.i8x16 [1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0xff]
@@ -59,7 +59,7 @@ block0:
 }
 ; run

-function %icmp_sgt_i64x2() -> b1 {
+function %icmp_sgt_i64x2() -> i8 {
 block0:
     v0 = vconst.i64x2 [0 -42]
     v1 =
vconst.i64x2 [-1 -43] @@ -69,7 +69,7 @@ block0: } ; run -function %icmp_ugt_i8x16() -> b1 { +function %icmp_ugt_i8x16() -> i8 { block0: v0 = vconst.i8x16 [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16] v1 = vconst.i8x16 [0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] @@ -79,7 +79,7 @@ block0: } ; run -function %icmp_sge_i16x8() -> b1 { +function %icmp_sge_i16x8() -> i8 { block0: v0 = vconst.i16x8 [-1 1 2 3 4 5 6 7] v1 = vconst.i16x8 [-1 1 1 1 1 1 1 1] @@ -89,7 +89,7 @@ block0: } ; run -function %icmp_uge_i32x4() -> b1 { +function %icmp_uge_i32x4() -> i8 { block0: v0 = vconst.i32x4 [1 2 3 4] v1 = vconst.i32x4 [1 1 1 1] @@ -99,7 +99,7 @@ block0: } ; run -function %icmp_slt_i32x4() -> b1 { +function %icmp_slt_i32x4() -> i8 { block0: v0 = vconst.i32x4 [-1 1 1 1] v1 = vconst.i32x4 [1 2 3 4] @@ -109,7 +109,7 @@ block0: } ; run -function %icmp_ult_i32x4() -> b1 { +function %icmp_ult_i32x4() -> i8 { block0: v0 = vconst.i32x4 [1 1 1 1] v1 = vconst.i32x4 [-1 2 3 4] ; -1 = 0xffff... will be greater than 1 when unsigned @@ -120,7 +120,7 @@ block0: ; run -function %icmp_ult_i16x8() -> b1 { +function %icmp_ult_i16x8() -> i8 { block0: v0 = vconst.i16x8 [-1 -1 -1 -1 -1 -1 -1 -1] v1 = vconst.i16x8 [-1 -1 -1 -1 -1 -1 -1 -1] @@ -133,7 +133,7 @@ block0: } ; run -function %icmp_sle_i16x8() -> b1 { +function %icmp_sle_i16x8() -> i8 { block0: v0 = vconst.i16x8 [-1 -1 0 0 0 0 0 0] v1 = vconst.i16x8 [-1 0 0 0 0 0 0 0] @@ -143,7 +143,7 @@ block0: } ; run -function %icmp_ule_i16x8() -> b1 { +function %icmp_ule_i16x8() -> i8 { block0: v0 = vconst.i16x8 [-1 0 0 0 0 0 0 0] v1 = vconst.i16x8 [-1 -1 0 0 0 0 0 0] @@ -153,7 +153,7 @@ block0: } ; run -function %fcmp_eq_f32x4() -> b1 { +function %fcmp_eq_f32x4() -> i8 { block0: v0 = vconst.f32x4 [0.0 -0x4.2 0x0.33333 -0.0] v1 = vconst.f32x4 [0.0 -0x4.2 0x0.33333 -0.0] @@ -163,7 +163,7 @@ block0: } ; run -function %fcmp_lt_f32x4() -> b1 { +function %fcmp_lt_f32x4() -> i8 { block0: v0 = vconst.f32x4 [0.0 -0x4.2 0x0.0 -0.0] v1 = vconst.f32x4 [0x0.001 0x4.2 0x0.33333 0x1.0] @@ -173,7 +173,7 @@ block0: } ; run -function %fcmp_ge_f64x2() -> b1 { +function %fcmp_ge_f64x2() -> i8 { block0: v0 = vconst.f64x2 [0x0.0 0x4.2] v1 = vconst.f64x2 [0.0 0x4.1] @@ -183,7 +183,7 @@ block0: } ; run -function %fcmp_uno_f64x2() -> b1 { +function %fcmp_uno_f64x2() -> i8 { block0: v0 = vconst.f64x2 [0.0 NaN] v1 = vconst.f64x2 [NaN 0x4.1] @@ -193,7 +193,7 @@ block0: } ; run -function %fcmp_gt_nans_f32x4() -> b1 { +function %fcmp_gt_nans_f32x4() -> i8 { block0: v0 = vconst.f32x4 [NaN 0x42.0 -NaN NaN] v1 = vconst.f32x4 [NaN NaN 0x42.0 Inf] diff --git a/cranelift/filetests/filetests/runtests/simd-fma-64bit.clif b/cranelift/filetests/filetests/runtests/simd-fma-64bit.clif index 5f98b80d8a13..228652708611 100644 --- a/cranelift/filetests/filetests/runtests/simd-fma-64bit.clif +++ b/cranelift/filetests/filetests/runtests/simd-fma-64bit.clif @@ -32,16 +32,16 @@ block0(v0: f32x2, v1: f32x2, v2: f32x2): ;; The IEEE754 Standard does not make a lot of guarantees about what ;; comes out of NaN producing operations, we just check if its a NaN -function %fma_is_nan_f32x2(f32x2, f32x2, f32x2) -> b1 { +function %fma_is_nan_f32x2(f32x2, f32x2, f32x2) -> i8 { block0(v0: f32x2, v1: f32x2, v2: f32x2): v3 = fma v0, v1, v2 v4 = fcmp ne v3, v3 v5 = vall_true v4 return v5 } -; run: %fma_is_nan_f32x2([Inf -Inf], [-Inf Inf], [Inf Inf]) == true -; run: %fma_is_nan_f32x2([-Inf +NaN], [-Inf 0x0.0], [-Inf 0x0.0]) == true -; run: %fma_is_nan_f32x2([0x0.0 0x0.0], [+NaN 0x0.0], [0x0.0 +NaN]) == true -; run: %fma_is_nan_f32x2([-NaN 0x0.0], [0x0.0 -NaN], 
[0x0.0 0x0.0]) == true -; run: %fma_is_nan_f32x2([0x0.0 NaN], [0x0.0 NaN], [-NaN NaN]) == true -; run: %fma_is_nan_f32x2([NaN NaN], [NaN NaN], [NaN NaN]) == true +; run: %fma_is_nan_f32x2([Inf -Inf], [-Inf Inf], [Inf Inf]) == 1 +; run: %fma_is_nan_f32x2([-Inf +NaN], [-Inf 0x0.0], [-Inf 0x0.0]) == 1 +; run: %fma_is_nan_f32x2([0x0.0 0x0.0], [+NaN 0x0.0], [0x0.0 +NaN]) == 1 +; run: %fma_is_nan_f32x2([-NaN 0x0.0], [0x0.0 -NaN], [0x0.0 0x0.0]) == 1 +; run: %fma_is_nan_f32x2([0x0.0 NaN], [0x0.0 NaN], [-NaN NaN]) == 1 +; run: %fma_is_nan_f32x2([NaN NaN], [NaN NaN], [NaN NaN]) == 1 diff --git a/cranelift/filetests/filetests/runtests/simd-fma.clif b/cranelift/filetests/filetests/runtests/simd-fma.clif index cfb1e6b119fc..4ff5e510411d 100644 --- a/cranelift/filetests/filetests/runtests/simd-fma.clif +++ b/cranelift/filetests/filetests/runtests/simd-fma.clif @@ -29,16 +29,16 @@ block0(v0: f32x4, v1: f32x4, v2: f32x4): ;; The IEEE754 Standard does not make a lot of guarantees about what ;; comes out of NaN producing operations, we just check if its a NaN -function %fma_is_nan_f32x4(f32x4, f32x4, f32x4) -> b1 { +function %fma_is_nan_f32x4(f32x4, f32x4, f32x4) -> i8 { block0(v0: f32x4, v1: f32x4, v2: f32x4): v3 = fma v0, v1, v2 v4 = fcmp ne v3, v3 v5 = vall_true v4 return v5 } -; run: %fma_is_nan_f32x4([Inf -Inf -Inf +NaN], [-Inf Inf -Inf 0x0.0], [Inf Inf -Inf 0x0.0]) == true -; run: %fma_is_nan_f32x4([0x0.0 0x0.0 -NaN 0x0.0], [+NaN 0x0.0 0x0.0 -NaN], [0x0.0 +NaN 0x0.0 0x0.0]) == true -; run: %fma_is_nan_f32x4([0x0.0 NaN NaN NaN], [0x0.0 NaN NaN NaN], [-NaN NaN NaN NaN]) == true +; run: %fma_is_nan_f32x4([Inf -Inf -Inf +NaN], [-Inf Inf -Inf 0x0.0], [Inf Inf -Inf 0x0.0]) == 1 +; run: %fma_is_nan_f32x4([0x0.0 0x0.0 -NaN 0x0.0], [+NaN 0x0.0 0x0.0 -NaN], [0x0.0 +NaN 0x0.0 0x0.0]) == 1 +; run: %fma_is_nan_f32x4([0x0.0 NaN NaN NaN], [0x0.0 NaN NaN NaN], [-NaN NaN NaN NaN]) == 1 @@ -73,15 +73,15 @@ block0(v0: f64x2, v1: f64x2, v2: f64x2): ;; The IEEE754 Standard does not make a lot of guarantees about what ;; comes out of NaN producing operations, we just check if its a NaN -function %fma_is_nan_f64x2(f64x2, f64x2, f64x2) -> b1 { +function %fma_is_nan_f64x2(f64x2, f64x2, f64x2) -> i8 { block0(v0: f64x2, v1: f64x2, v2: f64x2): v3 = fma v0, v1, v2 v4 = fcmp ne v3, v3 v5 = vall_true v4 return v5 } -; run: %fma_is_nan_f64x2([Inf -Inf], [-Inf Inf], [Inf Inf]) == true -; run: %fma_is_nan_f64x2([-Inf +NaN], [-Inf 0x0.0], [-Inf 0x0.0]) == true -; run: %fma_is_nan_f64x2([0x0.0 0x0.0], [+NaN 0x0.0], [0x0.0 +NaN]) == true -; run: %fma_is_nan_f64x2([-NaN 0x0.0], [0x0.0 -NaN], [0x0.0 0x0.0]) == true -; run: %fma_is_nan_f64x2([0x0.0 NaN], [0x0.0 NaN], [-NaN NaN]) == true +; run: %fma_is_nan_f64x2([Inf -Inf], [-Inf Inf], [Inf Inf]) == 1 +; run: %fma_is_nan_f64x2([-Inf +NaN], [-Inf 0x0.0], [-Inf 0x0.0]) == 1 +; run: %fma_is_nan_f64x2([0x0.0 0x0.0], [+NaN 0x0.0], [0x0.0 +NaN]) == 1 +; run: %fma_is_nan_f64x2([-NaN 0x0.0], [0x0.0 -NaN], [0x0.0 0x0.0]) == 1 +; run: %fma_is_nan_f64x2([0x0.0 NaN], [0x0.0 NaN], [-NaN NaN]) == 1 diff --git a/cranelift/filetests/filetests/runtests/simd-icmp-eq.clif b/cranelift/filetests/filetests/runtests/simd-icmp-eq.clif index d37d8ace18b0..c3f6dfa8c9ca 100644 --- a/cranelift/filetests/filetests/runtests/simd-icmp-eq.clif +++ b/cranelift/filetests/filetests/runtests/simd-icmp-eq.clif @@ -1,30 +1,30 @@ test interpret -function %simd_icmp_eq_i8(i8x16, i8x16) -> b8x16 { +function %simd_icmp_eq_i8(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp eq v0, v1 return v2 } -; run: 
%simd_icmp_eq_i8([1 0 -1 1 1 1 1 1 1 1 1 1 1 1 1 1], [1 0 -1 0 0 0 0 0 0 0 0 0 0 0 0 0]) == [true true true false false false false false false false false false false false false false] +; run: %simd_icmp_eq_i8([1 0 -1 1 1 1 1 1 1 1 1 1 1 1 1 1], [1 0 -1 0 0 0 0 0 0 0 0 0 0 0 0 0]) == [-1 -1 -1 0 0 0 0 0 0 0 0 0 0 0 0 0] -function %simd_icmp_eq_i16(i16x8, i16x8) -> b16x8 { +function %simd_icmp_eq_i16(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp eq v0, v1 return v2 } -; run: %simd_icmp_eq_i16([1 0 -1 1 1 1 1 1], [1 0 -1 0 0 0 0 0]) == [true true true false false false false false] +; run: %simd_icmp_eq_i16([1 0 -1 1 1 1 1 1], [1 0 -1 0 0 0 0 0]) == [-1 -1 -1 0 0 0 0 0] -function %simd_icmp_eq_i32(i32x4, i32x4) -> b32x4 { +function %simd_icmp_eq_i32(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp eq v0, v1 return v2 } -; run: %simd_icmp_eq_i32([1 0 -1 1], [1 0 -1 0]) == [true true true false] +; run: %simd_icmp_eq_i32([1 0 -1 1], [1 0 -1 0]) == [-1 -1 -1 0] -function %simd_icmp_eq_i64(i64x2, i64x2) -> b64x2 { +function %simd_icmp_eq_i64(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp eq v0, v1 return v2 } -; run: %simd_icmp_eq_i64([10 0], [1 0]) == [false true] -; run: %simd_icmp_eq_i64([-1 1], [-1 0]) == [true false] +; run: %simd_icmp_eq_i64([10 0], [1 0]) == [0 -1] +; run: %simd_icmp_eq_i64([-1 1], [-1 0]) == [-1 0] diff --git a/cranelift/filetests/filetests/runtests/simd-icmp-ne.clif b/cranelift/filetests/filetests/runtests/simd-icmp-ne.clif index 8b0400bf175a..a1ef4acaf745 100644 --- a/cranelift/filetests/filetests/runtests/simd-icmp-ne.clif +++ b/cranelift/filetests/filetests/runtests/simd-icmp-ne.clif @@ -1,30 +1,30 @@ test interpret -function %simd_icmp_ne_i8(i8x16, i8x16) -> b8x16 { +function %simd_icmp_ne_i8(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp ne v0, v1 return v2 } -; run: %simd_icmp_ne_i8([1 0 -1 1 1 1 1 1 1 1 1 1 1 1 1 1], [1 0 -1 0 0 0 0 0 0 0 0 0 0 0 0 0]) == [false false false true true true true true true true true true true true true true] +; run: %simd_icmp_ne_i8([1 0 -1 1 1 1 1 1 1 1 1 1 1 1 1 1], [1 0 -1 0 0 0 0 0 0 0 0 0 0 0 0 0]) == [0 0 0 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1] -function %simd_icmp_ne_i16(i16x8, i16x8) -> b16x8 { +function %simd_icmp_ne_i16(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp ne v0, v1 return v2 } -; run: %simd_icmp_ne_i16([1 0 -1 1 1 1 1 1], [1 0 -1 0 0 0 0 0]) == [false false false true true true true true] +; run: %simd_icmp_ne_i16([1 0 -1 1 1 1 1 1], [1 0 -1 0 0 0 0 0]) == [0 0 0 -1 -1 -1 -1 -1] -function %simd_icmp_ne_i32(i32x4, i32x4) -> b32x4 { +function %simd_icmp_ne_i32(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp ne v0, v1 return v2 } -; run: %simd_icmp_ne_i32([1 0 -1 1], [1 0 -1 0]) == [false false false true] +; run: %simd_icmp_ne_i32([1 0 -1 1], [1 0 -1 0]) == [0 0 0 -1] -function %simd_icmp_ne_i64(i64x2, i64x2) -> b64x2 { +function %simd_icmp_ne_i64(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp ne v0, v1 return v2 } -; run: %simd_icmp_ne_i64([10 0], [1 0]) == [true false] -; run: %simd_icmp_ne_i64([-1 1], [-1 0]) == [false true] +; run: %simd_icmp_ne_i64([10 0], [1 0]) == [-1 0] +; run: %simd_icmp_ne_i64([-1 1], [-1 0]) == [0 -1] diff --git a/cranelift/filetests/filetests/runtests/simd-icmp-sge.clif b/cranelift/filetests/filetests/runtests/simd-icmp-sge.clif index cf11633c3b39..f89ab0fbb35c 100644 --- a/cranelift/filetests/filetests/runtests/simd-icmp-sge.clif +++ 
b/cranelift/filetests/filetests/runtests/simd-icmp-sge.clif @@ -1,33 +1,33 @@ test interpret -function %simd_icmp_sge_i8(i8x16, i8x16) -> b8x16 { +function %simd_icmp_sge_i8(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp sge v0, v1 return v2 } -; run: %simd_icmp_sge_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 1 0 0 0 0 0 0 0 0 0 0]) == [true true true false false true true true true true true true true true true true] +; run: %simd_icmp_sge_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 1 0 0 0 0 0 0 0 0 0 0]) == [-1 -1 -1 0 0 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1] -function %simd_icmp_sge_i16(i16x8, i16x8) -> b16x8 { +function %simd_icmp_sge_i16(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp sge v0, v1 return v2 } -; run: %simd_icmp_sge_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 1 0 0]) == [true true true false false true true true] +; run: %simd_icmp_sge_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 1 0 0]) == [-1 -1 -1 0 0 -1 -1 -1] -function %simd_icmp_sge_i32(i32x4, i32x4) -> b32x4 { +function %simd_icmp_sge_i32(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp sge v0, v1 return v2 } -; run: %simd_icmp_sge_i32([0 1 -1 0], [0 0 -1 1]) == [true true true false] -; run: %simd_icmp_sge_i32([-5 1 0 0], [-1 1 0 0]) == [false true true true] +; run: %simd_icmp_sge_i32([0 1 -1 0], [0 0 -1 1]) == [-1 -1 -1 0] +; run: %simd_icmp_sge_i32([-5 1 0 0], [-1 1 0 0]) == [0 -1 -1 -1] -function %simd_icmp_sge_i64(i64x2, i64x2) -> b64x2 { +function %simd_icmp_sge_i64(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp sge v0, v1 return v2 } -; run: %simd_icmp_sge_i64([0 1], [0 0]) == [true true] -; run: %simd_icmp_sge_i64([-1 0], [-1 1]) == [true false] -; run: %simd_icmp_sge_i64([-5 1], [-1 1]) == [false true] -; run: %simd_icmp_sge_i64([0 0], [0 0]) == [true true] +; run: %simd_icmp_sge_i64([0 1], [0 0]) == [-1 -1] +; run: %simd_icmp_sge_i64([-1 0], [-1 1]) == [-1 0] +; run: %simd_icmp_sge_i64([-5 1], [-1 1]) == [0 -1] +; run: %simd_icmp_sge_i64([0 0], [0 0]) == [-1 -1] diff --git a/cranelift/filetests/filetests/runtests/simd-icmp-sgt.clif b/cranelift/filetests/filetests/runtests/simd-icmp-sgt.clif index 850cd020bf94..1bb9184c72d7 100644 --- a/cranelift/filetests/filetests/runtests/simd-icmp-sgt.clif +++ b/cranelift/filetests/filetests/runtests/simd-icmp-sgt.clif @@ -1,35 +1,35 @@ test interpret -function %simd_icmp_sgt_i8(i8x16, i8x16) -> b8x16 { +function %simd_icmp_sgt_i8(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp sgt v0, v1 return v2 } -; run: %simd_icmp_sgt_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 -1 0 0 0 0 0 0 0 0 0 0]) == [false true false false false true false false false false false false false false false false] +; run: %simd_icmp_sgt_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 -1 0 0 0 0 0 0 0 0 0 0]) == [0 -1 0 0 0 -1 0 0 0 0 0 0 0 0 0 0] -function %simd_icmp_sgt_i16(i16x8, i16x8) -> b16x8 { +function %simd_icmp_sgt_i16(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp sgt v0, v1 return v2 } -; run: %simd_icmp_sgt_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 -1 0 0]) == [false true false false false true false false] +; run: %simd_icmp_sgt_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 -1 0 0]) == [0 -1 0 0 0 -1 0 0] -function %simd_icmp_sgt_i32(i32x4, i32x4) -> b32x4 { +function %simd_icmp_sgt_i32(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp sgt v0, v1 return v2 } -; run: %simd_icmp_sgt_i32([0 1 -1 0], [0 0 -1 1]) == [false true false false] -; run: 
%simd_icmp_sgt_i32([-5 1 0 0], [-1 -1 0 0]) == [false true false false] +; run: %simd_icmp_sgt_i32([0 1 -1 0], [0 0 -1 1]) == [0 -1 0 0] +; run: %simd_icmp_sgt_i32([-5 1 0 0], [-1 -1 0 0]) == [0 -1 0 0] -function %simd_icmp_sgt_i64(i64x2, i64x2) -> b64x2 { +function %simd_icmp_sgt_i64(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp sgt v0, v1 return v2 } -; run: %simd_icmp_sgt_i64([0 1], [0 0 ]) == [false true] -; run: %simd_icmp_sgt_i64([-1 0], [-1 1]) == [false false] -; run: %simd_icmp_sgt_i64([-5 1], [-1 -1]) == [false true] -; run: %simd_icmp_sgt_i64([0 0], [0 0]) == [false false] +; run: %simd_icmp_sgt_i64([0 1], [0 0 ]) == [0 -1] +; run: %simd_icmp_sgt_i64([-1 0], [-1 1]) == [0 0] +; run: %simd_icmp_sgt_i64([-5 1], [-1 -1]) == [0 -1] +; run: %simd_icmp_sgt_i64([0 0], [0 0]) == [0 0] diff --git a/cranelift/filetests/filetests/runtests/simd-icmp-sle.clif b/cranelift/filetests/filetests/runtests/simd-icmp-sle.clif index a5890e82df71..67da079f6b08 100644 --- a/cranelift/filetests/filetests/runtests/simd-icmp-sle.clif +++ b/cranelift/filetests/filetests/runtests/simd-icmp-sle.clif @@ -1,35 +1,35 @@ test interpret -function %simd_icmp_sle_i8(i8x16, i8x16) -> b8x16 { +function %simd_icmp_sle_i8(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp sle v0, v1 return v2 } -; run: %simd_icmp_sle_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 -1 0 0 0 0 0 0 0 0 0 0]) == [true false true true true false true true true true true true true true true true] +; run: %simd_icmp_sle_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 -1 0 0 0 0 0 0 0 0 0 0]) == [-1 0 -1 -1 -1 0 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1] -function %simd_icmp_sle_i16(i16x8, i16x8) -> b16x8 { +function %simd_icmp_sle_i16(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp sle v0, v1 return v2 } -; run: %simd_icmp_sle_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 -1 0 0]) == [true false true true true false true true] +; run: %simd_icmp_sle_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 -1 0 0]) == [-1 0 -1 -1 -1 0 -1 -1] -function %simd_icmp_sle_i32(i32x4, i32x4) -> b32x4 { +function %simd_icmp_sle_i32(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp sle v0, v1 return v2 } -; run: %simd_icmp_sle_i32([0 1 -1 0], [0 0 -1 1]) == [true false true true] -; run: %simd_icmp_sle_i32([-5 1 0 0], [-1 -1 0 0]) == [true false true true] +; run: %simd_icmp_sle_i32([0 1 -1 0], [0 0 -1 1]) == [-1 0 -1 -1] +; run: %simd_icmp_sle_i32([-5 1 0 0], [-1 -1 0 0]) == [-1 0 -1 -1] -function %simd_icmp_sle_i64(i64x2, i64x2) -> b64x2 { +function %simd_icmp_sle_i64(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp sle v0, v1 return v2 } -; run: %simd_icmp_sle_i64([0 1], [0 0 ]) == [true false] -; run: %simd_icmp_sle_i64([-1 0], [-1 1]) == [true true] -; run: %simd_icmp_sle_i64([-5 1], [-1 -1]) == [true false] -; run: %simd_icmp_sle_i64([0 0], [0 0]) == [true true] +; run: %simd_icmp_sle_i64([0 1], [0 0 ]) == [-1 0] +; run: %simd_icmp_sle_i64([-1 0], [-1 1]) == [-1 -1] +; run: %simd_icmp_sle_i64([-5 1], [-1 -1]) == [-1 0] +; run: %simd_icmp_sle_i64([0 0], [0 0]) == [-1 -1] diff --git a/cranelift/filetests/filetests/runtests/simd-icmp-slt.clif b/cranelift/filetests/filetests/runtests/simd-icmp-slt.clif index 52e52167f916..aa14590dc210 100644 --- a/cranelift/filetests/filetests/runtests/simd-icmp-slt.clif +++ b/cranelift/filetests/filetests/runtests/simd-icmp-slt.clif @@ -1,33 +1,33 @@ test interpret -function %simd_icmp_slt_i8(i8x16, i8x16) -> b8x16 { +function %simd_icmp_slt_i8(i8x16, i8x16) -> 
i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp slt v0, v1 return v2 } -; run: %simd_icmp_slt_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 1 0 0 0 0 0 0 0 0 0 0]) == [false false false true true false false false false false false false false false false false] +; run: %simd_icmp_slt_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 1 0 0 0 0 0 0 0 0 0 0]) == [0 0 0 -1 -1 0 0 0 0 0 0 0 0 0 0 0] -function %simd_icmp_slt_i16(i16x8, i16x8) -> b16x8 { +function %simd_icmp_slt_i16(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp slt v0, v1 return v2 } -; run: %simd_icmp_slt_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 1 0 0]) == [false false false true true false false false] +; run: %simd_icmp_slt_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 1 0 0]) == [0 0 0 -1 -1 0 0 0] -function %simd_icmp_slt_i32(i32x4, i32x4) -> b32x4 { +function %simd_icmp_slt_i32(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp slt v0, v1 return v2 } -; run: %simd_icmp_slt_i32([0 1 -1 0], [0 0 -1 1]) == [false false false true] -; run: %simd_icmp_slt_i32([-5 1 0 0], [-1 1 0 0]) == [true false false false] +; run: %simd_icmp_slt_i32([0 1 -1 0], [0 0 -1 1]) == [0 0 0 -1] +; run: %simd_icmp_slt_i32([-5 1 0 0], [-1 1 0 0]) == [-1 0 0 0] -function %simd_icmp_slt_i64(i64x2, i64x2) -> b64x2 { +function %simd_icmp_slt_i64(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp slt v0, v1 return v2 } -; run: %simd_icmp_slt_i64([0 1], [0 0]) == [false false] -; run: %simd_icmp_slt_i64([-1 0], [-1 1]) == [false true] -; run: %simd_icmp_slt_i64([-5 1], [-1 1]) == [true false] -; run: %simd_icmp_slt_i64([0 0], [0 0]) == [false false] +; run: %simd_icmp_slt_i64([0 1], [0 0]) == [0 0] +; run: %simd_icmp_slt_i64([-1 0], [-1 1]) == [0 -1] +; run: %simd_icmp_slt_i64([-5 1], [-1 1]) == [-1 0] +; run: %simd_icmp_slt_i64([0 0], [0 0]) == [0 0] diff --git a/cranelift/filetests/filetests/runtests/simd-icmp-uge.clif b/cranelift/filetests/filetests/runtests/simd-icmp-uge.clif index 594a01049ece..7396e7073575 100644 --- a/cranelift/filetests/filetests/runtests/simd-icmp-uge.clif +++ b/cranelift/filetests/filetests/runtests/simd-icmp-uge.clif @@ -1,33 +1,33 @@ test interpret -function %simd_icmp_uge_i8(i8x16, i8x16) -> b8x16 { +function %simd_icmp_uge_i8(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp uge v0, v1 return v2 } -; run: %simd_icmp_uge_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 -1 0 0 0 0 0 0 0 0 0 0]) == [true true true false false false true true true true true true true true true true] +; run: %simd_icmp_uge_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 -1 0 0 0 0 0 0 0 0 0 0]) == [-1 -1 -1 0 0 0 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1] -function %simd_icmp_uge_i16(i16x8, i16x8) -> b16x8 { +function %simd_icmp_uge_i16(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp uge v0, v1 return v2 } -; run: %simd_icmp_uge_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 -1 0 0]) == [true true true false false false true true] +; run: %simd_icmp_uge_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 -1 0 0]) == [-1 -1 -1 0 0 0 -1 -1] -function %simd_icmp_uge_i32(i32x4, i32x4) -> b32x4 { +function %simd_icmp_uge_i32(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp uge v0, v1 return v2 } -; run: %simd_icmp_uge_i32([0 1 -1 0], [0 0 -1 1]) == [true true true false] -; run: %simd_icmp_uge_i32([-5 1 0 0], [-1 -1 0 0]) == [false false true true] +; run: %simd_icmp_uge_i32([0 1 -1 0], [0 0 -1 1]) == [-1 -1 -1 0] +; run: %simd_icmp_uge_i32([-5 1 0 0], [-1 -1 0 0]) == [0 0 -1 -1] 
-function %simd_icmp_uge_i64(i64x2, i64x2) -> b64x2 { +function %simd_icmp_uge_i64(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp uge v0, v1 return v2 } -; run: %simd_icmp_uge_i64([0 1], [0 0]) == [true true] -; run: %simd_icmp_uge_i64([-1 0], [-1 1]) == [true false] -; run: %simd_icmp_uge_i64([-5 1], [-1 -1]) == [false false] -; run: %simd_icmp_uge_i64([0 0], [0 0]) == [true true] +; run: %simd_icmp_uge_i64([0 1], [0 0]) == [-1 -1] +; run: %simd_icmp_uge_i64([-1 0], [-1 1]) == [-1 0] +; run: %simd_icmp_uge_i64([-5 1], [-1 -1]) == [0 0] +; run: %simd_icmp_uge_i64([0 0], [0 0]) == [-1 -1] diff --git a/cranelift/filetests/filetests/runtests/simd-icmp-ugt.clif b/cranelift/filetests/filetests/runtests/simd-icmp-ugt.clif index 557c9cd2e422..972dde401fda 100644 --- a/cranelift/filetests/filetests/runtests/simd-icmp-ugt.clif +++ b/cranelift/filetests/filetests/runtests/simd-icmp-ugt.clif @@ -1,33 +1,33 @@ test interpret -function %simd_icmp_ugt_i8(i8x16, i8x16) -> b8x16 { +function %simd_icmp_ugt_i8(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp ugt v0, v1 return v2 } -; run: %simd_icmp_ugt_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 -1 0 0 0 0 0 0 0 0 0 0]) == [false true false false false false false false false false false false false false false false] +; run: %simd_icmp_ugt_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 -1 0 0 0 0 0 0 0 0 0 0]) == [0 -1 0 0 0 0 0 0 0 0 0 0 0 0 0 0] -function %simd_icmp_ugt_i16(i16x8, i16x8) -> b16x8 { +function %simd_icmp_ugt_i16(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp ugt v0, v1 return v2 } -; run: %simd_icmp_ugt_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 -1 0 0]) == [false true false false false false false false] +; run: %simd_icmp_ugt_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 -1 0 0]) == [0 -1 0 0 0 0 0 0] -function %simd_icmp_ugt_i32(i32x4, i32x4) -> b32x4 { +function %simd_icmp_ugt_i32(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp ugt v0, v1 return v2 } -; run: %simd_icmp_ugt_i32([0 1 -1 0], [0 0 -1 1]) == [false true false false] -; run: %simd_icmp_ugt_i32([-5 1 0 0], [-1 -1 0 0]) == [false false false false] +; run: %simd_icmp_ugt_i32([0 1 -1 0], [0 0 -1 1]) == [0 -1 0 0] +; run: %simd_icmp_ugt_i32([-5 1 0 0], [-1 -1 0 0]) == [0 0 0 0] -function %simd_icmp_ugt_i64(i64x2, i64x2) -> b64x2 { +function %simd_icmp_ugt_i64(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp ugt v0, v1 return v2 } -; run: %simd_icmp_ugt_i64([0 1], [0 0]) == [false true] -; run: %simd_icmp_ugt_i64([-1 0], [-1 1]) == [false false] -; run: %simd_icmp_ugt_i64([-5 1], [-1 -1]) == [false false] -; run: %simd_icmp_ugt_i64([0 0], [0 0]) == [false false] +; run: %simd_icmp_ugt_i64([0 1], [0 0]) == [0 -1] +; run: %simd_icmp_ugt_i64([-1 0], [-1 1]) == [0 0] +; run: %simd_icmp_ugt_i64([-5 1], [-1 -1]) == [0 0] +; run: %simd_icmp_ugt_i64([0 0], [0 0]) == [0 0] diff --git a/cranelift/filetests/filetests/runtests/simd-icmp-ule.clif b/cranelift/filetests/filetests/runtests/simd-icmp-ule.clif index bde5c1a8d6d6..cb0c5fd66b5a 100644 --- a/cranelift/filetests/filetests/runtests/simd-icmp-ule.clif +++ b/cranelift/filetests/filetests/runtests/simd-icmp-ule.clif @@ -1,33 +1,33 @@ test interpret -function %simd_icmp_ule_i8(i8x16, i8x16) -> b8x16 { +function %simd_icmp_ule_i8(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp ule v0, v1 return v2 } -; run: %simd_icmp_ule_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 -1 0 0 0 0 0 0 0 0 0 0]) == [true false true true true true true 
true true true true true true true true true] +; run: %simd_icmp_ule_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 -1 0 0 0 0 0 0 0 0 0 0]) == [-1 0 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1] -function %simd_icmp_ule_i16(i16x8, i16x8) -> b16x8 { +function %simd_icmp_ule_i16(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp ule v0, v1 return v2 } -; run: %simd_icmp_ule_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 -1 0 0]) == [true false true true true true true true] +; run: %simd_icmp_ule_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 -1 0 0]) == [-1 0 -1 -1 -1 -1 -1 -1] -function %simd_icmp_ule_i32(i32x4, i32x4) -> b32x4 { +function %simd_icmp_ule_i32(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp ule v0, v1 return v2 } -; run: %simd_icmp_ule_i32([0 1 -1 0], [0 0 -1 1]) == [true false true true] -; run: %simd_icmp_ule_i32([-5 1 0 0], [-1 -1 0 0]) == [true true true true] +; run: %simd_icmp_ule_i32([0 1 -1 0], [0 0 -1 1]) == [-1 0 -1 -1] +; run: %simd_icmp_ule_i32([-5 1 0 0], [-1 -1 0 0]) == [-1 -1 -1 -1] -function %simd_icmp_ule_i64(i64x2, i64x2) -> b64x2 { +function %simd_icmp_ule_i64(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp ule v0, v1 return v2 } -; run: %simd_icmp_ule_i64([0 1], [0 0]) == [true false] -; run: %simd_icmp_ule_i64([-1 0], [-1 1]) == [true true] -; run: %simd_icmp_ule_i64([-5 1], [-1 -1]) == [true true] -; run: %simd_icmp_ule_i64([0 0], [0 0]) == [true true] +; run: %simd_icmp_ule_i64([0 1], [0 0]) == [-1 0] +; run: %simd_icmp_ule_i64([-1 0], [-1 1]) == [-1 -1] +; run: %simd_icmp_ule_i64([-5 1], [-1 -1]) == [-1 -1] +; run: %simd_icmp_ule_i64([0 0], [0 0]) == [-1 -1] diff --git a/cranelift/filetests/filetests/runtests/simd-icmp-ult.clif b/cranelift/filetests/filetests/runtests/simd-icmp-ult.clif index 315b29c1e0b5..7f732cfaa905 100644 --- a/cranelift/filetests/filetests/runtests/simd-icmp-ult.clif +++ b/cranelift/filetests/filetests/runtests/simd-icmp-ult.clif @@ -1,33 +1,33 @@ test interpret -function %simd_icmp_ult_i8(i8x16, i8x16) -> b8x16 { +function %simd_icmp_ult_i8(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp ult v0, v1 return v2 } -; run: %simd_icmp_ult_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 -1 0 0 0 0 0 0 0 0 0 0]) == [false false false true true true false false false false false false false false false false] +; run: %simd_icmp_ult_i8([0 1 -1 0 -5 1 0 0 0 0 0 0 0 0 0 0], [0 0 -1 1 -1 -1 0 0 0 0 0 0 0 0 0 0]) == [0 0 0 -1 -1 -1 0 0 0 0 0 0 0 0 0 0] -function %simd_icmp_ult_i16(i16x8, i16x8) -> b16x8 { +function %simd_icmp_ult_i16(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v2 = icmp ult v0, v1 return v2 } -; run: %simd_icmp_ult_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 -1 0 0]) == [false false false true true true false false] +; run: %simd_icmp_ult_i16([0 1 -1 0 -5 1 0 0], [0 0 -1 1 -1 -1 0 0]) == [0 0 0 -1 -1 -1 0 0] -function %simd_icmp_ult_i32(i32x4, i32x4) -> b32x4 { +function %simd_icmp_ult_i32(i32x4, i32x4) -> i32x4 { block0(v0: i32x4, v1: i32x4): v2 = icmp ult v0, v1 return v2 } -; run: %simd_icmp_ult_i32([0 1 -1 0], [0 0 -1 1]) == [false false false true] -; run: %simd_icmp_ult_i32([-5 1 0 0], [-1 -1 0 0]) == [true true false false] +; run: %simd_icmp_ult_i32([0 1 -1 0], [0 0 -1 1]) == [0 0 0 -1] +; run: %simd_icmp_ult_i32([-5 1 0 0], [-1 -1 0 0]) == [-1 -1 0 0] -function %simd_icmp_ult_i64(i64x2, i64x2) -> b64x2 { +function %simd_icmp_ult_i64(i64x2, i64x2) -> i64x2 { block0(v0: i64x2, v1: i64x2): v2 = icmp ult v0, v1 return v2 } -; run: %simd_icmp_ult_i64([0 1], [0 
0]) == [false false] -; run: %simd_icmp_ult_i64([-1 0], [-1 1]) == [false true] -; run: %simd_icmp_ult_i64([-5 1], [-1 -1]) == [true true] -; run: %simd_icmp_ult_i64([0 0], [0 0]) == [false false] +; run: %simd_icmp_ult_i64([0 1], [0 0]) == [0 0] +; run: %simd_icmp_ult_i64([-1 0], [-1 1]) == [0 -1] +; run: %simd_icmp_ult_i64([-5 1], [-1 -1]) == [-1 -1] +; run: %simd_icmp_ult_i64([0 0], [0 0]) == [0 0] diff --git a/cranelift/filetests/filetests/runtests/simd-lane-access.clif b/cranelift/filetests/filetests/runtests/simd-lane-access.clif index d43a0e20cf63..45145f9c1486 100644 --- a/cranelift/filetests/filetests/runtests/simd-lane-access.clif +++ b/cranelift/filetests/filetests/runtests/simd-lane-access.clif @@ -34,16 +34,16 @@ block0: } ; run: %shuffle_in_same_place() == [0 1 2 3] -function %shuffle_b32x4_to_all_true() -> i32x4 { +function %shuffle_i32x4_to_all_true() -> i32x4 { block0: - v1 = vconst.b32x4 [true false true false] - v2 = raw_bitcast.b8x16 v1 ; we have to cast because shuffle is type-limited to Tx16 + v1 = vconst.i32x4 [-1 0 -1 0] + v2 = raw_bitcast.i8x16 v1 ; we have to cast because shuffle is type-limited to Tx16 ; pair up the true values to make the entire vector true v3 = shuffle v2, v2, [0 1 2 3 0 1 2 3 8 9 10 11 8 9 10 11] - v4 = raw_bitcast.i32x4 v3 ; TODO store.b32x4 is unavailable; see https://github.com/bytecodealliance/wasmtime/issues/2237 + v4 = raw_bitcast.i32x4 v3 ; TODO store.i32x4 is unavailable; see https://github.com/bytecodealliance/wasmtime/issues/2237 return v4 } -; run: %shuffle_b32x4_to_all_true() == [0xffffffff 0xffffffff 0xffffffff 0xffffffff] +; run: %shuffle_i32x4_to_all_true() == [0xffffffff 0xffffffff 0xffffffff 0xffffffff] @@ -95,15 +95,15 @@ block0(v1: f64x2, v2: f64): ;; extractlane -function %extractlane_b8x16() -> i8 { +function %extractlane_i8x16() -> i8 { block0: - v1 = vconst.b8x16 [false false false false false false false false false false true false false - false false false] + v1 = vconst.i8x16 [0 0 0 0 0 0 0 0 0 0 -1 0 0 + 0 0 0] v2 = extractlane v1, 10 v3 = raw_bitcast.i8 v2 return v3 } -; run: %extractlane_b8x16_last() == 0xff +; run: %extractlane_i8x16_last() == 0xff function %extractlane_i16x8_second(i16x8) -> i16 { block0(v0: i16x8): @@ -119,7 +119,7 @@ block0(v0: f32x4): } ; run: %extractlane_f32x4_last([0x00.00 0x00.00 0x00.00 0x42.42]) == 0x42.42 -function %extractlane_i32_with_vector_reuse() -> b1 { +function %extractlane_i32_with_vector_reuse() -> i8 { block0: v0 = iconst.i32 42 v1 = iconst.i32 99 @@ -138,7 +138,7 @@ block0: } ; run -function %extractlane_f32_with_vector_reuse() -> b1 { +function %extractlane_f32_with_vector_reuse() -> i8 { block0: v0 = f32const 0x42.42 v1 = f32const 0x99.99 @@ -161,7 +161,7 @@ block0: ;; splat -function %splat_i64x2() -> b1 { +function %splat_i64x2() -> i8 { block0: v0 = iconst.i64 -1 v1 = splat.i64x2 v0 diff --git a/cranelift/filetests/filetests/runtests/simd-logical.clif b/cranelift/filetests/filetests/runtests/simd-logical.clif index 406ea9698ddd..0dad8cdb495d 100644 --- a/cranelift/filetests/filetests/runtests/simd-logical.clif +++ b/cranelift/filetests/filetests/runtests/simd-logical.clif @@ -4,16 +4,16 @@ target s390x set enable_simd target x86_64 has_sse3 has_ssse3 has_sse41 -function %bnot() -> b32 { +function %bnot() -> i32 { block0: - v0 = vconst.b32x4 [true true true false] + v0 = vconst.i32x4 [-1 -1 -1 0] v1 = bnot v0 v2 = extractlane v1, 3 return v2 } ; run -function %band_not() -> b1 { +function %band_not() -> i8 { block0: v0 = vconst.i16x8 [1 0 0 0 0 0 0 0] v1 = vconst.i16x8 
[0 0 0 0 0 0 0 0] @@ -24,7 +24,7 @@ block0: } ; run -function %vany_true_i8x16() -> b1, b1 { +function %vany_true_i8x16() -> i8, i8 { block0: v0 = vconst.i8x16 [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] v1 = vany_true v0 @@ -34,9 +34,9 @@ block0: return v1, v3 } -; run: %vany_true_i8x16() == [false, true] +; run: %vany_true_i8x16() == [0, 1] -function %vany_true_i16x8() -> b1, b1 { +function %vany_true_i16x8() -> i8, i8 { block0: v0 = vconst.i16x8 [0 0 0 0 0 0 0 0] v1 = vany_true v0 @@ -46,9 +46,9 @@ block0: return v1, v3 } -; run: %vany_true_i16x8() == [false, true] +; run: %vany_true_i16x8() == [0, 1] -function %vany_true_i32x4() -> b1, b1 { +function %vany_true_i32x4() -> i8, i8 { block0: v0 = vconst.i32x4 [0 0 0 0] v1 = vany_true v0 @@ -58,9 +58,9 @@ block0: return v1, v3 } -; run: %vany_true_i32x4() == [false, true] +; run: %vany_true_i32x4() == [0, 1] -function %vany_true_i64x2() -> b1, b1 { +function %vany_true_i64x2() -> i8, i8 { block0: v0 = vconst.i64x2 [0 0] v1 = vany_true v0 @@ -70,9 +70,9 @@ block0: return v1, v3 } -; run: %vany_true_i64x2() == [false, true] +; run: %vany_true_i64x2() == [0, 1] -function %vany_true_f32x4() -> b1, b1 { +function %vany_true_f32x4() -> i8, i8 { block0: v0 = vconst.f32x4 [0.0 0.0 0.0 0.0] v1 = vany_true v0 @@ -82,9 +82,9 @@ block0: return v1, v3 } -; run: %vany_true_f32x4() == [false, true] +; run: %vany_true_f32x4() == [0, 1] -function %vany_true_f64x2() -> b1, b1 { +function %vany_true_f64x2() -> i8, i8 { block0: v0 = vconst.f64x2 [0.0 0.0] v1 = vany_true v0 @@ -94,31 +94,31 @@ block0: return v1, v3 } -; run: %vany_true_f64x2() == [false, true] +; run: %vany_true_f64x2() == [0, 1] -function %vany_true_b32x4() -> b1 { +function %vany_true_i32x4_imm() -> i8 { block0: - v0 = vconst.b32x4 [false false false false] + v0 = vconst.i32x4 [0 0 0 0] v1 = vany_true v0 - v2 = bint.i32 v1 + v2 = uextend.i32 v1 v3 = icmp_imm eq v2, 0 return v3 } ; run -function %vall_true_i16x8() -> b1 { +function %vall_true_i16x8() -> i8 { block0: v0 = vconst.i16x8 [1 0 0 0 0 0 0 0] v1 = vall_true v0 - v2 = bint.i32 v1 + v2 = uextend.i32 v1 v3 = icmp_imm eq v2, 0 return v3 } ; run -function %vall_true_b32x4() -> b1 { +function %vall_true_i32x4() -> i8 { block0: - v0 = vconst.b32x4 [true true true true] + v0 = vconst.i32x4 [-1 -1 -1 -1] v1 = vall_true v0 return v1 } diff --git a/cranelift/filetests/filetests/runtests/simd-splat.clif b/cranelift/filetests/filetests/runtests/simd-splat.clif index 702e229a4073..37db142ec145 100644 --- a/cranelift/filetests/filetests/runtests/simd-splat.clif +++ b/cranelift/filetests/filetests/runtests/simd-splat.clif @@ -59,9 +59,6 @@ block0(v0: f64): ; run: %splat_f64x2(0x2.0) == [0x2.0 0x2.0] ; run: %splat_f64x2(NaN) == [NaN NaN] -; TODO: Test combinations of `bconst` and `splat`, potentially with `breduce` in -; the middle - function %splat_i8x16_2(i8x16) -> i8x16 { block0(v0: i8x16): v1 = iconst.i8 116 diff --git a/cranelift/filetests/filetests/runtests/simd-ushr.clif b/cranelift/filetests/filetests/runtests/simd-ushr.clif index b77aedad58af..8e6300bf999c 100644 --- a/cranelift/filetests/filetests/runtests/simd-ushr.clif +++ b/cranelift/filetests/filetests/runtests/simd-ushr.clif @@ -39,7 +39,7 @@ block0(v0: i64x2, v1: i32): ; run: %ushr_i64x2([1 2], 65) == [0 1] -function %sshr_imm_i16x8() -> b1 { +function %sshr_imm_i16x8() -> i8 { block0: v1 = vconst.i16x8 [1 2 4 -8 0 0 0 0] v2 = ushr_imm v1, 1 diff --git a/cranelift/filetests/filetests/runtests/simd-valltrue-64bit.clif b/cranelift/filetests/filetests/runtests/simd-valltrue-64bit.clif index 
6085304a4f2d..2c6a9f9ad8a1 100644 --- a/cranelift/filetests/filetests/runtests/simd-valltrue-64bit.clif +++ b/cranelift/filetests/filetests/runtests/simd-valltrue-64bit.clif @@ -3,56 +3,56 @@ test run target aarch64 ; s390x and x86_64 do not support 64-bit vectors. -function %valltrue_b8x8_f() -> b1 { +function %valltrue_i8x8_f() -> i8 { block0: - v0 = bconst.b8 false - v1 = splat.b8x8 v0 + v0 = iconst.i8 0 + v1 = splat.i8x8 v0 v2 = vall_true v1 return v2 } -; run: %valltrue_b8x8_f() == false +; run: %valltrue_i8x8_f() == 0 -function %valltrue_b8x8_t() -> b1 { +function %valltrue_i8x8_t() -> i8 { block0: - v0 = bconst.b8 true - v1 = splat.b8x8 v0 + v0 = iconst.i8 -1 + v1 = splat.i8x8 v0 v2 = vall_true v1 return v2 } -; run: %valltrue_b8x8_t() == true +; run: %valltrue_i8x8_t() == 1 -function %valltrue_b16x4_f() -> b1 { +function %valltrue_i16x4_f() -> i8 { block0: - v0 = bconst.b16 false - v1 = splat.b16x4 v0 + v0 = iconst.i16 0 + v1 = splat.i16x4 v0 v2 = vall_true v1 return v2 } -; run: %valltrue_b16x4_f() == false +; run: %valltrue_i16x4_f() == 0 -function %valltrue_b16x4_t() -> b1 { +function %valltrue_i16x4_t() -> i8 { block0: - v0 = bconst.b16 true - v1 = splat.b16x4 v0 + v0 = iconst.i16 -1 + v1 = splat.i16x4 v0 v2 = vall_true v1 return v2 } -; run: %valltrue_b16x4_t() == true +; run: %valltrue_i16x4_t() == 1 -function %valltrue_b32x2_f() -> b1 { +function %valltrue_i32x2_f() -> i8 { block0: - v0 = bconst.b32 false - v1 = splat.b32x2 v0 + v0 = iconst.i32 0 + v1 = splat.i32x2 v0 v2 = vall_true v1 return v2 } -; run: %valltrue_b32x2_f() == false +; run: %valltrue_i32x2_f() == 0 -function %valltrue_b32x2_t() -> b1 { +function %valltrue_i32x2_t() -> i8 { block0: - v0 = bconst.b32 true - v1 = splat.b32x2 v0 + v0 = iconst.i32 -1 + v1 = splat.i32x2 v0 v2 = vall_true v1 return v2 } -; run: %valltrue_b32x2_t() == true +; run: %valltrue_i32x2_t() == 1 diff --git a/cranelift/filetests/filetests/runtests/simd-valltrue.clif b/cranelift/filetests/filetests/runtests/simd-valltrue.clif index c799893ac8e2..ffa0b269f989 100644 --- a/cranelift/filetests/filetests/runtests/simd-valltrue.clif +++ b/cranelift/filetests/filetests/runtests/simd-valltrue.clif @@ -4,41 +4,41 @@ target aarch64 target s390x target x86_64 -function %vall_true_b8x16(b8x16) -> b1 { -block0(v0: b8x16): +function %vall_true_i8x16(i8x16) -> i8 { +block0(v0: i8x16): v1 = vall_true v0 return v1 } -; run: %vall_true_b8x16([false false false false false false false false false false false false false false false false]) == false -; run: %vall_true_b8x16([true false false false false false false false false false false false false false false false]) == false -; run: %vall_true_b8x16([true true true true true true true true true true true true true true true true]) == true +; run: %vall_true_i8x16([0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]) == 0 +; run: %vall_true_i8x16([-1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]) == 0 +; run: %vall_true_i8x16([-1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1]) == 1 -function %vall_true_b16x8(b16x8) -> b1 { -block0(v0: b16x8): +function %vall_true_i16x8(i16x8) -> i8 { +block0(v0: i16x8): v1 = vall_true v0 return v1 } -; run: %vall_true_b16x8([false false false false false false false false]) == false -; run: %vall_true_b16x8([true false false false false false false false]) == false -; run: %vall_true_b16x8([true true true true true true true true]) == true +; run: %vall_true_i16x8([0 0 0 0 0 0 0 0]) == 0 +; run: %vall_true_i16x8([-1 0 0 0 0 0 0 0]) == 0 +; run: %vall_true_i16x8([-1 -1 -1 -1 -1 -1 -1 -1]) == 1 -function 
%vall_true_b32x4(b32x4) -> b1 { -block0(v0: b32x4): +function %vall_true_i32x4(i32x4) -> i8 { +block0(v0: i32x4): v1 = vall_true v0 return v1 } -; run: %vall_true_b32x4([false false false false]) == false -; run: %vall_true_b32x4([true false false false]) == false -; run: %vall_true_b32x4([true true true true]) == true +; run: %vall_true_i32x4([0 0 0 0]) == 0 +; run: %vall_true_i32x4([-1 0 0 0]) == 0 +; run: %vall_true_i32x4([-1 -1 -1 -1]) == 1 -function %vall_true_b64x2(b64x2) -> b1 { -block0(v0: b64x2): +function %vall_true_i64x2(i64x2) -> i8 { +block0(v0: i64x2): v1 = vall_true v0 return v1 } -; run: %vall_true_b64x2([false false]) == false -; run: %vall_true_b64x2([true false]) == false -; run: %vall_true_b64x2([true true]) == true +; run: %vall_true_i64x2([0 0]) == 0 +; run: %vall_true_i64x2([-1 0]) == 0 +; run: %vall_true_i64x2([-1 -1]) == 1 diff --git a/cranelift/filetests/filetests/runtests/simd-vanytrue-64bit.clif b/cranelift/filetests/filetests/runtests/simd-vanytrue-64bit.clif index 8ead6d2d3799..2c5406163044 100644 --- a/cranelift/filetests/filetests/runtests/simd-vanytrue-64bit.clif +++ b/cranelift/filetests/filetests/runtests/simd-vanytrue-64bit.clif @@ -3,56 +3,56 @@ test run target aarch64 ; s390x and x86_64 do not support 64-bit vectors. -function %vanytrue_b8x8_f() -> b1 { +function %vanytrue_i8x8_f() -> i8 { block0: - v0 = bconst.b8 false - v1 = splat.b8x8 v0 + v0 = iconst.i8 0 + v1 = splat.i8x8 v0 v2 = vany_true v1 return v2 } -; run: %vanytrue_b8x8_f() == false +; run: %vanytrue_i8x8_f() == 0 -function %vanytrue_b8x8_t() -> b1 { +function %vanytrue_i8x8_t() -> i8 { block0: - v0 = bconst.b8 true - v1 = splat.b8x8 v0 + v0 = iconst.i8 -1 + v1 = splat.i8x8 v0 v2 = vany_true v1 return v2 } -; run: %vanytrue_b8x8_t() == true +; run: %vanytrue_i8x8_t() == 1 -function %vanytrue_b16x4_f() -> b1 { +function %vanytrue_i16x4_f() -> i8 { block0: - v0 = bconst.b16 false - v1 = splat.b16x4 v0 + v0 = iconst.i16 0 + v1 = splat.i16x4 v0 v2 = vany_true v1 return v2 } -; run: %vanytrue_b16x4_f() == false +; run: %vanytrue_i16x4_f() == 0 -function %vanytrue_b16x4_t() -> b1 { +function %vanytrue_i16x4_t() -> i8 { block0: - v0 = bconst.b16 true - v1 = splat.b16x4 v0 + v0 = iconst.i16 -1 + v1 = splat.i16x4 v0 v2 = vany_true v1 return v2 } -; run: %vanytrue_b16x4_t() == true +; run: %vanytrue_i16x4_t() == 1 -function %vanytrue_b32x2_f() -> b1 { +function %vanytrue_i32x2_f() -> i8 { block0: - v0 = bconst.b32 false - v1 = splat.b32x2 v0 + v0 = iconst.i32 0 + v1 = splat.i32x2 v0 v2 = vany_true v1 return v2 } -; run: %vanytrue_b32x2_f() == false +; run: %vanytrue_i32x2_f() == 0 -function %vanytrue_b32x2_t() -> b1 { +function %vanytrue_i32x2_t() -> i8 { block0: - v0 = bconst.b32 true - v1 = splat.b32x2 v0 + v0 = iconst.i32 -1 + v1 = splat.i32x2 v0 v2 = vany_true v1 return v2 } -; run: %vanytrue_b32x2_t() == true +; run: %vanytrue_i32x2_t() == 1 diff --git a/cranelift/filetests/filetests/runtests/simd-vanytrue.clif b/cranelift/filetests/filetests/runtests/simd-vanytrue.clif index 28e1c60a7d50..4d5a6904f7e9 100644 --- a/cranelift/filetests/filetests/runtests/simd-vanytrue.clif +++ b/cranelift/filetests/filetests/runtests/simd-vanytrue.clif @@ -4,41 +4,41 @@ target aarch64 target s390x target x86_64 -function %vany_true_b8x16(b8x16) -> b1 { -block0(v0: b8x16): +function %vany_true_i8x16(i8x16) -> i8 { +block0(v0: i8x16): v1 = vany_true v0 return v1 } -; run: %vany_true_b8x16([false false false false false false false false false false false false false false false false]) == false -; run: 
%vany_true_b8x16([true false false false false false false false false false false false false false false false]) == true -; run: %vany_true_b8x16([true true true true true true true true true true true true true true true true]) == true +; run: %vany_true_i8x16([0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]) == 0 +; run: %vany_true_i8x16([-1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]) == 1 +; run: %vany_true_i8x16([-1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1]) == 1 -function %vany_true_b16x8(b16x8) -> b1 { -block0(v0: b16x8): +function %vany_true_i16x8(i16x8) -> i8 { +block0(v0: i16x8): v1 = vany_true v0 return v1 } -; run: %vany_true_b16x8([false false false false false false false false]) == false -; run: %vany_true_b16x8([true false false false false false false false]) == true -; run: %vany_true_b16x8([true true true true true true true true]) == true +; run: %vany_true_i16x8([0 0 0 0 0 0 0 0]) == 0 +; run: %vany_true_i16x8([-1 0 0 0 0 0 0 0]) == 1 +; run: %vany_true_i16x8([-1 -1 -1 -1 -1 -1 -1 -1]) == 1 -function %vany_true_b32x4(b32x4) -> b1 { -block0(v0: b32x4): +function %vany_true_i32x4(i32x4) -> i8 { +block0(v0: i32x4): v1 = vany_true v0 return v1 } -; run: %vany_true_b32x4([false false false false]) == false -; run: %vany_true_b32x4([true false false false]) == true -; run: %vany_true_b32x4([true true true true]) == true +; run: %vany_true_i32x4([0 0 0 0]) == 0 +; run: %vany_true_i32x4([-1 0 0 0]) == 1 +; run: %vany_true_i32x4([-1 -1 -1 -1]) == 1 -function %vany_true_b64x2(b64x2) -> b1 { -block0(v0: b64x2): +function %vany_true_i64x2(i64x2) -> i8 { +block0(v0: i64x2): v1 = vany_true v0 return v1 } -; run: %vany_true_b64x2([false false]) == false -; run: %vany_true_b64x2([true false]) == true -; run: %vany_true_b64x2([true true]) == true +; run: %vany_true_i64x2([0 0]) == 0 +; run: %vany_true_i64x2([-1 0]) == 1 +; run: %vany_true_i64x2([-1 -1]) == 1 diff --git a/cranelift/filetests/filetests/runtests/simd-vconst.clif b/cranelift/filetests/filetests/runtests/simd-vconst.clif index 5aa5386484f4..b5de91ff4bea 100644 --- a/cranelift/filetests/filetests/runtests/simd-vconst.clif +++ b/cranelift/filetests/filetests/runtests/simd-vconst.clif @@ -5,7 +5,7 @@ set enable_simd target x86_64 has_sse3 has_ssse3 has_sse41 -function %vconst_zeroes() -> b1 { +function %vconst_zeroes() -> i8 { block0: v0 = vconst.i8x16 0x00 v1 = extractlane v0, 4 @@ -14,7 +14,7 @@ block0: } ; run -function %vconst_ones() -> b1 { +function %vconst_ones() -> i8 { block0: v0 = vconst.i8x16 0xffffffffffffffffffffffffffffffff v1 = extractlane v0, 2 @@ -24,7 +24,7 @@ block0: ; run -function %splat_i64x2() -> b1 { +function %splat_i64x2() -> i8 { block0: v0 = iconst.i64 -1 v1 = splat.i64x2 v0 diff --git a/cranelift/filetests/filetests/runtests/simd-vselect.clif b/cranelift/filetests/filetests/runtests/simd-vselect.clif index 1165ae8a0963..5d2ca1afe77d 100644 --- a/cranelift/filetests/filetests/runtests/simd-vselect.clif +++ b/cranelift/filetests/filetests/runtests/simd-vselect.clif @@ -7,7 +7,7 @@ target x86_64 has_sse3 has_ssse3 has_sse41 function %vselect_i8x16() -> i8x16 { block0: - v1 = vconst.b8x16 [false true false true false true true true true true false false false false false false] + v1 = vconst.i8x16 [0 -1 0 -1 0 -1 -1 -1 -1 -1 0 0 0 0 0 0] v2 = vconst.i8x16 [100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115] v3 = vconst.i8x16 [200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215] v4 = vselect v1, v2, v3 @@ -17,7 +17,7 @@ block0: function %vselect_i16x8() -> i16x8 { block0: - v1 = vconst.b16x8 [false 
true false true false true true true] + v1 = vconst.i16x8 [0 -1 0 -1 0 -1 -1 -1] v2 = vconst.i16x8 [100 101 102 103 104 105 106 107] v3 = vconst.i16x8 [200 201 202 203 204 205 206 207] v4 = vselect v1, v2, v3 @@ -27,7 +27,7 @@ block0: function %vselect_i32x4_const() -> i32x4 { block0: - v1 = vconst.b32x4 [false true false true] + v1 = vconst.i32x4 [0 -1 0 -1] v2 = vconst.i32x4 [100 101 102 103] v3 = vconst.i32x4 [200 201 202 203] v4 = vselect v1, v2, v3 @@ -35,17 +35,17 @@ block0: } ; run: %vselect_i32x4_const() == [200 101 202 103] -function %vselect_i32x4(b32x4, i32x4, i32x4) -> i32x4 { -block0(v0: b32x4, v1: i32x4, v2: i32x4): +function %vselect_i32x4(i32x4, i32x4, i32x4) -> i32x4 { +block0(v0: i32x4, v1: i32x4, v2: i32x4): v3 = vselect v0, v1, v2 return v3 } ; Remember that vselect accepts: 1) the selector vector, 2) the "if true" vector, and 3) the "if false" vector. -; run: %vselect_i32x4([true true false false], [1 2 -1 -1], [-1 -1 3 4]) == [1 2 3 4] +; run: %vselect_i32x4([-1 -1 0 0], [1 2 -1 -1], [-1 -1 3 4]) == [1 2 3 4] function %vselect_i64x2() -> i64x2 { block0: - v1 = vconst.b64x2 [false true] + v1 = vconst.i64x2 [0 -1] v2 = vconst.i64x2 [100 101] v3 = vconst.i64x2 [200 201] v4 = vselect v1, v2, v3 @@ -53,30 +53,30 @@ block0: } ; run: %vselect_i64x2() == [200 101] -function %vselect_p_i8x16(b8x16, i8x16, i8x16) -> i8x16 { -block0(v0: b8x16, v1: i8x16, v2: i8x16): +function %vselect_p_i8x16(i8x16, i8x16, i8x16) -> i8x16 { +block0(v0: i8x16, v1: i8x16, v2: i8x16): v3 = vselect v0, v1, v2 return v3 } -; run: %vselect_p_i8x16([true false true true true false false false true false true true true false false false], [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16], [17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32]) == [1 18 3 4 5 22 23 24 9 26 11 12 13 30 31 32] +; run: %vselect_p_i8x16([-1 0 -1 -1 -1 0 0 0 -1 0 -1 -1 -1 0 0 0], [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16], [17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32]) == [1 18 3 4 5 22 23 24 9 26 11 12 13 30 31 32] -function %vselect_p_i16x8(b16x8, i16x8, i16x8) -> i16x8 { -block0(v0: b16x8, v1: i16x8, v2: i16x8): +function %vselect_p_i16x8(i16x8, i16x8, i16x8) -> i16x8 { +block0(v0: i16x8, v1: i16x8, v2: i16x8): v3 = vselect v0, v1, v2 return v3 } -; run: %vselect_p_i16x8([true false true true true false false false], [1 2 3 4 5 6 7 8], [17 18 19 20 21 22 23 24]) == [1 18 3 4 5 22 23 24] +; run: %vselect_p_i16x8([-1 0 -1 -1 -1 0 0 0], [1 2 3 4 5 6 7 8], [17 18 19 20 21 22 23 24]) == [1 18 3 4 5 22 23 24] -function %vselect_p_i32x4(b32x4, i32x4, i32x4) -> i32x4 { -block0(v0: b32x4, v1: i32x4, v2: i32x4): +function %vselect_p_i32x4(i32x4, i32x4, i32x4) -> i32x4 { +block0(v0: i32x4, v1: i32x4, v2: i32x4): v3 = vselect v0, v1, v2 return v3 } -; run: %vselect_p_i32x4([true false true true], [1 2 3 4], [100000 200000 300000 400000]) == [1 200000 3 4] +; run: %vselect_p_i32x4([-1 0 -1 -1], [1 2 3 4], [100000 200000 300000 400000]) == [1 200000 3 4] -function %vselect_p_i64x2(b64x2, i64x2, i64x2) -> i64x2 { -block0(v0: b64x2, v1: i64x2, v2: i64x2): +function %vselect_p_i64x2(i64x2, i64x2, i64x2) -> i64x2 { +block0(v0: i64x2, v1: i64x2, v2: i64x2): v3 = vselect v0, v1, v2 return v3 } -; run: %vselect_p_i64x2([true false], [1 2], [100000000000 200000000000]) == [1 200000000000] +; run: %vselect_p_i64x2([-1 0], [1 2], [100000000000 200000000000]) == [1 200000000000] diff --git a/cranelift/filetests/filetests/runtests/simd_compare_zero.clif b/cranelift/filetests/filetests/runtests/simd_compare_zero.clif index 445ccbcc148b..d9cacc1c4705 100644 --- 
a/cranelift/filetests/filetests/runtests/simd_compare_zero.clif +++ b/cranelift/filetests/filetests/runtests/simd_compare_zero.clif @@ -2,15 +2,12 @@ test run target aarch64 target s390x -; raw_bitcast is needed to get around issue with "bint" on aarch64 - function %simd_icmp_eq_i8(i8x16) -> i8x16 { block0(v0: i8x16): v1 = iconst.i8 0 v3 = splat.i8x16 v1 v2 = icmp eq v0, v3 - v4 = raw_bitcast.i8x16 v2 - return v4 + return v2 } ; run: %simd_icmp_eq_i8([-1 0 1 100 -1 0 1 100 -1 0 1 100 -1 0 1 100]) == [0 0xff 0 0 0 0xff 0 0 0 0xff 0 0 0 0xff 0 0] @@ -19,8 +16,7 @@ block0(v0: i16x8): v1 = iconst.i16 0 v3 = splat.i16x8 v1 v2 = icmp ne v0, v3 - v4 = raw_bitcast.i16x8 v2 - return v4 + return v2 } ; run: %simd_icmp_ne_i16([-1 0 1 100 -1 0 1 100]) == [0xffff 0 0xffff 0xffff 0xffff 0 0xffff 0xffff] @@ -29,8 +25,7 @@ block0(v0: i32x4): v1 = iconst.i32 0 v3 = splat.i32x4 v1 v2 = icmp sle v0, v3 - v4 = raw_bitcast.i32x4 v2 - return v4 + return v2 } ; run: %simd_icmp_le_i32([-1 0 1 100]) == [0xffffffff 0xffffffff 0 0] @@ -39,8 +34,7 @@ block0(v0: i64x2): v1 = iconst.i64 0 v3 = splat.i64x2 v1 v2 = icmp sge v0, v3 - v4 = raw_bitcast.i64x2 v2 - return v4 + return v2 } ; run: %simd_icmp_ge_i64([-1 0]) == [0 0xffffffffffffffff] ; run: %simd_icmp_ge_i64([1 100]) == [0xffffffffffffffff 0xffffffffffffffff] @@ -50,8 +44,7 @@ block0(v0: i8x16): v1 = iconst.i8 0 v3 = splat.i8x16 v1 v2 = icmp slt v0, v3 - v4 = raw_bitcast.i8x16 v2 - return v4 + return v2 } ; run: %simd_icmp_lt_i8([-1 0 1 100 -1 0 1 100 -1 0 1 100 -1 0 1 100]) == [0xff 0 0 0 0xff 0 0 0 0xff 0 0 0 0xff 0 0 0] @@ -60,8 +53,7 @@ block0(v0: i16x8): v1 = iconst.i16 0 v3 = splat.i16x8 v1 v2 = icmp sgt v0, v3 - v4 = raw_bitcast.i16x8 v2 - return v4 + return v2 } ; run: %simd_icmp_gt_i16([-1 0 1 100 -1 0 1 100]) == [0 0 0xffff 0xffff 0 0 0xffff 0xffff] @@ -70,8 +62,7 @@ block0(v0: f32x4): v1 = f32const 0.0 v3 = splat.f32x4 v1 v2 = fcmp eq v0, v3 - v4 = raw_bitcast.i32x4 v2 - return v4 + return v2 } ; run: %simd_fcmp_eq_f32([-0x1.0 0x0.0 0x1.0 NaN]) == [0 0xffffffff 0 0] @@ -80,8 +71,7 @@ block0(v0: f64x2): v1 = f64const 0.0 v3 = splat.f64x2 v1 v2 = fcmp ne v0, v3 - v4 = raw_bitcast.i64x2 v2 - return v4 + return v2 } ; run: %simd_fcmp_ne_f64([-0x1.0 0x0.0]) == [0xffffffffffffffff 0] ; run: %simd_fcmp_ne_f64([0x1.0 NaN]) == [0xffffffffffffffff 0xffffffffffffffff] @@ -91,8 +81,7 @@ block0(v0: f32x4): v1 = f32const 0.0 v3 = splat.f32x4 v1 v2 = fcmp le v0, v3 - v4 = raw_bitcast.i32x4 v2 - return v4 + return v2 } ; run: %simd_fcmp_le_f32([-0x1.0 0x0.0 0x1.0 NaN]) == [0xffffffff 0xffffffff 0 0] @@ -101,8 +90,7 @@ block0(v0: f64x2): v1 = f64const 0.0 v3 = splat.f64x2 v1 v2 = fcmp ge v0, v3 - v4 = raw_bitcast.i64x2 v2 - return v4 + return v2 } ; run: %simd_fcmp_ge_f64([-0x1.0 0x0.0]) == [0 0xffffffffffffffff] @@ -113,8 +101,7 @@ block0(v0: f32x4): v1 = f32const 0.0 v3 = splat.f32x4 v1 v2 = fcmp lt v0, v3 - v4 = raw_bitcast.i32x4 v2 - return v4 + return v2 } ; run: %simd_fcmp_lt_f32([-0x1.0 0x0.0 0x1.0 NaN]) == [0xffffffff 0 0 0] @@ -123,8 +110,7 @@ block0(v0: f64x2): v1 = f64const 0.0 v3 = splat.f64x2 v1 v2 = fcmp gt v0, v3 - v4 = raw_bitcast.i64x2 v2 - return v4 + return v2 } ; run: %simd_fcmp_gt_f64([-0x1.0 0x0.0]) == [0 0] @@ -135,8 +121,7 @@ block0(v0: i32x4): v1 = iconst.i32 0 v3 = splat.i32x4 v1 v2 = icmp eq v3, v0 - v4 = raw_bitcast.i32x4 v2 - return v4 + return v2 } ; run: %simd_icmp_eq_i32([1 0 -1 100]) == [0 0xffffffff 0 0] @@ -145,8 +130,7 @@ block0(v0: i64x2): v1 = iconst.i64 0 v3 = splat.i64x2 v1 v2 = icmp ne v3, v0 - v4 = raw_bitcast.i64x2 v2 - return 
v4 + return v2 } ; run: %simd_icmp_ne_i64([-1 0]) == [0xffffffffffffffff 0] ; run: %simd_icmp_ne_i64([1 100]) == [0xffffffffffffffff 0xffffffffffffffff] @@ -156,8 +140,7 @@ block0(v0: i8x16): v1 = iconst.i8 0 v3 = splat.i8x16 v1 v2 = icmp sle v3, v0 - v4 = raw_bitcast.i8x16 v2 - return v4 + return v2 } ; run: %simd_icmp_le_i8([-1 0 1 100 -1 0 1 100 -1 0 1 100 -1 0 1 100]) == [0 0xff 0xff 0xff 0 0xff 0xff 0xff 0 0xff 0xff 0xff 0 0xff 0xff 0xff] @@ -166,8 +149,7 @@ block0(v0: i16x8): v1 = iconst.i16 0 v3 = splat.i16x8 v1 v2 = icmp sge v3, v0 - v4 = raw_bitcast.i16x8 v2 - return v4 + return v2 } ; run: %simd_icmp_ge_i16([-1 0 1 100 -1 0 1 100]) == [0xffff 0xffff 0 0 0xffff 0xffff 0 0] @@ -176,8 +158,7 @@ block0(v0: i32x4): v1 = iconst.i32 0 v3 = splat.i32x4 v1 v2 = icmp slt v3, v0 - v4 = raw_bitcast.i32x4 v2 - return v4 + return v2 } ; run: %simd_icmp_lt_i32([-1 0 1 100]) == [0 0 0xffffffff 0xffffffff] @@ -186,8 +167,7 @@ block0(v0: i64x2): v1 = iconst.i64 0 v3 = splat.i64x2 v1 v2 = icmp sgt v3, v0 - v4 = raw_bitcast.i64x2 v2 - return v4 + return v2 } ; run: %simd_icmp_gt_i64([-1 0]) == [0xffffffffffffffff 0] ; run: %simd_icmp_gt_i64([1 100]) == [0 0] @@ -197,8 +177,7 @@ block0(v0: f64x2): v1 = f64const 0.0 v3 = splat.f64x2 v1 v2 = fcmp eq v3, v0 - v4 = raw_bitcast.i64x2 v2 - return v4 + return v2 } ; run: %simd_fcmp_eq_f64([-0x1.0 0x0.0]) == [0 0xffffffffffffffff] ; run: %simd_fcmp_eq_f64([0x1.0 NaN]) == [0 0] @@ -208,8 +187,7 @@ block0(v0: f32x4): v1 = f32const 0.0 v3 = splat.f32x4 v1 v2 = fcmp ne v3, v0 - v4 = raw_bitcast.i32x4 v2 - return v4 + return v2 } ; run: %simd_fcmp_ne_f32([-0x1.0 0x0.0 0x1.0 NaN]) == [0xffffffff 0 0xffffffff 0xffffffff] @@ -218,8 +196,7 @@ block0(v0: f64x2): v1 = f64const 0.0 v3 = splat.f64x2 v1 v2 = fcmp le v3, v0 - v4 = raw_bitcast.i64x2 v2 - return v4 + return v2 } ; run: %simd_fcmp_le_f64([-0x1.0 0x0.0]) == [0 0xffffffffffffffff] ; run: %simd_fcmp_le_f64([0x1.0 NaN]) == [0xffffffffffffffff 0] @@ -229,8 +206,7 @@ block0(v0: f32x4): v1 = f32const 0.0 v3 = splat.f32x4 v1 v2 = fcmp ge v3, v0 - v4 = raw_bitcast.i32x4 v2 - return v4 + return v2 } ; run: %simd_fcmp_ge_f32([-0x1.0 0x0.0 0x1.0 NaN]) == [0xffffffff 0xffffffff 0 0] @@ -239,8 +215,7 @@ block0(v0: f64x2): v1 = f64const 0.0 v3 = splat.f64x2 v1 v2 = fcmp lt v3, v0 - v4 = raw_bitcast.i64x2 v2 - return v4 + return v2 } ; run: %simd_fcmp_lt_f64([-0x1.0 0x0.0]) == [0 0] ; run: %simd_fcmp_lt_f64([0x1.0 NaN]) == [0xffffffffffffffff 0] @@ -250,7 +225,6 @@ block0(v0: f32x4): v1 = f32const 0.0 v3 = splat.f32x4 v1 v2 = fcmp gt v3, v0 - v4 = raw_bitcast.i32x4 v2 - return v4 + return v2 } ; run: %simd_fcmp_gt_f32([-0x1.0 0x0.0 0x1.0 NaN]) == [0xffffffff 0 0 0] diff --git a/cranelift/filetests/filetests/runtests/sqrt.clif b/cranelift/filetests/filetests/runtests/sqrt.clif index 18fa96857b48..6da83e3e647f 100644 --- a/cranelift/filetests/filetests/runtests/sqrt.clif +++ b/cranelift/filetests/filetests/runtests/sqrt.clif @@ -31,7 +31,7 @@ function %sqrt_is_nan_f32(f32) -> i32 { block0(v0: f32): v2 = sqrt v0 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %sqrt_is_nan_f32(-0x9.0) == 1 @@ -78,7 +78,7 @@ function %sqrt_is_nan_f64(f64) -> i32 { block0(v0: f64): v2 = sqrt v0 v3 = fcmp ne v2, v2 - v4 = bint.i32 v3 + v4 = uextend.i32 v3 return v4 } ; run: %sqrt_is_nan_f64(-0x9.0) == 1 diff --git a/cranelift/filetests/filetests/runtests/stack-addr-32.clif b/cranelift/filetests/filetests/runtests/stack-addr-32.clif index 61a9c5c70549..12aed367981a 100644 --- 
a/cranelift/filetests/filetests/runtests/stack-addr-32.clif +++ b/cranelift/filetests/filetests/runtests/stack-addr-32.clif @@ -1,6 +1,6 @@ test interpret -function %stack_addr_iadd(i64) -> b1 { +function %stack_addr_iadd(i64) -> i8 { ss0 = explicit_slot 16 block0(v0: i64): @@ -19,12 +19,12 @@ block0(v0: i64): v9 = band v7, v8 return v9 } -; run: %stack_addr_iadd(0) == true -; run: %stack_addr_iadd(1) == true -; run: %stack_addr_iadd(-1) == true +; run: %stack_addr_iadd(0) == 1 +; run: %stack_addr_iadd(1) == 1 +; run: %stack_addr_iadd(-1) == 1 -function %stack_addr_32(i64) -> b1 { +function %stack_addr_32(i64) -> i8 { ss0 = explicit_slot 24 block0(v0: i64): @@ -47,13 +47,13 @@ block0(v0: i64): v11 = band v10, v9 return v11 } -; run: %stack_addr_32(0) == true -; run: %stack_addr_32(1) == true -; run: %stack_addr_32(-1) == true +; run: %stack_addr_32(0) == 1 +; run: %stack_addr_32(1) == 1 +; run: %stack_addr_32(-1) == 1 -function %addr32_64(i64) -> b1 { +function %addr32_64(i64) -> i8 { ss0 = explicit_slot 16 block0(v0: i64): @@ -67,12 +67,12 @@ block0(v0: i64): return v4 } -; run: %addr32_64(0) == true -; run: %addr32_64(1) == true -; run: %addr32_64(-1) == true +; run: %addr32_64(0) == 1 +; run: %addr32_64(1) == 1 +; run: %addr32_64(-1) == 1 -function %multi_slot_different_addrs() -> b1 { +function %multi_slot_different_addrs() -> i8 { ss0 = explicit_slot 8 ss1 = explicit_slot 8 @@ -82,4 +82,4 @@ block0: v2 = icmp ne v0, v1 return v2 } -; run: %multi_slot_different_addrs() == true +; run: %multi_slot_different_addrs() == 1 diff --git a/cranelift/filetests/filetests/runtests/stack-addr-64.clif b/cranelift/filetests/filetests/runtests/stack-addr-64.clif index 3d8e26144aef..7b0d85ea8da4 100644 --- a/cranelift/filetests/filetests/runtests/stack-addr-64.clif +++ b/cranelift/filetests/filetests/runtests/stack-addr-64.clif @@ -5,7 +5,7 @@ target s390x target aarch64 target riscv64 -function %stack_addr_iadd(i64) -> b1 { +function %stack_addr_iadd(i64) -> i8 { ss0 = explicit_slot 16 block0(v0: i64): @@ -24,11 +24,11 @@ block0(v0: i64): v9 = band v7, v8 return v9 } -; run: %stack_addr_iadd(0) == true -; run: %stack_addr_iadd(1) == true -; run: %stack_addr_iadd(-1) == true +; run: %stack_addr_iadd(0) == 1 +; run: %stack_addr_iadd(1) == 1 +; run: %stack_addr_iadd(-1) == 1 -function %stack_addr_64(i64) -> b1 { +function %stack_addr_64(i64) -> i8 { ss0 = explicit_slot 24 block0(v0: i64): @@ -51,6 +51,6 @@ block0(v0: i64): v11 = band v10, v9 return v11 } -; run: %stack_addr_64(0) == true -; run: %stack_addr_64(1) == true -; run: %stack_addr_64(-1) == true +; run: %stack_addr_64(0) == 1 +; run: %stack_addr_64(1) == 1 +; run: %stack_addr_64(-1) == 1 diff --git a/cranelift/filetests/filetests/runtests/trueif-ff.clif b/cranelift/filetests/filetests/runtests/trueif-ff.clif index ceef624b541b..f82ba22371c0 100644 --- a/cranelift/filetests/filetests/runtests/trueif-ff.clif +++ b/cranelift/filetests/filetests/runtests/trueif-ff.clif @@ -3,108 +3,108 @@ test run target aarch64 ; `true{if,ff}` not implemented on x86_64, and panics on s390x. 
-function %trueif_i8_eq(i8, i8) -> b1 { +function %trueif_i8_eq(i8, i8) -> i8 { block0(v0: i8, v1: i8): v2 = ifcmp v0, v1 v3 = trueif eq v2 return v3 } -; run: %trueif_i8_eq(42, 42) == true -; run: %trueif_i8_eq(-1, 255) == true -; run: %trueif_i8_eq(255, 0) == false -; run: %trueif_i8_eq(32, 64) == false +; run: %trueif_i8_eq(42, 42) == 1 +; run: %trueif_i8_eq(-1, 255) == 1 +; run: %trueif_i8_eq(255, 0) == 0 +; run: %trueif_i8_eq(32, 64) == 0 -function %trueif_i16_eq(i16, i16) -> b1 { +function %trueif_i16_eq(i16, i16) -> i8 { block0(v0: i16, v1: i16): v2 = ifcmp v0, v1 v3 = trueif eq v2 return v3 } -; run: %trueif_i16_eq(42, 42) == true -; run: %trueif_i16_eq(-1, 65535) == true -; run: %trueif_i16_eq(65535, 0) == false -; run: %trueif_i16_eq(32, 64) == false +; run: %trueif_i16_eq(42, 42) == 1 +; run: %trueif_i16_eq(-1, 65535) == 1 +; run: %trueif_i16_eq(65535, 0) == 0 +; run: %trueif_i16_eq(32, 64) == 0 -function %trueif_i32_eq(i32, i32) -> b1 { +function %trueif_i32_eq(i32, i32) -> i8 { block0(v0: i32, v1: i32): v2 = ifcmp v0, v1 v3 = trueif eq v2 return v3 } -; run: %trueif_i32_eq(42, 42) == true -; run: %trueif_i32_eq(-1, 4294967295) == true -; run: %trueif_i32_eq(4294967295, 0) == false -; run: %trueif_i32_eq(32, 64) == false +; run: %trueif_i32_eq(42, 42) == 1 +; run: %trueif_i32_eq(-1, 4294967295) == 1 +; run: %trueif_i32_eq(4294967295, 0) == 0 +; run: %trueif_i32_eq(32, 64) == 0 -function %trueif_i64_eq(i64, i64) -> b1 { +function %trueif_i64_eq(i64, i64) -> i8 { block0(v0: i64, v1: i64): v2 = ifcmp v0, v1 v3 = trueif eq v2 return v3 } -; run: %trueif_i64_eq(42, 42) == true -; run: %trueif_i64_eq(-1, 18446744073709551615) == true -; run: %trueif_i64_eq(18446744073709551615, 0) == false -; run: %trueif_i64_eq(32, 64) == false +; run: %trueif_i64_eq(42, 42) == 1 +; run: %trueif_i64_eq(-1, 18446744073709551615) == 1 +; run: %trueif_i64_eq(18446744073709551615, 0) == 0 +; run: %trueif_i64_eq(32, 64) == 0 -function %trueif_i128_eq(i128, i128) -> b1 { +function %trueif_i128_eq(i128, i128) -> i8 { block0(v0: i128, v1: i128): v2 = ifcmp v0, v1 v3 = trueif eq v2 return v3 } -; run: %trueif_i128_eq(42, 42) == true -; run: %trueif_i128_eq(-1, 18446744073709551615) == false -; run: %trueif_i128_eq(19000000000000000000, 0) == false -; run: %trueif_i128_eq(32, 64) == false +; run: %trueif_i128_eq(42, 42) == 1 +; run: %trueif_i128_eq(-1, 18446744073709551615) == 0 +; run: %trueif_i128_eq(19000000000000000000, 0) == 0 +; run: %trueif_i128_eq(32, 64) == 0 -function %trueff_f32_eq(f32, f32) -> b1 { +function %trueff_f32_eq(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = ffcmp v0, v1 v3 = trueff eq v2 return v3 } -; run: %trueff_f32_eq(0x42.0, 0x42.0) == true -; run: %trueff_f32_eq(-0x1.0, -0x1.0) == true -; run: %trueff_f32_eq(0x1.0, 0x0.0) == false +; run: %trueff_f32_eq(0x42.0, 0x42.0) == 1 +; run: %trueff_f32_eq(-0x1.0, -0x1.0) == 1 +; run: %trueff_f32_eq(0x1.0, 0x0.0) == 0 -function %trueff_f64_eq(f64, f64) -> b1 { +function %trueff_f64_eq(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = ffcmp v0, v1 v3 = trueff eq v2 return v3 } -; run: %trueff_f64_eq(0x42.0, 0x42.0) == true -; run: %trueff_f64_eq(-0x1.0, -0x1.0) == true -; run: %trueff_f64_eq(0x1.0, 0x0.0) == false +; run: %trueff_f64_eq(0x42.0, 0x42.0) == 1 +; run: %trueff_f64_eq(-0x1.0, -0x1.0) == 1 +; run: %trueff_f64_eq(0x1.0, 0x0.0) == 0 -function %trueff_f32_ne(f32, f32) -> b1 { +function %trueff_f32_ne(f32, f32) -> i8 { block0(v0: f32, v1: f32): v2 = ffcmp v0, v1 v3 = trueff ne v2 return v3 } -; run: %trueff_f32_ne(0x42.0, 0x42.0) == 
false -; run: %trueff_f32_ne(-0x1.0, -0x1.0) == false -; run: %trueff_f32_ne(0x1.0, 0x0.0) == true -; run: %trueff_f32_ne(NaN, NaN) == true +; run: %trueff_f32_ne(0x42.0, 0x42.0) == 0 +; run: %trueff_f32_ne(-0x1.0, -0x1.0) == 0 +; run: %trueff_f32_ne(0x1.0, 0x0.0) == 1 +; run: %trueff_f32_ne(NaN, NaN) == 1 -function %trueff_f64_ne(f64, f64) -> b1 { +function %trueff_f64_ne(f64, f64) -> i8 { block0(v0: f64, v1: f64): v2 = ffcmp v0, v1 v3 = trueff ne v2 return v3 } -; run: %trueff_f64_ne(0x42.0, 0x42.0) == false -; run: %trueff_f64_ne(-0x1.0, -0x1.0) == false -; run: %trueff_f64_ne(0x1.0, 0x0.0) == true -; run: %trueff_f64_ne(NaN, NaN) == true +; run: %trueff_f64_ne(0x42.0, 0x42.0) == 0 +; run: %trueff_f64_ne(-0x1.0, -0x1.0) == 0 +; run: %trueff_f64_ne(0x1.0, 0x0.0) == 1 +; run: %trueff_f64_ne(NaN, NaN) == 1 diff --git a/cranelift/filetests/filetests/runtests/trunc.clif b/cranelift/filetests/filetests/runtests/trunc.clif index eaaf276149a0..f0b427c91731 100644 --- a/cranelift/filetests/filetests/runtests/trunc.clif +++ b/cranelift/filetests/filetests/runtests/trunc.clif @@ -59,7 +59,7 @@ function %trunc_is_nan_f32(f32) -> i32 { block0(v0: f32): v1 = trunc v0 v2 = fcmp ne v1, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } ; run: %trunc_is_nan_f32(+NaN) == 1 @@ -132,7 +132,7 @@ function %trunc_is_nan_f64(f64) -> i32 { block0(v0: f64): v1 = trunc v0 v2 = fcmp ne v1, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } ; run: %trunc_is_nan_f64(+NaN) == 1 diff --git a/cranelift/filetests/filetests/simple_gvn/reject.clif b/cranelift/filetests/filetests/simple_gvn/reject.clif index 3a5c1e0ee314..aa4978633e07 100644 --- a/cranelift/filetests/filetests/simple_gvn/reject.clif +++ b/cranelift/filetests/filetests/simple_gvn/reject.clif @@ -14,7 +14,7 @@ block0: return v5 } -function %cpu_flags() -> b1 { +function %cpu_flags() -> i8 { block0: v0 = iconst.i32 7 v1 = iconst.i32 8 diff --git a/cranelift/filetests/filetests/simple_preopt/bitselect.clif b/cranelift/filetests/filetests/simple_preopt/bitselect.clif index 97fe62a9f050..e55c46fd2bc7 100644 --- a/cranelift/filetests/filetests/simple_preopt/bitselect.clif +++ b/cranelift/filetests/filetests/simple_preopt/bitselect.clif @@ -7,12 +7,13 @@ target x86_64 function %mask_from_icmp(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v2 = icmp eq v0, v1 - v3 = raw_bitcast.i8x16 v2 - v4 = bitselect v3, v0, v1 - ; check: v4 = vselect v2, v0, v1 - return v4 + v3 = bitselect v2, v0, v1 + ; check: v3 = vselect v2, v0, v1 + return v3 } +;; We can't guarantee that the i32x4 has all ones or zeros in each lane, so we +;; can't remove the bitselect in this case. 
function %mask_casted(i8x16, i8x16, i32x4) -> i8x16 { block0(v0: i8x16, v1: i8x16, v2: i32x4): v3 = raw_bitcast.i8x16 v2 @@ -25,7 +26,7 @@ function %good_const_mask_i8x16(i8x16, i8x16) -> i8x16 { block0(v0: i8x16, v1: i8x16): v3 = vconst.i8x16 [0 0 0xFF 0 0 0xFF 0 0 0 0 0xFF 0 0 0 0 0xFF] v4 = bitselect v3, v0, v1 - ; check: v5 = raw_bitcast.b8x16 v3 + ; check: v5 = raw_bitcast.i8x16 v3 ; nextln: v4 = vselect v5, v0, v1 return v4 } @@ -34,7 +35,7 @@ function %good_const_mask_i16x8(i16x8, i16x8) -> i16x8 { block0(v0: i16x8, v1: i16x8): v3 = vconst.i16x8 [0x0000 0xFF00 0x0000 0x00FF 0x0000 0xFFFF 0x00FF 0xFFFF] v4 = bitselect v3, v0, v1 - ; check: v5 = raw_bitcast.b8x16 v3 + ; check: v5 = raw_bitcast.i8x16 v3 ; nextln: v6 = raw_bitcast.i8x16 v0 ; nextln: v7 = raw_bitcast.i8x16 v1 ; nextln: v8 = vselect v5, v6, v7 diff --git a/cranelift/filetests/filetests/simple_preopt/replace_branching_instructions_and_cfg_predecessors.clif b/cranelift/filetests/filetests/simple_preopt/replace_branching_instructions_and_cfg_predecessors.clif index a6cc0d9fb115..493896f0d751 100644 --- a/cranelift/filetests/filetests/simple_preopt/replace_branching_instructions_and_cfg_predecessors.clif +++ b/cranelift/filetests/filetests/simple_preopt/replace_branching_instructions_and_cfg_predecessors.clif @@ -11,7 +11,7 @@ function u0:2(i64 , i64) { v18 = load.i32 v17 v19 = iconst.i32 4 v20 = icmp ne v18, v19 - v21 = bint.i32 v20 + v21 = uextend.i32 v20 brnz v21, block2 jump block4 block4: diff --git a/cranelift/filetests/filetests/simple_preopt/sign_extend.clif b/cranelift/filetests/filetests/simple_preopt/sign_extend.clif index b10b9a2d939d..6fccf8553e62 100644 --- a/cranelift/filetests/filetests/simple_preopt/sign_extend.clif +++ b/cranelift/filetests/filetests/simple_preopt/sign_extend.clif @@ -4,7 +4,7 @@ target x86_64 ;; Tests for sign-extending immediates. 
-function %sign_extend_signed_icmp(i8) -> b1 { +function %sign_extend_signed_icmp(i8) -> i8 { block0(v0: i8): ; 255 = -1 as u8 v1 = iconst.i8 255 @@ -13,7 +13,7 @@ block0(v0: i8): return v2 } -function %do_not_sign_extend_unsigned_icmp(i8) -> b1 { +function %do_not_sign_extend_unsigned_icmp(i8) -> i8 { block0(v0: i8): v1 = iconst.i8 255 v2 = icmp uge v0, v1 diff --git a/cranelift/filetests/filetests/simple_preopt/simplify32.clif b/cranelift/filetests/filetests/simple_preopt/simplify32.clif index 32566cea8b64..80fb1363e5d3 100644 --- a/cranelift/filetests/filetests/simple_preopt/simplify32.clif +++ b/cranelift/filetests/filetests/simple_preopt/simplify32.clif @@ -34,14 +34,14 @@ function %icmp_imm(i32) -> i32 { block0(v0: i32): v1 = iconst.i32 2 v2 = icmp slt v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } ; sameln: function %icmp_imm ; nextln: block0(v0: i32): ; nextln: v1 = iconst.i32 2 ; nextln: v2 = icmp_imm slt v0, 2 -; nextln: v3 = bint.i32 v2 +; nextln: v3 = uextend.i32 v2 ; nextln: return v3 ; nextln: } diff --git a/cranelift/filetests/filetests/simple_preopt/simplify64.clif b/cranelift/filetests/filetests/simple_preopt/simplify64.clif index 102746e97121..72919cad96e2 100644 --- a/cranelift/filetests/filetests/simple_preopt/simplify64.clif +++ b/cranelift/filetests/filetests/simple_preopt/simplify64.clif @@ -34,21 +34,21 @@ function %icmp_imm(i32) -> i32 { block0(v0: i32): v1 = iconst.i32 2 v2 = icmp slt v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } ; sameln: function %icmp_imm ; nextln: block0(v0: i32): ; nextln: v1 = iconst.i32 2 ; nextln: v2 = icmp_imm slt v0, 2 -; nextln: v3 = bint.i32 v2 +; nextln: v3 = uextend.i32 v2 ; nextln: return v3 ; nextln: } -function %brz_bint(i32) { +function %brz_uextend(i32) { block0(v0: i32): v3 = icmp_imm slt v0, 0 - v1 = bint.i32 v3 + v1 = uextend.i32 v3 v2 = select v1, v1, v1 trapz v1, user0 brz v1, block1 @@ -60,13 +60,13 @@ block1: block2: return } -; sameln: function %brz_bint +; sameln: function %brz_uextend ; nextln: (v0: i32): ; nextln: v3 = icmp_imm slt v0, 0 -; nextln: v1 = bint.i32 v3 -; nextln: v2 = select v3, v1, v1 -; nextln: trapz v3, user0 -; nextln: brnz v3, block2 +; nextln: v1 = uextend.i32 v3 +; nextln: v2 = select v1, v1, v1 +; nextln: trapz v1, user0 +; nextln: brnz v1, block2 ; nextln: jump block1 function %irsub_imm(i32) -> i32 { diff --git a/cranelift/filetests/filetests/verifier/simd-lane-index.clif b/cranelift/filetests/filetests/verifier/simd-lane-index.clif index 38ad19517a32..57c945bab60c 100644 --- a/cranelift/filetests/filetests/verifier/simd-lane-index.clif +++ b/cranelift/filetests/filetests/verifier/simd-lane-index.clif @@ -11,11 +11,11 @@ block0: return } -function %insertlane_b16x8() { +function %insertlane_i16x8() { block0: - v0 = vconst.b16x8 [false false false false false false false false] - v1 = bconst.b16 true - v2 = insertlane v0, v1, 8 ; error: The lane 8 does not index into the type b16x8 + v0 = vconst.i16x8 [0 0 0 0 0 0 0 0] + v1 = iconst.i16 -1 + v2 = insertlane v0, v1, 8 ; error: The lane 8 does not index into the type i16x8 return } @@ -34,9 +34,9 @@ block0: return } -function %extractlane_b8x16() { +function %extractlane_i8x16() { block0: - v0 = vconst.b8x16 0x00 - v1 = extractlane v0, 16 ; error: The lane 16 does not index into the type b8x16 + v0 = vconst.i8x16 0x00 + v1 = extractlane v0, 16 ; error: The lane 16 does not index into the type i8x16 return } diff --git a/cranelift/filetests/filetests/verifier/type_check.clif 
b/cranelift/filetests/filetests/verifier/type_check.clif index b5933f8acbad..0663f8fd7e71 100644 --- a/cranelift/filetests/filetests/verifier/type_check.clif +++ b/cranelift/filetests/filetests/verifier/type_check.clif @@ -10,9 +10,9 @@ function %entry_block_arg_type(i32) { return } -function %incorrect_arg_type(i32, b1) -> i32 { - block0(v0: i32, v1: b1): - v2 = iadd v0, v1 ; error: arg 1 (v1) has type b1, expected i32 +function %incorrect_arg_type(i32, i8) -> i32 { + block0(v0: i32, v1: i8): + v2 = iadd v0, v1 ; error: arg 1 (v1) has type i8, expected i32 return v2 } diff --git a/cranelift/filetests/filetests/wasm/f32-compares.clif b/cranelift/filetests/filetests/wasm/f32-compares.clif index ad1bf6ad7eeb..bb5855cd51e4 100644 --- a/cranelift/filetests/filetests/wasm/f32-compares.clif +++ b/cranelift/filetests/filetests/wasm/f32-compares.clif @@ -8,41 +8,41 @@ target x86_64 haswell function %f32_eq(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fcmp eq v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %f32_ne(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fcmp ne v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %f32_lt(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fcmp lt v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %f32_gt(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fcmp gt v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %f32_le(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fcmp le v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %f32_ge(f32, f32) -> i32 { block0(v0: f32, v1: f32): v2 = fcmp ge v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } diff --git a/cranelift/filetests/filetests/wasm/f64-compares.clif b/cranelift/filetests/filetests/wasm/f64-compares.clif index c372409251f8..43d7e67d87ee 100644 --- a/cranelift/filetests/filetests/wasm/f64-compares.clif +++ b/cranelift/filetests/filetests/wasm/f64-compares.clif @@ -8,41 +8,41 @@ target x86_64 haswell function %f64_eq(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fcmp eq v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %f64_ne(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fcmp ne v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %f64_lt(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fcmp lt v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %f64_gt(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fcmp gt v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %f64_le(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fcmp le v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %f64_ge(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fcmp ge v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } diff --git a/cranelift/filetests/filetests/wasm/i32-compares.clif b/cranelift/filetests/filetests/wasm/i32-compares.clif index e6e64500c8a5..5b7d795fa4c0 100644 --- a/cranelift/filetests/filetests/wasm/i32-compares.clif +++ b/cranelift/filetests/filetests/wasm/i32-compares.clif @@ -8,76 +8,76 @@ target x86_64 haswell function %i32_eqz(i32) -> i32 { block0(v0: i32): v1 = icmp_imm eq v0, 0 - v2 = bint.i32 v1 + v2 = uextend.i32 v1 return v2 } function %i32_eq(i32, i32) -> i32 { block0(v0: i32, v1: i32): v2 = icmp eq v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i32_ne(i32, i32) -> i32 { block0(v0: i32, v1: i32): v2 = icmp ne v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } 
function %i32_lt_s(i32, i32) -> i32 { block0(v0: i32, v1: i32): v2 = icmp slt v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i32_lt_u(i32, i32) -> i32 { block0(v0: i32, v1: i32): v2 = icmp ult v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i32_gt_s(i32, i32) -> i32 { block0(v0: i32, v1: i32): v2 = icmp sgt v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i32_gt_u(i32, i32) -> i32 { block0(v0: i32, v1: i32): v2 = icmp ugt v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i32_le_s(i32, i32) -> i32 { block0(v0: i32, v1: i32): v2 = icmp sle v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i32_le_u(i32, i32) -> i32 { block0(v0: i32, v1: i32): v2 = icmp ule v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i32_ge_s(i32, i32) -> i32 { block0(v0: i32, v1: i32): v2 = icmp sge v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i32_ge_u(i32, i32) -> i32 { block0(v0: i32, v1: i32): v2 = icmp uge v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } diff --git a/cranelift/filetests/filetests/wasm/i64-compares.clif b/cranelift/filetests/filetests/wasm/i64-compares.clif index c4df3e7e8cf5..917489c56696 100644 --- a/cranelift/filetests/filetests/wasm/i64-compares.clif +++ b/cranelift/filetests/filetests/wasm/i64-compares.clif @@ -7,76 +7,76 @@ target x86_64 haswell function %i64_eqz(i64) -> i32 { block0(v0: i64): v1 = icmp_imm eq v0, 0 - v2 = bint.i32 v1 + v2 = uextend.i32 v1 return v2 } function %i64_eq(i64, i64) -> i32 { block0(v0: i64, v1: i64): v2 = icmp eq v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i64_ne(i64, i64) -> i32 { block0(v0: i64, v1: i64): v2 = icmp ne v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i64_lt_s(i64, i64) -> i32 { block0(v0: i64, v1: i64): v2 = icmp slt v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i64_lt_u(i64, i64) -> i32 { block0(v0: i64, v1: i64): v2 = icmp ult v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i64_gt_s(i64, i64) -> i32 { block0(v0: i64, v1: i64): v2 = icmp sgt v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i64_gt_u(i64, i64) -> i32 { block0(v0: i64, v1: i64): v2 = icmp ugt v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i64_le_s(i64, i64) -> i32 { block0(v0: i64, v1: i64): v2 = icmp sle v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i64_le_u(i64, i64) -> i32 { block0(v0: i64, v1: i64): v2 = icmp ule v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i64_ge_s(i64, i64) -> i32 { block0(v0: i64, v1: i64): v2 = icmp sge v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } function %i64_ge_u(i64, i64) -> i32 { block0(v0: i64, v1: i64): v2 = icmp uge v0, v1 - v3 = bint.i32 v2 + v3 = uextend.i32 v2 return v3 } diff --git a/cranelift/filetests/filetests/wasm/multi-val-mixed.clif b/cranelift/filetests/filetests/wasm/multi-val-mixed.clif index 98bc07a8dab6..647896419b5d 100644 --- a/cranelift/filetests/filetests/wasm/multi-val-mixed.clif +++ b/cranelift/filetests/filetests/wasm/multi-val-mixed.clif @@ -24,9 +24,9 @@ target x86_64 haswell ;; elif r == "i64": ;; val = "0" ;; op = "iconst.i64" -;; elif r == "b1": -;; val = "true" -;; op = "bconst.b1" +;; elif r == "i8": +;; val = "1" +;; op = "iconst.i8" ;; else: ;; raise Exception("bad r = " + str(r)) ;; return " v" + str(i) + " = " + op + " " + val @@ -50,7 +50,7 @@ target x86_64 
haswell ;; tail = "}\n" ;; return head + fn_decl + block + call + ret + tail ;; -;; for results in permutations(["i32", "i64", "f32", "f64", "b1"]): +;; for results in permutations(["i32", "i64", "f32", "f64", "i8"]): ;; print make_returner(results) ;; print make_caller(results) ;; ``` @@ -58,316 +58,316 @@ target x86_64 haswell ;; If you're modifying this test, it is likely easier to modify the script and ;; regenerate the test. -function %return_i32_i64_f32_f64_b1() -> i32, i64, f32, f64, b1 { +function %return_i32_i64_f32_f64_i8() -> i32, i64, f32, f64, i8 { block0: v0 = iconst.i32 0 v1 = iconst.i64 0 v2 = f32const 0x0.0 v3 = f64const 0x0.0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_i32_i64_f32_f64_b1() { - fn0 = %foo() -> i32,i64,f32,f64,b1 +function %call_i32_i64_f32_f64_i8() { + fn0 = %foo() -> i32,i64,f32,f64,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_i64_f32_b1_f64() -> i32, i64, f32, b1, f64 { +function %return_i32_i64_f32_b1_f64() -> i32, i64, f32, i8, f64 { block0: v0 = iconst.i32 0 v1 = iconst.i64 0 v2 = f32const 0x0.0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = f64const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i32_i64_f32_b1_f64() { - fn0 = %foo() -> i32,i64,f32,b1,f64 + fn0 = %foo() -> i32,i64,f32,i8,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_i64_f64_f32_b1() -> i32, i64, f64, f32, b1 { +function %return_i32_i64_f64_f32_i8() -> i32, i64, f64, f32, i8 { block0: v0 = iconst.i32 0 v1 = iconst.i64 0 v2 = f64const 0x0.0 v3 = f32const 0x0.0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_i32_i64_f64_f32_b1() { - fn0 = %foo() -> i32,i64,f64,f32,b1 +function %call_i32_i64_f64_f32_i8() { + fn0 = %foo() -> i32,i64,f64,f32,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_i64_f64_b1_f32() -> i32, i64, f64, b1, f32 { +function %return_i32_i64_f64_b1_f32() -> i32, i64, f64, i8, f32 { block0: v0 = iconst.i32 0 v1 = iconst.i64 0 v2 = f64const 0x0.0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = f32const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i32_i64_f64_b1_f32() { - fn0 = %foo() -> i32,i64,f64,b1,f32 + fn0 = %foo() -> i32,i64,f64,i8,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_i64_b1_f32_f64() -> i32, i64, b1, f32, f64 { +function %return_i32_i64_b1_f32_f64() -> i32, i64, i8, f32, f64 { block0: v0 = iconst.i32 0 v1 = iconst.i64 0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = f32const 0x0.0 v4 = f64const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i32_i64_b1_f32_f64() { - fn0 = %foo() -> i32,i64,b1,f32,f64 + fn0 = %foo() -> i32,i64,i8,f32,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_i64_b1_f64_f32() -> i32, i64, b1, f64, f32 { +function %return_i32_i64_b1_f64_f32() -> i32, i64, i8, f64, f32 { block0: v0 = iconst.i32 0 v1 = iconst.i64 0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = f64const 0x0.0 v4 = f32const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i32_i64_b1_f64_f32() { - fn0 = %foo() -> i32,i64,b1,f64,f32 + fn0 = %foo() -> i32,i64,i8,f64,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_f32_i64_f64_b1() -> i32, f32, i64, f64, b1 { +function %return_i32_f32_i64_f64_i8() -> i32, f32, i64, f64, i8 { block0: v0 = iconst.i32 0 v1 = f32const 0x0.0 v2 = iconst.i64 0 v3 = f64const 0x0.0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_i32_f32_i64_f64_b1() { - fn0 = %foo() -> i32,f32,i64,f64,b1 +function 
%call_i32_f32_i64_f64_i8() { + fn0 = %foo() -> i32,f32,i64,f64,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_f32_i64_b1_f64() -> i32, f32, i64, b1, f64 { +function %return_i32_f32_i64_b1_f64() -> i32, f32, i64, i8, f64 { block0: v0 = iconst.i32 0 v1 = f32const 0x0.0 v2 = iconst.i64 0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = f64const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i32_f32_i64_b1_f64() { - fn0 = %foo() -> i32,f32,i64,b1,f64 + fn0 = %foo() -> i32,f32,i64,i8,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_f32_f64_i64_b1() -> i32, f32, f64, i64, b1 { +function %return_i32_f32_f64_i64_i8() -> i32, f32, f64, i64, i8 { block0: v0 = iconst.i32 0 v1 = f32const 0x0.0 v2 = f64const 0x0.0 v3 = iconst.i64 0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_i32_f32_f64_i64_b1() { - fn0 = %foo() -> i32,f32,f64,i64,b1 +function %call_i32_f32_f64_i64_i8() { + fn0 = %foo() -> i32,f32,f64,i64,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_f32_f64_b1_i64() -> i32, f32, f64, b1, i64 { +function %return_i32_f32_f64_b1_i64() -> i32, f32, f64, i8, i64 { block0: v0 = iconst.i32 0 v1 = f32const 0x0.0 v2 = f64const 0x0.0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = iconst.i64 0 return v0, v1, v2, v3, v4 } function %call_i32_f32_f64_b1_i64() { - fn0 = %foo() -> i32,f32,f64,b1,i64 + fn0 = %foo() -> i32,f32,f64,i8,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_f32_b1_i64_f64() -> i32, f32, b1, i64, f64 { +function %return_i32_f32_b1_i64_f64() -> i32, f32, i8, i64, f64 { block0: v0 = iconst.i32 0 v1 = f32const 0x0.0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = iconst.i64 0 v4 = f64const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i32_f32_b1_i64_f64() { - fn0 = %foo() -> i32,f32,b1,i64,f64 + fn0 = %foo() -> i32,f32,i8,i64,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_f32_b1_f64_i64() -> i32, f32, b1, f64, i64 { +function %return_i32_f32_b1_f64_i64() -> i32, f32, i8, f64, i64 { block0: v0 = iconst.i32 0 v1 = f32const 0x0.0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = f64const 0x0.0 v4 = iconst.i64 0 return v0, v1, v2, v3, v4 } function %call_i32_f32_b1_f64_i64() { - fn0 = %foo() -> i32,f32,b1,f64,i64 + fn0 = %foo() -> i32,f32,i8,f64,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_f64_i64_f32_b1() -> i32, f64, i64, f32, b1 { +function %return_i32_f64_i64_f32_i8() -> i32, f64, i64, f32, i8 { block0: v0 = iconst.i32 0 v1 = f64const 0x0.0 v2 = iconst.i64 0 v3 = f32const 0x0.0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_i32_f64_i64_f32_b1() { - fn0 = %foo() -> i32,f64,i64,f32,b1 +function %call_i32_f64_i64_f32_i8() { + fn0 = %foo() -> i32,f64,i64,f32,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_f64_i64_b1_f32() -> i32, f64, i64, b1, f32 { +function %return_i32_f64_i64_b1_f32() -> i32, f64, i64, i8, f32 { block0: v0 = iconst.i32 0 v1 = f64const 0x0.0 v2 = iconst.i64 0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = f32const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i32_f64_i64_b1_f32() { - fn0 = %foo() -> i32,f64,i64,b1,f32 + fn0 = %foo() -> i32,f64,i64,i8,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_f64_f32_i64_b1() -> i32, f64, f32, i64, b1 { +function %return_i32_f64_f32_i64_i8() -> i32, f64, f32, i64, i8 { block0: v0 = iconst.i32 0 v1 = f64const 0x0.0 v2 = f32const 0x0.0 v3 = iconst.i64 0 - v4 = bconst.b1 true + v4 = 
iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_i32_f64_f32_i64_b1() { - fn0 = %foo() -> i32,f64,f32,i64,b1 +function %call_i32_f64_f32_i64_i8() { + fn0 = %foo() -> i32,f64,f32,i64,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_f64_f32_b1_i64() -> i32, f64, f32, b1, i64 { +function %return_i32_f64_f32_b1_i64() -> i32, f64, f32, i8, i64 { block0: v0 = iconst.i32 0 v1 = f64const 0x0.0 v2 = f32const 0x0.0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = iconst.i64 0 return v0, v1, v2, v3, v4 } function %call_i32_f64_f32_b1_i64() { - fn0 = %foo() -> i32,f64,f32,b1,i64 + fn0 = %foo() -> i32,f64,f32,i8,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_f64_b1_i64_f32() -> i32, f64, b1, i64, f32 { +function %return_i32_f64_b1_i64_f32() -> i32, f64, i8, i64, f32 { block0: v0 = iconst.i32 0 v1 = f64const 0x0.0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = iconst.i64 0 v4 = f32const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i32_f64_b1_i64_f32() { - fn0 = %foo() -> i32,f64,b1,i64,f32 + fn0 = %foo() -> i32,f64,i8,i64,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_f64_b1_f32_i64() -> i32, f64, b1, f32, i64 { +function %return_i32_f64_b1_f32_i64() -> i32, f64, i8, f32, i64 { block0: v0 = iconst.i32 0 v1 = f64const 0x0.0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = f32const 0x0.0 v4 = iconst.i64 0 return v0, v1, v2, v3, v4 } function %call_i32_f64_b1_f32_i64() { - fn0 = %foo() -> i32,f64,b1,f32,i64 + fn0 = %foo() -> i32,f64,i8,f32,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_b1_i64_f32_f64() -> i32, b1, i64, f32, f64 { +function %return_i32_b1_i64_f32_f64() -> i32, i8, i64, f32, f64 { block0: v0 = iconst.i32 0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = iconst.i64 0 v3 = f32const 0x0.0 v4 = f64const 0x0.0 @@ -375,16 +375,16 @@ block0: } function %call_i32_b1_i64_f32_f64() { - fn0 = %foo() -> i32,b1,i64,f32,f64 + fn0 = %foo() -> i32,i8,i64,f32,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_b1_i64_f64_f32() -> i32, b1, i64, f64, f32 { +function %return_i32_b1_i64_f64_f32() -> i32, i8, i64, f64, f32 { block0: v0 = iconst.i32 0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = iconst.i64 0 v3 = f64const 0x0.0 v4 = f32const 0x0.0 @@ -392,16 +392,16 @@ block0: } function %call_i32_b1_i64_f64_f32() { - fn0 = %foo() -> i32,b1,i64,f64,f32 + fn0 = %foo() -> i32,i8,i64,f64,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_b1_f32_i64_f64() -> i32, b1, f32, i64, f64 { +function %return_i32_b1_f32_i64_f64() -> i32, i8, f32, i64, f64 { block0: v0 = iconst.i32 0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = f32const 0x0.0 v3 = iconst.i64 0 v4 = f64const 0x0.0 @@ -409,16 +409,16 @@ block0: } function %call_i32_b1_f32_i64_f64() { - fn0 = %foo() -> i32,b1,f32,i64,f64 + fn0 = %foo() -> i32,i8,f32,i64,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_b1_f32_f64_i64() -> i32, b1, f32, f64, i64 { +function %return_i32_b1_f32_f64_i64() -> i32, i8, f32, f64, i64 { block0: v0 = iconst.i32 0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = f32const 0x0.0 v3 = f64const 0x0.0 v4 = iconst.i64 0 @@ -426,16 +426,16 @@ block0: } function %call_i32_b1_f32_f64_i64() { - fn0 = %foo() -> i32,b1,f32,f64,i64 + fn0 = %foo() -> i32,i8,f32,f64,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_b1_f64_i64_f32() -> i32, b1, f64, i64, f32 { +function %return_i32_b1_f64_i64_f32() -> i32, i8, f64, i64, f32 { block0: v0 = iconst.i32 0 - v1 = bconst.b1 true + v1 = 
iconst.i8 1 v2 = f64const 0x0.0 v3 = iconst.i64 0 v4 = f32const 0x0.0 @@ -443,16 +443,16 @@ block0: } function %call_i32_b1_f64_i64_f32() { - fn0 = %foo() -> i32,b1,f64,i64,f32 + fn0 = %foo() -> i32,i8,f64,i64,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i32_b1_f64_f32_i64() -> i32, b1, f64, f32, i64 { +function %return_i32_b1_f64_f32_i64() -> i32, i8, f64, f32, i64 { block0: v0 = iconst.i32 0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = f64const 0x0.0 v3 = f32const 0x0.0 v4 = iconst.i64 0 @@ -460,322 +460,322 @@ block0: } function %call_i32_b1_f64_f32_i64() { - fn0 = %foo() -> i32,b1,f64,f32,i64 + fn0 = %foo() -> i32,i8,f64,f32,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_i32_f32_f64_b1() -> i64, i32, f32, f64, b1 { +function %return_i64_i32_f32_f64_i8() -> i64, i32, f32, f64, i8 { block0: v0 = iconst.i64 0 v1 = iconst.i32 0 v2 = f32const 0x0.0 v3 = f64const 0x0.0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_i64_i32_f32_f64_b1() { - fn0 = %foo() -> i64,i32,f32,f64,b1 +function %call_i64_i32_f32_f64_i8() { + fn0 = %foo() -> i64,i32,f32,f64,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_i32_f32_b1_f64() -> i64, i32, f32, b1, f64 { +function %return_i64_i32_f32_b1_f64() -> i64, i32, f32, i8, f64 { block0: v0 = iconst.i64 0 v1 = iconst.i32 0 v2 = f32const 0x0.0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = f64const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i64_i32_f32_b1_f64() { - fn0 = %foo() -> i64,i32,f32,b1,f64 + fn0 = %foo() -> i64,i32,f32,i8,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_i32_f64_f32_b1() -> i64, i32, f64, f32, b1 { +function %return_i64_i32_f64_f32_i8() -> i64, i32, f64, f32, i8 { block0: v0 = iconst.i64 0 v1 = iconst.i32 0 v2 = f64const 0x0.0 v3 = f32const 0x0.0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_i64_i32_f64_f32_b1() { - fn0 = %foo() -> i64,i32,f64,f32,b1 +function %call_i64_i32_f64_f32_i8() { + fn0 = %foo() -> i64,i32,f64,f32,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_i32_f64_b1_f32() -> i64, i32, f64, b1, f32 { +function %return_i64_i32_f64_b1_f32() -> i64, i32, f64, i8, f32 { block0: v0 = iconst.i64 0 v1 = iconst.i32 0 v2 = f64const 0x0.0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = f32const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i64_i32_f64_b1_f32() { - fn0 = %foo() -> i64,i32,f64,b1,f32 + fn0 = %foo() -> i64,i32,f64,i8,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_i32_b1_f32_f64() -> i64, i32, b1, f32, f64 { +function %return_i64_i32_b1_f32_f64() -> i64, i32, i8, f32, f64 { block0: v0 = iconst.i64 0 v1 = iconst.i32 0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = f32const 0x0.0 v4 = f64const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i64_i32_b1_f32_f64() { - fn0 = %foo() -> i64,i32,b1,f32,f64 + fn0 = %foo() -> i64,i32,i8,f32,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_i32_b1_f64_f32() -> i64, i32, b1, f64, f32 { +function %return_i64_i32_b1_f64_f32() -> i64, i32, i8, f64, f32 { block0: v0 = iconst.i64 0 v1 = iconst.i32 0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = f64const 0x0.0 v4 = f32const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i64_i32_b1_f64_f32() { - fn0 = %foo() -> i64,i32,b1,f64,f32 + fn0 = %foo() -> i64,i32,i8,f64,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_f32_i32_f64_b1() -> i64, f32, i32, f64, b1 { +function 
%return_i64_f32_i32_f64_i8() -> i64, f32, i32, f64, i8 { block0: v0 = iconst.i64 0 v1 = f32const 0x0.0 v2 = iconst.i32 0 v3 = f64const 0x0.0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_i64_f32_i32_f64_b1() { - fn0 = %foo() -> i64,f32,i32,f64,b1 +function %call_i64_f32_i32_f64_i8() { + fn0 = %foo() -> i64,f32,i32,f64,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_f32_i32_b1_f64() -> i64, f32, i32, b1, f64 { +function %return_i64_f32_i32_b1_f64() -> i64, f32, i32, i8, f64 { block0: v0 = iconst.i64 0 v1 = f32const 0x0.0 v2 = iconst.i32 0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = f64const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i64_f32_i32_b1_f64() { - fn0 = %foo() -> i64,f32,i32,b1,f64 + fn0 = %foo() -> i64,f32,i32,i8,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_f32_f64_i32_b1() -> i64, f32, f64, i32, b1 { +function %return_i64_f32_f64_i32_i8() -> i64, f32, f64, i32, i8 { block0: v0 = iconst.i64 0 v1 = f32const 0x0.0 v2 = f64const 0x0.0 v3 = iconst.i32 0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_i64_f32_f64_i32_b1() { - fn0 = %foo() -> i64,f32,f64,i32,b1 +function %call_i64_f32_f64_i32_i8() { + fn0 = %foo() -> i64,f32,f64,i32,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_f32_f64_b1_i32() -> i64, f32, f64, b1, i32 { +function %return_i64_f32_f64_b1_i32() -> i64, f32, f64, i8, i32 { block0: v0 = iconst.i64 0 v1 = f32const 0x0.0 v2 = f64const 0x0.0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = iconst.i32 0 return v0, v1, v2, v3, v4 } function %call_i64_f32_f64_b1_i32() { - fn0 = %foo() -> i64,f32,f64,b1,i32 + fn0 = %foo() -> i64,f32,f64,i8,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_f32_b1_i32_f64() -> i64, f32, b1, i32, f64 { +function %return_i64_f32_b1_i32_f64() -> i64, f32, i8, i32, f64 { block0: v0 = iconst.i64 0 v1 = f32const 0x0.0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = iconst.i32 0 v4 = f64const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i64_f32_b1_i32_f64() { - fn0 = %foo() -> i64,f32,b1,i32,f64 + fn0 = %foo() -> i64,f32,i8,i32,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_f32_b1_f64_i32() -> i64, f32, b1, f64, i32 { +function %return_i64_f32_b1_f64_i32() -> i64, f32, i8, f64, i32 { block0: v0 = iconst.i64 0 v1 = f32const 0x0.0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = f64const 0x0.0 v4 = iconst.i32 0 return v0, v1, v2, v3, v4 } function %call_i64_f32_b1_f64_i32() { - fn0 = %foo() -> i64,f32,b1,f64,i32 + fn0 = %foo() -> i64,f32,i8,f64,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_f64_i32_f32_b1() -> i64, f64, i32, f32, b1 { +function %return_i64_f64_i32_f32_i8() -> i64, f64, i32, f32, i8 { block0: v0 = iconst.i64 0 v1 = f64const 0x0.0 v2 = iconst.i32 0 v3 = f32const 0x0.0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_i64_f64_i32_f32_b1() { - fn0 = %foo() -> i64,f64,i32,f32,b1 +function %call_i64_f64_i32_f32_i8() { + fn0 = %foo() -> i64,f64,i32,f32,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_f64_i32_b1_f32() -> i64, f64, i32, b1, f32 { +function %return_i64_f64_i32_b1_f32() -> i64, f64, i32, i8, f32 { block0: v0 = iconst.i64 0 v1 = f64const 0x0.0 v2 = iconst.i32 0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = f32const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i64_f64_i32_b1_f32() { - fn0 = %foo() -> i64,f64,i32,b1,f32 + fn0 = %foo() -> i64,f64,i32,i8,f32 
block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_f64_f32_i32_b1() -> i64, f64, f32, i32, b1 { +function %return_i64_f64_f32_i32_i8() -> i64, f64, f32, i32, i8 { block0: v0 = iconst.i64 0 v1 = f64const 0x0.0 v2 = f32const 0x0.0 v3 = iconst.i32 0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_i64_f64_f32_i32_b1() { - fn0 = %foo() -> i64,f64,f32,i32,b1 +function %call_i64_f64_f32_i32_i8() { + fn0 = %foo() -> i64,f64,f32,i32,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_f64_f32_b1_i32() -> i64, f64, f32, b1, i32 { +function %return_i64_f64_f32_b1_i32() -> i64, f64, f32, i8, i32 { block0: v0 = iconst.i64 0 v1 = f64const 0x0.0 v2 = f32const 0x0.0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = iconst.i32 0 return v0, v1, v2, v3, v4 } function %call_i64_f64_f32_b1_i32() { - fn0 = %foo() -> i64,f64,f32,b1,i32 + fn0 = %foo() -> i64,f64,f32,i8,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_f64_b1_i32_f32() -> i64, f64, b1, i32, f32 { +function %return_i64_f64_b1_i32_f32() -> i64, f64, i8, i32, f32 { block0: v0 = iconst.i64 0 v1 = f64const 0x0.0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = iconst.i32 0 v4 = f32const 0x0.0 return v0, v1, v2, v3, v4 } function %call_i64_f64_b1_i32_f32() { - fn0 = %foo() -> i64,f64,b1,i32,f32 + fn0 = %foo() -> i64,f64,i8,i32,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_f64_b1_f32_i32() -> i64, f64, b1, f32, i32 { +function %return_i64_f64_b1_f32_i32() -> i64, f64, i8, f32, i32 { block0: v0 = iconst.i64 0 v1 = f64const 0x0.0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = f32const 0x0.0 v4 = iconst.i32 0 return v0, v1, v2, v3, v4 } function %call_i64_f64_b1_f32_i32() { - fn0 = %foo() -> i64,f64,b1,f32,i32 + fn0 = %foo() -> i64,f64,i8,f32,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_b1_i32_f32_f64() -> i64, b1, i32, f32, f64 { +function %return_i64_b1_i32_f32_f64() -> i64, i8, i32, f32, f64 { block0: v0 = iconst.i64 0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = iconst.i32 0 v3 = f32const 0x0.0 v4 = f64const 0x0.0 @@ -783,16 +783,16 @@ block0: } function %call_i64_b1_i32_f32_f64() { - fn0 = %foo() -> i64,b1,i32,f32,f64 + fn0 = %foo() -> i64,i8,i32,f32,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_b1_i32_f64_f32() -> i64, b1, i32, f64, f32 { +function %return_i64_b1_i32_f64_f32() -> i64, i8, i32, f64, f32 { block0: v0 = iconst.i64 0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = iconst.i32 0 v3 = f64const 0x0.0 v4 = f32const 0x0.0 @@ -800,16 +800,16 @@ block0: } function %call_i64_b1_i32_f64_f32() { - fn0 = %foo() -> i64,b1,i32,f64,f32 + fn0 = %foo() -> i64,i8,i32,f64,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_b1_f32_i32_f64() -> i64, b1, f32, i32, f64 { +function %return_i64_b1_f32_i32_f64() -> i64, i8, f32, i32, f64 { block0: v0 = iconst.i64 0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = f32const 0x0.0 v3 = iconst.i32 0 v4 = f64const 0x0.0 @@ -817,16 +817,16 @@ block0: } function %call_i64_b1_f32_i32_f64() { - fn0 = %foo() -> i64,b1,f32,i32,f64 + fn0 = %foo() -> i64,i8,f32,i32,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_b1_f32_f64_i32() -> i64, b1, f32, f64, i32 { +function %return_i64_b1_f32_f64_i32() -> i64, i8, f32, f64, i32 { block0: v0 = iconst.i64 0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = f32const 0x0.0 v3 = f64const 0x0.0 v4 = iconst.i32 0 @@ -834,16 +834,16 @@ block0: } function %call_i64_b1_f32_f64_i32() { - fn0 = %foo() -> 
i64,b1,f32,f64,i32 + fn0 = %foo() -> i64,i8,f32,f64,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_b1_f64_i32_f32() -> i64, b1, f64, i32, f32 { +function %return_i64_b1_f64_i32_f32() -> i64, i8, f64, i32, f32 { block0: v0 = iconst.i64 0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = f64const 0x0.0 v3 = iconst.i32 0 v4 = f32const 0x0.0 @@ -851,16 +851,16 @@ block0: } function %call_i64_b1_f64_i32_f32() { - fn0 = %foo() -> i64,b1,f64,i32,f32 + fn0 = %foo() -> i64,i8,f64,i32,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_i64_b1_f64_f32_i32() -> i64, b1, f64, f32, i32 { +function %return_i64_b1_f64_f32_i32() -> i64, i8, f64, f32, i32 { block0: v0 = iconst.i64 0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = f64const 0x0.0 v3 = f32const 0x0.0 v4 = iconst.i32 0 @@ -868,322 +868,322 @@ block0: } function %call_i64_b1_f64_f32_i32() { - fn0 = %foo() -> i64,b1,f64,f32,i32 + fn0 = %foo() -> i64,i8,f64,f32,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_i32_i64_f64_b1() -> f32, i32, i64, f64, b1 { +function %return_f32_i32_i64_f64_i8() -> f32, i32, i64, f64, i8 { block0: v0 = f32const 0x0.0 v1 = iconst.i32 0 v2 = iconst.i64 0 v3 = f64const 0x0.0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_f32_i32_i64_f64_b1() { - fn0 = %foo() -> f32,i32,i64,f64,b1 +function %call_f32_i32_i64_f64_i8() { + fn0 = %foo() -> f32,i32,i64,f64,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_i32_i64_b1_f64() -> f32, i32, i64, b1, f64 { +function %return_f32_i32_i64_b1_f64() -> f32, i32, i64, i8, f64 { block0: v0 = f32const 0x0.0 v1 = iconst.i32 0 v2 = iconst.i64 0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = f64const 0x0.0 return v0, v1, v2, v3, v4 } function %call_f32_i32_i64_b1_f64() { - fn0 = %foo() -> f32,i32,i64,b1,f64 + fn0 = %foo() -> f32,i32,i64,i8,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_i32_f64_i64_b1() -> f32, i32, f64, i64, b1 { +function %return_f32_i32_f64_i64_i8() -> f32, i32, f64, i64, i8 { block0: v0 = f32const 0x0.0 v1 = iconst.i32 0 v2 = f64const 0x0.0 v3 = iconst.i64 0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_f32_i32_f64_i64_b1() { - fn0 = %foo() -> f32,i32,f64,i64,b1 +function %call_f32_i32_f64_i64_i8() { + fn0 = %foo() -> f32,i32,f64,i64,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_i32_f64_b1_i64() -> f32, i32, f64, b1, i64 { +function %return_f32_i32_f64_b1_i64() -> f32, i32, f64, i8, i64 { block0: v0 = f32const 0x0.0 v1 = iconst.i32 0 v2 = f64const 0x0.0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = iconst.i64 0 return v0, v1, v2, v3, v4 } function %call_f32_i32_f64_b1_i64() { - fn0 = %foo() -> f32,i32,f64,b1,i64 + fn0 = %foo() -> f32,i32,f64,i8,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_i32_b1_i64_f64() -> f32, i32, b1, i64, f64 { +function %return_f32_i32_b1_i64_f64() -> f32, i32, i8, i64, f64 { block0: v0 = f32const 0x0.0 v1 = iconst.i32 0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = iconst.i64 0 v4 = f64const 0x0.0 return v0, v1, v2, v3, v4 } function %call_f32_i32_b1_i64_f64() { - fn0 = %foo() -> f32,i32,b1,i64,f64 + fn0 = %foo() -> f32,i32,i8,i64,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_i32_b1_f64_i64() -> f32, i32, b1, f64, i64 { +function %return_f32_i32_b1_f64_i64() -> f32, i32, i8, f64, i64 { block0: v0 = f32const 0x0.0 v1 = iconst.i32 0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = f64const 0x0.0 v4 = 
iconst.i64 0 return v0, v1, v2, v3, v4 } function %call_f32_i32_b1_f64_i64() { - fn0 = %foo() -> f32,i32,b1,f64,i64 + fn0 = %foo() -> f32,i32,i8,f64,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_i64_i32_f64_b1() -> f32, i64, i32, f64, b1 { +function %return_f32_i64_i32_f64_i8() -> f32, i64, i32, f64, i8 { block0: v0 = f32const 0x0.0 v1 = iconst.i64 0 v2 = iconst.i32 0 v3 = f64const 0x0.0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_f32_i64_i32_f64_b1() { - fn0 = %foo() -> f32,i64,i32,f64,b1 +function %call_f32_i64_i32_f64_i8() { + fn0 = %foo() -> f32,i64,i32,f64,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_i64_i32_b1_f64() -> f32, i64, i32, b1, f64 { +function %return_f32_i64_i32_b1_f64() -> f32, i64, i32, i8, f64 { block0: v0 = f32const 0x0.0 v1 = iconst.i64 0 v2 = iconst.i32 0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = f64const 0x0.0 return v0, v1, v2, v3, v4 } function %call_f32_i64_i32_b1_f64() { - fn0 = %foo() -> f32,i64,i32,b1,f64 + fn0 = %foo() -> f32,i64,i32,i8,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_i64_f64_i32_b1() -> f32, i64, f64, i32, b1 { +function %return_f32_i64_f64_i32_i8() -> f32, i64, f64, i32, i8 { block0: v0 = f32const 0x0.0 v1 = iconst.i64 0 v2 = f64const 0x0.0 v3 = iconst.i32 0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_f32_i64_f64_i32_b1() { - fn0 = %foo() -> f32,i64,f64,i32,b1 +function %call_f32_i64_f64_i32_i8() { + fn0 = %foo() -> f32,i64,f64,i32,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_i64_f64_b1_i32() -> f32, i64, f64, b1, i32 { +function %return_f32_i64_f64_b1_i32() -> f32, i64, f64, i8, i32 { block0: v0 = f32const 0x0.0 v1 = iconst.i64 0 v2 = f64const 0x0.0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = iconst.i32 0 return v0, v1, v2, v3, v4 } function %call_f32_i64_f64_b1_i32() { - fn0 = %foo() -> f32,i64,f64,b1,i32 + fn0 = %foo() -> f32,i64,f64,i8,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_i64_b1_i32_f64() -> f32, i64, b1, i32, f64 { +function %return_f32_i64_b1_i32_f64() -> f32, i64, i8, i32, f64 { block0: v0 = f32const 0x0.0 v1 = iconst.i64 0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = iconst.i32 0 v4 = f64const 0x0.0 return v0, v1, v2, v3, v4 } function %call_f32_i64_b1_i32_f64() { - fn0 = %foo() -> f32,i64,b1,i32,f64 + fn0 = %foo() -> f32,i64,i8,i32,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_i64_b1_f64_i32() -> f32, i64, b1, f64, i32 { +function %return_f32_i64_b1_f64_i32() -> f32, i64, i8, f64, i32 { block0: v0 = f32const 0x0.0 v1 = iconst.i64 0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = f64const 0x0.0 v4 = iconst.i32 0 return v0, v1, v2, v3, v4 } function %call_f32_i64_b1_f64_i32() { - fn0 = %foo() -> f32,i64,b1,f64,i32 + fn0 = %foo() -> f32,i64,i8,f64,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_f64_i32_i64_b1() -> f32, f64, i32, i64, b1 { +function %return_f32_f64_i32_i64_i8() -> f32, f64, i32, i64, i8 { block0: v0 = f32const 0x0.0 v1 = f64const 0x0.0 v2 = iconst.i32 0 v3 = iconst.i64 0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_f32_f64_i32_i64_b1() { - fn0 = %foo() -> f32,f64,i32,i64,b1 +function %call_f32_f64_i32_i64_i8() { + fn0 = %foo() -> f32,f64,i32,i64,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_f64_i32_b1_i64() -> f32, f64, i32, b1, i64 { +function %return_f32_f64_i32_b1_i64() -> f32, f64, i32, i8, 
i64 { block0: v0 = f32const 0x0.0 v1 = f64const 0x0.0 v2 = iconst.i32 0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = iconst.i64 0 return v0, v1, v2, v3, v4 } function %call_f32_f64_i32_b1_i64() { - fn0 = %foo() -> f32,f64,i32,b1,i64 + fn0 = %foo() -> f32,f64,i32,i8,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_f64_i64_i32_b1() -> f32, f64, i64, i32, b1 { +function %return_f32_f64_i64_i32_i8() -> f32, f64, i64, i32, i8 { block0: v0 = f32const 0x0.0 v1 = f64const 0x0.0 v2 = iconst.i64 0 v3 = iconst.i32 0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_f32_f64_i64_i32_b1() { - fn0 = %foo() -> f32,f64,i64,i32,b1 +function %call_f32_f64_i64_i32_i8() { + fn0 = %foo() -> f32,f64,i64,i32,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_f64_i64_b1_i32() -> f32, f64, i64, b1, i32 { +function %return_f32_f64_i64_b1_i32() -> f32, f64, i64, i8, i32 { block0: v0 = f32const 0x0.0 v1 = f64const 0x0.0 v2 = iconst.i64 0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = iconst.i32 0 return v0, v1, v2, v3, v4 } function %call_f32_f64_i64_b1_i32() { - fn0 = %foo() -> f32,f64,i64,b1,i32 + fn0 = %foo() -> f32,f64,i64,i8,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_f64_b1_i32_i64() -> f32, f64, b1, i32, i64 { +function %return_f32_f64_b1_i32_i64() -> f32, f64, i8, i32, i64 { block0: v0 = f32const 0x0.0 v1 = f64const 0x0.0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = iconst.i32 0 v4 = iconst.i64 0 return v0, v1, v2, v3, v4 } function %call_f32_f64_b1_i32_i64() { - fn0 = %foo() -> f32,f64,b1,i32,i64 + fn0 = %foo() -> f32,f64,i8,i32,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_f64_b1_i64_i32() -> f32, f64, b1, i64, i32 { +function %return_f32_f64_b1_i64_i32() -> f32, f64, i8, i64, i32 { block0: v0 = f32const 0x0.0 v1 = f64const 0x0.0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = iconst.i64 0 v4 = iconst.i32 0 return v0, v1, v2, v3, v4 } function %call_f32_f64_b1_i64_i32() { - fn0 = %foo() -> f32,f64,b1,i64,i32 + fn0 = %foo() -> f32,f64,i8,i64,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_b1_i32_i64_f64() -> f32, b1, i32, i64, f64 { +function %return_f32_b1_i32_i64_f64() -> f32, i8, i32, i64, f64 { block0: v0 = f32const 0x0.0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = iconst.i32 0 v3 = iconst.i64 0 v4 = f64const 0x0.0 @@ -1191,16 +1191,16 @@ block0: } function %call_f32_b1_i32_i64_f64() { - fn0 = %foo() -> f32,b1,i32,i64,f64 + fn0 = %foo() -> f32,i8,i32,i64,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_b1_i32_f64_i64() -> f32, b1, i32, f64, i64 { +function %return_f32_b1_i32_f64_i64() -> f32, i8, i32, f64, i64 { block0: v0 = f32const 0x0.0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = iconst.i32 0 v3 = f64const 0x0.0 v4 = iconst.i64 0 @@ -1208,16 +1208,16 @@ block0: } function %call_f32_b1_i32_f64_i64() { - fn0 = %foo() -> f32,b1,i32,f64,i64 + fn0 = %foo() -> f32,i8,i32,f64,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_b1_i64_i32_f64() -> f32, b1, i64, i32, f64 { +function %return_f32_b1_i64_i32_f64() -> f32, i8, i64, i32, f64 { block0: v0 = f32const 0x0.0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = iconst.i64 0 v3 = iconst.i32 0 v4 = f64const 0x0.0 @@ -1225,16 +1225,16 @@ block0: } function %call_f32_b1_i64_i32_f64() { - fn0 = %foo() -> f32,b1,i64,i32,f64 + fn0 = %foo() -> f32,i8,i64,i32,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_b1_i64_f64_i32() -> f32, b1, i64, f64, i32 { 
+function %return_f32_b1_i64_f64_i32() -> f32, i8, i64, f64, i32 { block0: v0 = f32const 0x0.0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = iconst.i64 0 v3 = f64const 0x0.0 v4 = iconst.i32 0 @@ -1242,16 +1242,16 @@ block0: } function %call_f32_b1_i64_f64_i32() { - fn0 = %foo() -> f32,b1,i64,f64,i32 + fn0 = %foo() -> f32,i8,i64,f64,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_b1_f64_i32_i64() -> f32, b1, f64, i32, i64 { +function %return_f32_b1_f64_i32_i64() -> f32, i8, f64, i32, i64 { block0: v0 = f32const 0x0.0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = f64const 0x0.0 v3 = iconst.i32 0 v4 = iconst.i64 0 @@ -1259,16 +1259,16 @@ block0: } function %call_f32_b1_f64_i32_i64() { - fn0 = %foo() -> f32,b1,f64,i32,i64 + fn0 = %foo() -> f32,i8,f64,i32,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f32_b1_f64_i64_i32() -> f32, b1, f64, i64, i32 { +function %return_f32_b1_f64_i64_i32() -> f32, i8, f64, i64, i32 { block0: v0 = f32const 0x0.0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = f64const 0x0.0 v3 = iconst.i64 0 v4 = iconst.i32 0 @@ -1276,322 +1276,322 @@ block0: } function %call_f32_b1_f64_i64_i32() { - fn0 = %foo() -> f32,b1,f64,i64,i32 + fn0 = %foo() -> f32,i8,f64,i64,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_i32_i64_f32_b1() -> f64, i32, i64, f32, b1 { +function %return_f64_i32_i64_f32_i8() -> f64, i32, i64, f32, i8 { block0: v0 = f64const 0x0.0 v1 = iconst.i32 0 v2 = iconst.i64 0 v3 = f32const 0x0.0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_f64_i32_i64_f32_b1() { - fn0 = %foo() -> f64,i32,i64,f32,b1 +function %call_f64_i32_i64_f32_i8() { + fn0 = %foo() -> f64,i32,i64,f32,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_i32_i64_b1_f32() -> f64, i32, i64, b1, f32 { +function %return_f64_i32_i64_b1_f32() -> f64, i32, i64, i8, f32 { block0: v0 = f64const 0x0.0 v1 = iconst.i32 0 v2 = iconst.i64 0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = f32const 0x0.0 return v0, v1, v2, v3, v4 } function %call_f64_i32_i64_b1_f32() { - fn0 = %foo() -> f64,i32,i64,b1,f32 + fn0 = %foo() -> f64,i32,i64,i8,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_i32_f32_i64_b1() -> f64, i32, f32, i64, b1 { +function %return_f64_i32_f32_i64_i8() -> f64, i32, f32, i64, i8 { block0: v0 = f64const 0x0.0 v1 = iconst.i32 0 v2 = f32const 0x0.0 v3 = iconst.i64 0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_f64_i32_f32_i64_b1() { - fn0 = %foo() -> f64,i32,f32,i64,b1 +function %call_f64_i32_f32_i64_i8() { + fn0 = %foo() -> f64,i32,f32,i64,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_i32_f32_b1_i64() -> f64, i32, f32, b1, i64 { +function %return_f64_i32_f32_b1_i64() -> f64, i32, f32, i8, i64 { block0: v0 = f64const 0x0.0 v1 = iconst.i32 0 v2 = f32const 0x0.0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = iconst.i64 0 return v0, v1, v2, v3, v4 } function %call_f64_i32_f32_b1_i64() { - fn0 = %foo() -> f64,i32,f32,b1,i64 + fn0 = %foo() -> f64,i32,f32,i8,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_i32_b1_i64_f32() -> f64, i32, b1, i64, f32 { +function %return_f64_i32_b1_i64_f32() -> f64, i32, i8, i64, f32 { block0: v0 = f64const 0x0.0 v1 = iconst.i32 0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = iconst.i64 0 v4 = f32const 0x0.0 return v0, v1, v2, v3, v4 } function %call_f64_i32_b1_i64_f32() { - fn0 = %foo() -> f64,i32,b1,i64,f32 + fn0 = %foo() -> f64,i32,i8,i64,f32 block0: 
v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_i32_b1_f32_i64() -> f64, i32, b1, f32, i64 { +function %return_f64_i32_b1_f32_i64() -> f64, i32, i8, f32, i64 { block0: v0 = f64const 0x0.0 v1 = iconst.i32 0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = f32const 0x0.0 v4 = iconst.i64 0 return v0, v1, v2, v3, v4 } function %call_f64_i32_b1_f32_i64() { - fn0 = %foo() -> f64,i32,b1,f32,i64 + fn0 = %foo() -> f64,i32,i8,f32,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_i64_i32_f32_b1() -> f64, i64, i32, f32, b1 { +function %return_f64_i64_i32_f32_i8() -> f64, i64, i32, f32, i8 { block0: v0 = f64const 0x0.0 v1 = iconst.i64 0 v2 = iconst.i32 0 v3 = f32const 0x0.0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_f64_i64_i32_f32_b1() { - fn0 = %foo() -> f64,i64,i32,f32,b1 +function %call_f64_i64_i32_f32_i8() { + fn0 = %foo() -> f64,i64,i32,f32,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_i64_i32_b1_f32() -> f64, i64, i32, b1, f32 { +function %return_f64_i64_i32_b1_f32() -> f64, i64, i32, i8, f32 { block0: v0 = f64const 0x0.0 v1 = iconst.i64 0 v2 = iconst.i32 0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = f32const 0x0.0 return v0, v1, v2, v3, v4 } function %call_f64_i64_i32_b1_f32() { - fn0 = %foo() -> f64,i64,i32,b1,f32 + fn0 = %foo() -> f64,i64,i32,i8,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_i64_f32_i32_b1() -> f64, i64, f32, i32, b1 { +function %return_f64_i64_f32_i32_i8() -> f64, i64, f32, i32, i8 { block0: v0 = f64const 0x0.0 v1 = iconst.i64 0 v2 = f32const 0x0.0 v3 = iconst.i32 0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_f64_i64_f32_i32_b1() { - fn0 = %foo() -> f64,i64,f32,i32,b1 +function %call_f64_i64_f32_i32_i8() { + fn0 = %foo() -> f64,i64,f32,i32,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_i64_f32_b1_i32() -> f64, i64, f32, b1, i32 { +function %return_f64_i64_f32_b1_i32() -> f64, i64, f32, i8, i32 { block0: v0 = f64const 0x0.0 v1 = iconst.i64 0 v2 = f32const 0x0.0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = iconst.i32 0 return v0, v1, v2, v3, v4 } function %call_f64_i64_f32_b1_i32() { - fn0 = %foo() -> f64,i64,f32,b1,i32 + fn0 = %foo() -> f64,i64,f32,i8,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_i64_b1_i32_f32() -> f64, i64, b1, i32, f32 { +function %return_f64_i64_b1_i32_f32() -> f64, i64, i8, i32, f32 { block0: v0 = f64const 0x0.0 v1 = iconst.i64 0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = iconst.i32 0 v4 = f32const 0x0.0 return v0, v1, v2, v3, v4 } function %call_f64_i64_b1_i32_f32() { - fn0 = %foo() -> f64,i64,b1,i32,f32 + fn0 = %foo() -> f64,i64,i8,i32,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_i64_b1_f32_i32() -> f64, i64, b1, f32, i32 { +function %return_f64_i64_b1_f32_i32() -> f64, i64, i8, f32, i32 { block0: v0 = f64const 0x0.0 v1 = iconst.i64 0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = f32const 0x0.0 v4 = iconst.i32 0 return v0, v1, v2, v3, v4 } function %call_f64_i64_b1_f32_i32() { - fn0 = %foo() -> f64,i64,b1,f32,i32 + fn0 = %foo() -> f64,i64,i8,f32,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_f32_i32_i64_b1() -> f64, f32, i32, i64, b1 { +function %return_f64_f32_i32_i64_i8() -> f64, f32, i32, i64, i8 { block0: v0 = f64const 0x0.0 v1 = f32const 0x0.0 v2 = iconst.i32 0 v3 = iconst.i64 0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_f64_f32_i32_i64_b1() { - 
fn0 = %foo() -> f64,f32,i32,i64,b1 +function %call_f64_f32_i32_i64_i8() { + fn0 = %foo() -> f64,f32,i32,i64,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_f32_i32_b1_i64() -> f64, f32, i32, b1, i64 { +function %return_f64_f32_i32_b1_i64() -> f64, f32, i32, i8, i64 { block0: v0 = f64const 0x0.0 v1 = f32const 0x0.0 v2 = iconst.i32 0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = iconst.i64 0 return v0, v1, v2, v3, v4 } function %call_f64_f32_i32_b1_i64() { - fn0 = %foo() -> f64,f32,i32,b1,i64 + fn0 = %foo() -> f64,f32,i32,i8,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_f32_i64_i32_b1() -> f64, f32, i64, i32, b1 { +function %return_f64_f32_i64_i32_i8() -> f64, f32, i64, i32, i8 { block0: v0 = f64const 0x0.0 v1 = f32const 0x0.0 v2 = iconst.i64 0 v3 = iconst.i32 0 - v4 = bconst.b1 true + v4 = iconst.i8 1 return v0, v1, v2, v3, v4 } -function %call_f64_f32_i64_i32_b1() { - fn0 = %foo() -> f64,f32,i64,i32,b1 +function %call_f64_f32_i64_i32_i8() { + fn0 = %foo() -> f64,f32,i64,i32,i8 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_f32_i64_b1_i32() -> f64, f32, i64, b1, i32 { +function %return_f64_f32_i64_b1_i32() -> f64, f32, i64, i8, i32 { block0: v0 = f64const 0x0.0 v1 = f32const 0x0.0 v2 = iconst.i64 0 - v3 = bconst.b1 true + v3 = iconst.i8 1 v4 = iconst.i32 0 return v0, v1, v2, v3, v4 } function %call_f64_f32_i64_b1_i32() { - fn0 = %foo() -> f64,f32,i64,b1,i32 + fn0 = %foo() -> f64,f32,i64,i8,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_f32_b1_i32_i64() -> f64, f32, b1, i32, i64 { +function %return_f64_f32_b1_i32_i64() -> f64, f32, i8, i32, i64 { block0: v0 = f64const 0x0.0 v1 = f32const 0x0.0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = iconst.i32 0 v4 = iconst.i64 0 return v0, v1, v2, v3, v4 } function %call_f64_f32_b1_i32_i64() { - fn0 = %foo() -> f64,f32,b1,i32,i64 + fn0 = %foo() -> f64,f32,i8,i32,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_f32_b1_i64_i32() -> f64, f32, b1, i64, i32 { +function %return_f64_f32_b1_i64_i32() -> f64, f32, i8, i64, i32 { block0: v0 = f64const 0x0.0 v1 = f32const 0x0.0 - v2 = bconst.b1 true + v2 = iconst.i8 1 v3 = iconst.i64 0 v4 = iconst.i32 0 return v0, v1, v2, v3, v4 } function %call_f64_f32_b1_i64_i32() { - fn0 = %foo() -> f64,f32,b1,i64,i32 + fn0 = %foo() -> f64,f32,i8,i64,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_b1_i32_i64_f32() -> f64, b1, i32, i64, f32 { +function %return_f64_b1_i32_i64_f32() -> f64, i8, i32, i64, f32 { block0: v0 = f64const 0x0.0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = iconst.i32 0 v3 = iconst.i64 0 v4 = f32const 0x0.0 @@ -1599,16 +1599,16 @@ block0: } function %call_f64_b1_i32_i64_f32() { - fn0 = %foo() -> f64,b1,i32,i64,f32 + fn0 = %foo() -> f64,i8,i32,i64,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_b1_i32_f32_i64() -> f64, b1, i32, f32, i64 { +function %return_f64_b1_i32_f32_i64() -> f64, i8, i32, f32, i64 { block0: v0 = f64const 0x0.0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = iconst.i32 0 v3 = f32const 0x0.0 v4 = iconst.i64 0 @@ -1616,16 +1616,16 @@ block0: } function %call_f64_b1_i32_f32_i64() { - fn0 = %foo() -> f64,b1,i32,f32,i64 + fn0 = %foo() -> f64,i8,i32,f32,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_b1_i64_i32_f32() -> f64, b1, i64, i32, f32 { +function %return_f64_b1_i64_i32_f32() -> f64, i8, i64, i32, f32 { block0: v0 = f64const 0x0.0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = iconst.i64 0 v3 = iconst.i32 0 
v4 = f32const 0x0.0 @@ -1633,16 +1633,16 @@ block0: } function %call_f64_b1_i64_i32_f32() { - fn0 = %foo() -> f64,b1,i64,i32,f32 + fn0 = %foo() -> f64,i8,i64,i32,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_b1_i64_f32_i32() -> f64, b1, i64, f32, i32 { +function %return_f64_b1_i64_f32_i32() -> f64, i8, i64, f32, i32 { block0: v0 = f64const 0x0.0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = iconst.i64 0 v3 = f32const 0x0.0 v4 = iconst.i32 0 @@ -1650,16 +1650,16 @@ block0: } function %call_f64_b1_i64_f32_i32() { - fn0 = %foo() -> f64,b1,i64,f32,i32 + fn0 = %foo() -> f64,i8,i64,f32,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_b1_f32_i32_i64() -> f64, b1, f32, i32, i64 { +function %return_f64_b1_f32_i32_i64() -> f64, i8, f32, i32, i64 { block0: v0 = f64const 0x0.0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = f32const 0x0.0 v3 = iconst.i32 0 v4 = iconst.i64 0 @@ -1667,16 +1667,16 @@ block0: } function %call_f64_b1_f32_i32_i64() { - fn0 = %foo() -> f64,b1,f32,i32,i64 + fn0 = %foo() -> f64,i8,f32,i32,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_f64_b1_f32_i64_i32() -> f64, b1, f32, i64, i32 { +function %return_f64_b1_f32_i64_i32() -> f64, i8, f32, i64, i32 { block0: v0 = f64const 0x0.0 - v1 = bconst.b1 true + v1 = iconst.i8 1 v2 = f32const 0x0.0 v3 = iconst.i64 0 v4 = iconst.i32 0 @@ -1684,15 +1684,15 @@ block0: } function %call_f64_b1_f32_i64_i32() { - fn0 = %foo() -> f64,b1,f32,i64,i32 + fn0 = %foo() -> f64,i8,f32,i64,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_i32_i64_f32_f64() -> b1, i32, i64, f32, f64 { +function %return_b1_i32_i64_f32_f64() -> i8, i32, i64, f32, f64 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = iconst.i32 0 v2 = iconst.i64 0 v3 = f32const 0x0.0 @@ -1701,15 +1701,15 @@ block0: } function %call_b1_i32_i64_f32_f64() { - fn0 = %foo() -> b1,i32,i64,f32,f64 + fn0 = %foo() -> i8,i32,i64,f32,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_i32_i64_f64_f32() -> b1, i32, i64, f64, f32 { +function %return_b1_i32_i64_f64_f32() -> i8, i32, i64, f64, f32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = iconst.i32 0 v2 = iconst.i64 0 v3 = f64const 0x0.0 @@ -1718,15 +1718,15 @@ block0: } function %call_b1_i32_i64_f64_f32() { - fn0 = %foo() -> b1,i32,i64,f64,f32 + fn0 = %foo() -> i8,i32,i64,f64,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_i32_f32_i64_f64() -> b1, i32, f32, i64, f64 { +function %return_b1_i32_f32_i64_f64() -> i8, i32, f32, i64, f64 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = iconst.i32 0 v2 = f32const 0x0.0 v3 = iconst.i64 0 @@ -1735,15 +1735,15 @@ block0: } function %call_b1_i32_f32_i64_f64() { - fn0 = %foo() -> b1,i32,f32,i64,f64 + fn0 = %foo() -> i8,i32,f32,i64,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_i32_f32_f64_i64() -> b1, i32, f32, f64, i64 { +function %return_b1_i32_f32_f64_i64() -> i8, i32, f32, f64, i64 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = iconst.i32 0 v2 = f32const 0x0.0 v3 = f64const 0x0.0 @@ -1752,15 +1752,15 @@ block0: } function %call_b1_i32_f32_f64_i64() { - fn0 = %foo() -> b1,i32,f32,f64,i64 + fn0 = %foo() -> i8,i32,f32,f64,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_i32_f64_i64_f32() -> b1, i32, f64, i64, f32 { +function %return_b1_i32_f64_i64_f32() -> i8, i32, f64, i64, f32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = iconst.i32 0 v2 = f64const 0x0.0 v3 = iconst.i64 0 @@ -1769,15 +1769,15 @@ block0: 
} function %call_b1_i32_f64_i64_f32() { - fn0 = %foo() -> b1,i32,f64,i64,f32 + fn0 = %foo() -> i8,i32,f64,i64,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_i32_f64_f32_i64() -> b1, i32, f64, f32, i64 { +function %return_b1_i32_f64_f32_i64() -> i8, i32, f64, f32, i64 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = iconst.i32 0 v2 = f64const 0x0.0 v3 = f32const 0x0.0 @@ -1786,15 +1786,15 @@ block0: } function %call_b1_i32_f64_f32_i64() { - fn0 = %foo() -> b1,i32,f64,f32,i64 + fn0 = %foo() -> i8,i32,f64,f32,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_i64_i32_f32_f64() -> b1, i64, i32, f32, f64 { +function %return_b1_i64_i32_f32_f64() -> i8, i64, i32, f32, f64 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = iconst.i64 0 v2 = iconst.i32 0 v3 = f32const 0x0.0 @@ -1803,15 +1803,15 @@ block0: } function %call_b1_i64_i32_f32_f64() { - fn0 = %foo() -> b1,i64,i32,f32,f64 + fn0 = %foo() -> i8,i64,i32,f32,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_i64_i32_f64_f32() -> b1, i64, i32, f64, f32 { +function %return_b1_i64_i32_f64_f32() -> i8, i64, i32, f64, f32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = iconst.i64 0 v2 = iconst.i32 0 v3 = f64const 0x0.0 @@ -1820,15 +1820,15 @@ block0: } function %call_b1_i64_i32_f64_f32() { - fn0 = %foo() -> b1,i64,i32,f64,f32 + fn0 = %foo() -> i8,i64,i32,f64,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_i64_f32_i32_f64() -> b1, i64, f32, i32, f64 { +function %return_b1_i64_f32_i32_f64() -> i8, i64, f32, i32, f64 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = iconst.i64 0 v2 = f32const 0x0.0 v3 = iconst.i32 0 @@ -1837,15 +1837,15 @@ block0: } function %call_b1_i64_f32_i32_f64() { - fn0 = %foo() -> b1,i64,f32,i32,f64 + fn0 = %foo() -> i8,i64,f32,i32,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_i64_f32_f64_i32() -> b1, i64, f32, f64, i32 { +function %return_b1_i64_f32_f64_i32() -> i8, i64, f32, f64, i32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = iconst.i64 0 v2 = f32const 0x0.0 v3 = f64const 0x0.0 @@ -1854,15 +1854,15 @@ block0: } function %call_b1_i64_f32_f64_i32() { - fn0 = %foo() -> b1,i64,f32,f64,i32 + fn0 = %foo() -> i8,i64,f32,f64,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_i64_f64_i32_f32() -> b1, i64, f64, i32, f32 { +function %return_b1_i64_f64_i32_f32() -> i8, i64, f64, i32, f32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = iconst.i64 0 v2 = f64const 0x0.0 v3 = iconst.i32 0 @@ -1871,15 +1871,15 @@ block0: } function %call_b1_i64_f64_i32_f32() { - fn0 = %foo() -> b1,i64,f64,i32,f32 + fn0 = %foo() -> i8,i64,f64,i32,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_i64_f64_f32_i32() -> b1, i64, f64, f32, i32 { +function %return_b1_i64_f64_f32_i32() -> i8, i64, f64, f32, i32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = iconst.i64 0 v2 = f64const 0x0.0 v3 = f32const 0x0.0 @@ -1888,15 +1888,15 @@ block0: } function %call_b1_i64_f64_f32_i32() { - fn0 = %foo() -> b1,i64,f64,f32,i32 + fn0 = %foo() -> i8,i64,f64,f32,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_f32_i32_i64_f64() -> b1, f32, i32, i64, f64 { +function %return_b1_f32_i32_i64_f64() -> i8, f32, i32, i64, f64 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = f32const 0x0.0 v2 = iconst.i32 0 v3 = iconst.i64 0 @@ -1905,15 +1905,15 @@ block0: } function %call_b1_f32_i32_i64_f64() { - fn0 = %foo() -> b1,f32,i32,i64,f64 + fn0 = %foo() -> 
i8,f32,i32,i64,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_f32_i32_f64_i64() -> b1, f32, i32, f64, i64 { +function %return_b1_f32_i32_f64_i64() -> i8, f32, i32, f64, i64 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = f32const 0x0.0 v2 = iconst.i32 0 v3 = f64const 0x0.0 @@ -1922,15 +1922,15 @@ block0: } function %call_b1_f32_i32_f64_i64() { - fn0 = %foo() -> b1,f32,i32,f64,i64 + fn0 = %foo() -> i8,f32,i32,f64,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_f32_i64_i32_f64() -> b1, f32, i64, i32, f64 { +function %return_b1_f32_i64_i32_f64() -> i8, f32, i64, i32, f64 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = f32const 0x0.0 v2 = iconst.i64 0 v3 = iconst.i32 0 @@ -1939,15 +1939,15 @@ block0: } function %call_b1_f32_i64_i32_f64() { - fn0 = %foo() -> b1,f32,i64,i32,f64 + fn0 = %foo() -> i8,f32,i64,i32,f64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_f32_i64_f64_i32() -> b1, f32, i64, f64, i32 { +function %return_b1_f32_i64_f64_i32() -> i8, f32, i64, f64, i32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = f32const 0x0.0 v2 = iconst.i64 0 v3 = f64const 0x0.0 @@ -1956,15 +1956,15 @@ block0: } function %call_b1_f32_i64_f64_i32() { - fn0 = %foo() -> b1,f32,i64,f64,i32 + fn0 = %foo() -> i8,f32,i64,f64,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_f32_f64_i32_i64() -> b1, f32, f64, i32, i64 { +function %return_b1_f32_f64_i32_i64() -> i8, f32, f64, i32, i64 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = f32const 0x0.0 v2 = f64const 0x0.0 v3 = iconst.i32 0 @@ -1973,15 +1973,15 @@ block0: } function %call_b1_f32_f64_i32_i64() { - fn0 = %foo() -> b1,f32,f64,i32,i64 + fn0 = %foo() -> i8,f32,f64,i32,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_f32_f64_i64_i32() -> b1, f32, f64, i64, i32 { +function %return_b1_f32_f64_i64_i32() -> i8, f32, f64, i64, i32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = f32const 0x0.0 v2 = f64const 0x0.0 v3 = iconst.i64 0 @@ -1990,15 +1990,15 @@ block0: } function %call_b1_f32_f64_i64_i32() { - fn0 = %foo() -> b1,f32,f64,i64,i32 + fn0 = %foo() -> i8,f32,f64,i64,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_f64_i32_i64_f32() -> b1, f64, i32, i64, f32 { +function %return_b1_f64_i32_i64_f32() -> i8, f64, i32, i64, f32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = f64const 0x0.0 v2 = iconst.i32 0 v3 = iconst.i64 0 @@ -2007,15 +2007,15 @@ block0: } function %call_b1_f64_i32_i64_f32() { - fn0 = %foo() -> b1,f64,i32,i64,f32 + fn0 = %foo() -> i8,f64,i32,i64,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_f64_i32_f32_i64() -> b1, f64, i32, f32, i64 { +function %return_b1_f64_i32_f32_i64() -> i8, f64, i32, f32, i64 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = f64const 0x0.0 v2 = iconst.i32 0 v3 = f32const 0x0.0 @@ -2024,15 +2024,15 @@ block0: } function %call_b1_f64_i32_f32_i64() { - fn0 = %foo() -> b1,f64,i32,f32,i64 + fn0 = %foo() -> i8,f64,i32,f32,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_f64_i64_i32_f32() -> b1, f64, i64, i32, f32 { +function %return_b1_f64_i64_i32_f32() -> i8, f64, i64, i32, f32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = f64const 0x0.0 v2 = iconst.i64 0 v3 = iconst.i32 0 @@ -2041,15 +2041,15 @@ block0: } function %call_b1_f64_i64_i32_f32() { - fn0 = %foo() -> b1,f64,i64,i32,f32 + fn0 = %foo() -> i8,f64,i64,i32,f32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_f64_i64_f32_i32() 
-> b1, f64, i64, f32, i32 { +function %return_b1_f64_i64_f32_i32() -> i8, f64, i64, f32, i32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = f64const 0x0.0 v2 = iconst.i64 0 v3 = f32const 0x0.0 @@ -2058,15 +2058,15 @@ block0: } function %call_b1_f64_i64_f32_i32() { - fn0 = %foo() -> b1,f64,i64,f32,i32 + fn0 = %foo() -> i8,f64,i64,f32,i32 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_f64_f32_i32_i64() -> b1, f64, f32, i32, i64 { +function %return_b1_f64_f32_i32_i64() -> i8, f64, f32, i32, i64 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = f64const 0x0.0 v2 = f32const 0x0.0 v3 = iconst.i32 0 @@ -2075,15 +2075,15 @@ block0: } function %call_b1_f64_f32_i32_i64() { - fn0 = %foo() -> b1,f64,f32,i32,i64 + fn0 = %foo() -> i8,f64,f32,i32,i64 block0: v0,v1,v2,v3,v4 = call fn0() return } -function %return_b1_f64_f32_i64_i32() -> b1, f64, f32, i64, i32 { +function %return_b1_f64_f32_i64_i32() -> i8, f64, f32, i64, i32 { block0: - v0 = bconst.b1 true + v0 = iconst.i8 1 v1 = f64const 0x0.0 v2 = f32const 0x0.0 v3 = iconst.i64 0 @@ -2092,7 +2092,7 @@ block0: } function %call_b1_f64_f32_i64_i32() { - fn0 = %foo() -> b1,f64,f32,i64,i32 + fn0 = %foo() -> i8,f64,f32,i64,i32 block0: v0,v1,v2,v3,v4 = call fn0() return diff --git a/cranelift/tests/bugpoint_consts.clif b/cranelift/tests/bugpoint_consts.clif index e136c7982ca7..449b53ebbe9b 100644 --- a/cranelift/tests/bugpoint_consts.clif +++ b/cranelift/tests/bugpoint_consts.clif @@ -2,13 +2,13 @@ test compile target x86_64 function u0:0() { - sig0 = (f32, f64, i8, i16, i32, i64, i128, b1, b8, b128, r32, r64, b8x16, i16x4, f32x16) + sig0 = (f32, f64, i8, i16, i32, i64, i128, i8, i8, i128, r32, r64, i8x16, i16x4, f32x16) fn0 = u0:1 sig0 block0: trap user0 -block1(v0: f32, v1: f64, v2: i8, v3: i16, v4: i32, v5: i64, v6: i128, v7: b1, v8: b8, v9: b128, v10: r32, v11: r64, v12: b8x16, v13: i16x4, v14: f32x16): +block1(v0: f32, v1: f64, v2: i8, v3: i16, v4: i32, v5: i64, v6: i128, v7: i8, v8: i8, v9: i128, v10: r32, v11: r64, v12: i8x16, v13: i16x4, v14: f32x16): call fn0(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) trap user0 } diff --git a/cranelift/tests/bugpoint_consts_expected.clif b/cranelift/tests/bugpoint_consts_expected.clif index cd5b0de2f151..9a8fba3cec52 100644 --- a/cranelift/tests/bugpoint_consts_expected.clif +++ b/cranelift/tests/bugpoint_consts_expected.clif @@ -1,5 +1,5 @@ function u0:0() fast { - sig0 = (f32, f64, i8, i16, i32, i64, i128, b1, b8, b128, r32, r64, b8x16, i16x4, f32x16) fast + sig0 = (f32, f64, i8, i16, i32, i64, i128, i8, i8, i128, r32, r64, i8x16, i16x4, f32x16) fast fn0 = u0:1 sig0 const0 = 0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 const1 = 0x0000000000000000 @@ -13,14 +13,14 @@ block1: v4 = iconst.i32 0 v5 = iconst.i64 0 v6 = iconst.i128 0 - v7 = bconst.b1 false - v8 = bconst.b8 false - v9 = bconst.b128 false + v7 = iconst.i8 0 + v8 = iconst.i8 0 + v9 = iconst.i128 0 v10 = null.r32 v11 = null.r64 - v12 = vconst.b8x16 const2 + v12 = vconst.i8x16 const2 v13 = vconst.i16x4 const1 v14 = vconst.f32x16 const0 - call fn0(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) ; v0 = 0.0, v1 = 0.0, v2 = 0, v3 = 0, v4 = 0, v5 = 0, v6 = 0, v7 = false, v8 = false, v9 = false, v12 = const2, v13 = const1, v14 = const0 + call fn0(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) ; v0 = 0.0, v1 = 0.0, v2 = 0, v3 = 0, v4 = 0, v5 = 0, v6 = 0, v7 = 0, v8 = 0, v9 = 0, v12 = const2, 
v13 = const1, v14 = const0 trap user0 } diff --git a/cranelift/tests/bugpoint_test.clif b/cranelift/tests/bugpoint_test.clif index ced5b9e80998..e3509f97056c 100644 --- a/cranelift/tests/bugpoint_test.clif +++ b/cranelift/tests/bugpoint_test.clif @@ -418,11 +418,9 @@ block1: v114 = load.i64 v113 v115 = iconst.i64 0 v116 = icmp ugt v114, v115 - v117 = bint.i8 v116 - v118 = uextend.i32 v117 + v118 = uextend.i32 v116 v119 = icmp_imm eq v118, 0 - v120 = bint.i8 v119 - v121 = uextend.i32 v120 + v121 = uextend.i32 v119 brz v121, block3 jump block2 @@ -436,11 +434,9 @@ block3: v126 = load.i64 v125 v127 = iconst.i64 0 v128 = icmp ugt v126, v127 - v129 = bint.i8 v128 - v130 = uextend.i32 v129 + v130 = uextend.i32 v128 v131 = icmp_imm eq v130, 0 - v132 = bint.i8 v131 - v133 = uextend.i32 v132 + v133 = uextend.i32 v131 brz v133, block5 jump block4 @@ -454,11 +450,9 @@ block5: v138 = load.i64 v137+42 v139 = iconst.i64 0 v140 = icmp ugt v138, v139 - v141 = bint.i8 v140 - v142 = uextend.i32 v141 + v142 = uextend.i32 v140 v143 = icmp_imm eq v142, 0 - v144 = bint.i8 v143 - v145 = uextend.i32 v144 + v145 = uextend.i32 v143 brz v145, block7 jump block6 @@ -482,8 +476,7 @@ block9: v153 = load.i8 v6 v154 = uextend.i32 v153 v155 = icmp_imm eq v154, 0 - v156 = bint.i8 v155 - v157 = uextend.i32 v156 + v157 = uextend.i32 v155 brz v157, block11 jump block10 @@ -507,8 +500,7 @@ block13: v165 = load.i8 v8 v166 = uextend.i32 v165 v167 = icmp_imm eq v166, 0 - v168 = bint.i8 v167 - v169 = uextend.i32 v168 + v169 = uextend.i32 v167 brz v169, block15 jump block14 @@ -527,11 +519,9 @@ block16: v175 = iconst.i64 17 v176 = load.i64 v10 v177 = icmp uge v176, v175 - v178 = bint.i8 v177 - v179 = uextend.i32 v178 + v179 = uextend.i32 v177 v180 = icmp_imm eq v179, 0 - v181 = bint.i8 v180 - v182 = uextend.i32 v181 + v182 = uextend.i32 v180 brz v182, block18 jump block17 @@ -584,11 +574,9 @@ block163: block20: v212 = load.i64 v13 v214 = icmp.i64 ult v213, v212 - v215 = bint.i8 v214 - v216 = uextend.i32 v215 + v216 = uextend.i32 v214 v217 = icmp_imm eq v216, 0 - v218 = bint.i8 v217 - v219 = uextend.i32 v218 + v219 = uextend.i32 v217 brz v219, block22 jump block21 @@ -781,11 +769,9 @@ block36: v330 = load.i16 v327 v331 = load.i16 v329 v332 = icmp eq v330, v331 - v333 = bint.i8 v332 - v334 = uextend.i32 v333 + v334 = uextend.i32 v332 v335 = icmp_imm eq v334, 0 - v336 = bint.i8 v335 - v337 = uextend.i32 v336 + v337 = uextend.i32 v335 brz v337, block38 jump block37 @@ -848,11 +834,9 @@ block43: v370 = load.i16 v367 v371 = load.i16 v369 v372 = icmp eq v370, v371 - v373 = bint.i8 v372 - v374 = uextend.i32 v373 + v374 = uextend.i32 v372 v375 = icmp_imm eq v374, 0 - v376 = bint.i8 v375 - v377 = uextend.i32 v376 + v377 = uextend.i32 v375 brz v377, block45 jump block44 @@ -949,8 +933,7 @@ block51: v435 -> v429 v430 = iconst.i16 0xffff_ffff_ffff_8000 v431 = icmp eq v429, v430 - v432 = bint.i8 v431 - v433 = uextend.i32 v432 + v433 = uextend.i32 v431 brz v433, block52 jump block154 @@ -1172,8 +1155,7 @@ block62(v552: i32, v1009: i64, v1013: i64, v1016: i64, v1019: i64, v1022: i16, v v560 -> v553 v554 = iconst.i32 0 v555 = icmp eq v553, v554 - v556 = bint.i8 v555 - v557 = uextend.i32 v556 + v557 = uextend.i32 v555 brz v557, block63 jump block145 @@ -1188,8 +1170,7 @@ block63: v570 -> v563 v564 = iconst.i32 0 v565 = icmp eq v563, v564 - v566 = bint.i8 v565 - v567 = uextend.i32 v566 + v567 = uextend.i32 v565 brz v567, block64 jump block144 @@ -1210,11 +1191,9 @@ block64: block65: v575 = iconst.i32 10 v576 = icmp.i32 ult v574, v575 - v577 = bint.i8 
v576 - v578 = uextend.i32 v577 + v578 = uextend.i32 v576 v579 = icmp_imm eq v578, 0 - v580 = bint.i8 v579 - v581 = uextend.i32 v580 + v581 = uextend.i32 v579 brz v581, block67 jump block66 @@ -1248,8 +1227,7 @@ block69: v597 = load.i64 v3 v598 = load.i64 v3+8 v599 = icmp.i64 ult v596, v598 - v600 = bint.i8 v599 - v601 = uextend.i32 v600 + v601 = uextend.i32 v599 brnz v601, block70 jump block142 @@ -1326,8 +1304,7 @@ block73: v675 -> v647 v692 -> v647 v649 = icmp ult v647, v648 - v650 = bint.i8 v649 - v651 = uextend.i32 v650 + v651 = uextend.i32 v649 brz v651, block80 jump block74 @@ -1396,8 +1373,7 @@ block79: block80: v697 = uextend.i64 v696 v698 = icmp.i64 ugt v695, v697 - v699 = bint.i8 v698 - v700 = uextend.i32 v699 + v700 = uextend.i32 v698 brz v700, block96 jump block81 @@ -1418,11 +1394,9 @@ block82: v708 = load.i32 v705 v709 = load.i32 v707 v710 = icmp eq v708, v709 - v711 = bint.i8 v710 - v712 = uextend.i32 v711 + v712 = uextend.i32 v710 v713 = icmp_imm eq v712, 0 - v714 = bint.i8 v713 - v715 = uextend.i32 v714 + v715 = uextend.i32 v713 brz v715, block84 jump block83 @@ -1484,11 +1458,9 @@ block89: v747 = load.i16 v744 v748 = load.i16 v746 v749 = icmp eq v747, v748 - v750 = bint.i8 v749 - v751 = uextend.i32 v750 + v751 = uextend.i32 v749 v752 = icmp_imm eq v751, 0 - v753 = bint.i8 v752 - v754 = uextend.i32 v753 + v754 = uextend.i32 v752 brz v754, block91 jump block90 @@ -1560,8 +1532,7 @@ block97: v794 = iconst.i32 10 v795 = iconst.i32 0 v796 = icmp eq v794, v795 - v797 = bint.i8 v796 - v798 = uextend.i32 v797 + v798 = uextend.i32 v796 brz v798, block98 jump block135 @@ -1734,11 +1705,9 @@ block105: block106: v887 = iconst.i64 10 v888 = icmp.i64 ult v886, v887 - v889 = bint.i8 v888 - v890 = uextend.i32 v889 + v890 = uextend.i32 v888 v891 = icmp_imm eq v890, 0 - v892 = bint.i8 v891 - v893 = uextend.i32 v892 + v893 = uextend.i32 v891 brz v893, block108 jump block107 @@ -1772,8 +1741,7 @@ block110: v909 = load.i64 v3 v910 = load.i64 v3+8 v911 = icmp.i64 ult v908, v910 - v912 = bint.i8 v911 - v913 = uextend.i32 v912 + v913 = uextend.i32 v911 brnz v913, block111 jump block127 @@ -1809,8 +1777,7 @@ block112: v954 -> v933 v1047 -> v933 v936 = icmp.i64 ult v934, v935 - v937 = bint.i8 v936 - v938 = uextend.i32 v937 + v938 = uextend.i32 v936 brz v938, block119 jump block113 From 6ab09428a5c0d4101016db1234ca553e4ed49e70 Mon Sep 17 00:00:00 2001 From: Trevor Elliott Date: Wed, 12 Oct 2022 16:48:59 -0700 Subject: [PATCH 04/12] Review feedback --- .../filetests/isa/x64/{b1.clif => conditional-values.clif} | 0 cranelift/reader/src/parser.rs | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename cranelift/filetests/filetests/isa/x64/{b1.clif => conditional-values.clif} (100%) diff --git a/cranelift/filetests/filetests/isa/x64/b1.clif b/cranelift/filetests/filetests/isa/x64/conditional-values.clif similarity index 100% rename from cranelift/filetests/filetests/isa/x64/b1.clif rename to cranelift/filetests/filetests/isa/x64/conditional-values.clif diff --git a/cranelift/reader/src/parser.rs b/cranelift/reader/src/parser.rs index a4946c5462b6..bde38802d98d 100644 --- a/cranelift/reader/src/parser.rs +++ b/cranelift/reader/src/parser.rs @@ -2542,7 +2542,7 @@ impl<'a> Parser<'a> { { // To match the existing run behavior that does not require an explicit // invocation, we create an invocation from a function like `() -> i*` and - // compare it to not `false`. + // require the result to be non-zero. 
let invocation = Invocation::new("default", vec![]); let expected = vec![DataValue::I8(0)]; let comparison = Comparison::NotEquals; From 07d30278d0fc65f2aea6f8579085aa4f2a7b031a Mon Sep 17 00:00:00 2001 From: Trevor Elliott Date: Wed, 12 Oct 2022 18:49:47 -0700 Subject: [PATCH 05/12] Improve the `select` translation with i8 arguments on aarch64 --- cranelift/codegen/src/isa/aarch64/lower.isle | 10 ++++++++-- .../filetests/filetests/isa/aarch64/condops.clif | 6 ++---- cranelift/filetests/filetests/runtests/br.clif | 2 ++ cranelift/filetests/filetests/runtests/select.clif | 12 ++++++++++++ 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/cranelift/codegen/src/isa/aarch64/lower.isle b/cranelift/codegen/src/isa/aarch64/lower.isle index 32ea9ed357f6..89df5e822c57 100644 --- a/cranelift/codegen/src/isa/aarch64/lower.isle +++ b/cranelift/codegen/src/isa/aarch64/lower.isle @@ -1737,13 +1737,19 @@ (fpu_cmp (scalar_size in_ty) x y) cond ty rn rm))) -(rule -1 (lower (has_type ty (select rcond @ (value_type (fits_in_32 _)) rn rm))) +(rule -1 (lower (has_type ty (select rcond @ (value_type $I8) rn rm))) + (let ((rcond Reg rcond)) + (lower_select + (tst_imm $I32 rcond (u64_into_imm_logic $I32 255)) + (Cond.Ne) ty rn rm))) + +(rule -2 (lower (has_type ty (select rcond @ (value_type (fits_in_32 _)) rn rm))) (let ((rcond Reg (put_in_reg_zext32 rcond))) (lower_select (cmp (OperandSize.Size32) rcond (zero_reg)) (Cond.Ne) ty rn rm))) -(rule -2 (lower (has_type ty (select rcond rn rm))) +(rule -3 (lower (has_type ty (select rcond rn rm))) (let ((rcond Reg (put_in_reg_zext64 rcond))) (lower_select (cmp (OperandSize.Size64) rcond (zero_reg)) diff --git a/cranelift/filetests/filetests/isa/aarch64/condops.clif b/cranelift/filetests/filetests/isa/aarch64/condops.clif index 1ffc8c58db15..89f47af8cf79 100644 --- a/cranelift/filetests/filetests/isa/aarch64/condops.clif +++ b/cranelift/filetests/filetests/isa/aarch64/condops.clif @@ -770,8 +770,7 @@ block0(v0: i8, v1: i8, v2: i8): } ; block0: -; uxtb w5, w0 -; subs wzr, w5, wzr +; ands wzr, w0, #255 ; csel x0, x1, x2, ne ; ret @@ -795,8 +794,7 @@ block0(v0: i8, v1: i128, v2: i128): } ; block0: -; uxtb w8, w0 -; subs wzr, w8, wzr +; ands wzr, w0, #255 ; csel x0, x2, x4, ne ; csel x1, x3, x5, ne ; ret diff --git a/cranelift/filetests/filetests/runtests/br.clif b/cranelift/filetests/filetests/runtests/br.clif index 4fcb360f96b6..0160d98057ca 100644 --- a/cranelift/filetests/filetests/runtests/br.clif +++ b/cranelift/filetests/filetests/runtests/br.clif @@ -36,6 +36,7 @@ block2: ; run: %brz_i64(0) == 1 ; run: %brz_i64(1) == 0 ; run: %brz_i64(-1) == 0 +; run: %brz_i64(97) == 0 function %brz_i32(i32) -> i8 { block0(v0: i32): @@ -156,3 +157,4 @@ block2: ; run: %brnz_i8(0) == 0 ; run: %brnz_i8(1) == 1 ; run: %brnz_i8(-1) == 1 +; run: %brnz_i8(97) == 1 diff --git a/cranelift/filetests/filetests/runtests/select.clif b/cranelift/filetests/filetests/runtests/select.clif index c83e8408e80c..3df1cd70bc36 100644 --- a/cranelift/filetests/filetests/runtests/select.clif +++ b/cranelift/filetests/filetests/runtests/select.clif @@ -17,6 +17,18 @@ block0(v0: f32, v1: f32): ; run: %select_eq_f32(0x42.42, 0.0) == 0 ; run: %select_eq_f32(0x42.42, NaN) == 0 +function %select_i8(i8) -> i32 { +block0(v0: i8): + v1 = iconst.i32 42 + v2 = iconst.i32 97 + v3 = select v0, v1, v2 + return v3 +} +; run: %select_i8(0) == 97 +; run: %select_i8(1) == 42 +; run: %select_i8(2) == 42 +; run: %select_i8(-1) == 42 + function %select_ne_f64(f64, f64) -> i32 { block0(v0: f64, v1: f64): v2 = fcmp 
ne v0, v1 From 6de56b050e57e6974c52e3f174fde22adf69d177 Mon Sep 17 00:00:00 2001 From: Trevor Elliott Date: Wed, 12 Oct 2022 18:50:40 -0700 Subject: [PATCH 06/12] Fix the select implementation on x86_64 --- cranelift/codegen/src/isa/x64/lower.isle | 9 +-------- .../filetests/filetests/isa/x64/conditional-values.clif | 2 +- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/cranelift/codegen/src/isa/x64/lower.isle b/cranelift/codegen/src/isa/x64/lower.isle index 0873c0f20c6d..2f6b304da43f 100644 --- a/cranelift/codegen/src/isa/x64/lower.isle +++ b/cranelift/codegen/src/isa/x64/lower.isle @@ -1650,14 +1650,7 @@ ;; Finally, we lower `select` from a condition value `c`. These rules are meant ;; to be the final, default lowerings if no other patterns matched above. -(rule -1 (lower (has_type ty (select c @ (value_type $I8) x y))) - (let ((size OperandSize (raw_operand_size_of_type $I8)) - ;; N.B.: disallow load-op fusion, see above. TODO: - ;; https://github.com/bytecodealliance/wasmtime/issues/3953. - (gpr_c Gpr (put_in_gpr c))) - (with_flags (x64_test size (RegMemImm.Imm 1) gpr_c) (cmove_from_values ty (CC.NZ) x y)))) - -(rule -2 (lower (has_type ty (select c @ (value_type (fits_in_64 a_ty)) x y))) +(rule -1 (lower (has_type ty (select c @ (value_type (fits_in_64 a_ty)) x y))) (let ((size OperandSize (raw_operand_size_of_type a_ty)) ;; N.B.: disallow load-op fusion, see above. TODO: ;; https://github.com/bytecodealliance/wasmtime/issues/3953. diff --git a/cranelift/filetests/filetests/isa/x64/conditional-values.clif b/cranelift/filetests/filetests/isa/x64/conditional-values.clif index 4b0c1af26516..b3eb4840abdf 100644 --- a/cranelift/filetests/filetests/isa/x64/conditional-values.clif +++ b/cranelift/filetests/filetests/isa/x64/conditional-values.clif @@ -10,7 +10,7 @@ block0(v0: i8, v1: i32, v2: i32): ; pushq %rbp ; movq %rsp, %rbp ; block0: -; testb $1, %dil +; testb %dil, %dil ; movq %rdx, %rax ; cmovnzl %esi, %eax, %eax ; movq %rbp, %rsp From 00127d39a01d3c01dfca50f8cbbd3cefddce51ef Mon Sep 17 00:00:00 2001 From: Trevor Elliott Date: Thu, 13 Oct 2022 09:25:52 -0700 Subject: [PATCH 07/12] Simplify the translation of bmask on s390x Co-authored-by: Ulrich Weigand --- cranelift/codegen/src/isa/s390x/inst.isle | 52 ++------ cranelift/codegen/src/isa/s390x/lower.isle | 4 +- .../filetests/isa/s390x/conversions.clif | 113 +++++++++--------- 3 files changed, 71 insertions(+), 98 deletions(-) diff --git a/cranelift/codegen/src/isa/s390x/inst.isle b/cranelift/codegen/src/isa/s390x/inst.isle index f6e4611a026a..8bd725f87800 100644 --- a/cranelift/codegen/src/isa/s390x/inst.isle +++ b/cranelift/codegen/src/isa/s390x/inst.isle @@ -3474,15 +3474,19 @@ (_ Unit (emit_consumer (emit_cmov_imm ty dst cond imm_true)))) dst)) -;; Lower a boolean condition to a boolean type. The value used to represent -;; "true" is -1 for all result types except for $I8, which uses 1. +;; Lower a boolean condition to the values 1/0. This rule is only used in the +;; context of instructions that return $I8 results. (decl lower_bool (Type ProducesBool) Reg) (rule (lower_bool $I8 cond) (select_bool_imm $I8 cond 1 0)) -;; TODO: do we need these cases anymore if B8..B128 are missing? -(rule (lower_bool $I16 cond) (select_bool_imm $I16 cond -1 0)) -(rule (lower_bool $I32 cond) (select_bool_imm $I32 cond -1 0)) -(rule (lower_bool $I64 cond) (select_bool_imm $I64 cond -1 0)) +;; Lower a boolean condition to the values -1/0. 
+(decl lower_bool_to_mask (Type ProducesBool) Reg) +(rule 0 (lower_bool_to_mask (fits_in_64 ty) producer) + (select_bool_imm ty producer -1 0)) + +(rule 1 (lower_bool_to_mask $I128 producer) + (let ((res Reg (lower_bool_to_mask $I64 producer))) + (mov_to_vec128 $I128 res res))) ;; Emit a conditional branch based on a boolean condition. (decl cond_br_bool (ProducesBool MachLabel MachLabel) SideEffectNoResult) @@ -5096,42 +5100,6 @@ (decl vec_fcmphes (Type Reg Reg) ProducesFlags) (rule (vec_fcmphes (ty_vec128 ty) x y) (vec_float_cmps ty (vecop_float_cmphe ty) x y)) -;; Helpers for bmask lowering ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(decl lower_bmask_type (Type) Type) - -(rule 1 (lower_bmask_type (fits_in_32 ty)) $I32) -(rule 0 (lower_bmask_type _) $I64) - -;; Lower a bmask call, given the output and input types. -(decl lower_bmask (Type Type Reg) Reg) - -(rule - 0 - (lower_bmask (fits_in_64 oty) (fits_in_64 ity) input) - (let ((cmp ProducesFlags (icmps_simm16 (lower_bmask_type ity) input 0)) - (op Cond (intcc_as_cond (IntCC.NotEqual)))) - (select_bool_imm oty (bool cmp op) -1 0))) - -(rule - 1 - (lower_bmask $I128 (fits_in_64 ity) input) - (let ((res Reg (lower_bmask $I64 ity input))) - (mov_to_vec128 $I128 res res))) - -(rule - 2 - (lower_bmask (fits_in_64 oty) $I128 input) - (let ((lo Reg (vec_extract_lane $I64X2 input 0 (zero_reg))) - (hi Reg (vec_extract_lane $I64X2 input 1 (zero_reg))) - (combined Reg (or_reg $I64 lo hi))) - (lower_bmask oty $I64 combined))) - -(rule - 3 - (lower_bmask $I128 $I128 input) - (let ((res Reg (lower_bmask $I64 $I128 input))) - (mov_to_vec128 $I128 res res))) ;; Implicit conversions ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; diff --git a/cranelift/codegen/src/isa/s390x/lower.isle b/cranelift/codegen/src/isa/s390x/lower.isle index dec3dac839b6..e541efb21fc9 100644 --- a/cranelift/codegen/src/isa/s390x/lower.isle +++ b/cranelift/codegen/src/isa/s390x/lower.isle @@ -1157,8 +1157,8 @@ ;;;; Rules for `bmask` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -(rule (lower (has_type oty (bmask x @ (value_type ity)))) - (lower_bmask oty ity x)) +(rule (lower (has_type ty (bmask x))) + (lower_bool_to_mask ty (value_nonzero x))) ;;;; Rules for `bitrev` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; diff --git a/cranelift/filetests/filetests/isa/s390x/conversions.clif b/cranelift/filetests/filetests/isa/s390x/conversions.clif index 0498db26760b..2159294f5b19 100644 --- a/cranelift/filetests/filetests/isa/s390x/conversions.clif +++ b/cranelift/filetests/filetests/isa/s390x/conversions.clif @@ -332,14 +332,12 @@ block0(v0: i128): ; block0: ; vl %v0, 0(%r3) -; lgdr %r3, %f0 -; vlgvg %r5, %v0, 1 -; ogr %r3, %r5 -; cghi %r3, 0 +; vgbm %v5, 0 +; vceqgs %v7, %v0, %v5 ; lghi %r3, 0 -; locghilh %r3, -1 -; vlvgp %v23, %r3, %r3 -; vst %v23, 0(%r2) +; locghine %r3, -1 +; vlvgp %v20, %r3, %r3 +; vst %v20, 0(%r2) ; br %r14 function %bmask_i128_i64(i128) -> i64 { @@ -350,12 +348,10 @@ block0(v0: i128): ; block0: ; vl %v0, 0(%r2) -; lgdr %r5, %f0 -; vlgvg %r3, %v0, 1 -; ogr %r5, %r3 -; cghi %r5, 0 +; vgbm %v3, 0 +; vceqgs %v5, %v0, %v3 ; lghi %r2, 0 -; locghilh %r2, -1 +; locghine %r2, -1 ; br %r14 function %bmask_i128_i32(i128) -> i32 { @@ -366,12 +362,10 @@ block0(v0: i128): ; block0: ; vl %v0, 0(%r2) -; lgdr %r5, %f0 -; vlgvg %r3, %v0, 1 -; ogr %r5, %r3 -; cghi %r5, 0 +; vgbm %v3, 0 +; vceqgs %v5, %v0, %v3 ; lhi %r2, 0 -; lochilh %r2, -1 +; lochine %r2, -1 ; br %r14 function %bmask_i128_i16(i128) -> i16 { @@ -382,12 +376,10 @@ 
block0(v0: i128): ; block0: ; vl %v0, 0(%r2) -; lgdr %r5, %f0 -; vlgvg %r3, %v0, 1 -; ogr %r5, %r3 -; cghi %r5, 0 +; vgbm %v3, 0 +; vceqgs %v5, %v0, %v3 ; lhi %r2, 0 -; lochilh %r2, -1 +; lochine %r2, -1 ; br %r14 function %bmask_i128_i8(i128) -> i8 { @@ -398,12 +390,10 @@ block0(v0: i128): ; block0: ; vl %v0, 0(%r2) -; lgdr %r5, %f0 -; vlgvg %r3, %v0, 1 -; ogr %r5, %r3 -; cghi %r5, 0 +; vgbm %v3, 0 +; vceqgs %v5, %v0, %v3 ; lhi %r2, 0 -; lochilh %r2, -1 +; lochine %r2, -1 ; br %r14 function %bmask_i64_i128(i64, i64) -> i128 { @@ -537,11 +527,12 @@ block0(v0: i16, v1: i16): } ; block0: -; chi %r4, 0 -; lghi %r4, 0 -; locghilh %r4, -1 -; vlvgp %v17, %r4, %r4 -; vst %v17, 0(%r2) +; lhr %r3, %r4 +; chi %r3, 0 +; lghi %r3, 0 +; locghilh %r3, -1 +; vlvgp %v19, %r3, %r3 +; vst %v19, 0(%r2) ; br %r14 function %bmask_i16_i64(i16, i16) -> i64 { @@ -551,7 +542,8 @@ block0(v0: i16, v1: i16): } ; block0: -; chi %r3, 0 +; lhr %r2, %r3 +; chi %r2, 0 ; lghi %r2, 0 ; locghilh %r2, -1 ; br %r14 @@ -563,7 +555,8 @@ block0(v0: i16, v1: i16): } ; block0: -; chi %r3, 0 +; lhr %r2, %r3 +; chi %r2, 0 ; lhi %r2, 0 ; lochilh %r2, -1 ; br %r14 @@ -575,7 +568,8 @@ block0(v0: i16, v1: i16): } ; block0: -; chi %r3, 0 +; lhr %r2, %r3 +; chi %r2, 0 ; lhi %r2, 0 ; lochilh %r2, -1 ; br %r14 @@ -587,7 +581,8 @@ block0(v0: i16, v1: i16): } ; block0: -; chi %r3, 0 +; lhr %r2, %r3 +; chi %r2, 0 ; lhi %r2, 0 ; lochilh %r2, -1 ; br %r14 @@ -599,11 +594,12 @@ block0(v0: i8, v1: i8): } ; block0: -; chi %r4, 0 -; lghi %r4, 0 -; locghilh %r4, -1 -; vlvgp %v17, %r4, %r4 -; vst %v17, 0(%r2) +; lbr %r3, %r4 +; chi %r3, 0 +; lghi %r3, 0 +; locghilh %r3, -1 +; vlvgp %v19, %r3, %r3 +; vst %v19, 0(%r2) ; br %r14 function %bmask_i8_i64(i8, i8) -> i64 { @@ -613,7 +609,8 @@ block0(v0: i8, v1: i8): } ; block0: -; chi %r3, 0 +; lbr %r2, %r3 +; chi %r2, 0 ; lghi %r2, 0 ; locghilh %r2, -1 ; br %r14 @@ -625,7 +622,8 @@ block0(v0: i8, v1: i8): } ; block0: -; chi %r3, 0 +; lbr %r2, %r3 +; chi %r2, 0 ; lhi %r2, 0 ; lochilh %r2, -1 ; br %r14 @@ -637,7 +635,8 @@ block0(v0: i8, v1: i8): } ; block0: -; chi %r3, 0 +; lbr %r2, %r3 +; chi %r2, 0 ; lhi %r2, 0 ; lochilh %r2, -1 ; br %r14 @@ -649,7 +648,8 @@ block0(v0: i8, v1: i8): } ; block0: -; chi %r3, 0 +; lbr %r2, %r3 +; chi %r2, 0 ; lhi %r2, 0 ; lochilh %r2, -1 ; br %r14 @@ -661,11 +661,12 @@ block0(v0: i8, v1: i8): } ; block0: -; chi %r4, 0 -; lghi %r4, 0 -; locghilh %r4, -1 -; vlvgp %v17, %r4, %r4 -; vst %v17, 0(%r2) +; lbr %r3, %r4 +; chi %r3, 0 +; lghi %r3, 0 +; locghilh %r3, -1 +; vlvgp %v19, %r3, %r3 +; vst %v19, 0(%r2) ; br %r14 function %bmask_i8_i64(i8, i8) -> i64 { @@ -675,7 +676,8 @@ block0(v0: i8, v1: i8): } ; block0: -; chi %r3, 0 +; lbr %r2, %r3 +; chi %r2, 0 ; lghi %r2, 0 ; locghilh %r2, -1 ; br %r14 @@ -687,7 +689,8 @@ block0(v0: i8, v1: i8): } ; block0: -; chi %r3, 0 +; lbr %r2, %r3 +; chi %r2, 0 ; lhi %r2, 0 ; lochilh %r2, -1 ; br %r14 @@ -699,7 +702,8 @@ block0(v0: i8, v1: i8): } ; block0: -; chi %r3, 0 +; lbr %r2, %r3 +; chi %r2, 0 ; lhi %r2, 0 ; lochilh %r2, -1 ; br %r14 @@ -711,7 +715,8 @@ block0(v0: i8, v1: i8): } ; block0: -; chi %r3, 0 +; lbr %r2, %r3 +; chi %r2, 0 ; lhi %r2, 0 ; lochilh %r2, -1 ; br %r14 From 1549b287722c40824d9f203896da6219eda49ff4 Mon Sep 17 00:00:00 2001 From: Trevor Elliott Date: Thu, 13 Oct 2022 09:27:25 -0700 Subject: [PATCH 08/12] Update cranelift/codegen/src/isa/aarch64/inst.isle Co-authored-by: Afonso Bordado --- cranelift/codegen/src/isa/aarch64/inst.isle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/cranelift/codegen/src/isa/aarch64/inst.isle b/cranelift/codegen/src/isa/aarch64/inst.isle index 52aa391983f9..9f96eceed333 100644 --- a/cranelift/codegen/src/isa/aarch64/inst.isle +++ b/cranelift/codegen/src/isa/aarch64/inst.isle @@ -3458,7 +3458,7 @@ (decl lower_bmask (Type Type ValueRegs) ValueRegs) -;; For conversions that fit in a regsiter, we can use csetm. +;; For conversions that fit in a register, we can use csetm. ;; ;; cmp val, #0 ;; csetm res, ne From a94916084ba179f388be533077f0d34bd88f03f4 Mon Sep 17 00:00:00 2001 From: Trevor Elliott Date: Thu, 13 Oct 2022 10:10:36 -0700 Subject: [PATCH 09/12] Fix overflow behavior with brz Co-authored-by: Chris Fallin --- cranelift/codegen/src/isa/x64/lower.isle | 2 -- .../filetests/isa/x64/conditional-values.clif | 4 ++-- .../filetests/filetests/isa/x64/i128.clif | 2 +- .../filetests/filetests/runtests/br.clif | 22 +++++++++++++++++++ 4 files changed, 25 insertions(+), 5 deletions(-) diff --git a/cranelift/codegen/src/isa/x64/lower.isle b/cranelift/codegen/src/isa/x64/lower.isle index 2f6b304da43f..12a0834275b1 100644 --- a/cranelift/codegen/src/isa/x64/lower.isle +++ b/cranelift/codegen/src/isa/x64/lower.isle @@ -2823,8 +2823,6 @@ (decl cmp_zero_int_bool_ref (Value) ProducesFlags) -(rule 1 (cmp_zero_int_bool_ref val @ (value_type $I8)) - (x64_test (OperandSize.Size8) (RegMemImm.Imm 1) val)) (rule (cmp_zero_int_bool_ref val @ (value_type ty)) (let ((size OperandSize (raw_operand_size_of_type ty)) (src Gpr val)) diff --git a/cranelift/filetests/filetests/isa/x64/conditional-values.clif b/cranelift/filetests/filetests/isa/x64/conditional-values.clif index b3eb4840abdf..f90d19a65212 100644 --- a/cranelift/filetests/filetests/isa/x64/conditional-values.clif +++ b/cranelift/filetests/filetests/isa/x64/conditional-values.clif @@ -32,7 +32,7 @@ block2: ; pushq %rbp ; movq %rsp, %rbp ; block0: -; testb $1, %dil +; testb %dil, %dil ; jnz label1; j label2 ; block1: ; movl $1, %eax @@ -60,7 +60,7 @@ block2: ; pushq %rbp ; movq %rsp, %rbp ; block0: -; testb $1, %dil +; testb %dil, %dil ; jz label1; j label2 ; block1: ; movl $1, %eax diff --git a/cranelift/filetests/filetests/isa/x64/i128.clif b/cranelift/filetests/filetests/isa/x64/i128.clif index 0d4afcd6f76b..a46c80c7bf74 100644 --- a/cranelift/filetests/filetests/isa/x64/i128.clif +++ b/cranelift/filetests/filetests/isa/x64/i128.clif @@ -674,7 +674,7 @@ block2(v6: i128): ; pushq %rbp ; movq %rsp, %rbp ; block0: -; testb $1, %dl +; testb %dl, %dl ; jnz label1; j label2 ; block1: ; xorq %rax, %rax, %rax diff --git a/cranelift/filetests/filetests/runtests/br.clif b/cranelift/filetests/filetests/runtests/br.clif index 0160d98057ca..8db16577327a 100644 --- a/cranelift/filetests/filetests/runtests/br.clif +++ b/cranelift/filetests/filetests/runtests/br.clif @@ -38,6 +38,28 @@ block2: ; run: %brz_i64(-1) == 0 ; run: %brz_i64(97) == 0 +function %brz_i8_overflow(i8) -> i8 { +block0(v0: i8): + v1 = iconst.i8 255 + v2 = iadd.i8 v0, v1 + brz v2, block2 + jump block1 + +block1: + v3 = iconst.i8 1 + return v3 + +block2: + v4 = iconst.i8 0 + return v4 +} + +; run: %brz_i8_overflow(0) == 1 +; run: %brz_i8_overflow(1) == 0 +; run: %brz_i8_overflow(2) == 1 +; run: %brz_i8_overflow(98) == 1 +; run: %brz_i8_overflow(97) == 1 + function %brz_i32(i32) -> i8 { block0(v0: i32): brz v0, block1 From 558b9a20487f8e129c07996b28b6bdb728b8c918 Mon Sep 17 00:00:00 2001 From: Trevor Elliott Date: Thu, 13 Oct 2022 15:07:25 -0700 Subject: [PATCH 10/12] Fix inconsistencies with brz, brnz, and select on riscv64 --- 
cranelift/codegen/src/isa/riscv64/inst.isle | 19 ++++++- .../codegen/src/isa/riscv64/inst/emit.rs | 3 +- .../src/isa/riscv64/inst/unwind/systemv.rs | 2 +- cranelift/codegen/src/isa/riscv64/lower.isle | 4 +- .../codegen/src/isa/riscv64/lower/isle.rs | 6 ++- .../filetests/isa/riscv64/condbr.clif | 50 +++++++++++-------- .../filetests/isa/riscv64/condops.clif | 17 ++++--- .../filetests/isa/riscv64/heap-addr.clif | 38 +++++++------- .../filetests/isa/riscv64/reftypes.clif | 31 ++++++------ .../filetests/filetests/runtests/select.clif | 15 ++++++ 10 files changed, 117 insertions(+), 68 deletions(-) diff --git a/cranelift/codegen/src/isa/riscv64/inst.isle b/cranelift/codegen/src/isa/riscv64/inst.isle index 6c179b1748ec..2a16c404e294 100644 --- a/cranelift/codegen/src/isa/riscv64/inst.isle +++ b/cranelift/codegen/src/isa/riscv64/inst.isle @@ -1896,14 +1896,29 @@ (decl lower_brz_or_nz (IntCC ValueRegs VecMachLabel Type) InstOutput) (extern constructor lower_brz_or_nz lower_brz_or_nz) +;; Normalize a value by masking to its bit-size. +(decl normalize_value (Type ValueRegs) ValueRegs) + +(rule (normalize_value $I8 r) + (value_reg (alu_rr_imm12 (AluOPRRI.Andi) r (imm12_const 255)))) +(rule (normalize_value $I16 r) + (value_reg (alu_rrr (AluOPRRR.And) r (imm $I16 65535)))) +(rule (normalize_value $I32 r) + (value_reg (alu_rr_imm12 (AluOPRRI.Andi) r (imm12_const -1)))) + +(rule (normalize_value $I64 r) r) +(rule (normalize_value $I128 r) r) +(rule (normalize_value $F32 r) r) +(rule (normalize_value $F64 r) r) + ;;;;; (rule (lower_branch (brz v @ (value_type ty) _ _) targets) - (lower_brz_or_nz (IntCC.Equal) v targets ty)) + (lower_brz_or_nz (IntCC.Equal) (normalize_value ty v) targets ty)) ;;;; (rule (lower_branch (brnz v @ (value_type ty) _ _) targets) - (lower_brz_or_nz (IntCC.NotEqual) v targets ty)) + (lower_brz_or_nz (IntCC.NotEqual) (normalize_value ty v) targets ty)) ;;; (rule diff --git a/cranelift/codegen/src/isa/riscv64/inst/emit.rs b/cranelift/codegen/src/isa/riscv64/inst/emit.rs index e3385ead4dec..4895d443c030 100644 --- a/cranelift/codegen/src/isa/riscv64/inst/emit.rs +++ b/cranelift/codegen/src/isa/riscv64/inst/emit.rs @@ -1039,9 +1039,8 @@ impl MachInstEmit for Inst { &Inst::CondBr { taken, not_taken, - kind, + mut kind, } => { - let mut kind = kind; kind.rs1 = allocs.next(kind.rs1); kind.rs2 = allocs.next(kind.rs2); match taken { diff --git a/cranelift/codegen/src/isa/riscv64/inst/unwind/systemv.rs b/cranelift/codegen/src/isa/riscv64/inst/unwind/systemv.rs index 45851c8aa9d6..5e965df295b8 100644 --- a/cranelift/codegen/src/isa/riscv64/inst/unwind/systemv.rs +++ b/cranelift/codegen/src/isa/riscv64/inst/unwind/systemv.rs @@ -143,7 +143,7 @@ mod tests { assert_eq!( format!("{:?}", fde), - "FrameDescriptionEntry { address: Constant(4321), length: 12, lsda: None, instructions: [] }" + "FrameDescriptionEntry { address: Constant(4321), length: 16, lsda: None, instructions: [] }" ); } diff --git a/cranelift/codegen/src/isa/riscv64/lower.isle b/cranelift/codegen/src/isa/riscv64/lower.isle index 4f5bb1a114ee..24f2a95e8e3d 100644 --- a/cranelift/codegen/src/isa/riscv64/lower.isle +++ b/cranelift/codegen/src/isa/riscv64/lower.isle @@ -601,8 +601,8 @@ ;;;;; Rules for `select`;;;;;;;;; (rule - (lower (has_type ty (select c x y))) - (gen_select ty c x y) + (lower (has_type ty (select c @ (value_type cty) x y))) + (gen_select ty (normalize_value cty c) x y) ) ;;;;; Rules for `bitselect`;;;;;;;;; diff --git a/cranelift/codegen/src/isa/riscv64/lower/isle.rs 
b/cranelift/codegen/src/isa/riscv64/lower/isle.rs index ff2b52f6ba8c..0d3794209901 100644 --- a/cranelift/codegen/src/isa/riscv64/lower/isle.rs +++ b/cranelift/codegen/src/isa/riscv64/lower/isle.rs @@ -270,7 +270,11 @@ impl generated_code::Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> rd.to_reg() } fn imm12_const(&mut self, val: i32) -> Imm12 { - Imm12::maybe_from_u64(val as u64).unwrap() + if let Some(res) = Imm12::maybe_from_u64(val as u64) { + res + } else { + panic!("Unable to make an Imm12 value from {}", val) + } } fn imm12_const_add(&mut self, val: i32, add: i32) -> Imm12 { Imm12::maybe_from_u64((val + add) as u64).unwrap() diff --git a/cranelift/filetests/filetests/isa/riscv64/condbr.clif b/cranelift/filetests/filetests/isa/riscv64/condbr.clif index 2d547c39edbb..07444763f237 100644 --- a/cranelift/filetests/filetests/isa/riscv64/condbr.clif +++ b/cranelift/filetests/filetests/isa/riscv64/condbr.clif @@ -209,8 +209,9 @@ block1: } ; block0: -; eq a2,[a0,a1],[a2,a3]##ty=i128 -; bne a2,zero,taken(label1),not_taken(label2) +; eq a3,[a0,a1],[a2,a3]##ty=i128 +; andi a3,a3,255 +; bne a3,zero,taken(label1),not_taken(label2) ; block1: ; j label3 ; block2: @@ -228,8 +229,9 @@ block1: } ; block0: -; ne a2,[a0,a1],[a2,a3]##ty=i128 -; bne a2,zero,taken(label1),not_taken(label2) +; ne a3,[a0,a1],[a2,a3]##ty=i128 +; andi a3,a3,255 +; bne a3,zero,taken(label1),not_taken(label2) ; block1: ; j label3 ; block2: @@ -247,8 +249,9 @@ block1: } ; block0: -; slt a2,[a0,a1],[a2,a3]##ty=i128 -; bne a2,zero,taken(label1),not_taken(label2) +; slt a3,[a0,a1],[a2,a3]##ty=i128 +; andi a3,a3,255 +; bne a3,zero,taken(label1),not_taken(label2) ; block1: ; j label3 ; block2: @@ -266,8 +269,9 @@ block1: } ; block0: -; ult a2,[a0,a1],[a2,a3]##ty=i128 -; bne a2,zero,taken(label1),not_taken(label2) +; ult a3,[a0,a1],[a2,a3]##ty=i128 +; andi a3,a3,255 +; bne a3,zero,taken(label1),not_taken(label2) ; block1: ; j label3 ; block2: @@ -285,8 +289,9 @@ block1: } ; block0: -; sle a2,[a0,a1],[a2,a3]##ty=i128 -; bne a2,zero,taken(label1),not_taken(label2) +; sle a3,[a0,a1],[a2,a3]##ty=i128 +; andi a3,a3,255 +; bne a3,zero,taken(label1),not_taken(label2) ; block1: ; j label3 ; block2: @@ -304,8 +309,9 @@ block1: } ; block0: -; ule a2,[a0,a1],[a2,a3]##ty=i128 -; bne a2,zero,taken(label1),not_taken(label2) +; ule a3,[a0,a1],[a2,a3]##ty=i128 +; andi a3,a3,255 +; bne a3,zero,taken(label1),not_taken(label2) ; block1: ; j label3 ; block2: @@ -323,8 +329,9 @@ block1: } ; block0: -; sgt a2,[a0,a1],[a2,a3]##ty=i128 -; bne a2,zero,taken(label1),not_taken(label2) +; sgt a3,[a0,a1],[a2,a3]##ty=i128 +; andi a3,a3,255 +; bne a3,zero,taken(label1),not_taken(label2) ; block1: ; j label3 ; block2: @@ -342,8 +349,9 @@ block1: } ; block0: -; ugt a2,[a0,a1],[a2,a3]##ty=i128 -; bne a2,zero,taken(label1),not_taken(label2) +; ugt a3,[a0,a1],[a2,a3]##ty=i128 +; andi a3,a3,255 +; bne a3,zero,taken(label1),not_taken(label2) ; block1: ; j label3 ; block2: @@ -361,8 +369,9 @@ block1: } ; block0: -; sge a2,[a0,a1],[a2,a3]##ty=i128 -; bne a2,zero,taken(label1),not_taken(label2) +; sge a3,[a0,a1],[a2,a3]##ty=i128 +; andi a3,a3,255 +; bne a3,zero,taken(label1),not_taken(label2) ; block1: ; j label3 ; block2: @@ -380,8 +389,9 @@ block1: } ; block0: -; uge a2,[a0,a1],[a2,a3]##ty=i128 -; bne a2,zero,taken(label1),not_taken(label2) +; uge a3,[a0,a1],[a2,a3]##ty=i128 +; andi a3,a3,255 +; bne a3,zero,taken(label1),not_taken(label2) ; block1: ; j label3 ; block2: diff --git a/cranelift/filetests/filetests/isa/riscv64/condops.clif 
b/cranelift/filetests/filetests/isa/riscv64/condops.clif index eecac50e3859..8e6ec0492c3e 100644 --- a/cranelift/filetests/filetests/isa/riscv64/condops.clif +++ b/cranelift/filetests/filetests/isa/riscv64/condops.clif @@ -55,7 +55,8 @@ block0(v0: i8, v1: i8, v2: i8): } ; block0: -; select_i8 a0,a1,a2##condition=a0 +; andi a3,a0,255 +; select_i8 a0,a1,a2##condition=a3 ; ret function %i(i32, i8, i8) -> i8 { @@ -67,11 +68,12 @@ block0(v0: i32, v1: i8, v2: i8): } ; block0: -; li a3,42 -; uext.w a5,a0 -; uext.w a7,a3 -; eq t4,a5,a7##ty=i32 -; select_i8 a0,a1,a2##condition=t4 +; li a4,42 +; uext.w a6,a0 +; uext.w t3,a4 +; eq t0,a6,t3##ty=i32 +; andi a6,t0,255 +; select_i8 a0,a1,a2##condition=a6 ; ret function %i128_select(i8, i128, i128) -> i128 { @@ -81,6 +83,7 @@ block0(v0: i8, v1: i128, v2: i128): } ; block0: -; select_i128 [a0,a1],[a1,a2],[a3,a4]##condition=a0 +; andi a5,a0,255 +; select_i128 [a0,a1],[a1,a2],[a3,a4]##condition=a5 ; ret diff --git a/cranelift/filetests/filetests/isa/riscv64/heap-addr.clif b/cranelift/filetests/filetests/isa/riscv64/heap-addr.clif index 9f884f06447c..818e929e4a3a 100644 --- a/cranelift/filetests/filetests/isa/riscv64/heap-addr.clif +++ b/cranelift/filetests/filetests/isa/riscv64/heap-addr.clif @@ -13,16 +13,17 @@ block0(v0: i64, v1: i32): } ; block0: -; uext.w t3,a1 -; ld t4,0(a0) -; addi t4,t4,0 -; ugt t0,t3,t4##ty=i64 -; beq t0,zero,taken(label1),not_taken(label2) +; uext.w t4,a1 +; ld t0,0(a0) +; addi t0,t0,0 +; ugt t1,t4,t0##ty=i64 +; andi t1,t1,255 +; beq t1,zero,taken(label1),not_taken(label2) ; block1: -; add t0,a0,t3 -; ugt t3,t3,t4##ty=i64 -; li t1,0 -; selectif_spectre_guard a0,t1,t0##test=t3 +; add t1,a0,t4 +; ugt t4,t4,t0##ty=i64 +; li t2,0 +; selectif_spectre_guard a0,t2,t1##test=t4 ; ret ; block2: ; udf##trap_code=heap_oob @@ -37,16 +38,17 @@ block0(v0: i64, v1: i32): } ; block0: -; uext.w t3,a1 -; lui a7,16 -; ugt t4,t3,a7##ty=i64 -; beq t4,zero,taken(label1),not_taken(label2) +; uext.w t4,a1 +; lui t3,16 +; ugt t0,t4,t3##ty=i64 +; andi t0,t0,255 +; beq t0,zero,taken(label1),not_taken(label2) ; block1: -; add t4,a0,t3 -; lui a7,16 -; ugt t0,t3,a7##ty=i64 -; li t1,0 -; selectif_spectre_guard a0,t1,t4##test=t0 +; add t0,a0,t4 +; lui t3,16 +; ugt t1,t4,t3##ty=i64 +; li t2,0 +; selectif_spectre_guard a0,t2,t0##test=t1 ; ret ; block2: ; udf##trap_code=heap_oob diff --git a/cranelift/filetests/filetests/isa/riscv64/reftypes.clif b/cranelift/filetests/filetests/isa/riscv64/reftypes.clif index c497aec11513..d3f903a57b4e 100644 --- a/cranelift/filetests/filetests/isa/riscv64/reftypes.clif +++ b/cranelift/filetests/filetests/isa/riscv64/reftypes.clif @@ -65,37 +65,38 @@ block3(v7: r64, v8: r64): ; sd ra,8(sp) ; sd fp,0(sp) ; mv fp,sp -; sd s9,-8(sp) +; sd s10,-8(sp) ; add sp,-48 ; block0: ; sd a0,8(nominal_sp) ; sd a1,16(nominal_sp) -; mv s9,a2 -; load_sym a3,%f+0 -; callind a3 -; load_addr a2,nsp+0 -; ld t1,8(nominal_sp) -; sd t1,0(a2) -; beq a0,zero,taken(label1),not_taken(label3) +; mv s10,a2 +; load_sym a4,%f+0 +; callind a4 +; load_addr a3,nsp+0 +; ld t2,8(nominal_sp) +; sd t2,0(a3) +; andi a4,a0,255 +; beq a4,zero,taken(label1),not_taken(label3) ; block1: ; j label2 ; block2: -; mv a1,t1 +; mv a1,t2 ; ld a0,16(nominal_sp) ; j label5 ; block3: ; j label4 ; block4: -; mv a0,t1 +; mv a0,t2 ; ld a1,16(nominal_sp) ; j label5 ; block5: -; load_addr a4,nsp+0 -; ld a4,0(a4) -; mv a2,s9 -; sd a4,0(a2) +; load_addr a5,nsp+0 +; ld a5,0(a5) +; mv a2,s10 +; sd a5,0(a2) ; add sp,+48 -; ld s9,-8(sp) +; ld s10,-8(sp) ; ld ra,8(sp) ; ld fp,0(sp) ; add sp,+16 diff 
--git a/cranelift/filetests/filetests/runtests/select.clif b/cranelift/filetests/filetests/runtests/select.clif index 3df1cd70bc36..a6987b4461cf 100644 --- a/cranelift/filetests/filetests/runtests/select.clif +++ b/cranelift/filetests/filetests/runtests/select.clif @@ -93,3 +93,18 @@ block0(v0: f32, v1: f32): ; run: %select_uno_f32(0x0.0, 0x42.42) == 0 ; run: %select_uno_f32(0x0.0, NaN) == 1 ; run: %select_uno_f32(-NaN, 0x42.42) == 1 + +function %select_overflow(i8) -> i8 { +block0(v0: i8): + v1 = iconst.i8 255 + v2 = iadd v0, v1 + v3 = iconst.i8 1 + v4 = iconst.i8 0 + v5 = select v2, v3, v4 + return v5 +} + +; run: %select_overflow(0) == 1 +; run: %select_overflow(2) == 1 +; run: %select_overflow(1) == 0 +; run: %select_overflow(98) == 1 From 76f210f257a86bd870918b35720b10967086d631 Mon Sep 17 00:00:00 2001 From: Trevor Elliott Date: Mon, 17 Oct 2022 14:07:30 -0700 Subject: [PATCH 11/12] Add some more overflow tests --- .../filetests/filetests/runtests/br.clif | 45 ++++++++++++++++++- .../filetests/filetests/runtests/select.clif | 40 ++++++++++++++--- 2 files changed, 79 insertions(+), 6 deletions(-) diff --git a/cranelift/filetests/filetests/runtests/br.clif b/cranelift/filetests/filetests/runtests/br.clif index 8db16577327a..4ed99f4e5285 100644 --- a/cranelift/filetests/filetests/runtests/br.clif +++ b/cranelift/filetests/filetests/runtests/br.clif @@ -60,6 +60,50 @@ block2: ; run: %brz_i8_overflow(98) == 1 ; run: %brz_i8_overflow(97) == 1 +function %brz_i16_overflow(i16) -> i8 { +block0(v0: i16): + v1 = iconst.i16 65535 + v2 = iadd v0, v1 + brz v2, block2 + jump block1 + +block1: + v3 = iconst.i8 1 + return v3 + +block2: + v4 = iconst.i8 0 + return v4 +} + +; run: %brz_i16_overflow(0) == 1 +; run: %brz_i16_overflow(1) == 0 +; run: %brz_i16_overflow(2) == 1 +; run: %brz_i16_overflow(98) == 1 +; run: %brz_i16_overflow(97) == 1 + +function %brz_i32_overflow(i32) -> i8 { +block0(v0: i32): + v1 = iconst.i32 4294967295 + v2 = iadd v0, v1 + brz v2, block2 + jump block1 + +block1: + v3 = iconst.i8 1 + return v3 + +block2: + v4 = iconst.i8 0 + return v4 +} + +; run: %brz_i32_overflow(0) == 1 +; run: %brz_i32_overflow(1) == 0 +; run: %brz_i32_overflow(2) == 1 +; run: %brz_i32_overflow(98) == 1 +; run: %brz_i32_overflow(97) == 1 + function %brz_i32(i32) -> i8 { block0(v0: i32): brz v0, block1 @@ -94,7 +138,6 @@ block2: ; run: %brz_i16(1) == 0 ; run: %brz_i16(-1) == 0 - function %brz_i8(i8) -> i8 { block0(v1: i8): brz v1, block1 diff --git a/cranelift/filetests/filetests/runtests/select.clif b/cranelift/filetests/filetests/runtests/select.clif index a6987b4461cf..fc5cfc8e873a 100644 --- a/cranelift/filetests/filetests/runtests/select.clif +++ b/cranelift/filetests/filetests/runtests/select.clif @@ -94,7 +94,7 @@ block0(v0: f32, v1: f32): ; run: %select_uno_f32(0x0.0, NaN) == 1 ; run: %select_uno_f32(-NaN, 0x42.42) == 1 -function %select_overflow(i8) -> i8 { +function %select_overflow_i8(i8) -> i8 { block0(v0: i8): v1 = iconst.i8 255 v2 = iadd v0, v1 @@ -104,7 +104,37 @@ block0(v0: i8): return v5 } -; run: %select_overflow(0) == 1 -; run: %select_overflow(2) == 1 -; run: %select_overflow(1) == 0 -; run: %select_overflow(98) == 1 +; run: %select_overflow_i8(0) == 1 +; run: %select_overflow_i8(2) == 1 +; run: %select_overflow_i8(1) == 0 +; run: %select_overflow_i8(98) == 1 + +function %select_overflow_i16(i16) -> i8 { +block0(v0: i16): + v1 = iconst.i16 65535 + v2 = iadd v0, v1 + v3 = iconst.i8 1 + v4 = iconst.i8 0 + v5 = select v2, v3, v4 + return v5 +} + +; run: %select_overflow_i16(0) == 1 +; run: 
%select_overflow_i16(2) == 1 +; run: %select_overflow_i16(1) == 0 +; run: %select_overflow_i16(98) == 1 + +function %select_overflow_i32(i32) -> i8 { +block0(v0: i32): + v1 = iconst.i32 4294967295 + v2 = iadd v0, v1 + v3 = iconst.i8 1 + v4 = iconst.i8 0 + v5 = select v2, v3, v4 + return v5 +} + +; run: %select_overflow_i32(0) == 1 +; run: %select_overflow_i32(2) == 1 +; run: %select_overflow_i32(1) == 0 +; run: %select_overflow_i32(98) == 1 From fb4986336b63239fe3abc563ed50f5ee7023eee2 Mon Sep 17 00:00:00 2001 From: Trevor Elliott Date: Mon, 17 Oct 2022 14:24:38 -0700 Subject: [PATCH 12/12] Fix some instruction docs --- cranelift/codegen/meta/src/shared/instructions.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cranelift/codegen/meta/src/shared/instructions.rs b/cranelift/codegen/meta/src/shared/instructions.rs index ebeaf9d646a9..4ff9b678bbf7 100644 --- a/cranelift/codegen/meta/src/shared/instructions.rs +++ b/cranelift/codegen/meta/src/shared/instructions.rs @@ -52,8 +52,7 @@ fn define_control_flow( r#" Branch when zero. - If ``c`` is a `b1` value, take the branch when ``c`` is false. If - ``c`` is an integer value, take the branch when ``c = 0``. + Take the branch when ``c = 0``. "#, &formats.branch, ) @@ -67,8 +66,7 @@ fn define_control_flow( r#" Branch when non-zero. - If ``c`` is a `b1` value, take the branch when ``c`` is true. If - ``c`` is an integer value, take the branch when ``c != 0``. + Take the branch when ``c != 0``. "#, &formats.branch, )
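
As an illustrative sketch of the convention this series lands on (not part of any patch above; the function name is made up for the example): scalar comparisons now produce an i8 holding 1 or 0, and brz/brnz branch purely on whether the integer value is zero, so the old bconst/bint idiom reduces to plain integer code:

function %icmp_to_branch(i32, i32) -> i8 {
block0(v0: i32, v1: i32):
    ;; icmp now yields an i8 that is 1 when the comparison holds and 0 otherwise;
    ;; no bint is needed before branching on it or returning it as an integer.
    v2 = icmp eq v0, v1
    brnz v2, block1
    jump block2

block1:
    v3 = iconst.i8 1
    return v3

block2:
    v4 = iconst.i8 0
    return v4
}
; run: %icmp_to_branch(7, 7) == 1
; run: %icmp_to_branch(7, 8) == 0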