* Manually rename BasicBlock to BlockPredecessor. BasicBlock is a pair of (Ebb, Inst) used to represent the basic-block subcomponent of an Ebb that is a predecessor of another Ebb. Eventually we will be able to remove this struct, but for now it makes sense to give it a non-conflicting name so that we can start transitioning Ebb to represent a basic block. Comments that refer to BasicBlock have not been updated, since BlockPredecessor will eventually be removed and replaced with Block, which is a basic block, so those comments will become correct. (See the sketch after this list.)
* Manually rename SSABuilder block types to avoid conflict. SSABuilder has its own Block and BlockData types. These, along with associated identifiers, would cause conflicts in a later commit, so they are renamed to be more verbose here.
* Automatically rename 'Ebb' to 'Block' in *.rs.
* Automatically rename 'EBB' to 'block' in *.rs.
* Automatically rename 'ebb' to 'block' in *.rs.
* Automatically rename 'extended basic block' to 'basic block' in *.rs.
* Automatically rename 'an basic block' to 'a basic block' in *.rs.
* Manually update the comment for `Block`. The Wikipedia reference in `Block`'s documentation needed updating after the rename.
* Automatically rename 'an `Block`' to 'a `Block`' in *.rs.
* Automatically rename 'extended_basic_block' to 'basic_block' in *.rs.
* Automatically rename 'ebb' to 'block' in *.clif.
* Manually rename a clif constant that contains 'ebb' as a substring, to avoid a conflict.
* Automatically rename filecheck uses of 'EBB' to 'BB': 'regex: EBB' -> 'regex: BB' and '$EBB' -> '$BB'.
* Automatically rename 'EBB' and 'Ebb' to 'block' in *.clif.
* Automatically rename 'an block' to 'a block' in *.clif.
* Fix a testcase broken by the longer function names. Test function names are limited to 16 characters, so the new, longer name is truncated and fails a filecheck test. An outdated comment was also fixed.
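
A minimal Rust sketch of the first rename above, for orientation only. The entity types and the field names of BlockPredecessor here are assumptions standing in for the real cranelift-codegen definitions, not the code touched by these commits:

    // Hypothetical stand-ins for Cranelift's IR entity references; the real
    // types live in cranelift-codegen and are not defined this way.
    #[derive(Clone, Copy)]
    struct Block(u32);

    #[derive(Clone, Copy)]
    struct Inst(u32);

    // Formerly named `BasicBlock`: a pair of (Block, Inst) representing the
    // basic-block subcomponent of a block that is a predecessor of another
    // block. Renaming it frees the name while Ebb transitions to `Block`.
    #[derive(Clone, Copy)]
    struct BlockPredecessor {
        block: Block,
        inst: Inst,
    }

    fn main() {
        // One predecessor edge, identified by its block and branch instruction.
        let pred = BlockPredecessor { block: Block(0), inst: Inst(7) };
        println!("predecessor: block{} at inst{}", pred.block.0, pred.inst.0);
    }
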
; Test code generation for WebAssembly type conversion operators.
test compile
target x86_64 haswell

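; Integer width conversions: i32.wrap_i64 via ireduce, i64.extend_s/u_i32 via sextend and uextend.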
function %i32_wrap_i64(i64) -> i32 {
block0(v0: i64):
    v1 = ireduce.i32 v0
    return v1
}

function %i64_extend_s_i32(i32) -> i64 {
block0(v0: i32):
    v1 = sextend.i64 v0
    return v1
}

function %i64_extend_u_i32(i32) -> i64 {
block0(v0: i32):
    v1 = uextend.i64 v0
    return v1
}

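; Float-to-integer truncations (i32/i64.trunc_s/u on f32 and f64) via fcvt_to_sint and fcvt_to_uint.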
function %i32_trunc_s_f32(f32) -> i32 {
block0(v0: f32):
    v1 = fcvt_to_sint.i32 v0
    return v1
}

function %i32_trunc_u_f32(f32) -> i32 {
block0(v0: f32):
    v1 = fcvt_to_uint.i32 v0
    return v1
}

function %i32_trunc_s_f64(f64) -> i32 {
block0(v0: f64):
    v1 = fcvt_to_sint.i32 v0
    return v1
}

function %i32_trunc_u_f64(f64) -> i32 {
block0(v0: f64):
    v1 = fcvt_to_uint.i32 v0
    return v1
}

function %i64_trunc_s_f32(f32) -> i64 {
block0(v0: f32):
    v1 = fcvt_to_sint.i64 v0
    return v1
}

function %i64_trunc_u_f32(f32) -> i64 {
block0(v0: f32):
    v1 = fcvt_to_uint.i64 v0
    return v1
}

function %i64_trunc_s_f64(f64) -> i64 {
block0(v0: f64):
    v1 = fcvt_to_sint.i64 v0
    return v1
}

function %i64_trunc_u_f64(f64) -> i64 {
block0(v0: f64):
    v1 = fcvt_to_uint.i64 v0
    return v1
}

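; Saturating float-to-integer truncations via fcvt_to_sint_sat and fcvt_to_uint_sat.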
function %i32_trunc_s_sat_f32(f32) -> i32 {
block0(v0: f32):
    v1 = fcvt_to_sint_sat.i32 v0
    return v1
}

function %i32_trunc_u_sat_f32(f32) -> i32 {
block0(v0: f32):
    v1 = fcvt_to_uint_sat.i32 v0
    return v1
}

function %i32_trunc_s_sat_f64(f64) -> i32 {
block0(v0: f64):
    v1 = fcvt_to_sint_sat.i32 v0
    return v1
}

function %i32_trunc_u_sat_f64(f64) -> i32 {
block0(v0: f64):
    v1 = fcvt_to_uint_sat.i32 v0
    return v1
}

function %i64_trunc_s_sat_f32(f32) -> i64 {
block0(v0: f32):
    v1 = fcvt_to_sint_sat.i64 v0
    return v1
}

function %i64_trunc_u_sat_f32(f32) -> i64 {
block0(v0: f32):
    v1 = fcvt_to_uint_sat.i64 v0
    return v1
}

function %i64_trunc_s_sat_f64(f64) -> i64 {
block0(v0: f64):
    v1 = fcvt_to_sint_sat.i64 v0
    return v1
}

function %i64_trunc_u_sat_f64(f64) -> i64 {
block0(v0: f64):
    v1 = fcvt_to_uint_sat.i64 v0
    return v1
}

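; Float precision conversions: fdemote (f64 to f32) and fpromote (f32 to f64).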
function %f32_trunc_f64(f64) -> f32 {
block0(v0: f64):
    v1 = fdemote.f32 v0
    return v1
}

function %f64_promote_f32(f32) -> f64 {
block0(v0: f32):
    v1 = fpromote.f64 v0
    return v1
}

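; Integer-to-float conversions (f32/f64.convert_s/u from i32 and i64) via fcvt_from_sint and fcvt_from_uint.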
function %f32_convert_s_i32(i32) -> f32 {
block0(v0: i32):
    v1 = fcvt_from_sint.f32 v0
    return v1
}

function %f32_convert_u_i32(i32) -> f32 {
block0(v0: i32):
    v1 = fcvt_from_uint.f32 v0
    return v1
}

function %f64_convert_s_i32(i32) -> f64 {
block0(v0: i32):
    v1 = fcvt_from_sint.f64 v0
    return v1
}

function %f64_convert_u_i32(i32) -> f64 {
block0(v0: i32):
    v1 = fcvt_from_uint.f64 v0
    return v1
}

function %f32_convert_s_i64(i64) -> f32 {
block0(v0: i64):
    v1 = fcvt_from_sint.f32 v0
    return v1
}

function %f32_convert_u_i64(i64) -> f32 {
block0(v0: i64):
    v1 = fcvt_from_uint.f32 v0
    return v1
}

function %f64_convert_s_i64(i64) -> f64 {
block0(v0: i64):
    v1 = fcvt_from_sint.f64 v0
    return v1
}

function %f64_convert_u_i64(i64) -> f64 {
block0(v0: i64):
    v1 = fcvt_from_uint.f64 v0
    return v1
}

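; Reinterpretations between integer and float bit patterns via bitcast.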
function %i32_reinterpret_f32(f32) -> i32 {
block0(v0: f32):
    v1 = bitcast.i32 v0
    return v1
}

function %f32_reinterpret_i32(i32) -> f32 {
block0(v0: i32):
    v1 = bitcast.f32 v0
    return v1
}

function %i64_reinterpret_f64(f64) -> i64 {
block0(v0: f64):
    v1 = bitcast.i64 v0
    return v1
}

function %f64_reinterpret_i64(i64) -> f64 {
block0(v0: i64):
    v1 = bitcast.f64 v0
    return v1
}