Improve codegen for unchecked float casts on wasm

This commit improves codegen for unchecked float-to-int casts on
WebAssembly targets to use the single `iNN.trunc_fMM_{u,s}`
instructions. Previously rustc would emit bare `fptosi` and `fptoui`
instructions for these casts, but on WebAssembly targets the codegen
for those instructions is quite large. The expansion is large because
LLVM may speculate these instructions, and since the corresponding
WebAssembly instructions trap on out-of-range inputs, the lowering has
to guard against that trap in case the conversion is speculated.

The change here updates the codegen for the unchecked cast intrinsics
with a wasm-specific case that calls the appropriate LLVM intrinsic to
generate the desired wasm instruction directly. The intrinsic
explicitly opts in to undefined behavior for out-of-range inputs, so a
trap on such inputs on wasm should be acceptable.

cc #73591
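
For illustration only (not part of this commit), the casts affected here
are the ones written with `to_int_unchecked`, the public method that
wraps the `float_to_int_unchecked` intrinsic. A minimal sketch, with a
hypothetical function name:

    /// Example of a cast whose wasm codegen this commit shrinks.
    ///
    /// Safety: `x` must be finite and representable as an `i32`; otherwise
    /// the behavior is undefined, which is why a trap on wasm is acceptable.
    pub unsafe fn f64_to_i32_unchecked(x: f64) -> i32 {
        // On wasm32 (without the `nontrapping-fptoint` target feature) this
        // can now lower to a single `i32.trunc_f64_s` instruction.
        x.to_int_unchecked()
    }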
Alex Crichton 2020-07-22 14:51:12 -07:00
parent 9e92106d45
commit 618aeec51f
4 changed files with 74 additions and 38 deletions


@@ -510,6 +510,14 @@ impl CodegenCx<'b, 'tcx> {
         ifn!("llvm.wasm.trunc.saturate.signed.i32.f64", fn(t_f64) -> t_i32);
         ifn!("llvm.wasm.trunc.saturate.signed.i64.f32", fn(t_f32) -> t_i64);
         ifn!("llvm.wasm.trunc.saturate.signed.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);
         ifn!("llvm.trap", fn() -> void);
         ifn!("llvm.debugtrap", fn() -> void);


@@ -629,27 +629,24 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             }
             sym::float_to_int_unchecked => {
-                if float_type_width(arg_tys[0]).is_none() {
-                    span_invalid_monomorphization_error(
-                        tcx.sess,
-                        span,
-                        &format!(
-                            "invalid monomorphization of `float_to_int_unchecked` \
+                let float_width = match float_type_width(arg_tys[0]) {
+                    Some(width) => width,
+                    None => {
+                        span_invalid_monomorphization_error(
+                            tcx.sess,
+                            span,
+                            &format!(
+                                "invalid monomorphization of `float_to_int_unchecked` \
                                   intrinsic: expected basic float type, \
                                   found `{}`",
-                            arg_tys[0]
-                        ),
-                    );
-                    return;
-                }
-                match int_type_width_signed(ret_ty, self.cx) {
-                    Some((width, signed)) => {
-                        if signed {
-                            self.fptosi(args[0].immediate(), self.cx.type_ix(width))
-                        } else {
-                            self.fptoui(args[0].immediate(), self.cx.type_ix(width))
-                        }
+                                arg_tys[0]
+                            ),
+                        );
+                        return;
                     }
+                };
+                let (width, signed) = match int_type_width_signed(ret_ty, self.cx) {
+                    Some(pair) => pair,
                     None => {
                         span_invalid_monomorphization_error(
                             tcx.sess,
@@ -663,7 +660,49 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                         );
                         return;
                     }
-                }
+                };
+                // The LLVM backend can reorder and speculate `fptosi` and
+                // `fptoui`, so on WebAssembly the codegen for this instruction
+                // is quite heavyweight. To avoid this heavyweight codegen we
+                // instead use the raw wasm intrinsics which will lower to one
+                // instruction in WebAssembly (`iNN.trunc_fMM_{s,u}`). This one
+                // instruction will trap if the operand is out of bounds, but
+                // that's ok since this intrinsic is UB if the operands are out
+                // of bounds, so the behavior can be different on WebAssembly
+                // than other targets.
+                //
+                // Note, however, that when the `nontrapping-fptoint` feature is
+                // enabled in LLVM then LLVM will lower `fptosi` to
+                // `iNN.trunc_sat_fMM_{s,u}`, so if that's the case we don't
+                // bother with intrinsics.
+                let mut result = None;
+                if self.sess().target.target.arch == "wasm32"
+                    && !self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
+                {
+                    let name = match (width, float_width, signed) {
+                        (32, 32, true) => Some("llvm.wasm.trunc.signed.i32.f32"),
+                        (32, 64, true) => Some("llvm.wasm.trunc.signed.i32.f64"),
+                        (64, 32, true) => Some("llvm.wasm.trunc.signed.i64.f32"),
+                        (64, 64, true) => Some("llvm.wasm.trunc.signed.i64.f64"),
+                        (32, 32, false) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
+                        (32, 64, false) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
+                        (64, 32, false) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
+                        (64, 64, false) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
+                        _ => None,
+                    };
+                    if let Some(name) = name {
+                        let intrinsic = self.get_intrinsic(name);
+                        result = Some(self.call(intrinsic, &[args[0].immediate()], None));
+                    }
+                }
+                result.unwrap_or_else(|| {
+                    if signed {
+                        self.fptosi(args[0].immediate(), self.cx.type_ix(width))
+                    } else {
+                        self.fptoui(args[0].immediate(), self.cx.type_ix(width))
+                    }
+                })
             }
             sym::discriminant_value => {

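For context on the comment in the hunk above, here is a small
illustrative sketch (not part of the diff) contrasting the two cast
flavors the lowering has to keep apart. The function names are made up;
the instruction names in the comments assume a wasm32 target without the
`nontrapping-fptoint` feature (with it enabled, presumably via something
like `-C target-feature=+nontrapping-fptoint`, LLVM already lowers the
plain `fptoui` to `i32.trunc_sat_f32_u` and the intrinsic path is
skipped).

    // Saturating `as` cast: must not trap on out-of-range values, so it
    // keeps the guarded `fptoui` expansion when only the trapping wasm
    // instruction is available.
    pub fn cast_saturating(x: f32) -> u32 {
        x as u32
    }

    // Unchecked cast: out-of-range inputs are already UB, so lowering
    // straight to the single, trapping `i32.trunc_f32_u` is acceptable.
    //
    // Safety: the caller must guarantee `x` is finite and fits in a `u32`.
    pub unsafe fn cast_unchecked(x: f32) -> u32 {
        x.to_int_unchecked()
    }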

@@ -2,6 +2,7 @@
 // unchecked intrinsics.
 // compile-flags: -C opt-level=3
+// ignore-wasm32 the wasm target is tested in `wasm_casts_*`
 #![crate_type = "lib"]


@@ -38,7 +38,6 @@ pub fn cast_f32_i32(a: f32) -> i32 {
     a as _
 }
 // CHECK-LABEL: @cast_f64_u64
 #[no_mangle]
 pub fn cast_f64_u64(a: f64) -> u64 {
@@ -84,13 +83,10 @@ pub fn cast_f32_u8(a: f32) -> u8 {
     a as _
 }
 // CHECK-LABEL: @cast_unchecked_f64_i64
 #[no_mangle]
 pub unsafe fn cast_unchecked_f64_i64(a: f64) -> i64 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptosi double {{.*}} to i64
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.signed.{{.*}}
     // CHECK-NEXT: ret i64 {{.*}}
     a.to_int_unchecked()
 }
@@ -98,8 +94,7 @@ pub unsafe fn cast_unchecked_f64_i64(a: f64) -> i64 {
 // CHECK-LABEL: @cast_unchecked_f64_i32
 #[no_mangle]
 pub unsafe fn cast_unchecked_f64_i32(a: f64) -> i32 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptosi double {{.*}} to i32
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.signed.{{.*}}
     // CHECK-NEXT: ret i32 {{.*}}
     a.to_int_unchecked()
 }
@@ -107,8 +102,7 @@ pub unsafe fn cast_unchecked_f64_i32(a: f64) -> i32 {
 // CHECK-LABEL: @cast_unchecked_f32_i64
 #[no_mangle]
 pub unsafe fn cast_unchecked_f32_i64(a: f32) -> i64 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptosi float {{.*}} to i64
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.signed.{{.*}}
     // CHECK-NEXT: ret i64 {{.*}}
     a.to_int_unchecked()
 }
@@ -116,18 +110,15 @@ pub unsafe fn cast_unchecked_f32_i64(a: f32) -> i64 {
 // CHECK-LABEL: @cast_unchecked_f32_i32
 #[no_mangle]
 pub unsafe fn cast_unchecked_f32_i32(a: f32) -> i32 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptosi float {{.*}} to i32
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.signed.{{.*}}
     // CHECK-NEXT: ret i32 {{.*}}
     a.to_int_unchecked()
 }
 // CHECK-LABEL: @cast_unchecked_f64_u64
 #[no_mangle]
 pub unsafe fn cast_unchecked_f64_u64(a: f64) -> u64 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptoui double {{.*}} to i64
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.unsigned.{{.*}}
     // CHECK-NEXT: ret i64 {{.*}}
     a.to_int_unchecked()
 }
@@ -135,8 +126,7 @@ pub unsafe fn cast_unchecked_f64_u64(a: f64) -> u64 {
 // CHECK-LABEL: @cast_unchecked_f64_u32
 #[no_mangle]
 pub unsafe fn cast_unchecked_f64_u32(a: f64) -> u32 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptoui double {{.*}} to i32
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.unsigned.{{.*}}
     // CHECK-NEXT: ret i32 {{.*}}
     a.to_int_unchecked()
 }
@@ -144,8 +134,7 @@ pub unsafe fn cast_unchecked_f32_u64(a: f32) -> u64 {
 // CHECK-LABEL: @cast_unchecked_f32_u64
 #[no_mangle]
 pub unsafe fn cast_unchecked_f32_u64(a: f32) -> u64 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptoui float {{.*}} to i64
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.unsigned.{{.*}}
     // CHECK-NEXT: ret i64 {{.*}}
     a.to_int_unchecked()
 }
@@ -153,8 +142,7 @@ pub unsafe fn cast_unchecked_f32_u32(a: f32) -> u32 {
 // CHECK-LABEL: @cast_unchecked_f32_u32
 #[no_mangle]
 pub unsafe fn cast_unchecked_f32_u32(a: f32) -> u32 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptoui float {{.*}} to i32
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.unsigned.{{.*}}
     // CHECK-NEXT: ret i32 {{.*}}
     a.to_int_unchecked()
 }