Emit correct alignment information for loads/stores of small aggregates
Loading from and storing to small aggregates happens by casting the aggregate pointer to an appropriately sized integer pointer to avoid the use of first-class aggregates, which would lead to less optimized code. But this means that, for example, a tuple of type (i16, i16) will be loaded through an i32 pointer, and because we currently don't provide alignment information, LLVM assumes that the load should use the ABI alignment for i32, which would usually be 4-byte alignment. But the alignment requirement for the (i16, i16) tuple will usually be just 2 bytes, so we're overestimating alignment, which invokes undefined behaviour. Therefore we must emit appropriate alignment information for stores/loads through such cast pointers. Fixes #23431
This commit is contained in:
parent
fcf637b19f
commit
78745a4afe
6 changed files with 42 additions and 16 deletions
|
@ -880,7 +880,7 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
|
|||
CEnum(ity, min, max) => {
|
||||
assert_discr_in_range(ity, min, max, discr);
|
||||
Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true),
|
||||
val)
|
||||
val);
|
||||
}
|
||||
General(ity, ref cases, dtor) => {
|
||||
if dtor_active(dtor) {
|
||||
|
@ -889,7 +889,7 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
|
|||
Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED as usize), ptr);
|
||||
}
|
||||
Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true),
|
||||
GEPi(bcx, val, &[0, 0]))
|
||||
GEPi(bcx, val, &[0, 0]));
|
||||
}
|
||||
Univariant(ref st, dtor) => {
|
||||
assert_eq!(discr, 0);
|
||||
|
@ -901,14 +901,14 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
|
|||
RawNullablePointer { nndiscr, nnty, ..} => {
|
||||
if discr != nndiscr {
|
||||
let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
|
||||
Store(bcx, C_null(llptrty), val)
|
||||
Store(bcx, C_null(llptrty), val);
|
||||
}
|
||||
}
|
||||
StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
|
||||
if discr != nndiscr {
|
||||
let llptrptr = GEPi(bcx, val, &discrfield[..]);
|
||||
let llptrty = val_ty(llptrptr).element_type();
|
||||
Store(bcx, C_null(llptrty), llptrptr)
|
||||
Store(bcx, C_null(llptrty), llptrptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -765,9 +765,14 @@ pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
|
|||
}
|
||||
|
||||
let ptr = to_arg_ty_ptr(cx, ptr, t);
|
||||
let align = type_of::align_of(cx.ccx(), t);
|
||||
|
||||
if type_is_immediate(cx.ccx(), t) && type_of::type_of(cx.ccx(), t).is_aggregate() {
|
||||
return Load(cx, ptr);
|
||||
let load = Load(cx, ptr);
|
||||
unsafe {
|
||||
llvm::LLVMSetAlignment(load, align);
|
||||
}
|
||||
return load;
|
||||
}
|
||||
|
||||
unsafe {
|
||||
|
@ -793,13 +798,24 @@ pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
|
|||
Load(cx, ptr)
|
||||
};
|
||||
|
||||
unsafe {
|
||||
llvm::LLVMSetAlignment(val, align);
|
||||
}
|
||||
|
||||
from_arg_ty(cx, val, t)
|
||||
}
|
||||
|
||||
/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
|
||||
/// differs from the type used for SSA values.
|
||||
pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
|
||||
Store(cx, to_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t));
|
||||
if cx.unreachable.get() {
|
||||
return;
|
||||
}
|
||||
|
||||
let store = Store(cx, to_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t));
|
||||
unsafe {
|
||||
llvm::LLVMSetAlignment(store, type_of::align_of(cx.ccx(), t));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
|
||||
|
|
|
@ -646,13 +646,13 @@ pub fn LoadNonNull(cx: Block, ptr: ValueRef) -> ValueRef {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn Store(cx: Block, val: ValueRef, ptr: ValueRef) {
|
||||
if cx.unreachable.get() { return; }
|
||||
pub fn Store(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef {
|
||||
if cx.unreachable.get() { return C_nil(cx.ccx()); }
|
||||
B(cx).store(val, ptr)
|
||||
}
|
||||
|
||||
pub fn VolatileStore(cx: Block, val: ValueRef, ptr: ValueRef) {
|
||||
if cx.unreachable.get() { return; }
|
||||
pub fn VolatileStore(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef {
|
||||
if cx.unreachable.get() { return C_nil(cx.ccx()); }
|
||||
B(cx).volatile_store(val, ptr)
|
||||
}
|
||||
|
||||
|
|
|
@ -509,18 +509,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
|
|||
value
|
||||
}
|
||||
|
||||
pub fn store(&self, val: ValueRef, ptr: ValueRef) {
|
||||
pub fn store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef {
|
||||
debug!("Store {} -> {}",
|
||||
self.ccx.tn().val_to_string(val),
|
||||
self.ccx.tn().val_to_string(ptr));
|
||||
assert!(!self.llbuilder.is_null());
|
||||
self.count_insn("store");
|
||||
unsafe {
|
||||
llvm::LLVMBuildStore(self.llbuilder, val, ptr);
|
||||
llvm::LLVMBuildStore(self.llbuilder, val, ptr)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) {
|
||||
pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef {
|
||||
debug!("Store {} -> {}",
|
||||
self.ccx.tn().val_to_string(val),
|
||||
self.ccx.tn().val_to_string(ptr));
|
||||
|
@ -529,6 +529,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
|
|||
unsafe {
|
||||
let insn = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
|
||||
llvm::LLVMSetVolatile(insn, llvm::True);
|
||||
insn
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -802,7 +802,9 @@ pub fn trans_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
|
|||
// appropriately sized integer and we have to convert it
|
||||
let tmp = builder.bitcast(llforeign_arg,
|
||||
type_of::arg_type_of(ccx, rust_ty).ptr_to());
|
||||
builder.load(tmp)
|
||||
let load = builder.load(tmp);
|
||||
llvm::LLVMSetAlignment(load, type_of::align_of(ccx, rust_ty));
|
||||
load
|
||||
} else {
|
||||
builder.load(llforeign_arg)
|
||||
}
|
||||
|
|
|
@ -456,13 +456,20 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
|
|||
(_, "volatile_load") => {
|
||||
let tp_ty = *substs.types.get(FnSpace, 0);
|
||||
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
|
||||
from_arg_ty(bcx, VolatileLoad(bcx, ptr), tp_ty)
|
||||
let load = VolatileLoad(bcx, ptr);
|
||||
unsafe {
|
||||
llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
|
||||
}
|
||||
from_arg_ty(bcx, load, tp_ty)
|
||||
},
|
||||
(_, "volatile_store") => {
|
||||
let tp_ty = *substs.types.get(FnSpace, 0);
|
||||
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
|
||||
let val = to_arg_ty(bcx, llargs[1], tp_ty);
|
||||
VolatileStore(bcx, val, ptr);
|
||||
let store = VolatileStore(bcx, val, ptr);
|
||||
unsafe {
|
||||
llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
|
||||
}
|
||||
C_nil(ccx)
|
||||
},
|
||||
|
||||
|
|
Loading…
Reference in a new issue