rustc_target: separate out an individual Align from AbiAndPrefAlign.

Eduard-Mihai Burtescu 2018-09-09 00:22:22 +03:00
parent d56e892085
commit 3ce8d444af
29 changed files with 215 additions and 205 deletions
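In outline: the old `AbiAndPrefAlign` stored both the ABI-mandated and the preferred alignment as packed power-of-two exponents and exposed them only through methods (`abi()`, `pref()`, `abi_bits()`, ...); after this commit a standalone `Align` owns the power-of-two representation and `AbiAndPrefAlign` becomes a plain public pair of `Align`s. A condensed, self-contained sketch of the new types, pieced together from the layout hunks near the end of this diff (derive lists trimmed, and `from_bits` simplified so the sketch does not need `Size`):

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct Align {
    pow2: u8,
}

impl Align {
    pub fn from_bits(bits: u64) -> Result<Align, String> {
        // The landed code routes through Size::from_bits; rounding up to whole
        // bytes is equivalent for the purposes of this sketch.
        Align::from_bytes((bits + 7) / 8)
    }

    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align { pow2: 0 });
        }
        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(format!("`{}` is not a power of 2", align));
        }
        if pow2 > 29 {
            return Err(format!("`{}` is too large", align));
        }
        Ok(Align { pow2 })
    }

    pub fn bytes(self) -> u64 { 1 << self.pow2 }
    pub fn bits(self) -> u64 { self.bytes() * 8 }
}

/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }
}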

View file

@ -12,7 +12,7 @@
use super::{Pointer, EvalResult, AllocId};
use ty::layout::{Size, AbiAndPrefAlign};
use ty::layout::{Size, Align, AbiAndPrefAlign};
use syntax::ast::Mutability;
use std::iter;
use mir;
@ -104,7 +104,7 @@ impl<Tag, Extra: Default> Allocation<Tag, Extra> {
}
pub fn from_byte_aligned_bytes(slice: &[u8]) -> Self {
Allocation::from_bytes(slice, AbiAndPrefAlign::from_bytes(1, 1).unwrap())
Allocation::from_bytes(slice, AbiAndPrefAlign::new(Align::from_bytes(1).unwrap()))
}
pub fn undef(size: Size, align: AbiAndPrefAlign) -> Self {

View file

@ -527,7 +527,7 @@ impl<'tcx, O: fmt::Debug> fmt::Debug for EvalErrorKind<'tcx, O> {
write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c),
AlignmentCheckFailed { required, has } =>
write!(f, "tried to access memory with alignment {}, but alignment {} is required",
has.abi(), required.abi()),
has.abi.bytes(), required.abi.bytes()),
TypeNotPrimitive(ty) =>
write!(f, "expected primitive type, got {}", ty),
Layout(ref err) =>
@ -537,8 +537,9 @@ impl<'tcx, O: fmt::Debug> fmt::Debug for EvalErrorKind<'tcx, O> {
MachineError(ref inner) =>
write!(f, "{}", inner),
IncorrectAllocationInformation(size, size2, align, align2) =>
write!(f, "incorrect alloc info: expected size {} and align {}, got size {} and \
align {}", size.bytes(), align.abi(), size2.bytes(), align2.abi()),
write!(f, "incorrect alloc info: expected size {} and align {}, \
got size {} and align {}",
size.bytes(), align.abi.bytes(), size2.bytes(), align2.abi.bytes()),
Panic { ref msg, line, col, ref file } =>
write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col),
InvalidDiscriminant(val) =>

View file

@ -71,7 +71,7 @@ impl CodeStats {
let info = TypeSizeInfo {
kind,
type_description: type_desc.to_string(),
align: align.abi(),
align: align.abi.bytes(),
overall_size: overall_size.bytes(),
packed: packed,
opt_discr_size: opt_discr_size.map(|s| s.bytes()),

View file

@ -259,7 +259,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
let pack = {
let pack = repr.pack as u64;
AbiAndPrefAlign::from_bytes(pack, pack).unwrap()
AbiAndPrefAlign::new(Align::from_bytes(pack).unwrap())
};
let mut align = if packed {
@ -274,7 +274,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
let mut optimize = !repr.inhibit_struct_field_reordering_opt();
if let StructKind::Prefixed(_, align) = kind {
optimize &= align.abi() == 1;
optimize &= align.abi.bytes() == 1;
}
if optimize {
@ -285,7 +285,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
};
let optimizing = &mut inverse_memory_index[..end];
let field_align = |f: &TyLayout<'_>| {
if packed { f.align.min(pack).abi() } else { f.align.abi() }
if packed { f.align.min(pack).abi } else { f.align.abi }
};
match kind {
StructKind::AlwaysSized |
@ -352,7 +352,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
if repr.align > 0 {
let repr_align = repr.align as u64;
align = align.max(AbiAndPrefAlign::from_bytes(repr_align, repr_align).unwrap());
align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
debug!("univariant repr_align: {:?}", repr_align);
}
@ -394,7 +394,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
(Some((i, field)), None, None) => {
// Field fills the struct and it has a scalar or scalar pair ABI.
if offsets[i].bytes() == 0 &&
align.abi() == field.align.abi() &&
align.abi == field.align.abi &&
size == field.size {
match field.abi {
// For plain scalars, or vectors of them, we can't unpack
@ -682,7 +682,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
let pack = {
let pack = def.repr.pack as u64;
AbiAndPrefAlign::from_bytes(pack, pack).unwrap()
AbiAndPrefAlign::new(Align::from_bytes(pack).unwrap())
};
let mut align = if packed {
@ -694,7 +694,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
if def.repr.align > 0 {
let repr_align = def.repr.align as u64;
align = align.max(
AbiAndPrefAlign::from_bytes(repr_align, repr_align).unwrap());
AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
}
let optimize = !def.repr.inhibit_union_abi_opt();
@ -964,7 +964,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
let mut size = Size::ZERO;
// We're interested in the smallest alignment, so start large.
let mut start_align = AbiAndPrefAlign::from_bytes(256, 256).unwrap();
let mut start_align = AbiAndPrefAlign::new(Align::from_bytes(256).unwrap());
assert_eq!(Integer::for_abi_align(dl, start_align), None);
// repr(C) on an enum tells us to make a (tag, union) layout,
@ -989,7 +989,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
// Find the first field we can't move later
// to make room for a larger discriminant.
for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
if !field.is_zst() || field.align.abi() != 1 {
if !field.is_zst() || field.align.abi.bytes() != 1 {
start_align = start_align.min(field.align);
break;
}
@ -1251,7 +1251,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
name: name.to_string(),
offset: offset.bytes(),
size: field_layout.size.bytes(),
align: field_layout.align.abi(),
align: field_layout.align.abi.bytes(),
}
}
}
@ -1264,7 +1264,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
} else {
session::SizeKind::Exact
},
align: layout.align.abi(),
align: layout.align.abi.bytes(),
size: if min_size.bytes() == 0 {
layout.size.bytes()
} else {
@ -1994,12 +1994,16 @@ impl_stable_hash_for!(enum ::ty::layout::Primitive {
Pointer
});
impl<'gcx> HashStable<StableHashingContext<'gcx>> for AbiAndPrefAlign {
impl_stable_hash_for!(struct ::ty::layout::AbiAndPrefAlign {
abi,
pref
});
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'gcx>,
hasher: &mut StableHasher<W>) {
self.abi().hash_stable(hcx, hasher);
self.pref().hash_stable(hcx, hasher);
self.bytes().hash_stable(hcx, hasher);
}
}

View file

@ -73,7 +73,7 @@ impl ArgAttributesExt for ArgAttributes {
if let Some(align) = self.pointee_align {
llvm::LLVMRustAddAlignmentAttr(llfn,
idx.as_uint(),
align.abi() as u32);
align.abi.bytes() as u32);
}
regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
}
@ -98,7 +98,7 @@ impl ArgAttributesExt for ArgAttributes {
if let Some(align) = self.pointee_align {
llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
idx.as_uint(),
align.abi() as u32);
align.abi.bytes() as u32);
}
regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
}

View file

@ -475,7 +475,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
llvm::LLVMBuildAlloca(self.llbuilder, ty,
name.as_ptr())
};
llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
llvm::LLVMSetAlignment(alloca, align.abi.bytes() as c_uint);
alloca
}
}
@ -494,7 +494,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
name.as_ptr())
};
llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
llvm::LLVMSetAlignment(alloca, align.abi.bytes() as c_uint);
alloca
}
}
@ -503,7 +503,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
self.count_insn("load");
unsafe {
let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
llvm::LLVMSetAlignment(load, align.abi() as c_uint);
llvm::LLVMSetAlignment(load, align.abi.bytes() as c_uint);
load
}
}
@ -658,7 +658,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let align = if flags.contains(MemFlags::UNALIGNED) {
1
} else {
align.abi() as c_uint
align.abi.bytes() as c_uint
};
llvm::LLVMSetAlignment(store, align);
if flags.contains(MemFlags::VOLATILE) {
@ -893,8 +893,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let dst = self.pointercast(dst, self.cx().type_i8p());
let src = self.pointercast(src, self.cx().type_i8p());
unsafe {
llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.abi() as c_uint,
src, src_align.abi() as c_uint, size, is_volatile);
llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.abi.bytes() as c_uint,
src, src_align.abi.bytes() as c_uint, size, is_volatile);
}
}
@ -913,8 +913,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let dst = self.pointercast(dst, self.cx().type_i8p());
let src = self.pointercast(src, self.cx().type_i8p());
unsafe {
llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.abi() as c_uint,
src, src_align.abi() as c_uint, size, is_volatile);
llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.abi.bytes() as c_uint,
src, src_align.abi.bytes() as c_uint, size, is_volatile);
}
}
@ -930,7 +930,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
let ptr = self.pointercast(ptr, self.cx().type_i8p());
let align = self.cx().const_u32(align.abi() as u32);
let align = self.cx().const_u32(align.abi.bytes() as u32);
let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE));
self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
}

View file

@ -28,7 +28,7 @@ use value::Value;
use rustc::ty::{self, Ty};
use rustc_codegen_ssa::traits::*;
use rustc::ty::layout::{self, Size, AbiAndPrefAlign, LayoutOf};
use rustc::ty::layout::{self, Size, Align, AbiAndPrefAlign, LayoutOf};
use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags};
@ -94,15 +94,15 @@ fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
// Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
// which can force it to be smaller. Rust doesn't support this yet.
if let Some(min) = cx.sess().target.target.options.min_global_align {
match ty::layout::AbiAndPrefAlign::from_bits(min, min) {
Ok(min) => align = align.max(min),
match Align::from_bits(min) {
Ok(min) => align = align.max(AbiAndPrefAlign::new(min)),
Err(err) => {
cx.sess().err(&format!("invalid minimum global alignment: {}", err));
}
}
}
unsafe {
llvm::LLVMSetAlignment(gv, align.abi() as u32);
llvm::LLVMSetAlignment(gv, align.abi.bytes() as u32);
}
}
@ -219,7 +219,7 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> {
unsafe {
// Upgrade the alignment in cases where the same constant is used with different
// alignment requirements
let llalign = align.abi() as u32;
let llalign = align.abi.bytes() as u32;
if llalign > llvm::LLVMGetAlignment(gv) {
llvm::LLVMSetAlignment(gv, llalign);
}

View file

@ -323,7 +323,7 @@ fn fixed_vec_metadata(
llvm::LLVMRustDIBuilderCreateArrayType(
DIB(cx),
size.bits(),
align.abi_bits() as u32,
align.abi.bits() as u32,
element_type_metadata,
subscripts)
};
@ -787,7 +787,7 @@ fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
DIB(cx),
name.as_ptr(),
size.bits(),
align.abi_bits() as u32,
align.abi.bits() as u32,
encoding)
};
@ -818,7 +818,7 @@ fn pointer_type_metadata(
DIB(cx),
pointee_type_metadata,
pointer_size.bits(),
pointer_align.abi_bits() as u32,
pointer_align.abi.bits() as u32,
name.as_ptr())
}
}
@ -1563,7 +1563,7 @@ fn prepare_enum_metadata(
file_metadata,
UNKNOWN_LINE_NUMBER,
discriminant_size.bits(),
discriminant_align.abi_bits() as u32,
discriminant_align.abi.bits() as u32,
create_DIArray(DIB(cx), &enumerators_metadata),
discriminant_base_type_metadata, true)
};
@ -1607,7 +1607,7 @@ fn prepare_enum_metadata(
file_metadata,
UNKNOWN_LINE_NUMBER,
layout.size.bits(),
layout.align.abi_bits() as u32,
layout.align.abi.bits() as u32,
DIFlags::FlagZero,
None,
0, // RuntimeLang
@ -1655,7 +1655,7 @@ fn prepare_enum_metadata(
file_metadata,
UNKNOWN_LINE_NUMBER,
size.bits(),
align.abi_bits() as u32,
align.abi.bits() as u32,
layout.fields.offset(0).bits(),
DIFlags::FlagArtificial,
discr_metadata))
@ -1675,7 +1675,7 @@ fn prepare_enum_metadata(
file_metadata,
UNKNOWN_LINE_NUMBER,
size.bits(),
align.abi_bits() as u32,
align.abi.bits() as u32,
layout.fields.offset(0).bits(),
DIFlags::FlagArtificial,
discr_metadata))
@ -1692,7 +1692,7 @@ fn prepare_enum_metadata(
file_metadata,
UNKNOWN_LINE_NUMBER,
layout.size.bits(),
layout.align.abi_bits() as u32,
layout.align.abi.bits() as u32,
DIFlags::FlagZero,
discriminator_metadata,
empty_array,
@ -1709,7 +1709,7 @@ fn prepare_enum_metadata(
file_metadata,
UNKNOWN_LINE_NUMBER,
layout.size.bits(),
layout.align.abi_bits() as u32,
layout.align.abi.bits() as u32,
DIFlags::FlagZero,
None,
type_array,
@ -1803,7 +1803,7 @@ fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>,
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
member_description.size.bits(),
member_description.align.abi_bits() as u32,
member_description.align.abi.bits() as u32,
member_description.offset.bits(),
match member_description.discriminant {
None => None,
@ -1851,7 +1851,7 @@ fn create_struct_stub(
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
struct_size.bits(),
struct_align.abi_bits() as u32,
struct_align.abi.bits() as u32,
DIFlags::FlagZero,
None,
empty_array,
@ -1889,7 +1889,7 @@ fn create_union_stub(
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
union_size.bits(),
union_align.abi_bits() as u32,
union_align.abi.bits() as u32,
DIFlags::FlagZero,
Some(empty_array),
0, // RuntimeLang
@ -1958,7 +1958,7 @@ pub fn create_global_var_metadata(
is_local_to_unit,
global,
None,
global_align.abi() as u32,
global_align.abi.bytes() as u32,
);
}
}
@ -1996,7 +1996,7 @@ pub fn create_vtable_metadata(
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
Size::ZERO.bits(),
cx.tcx.data_layout.pointer_align.abi_bits() as u32,
cx.tcx.data_layout.pointer_align.abi.bits() as u32,
DIFlags::FlagArtificial,
None,
empty_array,

View file

@ -201,7 +201,7 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
cx.sess().opts.optimize != config::OptLevel::No,
DIFlags::FlagZero,
argument_index,
align.abi() as u32,
align.abi.bytes() as u32,
)
};
source_loc::set_debug_location(self,

View file

@ -158,7 +158,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
}
"min_align_of" => {
let tp_ty = substs.type_at(0);
self.cx().const_usize(self.cx().align_of(tp_ty).abi())
self.cx().const_usize(self.cx().align_of(tp_ty).abi.bytes())
}
"min_align_of_val" => {
let tp_ty = substs.type_at(0);
@ -167,12 +167,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
glue::size_and_align_of_dst(self, tp_ty, Some(meta));
llalign
} else {
self.cx().const_usize(self.cx().align_of(tp_ty).abi())
self.cx().const_usize(self.cx().align_of(tp_ty).abi.bytes())
}
}
"pref_align_of" => {
let tp_ty = substs.type_at(0);
self.cx().const_usize(self.cx().align_of(tp_ty).pref())
self.cx().const_usize(self.cx().align_of(tp_ty).pref.bytes())
}
"type_name" => {
let tp_ty = substs.type_at(0);
@ -261,7 +261,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
let align = if name == "unaligned_volatile_load" {
1
} else {
self.cx().align_of(tp_ty).abi() as u32
self.cx().align_of(tp_ty).abi.bytes() as u32
};
unsafe {
llvm::LLVMSetAlignment(load, align);
@ -1436,7 +1436,7 @@ fn generic_simd_intrinsic(
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.cx().type_i32();
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi.bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
@ -1536,7 +1536,7 @@ fn generic_simd_intrinsic(
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.cx().type_i32();
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi.bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {

View file

@ -125,14 +125,14 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
for i in layout.fields.index_by_increasing_offset() {
let target_offset = layout.fields.offset(i as usize);
let field = layout.field(cx, i);
let effective_field_align = layout.align
.min(field.align)
.restrict_for_offset(target_offset);
packed |= effective_field_align.abi() < field.align.abi();
let effective_field_align = AbiAndPrefAlign::new(layout.align.abi
.min(field.align.abi)
.restrict_for_offset(target_offset));
packed |= effective_field_align.abi < field.align.abi;
debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
effective_field_align: {}",
i, field, offset, target_offset, effective_field_align.abi());
i, field, offset, target_offset, effective_field_align.abi.bytes());
assert!(target_offset >= offset);
let padding = target_offset - offset;
let padding_align = prev_effective_align.min(effective_field_align);

View file

@ -30,7 +30,7 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
t, info, layout);
if !layout.is_unsized() {
let size = bx.cx().const_usize(layout.size.bytes());
let align = bx.cx().const_usize(layout.align.abi());
let align = bx.cx().const_usize(layout.align.abi.bytes());
return (size, align);
}
match t.sty {
@ -44,7 +44,7 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
// The info in this case is the length of the str, so the size is that
// times the unit size.
(bx.mul(info.unwrap(), bx.cx().const_usize(unit.size.bytes())),
bx.cx().const_usize(unit.align.abi()))
bx.cx().const_usize(unit.align.abi.bytes()))
}
_ => {
// First get the size of all statically known fields.
@ -55,7 +55,7 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
let i = layout.fields.count() - 1;
let sized_size = layout.fields.offset(i).bytes();
let sized_align = layout.align.abi();
let sized_align = layout.align.abi.bytes();
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
let sized_size = bx.cx().const_usize(sized_size);

View file

@ -108,7 +108,7 @@ pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
let components: Vec<_> = [
cx.get_fn(monomorphize::resolve_drop_in_place(cx.tcx(), ty)),
cx.const_usize(layout.size.bytes()),
cx.const_usize(layout.align.abi())
cx.const_usize(layout.align.abi.bytes())
].iter().cloned().chain(methods).collect();
let vtable_const = cx.const_struct(&components, false);

View file

@ -280,7 +280,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
scratch.llval
}
Ref(llval, _, align) => {
assert_eq!(align.abi(), op.layout.align.abi(),
assert_eq!(align.abi, op.layout.align.abi,
"return place is unaligned!");
llval
}
@ -805,7 +805,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
Ref(llval, _, align) => {
if arg.is_indirect() && align.abi() < arg.layout.align.abi() {
if arg.is_indirect() && align.abi < arg.layout.align.abi {
// `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
// think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
// have scary latent bugs around.
@ -1006,7 +1006,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.codegen_place(bx, dest)
};
if fn_ret.is_indirect() {
if dest.align.abi() < dest.layout.align.abi() {
if dest.align.abi < dest.layout.align.abi {
// Currently, MIR code generation does not create calls
// that store directly to fields of packed structs (in
// fact, the calls it creates write only to temps),

View file

@ -11,7 +11,7 @@
use rustc::mir::interpret::{ConstValue, ErrorHandled};
use rustc::mir;
use rustc::ty;
use rustc::ty::layout::{self, AbiAndPrefAlign, LayoutOf, TyLayout};
use rustc::ty::layout::{self, Align, AbiAndPrefAlign, LayoutOf, TyLayout};
use base;
use MemFlags;
@ -348,8 +348,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
};
// FIXME: choose an appropriate alignment, or use dynamic align somehow
let max_align = AbiAndPrefAlign::from_bits(128, 128).unwrap();
let min_align = AbiAndPrefAlign::from_bits(8, 8).unwrap();
let max_align = AbiAndPrefAlign::new(Align::from_bits(128).unwrap());
let min_align = AbiAndPrefAlign::new(Align::from_bits(8).unwrap());
// Allocate an appropriate region on the stack, and copy the value into it
let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));

View file

@ -101,7 +101,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
) -> Self {
let field = self.layout.field(bx.cx(), ix);
let offset = self.layout.fields.offset(ix);
let effective_field_align = self.align.restrict_for_offset(offset);
let effective_field_align = self.align.abi.restrict_for_offset(offset);
let mut simple = || {
// Unions and newtypes only use an offset of 0.
@ -123,7 +123,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
None
},
layout: field,
align: effective_field_align,
align: AbiAndPrefAlign::new(effective_field_align),
}
};
@ -143,7 +143,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
if def.repr.packed() {
// FIXME(eddyb) generalize the adjustment when we
// start supporting packing to larger alignments.
assert_eq!(self.layout.align.abi(), 1);
assert_eq!(self.layout.align.abi.bytes(), 1);
return simple();
}
}
@ -197,7 +197,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
llextra: self.llextra,
layout: field,
align: effective_field_align,
align: AbiAndPrefAlign::new(effective_field_align),
}
}

View file

@ -499,7 +499,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let content_ty = self.monomorphize(&content_ty);
let content_layout = bx.cx().layout_of(content_ty);
let llsize = bx.cx().const_usize(content_layout.size.bytes());
let llalign = bx.cx().const_usize(content_layout.align.abi());
let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
let llty_ptr = bx.cx().backend_type(box_layout);

View file

@ -129,7 +129,7 @@ pub fn op_to_const<'tcx>(
assert!(meta.is_none());
let ptr = ptr.to_ptr()?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
assert!(alloc.align.abi() >= align.abi());
assert!(alloc.align.abi >= align.abi);
assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes());
let mut alloc = alloc.clone();
alloc.align = align;

View file

@ -636,7 +636,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
let (ptr, align) = mplace.to_scalar_ptr_align();
match ptr {
Scalar::Ptr(ptr) => {
write!(msg, " by align({}) ref:", align.abi()).unwrap();
write!(msg, " by align({}) ref:", align.abi.bytes()).unwrap();
allocs.push(ptr.alloc_id);
}
ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
@ -665,7 +665,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
Place::Ptr(mplace) => {
match mplace.ptr {
Scalar::Ptr(ptr) => {
trace!("by align({}) ref:", mplace.align.abi());
trace!("by align({}) ref:", mplace.align.abi.bytes());
self.memory.dump_alloc(ptr.alloc_id);
}
ptr => trace!(" integral by ref: {:?}", ptr),

View file

@ -60,7 +60,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
match intrinsic_name {
"min_align_of" => {
let elem_ty = substs.type_at(0);
let elem_align = self.layout_of(elem_ty)?.align.abi();
let elem_align = self.layout_of(elem_ty)?.align.abi.bytes();
let align_val = Scalar::from_uint(elem_align, dest.layout.size);
self.write_scalar(align_val, dest)?;
}

View file

@ -21,7 +21,7 @@ use std::ptr;
use std::borrow::Cow;
use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
use rustc::ty::layout::{self, AbiAndPrefAlign, TargetDataLayout, Size, HasDataLayout};
use rustc::ty::layout::{self, Align, AbiAndPrefAlign, TargetDataLayout, Size, HasDataLayout};
pub use rustc::mir::interpret::{truncate, write_target_uint, read_target_uint};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
@ -268,18 +268,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
}
};
// Check alignment
if alloc_align.abi() < required_align.abi() {
if alloc_align.abi < required_align.abi {
return err!(AlignmentCheckFailed {
has: alloc_align,
required: required_align,
});
}
if offset % required_align.abi() == 0 {
if offset % required_align.abi.bytes() == 0 {
Ok(())
} else {
let has = offset % required_align.abi();
let has = offset % required_align.abi.bytes();
err!(AlignmentCheckFailed {
has: AbiAndPrefAlign::from_bytes(has, has).unwrap(),
has: AbiAndPrefAlign::new(Align::from_bytes(has).unwrap()),
required: required_align,
})
}
@ -450,7 +450,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
// Could also be a fn ptr or extern static
match self.tcx.alloc_map.lock().get(id) {
Some(AllocType::Function(..)) => {
(Size::ZERO, AbiAndPrefAlign::from_bytes(1, 1).unwrap())
(Size::ZERO, AbiAndPrefAlign::new(Align::from_bytes(1).unwrap()))
}
Some(AllocType::Static(did)) => {
// The only way `get` couldn't have worked here is if this is an extern static
@ -523,7 +523,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
"{}({} bytes, alignment {}){}",
msg,
alloc.bytes.len(),
alloc.align.abi(),
alloc.align.abi.bytes(),
extra
);
@ -865,7 +865,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
allow_ptr_and_undef: bool,
) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = AbiAndPrefAlign::from_bytes(1, 1).unwrap();
let align = AbiAndPrefAlign::new(Align::from_bytes(1).unwrap());
if size.bytes() == 0 {
self.check_align(ptr, align)?;
return Ok(());
@ -883,7 +883,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> EvalResult<'tcx, &[u8]> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = AbiAndPrefAlign::from_bytes(1, 1).unwrap();
let align = AbiAndPrefAlign::new(Align::from_bytes(1).unwrap());
if size.bytes() == 0 {
self.check_align(ptr, align)?;
return Ok(&[]);
@ -893,7 +893,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn write_bytes(&mut self, ptr: Scalar<M::PointerTag>, src: &[u8]) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = AbiAndPrefAlign::from_bytes(1, 1).unwrap();
let align = AbiAndPrefAlign::new(Align::from_bytes(1).unwrap());
if src.is_empty() {
self.check_align(ptr, align)?;
return Ok(());
@ -910,7 +910,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
count: Size
) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = AbiAndPrefAlign::from_bytes(1, 1).unwrap();
let align = AbiAndPrefAlign::new(Align::from_bytes(1).unwrap());
if count.bytes() == 0 {
self.check_align(ptr, align)?;
return Ok(());
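
The alignment check earlier in this file reduces to a comparison of the two ABI `Align`s plus a modulus on the required alignment in bytes. A hypothetical standalone version of that test under the same types (`is_sufficiently_aligned` is an illustrative name, not a function in the commit):

fn is_sufficiently_aligned(offset: u64,
                           alloc_align: AbiAndPrefAlign,
                           required: AbiAndPrefAlign) -> bool {
    // Mirrors check_align above: the allocation must be at least as aligned as
    // required, and the offset must be a multiple of the required ABI alignment.
    alloc_align.abi >= required.abi && offset % required.abi.bytes() == 0
}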

View file

@ -18,7 +18,8 @@ use std::hash::Hash;
use rustc::hir;
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Size, AbiAndPrefAlign, LayoutOf, TyLayout, HasDataLayout, VariantIdx};
use rustc::ty::layout::{self, Size, Align,
AbiAndPrefAlign, LayoutOf, TyLayout, HasDataLayout, VariantIdx};
use super::{
GlobalId, AllocId, Allocation, Scalar, EvalResult, Pointer, PointerArithmetic,
@ -127,7 +128,8 @@ impl<Tag> MemPlace<Tag> {
/// Produces a Place that will error if attempted to be read from or written to
#[inline(always)]
pub fn null(cx: &impl HasDataLayout) -> Self {
Self::from_scalar_ptr(Scalar::ptr_null(cx), AbiAndPrefAlign::from_bytes(1, 1).unwrap())
Self::from_scalar_ptr(Scalar::ptr_null(cx),
AbiAndPrefAlign::new(Align::from_bytes(1).unwrap()))
}
#[inline(always)]
@ -167,7 +169,7 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
pub fn dangling(layout: TyLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
MPlaceTy {
mplace: MemPlace::from_scalar_ptr(
Scalar::from_uint(layout.align.abi(), cx.pointer_size()),
Scalar::from_uint(layout.align.abi.bytes(), cx.pointer_size()),
layout.align
),
layout
@ -368,10 +370,10 @@ where
};
let ptr = base.ptr.ptr_offset(offset, self)?;
let align = base.align
let align = AbiAndPrefAlign::new(base.align.abi
// We do not look at `base.layout.align` nor `field_layout.align`, unlike
// codegen -- mostly to see if we can get away with that
.restrict_for_offset(offset); // must be last thing that happens
.restrict_for_offset(offset)); // must be last thing that happens
Ok(MPlaceTy { mplace: MemPlace { ptr, align, meta }, layout: field_layout })
}
@ -998,7 +1000,8 @@ where
if cfg!(debug_assertions) {
let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
assert_eq!(size, layout.size);
assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved
// only ABI alignment is preserved
assert_eq!(align.abi, layout.align.abi);
}
let mplace = MPlaceTy {

View file

@ -9,7 +9,7 @@
// except according to those terms.
use rustc::ty::{self, Ty};
use rustc::ty::layout::{Size, AbiAndPrefAlign, LayoutOf};
use rustc::ty::layout::{Size, Align, AbiAndPrefAlign, LayoutOf};
use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic};
use super::{EvalContext, Machine, MemoryKind};
@ -42,7 +42,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
let size = layout.size.bytes();
let align = layout.align.abi();
let align = layout.align.abi.bytes();
let ptr_size = self.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
@ -110,6 +110,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
vtable.offset(pointer_size * 2, self)?,
pointer_align
)?.to_bits(pointer_size)? as u64;
Ok((Size::from_bytes(size), AbiAndPrefAlign::from_bytes(align, align).unwrap()))
Ok((Size::from_bytes(size), AbiAndPrefAlign::new(Align::from_bytes(align).unwrap())))
}
}

View file

@ -13,7 +13,7 @@ use std::hash::Hash;
use std::ops::RangeInclusive;
use syntax_pos::symbol::Symbol;
use rustc::ty::layout::{self, Size, AbiAndPrefAlign, TyLayout, LayoutOf, VariantIdx};
use rustc::ty::layout::{self, Size, Align, AbiAndPrefAlign, TyLayout, LayoutOf, VariantIdx};
use rustc::ty;
use rustc_data_structures::fx::FxHashSet;
use rustc::mir::interpret::{
@ -463,7 +463,7 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
// for function pointers.
let non_null =
self.ecx.memory.check_align(
Scalar::Ptr(ptr), AbiAndPrefAlign::from_bytes(1, 1).unwrap()
Scalar::Ptr(ptr), AbiAndPrefAlign::new(Align::from_bytes(1).unwrap())
).is_ok() ||
self.ecx.memory.get_fn(ptr).is_ok();
if !non_null {

View file

@ -30,7 +30,7 @@ pub fn is_disaligned<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let ty = place.ty(local_decls, tcx).to_ty(tcx);
match tcx.layout_raw(param_env.and(ty)) {
Ok(layout) if layout.align.abi() == 1 => {
Ok(layout) if layout.align.abi.bytes() == 1 => {
// if the alignment is 1, the type can't be further
// disaligned.
debug!("is_disaligned({:?}) - align = 1", place);

View file

@ -93,7 +93,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, vfp: bool)
}
}
let align = arg.layout.align.abi();
let align = arg.layout.align.abi.bytes();
let total = arg.layout.size;
arg.cast_to(Uniform {
unit: if align <= 4 { Reg::i32() } else { Reg::i64() },

View file

@ -13,7 +13,7 @@
// need to be fixed when PowerPC vector support is added.
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{AbiAndPrefAlign, Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use abi::{Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use spec::HasTargetSpec;
#[derive(Debug, Clone, Copy, PartialEq)]
@ -120,8 +120,8 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
} else {
// Aggregates larger than a doubleword should be padded
// at the tail to fill out a whole number of doublewords.
let align = AbiAndPrefAlign::from_bits(64, 64).unwrap();
(Reg::i64(), size.abi_align(align))
let reg_i64 = Reg::i64();
(reg_i64, size.abi_align(reg_i64.align(cx)))
};
arg.cast_to(Uniform {

View file

@ -13,7 +13,7 @@ pub use self::Primitive::*;
use spec::Target;
use std::{cmp, fmt};
use std::fmt;
use std::ops::{Add, Deref, Sub, Mul, AddAssign, Range, RangeInclusive};
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
@ -45,22 +45,23 @@ pub struct TargetDataLayout {
impl Default for TargetDataLayout {
/// Creates an instance of `TargetDataLayout`.
fn default() -> TargetDataLayout {
let align = |bits| Align::from_bits(bits).unwrap();
TargetDataLayout {
endian: Endian::Big,
i1_align: AbiAndPrefAlign::from_bits(8, 8).unwrap(),
i8_align: AbiAndPrefAlign::from_bits(8, 8).unwrap(),
i16_align: AbiAndPrefAlign::from_bits(16, 16).unwrap(),
i32_align: AbiAndPrefAlign::from_bits(32, 32).unwrap(),
i64_align: AbiAndPrefAlign::from_bits(32, 64).unwrap(),
i128_align: AbiAndPrefAlign::from_bits(32, 64).unwrap(),
f32_align: AbiAndPrefAlign::from_bits(32, 32).unwrap(),
f64_align: AbiAndPrefAlign::from_bits(64, 64).unwrap(),
i1_align: AbiAndPrefAlign::new(align(8)),
i8_align: AbiAndPrefAlign::new(align(8)),
i16_align: AbiAndPrefAlign::new(align(16)),
i32_align: AbiAndPrefAlign::new(align(32)),
i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
f32_align: AbiAndPrefAlign::new(align(32)),
f64_align: AbiAndPrefAlign::new(align(64)),
pointer_size: Size::from_bits(64),
pointer_align: AbiAndPrefAlign::from_bits(64, 64).unwrap(),
aggregate_align: AbiAndPrefAlign::from_bits(0, 64).unwrap(),
pointer_align: AbiAndPrefAlign::new(align(64)),
aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
vector_align: vec![
(Size::from_bits(64), AbiAndPrefAlign::from_bits(64, 64).unwrap()),
(Size::from_bits(128), AbiAndPrefAlign::from_bits(128, 128).unwrap())
(Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
(Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
],
instruction_address_space: 0,
}
@ -95,11 +96,17 @@ impl TargetDataLayout {
if s.is_empty() {
return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
}
let align_from_bits = |bits| {
Align::from_bits(bits).map_err(|err| {
format!("invalid alignment for `{}` in \"data-layout\": {}",
cause, err)
})
};
let abi = parse_bits(s[0], "alignment", cause)?;
let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
AbiAndPrefAlign::from_bits(abi, pref).map_err(|err| {
format!("invalid alignment for `{}` in \"data-layout\": {}",
cause, err)
Ok(AbiAndPrefAlign {
abi: align_from_bits(abi)?,
pref: align_from_bits(pref)?,
})
};
@ -214,8 +221,7 @@ impl TargetDataLayout {
}
// Default to natural alignment, which is what LLVM does.
// That is, use the size, rounded up to a power of 2.
let align = vec_size.bytes().next_power_of_two();
AbiAndPrefAlign::from_bytes(align, align).unwrap()
AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
}
}
@ -272,13 +278,13 @@ impl Size {
#[inline]
pub fn abi_align(self, align: AbiAndPrefAlign) -> Size {
let mask = align.abi() - 1;
let mask = align.abi.bytes() - 1;
Size::from_bytes((self.bytes() + mask) & !mask)
}
#[inline]
pub fn is_abi_aligned(self, align: AbiAndPrefAlign) -> bool {
let mask = align.abi() - 1;
let mask = align.abi.bytes() - 1;
self.bytes() & mask == 0
}
@ -359,96 +365,92 @@ impl AddAssign for Size {
}
}
/// Alignments of a type in bytes, both ABI-mandated and preferred.
/// Each field is a power of two, giving the alignment a maximum value
/// of 2<sup>(2<sup>8</sup> - 1)</sup>, which is limited by LLVM to a
/// maximum capacity of 2<sup>29</sup> or 536870912.
#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct AbiAndPrefAlign {
abi_pow2: u8,
pref_pow2: u8,
/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct Align {
pow2: u8,
}
impl AbiAndPrefAlign {
pub fn from_bits(abi: u64, pref: u64) -> Result<AbiAndPrefAlign, String> {
AbiAndPrefAlign::from_bytes(Size::from_bits(abi).bytes(),
Size::from_bits(pref).bytes())
impl Align {
pub fn from_bits(bits: u64) -> Result<Align, String> {
Align::from_bytes(Size::from_bits(bits).bytes())
}
pub fn from_bytes(abi: u64, pref: u64) -> Result<AbiAndPrefAlign, String> {
let log2 = |align: u64| {
// Treat an alignment of 0 bytes like 1-byte alignment.
if align == 0 {
return Ok(0);
}
let mut bytes = align;
let mut pow: u8 = 0;
while (bytes & 1) == 0 {
pow += 1;
bytes >>= 1;
}
if bytes != 1 {
Err(format!("`{}` is not a power of 2", align))
} else if pow > 29 {
Err(format!("`{}` is too large", align))
} else {
Ok(pow)
}
};
Ok(AbiAndPrefAlign {
abi_pow2: log2(abi)?,
pref_pow2: log2(pref)?,
})
}
pub fn abi(self) -> u64 {
1 << self.abi_pow2
}
pub fn pref(self) -> u64 {
1 << self.pref_pow2
}
pub fn abi_bits(self) -> u64 {
self.abi() * 8
}
pub fn pref_bits(self) -> u64 {
self.pref() * 8
}
pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
AbiAndPrefAlign {
abi_pow2: cmp::min(self.abi_pow2, other.abi_pow2),
pref_pow2: cmp::min(self.pref_pow2, other.pref_pow2),
pub fn from_bytes(align: u64) -> Result<Align, String> {
// Treat an alignment of 0 bytes like 1-byte alignment.
if align == 0 {
return Ok(Align { pow2: 0 });
}
let mut bytes = align;
let mut pow2: u8 = 0;
while (bytes & 1) == 0 {
pow2 += 1;
bytes >>= 1;
}
if bytes != 1 {
return Err(format!("`{}` is not a power of 2", align));
}
if pow2 > 29 {
return Err(format!("`{}` is too large", align));
}
Ok(Align { pow2 })
}
pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
AbiAndPrefAlign {
abi_pow2: cmp::max(self.abi_pow2, other.abi_pow2),
pref_pow2: cmp::max(self.pref_pow2, other.pref_pow2),
}
pub fn bytes(self) -> u64 {
1 << self.pow2
}
pub fn bits(self) -> u64 {
self.bytes() * 8
}
/// Compute the best alignment possible for the given offset
/// (the largest power of two that the offset is a multiple of).
///
/// NB: for an offset of `0`, this happens to return `2^64`.
pub fn max_for_offset(offset: Size) -> AbiAndPrefAlign {
let pow2 = offset.bytes().trailing_zeros() as u8;
AbiAndPrefAlign {
abi_pow2: pow2,
pref_pow2: pow2,
pub fn max_for_offset(offset: Size) -> Align {
Align {
pow2: offset.bytes().trailing_zeros() as u8,
}
}
/// Lower the alignment, if necessary, such that the given offset
/// is aligned to it (the offset is a multiple of the alignment).
pub fn restrict_for_offset(self, offset: Size) -> AbiAndPrefAlign {
self.min(AbiAndPrefAlign::max_for_offset(offset))
pub fn restrict_for_offset(self, offset: Size) -> Align {
self.min(Align::max_for_offset(offset))
}
}
/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
#[derive(PartialOrd, Ord)] // FIXME(eddyb) remove (error prone/incorrect)
pub struct AbiAndPrefAlign {
pub abi: Align,
pub pref: Align,
}
impl AbiAndPrefAlign {
pub fn new(align: Align) -> AbiAndPrefAlign {
AbiAndPrefAlign {
abi: align,
pref: align,
}
}
pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
AbiAndPrefAlign {
abi: self.abi.min(other.abi),
pref: self.pref.min(other.pref),
}
}
pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
AbiAndPrefAlign {
abi: self.abi.max(other.abi),
pref: self.pref.max(other.pref),
}
}
}
@ -511,9 +513,9 @@ impl Integer {
pub fn for_abi_align<C: HasDataLayout>(cx: &C, align: AbiAndPrefAlign) -> Option<Integer> {
let dl = cx.data_layout();
let wanted = align.abi();
let wanted = align.abi;
for &candidate in &[I8, I16, I32, I64, I128] {
if wanted == candidate.align(dl).abi() && wanted == candidate.size().bytes() {
if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
return Some(candidate);
}
}
@ -524,10 +526,10 @@ impl Integer {
pub fn approximate_abi_align<C: HasDataLayout>(cx: &C, align: AbiAndPrefAlign) -> Integer {
let dl = cx.data_layout();
let wanted = align.abi();
let wanted = align.abi;
// FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
for &candidate in &[I64, I32, I16] {
if wanted >= candidate.align(dl).abi() && wanted >= candidate.size().bytes() {
if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
return candidate;
}
}
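
Taken together, these layout changes turn the old accessor methods into explicit field access followed by a unit conversion, which is the rewrite repeated at every call site in this commit. A hypothetical helper showing the recurring before/after patterns (it assumes the `Align` and `AbiAndPrefAlign` definitions from the hunks above; `describe` and `one_byte` are illustrative names, not part of the commit):

// Illustrates the three recurring rewrites: abi() -> abi.bytes(),
// abi_bits() -> abi.bits(), pref() -> pref.bytes().
fn describe(align: AbiAndPrefAlign) -> String {
    format!("abi = {} bytes ({} bits), pref = {} bytes",
            align.abi.bytes(),   // was: align.abi()
            align.abi.bits(),    // was: align.abi_bits()
            align.pref.bytes())  // was: align.pref()
}

// Construction follows the same pattern: build an Align, then wrap it.
fn one_byte() -> AbiAndPrefAlign {
    // was: AbiAndPrefAlign::from_bytes(1, 1).unwrap()
    AbiAndPrefAlign::new(Align::from_bytes(1).unwrap())
}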

View file

@ -1779,7 +1779,7 @@ fn check_transparent<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: De
// We are currently checking the type this field came from, so it must be local
let span = tcx.hir.span_if_local(field.did).unwrap();
let zst = layout.map(|layout| layout.is_zst()).unwrap_or(false);
let align1 = layout.map(|layout| layout.align.abi() == 1).unwrap_or(false);
let align1 = layout.map(|layout| layout.align.abi.bytes() == 1).unwrap_or(false);
(span, zst, align1)
});
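
As a closing reference point, `Align::from_bytes` keeps the validation rules of the constructor it replaces: an alignment of 0 bytes is treated as 1-byte alignment, values that are not powers of two are rejected, and anything beyond the 2^29 cap mentioned in the old doc comment is rejected. A few illustrative assertions (hypothetical test code, assuming the `Align` type from the layout hunks above):

fn align_from_bytes_examples() {
    assert_eq!(Align::from_bytes(0).unwrap().bytes(), 1);  // 0 is treated as 1-byte alignment
    assert_eq!(Align::from_bytes(8).unwrap().bits(), 64);  // 8 bytes == 64 bits
    assert!(Align::from_bytes(24).is_err());                // not a power of two
    assert!(Align::from_bytes(1 << 30).is_err());           // exceeds the 2^29 cap
}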