Auto merge of #59780 - RalfJung:miri-unsized, r=oli-obk

Miri: unsized locals and by-value dyn traits

r? @oli-obk
Cc @eddyb

Fixes https://github.com/rust-lang/miri/issues/449
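For context, a sketch of the kind of code this change lets Miri execute (illustrative only; assumes a nightly toolchain of this era with the `unsized_locals` feature gate):

    #![feature(unsized_locals)]

    fn main() {
        // An unsized local of slice type: its size only becomes known at the
        // first (and only) write, when the slice is moved out of the Box.
        let s = *(vec![1, 2, 3].into_boxed_slice());
        assert_eq!(s.len(), 3);

        // A by-value `dyn` call: `FnOnce::call_once` takes `self`, so calling
        // the boxed closure moves the unsized `dyn FnOnce` value.
        let f: Box<dyn FnOnce() -> i32> = Box::new(|| 42);
        assert_eq!(f(), 42);
    }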
bors 2019-04-11 15:44:22 +00:00
commit 3de0106789
6 changed files with 192 additions and 153 deletions


@@ -108,34 +108,51 @@ pub enum StackPopCleanup {
 /// State of a local variable including a memoized layout
 #[derive(Clone, PartialEq, Eq)]
 pub struct LocalState<'tcx, Tag=(), Id=AllocId> {
-    pub state: LocalValue<Tag, Id>,
+    pub value: LocalValue<Tag, Id>,
     /// Don't modify if `Some`, this is only used to prevent computing the layout twice
     pub layout: Cell<Option<TyLayout<'tcx>>>,
 }

-/// State of a local variable
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+/// Current value of a local variable
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
 pub enum LocalValue<Tag=(), Id=AllocId> {
+    /// This local is not currently alive, and cannot be used at all.
     Dead,
-    // Mostly for convenience, we re-use the `Operand` type here.
-    // This is an optimization over just always having a pointer here;
-    // we can thus avoid doing an allocation when the local just stores
-    // immediate values *and* never has its address taken.
+    /// This local is alive but not yet initialized. It can be written to
+    /// but not read from or its address taken. Locals get initialized on
+    /// first write because for unsized locals, we do not know their size
+    /// before that.
+    Uninitialized,
+    /// A normal, live local.
+    /// Mostly for convenience, we re-use the `Operand` type here.
+    /// This is an optimization over just always having a pointer here;
+    /// we can thus avoid doing an allocation when the local just stores
+    /// immediate values *and* never has its address taken.
     Live(Operand<Tag, Id>),
 }

-impl<'tcx, Tag> LocalState<'tcx, Tag> {
-    pub fn access(&self) -> EvalResult<'tcx, &Operand<Tag>> {
-        match self.state {
+impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
+    pub fn access(&self) -> EvalResult<'tcx, Operand<Tag>> {
+        match self.value {
             LocalValue::Dead => err!(DeadLocal),
-            LocalValue::Live(ref val) => Ok(val),
+            LocalValue::Uninitialized =>
+                bug!("The type checker should prevent reading from a never-written local"),
+            LocalValue::Live(val) => Ok(val),
         }
     }

-    pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand<Tag>> {
-        match self.state {
+    /// Overwrite the local. If the local can be overwritten in place, return a reference
+    /// to do so; otherwise return the `MemPlace` to consult instead.
+    pub fn access_mut(
+        &mut self,
+    ) -> EvalResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
+        match self.value {
             LocalValue::Dead => err!(DeadLocal),
-            LocalValue::Live(ref mut val) => Ok(val),
+            LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
+            ref mut local @ LocalValue::Live(Operand::Immediate(_)) |
+            ref mut local @ LocalValue::Uninitialized => {
+                Ok(Ok(local))
+            }
         }
     }
 }
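The new three-state locals (`Dead`/`Uninitialized`/`Live`) and the `Ok`/`Err` split in `access_mut` can be modeled in isolation. A minimal standalone sketch (simplified stand-in types, not the real `Operand`/`EvalResult`):

    #[derive(Debug)]
    enum Local {
        Dead,
        Uninitialized,
        Live(Vec<u8>), // stand-in for `Operand`
    }

    impl Local {
        fn access(&self) -> Result<&[u8], &'static str> {
            match self {
                Local::Dead => Err("DeadLocal"),
                // Mirrors the `bug!`: the type checker rules out such reads.
                Local::Uninitialized => unreachable!("read from never-written local"),
                Local::Live(v) => Ok(v.as_slice()),
            }
        }

        fn write(&mut self, bytes: Vec<u8>) -> Result<(), &'static str> {
            match self {
                Local::Dead => Err("DeadLocal"),
                // Uninitialized and Live can both be overwritten in place;
                // this first write is where an unsized local's size is fixed.
                other => { *other = Local::Live(bytes); Ok(()) }
            }
        }
    }

    fn main() {
        assert!(Local::Dead.access().is_err());
        let mut x = Local::Uninitialized;
        x.write(vec![1, 2, 3]).unwrap();
        assert_eq!(x.access().unwrap().len(), 3);
    }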
@@ -327,6 +344,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
             let local_ty = self.monomorphize_with_substs(local_ty, frame.instance.substs);
             self.layout_of(local_ty)
         })?;
+        // Layouts of locals are requested a lot, so we cache them.
         frame.locals[local].layout.set(Some(layout));
         Ok(layout)
     }
@@ -473,19 +491,15 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
         // don't allocate at all for trivial constants
         if mir.local_decls.len() > 1 {
-            // We put some marker immediate into the locals that we later want to initialize.
-            // This can be anything except for LocalValue::Dead -- because *that* is the
-            // value we use for things that we know are initially dead.
+            // Locals are initially uninitialized.
             let dummy = LocalState {
-                state: LocalValue::Live(Operand::Immediate(Immediate::Scalar(
-                    ScalarMaybeUndef::Undef,
-                ))),
+                value: LocalValue::Uninitialized,
                 layout: Cell::new(None),
             };
             let mut locals = IndexVec::from_elem(dummy, &mir.local_decls);
             // Return place is handled specially by the `eval_place` functions, and the
             // entry in `locals` should never be used. Make it dead, to be sure.
-            locals[mir::RETURN_PLACE].state = LocalValue::Dead;
+            locals[mir::RETURN_PLACE].value = LocalValue::Dead;
             // Now mark those locals as dead that we do not want to initialize
             match self.tcx.describe_def(instance.def_id()) {
                 // statics and constants don't have `Storage*` statements, no need to look for them
@@ -498,7 +512,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
                         match stmt.kind {
                             StorageLive(local) |
                             StorageDead(local) => {
-                                locals[local].state = LocalValue::Dead;
+                                locals[local].value = LocalValue::Dead;
                             }
                             _ => {}
                         }
@@ -506,21 +520,6 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
                     }
                 },
             }
-            // Finally, properly initialize all those that still have the dummy value
-            for (idx, local) in locals.iter_enumerated_mut() {
-                match local.state {
-                    LocalValue::Live(_) => {
-                        // This needs to be properly initialized.
-                        let ty = self.monomorphize(mir.local_decls[idx].ty)?;
-                        let layout = self.layout_of(ty)?;
-                        local.state = LocalValue::Live(self.uninit_operand(layout)?);
-                        local.layout = Cell::new(Some(layout));
-                    }
-                    LocalValue::Dead => {
-                        // Nothing to do
-                    }
-                }
-            }
             // done
             self.frame_mut().locals = locals;
         }
@@ -555,7 +554,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
         }
         // Deallocate all locals that are backed by an allocation.
         for local in frame.locals {
-            self.deallocate_local(local.state)?;
+            self.deallocate_local(local.value)?;
         }
         // Validate the return value. Do this after deallocating so that we catch dangling
         // references.
@@ -603,10 +602,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
         assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
         trace!("{:?} is now live", local);

-        let layout = self.layout_of_local(self.frame(), local, None)?;
-        let init = LocalValue::Live(self.uninit_operand(layout)?);
+        let local_val = LocalValue::Uninitialized;
         // StorageLive *always* kills the value that's currently stored
-        Ok(mem::replace(&mut self.frame_mut().locals[local].state, init))
+        Ok(mem::replace(&mut self.frame_mut().locals[local].value, local_val))
     }

     /// Returns the old value of the local.
@@ -615,7 +613,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
         assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
         trace!("{:?} is now dead", local);

-        mem::replace(&mut self.frame_mut().locals[local].state, LocalValue::Dead)
+        mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead)
     }

     pub(super) fn deallocate_local(
@@ -668,31 +666,31 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
                 }
                 write!(msg, ":").unwrap();

-                match self.stack[frame].locals[local].access() {
-                    Err(err) => {
-                        if let InterpError::DeadLocal = err.kind {
-                            write!(msg, " is dead").unwrap();
-                        } else {
-                            panic!("Failed to access local: {:?}", err);
-                        }
-                    }
-                    Ok(Operand::Indirect(mplace)) => {
-                        let (ptr, align) = mplace.to_scalar_ptr_align();
-                        match ptr {
+                match self.stack[frame].locals[local].value {
+                    LocalValue::Dead => write!(msg, " is dead").unwrap(),
+                    LocalValue::Uninitialized => write!(msg, " is uninitialized").unwrap(),
+                    LocalValue::Live(Operand::Indirect(mplace)) => {
+                        match mplace.ptr {
                             Scalar::Ptr(ptr) => {
-                                write!(msg, " by align({}) ref:", align.bytes()).unwrap();
+                                write!(msg, " by align({}){} ref:",
+                                    mplace.align.bytes(),
+                                    match mplace.meta {
+                                        Some(meta) => format!(" meta({:?})", meta),
+                                        None => String::new()
+                                    }
+                                ).unwrap();
                                 allocs.push(ptr.alloc_id);
                             }
                             ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
                         }
                     }
-                    Ok(Operand::Immediate(Immediate::Scalar(val))) => {
+                    LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
                         write!(msg, " {:?}", val).unwrap();
                         if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val {
                             allocs.push(ptr.alloc_id);
                         }
                     }
-                    Ok(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
+                    LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
                         write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
                         if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 {
                             allocs.push(ptr.alloc_id);


@@ -14,7 +14,7 @@ use rustc::mir::interpret::{
 };
 use super::{
     InterpretCx, Machine,
-    MemPlace, MPlaceTy, PlaceTy, Place, MemoryKind,
+    MemPlace, MPlaceTy, PlaceTy, Place,
 };
 pub use rustc::mir::interpret::ScalarMaybeUndef;
@@ -373,33 +373,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
         Ok(str)
     }

-    pub fn uninit_operand(
-        &mut self,
-        layout: TyLayout<'tcx>
-    ) -> EvalResult<'tcx, Operand<M::PointerTag>> {
-        // This decides which types we will use the Immediate optimization for, and hence should
-        // match what `try_read_immediate` and `eval_place_to_op` support.
-        if layout.is_zst() {
-            return Ok(Operand::Immediate(Immediate::Scalar(Scalar::zst().into())));
-        }
-
-        Ok(match layout.abi {
-            layout::Abi::Scalar(..) =>
-                Operand::Immediate(Immediate::Scalar(ScalarMaybeUndef::Undef)),
-            layout::Abi::ScalarPair(..) =>
-                Operand::Immediate(Immediate::ScalarPair(
-                    ScalarMaybeUndef::Undef,
-                    ScalarMaybeUndef::Undef,
-                )),
-            _ => {
-                trace!("Forcing allocation for local of type {:?}", layout.ty);
-                Operand::Indirect(
-                    *self.allocate(layout, MemoryKind::Stack)
-                )
-            }
-        })
-    }
-
     /// Projection functions
     pub fn operand_field(
         &self,
@@ -486,8 +459,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
         layout: Option<TyLayout<'tcx>>,
     ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
         assert_ne!(local, mir::RETURN_PLACE);
-        let op = *frame.locals[local].access()?;
         let layout = self.layout_of_local(frame, local, layout)?;
+        let op = if layout.is_zst() {
+            // Do not read from ZST, they might not be initialized
+            Operand::Immediate(Immediate::Scalar(Scalar::zst().into()))
+        } else {
+            frame.locals[local].access()?
+        };
         Ok(OpTy { op, layout })
     }
@@ -502,7 +480,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
                 Operand::Indirect(mplace)
             }
             Place::Local { frame, local } =>
-                *self.stack[frame].locals[local].access()?
+                *self.access_local(&self.stack[frame], local, None)?
         };
         Ok(OpTy { op, layout: place.layout })
     }
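The ZST special case in the `access_local` hunk above works because a zero-sized value has no bytes, so it can be synthesized without the local ever having been written. A standalone illustration (not interpreter code):

    // Any value of a zero-sized type occupies no storage, so "reading" one
    // does not require any stored bytes.
    fn conjure_zst<T>() -> T {
        assert_eq!(std::mem::size_of::<T>(), 0, "only sound for ZSTs");
        // SAFETY: a ZST has no bytes to initialize; this is valid for ZSTs
        // without extra invariants, such as `()`.
        unsafe { std::mem::zeroed() }
    }

    fn main() {
        let _unit: () = conjure_zst();
    }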


@@ -15,7 +15,7 @@ use rustc::ty::TypeFoldable;
 use super::{
     GlobalId, AllocId, Allocation, Scalar, EvalResult, Pointer, PointerArithmetic,
     InterpretCx, Machine, AllocMap, AllocationExtra,
-    RawConst, Immediate, ImmTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind
+    RawConst, Immediate, ImmTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind, LocalValue
 };

 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
@@ -639,6 +639,7 @@ where
                 None => return err!(InvalidNullPointerUsage),
             },
             Base(PlaceBase::Local(local)) => PlaceTy {
+                // This works even for dead/uninitialized locals; we check further when writing
                 place: Place::Local {
                     frame: self.cur_frame(),
                     local,
@@ -714,16 +715,19 @@ where
         // but not factored as a separate function.
         let mplace = match dest.place {
             Place::Local { frame, local } => {
-                match *self.stack[frame].locals[local].access_mut()? {
-                    Operand::Immediate(ref mut dest_val) => {
-                        // Yay, we can just change the local directly.
-                        *dest_val = src;
+                match self.stack[frame].locals[local].access_mut()? {
+                    Ok(local) => {
+                        // Local can be updated in-place.
+                        *local = LocalValue::Live(Operand::Immediate(src));
                         return Ok(());
-                    },
-                    Operand::Indirect(mplace) => mplace, // already in memory
+                    }
+                    Err(mplace) => {
+                        // The local is in memory, go on below.
+                        mplace
+                    }
                 }
             },
-            Place::Ptr(mplace) => mplace, // already in memory
+            Place::Ptr(mplace) => mplace, // already referring to memory
         };
         let dest = MPlaceTy { mplace, layout: dest.layout };
@@ -822,8 +826,6 @@ where
         src: OpTy<'tcx, M::PointerTag>,
         dest: PlaceTy<'tcx, M::PointerTag>,
     ) -> EvalResult<'tcx> {
-        debug_assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(),
-            "Cannot copy unsized data");
         // We do NOT compare the types for equality, because well-typed code can
         // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
         assert!(src.layout.details == dest.layout.details,
@@ -832,6 +834,7 @@ where
         // Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
         let src = match self.try_read_immediate(src)? {
             Ok(src_val) => {
+                assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
                 // Yay, we got a value that we can write directly.
                 // FIXME: Add a check to make sure that if `src` is indirect,
                 // it does not overlap with `dest`.
@@ -842,13 +845,19 @@ where
         // Slow path, this does not fit into an immediate. Just memcpy.
         trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);

-        let dest = self.force_allocation(dest)?;
-        let (src_ptr, src_align) = src.to_scalar_ptr_align();
-        let (dest_ptr, dest_align) = dest.to_scalar_ptr_align();
+        // This interprets `src.meta` with the `dest` local's layout, if an unsized local
+        // is being initialized!
+        let (dest, size) = self.force_allocation_maybe_sized(dest, src.meta)?;
+        let size = size.unwrap_or_else(|| {
+            assert!(!dest.layout.is_unsized(),
+                "Cannot copy into already initialized unsized place");
+            dest.layout.size
+        });
+        assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");
         self.memory.copy(
-            src_ptr, src_align,
-            dest_ptr, dest_align,
-            dest.layout.size,
+            src.ptr, src.align,
+            dest.ptr, dest.align,
+            size,
             /*nonoverlapping*/ true,
         )?;
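The `size` recovered from `src.meta` above is what makes the unsized memcpy possible. A rough standalone model (invented `Meta` type, not the interpreter's) of how pointer metadata determines the byte count:

    // For slices the metadata is the element count; for trait objects the
    // size comes from the vtable. `None` means the type is statically sized.
    enum Meta {
        SliceLen(usize),
        VtableSize(usize),
    }

    fn byte_size(meta: Option<&Meta>, static_or_elem_size: usize) -> usize {
        match meta {
            None => static_or_elem_size,                        // layout.size
            Some(Meta::SliceLen(n)) => n * static_or_elem_size, // [T] data bytes
            Some(Meta::VtableSize(s)) => *s,                    // dyn Trait
        }
    }

    fn main() {
        assert_eq!(byte_size(Some(&Meta::SliceLen(3)), 4), 12); // [i32] of length 3
        assert_eq!(byte_size(None, 8), 8);                      // u64
    }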
@@ -866,11 +875,13 @@ where
             // Fast path: Just use normal `copy_op`
             return self.copy_op(src, dest);
         }
-        // We still require the sizes to match
-        debug_assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(),
-            "Cannot copy unsized data");
+        // We still require the sizes to match.
         assert!(src.layout.size == dest.layout.size,
             "Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest);
+        // Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want
+        // to avoid that here.
+        assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(),
+            "Cannot transmute unsized data");

         // The hard case is `ScalarPair`. `src` is already read from memory in this case,
         // using `src.layout` to figure out which bytes to use for the 1st and 2nd field.
@@ -898,39 +909,70 @@ where
     /// If the place currently refers to a local that doesn't yet have a matching allocation,
     /// create such an allocation.
     /// This is essentially `force_to_memplace`.
-    pub fn force_allocation(
+    ///
+    /// This supports unsized types and returns the computed size to avoid some
+    /// redundant computation when copying; use `force_allocation` for a simpler, sized-only
+    /// version.
+    pub fn force_allocation_maybe_sized(
         &mut self,
         place: PlaceTy<'tcx, M::PointerTag>,
-    ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
-        let mplace = match place.place {
+        meta: Option<Scalar<M::PointerTag>>,
+    ) -> EvalResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
+        let (mplace, size) = match place.place {
             Place::Local { frame, local } => {
-                match *self.stack[frame].locals[local].access()? {
-                    Operand::Indirect(mplace) => mplace,
-                    Operand::Immediate(value) => {
+                match self.stack[frame].locals[local].access_mut()? {
+                    Ok(local_val) => {
                         // We need to make an allocation.
                         // FIXME: Consider not doing anything for a ZST, and just returning
                         // a fake pointer? Are we even called for ZST?
+                        // We cannot hold on to the reference `local_val` while allocating,
+                        // but we can hold on to the value in there.
+                        let old_val =
+                            if let LocalValue::Live(Operand::Immediate(value)) = *local_val {
+                                Some(value)
+                            } else {
+                                None
+                            };
                         // We need the layout of the local. We can NOT use the layout we got,
                         // that might e.g., be an inner field of a struct with `Scalar` layout,
                         // that has different alignment than the outer field.
+                        // We also need to support unsized types, and hence cannot use `allocate`.
                         let local_layout = self.layout_of_local(&self.stack[frame], local, None)?;
-                        let ptr = self.allocate(local_layout, MemoryKind::Stack);
-                        // We don't have to validate as we can assume the local
-                        // was already valid for its type.
-                        self.write_immediate_to_mplace_no_validate(value, ptr)?;
-                        let mplace = ptr.mplace;
-                        // Update the local
-                        *self.stack[frame].locals[local].access_mut()? =
-                            Operand::Indirect(mplace);
-                        mplace
+                        let (size, align) = self.size_and_align_of(meta, local_layout)?
+                            .expect("Cannot allocate for non-dyn-sized type");
+                        let ptr = self.memory.allocate(size, align, MemoryKind::Stack);
+                        let ptr = M::tag_new_allocation(self, ptr, MemoryKind::Stack);
+                        let mplace = MemPlace { ptr: ptr.into(), align, meta };
+                        if let Some(value) = old_val {
+                            // Preserve old value.
+                            // We don't have to validate as we can assume the local
+                            // was already valid for its type.
+                            let mplace = MPlaceTy { mplace, layout: local_layout };
+                            self.write_immediate_to_mplace_no_validate(value, mplace)?;
+                        }
+                        // Now we can call `access_mut` again, asserting it goes well,
+                        // and actually overwrite things.
+                        *self.stack[frame].locals[local].access_mut().unwrap().unwrap() =
+                            LocalValue::Live(Operand::Indirect(mplace));
+                        (mplace, Some(size))
                     }
+                    Err(mplace) => (mplace, None), // this already was an indirect local
                 }
             }
-            Place::Ptr(mplace) => mplace
+            Place::Ptr(mplace) => (mplace, None)
         };
         // Return with the original layout, so that the caller can go on
-        Ok(MPlaceTy { mplace, layout: place.layout })
+        Ok((MPlaceTy { mplace, layout: place.layout }, size))
+    }
+
+    #[inline(always)]
+    pub fn force_allocation(
+        &mut self,
+        place: PlaceTy<'tcx, M::PointerTag>,
+    ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        Ok(self.force_allocation_maybe_sized(place, None)?.0)
     }

     pub fn allocate(
@@ -938,15 +980,9 @@ where
         layout: TyLayout<'tcx>,
         kind: MemoryKind<M::MemoryKinds>,
     ) -> MPlaceTy<'tcx, M::PointerTag> {
-        if layout.is_unsized() {
-            assert!(self.tcx.features().unsized_locals, "cannot alloc memory for unsized type");
-            // FIXME: What should we do here? We should definitely also tag!
-            MPlaceTy::dangling(layout, self)
-        } else {
-            let ptr = self.memory.allocate(layout.size, layout.align.abi, kind);
-            let ptr = M::tag_new_allocation(self, ptr, kind);
-            MPlaceTy::from_aligned_ptr(ptr, layout)
-        }
+        let ptr = self.memory.allocate(layout.size, layout.align.abi, kind);
+        let ptr = M::tag_new_allocation(self, ptr, kind);
+        MPlaceTy::from_aligned_ptr(ptr, layout)
     }

     pub fn write_discriminant_index(
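A subtlety in the `force_allocation_maybe_sized` hunk above: the `&mut` handed out by `access_mut` cannot be held across the allocation, because allocating also borrows the interpreter mutably. Hence the immediate is copied out first and the local overwritten afterwards. A toy standalone model of that pattern (invented names):

    struct Interp {
        local: Option<u64>, // Some(v): immediate value; None: uninitialized
        memory: Vec<u64>,   // stand-in for the allocator
    }

    impl Interp {
        fn allocate(&mut self, init: u64) -> usize {
            self.memory.push(init);
            self.memory.len() - 1
        }

        fn force_allocation(&mut self) -> usize {
            // Copy the immediate out instead of keeping a reference to it
            // alive across the `self.allocate(..)` call below.
            let old_val = self.local.take();
            self.allocate(old_val.unwrap_or(0))
        }
    }

    fn main() {
        let mut interp = Interp { local: Some(7), memory: Vec::new() };
        let idx = interp.force_allocation();
        assert_eq!(interp.memory[idx], 7);
    }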


@@ -114,10 +114,11 @@ macro_rules! impl_snapshot_for {
             fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item {
                 match *self {
                     $(
-                        $enum_name::$variant $( ( $(ref $field),* ) )? =>
+                        $enum_name::$variant $( ( $(ref $field),* ) )? => {
                             $enum_name::$variant $(
-                                ( $( __impl_snapshot_field!($field, __ctx $(, $delegate)?) ),* ),
+                                ( $( __impl_snapshot_field!($field, __ctx $(, $delegate)?) ),* )
                             )?
+                        }
                     )*
                 }
             }
@@ -250,11 +251,13 @@ impl_snapshot_for!(enum Operand {

 impl_stable_hash_for!(enum crate::interpret::LocalValue {
     Dead,
+    Uninitialized,
     Live(x),
 });
 impl_snapshot_for!(enum LocalValue {
-    Live(v),
     Dead,
+    Uninitialized,
+    Live(v),
 });

 impl<'a, Ctx> Snapshot<'a, Ctx> for Relocations
@@ -360,13 +363,13 @@ impl<'a, 'tcx, Ctx> Snapshot<'a, Ctx> for &'a LocalState<'tcx>
     type Item = LocalValue<(), AllocIdSnapshot<'a>>;

     fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
-        let LocalState { state, layout: _ } = self;
-        state.snapshot(ctx)
+        let LocalState { value, layout: _ } = self;
+        value.snapshot(ctx)
     }
 }

 impl_stable_hash_for!(struct LocalState<'tcx> {
-    state,
+    value,
     layout -> _,
 });


@@ -315,12 +315,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
         );

         // Figure out how to pass which arguments.
-        // We have two iterators: Where the arguments come from,
-        // and where they go to.
+        // The Rust ABI is special: ZST get skipped.
         let rust_abi = match caller_abi {
             Abi::Rust | Abi::RustCall => true,
             _ => false
         };

+        // We have two iterators: Where the arguments come from,
+        // and where they go to.
         // For where they come from: If the ABI is RustCall, we untuple the
         // last incoming argument. These two iterators do not have the same type,
@@ -368,7 +369,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
             }
             // Now we should have no more caller args
             if caller_iter.next().is_some() {
-                trace!("Caller has too many args over");
+                trace!("Caller has passed too many args");
                 return err!(FunctionArgCountMismatch);
             }
             // Don't forget to check the return type!
@@ -406,9 +407,24 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
             }
             // cannot use the shim here, because that will only result in infinite recursion
             ty::InstanceDef::Virtual(_, idx) => {
+                let mut args = args.to_vec();
                 let ptr_size = self.pointer_size();
-                let ptr = self.deref_operand(args[0])?;
-                let vtable = ptr.vtable()?;
+                // We have to implement all "object safe receivers". Currently we
+                // support built-in pointers (&, &mut, Box) as well as unsized-self. We do
+                // not yet support custom self types.
+                // Also see librustc_codegen_llvm/abi.rs and librustc_codegen_llvm/mir/block.rs.
+                let receiver_place = match args[0].layout.ty.builtin_deref(true) {
+                    Some(_) => {
+                        // Built-in pointer.
+                        self.deref_operand(args[0])?
+                    }
+                    None => {
+                        // Unsized self.
+                        args[0].to_mem_place()
+                    }
+                };
+                // Find and consult vtable
+                let vtable = receiver_place.vtable()?;
                 self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
                 let fn_ptr = self.memory.get(vtable.alloc_id)?.read_ptr_sized(
                     self,
@@ -416,15 +432,16 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
                 )?.to_ptr()?;
                 let instance = self.memory.get_fn(fn_ptr)?;

-                // We have to patch the self argument, in particular get the layout
-                // expected by the actual function. Cannot just use "field 0" due to
-                // Box<self>.
-                let mut args = args.to_vec();
-                let pointee = args[0].layout.ty.builtin_deref(true).unwrap().ty;
-                let fake_fat_ptr_ty = self.tcx.mk_mut_ptr(pointee);
-                args[0] = OpTy::from(ImmTy { // strip vtable
-                    layout: self.layout_of(fake_fat_ptr_ty)?.field(self, 0)?,
-                    imm: Immediate::Scalar(ptr.ptr.into())
+                // `*mut receiver_place.layout.ty` is almost the layout that we
+                // want for args[0]: We have to project to field 0 because we want
+                // a thin pointer.
+                assert!(receiver_place.layout.is_unsized());
+                let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
+                let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0)?;
+                // Adjust receiver argument.
+                args[0] = OpTy::from(ImmTy {
+                    layout: this_receiver_ptr,
+                    imm: Immediate::Scalar(receiver_place.ptr.into())
                 });
                 trace!("Patched self operand to {:#?}", args[0]);
                 // recurse with concrete function
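The receiver handling above covers two surface-level call shapes. A sketch of both (illustrative; the by-value case again assumes the era's `unsized_locals` nightly feature):

    #![feature(unsized_locals)]

    trait Greet {
        fn by_ref(&self) -> &'static str; // built-in pointer receiver: deref to find the vtable
        fn by_val(self) -> &'static str;  // unsized self: the argument itself is the mem-place
    }

    struct G;
    impl Greet for G {
        fn by_ref(&self) -> &'static str { "ref" }
        fn by_val(self) -> &'static str { "val" }
    }

    fn main() {
        let g: Box<dyn Greet> = Box::new(G);
        assert_eq!(g.by_ref(), "ref"); // `deref_operand` path
        assert_eq!(g.by_val(), "val"); // `to_mem_place` path: moves `dyn Greet` out of the Box
    }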


@@ -3,7 +3,7 @@ use rustc::ty::{self, Ty};
 use rustc::ty::layout::{Size, Align, LayoutOf};
 use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic};

-use super::{InterpretCx, Machine, MemoryKind};
+use super::{InterpretCx, InterpError, Machine, MemoryKind};

 impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> {
     /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
@@ -76,7 +76,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
         for (i, method) in methods.iter().enumerate() {
             if let Some((def_id, substs)) = *method {
-                let instance = self.resolve(def_id, substs)?;
+                // resolve for vtable: insert shims where needed
+                let substs = self.subst_and_normalize_erasing_regions(substs)?;
+                let instance = ty::Instance::resolve_for_vtable(
+                    *self.tcx,
+                    self.param_env,
+                    def_id,
+                    substs,
+                ).ok_or_else(|| InterpError::TooGeneric)?;
                 let fn_ptr = self.memory.create_fn_alloc(instance).with_default_tag();
                 let method_ptr = vtable.offset(ptr_size * (3 + i as u64), self)?;
                 self.memory
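For reference, the `ptr_size * (3 + i)` offset matches the vtable layout rustc uses here: three pointer-sized header slots, then one slot per method:

    [ drop_in_place | size | align | method 0 | method 1 | ... ]

so method `i` lives at byte offset `ptr_size * (3 + i)`, and `resolve_for_vtable` (unlike plain `resolve`) inserts the shims those method slots need, as the comment in the hunk notes.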