Move mir validation out of tree

Oliver Schneider 2017-12-14 11:36:28 +01:00
parent acac58502b
commit 1ba46dc378
GPG key ID: A69F8D225B3AD7D9
14 changed files with 61 additions and 1498 deletions

src/Cargo.lock

@@ -961,11 +961,6 @@ name = "lazy_static"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lazy_static"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lazycell"
version = "0.5.1"
@@ -1625,7 +1620,6 @@ dependencies = [
"jobserver 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_apfloat 0.0.0",
"rustc_back 0.0.0",
"rustc_const_math 0.0.0",
@@ -1868,10 +1862,8 @@ dependencies = [
"bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"graphviz 0.0.0",
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc_apfloat 0.0.0",
"rustc_const_eval 0.0.0",
@@ -2771,7 +2763,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum kuchiki 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e03098e8e719c92b7794515dfd5c1724e2b12f5ce1788e61cfa4663f82eba8d8"
"checksum languageserver-types 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "773e175c945800aeea4c21c04090bcb9db987b1a566ad9c6f569972299950e3e"
"checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73"
"checksum lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c8f31047daa365f19be14b47c29df4f7c3b581832407daabe6ae77397619237d"
"checksum lazycell 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3b585b7a6811fb03aa10e74b278a0f00f8dd9b45dc681f148bb29fa5cb61859b"
"checksum libc 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)" = "36fbc8a8929c632868295d0178dd8f63fc423fd7537ad0738372bd010b3ac9b0"
"checksum libgit2-sys 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)" = "6f74b4959cef96898f5123148724fc7dee043b9a6b99f219d948851bfbe53cb2"


@@ -24,7 +24,6 @@ rustc_errors = { path = "../librustc_errors" }
serialize = { path = "../libserialize" }
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
regex = "0.2.2"
backtrace = "0.3.3"
byteorder = { version = "1.1", features = ["i128"]}


@@ -92,7 +92,6 @@ extern crate serialize as rustc_serialize; // used by deriving
extern crate rustc_apfloat;
extern crate byteorder;
extern crate regex;
extern crate backtrace;
// Note that librustc doesn't actually depend on these crates, see the note in


@@ -13,7 +13,6 @@ bitflags = "1.0"
graphviz = { path = "../libgraphviz" }
log = "0.3"
log_settings = "0.1.1"
lazy_static = "1.0"
rustc = { path = "../librustc" }
rustc_const_eval = { path = "../librustc_const_eval" }
rustc_const_math = { path = "../librustc_const_math" }
@@ -23,5 +22,4 @@ serialize = { path = "../libserialize" }
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
byteorder = { version = "1.1", features = ["i128"] }
regex = "0.2"
rustc_apfloat = { path = "../librustc_apfloat" }


@@ -1,4 +1,4 @@
use std::collections::{HashMap, HashSet};
use std::collections::HashSet;
use std::fmt::Write;
use rustc::hir::def_id::DefId;
@@ -13,13 +13,13 @@ use rustc_data_structures::indexed_vec::Idx;
use syntax::codemap::{self, DUMMY_SP};
use syntax::ast::Mutability;
use rustc::mir::interpret::{
PtrAndAlign, DynamicLifetime, GlobalId, Value, Pointer, PrimVal, PrimValKind,
PtrAndAlign, GlobalId, Value, Pointer, PrimVal, PrimValKind,
EvalError, EvalResult, EvalErrorKind, MemoryPointer,
};
use super::{Place, PlaceExtra, Memory,
HasMemory, MemoryKind, operator,
ValidationQuery, Machine};
Machine};
pub struct EvalContext<'a, 'tcx: 'a, M: Machine<'tcx>> {
/// Stores the `Machine` instance.
@@ -34,9 +34,6 @@ pub struct EvalContext<'a, 'tcx: 'a, M: Machine<'tcx>> {
/// The virtual memory system.
pub memory: Memory<'a, 'tcx, M>,
/// Places that were suspended by the validation subsystem, and will be recovered later
pub(crate) suspended: HashMap<DynamicLifetime, Vec<ValidationQuery<'tcx>>>,
/// The virtual call stack.
pub(crate) stack: Vec<Frame<'tcx>>,
@@ -203,7 +200,6 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
tcx,
param_env,
memory: Memory::new(tcx, limits.memory_size, memory_data),
suspended: HashMap::new(),
stack: Vec::new(),
stack_limit: limits.stack_limit,
steps_remaining: limits.step_limit,
@@ -471,7 +467,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
::log_settings::settings().indentation -= 1;
self.end_region(None)?;
M::end_region(self, None)?;
let frame = self.stack.pop().expect(
"tried to pop a stack frame, but there were none",
);
@@ -996,7 +992,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
/// ensures this Value is not a ByRef
pub(super) fn follow_by_ref_value(
pub fn follow_by_ref_value(
&self,
value: Value,
ty: Ty<'tcx>,
@@ -1396,7 +1392,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
self.stack.last().expect("no call frames exist")
}
pub(super) fn frame_mut(&mut self) -> &mut Frame<'tcx> {
pub fn frame_mut(&mut self) -> &mut Frame<'tcx> {
self.stack.last_mut().expect("no call frames exist")
}
@@ -1404,7 +1400,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
self.frame().mir
}
pub(super) fn substs(&self) -> &'tcx Substs<'tcx> {
pub fn substs(&self) -> &'tcx Substs<'tcx> {
if let Some(frame) = self.stack.last() {
frame.instance.substs
} else {


@@ -2,8 +2,8 @@
//! This separation exists to ensure that no fancy miri features like
//! interpreting common C functions leak into CTFE.
use rustc::mir::interpret::{EvalResult, PrimVal};
use super::{EvalContext, Place, ValTy};
use rustc::mir::interpret::{EvalResult, PrimVal, MemoryPointer, AccessKind};
use super::{EvalContext, Place, ValTy, Memory};
use rustc::mir;
use rustc::ty::{self, Ty};
@@ -77,4 +77,41 @@ pub trait Machine<'tcx>: Sized {
instance: ty::Instance<'tcx>,
mutability: Mutability,
) -> EvalResult<'tcx>;
fn check_locks<'a>(
_mem: &Memory<'a, 'tcx, Self>,
_ptr: MemoryPointer,
_size: u64,
_access: AccessKind,
) -> EvalResult<'tcx> {
Ok(())
}
fn add_lock<'a>(
_mem: &mut Memory<'a, 'tcx, Self>,
_id: u64,
) {}
fn free_lock<'a>(
_mem: &mut Memory<'a, 'tcx, Self>,
_id: u64,
_len: u64,
) -> EvalResult<'tcx> {
Ok(())
}
fn end_region<'a>(
_ecx: &mut EvalContext<'a, 'tcx, Self>,
_reg: Option<::rustc::middle::region::Scope>,
) -> EvalResult<'tcx> {
Ok(())
}
fn validation_op<'a>(
_ecx: &mut EvalContext<'a, 'tcx, Self>,
_op: ::rustc::mir::ValidationOp,
_operand: &::rustc::mir::ValidationOperand<'tcx, ::rustc::mir::Place<'tcx>>,
) -> EvalResult<'tcx> {
Ok(())
}
}
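The defaulted methods added to `Machine` above are the hook points that let an out-of-tree interpreter (such as miri) plug lock tracking and validation back in, while the in-tree CTFE machine simply inherits the no-op defaults. Below is a minimal, self-contained sketch of that pattern; the names and signatures (`EvalResult`, `Machine`, `CompileTimeEvaluator`, `LockingMachine`) are simplified stand-ins, not the real rustc or miri definitions.

// Sketch only: defaulted machine hooks, with one machine taking the
// defaults and one overriding them.
type EvalResult<T = ()> = Result<T, String>;

trait Machine: Sized {
    // The interpreter core calls these; the defaults do nothing,
    // which is all constant evaluation needs.
    fn check_locks(_ptr: u64, _size: u64) -> EvalResult {
        Ok(())
    }
    fn end_region(_scope: Option<u32>) -> EvalResult {
        Ok(())
    }
}

// In-tree constant evaluator: just takes the defaults.
struct CompileTimeEvaluator;
impl Machine for CompileTimeEvaluator {}

// Out-of-tree machine: overrides the hooks to do real work.
struct LockingMachine;
impl Machine for LockingMachine {
    fn check_locks(ptr: u64, size: u64) -> EvalResult {
        if size == 0 {
            return Ok(());
        }
        // A real validator would consult its lock map here.
        println!("checking {} bytes at offset {:#x}", size, ptr);
        Ok(())
    }
}

fn main() -> EvalResult {
    CompileTimeEvaluator::check_locks(0x10, 4)?;
    LockingMachine::check_locks(0x10, 4)?;
    LockingMachine::end_region(Some(3))
}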


@@ -6,77 +6,11 @@ use std::cell::Cell;
use rustc::ty::{Instance, TyCtxt};
use rustc::ty::layout::{self, TargetDataLayout};
use syntax::ast::Mutability;
use rustc::middle::region;
use rustc::mir::interpret::{MemoryPointer, AllocId, Allocation, AccessKind, UndefMask, PtrAndAlign, Value, DynamicLifetime, Pointer,
use rustc::mir::interpret::{MemoryPointer, AllocId, Allocation, AccessKind, UndefMask, PtrAndAlign, Value, Pointer,
EvalResult, PrimVal, EvalErrorKind};
use super::{EvalContext, Machine, RangeMap, AbsPlace};
////////////////////////////////////////////////////////////////////////////////
// Locks
////////////////////////////////////////////////////////////////////////////////
/// Information about a lock that is currently held.
#[derive(Clone, Debug)]
struct LockInfo<'tcx> {
/// Stores for which lifetimes (of the original write lock) we got
/// which suspensions.
suspended: HashMap<WriteLockId<'tcx>, Vec<region::Scope>>,
/// The current state of the lock that's actually effective.
active: Lock,
}
/// Write locks are identified by a stack frame and an "abstract" (untyped) place.
/// It may be tempting to use the lifetime as identifier, but that does not work
/// for two reasons:
/// * First of all, due to subtyping, the same lock may be referred to with different
/// lifetimes.
/// * Secondly, different write locks may actually have the same lifetime. See `test2`
/// in `run-pass/many_shr_bor.rs`.
/// The Id is "captured" when the lock is first suspended; at that point, the borrow checker
/// considers the path frozen and hence the Id remains stable.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct WriteLockId<'tcx> {
frame: usize,
path: AbsPlace<'tcx>,
}
use rustc::mir::interpret::Lock::*;
use rustc::mir::interpret::Lock;
impl<'tcx> Default for LockInfo<'tcx> {
fn default() -> Self {
LockInfo::new(NoLock)
}
}
impl<'tcx> LockInfo<'tcx> {
fn new(lock: Lock) -> LockInfo<'tcx> {
LockInfo {
suspended: HashMap::new(),
active: lock,
}
}
fn access_permitted(&self, frame: Option<usize>, access: AccessKind) -> bool {
use self::AccessKind::*;
match (&self.active, access) {
(&NoLock, _) => true,
(&ReadLock(ref lfts), Read) => {
assert!(!lfts.is_empty(), "Someone left an empty read lock behind.");
// Read access to read-locked region is okay, no matter who's holding the read lock.
true
}
(&WriteLock(ref lft), _) => {
// All access is okay if we are the ones holding it
Some(lft.frame) == frame
}
_ => false, // Nothing else is okay.
}
}
}
use super::{EvalContext, Machine};
////////////////////////////////////////////////////////////////////////////////
// Allocations and pointers
@@ -123,36 +57,9 @@ pub struct Memory<'a, 'tcx: 'a, M: Machine<'tcx>> {
writes_are_aligned: Cell<bool>,
/// The current stack frame. Used to check accesses against locks.
pub(super) cur_frame: usize,
pub cur_frame: usize,
pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
/// Memory regions that are locked by some function
///
/// Only mutable (static mut, heap, stack) allocations have an entry in this map.
/// The entry is created when allocating the memory and deleted after deallocation.
locks: HashMap<u64, RangeMap<LockInfo<'tcx>>>,
}
impl<'tcx> RangeMap<LockInfo<'tcx>> {
fn check(
&self,
frame: Option<usize>,
offset: u64,
len: u64,
access: AccessKind,
) -> Result<(), LockInfo<'tcx>> {
if len == 0 {
return Ok(());
}
for lock in self.iter(offset, len) {
// Check if the lock is in conflict with the access.
if !lock.access_permitted(frame, access) {
return Err(lock.clone());
}
}
Ok(())
}
}
impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
@@ -168,7 +75,6 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
reads_are_aligned: Cell::new(true),
writes_are_aligned: Cell::new(true),
cur_frame: usize::max_value(),
locks: HashMap::new(),
}
}
@@ -214,7 +120,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
align,
};
let id = self.tcx.interpret_interner.borrow_mut().reserve();
self.locks.insert(id, RangeMap::new());
M::add_lock(self, id);
match kind {
Some(kind @ MemoryKind::Stack) |
Some(kind @ MemoryKind::Machine(_)) => {
@@ -320,21 +226,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
// However, we should check *something*. For now, we make sure that there is no conflicting write
// lock by another frame. We *have* to permit deallocation if we hold a read lock.
// TODO: Figure out the exact rules here.
self.locks
.remove(&ptr.alloc_id.0)
.expect("allocation has no corresponding locks")
.check(
Some(self.cur_frame),
0,
alloc.bytes.len() as u64,
AccessKind::Read,
)
.map_err(|lock| {
EvalErrorKind::DeallocatedLockedMemory {
ptr,
lock: lock.active,
}
})?;
M::free_lock(self, ptr.alloc_id.0, alloc.bytes.len() as u64)?;
if alloc_kind != kind {
return err!(DeallocatedWrongMemoryKind(
@@ -419,291 +311,6 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
}
}
/// Locking
impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
pub(crate) fn check_locks(
&self,
ptr: MemoryPointer,
len: u64,
access: AccessKind,
) -> EvalResult<'tcx> {
if len == 0 {
return Ok(());
}
let locks = match self.locks.get(&ptr.alloc_id.0) {
Some(locks) => locks,
// immutable static or other constant memory
None => return Ok(()),
};
let frame = self.cur_frame;
locks
.check(Some(frame), ptr.offset, len, access)
.map_err(|lock| {
EvalErrorKind::MemoryLockViolation {
ptr,
len,
frame,
access,
lock: lock.active,
}.into()
})
}
/// Acquire the lock for the given lifetime
pub(crate) fn acquire_lock(
&mut self,
ptr: MemoryPointer,
len: u64,
region: Option<region::Scope>,
kind: AccessKind,
) -> EvalResult<'tcx> {
let frame = self.cur_frame;
assert!(len > 0);
trace!(
"Frame {} acquiring {:?} lock at {:?}, size {} for region {:?}",
frame,
kind,
ptr,
len,
region
);
self.check_bounds(ptr.offset(len, &*self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
let locks = match self.locks.get_mut(&ptr.alloc_id.0) {
Some(locks) => locks,
// immutable static or other constant memory
None => return Ok(()),
};
// Iterate over our range and acquire the lock. If the range is already split into pieces,
// we have to manipulate all of them.
let lifetime = DynamicLifetime { frame, region };
for lock in locks.iter_mut(ptr.offset, len) {
if !lock.access_permitted(None, kind) {
return err!(MemoryAcquireConflict {
ptr,
len,
kind,
lock: lock.active.clone(),
});
}
// See what we have to do
match (&mut lock.active, kind) {
(active @ &mut NoLock, AccessKind::Write) => {
*active = WriteLock(lifetime);
}
(active @ &mut NoLock, AccessKind::Read) => {
*active = ReadLock(vec![lifetime]);
}
(&mut ReadLock(ref mut lifetimes), AccessKind::Read) => {
lifetimes.push(lifetime);
}
_ => bug!("We already checked that there is no conflicting lock"),
}
}
Ok(())
}
/// Release or suspend a write lock of the given lifetime prematurely.
/// When releasing, if there is a read lock or someone else's write lock, that's an error.
/// If no lock is held, that's fine. This can happen when e.g. a local is initialized
/// from a constant, and then suspended.
/// When suspending, the same cases are fine; we just register an additional suspension.
pub(crate) fn suspend_write_lock(
&mut self,
ptr: MemoryPointer,
len: u64,
lock_path: &AbsPlace<'tcx>,
suspend: Option<region::Scope>,
) -> EvalResult<'tcx> {
assert!(len > 0);
let cur_frame = self.cur_frame;
let locks = match self.locks.get_mut(&ptr.alloc_id.0) {
Some(locks) => locks,
// immutable static or other constant memory
None => return Ok(()),
};
'locks: for lock in locks.iter_mut(ptr.offset, len) {
let is_our_lock = match lock.active {
WriteLock(lft) =>
// Double-check that we are holding the lock.
// (Due to subtyping, checking the region would not make any sense.)
lft.frame == cur_frame,
ReadLock(_) | NoLock => false,
};
if is_our_lock {
trace!("Releasing {:?}", lock.active);
// Disable the lock
lock.active = NoLock;
} else {
trace!(
"Not touching {:?} as it is not our lock",
lock.active,
);
}
// Check if we want to register a suspension
if let Some(suspend_region) = suspend {
let lock_id = WriteLockId {
frame: cur_frame,
path: lock_path.clone(),
};
trace!("Adding suspension to {:?}", lock_id);
let mut new_suspension = false;
lock.suspended
.entry(lock_id)
// Remember whether we added a new suspension or not
.or_insert_with(|| { new_suspension = true; Vec::new() })
.push(suspend_region);
// If the suspension is new, we should have owned this.
// If there already was a suspension, we should NOT have owned this.
if new_suspension == is_our_lock {
// All is well
continue 'locks;
}
} else {
if !is_our_lock {
// All is well.
continue 'locks;
}
}
// If we get here, releasing this is an error except for NoLock.
if lock.active != NoLock {
return err!(InvalidMemoryLockRelease {
ptr,
len,
frame: cur_frame,
lock: lock.active.clone(),
});
}
}
Ok(())
}
/// Release a suspension from the write lock. If this is the last suspension or if there is no suspension, acquire the lock.
pub(crate) fn recover_write_lock(
&mut self,
ptr: MemoryPointer,
len: u64,
lock_path: &AbsPlace<'tcx>,
lock_region: Option<region::Scope>,
suspended_region: region::Scope,
) -> EvalResult<'tcx> {
assert!(len > 0);
let cur_frame = self.cur_frame;
let lock_id = WriteLockId {
frame: cur_frame,
path: lock_path.clone(),
};
let locks = match self.locks.get_mut(&ptr.alloc_id.0) {
Some(locks) => locks,
// immutable static or other constant memory
None => return Ok(()),
};
for lock in locks.iter_mut(ptr.offset, len) {
// Check if we have a suspension here
let (got_the_lock, remove_suspension) = match lock.suspended.get_mut(&lock_id) {
None => {
trace!("No suspension around, we can just acquire");
(true, false)
}
Some(suspensions) => {
trace!("Found suspension of {:?}, removing it", lock_id);
// That's us! Remove suspension (it should be in there). The same suspension can
// occur multiple times (when there are multiple shared borrows of this that have the same
// lifetime); only remove one of them.
let idx = match suspensions.iter().enumerate().find(|&(_, re)| re == &suspended_region) {
None => // TODO: Can the user trigger this?
bug!("We have this lock suspended, but not for the given region."),
Some((idx, _)) => idx
};
suspensions.remove(idx);
let got_lock = suspensions.is_empty();
if got_lock {
trace!("All suspensions are gone, we can have the lock again");
}
(got_lock, got_lock)
}
};
if remove_suspension {
// with NLL, we could do that up in the match above...
assert!(got_the_lock);
lock.suspended.remove(&lock_id);
}
if got_the_lock {
match lock.active {
ref mut active @ NoLock => {
*active = WriteLock(
DynamicLifetime {
frame: cur_frame,
region: lock_region,
}
);
}
_ => {
return err!(MemoryAcquireConflict {
ptr,
len,
kind: AccessKind::Write,
lock: lock.active.clone(),
})
}
}
}
}
Ok(())
}
pub(crate) fn locks_lifetime_ended(&mut self, ending_region: Option<region::Scope>) {
let cur_frame = self.cur_frame;
trace!(
"Releasing frame {} locks that expire at {:?}",
cur_frame,
ending_region
);
let has_ended = |lifetime: &DynamicLifetime| -> bool {
if lifetime.frame != cur_frame {
return false;
}
match ending_region {
None => true, // When a function ends, we end *all* its locks. It's okay for a function to still have lifetime-related locks
// when it returns, that can happen e.g. with NLL when a lifetime can, but does not have to, extend beyond the
// end of a function. Same for a function still having recoveries.
Some(ending_region) => lifetime.region == Some(ending_region),
}
};
for alloc_locks in self.locks.values_mut() {
for lock in alloc_locks.iter_mut_all() {
// Delete everything that ends now -- i.e., keep only all the other lifetimes.
let lock_ended = match lock.active {
WriteLock(ref lft) => has_ended(lft),
ReadLock(ref mut lfts) => {
lfts.retain(|lft| !has_ended(lft));
lfts.is_empty()
}
NoLock => false,
};
if lock_ended {
lock.active = NoLock;
}
// Also clean up suspended write locks when the function returns
if ending_region.is_none() {
lock.suspended.retain(|id, _suspensions| id.frame != cur_frame);
}
}
// Clean up the map
alloc_locks.retain(|lock| match lock.active {
NoLock => lock.suspended.len() > 0,
_ => true,
});
}
}
}
/// Allocation accessors
impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> {
@@ -882,7 +489,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
if size == 0 {
return Ok(&[]);
}
self.check_locks(ptr, size, AccessKind::Read)?;
M::check_locks(self, ptr, size, AccessKind::Read)?;
self.check_bounds(ptr.offset(size, self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
let alloc = self.get(ptr.alloc_id)?;
assert_eq!(ptr.offset as usize as u64, ptr.offset);
@@ -902,7 +509,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
if size == 0 {
return Ok(&mut []);
}
self.check_locks(ptr, size, AccessKind::Write)?;
M::check_locks(self, ptr, size, AccessKind::Write)?;
self.check_bounds(ptr.offset(size, &*self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
let alloc = self.get_mut(ptr.alloc_id)?;
assert_eq!(ptr.offset as usize as u64, ptr.offset);
@@ -1089,7 +696,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
return err!(ReadPointerAsBytes);
}
self.check_defined(ptr, (size + 1) as u64)?;
self.check_locks(ptr, (size + 1) as u64, AccessKind::Read)?;
M::check_locks(self, ptr, (size + 1) as u64, AccessKind::Read)?;
Ok(&alloc.bytes[offset..offset + size])
}
None => err!(UnterminatedCString(ptr)),
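With the lock table removed from `Memory`, every access check in the core is now routed through the machine type parameter, so a machine that keeps the default hooks pays nothing. A rough, self-contained sketch of that dispatch follows; `Memory`, `Machine`, and `EvalResult` here are simplified stand-ins with made-up signatures, not the real types from this diff.

// Sketch only: core memory code asking the machine before an access.
type EvalResult<T = ()> = Result<T, String>;

trait Machine: Sized {
    // Default hook: no lock tracking at all.
    fn check_locks(_mem: &Memory<Self>, _offset: u64, _size: u64) -> EvalResult {
        Ok(())
    }
}

struct Memory<M: Machine> {
    bytes: Vec<u8>,
    _machine: std::marker::PhantomData<M>,
}

impl<M: Machine> Memory<M> {
    fn get_bytes(&self, offset: u64, size: u64) -> EvalResult<&[u8]> {
        // The core no longer owns a lock map; it just asks the machine.
        M::check_locks(self, offset, size)?;
        let (start, end) = (offset as usize, (offset + size) as usize);
        self.bytes
            .get(start..end)
            .ok_or_else(|| "out-of-bounds read".to_string())
    }
}

// A machine that keeps the defaults, mirroring the CTFE case.
struct NoopMachine;
impl Machine for NoopMachine {}

fn main() {
    let mem = Memory::<NoopMachine> {
        bytes: vec![1, 2, 3, 4],
        _machine: std::marker::PhantomData,
    };
    assert_eq!(mem.get_bytes(1, 2).unwrap(), &[2u8, 3][..]);
}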


@@ -4,11 +4,9 @@ mod cast;
mod const_eval;
mod eval_context;
mod place;
mod validation;
mod machine;
mod memory;
mod operator;
mod range_map;
mod step;
mod terminator;
mod traits;
@@ -20,10 +18,6 @@ pub use self::place::{Place, PlaceExtra};
pub use self::memory::{Memory, MemoryKind, HasMemory};
use self::range_map::RangeMap;
pub use self::const_eval::{eval_body_as_integer, eval_body, CompileTimeEvaluator, const_eval_provider};
pub use self::machine::Machine;
pub use self::validation::{ValidationQuery, AbsPlace};


@@ -278,7 +278,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
Ok((Place::Ptr { ptr, extra }, field))
}
pub(super) fn val_to_place(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Place> {
pub fn val_to_place(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Place> {
Ok(match self.tcx.struct_tail(ty).sty {
ty::TyDynamic(..) => {
let (ptr, vtable) = self.into_ptr_vtable_pair(val)?;
@@ -298,7 +298,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
})
}
pub(super) fn place_index(
pub fn place_index(
&mut self,
base: Place,
outer_ty: Ty<'tcx>,
@@ -335,7 +335,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
Ok(Place::Ptr { ptr, extra })
}
pub(super) fn eval_place_projection(
pub fn eval_place_projection(
&mut self,
base: Place,
base_ty: Ty<'tcx>,


@@ -1,250 +0,0 @@
//! Implements a map from integer indices to data.
//! Rather than storing data for every index, internally, this maps entire ranges to the data.
//! To this end, the APIs all work on ranges, not on individual integers. Ranges are split as
//! necessary (e.g. when [0,5) is first associated with X, and then [1,2) is mutated).
//! Users must not depend on whether a range is coalesced or not, even though this is observable
//! via the iteration APIs.
use std::collections::BTreeMap;
use std::ops;
#[derive(Clone, Debug)]
pub struct RangeMap<T> {
map: BTreeMap<Range, T>,
}
// The derived `Ord` impl sorts first by the first field, then, if the fields are the same,
// by the second field.
// This is exactly what we need for our purposes, since a range query on a BTreeSet/BTreeMap will give us all
// `MemoryRange`s whose `start` is <= the one we're looking for, but not > the end of the range we're checking.
// At the same time the `end` is irrelevant for the sorting and range searching, but used for the check.
// This kind of search breaks if `end < start`, so don't do that!
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
struct Range {
start: u64,
end: u64, // Invariant: end > start
}
impl Range {
fn range(offset: u64, len: u64) -> ops::Range<Range> {
assert!(len > 0);
// We select all elements that are within
// the range given by the offset into the allocation and the length.
// This is sound if all ranges that intersect with the argument range, are in the
// resulting range of ranges.
let left = Range {
// lowest range to include `offset`
start: 0,
end: offset + 1,
};
let right = Range {
// lowest (valid) range not to include `offset+len`
start: offset + len,
end: offset + len + 1,
};
left..right
}
/// Tests if all of [offset, offset+len) are contained in this range.
fn overlaps(&self, offset: u64, len: u64) -> bool {
assert!(len > 0);
offset < self.end && offset + len >= self.start
}
}
impl<T> RangeMap<T> {
pub fn new() -> RangeMap<T> {
RangeMap { map: BTreeMap::new() }
}
fn iter_with_range<'a>(
&'a self,
offset: u64,
len: u64,
) -> impl Iterator<Item = (&'a Range, &'a T)> + 'a {
assert!(len > 0);
self.map.range(Range::range(offset, len)).filter_map(
move |(range,
data)| {
if range.overlaps(offset, len) {
Some((range, data))
} else {
None
}
},
)
}
pub fn iter<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item = &'a T> + 'a {
self.iter_with_range(offset, len).map(|(_, data)| data)
}
fn split_entry_at(&mut self, offset: u64)
where
T: Clone,
{
let range = match self.iter_with_range(offset, 1).next() {
Some((&range, _)) => range,
None => return,
};
assert!(
range.start <= offset && range.end > offset,
"We got a range that doesn't even contain what we asked for."
);
// There is an entry overlapping this position, see if we have to split it
if range.start < offset {
let data = self.map.remove(&range).unwrap();
let old = self.map.insert(
Range {
start: range.start,
end: offset,
},
data.clone(),
);
assert!(old.is_none());
let old = self.map.insert(
Range {
start: offset,
end: range.end,
},
data,
);
assert!(old.is_none());
}
}
pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator<Item = &'a mut T> + 'a {
self.map.values_mut()
}
/// Provide mutable iteration over everything in the given range. As a side-effect,
/// this will split entries in the map that are only partially hit by the given range,
/// to make sure that when they are mutated, the effect is constrained to the given range.
pub fn iter_mut_with_gaps<'a>(
&'a mut self,
offset: u64,
len: u64,
) -> impl Iterator<Item = &'a mut T> + 'a
where
T: Clone,
{
assert!(len > 0);
// Preparation: Split first and last entry as needed.
self.split_entry_at(offset);
self.split_entry_at(offset + len);
// Now we can provide a mutable iterator
self.map.range_mut(Range::range(offset, len)).filter_map(
move |(&range, data)| {
if range.overlaps(offset, len) {
assert!(
offset <= range.start && offset + len >= range.end,
"The splitting went wrong"
);
Some(data)
} else {
// Skip this one
None
}
},
)
}
/// Provide a mutable iterator over everything in the given range, with the same side-effects as
/// iter_mut_with_gaps. Furthermore, if there are gaps between ranges, fill them with the given default.
/// This is also how you insert.
pub fn iter_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item = &'a mut T> + 'a
where
T: Clone + Default,
{
// Do a first iteration to collect the gaps
let mut gaps = Vec::new();
let mut last_end = offset;
for (range, _) in self.iter_with_range(offset, len) {
if last_end < range.start {
gaps.push(Range {
start: last_end,
end: range.start,
});
}
last_end = range.end;
}
if last_end < offset + len {
gaps.push(Range {
start: last_end,
end: offset + len,
});
}
// Add default for all gaps
for gap in gaps {
let old = self.map.insert(gap, Default::default());
assert!(old.is_none());
}
// Now provide mutable iteration
self.iter_mut_with_gaps(offset, len)
}
pub fn retain<F>(&mut self, mut f: F)
where
F: FnMut(&T) -> bool,
{
let mut remove = Vec::new();
for (range, data) in self.map.iter() {
if !f(data) {
remove.push(*range);
}
}
for range in remove {
self.map.remove(&range);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
/// Query the map at every offset in the range and collect the results.
fn to_vec<T: Copy>(map: &RangeMap<T>, offset: u64, len: u64) -> Vec<T> {
(offset..offset + len)
.into_iter()
.map(|i| *map.iter(i, 1).next().unwrap())
.collect()
}
#[test]
fn basic_insert() {
let mut map = RangeMap::<i32>::new();
// Insert
for x in map.iter_mut(10, 1) {
*x = 42;
}
// Check
assert_eq!(to_vec(&map, 10, 1), vec![42]);
}
#[test]
fn gaps() {
let mut map = RangeMap::<i32>::new();
for x in map.iter_mut(11, 1) {
*x = 42;
}
for x in map.iter_mut(15, 1) {
*x = 42;
}
// Now request a range that needs three gaps filled
for x in map.iter_mut(10, 10) {
if *x != 42 {
*x = 23;
}
}
assert_eq!(
to_vec(&map, 10, 10),
vec![23, 42, 23, 23, 23, 42, 23, 23, 23, 23]
);
assert_eq!(to_vec(&map, 13, 5), vec![23, 23, 42, 23, 23]);
}
}


@@ -126,11 +126,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// Validity checks.
Validate(op, ref places) => {
for operand in places {
self.validation_op(op, operand)?;
M::validation_op(self, op, operand)?;
}
}
EndRegion(ce) => {
self.end_region(Some(ce))?;
M::end_region(self, Some(ce))?;
}
// Defined to do nothing. These are added by optimization passes, to avoid changing the


@@ -1,805 +0,0 @@
use rustc::hir::{self, Mutability};
use rustc::hir::Mutability::*;
use rustc::mir::{self, ValidationOp, ValidationOperand};
use rustc::ty::{self, Ty, TypeFoldable, TyCtxt};
use rustc::ty::layout::LayoutOf;
use rustc::ty::subst::{Substs, Subst};
use rustc::traits;
use rustc::infer::InferCtxt;
use rustc::traits::Reveal;
use rustc::middle::region;
use rustc_data_structures::indexed_vec::Idx;
use interpret::memory::HasMemory;
use super::{EvalContext, Place, PlaceExtra, Machine, ValTy};
use rustc::mir::interpret::{DynamicLifetime, AccessKind, EvalErrorKind, Value, EvalError, EvalResult};
pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, (AbsPlace<'tcx>, Place)>;
#[derive(Copy, Clone, Debug, PartialEq)]
enum ValidationMode {
Acquire,
/// Recover because the given region ended
Recover(region::Scope),
ReleaseUntil(Option<region::Scope>),
}
impl ValidationMode {
fn acquiring(self) -> bool {
use self::ValidationMode::*;
match self {
Acquire | Recover(_) => true,
ReleaseUntil(_) => false,
}
}
}
// Abstract places
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum AbsPlace<'tcx> {
Local(mir::Local),
Static(hir::def_id::DefId),
Projection(Box<AbsPlaceProjection<'tcx>>),
}
type AbsPlaceProjection<'tcx> = mir::Projection<'tcx, AbsPlace<'tcx>, u64, ()>;
type AbsPlaceElem<'tcx> = mir::ProjectionElem<'tcx, u64, ()>;
impl<'tcx> AbsPlace<'tcx> {
pub fn field(self, f: mir::Field) -> AbsPlace<'tcx> {
self.elem(mir::ProjectionElem::Field(f, ()))
}
pub fn deref(self) -> AbsPlace<'tcx> {
self.elem(mir::ProjectionElem::Deref)
}
pub fn downcast(self, adt_def: &'tcx ty::AdtDef, variant_index: usize) -> AbsPlace<'tcx> {
self.elem(mir::ProjectionElem::Downcast(adt_def, variant_index))
}
pub fn index(self, index: u64) -> AbsPlace<'tcx> {
self.elem(mir::ProjectionElem::Index(index))
}
fn elem(self, elem: AbsPlaceElem<'tcx>) -> AbsPlace<'tcx> {
AbsPlace::Projection(Box::new(AbsPlaceProjection {
base: self,
elem,
}))
}
}
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
fn abstract_place_projection(&self, proj: &mir::PlaceProjection<'tcx>) -> EvalResult<'tcx, AbsPlaceProjection<'tcx>> {
use self::mir::ProjectionElem::*;
let elem = match proj.elem {
Deref => Deref,
Field(f, _) => Field(f, ()),
Index(v) => {
let value = self.frame().get_local(v)?;
let ty = self.tcx.types.usize;
let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?;
Index(n)
},
ConstantIndex { offset, min_length, from_end } =>
ConstantIndex { offset, min_length, from_end },
Subslice { from, to } =>
Subslice { from, to },
Downcast(adt, sz) => Downcast(adt, sz),
};
Ok(AbsPlaceProjection {
base: self.abstract_place(&proj.base)?,
elem
})
}
fn abstract_place(&self, place: &mir::Place<'tcx>) -> EvalResult<'tcx, AbsPlace<'tcx>> {
Ok(match place {
&mir::Place::Local(l) => AbsPlace::Local(l),
&mir::Place::Static(ref s) => AbsPlace::Static(s.def_id),
&mir::Place::Projection(ref p) =>
AbsPlace::Projection(Box::new(self.abstract_place_projection(&*p)?)),
})
}
// Validity checks
pub(crate) fn validation_op(
&mut self,
op: ValidationOp,
operand: &ValidationOperand<'tcx, mir::Place<'tcx>>,
) -> EvalResult<'tcx> {
// If mir-emit-validate is set to 0 (i.e., disabled), we may still see validation commands
// because other crates may have been compiled with mir-emit-validate > 0. Ignore those
// commands. This makes mir-emit-validate also a flag to control whether miri will do
// validation or not.
if self.tcx.sess.opts.debugging_opts.mir_emit_validate == 0 {
return Ok(());
}
debug_assert!(self.memory.cur_frame == self.cur_frame());
// HACK: Determine if this method is whitelisted and hence we do not perform any validation.
// We currently insta-UB on anything passing around uninitialized memory, so we have to whitelist
// the places that are allowed to do that.
// The second group is stuff libstd does that is forbidden even under relaxed validation.
{
// The regexp we use for filtering
use regex::Regex;
lazy_static! {
static ref RE: Regex = Regex::new("^(\
(std|alloc::heap::__core)::mem::(uninitialized|forget)::|\
<(std|alloc)::heap::Heap as (std::heap|alloc::allocator)::Alloc>::|\
<(std|alloc::heap::__core)::mem::ManuallyDrop<T>><.*>::new$|\
<(std|alloc::heap::__core)::mem::ManuallyDrop<T> as std::ops::DerefMut><.*>::deref_mut$|\
(std|alloc::heap::__core)::ptr::read::|\
\
<std::sync::Arc<T>><.*>::inner$|\
<std::sync::Arc<T>><.*>::drop_slow$|\
(std::heap|alloc::allocator)::Layout::for_value::|\
(std|alloc::heap::__core)::mem::(size|align)_of_val::\
)").unwrap();
}
// Now test
let name = self.stack[self.cur_frame()].instance.to_string();
if RE.is_match(&name) {
return Ok(());
}
}
// We need to monomorphize ty *without* erasing lifetimes
trace!("validation_op1: {:?}", operand.ty.sty);
let ty = operand.ty.subst(self.tcx, self.substs());
trace!("validation_op2: {:?}", operand.ty.sty);
let place = self.eval_place(&operand.place)?;
let abs_place = self.abstract_place(&operand.place)?;
let query = ValidationQuery {
place: (abs_place, place),
ty,
re: operand.re,
mutbl: operand.mutbl,
};
// Check the mode, and also perform mode-specific operations
let mode = match op {
ValidationOp::Acquire => ValidationMode::Acquire,
ValidationOp::Release => ValidationMode::ReleaseUntil(None),
ValidationOp::Suspend(scope) => {
if query.mutbl == MutMutable {
let lft = DynamicLifetime {
frame: self.cur_frame(),
region: Some(scope), // Notably, we only ever suspend things for given regions.
// Suspending for the entire function does not make any sense.
};
trace!("Suspending {:?} until {:?}", query, scope);
self.suspended.entry(lft).or_insert_with(Vec::new).push(
query.clone(),
);
}
ValidationMode::ReleaseUntil(Some(scope))
}
};
self.validate(query, mode)
}
/// Release locks and executes suspensions of the given region (or the entire fn, in case of None).
pub(crate) fn end_region(&mut self, scope: Option<region::Scope>) -> EvalResult<'tcx> {
debug_assert!(self.memory.cur_frame == self.cur_frame());
self.memory.locks_lifetime_ended(scope);
match scope {
Some(scope) => {
// Recover suspended places
let lft = DynamicLifetime {
frame: self.cur_frame(),
region: Some(scope),
};
if let Some(queries) = self.suspended.remove(&lft) {
for query in queries {
trace!("Recovering {:?} from suspension", query);
self.validate(query, ValidationMode::Recover(scope))?;
}
}
}
None => {
// Clean suspension table of current frame
let cur_frame = self.cur_frame();
self.suspended.retain(|lft, _| {
lft.frame != cur_frame // keep only what is in the other (lower) frames
});
}
}
Ok(())
}
fn normalize_type_unerased(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
return normalize_associated_type(self.tcx, &ty);
use syntax::codemap::{Span, DUMMY_SP};
// We copy a bunch of stuff from rustc/infer/mod.rs to be able to tweak its behavior
fn normalize_projections_in<'a, 'gcx, 'tcx, T>(
self_: &InferCtxt<'a, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
value: &T,
) -> T::Lifted
where
T: TypeFoldable<'tcx> + ty::Lift<'gcx>,
{
let mut selcx = traits::SelectionContext::new(self_);
let cause = traits::ObligationCause::dummy();
let traits::Normalized {
value: result,
obligations,
} = traits::normalize(&mut selcx, param_env, cause, value);
let mut fulfill_cx = traits::FulfillmentContext::new();
for obligation in obligations {
fulfill_cx.register_predicate_obligation(self_, obligation);
}
drain_fulfillment_cx_or_panic(self_, DUMMY_SP, &mut fulfill_cx, &result)
}
fn drain_fulfillment_cx_or_panic<'a, 'gcx, 'tcx, T>(
self_: &InferCtxt<'a, 'gcx, 'tcx>,
span: Span,
fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
result: &T,
) -> T::Lifted
where
T: TypeFoldable<'tcx> + ty::Lift<'gcx>,
{
// In principle, we only need to do this so long as `result`
// contains unbound type parameters. It could be a slight
// optimization to stop iterating early.
match fulfill_cx.select_all_or_error(self_) {
Ok(()) => { }
Err(errors) => {
span_bug!(
span,
"Encountered errors `{:?}` resolving bounds after type-checking",
errors
);
}
}
let result = self_.resolve_type_vars_if_possible(result);
let result = self_.tcx.fold_regions(
&result,
&mut false,
|r, _| match *r {
ty::ReVar(_) => self_.tcx.types.re_erased,
_ => r,
},
);
match self_.tcx.lift_to_global(&result) {
Some(result) => result,
None => {
span_bug!(span, "Uninferred types/regions in `{:?}`", result);
}
}
}
trait MyTransNormalize<'gcx>: TypeFoldable<'gcx> {
fn my_trans_normalize<'a, 'tcx>(
&self,
infcx: &InferCtxt<'a, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Self;
}
macro_rules! items { ($($item:item)+) => ($($item)+) }
macro_rules! impl_trans_normalize {
($lt_gcx:tt, $($ty:ty),+) => {
items!($(impl<$lt_gcx> MyTransNormalize<$lt_gcx> for $ty {
fn my_trans_normalize<'a, 'tcx>(&self,
infcx: &InferCtxt<'a, $lt_gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>)
-> Self {
normalize_projections_in(infcx, param_env, self)
}
})+);
}
}
impl_trans_normalize!('gcx,
Ty<'gcx>,
&'gcx Substs<'gcx>,
ty::FnSig<'gcx>,
ty::PolyFnSig<'gcx>,
ty::ClosureSubsts<'gcx>,
ty::PolyTraitRef<'gcx>,
ty::ExistentialTraitRef<'gcx>
);
fn normalize_associated_type<'a, 'tcx, T>(self_: TyCtxt<'a, 'tcx, 'tcx>, value: &T) -> T
where
T: MyTransNormalize<'tcx>,
{
let param_env = ty::ParamEnv::empty(Reveal::All);
if !value.has_projections() {
return value.clone();
}
self_.infer_ctxt().enter(|infcx| {
value.my_trans_normalize(&infcx, param_env)
})
}
}
// This is a copy of `Layout::field`
//
// FIXME: remove once validation does not depend on lifetimes
fn field_with_lifetimes(
&mut self,
base: Place,
mut layout: ty::layout::TyLayout<'tcx>,
i: usize,
) -> EvalResult<'tcx, Ty<'tcx>> {
match base {
Place::Ptr { extra: PlaceExtra::DowncastVariant(variant_index), .. } => {
layout = layout.for_variant(&self, variant_index);
}
_ => {}
}
let tcx = self.tcx;
Ok(match layout.ty.sty {
ty::TyBool |
ty::TyChar |
ty::TyInt(_) |
ty::TyUint(_) |
ty::TyFloat(_) |
ty::TyFnPtr(_) |
ty::TyNever |
ty::TyFnDef(..) |
ty::TyDynamic(..) |
ty::TyForeign(..) => {
bug!("TyLayout::field_type({:?}): not applicable", layout)
}
// Potentially-fat pointers.
ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
assert!(i < 2);
// Reuse the fat *T type as its own thin pointer data field.
// This provides information about e.g. DST struct pointees
// (which may have no non-DST form), and will work as long
// as the `Abi` or `FieldPlacement` is checked by users.
if i == 0 {
return Ok(layout.ty);
}
match tcx.struct_tail(pointee).sty {
ty::TySlice(_) |
ty::TyStr => tcx.types.usize,
ty::TyDynamic(..) => {
// FIXME(eddyb) use an usize/fn() array with
// the correct number of vtables slots.
tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil())
}
_ => bug!("TyLayout::field_type({:?}): not applicable", layout)
}
}
// Arrays and slices.
ty::TyArray(element, _) |
ty::TySlice(element) => element,
ty::TyStr => tcx.types.u8,
// Tuples, generators and closures.
ty::TyClosure(def_id, ref substs) => {
substs.upvar_tys(def_id, tcx).nth(i).unwrap()
}
ty::TyGenerator(def_id, ref substs, _) => {
substs.field_tys(def_id, tcx).nth(i).unwrap()
}
ty::TyTuple(tys, _) => tys[i],
// SIMD vector types.
ty::TyAdt(def, ..) if def.repr.simd() => {
layout.ty.simd_type(tcx)
}
// ADTs.
ty::TyAdt(def, substs) => {
use rustc::ty::layout::Variants;
match layout.variants {
Variants::Single { index } => {
def.variants[index].fields[i].ty(tcx, substs)
}
// Discriminant field for enums (where applicable).
Variants::Tagged { ref discr, .. } |
Variants::NicheFilling { niche: ref discr, .. } => {
assert_eq!(i, 0);
return Ok(discr.value.to_ty(tcx))
}
}
}
ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
ty::TyInfer(_) | ty::TyError => {
bug!("TyLayout::field_type: unexpected type `{}`", layout.ty)
}
})
}
fn validate_fields(
&mut self,
query: ValidationQuery<'tcx>,
mode: ValidationMode,
) -> EvalResult<'tcx> {
let mut layout = self.layout_of(query.ty)?;
layout.ty = query.ty;
// TODO: Maybe take visibility/privacy into account.
for idx in 0..layout.fields.count() {
let field = mir::Field::new(idx);
let (field_place, field_layout) =
self.place_field(query.place.1, field, layout)?;
// layout stuff erases lifetimes, get the field ourselves
let field_ty = self.field_with_lifetimes(query.place.1, layout, idx)?;
trace!("assuming \n{:?}\n == \n{:?}\n except for lifetimes", field_layout.ty, field_ty);
self.validate(
ValidationQuery {
place: (query.place.0.clone().field(field), field_place),
ty: field_ty,
..query
},
mode,
)?;
}
Ok(())
}
fn validate_ptr(
&mut self,
val: Value,
abs_place: AbsPlace<'tcx>,
pointee_ty: Ty<'tcx>,
re: Option<region::Scope>,
mutbl: Mutability,
mode: ValidationMode,
) -> EvalResult<'tcx> {
// Check alignment and non-NULLness
let (_, align) = self.size_and_align_of_dst(pointee_ty, val)?;
let ptr = self.into_ptr(val)?;
self.memory.check_align(ptr, align.abi(), None)?;
// Recurse
let pointee_place = self.val_to_place(val, pointee_ty)?;
self.validate(
ValidationQuery {
place: (abs_place.deref(), pointee_place),
ty: pointee_ty,
re,
mutbl,
},
mode,
)
}
/// Validate the place at the given type. If `acquire` is false, just do a release of all write locks
fn validate(
&mut self,
mut query: ValidationQuery<'tcx>,
mode: ValidationMode,
) -> EvalResult<'tcx> {
use rustc::ty::TypeVariants::*;
use rustc::ty::RegionKind::*;
use rustc::ty::AdtKind;
// No point releasing shared stuff.
if !mode.acquiring() && query.mutbl == MutImmutable {
return Ok(());
}
// When we recover, we may see data whose validity *just* ended. Do not acquire it.
if let ValidationMode::Recover(ending_ce) = mode {
if query.re == Some(ending_ce) {
return Ok(());
}
}
query.ty = self.normalize_type_unerased(&query.ty);
trace!("{:?} on {:#?}", mode, query);
trace!("{:#?}", query.ty.sty);
// Decide whether this type *owns* the memory it covers (like integers), or whether it
// just assembles pieces (that each own their memory) together to a larger whole.
// TODO: Currently, we don't acquire locks for padding and discriminants. We should.
let is_owning = match query.ty.sty {
TyInt(_) | TyUint(_) | TyRawPtr(_) | TyBool | TyFloat(_) | TyChar | TyStr |
TyRef(..) | TyFnPtr(..) | TyFnDef(..) | TyNever => true,
TyAdt(adt, _) if adt.is_box() => true,
TySlice(_) | TyAdt(_, _) | TyTuple(..) | TyClosure(..) | TyArray(..) |
TyDynamic(..) | TyGenerator(..) | TyForeign(_) => false,
TyParam(_) | TyInfer(_) | TyProjection(_) | TyAnon(..) | TyError => {
bug!("I got an incomplete/unnormalized type for validation")
}
};
if is_owning {
// We need to lock. So we need memory. So we have to force_acquire.
// Tracking the same state for locals not backed by memory would just duplicate too
// much machinery.
// FIXME: We ignore alignment.
let (ptr, extra) = self.force_allocation(query.place.1)?.to_ptr_extra_aligned();
// Determine the size
// FIXME: Can we reuse size_and_align_of_dst for Places?
let layout = self.layout_of(query.ty)?;
let len = if !layout.is_unsized() {
assert_eq!(extra, PlaceExtra::None, "Got a fat ptr to a sized type");
layout.size.bytes()
} else {
// The only unsized type we consider "owning" is TyStr.
assert_eq!(
query.ty.sty,
TyStr,
"Found a surprising unsized owning type"
);
// The extra must be the length, in bytes.
match extra {
PlaceExtra::Length(len) => len,
_ => bug!("TyStr must have a length as extra"),
}
};
// Handle locking
if len > 0 {
let ptr = ptr.to_ptr()?;
match query.mutbl {
MutImmutable => {
if mode.acquiring() {
self.memory.acquire_lock(
ptr,
len,
query.re,
AccessKind::Read,
)?;
}
}
// No releasing of read locks, ever.
MutMutable => {
match mode {
ValidationMode::Acquire => {
self.memory.acquire_lock(
ptr,
len,
query.re,
AccessKind::Write,
)?
}
ValidationMode::Recover(ending_ce) => {
self.memory.recover_write_lock(
ptr,
len,
&query.place.0,
query.re,
ending_ce,
)?
}
ValidationMode::ReleaseUntil(suspended_ce) => {
self.memory.suspend_write_lock(
ptr,
len,
&query.place.0,
suspended_ce,
)?
}
}
}
}
}
}
let res = do catch {
match query.ty.sty {
TyInt(_) | TyUint(_) | TyRawPtr(_) => {
if mode.acquiring() {
// Make sure we can read this.
let val = self.read_place(query.place.1)?;
self.follow_by_ref_value(val, query.ty)?;
// FIXME: It would be great to rule out Undef here, but that doesn't actually work.
// Passing around undef data is a thing that e.g. Vec::extend_with does.
}
Ok(())
}
TyBool | TyFloat(_) | TyChar => {
if mode.acquiring() {
let val = self.read_place(query.place.1)?;
let val = self.value_to_primval(ValTy { value: val, ty: query.ty })?;
val.to_bytes()?;
// TODO: Check if these are valid bool/float/codepoint/UTF-8
}
Ok(())
}
TyNever => err!(ValidationFailure(format!("The empty type is never valid."))),
TyRef(region,
ty::TypeAndMut {
ty: pointee_ty,
mutbl,
}) => {
let val = self.read_place(query.place.1)?;
// Sharing restricts our context
if mutbl == MutImmutable {
query.mutbl = MutImmutable;
}
// Inner lifetimes *outlive* outer ones, so only if we have no lifetime restriction yet,
// we record the region of this borrow to the context.
if query.re == None {
match *region {
ReScope(scope) => query.re = Some(scope),
// It is possible for us to encounter erased lifetimes here because the lifetimes in
// this function's Subst will be erased.
_ => {}
}
}
self.validate_ptr(val, query.place.0, pointee_ty, query.re, query.mutbl, mode)
}
TyAdt(adt, _) if adt.is_box() => {
let val = self.read_place(query.place.1)?;
self.validate_ptr(val, query.place.0, query.ty.boxed_ty(), query.re, query.mutbl, mode)
}
TyFnPtr(_sig) => {
let ptr = self.read_place(query.place.1)?;
let ptr = self.into_ptr(ptr)?.to_ptr()?;
self.memory.get_fn(ptr)?;
// TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
Ok(())
}
TyFnDef(..) => {
// This is a zero-sized type with all relevant data sitting in the type.
// There is nothing to validate.
Ok(())
}
// Compound types
TyStr => {
// TODO: Validate strings
Ok(())
}
TySlice(elem_ty) => {
let len = match query.place.1 {
Place::Ptr { extra: PlaceExtra::Length(len), .. } => len,
_ => {
bug!(
"acquire_valid of a TySlice given non-slice place: {:?}",
query.place
)
}
};
for i in 0..len {
let inner_place = self.place_index(query.place.1, query.ty, i)?;
self.validate(
ValidationQuery {
place: (query.place.0.clone().index(i), inner_place),
ty: elem_ty,
..query
},
mode,
)?;
}
Ok(())
}
TyArray(elem_ty, len) => {
let len = len.val.to_const_int().unwrap().to_u64().unwrap();
for i in 0..len {
let inner_place = self.place_index(query.place.1, query.ty, i as u64)?;
self.validate(
ValidationQuery {
place: (query.place.0.clone().index(i as u64), inner_place),
ty: elem_ty,
..query
},
mode,
)?;
}
Ok(())
}
TyDynamic(_data, _region) => {
// Check that this is a valid vtable
let vtable = match query.place.1 {
Place::Ptr { extra: PlaceExtra::Vtable(vtable), .. } => vtable,
_ => {
bug!(
"acquire_valid of a TyDynamic given non-trait-object place: {:?}",
query.place
)
}
};
self.read_size_and_align_from_vtable(vtable)?;
// TODO: Check that the vtable contains all the function pointers we expect it to have.
// Trait objects cannot have any operations performed
// on them directly. We cannot, in general, even acquire any locks as the trait object *could*
// contain an UnsafeCell. If we call functions to get access to data, we will validate
// their return values. So, it doesn't seem like there's anything else to do.
Ok(())
}
TyAdt(adt, _) => {
if Some(adt.did) == self.tcx.lang_items().unsafe_cell_type() &&
query.mutbl == MutImmutable
{
// No locks for shared unsafe cells. Also no other validation, the only field is private anyway.
return Ok(());
}
match adt.adt_kind() {
AdtKind::Enum => {
let discr = self.read_discriminant_value(query.place.1, query.ty)?;
// Get variant index for discriminant
let variant_idx = adt.discriminants(self.tcx).position(|variant_discr| {
variant_discr.to_u128_unchecked() == discr
});
let variant_idx = match variant_idx {
Some(val) => val,
None => return err!(InvalidDiscriminant),
};
let variant = &adt.variants[variant_idx];
if variant.fields.len() > 0 {
// Downcast to this variant, if needed
let place = if adt.is_enum() {
(
query.place.0.downcast(adt, variant_idx),
self.eval_place_projection(
query.place.1,
query.ty,
&mir::ProjectionElem::Downcast(adt, variant_idx),
)?,
)
} else {
query.place
};
// Recursively validate the fields
self.validate_fields(
ValidationQuery { place, ..query },
mode,
)
} else {
// No fields, nothing left to check. Downcasting may fail, e.g. in case of a CEnum.
Ok(())
}
}
AdtKind::Struct => {
self.validate_fields(query, mode)
}
AdtKind::Union => {
// No guarantees are provided for union types.
// TODO: Make sure that all access to union fields is unsafe; otherwise, we may have some checking to do (but what exactly?)
Ok(())
}
}
}
TyTuple(..) |
TyClosure(..) => {
// TODO: Check if the signature matches for `TyClosure`
// (should be the same check as what terminator/mod.rs already does on call?).
// Is there other things we can/should check? Like vtable pointers?
self.validate_fields(query, mode)
}
// FIXME: generators aren't validated right now
TyGenerator(..) => Ok(()),
_ => bug!("We already established that this is a type we support. ({})", query.ty),
}
};
match res {
// ReleaseUntil(None) of an uninitialized variable is a NOP. This is needed because
// we have to release the return value of a function; due to destination-passing-style
// the callee may directly write there.
// TODO: Ideally we would know whether the destination is already initialized, and only
// release if it is. But of course that can't even always be statically determined.
Err(EvalError { kind: EvalErrorKind::ReadUndefBytes, .. })
if mode == ValidationMode::ReleaseUntil(None) => {
return Ok(());
}
res => res,
}
}
}


@@ -52,10 +52,7 @@ extern crate rustc_const_math;
extern crate rustc_const_eval;
extern crate core; // for NonZero
extern crate log_settings;
#[macro_use]
extern crate lazy_static;
extern crate rustc_apfloat;
extern crate regex;
extern crate byteorder;
mod diagnostics;
@@ -67,7 +64,7 @@ mod hair;
mod shim;
pub mod transform;
pub mod util;
mod interpret;
pub mod interpret;
use rustc::ty::maps::Providers;

@@ -1 +1 @@
Subproject commit eccf680b5d191bb39ef2fc5ae51bf3909c312bbe
Subproject commit bde093fa140cbf95023482a94b92b0b16af4b521