Auto merge of #46338 - michaelwoerister:lazy-diagnostics, r=nikomatsakis

incr.comp.: Load cached diagnostics lazily and allow more things in the cache.

This PR makes two changes:
1. Diagnostics are now loaded lazily from the incr. comp. cache. This turned out to be necessary for correctness because diagnostics contain `Span` values, and deserializing those requires that the source file they point to still exists in the current compilation session, which isn't always the case. Loading diagnostics lazily means that entries which are no longer valid are simply never touched.
2. The compiler can now deal with there being no cache entry for a given query invocation. Before, all query results of a cacheable query were expected to be present in the cache. Now the compiler falls back to re-computing the result if no cache entry is found (see the sketch below). This allows us to cache things that we cannot force from their dep-node (like the `symbol_name` query); for such queries we just get a "best effort" caching strategy.
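A minimal, self-contained sketch of the lookup-with-fallback pattern from point 2 (the types and names here are hypothetical stand-ins, not the actual rustc API):

```rust
use std::collections::HashMap;

// Hypothetical stand-in for the on-disk cache: an index mapping
// dep-node indices to serialized payloads.
struct OnDiskCache {
    entries: HashMap<u64, String>,
}

impl OnDiskCache {
    // Lazy lookup: nothing is decoded until a query actually asks for
    // its cached result, and a miss is reported as None, not a bug.
    fn try_load_query_result(&self, dep_node_index: u64) -> Option<String> {
        self.entries.get(&dep_node_index).cloned()
    }
}

// Try the cache first, then fall back to recomputing ("best effort").
fn force_query(cache: &OnDiskCache,
               dep_node_index: u64,
               recompute: impl FnOnce() -> String)
               -> String {
    cache.try_load_query_result(dep_node_index)
         .unwrap_or_else(recompute)
}

fn main() {
    let mut entries = HashMap::new();
    entries.insert(1, String::from("cached symbol name"));
    let cache = OnDiskCache { entries };

    // Cache hit: the stored value is used.
    assert_eq!(force_query(&cache, 1, || String::from("recomputed")),
               "cached symbol name");
    // Cache miss: the result is recomputed instead of being an error.
    assert_eq!(force_query(&cache, 2, || String::from("recomputed")),
               "recomputed");
}
```

The important property is that a cache miss is an `Option::None` rather than an error, so best-effort entries can simply be absent.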

~~This PR is based on https://github.com/rust-lang/rust/pull/46301 (=first 2 commits), so please don't merge until that has landed. The rest of the commits are ready for review though.~~

r? @nikomatsakis
bors 2017-12-01 16:01:22 +00:00
commit 6805b016ef
11 changed files with 476 additions and 226 deletions


```diff
@@ -461,8 +461,8 @@ impl DepGraph {
         self.data.as_ref().and_then(|data| data.colors.borrow().get(dep_node).cloned())
     }
 
-    pub fn try_mark_green(&self,
-                          tcx: TyCtxt,
+    pub fn try_mark_green<'tcx>(&self,
+                          tcx: TyCtxt<'_, 'tcx, 'tcx>,
                           dep_node: &DepNode)
                           -> Option<DepNodeIndex> {
         debug!("try_mark_green({:?}) - BEGIN", dep_node);
@@ -621,7 +621,7 @@ impl DepGraph {
         // ... emitting any stored diagnostic ...
         {
             let diagnostics = tcx.on_disk_query_result_cache
-                                 .load_diagnostics(prev_dep_node_index);
+                                 .load_diagnostics(tcx, prev_dep_node_index);
 
             if diagnostics.len() > 0 {
                 let handle = tcx.sess.diagnostic();
```


```diff
@@ -28,7 +28,7 @@ use syntax::attr;
 use syntax::codemap::CodeMap;
 use syntax::ext::hygiene::SyntaxContext;
 use syntax::symbol::Symbol;
-use syntax_pos::Span;
+use syntax_pos::{Span, DUMMY_SP};
 use rustc_data_structures::stable_hasher::{HashStable, StableHashingContextProvider,
                                            StableHasher, StableHasherResult,
@@ -362,64 +362,53 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for Span {
     fn hash_stable<W: StableHasherResult>(&self,
                                           hcx: &mut StableHashingContext<'gcx>,
                                           hasher: &mut StableHasher<W>) {
-        use syntax_pos::Pos;
+        const TAG_VALID_SPAN: u8 = 0;
+        const TAG_INVALID_SPAN: u8 = 1;
+        const TAG_EXPANSION: u8 = 0;
+        const TAG_NO_EXPANSION: u8 = 1;
 
         if !hcx.hash_spans {
             return
         }
 
+        if *self == DUMMY_SP {
+            return std_hash::Hash::hash(&TAG_INVALID_SPAN, hasher);
+        }
+
         // If this is not an empty or invalid span, we want to hash the last
         // position that belongs to it, as opposed to hashing the first
         // position past it.
         let span = self.data();
-        let span_hi = if span.hi > span.lo {
-            // We might end up in the middle of a multibyte character here,
-            // but that's OK, since we are not trying to decode anything at
-            // this position.
-            span.hi - ::syntax_pos::BytePos(1)
-        } else {
-            span.hi
-        };
-
-        {
-            let loc1 = hcx.codemap().byte_pos_to_line_and_col(span.lo);
-            let loc1 = loc1.as_ref()
-                           .map(|&(ref fm, line, col)| (&fm.name[..], line, col.to_usize()))
-                           .unwrap_or(("???", 0, 0));
-
-            let loc2 = hcx.codemap().byte_pos_to_line_and_col(span_hi);
-            let loc2 = loc2.as_ref()
-                           .map(|&(ref fm, line, col)| (&fm.name[..], line, col.to_usize()))
-                           .unwrap_or(("???", 0, 0));
-
-            if loc1.0 == loc2.0 {
-                std_hash::Hash::hash(&0u8, hasher);
-
-                std_hash::Hash::hash(loc1.0, hasher);
-                std_hash::Hash::hash(&loc1.1, hasher);
-                std_hash::Hash::hash(&loc1.2, hasher);
-
-                // Do not hash the file name twice
-                std_hash::Hash::hash(&loc2.1, hasher);
-                std_hash::Hash::hash(&loc2.2, hasher);
-            } else {
-                std_hash::Hash::hash(&1u8, hasher);
-
-                std_hash::Hash::hash(loc1.0, hasher);
-                std_hash::Hash::hash(&loc1.1, hasher);
-                std_hash::Hash::hash(&loc1.2, hasher);
-
-                std_hash::Hash::hash(loc2.0, hasher);
-                std_hash::Hash::hash(&loc2.1, hasher);
-                std_hash::Hash::hash(&loc2.2, hasher);
-            }
-        }
+
+        if span.hi < span.lo {
+            return std_hash::Hash::hash(&TAG_INVALID_SPAN, hasher);
+        }
+
+        let (file_lo, line_lo, col_lo) = match hcx.codemap()
+                                                  .byte_pos_to_line_and_col(span.lo) {
+            Some(pos) => pos,
+            None => {
+                return std_hash::Hash::hash(&TAG_INVALID_SPAN, hasher);
+            }
+        };
+
+        if !file_lo.contains(span.hi) {
+            return std_hash::Hash::hash(&TAG_INVALID_SPAN, hasher);
+        }
+
+        let len = span.hi - span.lo;
+
+        std_hash::Hash::hash(&TAG_VALID_SPAN, hasher);
+        std_hash::Hash::hash(&file_lo.name, hasher);
+        std_hash::Hash::hash(&line_lo, hasher);
+        std_hash::Hash::hash(&col_lo, hasher);
+        std_hash::Hash::hash(&len, hasher);
 
         if span.ctxt == SyntaxContext::empty() {
-            0u8.hash_stable(hcx, hasher);
+            TAG_NO_EXPANSION.hash_stable(hcx, hasher);
         } else {
-            1u8.hash_stable(hcx, hasher);
-            self.source_callsite().hash_stable(hcx, hasher);
+            TAG_EXPANSION.hash_stable(hcx, hasher);
+            span.ctxt.outer().expn_info().hash_stable(hcx, hasher);
         }
     }
 }
```


```diff
@@ -347,6 +347,30 @@ impl_stable_hash_for!(enum ::syntax::ast::MetaItemKind {
     NameValue(lit)
 });
 
+impl_stable_hash_for!(struct ::syntax_pos::hygiene::ExpnInfo {
+    call_site,
+    callee
+});
+
+impl_stable_hash_for!(struct ::syntax_pos::hygiene::NameAndSpan {
+    format,
+    allow_internal_unstable,
+    allow_internal_unsafe,
+    span
+});
+
+impl_stable_hash_for!(enum ::syntax_pos::hygiene::ExpnFormat {
+    MacroAttribute(sym),
+    MacroBang(sym),
+    CompilerDesugaring(kind)
+});
+
+impl_stable_hash_for!(enum ::syntax_pos::hygiene::CompilerDesugaringKind {
+    BackArrow,
+    DotFill,
+    QuestionMark
+});
+
 impl<'gcx> HashStable<StableHashingContext<'gcx>> for FileMap {
     fn hash_stable<W: StableHasherResult>(&self,
                                           hcx: &mut StableHashingContext<'gcx>,
```


```diff
@@ -1235,7 +1235,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
                                          -> Result<(), E::Error>
         where E: ty::codec::TyEncoder
     {
-        self.on_disk_query_result_cache.serialize(self.global_tcx(), self.cstore, encoder)
+        self.on_disk_query_result_cache.serialize(self.global_tcx(), encoder)
     }
 }
```


```diff
@@ -31,9 +31,9 @@ pub(super) trait QueryDescription<'tcx>: QueryConfig {
         false
     }
 
-    fn load_from_disk<'a>(_: TyCtxt<'a, 'tcx, 'tcx>,
-                          _: SerializedDepNodeIndex)
-                          -> Self::Value {
+    fn try_load_from_disk(_: TyCtxt<'_, 'tcx, 'tcx>,
+                          _: SerializedDepNodeIndex)
+                          -> Option<Self::Value> {
         bug!("QueryDescription::load_from_disk() called for unsupport query.")
     }
 }
@@ -556,12 +556,14 @@ impl<'tcx> QueryDescription<'tcx> for queries::typeck_tables_of<'tcx> {
         def_id.is_local()
     }
 
-    fn load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
-                          id: SerializedDepNodeIndex)
-                          -> Self::Value {
-        let typeck_tables: ty::TypeckTables<'tcx> = tcx.on_disk_query_result_cache
-                                                       .load_query_result(tcx, id);
-        tcx.alloc_tables(typeck_tables)
+    fn try_load_from_disk(tcx: TyCtxt<'_, 'tcx, 'tcx>,
+                          id: SerializedDepNodeIndex)
+                          -> Option<Self::Value> {
+        let typeck_tables: Option<ty::TypeckTables<'tcx>> = tcx
+            .on_disk_query_result_cache
+            .try_load_query_result(tcx, id);
+
+        typeck_tables.map(|tables| tcx.alloc_tables(tables))
     }
 }
```


```diff
@@ -14,7 +14,7 @@ use hir;
 use hir::def_id::{CrateNum, DefIndex, DefId, LocalDefId,
                   RESERVED_FOR_INCR_COMP_CACHE, LOCAL_CRATE};
 use hir::map::definitions::DefPathHash;
-use middle::cstore::CrateStore;
+use ich::CachingCodemapView;
 use mir;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
@@ -23,23 +23,28 @@ use rustc_serialize::{Decodable, Decoder, Encodable, Encoder, opaque,
                       UseSpecializedDecodable, UseSpecializedEncodable};
 use session::{CrateDisambiguator, Session};
 use std::cell::RefCell;
-use std::collections::BTreeMap;
 use std::mem;
+use std::rc::Rc;
 use syntax::ast::NodeId;
 use syntax::codemap::{CodeMap, StableFilemapId};
-use syntax_pos::{BytePos, Span, NO_EXPANSION, DUMMY_SP};
+use syntax_pos::{BytePos, Span, DUMMY_SP, FileMap};
+use syntax_pos::hygiene::{Mark, SyntaxContext, ExpnInfo};
 use ty;
 use ty::codec::{self as ty_codec, TyDecoder, TyEncoder};
 use ty::context::TyCtxt;
 
-// Some magic values used for verifying that encoding and decoding. These are
-// basically random numbers.
-const PREV_DIAGNOSTICS_TAG: u64 = 0x1234_5678_A1A1_A1A1;
-const QUERY_RESULT_INDEX_TAG: u64 = 0x1234_5678_C3C3_C3C3;
+const TAG_FILE_FOOTER: u128 = 0xC0FFEE_C0FFEE_C0FFEE_C0FFEE_C0FFEE;
 
 const TAG_CLEAR_CROSS_CRATE_CLEAR: u8 = 0;
 const TAG_CLEAR_CROSS_CRATE_SET: u8 = 1;
 
+const TAG_NO_EXPANSION_INFO: u8 = 0;
+const TAG_EXPANSION_INFO_SHORTHAND: u8 = 1;
+const TAG_EXPANSION_INFO_INLINE: u8 = 2;
+
+const TAG_VALID_SPAN: u8 = 0;
+const TAG_INVALID_SPAN: u8 = 1;
+
 /// `OnDiskCache` provides an interface to incr. comp. data cached from the
 /// previous compilation session. This data will eventually include the results
 /// of a few selected queries (like `typeck_tables_of` and `mir_optimized`) and
@@ -49,9 +54,6 @@ pub struct OnDiskCache<'sess> {
     // The complete cache data in serialized form.
     serialized_data: Vec<u8>,
 
-    // The diagnostics emitted during the previous compilation session.
-    prev_diagnostics: FxHashMap<SerializedDepNodeIndex, Vec<Diagnostic>>,
-
     // This field collects all Diagnostics emitted during the current
     // compilation session.
     current_diagnostics: RefCell<FxHashMap<DepNodeIndex, Vec<Diagnostic>>>,
@@ -59,101 +61,105 @@ pub struct OnDiskCache<'sess> {
     prev_cnums: Vec<(u32, String, CrateDisambiguator)>,
     cnum_map: RefCell<Option<IndexVec<CrateNum, Option<CrateNum>>>>,
 
-    prev_filemap_starts: BTreeMap<BytePos, StableFilemapId>,
     codemap: &'sess CodeMap,
+    file_index_to_stable_id: FxHashMap<FileMapIndex, StableFilemapId>,
+
+    // These two fields caches that are populated lazily during decoding.
+    file_index_to_file: RefCell<FxHashMap<FileMapIndex, Rc<FileMap>>>,
+    synthetic_expansion_infos: RefCell<FxHashMap<AbsoluteBytePos, SyntaxContext>>,
 
     // A map from dep-node to the position of the cached query result in
     // `serialized_data`.
-    query_result_index: FxHashMap<SerializedDepNodeIndex, usize>,
+    query_result_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
+
+    // A map from dep-node to the position of any associated diagnostics in
+    // `serialized_data`.
+    prev_diagnostics_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
 }
 
 // This type is used only for (de-)serialization.
 #[derive(RustcEncodable, RustcDecodable)]
-struct Header {
-    prev_filemap_starts: BTreeMap<BytePos, StableFilemapId>,
+struct Footer {
+    file_index_to_stable_id: FxHashMap<FileMapIndex, StableFilemapId>,
     prev_cnums: Vec<(u32, String, CrateDisambiguator)>,
+    query_result_index: EncodedQueryResultIndex,
+    diagnostics_index: EncodedQueryResultIndex,
 }
 
-type EncodedPrevDiagnostics = Vec<(SerializedDepNodeIndex, Vec<Diagnostic>)>;
-type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, usize)>;
+type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
+type EncodedDiagnosticsIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
+type EncodedDiagnostics = Vec<Diagnostic>;
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
+struct FileMapIndex(u32);
+
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, RustcEncodable, RustcDecodable)]
+struct AbsoluteBytePos(u32);
+
+impl AbsoluteBytePos {
+    fn new(pos: usize) -> AbsoluteBytePos {
+        debug_assert!(pos <= ::std::u32::MAX as usize);
+        AbsoluteBytePos(pos as u32)
+    }
+
+    fn to_usize(self) -> usize {
+        self.0 as usize
+    }
+}
 
 impl<'sess> OnDiskCache<'sess> {
     /// Create a new OnDiskCache instance from the serialized data in `data`.
     pub fn new(sess: &'sess Session, data: Vec<u8>, start_pos: usize) -> OnDiskCache<'sess> {
         debug_assert!(sess.opts.incremental.is_some());
 
-        // Decode the header
-        let (header, post_header_pos) = {
+        // Wrapping in a scope so we can borrow `data`
+        let footer: Footer = {
             let mut decoder = opaque::Decoder::new(&data[..], start_pos);
-            let header = Header::decode(&mut decoder)
-                .expect("Error while trying to decode incr. comp. cache header.");
-            (header, decoder.position())
-        };
-
-        let (prev_diagnostics, query_result_index) = {
-            let mut decoder = CacheDecoder {
-                tcx: None,
-                opaque: opaque::Decoder::new(&data[..], post_header_pos),
-                codemap: sess.codemap(),
-                prev_filemap_starts: &header.prev_filemap_starts,
-                cnum_map: &IndexVec::new(),
-            };
-
-            // Decode Diagnostics
-            let prev_diagnostics: FxHashMap<_, _> = {
-                let diagnostics: EncodedPrevDiagnostics =
-                    decode_tagged(&mut decoder, PREV_DIAGNOSTICS_TAG)
-                        .expect("Error while trying to decode previous session \
-                                 diagnostics from incr. comp. cache.");
-                diagnostics.into_iter().collect()
-            };
-
-            // Decode the *position* of the query result index
-            let query_result_index_pos = {
-                let pos_pos = data.len() - IntEncodedWithFixedSize::ENCODED_SIZE;
-                decoder.with_position(pos_pos, |decoder| {
-                    IntEncodedWithFixedSize::decode(decoder)
-                }).expect("Error while trying to decode query result index position.")
-                .0 as usize
-            };
-
-            // Decode the query result index itself
-            let query_result_index: EncodedQueryResultIndex =
-                decoder.with_position(query_result_index_pos, |decoder| {
-                    decode_tagged(decoder, QUERY_RESULT_INDEX_TAG)
-                }).expect("Error while trying to decode query result index.");
-
-            (prev_diagnostics, query_result_index)
+
+            // Decode the *position* of the footer which can be found in the
+            // last 8 bytes of the file.
+            decoder.set_position(data.len() - IntEncodedWithFixedSize::ENCODED_SIZE);
+            let query_result_index_pos = IntEncodedWithFixedSize::decode(&mut decoder)
+                .expect("Error while trying to decode query result index position.")
+                .0 as usize;
+
+            // Decoder the file footer which contains all the lookup tables, etc.
+            decoder.set_position(query_result_index_pos);
+            decode_tagged(&mut decoder, TAG_FILE_FOOTER)
+                .expect("Error while trying to decode query result index position.")
         };
 
         OnDiskCache {
             serialized_data: data,
-            prev_diagnostics,
-            prev_filemap_starts: header.prev_filemap_starts,
-            prev_cnums: header.prev_cnums,
+            file_index_to_stable_id: footer.file_index_to_stable_id,
+            file_index_to_file: RefCell::new(FxHashMap()),
+            prev_cnums: footer.prev_cnums,
             cnum_map: RefCell::new(None),
             codemap: sess.codemap(),
             current_diagnostics: RefCell::new(FxHashMap()),
-            query_result_index: query_result_index.into_iter().collect(),
+            query_result_index: footer.query_result_index.into_iter().collect(),
+            prev_diagnostics_index: footer.diagnostics_index.into_iter().collect(),
+            synthetic_expansion_infos: RefCell::new(FxHashMap()),
         }
     }
 
     pub fn new_empty(codemap: &'sess CodeMap) -> OnDiskCache<'sess> {
         OnDiskCache {
             serialized_data: Vec::new(),
-            prev_diagnostics: FxHashMap(),
-            prev_filemap_starts: BTreeMap::new(),
+            file_index_to_stable_id: FxHashMap(),
+            file_index_to_file: RefCell::new(FxHashMap()),
            prev_cnums: vec![],
             cnum_map: RefCell::new(None),
             codemap,
             current_diagnostics: RefCell::new(FxHashMap()),
             query_result_index: FxHashMap(),
+            prev_diagnostics_index: FxHashMap(),
+            synthetic_expansion_infos: RefCell::new(FxHashMap()),
         }
     }
 
     pub fn serialize<'a, 'tcx, E>(&self,
                                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
-                                  cstore: &CrateStore,
                                   encoder: &mut E)
                                   -> Result<(), E::Error>
         where E: ty_codec::TyEncoder
@@ -161,46 +167,31 @@ impl<'sess> OnDiskCache<'sess> {
         // Serializing the DepGraph should not modify it:
         let _in_ignore = tcx.dep_graph.in_ignore();
 
+        // Allocate FileMapIndices
+        let (file_to_file_index, file_index_to_stable_id) = {
+            let mut file_to_file_index = FxHashMap();
+            let mut file_index_to_stable_id = FxHashMap();
+
+            for (index, file) in tcx.sess.codemap().files().iter().enumerate() {
+                let index = FileMapIndex(index as u32);
+                let file_ptr: *const FileMap = &**file as *const _;
+                file_to_file_index.insert(file_ptr, index);
+                file_index_to_stable_id.insert(index, StableFilemapId::new(&file));
+            }
+
+            (file_to_file_index, file_index_to_stable_id)
+        };
+
         let mut encoder = CacheEncoder {
             tcx,
             encoder,
             type_shorthands: FxHashMap(),
             predicate_shorthands: FxHashMap(),
+            expn_info_shorthands: FxHashMap(),
+            codemap: CachingCodemapView::new(tcx.sess.codemap()),
+            file_to_file_index,
         };
 
-        // Encode the file header
-        let prev_filemap_starts: BTreeMap<_, _> = self
-            .codemap
-            .files()
-            .iter()
-            .map(|fm| (fm.start_pos, StableFilemapId::new(fm)))
-            .collect();
-
-        let sorted_cnums = sorted_cnums_including_local_crate(cstore);
-
-        let prev_cnums: Vec<_> = sorted_cnums.iter().map(|&cnum| {
-            let crate_name = tcx.original_crate_name(cnum).as_str().to_string();
-            let crate_disambiguator = tcx.crate_disambiguator(cnum);
-            (cnum.as_u32(), crate_name, crate_disambiguator)
-        }).collect();
-
-        Header {
-            prev_filemap_starts,
-            prev_cnums,
-        }.encode(&mut encoder)?;
-
-        // Encode Diagnostics
-        let diagnostics: EncodedPrevDiagnostics =
-            self.current_diagnostics
-                .borrow()
-                .iter()
-                .map(|(k, v)| (SerializedDepNodeIndex::new(k.index()), v.clone()))
-                .collect();
-
-        encoder.encode_tagged(PREV_DIAGNOSTICS_TAG, &diagnostics)?;
-
         // Load everything into memory so we can write it out to the on-disk
         // cache. The vast majority of cacheable query results should already
         // be in memory, so this should be a cheap operation.
@@ -218,19 +209,53 @@ impl<'sess> OnDiskCache<'sess> {
             encode_query_results::<typeck_tables_of, _>(tcx, enc, qri)?;
         }
 
-        // Encode query result index
-        let query_result_index_pos = encoder.position() as u64;
-        encoder.encode_tagged(QUERY_RESULT_INDEX_TAG, &query_result_index)?;
+        // Encode diagnostics
+        let diagnostics_index = {
+            let mut diagnostics_index = EncodedDiagnosticsIndex::new();
+
+            for (dep_node_index, diagnostics) in self.current_diagnostics
+                                                     .borrow()
+                                                     .iter() {
+                let pos = AbsoluteBytePos::new(encoder.position());
+                // Let's make sure we get the expected type here:
+                let diagnostics: &EncodedDiagnostics = diagnostics;
+                let dep_node_index =
+                    SerializedDepNodeIndex::new(dep_node_index.index());
+                encoder.encode_tagged(dep_node_index, diagnostics)?;
+                diagnostics_index.push((dep_node_index, pos));
+            }
+
+            diagnostics_index
+        };
+
+        let sorted_cnums = sorted_cnums_including_local_crate(tcx);
+        let prev_cnums: Vec<_> = sorted_cnums.iter().map(|&cnum| {
+            let crate_name = tcx.original_crate_name(cnum).as_str().to_string();
+            let crate_disambiguator = tcx.crate_disambiguator(cnum);
+            (cnum.as_u32(), crate_name, crate_disambiguator)
+        }).collect();
+
+        // Encode the file footer
+        let footer_pos = encoder.position() as u64;
+        encoder.encode_tagged(TAG_FILE_FOOTER, &Footer {
+            file_index_to_stable_id,
+            prev_cnums,
+            query_result_index,
+            diagnostics_index,
+        })?;
 
-        // Encode the position of the query result index as the last 8 bytes of
+        // Encode the position of the footer as the last 8 bytes of the
         // file so we know where to look for it.
-        IntEncodedWithFixedSize(query_result_index_pos).encode(&mut encoder)?;
+        IntEncodedWithFixedSize(footer_pos).encode(encoder.encoder)?;
+
+        // DO NOT WRITE ANYTHING TO THE ENCODER AFTER THIS POINT! The address
+        // of the footer must be the last thing in the data stream.
 
         return Ok(());
 
-        fn sorted_cnums_including_local_crate(cstore: &CrateStore) -> Vec<CrateNum> {
+        fn sorted_cnums_including_local_crate(tcx: TyCtxt) -> Vec<CrateNum> {
             let mut cnums = vec![LOCAL_CRATE];
-            cnums.extend_from_slice(&cstore.crates_untracked()[..]);
+            cnums.extend_from_slice(&tcx.crates()[..]);
             cnums.sort_unstable();
             // Just to be sure...
             cnums.dedup();
```
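The hunk above replaces the cache's leading header with a trailing footer whose position is stored in the file's last 8 bytes. A minimal sketch of that layout, assuming little-endian encoding and plain byte-slice payloads (this is not the actual rustc encoder):

```rust
use std::convert::TryInto;

// Sketch of the cache file layout used above:
// [ query results / diagnostics ... ][ footer ][ footer position: u64 ]
fn write_with_footer(payload: &[u8], footer: &[u8]) -> Vec<u8> {
    let mut out = Vec::from(payload);
    let footer_pos = out.len() as u64;
    out.extend_from_slice(footer);
    // The footer position must be the very last thing in the stream;
    // writing anything after it would break the reader below.
    out.extend_from_slice(&footer_pos.to_le_bytes());
    out
}

fn read_footer(bytes: &[u8]) -> &[u8] {
    // Read the fixed-size position from the end, then jump to the footer.
    let tail = bytes.len() - 8;
    let footer_pos = u64::from_le_bytes(bytes[tail..].try_into().unwrap()) as usize;
    &bytes[footer_pos..tail]
}

fn main() {
    let data = write_with_footer(b"cached results", b"index tables");
    assert_eq!(read_footer(&data), b"index tables");
}
```

Keeping the index tables in a footer lets the encoder stream query results first and only afterwards write out the tables that point into them.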
```diff
@@ -239,10 +264,17 @@ impl<'sess> OnDiskCache<'sess> {
     }
 
     /// Load a diagnostic emitted during the previous compilation session.
-    pub fn load_diagnostics(&self,
+    pub fn load_diagnostics<'a, 'tcx>(&self,
+                                      tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                       dep_node_index: SerializedDepNodeIndex)
                                       -> Vec<Diagnostic> {
-        self.prev_diagnostics.get(&dep_node_index).cloned().unwrap_or(vec![])
+        let diagnostics: Option<EncodedDiagnostics> = self.load_indexed(
+            tcx,
+            dep_node_index,
+            &self.prev_diagnostics_index,
+            "diagnostics");
+
+        diagnostics.unwrap_or(Vec::new())
     }
 
     /// Store a diagnostic emitted during the current compilation session.
@@ -256,35 +288,18 @@ impl<'sess> OnDiskCache<'sess> {
         debug_assert!(prev.is_none());
     }
 
-    pub fn load_query_result<'a, 'tcx, T>(&self,
-                                          tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    /// Returns the cached query result if there is something in the cache for
+    /// the given SerializedDepNodeIndex. Otherwise returns None.
+    pub fn try_load_query_result<'tcx, T>(&self,
+                                          tcx: TyCtxt<'_, 'tcx, 'tcx>,
                                           dep_node_index: SerializedDepNodeIndex)
-                                          -> T
+                                          -> Option<T>
         where T: Decodable
     {
-        let pos = self.query_result_index[&dep_node_index];
-
-        let mut cnum_map = self.cnum_map.borrow_mut();
-        if cnum_map.is_none() {
-            *cnum_map = Some(Self::compute_cnum_map(tcx, &self.prev_cnums[..]));
-        }
-
-        let mut decoder = CacheDecoder {
-            tcx: Some(tcx),
-            opaque: opaque::Decoder::new(&self.serialized_data[..], pos),
-            codemap: self.codemap,
-            prev_filemap_starts: &self.prev_filemap_starts,
-            cnum_map: cnum_map.as_ref().unwrap(),
-        };
-
-        match decode_tagged(&mut decoder, dep_node_index) {
-            Ok(value) => {
-                value
-            }
-            Err(e) => {
-                bug!("Could not decode cached query result: {}", e)
-            }
-        }
+        self.load_indexed(tcx,
+                          dep_node_index,
+                          &self.query_result_index,
+                          "query result")
     }
 
     /// Store a diagnostic emitted during computation of an anonymous query.
@@ -303,6 +318,49 @@ impl<'sess> OnDiskCache<'sess> {
         x.extend(diagnostics.into_iter());
     }
 
+    fn load_indexed<'tcx, T>(&self,
+                             tcx: TyCtxt<'_, 'tcx, 'tcx>,
+                             dep_node_index: SerializedDepNodeIndex,
+                             index: &FxHashMap<SerializedDepNodeIndex,
+                                               AbsoluteBytePos>,
+                             debug_tag: &'static str)
+                             -> Option<T>
+        where T: Decodable
+    {
+        let pos = if let Some(&pos) = index.get(&dep_node_index) {
+            pos
+        } else {
+            return None
+        };
+
+        let mut cnum_map = self.cnum_map.borrow_mut();
+        if cnum_map.is_none() {
+            *cnum_map = Some(Self::compute_cnum_map(tcx, &self.prev_cnums[..]));
+        }
+
+        let mut synthetic_expansion_infos = self.synthetic_expansion_infos.borrow_mut();
+        let mut file_index_to_file = self.file_index_to_file.borrow_mut();
+
+        let mut decoder = CacheDecoder {
+            tcx,
+            opaque: opaque::Decoder::new(&self.serialized_data[..], pos.to_usize()),
+            codemap: self.codemap,
+            cnum_map: cnum_map.as_ref().unwrap(),
+            file_index_to_file: &mut file_index_to_file,
+            file_index_to_stable_id: &self.file_index_to_stable_id,
+            synthetic_expansion_infos: &mut synthetic_expansion_infos,
+        };
+
+        match decode_tagged(&mut decoder, dep_node_index) {
+            Ok(value) => {
+                Some(value)
+            }
+            Err(e) => {
+                bug!("Could not decode cached {}: {}", debug_tag, e)
+            }
+        }
+    }
+
     // This function builds mapping from previous-session-CrateNum to
     // current-session-CrateNum. There might be CrateNums from the previous
     // Session that don't occur in the current one. For these, the mapping
@@ -345,22 +403,45 @@ impl<'sess> OnDiskCache<'sess> {
 /// we use for crate metadata decoding in that it can rebase spans and
 /// eventually will also handle things that contain `Ty` instances.
 struct CacheDecoder<'a, 'tcx: 'a, 'x> {
-    tcx: Option<TyCtxt<'a, 'tcx, 'tcx>>,
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
     opaque: opaque::Decoder<'x>,
     codemap: &'x CodeMap,
-    prev_filemap_starts: &'x BTreeMap<BytePos, StableFilemapId>,
     cnum_map: &'x IndexVec<CrateNum, Option<CrateNum>>,
+    synthetic_expansion_infos: &'x mut FxHashMap<AbsoluteBytePos, SyntaxContext>,
+    file_index_to_file: &'x mut FxHashMap<FileMapIndex, Rc<FileMap>>,
+    file_index_to_stable_id: &'x FxHashMap<FileMapIndex, StableFilemapId>,
 }
 
 impl<'a, 'tcx, 'x> CacheDecoder<'a, 'tcx, 'x> {
-    fn find_filemap_prev_bytepos(&self,
-                                 prev_bytepos: BytePos)
-                                 -> Option<(BytePos, StableFilemapId)> {
-        for (start, id) in self.prev_filemap_starts.range(BytePos(0) ..= prev_bytepos).rev() {
-            return Some((*start, *id))
-        }
-
-        None
+    fn file_index_to_file(&mut self, index: FileMapIndex) -> Rc<FileMap> {
+        let CacheDecoder {
+            ref mut file_index_to_file,
+            ref file_index_to_stable_id,
+            ref codemap,
+            ..
+        } = *self;
+
+        file_index_to_file.entry(index).or_insert_with(|| {
+            let stable_id = file_index_to_stable_id[&index];
+            codemap.filemap_by_stable_id(stable_id)
+                   .expect("Failed to lookup FileMap in new context.")
+        }).clone()
+    }
+}
+
+trait DecoderWithPosition: Decoder {
+    fn position(&self) -> usize;
+}
+
+impl<'enc> DecoderWithPosition for opaque::Decoder<'enc> {
+    fn position(&self) -> usize {
+        self.position()
+    }
+}
+
+impl<'a, 'tcx, 'x> DecoderWithPosition for CacheDecoder<'a, 'tcx, 'x> {
+    fn position(&self) -> usize {
+        self.opaque.position()
     }
 }
@@ -371,7 +452,7 @@ fn decode_tagged<'a, 'tcx, D, T, V>(decoder: &mut D,
                                     -> Result<V, D::Error>
     where T: Decodable + Eq + ::std::fmt::Debug,
           V: Decodable,
-          D: Decoder + ty_codec::TyDecoder<'a, 'tcx>,
+          D: DecoderWithPosition,
           'tcx: 'a,
 {
     let start_pos = decoder.position();
@@ -392,7 +473,7 @@ impl<'a, 'tcx: 'a, 'x> ty_codec::TyDecoder<'a, 'tcx> for CacheDecoder<'a, 'tcx,
     #[inline]
     fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
-        self.tcx.expect("missing TyCtxt in CacheDecoder")
+        self.tcx
     }
 
     #[inline]
@@ -450,18 +531,55 @@ implement_ty_decoder!( CacheDecoder<'a, 'tcx, 'x> );
 impl<'a, 'tcx, 'x> SpecializedDecoder<Span> for CacheDecoder<'a, 'tcx, 'x> {
     fn specialized_decode(&mut self) -> Result<Span, Self::Error> {
-        let lo = BytePos::decode(self)?;
-        let hi = BytePos::decode(self)?;
-
-        if let Some((prev_filemap_start, filemap_id)) = self.find_filemap_prev_bytepos(lo) {
-            if let Some(current_filemap) = self.codemap.filemap_by_stable_id(filemap_id) {
-                let lo = (lo + current_filemap.start_pos) - prev_filemap_start;
-                let hi = (hi + current_filemap.start_pos) - prev_filemap_start;
-                return Ok(Span::new(lo, hi, NO_EXPANSION));
-            }
-        }
-
-        Ok(DUMMY_SP)
+        let tag: u8 = Decodable::decode(self)?;
+
+        if tag == TAG_INVALID_SPAN {
+            return Ok(DUMMY_SP);
+        } else {
+            debug_assert_eq!(tag, TAG_VALID_SPAN);
+        }
+
+        let file_lo_index = FileMapIndex::decode(self)?;
+        let line_lo = usize::decode(self)?;
+        let col_lo = BytePos::decode(self)?;
+        let len = BytePos::decode(self)?;
+
+        let file_lo = self.file_index_to_file(file_lo_index);
+        let lo = file_lo.lines.borrow()[line_lo - 1] + col_lo;
+        let hi = lo + len;
+
+        let expn_info_tag = u8::decode(self)?;
+
+        let ctxt = match expn_info_tag {
+            TAG_NO_EXPANSION_INFO => {
+                SyntaxContext::empty()
+            }
+            TAG_EXPANSION_INFO_INLINE => {
+                let pos = AbsoluteBytePos::new(self.opaque.position());
+                let expn_info: ExpnInfo = Decodable::decode(self)?;
+                let ctxt = SyntaxContext::allocate_directly(expn_info);
+                self.synthetic_expansion_infos.insert(pos, ctxt);
+                ctxt
+            }
+            TAG_EXPANSION_INFO_SHORTHAND => {
+                let pos = AbsoluteBytePos::decode(self)?;
+                if let Some(ctxt) = self.synthetic_expansion_infos.get(&pos).cloned() {
+                    ctxt
+                } else {
+                    let expn_info = self.with_position(pos.to_usize(), |this| {
+                        ExpnInfo::decode(this)
+                    })?;
+                    let ctxt = SyntaxContext::allocate_directly(expn_info);
+                    self.synthetic_expansion_infos.insert(pos, ctxt);
+                    ctxt
+                }
+            }
+            _ => {
+                unreachable!()
+            }
+        };
+
+        Ok(Span::new(lo, hi, ctxt))
     }
 }
@@ -479,6 +597,7 @@ impl<'a, 'tcx, 'x> SpecializedDecoder<DefIndex> for CacheDecoder<'a, 'tcx, 'x> {
 // compilation sessions. We use the DefPathHash, which is stable across
 // sessions, to map the old DefId to the new one.
 impl<'a, 'tcx, 'x> SpecializedDecoder<DefId> for CacheDecoder<'a, 'tcx, 'x> {
+    #[inline]
     fn specialized_decode(&mut self) -> Result<DefId, Self::Error> {
         // Load the DefPathHash which is was we encoded the DefId as.
         let def_path_hash = DefPathHash::decode(self)?;
@@ -489,6 +608,7 @@ impl<'a, 'tcx, 'x> SpecializedDecoder<DefId> for CacheDecoder<'a, 'tcx, 'x> {
 }
 
 impl<'a, 'tcx, 'x> SpecializedDecoder<LocalDefId> for CacheDecoder<'a, 'tcx, 'x> {
+    #[inline]
     fn specialized_decode(&mut self) -> Result<LocalDefId, Self::Error> {
         Ok(LocalDefId::from_def_id(DefId::decode(self)?))
     }
@@ -558,11 +678,18 @@ struct CacheEncoder<'enc, 'a, 'tcx, E>
     encoder: &'enc mut E,
     type_shorthands: FxHashMap<ty::Ty<'tcx>, usize>,
     predicate_shorthands: FxHashMap<ty::Predicate<'tcx>, usize>,
+    expn_info_shorthands: FxHashMap<Mark, AbsoluteBytePos>,
+    codemap: CachingCodemapView<'tcx>,
+    file_to_file_index: FxHashMap<*const FileMap, FileMapIndex>,
 }
 
 impl<'enc, 'a, 'tcx, E> CacheEncoder<'enc, 'a, 'tcx, E>
     where E: 'enc + ty_codec::TyEncoder
 {
+    fn filemap_index(&mut self, filemap: Rc<FileMap>) -> FileMapIndex {
+        self.file_to_file_index[&(&*filemap as *const FileMap)]
+    }
+
     /// Encode something with additional information that allows to do some
     /// sanity checks when decoding the data again. This method will first
     /// encode the specified tag, then the given value, then the number of
@@ -584,6 +711,65 @@ impl<'enc, 'a, 'tcx, E> CacheEncoder<'enc, 'a, 'tcx, E>
     }
 }
 
+impl<'enc, 'a, 'tcx, E> SpecializedEncoder<Span> for CacheEncoder<'enc, 'a, 'tcx, E>
+    where E: 'enc + ty_codec::TyEncoder
+{
+    fn specialized_encode(&mut self, span: &Span) -> Result<(), Self::Error> {
+        if *span == DUMMY_SP {
+            return TAG_INVALID_SPAN.encode(self);
+        }
+
+        let span_data = span.data();
+
+        if span_data.hi < span_data.lo {
+            return TAG_INVALID_SPAN.encode(self);
+        }
+
+        let (file_lo, line_lo, col_lo) = match self.codemap
+                                                   .byte_pos_to_line_and_col(span_data.lo) {
+            Some(pos) => pos,
+            None => {
+                return TAG_INVALID_SPAN.encode(self);
+            }
+        };
+
+        if !file_lo.contains(span_data.hi) {
+            return TAG_INVALID_SPAN.encode(self);
+        }
+
+        let len = span_data.hi - span_data.lo;
+
+        let filemap_index = self.filemap_index(file_lo);
+
+        TAG_VALID_SPAN.encode(self)?;
+        filemap_index.encode(self)?;
+        line_lo.encode(self)?;
+        col_lo.encode(self)?;
+        len.encode(self)?;
+
+        if span_data.ctxt == SyntaxContext::empty() {
+            TAG_NO_EXPANSION_INFO.encode(self)
+        } else {
+            let mark = span_data.ctxt.outer();
+
+            if let Some(expn_info) = mark.expn_info() {
+                if let Some(pos) = self.expn_info_shorthands.get(&mark).cloned() {
+                    TAG_EXPANSION_INFO_SHORTHAND.encode(self)?;
+                    pos.encode(self)
+                } else {
+                    TAG_EXPANSION_INFO_INLINE.encode(self)?;
+                    let pos = AbsoluteBytePos::new(self.position());
+                    self.expn_info_shorthands.insert(mark, pos);
+                    expn_info.encode(self)
+                }
+            } else {
+                TAG_NO_EXPANSION_INFO.encode(self)
+            }
+        }
+    }
+}
+
 impl<'enc, 'a, 'tcx, E> ty_codec::TyEncoder for CacheEncoder<'enc, 'a, 'tcx, E>
     where E: 'enc + ty_codec::TyEncoder
 {
```
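The `expn_info_shorthands` table above implements a simple deduplication scheme: the first occurrence of an `ExpnInfo` is encoded inline and its absolute position recorded, while later occurrences are encoded as a backreference to that position. A rough sketch of the idea, with strings standing in for `ExpnInfo` and an in-memory stream instead of a byte encoder:

```rust
use std::collections::HashMap;

// Simplified stand-ins for TAG_EXPANSION_INFO_INLINE / _SHORTHAND.
#[derive(Debug, PartialEq)]
enum Entry {
    Inline(String),   // first occurrence: full value, recorded at its position
    Shorthand(usize), // later occurrences: backreference to that position
}

#[derive(Default)]
struct Encoder {
    stream: Vec<Entry>,
    shorthands: HashMap<String, usize>,
}

impl Encoder {
    fn encode(&mut self, value: &str) {
        if let Some(&pos) = self.shorthands.get(value) {
            self.stream.push(Entry::Shorthand(pos));
        } else {
            // Remember where the inline copy lives so repeats can point at it.
            self.shorthands.insert(value.to_owned(), self.stream.len());
            self.stream.push(Entry::Inline(value.to_owned()));
        }
    }
}

fn main() {
    let mut enc = Encoder::default();
    enc.encode("macro_a");
    enc.encode("macro_a"); // deduplicated into a backreference
    assert_eq!(enc.stream[1], Entry::Shorthand(0));
}
```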
```diff
@@ -753,10 +939,7 @@ impl IntEncodedWithFixedSize {
 impl UseSpecializedEncodable for IntEncodedWithFixedSize {}
 impl UseSpecializedDecodable for IntEncodedWithFixedSize {}
 
-impl<'enc, 'a, 'tcx, E> SpecializedEncoder<IntEncodedWithFixedSize>
-for CacheEncoder<'enc, 'a, 'tcx, E>
-    where E: 'enc + ty_codec::TyEncoder
-{
+impl<'enc> SpecializedEncoder<IntEncodedWithFixedSize> for opaque::Encoder<'enc> {
     fn specialized_encode(&mut self, x: &IntEncodedWithFixedSize) -> Result<(), Self::Error> {
         let start_pos = self.position();
         for i in 0 .. IntEncodedWithFixedSize::ENCODED_SIZE {
@@ -768,8 +951,7 @@ for CacheEncoder<'enc, 'a, 'tcx, E>
     }
 }
 
-impl<'a, 'tcx, 'x> SpecializedDecoder<IntEncodedWithFixedSize>
-for CacheDecoder<'a, 'tcx, 'x> {
+impl<'enc> SpecializedDecoder<IntEncodedWithFixedSize> for opaque::Decoder<'enc> {
     fn specialized_decode(&mut self) -> Result<IntEncodedWithFixedSize, Self::Error> {
         let mut value: u64 = 0;
         let start_pos = self.position();
@@ -799,7 +981,7 @@ fn encode_query_results<'enc, 'a, 'tcx, Q, E>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
         let dep_node = SerializedDepNodeIndex::new(entry.index.index());
 
         // Record position of the cache entry
-        query_result_index.push((dep_node, encoder.position()));
+        query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.position())));
 
         // Encode the type check tables with the SerializedDepNodeIndex
         // as tag.
```


```diff
@@ -145,7 +145,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
         if !self.dep_graph.is_fully_enabled() {
             return None;
         }
-        match self.dep_graph.try_mark_green(self, &dep_node) {
+        match self.dep_graph.try_mark_green(self.global_tcx(), &dep_node) {
             Some(dep_node_index) => {
                 debug_assert!(self.dep_graph.is_green(dep_node_index));
                 self.dep_graph.read_index(dep_node_index);
@@ -392,12 +392,31 @@ macro_rules! define_maps {
         {
             debug_assert!(tcx.dep_graph.is_green(dep_node_index));
 
-            let result = if tcx.sess.opts.debugging_opts.incremental_queries &&
-                            Self::cache_on_disk(key) {
+            // First we try to load the result from the on-disk cache
+            let result = if Self::cache_on_disk(key) &&
+                            tcx.sess.opts.debugging_opts.incremental_queries {
                 let prev_dep_node_index =
                     tcx.dep_graph.prev_dep_node_index_of(dep_node);
-                Self::load_from_disk(tcx.global_tcx(), prev_dep_node_index)
+                let result = Self::try_load_from_disk(tcx.global_tcx(),
+                                                      prev_dep_node_index);
+
+                // We always expect to find a cached result for things that
+                // can be forced from DepNode.
+                debug_assert!(!dep_node.kind.can_reconstruct_query_key() ||
+                              result.is_some(),
+                              "Missing on-disk cache entry for {:?}",
+                              dep_node);
+
+                result
             } else {
+                // Some things are never cached on disk.
+                None
+            };
+
+            let result = if let Some(result) = result {
+                result
+            } else {
+                // We could not load a result from the on-disk cache, so
+                // recompute.
                 let (result, _ ) = tcx.cycle_check(span, Query::$name(key), || {
                     // The diagnostics for this query have already been
                     // promoted to the current session during
```


```diff
@@ -162,6 +162,10 @@ impl<'a> Decoder<'a> {
         self.position
     }
 
+    pub fn set_position(&mut self, pos: usize) {
+        self.position = pos
+    }
+
     pub fn advance(&mut self, bytes: usize) {
         self.position += bytes;
     }
```


```diff
@@ -105,7 +105,7 @@ impl FileLoader for RealFileLoader {
 // This is a FileMap identifier that is used to correlate FileMaps between
 // subsequent compilation sessions (which is something we need to do during
 // incremental compilation).
-#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)]
 pub struct StableFilemapId(u128);
 
 impl StableFilemapId {
```


```diff
@@ -140,6 +140,31 @@ impl SyntaxContext {
         SyntaxContext(0)
     }
 
+    // Allocate a new SyntaxContext with the given ExpnInfo. This is used when
+    // deserializing Spans from the incr. comp. cache.
+    // FIXME(mw): This method does not restore MarkData::parent or
+    // SyntaxContextData::prev_ctxt or SyntaxContextData::modern. These things
+    // don't seem to be used after HIR lowering, so everything should be fine
+    // as long as incremental compilation does not kick in before that.
+    pub fn allocate_directly(expansion_info: ExpnInfo) -> Self {
+        HygieneData::with(|data| {
+            data.marks.push(MarkData {
+                parent: Mark::root(),
+                modern: false,
+                expn_info: Some(expansion_info)
+            });
+
+            let mark = Mark(data.marks.len() as u32 - 1);
+
+            data.syntax_contexts.push(SyntaxContextData {
+                outer_mark: mark,
+                prev_ctxt: SyntaxContext::empty(),
+                modern: SyntaxContext::empty(),
+            });
+
+            SyntaxContext(data.syntax_contexts.len() as u32 - 1)
+        })
+    }
+
     /// Extend a syntax context with a given mark
     pub fn apply_mark(self, mark: Mark) -> SyntaxContext {
         HygieneData::with(|data| {
@@ -286,7 +311,7 @@ impl fmt::Debug for SyntaxContext {
 }
 
 /// Extra information for tracking spans of macro and syntax sugar expansion
-#[derive(Clone, Hash, Debug)]
+#[derive(Clone, Hash, Debug, RustcEncodable, RustcDecodable)]
 pub struct ExpnInfo {
     /// The location of the actual macro invocation or syntax sugar , e.g.
     /// `let x = foo!();` or `if let Some(y) = x {}`
@@ -302,7 +327,7 @@ pub struct ExpnInfo {
     pub callee: NameAndSpan
 }
 
-#[derive(Clone, Hash, Debug)]
+#[derive(Clone, Hash, Debug, RustcEncodable, RustcDecodable)]
 pub struct NameAndSpan {
     /// The format with which the macro was invoked.
     pub format: ExpnFormat,
@@ -330,7 +355,7 @@ impl NameAndSpan {
 }
 
 /// The source of expansion.
-#[derive(Clone, Hash, Debug, PartialEq, Eq)]
+#[derive(Clone, Hash, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)]
 pub enum ExpnFormat {
     /// e.g. #[derive(...)] <item>
     MacroAttribute(Symbol),
@@ -341,7 +366,7 @@ pub enum ExpnFormat {
 }
 
 /// The kind of compiler desugaring.
-#[derive(Clone, Hash, Debug, PartialEq, Eq)]
+#[derive(Clone, Hash, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)]
 pub enum CompilerDesugaringKind {
     BackArrow,
     DotFill,
```


```diff
@@ -931,6 +931,11 @@ impl FileMap {
             (lines[line_index], lines[line_index + 1])
         }
     }
+
+    #[inline]
+    pub fn contains(&self, byte_pos: BytePos) -> bool {
+        byte_pos >= self.start_pos && byte_pos <= self.end_pos
+    }
 }
 
 /// Remove utf-8 BOM if any.
```