Reviewer changes

Nick Cameron 2015-04-29 18:14:37 +12:00
parent bb26aadaf3
commit 7bfb5ed826
8 changed files with 2466 additions and 2498 deletions


@@ -1,824 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Common facilities for record-like types (structs, enums, tuples)
use self::MemberDescriptionFactory::*;
use self::EnumDiscriminantInfo::*;
use self::MemberOffset::*;
use super::{UNKNOWN_FILE_METADATA, UNKNOWN_SCOPE_METADATA, UNKNOWN_LINE_NUMBER,
UniqueTypeId, FLAGS_NONE, create_and_register_recursive_type_forward_declaration};
use super::utils::{debug_context, DIB, span_start, bytes_to_bits,
size_and_align_of, get_namespace_and_span_for_item};
use super::create::create_DIArray;
use super::types::compute_debuginfo_type_name;
use super::metadata::{type_metadata, file_metadata};
use super::RecursiveTypeDescription::{self, FinalMetadata};
use llvm;
use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType};
use metadata::csearch;
use middle::subst::{self, Substs};
use trans::{adt, machine, type_of};
use trans::common::CrateContext;
use trans::monomorphize;
use trans::type_::Type;
use middle::ty::{self, Ty, ClosureTyper};
use libc::c_uint;
use std::ffi::CString;
use std::ptr;
use std::rc::Rc;
use syntax::codemap::Span;
use syntax::{ast, codemap};
use syntax::parse::token::{self, special_idents};
pub enum MemberOffset {
FixedMemberOffset { bytes: usize },
// For ComputedMemberOffset, the offset is read from the llvm type definition.
ComputedMemberOffset
}
// Description of a type member, which can either be a regular field (as in
// structs or tuples) or an enum variant.
pub struct MemberDescription {
pub name: String,
pub llvm_type: Type,
pub type_metadata: DIType,
pub offset: MemberOffset,
pub flags: c_uint
}
// A factory for MemberDescriptions. It produces a list of member descriptions
// for some record-like type. MemberDescriptionFactories are used to defer the
// creation of type member descriptions in order to break cycles arising from
// recursive type definitions.
pub enum MemberDescriptionFactory<'tcx> {
StructMDF(StructMemberDescriptionFactory<'tcx>),
TupleMDF(TupleMemberDescriptionFactory<'tcx>),
EnumMDF(EnumMemberDescriptionFactory<'tcx>),
VariantMDF(VariantMemberDescriptionFactory<'tcx>)
}
impl<'tcx> MemberDescriptionFactory<'tcx> {
pub fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
match *self {
StructMDF(ref this) => {
this.create_member_descriptions(cx)
}
TupleMDF(ref this) => {
this.create_member_descriptions(cx)
}
EnumMDF(ref this) => {
this.create_member_descriptions(cx)
}
VariantMDF(ref this) => {
this.create_member_descriptions(cx)
}
}
}
}
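// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this commit): the deferral idea behind
// MemberDescriptionFactory in miniature. A forward declaration (stub) for a
// recursive type is registered first, and its member list is produced later
// by a factory closure, so a type such as
// `struct List { next: Option<Box<List>> }` never recurses while the stub is
// still being built. All names below are hypothetical and independent of the
// compiler's real types.
use std::cell::RefCell;
use std::collections::HashMap;

#[derive(Clone, Debug)]
struct TypeStub {
    members: Vec<String>,
}

struct TypeMap {
    stubs: RefCell<HashMap<String, TypeStub>>,
}

impl TypeMap {
    // Step 1: register an empty forward declaration under the type's name.
    fn declare(&self, name: &str) {
        self.stubs
            .borrow_mut()
            .insert(name.to_string(), TypeStub { members: vec![] });
    }

    // Step 2: run the member "factory" only after the stub exists, so a member
    // that refers back to `name` finds the stub instead of recursing forever.
    fn fill<F>(&self, name: &str, factory: F)
    where
        F: FnOnce() -> Vec<String>,
    {
        let members = factory();
        if let Some(stub) = self.stubs.borrow_mut().get_mut(name) {
            stub.members = members;
        }
    }
}

fn main() {
    let map = TypeMap { stubs: RefCell::new(HashMap::new()) };
    map.declare("List");
    map.fill("List", || {
        vec!["value: i32".to_string(), "next: Option<Box<List>>".to_string()]
    });
    println!("{:?}", map.stubs.borrow().get("List"));
}
// ---------------------------------------------------------------------------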
//=-----------------------------------------------------------------------------
// Structs
//=-----------------------------------------------------------------------------
// Creates MemberDescriptions for the fields of a struct
struct StructMemberDescriptionFactory<'tcx> {
fields: Vec<ty::field<'tcx>>,
is_simd: bool,
span: Span,
}
impl<'tcx> StructMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
if self.fields.is_empty() {
return Vec::new();
}
let field_size = if self.is_simd {
machine::llsize_of_alloc(cx, type_of::type_of(cx, self.fields[0].mt.ty)) as usize
} else {
0xdeadbeef
};
self.fields.iter().enumerate().map(|(i, field)| {
let name = if field.name == special_idents::unnamed_field.name {
format!("__{}", i)
} else {
token::get_name(field.name).to_string()
};
let offset = if self.is_simd {
assert!(field_size != 0xdeadbeef);
FixedMemberOffset { bytes: i * field_size }
} else {
ComputedMemberOffset
};
MemberDescription {
name: name,
llvm_type: type_of::type_of(cx, field.mt.ty),
type_metadata: type_metadata(cx, field.mt.ty, self.span),
offset: offset,
flags: FLAGS_NONE,
}
}).collect()
}
}
pub fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
struct_type: Ty<'tcx>,
def_id: ast::DefId,
substs: &subst::Substs<'tcx>,
unique_type_id: UniqueTypeId,
span: Span)
-> RecursiveTypeDescription<'tcx> {
let struct_name = compute_debuginfo_type_name(cx, struct_type, false);
let struct_llvm_type = type_of::type_of(cx, struct_type);
let (containing_scope, _) = get_namespace_and_span_for_item(cx, def_id);
let struct_metadata_stub = create_struct_stub(cx,
struct_llvm_type,
&struct_name[..],
unique_type_id,
containing_scope);
let mut fields = ty::struct_fields(cx.tcx(), def_id, substs);
// The `Ty` values returned by `ty::struct_fields` can still contain
// `ty_projection` variants, so normalize those away.
for field in &mut fields {
field.mt.ty = monomorphize::normalize_associated_type(cx.tcx(), &field.mt.ty);
}
create_and_register_recursive_type_forward_declaration(
cx,
struct_type,
unique_type_id,
struct_metadata_stub,
struct_llvm_type,
StructMDF(StructMemberDescriptionFactory {
fields: fields,
is_simd: ty::type_is_simd(cx.tcx(), struct_type),
span: span,
})
)
}
//=-----------------------------------------------------------------------------
// Tuples
//=-----------------------------------------------------------------------------
// Creates MemberDescriptions for the fields of a tuple
struct TupleMemberDescriptionFactory<'tcx> {
component_types: Vec<Ty<'tcx>>,
span: Span,
}
impl<'tcx> TupleMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
self.component_types
.iter()
.enumerate()
.map(|(i, &component_type)| {
MemberDescription {
name: format!("__{}", i),
llvm_type: type_of::type_of(cx, component_type),
type_metadata: type_metadata(cx, component_type, self.span),
offset: ComputedMemberOffset,
flags: FLAGS_NONE,
}
}).collect()
}
}
pub fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
tuple_type: Ty<'tcx>,
component_types: &[Ty<'tcx>],
unique_type_id: UniqueTypeId,
span: Span)
-> RecursiveTypeDescription<'tcx> {
let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false);
let tuple_llvm_type = type_of::type_of(cx, tuple_type);
create_and_register_recursive_type_forward_declaration(
cx,
tuple_type,
unique_type_id,
create_struct_stub(cx,
tuple_llvm_type,
&tuple_name[..],
unique_type_id,
UNKNOWN_SCOPE_METADATA),
tuple_llvm_type,
TupleMDF(TupleMemberDescriptionFactory {
component_types: component_types.to_vec(),
span: span,
})
)
}
//=-----------------------------------------------------------------------------
// Enums
//=-----------------------------------------------------------------------------
// Describes the members of an enum value: An enum is described as a union of
// structs in DWARF. This MemberDescriptionFactory provides the description for
// the members of this union; so for every variant of the given enum, this
// factory will produce one MemberDescription (all with no name and a fixed
// offset of zero bytes).
struct EnumMemberDescriptionFactory<'tcx> {
enum_type: Ty<'tcx>,
type_rep: Rc<adt::Repr<'tcx>>,
variants: Rc<Vec<Rc<ty::VariantInfo<'tcx>>>>,
discriminant_type_metadata: Option<DIType>,
containing_scope: DIScope,
file_metadata: DIFile,
span: Span,
}
impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
match *self.type_rep {
adt::General(_, ref struct_defs, _) => {
let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata
.expect(""));
struct_defs
.iter()
.enumerate()
.map(|(i, struct_def)| {
let (variant_type_metadata,
variant_llvm_type,
member_desc_factory) =
describe_enum_variant(cx,
self.enum_type,
struct_def,
&*(*self.variants)[i],
discriminant_info,
self.containing_scope,
self.span);
let member_descriptions = member_desc_factory
.create_member_descriptions(cx);
set_members_of_composite_type(cx,
variant_type_metadata,
variant_llvm_type,
&member_descriptions[..]);
MemberDescription {
name: "".to_string(),
llvm_type: variant_llvm_type,
type_metadata: variant_type_metadata,
offset: FixedMemberOffset { bytes: 0 },
flags: FLAGS_NONE
}
}).collect()
},
adt::Univariant(ref struct_def, _) => {
assert!(self.variants.len() <= 1);
if self.variants.is_empty() {
vec![]
} else {
let (variant_type_metadata,
variant_llvm_type,
member_description_factory) =
describe_enum_variant(cx,
self.enum_type,
struct_def,
&*(*self.variants)[0],
NoDiscriminant,
self.containing_scope,
self.span);
let member_descriptions =
member_description_factory.create_member_descriptions(cx);
set_members_of_composite_type(cx,
variant_type_metadata,
variant_llvm_type,
&member_descriptions[..]);
vec![
MemberDescription {
name: "".to_string(),
llvm_type: variant_llvm_type,
type_metadata: variant_type_metadata,
offset: FixedMemberOffset { bytes: 0 },
flags: FLAGS_NONE
}
]
}
}
adt::RawNullablePointer { nndiscr: non_null_variant_index, nnty, .. } => {
// As far as debuginfo is concerned, the pointer this enum
// represents is still wrapped in a struct. This is to make the
// DWARF representation of enums uniform.
// First create a description of the artificial wrapper struct:
let non_null_variant = &(*self.variants)[non_null_variant_index as usize];
let non_null_variant_name = token::get_name(non_null_variant.name);
// The llvm type and metadata of the pointer
let non_null_llvm_type = type_of::type_of(cx, nnty);
let non_null_type_metadata = type_metadata(cx, nnty, self.span);
// The type of the artificial struct wrapping the pointer
let artificial_struct_llvm_type = Type::struct_(cx,
&[non_null_llvm_type],
false);
// For the metadata of the wrapper struct, we need to create a
// MemberDescription of the struct's single field.
let sole_struct_member_description = MemberDescription {
name: match non_null_variant.arg_names {
Some(ref names) => token::get_name(names[0]).to_string(),
None => "__0".to_string()
},
llvm_type: non_null_llvm_type,
type_metadata: non_null_type_metadata,
offset: FixedMemberOffset { bytes: 0 },
flags: FLAGS_NONE
};
let unique_type_id = debug_context(cx).type_map
.borrow_mut()
.get_unique_type_id_of_enum_variant(
cx,
self.enum_type,
&non_null_variant_name);
// Now we can create the metadata of the artificial struct
let artificial_struct_metadata =
composite_type_metadata(cx,
artificial_struct_llvm_type,
&non_null_variant_name,
unique_type_id,
&[sole_struct_member_description],
self.containing_scope,
self.file_metadata,
codemap::DUMMY_SP);
// Encode the information about the null variant in the union
// member's name.
let null_variant_index = (1 - non_null_variant_index) as usize;
let null_variant_name = token::get_name((*self.variants)[null_variant_index].name);
let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
0,
null_variant_name);
// Finally create the (singleton) list of descriptions of union
// members.
vec![
MemberDescription {
name: union_member_name,
llvm_type: artificial_struct_llvm_type,
type_metadata: artificial_struct_metadata,
offset: FixedMemberOffset { bytes: 0 },
flags: FLAGS_NONE
}
]
},
adt::StructWrappedNullablePointer { nonnull: ref struct_def,
nndiscr,
ref discrfield, ..} => {
// Create a description of the non-null variant
let (variant_type_metadata, variant_llvm_type, member_description_factory) =
describe_enum_variant(cx,
self.enum_type,
struct_def,
&*(*self.variants)[nndiscr as usize],
OptimizedDiscriminant,
self.containing_scope,
self.span);
let variant_member_descriptions =
member_description_factory.create_member_descriptions(cx);
set_members_of_composite_type(cx,
variant_type_metadata,
variant_llvm_type,
&variant_member_descriptions[..]);
// Encode the information about the null variant in the union
// member's name.
let null_variant_index = (1 - nndiscr) as usize;
let null_variant_name = token::get_name((*self.variants)[null_variant_index].name);
let discrfield = discrfield.iter()
.skip(1)
.map(|x| x.to_string())
.collect::<Vec<_>>().connect("$");
let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
discrfield,
null_variant_name);
// Create the (singleton) list of descriptions of union members.
vec![
MemberDescription {
name: union_member_name,
llvm_type: variant_llvm_type,
type_metadata: variant_type_metadata,
offset: FixedMemberOffset { bytes: 0 },
flags: FLAGS_NONE
}
]
},
adt::CEnum(..) => cx.sess().span_bug(self.span, "This should be unreachable.")
}
}
}
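// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this commit): how the RUST$ENCODED$ENUM
// union member names built above encode the null variant of a pointer-
// optimized enum. A path of field indices is joined with '$' and followed by
// the null variant's name. This helper is hypothetical and written in current
// Rust (`join` rather than the old `connect`).
fn encoded_enum_name(discr_field_path: &[usize], null_variant: &str) -> String {
    let path = discr_field_path
        .iter()
        .map(|i| i.to_string())
        .collect::<Vec<_>>()
        .join("$");
    format!("RUST$ENCODED$ENUM${}${}", path, null_variant)
}

fn main() {
    // A RawNullablePointer-style enum: the discriminant is the pointer itself,
    // so the encoded path is just "0".
    assert_eq!(encoded_enum_name(&[0], "None"), "RUST$ENCODED$ENUM$0$None");
    // A StructWrappedNullablePointer-style enum where the non-null pointer
    // sits deeper inside the wrapping struct.
    assert_eq!(encoded_enum_name(&[0, 1], "Empty"), "RUST$ENCODED$ENUM$0$1$Empty");
}
// ---------------------------------------------------------------------------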
// Creates MemberDescriptions for the fields of a single enum variant.
struct VariantMemberDescriptionFactory<'tcx> {
args: Vec<(String, Ty<'tcx>)>,
discriminant_type_metadata: Option<DIType>,
span: Span,
}
impl<'tcx> VariantMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
self.args.iter().enumerate().map(|(i, &(ref name, ty))| {
MemberDescription {
name: name.to_string(),
llvm_type: type_of::type_of(cx, ty),
type_metadata: match self.discriminant_type_metadata {
Some(metadata) if i == 0 => metadata,
_ => type_metadata(cx, ty, self.span)
},
offset: ComputedMemberOffset,
flags: FLAGS_NONE
}
}).collect()
}
}
#[derive(Copy, Clone)]
enum EnumDiscriminantInfo {
RegularDiscriminant(DIType),
OptimizedDiscriminant,
NoDiscriminant
}
// Returns a tuple of (1) type_metadata_stub of the variant, (2) the llvm_type
// of the variant, and (3) a MemberDescriptionFactory for producing the
// descriptions of the fields of the variant. This is a rudimentary version of a
// full RecursiveTypeDescription.
fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
enum_type: Ty<'tcx>,
struct_def: &adt::Struct<'tcx>,
variant_info: &ty::VariantInfo<'tcx>,
discriminant_info: EnumDiscriminantInfo,
containing_scope: DIScope,
span: Span)
-> (DICompositeType, Type, MemberDescriptionFactory<'tcx>) {
let variant_llvm_type =
Type::struct_(cx, &struct_def.fields
.iter()
.map(|&t| type_of::type_of(cx, t))
.collect::<Vec<_>>()
,
struct_def.packed);
// Could do some consistency checks here: size, align, field count, discr type
let variant_name = token::get_name(variant_info.name);
let variant_name = &variant_name;
let unique_type_id = debug_context(cx).type_map
.borrow_mut()
.get_unique_type_id_of_enum_variant(
cx,
enum_type,
variant_name);
let metadata_stub = create_struct_stub(cx,
variant_llvm_type,
variant_name,
unique_type_id,
containing_scope);
// Get the argument names from the enum variant info
let mut arg_names: Vec<_> = match variant_info.arg_names {
Some(ref names) => {
names.iter()
.map(|&name| token::get_name(name).to_string())
.collect()
}
None => {
variant_info.args
.iter()
.enumerate()
.map(|(i, _)| format!("__{}", i))
.collect()
}
};
// If this is not a univariant enum, there is also the discriminant field.
match discriminant_info {
RegularDiscriminant(_) => arg_names.insert(0, "RUST$ENUM$DISR".to_string()),
_ => { /* do nothing */ }
};
// Build an array of (field name, field type) pairs to be captured in the factory closure.
let args: Vec<(String, Ty)> = arg_names.iter()
.zip(struct_def.fields.iter())
.map(|(s, &t)| (s.to_string(), t))
.collect();
let member_description_factory =
VariantMDF(VariantMemberDescriptionFactory {
args: args,
discriminant_type_metadata: match discriminant_info {
RegularDiscriminant(discriminant_type_metadata) => {
Some(discriminant_type_metadata)
}
_ => None
},
span: span,
});
(metadata_stub, variant_llvm_type, member_description_factory)
}
pub fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
enum_type: Ty<'tcx>,
enum_def_id: ast::DefId,
unique_type_id: UniqueTypeId,
span: Span)
-> RecursiveTypeDescription<'tcx> {
let enum_name = compute_debuginfo_type_name(cx, enum_type, false);
let (containing_scope, definition_span) = get_namespace_and_span_for_item(cx, enum_def_id);
let loc = span_start(cx, definition_span);
let file_metadata = file_metadata(cx, &loc.file.name);
let variants = ty::enum_variants(cx.tcx(), enum_def_id);
let enumerators_metadata: Vec<DIDescriptor> = variants
.iter()
.map(|v| {
let token = token::get_name(v.name);
let name = CString::new(token.as_bytes()).unwrap();
unsafe {
llvm::LLVMDIBuilderCreateEnumerator(
DIB(cx),
name.as_ptr(),
v.disr_val as u64)
}
})
.collect();
let discriminant_type_metadata = |inttype| {
// We can reuse the type of the discriminant for all monomorphized
// instances of an enum because it doesn't depend on any type
// parameters. The def_id, which uniquely identifies the enum's polytype,
// acts as the key in this cache.
let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types
.borrow()
.get(&enum_def_id).cloned();
match cached_discriminant_type_metadata {
Some(discriminant_type_metadata) => discriminant_type_metadata,
None => {
let discriminant_llvm_type = adt::ll_inttype(cx, inttype);
let (discriminant_size, discriminant_align) =
size_and_align_of(cx, discriminant_llvm_type);
let discriminant_base_type_metadata =
type_metadata(cx,
adt::ty_of_inttype(cx.tcx(), inttype),
codemap::DUMMY_SP);
let discriminant_name = get_enum_discriminant_name(cx, enum_def_id);
let name = CString::new(discriminant_name.as_bytes()).unwrap();
let discriminant_type_metadata = unsafe {
llvm::LLVMDIBuilderCreateEnumerationType(
DIB(cx),
containing_scope,
name.as_ptr(),
UNKNOWN_FILE_METADATA,
UNKNOWN_LINE_NUMBER,
bytes_to_bits(discriminant_size),
bytes_to_bits(discriminant_align),
create_DIArray(DIB(cx), &enumerators_metadata),
discriminant_base_type_metadata)
};
debug_context(cx).created_enum_disr_types
.borrow_mut()
.insert(enum_def_id, discriminant_type_metadata);
discriminant_type_metadata
}
}
};
let type_rep = adt::represent_type(cx, enum_type);
let discriminant_type_metadata = match *type_rep {
adt::CEnum(inttype, _, _) => {
return FinalMetadata(discriminant_type_metadata(inttype))
},
adt::RawNullablePointer { .. } |
adt::StructWrappedNullablePointer { .. } |
adt::Univariant(..) => None,
adt::General(inttype, _, _) => Some(discriminant_type_metadata(inttype)),
};
let enum_llvm_type = type_of::type_of(cx, enum_type);
let (enum_type_size, enum_type_align) = size_and_align_of(cx, enum_llvm_type);
let unique_type_id_str = debug_context(cx)
.type_map
.borrow()
.get_unique_type_id_as_string(unique_type_id);
let enum_name = CString::new(enum_name).unwrap();
let unique_type_id_str = CString::new(unique_type_id_str.as_bytes()).unwrap();
let enum_metadata = unsafe {
llvm::LLVMDIBuilderCreateUnionType(
DIB(cx),
containing_scope,
enum_name.as_ptr(),
UNKNOWN_FILE_METADATA,
UNKNOWN_LINE_NUMBER,
bytes_to_bits(enum_type_size),
bytes_to_bits(enum_type_align),
0, // Flags
ptr::null_mut(),
0, // RuntimeLang
unique_type_id_str.as_ptr())
};
return create_and_register_recursive_type_forward_declaration(
cx,
enum_type,
unique_type_id,
enum_metadata,
enum_llvm_type,
EnumMDF(EnumMemberDescriptionFactory {
enum_type: enum_type,
type_rep: type_rep.clone(),
variants: variants,
discriminant_type_metadata: discriminant_type_metadata,
containing_scope: containing_scope,
file_metadata: file_metadata,
span: span,
}),
);
fn get_enum_discriminant_name(cx: &CrateContext,
def_id: ast::DefId)
-> token::InternedString {
let name = if def_id.krate == ast::LOCAL_CRATE {
cx.tcx().map.get_path_elem(def_id.node).name()
} else {
csearch::get_item_path(cx.tcx(), def_id).last().unwrap().name()
};
token::get_name(name)
}
}
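// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this commit): the discriminant-metadata
// cache described above, reduced to a plain memoization table. The metadata is
// computed at most once per enum definition id and reused for every
// monomorphized instance. Key and value types are hypothetical stand-ins.
use std::cell::RefCell;
use std::collections::HashMap;

struct DisrTypeCache {
    created: RefCell<HashMap<u32, String>>, // def_id -> discriminant metadata
}

impl DisrTypeCache {
    fn get_or_create<F>(&self, def_id: u32, create: F) -> String
    where
        F: FnOnce() -> String,
    {
        if let Some(cached) = self.created.borrow().get(&def_id).cloned() {
            return cached;
        }
        let metadata = create();
        self.created.borrow_mut().insert(def_id, metadata.clone());
        metadata
    }
}

fn main() {
    let cache = DisrTypeCache { created: RefCell::new(HashMap::new()) };
    let first = cache.get_or_create(7, || "i32 discriminant type".to_string());
    let second = cache.get_or_create(7, || unreachable!("already cached"));
    assert_eq!(first, second);
}
// ---------------------------------------------------------------------------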
/// Creates debug information for a composite type, that is, anything that
/// results in an LLVM struct.
///
/// Examples of Rust types that use this are structs, tuples, boxes, vecs, and enums.
pub fn composite_type_metadata(cx: &CrateContext,
composite_llvm_type: Type,
composite_type_name: &str,
composite_type_unique_id: UniqueTypeId,
member_descriptions: &[MemberDescription],
containing_scope: DIScope,
// Ignore source location information as long as it
// can't be reconstructed for non-local crates.
_file_metadata: DIFile,
_definition_span: Span)
-> DICompositeType {
// Create the (empty) struct metadata node ...
let composite_type_metadata = create_struct_stub(cx,
composite_llvm_type,
composite_type_name,
composite_type_unique_id,
containing_scope);
// ... and immediately create and add the member descriptions.
set_members_of_composite_type(cx,
composite_type_metadata,
composite_llvm_type,
member_descriptions);
return composite_type_metadata;
}
pub fn set_members_of_composite_type(cx: &CrateContext,
composite_type_metadata: DICompositeType,
composite_llvm_type: Type,
member_descriptions: &[MemberDescription]) {
// In some rare cases LLVM metadata uniquing would lead to an existing type
// description being used instead of a new one created in
// create_struct_stub. This would cause a hard to trace assertion in
// DICompositeType::SetTypeArray(). The following check makes sure that we
// get a better error message if this should happen again due to some
// regression.
{
let mut composite_types_completed =
debug_context(cx).composite_types_completed.borrow_mut();
if composite_types_completed.contains(&composite_type_metadata) {
cx.sess().bug("debuginfo::set_members_of_composite_type() - \
Already completed forward declaration re-encountered.");
} else {
composite_types_completed.insert(composite_type_metadata);
}
}
let member_metadata: Vec<DIDescriptor> = member_descriptions
.iter()
.enumerate()
.map(|(i, member_description)| {
let (member_size, member_align) = size_and_align_of(cx, member_description.llvm_type);
let member_offset = match member_description.offset {
FixedMemberOffset { bytes } => bytes as u64,
ComputedMemberOffset => machine::llelement_offset(cx, composite_llvm_type, i)
};
let member_name = member_description.name.as_bytes();
let member_name = CString::new(member_name).unwrap();
unsafe {
llvm::LLVMDIBuilderCreateMemberType(
DIB(cx),
composite_type_metadata,
member_name.as_ptr(),
UNKNOWN_FILE_METADATA,
UNKNOWN_LINE_NUMBER,
bytes_to_bits(member_size),
bytes_to_bits(member_align),
bytes_to_bits(member_offset),
member_description.flags,
member_description.type_metadata)
}
})
.collect();
unsafe {
let type_array = create_DIArray(DIB(cx), &member_metadata[..]);
llvm::LLVMDICompositeTypeSetTypeArray(DIB(cx), composite_type_metadata, type_array);
}
}
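// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this commit): the completed-set guard used
// above, in isolation. Forward declarations that have already been filled in
// are remembered, and completing one twice fails with a descriptive message
// instead of an opaque assertion deep inside LLVM. Names are hypothetical.
use std::cell::RefCell;
use std::collections::HashSet;

struct CompletedTypes(RefCell<HashSet<usize>>);

impl CompletedTypes {
    fn mark_completed(&self, type_id: usize) {
        let mut completed = self.0.borrow_mut();
        if !completed.insert(type_id) {
            panic!("composite type {} was completed twice", type_id);
        }
    }
}

fn main() {
    let completed = CompletedTypes(RefCell::new(HashSet::new()));
    completed.mark_completed(1);
    completed.mark_completed(2);
    // completed.mark_completed(1); // would panic with the message above
}
// ---------------------------------------------------------------------------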
// A convenience wrapper around LLVMDIBuilderCreateStructType(). Does not do any
// caching, does not add any fields to the struct. This can be done later with
// set_members_of_composite_type().
fn create_struct_stub(cx: &CrateContext,
struct_llvm_type: Type,
struct_type_name: &str,
unique_type_id: UniqueTypeId,
containing_scope: DIScope)
-> DICompositeType {
let (struct_size, struct_align) = size_and_align_of(cx, struct_llvm_type);
let unique_type_id_str = debug_context(cx).type_map
.borrow()
.get_unique_type_id_as_string(unique_type_id);
let name = CString::new(struct_type_name).unwrap();
let unique_type_id = CString::new(unique_type_id_str.as_bytes()).unwrap();
let metadata_stub = unsafe {
// LLVMDIBuilderCreateStructType() wants an empty array. A null
// pointer will lead to hard to trace and debug LLVM assertions
// later on in llvm/lib/IR/Value.cpp.
let empty_array = create_DIArray(DIB(cx), &[]);
llvm::LLVMDIBuilderCreateStructType(
DIB(cx),
containing_scope,
name.as_ptr(),
UNKNOWN_FILE_METADATA,
UNKNOWN_LINE_NUMBER,
bytes_to_bits(struct_size),
bytes_to_bits(struct_align),
0,
ptr::null_mut(),
empty_array,
0,
ptr::null_mut(),
unique_type_id.as_ptr())
};
return metadata_stub;
}


@@ -1,125 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Module-Internal debug info creation functions.
use super::utils::{span_start, DIB};
use super::metadata::{type_metadata, file_metadata};
use super::{set_debug_location, DW_TAG_auto_variable, DW_TAG_arg_variable};
use super::VariableKind::{self, ArgumentVariable, CapturedVariable, LocalVariable};
use super::VariableAccess::{self, DirectVariable, IndirectVariable};
use super::InternalDebugLocation::{self, UnknownLocation};
use llvm;
use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray};
use trans;
use trans::common::{CrateContext, Block};
use middle::ty::Ty;
use session::config;
use libc::c_uint;
use std::ffi::CString;
use syntax::codemap::{Span, Pos};
use syntax::ast;
use syntax::parse::token;
pub fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool
{
// The is_local_to_unit flag indicates whether a function is local to the
// current compilation unit (i.e. if it is *static* in the C-sense). The
// *reachable* set should provide a good approximation of this, as it
// contains everything that might leak out of the current crate (by being
// externally visible or by being inlined into something externally
// visible). It might be better to use the `exported_items` set from
// `driver::CrateAnalysis` in the future, but (atm) this set is not
// available in the translation pass.
!cx.reachable().contains(&node_id)
}
#[allow(non_snake_case)]
pub fn create_DIArray(builder: DIBuilderRef, arr: &[DIDescriptor]) -> DIArray {
return unsafe {
llvm::LLVMDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32)
};
}
pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
variable_name: ast::Name,
variable_type: Ty<'tcx>,
scope_metadata: DIScope,
variable_access: VariableAccess,
variable_kind: VariableKind,
span: Span) {
let cx: &CrateContext = bcx.ccx();
let filename = span_start(cx, span).file.name.clone();
let file_metadata = file_metadata(cx, &filename[..]);
let name = token::get_name(variable_name);
let loc = span_start(cx, span);
let type_metadata = type_metadata(cx, variable_type, span);
let (argument_index, dwarf_tag) = match variable_kind {
ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
LocalVariable |
CapturedVariable => (0, DW_TAG_auto_variable)
};
let name = CString::new(name.as_bytes()).unwrap();
match (variable_access, &[][..]) {
(DirectVariable { alloca }, address_operations) |
(IndirectVariable {alloca, address_operations}, _) => {
let metadata = unsafe {
llvm::LLVMDIBuilderCreateVariable(
DIB(cx),
dwarf_tag,
scope_metadata,
name.as_ptr(),
file_metadata,
loc.line as c_uint,
type_metadata,
cx.sess().opts.optimize != config::No,
0,
address_operations.as_ptr(),
address_operations.len() as c_uint,
argument_index)
};
set_debug_location(cx, InternalDebugLocation::new(scope_metadata,
loc.line,
loc.col.to_usize()));
unsafe {
let instr = llvm::LLVMDIBuilderInsertDeclareAtEnd(
DIB(cx),
alloca,
metadata,
address_operations.as_ptr(),
address_operations.len() as c_uint,
bcx.llbb);
llvm::LLVMSetInstDebugLocation(trans::build::B(bcx).llbuilder, instr);
}
}
}
match variable_kind {
ArgumentVariable(_) | CapturedVariable => {
assert!(!bcx.fcx
.debug_context
.get_ref(cx, span)
.source_locations_enabled
.get());
set_debug_location(cx, UnknownLocation);
}
_ => { /* nothing to do */ }
}
}


@@ -0,0 +1,514 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::metadata::file_metadata;
use super::utils::DIB;
use llvm;
use llvm::debuginfo::{DIScope, DISubprogram};
use trans::common::CrateContext;
use middle::pat_util;
use util::nodemap::NodeMap;
use libc::c_uint;
use syntax::codemap::{Span, Pos};
use syntax::{ast, codemap, ast_util};
// This procedure builds the *scope map* for a given function, which maps any
// given ast::NodeId in the function's AST to the correct DIScope metadata instance.
//
// This builder procedure walks the AST in execution order and keeps track of
// what belongs to which scope, creating DIScope DIEs along the way, and
// introducing *artificial* lexical scope descriptors where necessary. These
// artificial scopes allow GDB to correctly handle name shadowing.
pub fn create_scope_map(cx: &CrateContext,
args: &[ast::Arg],
fn_entry_block: &ast::Block,
fn_metadata: DISubprogram,
fn_ast_id: ast::NodeId)
-> NodeMap<DIScope> {
let mut scope_map = NodeMap();
let def_map = &cx.tcx().def_map;
let mut scope_stack = vec!(ScopeStackEntry { scope_metadata: fn_metadata, name: None });
scope_map.insert(fn_ast_id, fn_metadata);
// Push argument identifiers onto the stack so arguments integrate nicely
// with variable shadowing.
for arg in args {
pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, _, path1| {
scope_stack.push(ScopeStackEntry { scope_metadata: fn_metadata,
name: Some(path1.node.name) });
scope_map.insert(node_id, fn_metadata);
})
}
// Clang creates a separate scope for function bodies, so let's do this too.
with_new_scope(cx,
fn_entry_block.span,
&mut scope_stack,
&mut scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, fn_entry_block, scope_stack, scope_map);
});
return scope_map;
}
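// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this commit): the shadowing rule from the
// comment above, reduced to a toy scope stack. When a new binding reuses a
// name that is already on the stack, a fresh "artificial" scope id is pushed
// so later lookups resolve against the innermost binding. Ids and names are
// hypothetical; no LLVM metadata is involved here.
struct ToyScopes {
    next_id: u32,
    stack: Vec<(u32, Option<String>)>, // (scope id, binding name)
}

impl ToyScopes {
    fn new() -> ToyScopes {
        ToyScopes { next_id: 0, stack: vec![(0, None)] }
    }

    fn current(&self) -> u32 {
        self.stack.last().unwrap().0
    }

    fn bind(&mut self, name: &str) -> u32 {
        let shadows = self.stack.iter().any(|entry| entry.1.as_deref() == Some(name));
        let scope = if shadows {
            // Same trick as the artificial lexical blocks created above.
            self.next_id += 1;
            self.next_id
        } else {
            self.current()
        };
        self.stack.push((scope, Some(name.to_string())));
        scope
    }
}

fn main() {
    let mut scopes = ToyScopes::new();
    assert_eq!(scopes.bind("x"), 0); // first `x` stays in the enclosing scope
    assert_eq!(scopes.bind("y"), 0);
    assert!(scopes.bind("x") > 0);   // shadowing `x` gets an artificial scope
}
// ---------------------------------------------------------------------------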
// local helper functions for walking the AST.
fn with_new_scope<F>(cx: &CrateContext,
scope_span: Span,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>,
inner_walk: F) where
F: FnOnce(&CrateContext, &mut Vec<ScopeStackEntry>, &mut NodeMap<DIScope>),
{
// Create a new lexical scope and push it onto the stack
let loc = cx.sess().codemap().lookup_char_pos(scope_span.lo);
let file_metadata = file_metadata(cx, &loc.file.name);
let parent_scope = scope_stack.last().unwrap().scope_metadata;
let scope_metadata = unsafe {
llvm::LLVMDIBuilderCreateLexicalBlock(
DIB(cx),
parent_scope,
file_metadata,
loc.line as c_uint,
loc.col.to_usize() as c_uint)
};
scope_stack.push(ScopeStackEntry { scope_metadata: scope_metadata, name: None });
inner_walk(cx, scope_stack, scope_map);
// pop artificial scopes
while scope_stack.last().unwrap().name.is_some() {
scope_stack.pop();
}
if scope_stack.last().unwrap().scope_metadata != scope_metadata {
cx.sess().span_bug(scope_span, "debuginfo: Inconsistency in scope management.");
}
scope_stack.pop();
}
struct ScopeStackEntry {
scope_metadata: DIScope,
name: Option<ast::Name>
}
fn walk_block(cx: &CrateContext,
block: &ast::Block,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
scope_map.insert(block.id, scope_stack.last().unwrap().scope_metadata);
// The interesting things here are statements and the concluding expression.
for statement in &block.stmts {
scope_map.insert(ast_util::stmt_id(&**statement),
scope_stack.last().unwrap().scope_metadata);
match statement.node {
ast::StmtDecl(ref decl, _) =>
walk_decl(cx, &**decl, scope_stack, scope_map),
ast::StmtExpr(ref exp, _) |
ast::StmtSemi(ref exp, _) =>
walk_expr(cx, &**exp, scope_stack, scope_map),
ast::StmtMac(..) => () // Ignore macros (which should be expanded anyway).
}
}
if let Some(ref exp) = block.expr {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
}
fn walk_decl(cx: &CrateContext,
decl: &ast::Decl,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
match *decl {
codemap::Spanned { node: ast::DeclLocal(ref local), .. } => {
scope_map.insert(local.id, scope_stack.last().unwrap().scope_metadata);
walk_pattern(cx, &*local.pat, scope_stack, scope_map);
if let Some(ref exp) = local.init {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
}
_ => ()
}
}
fn walk_pattern(cx: &CrateContext,
pat: &ast::Pat,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
let def_map = &cx.tcx().def_map;
// Unfortunately, we cannot just use pat_util::pat_bindings() or
// ast_util::walk_pat() here because we have to visit *all* nodes in
// order to put them into the scope map. The above functions don't do that.
match pat.node {
ast::PatIdent(_, ref path1, ref sub_pat_opt) => {
// Check if this is a binding. If so we need to put it on the
// scope stack and maybe introduce an artificial scope
if pat_util::pat_is_binding(def_map, &*pat) {
let name = path1.node.name;
// LLVM does not properly generate 'DW_AT_start_scope' fields
// for variable DIEs. For this reason we have to introduce
// an artificial scope at bindings whenever a variable with
// the same name is declared in *any* parent scope.
//
// Otherwise the following error occurs:
//
// let x = 10;
//
// do_something(); // 'gdb print x' correctly prints 10
//
// {
// do_something(); // 'gdb print x' prints 0, because it
// // already reads the uninitialized 'x'
// // from the next line...
// let x = 100;
// do_something(); // 'gdb print x' correctly prints 100
// }
// Is there already a binding with that name?
// N.B.: this comparison must be UNhygienic... because
// gdb knows nothing about the context, so any two
// variables with the same name will cause the problem.
let need_new_scope = scope_stack
.iter()
.any(|entry| entry.name == Some(name));
if need_new_scope {
// Create a new lexical scope and push it onto the stack
let loc = cx.sess().codemap().lookup_char_pos(pat.span.lo);
let file_metadata = file_metadata(cx, &loc.file.name);
let parent_scope = scope_stack.last().unwrap().scope_metadata;
let scope_metadata = unsafe {
llvm::LLVMDIBuilderCreateLexicalBlock(
DIB(cx),
parent_scope,
file_metadata,
loc.line as c_uint,
loc.col.to_usize() as c_uint)
};
scope_stack.push(ScopeStackEntry {
scope_metadata: scope_metadata,
name: Some(name)
});
} else {
// Push a new entry anyway so the name can be found
let prev_metadata = scope_stack.last().unwrap().scope_metadata;
scope_stack.push(ScopeStackEntry {
scope_metadata: prev_metadata,
name: Some(name)
});
}
}
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
if let Some(ref sub_pat) = *sub_pat_opt {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
}
ast::PatWild(_) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
}
ast::PatEnum(_, ref sub_pats_opt) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
if let Some(ref sub_pats) = *sub_pats_opt {
for p in sub_pats {
walk_pattern(cx, &**p, scope_stack, scope_map);
}
}
}
ast::PatQPath(..) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
}
ast::PatStruct(_, ref field_pats, _) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for &codemap::Spanned {
node: ast::FieldPat { pat: ref sub_pat, .. },
..
} in field_pats.iter() {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
}
ast::PatTup(ref sub_pats) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for sub_pat in sub_pats {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
}
ast::PatBox(ref sub_pat) | ast::PatRegion(ref sub_pat, _) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
ast::PatLit(ref exp) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_expr(cx, &**exp, scope_stack, scope_map);
}
ast::PatRange(ref exp1, ref exp2) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_expr(cx, &**exp1, scope_stack, scope_map);
walk_expr(cx, &**exp2, scope_stack, scope_map);
}
ast::PatVec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for sub_pat in front_sub_pats {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
if let Some(ref sub_pat) = *middle_sub_pats {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
for sub_pat in back_sub_pats {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
}
ast::PatMac(_) => {
cx.sess().span_bug(pat.span, "debuginfo::create_scope_map() - \
Found unexpanded macro.");
}
}
}
fn walk_expr(cx: &CrateContext,
exp: &ast::Expr,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
scope_map.insert(exp.id, scope_stack.last().unwrap().scope_metadata);
match exp.node {
ast::ExprLit(_) |
ast::ExprBreak(_) |
ast::ExprAgain(_) |
ast::ExprPath(..) => {}
ast::ExprCast(ref sub_exp, _) |
ast::ExprAddrOf(_, ref sub_exp) |
ast::ExprField(ref sub_exp, _) |
ast::ExprTupField(ref sub_exp, _) |
ast::ExprParen(ref sub_exp) =>
walk_expr(cx, &**sub_exp, scope_stack, scope_map),
ast::ExprBox(ref place, ref sub_expr) => {
place.as_ref().map(
|e| walk_expr(cx, &**e, scope_stack, scope_map));
walk_expr(cx, &**sub_expr, scope_stack, scope_map);
}
ast::ExprRet(ref exp_opt) => match *exp_opt {
Some(ref sub_exp) => walk_expr(cx, &**sub_exp, scope_stack, scope_map),
None => ()
},
ast::ExprUnary(_, ref sub_exp) => {
walk_expr(cx, &**sub_exp, scope_stack, scope_map);
}
ast::ExprAssignOp(_, ref lhs, ref rhs) |
ast::ExprIndex(ref lhs, ref rhs) |
ast::ExprBinary(_, ref lhs, ref rhs) => {
walk_expr(cx, &**lhs, scope_stack, scope_map);
walk_expr(cx, &**rhs, scope_stack, scope_map);
}
ast::ExprRange(ref start, ref end) => {
start.as_ref().map(|e| walk_expr(cx, &**e, scope_stack, scope_map));
end.as_ref().map(|e| walk_expr(cx, &**e, scope_stack, scope_map));
}
ast::ExprVec(ref init_expressions) |
ast::ExprTup(ref init_expressions) => {
for ie in init_expressions {
walk_expr(cx, &**ie, scope_stack, scope_map);
}
}
ast::ExprAssign(ref sub_exp1, ref sub_exp2) |
ast::ExprRepeat(ref sub_exp1, ref sub_exp2) => {
walk_expr(cx, &**sub_exp1, scope_stack, scope_map);
walk_expr(cx, &**sub_exp2, scope_stack, scope_map);
}
ast::ExprIf(ref cond_exp, ref then_block, ref opt_else_exp) => {
walk_expr(cx, &**cond_exp, scope_stack, scope_map);
with_new_scope(cx,
then_block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, &**then_block, scope_stack, scope_map);
});
match *opt_else_exp {
Some(ref else_exp) =>
walk_expr(cx, &**else_exp, scope_stack, scope_map),
_ => ()
}
}
ast::ExprIfLet(..) => {
cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded if-let.");
}
ast::ExprWhile(ref cond_exp, ref loop_body, _) => {
walk_expr(cx, &**cond_exp, scope_stack, scope_map);
with_new_scope(cx,
loop_body.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, &**loop_body, scope_stack, scope_map);
})
}
ast::ExprWhileLet(..) => {
cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded while-let.");
}
ast::ExprForLoop(..) => {
cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded for loop.");
}
ast::ExprMac(_) => {
cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded macro.");
}
ast::ExprLoop(ref block, _) |
ast::ExprBlock(ref block) => {
with_new_scope(cx,
block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, &**block, scope_stack, scope_map);
})
}
ast::ExprClosure(_, ref decl, ref block) => {
with_new_scope(cx,
block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
for &ast::Arg { pat: ref pattern, .. } in &decl.inputs {
walk_pattern(cx, &**pattern, scope_stack, scope_map);
}
walk_block(cx, &**block, scope_stack, scope_map);
})
}
ast::ExprCall(ref fn_exp, ref args) => {
walk_expr(cx, &**fn_exp, scope_stack, scope_map);
for arg_exp in args {
walk_expr(cx, &**arg_exp, scope_stack, scope_map);
}
}
ast::ExprMethodCall(_, _, ref args) => {
for arg_exp in args {
walk_expr(cx, &**arg_exp, scope_stack, scope_map);
}
}
ast::ExprMatch(ref discriminant_exp, ref arms, _) => {
walk_expr(cx, &**discriminant_exp, scope_stack, scope_map);
// For each arm we have to first walk the pattern as these might
// introduce new artificial scopes. It should be sufficient to
// walk only one pattern per arm, as they all must contain the
// same binding names.
for arm_ref in arms {
let arm_span = arm_ref.pats[0].span;
with_new_scope(cx,
arm_span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
for pat in &arm_ref.pats {
walk_pattern(cx, &**pat, scope_stack, scope_map);
}
if let Some(ref guard_exp) = arm_ref.guard {
walk_expr(cx, &**guard_exp, scope_stack, scope_map)
}
walk_expr(cx, &*arm_ref.body, scope_stack, scope_map);
})
}
}
ast::ExprStruct(_, ref fields, ref base_exp) => {
for &ast::Field { expr: ref exp, .. } in fields {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
match *base_exp {
Some(ref exp) => walk_expr(cx, &**exp, scope_stack, scope_map),
None => ()
}
}
ast::ExprInlineAsm(ast::InlineAsm { ref inputs,
ref outputs,
.. }) => {
// inputs, outputs: Vec<(String, P<Expr>)>
for &(_, ref exp) in inputs {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
for &(_, ref exp, _) in outputs {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
}
}
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,231 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use self::InternalDebugLocation::*;
use super::utils::{debug_context, span_start, fn_should_be_ignored};
use super::metadata::{scope_metadata,UNKNOWN_COLUMN_NUMBER};
use super::{FunctionDebugContext, DebugLoc};
use llvm;
use llvm::debuginfo::DIScope;
use trans::common::{NodeIdAndSpan, CrateContext, FunctionContext};
use libc::c_uint;
use std::ptr;
use syntax::codemap::{Span, Pos};
use syntax::{ast, codemap};
pub fn get_cleanup_debug_loc_for_ast_node<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
node_id: ast::NodeId,
node_span: Span,
is_block: bool)
-> NodeIdAndSpan {
// A debug location needs two things:
// (1) A span (of which only the beginning will actually be used)
// (2) An AST node-id which will be used to look up the lexical scope
// for the location in the functions scope-map
//
// This function will calculate the debug location for compiler-generated
// cleanup calls that are executed when control-flow leaves the
// scope identified by `node_id`.
//
// For everything but block-like things we can simply take id and span of
// the given expression, meaning that from a debugger's view cleanup code is
// executed at the same source location as the statement/expr itself.
//
// Blocks are a special case. Here we want the cleanup to be linked to the
// closing curly brace of the block. The *scope* the cleanup is executed in
// is up for debate: it could either still be *within* the block being
// cleaned up, meaning that locals from the block are still visible in the
// debugger.
// Or it could be in the scope that the block is contained in, so any locals
// from within the block are already considered out-of-scope and thus not
// accessible in the debugger anymore.
//
// The current implementation opts for the second option: cleanup of a block
// already happens in the parent scope of the block. The main reason for
// this decision is that scoping becomes control-flow dependent when variable
// shadowing is involved and it's impossible to decide statically which
// scope is actually left when the cleanup code is executed.
// In practice it shouldn't make much of a difference.
let mut cleanup_span = node_span;
if is_block {
// Not all blocks actually have curly braces (e.g. simple closure
// bodies), in which case we also just want to return the span of the
// whole expression.
let code_snippet = cx.sess().codemap().span_to_snippet(node_span);
if let Ok(code_snippet) = code_snippet {
let bytes = code_snippet.as_bytes();
if !bytes.is_empty() && &bytes[bytes.len()-1..] == b"}" {
cleanup_span = Span {
lo: node_span.hi - codemap::BytePos(1),
hi: node_span.hi,
expn_id: node_span.expn_id
};
}
}
}
NodeIdAndSpan {
id: node_id,
span: cleanup_span
}
}
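// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this commit): the span adjustment made
// above, modelled with plain byte offsets instead of the codemap types. When a
// block's source text ends in '}', the cleanup location is narrowed to the
// closing brace; otherwise the original span is kept (e.g. for braceless
// closure bodies).
#[derive(Debug, PartialEq)]
struct ByteSpan {
    lo: usize,
    hi: usize,
}

fn cleanup_span(snippet: &str, span: ByteSpan) -> ByteSpan {
    if snippet.ends_with('}') {
        ByteSpan { lo: span.hi - 1, hi: span.hi }
    } else {
        span
    }
}

fn main() {
    assert_eq!(
        cleanup_span("{ foo(); }", ByteSpan { lo: 0, hi: 10 }),
        ByteSpan { lo: 9, hi: 10 }
    );
    assert_eq!(
        cleanup_span("|x| x + 1", ByteSpan { lo: 0, hi: 9 }),
        ByteSpan { lo: 0, hi: 9 }
    );
}
// ---------------------------------------------------------------------------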
/// Sets the current debug location at the beginning of the span.
///
/// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). The node_id
/// parameter is used to reliably find the correct visibility scope for the code
/// position.
pub fn set_source_location(fcx: &FunctionContext,
node_id: ast::NodeId,
span: Span) {
match fcx.debug_context {
FunctionDebugContext::DebugInfoDisabled => return,
FunctionDebugContext::FunctionWithoutDebugInfo => {
set_debug_location(fcx.ccx, UnknownLocation);
return;
}
FunctionDebugContext::RegularContext(box ref function_debug_context) => {
if function_debug_context.source_location_override.get() {
// Just ignore any attempts to set a new debug location while
// the override is active.
return;
}
let cx = fcx.ccx;
debug!("set_source_location: {}", cx.sess().codemap().span_to_string(span));
if function_debug_context.source_locations_enabled.get() {
let loc = span_start(cx, span);
let scope = scope_metadata(fcx, node_id, span);
set_debug_location(cx, InternalDebugLocation::new(scope,
loc.line,
loc.col.to_usize()));
} else {
set_debug_location(cx, UnknownLocation);
}
}
}
}
/// This function makes sure that all debug locations emitted while executing
/// `wrapped_function` are set to the given `debug_loc`.
pub fn with_source_location_override<F, R>(fcx: &FunctionContext,
debug_loc: DebugLoc,
wrapped_function: F) -> R
where F: FnOnce() -> R
{
match fcx.debug_context {
FunctionDebugContext::DebugInfoDisabled => {
wrapped_function()
}
FunctionDebugContext::FunctionWithoutDebugInfo => {
set_debug_location(fcx.ccx, UnknownLocation);
wrapped_function()
}
FunctionDebugContext::RegularContext(box ref function_debug_context) => {
if function_debug_context.source_location_override.get() {
wrapped_function()
} else {
debug_loc.apply(fcx);
function_debug_context.source_location_override.set(true);
let result = wrapped_function();
function_debug_context.source_location_override.set(false);
result
}
}
}
}
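// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this commit): the override flag pattern
// used above, in isolation. A Cell<bool> guards re-entry: nested calls run the
// wrapped closure directly, while the outermost call applies the debug
// location, sets the flag, and clears it again afterwards. The types below are
// hypothetical stand-ins for the real function context.
use std::cell::Cell;

struct OverrideContext {
    source_location_override: Cell<bool>,
}

impl OverrideContext {
    fn with_override<A, F, R>(&self, apply_location: A, wrapped: F) -> R
    where
        A: FnOnce(),
        F: FnOnce() -> R,
    {
        if self.source_location_override.get() {
            wrapped()
        } else {
            apply_location();
            self.source_location_override.set(true);
            let result = wrapped();
            self.source_location_override.set(false);
            result
        }
    }
}

fn main() {
    let fcx = OverrideContext { source_location_override: Cell::new(false) };
    let value = fcx.with_override(|| println!("debug location applied once"), || 42);
    assert_eq!(value, 42);
}
// ---------------------------------------------------------------------------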
/// Clears the current debug location.
///
/// Instructions generated hereafter won't be assigned a source location.
pub fn clear_source_location(fcx: &FunctionContext) {
if fn_should_be_ignored(fcx) {
return;
}
set_debug_location(fcx.ccx, UnknownLocation);
}
/// Enables emitting source locations for the given function.
///
/// Since we don't want source locations to be emitted for the function prelude,
/// they are disabled when beginning to translate a new function. This function
/// switches source location emitting on and must therefore be called before the
/// first real statement/expression of the function is translated.
pub fn start_emitting_source_locations(fcx: &FunctionContext) {
match fcx.debug_context {
FunctionDebugContext::RegularContext(box ref data) => {
data.source_locations_enabled.set(true)
},
_ => { /* safe to ignore */ }
}
}
#[derive(Copy, Clone, PartialEq)]
pub enum InternalDebugLocation {
KnownLocation { scope: DIScope, line: usize, col: usize },
UnknownLocation
}
impl InternalDebugLocation {
pub fn new(scope: DIScope, line: usize, col: usize) -> InternalDebugLocation {
KnownLocation {
scope: scope,
line: line,
col: col,
}
}
}
pub fn set_debug_location(cx: &CrateContext, debug_location: InternalDebugLocation) {
if debug_location == debug_context(cx).current_debug_location.get() {
return;
}
let metadata_node;
match debug_location {
KnownLocation { scope, line, .. } => {
// Always set the column to zero like Clang and GCC
let col = UNKNOWN_COLUMN_NUMBER;
debug!("setting debug location to {} {}", line, col);
unsafe {
metadata_node = llvm::LLVMDIBuilderCreateDebugLocation(
debug_context(cx).llcontext,
line as c_uint,
col as c_uint,
scope,
ptr::null_mut());
}
}
UnknownLocation => {
debug!("clearing debug location ");
metadata_node = ptr::null_mut();
}
};
unsafe {
llvm::LLVMSetCurrentDebugLocation(cx.raw_builder(), metadata_node);
}
debug_context(cx).current_debug_location.set(debug_location);
}


@@ -12,20 +12,35 @@
use super::{FunctionDebugContext, CrateDebugContext};
use super::namespace::namespace_for_item;
use super::metadata::file_metadata;
use llvm;
use llvm::debuginfo::{DIScope, DISubprogram, DIBuilderRef};
use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray};
use trans::machine;
use trans::common::{CrateContext, FunctionContext};
use trans::type_::Type;
use middle::pat_util;
use util::nodemap::NodeMap;
use libc::c_uint;
use syntax::codemap::{Span, Pos};
use syntax::{ast, codemap, ast_util};
use syntax::codemap::Span;
use syntax::{ast, codemap};
pub fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool
{
// The is_local_to_unit flag indicates whether a function is local to the
// current compilation unit (i.e. if it is *static* in the C-sense). The
// *reachable* set should provide a good approximation of this, as it
// contains everything that might leak out of the current crate (by being
// externally visible or by being inlined into something externally
// visible). It might be better to use the `exported_items` set from
// `driver::CrateAnalysis` in the future, but (atm) this set is not
// available in the translation pass.
!cx.reachable().contains(&node_id)
}
#[allow(non_snake_case)]
pub fn create_DIArray(builder: DIBuilderRef, arr: &[DIDescriptor]) -> DIArray {
return unsafe {
llvm::LLVMDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32)
};
}
pub fn contains_nodebug_attribute(attributes: &[ast::Attribute]) -> bool {
attributes.iter().any(|attr| {
@@ -91,496 +106,3 @@ pub fn get_namespace_and_span_for_item(cx: &CrateContext, def_id: ast::DefId)
(containing_scope, definition_span)
}
// This procedure builds the *scope map* for a given function, which maps any
// given ast::NodeId in the function's AST to the correct DIScope metadata instance.
//
// This builder procedure walks the AST in execution order and keeps track of
// what belongs to which scope, creating DIScope DIEs along the way, and
// introducing *artificial* lexical scope descriptors where necessary. These
// artificial scopes allow GDB to correctly handle name shadowing.
pub fn create_scope_map(cx: &CrateContext,
args: &[ast::Arg],
fn_entry_block: &ast::Block,
fn_metadata: DISubprogram,
fn_ast_id: ast::NodeId)
-> NodeMap<DIScope> {
let mut scope_map = NodeMap();
let def_map = &cx.tcx().def_map;
struct ScopeStackEntry {
scope_metadata: DIScope,
name: Option<ast::Name>
}
let mut scope_stack = vec!(ScopeStackEntry { scope_metadata: fn_metadata, name: None });
scope_map.insert(fn_ast_id, fn_metadata);
// Push argument identifiers onto the stack so arguments integrate nicely
// with variable shadowing.
for arg in args {
pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, _, path1| {
scope_stack.push(ScopeStackEntry { scope_metadata: fn_metadata,
name: Some(path1.node.name) });
scope_map.insert(node_id, fn_metadata);
})
}
// Clang creates a separate scope for function bodies, so let's do this too.
with_new_scope(cx,
fn_entry_block.span,
&mut scope_stack,
&mut scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, fn_entry_block, scope_stack, scope_map);
});
return scope_map;
// local helper functions for walking the AST.
fn with_new_scope<F>(cx: &CrateContext,
scope_span: Span,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>,
inner_walk: F) where
F: FnOnce(&CrateContext, &mut Vec<ScopeStackEntry>, &mut NodeMap<DIScope>),
{
// Create a new lexical scope and push it onto the stack
let loc = cx.sess().codemap().lookup_char_pos(scope_span.lo);
let file_metadata = file_metadata(cx, &loc.file.name);
let parent_scope = scope_stack.last().unwrap().scope_metadata;
let scope_metadata = unsafe {
llvm::LLVMDIBuilderCreateLexicalBlock(
DIB(cx),
parent_scope,
file_metadata,
loc.line as c_uint,
loc.col.to_usize() as c_uint)
};
scope_stack.push(ScopeStackEntry { scope_metadata: scope_metadata, name: None });
inner_walk(cx, scope_stack, scope_map);
// pop artificial scopes
while scope_stack.last().unwrap().name.is_some() {
scope_stack.pop();
}
if scope_stack.last().unwrap().scope_metadata != scope_metadata {
cx.sess().span_bug(scope_span, "debuginfo: Inconsistency in scope management.");
}
scope_stack.pop();
}
fn walk_block(cx: &CrateContext,
block: &ast::Block,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
scope_map.insert(block.id, scope_stack.last().unwrap().scope_metadata);
// The interesting things here are statements and the concluding expression.
for statement in &block.stmts {
scope_map.insert(ast_util::stmt_id(&**statement),
scope_stack.last().unwrap().scope_metadata);
match statement.node {
ast::StmtDecl(ref decl, _) =>
walk_decl(cx, &**decl, scope_stack, scope_map),
ast::StmtExpr(ref exp, _) |
ast::StmtSemi(ref exp, _) =>
walk_expr(cx, &**exp, scope_stack, scope_map),
ast::StmtMac(..) => () // Ignore macros (which should be expanded anyway).
}
}
if let Some(ref exp) = block.expr {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
}
fn walk_decl(cx: &CrateContext,
decl: &ast::Decl,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
match *decl {
codemap::Spanned { node: ast::DeclLocal(ref local), .. } => {
scope_map.insert(local.id, scope_stack.last().unwrap().scope_metadata);
walk_pattern(cx, &*local.pat, scope_stack, scope_map);
if let Some(ref exp) = local.init {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
}
_ => ()
}
}
fn walk_pattern(cx: &CrateContext,
pat: &ast::Pat,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
let def_map = &cx.tcx().def_map;
// Unfortunately, we cannot just use pat_util::pat_bindings() or
// ast_util::walk_pat() here because we have to visit *all* nodes in
// order to put them into the scope map. The above functions don't do that.
match pat.node {
ast::PatIdent(_, ref path1, ref sub_pat_opt) => {
// Check if this is a binding. If so we need to put it on the
// scope stack and maybe introduce an artificial scope
if pat_util::pat_is_binding(def_map, &*pat) {
let name = path1.node.name;
// LLVM does not properly generate 'DW_AT_start_scope' fields
// for variable DIEs. For this reason we have to introduce
// an artificial scope at bindings whenever a variable with
// the same name is declared in *any* parent scope.
//
// Otherwise the following error occurs:
//
// let x = 10;
//
// do_something(); // 'gdb print x' correctly prints 10
//
// {
// do_something(); // 'gdb print x' prints 0, because it
// // already reads the uninitialized 'x'
// // from the next line...
// let x = 100;
// do_something(); // 'gdb print x' correctly prints 100
// }
// Is there already a binding with that name?
// N.B.: this comparison must be UNhygienic... because
// gdb knows nothing about the context, so any two
// variables with the same name will cause the problem.
let need_new_scope = scope_stack
.iter()
.any(|entry| entry.name == Some(name));
if need_new_scope {
// Create a new lexical scope and push it onto the stack
let loc = cx.sess().codemap().lookup_char_pos(pat.span.lo);
let file_metadata = file_metadata(cx, &loc.file.name);
let parent_scope = scope_stack.last().unwrap().scope_metadata;
let scope_metadata = unsafe {
llvm::LLVMDIBuilderCreateLexicalBlock(
DIB(cx),
parent_scope,
file_metadata,
loc.line as c_uint,
loc.col.to_usize() as c_uint)
};
scope_stack.push(ScopeStackEntry {
scope_metadata: scope_metadata,
name: Some(name)
});
} else {
// Push a new entry anyway so the name can be found
let prev_metadata = scope_stack.last().unwrap().scope_metadata;
scope_stack.push(ScopeStackEntry {
scope_metadata: prev_metadata,
name: Some(name)
});
}
}
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
if let Some(ref sub_pat) = *sub_pat_opt {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
}
ast::PatWild(_) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
}
ast::PatEnum(_, ref sub_pats_opt) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
if let Some(ref sub_pats) = *sub_pats_opt {
for p in sub_pats {
walk_pattern(cx, &**p, scope_stack, scope_map);
}
}
}
ast::PatQPath(..) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
}
ast::PatStruct(_, ref field_pats, _) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for &codemap::Spanned {
node: ast::FieldPat { pat: ref sub_pat, .. },
..
} in field_pats.iter() {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
}
ast::PatTup(ref sub_pats) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for sub_pat in sub_pats {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
}
ast::PatBox(ref sub_pat) | ast::PatRegion(ref sub_pat, _) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
ast::PatLit(ref exp) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_expr(cx, &**exp, scope_stack, scope_map);
}
ast::PatRange(ref exp1, ref exp2) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_expr(cx, &**exp1, scope_stack, scope_map);
walk_expr(cx, &**exp2, scope_stack, scope_map);
}
ast::PatVec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for sub_pat in front_sub_pats {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
if let Some(ref sub_pat) = *middle_sub_pats {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
for sub_pat in back_sub_pats {
walk_pattern(cx, &**sub_pat, scope_stack, scope_map);
}
}
ast::PatMac(_) => {
cx.sess().span_bug(pat.span, "debuginfo::create_scope_map() - \
Found unexpanded macro.");
}
}
}
fn walk_expr(cx: &CrateContext,
exp: &ast::Expr,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
scope_map.insert(exp.id, scope_stack.last().unwrap().scope_metadata);
match exp.node {
ast::ExprLit(_) |
ast::ExprBreak(_) |
ast::ExprAgain(_) |
ast::ExprPath(..) => {}
ast::ExprCast(ref sub_exp, _) |
ast::ExprAddrOf(_, ref sub_exp) |
ast::ExprField(ref sub_exp, _) |
ast::ExprTupField(ref sub_exp, _) |
ast::ExprParen(ref sub_exp) =>
walk_expr(cx, &**sub_exp, scope_stack, scope_map),
ast::ExprBox(ref place, ref sub_expr) => {
place.as_ref().map(
|e| walk_expr(cx, &**e, scope_stack, scope_map));
walk_expr(cx, &**sub_expr, scope_stack, scope_map);
}
ast::ExprRet(ref exp_opt) => match *exp_opt {
Some(ref sub_exp) => walk_expr(cx, &**sub_exp, scope_stack, scope_map),
None => ()
},
ast::ExprUnary(_, ref sub_exp) => {
walk_expr(cx, &**sub_exp, scope_stack, scope_map);
}
ast::ExprAssignOp(_, ref lhs, ref rhs) |
ast::ExprIndex(ref lhs, ref rhs) |
ast::ExprBinary(_, ref lhs, ref rhs) => {
walk_expr(cx, &**lhs, scope_stack, scope_map);
walk_expr(cx, &**rhs, scope_stack, scope_map);
}
ast::ExprRange(ref start, ref end) => {
start.as_ref().map(|e| walk_expr(cx, &**e, scope_stack, scope_map));
end.as_ref().map(|e| walk_expr(cx, &**e, scope_stack, scope_map));
}
ast::ExprVec(ref init_expressions) |
ast::ExprTup(ref init_expressions) => {
for ie in init_expressions {
walk_expr(cx, &**ie, scope_stack, scope_map);
}
}
ast::ExprAssign(ref sub_exp1, ref sub_exp2) |
ast::ExprRepeat(ref sub_exp1, ref sub_exp2) => {
walk_expr(cx, &**sub_exp1, scope_stack, scope_map);
walk_expr(cx, &**sub_exp2, scope_stack, scope_map);
}
ast::ExprIf(ref cond_exp, ref then_block, ref opt_else_exp) => {
walk_expr(cx, &**cond_exp, scope_stack, scope_map);
with_new_scope(cx,
then_block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, &**then_block, scope_stack, scope_map);
});
match *opt_else_exp {
Some(ref else_exp) =>
walk_expr(cx, &**else_exp, scope_stack, scope_map),
_ => ()
}
}
ast::ExprIfLet(..) => {
cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded if-let.");
}
ast::ExprWhile(ref cond_exp, ref loop_body, _) => {
walk_expr(cx, &**cond_exp, scope_stack, scope_map);
with_new_scope(cx,
loop_body.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, &**loop_body, scope_stack, scope_map);
})
}
ast::ExprWhileLet(..) => {
cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded while-let.");
}
ast::ExprForLoop(..) => {
cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded for loop.");
}
ast::ExprMac(_) => {
cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded macro.");
}
ast::ExprLoop(ref block, _) |
ast::ExprBlock(ref block) => {
with_new_scope(cx,
block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, &**block, scope_stack, scope_map);
})
}
ast::ExprClosure(_, ref decl, ref block) => {
with_new_scope(cx,
block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
for &ast::Arg { pat: ref pattern, .. } in &decl.inputs {
walk_pattern(cx, &**pattern, scope_stack, scope_map);
}
walk_block(cx, &**block, scope_stack, scope_map);
})
}
ast::ExprCall(ref fn_exp, ref args) => {
walk_expr(cx, &**fn_exp, scope_stack, scope_map);
for arg_exp in args {
walk_expr(cx, &**arg_exp, scope_stack, scope_map);
}
}
ast::ExprMethodCall(_, _, ref args) => {
for arg_exp in args {
walk_expr(cx, &**arg_exp, scope_stack, scope_map);
}
}
ast::ExprMatch(ref discriminant_exp, ref arms, _) => {
walk_expr(cx, &**discriminant_exp, scope_stack, scope_map);
// For each arm we have to first walk the pattern as these might
// introduce new artificial scopes. It should be sufficient to
// walk only one pattern per arm, as they all must contain the
// same binding names.
for arm_ref in arms {
let arm_span = arm_ref.pats[0].span;
with_new_scope(cx,
arm_span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
for pat in &arm_ref.pats {
walk_pattern(cx, &**pat, scope_stack, scope_map);
}
if let Some(ref guard_exp) = arm_ref.guard {
walk_expr(cx, &**guard_exp, scope_stack, scope_map)
}
walk_expr(cx, &*arm_ref.body, scope_stack, scope_map);
})
}
}
ast::ExprStruct(_, ref fields, ref base_exp) => {
for &ast::Field { expr: ref exp, .. } in fields {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
match *base_exp {
Some(ref exp) => walk_expr(cx, &**exp, scope_stack, scope_map),
None => ()
}
}
ast::ExprInlineAsm(ast::InlineAsm { ref inputs,
ref outputs,
.. }) => {
// inputs, outputs: Vec<(String, P<Expr>)>
for &(_, ref exp) in inputs {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
for &(_, ref exp, _) in outputs {
walk_expr(cx, &**exp, scope_stack, scope_map);
}
}
}
}
}