Move to a more lightweight builder system

You now do

    bld::Ret(bcx, someval)

where you used to say

    bcx.build.Ret(someval)

Two fewer boxes are allocated for each block context, and build calls
no longer go through a vtable.
Marijn Haverbeke 2011-08-24 14:54:55 +02:00
parent 9f44df65ef
commit b9112525ba
9 changed files with 1146 additions and 1193 deletions
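
Concretely: the old builder obj (removed in the first hunk below) captured a @mutable bool terminated flag and a boxed @BuilderRef_res, so each block context paid for two extra heap boxes, and every build call dispatched through the obj's vtable. The new trans_build module replaces the obj with plain functions that take the block context as their first argument. A minimal before/after sketch; val, ptr, and next_cx are hypothetical values, not names from this commit:

    // Before: method calls through the builder obj's vtable.
    bcx.build.Store(val, ptr);
    bcx.build.Br(next_cx.llbb);

    // After: free functions in trans_build, imported as bld.
    bld::Store(bcx, val, ptr);
    bld::Br(bcx, next_cx.llbb);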


@@ -897,525 +897,6 @@ native "cdecl" mod llvm = "rustllvm" {
fn LLVMLinkModules(Dest: ModuleRef, Src: ModuleRef) -> Bool;
}
/* Slightly more terse object-interface to LLVM's 'builder' functions. For the
* most part, build.Foo() wraps LLVMBuildFoo(), threading the correct
* BuilderRef B into place. A BuilderRef is a cursor-like LLVM value that
* inserts instructions for a particular BasicBlockRef at a particular
* position; for our purposes, it always inserts at the end of the basic block
* it's attached to.
*/
resource BuilderRef_res(B: BuilderRef) { llvm::LLVMDisposeBuilder(B); }
obj builder(B: BuilderRef,
terminated: @mutable bool,
// Stored twice so that we don't have to constantly deref
res: @BuilderRef_res) {
/* Terminators */
fn RetVoid() -> ValueRef {
assert (!*terminated);
*terminated = true;
ret llvm::LLVMBuildRetVoid(B);
}
fn Ret(V: ValueRef) -> ValueRef {
assert (!*terminated);
*terminated = true;
ret llvm::LLVMBuildRet(B, V);
}
fn AggregateRet(RetVals: &[ValueRef]) -> ValueRef {
assert (!*terminated);
*terminated = true;
ret llvm::LLVMBuildAggregateRet(B, vec::to_ptr(RetVals),
vec::len(RetVals));
}
fn Br(Dest: BasicBlockRef) -> ValueRef {
assert (!*terminated);
*terminated = true;
ret llvm::LLVMBuildBr(B, Dest);
}
fn CondBr(If: ValueRef, Then: BasicBlockRef, Else: BasicBlockRef) ->
ValueRef {
assert (!*terminated);
*terminated = true;
ret llvm::LLVMBuildCondBr(B, If, Then, Else);
}
fn Switch(V: ValueRef, Else: BasicBlockRef, NumCases: uint) -> ValueRef {
assert (!*terminated);
*terminated = true;
ret llvm::LLVMBuildSwitch(B, V, Else, NumCases);
}
fn IndirectBr(Addr: ValueRef, NumDests: uint) -> ValueRef {
assert (!*terminated);
*terminated = true;
ret llvm::LLVMBuildIndirectBr(B, Addr, NumDests);
}
fn Invoke(Fn: ValueRef, Args: &[ValueRef], Then: BasicBlockRef,
Catch: BasicBlockRef) -> ValueRef {
assert (!*terminated);
*terminated = true;
ret llvm::LLVMBuildInvoke(B, Fn, vec::to_ptr(Args), vec::len(Args),
Then, Catch, str::buf(""));
}
fn Unreachable() -> ValueRef {
assert (!*terminated);
*terminated = true;
ret llvm::LLVMBuildUnreachable(B);
}
/* Arithmetic */
fn Add(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildAdd(B, LHS, RHS, str::buf(""));
}
fn NSWAdd(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildNSWAdd(B, LHS, RHS, str::buf(""));
}
fn NUWAdd(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildNUWAdd(B, LHS, RHS, str::buf(""));
}
fn FAdd(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFAdd(B, LHS, RHS, str::buf(""));
}
fn Sub(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildSub(B, LHS, RHS, str::buf(""));
}
fn NSWSub(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildNSWSub(B, LHS, RHS, str::buf(""));
}
fn NUWSub(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildNUWSub(B, LHS, RHS, str::buf(""));
}
fn FSub(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFSub(B, LHS, RHS, str::buf(""));
}
fn Mul(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildMul(B, LHS, RHS, str::buf(""));
}
fn NSWMul(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildNSWMul(B, LHS, RHS, str::buf(""));
}
fn NUWMul(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildNUWMul(B, LHS, RHS, str::buf(""));
}
fn FMul(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFMul(B, LHS, RHS, str::buf(""));
}
fn UDiv(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildUDiv(B, LHS, RHS, str::buf(""));
}
fn SDiv(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildSDiv(B, LHS, RHS, str::buf(""));
}
fn ExactSDiv(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildExactSDiv(B, LHS, RHS, str::buf(""));
}
fn FDiv(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFDiv(B, LHS, RHS, str::buf(""));
}
fn URem(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildURem(B, LHS, RHS, str::buf(""));
}
fn SRem(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildSRem(B, LHS, RHS, str::buf(""));
}
fn FRem(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFRem(B, LHS, RHS, str::buf(""));
}
fn Shl(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildShl(B, LHS, RHS, str::buf(""));
}
fn LShr(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildLShr(B, LHS, RHS, str::buf(""));
}
fn AShr(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildAShr(B, LHS, RHS, str::buf(""));
}
fn And(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildAnd(B, LHS, RHS, str::buf(""));
}
fn Or(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildOr(B, LHS, RHS, str::buf(""));
}
fn Xor(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildXor(B, LHS, RHS, str::buf(""));
}
fn BinOp(Op: Opcode, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildBinOp(B, Op, LHS, RHS, str::buf(""));
}
fn Neg(V: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildNeg(B, V, str::buf(""));
}
fn NSWNeg(V: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildNSWNeg(B, V, str::buf(""));
}
fn NUWNeg(V: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildNUWNeg(B, V, str::buf(""));
}
fn FNeg(V: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFNeg(B, V, str::buf(""));
}
fn Not(V: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildNot(B, V, str::buf(""));
}
/* Memory */
fn Malloc(Ty: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildMalloc(B, Ty, str::buf(""));
}
fn ArrayMalloc(Ty: TypeRef, Val: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildArrayMalloc(B, Ty, Val, str::buf(""));
}
fn Alloca(Ty: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildAlloca(B, Ty, str::buf(""));
}
fn ArrayAlloca(Ty: TypeRef, Val: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildArrayAlloca(B, Ty, Val, str::buf(""));
}
fn Free(PointerVal: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFree(B, PointerVal);
}
fn Load(PointerVal: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildLoad(B, PointerVal, str::buf(""));
}
fn Store(Val: ValueRef, Ptr: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildStore(B, Val, Ptr);
}
fn GEP(Pointer: ValueRef, Indices: &[ValueRef]) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildGEP(B, Pointer, vec::to_ptr(Indices),
vec::len(Indices), str::buf(""));
}
fn InBoundsGEP(Pointer: ValueRef, Indices: &[ValueRef]) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildInBoundsGEP(B, Pointer, vec::to_ptr(Indices),
vec::len(Indices), str::buf(""));
}
fn StructGEP(Pointer: ValueRef, Idx: uint) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildStructGEP(B, Pointer, Idx, str::buf(""));
}
fn GlobalString(_Str: sbuf) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildGlobalString(B, _Str, str::buf(""));
}
fn GlobalStringPtr(_Str: sbuf) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildGlobalStringPtr(B, _Str, str::buf(""));
}
/* Casts */
fn Trunc(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildTrunc(B, Val, DestTy, str::buf(""));
}
fn ZExt(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildZExt(B, Val, DestTy, str::buf(""));
}
fn SExt(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildSExt(B, Val, DestTy, str::buf(""));
}
fn FPToUI(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFPToUI(B, Val, DestTy, str::buf(""));
}
fn FPToSI(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFPToSI(B, Val, DestTy, str::buf(""));
}
fn UIToFP(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildUIToFP(B, Val, DestTy, str::buf(""));
}
fn SIToFP(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildSIToFP(B, Val, DestTy, str::buf(""));
}
fn FPTrunc(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFPTrunc(B, Val, DestTy, str::buf(""));
}
fn FPExt(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFPExt(B, Val, DestTy, str::buf(""));
}
fn PtrToInt(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildPtrToInt(B, Val, DestTy, str::buf(""));
}
fn IntToPtr(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildIntToPtr(B, Val, DestTy, str::buf(""));
}
fn BitCast(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildBitCast(B, Val, DestTy, str::buf(""));
}
fn ZExtOrBitCast(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildZExtOrBitCast(B, Val, DestTy, str::buf(""));
}
fn SExtOrBitCast(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildSExtOrBitCast(B, Val, DestTy, str::buf(""));
}
fn TruncOrBitCast(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildTruncOrBitCast(B, Val, DestTy, str::buf(""));
}
fn Cast(Op: Opcode, Val: ValueRef, DestTy: TypeRef, _Name: sbuf) ->
ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildCast(B, Op, Val, DestTy, str::buf(""));
}
fn PointerCast(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildPointerCast(B, Val, DestTy, str::buf(""));
}
fn IntCast(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildIntCast(B, Val, DestTy, str::buf(""));
}
fn FPCast(Val: ValueRef, DestTy: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFPCast(B, Val, DestTy, str::buf(""));
}
/* Comparisons */
fn ICmp(Op: uint, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildICmp(B, Op, LHS, RHS, str::buf(""));
}
fn FCmp(Op: uint, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildFCmp(B, Op, LHS, RHS, str::buf(""));
}
/* Miscellaneous instructions */
fn Phi(Ty: TypeRef, vals: &[ValueRef], bbs: &[BasicBlockRef]) ->
ValueRef {
assert (!*terminated);
let phi = llvm::LLVMBuildPhi(B, Ty, str::buf(""));
assert (vec::len::<ValueRef>(vals) == vec::len::<BasicBlockRef>(bbs));
llvm::LLVMAddIncoming(phi, vec::to_ptr(vals), vec::to_ptr(bbs),
vec::len(vals));
ret phi;
}
fn AddIncomingToPhi(phi: ValueRef, vals: &[ValueRef],
bbs: &[BasicBlockRef]) {
assert (vec::len::<ValueRef>(vals) == vec::len::<BasicBlockRef>(bbs));
llvm::LLVMAddIncoming(phi, vec::to_ptr(vals), vec::to_ptr(bbs),
vec::len(vals));
}
fn Call(Fn: ValueRef, Args: &[ValueRef]) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildCall(B, Fn, vec::to_ptr(Args), vec::len(Args),
str::buf(""));
}
fn FastCall(Fn: ValueRef, Args: &[ValueRef]) -> ValueRef {
assert (!*terminated);
let v =
llvm::LLVMBuildCall(B, Fn, vec::to_ptr(Args), vec::len(Args),
str::buf(""));
llvm::LLVMSetInstructionCallConv(v, LLVMFastCallConv);
ret v;
}
fn CallWithConv(Fn: ValueRef, Args: &[ValueRef], Conv: uint) -> ValueRef {
assert (!*terminated);
let v =
llvm::LLVMBuildCall(B, Fn, vec::to_ptr(Args), vec::len(Args),
str::buf(""));
llvm::LLVMSetInstructionCallConv(v, Conv);
ret v;
}
fn Select(If: ValueRef, Then: ValueRef, Else: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildSelect(B, If, Then, Else, str::buf(""));
}
fn VAArg(list: ValueRef, Ty: TypeRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildVAArg(B, list, Ty, str::buf(""));
}
fn ExtractElement(VecVal: ValueRef, Index: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildExtractElement(B, VecVal, Index, str::buf(""));
}
fn InsertElement(VecVal: ValueRef, EltVal: ValueRef, Index: ValueRef) ->
ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildInsertElement(B, VecVal, EltVal, Index,
str::buf(""));
}
fn ShuffleVector(V1: ValueRef, V2: ValueRef, Mask: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildShuffleVector(B, V1, V2, Mask, str::buf(""));
}
fn ExtractValue(AggVal: ValueRef, Index: uint) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildExtractValue(B, AggVal, Index, str::buf(""));
}
fn InsertValue(AggVal: ValueRef, EltVal: ValueRef, Index: uint) ->
ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildInsertValue(B, AggVal, EltVal, Index,
str::buf(""));
}
fn IsNull(Val: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildIsNull(B, Val, str::buf(""));
}
fn IsNotNull(Val: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildIsNotNull(B, Val, str::buf(""));
}
fn PtrDiff(LHS: ValueRef, RHS: ValueRef) -> ValueRef {
assert (!*terminated);
ret llvm::LLVMBuildPtrDiff(B, LHS, RHS, str::buf(""));
}
fn Trap() -> ValueRef {
assert (!*terminated);
let BB: BasicBlockRef = llvm::LLVMGetInsertBlock(B);
let FN: ValueRef = llvm::LLVMGetBasicBlockParent(BB);
let M: ModuleRef = llvm::LLVMGetGlobalParent(FN);
let T: ValueRef =
llvm::LLVMGetNamedFunction(M, str::buf("llvm.trap"));
assert (T as int != 0);
let Args: [ValueRef] = [];
ret llvm::LLVMBuildCall(B, T, vec::to_ptr(Args), vec::len(Args),
str::buf(""));
}
fn is_terminated() -> bool { ret *terminated; }
}
fn new_builder(llbb: BasicBlockRef) -> builder {
let llbuild: BuilderRef = llvm::LLVMCreateBuilder();
llvm::LLVMPositionBuilderAtEnd(llbuild, llbb);
ret builder(llbuild, @mutable false, @BuilderRef_res(llbuild));
}
/* Memory-managed object interface to type handles. */
obj type_names(type_names: std::map::hashmap<TypeRef, str>,


@@ -15,6 +15,7 @@ import std::unsafe;
import std::vec;
import lll = lib::llvm::llvm;
import bld = trans_build;
type ctxt = @{mutable next_tydesc_num: uint};
@@ -47,13 +48,13 @@ fn add_gc_root(cx: &@block_ctxt, llval: ValueRef, ty: ty::t) -> @block_ctxt {
let lltydesc = td_r.result.val;
let gcroot = bcx_ccx(bcx).intrinsics.get("llvm.gcroot");
let llvalptr = bcx.build.PointerCast(llval, T_ptr(T_ptr(T_i8())));
let llvalptr = bld::PointerCast(bcx, llval, T_ptr(T_ptr(T_i8())));
alt td_r.kind {
tk_derived. {
// It's a derived type descriptor. First, spill it.
let lltydescptr = trans::alloca(bcx, val_ty(lltydesc));
bcx.build.Store(lltydesc, lltydescptr);
bld::Store(bcx, lltydesc, lltydescptr);
let number = gc_cx.next_tydesc_num;
gc_cx.next_tydesc_num += 1u;
@@ -69,10 +70,10 @@ fn add_gc_root(cx: &@block_ctxt, llval: ValueRef, ty: ty::t) -> @block_ctxt {
llsrcindex = lll::LLVMConstPointerCast(llsrcindex, T_ptr(T_i8()));
lltydescptr =
bcx.build.PointerCast(lltydescptr, T_ptr(T_ptr(T_i8())));
bld::PointerCast(bcx, lltydescptr, T_ptr(T_ptr(T_i8())));
bcx.build.Call(gcroot, [lltydescptr, lldestindex]);
bcx.build.Call(gcroot, [llvalptr, llsrcindex]);
bld::Call(bcx, gcroot, [lltydescptr, lldestindex]);
bld::Call(bcx, gcroot, [llvalptr, llsrcindex]);
}
tk_param. {
bcx_tcx(cx).sess.bug("we should never be trying to root values " +
@@ -87,7 +88,7 @@ fn add_gc_root(cx: &@block_ctxt, llval: ValueRef, ty: ty::t) -> @block_ctxt {
let llstaticgcmetaptr =
lll::LLVMConstPointerCast(llstaticgcmeta, T_ptr(T_i8()));
bcx.build.Call(gcroot, [llvalptr, llstaticgcmetaptr]);
bld::Call(bcx, gcroot, [llvalptr, llstaticgcmetaptr]);
}
}

File diff suppressed because it is too large


@@ -9,6 +9,7 @@ import lib::llvm::llvm;
import lib::llvm::llvm::ValueRef;
import lib::llvm::llvm::TypeRef;
import lib::llvm::llvm::BasicBlockRef;
import bld = trans_build;
import trans::new_sub_block_ctxt;
import trans::new_scope_block_ctxt;
import trans::load_if_immediate;
@@ -210,9 +211,9 @@ fn extract_variant_args(bcx: @block_ctxt, pat_id: ast::node_id,
vec::len(ty::tag_variant_with_id(ccx.tcx, vdefs.tg, vdefs.var).args);
if size > 0u && vec::len(variants) != 1u {
let tagptr =
bcx.build.PointerCast(val,
bld::PointerCast(bcx, val,
trans_common::T_opaque_tag_ptr(ccx.tn));
blobptr = bcx.build.GEP(tagptr, [C_int(0), C_int(1)]);
blobptr = bld::GEP(bcx, tagptr, [C_int(0), C_int(1)]);
}
let i = 0u;
while i < size {
@@ -289,7 +290,7 @@ fn pick_col(m: &match) -> uint {
fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
f: &mk_fail, exits: &mutable [exit_node]) {
if vec::len(m) == 0u { bcx.build.Br(f()); ret; }
if vec::len(m) == 0u { bld::Br(bcx, f()); ret; }
if vec::len(m[0].pats) == 0u {
let data = m[0].data;
alt data.guard {
@@ -297,7 +298,7 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
let guard_cx = new_scope_block_ctxt(bcx, "guard");
let next_cx = new_sub_block_ctxt(bcx, "next");
let else_cx = new_sub_block_ctxt(bcx, "else");
bcx.build.Br(guard_cx.llbb);
bld::Br(bcx, guard_cx.llbb);
// Temporarily set bindings. They'll be rewritten to PHI nodes for
// the actual arm block.
for each @{key, val} in data.id_map.items() {
@@ -307,7 +308,7 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
let {bcx: guard_bcx, val: guard_val} =
trans::trans_expr(guard_cx, e);
guard_bcx = trans::trans_block_cleanups(guard_bcx, guard_cx);
guard_bcx.build.CondBr(guard_val, next_cx.llbb, else_cx.llbb);
bld::CondBr(guard_bcx, guard_val, next_cx.llbb, else_cx.llbb);
compile_submatch(else_cx, vec::slice(m, 1u, vec::len(m)),
vals, f, exits);
bcx = next_cx;
@@ -315,7 +316,7 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
_ {}
}
exits += [{bound: m[0].bound, from: bcx.llbb, to: data.body}];
bcx.build.Br(data.body);
bld::Br(bcx, data.body);
ret;
}
@@ -372,9 +373,9 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
// Unbox in case of a box field
if any_box_pat(m, col) {
let box = bcx.build.Load(val);
let box = bld::Load(bcx, val);
let unboxed =
bcx.build.InBoundsGEP(box,
bld::InBoundsGEP(bcx, box,
[C_int(0),
C_int(back::abi::box_rc_field_body)]);
compile_submatch(bcx, enter_box(m, col, val), [unboxed] + vals_left,
@@ -394,16 +395,15 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
kind = single;
} else {
let tagptr =
bcx.build.PointerCast(
val,
bld::PointerCast(bcx, val,
trans_common::T_opaque_tag_ptr(ccx.tn));
let discrimptr = bcx.build.GEP(tagptr, [C_int(0), C_int(0)]);
test_val = bcx.build.Load(discrimptr);
let discrimptr = bld::GEP(bcx, tagptr, [C_int(0), C_int(0)]);
test_val = bld::Load(bcx, discrimptr);
kind = switch;
}
}
lit(l) {
test_val = bcx.build.Load(val);
test_val = bld::Load(bcx, val);
kind = alt l.node { ast::lit_str(_, _) { compare } _ { switch } };
}
}
@@ -415,14 +415,14 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
};
let sw =
if kind == switch {
bcx.build.Switch(test_val, else_cx.llbb, vec::len(opts))
bld::Switch(bcx, test_val, else_cx.llbb, vec::len(opts))
} else { C_int(0) }; // Placeholder for when not using a switch
// Compile subtrees for each option
for opt: opt in opts {
let opt_cx = new_sub_block_ctxt(bcx, "match_case");
alt kind {
single. { bcx.build.Br(opt_cx.llbb); }
single. { bld::Br(bcx, opt_cx.llbb); }
switch. {
let r = trans_opt(bcx, opt);
bcx = r.bcx;
@@ -435,7 +435,7 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
let eq =
trans::trans_compare(bcx, ast::eq, test_val, t, r.val, t);
bcx = new_sub_block_ctxt(bcx, "next");
eq.bcx.build.CondBr(eq.val, opt_cx.llbb, bcx.llbb);
bld::CondBr(eq.bcx, eq.val, opt_cx.llbb, bcx.llbb);
}
_ { }
}
@@ -455,7 +455,7 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
}
// Compile the fall-through case
if kind == compare { bcx.build.Br(else_cx.llbb); }
if kind == compare { bld::Br(bcx, else_cx.llbb); }
if kind != single {
compile_submatch(else_cx, enter_default(m, col, val), vals_left, f,
exits);
@@ -479,7 +479,7 @@ fn make_phi_bindings(bcx: &@block_ctxt, map: &[exit_node],
}
}
if vec::len(vals) > 0u {
let phi = bcx.build.Phi(val_ty(vals[0]), vals, llbbs);
let phi = bld::Phi(bcx, val_ty(vals[0]), vals, llbbs);
bcx.fcx.lllocals.insert(item.val, phi);
} else { success = false; }
}
@@ -495,8 +495,8 @@ fn trans_alt(cx: &@block_ctxt, expr: &@ast::expr, arms: &[ast::arm],
// No need to generate code for alt,
// since the disc diverges.
if !cx.build.is_terminated() {
ret rslt(cx, cx.build.Unreachable());
if !is_terminated(cx) {
ret rslt(cx, bld::Unreachable(cx));
} else { ret er; }
}
@@ -594,9 +594,9 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: &@ast::pat, val: ValueRef,
}
}
ast::pat_box(inner) {
let box = bcx.build.Load(val);
let box = bld::Load(bcx, val);
let unboxed =
bcx.build.InBoundsGEP(box,
bld::InBoundsGEP(bcx, box,
[C_int(0),
C_int(back::abi::box_rc_field_body)]);
bcx = bind_irrefutable_pat(bcx, inner, unboxed, table, true);


@@ -0,0 +1,459 @@
import std::{vec, str};
import str::rustrt::sbuf;
import lib::llvm::llvm;
import llvm::{ValueRef, TypeRef, BasicBlockRef, BuilderRef,
Opcode, ModuleRef};
import trans_common::block_ctxt;
resource BuilderRef_res(B: llvm::BuilderRef) {
llvm::LLVMDisposeBuilder(B);
}
fn mk_builder(llbb: BasicBlockRef) -> BuilderRef {
let B = llvm::LLVMCreateBuilder();
llvm::LLVMPositionBuilderAtEnd(B, llbb);
ret B;
}
fn RetVoid(cx: &@block_ctxt) -> ValueRef {
assert (!cx.terminated);
cx.terminated = true;
ret llvm::LLVMBuildRetVoid(*cx.build);
}
fn Ret(cx: &@block_ctxt, V: ValueRef) -> ValueRef {
assert (!cx.terminated);
cx.terminated = true;
ret llvm::LLVMBuildRet(*cx.build, V);
}
fn AggregateRet(cx: &@block_ctxt, RetVals: &[ValueRef]) -> ValueRef {
assert (!cx.terminated);
cx.terminated = true;
ret llvm::LLVMBuildAggregateRet(*cx.build, vec::to_ptr(RetVals),
vec::len(RetVals));
}
fn Br(cx: &@block_ctxt, Dest: BasicBlockRef) -> ValueRef {
assert (!cx.terminated);
cx.terminated = true;
ret llvm::LLVMBuildBr(*cx.build, Dest);
}
fn CondBr(cx: &@block_ctxt, If: ValueRef, Then: BasicBlockRef,
Else: BasicBlockRef) -> ValueRef {
assert (!cx.terminated);
cx.terminated = true;
ret llvm::LLVMBuildCondBr(*cx.build, If, Then, Else);
}
fn Switch(cx: &@block_ctxt, V: ValueRef, Else: BasicBlockRef,
NumCases: uint) -> ValueRef {
assert (!cx.terminated);
cx.terminated = true;
ret llvm::LLVMBuildSwitch(*cx.build, V, Else, NumCases);
}
fn IndirectBr(cx: &@block_ctxt, Addr: ValueRef,
NumDests: uint) -> ValueRef {
assert (!cx.terminated);
cx.terminated = true;
ret llvm::LLVMBuildIndirectBr(*cx.build, Addr, NumDests);
}
fn Invoke(cx: &@block_ctxt, Fn: ValueRef, Args: &[ValueRef],
Then: BasicBlockRef, Catch: BasicBlockRef) -> ValueRef {
assert (!cx.terminated);
cx.terminated = true;
ret llvm::LLVMBuildInvoke(*cx.build, Fn, vec::to_ptr(Args),
vec::len(Args), Then, Catch, str::buf(""));
}
fn Unreachable(cx: &@block_ctxt) -> ValueRef {
assert (!cx.terminated);
cx.terminated = true;
ret llvm::LLVMBuildUnreachable(*cx.build);
}
/* Arithmetic */
fn Add(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildAdd(*cx.build, LHS, RHS, str::buf(""));
}
fn NSWAdd(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildNSWAdd(*cx.build, LHS, RHS, str::buf(""));
}
fn NUWAdd(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildNUWAdd(*cx.build, LHS, RHS, str::buf(""));
}
fn FAdd(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildFAdd(*cx.build, LHS, RHS, str::buf(""));
}
fn Sub(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildSub(*cx.build, LHS, RHS, str::buf(""));
}
fn NSWSub(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildNSWSub(*cx.build, LHS, RHS, str::buf(""));
}
fn NUWSub(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildNUWSub(*cx.build, LHS, RHS, str::buf(""));
}
fn FSub(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildFSub(*cx.build, LHS, RHS, str::buf(""));
}
fn Mul(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildMul(*cx.build, LHS, RHS, str::buf(""));
}
fn NSWMul(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildNSWMul(*cx.build, LHS, RHS, str::buf(""));
}
fn NUWMul(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildNUWMul(*cx.build, LHS, RHS, str::buf(""));
}
fn FMul(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildFMul(*cx.build, LHS, RHS, str::buf(""));
}
fn UDiv(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildUDiv(*cx.build, LHS, RHS, str::buf(""));
}
fn SDiv(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildSDiv(*cx.build, LHS, RHS, str::buf(""));
}
fn ExactSDiv(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildExactSDiv(*cx.build, LHS, RHS, str::buf(""));
}
fn FDiv(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildFDiv(*cx.build, LHS, RHS, str::buf(""));
}
fn URem(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildURem(*cx.build, LHS, RHS, str::buf(""));
}
fn SRem(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildSRem(*cx.build, LHS, RHS, str::buf(""));
}
fn FRem(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildFRem(*cx.build, LHS, RHS, str::buf(""));
}
fn Shl(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildShl(*cx.build, LHS, RHS, str::buf(""));
}
fn LShr(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildLShr(*cx.build, LHS, RHS, str::buf(""));
}
fn AShr(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildAShr(*cx.build, LHS, RHS, str::buf(""));
}
fn And(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildAnd(*cx.build, LHS, RHS, str::buf(""));
}
fn Or(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildOr(*cx.build, LHS, RHS, str::buf(""));
}
fn Xor(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildXor(*cx.build, LHS, RHS, str::buf(""));
}
fn BinOp(cx: &@block_ctxt, Op: Opcode, LHS: ValueRef,
RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildBinOp(*cx.build, Op, LHS, RHS, str::buf(""));
}
fn Neg(cx: &@block_ctxt, V: ValueRef) -> ValueRef {
ret llvm::LLVMBuildNeg(*cx.build, V, str::buf(""));
}
fn NSWNeg(cx: &@block_ctxt, V: ValueRef) -> ValueRef {
ret llvm::LLVMBuildNSWNeg(*cx.build, V, str::buf(""));
}
fn NUWNeg(cx: &@block_ctxt, V: ValueRef) -> ValueRef {
ret llvm::LLVMBuildNUWNeg(*cx.build, V, str::buf(""));
}
fn FNeg(cx: &@block_ctxt, V: ValueRef) -> ValueRef {
ret llvm::LLVMBuildFNeg(*cx.build, V, str::buf(""));
}
fn Not(cx: &@block_ctxt, V: ValueRef) -> ValueRef {
ret llvm::LLVMBuildNot(*cx.build, V, str::buf(""));
}
/* Memory */
fn Malloc(cx: &@block_ctxt, Ty: TypeRef) -> ValueRef {
ret llvm::LLVMBuildMalloc(*cx.build, Ty, str::buf(""));
}
fn ArrayMalloc(cx: &@block_ctxt, Ty: TypeRef, Val: ValueRef) -> ValueRef {
ret llvm::LLVMBuildArrayMalloc(*cx.build, Ty, Val, str::buf(""));
}
fn Alloca(cx: &@block_ctxt, Ty: TypeRef) -> ValueRef {
ret llvm::LLVMBuildAlloca(*cx.build, Ty, str::buf(""));
}
fn ArrayAlloca(cx: &@block_ctxt, Ty: TypeRef, Val: ValueRef) -> ValueRef {
ret llvm::LLVMBuildArrayAlloca(*cx.build, Ty, Val, str::buf(""));
}
fn Free(cx: &@block_ctxt, PointerVal: ValueRef) -> ValueRef {
ret llvm::LLVMBuildFree(*cx.build, PointerVal);
}
fn Load(cx: &@block_ctxt, PointerVal: ValueRef) -> ValueRef {
ret llvm::LLVMBuildLoad(*cx.build, PointerVal, str::buf(""));
}
fn Store(cx: &@block_ctxt, Val: ValueRef, Ptr: ValueRef) -> ValueRef {
ret llvm::LLVMBuildStore(*cx.build, Val, Ptr);
}
fn GEP(cx: &@block_ctxt, Pointer: ValueRef,
Indices: &[ValueRef]) -> ValueRef {
ret llvm::LLVMBuildGEP(*cx.build, Pointer, vec::to_ptr(Indices),
vec::len(Indices), str::buf(""));
}
fn InBoundsGEP(cx: &@block_ctxt, Pointer: ValueRef,
Indices: &[ValueRef]) -> ValueRef {
ret llvm::LLVMBuildInBoundsGEP(*cx.build, Pointer, vec::to_ptr(Indices),
vec::len(Indices), str::buf(""));
}
fn StructGEP(cx: &@block_ctxt, Pointer: ValueRef, Idx: uint) -> ValueRef {
ret llvm::LLVMBuildStructGEP(*cx.build, Pointer, Idx, str::buf(""));
}
fn GlobalString(cx: &@block_ctxt, _Str: sbuf) -> ValueRef {
ret llvm::LLVMBuildGlobalString(*cx.build, _Str, str::buf(""));
}
fn GlobalStringPtr(cx: &@block_ctxt, _Str: sbuf) -> ValueRef {
ret llvm::LLVMBuildGlobalStringPtr(*cx.build, _Str, str::buf(""));
}
/* Casts */
fn Trunc(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildTrunc(*cx.build, Val, DestTy, str::buf(""));
}
fn ZExt(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildZExt(*cx.build, Val, DestTy, str::buf(""));
}
fn SExt(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildSExt(*cx.build, Val, DestTy, str::buf(""));
}
fn FPToUI(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildFPToUI(*cx.build, Val, DestTy, str::buf(""));
}
fn FPToSI(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildFPToSI(*cx.build, Val, DestTy, str::buf(""));
}
fn UIToFP(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildUIToFP(*cx.build, Val, DestTy, str::buf(""));
}
fn SIToFP(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildSIToFP(*cx.build, Val, DestTy, str::buf(""));
}
fn FPTrunc(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildFPTrunc(*cx.build, Val, DestTy, str::buf(""));
}
fn FPExt(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildFPExt(*cx.build, Val, DestTy, str::buf(""));
}
fn PtrToInt(cx: &@block_ctxt, Val: ValueRef,
DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildPtrToInt(*cx.build, Val, DestTy, str::buf(""));
}
fn IntToPtr(cx: &@block_ctxt, Val: ValueRef,
DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildIntToPtr(*cx.build, Val, DestTy, str::buf(""));
}
fn BitCast(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildBitCast(*cx.build, Val, DestTy, str::buf(""));
}
fn ZExtOrBitCast(cx: &@block_ctxt, Val: ValueRef,
DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildZExtOrBitCast(*cx.build, Val, DestTy, str::buf(""));
}
fn SExtOrBitCast(cx: &@block_ctxt, Val: ValueRef,
DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildSExtOrBitCast(*cx.build, Val, DestTy, str::buf(""));
}
fn TruncOrBitCast(cx: &@block_ctxt, Val: ValueRef,
DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildTruncOrBitCast(*cx.build, Val, DestTy, str::buf(""));
}
fn Cast(cx: &@block_ctxt, Op: Opcode, Val: ValueRef,
DestTy: TypeRef, _Name: sbuf) ->
ValueRef {
ret llvm::LLVMBuildCast(*cx.build, Op, Val, DestTy, str::buf(""));
}
fn PointerCast(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildPointerCast(*cx.build, Val, DestTy, str::buf(""));
}
fn IntCast(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildIntCast(*cx.build, Val, DestTy, str::buf(""));
}
fn FPCast(cx: &@block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
ret llvm::LLVMBuildFPCast(*cx.build, Val, DestTy, str::buf(""));
}
/* Comparisons */
fn ICmp(cx: &@block_ctxt, Op: uint, LHS: ValueRef,
RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildICmp(*cx.build, Op, LHS, RHS, str::buf(""));
}
fn FCmp(cx: &@block_ctxt, Op: uint, LHS: ValueRef,
RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildFCmp(*cx.build, Op, LHS, RHS, str::buf(""));
}
/* Miscellaneous instructions */
fn Phi(cx: &@block_ctxt, Ty: TypeRef, vals: &[ValueRef],
bbs: &[BasicBlockRef]) -> ValueRef {
let phi = llvm::LLVMBuildPhi(*cx.build, Ty, str::buf(""));
assert (vec::len::<ValueRef>(vals) == vec::len::<BasicBlockRef>(bbs));
llvm::LLVMAddIncoming(phi, vec::to_ptr(vals), vec::to_ptr(bbs),
vec::len(vals));
ret phi;
}
fn AddIncomingToPhi(phi: ValueRef, vals: &[ValueRef], bbs: &[BasicBlockRef]) {
assert (vec::len::<ValueRef>(vals) == vec::len::<BasicBlockRef>(bbs));
llvm::LLVMAddIncoming(phi, vec::to_ptr(vals), vec::to_ptr(bbs),
vec::len(vals));
}
fn Call(cx: &@block_ctxt, Fn: ValueRef, Args: &[ValueRef]) -> ValueRef {
ret llvm::LLVMBuildCall(*cx.build, Fn, vec::to_ptr(Args), vec::len(Args),
str::buf(""));
}
fn FastCall(cx: &@block_ctxt, Fn: ValueRef, Args: &[ValueRef]) -> ValueRef {
let v =
llvm::LLVMBuildCall(*cx.build, Fn, vec::to_ptr(Args), vec::len(Args),
str::buf(""));
llvm::LLVMSetInstructionCallConv(v, lib::llvm::LLVMFastCallConv);
ret v;
}
fn CallWithConv(cx: &@block_ctxt, Fn: ValueRef, Args: &[ValueRef],
Conv: uint) -> ValueRef {
let v =
llvm::LLVMBuildCall(*cx.build, Fn, vec::to_ptr(Args), vec::len(Args),
str::buf(""));
llvm::LLVMSetInstructionCallConv(v, Conv);
ret v;
}
fn Select(cx: &@block_ctxt, If: ValueRef, Then: ValueRef,
Else: ValueRef) -> ValueRef {
ret llvm::LLVMBuildSelect(*cx.build, If, Then, Else, str::buf(""));
}
fn VAArg(cx: &@block_ctxt, list: ValueRef, Ty: TypeRef) -> ValueRef {
ret llvm::LLVMBuildVAArg(*cx.build, list, Ty, str::buf(""));
}
fn ExtractElement(cx: &@block_ctxt, VecVal: ValueRef,
Index: ValueRef) -> ValueRef {
ret llvm::LLVMBuildExtractElement(*cx.build, VecVal, Index, str::buf(""));
}
fn InsertElement(cx: &@block_ctxt, VecVal: ValueRef, EltVal: ValueRef,
Index: ValueRef) ->
ValueRef {
ret llvm::LLVMBuildInsertElement(*cx.build, VecVal, EltVal, Index,
str::buf(""));
}
fn ShuffleVector(cx: &@block_ctxt, V1: ValueRef, V2: ValueRef,
Mask: ValueRef) -> ValueRef {
ret llvm::LLVMBuildShuffleVector(*cx.build, V1, V2, Mask, str::buf(""));
}
fn ExtractValue(cx: &@block_ctxt, AggVal: ValueRef, Index: uint) -> ValueRef {
ret llvm::LLVMBuildExtractValue(*cx.build, AggVal, Index, str::buf(""));
}
fn InsertValue(cx: &@block_ctxt, AggVal: ValueRef,
EltVal: ValueRef, Index: uint) -> ValueRef {
ret llvm::LLVMBuildInsertValue(*cx.build, AggVal, EltVal, Index,
str::buf(""));
}
fn IsNull(cx: &@block_ctxt, Val: ValueRef) -> ValueRef {
ret llvm::LLVMBuildIsNull(*cx.build, Val, str::buf(""));
}
fn IsNotNull(cx: &@block_ctxt, Val: ValueRef) -> ValueRef {
ret llvm::LLVMBuildIsNotNull(*cx.build, Val, str::buf(""));
}
fn PtrDiff(cx: &@block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
ret llvm::LLVMBuildPtrDiff(*cx.build, LHS, RHS, str::buf(""));
}
fn Trap(cx: &@block_ctxt) -> ValueRef {
let BB: BasicBlockRef = llvm::LLVMGetInsertBlock(*cx.build);
let FN: ValueRef = llvm::LLVMGetBasicBlockParent(BB);
let M: ModuleRef = llvm::LLVMGetGlobalParent(FN);
let T: ValueRef =
llvm::LLVMGetNamedFunction(M, str::buf("llvm.trap"));
assert (T as int != 0);
let Args: [ValueRef] = [];
ret llvm::LLVMBuildCall(*cx.build, T, vec::to_ptr(Args), vec::len(Args),
str::buf(""));
}
//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
//
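
For orientation, here is how a branchy code path reads against the new module. This is a sketch, not code from the commit: the function and its arguments are made up, but every call it makes (new_sub_block_ctxt, bld::CondBr, bld::Br, bld::Phi, val_ty, rslt) appears elsewhere in this diff.

    fn trans_select_sketch(bcx: @block_ctxt, cond: ValueRef,
                           a: ValueRef, b: ValueRef) -> result {
        let then_cx = new_sub_block_ctxt(bcx, "then");
        let else_cx = new_sub_block_ctxt(bcx, "else");
        let next_cx = new_sub_block_ctxt(bcx, "next");
        // Terminators assert !cx.terminated and then set the flag,
        // so a block can only be ended once.
        bld::CondBr(bcx, cond, then_cx.llbb, else_cx.llbb);
        bld::Br(then_cx, next_cx.llbb);
        bld::Br(else_cx, next_cx.llbb);
        let v = bld::Phi(next_cx, val_ty(a), [a, b],
                         [then_cx.llbb, else_cx.llbb]);
        ret rslt(next_cx, v);
    }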


@@ -30,7 +30,6 @@ import std::map::new_int_hash
import std::map::new_str_hash;
import syntax::codemap::span;
import lib::llvm::llvm;
import lib::llvm::builder;
import lib::llvm::target_data;
import lib::llvm::type_names;
import lib::llvm::mk_target_data;
@@ -56,6 +55,7 @@ import util::ppaux::ty_to_str;
import util::ppaux::ty_to_short_str;
import syntax::print::pprust::expr_to_str;
import syntax::print::pprust::path_to_str;
import bld = trans_build;
// FIXME: These should probably be pulled in here too.
import trans::type_of_fn_full;
@@ -400,13 +400,18 @@ type block_ctxt =
// The function context for the function to which this block is
// attached.
{llbb: BasicBlockRef,
build: builder,
mutable terminated: bool,
build: bld::BuilderRef_res,
parent: block_parent,
kind: block_kind,
mutable cleanups: [cleanup],
sp: span,
fcx: @fn_ctxt};
fn is_terminated(cx: &@block_ctxt) -> bool {
ret cx.terminated;
}
// FIXME: we should be able to use option::t<@block_parent> here but
// the infinite-tag check in rustboot gets upset.
tag block_parent { parent_none; parent_some(@block_ctxt); }
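
The terminated flag and the builder resource now live inline in this record, where the old interface boxed both (@mutable bool and @BuilderRef_res inside the builder obj); that is where the two saved allocations in the commit message come from. A sketch of how a fresh block context could be wired up under the new layout — the constructor is hypothetical, and it assumes fn_ctxt has an llfn field and that LLVMAppendBasicBlock is bound in the llvm mod, neither of which is visible in these hunks:

    fn new_block_ctxt_sketch(fcx: @fn_ctxt, parent: block_parent,
                             kind: block_kind, sp: span) -> @block_ctxt {
        let llbb = llvm::LLVMAppendBasicBlock(fcx.llfn, str::buf("sketch"));
        ret @{llbb: llbb,
              mutable terminated: false,
              // The BuilderRef_res is held by value; the builder is
              // disposed when the block context is dropped.
              build: bld::BuilderRef_res(bld::mk_builder(llbb)),
              parent: parent,
              kind: kind,
              mutable cleanups: [],
              sp: sp,
              fcx: fcx};
    }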


@@ -9,6 +9,7 @@ import trans::{call_memmove, trans_shared_malloc, llsize_of,
lazily_emit_tydesc_glue, get_tydesc, load_inbounds,
move_val_if_temp, trans_lval, node_id_type,
new_sub_block_ctxt};
import bld = trans_build;
import trans_common::*;
fn alloc_with_heap(bcx: @block_ctxt, typ: &ty::t, vecsz: uint) ->
@@ -33,23 +34,23 @@ fn alloc_with_heap(bcx: @block_ctxt, typ: &ty::t, vecsz: uint) ->
add_clean_temp(bcx, llvecptr, typ);
let lllen = bcx.build.Mul(C_uint(vecsz), unit_sz);
let lllen = bld::Mul(bcx, C_uint(vecsz), unit_sz);
// Allocate the vector pieces and store length and allocated length.
let llfirsteltptr;
if vecsz > 0u && vecsz <= abi::ivec_default_length {
// Interior case.
bcx.build.Store(lllen,
bcx.build.InBoundsGEP(llvecptr,
bld::Store(bcx, lllen,
bld::InBoundsGEP(bcx, llvecptr,
[C_int(0),
C_uint(abi::ivec_elt_len)]));
bcx.build.Store(llalen,
bcx.build.InBoundsGEP(llvecptr,
bld::Store(bcx, llalen,
bld::InBoundsGEP(bcx, llvecptr,
[C_int(0),
C_uint(abi::ivec_elt_alen)]));
llfirsteltptr =
bcx.build.InBoundsGEP(llvecptr,
bld::InBoundsGEP(bcx, llvecptr,
[C_int(0), C_uint(abi::ivec_elt_elems),
C_int(0)]);
} else {
@@ -59,29 +60,29 @@ fn alloc_with_heap(bcx: @block_ctxt, typ: &ty::t, vecsz: uint) ->
let stub_a = [C_int(0), C_uint(abi::ivec_heap_stub_elt_alen)];
let stub_p = [C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)];
let llstubty = T_ivec_heap(llunitty);
let llstubptr = bcx.build.PointerCast(llvecptr, T_ptr(llstubty));
bcx.build.Store(C_int(0), bcx.build.InBoundsGEP(llstubptr, stub_z));
let llstubptr = bld::PointerCast(bcx, llvecptr, T_ptr(llstubty));
bld::Store(bcx, C_int(0), bld::InBoundsGEP(bcx, llstubptr, stub_z));
let llheapty = T_ivec_heap_part(llunitty);
if vecsz == 0u {
// Null heap pointer indicates a zero-length vector.
bcx.build.Store(llalen, bcx.build.InBoundsGEP(llstubptr, stub_a));
bcx.build.Store(C_null(T_ptr(llheapty)),
bcx.build.InBoundsGEP(llstubptr, stub_p));
bld::Store(bcx, llalen, bld::InBoundsGEP(bcx, llstubptr, stub_a));
bld::Store(bcx, C_null(T_ptr(llheapty)),
bld::InBoundsGEP(bcx, llstubptr, stub_p));
llfirsteltptr = C_null(T_ptr(llunitty));
} else {
bcx.build.Store(lllen, bcx.build.InBoundsGEP(llstubptr, stub_a));
bld::Store(bcx, lllen, bld::InBoundsGEP(bcx, llstubptr, stub_a));
let llheapsz = bcx.build.Add(llsize_of(llheapty), lllen);
let llheapsz = bld::Add(bcx, llsize_of(llheapty), lllen);
let rslt = trans_shared_malloc(bcx, T_ptr(llheapty), llheapsz);
bcx = rslt.bcx;
let llheapptr = rslt.val;
bcx.build.Store(llheapptr,
bcx.build.InBoundsGEP(llstubptr, stub_p));
bld::Store(bcx, llheapptr,
bld::InBoundsGEP(bcx, llstubptr, stub_p));
let heap_l = [C_int(0), C_uint(abi::ivec_heap_elt_len)];
bcx.build.Store(lllen, bcx.build.InBoundsGEP(llheapptr, heap_l));
bld::Store(bcx, lllen, bld::InBoundsGEP(bcx, llheapptr, heap_l));
llfirsteltptr =
bcx.build.InBoundsGEP(llheapptr,
bld::InBoundsGEP(bcx, llheapptr,
[C_int(0),
C_uint(abi::ivec_heap_elt_elems),
C_int(0)]);
@@ -115,10 +116,10 @@ fn trans_ivec(bcx: @block_ctxt, args: &[@ast::expr],
let lleltptr;
if ty::type_has_dynamic_size(bcx_tcx(bcx), unit_ty) {
lleltptr =
bcx.build.InBoundsGEP(llfirsteltptr,
[bcx.build.Mul(C_uint(i), llunitsz)]);
bld::InBoundsGEP(bcx, llfirsteltptr,
[bld::Mul(bcx, C_uint(i), llunitsz)]);
} else {
lleltptr = bcx.build.InBoundsGEP(llfirsteltptr, [C_uint(i)]);
lleltptr = bld::InBoundsGEP(bcx, llfirsteltptr, [C_uint(i)]);
}
bcx = move_val_if_temp(bcx, INIT, lleltptr, lv, unit_ty);
i += 1u;
@@ -135,23 +136,23 @@ fn get_len_and_data(bcx: &@block_ctxt, orig_v: ValueRef, unit_ty: ty::t)
// opaque vector type.
let v;
if ty::type_has_dynamic_size(bcx_tcx(bcx), unit_ty) {
v = bcx.build.PointerCast(orig_v, T_ptr(T_opaque_ivec()));
v = bld::PointerCast(bcx, orig_v, T_ptr(T_opaque_ivec()));
} else { v = orig_v; }
let llunitty = type_of_or_i8(bcx, unit_ty);
let stack_len =
load_inbounds(bcx, v, [C_int(0), C_uint(abi::ivec_elt_len)]);
let stack_elem =
bcx.build.InBoundsGEP(v,
bld::InBoundsGEP(bcx, v,
[C_int(0), C_uint(abi::ivec_elt_elems),
C_int(0)]);
let on_heap =
bcx.build.ICmp(lib::llvm::LLVMIntEQ, stack_len, C_int(0));
bld::ICmp(bcx, lib::llvm::LLVMIntEQ, stack_len, C_int(0));
let on_heap_cx = new_sub_block_ctxt(bcx, "on_heap");
let next_cx = new_sub_block_ctxt(bcx, "next");
bcx.build.CondBr(on_heap, on_heap_cx.llbb, next_cx.llbb);
bld::CondBr(bcx, on_heap, on_heap_cx.llbb, next_cx.llbb);
let heap_stub =
on_heap_cx.build.PointerCast(v, T_ptr(T_ivec_heap(llunitty)));
bld::PointerCast(on_heap_cx, v, T_ptr(T_ivec_heap(llunitty)));
let heap_ptr =
load_inbounds(on_heap_cx, heap_stub,
[C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)]);
@@ -162,18 +163,18 @@ fn get_len_and_data(bcx: &@block_ctxt, orig_v: ValueRef, unit_ty: ty::t)
let llstubty = T_ivec_heap(llunitty);
let llheapptrty = struct_elt(llstubty, abi::ivec_heap_stub_elt_ptr);
let heap_ptr_is_null =
on_heap_cx.build.ICmp(lib::llvm::LLVMIntEQ, heap_ptr,
bld::ICmp(on_heap_cx, lib::llvm::LLVMIntEQ, heap_ptr,
C_null(T_ptr(llheapptrty)));
let zero_len_cx = new_sub_block_ctxt(bcx, "zero_len");
let nonzero_len_cx = new_sub_block_ctxt(bcx, "nonzero_len");
on_heap_cx.build.CondBr(heap_ptr_is_null, zero_len_cx.llbb,
bld::CondBr(on_heap_cx, heap_ptr_is_null, zero_len_cx.llbb,
nonzero_len_cx.llbb);
// Technically this context is unnecessary, but it makes this function
// clearer.
let zero_len = C_int(0);
let zero_elem = C_null(T_ptr(llunitty));
zero_len_cx.build.Br(next_cx.llbb);
bld::Br(zero_len_cx, next_cx.llbb);
// If we're here, then we actually have a heapified vector.
let heap_len =
@@ -183,19 +184,19 @@ fn get_len_and_data(bcx: &@block_ctxt, orig_v: ValueRef, unit_ty: ty::t)
{
let v =
[C_int(0), C_uint(abi::ivec_heap_elt_elems), C_int(0)];
nonzero_len_cx.build.InBoundsGEP(heap_ptr, v)
bld::InBoundsGEP(nonzero_len_cx, heap_ptr, v)
};
nonzero_len_cx.build.Br(next_cx.llbb);
bld::Br(nonzero_len_cx, next_cx.llbb);
// Now we can figure out the length of `v` and get a pointer to its
// first element.
let len =
next_cx.build.Phi(T_int(), [stack_len, zero_len, heap_len],
bld::Phi(next_cx, T_int(), [stack_len, zero_len, heap_len],
[bcx.llbb, zero_len_cx.llbb,
nonzero_len_cx.llbb]);
let elem =
next_cx.build.Phi(T_ptr(llunitty),
bld::Phi(next_cx, T_ptr(llunitty),
[stack_elem, zero_elem, heap_elem],
[bcx.llbb, zero_len_cx.llbb,
nonzero_len_cx.llbb]);
@@ -207,8 +208,8 @@ fn get_len_and_data(bcx: &@block_ctxt, orig_v: ValueRef, unit_ty: ty::t)
fn reserve_space(cx: &@block_ctxt, llunitty: TypeRef, v: ValueRef,
len_needed: ValueRef) -> result {
let stack_len_ptr =
cx.build.InBoundsGEP(v, [C_int(0), C_uint(abi::ivec_elt_len)]);
let stack_len = cx.build.Load(stack_len_ptr);
bld::InBoundsGEP(cx, v, [C_int(0), C_uint(abi::ivec_elt_len)]);
let stack_len = bld::Load(cx, stack_len_ptr);
let alen =
load_inbounds(cx, v, [C_int(0), C_uint(abi::ivec_elt_alen)]);
// There are four cases we have to consider:
@@ -218,40 +219,40 @@ fn reserve_space(cx: &@block_ctxt, llunitty: TypeRef, v: ValueRef,
// (4) On stack, need to spill to heap.
let maybe_on_heap =
cx.build.ICmp(lib::llvm::LLVMIntEQ, stack_len, C_int(0));
bld::ICmp(cx, lib::llvm::LLVMIntEQ, stack_len, C_int(0));
let maybe_on_heap_cx = new_sub_block_ctxt(cx, "maybe_on_heap");
let on_stack_cx = new_sub_block_ctxt(cx, "on_stack");
cx.build.CondBr(maybe_on_heap, maybe_on_heap_cx.llbb,
bld::CondBr(cx, maybe_on_heap, maybe_on_heap_cx.llbb,
on_stack_cx.llbb);
let next_cx = new_sub_block_ctxt(cx, "next");
// We're possibly on the heap, unless the vector is zero-length.
let stub_p = [C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)];
let stub_ptr =
maybe_on_heap_cx.build.PointerCast(v,
bld::PointerCast(maybe_on_heap_cx, v,
T_ptr(T_ivec_heap(llunitty)));
let heap_ptr = load_inbounds(maybe_on_heap_cx, stub_ptr, stub_p);
let on_heap =
maybe_on_heap_cx.build.ICmp(lib::llvm::LLVMIntNE, heap_ptr,
bld::ICmp(maybe_on_heap_cx, lib::llvm::LLVMIntNE, heap_ptr,
C_null(val_ty(heap_ptr)));
let on_heap_cx = new_sub_block_ctxt(cx, "on_heap");
maybe_on_heap_cx.build.CondBr(on_heap, on_heap_cx.llbb,
bld::CondBr(maybe_on_heap_cx, on_heap, on_heap_cx.llbb,
on_stack_cx.llbb);
// We're definitely on the heap. Check whether we need to resize.
let heap_len_ptr =
on_heap_cx.build.InBoundsGEP(heap_ptr,
bld::InBoundsGEP(on_heap_cx, heap_ptr,
[C_int(0),
C_uint(abi::ivec_heap_elt_len)]);
let heap_len = on_heap_cx.build.Load(heap_len_ptr);
let new_heap_len = on_heap_cx.build.Add(heap_len, len_needed);
let heap_len = bld::Load(on_heap_cx, heap_len_ptr);
let new_heap_len = bld::Add(on_heap_cx, heap_len, len_needed);
let heap_len_unscaled =
on_heap_cx.build.UDiv(heap_len, llsize_of(llunitty));
bld::UDiv(on_heap_cx, heap_len, llsize_of(llunitty));
let heap_no_resize_needed =
on_heap_cx.build.ICmp(lib::llvm::LLVMIntULE, new_heap_len, alen);
bld::ICmp(on_heap_cx, lib::llvm::LLVMIntULE, new_heap_len, alen);
let heap_no_resize_cx = new_sub_block_ctxt(cx, "heap_no_resize");
let heap_resize_cx = new_sub_block_ctxt(cx, "heap_resize");
on_heap_cx.build.CondBr(heap_no_resize_needed, heap_no_resize_cx.llbb,
bld::CondBr(on_heap_cx, heap_no_resize_needed, heap_no_resize_cx.llbb,
heap_resize_cx.llbb);
// Case (1): We're on the heap and don't need to resize.
@@ -260,18 +261,18 @@ fn reserve_space(cx: &@block_ctxt, llunitty: TypeRef, v: ValueRef,
let v =
[C_int(0), C_uint(abi::ivec_heap_elt_elems),
heap_len_unscaled];
heap_no_resize_cx.build.InBoundsGEP(heap_ptr, v)
bld::InBoundsGEP(heap_no_resize_cx, heap_ptr, v)
};
heap_no_resize_cx.build.Store(new_heap_len, heap_len_ptr);
heap_no_resize_cx.build.Br(next_cx.llbb);
bld::Store(heap_no_resize_cx, new_heap_len, heap_len_ptr);
bld::Br(heap_no_resize_cx, next_cx.llbb);
// Case (2): We're on the heap and need to resize. This path is rare,
// so we delegate to cold glue.
{
let p =
heap_resize_cx.build.PointerCast(v, T_ptr(T_opaque_ivec()));
bld::PointerCast(heap_resize_cx, v, T_ptr(T_opaque_ivec()));
let upcall = bcx_ccx(cx).upcalls.ivec_resize_shared;
heap_resize_cx.build.Call(upcall,
bld::Call(heap_resize_cx, upcall,
[cx.fcx.lltaskptr, p, new_heap_len]);
}
let heap_ptr_resize = load_inbounds(heap_resize_cx, stub_ptr, stub_p);
@@ -281,42 +282,42 @@ fn reserve_space(cx: &@block_ctxt, llunitty: TypeRef, v: ValueRef,
let v =
[C_int(0), C_uint(abi::ivec_heap_elt_elems),
heap_len_unscaled];
heap_resize_cx.build.InBoundsGEP(heap_ptr_resize, v)
bld::InBoundsGEP(heap_resize_cx, heap_ptr_resize, v)
};
heap_resize_cx.build.Br(next_cx.llbb);
bld::Br(heap_resize_cx, next_cx.llbb);
// We're on the stack. Check whether we need to spill to the heap.
let new_stack_len = on_stack_cx.build.Add(stack_len, len_needed);
let new_stack_len = bld::Add(on_stack_cx, stack_len, len_needed);
let stack_no_spill_needed =
on_stack_cx.build.ICmp(lib::llvm::LLVMIntULE, new_stack_len,
bld::ICmp(on_stack_cx, lib::llvm::LLVMIntULE, new_stack_len,
alen);
let stack_len_unscaled =
on_stack_cx.build.UDiv(stack_len, llsize_of(llunitty));
bld::UDiv(on_stack_cx, stack_len, llsize_of(llunitty));
let stack_no_spill_cx = new_sub_block_ctxt(cx, "stack_no_spill");
let stack_spill_cx = new_sub_block_ctxt(cx, "stack_spill");
on_stack_cx.build.CondBr(stack_no_spill_needed,
bld::CondBr(on_stack_cx, stack_no_spill_needed,
stack_no_spill_cx.llbb, stack_spill_cx.llbb);
// Case (3): We're on the stack and don't need to spill.
let stack_data_no_spill =
stack_no_spill_cx.build.InBoundsGEP(v,
bld::InBoundsGEP(stack_no_spill_cx, v,
[C_int(0),
C_uint(abi::ivec_elt_elems),
stack_len_unscaled]);
stack_no_spill_cx.build.Store(new_stack_len, stack_len_ptr);
stack_no_spill_cx.build.Br(next_cx.llbb);
bld::Store(stack_no_spill_cx, new_stack_len, stack_len_ptr);
bld::Br(stack_no_spill_cx, next_cx.llbb);
// Case (4): We're on the stack and need to spill. Like case (2), this
// path is rare, so we delegate to cold glue.
{
let p =
stack_spill_cx.build.PointerCast(v, T_ptr(T_opaque_ivec()));
bld::PointerCast(stack_spill_cx, v, T_ptr(T_opaque_ivec()));
let upcall = bcx_ccx(cx).upcalls.ivec_spill_shared;
stack_spill_cx.build.Call(upcall,
bld::Call(stack_spill_cx, upcall,
[cx.fcx.lltaskptr, p, new_stack_len]);
}
let spill_stub =
stack_spill_cx.build.PointerCast(v, T_ptr(T_ivec_heap(llunitty)));
bld::PointerCast(stack_spill_cx, v, T_ptr(T_ivec_heap(llunitty)));
let heap_ptr_spill =
load_inbounds(stack_spill_cx, spill_stub, stub_p);
@@ -326,13 +327,13 @@ fn reserve_space(cx: &@block_ctxt, llunitty: TypeRef, v: ValueRef,
let v =
[C_int(0), C_uint(abi::ivec_heap_elt_elems),
stack_len_unscaled];
stack_spill_cx.build.InBoundsGEP(heap_ptr_spill, v)
bld::InBoundsGEP(stack_spill_cx, heap_ptr_spill, v)
};
stack_spill_cx.build.Br(next_cx.llbb);
bld::Br(stack_spill_cx, next_cx.llbb);
// Phi together the different data pointers to get the result.
let data_ptr =
next_cx.build.Phi(T_ptr(llunitty),
bld::Phi(next_cx, T_ptr(llunitty),
[heap_data_no_resize, heap_data_resize,
stack_data_no_spill, heap_data_spill],
[heap_no_resize_cx.llbb, heap_resize_cx.llbb,
@@ -343,8 +344,8 @@ fn trans_append(cx: &@block_ctxt, t: ty::t, lhs: ValueRef,
rhs: ValueRef) -> result {
// Cast to opaque interior vector types if necessary.
if ty::type_has_dynamic_size(bcx_tcx(cx), t) {
lhs = cx.build.PointerCast(lhs, T_ptr(T_opaque_ivec()));
rhs = cx.build.PointerCast(rhs, T_ptr(T_opaque_ivec()));
lhs = bld::PointerCast(cx, lhs, T_ptr(T_opaque_ivec()));
rhs = bld::PointerCast(cx, rhs, T_ptr(T_opaque_ivec()));
}
let unit_ty = ty::sequence_element_type(bcx_tcx(cx), t);
@@ -380,7 +381,7 @@ fn trans_append(cx: &@block_ctxt, t: ty::t, lhs: ValueRef,
let extra_len = if have_istrs {
// Only need one of the nulls
bcx.build.Sub(rhs_len, C_uint(1u))
bld::Sub(bcx, rhs_len, C_uint(1u))
} else { rhs_len };
rs = reserve_space(bcx, llunitty, lhs, extra_len);
@@ -391,7 +392,7 @@ fn trans_append(cx: &@block_ctxt, t: ty::t, lhs: ValueRef,
let lhs_data_without_null_ptr = alloca(bcx, T_ptr(llunitty));
incr_ptr(bcx, lhs_data, C_int(-1),
lhs_data_without_null_ptr);
bcx.build.Load(lhs_data_without_null_ptr)
bld::Load(bcx, lhs_data_without_null_ptr)
} else {
rs.val
};
@@ -403,27 +404,27 @@ fn trans_append(cx: &@block_ctxt, t: ty::t, lhs: ValueRef,
// Work out the end pointer.
let lhs_unscaled_idx = bcx.build.UDiv(rhs_len, llsize_of(llunitty));
let lhs_end = bcx.build.InBoundsGEP(lhs_data, [lhs_unscaled_idx]);
let lhs_unscaled_idx = bld::UDiv(bcx, rhs_len, llsize_of(llunitty));
let lhs_end = bld::InBoundsGEP(bcx, lhs_data, [lhs_unscaled_idx]);
// Now emit the copy loop.
let dest_ptr = alloca(bcx, T_ptr(llunitty));
bcx.build.Store(lhs_data, dest_ptr);
bld::Store(bcx, lhs_data, dest_ptr);
let src_ptr = alloca(bcx, T_ptr(llunitty));
bcx.build.Store(rhs_data, src_ptr);
bld::Store(bcx, rhs_data, src_ptr);
let copy_loop_header_cx = new_sub_block_ctxt(bcx, "copy_loop_header");
bcx.build.Br(copy_loop_header_cx.llbb);
let copy_dest_ptr = copy_loop_header_cx.build.Load(dest_ptr);
bld::Br(bcx, copy_loop_header_cx.llbb);
let copy_dest_ptr = bld::Load(copy_loop_header_cx, dest_ptr);
let not_yet_at_end =
copy_loop_header_cx.build.ICmp(lib::llvm::LLVMIntNE,
bld::ICmp(copy_loop_header_cx, lib::llvm::LLVMIntNE,
copy_dest_ptr, lhs_end);
let copy_loop_body_cx = new_sub_block_ctxt(bcx, "copy_loop_body");
let next_cx = new_sub_block_ctxt(bcx, "next");
copy_loop_header_cx.build.CondBr(not_yet_at_end,
bld::CondBr(copy_loop_header_cx, not_yet_at_end,
copy_loop_body_cx.llbb,
next_cx.llbb);
let copy_src_ptr = copy_loop_body_cx.build.Load(src_ptr);
let copy_src_ptr = bld::Load(copy_loop_body_cx, src_ptr);
let copy_src =
load_if_immediate(copy_loop_body_cx, copy_src_ptr, unit_ty);
@@ -439,7 +440,7 @@ fn trans_append(cx: &@block_ctxt, t: ty::t, lhs: ValueRef,
incr_ptr(post_copy_cx, copy_src_ptr, C_int(1), src_ptr);
}
post_copy_cx.build.Br(copy_loop_header_cx.llbb);
bld::Br(post_copy_cx, copy_loop_header_cx.llbb);
ret rslt(next_cx, C_nil());
}
@@ -449,14 +450,14 @@ fn trans_append_literal(bcx: &@block_ctxt, v: ValueRef, vec_ty: ty::t,
let ti = none;
let {bcx, val: td} = get_tydesc(bcx, elt_ty, false, ti).result;
trans::lazily_emit_all_tydesc_glue(bcx, ti);
let opaque_v = bcx.build.PointerCast(v, T_ptr(T_opaque_ivec()));
let opaque_v = bld::PointerCast(bcx, v, T_ptr(T_opaque_ivec()));
for val in vals {
let {bcx: e_bcx, val: elt} = trans::trans_expr(bcx, val);
bcx = e_bcx;
let spilled = trans::spill_if_immediate(bcx, elt, elt_ty);
bcx.build.Call(bcx_ccx(bcx).upcalls.ivec_push,
bld::Call(bcx, bcx_ccx(bcx).upcalls.ivec_push,
[bcx.fcx.lltaskptr, opaque_v, td,
bcx.build.PointerCast(spilled, T_ptr(T_i8()))]);
bld::PointerCast(bcx, spilled, T_ptr(T_i8()))]);
}
ret bcx;
}
@@ -483,18 +484,18 @@ fn alloc(cx: &@block_ctxt, unit_ty: ty::t) -> alloc_result {
if dynamic { cx.fcx.llderivedtydescs = bcx.llbb; }
let llalen =
bcx.build.Mul(llunitsz, C_uint(abi::ivec_default_length));
bld::Mul(bcx, llunitsz, C_uint(abi::ivec_default_length));
let llptr;
let llunitty = type_of_or_i8(bcx, unit_ty);
let bcx_result;
if dynamic {
let llarraysz = bcx.build.Add(llsize_of(T_opaque_ivec()), llalen);
let llarraysz = bld::Add(bcx, llsize_of(T_opaque_ivec()), llalen);
let llvecptr = array_alloca(bcx, T_i8(), llarraysz);
bcx_result = cx;
llptr =
bcx_result.build.PointerCast(llvecptr,
bld::PointerCast(bcx_result, llvecptr,
T_ptr(T_opaque_ivec()));
} else { llptr = alloca(bcx, T_ivec(llunitty)); bcx_result = bcx; }
@@ -527,7 +528,7 @@ fn trans_add(cx: &@block_ctxt, vec_ty: ty::t, lhs: ValueRef,
lhs_len = alt ty::struct(bcx_tcx(bcx), vec_ty) {
ty::ty_istr. {
// Forget about the trailing null on the left side
bcx.build.Sub(lhs_len, C_uint(1u))
bld::Sub(bcx, lhs_len, C_uint(1u))
}
ty::ty_vec(_) { lhs_len }
_ { bcx_tcx(bcx).sess.bug("non-istr/ivec in trans_add") }
@@ -537,17 +538,17 @@ fn trans_add(cx: &@block_ctxt, vec_ty: ty::t, lhs: ValueRef,
let rhs_len = rhs_len_and_data.len;
let rhs_data = rhs_len_and_data.data;
bcx = rhs_len_and_data.bcx;
let lllen = bcx.build.Add(lhs_len, rhs_len);
let lllen = bld::Add(bcx, lhs_len, rhs_len);
// We have three cases to handle here:
// (1) Length is zero ([] + []).
// (2) Copy onto stack.
// (3) Allocate on heap and copy there.
let len_is_zero =
bcx.build.ICmp(lib::llvm::LLVMIntEQ, lllen, C_int(0));
bld::ICmp(bcx, lib::llvm::LLVMIntEQ, lllen, C_int(0));
let zero_len_cx = new_sub_block_ctxt(bcx, "zero_len");
let nonzero_len_cx = new_sub_block_ctxt(bcx, "nonzero_len");
bcx.build.CondBr(len_is_zero, zero_len_cx.llbb, nonzero_len_cx.llbb);
bld::CondBr(bcx, len_is_zero, zero_len_cx.llbb, nonzero_len_cx.llbb);
// Case (1): Length is zero.
let stub_z = [C_int(0), C_uint(abi::ivec_heap_stub_elt_zero)];
@@ -558,103 +559,103 @@ fn trans_add(cx: &@block_ctxt, vec_ty: ty::t, lhs: ValueRef,
let vec_a = [C_int(0), C_uint(abi::ivec_elt_alen)];
let stub_ptr_zero =
zero_len_cx.build.PointerCast(llvecptr,
bld::PointerCast(zero_len_cx, llvecptr,
T_ptr(T_ivec_heap(llunitty)));
zero_len_cx.build.Store(C_int(0),
zero_len_cx.build.InBoundsGEP(stub_ptr_zero,
bld::Store(zero_len_cx, C_int(0),
bld::InBoundsGEP(zero_len_cx, stub_ptr_zero,
stub_z));
zero_len_cx.build.Store(llalen,
zero_len_cx.build.InBoundsGEP(stub_ptr_zero,
bld::Store(zero_len_cx, llalen,
bld::InBoundsGEP(zero_len_cx, stub_ptr_zero,
stub_a));
zero_len_cx.build.Store(C_null(T_ptr(llheappartty)),
zero_len_cx.build.InBoundsGEP(stub_ptr_zero,
bld::Store(zero_len_cx, C_null(T_ptr(llheappartty)),
bld::InBoundsGEP(zero_len_cx, stub_ptr_zero,
stub_p));
let next_cx = new_sub_block_ctxt(bcx, "next");
zero_len_cx.build.Br(next_cx.llbb);
bld::Br(zero_len_cx, next_cx.llbb);
// Determine whether we need to spill to the heap.
let on_stack =
nonzero_len_cx.build.ICmp(lib::llvm::LLVMIntULE, lllen, llalen);
bld::ICmp(nonzero_len_cx, lib::llvm::LLVMIntULE, lllen, llalen);
let stack_cx = new_sub_block_ctxt(bcx, "stack");
let heap_cx = new_sub_block_ctxt(bcx, "heap");
nonzero_len_cx.build.CondBr(on_stack, stack_cx.llbb, heap_cx.llbb);
bld::CondBr(nonzero_len_cx, on_stack, stack_cx.llbb, heap_cx.llbb);
// Case (2): Copy onto stack.
stack_cx.build.Store(lllen,
stack_cx.build.InBoundsGEP(llvecptr, vec_l));
stack_cx.build.Store(llalen,
stack_cx.build.InBoundsGEP(llvecptr, vec_a));
bld::Store(stack_cx, lllen,
bld::InBoundsGEP(stack_cx, llvecptr, vec_l));
bld::Store(stack_cx, llalen,
bld::InBoundsGEP(stack_cx, llvecptr, vec_a));
let dest_ptr_stack =
stack_cx.build.InBoundsGEP(llvecptr,
bld::InBoundsGEP(stack_cx, llvecptr,
[C_int(0), C_uint(abi::ivec_elt_elems),
C_int(0)]);
let copy_cx = new_sub_block_ctxt(bcx, "copy");
stack_cx.build.Br(copy_cx.llbb);
bld::Br(stack_cx, copy_cx.llbb);
// Case (3): Allocate on heap and copy there.
let stub_ptr_heap =
heap_cx.build.PointerCast(llvecptr, T_ptr(T_ivec_heap(llunitty)));
heap_cx.build.Store(C_int(0),
heap_cx.build.InBoundsGEP(stub_ptr_heap, stub_z));
heap_cx.build.Store(lllen,
heap_cx.build.InBoundsGEP(stub_ptr_heap, stub_a));
let heap_sz = heap_cx.build.Add(llsize_of(llheappartty), lllen);
bld::PointerCast(heap_cx, llvecptr, T_ptr(T_ivec_heap(llunitty)));
bld::Store(heap_cx, C_int(0),
bld::InBoundsGEP(heap_cx, stub_ptr_heap, stub_z));
bld::Store(heap_cx, lllen,
bld::InBoundsGEP(heap_cx, stub_ptr_heap, stub_a));
let heap_sz = bld::Add(heap_cx, llsize_of(llheappartty), lllen);
let rs = trans_shared_malloc(heap_cx, T_ptr(llheappartty), heap_sz);
let heap_part = rs.val;
heap_cx = rs.bcx;
heap_cx.build.Store(heap_part,
heap_cx.build.InBoundsGEP(stub_ptr_heap, stub_p));
bld::Store(heap_cx, heap_part,
bld::InBoundsGEP(heap_cx, stub_ptr_heap, stub_p));
{
let v = [C_int(0), C_uint(abi::ivec_heap_elt_len)];
heap_cx.build.Store(lllen,
heap_cx.build.InBoundsGEP(heap_part, v));
bld::Store(heap_cx, lllen,
bld::InBoundsGEP(heap_cx, heap_part, v));
}
let dest_ptr_heap =
heap_cx.build.InBoundsGEP(heap_part,
bld::InBoundsGEP(heap_cx, heap_part,
[C_int(0),
C_uint(abi::ivec_heap_elt_elems),
C_int(0)]);
heap_cx.build.Br(copy_cx.llbb);
bld::Br(heap_cx, copy_cx.llbb);
// Emit the copy loop.
let first_dest_ptr =
copy_cx.build.Phi(T_ptr(llunitty),
bld::Phi(copy_cx, T_ptr(llunitty),
[dest_ptr_stack, dest_ptr_heap],
[stack_cx.llbb, heap_cx.llbb]);
let lhs_end_ptr;
let rhs_end_ptr;
if ty::type_has_dynamic_size(bcx_tcx(cx), unit_ty) {
lhs_end_ptr = copy_cx.build.InBoundsGEP(lhs_data, [lhs_len]);
rhs_end_ptr = copy_cx.build.InBoundsGEP(rhs_data, [rhs_len]);
lhs_end_ptr = bld::InBoundsGEP(copy_cx, lhs_data, [lhs_len]);
rhs_end_ptr = bld::InBoundsGEP(copy_cx, rhs_data, [rhs_len]);
} else {
let lhs_len_unscaled = copy_cx.build.UDiv(lhs_len, unit_sz);
let lhs_len_unscaled = bld::UDiv(copy_cx, lhs_len, unit_sz);
lhs_end_ptr =
copy_cx.build.InBoundsGEP(lhs_data, [lhs_len_unscaled]);
let rhs_len_unscaled = copy_cx.build.UDiv(rhs_len, unit_sz);
bld::InBoundsGEP(copy_cx, lhs_data, [lhs_len_unscaled]);
let rhs_len_unscaled = bld::UDiv(copy_cx, rhs_len, unit_sz);
rhs_end_ptr =
copy_cx.build.InBoundsGEP(rhs_data, [rhs_len_unscaled]);
bld::InBoundsGEP(copy_cx, rhs_data, [rhs_len_unscaled]);
}
let dest_ptr_ptr = alloca(copy_cx, T_ptr(llunitty));
copy_cx.build.Store(first_dest_ptr, dest_ptr_ptr);
bld::Store(copy_cx, first_dest_ptr, dest_ptr_ptr);
let lhs_ptr_ptr = alloca(copy_cx, T_ptr(llunitty));
copy_cx.build.Store(lhs_data, lhs_ptr_ptr);
bld::Store(copy_cx, lhs_data, lhs_ptr_ptr);
let rhs_ptr_ptr = alloca(copy_cx, T_ptr(llunitty));
copy_cx.build.Store(rhs_data, rhs_ptr_ptr);
bld::Store(copy_cx, rhs_data, rhs_ptr_ptr);
let lhs_copy_cx = new_sub_block_ctxt(bcx, "lhs_copy");
copy_cx.build.Br(lhs_copy_cx.llbb);
bld::Br(copy_cx, lhs_copy_cx.llbb);
// Copy in elements from the LHS.
let lhs_ptr = lhs_copy_cx.build.Load(lhs_ptr_ptr);
let lhs_ptr = bld::Load(lhs_copy_cx, lhs_ptr_ptr);
let not_at_end_lhs =
lhs_copy_cx.build.ICmp(lib::llvm::LLVMIntNE, lhs_ptr,
bld::ICmp(lhs_copy_cx, lib::llvm::LLVMIntNE, lhs_ptr,
lhs_end_ptr);
let lhs_do_copy_cx = new_sub_block_ctxt(bcx, "lhs_do_copy");
let rhs_copy_cx = new_sub_block_ctxt(bcx, "rhs_copy");
lhs_copy_cx.build.CondBr(not_at_end_lhs, lhs_do_copy_cx.llbb,
bld::CondBr(lhs_copy_cx, not_at_end_lhs, lhs_do_copy_cx.llbb,
rhs_copy_cx.llbb);
let dest_ptr_lhs_copy = lhs_do_copy_cx.build.Load(dest_ptr_ptr);
let dest_ptr_lhs_copy = bld::Load(lhs_do_copy_cx, dest_ptr_ptr);
let lhs_val = load_if_immediate(lhs_do_copy_cx, lhs_ptr, unit_ty);
lhs_do_copy_cx = copy_val(lhs_do_copy_cx, INIT, dest_ptr_lhs_copy,
lhs_val, unit_ty);
@ -671,17 +672,17 @@ fn trans_add(cx: &@block_ctxt, vec_ty: ty::t, lhs: ValueRef,
incr_ptr(lhs_do_copy_cx, lhs_ptr, C_int(1), lhs_ptr_ptr);
}
lhs_do_copy_cx.build.Br(lhs_copy_cx.llbb);
bld::Br(lhs_do_copy_cx, lhs_copy_cx.llbb);
// Copy in elements from the RHS.
let rhs_ptr = rhs_copy_cx.build.Load(rhs_ptr_ptr);
let rhs_ptr = bld::Load(rhs_copy_cx, rhs_ptr_ptr);
let not_at_end_rhs =
rhs_copy_cx.build.ICmp(lib::llvm::LLVMIntNE, rhs_ptr,
bld::ICmp(rhs_copy_cx, lib::llvm::LLVMIntNE, rhs_ptr,
rhs_end_ptr);
let rhs_do_copy_cx = new_sub_block_ctxt(bcx, "rhs_do_copy");
rhs_copy_cx.build.CondBr(not_at_end_rhs, rhs_do_copy_cx.llbb,
bld::CondBr(rhs_copy_cx, not_at_end_rhs, rhs_do_copy_cx.llbb,
next_cx.llbb);
let dest_ptr_rhs_copy = rhs_do_copy_cx.build.Load(dest_ptr_ptr);
let dest_ptr_rhs_copy = bld::Load(rhs_do_copy_cx, dest_ptr_ptr);
let rhs_val = load_if_immediate(rhs_do_copy_cx, rhs_ptr, unit_ty);
rhs_do_copy_cx = copy_val(rhs_do_copy_cx, INIT, dest_ptr_rhs_copy,
rhs_val, unit_ty);
@ -698,7 +699,7 @@ fn trans_add(cx: &@block_ctxt, vec_ty: ty::t, lhs: ValueRef,
incr_ptr(rhs_do_copy_cx, rhs_ptr, C_int(1), rhs_ptr_ptr);
}
rhs_do_copy_cx.build.Br(rhs_copy_cx.llbb);
bld::Br(rhs_do_copy_cx, rhs_copy_cx.llbb);
// Finally done!
ret rslt(next_cx, llvecptr);
@ -712,7 +713,7 @@ fn duplicate_heap_part(cx: &@block_ctxt, orig_vptr: ValueRef,
// type.
let vptr;
if ty::type_has_dynamic_size(bcx_tcx(cx), unit_ty) {
vptr = cx.build.PointerCast(orig_vptr, T_ptr(T_opaque_ivec()));
vptr = bld::PointerCast(cx, orig_vptr, T_ptr(T_opaque_ivec()));
} else { vptr = orig_vptr; }
let llunitty = type_of_or_i8(cx, unit_ty);
@ -720,41 +721,40 @@ fn duplicate_heap_part(cx: &@block_ctxt, orig_vptr: ValueRef,
// Check to see if the vector is heapified.
let stack_len_ptr =
cx.build.InBoundsGEP(vptr, [C_int(0), C_uint(abi::ivec_elt_len)]);
let stack_len = cx.build.Load(stack_len_ptr);
bld::InBoundsGEP(cx, vptr, [C_int(0), C_uint(abi::ivec_elt_len)]);
let stack_len = bld::Load(cx, stack_len_ptr);
let stack_len_is_zero =
cx.build.ICmp(lib::llvm::LLVMIntEQ, stack_len, C_int(0));
bld::ICmp(cx, lib::llvm::LLVMIntEQ, stack_len, C_int(0));
let maybe_on_heap_cx = new_sub_block_ctxt(cx, "maybe_on_heap");
let next_cx = new_sub_block_ctxt(cx, "next");
cx.build.CondBr(stack_len_is_zero, maybe_on_heap_cx.llbb,
bld::CondBr(cx, stack_len_is_zero, maybe_on_heap_cx.llbb,
next_cx.llbb);
let stub_ptr =
maybe_on_heap_cx.build.PointerCast(vptr,
bld::PointerCast(maybe_on_heap_cx, vptr,
T_ptr(T_ivec_heap(llunitty)));
let heap_ptr_ptr =
maybe_on_heap_cx.build.InBoundsGEP(
bld::InBoundsGEP(maybe_on_heap_cx,
stub_ptr,
[C_int(0),
C_uint(abi::ivec_heap_stub_elt_ptr)]);
let heap_ptr = maybe_on_heap_cx.build.Load(heap_ptr_ptr);
let heap_ptr = bld::Load(maybe_on_heap_cx, heap_ptr_ptr);
let heap_ptr_is_nonnull =
maybe_on_heap_cx.build.ICmp(lib::llvm::LLVMIntNE, heap_ptr,
bld::ICmp(maybe_on_heap_cx, lib::llvm::LLVMIntNE, heap_ptr,
C_null(T_ptr(llheappartty)));
let on_heap_cx = new_sub_block_ctxt(cx, "on_heap");
maybe_on_heap_cx.build.CondBr(heap_ptr_is_nonnull, on_heap_cx.llbb,
bld::CondBr(maybe_on_heap_cx, heap_ptr_is_nonnull, on_heap_cx.llbb,
next_cx.llbb);
// Ok, the vector is on the heap. Copy the heap part.
let alen_ptr =
on_heap_cx.build.InBoundsGEP(
stub_ptr,
bld::InBoundsGEP(on_heap_cx, stub_ptr,
[C_int(0),
C_uint(abi::ivec_heap_stub_elt_alen)]);
let alen = on_heap_cx.build.Load(alen_ptr);
let alen = bld::Load(on_heap_cx, alen_ptr);
let heap_part_sz =
on_heap_cx.build.Add(alen, llsize_of(T_opaque_ivec_heap_part()));
bld::Add(on_heap_cx, alen, llsize_of(T_opaque_ivec_heap_part()));
let rs =
trans_shared_malloc(on_heap_cx, T_ptr(llheappartty),
heap_part_sz);
@ -764,8 +764,8 @@ fn duplicate_heap_part(cx: &@block_ctxt, orig_vptr: ValueRef,
rs = call_memmove(on_heap_cx, new_heap_ptr, heap_ptr, heap_part_sz);
on_heap_cx = rs.bcx;
on_heap_cx.build.Store(new_heap_ptr, heap_ptr_ptr);
on_heap_cx.build.Br(next_cx.llbb);
bld::Store(on_heap_cx, new_heap_ptr, heap_ptr_ptr);
bld::Br(on_heap_cx, next_cx.llbb);
ret rslt(next_cx, C_nil());
}
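
For reference (not part of this commit): the trans_add path above follows the
three-case plan spelled out in its comments. A zero-length result writes an
empty stub, a result that fits within the allocated length stays in the stack
part, and anything larger spills to a heap allocation. A minimal sketch of
that decision logic in modern Rust, with hypothetical names; the real code
emits LLVM IR rather than manipulating vectors directly:

    // Sketch only: models the zero/stack/heap decision, not the emitted IR.
    struct IVec {
        stack: Vec<i64>,        // stands in for the fixed-size stack part
        heap: Option<Vec<i64>>, // heap part, used once the stack part overflows
    }

    fn append(lhs: &[i64], rhs: &[i64], alen: usize) -> IVec {
        let len = lhs.len() + rhs.len();
        if len == 0 {
            // Case (1): length is zero -- write an empty stub, copy nothing.
            return IVec { stack: Vec::new(), heap: None };
        }
        let mut data = Vec::with_capacity(len);
        data.extend_from_slice(lhs); // the LHS copy loop
        data.extend_from_slice(rhs); // the RHS copy loop
        if len <= alen {
            // Case (2): the result fits in the stack part.
            IVec { stack: data, heap: None }
        } else {
            // Case (3): allocate on the heap and copy there.
            IVec { stack: Vec::new(), heap: Some(data) }
        }
    }

    fn main() {
        assert!(append(&[1, 2], &[3], 4).heap.is_none());
        assert!(append(&[1, 2, 3], &[4, 5], 4).heap.is_some());
    }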

View file

@@ -20,6 +20,7 @@ import syntax::codemap::span;
 import trans_common::*;
 import trans::*;
+import bld = trans_build;
 export trans_anon_obj;
 export trans_obj;
@@ -78,17 +79,17 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
     // abi::obj_field_vtbl and abi::obj_field_box simply specify words 0 and 1
     // of 'pair'.
     let pair_vtbl =
-        bcx.build.GEP(pair, [C_int(0), C_int(abi::obj_field_vtbl)]);
-    let pair_box = bcx.build.GEP(pair, [C_int(0), C_int(abi::obj_field_box)]);
+        bld::GEP(bcx, pair, [C_int(0), C_int(abi::obj_field_vtbl)]);
+    let pair_box = bld::GEP(bcx, pair, [C_int(0), C_int(abi::obj_field_box)]);
     // Make a vtable for this object: a static array of pointers to functions.
     // It will be located in the read-only memory of the executable we're
     // creating and will contain ValueRefs for all of this object's methods.
     // create_vtbl returns a pointer to the vtable, which we store.
     let vtbl = create_vtbl(cx, sp, self_ty, ob, ty_params, none, []);
-    vtbl = bcx.build.PointerCast(vtbl, T_ptr(T_empty_struct()));
+    vtbl = bld::PointerCast(bcx, vtbl, T_ptr(T_empty_struct()));
-    bcx.build.Store(vtbl, pair_vtbl);
+    bld::Store(bcx, vtbl, pair_vtbl);
     // Next we have to take care of the other half of the pair we're
     // returning: a boxed (reference-counted) tuple containing a tydesc,
@@ -101,7 +102,7 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
         // there's not much to do.
         // Store null into pair, if no args or typarams.
-        bcx.build.Store(C_null(llbox_ty), pair_box);
+        bld::Store(bcx, C_null(llbox_ty), pair_box);
     } else {
         let obj_fields: [ty::t] = [];
         for a: ty::arg in arg_tys { obj_fields += [a.ty]; }
@@ -136,7 +137,7 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
         lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, ti);
         lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, ti);
         bcx = body_td.bcx;
-        bcx.build.Store(body_td.val, body_tydesc.val);
+        bld::Store(bcx, body_td.val, body_tydesc.val);
         // Copy the object's type parameters and fields into the space we
         // allocated for the object body. (This is something like saving the
@@ -186,8 +187,8 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
         }
         // Store box ptr in outer pair.
-        let p = bcx.build.PointerCast(box.box, llbox_ty);
-        bcx.build.Store(p, pair_box);
+        let p = bld::PointerCast(bcx, box.box, llbox_ty);
+        bld::Store(bcx, p, pair_box);
     }
     build_return(bcx);
@@ -280,11 +281,11 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
     // Grab onto the first and second elements of the pair.
     let pair_vtbl =
-        bcx.build.GEP(pair, [C_int(0), C_int(abi::obj_field_vtbl)]);
-    let pair_box = bcx.build.GEP(pair, [C_int(0), C_int(abi::obj_field_box)]);
+        bld::GEP(bcx, pair, [C_int(0), C_int(abi::obj_field_vtbl)]);
+    let pair_box = bld::GEP(bcx, pair, [C_int(0), C_int(abi::obj_field_box)]);
-    vtbl = bcx.build.PointerCast(vtbl, T_ptr(T_empty_struct()));
-    bcx.build.Store(vtbl, pair_vtbl);
+    vtbl = bld::PointerCast(bcx, vtbl, T_ptr(T_empty_struct()));
+    bld::Store(bcx, vtbl, pair_vtbl);
     // Next we have to take care of the other half of the pair we're
     // returning: a boxed (reference-counted) tuple containing a tydesc,
@@ -296,7 +297,7 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
         // If the object we're translating has no fields and no inner_obj,
         // there's not much to do.
-        bcx.build.Store(C_null(llbox_ty), pair_box);
+        bld::Store(bcx, C_null(llbox_ty), pair_box);
     } else {
@@ -326,7 +327,7 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
         lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, ti);
         lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, ti);
         bcx = body_td.bcx;
-        bcx.build.Store(body_td.val, body_tydesc.val);
+        bld::Store(bcx, body_td.val, body_tydesc.val);
         // Copy the object's fields into the space we allocated for the object
         // body. (This is something like saving the lexical environment of a
@@ -370,8 +371,8 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
         }
         // Store box ptr in outer pair.
-        let p = bcx.build.PointerCast(box.box, llbox_ty);
-        bcx.build.Store(p, pair_box);
+        let p = bld::PointerCast(bcx, box.box, llbox_ty);
+        bld::Store(bcx, p, pair_box);
     }
     // return the object we built.
@@ -618,16 +619,15 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     // Cast to self-stack's type.
     let llenv =
-        bcx.build.PointerCast(
-            fcx.llenv,
+        bld::PointerCast(bcx, fcx.llenv,
             T_ptr(T_struct([cx.ccx.rust_object_type,
                             T_ptr(cx.ccx.rust_object_type)])));
-    let llself_obj_ptr = bcx.build.GEP(llenv, [C_int(0), C_int(1)]);
-    llself_obj_ptr = bcx.build.Load(llself_obj_ptr);
+    let llself_obj_ptr = bld::GEP(bcx, llenv, [C_int(0), C_int(1)]);
+    llself_obj_ptr = bld::Load(bcx, llself_obj_ptr);
     // Cast it back to pointer-to-object-type, so LLVM won't complain.
     llself_obj_ptr =
-        bcx.build.PointerCast(llself_obj_ptr, T_ptr(cx.ccx.rust_object_type));
+        bld::PointerCast(bcx, llself_obj_ptr, T_ptr(cx.ccx.rust_object_type));
     // The 'llretptr' that will arrive in the backwarding function we're
     // creating also needs to be the correct type. Cast it to the method's
@@ -635,7 +635,7 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     let llretptr = fcx.llretptr;
     if ty::type_contains_params(cx.ccx.tcx, m.output) {
         let llretty = type_of_inner(cx.ccx, sp, m.output);
-        llretptr = bcx.build.PointerCast(llretptr, T_ptr(llretty));
+        llretptr = bld::PointerCast(bcx, llretptr, T_ptr(llretty));
     }
     // Get the index of the method we want.
@@ -655,12 +655,12 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     let vtbl_type = T_ptr(T_array(T_ptr(T_nil()), ix + 1u));
     let llouter_obj_vtbl =
-        bcx.build.GEP(llself_obj_ptr, [C_int(0), C_int(abi::obj_field_vtbl)]);
-    llouter_obj_vtbl = bcx.build.Load(llouter_obj_vtbl);
-    llouter_obj_vtbl = bcx.build.PointerCast(llouter_obj_vtbl, vtbl_type);
+        bld::GEP(bcx, llself_obj_ptr, [C_int(0), C_int(abi::obj_field_vtbl)]);
+    llouter_obj_vtbl = bld::Load(bcx, llouter_obj_vtbl);
+    llouter_obj_vtbl = bld::PointerCast(bcx, llouter_obj_vtbl, vtbl_type);
     let llouter_mthd =
-        bcx.build.GEP(llouter_obj_vtbl, [C_int(0), C_int(ix as int)]);
+        bld::GEP(bcx, llouter_obj_vtbl, [C_int(0), C_int(ix as int)]);
     // Set up the outer method to be called.
     let outer_mthd_ty = ty::method_ty_to_fn_ty(cx.ccx.tcx, *m);
@@ -670,8 +670,8 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
                          m.inputs, m.output,
                          std::vec::len::<ast::ty_param>(ty_params));
     llouter_mthd =
-        bcx.build.PointerCast(llouter_mthd, T_ptr(T_ptr(llouter_mthd_ty)));
-    llouter_mthd = bcx.build.Load(llouter_mthd);
+        bld::PointerCast(bcx, llouter_mthd, T_ptr(T_ptr(llouter_mthd_ty)));
+    llouter_mthd = bld::Load(bcx, llouter_mthd);
     // Set up the three implicit arguments to the outer method we'll need to
     // call.
@@ -692,7 +692,7 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     }
     // And, finally, call the outer method.
-    bcx.build.FastCall(llouter_mthd, llouter_mthd_args);
+    bld::FastCall(bcx, llouter_mthd, llouter_mthd_args);
     build_return(bcx);
     finish_fn(fcx, lltop);
@@ -751,7 +751,7 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     let llretptr = fcx.llretptr;
     if ty::type_contains_params(cx.ccx.tcx, m.output) {
         let llretty = type_of_inner(cx.ccx, sp, m.output);
-        llretptr = bcx.build.PointerCast(llretptr, T_ptr(llretty));
+        llretptr = bld::PointerCast(bcx, llretptr, T_ptr(llretty));
     }
     // Now, we have to get the the inner_obj's vtbl out of the self_obj. This
@@ -760,16 +760,16 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     // First, grab the box out of the self_obj. It contains a refcount and a
     // body.
     let llself_obj_box =
-        bcx.build.GEP(llself_obj_ptr, [C_int(0), C_int(abi::obj_field_box)]);
-    llself_obj_box = bcx.build.Load(llself_obj_box);
+        bld::GEP(bcx, llself_obj_ptr, [C_int(0), C_int(abi::obj_field_box)]);
+    llself_obj_box = bld::Load(bcx, llself_obj_box);
     let ccx = bcx_ccx(bcx);
     let llbox_ty = T_opaque_obj_ptr(*ccx);
-    llself_obj_box = bcx.build.PointerCast(llself_obj_box, llbox_ty);
+    llself_obj_box = bld::PointerCast(bcx, llself_obj_box, llbox_ty);
     // Now, reach into the box and grab the body.
     let llself_obj_body =
-        bcx.build.GEP(llself_obj_box,
+        bld::GEP(bcx, llself_obj_box,
                  [C_int(0), C_int(abi::box_rc_field_body)]);
     // Now, we need to figure out exactly what type the body is supposed to be
@@ -779,7 +779,7 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
                              some(inner_obj_ty));
     // And cast to that type.
     llself_obj_body =
-        bcx.build.PointerCast(llself_obj_body,
+        bld::PointerCast(bcx, llself_obj_body,
                  T_ptr(type_of(cx.ccx, sp, body_ty)));
     // Now, reach into the body and grab the inner_obj.
@@ -793,13 +793,13 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     // method's entry out of the vtable so that the forwarding function can
     // call it.
     let llinner_obj_vtbl =
-        bcx.build.GEP(llinner_obj.val,
+        bld::GEP(bcx, llinner_obj.val,
                  [C_int(0), C_int(abi::obj_field_vtbl)]);
-    llinner_obj_vtbl = bcx.build.Load(llinner_obj_vtbl);
+    llinner_obj_vtbl = bld::Load(bcx, llinner_obj_vtbl);
     let llinner_obj_body =
-        bcx.build.GEP(llinner_obj.val, [C_int(0), C_int(abi::obj_field_box)]);
-    llinner_obj_body = bcx.build.Load(llinner_obj_body);
+        bld::GEP(bcx, llinner_obj.val, [C_int(0), C_int(abi::obj_field_box)]);
+    llinner_obj_body = bld::Load(bcx, llinner_obj_body);
     // Get the index of the method we want.
     let ix: uint = 0u;
@@ -816,10 +816,10 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     // Pick out the original method from the vtable.
     let vtbl_type = T_ptr(T_array(T_ptr(T_nil()), ix + 1u));
-    llinner_obj_vtbl = bcx.build.PointerCast(llinner_obj_vtbl, vtbl_type);
+    llinner_obj_vtbl = bld::PointerCast(bcx, llinner_obj_vtbl, vtbl_type);
     let llorig_mthd =
-        bcx.build.GEP(llinner_obj_vtbl, [C_int(0), C_int(ix as int)]);
+        bld::GEP(bcx, llinner_obj_vtbl, [C_int(0), C_int(ix as int)]);
     // Set up the original method to be called.
     let orig_mthd_ty = ty::method_ty_to_fn_ty(cx.ccx.tcx, *m);
@@ -829,8 +829,8 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
                          m.inputs, m.output,
                         std::vec::len::<ast::ty_param>(ty_params));
     llorig_mthd =
-        bcx.build.PointerCast(llorig_mthd, T_ptr(T_ptr(llorig_mthd_ty)));
-    llorig_mthd = bcx.build.Load(llorig_mthd);
+        bld::PointerCast(bcx, llorig_mthd, T_ptr(T_ptr(llorig_mthd_ty)));
+    llorig_mthd = bld::Load(bcx, llorig_mthd);
     // Set up the self-stack.
     let self_stack =
@@ -843,7 +843,7 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     // Cast self_stack back to pointer-to-object-type to make LLVM happy.
     self_stack =
-        bcx.build.PointerCast(self_stack, T_ptr(cx.ccx.rust_object_type));
+        bld::PointerCast(bcx, self_stack, T_ptr(cx.ccx.rust_object_type));
     // Set up the three implicit arguments to the original method we'll need
     // to call.
@@ -863,7 +863,7 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     }
     // And, finally, call the original (inner) method.
-    bcx.build.FastCall(llorig_mthd, llorig_mthd_args);
+    bld::FastCall(bcx, llorig_mthd, llorig_mthd_args);
     build_return(bcx);
     finish_fn(fcx, lltop);
@@ -939,19 +939,19 @@ fn populate_self_stack(bcx: @block_ctxt, self_stack: ValueRef,
                        inner_obj_body: ValueRef) -> ValueRef {
     // Drop the outer obj into the second slot.
-    let self_pair_ptr = bcx.build.GEP(self_stack, [C_int(0), C_int(1)]);
-    bcx.build.Store(outer_obj, self_pair_ptr);
+    let self_pair_ptr = bld::GEP(bcx, self_stack, [C_int(0), C_int(1)]);
+    bld::Store(bcx, outer_obj, self_pair_ptr);
     // Drop in the backwarding vtbl.
-    let wrapper_pair = bcx.build.GEP(self_stack, [C_int(0), C_int(0)]);
-    let wrapper_vtbl_ptr = bcx.build.GEP(wrapper_pair, [C_int(0), C_int(0)]);
+    let wrapper_pair = bld::GEP(bcx, self_stack, [C_int(0), C_int(0)]);
+    let wrapper_vtbl_ptr = bld::GEP(bcx, wrapper_pair, [C_int(0), C_int(0)]);
     let backwarding_vtbl_cast =
-        bcx.build.PointerCast(backwarding_vtbl, T_ptr(T_empty_struct()));
-    bcx.build.Store(backwarding_vtbl_cast, wrapper_vtbl_ptr);
+        bld::PointerCast(bcx, backwarding_vtbl, T_ptr(T_empty_struct()));
+    bld::Store(bcx, backwarding_vtbl_cast, wrapper_vtbl_ptr);
     // Drop in the inner obj body.
-    let wrapper_body_ptr = bcx.build.GEP(wrapper_pair, [C_int(0), C_int(1)]);
-    bcx.build.Store(inner_obj_body, wrapper_body_ptr);
+    let wrapper_body_ptr = bld::GEP(bcx, wrapper_pair, [C_int(0), C_int(1)]);
+    bld::Store(bcx, inner_obj_body, wrapper_body_ptr);
     ret self_stack;
 }
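
For reference (not part of this commit): the GEPs above all navigate the same
two-word object pair, where word 0 (abi::obj_field_vtbl) holds the vtable
pointer and word 1 (abi::obj_field_box) holds the boxed body, and the
forwarding code picks a method out of the vtable by index. A rough model of
that layout and lookup in modern Rust, with hypothetical names; the real
vtables are arrays of untyped pointers that get cast before use:

    // Sketch only: the two-word pair and by-index vtable lookup.
    type Method = fn(&ObjBody) -> i64;

    struct ObjBody {
        field: i64, // stands in for the refcounted body contents
    }

    struct ObjPair<'a> {
        vtbl: &'a [Method], // word 0: abi::obj_field_vtbl
        body: &'a ObjBody,  // word 1: abi::obj_field_box
    }

    // Mirrors the emitted GEP-into-vtable, load, and call sequence.
    fn call_method(pair: &ObjPair, ix: usize) -> i64 {
        (pair.vtbl[ix])(pair.body)
    }

    fn get_field(body: &ObjBody) -> i64 { body.field }

    fn main() {
        let body = ObjBody { field: 7 };
        let vtbl: [Method; 1] = [get_field];
        let pair = ObjPair { vtbl: &vtbl, body: &body };
        assert_eq!(call_method(&pair, 0), 7);
    }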

View file

@@ -15,6 +15,7 @@ use std (name = "std",
 mod middle {
     mod trans_common;
+    mod trans_build;
     mod trans;
     mod trans_alt;
     mod trans_objects;
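
For reference (not part of this commit): registering trans_build in the crate
is what makes the import bld = trans_build alias available to the translation
modules. A small sketch in modern Rust, with hypothetical names, of the
difference between the two call styles the commit converts between: a build
call dispatched through a boxed object's vtable versus a statically
dispatched free function that takes the block context as its first argument:

    // Sketch only: dynamic vs. static dispatch for a build call.
    struct BlockCtxt {
        terminated: bool, // a block may receive at most one terminator
    }

    // Old style: every call goes through a boxed trait object's vtable.
    trait Build {
        fn ret(&self, bcx: &mut BlockCtxt, v: i64) -> i64;
    }
    struct Builder;
    impl Build for Builder {
        fn ret(&self, bcx: &mut BlockCtxt, v: i64) -> i64 {
            assert!(!bcx.terminated);
            bcx.terminated = true;
            v
        }
    }
    struct OldBlockCtxt {
        build: Box<dyn Build>, // one extra allocation per block context
    }

    // New style: a free function, statically dispatched, no extra box.
    mod bld {
        use super::BlockCtxt;
        pub fn ret(bcx: &mut BlockCtxt, v: i64) -> i64 {
            assert!(!bcx.terminated);
            bcx.terminated = true;
            v
        }
    }

    fn main() {
        let old = OldBlockCtxt { build: Box::new(Builder) };
        let mut b1 = BlockCtxt { terminated: false };
        old.build.ret(&mut b1, 1); // virtual call through the vtable

        let mut b2 = BlockCtxt { terminated: false };
        bld::ret(&mut b2, 1); // direct call: bld::ret(bcx, v)
    }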