diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index 455928077da46..47431e3a324c0 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -602,4 +602,24 @@ extern "rust-intrinsic" { /// Returns the value of the discriminant for the variant in 'v', /// cast to a `u64`; if `T` has no discriminant, returns 0. pub fn discriminant_value(v: &T) -> u64; + + /// Hints to the compiler that a branch is likely to be taken. Returns the value + /// passed to it. In order for the hint to take effect, it should be used as follows: + /// + /// ``` + /// unsafe fn foo(a: i32, b: i32) -> i32 { + /// if likely(a == b) { + /// 1 + /// } else { + /// 2 + /// } + /// } + /// ``` + #[cfg(not(stage0))] + pub fn likely(v: bool) -> bool; + + /// Hints to the compiler that a branch is not likely to be taken. See `likely` for + /// more details + #[cfg(not(stage0))] + pub fn unlikely(v: bool) -> bool; } diff --git a/src/librustc_trans/trans/base.rs b/src/librustc_trans/trans/base.rs index b382b71343f4e..160a83199910a 100644 --- a/src/librustc_trans/trans/base.rs +++ b/src/librustc_trans/trans/base.rs @@ -1672,8 +1672,7 @@ pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, disr: ty::Disr, args: callee::CallArgs, dest: expr::Dest, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { + debug_loc: DebugLoc) -> Result<'blk, 'tcx> { let ccx = bcx.fcx.ccx; let tcx = ccx.tcx(); @@ -1717,6 +1716,12 @@ pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } } + let val = if type_is_immediate(ccx, result_ty) && !type_is_zero_size(ccx, result_ty) { + load_ty(bcx, llresult, result_ty) + } else { + common::C_nil(ccx) + }; + // If the caller doesn't care about the result // drop the temporary we made let bcx = match dest { @@ -1730,7 +1735,7 @@ pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } }; - Result::new(bcx, llresult) + Result::new(bcx, val) } pub fn trans_tuple_struct<'a, 'tcx>(ccx: 
&CrateContext<'a, 'tcx>, diff --git a/src/librustc_trans/trans/build.rs b/src/librustc_trans/trans/build.rs index 05d0a967e64b6..9df7f8f3ccef4 100644 --- a/src/librustc_trans/trans/build.rs +++ b/src/librustc_trans/trans/build.rs @@ -31,7 +31,14 @@ pub fn terminate(cx: Block, _: &str) { pub fn check_not_terminated(cx: Block) { if cx.terminated.get() { - panic!("already terminated!"); + let fcx = cx.fcx; + if let Some(span) = fcx.span { + cx.tcx().sess.span_bug( + span, + "already terminated!"); + } else { + cx.tcx().sess.bug("already terminated!"); + } } } diff --git a/src/librustc_trans/trans/builder.rs b/src/librustc_trans/trans/builder.rs index e100defc24875..c0e1b80d8668a 100644 --- a/src/librustc_trans/trans/builder.rs +++ b/src/librustc_trans/trans/builder.rs @@ -718,6 +718,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { pub fn icmp(&self, op: IntPredicate, lhs: ValueRef, rhs: ValueRef) -> ValueRef { self.count_insn("icmp"); unsafe { + assert!(val_ty(lhs) == val_ty(rhs), "Cannot compare {} and {}", + self.ccx.tn().val_to_string(lhs), + self.ccx.tn().val_to_string(rhs)); llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) } } diff --git a/src/librustc_trans/trans/callee.rs b/src/librustc_trans/trans/callee.rs index 0aeb4046dc64c..da7bbc596d2d9 100644 --- a/src/librustc_trans/trans/callee.rs +++ b/src/librustc_trans/trans/callee.rs @@ -87,8 +87,8 @@ pub struct Callee<'blk, 'tcx: 'blk> { pub data: CalleeData<'tcx>, } -fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr) - -> Callee<'blk, 'tcx> { +pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr) + -> Callee<'blk, 'tcx> { let _icx = push_ctxt("trans_callee"); debug!("callee::trans(expr={})", expr.repr(bcx.tcx())); @@ -860,13 +860,25 @@ pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, abi); fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - bcx = foreign::trans_native_call(bcx, + let (llret, b) = foreign::trans_native_call(bcx, callee_ty, 
llfn, opt_llretslot.unwrap(), &llargs[..], arg_tys, debug_loc); + + bcx = b; + match (opt_llretslot, ret_ty) { + (Some(_), ty::FnConverging(ret_ty)) => { + if !type_of::return_uses_outptr(bcx.ccx(), ret_ty) && + !common::type_is_zero_size(bcx.ccx(), ret_ty) + { + llresult = llret; + } + } + (_, _) => {} + } } fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_cleanup_scope); diff --git a/src/librustc_trans/trans/controlflow.rs b/src/librustc_trans/trans/controlflow.rs index d203aeca0a757..b59eb13478e5f 100644 --- a/src/librustc_trans/trans/controlflow.rs +++ b/src/librustc_trans/trans/controlflow.rs @@ -19,6 +19,7 @@ use trans::cleanup::CleanupMethods; use trans::cleanup; use trans::common::*; use trans::consts; +use trans::datum; use trans::debuginfo; use trans::debuginfo::{DebugLoc, ToDebugLoc}; use trans::expr; @@ -144,6 +145,48 @@ pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, return bcx; } +/// Same as `trans_block` except it returns a `DatumBlock` instead of storing +/// the result to a destination. This avoids going through a temporary when it's +/// not needed, primarily to ensure that `unsafe { likely(cond) }` and similar patterns +/// work. 
+pub fn trans_block_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + b: &ast::Block) + -> datum::DatumBlock<'blk, 'tcx, datum::Expr> { + let _icx = push_ctxt("trans_block_datum"); + + if bcx.unreachable.get() { + let llval = C_nil(bcx.ccx()); + let datum = datum::immediate_rvalue(llval, ty::mk_nil(bcx.tcx())); + return datum::DatumBlock {bcx: bcx, datum: datum.to_expr_datum()}; + } + + let fcx = bcx.fcx; + let mut bcx = bcx; + + let cleanup_debug_loc = + debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), b.id, b.span, true); + fcx.push_ast_cleanup_scope(cleanup_debug_loc); + + for s in &b.stmts { + bcx = trans_stmt(bcx, &**s); + } + + let datum = match b.expr { + Some(ref e) if !bcx.unreachable.get() => { + unpack_datum!(bcx, expr::trans(bcx, &**e)) + } + _ => { + let llval = C_nil(bcx.ccx()); + datum::immediate_rvalue(llval, ty::mk_nil(bcx.tcx())).to_expr_datum() + } + }; + + bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, b.id); + + datum::DatumBlock {bcx: bcx, datum: datum} +} + + pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, if_id: ast::NodeId, cond: &ast::Expr, diff --git a/src/librustc_trans/trans/expr.rs b/src/librustc_trans/trans/expr.rs index 07a8cbc38d575..134031fcc8787 100644 --- a/src/librustc_trans/trans/expr.rs +++ b/src/librustc_trans/trans/expr.rs @@ -571,8 +571,7 @@ pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, /// A version of `trans` that ignores adjustments. You almost certainly do not want to call this /// directly. 
fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &ast::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { + expr: &ast::Expr) -> DatumBlock<'blk, 'tcx, Expr> { let mut bcx = bcx; debug!("trans_unadjusted(expr={})", bcx.expr_to_string(expr)); @@ -580,7 +579,9 @@ fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, debuginfo::set_source_location(bcx.fcx, expr.id, expr.span); - return match ty::expr_kind(bcx.tcx(), expr) { + let ty = expr_ty(bcx, expr); + + return match expr_kind(bcx, expr) { ty::LvalueExpr | ty::RvalueDatumExpr => { let datum = unpack_datum!(bcx, { trans_datum_unadjusted(bcx, expr) @@ -591,11 +592,10 @@ fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty::RvalueStmtExpr => { bcx = trans_rvalue_stmt_unadjusted(bcx, expr); - nil(bcx, expr_ty(bcx, expr)) + nil(bcx, ty) } ty::RvalueDpsExpr => { - let ty = expr_ty(bcx, expr); if type_is_zero_size(bcx.ccx(), ty) { bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore); nil(bcx, ty) @@ -629,6 +629,55 @@ fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } +// Get the appropriate expression kind for the expression. Most of the time this just uses +// ty::expr_kind, but `ExprCall`s can be treated as `RvalueDatumExpr`s in some cases. +fn expr_kind<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr) -> ty::ExprKind { + let ty = expr_ty(bcx, expr); + match expr.node { + ast::ExprCall(ref f, _) if !bcx.tcx().is_method_call(expr.id) => { + if let ast::ExprPath(..) 
= f.node { + let fn_ty = expr_ty_adjusted(bcx, f); + + let ret_ty = match fn_ty.sty { + ty::TyBareFn(_, ref fty) => + ty::erase_late_bound_regions(bcx.tcx(), &fty.sig.output()), + _ => bcx.tcx().sess.bug("Not calling a function?") + }; + + let is_datum = if let ty::FnConverging(output) = ret_ty { + !type_of::return_uses_outptr(bcx.ccx(), output) && + !bcx.fcx.type_needs_drop(output) + } else { + true + }; + + + if is_datum { + return ty::RvalueDatumExpr; + } + } + } + ast::ExprBlock(ref b) if !bcx.fcx.type_needs_drop(ty) => { + // Only consider the final expression if it's reachable + let reachable = if let Some(ref cfg) = bcx.fcx.cfg { + cfg.node_is_reachable(expr.id) + } else { + true + }; + // Use the kind of the last expression in the block, since + // it's the only one that actually matters + if let Some(ref expr) = b.expr { + if reachable { + return expr_kind(bcx, expr); + } + } + } + _ => () + } + + return ty::expr_kind(bcx.tcx(), expr); +} + fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr) -> DatumBlock<'blk, 'tcx, Expr> { @@ -696,6 +745,38 @@ fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Datum output mode means this is a scalar cast: trans_imm_cast(bcx, &**val, expr.id) } + ast::ExprCall(ref f, ref args) => { + let fn_ty = expr_ty_adjusted(bcx, f); + + let ret_ty = match fn_ty.sty { + ty::TyBareFn(_, ref fty) => { + ty::erase_late_bound_regions(bcx.tcx(), &fty.sig.output()) + } + _ => panic!("Not calling a function?!") + }; + + let args = callee::ArgExprs(&args[..]); + let result = callee::trans_call_inner(bcx, + expr.debug_loc(), + fn_ty, + |cx, _| callee::trans(cx, f), + args, Some(Ignore)); + + if let ty::FnConverging(ret_ty) = ret_ty { + immediate_rvalue_bcx(result.bcx, result.val, ret_ty) + .to_expr_datumblock() + } else { + // We called a diverging function, generate an undef value of the appropriate + // type. 
+ let ty = expr_ty(bcx, expr); + let llval = C_undef(type_of::arg_type_of(bcx.ccx(), ty)); + let datum = immediate_rvalue(llval, ty); + DatumBlock::new(bcx, datum.to_expr_datum()) + } + } + ast::ExprBlock(ref b) => { + controlflow::trans_block_datum(bcx, b) + } _ => { bcx.tcx().sess.span_bug( expr.span, @@ -1018,6 +1099,9 @@ fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ast::ExprInlineAsm(ref a) => { asm::trans_inline_asm(bcx, a) } + ast::ExprBlock(ref b) => { + controlflow::trans_block(bcx, b, Ignore) + } _ => { bcx.tcx().sess.span_bug( expr.span, @@ -1036,6 +1120,10 @@ fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let mut bcx = bcx; let tcx = bcx.tcx(); + if bcx.unreachable.get() { + return bcx; + } + debuginfo::set_source_location(bcx.fcx, expr.id, expr.span); match expr.node { @@ -2418,13 +2506,10 @@ impl OverflowOpViaIntrinsic { let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc); let result = ExtractValue(bcx, val, 0); // iN operation result - let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?" - - let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false), - binop_debug_loc); + let cond = ExtractValue(bcx, val, 1); // i1 "did it overflow?" 
let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1"); - Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)], + let cond = Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)], None, binop_debug_loc); let bcx = diff --git a/src/librustc_trans/trans/foreign.rs b/src/librustc_trans/trans/foreign.rs index 9ad6df5a6aba1..5e105e171bd5c 100644 --- a/src/librustc_trans/trans/foreign.rs +++ b/src/librustc_trans/trans/foreign.rs @@ -230,7 +230,7 @@ pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, llargs_rust: &[ValueRef], passed_arg_tys: Vec>, call_debug_loc: DebugLoc) - -> Block<'blk, 'tcx> + -> (ValueRef, Block<'blk, 'tcx>) { let ccx = bcx.ccx(); let tcx = bcx.tcx(); @@ -389,6 +389,7 @@ pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // type to match because some ABIs will use a different type than // the Rust type. e.g., a {u32,u32} struct could be returned as // u64. + let llretval = llforeign_retval; if llsig.ret_def && !fn_type.ret_ty.is_indirect() { let llrust_ret_ty = llsig.llret_ty; let llforeign_ret_ty = match fn_type.ret_ty.cast { @@ -436,7 +437,7 @@ pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } - return bcx; + return (llretval, bcx); } // feature gate SIMD types in FFI, since I (huonw) am not sure the diff --git a/src/librustc_trans/trans/intrinsic.rs b/src/librustc_trans/trans/intrinsic.rs index 419ab1bb05d6e..a45792f209cd8 100644 --- a/src/librustc_trans/trans/intrinsic.rs +++ b/src/librustc_trans/trans/intrinsic.rs @@ -171,6 +171,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let foreign_item = tcx.map.expect_foreign_item(node); let name = token::get_ident(foreign_item.ident); + let call_debug_location = DebugLoc::At(call_info.id, call_info.span); + // For `transmute` we can just trans the input expr directly into dest if &name[..] 
== "transmute" { let llret_ty = type_of::type_of(ccx, ret_ty.unwrap()); @@ -199,16 +201,16 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, }; // An approximation to which types can be directly cast via - // LLVM's bitcast. This doesn't cover pointer -> pointer casts, - // but does, importantly, cover SIMD types. + // LLVM's bitcast. let in_kind = llintype.kind(); let ret_kind = llret_ty.kind(); let bitcast_compatible = - (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || { - in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer - }; + (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || + (in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer) || + (in_kind == TypeKind::Pointer && ret_kind == TypeKind::Integer) || + (in_kind == TypeKind::Integer && ret_kind == TypeKind::Pointer); - let dest = if bitcast_compatible { + let val = if bitcast_compatible { // if we're here, the type is scalar-like (a primitive, a // SIMD type or a pointer), and so can be handled as a // by-value ValueRef and can also be directly bitcast to the @@ -226,7 +228,13 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, from_arg_ty(bcx, datum.val, datum.ty) }; - let cast_val = BitCast(bcx, val, llret_ty); + // Do the appropriate cast type + let cast_val = match (in_kind, ret_kind) { + (TypeKind::Pointer, TypeKind::Pointer) => PointerCast(bcx, val, llret_ty), + (TypeKind::Pointer, TypeKind::Integer) => PtrToInt(bcx, val, llret_ty), + (TypeKind::Integer, TypeKind::Pointer) => IntToPtr(bcx, val, llret_ty), + _ => BitCast(bcx, val, llret_ty) + }; match dest { expr::SaveIn(d) => { @@ -236,26 +244,54 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } expr::Ignore => {} } - dest + cast_val } else { // The types are too complicated to do with a by-value // bitcast, so pointer cast instead. We need to cast the // dest so the types work out. 
- let dest = match dest { + + // Create the destination, if we were passed a destination, bitcast it to the + // appropriate type. Otherwise, create a temporary destination for immediate + // types and just ignore non-immediate types. This is because we return the + // result for immediate values. + let tmp_dest = match dest { expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())), - expr::Ignore => expr::Ignore + expr::Ignore => if type_is_immediate(bcx.ccx(), out_type) { + expr::SaveIn(alloc_ty(bcx, in_type, "intrinsic_result")) + } else { + expr::Ignore + } }; - bcx = expr::trans_into(bcx, &*arg_exprs[0], dest); - dest + bcx = expr::trans_into(bcx, &*arg_exprs[0], tmp_dest); + match dest { + expr::SaveIn(d) => d, + expr::Ignore => { + // We weren't passed a destination to save to, if we made a temporary + // destination, load the immediate value stored and clean up the type. + // Otherwise return (). + if let expr::SaveIn(d) = tmp_dest { + let val = if type_is_immediate(bcx.ccx(), out_type) { + let d = PointerCast(bcx, d, llouttype.ptr_to()); + load_ty(bcx, d, out_type) + } else { + C_nil(bcx.ccx()) + }; + + bcx = glue::drop_ty(bcx, d, out_type, call_debug_location); + + val + } else { + C_nil(bcx.ccx()) + } + } + } }; fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - return match dest { - expr::SaveIn(d) => Result::new(bcx, d), - expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to())) - }; + return Result::new(bcx, val); + } @@ -319,8 +355,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - let call_debug_location = DebugLoc::At(call_info.id, call_info.span); - // These are the only intrinsic functions that diverge. if &name[..] 
== "abort" { let llfn = ccx.get_intrinsic(&("llvm.trap")); @@ -763,6 +797,17 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } } + (_, "likely") => { + let llfn = ccx.get_intrinsic(&("llvm.expect.i1")); + let expected = C_bool(ccx, true); + Call(bcx, llfn, &[llargs[0], expected], None, call_debug_location) + } + (_, "unlikely") => { + let llfn = ccx.get_intrinsic(&("llvm.expect.i1")); + let expected = C_bool(ccx, false); + Call(bcx, llfn, &[llargs[0], expected], None, call_debug_location) + } + // This requires that atomic intrinsics follow a specific naming pattern: // "atomic_[_]", and no ordering means SeqCst (_, name) if name.starts_with("atomic_") => { @@ -877,7 +922,14 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - Result::new(bcx, llresult) + match dest { + expr::Ignore => { + Result::new(bcx, llval) + } + expr::SaveIn(_) => { + Result::new(bcx, llresult) + } + } } fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 9df0d4aa56b8a..db1ac58039427 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -5239,6 +5239,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &ast::ForeignItem) { tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrAnon(0))), param(ccx, 0))], tcx.types.u64), + "likely" | "unlikely" => (0, vec![tcx.types.bool], tcx.types.bool), ref other => { span_err!(tcx.sess, it.span, E0093, diff --git a/src/libstd/rt/unwind/mod.rs b/src/libstd/rt/unwind/mod.rs index c403976745aa4..fcf7597e43413 100644 --- a/src/libstd/rt/unwind/mod.rs +++ b/src/libstd/rt/unwind/mod.rs @@ -306,7 +306,7 @@ pub unsafe fn register(f: Callback) -> bool { // been incremented, but the callback has not been stored. We're // guaranteed that the slot we're storing into is 0. 
n if n < MAX_CALLBACKS => { - let prev = CALLBACKS[n].swap(mem::transmute(f), Ordering::SeqCst); + let prev = CALLBACKS[n].swap(f as usize, Ordering::SeqCst); rtassert!(prev == 0); true } diff --git a/src/test/codegen/expect.rs b/src/test/codegen/expect.rs new file mode 100644 index 0000000000000..1ba200bfcd030 --- /dev/null +++ b/src/test/codegen/expect.rs @@ -0,0 +1,56 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// compile-flags: -C no-prepopulate-passes + +// The expect intrinsic needs to have the output from the call fed directly +// into the branch, these tests make sure that happens + +#![feature(core)] + +use std::intrinsics::{likely,unlikely}; + +// CHECK-LABEL: direct_likely +#[no_mangle] +pub fn direct_likely(x: i32) { + unsafe { + // CHECK: [[VAR:%[0-9]+]] = call i1 @llvm.expect.i1(i1 %{{.*}}, i1 true) + // CHECK: br i1 [[VAR]], label %{{.+}}, label %{{.*}} + if likely(x == 1) {} + } +} + +// CHECK-LABEL: direct_unlikely +#[no_mangle] +pub fn direct_unlikely(x: i32) { + unsafe { + // CHECK: [[VAR:%[0-9]+]] = call i1 @llvm.expect.i1(i1 %{{.*}}, i1 false) + // CHECK: br i1 [[VAR]], label %{{.+}}, label %{{.*}} + if unlikely(x == 1) {} + } +} + +// CHECK-LABEL: wrapped_likely +// Make sure you can wrap just the call to the intrinsic in `unsafe` and still +// have it work +#[no_mangle] +pub fn wrapped_likely(x: i32) { + // CHECK: [[VAR:%[0-9]+]] = call i1 @llvm.expect.i1(i1 %{{.*}}, i1 true) + // CHECK: br i1 [[VAR]], label %{{.+}}, label %{{.*}} + if unsafe { likely(x == 1) } {} +} + +// CHECK-LABEL: wrapped_unlikely +#[no_mangle] +pub fn wrapped_unlikely(x: i32) { + // CHECK: [[VAR:%[0-9]+]] = call i1 @llvm.expect.i1(i1 %{{.*}}, i1 false) + // 
CHECK: br i1 [[VAR]], label %{{.+}}, label %{{.*}} + if unsafe { unlikely(x == 1) } {} +}