diff --git a/src/librustc/middle/trans/foreign.rs b/src/librustc/middle/trans/foreign.rs
index 460d7407e5069..58c77f037ded3 100644
--- a/src/librustc/middle/trans/foreign.rs
+++ b/src/librustc/middle/trans/foreign.rs
@@ -845,6 +845,26 @@ pub fn trans_intrinsic(ccx: @CrateContext,
                         T_ptr(T_nil()));
             Store(bcx, morestack_addr, fcx.llretptr.get());
         }
+        ~"memcpy32" => {
+            let dst_ptr = get_param(decl, first_real_arg);
+            let src_ptr = get_param(decl, first_real_arg + 1);
+            let size = get_param(decl, first_real_arg + 2);
+            let align = C_i32(1);
+            let volatile = C_i1(false);
+            let llfn = *bcx.ccx().intrinsics.get(
+                &~"llvm.memcpy.p0i8.p0i8.i32");
+            Call(bcx, llfn, [dst_ptr, src_ptr, size, align, volatile]);
+        }
+        ~"memcpy64" => {
+            let dst_ptr = get_param(decl, first_real_arg);
+            let src_ptr = get_param(decl, first_real_arg + 1);
+            let size = get_param(decl, first_real_arg + 2);
+            let align = C_i32(1);
+            let volatile = C_i1(false);
+            let llfn = *bcx.ccx().intrinsics.get(
+                &~"llvm.memcpy.p0i8.p0i8.i64");
+            Call(bcx, llfn, [dst_ptr, src_ptr, size, align, volatile]);
+        }
         ~"memmove32" => {
             let dst_ptr = get_param(decl, first_real_arg);
             let src_ptr = get_param(decl, first_real_arg + 1);
diff --git a/src/librustc/middle/trans/type_use.rs b/src/librustc/middle/trans/type_use.rs
index 365b75b8dd43d..d4c34a3ace53b 100644
--- a/src/librustc/middle/trans/type_use.rs
+++ b/src/librustc/middle/trans/type_use.rs
@@ -135,7 +135,7 @@ pub fn type_uses_for(ccx: @CrateContext, fn_id: def_id, n_tps: uint)
       ~"visit_tydesc" | ~"forget" | ~"frame_address" |
       ~"morestack_addr" => 0,
 
-      ~"memmove32" | ~"memmove64" => 0,
+      ~"memcpy32" | ~"memcpy64" | ~"memmove32" | ~"memmove64" => 0,
 
       ~"sqrtf32" | ~"sqrtf64" | ~"powif32" | ~"powif64" |
       ~"sinf32" | ~"sinf64" | ~"cosf32" | ~"cosf64" |
diff --git a/src/librustc/middle/typeck/check/mod.rs b/src/librustc/middle/typeck/check/mod.rs
index 6131280f90038..6ae03ee45062d 100644
--- a/src/librustc/middle/typeck/check/mod.rs
+++ b/src/librustc/middle/typeck/check/mod.rs
@@ -3451,122 +3451,152 @@ pub fn check_intrinsic_type(ccx: @mut CrateCtxt, it: @ast::foreign_item) {
     let tcx = ccx.tcx;
     let (n_tps, inputs, output) = match *ccx.tcx.sess.str_of(it.ident) {
-      ~"size_of" |
-      ~"pref_align_of" | ~"min_align_of" => (1u, ~[], ty::mk_uint()),
-      ~"init" => (1u, ~[], param(ccx, 0u)),
-      ~"uninit" => (1u, ~[], param(ccx, 0u)),
-      ~"forget" => (1u, ~[ param(ccx, 0) ], ty::mk_nil()),
-      ~"transmute" => (2, ~[ param(ccx, 0) ], param(ccx, 1)),
-      ~"move_val" | ~"move_val_init" => {
-        (1u,
+        ~"size_of" |
+        ~"pref_align_of" | ~"min_align_of" => (1u, ~[], ty::mk_uint()),
+        ~"init" => (1u, ~[], param(ccx, 0u)),
+        ~"uninit" => (1u, ~[], param(ccx, 0u)),
+        ~"forget" => (1u, ~[ param(ccx, 0) ], ty::mk_nil()),
+        ~"transmute" => (2, ~[ param(ccx, 0) ], param(ccx, 1)),
+        ~"move_val" | ~"move_val_init" => {
+            (1u,
+               ~[
+                  ty::mk_mut_rptr(tcx, ty::re_bound(ty::br_anon(0)), param(ccx, 0)),
+                  param(ccx, 0u)
+               ],
+            ty::mk_nil())
+        }
+        ~"needs_drop" => (1u, ~[], ty::mk_bool()),
+
+        ~"atomic_cxchg" | ~"atomic_cxchg_acq"| ~"atomic_cxchg_rel" => {
+            (0,
               ~[
-            ty::mk_mut_rptr(tcx, ty::re_bound(ty::br_anon(0)), param(ccx, 0)),
-            param(ccx, 0u)
-          ],
-        ty::mk_nil())
-      }
-      ~"needs_drop" => (1u, ~[], ty::mk_bool()),
-
-      ~"atomic_cxchg" | ~"atomic_cxchg_acq"| ~"atomic_cxchg_rel" => {
-        (0,
-         ~[
-            ty::mk_mut_rptr(tcx,
-                            ty::re_bound(ty::br_anon(0)),
-                            ty::mk_int()),
-            ty::mk_int(),
-            ty::mk_int()
-         ],
-        ty::mk_int())
-      }
-      ~"atomic_load" | ~"atomic_load_acq" => {
-        (0,
-         ~[
-            ty::mk_imm_rptr(tcx, ty::re_bound(ty::br_anon(0)),
-                            ty::mk_int())
-         ],
-        ty::mk_int())
-      }
-      ~"atomic_store" | ~"atomic_store_rel" => {
-        (0,
-         ~[
-            ty::mk_mut_rptr(tcx, ty::re_bound(ty::br_anon(0)), ty::mk_int()),
-            ty::mk_int()
-         ],
-        ty::mk_nil())
-      }
-      ~"atomic_xchg" | ~"atomic_xadd" | ~"atomic_xsub" |
-      ~"atomic_xchg_acq" | ~"atomic_xadd_acq" | ~"atomic_xsub_acq" |
-      ~"atomic_xchg_rel" | ~"atomic_xadd_rel" | ~"atomic_xsub_rel" => {
-        (0,
-         ~[
-            ty::mk_mut_rptr(tcx, ty::re_bound(ty::br_anon(0)), ty::mk_int()),
-            ty::mk_int()
-         ],
-        ty::mk_int())
-      }
-
-      ~"get_tydesc" => {
-        // FIXME (#3730): return *intrinsic::tydesc, not *()
-        (1u, ~[], ty::mk_nil_ptr(ccx.tcx))
-      }
-      ~"visit_tydesc" => {
-        let tydesc_name = special_idents::tydesc;
-        assert!(tcx.intrinsic_defs.contains_key(&tydesc_name));
-        let (_, tydesc_ty) = tcx.intrinsic_defs.get_copy(&tydesc_name);
-        let (_, visitor_object_ty) = ty::visitor_object_ty(tcx);
-        let td_ptr = ty::mk_ptr(ccx.tcx, ty::mt {
-            ty: tydesc_ty,
-            mutbl: ast::m_imm
-        });
-        (0, ~[ td_ptr, visitor_object_ty ], ty::mk_nil())
-      }
-      ~"frame_address" => {
-        let fty = ty::mk_closure(ccx.tcx, ty::ClosureTy {
-            purity: ast::impure_fn,
-            sigil: ast::BorrowedSigil,
-            onceness: ast::Once,
-            region: ty::re_bound(ty::br_anon(0)),
-            bounds: ty::EmptyBuiltinBounds(),
-            sig: ty::FnSig {
-                bound_lifetime_names: opt_vec::Empty,
-                inputs: ~[ty::mk_imm_ptr(ccx.tcx, ty::mk_mach_uint(ast::ty_u8))],
-                output: ty::mk_nil()
-            }
-        });
-        (0u, ~[fty], ty::mk_nil())
-      }
-      ~"morestack_addr" => {
-        (0u, ~[], ty::mk_nil_ptr(ccx.tcx))
-      }
-      ~"memmove32" => {
-        (0,
-         ~[
-            ty::mk_ptr(tcx, ty::mt {
-                ty: ty::mk_u8(),
-                mutbl: ast::m_mutbl
-            }),
-            ty::mk_ptr(tcx, ty::mt {
-                ty: ty::mk_u8(),
-                mutbl: ast::m_imm
-            }),
-            ty::mk_u32()
-         ],
-        ty::mk_nil())
-      }
-      ~"memmove64" => {
-        (0,
-         ~[
-            ty::mk_ptr(tcx, ty::mt {
-                ty: ty::mk_u8(),
-                mutbl: ast::m_mutbl
-            }),
-            ty::mk_ptr(tcx, ty::mt {
-                ty: ty::mk_u8(),
-                mutbl: ast::m_imm
-            }),
-            ty::mk_u64()
-         ],
-        ty::mk_nil())
-      }
+                  ty::mk_mut_rptr(tcx,
+                                  ty::re_bound(ty::br_anon(0)),
+                                  ty::mk_int()),
+                  ty::mk_int(),
+                  ty::mk_int()
+               ],
+               ty::mk_int())
+        }
+        ~"atomic_load" | ~"atomic_load_acq" => {
+            (0,
+               ~[
+                  ty::mk_imm_rptr(tcx, ty::re_bound(ty::br_anon(0)), ty::mk_int())
+               ],
+               ty::mk_int())
+        }
+        ~"atomic_store" | ~"atomic_store_rel" => {
+            (0,
+               ~[
+                  ty::mk_mut_rptr(tcx, ty::re_bound(ty::br_anon(0)), ty::mk_int()),
+                  ty::mk_int()
+               ],
+               ty::mk_nil())
+        }
+        ~"atomic_xchg" | ~"atomic_xadd" | ~"atomic_xsub" |
+        ~"atomic_xchg_acq" | ~"atomic_xadd_acq" | ~"atomic_xsub_acq" |
+        ~"atomic_xchg_rel" | ~"atomic_xadd_rel" | ~"atomic_xsub_rel" => {
+            (0,
+               ~[
+                  ty::mk_mut_rptr(tcx, ty::re_bound(ty::br_anon(0)), ty::mk_int()),
+                  ty::mk_int()
+               ],
+               ty::mk_int())
+        }
+
+        ~"get_tydesc" => {
+            // FIXME (#3730): return *intrinsic::tydesc, not *()
+            (1u, ~[], ty::mk_nil_ptr(ccx.tcx))
+        }
+        ~"visit_tydesc" => {
+            let tydesc_name = special_idents::tydesc;
+            assert!(tcx.intrinsic_defs.contains_key(&tydesc_name));
+            let (_, tydesc_ty) = tcx.intrinsic_defs.get_copy(&tydesc_name);
+            let (_, visitor_object_ty) = ty::visitor_object_ty(tcx);
+            let td_ptr = ty::mk_ptr(ccx.tcx, ty::mt {
+                ty: tydesc_ty,
+                mutbl: ast::m_imm
+            });
+            (0, ~[ td_ptr, visitor_object_ty ], ty::mk_nil())
+        }
+        ~"frame_address" => {
+            let fty = ty::mk_closure(ccx.tcx, ty::ClosureTy {
+                purity: ast::impure_fn,
+                sigil: ast::BorrowedSigil,
+                onceness: ast::Once,
+                region: ty::re_bound(ty::br_anon(0)),
+                bounds: ty::EmptyBuiltinBounds(),
+                sig: ty::FnSig {
+                    bound_lifetime_names: opt_vec::Empty,
+                    inputs: ~[ty::mk_imm_ptr(ccx.tcx, ty::mk_mach_uint(ast::ty_u8))],
+                    output: ty::mk_nil()
+                }
+            });
+            (0u, ~[fty], ty::mk_nil())
+        }
+        ~"morestack_addr" => {
+            (0u, ~[], ty::mk_nil_ptr(ccx.tcx))
+        }
+        ~"memcpy32" => {
+            (0,
+               ~[
+                  ty::mk_ptr(tcx, ty::mt {
+                     ty: ty::mk_u8(),
+                     mutbl: ast::m_mutbl
+                  }),
+                  ty::mk_ptr(tcx, ty::mt {
+                     ty: ty::mk_u8(),
+                     mutbl: ast::m_imm
+                  }),
+                  ty::mk_u32()
+               ],
+               ty::mk_nil())
+        }
+        ~"memcpy64" => {
+            (0,
+               ~[
+                  ty::mk_ptr(tcx, ty::mt {
+                     ty: ty::mk_u8(),
+                     mutbl: ast::m_mutbl
+                  }),
+                  ty::mk_ptr(tcx, ty::mt {
+                     ty: ty::mk_u8(),
+                     mutbl: ast::m_imm
+                  }),
+                  ty::mk_u64()
+               ],
+               ty::mk_nil())
+        }
+        ~"memmove32" => {
+            (0,
+               ~[
+                  ty::mk_ptr(tcx, ty::mt {
+                     ty: ty::mk_u8(),
+                     mutbl: ast::m_mutbl
+                  }),
+                  ty::mk_ptr(tcx, ty::mt {
+                     ty: ty::mk_u8(),
+                     mutbl: ast::m_imm
+                  }),
+                  ty::mk_u32()
+               ],
+               ty::mk_nil())
+        }
+        ~"memmove64" => {
+            (0,
+               ~[
+                  ty::mk_ptr(tcx, ty::mt {
+                     ty: ty::mk_u8(),
+                     mutbl: ast::m_mutbl
+                  }),
+                  ty::mk_ptr(tcx, ty::mt {
+                     ty: ty::mk_u8(),
+                     mutbl: ast::m_imm
+                  }),
+                  ty::mk_u64()
+               ],
+               ty::mk_nil())
+        }
         ~"sqrtf32" => (0, ~[ ty::mk_f32() ], ty::mk_f32()),
         ~"sqrtf64" => (0, ~[ ty::mk_f64() ], ty::mk_f64()),
         ~"powif32" => {
diff --git a/src/libstd/ptr.rs b/src/libstd/ptr.rs
index f6cc00ccc8681..d1c0ffe795311 100644
--- a/src/libstd/ptr.rs
+++ b/src/libstd/ptr.rs
@@ -103,6 +103,7 @@ pub unsafe fn copy_memory<T>(dst: *mut T, src: *const T, count: uint) {
     let n = count * sys::size_of::<T>();
     memmove32(dst as *mut u8, src as *u8, n as u32);
 }
+
 #[inline(always)]
 #[cfg(target_word_size = "64")]
 pub unsafe fn copy_memory<T>(dst: *mut T, src: *const T, count: uint) {
@@ -111,6 +112,28 @@ pub unsafe fn copy_memory<T>(dst: *mut T, src: *const T, count: uint) {
     let n = count * sys::size_of::<T>();
     memmove64(dst as *mut u8, src as *u8, n as u64);
 }
+#[inline(always)]
+#[cfg(target_word_size = "32")]
+pub unsafe fn copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T, count: uint) {
+    #[cfg(stage0)]
+    use memcpy32 = unstable::intrinsics::memmove32;
+    #[cfg(not(stage0))]
+    use unstable::intrinsics::memcpy32;
+    let n = count * sys::size_of::<T>();
+    memcpy32(dst as *mut u8, src as *u8, n as u32);
+}
+
+#[inline(always)]
+#[cfg(target_word_size = "64")]
+pub unsafe fn copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T, count: uint) {
+    #[cfg(stage0)]
+    use memcpy64 = unstable::intrinsics::memmove64;
+    #[cfg(not(stage0))]
+    use unstable::intrinsics::memcpy64;
+    let n = count * sys::size_of::<T>();
+    memcpy64(dst as *mut u8, src as *u8, n as u64);
+}
+
 #[inline(always)]
 pub unsafe fn set_memory<T>(dst: *mut T, c: int, count: uint) {
     let n = count * sys::size_of::<T>();
diff --git a/src/libstd/unstable/intrinsics.rs b/src/libstd/unstable/intrinsics.rs
index d476822819eb9..521708621fc89 100644
--- a/src/libstd/unstable/intrinsics.rs
+++ b/src/libstd/unstable/intrinsics.rs
@@ -31,7 +31,6 @@ A quick refresher on memory ordering:
 with atomic types and is equivalent to Java's `volatile`.
 
 */
-
 #[abi = "rust-intrinsic"]
 pub extern "rust-intrinsic" {
 
@@ -128,6 +127,13 @@ pub extern "rust-intrinsic" {
     /// Get the address of the `__morestack` stack growth function.
    pub fn morestack_addr() -> *();
 
+    /// Equivalent to the `llvm.memcpy.p0i8.0i8.i32` intrinsic.
+    #[cfg(not(stage0))]
+    pub fn memcpy32(dst: *mut u8, src: *u8, size: u32);
+    /// Equivalent to the `llvm.memcpy.p0i8.0i8.i64` intrinsic.
+    #[cfg(not(stage0))]
+    pub fn memcpy64(dst: *mut u8, src: *u8, size: u64);
+
     /// Equivalent to the `llvm.memmove.p0i8.0i8.i32` intrinsic.
     pub fn memmove32(dst: *mut u8, src: *u8, size: u32);
     /// Equivalent to the `llvm.memmove.p0i8.0i8.i64` intrinsic.
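
Reviewer note (not part of the patch): the `#[cfg(stage0)]` branches in ptr.rs above alias the new `memcpy32`/`memcpy64` names to the existing `memmove32`/`memmove64` intrinsics, because the stage0 snapshot compiler predates the new intrinsics; a memmove is still a correct, if potentially slower, way to copy regions that happen not to overlap, so the wrappers keep building during bootstrap. The sketch below only illustrates the intended contract of `copy_nonoverlapping_memory`: the caller must guarantee the two regions are disjoint, since the call may be lowered to `llvm.memcpy`, while `copy_memory` (`llvm.memmove`) remains the right call for possibly overlapping regions. The helper name `clone_into` is hypothetical.

    // Hypothetical caller, assuming only the signatures introduced in ptr.rs above.
    unsafe fn clone_into<T>(dst: *mut T, src: *T) {
        // The caller promises that `dst` and `src` do not overlap,
        // so the memcpy-backed copy is the right primitive here.
        ptr::copy_nonoverlapping_memory(dst, src, 1);
    }
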
diff --git a/src/libstd/util.rs b/src/libstd/util.rs
index e2b91594d123c..400a13896be8c 100644
--- a/src/libstd/util.rs
+++ b/src/libstd/util.rs
@@ -51,7 +51,18 @@ pub fn with(
 #[inline(always)]
 pub fn swap<T>(x: &mut T, y: &mut T) {
     unsafe {
-        swap_ptr(ptr::to_mut_unsafe_ptr(x), ptr::to_mut_unsafe_ptr(y));
+        // Give ourselves some scratch space to work with
+        let mut tmp: T = intrinsics::uninit();
+        let t: *mut T = &mut tmp;
+
+        // Perform the swap, `&mut` pointers never alias
+        ptr::copy_nonoverlapping_memory(t, x, 1);
+        ptr::copy_nonoverlapping_memory(x, y, 1);
+        ptr::copy_nonoverlapping_memory(y, t, 1);
+
+        // y and t now point to the same thing, but we need to completely forget `tmp`
+        // because it's no longer relevant.
+        cast::forget(tmp);
     }
 }
 
@@ -61,18 +72,16 @@ pub fn swap<T>(x: &mut T, y: &mut T) {
  */
 #[inline]
 pub unsafe fn swap_ptr<T>(x: *mut T, y: *mut T) {
-    if x == y { return }
-
     // Give ourselves some scratch space to work with
     let mut tmp: T = intrinsics::uninit();
-    let t = ptr::to_mut_unsafe_ptr(&mut tmp);
+    let t: *mut T = &mut tmp;
 
     // Perform the swap
     ptr::copy_memory(t, x, 1);
     ptr::copy_memory(x, y, 1);
     ptr::copy_memory(y, t, 1);
 
-    // y and t now point to the same thing, but we need to completely forget t
+    // y and t now point to the same thing, but we need to completely forget `tmp`
    // because it's no longer relevant.
     cast::forget(tmp);
 }
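
Reviewer note (not part of the patch): after this change the safe `swap` no longer calls `swap_ptr`; it copies through a fresh stack temporary with the non-overlapping primitive, which is valid because the temporary is distinct storage and, as the added comment points out, two `&mut` pointers can never alias. The temporary is then released with `cast::forget` so its destructor never runs on a value that `*y` now owns. A minimal usage sketch, assuming the `std::util` path used elsewhere in this diff:

    use std::util;

    fn main() {
        let mut a = ~"left";
        let mut b = ~"right";
        util::swap(&mut a, &mut b);
        // The owned strings changed places without being copied or freed.
        assert!(a == ~"right");
        assert!(b == ~"left");
    }
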