diff --git a/src/librustc/middle/typeck/check/mod.rs b/src/librustc/middle/typeck/check/mod.rs index 9b8ce481de958..d2176d767f570 100644 --- a/src/librustc/middle/typeck/check/mod.rs +++ b/src/librustc/middle/typeck/check/mod.rs @@ -3956,31 +3956,25 @@ pub fn check_intrinsic_type(ccx: @CrateCtxt, it: &ast::ForeignItem) { //We only care about the operation here match split[1] { - "cxchg" => (1, ~[ty::mk_mut_rptr(tcx, - ty::ReLateBound(it.id, ty::BrAnon(0)), - param(ccx, 0)), + "cxchg" => (1, ~[ty::mk_mut_ptr(tcx, param(ccx, 0)), param(ccx, 0), param(ccx, 0), ], param(ccx, 0)), "load" => (1, ~[ - ty::mk_imm_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), - param(ccx, 0)) + ty::mk_imm_ptr(tcx, param(ccx, 0)) ], param(ccx, 0)), "store" => (1, ~[ - ty::mk_mut_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), - param(ccx, 0)), + ty::mk_mut_ptr(tcx, param(ccx, 0)), param(ccx, 0) ], ty::mk_nil()), "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | "umax" | "umin" => { - (1, ~[ty::mk_mut_rptr(tcx, - ty::ReLateBound(it.id, ty::BrAnon(0)), - param(ccx, 0)), param(ccx, 0) ], + (1, ~[ty::mk_mut_ptr(tcx, param(ccx, 0)), param(ccx, 0) ], param(ccx, 0)) } "fence" => { diff --git a/src/libstd/intrinsics.rs b/src/libstd/intrinsics.rs index 7c2db7688fd0c..76c7d66bd823e 100644 --- a/src/libstd/intrinsics.rs +++ b/src/libstd/intrinsics.rs @@ -164,6 +164,7 @@ pub trait TyVisitor { fn visit_self(&mut self) -> bool; } +#[cfg(stage0)] extern "rust-intrinsic" { pub fn atomic_cxchg(dst: &mut T, old: T, src: T) -> T; pub fn atomic_cxchg_acq(dst: &mut T, old: T, src: T) -> T; @@ -244,6 +245,96 @@ extern "rust-intrinsic" { pub fn atomic_umax_rel(dst: &mut T, src: T) -> T; pub fn atomic_umax_acqrel(dst: &mut T, src: T) -> T; pub fn atomic_umax_relaxed(dst: &mut T, src: T) -> T; +} + +#[cfg(not(stage0))] +extern "rust-intrinsic" { + + // NB: These intrinsics take unsafe pointers because they mutate aliased + // memory, which is not valid for either `&` or `&mut`. 
+ + pub fn atomic_cxchg(dst: *mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_acq(dst: *mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_rel(dst: *mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_acqrel(dst: *mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_relaxed(dst: *mut T, old: T, src: T) -> T; + + pub fn atomic_load(src: *T) -> T; + pub fn atomic_load_acq(src: *T) -> T; + pub fn atomic_load_relaxed(src: *T) -> T; + + pub fn atomic_store(dst: *mut T, val: T); + pub fn atomic_store_rel(dst: *mut T, val: T); + pub fn atomic_store_relaxed(dst: *mut T, val: T); + + pub fn atomic_xchg(dst: *mut T, src: T) -> T; + pub fn atomic_xchg_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xchg_rel(dst: *mut T, src: T) -> T; + pub fn atomic_xchg_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_xchg_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_xadd(dst: *mut T, src: T) -> T; + pub fn atomic_xadd_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xadd_rel(dst: *mut T, src: T) -> T; + pub fn atomic_xadd_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_xadd_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_xsub(dst: *mut T, src: T) -> T; + pub fn atomic_xsub_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xsub_rel(dst: *mut T, src: T) -> T; + pub fn atomic_xsub_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_xsub_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_and(dst: *mut T, src: T) -> T; + pub fn atomic_and_acq(dst: *mut T, src: T) -> T; + pub fn atomic_and_rel(dst: *mut T, src: T) -> T; + pub fn atomic_and_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_and_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_nand(dst: *mut T, src: T) -> T; + pub fn atomic_nand_acq(dst: *mut T, src: T) -> T; + pub fn atomic_nand_rel(dst: *mut T, src: T) -> T; + pub fn atomic_nand_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_nand_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_or(dst: *mut T, src: T) -> T; + pub fn atomic_or_acq(dst: *mut T, src: T) -> T; + 
pub fn atomic_or_rel(dst: *mut T, src: T) -> T; + pub fn atomic_or_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_or_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_xor(dst: *mut T, src: T) -> T; + pub fn atomic_xor_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xor_rel(dst: *mut T, src: T) -> T; + pub fn atomic_xor_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_xor_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_max(dst: *mut T, src: T) -> T; + pub fn atomic_max_acq(dst: *mut T, src: T) -> T; + pub fn atomic_max_rel(dst: *mut T, src: T) -> T; + pub fn atomic_max_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_max_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_min(dst: *mut T, src: T) -> T; + pub fn atomic_min_acq(dst: *mut T, src: T) -> T; + pub fn atomic_min_rel(dst: *mut T, src: T) -> T; + pub fn atomic_min_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_min_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_umin(dst: *mut T, src: T) -> T; + pub fn atomic_umin_acq(dst: *mut T, src: T) -> T; + pub fn atomic_umin_rel(dst: *mut T, src: T) -> T; + pub fn atomic_umin_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_umin_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_umax(dst: *mut T, src: T) -> T; + pub fn atomic_umax_acq(dst: *mut T, src: T) -> T; + pub fn atomic_umax_rel(dst: *mut T, src: T) -> T; + pub fn atomic_umax_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_umax_relaxed(dst: *mut T, src: T) -> T; +} + +extern "rust-intrinsic" { pub fn atomic_fence(); pub fn atomic_fence_acq(); diff --git a/src/libstd/sync/atomics.rs b/src/libstd/sync/atomics.rs index b4d465c0397de..e38dc6c62011e 100644 --- a/src/libstd/sync/atomics.rs +++ b/src/libstd/sync/atomics.rs @@ -23,6 +23,7 @@ use intrinsics; use cast; +use cast::transmute_mut_unsafe; use std::kinds::marker; use option::{Option,Some,None}; use ops::Drop; @@ -32,7 +33,8 @@ use ops::Drop; */ pub struct AtomicFlag { priv v: int, - priv nopod: marker::NoPod + priv nopod: 
marker::NoPod, + priv nofreeze: marker::NoFreeze } /** @@ -40,7 +42,8 @@ pub struct AtomicFlag { */ pub struct AtomicBool { priv v: uint, - priv nopod: marker::NoPod + priv nopod: marker::NoPod, + priv nofreeze: marker::NoFreeze } /** @@ -48,7 +51,8 @@ pub struct AtomicBool { */ pub struct AtomicInt { priv v: int, - priv nopod: marker::NoPod + priv nopod: marker::NoPod, + priv nofreeze: marker::NoFreeze } /** @@ -56,16 +60,8 @@ pub struct AtomicInt { */ pub struct AtomicUint { priv v: uint, - priv nopod: marker::NoPod -} - -/** - * An unsigned atomic integer type that is forced to be 64-bits. This does not - * support all operations. - */ -pub struct AtomicU64 { - priv v: u64, - priv nopod: marker::NoPod + priv nopod: marker::NoPod, + priv nofreeze: marker::NoFreeze } /** @@ -73,7 +69,8 @@ pub struct AtomicU64 { */ pub struct AtomicPtr { priv p: uint, - priv nopod: marker::NoPod + priv nopod: marker::NoPod, + priv nofreeze: marker::NoFreeze } /** @@ -82,6 +79,7 @@ pub struct AtomicPtr { #[unsafe_no_drop_flag] pub struct AtomicOption { priv p: uint, + priv nofreeze: marker::NoFreeze } pub enum Ordering { @@ -92,24 +90,31 @@ pub enum Ordering { SeqCst } -pub static INIT_ATOMIC_FLAG : AtomicFlag = AtomicFlag { v: 0, nopod: marker::NoPod }; -pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { v: 0, nopod: marker::NoPod }; -pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { v: 0, nopod: marker::NoPod }; -pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { v: 0, nopod: marker::NoPod }; -pub static INIT_ATOMIC_U64 : AtomicU64 = AtomicU64 { v: 0, nopod: marker::NoPod }; +pub static INIT_ATOMIC_FLAG : AtomicFlag = AtomicFlag { + v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze +}; +pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { + v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze +}; +pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { + v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze +}; +pub static INIT_ATOMIC_UINT : AtomicUint = 
AtomicUint { + v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze +}; impl AtomicFlag { pub fn new() -> AtomicFlag { - AtomicFlag { v: 0, nopod: marker::NoPod} + AtomicFlag { v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze } } /** * Clears the atomic flag */ #[inline] - pub fn clear(&mut self, order: Ordering) { - unsafe {atomic_store(&mut self.v, 0, order)} + pub fn clear(&self, order: Ordering) { + unsafe {atomic_store(transmute_mut_unsafe(&self.v), 0, order)} } /** @@ -117,14 +122,14 @@ impl AtomicFlag { * flag. */ #[inline] - pub fn test_and_set(&mut self, order: Ordering) -> bool { - unsafe { atomic_compare_and_swap(&mut self.v, 0, 1, order) > 0 } + pub fn test_and_set(&self, order: Ordering) -> bool { + unsafe { atomic_compare_and_swap(transmute_mut_unsafe(&self.v), 0, 1, order) > 0 } } } impl AtomicBool { pub fn new(v: bool) -> AtomicBool { - AtomicBool { v: if v { 1 } else { 0 }, nopod: marker::NoPod } + AtomicBool { v: if v { 1 } else { 0 }, nopod: marker::NoPod, nofreeze: marker::NoFreeze } } #[inline] @@ -133,63 +138,63 @@ impl AtomicBool { } #[inline] - pub fn store(&mut self, val: bool, order: Ordering) { + pub fn store(&self, val: bool, order: Ordering) { let val = if val { 1 } else { 0 }; - unsafe { atomic_store(&mut self.v, val, order); } + unsafe { atomic_store(transmute_mut_unsafe(&self.v), val, order); } } #[inline] - pub fn swap(&mut self, val: bool, order: Ordering) -> bool { + pub fn swap(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_swap(&mut self.v, val, order) > 0 } + unsafe { atomic_swap(transmute_mut_unsafe(&self.v), val, order) > 0 } } #[inline] - pub fn compare_and_swap(&mut self, old: bool, new: bool, order: Ordering) -> bool { + pub fn compare_and_swap(&self, old: bool, new: bool, order: Ordering) -> bool { let old = if old { 1 } else { 0 }; let new = if new { 1 } else { 0 }; - unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) > 0 } + unsafe { 
atomic_compare_and_swap(transmute_mut_unsafe(&self.v), old, new, order) > 0 } } /// Returns the old value #[inline] - pub fn fetch_and(&mut self, val: bool, order: Ordering) -> bool { + pub fn fetch_and(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_and(&mut self.v, val, order) > 0 } + unsafe { atomic_and(transmute_mut_unsafe(&self.v), val, order) > 0 } } /// Returns the old value #[inline] - pub fn fetch_nand(&mut self, val: bool, order: Ordering) -> bool { + pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_nand(&mut self.v, val, order) > 0 } + unsafe { atomic_nand(transmute_mut_unsafe(&self.v), val, order) > 0 } } /// Returns the old value #[inline] - pub fn fetch_or(&mut self, val: bool, order: Ordering) -> bool { + pub fn fetch_or(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_or(&mut self.v, val, order) > 0 } + unsafe { atomic_or(transmute_mut_unsafe(&self.v), val, order) > 0 } } /// Returns the old value #[inline] - pub fn fetch_xor(&mut self, val: bool, order: Ordering) -> bool { + pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_xor(&mut self.v, val, order) > 0 } + unsafe { atomic_xor(transmute_mut_unsafe(&self.v), val, order) > 0 } } } impl AtomicInt { pub fn new(v: int) -> AtomicInt { - AtomicInt { v:v, nopod: marker::NoPod} + AtomicInt { v:v, nopod: marker::NoPod, nofreeze: marker::NoFreeze } } #[inline] @@ -198,72 +203,36 @@ impl AtomicInt { } #[inline] - pub fn store(&mut self, val: int, order: Ordering) { - unsafe { atomic_store(&mut self.v, val, order); } + pub fn store(&self, val: int, order: Ordering) { + unsafe { atomic_store(transmute_mut_unsafe(&self.v), val, order); } } #[inline] - pub fn swap(&mut self, val: int, order: Ordering) -> int { - unsafe { atomic_swap(&mut self.v, val, order) } + pub fn swap(&self, val: 
int, order: Ordering) -> int { + unsafe { atomic_swap(transmute_mut_unsafe(&self.v), val, order) } } #[inline] - pub fn compare_and_swap(&mut self, old: int, new: int, order: Ordering) -> int { - unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + pub fn compare_and_swap(&self, old: int, new: int, order: Ordering) -> int { + unsafe { atomic_compare_and_swap(transmute_mut_unsafe(&self.v), old, new, order) } } /// Returns the old value (like __sync_fetch_and_add). #[inline] - pub fn fetch_add(&mut self, val: int, order: Ordering) -> int { - unsafe { atomic_add(&mut self.v, val, order) } + pub fn fetch_add(&self, val: int, order: Ordering) -> int { + unsafe { atomic_add(transmute_mut_unsafe(&self.v), val, order) } } /// Returns the old value (like __sync_fetch_and_sub). #[inline] - pub fn fetch_sub(&mut self, val: int, order: Ordering) -> int { - unsafe { atomic_sub(&mut self.v, val, order) } - } -} - -impl AtomicU64 { - pub fn new(v: u64) -> AtomicU64 { - AtomicU64 { v:v, nopod: marker::NoPod } - } - - #[inline] - pub fn load(&self, order: Ordering) -> u64 { - unsafe { atomic_load(&self.v, order) } - } - - #[inline] - pub fn store(&mut self, val: u64, order: Ordering) { - unsafe { atomic_store(&mut self.v, val, order); } - } - - #[inline] - pub fn swap(&mut self, val: u64, order: Ordering) -> u64 { - unsafe { atomic_swap(&mut self.v, val, order) } - } - - #[inline] - pub fn compare_and_swap(&mut self, old: u64, new: u64, order: Ordering) -> u64 { - unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } - } - - #[inline] - pub fn fetch_add(&mut self, val: u64, order: Ordering) -> u64 { - unsafe { atomic_add(&mut self.v, val, order) } - } - - #[inline] - pub fn fetch_sub(&mut self, val: u64, order: Ordering) -> u64 { - unsafe { atomic_sub(&mut self.v, val, order) } + pub fn fetch_sub(&self, val: int, order: Ordering) -> int { + unsafe { atomic_sub(transmute_mut_unsafe(&self.v), val, order) } } } impl AtomicUint { pub fn new(v: uint) -> 
AtomicUint { - AtomicUint { v:v, nopod: marker::NoPod } + AtomicUint { v:v, nopod: marker::NoPod, nofreeze: marker::NoFreeze } } #[inline] @@ -272,36 +241,36 @@ impl AtomicUint { } #[inline] - pub fn store(&mut self, val: uint, order: Ordering) { - unsafe { atomic_store(&mut self.v, val, order); } + pub fn store(&self, val: uint, order: Ordering) { + unsafe { atomic_store(transmute_mut_unsafe(&self.v), val, order); } } #[inline] - pub fn swap(&mut self, val: uint, order: Ordering) -> uint { - unsafe { atomic_swap(&mut self.v, val, order) } + pub fn swap(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_swap(transmute_mut_unsafe(&self.v), val, order) } } #[inline] - pub fn compare_and_swap(&mut self, old: uint, new: uint, order: Ordering) -> uint { - unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + pub fn compare_and_swap(&self, old: uint, new: uint, order: Ordering) -> uint { + unsafe { atomic_compare_and_swap(transmute_mut_unsafe(&self.v), old, new, order) } } /// Returns the old value (like __sync_fetch_and_add). #[inline] - pub fn fetch_add(&mut self, val: uint, order: Ordering) -> uint { - unsafe { atomic_add(&mut self.v, val, order) } + pub fn fetch_add(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_add(transmute_mut_unsafe(&self.v), val, order) } } /// Returns the old value (like __sync_fetch_and_sub).. 
#[inline] - pub fn fetch_sub(&mut self, val: uint, order: Ordering) -> uint { - unsafe { atomic_sub(&mut self.v, val, order) } + pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_sub(transmute_mut_unsafe(&self.v), val, order) } } } impl AtomicPtr { pub fn new(p: *mut T) -> AtomicPtr { - AtomicPtr { p: p as uint, nopod: marker::NoPod } + AtomicPtr { p: p as uint, nopod: marker::NoPod, nofreeze: marker::NoFreeze } } #[inline] @@ -312,19 +281,19 @@ impl AtomicPtr { } #[inline] - pub fn store(&mut self, ptr: *mut T, order: Ordering) { - unsafe { atomic_store(&mut self.p, ptr as uint, order); } + pub fn store(&self, ptr: *mut T, order: Ordering) { + unsafe { atomic_store(transmute_mut_unsafe(&self.p), ptr as uint, order); } } #[inline] - pub fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T { - unsafe { atomic_swap(&mut self.p, ptr as uint, order) as *mut T } + pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { + unsafe { atomic_swap(transmute_mut_unsafe(&self.p), ptr as uint, order) as *mut T } } #[inline] - pub fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { + pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { unsafe { - atomic_compare_and_swap(&mut self.p, old as uint, + atomic_compare_and_swap(transmute_mut_unsafe(&self.p), old as uint, new as uint, order) as *mut T } } @@ -332,17 +301,17 @@ impl AtomicPtr { impl AtomicOption { pub fn new(p: ~T) -> AtomicOption { - unsafe { AtomicOption { p: cast::transmute(p) } } + unsafe { AtomicOption { p: cast::transmute(p), nofreeze: marker::NoFreeze } } } - pub fn empty() -> AtomicOption { AtomicOption { p: 0 } } + pub fn empty() -> AtomicOption { AtomicOption { p: 0, nofreeze: marker::NoFreeze } } #[inline] - pub fn swap(&mut self, val: ~T, order: Ordering) -> Option<~T> { + pub fn swap(&self, val: ~T, order: Ordering) -> Option<~T> { unsafe { let val = cast::transmute(val); - let p = atomic_swap(&mut 
self.p, val, order); + let p = atomic_swap(transmute_mut_unsafe(&self.p), val, order); if p as uint == 0 { None } else { @@ -352,7 +321,7 @@ impl AtomicOption { } #[inline] - pub fn take(&mut self, order: Ordering) -> Option<~T> { + pub fn take(&self, order: Ordering) -> Option<~T> { unsafe { self.swap(cast::transmute(0), order) } } @@ -360,11 +329,12 @@ impl AtomicOption { /// if so. If the option was already 'Some', returns 'Some' of the rejected /// value. #[inline] - pub fn fill(&mut self, val: ~T, order: Ordering) -> Option<~T> { + pub fn fill(&self, val: ~T, order: Ordering) -> Option<~T> { unsafe { let val = cast::transmute(val); let expected = cast::transmute(0); - let oldval = atomic_compare_and_swap(&mut self.p, expected, val, order); + let oldval = atomic_compare_and_swap( + transmute_mut_unsafe(&self.p), expected, val, order); if oldval == expected { None } else { @@ -376,7 +346,7 @@ impl AtomicOption { /// Be careful: The caller must have some external method of ensuring the /// result does not get invalidated by another task after this returns. 
#[inline] - pub fn is_empty(&mut self, order: Ordering) -> bool { + pub fn is_empty(&self, order: Ordering) -> bool { unsafe { atomic_load(&self.p, order) as uint == 0 } } } @@ -389,7 +359,7 @@ impl Drop for AtomicOption { } #[inline] -pub unsafe fn atomic_store(dst: &mut T, val: T, order:Ordering) { +unsafe fn atomic_store(dst: *mut T, val: T, order:Ordering) { match order { Release => intrinsics::atomic_store_rel(dst, val), Relaxed => intrinsics::atomic_store_relaxed(dst, val), @@ -398,7 +368,7 @@ pub unsafe fn atomic_store(dst: &mut T, val: T, order:Ordering) { } #[inline] -pub unsafe fn atomic_load(dst: &T, order:Ordering) -> T { +unsafe fn atomic_load(dst: *T, order:Ordering) -> T { match order { Acquire => intrinsics::atomic_load_acq(dst), Relaxed => intrinsics::atomic_load_relaxed(dst), @@ -407,7 +377,7 @@ pub unsafe fn atomic_load(dst: &T, order:Ordering) -> T { } #[inline] -pub unsafe fn atomic_swap(dst: &mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xchg_acq(dst, val), Release => intrinsics::atomic_xchg_rel(dst, val), @@ -419,7 +389,7 @@ pub unsafe fn atomic_swap(dst: &mut T, val: T, order: Ordering) -> T { /// Returns the old value (like __sync_fetch_and_add). #[inline] -pub unsafe fn atomic_add(dst: &mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_add(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xadd_acq(dst, val), Release => intrinsics::atomic_xadd_rel(dst, val), @@ -431,7 +401,7 @@ pub unsafe fn atomic_add(dst: &mut T, val: T, order: Ordering) -> T { /// Returns the old value (like __sync_fetch_and_sub). 
#[inline] -pub unsafe fn atomic_sub(dst: &mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_sub(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xsub_acq(dst, val), Release => intrinsics::atomic_xsub_rel(dst, val), @@ -442,7 +412,7 @@ pub unsafe fn atomic_sub(dst: &mut T, val: T, order: Ordering) -> T { } #[inline] -pub unsafe fn atomic_compare_and_swap(dst:&mut T, old:T, new:T, order: Ordering) -> T { +unsafe fn atomic_compare_and_swap(dst: *mut T, old:T, new:T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), Release => intrinsics::atomic_cxchg_rel(dst, old, new), @@ -453,7 +423,7 @@ pub unsafe fn atomic_compare_and_swap(dst:&mut T, old:T, new:T, order: Orderi } #[inline] -pub unsafe fn atomic_and(dst: &mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_and(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_and_acq(dst, val), Release => intrinsics::atomic_and_rel(dst, val), @@ -464,7 +434,7 @@ pub unsafe fn atomic_and(dst: &mut T, val: T, order: Ordering) -> T { } #[inline] -pub unsafe fn atomic_nand(dst: &mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_nand(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_nand_acq(dst, val), Release => intrinsics::atomic_nand_rel(dst, val), @@ -476,7 +446,7 @@ pub unsafe fn atomic_nand(dst: &mut T, val: T, order: Ordering) -> T { #[inline] -pub unsafe fn atomic_or(dst: &mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_or(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_or_acq(dst, val), Release => intrinsics::atomic_or_rel(dst, val), @@ -488,7 +458,7 @@ pub unsafe fn atomic_or(dst: &mut T, val: T, order: Ordering) -> T { #[inline] -pub unsafe fn atomic_xor(dst: &mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_xor(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => 
intrinsics::atomic_xor_acq(dst, val), Release => intrinsics::atomic_xor_rel(dst, val), @@ -616,7 +586,7 @@ mod test { assert_eq!(super::atomic_compare_and_swap(&mut slot, 1, 2, SeqCst), 0); let mut slot = 0u32; - assert_eq!(super::atomic_load(&mut slot, SeqCst), 0); + assert_eq!(super::atomic_load(&slot, SeqCst), 0); let mut slot = 0u64; super::atomic_store(&mut slot, 2, SeqCst); diff --git a/src/libstd/sync/atomics_stage0.rs b/src/libstd/sync/atomics_stage0.rs new file mode 100644 index 0000000000000..b4d465c0397de --- /dev/null +++ b/src/libstd/sync/atomics_stage0.rs @@ -0,0 +1,625 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/*! + * Atomic types + * + * Basic atomic types supporting atomic operations. Each method takes an + * `Ordering` which represents the strength of the memory barrier for that + * operation. These orderings are the same as C++11 atomic orderings + * [http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync] + * + * All atomic types are a single word in size. + */ + +#[allow(missing_doc)]; + +use intrinsics; +use cast; +use std::kinds::marker; +use option::{Option,Some,None}; +use ops::Drop; + +/** + * A simple atomic flag, that can be set and cleared. The most basic atomic type. + */ +pub struct AtomicFlag { + priv v: int, + priv nopod: marker::NoPod +} + +/** + * An atomic boolean type. 
+ */ +pub struct AtomicBool { + priv v: uint, + priv nopod: marker::NoPod +} + +/** + * A signed atomic integer type, supporting basic atomic arithmetic operations + */ +pub struct AtomicInt { + priv v: int, + priv nopod: marker::NoPod +} + +/** + * An unsigned atomic integer type, supporting basic atomic arithmetic operations + */ +pub struct AtomicUint { + priv v: uint, + priv nopod: marker::NoPod +} + +/** + * An unsigned atomic integer type that is forced to be 64-bits. This does not + * support all operations. + */ +pub struct AtomicU64 { + priv v: u64, + priv nopod: marker::NoPod +} + +/** + * An unsafe atomic pointer. Only supports basic atomic operations + */ +pub struct AtomicPtr { + priv p: uint, + priv nopod: marker::NoPod +} + +/** + * An owned atomic pointer. Ensures that only a single reference to the data is held at any time. + */ +#[unsafe_no_drop_flag] +pub struct AtomicOption { + priv p: uint, +} + +pub enum Ordering { + Relaxed, + Release, + Acquire, + AcqRel, + SeqCst +} + +pub static INIT_ATOMIC_FLAG : AtomicFlag = AtomicFlag { v: 0, nopod: marker::NoPod }; +pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { v: 0, nopod: marker::NoPod }; +pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { v: 0, nopod: marker::NoPod }; +pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { v: 0, nopod: marker::NoPod }; +pub static INIT_ATOMIC_U64 : AtomicU64 = AtomicU64 { v: 0, nopod: marker::NoPod }; + +impl AtomicFlag { + + pub fn new() -> AtomicFlag { + AtomicFlag { v: 0, nopod: marker::NoPod} + } + + /** + * Clears the atomic flag + */ + #[inline] + pub fn clear(&mut self, order: Ordering) { + unsafe {atomic_store(&mut self.v, 0, order)} + } + + /** + * Sets the flag if it was previously unset, returns the previous value of the + * flag. 
+ */ + #[inline] + pub fn test_and_set(&mut self, order: Ordering) -> bool { + unsafe { atomic_compare_and_swap(&mut self.v, 0, 1, order) > 0 } + } +} + +impl AtomicBool { + pub fn new(v: bool) -> AtomicBool { + AtomicBool { v: if v { 1 } else { 0 }, nopod: marker::NoPod } + } + + #[inline] + pub fn load(&self, order: Ordering) -> bool { + unsafe { atomic_load(&self.v, order) > 0 } + } + + #[inline] + pub fn store(&mut self, val: bool, order: Ordering) { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_store(&mut self.v, val, order); } + } + + #[inline] + pub fn swap(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_swap(&mut self.v, val, order) > 0 } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: bool, new: bool, order: Ordering) -> bool { + let old = if old { 1 } else { 0 }; + let new = if new { 1 } else { 0 }; + + unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) > 0 } + } + + /// Returns the old value + #[inline] + pub fn fetch_and(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_and(&mut self.v, val, order) > 0 } + } + + /// Returns the old value + #[inline] + pub fn fetch_nand(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_nand(&mut self.v, val, order) > 0 } + } + + /// Returns the old value + #[inline] + pub fn fetch_or(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_or(&mut self.v, val, order) > 0 } + } + + /// Returns the old value + #[inline] + pub fn fetch_xor(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_xor(&mut self.v, val, order) > 0 } + } +} + +impl AtomicInt { + pub fn new(v: int) -> AtomicInt { + AtomicInt { v:v, nopod: marker::NoPod} + } + + #[inline] + pub fn load(&self, order: Ordering) -> int { + unsafe { atomic_load(&self.v, 
order) } + } + + #[inline] + pub fn store(&mut self, val: int, order: Ordering) { + unsafe { atomic_store(&mut self.v, val, order); } + } + + #[inline] + pub fn swap(&mut self, val: int, order: Ordering) -> int { + unsafe { atomic_swap(&mut self.v, val, order) } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: int, new: int, order: Ordering) -> int { + unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + } + + /// Returns the old value (like __sync_fetch_and_add). + #[inline] + pub fn fetch_add(&mut self, val: int, order: Ordering) -> int { + unsafe { atomic_add(&mut self.v, val, order) } + } + + /// Returns the old value (like __sync_fetch_and_sub). + #[inline] + pub fn fetch_sub(&mut self, val: int, order: Ordering) -> int { + unsafe { atomic_sub(&mut self.v, val, order) } + } +} + +impl AtomicU64 { + pub fn new(v: u64) -> AtomicU64 { + AtomicU64 { v:v, nopod: marker::NoPod } + } + + #[inline] + pub fn load(&self, order: Ordering) -> u64 { + unsafe { atomic_load(&self.v, order) } + } + + #[inline] + pub fn store(&mut self, val: u64, order: Ordering) { + unsafe { atomic_store(&mut self.v, val, order); } + } + + #[inline] + pub fn swap(&mut self, val: u64, order: Ordering) -> u64 { + unsafe { atomic_swap(&mut self.v, val, order) } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: u64, new: u64, order: Ordering) -> u64 { + unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + } + + #[inline] + pub fn fetch_add(&mut self, val: u64, order: Ordering) -> u64 { + unsafe { atomic_add(&mut self.v, val, order) } + } + + #[inline] + pub fn fetch_sub(&mut self, val: u64, order: Ordering) -> u64 { + unsafe { atomic_sub(&mut self.v, val, order) } + } +} + +impl AtomicUint { + pub fn new(v: uint) -> AtomicUint { + AtomicUint { v:v, nopod: marker::NoPod } + } + + #[inline] + pub fn load(&self, order: Ordering) -> uint { + unsafe { atomic_load(&self.v, order) } + } + + #[inline] + pub fn store(&mut self, val: uint, order: Ordering) { 
+ unsafe { atomic_store(&mut self.v, val, order); } + } + + #[inline] + pub fn swap(&mut self, val: uint, order: Ordering) -> uint { + unsafe { atomic_swap(&mut self.v, val, order) } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: uint, new: uint, order: Ordering) -> uint { + unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + } + + /// Returns the old value (like __sync_fetch_and_add). + #[inline] + pub fn fetch_add(&mut self, val: uint, order: Ordering) -> uint { + unsafe { atomic_add(&mut self.v, val, order) } + } + + /// Returns the old value (like __sync_fetch_and_sub).. + #[inline] + pub fn fetch_sub(&mut self, val: uint, order: Ordering) -> uint { + unsafe { atomic_sub(&mut self.v, val, order) } + } +} + +impl AtomicPtr { + pub fn new(p: *mut T) -> AtomicPtr { + AtomicPtr { p: p as uint, nopod: marker::NoPod } + } + + #[inline] + pub fn load(&self, order: Ordering) -> *mut T { + unsafe { + atomic_load(&self.p, order) as *mut T + } + } + + #[inline] + pub fn store(&mut self, ptr: *mut T, order: Ordering) { + unsafe { atomic_store(&mut self.p, ptr as uint, order); } + } + + #[inline] + pub fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T { + unsafe { atomic_swap(&mut self.p, ptr as uint, order) as *mut T } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { + unsafe { + atomic_compare_and_swap(&mut self.p, old as uint, + new as uint, order) as *mut T + } + } +} + +impl AtomicOption { + pub fn new(p: ~T) -> AtomicOption { + unsafe { AtomicOption { p: cast::transmute(p) } } + } + + pub fn empty() -> AtomicOption { AtomicOption { p: 0 } } + + #[inline] + pub fn swap(&mut self, val: ~T, order: Ordering) -> Option<~T> { + unsafe { + let val = cast::transmute(val); + + let p = atomic_swap(&mut self.p, val, order); + if p as uint == 0 { + None + } else { + Some(cast::transmute(p)) + } + } + } + + #[inline] + pub fn take(&mut self, order: Ordering) -> Option<~T> { + unsafe { 
self.swap(cast::transmute(0), order) } + } + + /// A compare-and-swap. Succeeds if the option is 'None' and returns 'None' + /// if so. If the option was already 'Some', returns 'Some' of the rejected + /// value. + #[inline] + pub fn fill(&mut self, val: ~T, order: Ordering) -> Option<~T> { + unsafe { + let val = cast::transmute(val); + let expected = cast::transmute(0); + let oldval = atomic_compare_and_swap(&mut self.p, expected, val, order); + if oldval == expected { + None + } else { + Some(cast::transmute(val)) + } + } + } + + /// Be careful: The caller must have some external method of ensuring the + /// result does not get invalidated by another task after this returns. + #[inline] + pub fn is_empty(&mut self, order: Ordering) -> bool { + unsafe { atomic_load(&self.p, order) as uint == 0 } + } +} + +#[unsafe_destructor] +impl Drop for AtomicOption { + fn drop(&mut self) { + let _ = self.take(SeqCst); + } +} + +#[inline] +pub unsafe fn atomic_store(dst: &mut T, val: T, order:Ordering) { + match order { + Release => intrinsics::atomic_store_rel(dst, val), + Relaxed => intrinsics::atomic_store_relaxed(dst, val), + _ => intrinsics::atomic_store(dst, val) + } +} + +#[inline] +pub unsafe fn atomic_load(dst: &T, order:Ordering) -> T { + match order { + Acquire => intrinsics::atomic_load_acq(dst), + Relaxed => intrinsics::atomic_load_relaxed(dst), + _ => intrinsics::atomic_load(dst) + } +} + +#[inline] +pub unsafe fn atomic_swap(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xchg_acq(dst, val), + Release => intrinsics::atomic_xchg_rel(dst, val), + AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), + Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), + _ => intrinsics::atomic_xchg(dst, val) + } +} + +/// Returns the old value (like __sync_fetch_and_add). 
+#[inline] +pub unsafe fn atomic_add(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xadd_acq(dst, val), + Release => intrinsics::atomic_xadd_rel(dst, val), + AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), + Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), + _ => intrinsics::atomic_xadd(dst, val) + } +} + +/// Returns the old value (like __sync_fetch_and_sub). +#[inline] +pub unsafe fn atomic_sub(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xsub_acq(dst, val), + Release => intrinsics::atomic_xsub_rel(dst, val), + AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), + Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), + _ => intrinsics::atomic_xsub(dst, val) + } +} + +#[inline] +pub unsafe fn atomic_compare_and_swap(dst:&mut T, old:T, new:T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), + Release => intrinsics::atomic_cxchg_rel(dst, old, new), + AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new), + Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new), + _ => intrinsics::atomic_cxchg(dst, old, new), + } +} + +#[inline] +pub unsafe fn atomic_and(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_and_acq(dst, val), + Release => intrinsics::atomic_and_rel(dst, val), + AcqRel => intrinsics::atomic_and_acqrel(dst, val), + Relaxed => intrinsics::atomic_and_relaxed(dst, val), + _ => intrinsics::atomic_and(dst, val) + } +} + +#[inline] +pub unsafe fn atomic_nand(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_nand_acq(dst, val), + Release => intrinsics::atomic_nand_rel(dst, val), + AcqRel => intrinsics::atomic_nand_acqrel(dst, val), + Relaxed => intrinsics::atomic_nand_relaxed(dst, val), + _ => intrinsics::atomic_nand(dst, val) + } +} + + +#[inline] +pub unsafe fn atomic_or(dst: &mut T, val: T, order: Ordering) -> T { + 
match order { + Acquire => intrinsics::atomic_or_acq(dst, val), + Release => intrinsics::atomic_or_rel(dst, val), + AcqRel => intrinsics::atomic_or_acqrel(dst, val), + Relaxed => intrinsics::atomic_or_relaxed(dst, val), + _ => intrinsics::atomic_or(dst, val) + } +} + + +#[inline] +pub unsafe fn atomic_xor(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xor_acq(dst, val), + Release => intrinsics::atomic_xor_rel(dst, val), + AcqRel => intrinsics::atomic_xor_acqrel(dst, val), + Relaxed => intrinsics::atomic_xor_relaxed(dst, val), + _ => intrinsics::atomic_xor(dst, val) + } +} + + +/** + * An atomic fence. + * + * A fence 'A' which has `Release` ordering semantics, synchronizes with a + * fence 'B' with (at least) `Acquire` semantics, if and only if there exists + * atomic operations X and Y, both operating on some atomic object 'M' such + * that A is sequenced before X, Y is synchronized before B and Y observes + * the change to M. This provides a happens-before dependence between A and B. + * + * Atomic operations with `Release` or `Acquire` semantics can also synchronize + * with a fence. + * + * A fence which has `SeqCst` ordering, in addition to having both `Acquire` and + * `Release` semantics, participates in the global program order of the other + * `SeqCst` operations and/or fences. + * + * Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings. 
+ */ +#[inline] +pub fn fence(order: Ordering) { + unsafe { + match order { + Acquire => intrinsics::atomic_fence_acq(), + Release => intrinsics::atomic_fence_rel(), + AcqRel => intrinsics::atomic_fence_acqrel(), + _ => intrinsics::atomic_fence(), + } + } +} + +#[cfg(test)] +mod test { + use option::*; + use super::*; + + #[test] + fn flag() { + let mut flg = AtomicFlag::new(); + assert!(!flg.test_and_set(SeqCst)); + assert!(flg.test_and_set(SeqCst)); + + flg.clear(SeqCst); + assert!(!flg.test_and_set(SeqCst)); + } + + #[test] + fn option_empty() { + let mut option: AtomicOption<()> = AtomicOption::empty(); + assert!(option.is_empty(SeqCst)); + } + + #[test] + fn option_swap() { + let mut p = AtomicOption::new(~1); + let a = ~2; + + let b = p.swap(a, SeqCst); + + assert_eq!(b, Some(~1)); + assert_eq!(p.take(SeqCst), Some(~2)); + } + + #[test] + fn option_take() { + let mut p = AtomicOption::new(~1); + + assert_eq!(p.take(SeqCst), Some(~1)); + assert_eq!(p.take(SeqCst), None); + + let p2 = ~2; + p.swap(p2, SeqCst); + + assert_eq!(p.take(SeqCst), Some(~2)); + } + + #[test] + fn option_fill() { + let mut p = AtomicOption::new(~1); + assert!(p.fill(~2, SeqCst).is_some()); // should fail; shouldn't leak! 
+ assert_eq!(p.take(SeqCst), Some(~1)); + + assert!(p.fill(~2, SeqCst).is_none()); // shouldn't fail + assert_eq!(p.take(SeqCst), Some(~2)); + } + + #[test] + fn bool_and() { + let mut a = AtomicBool::new(true); + assert_eq!(a.fetch_and(false, SeqCst),true); + assert_eq!(a.load(SeqCst),false); + } + + static mut S_FLAG : AtomicFlag = INIT_ATOMIC_FLAG; + static mut S_BOOL : AtomicBool = INIT_ATOMIC_BOOL; + static mut S_INT : AtomicInt = INIT_ATOMIC_INT; + static mut S_UINT : AtomicUint = INIT_ATOMIC_UINT; + + #[test] + fn static_init() { + unsafe { + assert!(!S_FLAG.test_and_set(SeqCst)); + assert!(!S_BOOL.load(SeqCst)); + assert!(S_INT.load(SeqCst) == 0); + assert!(S_UINT.load(SeqCst) == 0); + } + } + + #[test] + fn different_sizes() { + unsafe { + let mut slot = 0u16; + assert_eq!(super::atomic_swap(&mut slot, 1, SeqCst), 0); + + let mut slot = 0u8; + assert_eq!(super::atomic_compare_and_swap(&mut slot, 1, 2, SeqCst), 0); + + let mut slot = 0u32; + assert_eq!(super::atomic_load(&mut slot, SeqCst), 0); + + let mut slot = 0u64; + super::atomic_store(&mut slot, 2, SeqCst); + } + } +} diff --git a/src/libstd/sync/mod.rs b/src/libstd/sync/mod.rs index 3213c538152c6..994d12b34e5aa 100644 --- a/src/libstd/sync/mod.rs +++ b/src/libstd/sync/mod.rs @@ -16,6 +16,10 @@ //! other types of concurrent primitives. 
pub mod arc; +#[cfg(stage0)] +#[path = "atomics_stage0.rs"] +pub mod atomics; +#[cfg(not(stage0))] pub mod atomics; pub mod deque; pub mod mpmc_bounded_queue; diff --git a/src/test/auxiliary/cci_intrinsic.rs b/src/test/auxiliary/cci_intrinsic.rs index 07d6df89d220c..35d987480c078 100644 --- a/src/test/auxiliary/cci_intrinsic.rs +++ b/src/test/auxiliary/cci_intrinsic.rs @@ -10,26 +10,12 @@ pub mod rusti { extern "rust-intrinsic" { - pub fn atomic_cxchg(dst: &mut T, old: T, src: T) -> T; - pub fn atomic_cxchg_acq(dst: &mut T, old: T, src: T) -> T; - pub fn atomic_cxchg_rel(dst: &mut T, old: T, src: T) -> T; - - pub fn atomic_xchg(dst: &mut T, src: T) -> T; - pub fn atomic_xchg_acq(dst: &mut T, src: T) -> T; - pub fn atomic_xchg_rel(dst: &mut T, src: T) -> T; - - pub fn atomic_xadd(dst: &mut T, src: T) -> T; - pub fn atomic_xadd_acq(dst: &mut T, src: T) -> T; - pub fn atomic_xadd_rel(dst: &mut T, src: T) -> T; - - pub fn atomic_xsub(dst: &mut T, src: T) -> T; - pub fn atomic_xsub_acq(dst: &mut T, src: T) -> T; - pub fn atomic_xsub_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xchg(dst: *mut T, src: T) -> T; } } #[inline(always)] -pub fn atomic_xchg(dst: &mut int, src: int) -> int { +pub fn atomic_xchg(dst: *mut int, src: int) -> int { unsafe { rusti::atomic_xchg(dst, src) } diff --git a/src/test/run-pass/intrinsic-atomics.rs b/src/test/run-pass/intrinsic-atomics.rs index d6e394a345e22..b663cbfa50974 100644 --- a/src/test/run-pass/intrinsic-atomics.rs +++ b/src/test/run-pass/intrinsic-atomics.rs @@ -10,27 +10,27 @@ mod rusti { extern "rust-intrinsic" { - pub fn atomic_cxchg(dst: &mut T, old: T, src: T) -> T; - pub fn atomic_cxchg_acq(dst: &mut T, old: T, src: T) -> T; - pub fn atomic_cxchg_rel(dst: &mut T, old: T, src: T) -> T; + pub fn atomic_cxchg(dst: *mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_acq(dst: *mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_rel(dst: *mut T, old: T, src: T) -> T; - pub fn atomic_load(src: &T) -> T; - pub fn atomic_load_acq(src: 
&T) -> T; + pub fn atomic_load(src: *T) -> T; + pub fn atomic_load_acq(src: *T) -> T; - pub fn atomic_store(dst: &mut T, val: T); - pub fn atomic_store_rel(dst: &mut T, val: T); + pub fn atomic_store(dst: *mut T, val: T); + pub fn atomic_store_rel(dst: *mut T, val: T); - pub fn atomic_xchg(dst: &mut T, src: T) -> T; - pub fn atomic_xchg_acq(dst: &mut T, src: T) -> T; - pub fn atomic_xchg_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xchg(dst: *mut T, src: T) -> T; + pub fn atomic_xchg_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xchg_rel(dst: *mut T, src: T) -> T; - pub fn atomic_xadd(dst: &mut T, src: T) -> T; - pub fn atomic_xadd_acq(dst: &mut T, src: T) -> T; - pub fn atomic_xadd_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xadd(dst: *mut T, src: T) -> T; + pub fn atomic_xadd_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xadd_rel(dst: *mut T, src: T) -> T; - pub fn atomic_xsub(dst: &mut T, src: T) -> T; - pub fn atomic_xsub_acq(dst: &mut T, src: T) -> T; - pub fn atomic_xsub_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xsub(dst: *mut T, src: T) -> T; + pub fn atomic_xsub_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xsub_rel(dst: *mut T, src: T) -> T; } } @@ -38,41 +38,41 @@ pub fn main() { unsafe { let mut x = ~1; - assert_eq!(rusti::atomic_load(x), 1); + assert_eq!(rusti::atomic_load(&*x), 1); *x = 5; - assert_eq!(rusti::atomic_load_acq(x), 5); + assert_eq!(rusti::atomic_load_acq(&*x), 5); - rusti::atomic_store(x,3); + rusti::atomic_store(&mut *x,3); assert_eq!(*x, 3); - rusti::atomic_store_rel(x,1); + rusti::atomic_store_rel(&mut *x,1); assert_eq!(*x, 1); - assert_eq!(rusti::atomic_cxchg(x, 1, 2), 1); + assert_eq!(rusti::atomic_cxchg(&mut *x, 1, 2), 1); assert_eq!(*x, 2); - assert_eq!(rusti::atomic_cxchg_acq(x, 1, 3), 2); + assert_eq!(rusti::atomic_cxchg_acq(&mut *x, 1, 3), 2); assert_eq!(*x, 2); - assert_eq!(rusti::atomic_cxchg_rel(x, 2, 1), 2); + assert_eq!(rusti::atomic_cxchg_rel(&mut *x, 2, 1), 2); assert_eq!(*x, 1); - 
assert_eq!(rusti::atomic_xchg(x, 0), 1); + assert_eq!(rusti::atomic_xchg(&mut *x, 0), 1); assert_eq!(*x, 0); - assert_eq!(rusti::atomic_xchg_acq(x, 1), 0); + assert_eq!(rusti::atomic_xchg_acq(&mut *x, 1), 0); assert_eq!(*x, 1); - assert_eq!(rusti::atomic_xchg_rel(x, 0), 1); + assert_eq!(rusti::atomic_xchg_rel(&mut *x, 0), 1); assert_eq!(*x, 0); - assert_eq!(rusti::atomic_xadd(x, 1), 0); - assert_eq!(rusti::atomic_xadd_acq(x, 1), 1); - assert_eq!(rusti::atomic_xadd_rel(x, 1), 2); + assert_eq!(rusti::atomic_xadd(&mut *x, 1), 0); + assert_eq!(rusti::atomic_xadd_acq(&mut *x, 1), 1); + assert_eq!(rusti::atomic_xadd_rel(&mut *x, 1), 2); assert_eq!(*x, 3); - assert_eq!(rusti::atomic_xsub(x, 1), 3); - assert_eq!(rusti::atomic_xsub_acq(x, 1), 2); - assert_eq!(rusti::atomic_xsub_rel(x, 1), 1); + assert_eq!(rusti::atomic_xsub(&mut *x, 1), 3); + assert_eq!(rusti::atomic_xsub_acq(&mut *x, 1), 2); + assert_eq!(rusti::atomic_xsub_rel(&mut *x, 1), 1); assert_eq!(*x, 0); } }