diff --git a/src/libextra/arc.rs b/src/libextra/arc.rs index 054b4ce5177d5..404d5bfde5876 100644 --- a/src/libextra/arc.rs +++ b/src/libextra/arc.rs @@ -50,9 +50,9 @@ use std::borrow; /// As sync::condvar, a mechanism for unlock-and-descheduling and signaling. pub struct Condvar<'self> { - is_mutex: bool, - failed: &'self mut bool, - cond: &'self sync::Condvar<'self> + priv is_mutex: bool, + priv failed: &'self mut bool, + priv cond: &'self sync::Condvar<'self> } impl<'self> Condvar<'self> { @@ -108,7 +108,7 @@ impl<'self> Condvar<'self> { ****************************************************************************/ /// An atomically reference counted wrapper for shared immutable state. -pub struct ARC { x: UnsafeAtomicRcBox } +pub struct ARC { priv x: UnsafeAtomicRcBox } /// Create an atomically reference counted wrapper. pub fn ARC(data: T) -> ARC { @@ -123,6 +123,20 @@ impl ARC { pub fn get<'a>(&'a self) -> &'a T { unsafe { &*self.x.get_immut() } } + + /** + * Retrieve the data back out of the ARC. This function blocks until the + * reference given to it is the last existing one, and then unwrap the data + * instead of destroying it. + * + * If multiple tasks call unwrap, all but the first will fail. Do not call + * unwrap from a task that holds another reference to the same ARC; it is + * guaranteed to deadlock. + */ + pub fn unwrap(self) -> T { + let ARC { x: x } = self; + unsafe { x.unwrap() } + } } /** @@ -143,9 +157,9 @@ impl Clone for ARC { ****************************************************************************/ #[doc(hidden)] -struct MutexARCInner { lock: Mutex, failed: bool, data: T } +struct MutexARCInner { priv lock: Mutex, priv failed: bool, priv data: T } /// An ARC with mutable data protected by a blocking mutex. -struct MutexARC { x: UnsafeAtomicRcBox> } +struct MutexARC { priv x: UnsafeAtomicRcBox> } /// Create a mutex-protected ARC with the supplied data. pub fn MutexARC(user_data: T) -> MutexARC { @@ -225,6 +239,22 @@ impl MutexARC { cond: cond }) } } + + /** + * Retrieves the data, blocking until all other references are dropped, + * exactly as arc::unwrap. + * + * Will additionally fail if another task has failed while accessing the arc. + */ + pub fn unwrap(self) -> T { + let MutexARC { x: x } = self; + let inner = unsafe { x.unwrap() }; + let MutexARCInner { failed: failed, data: data, _ } = inner; + if failed { + fail!(~"Can't unwrap poisoned MutexARC - another task failed inside!"); + } + data + } } // Common code for {mutex.access,rwlock.write}{,_cond}. @@ -268,7 +298,7 @@ fn PoisonOnFail<'r>(failed: &'r mut bool) -> PoisonOnFail { ****************************************************************************/ #[doc(hidden)] -struct RWARCInner { lock: RWlock, failed: bool, data: T } +struct RWARCInner { priv lock: RWlock, priv failed: bool, priv data: T } /** * A dual-mode ARC protected by a reader-writer lock. The data can be accessed * mutably or immutably, and immutably-accessing tasks may run concurrently. @@ -278,7 +308,7 @@ struct RWARCInner { lock: RWlock, failed: bool, data: T } #[mutable] // XXX remove after snap #[no_freeze] struct RWARC { - x: UnsafeAtomicRcBox>, + priv x: UnsafeAtomicRcBox>, } /// Create a reader/writer ARC with the supplied data. @@ -429,6 +459,23 @@ impl RWARC { } } } + + /** + * Retrieves the data, blocking until all other references are dropped, + * exactly as arc::unwrap. + * + * Will additionally fail if another task has failed while accessing the arc + * in write mode. 
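For readers skimming this patch, a minimal usage sketch of the new `unwrap` on a plain ARC added above (illustrative only, not part of the patch itself; the `comm::stream` and `task::spawn` calls are the same ones the arc.rs tests in this file use, everything else is made up):

    let arc = ARC(~[1, 2, 3]);
    let arc2 = arc.clone();
    let (p, c) = comm::stream();
    do task::spawn {
        // The child owns the second reference; it signals, then drops it
        // when the task exits.
        assert!(arc2.get().len() == 3);
        c.send(());
    }
    let _ = p.recv();
    // Blocks until arc2 (the only other reference) is gone, then hands the
    // data back by value instead of destroying it.
    let v = arc.unwrap();
    assert!(v == ~[1, 2, 3]);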
+ */ + pub fn unwrap(self) -> T { + let RWARC { x: x, _ } = self; + let inner = unsafe { x.unwrap() }; + let RWARCInner { failed: failed, data: data, _ } = inner; + if failed { + fail!(~"Can't unwrap poisoned RWARC - another task failed inside!") + } + data + } } // Borrowck rightly complains about immutably aliasing the rwlock in order to @@ -611,6 +658,23 @@ mod tests { } } #[test] #[should_fail] #[ignore(cfg(windows))] + pub fn test_mutex_arc_unwrap_poison() { + let arc = MutexARC(1); + let arc2 = ~(&arc).clone(); + let (p, c) = comm::stream(); + do task::spawn { + unsafe { + do arc2.access |one| { + c.send(()); + assert!(*one == 2); + } + } + } + let _ = p.recv(); + let one = arc.unwrap(); + assert!(one == 1); + } + #[test] #[should_fail] #[ignore(cfg(windows))] fn test_rw_arc_poison_wr() { let arc = ~RWARC(1); let arc2 = (*arc).clone(); diff --git a/src/libextra/dlist.rs b/src/libextra/dlist.rs index 840b412757709..c42eba1ffa29c 100644 --- a/src/libextra/dlist.rs +++ b/src/libextra/dlist.rs @@ -208,7 +208,7 @@ impl Deque for DList { /// /// O(1) fn pop_front(&mut self) -> Option { - match util::replace(&mut self.list_head, None) { + match self.list_head.take() { None => None, Some(old_head) => { self.length -= 1; diff --git a/src/libextra/ringbuf.rs b/src/libextra/ringbuf.rs index 6f8ca6500c6d8..f46af664b189f 100644 --- a/src/libextra/ringbuf.rs +++ b/src/libextra/ringbuf.rs @@ -14,7 +14,6 @@ //! extra::container::Deque`. use std::num; -use std::util; use std::uint; use std::vec; use std::iterator::{FromIterator, InvertIterator}; @@ -72,7 +71,7 @@ impl Deque for RingBuf { /// Remove and return the first element in the RingBuf, or None if it is empty fn pop_front(&mut self) -> Option { - let result = util::replace(&mut self.elts[self.lo], None); + let result = self.elts[self.lo].take(); if result.is_some() { self.lo = (self.lo + 1u) % self.elts.len(); self.nelts -= 1u; @@ -85,7 +84,7 @@ impl Deque for RingBuf { if self.nelts > 0 { self.nelts -= 1; let hi = self.raw_index(self.nelts); - util::replace(&mut self.elts[hi], None) + self.elts[hi].take() } else { None } diff --git a/src/libextra/smallintmap.rs b/src/libextra/smallintmap.rs index 6ad4d2acd9056..6ff219a4f8f7b 100644 --- a/src/libextra/smallintmap.rs +++ b/src/libextra/smallintmap.rs @@ -118,7 +118,7 @@ impl MutableMap for SmallIntMap { if *key >= self.v.len() { return None; } - replace(&mut self.v[*key], None) + self.v[*key].take() } } diff --git a/src/libextra/sort.rs b/src/libextra/sort.rs index 56a906e0a5d3e..57d8563861e35 100644 --- a/src/libextra/sort.rs +++ b/src/libextra/sort.rs @@ -1020,8 +1020,6 @@ mod big_tests { use sort::*; - use std::cast::unsafe_copy; - use std::local_data; use std::rand::RngUtil; use std::rand; use std::uint; diff --git a/src/libextra/treemap.rs b/src/libextra/treemap.rs index 1e6d38b9a1f2c..7e0cb76b51561 100644 --- a/src/libextra/treemap.rs +++ b/src/libextra/treemap.rs @@ -697,7 +697,7 @@ fn remove(node: &mut Option<~TreeNode>, } } } - return match replace(node, None) { + return match node.take() { Some(~TreeNode{value, _}) => Some(value), None => fail!() }; } diff --git a/src/libextra/workcache.rs b/src/libextra/workcache.rs index 99cf8c6912c33..ea13f33199912 100644 --- a/src/libextra/workcache.rs +++ b/src/libextra/workcache.rs @@ -27,7 +27,6 @@ use std::result; use std::run; use std::task; use std::to_bytes; -use std::util::replace; /** * @@ -353,7 +352,7 @@ impl TPrep for Prep { _ => { let (port, chan) = oneshot(); - let blk = replace(&mut bo, None).unwrap(); + let blk = bo.take_unwrap(); 
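All of the `replace(&mut x, None)` to `x.take()` rewrites above (dlist, ringbuf, smallintmap, treemap, workcache, and more below) rely on the `Option::take` method added to libstd/option.rs later in this patch. The pattern in isolation, with made-up values, looks like:

    let mut slot = Some(~"payload");
    // Equivalent to the old util::replace(&mut slot, None): move the value
    // out and leave None in its place.
    let value = slot.take();
    assert!(value.unwrap() == ~"payload");
    assert!(slot.is_none());
    // take_unwrap(), as used in the workcache change just above, is simply
    // take().unwrap() and fails if the slot was already empty.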
let chan = Cell::new(chan); do task::spawn { @@ -385,7 +384,7 @@ fn unwrap>( // FIXME(#5121) w: Work) -> T { let mut ww = w; - let s = replace(&mut ww.res, None); + let s = ww.res.take(); match s { None => fail!(), diff --git a/src/libstd/cell.rs b/src/libstd/cell.rs index 53ea11f2b0592..695ed0749dde0 100644 --- a/src/libstd/cell.rs +++ b/src/libstd/cell.rs @@ -14,7 +14,6 @@ use cast::transmute_mut; use prelude::*; -use util::replace; /* A dynamic, mutable location. @@ -48,7 +47,7 @@ impl Cell { fail!("attempt to take an empty cell"); } - replace(&mut this.value, None).unwrap() + this.value.take_unwrap() } /// Returns the value, failing if the cell is full. diff --git a/src/libstd/comm.rs b/src/libstd/comm.rs index 0acd6fee57efe..b9dacc142cebc 100644 --- a/src/libstd/comm.rs +++ b/src/libstd/comm.rs @@ -242,8 +242,7 @@ impl GenericChan for SharedChan { unsafe { let mut xx = Some(x); do chan.with_imm |chan| { - let x = replace(&mut xx, None); - chan.send(x.unwrap()) + chan.send(xx.take_unwrap()) } } } @@ -259,8 +258,7 @@ impl GenericSmartChan for SharedChan { unsafe { let mut xx = Some(x); do chan.with_imm |chan| { - let x = replace(&mut xx, None); - chan.try_send(x.unwrap()) + chan.try_send(xx.take_unwrap()) } } } @@ -372,7 +370,6 @@ mod pipesy { use pipes::{recv, try_recv, peek, PacketHeader}; use super::{GenericChan, GenericSmartChan, GenericPort, Peekable, Selectable}; use cast::transmute_mut; - use util::replace; /*proto! oneshot ( Oneshot:send { @@ -638,8 +635,7 @@ mod pipesy { fn send(&self, x: T) { unsafe { let self_endp = transmute_mut(&self.endp); - let endp = replace(self_endp, None); - *self_endp = Some(streamp::client::data(endp.unwrap(), x)) + *self_endp = Some(streamp::client::data(self_endp.take_unwrap(), x)) } } } @@ -649,8 +645,7 @@ mod pipesy { fn try_send(&self, x: T) -> bool { unsafe { let self_endp = transmute_mut(&self.endp); - let endp = replace(self_endp, None); - match streamp::client::try_data(endp.unwrap(), x) { + match streamp::client::try_data(self_endp.take_unwrap(), x) { Some(next) => { *self_endp = Some(next); true @@ -666,7 +661,7 @@ mod pipesy { fn recv(&self) -> T { unsafe { let self_endp = transmute_mut(&self.endp); - let endp = replace(self_endp, None); + let endp = self_endp.take(); let streamp::data(x, endp) = recv(endp.unwrap()); *self_endp = Some(endp); x @@ -677,7 +672,7 @@ mod pipesy { fn try_recv(&self) -> Option { unsafe { let self_endp = transmute_mut(&self.endp); - let endp = replace(self_endp, None); + let endp = self_endp.take(); match try_recv(endp.unwrap()) { Some(streamp::data(x, endp)) => { *self_endp = Some(endp); @@ -694,7 +689,7 @@ mod pipesy { fn peek(&self) -> bool { unsafe { let self_endp = transmute_mut(&self.endp); - let mut endp = replace(self_endp, None); + let mut endp = self_endp.take(); let peek = match endp { Some(ref mut endp) => peek(endp), None => fail!("peeking empty stream") diff --git a/src/libstd/either.rs b/src/libstd/either.rs index fcbd98a79e796..4fb43e5157b43 100644 --- a/src/libstd/either.rs +++ b/src/libstd/either.rs @@ -18,6 +18,7 @@ use cmp::Eq; use iterator::IteratorUtil; use result::Result; use result; +use str::StrSlice; use vec; use vec::{OwnedVector, ImmutableVector}; @@ -121,24 +122,37 @@ pub fn is_right(eith: &Either) -> bool { } } -/// Retrieves the value in the left branch. Fails if the either is Right. +/// Retrieves the value in the left branch. +/// Fails with a specified reason if the either is Right. 
#[inline] -pub fn unwrap_left(eith: Either) -> T { +pub fn expect_left(eith: Either, reason: &str) -> T { match eith { Left(x) => x, - Right(_) => fail!("either::unwrap_left Right") + Right(_) => fail!(reason.to_owned()) } } -/// Retrieves the value in the right branch. Fails if the either is Left. +/// Retrieves the value in the left branch. Fails if the either is Right. #[inline] -pub fn unwrap_right(eith: Either) -> U { +pub fn unwrap_left(eith: Either) -> T { + expect_left(eith, "either::unwrap_left Right") +} + +/// Retrieves the value in the right branch. +/// Fails with a specified reason if the either is Left. +#[inline] +pub fn expect_right(eith: Either, reason: &str) -> U { match eith { Right(x) => x, - Left(_) => fail!("either::unwrap_right Left") + Left(_) => fail!(reason.to_owned()) } } +/// Retrieves the value in the right branch. Fails if the either is Left. +pub fn unwrap_right(eith: Either) -> U { + expect_right(eith, "either::unwrap_right Left") +} + impl Either { #[inline] pub fn either(&self, f_left: &fn(&T) -> V, f_right: &fn(&U) -> V) -> V { @@ -157,9 +171,15 @@ impl Either { #[inline] pub fn is_right(&self) -> bool { is_right(self) } + #[inline] + pub fn expect_left(self, reason: &str) -> T { expect_left(self, reason) } + #[inline] pub fn unwrap_left(self) -> T { unwrap_left(self) } + #[inline] + pub fn expect_right(self, reason: &str) -> U { expect_right(self, reason) } + #[inline] pub fn unwrap_right(self) -> U { unwrap_right(self) } } diff --git a/src/libstd/hashmap.rs b/src/libstd/hashmap.rs index ecc5de117d0af..182ee37202a65 100644 --- a/src/libstd/hashmap.rs +++ b/src/libstd/hashmap.rs @@ -253,7 +253,7 @@ impl HashMap { }; let len_buckets = self.buckets.len(); - let bucket = replace(&mut self.buckets[idx], None); + let bucket = self.buckets[idx].take(); let value = match bucket { None => None, @@ -267,7 +267,7 @@ impl HashMap { let size = self.size - 1; idx = self.next_bucket(idx, len_buckets); while self.buckets[idx].is_some() { - let bucket = replace(&mut self.buckets[idx], None); + let bucket = self.buckets[idx].take(); self.insert_opt_bucket(bucket); idx = self.next_bucket(idx, len_buckets); } diff --git a/src/libstd/option.rs b/src/libstd/option.rs index 42d892fee9b50..f5e5dbb3dbf7f 100644 --- a/src/libstd/option.rs +++ b/src/libstd/option.rs @@ -180,6 +180,13 @@ impl Option { match *self { Some(ref mut x) => Some(f(x)), None => None } } + /// Maps a `some` value from one type to another by a mutable reference, + /// or returns a default value. + #[inline] + pub fn map_mut_default<'a, U>(&'a mut self, def: U, f: &fn(&'a mut T) -> U) -> U { + match *self { Some(ref mut x) => f(x), None => def } + } + /// As `map`, but consumes the option and gives `f` ownership to avoid /// copying. #[inline] @@ -200,6 +207,26 @@ impl Option { match self { None => def, Some(v) => f(v) } } + /// Take the value out of the option, leaving a `None` in its place. + #[inline] + pub fn take(&mut self) -> Option { + util::replace(self, None) + } + + /// As `map_consume`, but swaps a None into the original option rather + /// than consuming it by-value. + #[inline] + pub fn take_map(&mut self, blk: &fn(T) -> U) -> Option { + self.take().map_consume(blk) + } + + /// As `map_consume_default`, but swaps a None into the original option + /// rather than consuming it by-value. 
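A small illustration of the `take_map_default` helper whose signature follows below, using made-up values (`take_map` is the same shape but returns an `Option` instead of taking a default); the kill-handle code added later in this patch uses it for its lazy tombstone closures:

    let mut opt = Some(3);
    // Move the value out (leaving None behind) and map it, or fall back to
    // the supplied default if the option was already empty.
    let doubled = opt.take_map_default(0, |x| x * 2);
    assert!(doubled == 6);
    assert!(opt.is_none());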
+ #[inline] + pub fn take_map_default (&mut self, def: U, blk: &fn(T) -> U) -> U { + self.take().map_consume_default(def, blk) + } + /// Apply a function to the contained value or do nothing pub fn mutate(&mut self, f: &fn(T) -> T) { if self.is_some() { @@ -295,7 +322,7 @@ impl Option { #[inline] pub fn take_unwrap(&mut self) -> T { if self.is_none() { fail!("option::take_unwrap none") } - util::replace(self, None).unwrap() + self.take().unwrap() } /** diff --git a/src/libstd/pipes.rs b/src/libstd/pipes.rs index 8f43e847c24bb..a861c3c5f0fd6 100644 --- a/src/libstd/pipes.rs +++ b/src/libstd/pipes.rs @@ -431,7 +431,7 @@ fn try_recv_(p: &mut Packet) -> Option { // optimistic path match p.header.state { Full => { - let payload = replace(&mut p.payload, None); + let payload = p.payload.take(); p.header.state = Empty; return Some(payload.unwrap()) }, @@ -482,7 +482,7 @@ fn try_recv_(p: &mut Packet) -> Option { fail!("blocking on already blocked packet") }, Full => { - let payload = replace(&mut p.payload, None); + let payload = p.payload.take(); let old_task = swap_task(&mut p.header.blocked_task, ptr::null()); if !old_task.is_null() { unsafe { @@ -676,8 +676,7 @@ impl Drop for SendPacketBuffered { unsafe { let this: &mut SendPacketBuffered = transmute(self); if this.p != None { - let p = replace(&mut this.p, None); - sender_terminate(p.unwrap()) + sender_terminate(this.p.take_unwrap()); } } } @@ -695,7 +694,7 @@ pub fn SendPacketBuffered(p: *mut Packet) impl SendPacketBuffered { pub fn unwrap(&mut self) -> *mut Packet { - replace(&mut self.p, None).unwrap() + self.p.take_unwrap() } pub fn header(&mut self) -> *mut PacketHeader { @@ -711,7 +710,7 @@ impl SendPacketBuffered { pub fn reuse_buffer(&mut self) -> BufferResource { //error!("send reuse_buffer"); - replace(&mut self.buffer, None).unwrap() + self.buffer.take_unwrap() } } @@ -734,8 +733,7 @@ impl Drop for RecvPacketBuffered { unsafe { let this: &mut RecvPacketBuffered = transmute(self); if this.p != None { - let p = replace(&mut this.p, None); - receiver_terminate(p.unwrap()) + receiver_terminate(this.p.take_unwrap()) } } } @@ -743,11 +741,11 @@ impl Drop for RecvPacketBuffered { impl RecvPacketBuffered { pub fn unwrap(&mut self) -> *mut Packet { - replace(&mut self.p, None).unwrap() + self.p.take_unwrap() } pub fn reuse_buffer(&mut self) -> BufferResource { - replace(&mut self.buffer, None).unwrap() + self.buffer.take_unwrap() } } diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs index fba6171129762..f098f8b2767ce 100644 --- a/src/libstd/rt/comm.rs +++ b/src/libstd/rt/comm.rs @@ -19,7 +19,7 @@ use option::*; use cast; use util; use ops::Drop; -use rt::task::Task; +use rt::kill::BlockedTask; use kinds::Send; use rt::sched::Scheduler; use rt::local::Local; @@ -30,13 +30,13 @@ use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable}; use cell::Cell; use clone::Clone; -/// A combined refcount / ~Task pointer. +/// A combined refcount / BlockedTask-as-uint pointer. /// /// Can be equal to the following values: /// /// * 2 - both endpoints are alive /// * 1 - either the sender or the receiver is dead, determined by context -/// * - A pointer to a blocked Task that can be transmuted to ~Task +/// * - A pointer to a blocked Task (see BlockedTask::cast_{to,from}_uint) type State = uint; static STATE_BOTH: State = 2; @@ -137,11 +137,13 @@ impl ChanOne { } task_as_state => { // Port is blocked. Wake it up. 
- let recvr: ~Task = cast::transmute(task_as_state); - let mut sched = Local::take::(); - rtdebug!("rendezvous send"); - sched.metrics.rendezvous_sends += 1; - sched.schedule_task(recvr); + let recvr = BlockedTask::cast_from_uint(task_as_state); + do recvr.wake().map_consume |woken_task| { + let mut sched = Local::take::(); + rtdebug!("rendezvous send"); + sched.metrics.rendezvous_sends += 1; + sched.schedule_task(woken_task); + }; } } } @@ -177,7 +179,7 @@ impl PortOne { // an acquire barrier to prevent reordering of the subsequent read // of the payload. Also issues a release barrier to prevent reordering // of any previous writes to the task structure. - let task_as_state: State = cast::transmute(task); + let task_as_state = task.cast_to_uint(); let oldstate = (*packet).state.swap(task_as_state, SeqCst); match oldstate { STATE_BOTH => { @@ -193,8 +195,8 @@ impl PortOne { // NB: We have to drop back into the scheduler event loop here // instead of switching immediately back or we could end up // triggering infinite recursion on the scheduler's stack. - let task: ~Task = cast::transmute(task_as_state); - sched.enqueue_task(task); + let recvr = BlockedTask::cast_from_uint(task_as_state); + sched.enqueue_blocked_task(recvr); } _ => util::unreachable() } @@ -258,9 +260,11 @@ impl Drop for ChanOneHack { task_as_state => { // The port is blocked waiting for a message we will never send. Wake it. assert!((*this.packet()).payload.is_none()); - let recvr: ~Task = cast::transmute(task_as_state); - let sched = Local::take::(); - sched.schedule_task(recvr); + let recvr = BlockedTask::cast_from_uint(task_as_state); + do recvr.wake().map_consume |woken_task| { + let sched = Local::take::(); + sched.schedule_task(woken_task); + }; } } } @@ -282,8 +286,14 @@ impl Drop for PortOneHack { STATE_ONE => { let _packet: ~Packet = cast::transmute(this.void_packet); } - _ => { - util::unreachable() + task_as_state => { + // This case occurs during unwinding, when the blocked + // receiver was killed awake. The task can't still be + // blocked (we are it), but we need to free the handle. + let recvr = BlockedTask::cast_from_uint(task_as_state); + // FIXME(#7554)(bblum): Make this cfg(test) dependent. + // in a later commit. + assert!(recvr.wake().is_none()); } } } diff --git a/src/libstd/rt/join_latch.rs b/src/libstd/rt/join_latch.rs deleted file mode 100644 index 924db1a21b729..0000000000000 --- a/src/libstd/rt/join_latch.rs +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! The JoinLatch is a concurrent type that establishes the task -//! tree and propagates failure. -//! -//! Each task gets a JoinLatch that is derived from the JoinLatch -//! of its parent task. Every latch must be released by either calling -//! the non-blocking `release` method or the task-blocking `wait` method. -//! Releasing a latch does not complete until all of its child latches -//! complete. -//! -//! Latches carry a `success` flag that is set to `false` during task -//! failure and is propagated both from children to parents and parents -//! to children. The status af this flag may be queried for the purposes -//! of linked failure. -//! -//! 
In addition to failure propagation the task tree serves to keep the -//! default task schedulers alive. The runtime only sends the shutdown -//! message to schedulers once the root task exits. -//! -//! Under this scheme tasks that terminate before their children become -//! 'zombies' since they may not exit until their children do. Zombie -//! tasks are 'tombstoned' as `Tombstone(~JoinLatch)` and the tasks -//! themselves allowed to terminate. -//! -//! XXX: Propagate flag from parents to children. -//! XXX: Tombstoning actually doesn't work. -//! XXX: This could probably be done in a way that doesn't leak tombstones -//! longer than the life of the child tasks. - -use comm::{GenericPort, Peekable, GenericSmartChan}; -use clone::Clone; -use container::Container; -use option::{Option, Some, None}; -use ops::Drop; -use rt::comm::{SharedChan, Port, stream}; -use rt::local::Local; -use rt::sched::Scheduler; -use unstable::atomics::{AtomicUint, SeqCst}; -use util; -use vec::OwnedVector; - -// FIXME #7026: Would prefer this to be an enum -pub struct JoinLatch { - priv parent: Option, - priv child: Option, - closed: bool, -} - -// Shared between parents and all their children. -struct SharedState { - /// Reference count, held by a parent and all children. - count: AtomicUint, - success: bool -} - -struct ParentLink { - shared: *mut SharedState, - // For communicating with the parent. - chan: SharedChan -} - -struct ChildLink { - shared: ~SharedState, - // For receiving from children. - port: Port, - chan: SharedChan, - // Prevents dropping the child SharedState reference counts multiple times. - dropped_child: bool -} - -// Messages from child latches to parent. -enum Message { - Tombstone(~JoinLatch), - ChildrenTerminated -} - -impl JoinLatch { - pub fn new_root() -> ~JoinLatch { - let this = ~JoinLatch { - parent: None, - child: None, - closed: false - }; - rtdebug!("new root latch %x", this.id()); - return this; - } - - fn id(&self) -> uint { - unsafe { ::cast::transmute(&*self) } - } - - pub fn new_child(&mut self) -> ~JoinLatch { - rtassert!(!self.closed); - - if self.child.is_none() { - // This is the first time spawning a child - let shared = ~SharedState { - count: AtomicUint::new(1), - success: true - }; - let (port, chan) = stream(); - let chan = SharedChan::new(chan); - let child = ChildLink { - shared: shared, - port: port, - chan: chan, - dropped_child: false - }; - self.child = Some(child); - } - - let child_link: &mut ChildLink = self.child.get_mut_ref(); - let shared_state: *mut SharedState = &mut *child_link.shared; - - child_link.shared.count.fetch_add(1, SeqCst); - - let child = ~JoinLatch { - parent: Some(ParentLink { - shared: shared_state, - chan: child_link.chan.clone() - }), - child: None, - closed: false - }; - rtdebug!("NEW child latch %x", child.id()); - return child; - } - - pub fn release(~self, local_success: bool) { - // XXX: This should not block, but there's a bug in the below - // code that I can't figure out. 
- self.wait(local_success); - } - - // XXX: Should not require ~self - fn release_broken(~self, local_success: bool) { - rtassert!(!self.closed); - - rtdebug!("releasing %x", self.id()); - - let id = self.id(); - let _ = id; // XXX: `id` is only used in debug statements so appears unused - let mut this = self; - let mut child_success = true; - let mut children_done = false; - - if this.child.is_some() { - rtdebug!("releasing children"); - let child_link: &mut ChildLink = this.child.get_mut_ref(); - let shared: &mut SharedState = &mut *child_link.shared; - - if !child_link.dropped_child { - let last_count = shared.count.fetch_sub(1, SeqCst); - rtdebug!("child count before sub %u %x", last_count, id); - if last_count == 1 { - assert!(child_link.chan.try_send(ChildrenTerminated)); - } - child_link.dropped_child = true; - } - - // Wait for messages from children - let mut tombstones = ~[]; - loop { - if child_link.port.peek() { - match child_link.port.recv() { - Tombstone(t) => { - tombstones.push(t); - }, - ChildrenTerminated => { - children_done = true; - break; - } - } - } else { - break - } - } - - rtdebug!("releasing %u tombstones %x", tombstones.len(), id); - - // Try to release the tombstones. Those that still have - // outstanding will be re-enqueued. When this task's - // parents release their latch we'll end up back here - // trying them again. - while !tombstones.is_empty() { - tombstones.pop().release(true); - } - - if children_done { - let count = shared.count.load(SeqCst); - assert!(count == 0); - // self_count is the acquire-read barrier - child_success = shared.success; - } - } else { - children_done = true; - } - - let total_success = local_success && child_success; - - rtassert!(this.parent.is_some()); - - unsafe { - { - let parent_link: &mut ParentLink = this.parent.get_mut_ref(); - let shared: *mut SharedState = parent_link.shared; - - if !total_success { - // parent_count is the write-wait barrier - (*shared).success = false; - } - } - - if children_done { - rtdebug!("children done"); - do Local::borrow:: |sched| { - sched.metrics.release_tombstone += 1; - } - { - rtdebug!("RELEASING parent %x", id); - let parent_link: &mut ParentLink = this.parent.get_mut_ref(); - let shared: *mut SharedState = parent_link.shared; - let last_count = (*shared).count.fetch_sub(1, SeqCst); - rtdebug!("count before parent sub %u %x", last_count, id); - if last_count == 1 { - assert!(parent_link.chan.try_send(ChildrenTerminated)); - } - } - this.closed = true; - util::ignore(this); - } else { - rtdebug!("children not done"); - rtdebug!("TOMBSTONING %x", id); - do Local::borrow:: |sched| { - sched.metrics.release_no_tombstone += 1; - } - let chan = { - let parent_link: &mut ParentLink = this.parent.get_mut_ref(); - parent_link.chan.clone() - }; - assert!(chan.try_send(Tombstone(this))); - } - } - } - - // XXX: Should not require ~self - pub fn wait(~self, local_success: bool) -> bool { - rtassert!(!self.closed); - - rtdebug!("WAITING %x", self.id()); - - let mut this = self; - let mut child_success = true; - - if this.child.is_some() { - rtdebug!("waiting for children"); - let child_link: &mut ChildLink = this.child.get_mut_ref(); - let shared: &mut SharedState = &mut *child_link.shared; - - if !child_link.dropped_child { - let last_count = shared.count.fetch_sub(1, SeqCst); - rtdebug!("child count before sub %u", last_count); - if last_count == 1 { - assert!(child_link.chan.try_send(ChildrenTerminated)); - } - child_link.dropped_child = true; - } - - // Wait for messages from children - loop { 
- match child_link.port.recv() { - Tombstone(t) => { - t.wait(true); - } - ChildrenTerminated => break - } - } - - let count = shared.count.load(SeqCst); - if count != 0 { ::io::println(fmt!("%u", count)); } - assert!(count == 0); - // self_count is the acquire-read barrier - child_success = shared.success; - } - - let total_success = local_success && child_success; - - if this.parent.is_some() { - rtdebug!("releasing parent"); - unsafe { - let parent_link: &mut ParentLink = this.parent.get_mut_ref(); - let shared: *mut SharedState = parent_link.shared; - - if !total_success { - // parent_count is the write-wait barrier - (*shared).success = false; - } - - let last_count = (*shared).count.fetch_sub(1, SeqCst); - rtdebug!("count before parent sub %u", last_count); - if last_count == 1 { - assert!(parent_link.chan.try_send(ChildrenTerminated)); - } - } - } - - this.closed = true; - util::ignore(this); - - return total_success; - } -} - -impl Drop for JoinLatch { - fn drop(&self) { - rtdebug!("DESTROYING %x", self.id()); - rtassert!(self.closed); - } -} - -#[cfg(test)] -mod test { - use super::*; - use cell::Cell; - use container::Container; - use iter::Times; - use rt::test::*; - use rand; - use rand::RngUtil; - use vec::{CopyableVector, ImmutableVector}; - - #[test] - fn success_immediately() { - do run_in_newsched_task { - let mut latch = JoinLatch::new_root(); - - let child_latch = latch.new_child(); - let child_latch = Cell::new(child_latch); - do spawntask_immediately { - let child_latch = child_latch.take(); - assert!(child_latch.wait(true)); - } - - assert!(latch.wait(true)); - } - } - - #[test] - fn success_later() { - do run_in_newsched_task { - let mut latch = JoinLatch::new_root(); - - let child_latch = latch.new_child(); - let child_latch = Cell::new(child_latch); - do spawntask_later { - let child_latch = child_latch.take(); - assert!(child_latch.wait(true)); - } - - assert!(latch.wait(true)); - } - } - - #[test] - fn mt_success() { - do run_in_mt_newsched_task { - let mut latch = JoinLatch::new_root(); - - for 10.times { - let child_latch = latch.new_child(); - let child_latch = Cell::new(child_latch); - do spawntask_random { - let child_latch = child_latch.take(); - assert!(child_latch.wait(true)); - } - } - - assert!(latch.wait(true)); - } - } - - #[test] - fn mt_failure() { - do run_in_mt_newsched_task { - let mut latch = JoinLatch::new_root(); - - let spawn = |status| { - let child_latch = latch.new_child(); - let child_latch = Cell::new(child_latch); - do spawntask_random { - let child_latch = child_latch.take(); - child_latch.wait(status); - } - }; - - for 10.times { spawn(true) } - spawn(false); - for 10.times { spawn(true) } - - assert!(!latch.wait(true)); - } - } - - #[test] - fn mt_multi_level_success() { - do run_in_mt_newsched_task { - let mut latch = JoinLatch::new_root(); - - fn child(latch: &mut JoinLatch, i: int) { - let child_latch = latch.new_child(); - let child_latch = Cell::new(child_latch); - do spawntask_random { - let mut child_latch = child_latch.take(); - if i != 0 { - child(&mut *child_latch, i - 1); - child_latch.wait(true); - } else { - child_latch.wait(true); - } - } - } - - child(&mut *latch, 10); - - assert!(latch.wait(true)); - } - } - - #[test] - fn mt_multi_level_failure() { - do run_in_mt_newsched_task { - let mut latch = JoinLatch::new_root(); - - fn child(latch: &mut JoinLatch, i: int) { - let child_latch = latch.new_child(); - let child_latch = Cell::new(child_latch); - do spawntask_random { - let mut child_latch = child_latch.take(); - if 
i != 0 { - child(&mut *child_latch, i - 1); - child_latch.wait(false); - } else { - child_latch.wait(true); - } - } - } - - child(&mut *latch, 10); - - assert!(!latch.wait(true)); - } - } - - #[test] - fn release_child() { - do run_in_newsched_task { - let mut latch = JoinLatch::new_root(); - let child_latch = latch.new_child(); - let child_latch = Cell::new(child_latch); - - do spawntask_immediately { - let latch = child_latch.take(); - latch.release(false); - } - - assert!(!latch.wait(true)); - } - } - - #[test] - fn release_child_tombstone() { - do run_in_newsched_task { - let mut latch = JoinLatch::new_root(); - let child_latch = latch.new_child(); - let child_latch = Cell::new(child_latch); - - do spawntask_immediately { - let mut latch = child_latch.take(); - let child_latch = latch.new_child(); - let child_latch = Cell::new(child_latch); - do spawntask_later { - let latch = child_latch.take(); - latch.release(false); - } - latch.release(true); - } - - assert!(!latch.wait(true)); - } - } - - #[test] - fn release_child_no_tombstone() { - do run_in_newsched_task { - let mut latch = JoinLatch::new_root(); - let child_latch = latch.new_child(); - let child_latch = Cell::new(child_latch); - - do spawntask_later { - let mut latch = child_latch.take(); - let child_latch = latch.new_child(); - let child_latch = Cell::new(child_latch); - do spawntask_immediately { - let latch = child_latch.take(); - latch.release(false); - } - latch.release(true); - } - - assert!(!latch.wait(true)); - } - } - - #[test] - fn release_child_tombstone_stress() { - fn rand_orders() -> ~[bool] { - let mut v = ~[false,.. 5]; - v[0] = true; - let mut rng = rand::rng(); - return rng.shuffle(v); - } - - fn split_orders(orders: &[bool]) -> (~[bool], ~[bool]) { - if orders.is_empty() { - return (~[], ~[]); - } else if orders.len() <= 2 { - return (orders.to_owned(), ~[]); - } - let mut rng = rand::rng(); - let n = rng.gen_uint_range(1, orders.len()); - let first = orders.slice(0, n).to_owned(); - let last = orders.slice(n, orders.len()).to_owned(); - assert!(first.len() + last.len() == orders.len()); - return (first, last); - } - - for stress_factor().times { - do run_in_newsched_task { - fn doit(latch: &mut JoinLatch, orders: ~[bool], depth: uint) { - let (my_orders, remaining_orders) = split_orders(orders); - rtdebug!("(my_orders, remaining): %?", (&my_orders, &remaining_orders)); - rtdebug!("depth: %u", depth); - let mut remaining_orders = remaining_orders; - let mut num = 0; - for my_orders.iter().advance |&order| { - let child_latch = latch.new_child(); - let child_latch = Cell::new(child_latch); - let (child_orders, remaining) = split_orders(remaining_orders); - rtdebug!("(child_orders, remaining): %?", (&child_orders, &remaining)); - remaining_orders = remaining; - let child_orders = Cell::new(child_orders); - let child_num = num; - let _ = child_num; // XXX unused except in rtdebug! 
- do spawntask_random { - rtdebug!("depth %u num %u", depth, child_num); - let mut child_latch = child_latch.take(); - let child_orders = child_orders.take(); - doit(&mut *child_latch, child_orders, depth + 1); - child_latch.release(order); - } - - num += 1; - } - } - - let mut latch = JoinLatch::new_root(); - let orders = rand_orders(); - rtdebug!("orders: %?", orders); - - doit(&mut *latch, orders, 0); - - assert!(!latch.wait(true)); - } - } - } - - #[deriving(Clone)] - struct Order { - immediate: bool, - succeed: bool, - orders: ~[Order] - } - - #[test] - fn whateverman() { - fn next(latch: &mut JoinLatch, orders: ~[Order]) { - for orders.iter().advance |order| { - let suborders = order.orders.clone(); - let child_latch = Cell::new(latch.new_child()); - let succeed = order.succeed; - if order.immediate { - do spawntask_immediately { - let mut child_latch = child_latch.take(); - next(&mut *child_latch, suborders.clone()); - rtdebug!("immediate releasing"); - child_latch.release(succeed); - } - } else { - do spawntask_later { - let mut child_latch = child_latch.take(); - next(&mut *child_latch, suborders.clone()); - rtdebug!("later releasing"); - child_latch.release(succeed); - } - } - } - } - - do run_in_newsched_task { - let mut latch = JoinLatch::new_root(); - let orders = ~[ Order { // 0 0 - immediate: true, - succeed: true, - orders: ~[ Order { // 1 0 - immediate: true, - succeed: false, - orders: ~[ Order { // 2 0 - immediate: false, - succeed: false, - orders: ~[ Order { // 3 0 - immediate: true, - succeed: false, - orders: ~[] - }, Order { // 3 1 - immediate: false, - succeed: false, - orders: ~[] - }] - }] - }] - }]; - - next(&mut *latch, orders); - assert!(!latch.wait(true)); - } - } -} - diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs new file mode 100644 index 0000000000000..cfd8e46dfdb75 --- /dev/null +++ b/src/libstd/rt/kill.rs @@ -0,0 +1,792 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Task death: asynchronous killing, linked failure, exit code propagation. + +use cast; +use cell::Cell; +use either::{Either, Left, Right}; +use option::{Option, Some, None}; +use prelude::*; +use rt::task::Task; +use to_bytes::IterBytes; +use unstable::atomics::{AtomicUint, Acquire, SeqCst}; +use unstable::sync::{UnsafeAtomicRcBox, LittleLock}; +use util; + +static KILLED_MSG: &'static str = "killed by linked failure"; + +// State values for the 'killed' and 'unkillable' atomic flags below. +static KILL_RUNNING: uint = 0; +static KILL_KILLED: uint = 1; +static KILL_UNKILLABLE: uint = 2; + +struct KillFlag(AtomicUint); +type KillFlagHandle = UnsafeAtomicRcBox; + +/// A handle to a blocked task. Usually this means having the ~Task pointer by +/// ownership, but if the task is killable, a killer can steal it at any time. +pub enum BlockedTask { + Unkillable(~Task), + Killable(KillFlagHandle), +} + +// FIXME(#7544)(bblum): think about the cache efficiency of this +struct KillHandleInner { + // Is the task running, blocked, or killed? Possible values: + // * KILL_RUNNING - Not unkillable, no kill pending. + // * KILL_KILLED - Kill pending. + // * - A transmuted blocked ~Task pointer. 
+ // This flag is refcounted because it may also be referenced by a blocking + // concurrency primitive, used to wake the task normally, whose reference + // may outlive the handle's if the task is killed. + killed: KillFlagHandle, + // Has the task deferred kill signals? This flag guards the above one. + // Possible values: + // * KILL_RUNNING - Not unkillable, no kill pending. + // * KILL_KILLED - Kill pending. + // * KILL_UNKILLABLE - Kill signals deferred. + unkillable: AtomicUint, + + // Shared state between task and children for exit code propagation. These + // are here so we can re-use the kill handle to implement watched children + // tasks. Using a separate ARClike would introduce extra atomic adds/subs + // into common spawn paths, so this is just for speed. + + // Locklessly accessed; protected by the enclosing refcount's barriers. + any_child_failed: bool, + // A lazy list, consuming which may unwrap() many child tombstones. + child_tombstones: Option<~fn() -> bool>, + // Protects multiple children simultaneously creating tombstones. + graveyard_lock: LittleLock, +} + +/// State shared between tasks used for task killing during linked failure. +#[deriving(Clone)] +pub struct KillHandle(UnsafeAtomicRcBox); + +/// Per-task state related to task death, killing, failure, etc. +pub struct Death { + // Shared among this task, its watched children, and any linked tasks who + // might kill it. This is optional so we can take it by-value at exit time. + kill_handle: Option, + // Handle to a watching parent, if we have one, for exit code propagation. + watching_parent: Option, + // Action to be done with the exit code. If set, also makes the task wait + // until all its watched children exit before collecting the status. + on_exit: Option<~fn(bool)>, + // nesting level counter for task::unkillable calls (0 == killable). + unkillable: int, + // nesting level counter for task::atomically calls (0 == can yield). + wont_sleep: int, + // A "spare" handle to the kill flag inside the kill handle. Used during + // blocking/waking as an optimization to avoid two xadds on the refcount. + spare_kill_flag: Option, +} + +impl Drop for KillFlag { + // Letting a KillFlag with a task inside get dropped would leak the task. + // We could free it here, but the task should get awoken by hand somehow. + fn drop(&self) { + match self.load(Acquire) { + KILL_RUNNING | KILL_KILLED => { }, + _ => rtabort!("can't drop kill flag with a blocked task inside!"), + } + } +} + +// Whenever a task blocks, it swaps out its spare kill flag to use as the +// blocked task handle. So unblocking a task must restore that spare. +unsafe fn revive_task_ptr(task_ptr: uint, spare_flag: Option) -> ~Task { + let mut task: ~Task = cast::transmute(task_ptr); + rtassert!(task.death.spare_kill_flag.is_none()); + task.death.spare_kill_flag = spare_flag; + task +} + +impl BlockedTask { + /// Returns Some if the task was successfully woken; None if already killed. + pub fn wake(self) -> Option<~Task> { + match self { + Unkillable(task) => Some(task), + Killable(flag_arc) => { + let flag = unsafe { &mut **flag_arc.get() }; + match flag.swap(KILL_RUNNING, SeqCst) { + KILL_RUNNING => rtabort!("tried to wake an already-running task"), + KILL_KILLED => None, // a killer stole it already + task_ptr => + Some(unsafe { revive_task_ptr(task_ptr, Some(flag_arc)) }) + } + } + } + } + + /// Create a blocked task, unless the task was already killed. 
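Because `try_block` (declared just below) and `wake` are the two halves of every blocking operation under the new scheme, here is the round trip in isolation. It mirrors the `block_and_wake` test at the bottom of this file; `with_test_task` comes from rt::test as in those tests, and the snippet is illustrative only:

    do with_test_task |task| {
        // Park the task behind a BlockedTask handle, then wake it straight
        // away; wake() returns Some(~Task) because no killer claimed the
        // kill flag in between.
        let blocked = BlockedTask::try_block(task).unwrap_right();
        blocked.wake().unwrap()
    }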
+ pub fn try_block(mut task: ~Task) -> Either<~Task, BlockedTask> { + if task.death.unkillable > 0 { + Right(Unkillable(task)) + } else { + rtassert!(task.death.kill_handle.is_some()); + unsafe { + // The inverse of 'revive', above, occurs here. + // The spare kill flag will usually be Some, unless the task was + // already killed, in which case the killer will have deferred + // creating a new one until whenever it blocks during unwinding. + let flag_arc = match task.death.spare_kill_flag.take() { + Some(spare_flag) => spare_flag, + None => { + // FIXME(#7544): Uncomment this when terminate_current_task + // stops being *terrible*. That's the only place that violates + // the assumption of "becoming unkillable will fail if the + // task was killed". + // rtassert!(task.unwinder.unwinding); + (*task.death.kill_handle.get_ref().get()).killed.clone() + } + }; + let flag = &mut **flag_arc.get(); + let task_ptr = cast::transmute(task); + // Expect flag to contain RUNNING. If KILLED, it should stay KILLED. + match flag.compare_and_swap(KILL_RUNNING, task_ptr, SeqCst) { + KILL_RUNNING => Right(Killable(flag_arc)), + KILL_KILLED => Left(revive_task_ptr(task_ptr, Some(flag_arc))), + x => rtabort!("can't block task! kill flag = %?", x), + } + } + } + } + + /// Convert to an unsafe uint value. Useful for storing in a pipe's state flag. + #[inline] + pub unsafe fn cast_to_uint(self) -> uint { + // Use the low bit to distinguish the enum variants, to save a second + // allocation in the indestructible case. + match self { + Unkillable(task) => { + let blocked_task_ptr: uint = cast::transmute(task); + rtassert!(blocked_task_ptr & 0x1 == 0); + blocked_task_ptr + }, + Killable(flag_arc) => { + let blocked_task_ptr: uint = cast::transmute(~flag_arc); + rtassert!(blocked_task_ptr & 0x1 == 0); + blocked_task_ptr | 0x1 + } + } + } + + /// Convert from an unsafe uint value. Useful for retrieving a pipe's state flag. + #[inline] + pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask { + if blocked_task_ptr & 0x1 == 0 { + Unkillable(cast::transmute(blocked_task_ptr)) + } else { + let ptr: ~KillFlagHandle = cast::transmute(blocked_task_ptr & !0x1); + match ptr { + ~flag_arc => Killable(flag_arc) + } + } + } +} + +// So that KillHandle can be hashed in the taskgroup bookkeeping code. +impl IterBytes for KillHandle { + fn iter_bytes(&self, lsb0: bool, f: &fn(buf: &[u8]) -> bool) -> bool { + self.data.iter_bytes(lsb0, f) + } +} +impl Eq for KillHandle { + #[inline] fn eq(&self, other: &KillHandle) -> bool { self.data.eq(&other.data) } + #[inline] fn ne(&self, other: &KillHandle) -> bool { self.data.ne(&other.data) } +} + +impl KillHandle { + pub fn new() -> (KillHandle, KillFlagHandle) { + let (flag, flag_clone) = + UnsafeAtomicRcBox::new2(KillFlag(AtomicUint::new(KILL_RUNNING))); + let handle = KillHandle(UnsafeAtomicRcBox::new(KillHandleInner { + // Linked failure fields + killed: flag, + unkillable: AtomicUint::new(KILL_RUNNING), + // Exit code propagation fields + any_child_failed: false, + child_tombstones: None, + graveyard_lock: LittleLock(), + })); + (handle, flag_clone) + } + + // Will begin unwinding if a kill signal was received, unless already_failing. + // This can't be used recursively, because a task which sees a KILLED + // signal must fail immediately, which an already-unkillable task can't do. + #[inline] + pub fn inhibit_kill(&mut self, already_failing: bool) { + let inner = unsafe { &mut *self.get() }; + // Expect flag to contain RUNNING. If KILLED, it should stay KILLED. 
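The `cast_to_uint` / `cast_from_uint` pair above packs a BlockedTask into one machine word by borrowing the low bit of an owned pointer, relying on heap allocations being at least word-aligned. The same trick in isolation, with hypothetical helper names that are not part of this patch (and assuming the same `use cast;` import as this file):

    unsafe fn tag_ptr(p: ~int, flagged: bool) -> uint {
        let raw: uint = cast::transmute(p);
        assert!(raw & 0x1 == 0);   // alignment keeps the low bit free
        if flagged { raw | 0x1 } else { raw }
    }
    unsafe fn untag_ptr(word: uint) -> (~int, bool) {
        // Mask the tag bit off before turning the word back into a pointer.
        (cast::transmute(word & !0x1), word & 0x1 == 1)
    }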
+ // FIXME(#7544)(bblum): is it really necessary to prohibit double kill? + match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, SeqCst) { + KILL_RUNNING => { }, // normal case + KILL_KILLED => if !already_failing { fail!(KILLED_MSG) }, + _ => rtabort!("inhibit_kill: task already unkillable"), + } + } + + // Will begin unwinding if a kill signal was received, unless already_failing. + #[inline] + pub fn allow_kill(&mut self, already_failing: bool) { + let inner = unsafe { &mut *self.get() }; + // Expect flag to contain UNKILLABLE. If KILLED, it should stay KILLED. + // FIXME(#7544)(bblum): is it really necessary to prohibit double kill? + match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, SeqCst) { + KILL_UNKILLABLE => { }, // normal case + KILL_KILLED => if !already_failing { fail!(KILLED_MSG) }, + _ => rtabort!("allow_kill: task already killable"), + } + } + + // Send a kill signal to the handle's owning task. Returns the task itself + // if it was blocked and needs punted awake. To be called by other tasks. + pub fn kill(&mut self) -> Option<~Task> { + let inner = unsafe { &mut *self.get() }; + if inner.unkillable.swap(KILL_KILLED, SeqCst) == KILL_RUNNING { + // Got in. Allowed to try to punt the task awake. + let flag = unsafe { &mut *inner.killed.get() }; + match flag.swap(KILL_KILLED, SeqCst) { + // Task either not blocked or already taken care of. + KILL_RUNNING | KILL_KILLED => None, + // Got ownership of the blocked task. + // While the usual 'wake' path can just pass back the flag + // handle, we (the slower kill path) haven't an extra one lying + // around. The task will wake up without a spare. + task_ptr => Some(unsafe { revive_task_ptr(task_ptr, None) }), + } + } else { + // Otherwise it was either unkillable or already killed. Somebody + // else was here first who will deal with the kill signal. + None + } + } + + #[inline] + pub fn killed(&self) -> bool { + // Called every context switch, so shouldn't report true if the task + // is unkillable with a kill signal pending. + let inner = unsafe { &*self.get() }; + let flag = unsafe { &*inner.killed.get() }; + // FIXME(#6598): can use relaxed ordering (i think) + flag.load(Acquire) == KILL_KILLED + } + + pub fn notify_immediate_failure(&mut self) { + // A benign data race may happen here if there are failing sibling + // tasks that were also spawned-watched. The refcount's write barriers + // in UnsafeAtomicRcBox ensure that this write will be seen by the + // unwrapper/destructor, whichever task may unwrap it. + unsafe { (*self.get()).any_child_failed = true; } + } + + // For use when a task does not need to collect its children's exit + // statuses, but the task has a parent which might want them. + pub fn reparent_children_to(self, parent: &mut KillHandle) { + // Optimistic path: If another child of the parent's already failed, + // we don't need to worry about any of this. + if unsafe { (*parent.get()).any_child_failed } { + return; + } + + // Try to see if all our children are gone already. + match unsafe { self.try_unwrap() } { + // Couldn't unwrap; children still alive. Reparent entire handle as + // our own tombstone, to be unwrapped later. + Left(this) => { + let this = Cell::new(this); // :( + do add_lazy_tombstone(parent) |other_tombstones| { + let this = Cell::new(this.take()); // :( + let others = Cell::new(other_tombstones); // :( + || { + // Prefer to check tombstones that were there first, + // being "more fair" at the expense of tail-recursion. 
+ others.take().map_consume_default(true, |f| f()) && { + let mut inner = unsafe { this.take().unwrap() }; + (!inner.any_child_failed) && + inner.child_tombstones.take_map_default(true, |f| f()) + } + } + } + } + // Whether or not all children exited, one or more already failed. + Right(KillHandleInner { any_child_failed: true, _ }) => { + parent.notify_immediate_failure(); + } + // All children exited, but some left behind tombstones that we + // don't want to wait on now. Give them to our parent. + Right(KillHandleInner { any_child_failed: false, + child_tombstones: Some(f), _ }) => { + let f = Cell::new(f); // :( + do add_lazy_tombstone(parent) |other_tombstones| { + let f = Cell::new(f.take()); // :( + let others = Cell::new(other_tombstones); // :( + || { + // Prefer fairness to tail-recursion, as in above case. + others.take().map_consume_default(true, |f| f()) && + f.take()() + } + } + } + // All children exited, none failed. Nothing to do! + Right(KillHandleInner { any_child_failed: false, + child_tombstones: None, _ }) => { } + } + + // NB: Takes a pthread mutex -- 'blk' not allowed to reschedule. + #[inline] + fn add_lazy_tombstone(parent: &mut KillHandle, + blk: &fn(Option<~fn() -> bool>) -> ~fn() -> bool) { + + let inner: &mut KillHandleInner = unsafe { &mut *parent.get() }; + unsafe { + do inner.graveyard_lock.lock { + // Update the current "head node" of the lazy list. + inner.child_tombstones = + Some(blk(util::replace(&mut inner.child_tombstones, None))); + } + } + } + } +} + +impl Death { + pub fn new() -> Death { + let (handle, spare) = KillHandle::new(); + Death { + kill_handle: Some(handle), + watching_parent: None, + on_exit: None, + unkillable: 0, + wont_sleep: 0, + spare_kill_flag: Some(spare), + } + } + + pub fn new_child(&self) -> Death { + // FIXME(#7327) + let (handle, spare) = KillHandle::new(); + Death { + kill_handle: Some(handle), + watching_parent: self.kill_handle.clone(), + on_exit: None, + unkillable: 0, + wont_sleep: 0, + spare_kill_flag: Some(spare), + } + } + + /// Collect failure exit codes from children and propagate them to a parent. + pub fn collect_failure(&mut self, mut success: bool) { + // This may run after the task has already failed, so even though the + // task appears to need to be killed, the scheduler should not fail us + // when we block to unwrap. + // (XXX: Another less-elegant reason for doing this is so that the use + // of the LittleLock in reparent_children_to doesn't need to access the + // unkillable flag in the kill_handle, since we'll have removed it.) + rtassert!(self.unkillable == 0); + self.unkillable = 1; + + // Step 1. Decide if we need to collect child failures synchronously. + do self.on_exit.take_map |on_exit| { + if success { + // We succeeded, but our children might not. Need to wait for them. + let mut inner = unsafe { self.kill_handle.take_unwrap().unwrap() }; + if inner.any_child_failed { + success = false; + } else { + // Lockless access to tombstones protected by unwrap barrier. + success = inner.child_tombstones.take_map_default(true, |f| f()); + } + } + on_exit(success); + }; + + // Step 2. Possibly alert possibly-watching parent to failure status. + // Note that as soon as parent_handle goes out of scope, the parent + // can successfully unwrap its handle and collect our reported status. + do self.watching_parent.take_map |mut parent_handle| { + if success { + // Our handle might be None if we had an exit callback, and + // already unwrapped it. 
But 'success' being true means no + // child failed, so there's nothing to do (see below case). + do self.kill_handle.take_map |own_handle| { + own_handle.reparent_children_to(&mut parent_handle); + }; + } else { + // Can inform watching parent immediately that we failed. + // (Note the importance of non-failing tasks NOT writing + // 'false', which could obscure another task's failure.) + parent_handle.notify_immediate_failure(); + } + }; + + // Can't use allow_kill directly; that would require the kill handle. + rtassert!(self.unkillable == 1); + self.unkillable = 0; + } + + /// Fails if a kill signal was received. + #[inline] + pub fn check_killed(&self) { + match self.kill_handle { + Some(ref kill_handle) => + // The task may be both unkillable and killed if it does some + // synchronization during unwinding or cleanup (for example, + // sending on a notify port). In that case failing won't help. + if self.unkillable == 0 && kill_handle.killed() { + fail!(KILLED_MSG); + }, + // This may happen during task death (see comments in collect_failure). + None => rtassert!(self.unkillable > 0), + } + } + + /// Enter a possibly-nested unkillable section of code. + /// All calls must be paired with a subsequent call to allow_kill. + #[inline] + pub fn inhibit_kill(&mut self, already_failing: bool) { + if self.unkillable == 0 { + rtassert!(self.kill_handle.is_some()); + self.kill_handle.get_mut_ref().inhibit_kill(already_failing); + } + self.unkillable += 1; + } + + /// Exit a possibly-nested unkillable section of code. + /// All calls must be paired with a preceding call to inhibit_kill. + #[inline] + pub fn allow_kill(&mut self, already_failing: bool) { + rtassert!(self.unkillable != 0); + self.unkillable -= 1; + if self.unkillable == 0 { + rtassert!(self.kill_handle.is_some()); + self.kill_handle.get_mut_ref().allow_kill(already_failing); + } + } + + /// Enter a possibly-nested "atomic" section of code. Just for assertions. + /// All calls must be paired with a subsequent call to allow_yield. + #[inline] + pub fn inhibit_yield(&mut self) { + self.wont_sleep += 1; + } + + /// Exit a possibly-nested "atomic" section of code. Just for assertions. + /// All calls must be paired with a preceding call to inhibit_yield. + #[inline] + pub fn allow_yield(&mut self) { + rtassert!(self.wont_sleep != 0); + self.wont_sleep -= 1; + } + + /// Ensure that the task is allowed to become descheduled. + #[inline] + pub fn assert_may_sleep(&self) { + if self.wont_sleep != 0 { + rtabort!("illegal atomic-sleep: can't deschedule inside atomically()"); + } + } +} + +impl Drop for Death { + fn drop(&self) { + // Mustn't be in an atomic or unkillable section at task death. + rtassert!(self.unkillable == 0); + rtassert!(self.wont_sleep == 0); + } +} + +#[cfg(test)] +mod test { + #[allow(unused_mut)]; + use cell::Cell; + use rt::test::*; + use super::*; + use util; + + // Test cases don't care about the spare killed flag. + fn make_kill_handle() -> KillHandle { let (h,_) = KillHandle::new(); h } + + #[test] + fn no_tombstone_success() { + do run_in_newsched_task { + // Tests case 4 of the 4-way match in reparent_children. + let mut parent = make_kill_handle(); + let mut child = make_kill_handle(); + + // Without another handle to child, the try unwrap should succeed. 
+ child.reparent_children_to(&mut parent); + let mut parent_inner = unsafe { parent.unwrap() }; + assert!(parent_inner.child_tombstones.is_none()); + assert!(parent_inner.any_child_failed == false); + } + } + #[test] + fn no_tombstone_failure() { + do run_in_newsched_task { + // Tests case 2 of the 4-way match in reparent_children. + let mut parent = make_kill_handle(); + let mut child = make_kill_handle(); + + child.notify_immediate_failure(); + // Without another handle to child, the try unwrap should succeed. + child.reparent_children_to(&mut parent); + let mut parent_inner = unsafe { parent.unwrap() }; + assert!(parent_inner.child_tombstones.is_none()); + // Immediate failure should have been propagated. + assert!(parent_inner.any_child_failed); + } + } + #[test] + fn no_tombstone_because_sibling_already_failed() { + do run_in_newsched_task { + // Tests "case 0, the optimistic path in reparent_children. + let mut parent = make_kill_handle(); + let mut child1 = make_kill_handle(); + let mut child2 = make_kill_handle(); + let mut link = child2.clone(); + + // Should set parent's child_failed flag + child1.notify_immediate_failure(); + child1.reparent_children_to(&mut parent); + // Should bypass trying to unwrap child2 entirely. + // Otherwise, due to 'link', it would try to tombstone. + child2.reparent_children_to(&mut parent); + // Should successfully unwrap even though 'link' is still alive. + let mut parent_inner = unsafe { parent.unwrap() }; + assert!(parent_inner.child_tombstones.is_none()); + // Immediate failure should have been propagated by first child. + assert!(parent_inner.any_child_failed); + util::ignore(link); + } + } + #[test] + fn one_tombstone_success() { + do run_in_newsched_task { + let mut parent = make_kill_handle(); + let mut child = make_kill_handle(); + let mut link = child.clone(); + + // Creates 1 tombstone. Existence of 'link' makes try-unwrap fail. + child.reparent_children_to(&mut parent); + // Let parent collect tombstones. + util::ignore(link); + // Must have created a tombstone + let mut parent_inner = unsafe { parent.unwrap() }; + assert!(parent_inner.child_tombstones.take_unwrap()()); + assert!(parent_inner.any_child_failed == false); + } + } + #[test] + fn one_tombstone_failure() { + do run_in_newsched_task { + let mut parent = make_kill_handle(); + let mut child = make_kill_handle(); + let mut link = child.clone(); + + // Creates 1 tombstone. Existence of 'link' makes try-unwrap fail. + child.reparent_children_to(&mut parent); + // Must happen after tombstone to not be immediately propagated. + link.notify_immediate_failure(); + // Let parent collect tombstones. + util::ignore(link); + // Must have created a tombstone + let mut parent_inner = unsafe { parent.unwrap() }; + // Failure must be seen in the tombstone. + assert!(parent_inner.child_tombstones.take_unwrap()() == false); + assert!(parent_inner.any_child_failed == false); + } + } + #[test] + fn two_tombstones_success() { + do run_in_newsched_task { + let mut parent = make_kill_handle(); + let mut middle = make_kill_handle(); + let mut child = make_kill_handle(); + let mut link = child.clone(); + + child.reparent_children_to(&mut middle); // case 1 tombstone + // 'middle' should try-unwrap okay, but still have to reparent. + middle.reparent_children_to(&mut parent); // case 3 tombston + // Let parent collect tombstones. 
+ util::ignore(link); + // Must have created a tombstone + let mut parent_inner = unsafe { parent.unwrap() }; + assert!(parent_inner.child_tombstones.take_unwrap()()); + assert!(parent_inner.any_child_failed == false); + } + } + #[test] + fn two_tombstones_failure() { + do run_in_newsched_task { + let mut parent = make_kill_handle(); + let mut middle = make_kill_handle(); + let mut child = make_kill_handle(); + let mut link = child.clone(); + + child.reparent_children_to(&mut middle); // case 1 tombstone + // Must happen after tombstone to not be immediately propagated. + link.notify_immediate_failure(); + // 'middle' should try-unwrap okay, but still have to reparent. + middle.reparent_children_to(&mut parent); // case 3 tombstone + // Let parent collect tombstones. + util::ignore(link); + // Must have created a tombstone + let mut parent_inner = unsafe { parent.unwrap() }; + // Failure must be seen in the tombstone. + assert!(parent_inner.child_tombstones.take_unwrap()() == false); + assert!(parent_inner.any_child_failed == false); + } + } + + // Task killing tests + + #[test] + fn kill_basic() { + do run_in_newsched_task { + let mut handle = make_kill_handle(); + assert!(!handle.killed()); + assert!(handle.kill().is_none()); + assert!(handle.killed()); + } + } + + #[test] + fn double_kill() { + do run_in_newsched_task { + let mut handle = make_kill_handle(); + assert!(!handle.killed()); + assert!(handle.kill().is_none()); + assert!(handle.killed()); + assert!(handle.kill().is_none()); + assert!(handle.killed()); + } + } + + #[test] + fn unkillable_after_kill() { + do run_in_newsched_task { + let mut handle = make_kill_handle(); + assert!(handle.kill().is_none()); + assert!(handle.killed()); + let handle_cell = Cell::new(handle); + let result = do spawntask_try { + handle_cell.take().inhibit_kill(false); + }; + assert!(result.is_err()); + } + } + + #[test] + fn unkillable_during_kill() { + do run_in_newsched_task { + let mut handle = make_kill_handle(); + handle.inhibit_kill(false); + assert!(handle.kill().is_none()); + assert!(!handle.killed()); + let handle_cell = Cell::new(handle); + let result = do spawntask_try { + handle_cell.take().allow_kill(false); + }; + assert!(result.is_err()); + } + } + + #[test] + fn unkillable_before_kill() { + do run_in_newsched_task { + let mut handle = make_kill_handle(); + handle.inhibit_kill(false); + handle.allow_kill(false); + assert!(handle.kill().is_none()); + assert!(handle.killed()); + } + } + + // Task blocking tests + + #[test] + fn block_and_wake() { + do with_test_task |mut task| { + BlockedTask::try_block(task).unwrap_right().wake().unwrap() + } + } + + #[test] + fn block_and_get_killed() { + do with_test_task |mut task| { + let mut handle = task.death.kill_handle.get_ref().clone(); + let result = BlockedTask::try_block(task).unwrap_right(); + let task = handle.kill().unwrap(); + assert!(result.wake().is_none()); + task + } + } + + #[test] + fn block_already_killed() { + do with_test_task |mut task| { + let mut handle = task.death.kill_handle.get_ref().clone(); + assert!(handle.kill().is_none()); + BlockedTask::try_block(task).unwrap_left() + } + } + + #[test] + fn block_unkillably_and_get_killed() { + do with_test_task |mut task| { + let mut handle = task.death.kill_handle.get_ref().clone(); + task.death.inhibit_kill(false); + let result = BlockedTask::try_block(task).unwrap_right(); + assert!(handle.kill().is_none()); + let mut task = result.wake().unwrap(); + // This call wants to fail, but we can't have that happen since + // we're not 
running in a newsched task, so we can't even use + // spawntask_try. But the failing behaviour is already tested + // above, in unkillable_during_kill(), so we punt on it here. + task.death.allow_kill(true); + task + } + } + + #[test] + fn block_on_pipe() { + // Tests the "killable" path of casting to/from uint. + do run_in_newsched_task { + do with_test_task |mut task| { + let result = BlockedTask::try_block(task).unwrap_right(); + let result = unsafe { result.cast_to_uint() }; + let result = unsafe { BlockedTask::cast_from_uint(result) }; + result.wake().unwrap() + } + } + } + + #[test] + fn block_unkillably_on_pipe() { + // Tests the "indestructible" path of casting to/from uint. + do run_in_newsched_task { + do with_test_task |mut task| { + task.death.inhibit_kill(false); + let result = BlockedTask::try_block(task).unwrap_right(); + let result = unsafe { result.cast_to_uint() }; + let result = unsafe { BlockedTask::cast_from_uint(result) }; + let mut task = result.wake().unwrap(); + task.death.allow_kill(false); + task + } + } + } +} diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 51f4737ef85fb..85537f476d4a1 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -83,6 +83,9 @@ pub mod global_heap; /// Implementations of language-critical runtime features like @. pub mod task; +/// Facilities related to task failure, killing, and death. +mod kill; + /// The coroutine task scheduler, built on the `io` event loop. mod sched; @@ -149,9 +152,6 @@ pub mod local_ptr; /// Bindings to pthread/windows thread-local storage. pub mod thread_local_storage; -/// For waiting on child tasks. -pub mod join_latch; - pub mod metrics; // FIXME #5248 shouldn't be pub @@ -277,7 +277,7 @@ pub fn run(main: ~fn()) -> int { let main_cell = Cell::new(main); let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, main_cell.take()); - main_task.on_exit = Some(on_exit); + main_task.death.on_exit = Some(on_exit); scheds[0].enqueue_task(main_task); // Run each scheduler in a thread. @@ -367,7 +367,7 @@ fn test_context() { let sched = Local::take::(); do sched.deschedule_running_task_and_then() |sched, task| { assert_eq!(context(), SchedulerContext); - sched.enqueue_task(task); + sched.enqueue_blocked_task(task); } }; sched.enqueue_task(task); diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index 4e4145ddc161f..d8d61806a5bba 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -8,7 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use option::*; +use either::{Left, Right}; +use option::{Option, Some, None}; use sys; use cast::transmute; use clone::Clone; @@ -20,6 +21,7 @@ use super::rtio::{EventLoop, EventLoopObject, RemoteCallbackObject}; use super::context::Context; use super::task::{Task, AnySched, Sched}; use super::message_queue::MessageQueue; +use rt::kill::BlockedTask; use rt::local_ptr; use rt::local::Local; use rt::rtio::RemoteCallback; @@ -271,6 +273,14 @@ impl Scheduler { }; } + /// As enqueue_task, but with the possibility for the blocked task to + /// already have been killed. + pub fn enqueue_blocked_task(&mut self, blocked_task: BlockedTask) { + do blocked_task.wake().map_consume |task| { + self.enqueue_task(task); + }; + } + // * Scheduler-context operations fn interpret_message_queue(~self) -> bool { @@ -412,14 +422,26 @@ impl Scheduler { /// Called by a running task to end execution, after which it will /// be recycled by the scheduler for reuse in a new task. 
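(Review note, illustrative sketch; not part of the patch.) The blocking tests above and enqueue_blocked_task rely on the same round trip: BlockedTask::try_block either hands an already-killed task straight back (Left) or returns a BlockedTask (Right) whose wake() yields Some only if no kill signal arrived while blocked. A minimal sketch in the patch's pre-1.0 syntax, using the with_test_task helper added by this patch in rt::test:

    do with_test_task |task| {
        match BlockedTask::try_block(task) {
            // A kill raced with the block attempt; the task comes back immediately.
            Left(killed_task) => killed_task,
            // Blocked successfully; wake() is Some only if nobody killed us meanwhile.
            Right(blocked) => blocked.wake().unwrap()
        }
    }

terminate_current_task below performs the same wake() check before recycling a dead task's coroutine.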
pub fn terminate_current_task(~self) { - assert!(self.in_task_context()); + let mut this = self; + assert!(this.in_task_context()); rtdebug!("ending running task"); - do self.deschedule_running_task_and_then |sched, dead_task| { - let mut dead_task = dead_task; - let coroutine = dead_task.coroutine.take_unwrap(); - coroutine.recycle(&mut sched.stack_pool); + // This task is post-cleanup, so it must be unkillable. This sequence + // of descheduling and recycling must not get interrupted by a kill. + // FIXME(#7544): Make this use an inner descheduler, like yield should. + this.current_task.get_mut_ref().death.unkillable += 1; + + do this.deschedule_running_task_and_then |sched, dead_task| { + match dead_task.wake() { + Some(dead_task) => { + let mut dead_task = dead_task; + dead_task.death.unkillable -= 1; // FIXME(#7544) ugh + let coroutine = dead_task.coroutine.take_unwrap(); + coroutine.recycle(&mut sched.stack_pool); + } + None => rtabort!("dead task killed before recycle"), + } } rtabort!("control reached end of task"); @@ -440,7 +462,7 @@ impl Scheduler { // here we know we are home, execute now OR we know we // aren't homed, and that this sched doesn't care do this.switch_running_tasks_and_then(task) |sched, last_task| { - sched.enqueue_task(last_task); + sched.enqueue_blocked_task(last_task); } } else if !homed && !this.run_anything { // the task isn't homed, but it can't be run here @@ -483,9 +505,21 @@ impl Scheduler { // Running tasks may have asked us to do some cleanup (*sched).run_cleanup_job(); + + // Must happen after running the cleanup job (of course). + // Might not be running in task context; if not, a later call to + // resume_task_immediately will take care of this. + (*sched).current_task.map(|t| t.death.check_killed()); } } + pub fn resume_blocked_task_immediately(~self, blocked_task: BlockedTask) { + match blocked_task.wake() { + Some(task) => self.resume_task_immediately(task), + None => Local::put(self), + }; + } + /// Block a running task, context switch to the scheduler, then pass the /// blocked task to a closure. /// @@ -498,7 +532,7 @@ impl Scheduler { /// This passes a Scheduler pointer to the fn after the context switch /// in order to prevent that fn from performing further scheduling operations. /// Doing further scheduling could easily result in infinite recursion. - pub fn deschedule_running_task_and_then(~self, f: &fn(&mut Scheduler, ~Task)) { + pub fn deschedule_running_task_and_then(~self, f: &fn(&mut Scheduler, BlockedTask)) { let mut this = self; assert!(this.in_task_context()); @@ -507,8 +541,8 @@ impl Scheduler { unsafe { let blocked_task = this.current_task.take_unwrap(); - let f_fake_region = transmute::<&fn(&mut Scheduler, ~Task), - &fn(&mut Scheduler, ~Task)>(f); + let f_fake_region = transmute::<&fn(&mut Scheduler, BlockedTask), + &fn(&mut Scheduler, BlockedTask)>(f); let f_opaque = ClosureConverter::from_fn(f_fake_region); this.enqueue_cleanup_job(GiveTask(blocked_task, f_opaque)); } @@ -524,6 +558,9 @@ impl Scheduler { // We could be executing in a different thread now let sched = Local::unsafe_borrow::(); (*sched).run_cleanup_job(); + + // As above, must happen after running the cleanup job. + (*sched).current_task.map(|t| t.death.check_killed()); } } @@ -531,7 +568,7 @@ impl Scheduler { /// You would want to think hard about doing this, e.g. if there are /// pending I/O events it would be a bad idea. 
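(Review note, illustrative sketch; not part of the patch.) After the signature change above, the callback passed to deschedule_running_task_and_then receives a BlockedTask rather than a ~Task, so callers requeue it with enqueue_blocked_task, which silently drops the task if it was killed while descheduled. The shape the updated callers in this patch follow:

    let sched = Local::take::<Scheduler>();
    do sched.deschedule_running_task_and_then |sched, blocked_task| {
        // blocked_task is a BlockedTask; this wakes and enqueues it only if
        // it was not killed in the meantime.
        sched.enqueue_blocked_task(blocked_task);
    }

switch_running_tasks_and_then below gets the same BlockedTask treatment.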
pub fn switch_running_tasks_and_then(~self, next_task: ~Task, - f: &fn(&mut Scheduler, ~Task)) { + f: &fn(&mut Scheduler, BlockedTask)) { let mut this = self; assert!(this.in_task_context()); @@ -540,8 +577,8 @@ impl Scheduler { let old_running_task = this.current_task.take_unwrap(); let f_fake_region = unsafe { - transmute::<&fn(&mut Scheduler, ~Task), - &fn(&mut Scheduler, ~Task)>(f) + transmute::<&fn(&mut Scheduler, BlockedTask), + &fn(&mut Scheduler, BlockedTask)>(f) }; let f_opaque = ClosureConverter::from_fn(f_fake_region); this.enqueue_cleanup_job(GiveTask(old_running_task, f_opaque)); @@ -559,6 +596,9 @@ impl Scheduler { // We could be executing in a different thread now let sched = Local::unsafe_borrow::(); (*sched).run_cleanup_job(); + + // As above, must happen after running the cleanup job. + (*sched).current_task.map(|t| t.death.check_killed()); } } @@ -579,7 +619,15 @@ impl Scheduler { let cleanup_job = self.cleanup_job.take_unwrap(); match cleanup_job { DoNothing => { } - GiveTask(task, f) => (f.to_fn())(self, task) + GiveTask(task, f) => { + let f = f.to_fn(); + // Task might need to receive a kill signal instead of blocking. + // We can call the "and_then" only if it blocks successfully. + match BlockedTask::try_block(task) { + Left(killed_task) => self.enqueue_task(killed_task), + Right(blocked_task) => f(self, blocked_task), + } + } } } @@ -652,12 +700,14 @@ impl SchedHandle { // complaining type UnsafeTaskReceiver = sys::Closure; trait ClosureConverter { - fn from_fn(&fn(&mut Scheduler, ~Task)) -> Self; - fn to_fn(self) -> &fn(&mut Scheduler, ~Task); + fn from_fn(&fn(&mut Scheduler, BlockedTask)) -> Self; + fn to_fn(self) -> &fn(&mut Scheduler, BlockedTask); } impl ClosureConverter for UnsafeTaskReceiver { - fn from_fn(f: &fn(&mut Scheduler, ~Task)) -> UnsafeTaskReceiver { unsafe { transmute(f) } } - fn to_fn(self) -> &fn(&mut Scheduler, ~Task) { unsafe { transmute(self) } } + fn from_fn(f: &fn(&mut Scheduler, BlockedTask)) -> UnsafeTaskReceiver { + unsafe { transmute(f) } + } + fn to_fn(self) -> &fn(&mut Scheduler, BlockedTask) { unsafe { transmute(self) } } } @@ -917,8 +967,7 @@ mod test { }; // Context switch directly to the new task do sched.switch_running_tasks_and_then(task2) |sched, task1| { - let task1 = Cell::new(task1); - sched.enqueue_task(task1.take()); + sched.enqueue_blocked_task(task1); } unsafe { *count_ptr = *count_ptr + 1; } }; @@ -969,9 +1018,8 @@ mod test { let sched = Local::take::(); assert!(sched.in_task_context()); do sched.deschedule_running_task_and_then() |sched, task| { - let task = Cell::new(task); assert!(!sched.in_task_context()); - sched.enqueue_task(task.take()); + sched.enqueue_blocked_task(task); } }; sched.enqueue_task(task); @@ -993,7 +1041,7 @@ mod test { do sched.event_loop.callback_ms(10) { rtdebug!("in callback"); let mut sched = Local::take::(); - sched.enqueue_task(task.take()); + sched.enqueue_blocked_task(task.take()); Local::put(sched); } } diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 449438b920551..d297514835085 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -20,13 +20,14 @@ use libc::{c_void, uintptr_t}; use ptr; use prelude::*; use option::{Option, Some, None}; +use rt::kill::Death; use rt::local::Local; use rt::logging::StdErrLogger; use super::local_heap::LocalHeap; use rt::sched::{Scheduler, SchedHandle}; -use rt::join_latch::JoinLatch; use rt::stack::{StackSegment, StackPool}; use rt::context::Context; +use task::spawn::Taskgroup; use cell::Cell; pub struct Task { @@ -36,8 +37,8 
@@ pub struct Task { logger: StdErrLogger, unwinder: Unwinder, home: Option, - join_latch: Option<~JoinLatch>, - on_exit: Option<~fn(bool)>, + taskgroup: Option, + death: Death, destroyed: bool, coroutine: Option<~Coroutine> } @@ -86,8 +87,8 @@ impl Task { logger: StdErrLogger, unwinder: Unwinder { unwinding: false }, home: Some(home), - join_latch: Some(JoinLatch::new_root()), - on_exit: None, + taskgroup: None, + death: Death::new(), destroyed: false, coroutine: Some(~Coroutine::new(stack_pool, start)) } @@ -104,8 +105,9 @@ impl Task { logger: StdErrLogger, home: Some(home), unwinder: Unwinder { unwinding: false }, - join_latch: Some(self.join_latch.get_mut_ref().new_child()), - on_exit: None, + taskgroup: None, + // FIXME(#7544) make watching optional + death: self.death.new_child(), destroyed: false, coroutine: Some(~Coroutine::new(stack_pool, start)) } @@ -123,20 +125,9 @@ impl Task { } self.unwinder.try(f); + { let _ = self.taskgroup.take(); } + self.death.collect_failure(!self.unwinder.unwinding); self.destroy(); - - // Wait for children. Possibly report the exit status. - let local_success = !self.unwinder.unwinding; - let join_latch = self.join_latch.take_unwrap(); - match self.on_exit { - Some(ref on_exit) => { - let success = join_latch.wait(local_success); - (*on_exit)(success); - } - None => { - join_latch.release(local_success); - } - } } /// must be called manually before finalization to clean up diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index e5393c84a088c..11a02dfbeebec 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -51,7 +51,7 @@ pub fn run_in_newsched_task(f: ~fn()) { let mut task = ~Task::new_root(&mut sched.stack_pool, f.take()); rtdebug!("newsched_task: %x", to_uint(task)); - task.on_exit = Some(on_exit); + task.death.on_exit = Some(on_exit); sched.enqueue_task(task); sched.run(); } @@ -109,7 +109,7 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { }; let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, f_cell.take()); - main_task.on_exit = Some(on_exit); + main_task.death.on_exit = Some(on_exit); scheds[0].enqueue_task(main_task); let mut threads = ~[]; @@ -170,7 +170,7 @@ pub fn spawntask_immediately(f: ~fn()) { let sched = Local::take::(); do sched.switch_running_tasks_and_then(task) |sched, task| { - sched.enqueue_task(task); + sched.enqueue_blocked_task(task); } } @@ -214,7 +214,7 @@ pub fn spawntask_random(f: ~fn()) { if run_now { do sched.switch_running_tasks_and_then(task) |sched, task| { - sched.enqueue_task(task); + sched.enqueue_blocked_task(task); } } else { sched.enqueue_task(task); @@ -280,11 +280,11 @@ pub fn spawntask_try(f: ~fn()) -> Result<(), ()> { f.take()) } }; - new_task.on_exit = Some(on_exit); + new_task.death.on_exit = Some(on_exit); let sched = Local::take::(); do sched.switch_running_tasks_and_then(new_task) |sched, old_task| { - sched.enqueue_task(old_task); + sched.enqueue_blocked_task(old_task); } rtdebug!("enqueued the new task, now waiting on exit_status"); @@ -293,7 +293,7 @@ pub fn spawntask_try(f: ~fn()) -> Result<(), ()> { if exit_status { Ok(()) } else { Err(()) } } -// Spawn a new task in a new scheduler and return a thread handle. +/// Spawn a new task in a new scheduler and return a thread handle. pub fn spawntask_thread(f: ~fn()) -> Thread { use rt::sched::*; @@ -317,6 +317,16 @@ pub fn spawntask_thread(f: ~fn()) -> Thread { return thread; } +/// Get a ~Task for testing purposes other than actually scheduling it. 
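(Review note, illustrative sketch; not part of the patch.) With on_exit moved onto task.death, spawntask_try above wires a child's unwinding status into a Result; the new tests in task/mod.rs and rt/kill.rs observe failure the same way:

    do run_in_newsched_task {
        let result = do spawntask_try {
            fail!();                  // child unwinds ...
        };
        assert!(result.is_err());     // ... and death.on_exit reports the failure
    }

The with_test_task helper defined next serves tests that need a bare ~Task without going through spawntask_try.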
+pub fn with_test_task(blk: ~fn(~Task) -> ~Task) { + do run_in_bare_thread { + let mut sched = ~new_test_uv_sched(); + let task = blk(~Task::new_root(&mut sched.stack_pool, ||{})); + sched.enqueue_task(task); + sched.run(); + } +} + /// Get a port number, starting at 9600, for use in tests pub fn next_test_port() -> u16 { diff --git a/src/libstd/rt/tube.rs b/src/libstd/rt/tube.rs index f61eee8859b1a..bc223d8f3f70d 100644 --- a/src/libstd/rt/tube.rs +++ b/src/libstd/rt/tube.rs @@ -18,13 +18,13 @@ use clone::Clone; use super::rc::RC; use rt::sched::Scheduler; use rt::{context, TaskContext, SchedulerContext}; +use rt::kill::BlockedTask; use rt::local::Local; -use rt::task::Task; use vec::OwnedVector; use container::Container; struct TubeState { - blocked_task: Option<~Task>, + blocked_task: Option, buf: ~[T] } @@ -55,7 +55,7 @@ impl Tube { rtdebug!("waking blocked tube"); let task = (*state).blocked_task.take_unwrap(); let sched = Local::take::(); - sched.resume_task_immediately(task); + sched.resume_blocked_task_immediately(task); } } } @@ -111,7 +111,7 @@ mod test { do sched.deschedule_running_task_and_then |sched, task| { let mut tube_clone = tube_clone_cell.take(); tube_clone.send(1); - sched.enqueue_task(task); + sched.enqueue_blocked_task(task); } assert!(tube.recv() == 1); @@ -133,7 +133,7 @@ mod test { // sending will wake it up. tube_clone.send(1); } - sched.enqueue_task(task); + sched.enqueue_blocked_task(task); } assert!(tube.recv() == 1); @@ -168,7 +168,7 @@ mod test { } } - sched.enqueue_task(task); + sched.enqueue_blocked_task(task); } for int::range(0, MAX) |i| { diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index 5d0c64c686782..7046afe855133 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -227,7 +227,7 @@ impl IoFactory for UvIoFactory { // Context switch let scheduler = Local::take::(); - scheduler.resume_task_immediately(task_cell.take()); + scheduler.resume_blocked_task_immediately(task_cell.take()); } else { rtdebug!("status is some"); let task_cell = Cell::new(task_cell.take()); @@ -235,7 +235,7 @@ impl IoFactory for UvIoFactory { let res = Err(uv_error_to_io_error(status.get())); unsafe { (*result_cell_ptr).put_back(res); } let scheduler = Local::take::(); - scheduler.resume_task_immediately(task_cell.take()); + scheduler.resume_blocked_task_immediately(task_cell.take()); } }; } @@ -255,7 +255,7 @@ impl IoFactory for UvIoFactory { let task_cell = Cell::new(task); do watcher.as_stream().close { let scheduler = Local::take::(); - scheduler.resume_task_immediately(task_cell.take()); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } Err(uv_error_to_io_error(uverr)) @@ -273,7 +273,7 @@ impl IoFactory for UvIoFactory { let task_cell = Cell::new(task); do watcher.close { let scheduler = Local::take::(); - scheduler.resume_task_immediately(task_cell.take()); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } Err(uv_error_to_io_error(uverr)) @@ -309,7 +309,7 @@ impl Drop for UvTcpListener { let task_cell = Cell::new(task); do watcher.as_stream().close { let scheduler = Local::take::(); - scheduler.resume_task_immediately(task_cell.take()); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } } @@ -372,7 +372,7 @@ impl Drop for UvTcpStream { let task_cell = Cell::new(task); do self.close { let scheduler = Local::take::(); - scheduler.resume_task_immediately(task_cell.take()); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } } @@ -419,7 +419,7 @@ impl RtioTcpStream for 
UvTcpStream { unsafe { (*result_cell_ptr).put_back(result); } let scheduler = Local::take::(); - scheduler.resume_task_immediately(task_cell.take()); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } @@ -447,7 +447,7 @@ impl RtioTcpStream for UvTcpStream { unsafe { (*result_cell_ptr).put_back(result); } let scheduler = Local::take::(); - scheduler.resume_task_immediately(task_cell.take()); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } @@ -473,7 +473,7 @@ impl Drop for UvUdpSocket { let task_cell = Cell::new(task); do self.close { let scheduler = Local::take::(); - scheduler.resume_task_immediately(task_cell.take()); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } } @@ -513,7 +513,7 @@ impl RtioUdpSocket for UvUdpSocket { unsafe { (*result_cell_ptr).put_back(result); } let scheduler = Local::take::(); - scheduler.resume_task_immediately(task_cell.take()); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } @@ -540,7 +540,7 @@ impl RtioUdpSocket for UvUdpSocket { unsafe { (*result_cell_ptr).put_back(result); } let scheduler = Local::take::(); - scheduler.resume_task_immediately(task_cell.take()); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } @@ -678,7 +678,7 @@ fn test_read_and_block() { // not ready for it do scheduler.deschedule_running_task_and_then |sched, task| { let task = Cell::new(task); - sched.enqueue_task(task.take()); + sched.enqueue_blocked_task(task.take()); } } diff --git a/src/libstd/task/mod.rs b/src/libstd/task/mod.rs index 2fec9858c88d8..de6410aa82f91 100644 --- a/src/libstd/task/mod.rs +++ b/src/libstd/task/mod.rs @@ -42,10 +42,10 @@ use cmp::Eq; use comm::{stream, Chan, GenericChan, GenericPort, Port}; use result::Result; use result; -use rt::{context, OldTaskContext}; +use rt::{context, OldTaskContext, TaskContext}; +use rt::local::Local; use task::rt::{task_id, sched_id}; use unstable::finally::Finally; -use util::replace; use util; #[cfg(test)] use cast; @@ -147,6 +147,17 @@ pub struct SchedOpts { * * supervised - Propagate failure unidirectionally from parent to child, * but not from child to parent. False by default. * + * * watched - Make parent task collect exit status notifications from child + * before reporting its own exit status. (This delays the parent + * task's death and cleanup until after all transitively watched + * children also exit.) True by default. + * + * * indestructible - Configures the task to ignore kill signals received from + * linked failure. This may cause process hangs during + * failure if not used carefully, but causes task blocking + * code paths (e.g. port recv() calls) to be faster by 2 + * atomic operations. False by default. 
+ * * * notify_chan - Enable lifecycle notifications on the given channel * * * sched - Specify the configuration of a new scheduler to create the task @@ -165,6 +176,8 @@ pub struct SchedOpts { pub struct TaskOpts { linked: bool, supervised: bool, + watched: bool, + indestructible: bool, notify_chan: Option>, sched: SchedOpts } @@ -210,12 +223,14 @@ impl TaskBuilder { fail!("Cannot copy a task_builder"); // Fake move mode on self } self.consumed = true; - let gen_body = replace(&mut self.gen_body, None); - let notify_chan = replace(&mut self.opts.notify_chan, None); + let gen_body = self.gen_body.take(); + let notify_chan = self.opts.notify_chan.take(); TaskBuilder { opts: TaskOpts { linked: self.opts.linked, supervised: self.opts.supervised, + watched: self.opts.watched, + indestructible: self.opts.indestructible, notify_chan: notify_chan, sched: self.opts.sched }, @@ -231,6 +246,7 @@ impl TaskBuilder { /// the other will not be killed. pub fn unlinked(&mut self) { self.opts.linked = false; + self.opts.watched = false; } /// Unidirectionally link the child task's failure with the parent's. The @@ -239,6 +255,7 @@ impl TaskBuilder { pub fn supervised(&mut self) { self.opts.supervised = true; self.opts.linked = false; + self.opts.watched = false; } /// Link the child task's and parent task's failures. If either fails, the @@ -246,6 +263,26 @@ impl TaskBuilder { pub fn linked(&mut self) { self.opts.linked = true; self.opts.supervised = false; + self.opts.watched = true; + } + + /// Cause the parent task to collect the child's exit status (and that of + /// all transitively-watched grandchildren) before reporting its own. + pub fn watched(&mut self) { + self.opts.watched = true; + } + + /// Allow the child task to outlive the parent task, at the possible cost + /// of the parent reporting success even if the child task fails later. + pub fn unwatched(&mut self) { + self.opts.watched = false; + } + + /// Cause the child task to ignore any kill signals received from linked + /// failure. This optimizes context switching, at the possible expense of + /// process hangs in the case of unexpected failure. + pub fn indestructible(&mut self) { + self.opts.indestructible = true; } /** @@ -302,7 +339,7 @@ impl TaskBuilder { * existing body generator to the new body generator. */ pub fn add_wrapper(&mut self, wrapper: ~fn(v: ~fn()) -> ~fn()) { - let prev_gen_body = replace(&mut self.gen_body, None); + let prev_gen_body = self.gen_body.take(); let prev_gen_body = match prev_gen_body { Some(gen) => gen, None => { @@ -334,12 +371,14 @@ impl TaskBuilder { * must be greater than zero. */ pub fn spawn(&mut self, f: ~fn()) { - let gen_body = replace(&mut self.gen_body, None); - let notify_chan = replace(&mut self.opts.notify_chan, None); + let gen_body = self.gen_body.take(); + let notify_chan = self.opts.notify_chan.take(); let x = self.consume(); let opts = TaskOpts { linked: x.opts.linked, supervised: x.opts.supervised, + watched: x.opts.watched, + indestructible: x.opts.indestructible, notify_chan: notify_chan, sched: x.opts.sched }; @@ -406,6 +445,8 @@ pub fn default_task_opts() -> TaskOpts { TaskOpts { linked: true, supervised: false, + watched: true, + indestructible: false, notify_chan: None, sched: SchedOpts { mode: DefaultScheduler, @@ -447,6 +488,17 @@ pub fn spawn_supervised(f: ~fn()) { task.spawn(f) } +/// Creates a child task that cannot be killed by linked failure. This causes +/// its context-switch path to be faster by 2 atomic swap operations. 
+/// (Note that this convenience wrapper still uses linked-failure, so the +/// child's children will still be killable by the parent. For the fastest +/// possible spawn mode, use task::task().unlinked().indestructible().spawn.) +pub fn spawn_indestructible(f: ~fn()) { + let mut task = task(); + task.indestructible(); + task.spawn(f) +} + pub fn spawn_with(arg: A, f: ~fn(v: A)) { /*! * Runs a task, while transfering ownership of one argument to the @@ -514,9 +566,10 @@ pub fn yield() { } _ => { // XXX: What does yield really mean in newsched? + // FIXME(#7544): Optimize this, since we know we won't block. let sched = Local::take::(); do sched.deschedule_running_task_and_then |sched, task| { - sched.enqueue_task(task); + sched.enqueue_blocked_task(task); } } } @@ -526,8 +579,6 @@ pub fn yield() { pub fn failing() -> bool { //! True if the running task has failed - use rt::{context, OldTaskContext}; - use rt::local::Local; use rt::task::Task; match context() { @@ -572,33 +623,59 @@ pub fn get_scheduler() -> Scheduler { * ~~~ */ pub unsafe fn unkillable(f: &fn() -> U) -> U { - if context() == OldTaskContext { - let t = rt::rust_get_task(); - do (|| { - rt::rust_task_inhibit_kill(t); - f() - }).finally { - rt::rust_task_allow_kill(t); + use rt::task::Task; + + match context() { + OldTaskContext => { + let t = rt::rust_get_task(); + do (|| { + rt::rust_task_inhibit_kill(t); + f() + }).finally { + rt::rust_task_allow_kill(t); + } + } + TaskContext => { + // The inhibits/allows might fail and need to borrow the task. + let t = Local::unsafe_borrow::(); + do (|| { + (*t).death.inhibit_kill((*t).unwinder.unwinding); + f() + }).finally { + (*t).death.allow_kill((*t).unwinder.unwinding); + } } - } else { - // FIXME #6377 - f() + // FIXME(#3095): This should be an rtabort as soon as the scheduler + // no longer uses a workqueue implemented with an Exclusive. + _ => f() } } /// The inverse of unkillable. Only ever to be used nested in unkillable(). pub unsafe fn rekillable(f: &fn() -> U) -> U { - if context() == OldTaskContext { - let t = rt::rust_get_task(); - do (|| { - rt::rust_task_allow_kill(t); - f() - }).finally { - rt::rust_task_inhibit_kill(t); + use rt::task::Task; + + match context() { + OldTaskContext => { + let t = rt::rust_get_task(); + do (|| { + rt::rust_task_allow_kill(t); + f() + }).finally { + rt::rust_task_inhibit_kill(t); + } + } + TaskContext => { + let t = Local::unsafe_borrow::(); + do (|| { + (*t).death.allow_kill((*t).unwinder.unwinding); + f() + }).finally { + (*t).death.inhibit_kill((*t).unwinder.unwinding); + } } - } else { - // FIXME #6377 - f() + // FIXME(#3095): As in unkillable(). + _ => f() } } @@ -607,19 +684,36 @@ pub unsafe fn rekillable(f: &fn() -> U) -> U { * For use with exclusive ARCs, which use pthread mutexes directly. 
*/ pub unsafe fn atomically(f: &fn() -> U) -> U { - if context() == OldTaskContext { - let t = rt::rust_get_task(); - do (|| { - rt::rust_task_inhibit_kill(t); - rt::rust_task_inhibit_yield(t); - f() - }).finally { - rt::rust_task_allow_yield(t); - rt::rust_task_allow_kill(t); + use rt::task::Task; + + match context() { + OldTaskContext => { + let t = rt::rust_get_task(); + do (|| { + rt::rust_task_inhibit_kill(t); + rt::rust_task_inhibit_yield(t); + f() + }).finally { + rt::rust_task_allow_yield(t); + rt::rust_task_allow_kill(t); + } } - } else { - // FIXME #6377 - f() + TaskContext => { + let t = Local::unsafe_borrow::(); + do (|| { + // It's important to inhibit kill after inhibiting yield, because + // inhibit-kill might fail if we were already killed, and the + // inhibit-yield must happen to match the finally's allow-yield. + (*t).death.inhibit_yield(); + (*t).death.inhibit_kill((*t).unwinder.unwinding); + f() + }).finally { + (*t).death.allow_kill((*t).unwinder.unwinding); + (*t).death.allow_yield(); + } + } + // FIXME(#3095): As in unkillable(). + _ => f() } } @@ -640,6 +734,9 @@ fn test_cant_dup_task_builder() { // !!! These tests are dangerous. If Something is buggy, they will hang, !!! // !!! instead of exiting cleanly. This might wedge the buildbots. !!! +#[cfg(test)] +fn block_forever() { let (po, _ch) = stream::<()>(); po.recv(); } + #[test] #[ignore(cfg(windows))] fn test_spawn_unlinked_unsup_no_fail_down() { // grandchild sends on a port let (po, ch) = stream(); @@ -667,14 +764,12 @@ fn test_spawn_unlinked_sup_no_fail_up() { // child unlinked fails } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_spawn_unlinked_sup_fail_down() { - do spawn_supervised { loop { task::yield(); } } + do spawn_supervised { block_forever(); } fail!(); // Shouldn't leave a child hanging around. } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_spawn_linked_sup_fail_up() { // child fails; parent fails - let (po, _ch) = stream::<()>(); - // Unidirectional "parenting" shouldn't override bidirectional linked. // We have to cheat with opts - the interface doesn't support them because // they don't make sense (redundant with task().supervised()). @@ -685,7 +780,7 @@ fn test_spawn_linked_sup_fail_up() { // child fails; parent fails do b0.spawn { fail!(); } - po.recv(); // We should get punted awake + block_forever(); // We should get punted awake } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_spawn_linked_sup_fail_down() { // parent fails; child fails @@ -694,24 +789,19 @@ fn test_spawn_linked_sup_fail_down() { // parent fails; child fails let mut b0 = task(); b0.opts.linked = true; b0.opts.supervised = true; - do b0.spawn { - loop { - task::yield(); - } - } + do b0.spawn { block_forever(); } fail!(); // *both* mechanisms would be wrong if this didn't kill the child } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_spawn_linked_unsup_fail_up() { // child fails; parent fails - let (po, _ch) = stream::<()>(); // Default options are to spawn linked & unsupervised. do spawn { fail!(); } - po.recv(); // We should get punted awake + block_forever(); // We should get punted awake } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_spawn_linked_unsup_fail_down() { // parent fails; child fails // Default options are to spawn linked & unsupervised. 
- do spawn { loop { task::yield(); } } + do spawn { block_forever(); } fail!(); } #[test] #[should_fail] #[ignore(cfg(windows))] @@ -719,11 +809,7 @@ fn test_spawn_linked_unsup_default_opts() { // parent fails; child fails // Make sure the above test is the same as this one. let mut builder = task(); builder.linked(); - do builder.spawn { - loop { - task::yield(); - } - } + do builder.spawn { block_forever(); } fail!(); } @@ -734,9 +820,7 @@ fn test_spawn_linked_unsup_default_opts() { // parent fails; child fails fn test_spawn_failure_propagate_grandchild() { // Middle task exits; does grandparent's failure propagate across the gap? do spawn_supervised { - do spawn_supervised { - loop { task::yield(); } - } + do spawn_supervised { block_forever(); } } for 16.times { task::yield(); } fail!(); @@ -746,9 +830,7 @@ fn test_spawn_failure_propagate_grandchild() { fn test_spawn_failure_propagate_secondborn() { // First-born child exits; does parent's failure propagate to sibling? do spawn_supervised { - do spawn { // linked - loop { task::yield(); } - } + do spawn { block_forever(); } // linked } for 16.times { task::yield(); } fail!(); @@ -758,9 +840,7 @@ fn test_spawn_failure_propagate_secondborn() { fn test_spawn_failure_propagate_nephew_or_niece() { // Our sibling exits; does our failure propagate to sibling's child? do spawn { // linked - do spawn_supervised { - loop { task::yield(); } - } + do spawn_supervised { block_forever(); } } for 16.times { task::yield(); } fail!(); @@ -770,9 +850,7 @@ fn test_spawn_failure_propagate_nephew_or_niece() { fn test_spawn_linked_sup_propagate_sibling() { // Middle sibling exits - does eldest's failure propagate to youngest? do spawn { // linked - do spawn { // linked - loop { task::yield(); } - } + do spawn { block_forever(); } // linked } for 16.times { task::yield(); } fail!(); @@ -1182,3 +1260,61 @@ fn test_simple_newsched_spawn() { } } +#[test] #[ignore(cfg(windows))] +fn test_spawn_watched() { + use rt::test::{run_in_newsched_task, spawntask_try}; + do run_in_newsched_task { + let result = do spawntask_try { + let mut t = task(); + t.unlinked(); + t.watched(); + do t.spawn { + let mut t = task(); + t.unlinked(); + t.watched(); + do t.spawn { + task::yield(); + fail!(); + } + } + }; + assert!(result.is_err()); + } +} + +#[test] #[ignore(cfg(windows))] +fn test_indestructible() { + use rt::test::{run_in_newsched_task, spawntask_try}; + do run_in_newsched_task { + let result = do spawntask_try { + let mut t = task(); + t.watched(); + t.supervised(); + t.indestructible(); + do t.spawn { + let (p1, _c1) = stream::<()>(); + let (p2, c2) = stream::<()>(); + let (p3, c3) = stream::<()>(); + let mut t = task(); + t.unwatched(); + do t.spawn { + do (|| { + p1.recv(); // would deadlock if not killed + }).finally { + c2.send(()); + }; + } + let mut t = task(); + t.unwatched(); + do t.spawn { + p3.recv(); + task::yield(); + fail!(); + } + c3.send(()); + p2.recv(); + } + }; + assert!(result.is_ok()); + } +} diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs index a08214ea40caf..2150c0c5ac28d 100644 --- a/src/libstd/task/spawn.rs +++ b/src/libstd/task/spawn.rs @@ -79,7 +79,7 @@ use cast; use cell::Cell; use container::MutableMap; use comm::{Chan, GenericChan}; -use hashmap::HashSet; +use hashmap::{HashSet, HashSetConsumeIterator}; use local_data; use task::local_data_priv::{local_get, local_set, OldHandle}; use task::rt::rust_task; @@ -88,32 +88,67 @@ use task::{Failure, ManualThreads, PlatformThread, SchedOpts, SingleThreaded}; use task::{Success, 
TaskOpts, TaskResult, ThreadPerTask}; use task::{ExistingScheduler, SchedulerHandle}; use task::unkillable; +use to_bytes::IterBytes; use uint; use util; use unstable::sync::{Exclusive, exclusive}; +use rt::{OldTaskContext, TaskContext, SchedulerContext, GlobalContext, context}; use rt::local::Local; use rt::task::Task; +use rt::kill::KillHandle; +use rt::sched::Scheduler; use iterator::IteratorUtil; #[cfg(test)] use task::default_task_opts; #[cfg(test)] use comm; #[cfg(test)] use task; -type TaskSet = HashSet<*rust_task>; - -fn new_taskset() -> TaskSet { - HashSet::new() +// Transitionary. +#[deriving(Eq)] +enum TaskHandle { + OldTask(*rust_task), + NewTask(KillHandle), } -fn taskset_insert(tasks: &mut TaskSet, task: *rust_task) { - let didnt_overwrite = tasks.insert(task); - assert!(didnt_overwrite); + +impl Clone for TaskHandle { + fn clone(&self) -> TaskHandle { + match *self { + OldTask(x) => OldTask(x), + NewTask(ref x) => NewTask(x.clone()), + } + } } -fn taskset_remove(tasks: &mut TaskSet, task: *rust_task) { - let was_present = tasks.remove(&task); - assert!(was_present); + +impl IterBytes for TaskHandle { + fn iter_bytes(&self, lsb0: bool, f: &fn(buf: &[u8]) -> bool) -> bool { + match *self { + OldTask(ref x) => x.iter_bytes(lsb0, f), + NewTask(ref x) => x.iter_bytes(lsb0, f), + } + } } -pub fn taskset_each(tasks: &TaskSet, blk: &fn(v: *rust_task) -> bool) -> bool { - tasks.iter().advance(|k| blk(*k)) + +struct TaskSet(HashSet); + +impl TaskSet { + #[inline] + fn new() -> TaskSet { + TaskSet(HashSet::new()) + } + #[inline] + fn insert(&mut self, task: TaskHandle) { + let didnt_overwrite = (**self).insert(task); + assert!(didnt_overwrite); + } + #[inline] + fn remove(&mut self, task: &TaskHandle) { + let was_present = (**self).remove(task); + assert!(was_present); + } + #[inline] + fn consume(self) -> HashSetConsumeIterator { + (*self).consume() + } } // One of these per group of linked-failure tasks. @@ -147,10 +182,9 @@ struct AncestorNode { // circular references arise, deadlock and memory leaks are imminent). // Hence we assert that this counter monotonically decreases as we // approach the tail of the list. - // FIXME(#3068): Make the generation counter togglable with #[cfg(debug)]. generation: uint, - // Should really be a non-option. This way appeases borrowck. - parent_group: Option, + // Handle to the tasks in the group of the current generation. + parent_group: TaskGroupArc, // Recursive rest of the list. ancestors: AncestorList, } @@ -173,39 +207,44 @@ fn access_ancestors(x: &Exclusive, } } +#[inline] #[cfg(test)] +fn check_generation(younger: uint, older: uint) { assert!(younger > older); } +#[inline] #[cfg(not(test))] +fn check_generation(_younger: uint, _older: uint) { } + +#[inline] #[cfg(test)] +fn incr_generation(ancestors: &AncestorList) -> uint { + ancestors.map_default(0, |arc| access_ancestors(arc, |a| a.generation+1)) +} +#[inline] #[cfg(not(test))] +fn incr_generation(_ancestors: &AncestorList) -> uint { 0 } + // Iterates over an ancestor list. // (1) Runs forward_blk on each ancestral taskgroup in the list // (2) If forward_blk "break"s, runs optional bail_blk on all ancestral // taskgroups that forward_blk already ran on successfully (Note: bail_blk // is NOT called on the block that forward_blk broke on!). // (3) As a bonus, coalesces away all 'dead' taskgroup nodes in the list. -// FIXME(#2190): Change Option<@fn(...)> to Option<&fn(...)>, to save on -// allocations. Once that bug is fixed, changing the sigil should suffice. 
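(Review note, illustrative sketch; not part of the patch.) The bail_blk/forward_blk contract replacing the old Option<@fn(...)> parameter: forward_blk runs against each ancestor taskgroup, and if any invocation returns false, bail_blk is then run on the groups forward_blk had already handled (not on the one that refused). This is how enlist_many, later in this patch, uses it; 'child' and 'ancestors' here are that function's parameters:

    // Undo an enlistment that already succeeded (only runs if a later one fails).
    let bail: &fn(TaskGroupInner) = |tg| { leave_taskgroup(tg, &child, false) };
    let enlisted_everywhere = do each_ancestor(ancestors, bail) |ancestor_tg| {
        // Enlist as a descendant; returns false if that group is already failing.
        enlist_in_taskgroup(ancestor_tg, child.clone(), false)
    };

each_ancestor below performs this iteration, coalescing dead ancestor nodes as it goes.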
fn each_ancestor(list: &mut AncestorList, - bail_opt: Option<@fn(TaskGroupInner)>, + bail_blk: &fn(TaskGroupInner), forward_blk: &fn(TaskGroupInner) -> bool) -> bool { // "Kickoff" call - there was no last generation. - return !coalesce(list, bail_opt, forward_blk, uint::max_value); + return !coalesce(list, bail_blk, forward_blk, uint::max_value); // Recursively iterates, and coalesces afterwards if needed. Returns // whether or not unwinding is needed (i.e., !successful iteration). fn coalesce(list: &mut AncestorList, - bail_opt: Option<@fn(TaskGroupInner)>, + bail_blk: &fn(TaskGroupInner), forward_blk: &fn(TaskGroupInner) -> bool, last_generation: uint) -> bool { - // Need to swap the list out to use it, to appease borrowck. - let tmp_list = util::replace(&mut *list, AncestorList(None)); let (coalesce_this, early_break) = - iterate(&tmp_list, bail_opt, forward_blk, last_generation); + iterate(list, bail_blk, forward_blk, last_generation); // What should our next ancestor end up being? if coalesce_this.is_some() { // Needed coalesce. Our next ancestor becomes our old // ancestor's next ancestor. ("next = old_next->next;") *list = coalesce_this.unwrap(); - } else { - // No coalesce; restore from tmp. ("next = old_next;") - *list = tmp_list; } return early_break; } @@ -218,8 +257,8 @@ fn each_ancestor(list: &mut AncestorList, // bool: // True if the supplied block did 'break', here or in any recursive // calls. If so, must call the unwinder on all previous nodes. - fn iterate(ancestors: &AncestorList, - bail_opt: Option<@fn(TaskGroupInner)>, + fn iterate(ancestors: &mut AncestorList, + bail_blk: &fn(TaskGroupInner), forward_blk: &fn(TaskGroupInner) -> bool, last_generation: uint) -> (Option, bool) { @@ -236,20 +275,20 @@ fn each_ancestor(list: &mut AncestorList, // The map defaults to None, because if ancestors is None, we're at // the end of the list, which doesn't make sense to coalesce. - return do (**ancestors).map_default((None,false)) |ancestor_arc| { + do ancestors.map_default((None,false)) |ancestor_arc| { // NB: Takes a lock! (this ancestor node) do access_ancestors(ancestor_arc) |nobe| { // Argh, but we couldn't give it to coalesce() otherwise. let forward_blk = forward_blk.take(); // Check monotonicity - assert!(last_generation > nobe.generation); + check_generation(last_generation, nobe.generation); /*##########################################################* * Step 1: Look at this ancestor group (call iterator block). *##########################################################*/ let mut nobe_is_dead = false; let do_continue = // NB: Takes a lock! (this ancestor node's parent group) - do with_parent_tg(&mut nobe.parent_group) |tg_opt| { + do access_group(&nobe.parent_group) |tg_opt| { // Decide whether this group is dead. Note that the // group being *dead* is disjoint from it *failing*. nobe_is_dead = match *tg_opt { @@ -257,7 +296,7 @@ fn each_ancestor(list: &mut AncestorList, None => nobe_is_dead }; // Call iterator block. (If the group is dead, it's - // safe to skip it. This will leave our *rust_task + // safe to skip it. This will leave our TaskHandle // hanging around in the group even after it's freed, // but that's ok because, by virtue of the group being // dead, nobody will ever kill-all (foreach) over it.) @@ -271,17 +310,15 @@ fn each_ancestor(list: &mut AncestorList, let mut need_unwind = false; if do_continue { // NB: Takes many locks! 
(ancestor nodes & parent groups) - need_unwind = coalesce(&mut nobe.ancestors, bail_opt, + need_unwind = coalesce(&mut nobe.ancestors, |tg| bail_blk(tg), forward_blk, nobe.generation); } /*##########################################################* * Step 3: Maybe unwind; compute return info for our caller. *##########################################################*/ if need_unwind && !nobe_is_dead { - for bail_opt.iter().advance |bail_blk| { - do with_parent_tg(&mut nobe.parent_group) |tg_opt| { - (*bail_blk)(tg_opt) - } + do access_group(&nobe.parent_group) |tg_opt| { + bail_blk(tg_opt) } } // Decide whether our caller should unwind. @@ -296,23 +333,12 @@ fn each_ancestor(list: &mut AncestorList, (None, need_unwind) } } - }; - - // Wrapper around exclusive::with that appeases borrowck. - fn with_parent_tg(parent_group: &mut Option, - blk: &fn(TaskGroupInner) -> U) -> U { - // If this trips, more likely the problem is 'blk' failed inside. - let tmp_arc = parent_group.take_unwrap(); - let result = do access_group(&tmp_arc) |tg_opt| { blk(tg_opt) }; - *parent_group = Some(tmp_arc); - result } } } // One of these per task. -struct TCB { - me: *rust_task, +pub struct Taskgroup { // List of tasks with whose fates this one's is intertwined. tasks: TaskGroupArc, // 'none' means the group has failed. // Lists of tasks who will kill us if they fail, but whom we won't kill. @@ -321,50 +347,50 @@ struct TCB { notifier: Option, } -impl Drop for TCB { +impl Drop for Taskgroup { // Runs on task exit. fn drop(&self) { unsafe { // FIXME(#4330) Need self by value to get mutability. - let this: &mut TCB = transmute(self); + let this: &mut Taskgroup = transmute(self); // If we are failing, the whole taskgroup needs to die. - if rt::rust_task_is_unwinding(self.me) { - for this.notifier.mut_iter().advance |x| { - x.failed = true; - } - // Take everybody down with us. - do access_group(&self.tasks) |tg| { - kill_taskgroup(tg, self.me, self.is_main); - } - } else { - // Remove ourselves from the group(s). - do access_group(&self.tasks) |tg| { - leave_taskgroup(tg, self.me, true); + do RuntimeGlue::with_task_handle_and_failing |me, failing| { + if failing { + for this.notifier.mut_iter().advance |x| { + x.failed = true; + } + // Take everybody down with us. + do access_group(&self.tasks) |tg| { + kill_taskgroup(tg, &me, self.is_main); + } + } else { + // Remove ourselves from the group(s). + do access_group(&self.tasks) |tg| { + leave_taskgroup(tg, &me, true); + } } + // It doesn't matter whether this happens before or after dealing + // with our own taskgroup, so long as both happen before we die. + // We remove ourself from every ancestor we can, so no cleanup; no + // break. + for each_ancestor(&mut this.ancestors, |_| {}) |ancestor_group| { + leave_taskgroup(ancestor_group, &me, false); + }; } - // It doesn't matter whether this happens before or after dealing - // with our own taskgroup, so long as both happen before we die. - // We remove ourself from every ancestor we can, so no cleanup; no - // break. 
- for each_ancestor(&mut this.ancestors, None) |ancestor_group| { - leave_taskgroup(ancestor_group, self.me, false); - }; } } } -fn TCB(me: *rust_task, - tasks: TaskGroupArc, +pub fn Taskgroup(tasks: TaskGroupArc, ancestors: AncestorList, is_main: bool, - mut notifier: Option) -> TCB { + mut notifier: Option) -> Taskgroup { for notifier.mut_iter().advance |x| { x.failed = false; } - TCB { - me: me, + Taskgroup { tasks: tasks, ancestors: ancestors, is_main: is_main, @@ -391,42 +417,36 @@ fn AutoNotify(chan: Chan) -> AutoNotify { } } -fn enlist_in_taskgroup(state: TaskGroupInner, me: *rust_task, +fn enlist_in_taskgroup(state: TaskGroupInner, me: TaskHandle, is_member: bool) -> bool { - let newstate = util::replace(&mut *state, None); + let me = Cell::new(me); // :( // If 'None', the group was failing. Can't enlist. - if newstate.is_some() { - let mut group = newstate.unwrap(); - taskset_insert(if is_member { + do state.map_mut_default(false) |group| { + (if is_member { &mut group.members } else { &mut group.descendants - }, me); - *state = Some(group); + }).insert(me.take()); true - } else { - false } } // NB: Runs in destructor/post-exit context. Can't 'fail'. -fn leave_taskgroup(state: TaskGroupInner, me: *rust_task, +fn leave_taskgroup(state: TaskGroupInner, me: &TaskHandle, is_member: bool) { - let newstate = util::replace(&mut *state, None); + let me = Cell::new(me); // :( // If 'None', already failing and we've already gotten a kill signal. - if newstate.is_some() { - let mut group = newstate.unwrap(); - taskset_remove(if is_member { + do state.map_mut |group| { + (if is_member { &mut group.members } else { &mut group.descendants - }, me); - *state = Some(group); - } + }).remove(me.take()); + }; } // NB: Runs in destructor/post-exit context. Can't 'fail'. -fn kill_taskgroup(state: TaskGroupInner, me: *rust_task, is_main: bool) { +fn kill_taskgroup(state: TaskGroupInner, me: &TaskHandle, is_main: bool) { unsafe { // NB: We could do the killing iteration outside of the group arc, by // having "let mut newstate" here, swapping inside, and iterating @@ -442,20 +462,21 @@ fn kill_taskgroup(state: TaskGroupInner, me: *rust_task, is_main: bool) { // That's ok; only one task needs to do the dirty work. (Might also // see 'None' if Somebody already failed and we got a kill signal.) if newstate.is_some() { - let group = newstate.unwrap(); - for taskset_each(&group.members) |sibling| { + let TaskGroupData { members: members, descendants: descendants } = + newstate.unwrap(); + for members.consume().advance |sibling| { // Skip self - killing ourself won't do much good. - if sibling != me { - rt::rust_task_kill_other(sibling); + if &sibling != me { + RuntimeGlue::kill_task(sibling); } } - for taskset_each(&group.descendants) |child| { - assert!(child != me); - rt::rust_task_kill_other(child); + for descendants.consume().advance |child| { + assert!(&child != me); + RuntimeGlue::kill_task(child); } // Only one task should ever do this. if is_main { - rt::rust_task_kill_all(me); + RuntimeGlue::kill_all_tasks(me); } // Do NOT restore state to Some(..)! It stays None to indicate // that the whole taskgroup is failing, to forbid new spawns. @@ -467,112 +488,171 @@ fn kill_taskgroup(state: TaskGroupInner, me: *rust_task, is_main: bool) { // FIXME (#2912): Work around core-vs-coretest function duplication. Can't use // a proper closure because the #[test]s won't understand. Have to fake it. 
#[cfg(not(stage0))] -fn taskgroup_key() -> local_data::Key<@@mut TCB> { +fn taskgroup_key() -> local_data::Key<@@mut Taskgroup> { unsafe { cast::transmute(-2) } } #[cfg(stage0)] -fn taskgroup_key() -> local_data::Key<@@mut TCB> { +fn taskgroup_key() -> local_data::Key<@@mut Taskgroup> { unsafe { cast::transmute((-2, 0)) } } -fn gen_child_taskgroup(linked: bool, supervised: bool) - -> (TaskGroupArc, AncestorList, bool) { - unsafe { - let spawner = rt::rust_get_task(); - /*##################################################################* - * Step 1. Get spawner's taskgroup info. - *##################################################################*/ - let spawner_group: @@mut TCB = - do local_get(OldHandle(spawner), taskgroup_key()) |group| { - match group { +// Transitionary. +struct RuntimeGlue; +impl RuntimeGlue { + unsafe fn kill_task(task: TaskHandle) { + match task { + OldTask(ptr) => rt::rust_task_kill_other(ptr), + NewTask(handle) => { + let mut handle = handle; + do handle.kill().map_consume |killed_task| { + let killed_task = Cell::new(killed_task); + do Local::borrow:: |sched| { + sched.enqueue_task(killed_task.take()); + } + }; + } + } + } + + unsafe fn kill_all_tasks(task: &TaskHandle) { + match *task { + OldTask(ptr) => rt::rust_task_kill_all(ptr), + NewTask(ref _handle) => rtabort!("unimplemented"), // FIXME(#7544) + } + } + + fn with_task_handle_and_failing(blk: &fn(TaskHandle, bool)) { + match context() { + OldTaskContext => unsafe { + let me = rt::rust_get_task(); + blk(OldTask(me), rt::rust_task_is_unwinding(me)) + }, + TaskContext => unsafe { + // Can't use safe borrow, because the taskgroup destructor needs to + // access the scheduler again to send kill signals to other tasks. + let me = Local::unsafe_borrow::(); + // FIXME(#7544): Get rid of this clone by passing by-ref. + // Will probably have to wait until the old rt is gone. + blk(NewTask((*me).death.kill_handle.get_ref().clone()), + (*me).unwinder.unwinding) + }, + SchedulerContext | GlobalContext => rtabort!("task dying in bad context"), + } + } + + fn with_my_taskgroup(blk: &fn(&Taskgroup) -> U) -> U { + match context() { + OldTaskContext => unsafe { + let me = rt::rust_get_task(); + do local_get(OldHandle(me), taskgroup_key()) |g| { + match g { + None => { + // Main task, doing first spawn ever. Lazily initialise here. + let mut members = TaskSet::new(); + members.insert(OldTask(me)); + let tasks = exclusive(Some(TaskGroupData { + members: members, + descendants: TaskSet::new(), + })); + // Main task/group has no ancestors, no notifier, etc. + let group = @@mut Taskgroup(tasks, AncestorList(None), + true, None); + local_set(OldHandle(me), taskgroup_key(), group); + blk(&**group) + } + Some(&group) => blk(&**group) + } + } + }, + TaskContext => unsafe { + // Can't use safe borrow, because creating new hashmaps for the + // tasksets requires an rng, which needs to borrow the sched. + let me = Local::unsafe_borrow::(); + blk(match (*me).taskgroup { None => { - // Main task, doing first spawn ever. Lazily initialise - // here. - let mut members = new_taskset(); - taskset_insert(&mut members, spawner); + // Main task, doing first spawn ever. Lazily initialize. + let mut members = TaskSet::new(); + let my_handle = (*me).death.kill_handle.get_ref().clone(); + members.insert(NewTask(my_handle)); let tasks = exclusive(Some(TaskGroupData { members: members, - descendants: new_taskset(), + descendants: TaskSet::new(), })); - // Main task/group has no ancestors, no notifier, etc. 
- let group = @@mut TCB(spawner, - tasks, - AncestorList(None), - true, - None); - local_set(OldHandle(spawner), taskgroup_key(), group); - group + let group = Taskgroup(tasks, AncestorList(None), true, None); + (*me).taskgroup = Some(group); + (*me).taskgroup.get_ref() } - Some(&group) => group - } - }; - let spawner_group: &mut TCB = *spawner_group; + Some(ref group) => group, + }) + }, + SchedulerContext | GlobalContext => rtabort!("spawning in bad context"), + } + } +} - /*##################################################################* - * Step 2. Process spawn options for child. - *##################################################################*/ - return if linked { +fn gen_child_taskgroup(linked: bool, supervised: bool) + -> (TaskGroupArc, AncestorList, bool) { + do RuntimeGlue::with_my_taskgroup |spawner_group| { + let ancestors = AncestorList(spawner_group.ancestors.map(|x| x.clone())); + if linked { // Child is in the same group as spawner. - let g = spawner_group.tasks.clone(); // Child's ancestors are spawner's ancestors. - let a = share_ancestors(&mut spawner_group.ancestors); // Propagate main-ness. - (g, a, spawner_group.is_main) + (spawner_group.tasks.clone(), ancestors, spawner_group.is_main) } else { // Child is in a separate group from spawner. let g = exclusive(Some(TaskGroupData { - members: new_taskset(), - descendants: new_taskset(), + members: TaskSet::new(), + descendants: TaskSet::new(), })); let a = if supervised { - // Child's ancestors start with the spawner. - let old_ancestors = - share_ancestors(&mut spawner_group.ancestors); - // FIXME(#3068) - The generation counter is only used for a - // debug assertion, but initialising it requires locking a - // mutex. Hence it should be enabled only in debug builds. - let new_generation = - match *old_ancestors { - Some(ref arc) => { - access_ancestors(arc, |a| a.generation+1) - } - None => 0 // the actual value doesn't really matter. - }; + let new_generation = incr_generation(&ancestors); assert!(new_generation < uint::max_value); + // Child's ancestors start with the spawner. // Build a new node in the ancestor list. AncestorList(Some(exclusive(AncestorNode { generation: new_generation, - parent_group: Some(spawner_group.tasks.clone()), - ancestors: old_ancestors, + parent_group: spawner_group.tasks.clone(), + ancestors: ancestors, }))) } else { // Child has no ancestors. AncestorList(None) }; (g, a, false) - }; + } } +} - fn share_ancestors(ancestors: &mut AncestorList) -> AncestorList { - // Appease the borrow-checker. Really this wants to be written as: - // match ancestors - // Some(ancestor_arc) { ancestor_list(Some(ancestor_arc.clone())) } - // None { ancestor_list(None) } - let tmp = util::replace(&mut **ancestors, None); - if tmp.is_some() { - let ancestor_arc = tmp.unwrap(); - let result = ancestor_arc.clone(); - **ancestors = Some(ancestor_arc); - AncestorList(Some(result)) - } else { - AncestorList(None) +// Set up membership in taskgroup and descendantship in all ancestor +// groups. If any enlistment fails, Some task was already failing, so +// don't let the child task run, and undo every successful enlistment. +fn enlist_many(child: TaskHandle, child_arc: &TaskGroupArc, + ancestors: &mut AncestorList) -> bool { + // Join this taskgroup. 
+ let mut result = do access_group(child_arc) |child_tg| { + enlist_in_taskgroup(child_tg, child.clone(), true) // member + }; + if result { + // Unwinding function in case any ancestral enlisting fails + let bail: &fn(TaskGroupInner) = |tg| { leave_taskgroup(tg, &child, false) }; + // Attempt to join every ancestor group. + result = do each_ancestor(ancestors, bail) |ancestor_tg| { + // Enlist as a descendant, not as an actual member. + // Descendants don't kill ancestor groups on failure. + enlist_in_taskgroup(ancestor_tg, child.clone(), false) + }; + // If any ancestor group fails, need to exit this group too. + if !result { + do access_group(child_arc) |child_tg| { + leave_taskgroup(child_tg, &child, true); // member + } } } + result } pub fn spawn_raw(opts: TaskOpts, f: ~fn()) { - use rt::*; - match context() { OldTaskContext => { spawn_raw_oldsched(opts, f) @@ -590,21 +670,49 @@ pub fn spawn_raw(opts: TaskOpts, f: ~fn()) { } fn spawn_raw_newsched(mut opts: TaskOpts, f: ~fn()) { - use rt::sched::*; - - let f = Cell::new(f); + let child_data = Cell::new(gen_child_taskgroup(opts.linked, opts.supervised)); + let indestructible = opts.indestructible; + + let child_wrapper: ~fn() = || { + // Child task runs this code. + let child_data = Cell::new(child_data.take()); // :( + let enlist_success = do Local::borrow:: |me| { + let (child_tg, ancestors, is_main) = child_data.take(); + let mut ancestors = ancestors; + // FIXME(#7544): Optimize out the xadd in this clone, somehow. + let handle = me.death.kill_handle.get_ref().clone(); + // Atomically try to get into all of our taskgroups. + if enlist_many(NewTask(handle), &child_tg, &mut ancestors) { + // Got in. We can run the provided child body, and can also run + // the taskgroup's exit-time-destructor afterward. + me.taskgroup = Some(Taskgroup(child_tg, ancestors, is_main, None)); + true + } else { + false + } + }; + // Should be run after the local-borrowed task is returned. + if enlist_success { + if indestructible { + unsafe { do unkillable { f() } } + } else { + f() + } + } + }; let mut task = unsafe { let sched = Local::unsafe_borrow::(); rtdebug!("unsafe borrowed sched"); - if opts.linked { + if opts.watched { + let child_wrapper = Cell::new(child_wrapper); do Local::borrow::() |running_task| { - ~running_task.new_child(&mut (*sched).stack_pool, f.take()) + ~running_task.new_child(&mut (*sched).stack_pool, child_wrapper.take()) } } else { - // An unlinked task is a new root in the task tree - ~Task::new_root(&mut (*sched).stack_pool, f.take()) + // An unwatched task is a new root in the exit-code propagation tree + ~Task::new_root(&mut (*sched).stack_pool, child_wrapper) } }; @@ -616,7 +724,7 @@ fn spawn_raw_newsched(mut opts: TaskOpts, f: ~fn()) { if success { Success } else { Failure } ) }; - task.on_exit = Some(on_exit); + task.death.on_exit = Some(on_exit); } rtdebug!("spawn about to take scheduler"); @@ -635,8 +743,7 @@ fn spawn_raw_oldsched(mut opts: TaskOpts, f: ~fn()) { let child_data = Cell::new((child_tg, ancestors, f)); // Being killed with the unsafe task/closure pointers would leak them. do unkillable { - // Agh. Get move-mode items into the closure. FIXME (#2829) - let (child_tg, ancestors, f) = child_data.take(); + let (child_tg, ancestors, f) = child_data.take(); // :( // Create child task. let new_task = match opts.sched.mode { DefaultScheduler => rt::new_task(), @@ -644,14 +751,8 @@ fn spawn_raw_oldsched(mut opts: TaskOpts, f: ~fn()) { }; assert!(!new_task.is_null()); // Getting killed after here would leak the task. 
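// --- Editorial sketch, not part of the patch ---------------------------------
// The enlist_many logic above is an all-or-nothing join: enter the child's own
// group, then every ancestor group as a descendant, and undo every successful
// enlistment if any group refuses. A minimal modern-Rust restatement of that
// shape, with hypothetical names (Registry, enlist_all) and Mutex'd sets
// standing in for the exclusive TaskGroupArc, might look like this:
use std::collections::HashSet;
use std::sync::Mutex;

struct Registry {
    // `None` once the group has started failing; new members are refused.
    members: Mutex<Option<HashSet<u64>>>,
}

impl Registry {
    fn new() -> Registry {
        Registry { members: Mutex::new(Some(HashSet::new())) }
    }
    fn enlist(&self, id: u64) -> bool {
        match self.members.lock().unwrap().as_mut() {
            Some(set) => { set.insert(id); true }
            None => false, // group already failing; refuse new members
        }
    }
    fn leave(&self, id: u64) {
        if let Some(set) = self.members.lock().unwrap().as_mut() {
            set.remove(&id);
        }
    }
}

fn enlist_all(id: u64, own: &Registry, ancestors: &[&Registry]) -> bool {
    if !own.enlist(id) {
        return false;
    }
    for (i, ancestor) in ancestors.iter().enumerate() {
        if !ancestor.enlist(id) {
            // Roll back every successful ancestral enlistment, then leave the
            // child's own group too -- the same bail-out ordering as above.
            for joined in &ancestors[..i] {
                joined.leave(id);
            }
            own.leave(id);
            return false;
        }
    }
    true
}
// The real code additionally distinguishes members from descendants; this
// sketch only preserves the join-then-rollback ordering.
// ------------------------------------------------------------------------------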
- let notify_chan = if opts.notify_chan.is_none() { - None - } else { - Some(opts.notify_chan.take_unwrap()) - }; - let child_wrapper = make_child_wrapper(new_task, child_tg, - ancestors, is_main, notify_chan, f); + ancestors, is_main, opts.notify_chan.take(), f); let closure = cast::transmute(&child_wrapper); @@ -676,8 +777,7 @@ fn spawn_raw_oldsched(mut opts: TaskOpts, f: ~fn()) { -> ~fn() { let child_data = Cell::new((notify_chan, child_arc, ancestors)); let result: ~fn() = || { - // Agh. Get move-mode items into the closure. FIXME (#2829) - let (notify_chan, child_arc, ancestors) = child_data.take(); + let (notify_chan, child_arc, ancestors) = child_data.take(); // :( let mut ancestors = ancestors; // Child task runs this code. @@ -686,12 +786,8 @@ fn spawn_raw_oldsched(mut opts: TaskOpts, f: ~fn()) { let notifier = notify_chan.map_consume(|c| AutoNotify(c)); - if enlist_many(child, &child_arc, &mut ancestors) { - let group = @@mut TCB(child, - child_arc, - ancestors, - is_main, - notifier); + if enlist_many(OldTask(child), &child_arc, &mut ancestors) { + let group = @@mut Taskgroup(child_arc, ancestors, is_main, notifier); unsafe { local_set(OldHandle(child), taskgroup_key(), group); } @@ -707,38 +803,6 @@ fn spawn_raw_oldsched(mut opts: TaskOpts, f: ~fn()) { // unsafe { cleanup::annihilate(); } }; return result; - - // Set up membership in taskgroup and descendantship in all ancestor - // groups. If any enlistment fails, Some task was already failing, so - // don't let the child task run, and undo every successful enlistment. - fn enlist_many(child: *rust_task, child_arc: &TaskGroupArc, - ancestors: &mut AncestorList) -> bool { - // Join this taskgroup. - let mut result = - do access_group(child_arc) |child_tg| { - enlist_in_taskgroup(child_tg, child, true) // member - }; - if result { - // Unwinding function in case any ancestral enlisting fails - let bail: @fn(TaskGroupInner) = |tg| { - leave_taskgroup(tg, child, false) - }; - // Attempt to join every ancestor group. - result = - each_ancestor(ancestors, Some(bail), |ancestor_tg| { - // Enlist as a descendant, not as an actual member. - // Descendants don't kill ancestor groups on failure. - enlist_in_taskgroup(ancestor_tg, child, false) - }); - // If any ancestor group fails, need to exit this group too. - if !result { - do access_group(child_arc) |child_tg| { - leave_taskgroup(child_tg, child, true); // member - } - } - } - result - } } fn new_task_in_sched(opts: SchedOpts) -> *rust_task { @@ -789,6 +853,7 @@ fn test_spawn_raw_simple() { fn test_spawn_raw_unsupervise() { let opts = task::TaskOpts { linked: false, + watched: false, notify_chan: None, .. default_task_opts() }; @@ -819,6 +884,7 @@ fn test_spawn_raw_notify_failure() { let opts = task::TaskOpts { linked: false, + watched: false, notify_chan: Some(notify_ch), .. default_task_opts() }; diff --git a/src/libstd/unstable/atomics.rs b/src/libstd/unstable/atomics.rs index 1e5ac305df37e..dbb9c83ea3954 100644 --- a/src/libstd/unstable/atomics.rs +++ b/src/libstd/unstable/atomics.rs @@ -272,6 +272,30 @@ impl AtomicOption { self.swap(cast::transmute(0), order) } } + + /// A compare-and-swap. Succeeds if the option is 'None' and returns 'None' + /// if so. If the option was already 'Some', returns 'Some' of the rejected + /// value. 
+ #[inline] + pub fn fill(&mut self, val: ~T, order: Ordering) -> Option<~T> { + unsafe { + let val = cast::transmute(val); + let expected = cast::transmute(0); + let oldval = atomic_compare_and_swap(&mut self.p, expected, val, order); + if oldval == expected { + None + } else { + Some(cast::transmute(val)) + } + } + } + + /// Be careful: The caller must have some external method of ensuring the + /// result does not get invalidated by another task after this returns. + #[inline] + pub fn is_empty(&mut self, order: Ordering) -> bool { + unsafe { atomic_load(&self.p, order) == cast::transmute(0) } + } } #[unsafe_destructor] @@ -374,6 +398,11 @@ mod test { assert!(!flg.test_and_set(SeqCst)); } + #[test] + fn option_empty() { + assert!(AtomicOption::empty::<()>().is_empty(SeqCst)); + } + #[test] fn option_swap() { let mut p = AtomicOption::new(~1); @@ -398,4 +427,13 @@ mod test { assert_eq!(p.take(SeqCst), Some(~2)); } + #[test] + fn option_fill() { + let mut p = AtomicOption::new(~1); + assert!(p.fill(~2, SeqCst).is_some()); // should fail; shouldn't leak! + assert_eq!(p.take(SeqCst), Some(~1)); + + assert!(p.fill(~2, SeqCst).is_none()); // shouldn't fail + assert_eq!(p.take(SeqCst), Some(~2)); + } } diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs index 0da05dd167d2d..d4de402a33e52 100644 --- a/src/libstd/unstable/sync.rs +++ b/src/libstd/unstable/sync.rs @@ -9,12 +9,16 @@ // except according to those terms. use cast; +use cell::Cell; +use comm; use libc; +use ptr; use option::*; +use either::{Either, Left, Right}; use task; use task::atomically; +use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,SeqCst}; use unstable::finally::Finally; -use unstable::intrinsics; use ops::Drop; use clone::Clone; use kinds::Send; @@ -27,24 +31,44 @@ pub struct UnsafeAtomicRcBox { } struct AtomicRcBoxData { - count: int, + count: AtomicUint, + // An unwrapper uses this protocol to communicate with the "other" task that + // drops the last refcount on an arc. Unfortunately this can't be a proper + // pipe protocol because the unwrapper has to access both stages at once. + // FIXME(#7544): Maybe use AtomicPtr instead (to avoid xchg in take() later)? + unwrapper: AtomicOption<(comm::ChanOne<()>, comm::PortOne)>, + // FIXME(#3224) should be able to make this non-option to save memory data: Option, } impl UnsafeAtomicRcBox { pub fn new(data: T) -> UnsafeAtomicRcBox { unsafe { - let data = ~AtomicRcBoxData { count: 1, data: Some(data) }; + let data = ~AtomicRcBoxData { count: AtomicUint::new(1), + unwrapper: AtomicOption::empty(), + data: Some(data) }; let ptr = cast::transmute(data); return UnsafeAtomicRcBox { data: ptr }; } } + /// As new(), but returns an extra pre-cloned handle. 
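// --- Editorial sketch, not part of the patch ---------------------------------
// AtomicOption::fill above is a fill-once compare-and-swap: it succeeds only if
// the slot is empty and hands the rejected box back otherwise, so nothing is
// leaked either way. The same contract in today's std vocabulary (AtomicPtr
// plus compare_exchange); FillOnce and its method names are illustrative only:
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

struct FillOnce<T> {
    slot: AtomicPtr<T>,
}

impl<T> FillOnce<T> {
    fn empty() -> FillOnce<T> {
        FillOnce { slot: AtomicPtr::new(ptr::null_mut()) }
    }

    // Install `val` if the slot is empty; otherwise return the rejected box.
    fn fill(&self, val: Box<T>) -> Option<Box<T>> {
        let raw = Box::into_raw(val);
        match self.slot.compare_exchange(ptr::null_mut(), raw,
                                         Ordering::AcqRel, Ordering::Acquire) {
            Ok(_) => None,
            Err(_) => Some(unsafe { Box::from_raw(raw) }),
        }
    }

    // Take whatever is stored, emptying the slot (mirrors AtomicOption::take).
    fn take(&self) -> Option<Box<T>> {
        let raw = self.slot.swap(ptr::null_mut(), Ordering::AcqRel);
        if raw.is_null() { None } else { Some(unsafe { Box::from_raw(raw) }) }
    }
}
// A complete version would also need a Drop impl that frees a still-filled
// slot; it is omitted here to keep the sketch short.
// ------------------------------------------------------------------------------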
+ pub fn new2(data: T) -> (UnsafeAtomicRcBox, UnsafeAtomicRcBox) { + unsafe { + let data = ~AtomicRcBoxData { count: AtomicUint::new(2), + unwrapper: AtomicOption::empty(), + data: Some(data) }; + let ptr = cast::transmute(data); + return (UnsafeAtomicRcBox { data: ptr }, + UnsafeAtomicRcBox { data: ptr }); + } + } + #[inline] pub unsafe fn get(&self) -> *mut T { let mut data: ~AtomicRcBoxData = cast::transmute(self.data); - assert!(data.count > 0); + assert!(data.count.load(Acquire) > 0); // no barrier is really needed let r: *mut T = data.data.get_mut_ref(); cast::forget(data); return r; @@ -53,20 +77,113 @@ impl UnsafeAtomicRcBox { #[inline] pub unsafe fn get_immut(&self) -> *T { - let mut data: ~AtomicRcBoxData = cast::transmute(self.data); - assert!(data.count > 0); - let r: *T = cast::transmute_immut(data.data.get_mut_ref()); + let data: ~AtomicRcBoxData = cast::transmute(self.data); + assert!(data.count.load(Acquire) > 0); // no barrier is really needed + let r: *T = data.data.get_ref(); cast::forget(data); return r; } + + /// Wait until all other handles are dropped, then retrieve the enclosed + /// data. See extra::arc::ARC for specific semantics documentation. + /// If called when the task is already unkillable, unwrap will unkillably + /// block; otherwise, an unwrapping task can be killed by linked failure. + pub unsafe fn unwrap(self) -> T { + let this = Cell::new(self); // argh + do task::unkillable { + let mut this = this.take(); + let mut data: ~AtomicRcBoxData = cast::transmute(this.data); + // Set up the unwrap protocol. + let (p1,c1) = comm::oneshot(); // () + let (p2,c2) = comm::oneshot(); // bool + // Try to put our server end in the unwrapper slot. + // This needs no barrier -- it's protected by the release barrier on + // the xadd, and the acquire+release barrier in the destructor's xadd. + // FIXME(#6598) Change Acquire to Relaxed. + if data.unwrapper.fill(~(c1,p2), Acquire).is_none() { + // Got in. Tell this handle's destructor not to run (we are now it). + this.data = ptr::mut_null(); + // Drop our own reference. + let old_count = data.count.fetch_sub(1, Release); + assert!(old_count >= 1); + if old_count == 1 { + // We were the last owner. Can unwrap immediately. + // AtomicOption's destructor will free the server endpoint. + // FIXME(#3224): it should be like this + // let ~AtomicRcBoxData { data: user_data, _ } = data; + // user_data + data.data.take_unwrap() + } else { + // The *next* person who sees the refcount hit 0 will wake us. + let p1 = Cell::new(p1); // argh + // Unlike the above one, this cell is necessary. It will get + // taken either in the do block or in the finally block. + let c2_and_data = Cell::new((c2,data)); + do (|| { + do task::rekillable { p1.take().recv(); } + // Got here. Back in the 'unkillable' without getting killed. + let (c2, data) = c2_and_data.take(); + c2.send(true); + // FIXME(#3224): it should be like this + // let ~AtomicRcBoxData { data: user_data, _ } = data; + // user_data + let mut data = data; + data.data.take_unwrap() + }).finally { + if task::failing() { + // Killed during wait. Because this might happen while + // someone else still holds a reference, we can't free + // the data now; the "other" last refcount will free it. + let (c2, data) = c2_and_data.take(); + c2.send(false); + cast::forget(data); + } else { + assert!(c2_and_data.is_empty()); + } + } + } + } else { + // If 'put' returns the server end back to us, we were rejected; + // someone else was trying to unwrap. Avoid guaranteed deadlock. 
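// --- Editorial sketch, not part of the patch ---------------------------------
// The count.fetch_sub(1, ...) bookkeeping above (and the matching fetch_add in
// the Clone impl below) is the usual atomic-refcount recipe. The patch
// deliberately uses a stronger ordering in Drop because of the unwrapper
// handoff; absent that, the orderings std's Arc settled on are a Relaxed
// increment and a Release decrement followed by an Acquire fence before
// freeing. A minimal version in today's notation:
use std::sync::atomic::{fence, AtomicUsize, Ordering};

struct RefCount {
    strong: AtomicUsize,
}

impl RefCount {
    fn new() -> RefCount {
        RefCount { strong: AtomicUsize::new(1) }
    }
    fn incref(&self) {
        self.strong.fetch_add(1, Ordering::Relaxed);
    }
    /// Returns true when the caller just dropped the last reference and is
    /// therefore the one allowed to free the shared data.
    fn decref(&self) -> bool {
        if self.strong.fetch_sub(1, Ordering::Release) == 1 {
            // Make writes done through other handles visible before freeing.
            fence(Ordering::Acquire);
            true
        } else {
            false
        }
    }
}
// ------------------------------------------------------------------------------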
+ cast::forget(data); + fail!("Another task is already unwrapping this ARC!"); + } + } + } + + /// As unwrap above, but without blocking. Returns 'Left(self)' if this is + /// not the last reference; 'Right(unwrapped_data)' if so. + pub unsafe fn try_unwrap(self) -> Either, T> { + let mut this = self; // FIXME(#4330) mutable self + let mut data: ~AtomicRcBoxData = cast::transmute(this.data); + // This can of course race with anybody else who has a handle, but in + // such a case, the returned count will always be at least 2. If we + // see 1, no race was possible. All that matters is 1 or not-1. + let count = data.count.load(Acquire); + assert!(count >= 1); + // The more interesting race is one with an unwrapper. They may have + // already dropped their count -- but if so, the unwrapper pointer + // will have been set first, which the barriers ensure we will see. + // (Note: using is_empty(), not take(), to not free the unwrapper.) + if count == 1 && data.unwrapper.is_empty(Acquire) { + // Tell this handle's destructor not to run (we are now it). + this.data = ptr::mut_null(); + // FIXME(#3224) as above + Right(data.data.take_unwrap()) + } else { + cast::forget(data); + Left(this) + } + } } impl Clone for UnsafeAtomicRcBox { fn clone(&self) -> UnsafeAtomicRcBox { unsafe { let mut data: ~AtomicRcBoxData = cast::transmute(self.data); - let new_count = intrinsics::atomic_xadd(&mut data.count, 1) + 1; - assert!(new_count >= 2); + // This barrier might be unnecessary, but I'm not sure... + let old_count = data.count.fetch_add(1, Acquire); + assert!(old_count >= 1); cast::forget(data); return UnsafeAtomicRcBox { data: self.data }; } @@ -77,12 +194,37 @@ impl Clone for UnsafeAtomicRcBox { impl Drop for UnsafeAtomicRcBox{ fn drop(&self) { unsafe { + if self.data.is_null() { + return; // Happens when destructing an unwrapper's handle. + } do task::unkillable { let mut data: ~AtomicRcBoxData = cast::transmute(self.data); - let new_count = intrinsics::atomic_xsub(&mut data.count, 1) - 1; - assert!(new_count >= 0); - if new_count == 0 { - // drop glue takes over. + // Must be acquire+release, not just release, to make sure this + // doesn't get reordered to after the unwrapper pointer load. + let old_count = data.count.fetch_sub(1, SeqCst); + assert!(old_count >= 1); + if old_count == 1 { + // Were we really last, or should we hand off to an + // unwrapper? It's safe to not xchg because the unwrapper + // will set the unwrap lock *before* dropping his/her + // reference. In effect, being here means we're the only + // *awake* task with the data. + match data.unwrapper.take(Acquire) { + Some(~(message,response)) => { + // Send 'ready' and wait for a response. + message.send(()); + // Unkillable wait. Message guaranteed to come. + if response.recv() { + // Other task got the data. + cast::forget(data); + } else { + // Other task was killed. drop glue takes over. + } + } + None => { + // drop glue takes over. 
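// --- Editorial sketch, not part of the patch ---------------------------------
// try_unwrap above returns Left(self) while other handles exist and
// Right(data) once the caller holds the last one. Modern std exposes the same
// contract as Arc::try_unwrap, with Err/Ok standing in for Left/Right:
fn demo_try_unwrap() {
    use std::sync::Arc;

    let a = Arc::new(String::from("hello"));
    let b = a.clone();
    // Two handles alive: unwrapping is refused and the handle comes back.
    let a = Arc::try_unwrap(a).unwrap_err();
    drop(b);
    // Now `a` is the only handle, so the data can be moved out.
    assert_eq!(Arc::try_unwrap(a).unwrap(), "hello");
}
// ------------------------------------------------------------------------------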
+ } + } } else { cast::forget(data); } @@ -95,9 +237,9 @@ impl Drop for UnsafeAtomicRcBox{ /****************************************************************************/ #[allow(non_camel_case_types)] // runtime type -pub type rust_little_lock = *libc::c_void; +type rust_little_lock = *libc::c_void; -struct LittleLock { +pub struct LittleLock { l: rust_little_lock, } @@ -109,7 +251,7 @@ impl Drop for LittleLock { } } -fn LittleLock() -> LittleLock { +pub fn LittleLock() -> LittleLock { unsafe { LittleLock { l: rust_create_little_lock() @@ -139,6 +281,13 @@ struct ExData { /** * An arc over mutable data that is protected by a lock. For library use only. + * + * # Safety note + * + * This uses a pthread mutex, not one that's aware of the userspace scheduler. + * The user of an exclusive must be careful not to invoke any functions that may + * reschedule the task while holding the lock, or deadlock may result. If you + * need to block or yield while accessing shared state, use extra::sync::RWARC. */ pub struct Exclusive { x: UnsafeAtomicRcBox> @@ -189,12 +338,13 @@ impl Exclusive { f(cast::transmute_immut(x)) } } -} -fn compare_and_swap(address: &mut int, oldval: int, newval: int) -> bool { - unsafe { - let old = intrinsics::atomic_cxchg(address, oldval, newval); - old == oldval + pub fn unwrap(self) -> T { + let Exclusive { x: x } = self; + // Someday we might need to unkillably unwrap an exclusive, but not today. + let inner = unsafe { x.unwrap() }; + let ExData { data: user_data, _ } = inner; // will destroy the LittleLock + user_data } } @@ -205,57 +355,15 @@ extern { fn rust_unlock_little_lock(lock: rust_little_lock); } -/* *********************************************************************/ - -//FIXME: #5042 This should be replaced by proper atomic type -pub struct AtomicUint { - priv inner: uint -} - -impl AtomicUint { - pub fn new(val: uint) -> AtomicUint { AtomicUint { inner: val } } - pub fn load(&self) -> uint { - unsafe { intrinsics::atomic_load(cast::transmute(self)) as uint } - } - pub fn store(&mut self, val: uint) { - unsafe { intrinsics::atomic_store(cast::transmute(self), val as int); } - } - pub fn add(&mut self, val: int) -> uint { - unsafe { intrinsics::atomic_xadd(cast::transmute(self), val as int) as uint } - } - pub fn cas(&mut self, old:uint, new: uint) -> uint { - unsafe { intrinsics::atomic_cxchg(cast::transmute(self), old as int, new as int) as uint } - } -} - -pub struct AtomicInt { - priv inner: int -} - -impl AtomicInt { - pub fn new(val: int) -> AtomicInt { AtomicInt { inner: val } } - pub fn load(&self) -> int { - unsafe { intrinsics::atomic_load(&self.inner) } - } - pub fn store(&mut self, val: int) { - unsafe { intrinsics::atomic_store(&mut self.inner, val); } - } - pub fn add(&mut self, val: int) -> int { - unsafe { intrinsics::atomic_xadd(&mut self.inner, val) } - } - pub fn cas(&mut self, old: int, new: int) -> int { - unsafe { intrinsics::atomic_cxchg(&mut self.inner, old, new) } - } -} - - #[cfg(test)] mod tests { - use super::*; + use cell::Cell; use comm; - use super::exclusive; + use option::*; + use super::{exclusive, UnsafeAtomicRcBox}; use task; use uint; + use util; #[test] fn exclusive_arc() { @@ -309,26 +417,120 @@ mod tests { } #[test] - fn atomic_int_smoke_test() { - let mut i = AtomicInt::new(0); - i.store(10); - assert!(i.load() == 10); - assert!(i.add(1) == 10); - assert!(i.load() == 11); - assert!(i.cas(11, 12) == 11); - assert!(i.cas(11, 13) == 12); - assert!(i.load() == 12); + fn arclike_unwrap_basic() { + unsafe { + let x = 
UnsafeAtomicRcBox::new(~~"hello"); + assert!(x.unwrap() == ~~"hello"); + } } #[test] - fn atomic_uint_smoke_test() { - let mut i = AtomicUint::new(0); - i.store(10); - assert!(i.load() == 10); - assert!(i.add(1) == 10); - assert!(i.load() == 11); - assert!(i.cas(11, 12) == 11); - assert!(i.cas(11, 13) == 12); - assert!(i.load() == 12); + fn arclike_try_unwrap() { + unsafe { + let x = UnsafeAtomicRcBox::new(~~"hello"); + assert!(x.try_unwrap().expect_right("try_unwrap failed") == ~~"hello"); + } + } + + #[test] + fn arclike_try_unwrap_fail() { + unsafe { + let x = UnsafeAtomicRcBox::new(~~"hello"); + let x2 = x.clone(); + let left_x = x.try_unwrap(); + assert!(left_x.is_left()); + util::ignore(left_x); + assert!(x2.try_unwrap().expect_right("try_unwrap none") == ~~"hello"); + } + } + + #[test] + fn arclike_try_unwrap_unwrap_race() { + // When an unwrap and a try_unwrap race, the unwrapper should always win. + unsafe { + let x = UnsafeAtomicRcBox::new(~~"hello"); + let x2 = Cell::new(x.clone()); + let (p,c) = comm::stream(); + do task::spawn { + c.send(()); + assert!(x2.take().unwrap() == ~~"hello"); + c.send(()); + } + p.recv(); + task::yield(); // Try to make the unwrapper get blocked first. + let left_x = x.try_unwrap(); + assert!(left_x.is_left()); + util::ignore(left_x); + p.recv(); + } + } + + #[test] + fn exclusive_unwrap_basic() { + // Unlike the above, also tests no double-freeing of the LittleLock. + let x = exclusive(~~"hello"); + assert!(x.unwrap() == ~~"hello"); + } + + #[test] + fn exclusive_unwrap_contended() { + let x = exclusive(~~"hello"); + let x2 = Cell::new(x.clone()); + do task::spawn { + let x2 = x2.take(); + unsafe { do x2.with |_hello| { } } + task::yield(); + } + assert!(x.unwrap() == ~~"hello"); + + // Now try the same thing, but with the child task blocking. + let x = exclusive(~~"hello"); + let x2 = Cell::new(x.clone()); + let mut res = None; + let mut builder = task::task(); + builder.future_result(|r| res = Some(r)); + do builder.spawn { + let x2 = x2.take(); + assert!(x2.unwrap() == ~~"hello"); + } + // Have to get rid of our reference before blocking. + util::ignore(x); + res.unwrap().recv(); + } + + #[test] #[should_fail] #[ignore(cfg(windows))] + fn exclusive_unwrap_conflict() { + let x = exclusive(~~"hello"); + let x2 = Cell::new(x.clone()); + let mut res = None; + let mut builder = task::task(); + builder.future_result(|r| res = Some(r)); + do builder.spawn { + let x2 = x2.take(); + assert!(x2.unwrap() == ~~"hello"); + } + assert!(x.unwrap() == ~~"hello"); + // See #4689 for why this can't be just "res.recv()". + assert!(res.unwrap().recv() == task::Success); + } + + #[test] #[ignore(cfg(windows))] + fn exclusive_unwrap_deadlock() { + // This is not guaranteed to get to the deadlock before being killed, + // but it will show up sometimes, and if the deadlock were not there, + // the test would nondeterministically fail. + let result = do task::try { + // a task that has two references to the same exclusive will + // deadlock when it unwraps. nothing to be done about that. 
+ let x = exclusive(~~"hello"); + let x2 = x.clone(); + do task::spawn { + for 10.times { task::yield(); } // try to let the unwrapper go + fail!(); // punt it awake from its deadlock + } + let _z = x.unwrap(); + unsafe { do x2.with |_hello| { } } + }; + assert!(result.is_err()); } } diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs index cc73c28bba0df..03e94a902c1a1 100644 --- a/src/libstd/vec.rs +++ b/src/libstd/vec.rs @@ -1896,12 +1896,11 @@ pub mod raw { use cast::transmute; use clone::Clone; use managed; - use option::{None, Some}; + use option::Some; use ptr; use sys; use unstable::intrinsics; use vec::{UnboxedVecRepr, with_capacity, ImmutableVector, MutableVector}; - use util; #[cfg(not(stage0))] use unstable::intrinsics::contains_managed; @@ -2022,9 +2021,8 @@ pub mod raw { pub unsafe fn init_elem(v: &mut [T], i: uint, val: T) { let mut box = Some(val); do v.as_mut_buf |p, _len| { - let box2 = util::replace(&mut box, None); intrinsics::move_val_init(&mut(*ptr::mut_offset(p, i)), - box2.unwrap()); + box.take_unwrap()); } } diff --git a/src/test/bench/core-std.rs b/src/test/bench/core-std.rs index 1f98865a10c02..ea5aa309dc6c9 100644 --- a/src/test/bench/core-std.rs +++ b/src/test/bench/core-std.rs @@ -30,7 +30,7 @@ macro_rules! bench ( fn main() { let argv = os::args(); - let tests = argv.slice(1, argv.len()); + let _tests = argv.slice(1, argv.len()); bench!(shift_push); bench!(read_line); @@ -44,7 +44,7 @@ fn main() { fn maybe_run_test(argv: &[~str], name: ~str, test: &fn()) { let mut run_test = false; - if os::getenv(~"RUST_BENCH").is_some() { + if os::getenv("RUST_BENCH").is_some() { run_test = true } else if argv.len() > 0 { run_test = argv.iter().any(|x| x == &~"all") || argv.iter().any(|x| x == &name) diff --git a/src/test/bench/core-uint-to-str.rs b/src/test/bench/core-uint-to-str.rs index f960363766dfd..2c5cb5d134772 100644 --- a/src/test/bench/core-uint-to-str.rs +++ b/src/test/bench/core-uint-to-str.rs @@ -13,7 +13,7 @@ use std::uint; fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"10000000"] } else if args.len() <= 1u { ~[~"", ~"100000"] diff --git a/src/test/bench/graph500-bfs.rs b/src/test/bench/graph500-bfs.rs index 8a0d9bcead0fb..f17b6658f9dcd 100644 --- a/src/test/bench/graph500-bfs.rs +++ b/src/test/bench/graph500-bfs.rs @@ -410,7 +410,7 @@ fn validate(edges: ~[(node_id, node_id)], fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"15", ~"48"] } else if args.len() <= 1 { ~[~"", ~"10", ~"16"] @@ -447,7 +447,7 @@ fn main() { let graph_arc = arc::ARC(graph.clone()); do gen_search_keys(graph, num_keys).map() |root| { - io::stdout().write_line(~""); + io::stdout().write_line(""); io::stdout().write_line(fmt!("Search key: %?", root)); if do_sequential { @@ -511,7 +511,7 @@ fn main() { } }; - io::stdout().write_line(~""); + io::stdout().write_line(""); io::stdout().write_line( fmt!("Total sequential: %? \t Total Parallel: %? 
\t Speedup: %?x", total_seq, total_par, total_seq / total_par)); diff --git a/src/test/bench/msgsend-pipes-shared.rs b/src/test/bench/msgsend-pipes-shared.rs index 8aff30ec80a8b..9b0fd23d9cb42 100644 --- a/src/test/bench/msgsend-pipes-shared.rs +++ b/src/test/bench/msgsend-pipes-shared.rs @@ -22,15 +22,12 @@ extern mod extra; use std::comm::{Port, Chan, SharedChan}; use std::comm; -use std::io::{Writer, WriterUtil}; use std::io; use std::os; use std::task; -use std::ptr; use std::uint; -use std::vec; -fn move_out(x: T) {} +fn move_out(_x: T) {} enum request { get_count, @@ -38,7 +35,7 @@ enum request { stop } -fn server(requests: &Port, responses: &comm::Chan) { +fn server(requests: &Port, responses: &Chan) { let mut count = 0u; let mut done = false; while !done { @@ -102,7 +99,7 @@ fn run(args: &[~str]) { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"1000000", ~"10000"] } else if args.len() <= 1u { ~[~"", ~"10000", ~"4"] diff --git a/src/test/bench/msgsend-pipes.rs b/src/test/bench/msgsend-pipes.rs index 0046fb9dd1235..5ce5e902ed1ef 100644 --- a/src/test/bench/msgsend-pipes.rs +++ b/src/test/bench/msgsend-pipes.rs @@ -16,16 +16,13 @@ extern mod extra; -use std::comm::{Port, PortSet, Chan, stream}; -use std::io::{Writer, WriterUtil}; +use std::comm::{PortSet, Chan, stream}; use std::io; use std::os; -use std::ptr; use std::task; use std::uint; -use std::vec; -fn move_out(x: T) {} +fn move_out(_x: T) {} enum request { get_count, @@ -98,7 +95,7 @@ fn run(args: &[~str]) { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"1000000", ~"8"] } else if args.len() <= 1u { ~[~"", ~"10000", ~"4"] diff --git a/src/test/bench/msgsend-ring-mutex-arcs.rs b/src/test/bench/msgsend-ring-mutex-arcs.rs index a60e0b9e340bd..86784c0b7d3bc 100644 --- a/src/test/bench/msgsend-ring-mutex-arcs.rs +++ b/src/test/bench/msgsend-ring-mutex-arcs.rs @@ -24,7 +24,6 @@ use std::cell::Cell; use std::io; use std::os; use std::uint; -use std::vec; // A poor man's pipe. type pipe = arc::MutexARC<~[uint]>; @@ -60,8 +59,8 @@ fn thread_ring(i: uint, count: uint, num_chan: pipe, num_port: pipe) { // Send/Receive lots of messages. 
for uint::range(0u, count) |j| { //error!("task %?, iter %?", i, j); - let mut num_chan2 = num_chan.take_unwrap(); - let mut num_port2 = num_port.take_unwrap(); + let num_chan2 = num_chan.take_unwrap(); + let num_port2 = num_port.take_unwrap(); send(&num_chan2, i * j); num_chan = Some(num_chan2); let _n = recv(&num_port2); @@ -72,7 +71,7 @@ fn thread_ring(i: uint, count: uint, num_chan: pipe, num_port: pipe) { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"100", ~"10000"] } else if args.len() <= 1u { ~[~"", ~"10", ~"100"] @@ -84,7 +83,7 @@ fn main() { let msg_per_task = uint::from_str(args[2]).get(); let (num_chan, num_port) = init(); - let mut num_chan = Cell::new(num_chan); + let num_chan = Cell::new(num_chan); let start = time::precise_time_s(); diff --git a/src/test/bench/msgsend-ring-pipes.rs b/src/test/bench/msgsend-ring-pipes.rs index b4692c774aa2c..b79f171147aa0 100644 --- a/src/test/bench/msgsend-ring-pipes.rs +++ b/src/test/bench/msgsend-ring-pipes.rs @@ -24,7 +24,6 @@ use std::cell::Cell; use std::io; use std::os; use std::pipes::recv; -use std::ptr; use std::uint; use std::util; @@ -58,7 +57,7 @@ fn thread_ring(i: uint, fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"100", ~"10000"] } else if args.len() <= 1u { ~[~"", ~"100", ~"1000"] @@ -70,7 +69,7 @@ fn main() { let msg_per_task = uint::from_str(args[2]).get(); let (num_port, num_chan) = ring::init(); - let mut num_chan = Cell::new(num_chan); + let num_chan = Cell::new(num_chan); let start = time::precise_time_s(); diff --git a/src/test/bench/msgsend-ring-rw-arcs.rs b/src/test/bench/msgsend-ring-rw-arcs.rs index 0c6b97c6b7819..b4037d866a010 100644 --- a/src/test/bench/msgsend-ring-rw-arcs.rs +++ b/src/test/bench/msgsend-ring-rw-arcs.rs @@ -24,7 +24,6 @@ use std::cell::Cell; use std::io; use std::os; use std::uint; -use std::vec; // A poor man's pipe. type pipe = arc::RWARC<~[uint]>; @@ -56,8 +55,8 @@ fn thread_ring(i: uint, count: uint, num_chan: pipe, num_port: pipe) { // Send/Receive lots of messages. 
for uint::range(0u, count) |j| { //error!("task %?, iter %?", i, j); - let mut num_chan2 = num_chan.take_unwrap(); - let mut num_port2 = num_port.take_unwrap(); + let num_chan2 = num_chan.take_unwrap(); + let num_port2 = num_port.take_unwrap(); send(&num_chan2, i * j); num_chan = Some(num_chan2); let _n = recv(&num_port2); @@ -68,7 +67,7 @@ fn thread_ring(i: uint, count: uint, num_chan: pipe, num_port: pipe) { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"100", ~"10000"] } else if args.len() <= 1u { ~[~"", ~"10", ~"100"] @@ -80,7 +79,7 @@ fn main() { let msg_per_task = uint::from_str(args[2]).get(); let (num_chan, num_port) = init(); - let mut num_chan = Cell::new(num_chan); + let num_chan = Cell::new(num_chan); let start = time::precise_time_s(); diff --git a/src/test/bench/pingpong.rs b/src/test/bench/pingpong.rs index 1d32a78303a8f..2eb274378900a 100644 --- a/src/test/bench/pingpong.rs +++ b/src/test/bench/pingpong.rs @@ -190,7 +190,7 @@ fn timeit(f: &fn()) -> float { } fn main() { - let count = if os::getenv(~"RUST_BENCH").is_some() { + let count = if os::getenv("RUST_BENCH").is_some() { 250000 } else { 100 diff --git a/src/test/bench/shootout-ackermann.rs b/src/test/bench/shootout-ackermann.rs index 51fec4dbfe8b6..ff806c8b5d44d 100644 --- a/src/test/bench/shootout-ackermann.rs +++ b/src/test/bench/shootout-ackermann.rs @@ -28,7 +28,7 @@ fn ack(m: int, n: int) -> int { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"12"] } else if args.len() <= 1u { ~[~"", ~"8"] diff --git a/src/test/bench/shootout-binarytrees.rs b/src/test/bench/shootout-binarytrees.rs index 76ef4c12380e2..d88843e118045 100644 --- a/src/test/bench/shootout-binarytrees.rs +++ b/src/test/bench/shootout-binarytrees.rs @@ -40,7 +40,7 @@ fn main() { use std::os; use std::int; let args = std::os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"17"] } else if args.len() <= 1u { ~[~"", ~"8"] diff --git a/src/test/bench/shootout-chameneos-redux.rs b/src/test/bench/shootout-chameneos-redux.rs index deb2d4b300bc6..f218b963aaa31 100644 --- a/src/test/bench/shootout-chameneos-redux.rs +++ b/src/test/bench/shootout-chameneos-redux.rs @@ -12,7 +12,6 @@ extern mod extra; -use extra::sort; use std::cell::Cell; use std::comm::*; use std::io; @@ -20,7 +19,6 @@ use std::option; use std::os; use std::task; use std::uint; -use std::vec; fn print_complements() { let all = [Blue, Red, Yellow]; @@ -206,7 +204,7 @@ fn rendezvous(nn: uint, set: ~[color]) { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"200000"] } else if args.len() <= 1u { ~[~"", ~"600"] @@ -217,10 +215,10 @@ fn main() { let nn = uint::from_str(args[1]).get(); print_complements(); - io::println(~""); + io::println(""); rendezvous(nn, ~[Blue, Red, Yellow]); - io::println(~""); + io::println(""); rendezvous(nn, ~[Blue, Red, Yellow, Red, Yellow, Blue, Red, Yellow, Red, Blue]); diff --git a/src/test/bench/shootout-fasta-redux.rs b/src/test/bench/shootout-fasta-redux.rs index 5ebcfe164ce77..cc23c00183315 100644 --- a/src/test/bench/shootout-fasta-redux.rs +++ b/src/test/bench/shootout-fasta-redux.rs @@ -2,7 +2,6 @@ use std::cast::transmute; use std::from_str::FromStr; use std::libc::{FILE, 
STDOUT_FILENO, c_int, fdopen, fputc, fputs, fwrite, size_t}; use std::os; -use std::str; use std::uint::{min, range}; use std::vec::bytes::copy_memory; use std::vec; diff --git a/src/test/bench/shootout-fibo.rs b/src/test/bench/shootout-fibo.rs index a1bb0e9e6d8d5..de36a59dd6507 100644 --- a/src/test/bench/shootout-fibo.rs +++ b/src/test/bench/shootout-fibo.rs @@ -24,7 +24,7 @@ fn fib(n: int) -> int { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"40"] } else if args.len() <= 1u { ~[~"", ~"30"] diff --git a/src/test/bench/shootout-k-nucleotide-pipes.rs b/src/test/bench/shootout-k-nucleotide-pipes.rs index a3755919ffeef..46b882f7b8253 100644 --- a/src/test/bench/shootout-k-nucleotide-pipes.rs +++ b/src/test/bench/shootout-k-nucleotide-pipes.rs @@ -121,8 +121,8 @@ fn windows_with_carry(bb: &[u8], nn: uint, } fn make_sequence_processor(sz: uint, - from_parent: &comm::Port<~[u8]>, - to_parent: &comm::Chan<~str>) { + from_parent: &Port<~[u8]>, + to_parent: &Chan<~str>) { let mut freqs: HashMap<~[u8], uint> = HashMap::new(); let mut carry: ~[u8] = ~[]; let mut total: uint = 0u; @@ -143,11 +143,11 @@ fn make_sequence_processor(sz: uint, let buffer = match sz { 1u => { sort_and_fmt(&freqs, total) } 2u => { sort_and_fmt(&freqs, total) } - 3u => { fmt!("%u\t%s", find(&freqs, ~"GGT"), ~"GGT") } - 4u => { fmt!("%u\t%s", find(&freqs, ~"GGTA"), ~"GGTA") } - 6u => { fmt!("%u\t%s", find(&freqs, ~"GGTATT"), ~"GGTATT") } - 12u => { fmt!("%u\t%s", find(&freqs, ~"GGTATTTTAATT"), ~"GGTATTTTAATT") } - 18u => { fmt!("%u\t%s", find(&freqs, ~"GGTATTTTAATTTATAGT"), ~"GGTATTTTAATTTATAGT") } + 3u => { fmt!("%u\t%s", find(&freqs, ~"GGT"), "GGT") } + 4u => { fmt!("%u\t%s", find(&freqs, ~"GGTA"), "GGTA") } + 6u => { fmt!("%u\t%s", find(&freqs, ~"GGTATT"), "GGTATT") } + 12u => { fmt!("%u\t%s", find(&freqs, ~"GGTATTTTAATT"), "GGTATTTTAATT") } + 18u => { fmt!("%u\t%s", find(&freqs, ~"GGTATTTTAATTTATAGT"), "GGTATTTTAATTTATAGT") } _ => { ~"" } }; @@ -156,8 +156,7 @@ fn make_sequence_processor(sz: uint, // given a FASTA file on stdin, process sequence THREE fn main() { - let args = os::args(); - let rdr = if os::getenv(~"RUST_BENCH").is_some() { + let rdr = if os::getenv("RUST_BENCH").is_some() { // FIXME: Using this compile-time env variable is a crummy way to // get to this massive data set, but include_bin! 
chokes on it (#2598) let path = Path(env!("CFG_SRC_DIR")) @@ -203,7 +202,7 @@ fn main() { // start processing if this is the one ('>', false) => { - match line.slice_from(1).find_str(~"THREE") { + match line.slice_from(1).find_str("THREE") { option::Some(_) => { proc_mode = true; } option::None => { } } @@ -217,7 +216,7 @@ fn main() { let line_bytes = line.as_bytes(); for sizes.iter().enumerate().advance |(ii, _sz)| { - let mut lb = line_bytes.to_owned(); + let lb = line_bytes.to_owned(); to_child[ii].send(lb); } } diff --git a/src/test/bench/shootout-nbody.rs b/src/test/bench/shootout-nbody.rs index a56d1f44fa246..1fab646fb37a7 100644 --- a/src/test/bench/shootout-nbody.rs +++ b/src/test/bench/shootout-nbody.rs @@ -1,7 +1,6 @@ use std::from_str::FromStr; use std::os; use std::uint::range; -use std::vec; static PI: f64 = 3.141592653589793; static SOLAR_MASS: f64 = 4.0 * PI * PI; diff --git a/src/test/bench/shootout-pfib.rs b/src/test/bench/shootout-pfib.rs index cbc2d3f0ea887..6ea22715750c5 100644 --- a/src/test/bench/shootout-pfib.rs +++ b/src/test/bench/shootout-pfib.rs @@ -28,7 +28,6 @@ use std::io::WriterUtil; use std::io; use std::os; use std::result::{Ok, Err}; -use std::str; use std::task; use std::u64; use std::uint; @@ -59,13 +58,13 @@ struct Config { } fn parse_opts(argv: ~[~str]) -> Config { - let opts = ~[getopts::optflag(~"stress")]; + let opts = ~[getopts::optflag("stress")]; let opt_args = argv.slice(1, argv.len()); match getopts::getopts(opt_args, opts) { Ok(ref m) => { - return Config {stress: getopts::opt_present(m, ~"stress")} + return Config {stress: getopts::opt_present(m, "stress")} } Err(_) => { fail!(); } } @@ -97,7 +96,7 @@ fn stress(num_tasks: int) { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"20"] } else if args.len() <= 1u { ~[~"", ~"8"] diff --git a/src/test/bench/shootout-reverse-complement.rs b/src/test/bench/shootout-reverse-complement.rs index e57dee06c75bd..6ce62ccd12714 100644 --- a/src/test/bench/shootout-reverse-complement.rs +++ b/src/test/bench/shootout-reverse-complement.rs @@ -5,7 +5,6 @@ use std::cast::transmute; use std::libc::{STDOUT_FILENO, c_int, fdopen, fgets, fopen, fputc, fwrite}; use std::libc::{size_t}; use std::ptr::null; -use std::vec::raw::set_len; static LINE_LEN: u32 = 80; diff --git a/src/test/bench/shootout-threadring.rs b/src/test/bench/shootout-threadring.rs index d30c129443525..7e75ac8584855 100644 --- a/src/test/bench/shootout-threadring.rs +++ b/src/test/bench/shootout-threadring.rs @@ -15,7 +15,7 @@ use std::os; fn start(n_tasks: int, token: int) { let (p, ch1) = stream(); let mut p = p; - let mut ch1 = ch1; + let ch1 = ch1; ch1.send(token); // XXX could not get this to work with a range closure let mut i = 2; @@ -55,7 +55,7 @@ fn roundtrip(id: int, n_tasks: int, p: &Port, ch: &Chan) { } fn main() { - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"2000000", ~"503"] } else { diff --git a/src/test/bench/std-smallintmap.rs b/src/test/bench/std-smallintmap.rs index 23ac68315e3a0..7f2accb6f0ea9 100644 --- a/src/test/bench/std-smallintmap.rs +++ b/src/test/bench/std-smallintmap.rs @@ -32,7 +32,7 @@ fn check_sequential(min: uint, max: uint, map: &SmallIntMap) { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"100000", ~"100"] } else if args.len() <= 1u { ~[~"", 
~"10000", ~"50"] diff --git a/src/test/bench/sudoku.rs b/src/test/bench/sudoku.rs index 2396d6efc5cc1..f4ddc090c2160 100644 --- a/src/test/bench/sudoku.rs +++ b/src/test/bench/sudoku.rs @@ -15,7 +15,6 @@ extern mod extra; use std::io::{ReaderUtil, WriterUtil}; use std::io; use std::os; -use std::str; use std::u8; use std::uint; use std::unstable::intrinsics::cttz16; @@ -50,7 +49,7 @@ impl Sudoku { } pub fn from_vec(vec: &[[u8, ..9], ..9]) -> Sudoku { - let mut g = do vec::from_fn(9u) |i| { + let g = do vec::from_fn(9u) |i| { do vec::from_fn(9u) |j| { vec[i][j] } }; return Sudoku::new(g) @@ -161,17 +160,17 @@ impl Sudoku { // Stores available colors as simple bitfield, bit 0 is always unset struct Colors(u16); -static heads: u16 = (1u16 << 10) - 1; /* bits 9..0 */ +static HEADS: u16 = (1u16 << 10) - 1; /* bits 9..0 */ impl Colors { fn new(start_color: u8) -> Colors { // Sets bits 9..start_color let tails = !0u16 << start_color; - return Colors(heads & tails); + return Colors(HEADS & tails); } fn next(&self) -> u8 { - let val = **self & heads; + let val = **self & HEADS; if (0u16 == val) { return 0u8; } else { @@ -190,7 +189,7 @@ impl Colors { } } -static default_sudoku: [[u8, ..9], ..9] = [ +static DEFAULT_SUDOKU: [[u8, ..9], ..9] = [ /* 0 1 2 3 4 5 6 7 8 */ /* 0 */ [0u8, 4u8, 0u8, 6u8, 0u8, 0u8, 0u8, 3u8, 2u8], /* 1 */ [0u8, 0u8, 8u8, 0u8, 2u8, 0u8, 0u8, 0u8, 0u8], @@ -204,7 +203,7 @@ static default_sudoku: [[u8, ..9], ..9] = [ ]; #[cfg(test)] -static default_solution: [[u8, ..9], ..9] = [ +static DEFAULT_SOLUTION: [[u8, ..9], ..9] = [ /* 0 1 2 3 4 5 6 7 8 */ /* 0 */ [1u8, 4u8, 9u8, 6u8, 7u8, 5u8, 8u8, 3u8, 2u8], /* 1 */ [5u8, 3u8, 8u8, 1u8, 2u8, 9u8, 7u8, 4u8, 6u8], @@ -258,10 +257,10 @@ fn colors_remove_works() { } #[test] -fn check_default_sudoku_solution() { +fn check_DEFAULT_SUDOKU_solution() { // GIVEN - let mut sudoku = Sudoku::from_vec(&default_sudoku); - let solution = Sudoku::from_vec(&default_solution); + let mut sudoku = Sudoku::from_vec(&DEFAULT_SUDOKU); + let solution = Sudoku::from_vec(&DEFAULT_SOLUTION); // WHEN sudoku.solve(); @@ -274,7 +273,7 @@ fn main() { let args = os::args(); let use_default = args.len() == 1u; let mut sudoku = if use_default { - Sudoku::from_vec(&default_sudoku) + Sudoku::from_vec(&DEFAULT_SUDOKU) } else { Sudoku::read(io::stdin()) }; diff --git a/src/test/bench/task-perf-alloc-unwind.rs b/src/test/bench/task-perf-alloc-unwind.rs index f397f954623b0..3470cc9274578 100644 --- a/src/test/bench/task-perf-alloc-unwind.rs +++ b/src/test/bench/task-perf-alloc-unwind.rs @@ -22,7 +22,7 @@ enum UniqueList { } fn main() { - let (repeat, depth) = if os::getenv(~"RUST_BENCH").is_some() { + let (repeat, depth) = if os::getenv("RUST_BENCH").is_some() { (50, 1000) } else { (10, 10) diff --git a/src/test/bench/task-perf-jargon-metal-smoke.rs b/src/test/bench/task-perf-jargon-metal-smoke.rs index d3fd20a029358..4e27841a74850 100644 --- a/src/test/bench/task-perf-jargon-metal-smoke.rs +++ b/src/test/bench/task-perf-jargon-metal-smoke.rs @@ -43,7 +43,7 @@ fn child_generation(gens_left: uint, c: comm::Chan<()>) { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"100000"] } else if args.len() <= 1 { ~[~"", ~"100"] diff --git a/src/test/bench/task-perf-linked-failure.rs b/src/test/bench/task-perf-linked-failure.rs index e07e53347120b..7eb138e99a086 100644 --- a/src/test/bench/task-perf-linked-failure.rs +++ b/src/test/bench/task-perf-linked-failure.rs @@ -63,7 +63,7 @@ fn 
spawn_supervised_blocking(myname: &str, f: ~fn()) { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"100000"] } else if args.len() <= 1u { ~[~"", ~"100"] diff --git a/src/test/bench/task-perf-one-million.rs b/src/test/bench/task-perf-one-million.rs index 1cd90962c5b79..7f986eab78938 100644 --- a/src/test/bench/task-perf-one-million.rs +++ b/src/test/bench/task-perf-one-million.rs @@ -12,7 +12,6 @@ // xfail-test OOM on linux-32 without opts -use std::comm::*; use std::os; use std::task; use std::uint; @@ -49,7 +48,7 @@ fn calc(children: uint, parent_wait_chan: &Chan>>) { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"30"] } else if args.len() <= 1u { ~[~"", ~"10"] diff --git a/src/test/bench/task-perf-spawnalot.rs b/src/test/bench/task-perf-spawnalot.rs index 1a8fc7e098ddb..a152c30213318 100644 --- a/src/test/bench/task-perf-spawnalot.rs +++ b/src/test/bench/task-perf-spawnalot.rs @@ -24,7 +24,7 @@ fn g() { } fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"400"] } else if args.len() <= 1u { ~[~"", ~"10"]
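// --- Editorial sketch, not part of the patch ---------------------------------
// Many of the benchmarks touched above now pass a static string to
// os::getenv("RUST_BENCH") instead of an allocated ~str. Their recurring
// pattern -- pick a large workload when RUST_BENCH is set, a small default
// otherwise -- reads like this against today's std::env API (the "400"/"10"
// values mirror task-perf-spawnalot and are illustrative only):
fn workload_args() -> Vec<String> {
    use std::env;

    let cli: Vec<String> = env::args().collect();
    if env::var_os("RUST_BENCH").is_some() {
        vec![String::new(), "400".to_string()]
    } else if cli.len() <= 1 {
        vec![String::new(), "10".to_string()]
    } else {
        cli
    }
}
// ------------------------------------------------------------------------------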