//! Task death: asynchronous killing, linked failure, exit code propagation.
+use cast;
use cell::Cell;
use option::{Option, Some, None};
use prelude::*;
+use rt::task::Task;
+use unstable::atomics::{AtomicUint, SeqCst};
use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
use util;

+static KILLED_MSG: &'static str = "killed by linked failure";
+
+// State values for the 'killed' and 'unkillable' atomic flags below.
+static KILL_RUNNING: uint = 0;
+static KILL_KILLED: uint = 1;
+static KILL_UNKILLABLE: uint = 2;
+
// FIXME(#7544)(bblum): think about the cache efficiency of this
struct KillHandleInner {
-    // ((more fields to be added in a future commit))
+    // Is the task running, blocked, or killed? Possible values:
+    // * KILL_RUNNING    - Not unkillable, no kill pending.
+    // * KILL_KILLED     - Kill pending.
+    // * <ptr>           - A transmuted blocked ~Task pointer.
+    // This flag is refcounted because it may also be referenced by a blocking
+    // concurrency primitive, used to wake the task normally, whose reference
+    // may outlive the handle's if the task is killed.
+    killed: UnsafeAtomicRcBox<AtomicUint>,
+    // Has the task deferred kill signals? This flag guards the above one.
+    // Possible values:
+    // * KILL_RUNNING    - Not unkillable, no kill pending.
+    // * KILL_KILLED     - Kill pending.
+    // * KILL_UNKILLABLE - Kill signals deferred.
+    unkillable: AtomicUint,

    // Shared state between task and children for exit code propagation. These
    // are here so we can re-use the kill handle to implement watched children
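The state machine described by these comments is small enough to sketch on its own. What follows is an illustrative modern-Rust rendering, not the runtime's actual API: std::sync::atomic stands in for unstable::atomics, and the KillFlag wrapper and its error values are hypothetical. The idea is that entering and leaving an unkillable section is a compare-and-swap between KILL_RUNNING and KILL_UNKILLABLE, and a pending KILL_KILLED always wins.

use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

const KILL_RUNNING: usize = 0;
const KILL_KILLED: usize = 1;
const KILL_UNKILLABLE: usize = 2;

struct KillFlag(AtomicUsize); // stand-in for the 'unkillable' field above

impl KillFlag {
    // Enter an unkillable section: RUNNING -> UNKILLABLE.
    // A pending kill (KILLED) wins and is reported to the caller instead.
    fn inhibit_kill(&self) -> Result<(), &'static str> {
        match self.0.compare_exchange(KILL_RUNNING, KILL_UNKILLABLE, SeqCst, SeqCst) {
            Ok(_) => Ok(()),
            Err(KILL_KILLED) => Err("killed by linked failure"),
            Err(_) => panic!("task already unkillable"),
        }
    }

    // Leave the unkillable section: UNKILLABLE -> RUNNING, same KILLED rule.
    fn allow_kill(&self) -> Result<(), &'static str> {
        match self.0.compare_exchange(KILL_UNKILLABLE, KILL_RUNNING, SeqCst, SeqCst) {
            Ok(_) => Ok(()),
            Err(KILL_KILLED) => Err("killed by linked failure"),
            Err(_) => panic!("task already killable"),
        }
    }
}

fn main() {
    let flag = KillFlag(AtomicUsize::new(KILL_RUNNING));
    assert!(flag.inhibit_kill().is_ok()); // kill signals deferred from here...
    assert!(flag.allow_kill().is_ok());   // ...until here
}

Keeping KILL_KILLED as an absorbing state is what lets a killer simply swap the value in and inspect what was there before, as the kill() method added below does.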
@@ -47,20 +70,73 @@ pub struct Death {
    // Action to be done with the exit code. If set, also makes the task wait
    // until all its watched children exit before collecting the status.
    on_exit: Option<~fn(bool)>,
+    // nesting level counter for task::unkillable calls (0 == killable).
+    unkillable: int,
+    // nesting level counter for task::atomically calls (0 == can yield).
+    wont_sleep: int,
}

impl KillHandle {
    pub fn new() -> KillHandle {
        KillHandle(UnsafeAtomicRcBox::new(KillHandleInner {
            // Linked failure fields
-            // ((none yet))
+            killed: UnsafeAtomicRcBox::new(AtomicUint::new(KILL_RUNNING)),
+            unkillable: AtomicUint::new(KILL_RUNNING),
            // Exit code propagation fields
            any_child_failed: false,
            child_tombstones: None,
            graveyard_lock: LittleLock(),
        }))
    }

+    // Will begin unwinding if a kill signal was received, unless already_failing.
+    // This can't be used recursively, because a task which sees a KILLED
+    // signal must fail immediately, which an already-unkillable task can't do.
+    #[inline]
+    pub fn inhibit_kill(&mut self, already_failing: bool) {
+        let inner = unsafe { &mut *self.get() };
+        // Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
+        // FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
+        match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, SeqCst) {
+            KILL_RUNNING => { }, // normal case
+            KILL_KILLED => if !already_failing { fail!(KILLED_MSG) },
+            _ => rtabort!("inhibit_kill: task already unkillable"),
+        }
+    }
+
+    // Will begin unwinding if a kill signal was received, unless already_failing.
+    #[inline]
+    pub fn allow_kill(&mut self, already_failing: bool) {
+        let inner = unsafe { &mut *self.get() };
+        // Expect flag to contain UNKILLABLE. If KILLED, it should stay KILLED.
+        // FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
+        match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, SeqCst) {
+            KILL_UNKILLABLE => { }, // normal case
+            KILL_KILLED => if !already_failing { fail!(KILLED_MSG) },
+            _ => rtabort!("allow_kill: task already killable"),
+        }
+    }
+
+    // Send a kill signal to the handle's owning task. Returns the task itself
+    // if it was blocked and needs punted awake. To be called by other tasks.
+    pub fn kill(&mut self) -> Option<~Task> {
+        let inner = unsafe { &mut *self.get() };
+        if inner.unkillable.swap(KILL_KILLED, SeqCst) == KILL_RUNNING {
+            // Got in. Allowed to try to punt the task awake.
+            let flag = unsafe { &mut *inner.killed.get() };
+            match flag.swap(KILL_KILLED, SeqCst) {
+                // Task either not blocked or already taken care of.
+                KILL_RUNNING | KILL_KILLED => None,
+                // Got ownership of the blocked task.
+                task_ptr => Some(unsafe { cast::transmute(task_ptr) }),
+            }
+        } else {
+            // Otherwise it was either unkillable or already killed. Somebody
+            // else was here first who will deal with the kill signal.
+            None
+        }
+    }
+
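kill() layers a second trick on top of the flag: a blocked task parks a transmuted pointer to itself in the 'killed' flag, so whichever killer swaps in KILL_KILLED first takes ownership of that pointer and can punt the task awake. Below is a rough, self-contained sketch of that idea only; the BlockedTask type and the block/kill helpers are hypothetical, Box::into_raw stands in for cast::transmute, and modern std atomics replace the runtime's.

use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

const KILL_RUNNING: usize = 0;
const KILL_KILLED: usize = 1;

struct BlockedTask { name: &'static str } // stand-in for a blocked ~Task

// Called by the task itself just before descheduling: park a pointer to
// ourselves in the flag so a killer can retrieve it.
fn block(flag: &AtomicUsize, task: Box<BlockedTask>) -> Option<Box<BlockedTask>> {
    let ptr = Box::into_raw(task) as usize;
    match flag.compare_exchange(KILL_RUNNING, ptr, SeqCst, SeqCst) {
        Ok(_) => None, // parked; whoever kills us now owns the pointer
        // A kill already happened: reclaim the box and fail instead of sleeping.
        Err(_) => Some(unsafe { Box::from_raw(ptr as *mut BlockedTask) }),
    }
}

// Called by another task delivering a kill signal, mirroring kill() above.
fn kill(flag: &AtomicUsize) -> Option<Box<BlockedTask>> {
    match flag.swap(KILL_KILLED, SeqCst) {
        KILL_RUNNING | KILL_KILLED => None, // not blocked, or already handled
        ptr => Some(unsafe { Box::from_raw(ptr as *mut BlockedTask) }), // punt it awake
    }
}

fn main() {
    let flag = AtomicUsize::new(KILL_RUNNING);
    assert!(block(&flag, Box::new(BlockedTask { name: "worker" })).is_none());
    let punted = kill(&flag).expect("killer should recover the parked task");
    println!("woke {}", punted.name);
}

Because 0 and 1 are reserved for KILL_RUNNING and KILL_KILLED, and a boxed allocation is never at address 0 or 1, the value returned by the swap unambiguously tells the killer whether it now owns a parked task.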
    pub fn notify_immediate_failure(&mut self) {
        // A benign data race may happen here if there are failing sibling
        // tasks that were also spawned-watched. The refcount's write barriers
@@ -123,6 +199,7 @@ impl KillHandle {
        }

        // NB: Takes a pthread mutex -- 'blk' not allowed to reschedule.
+        #[inline]
        fn add_lazy_tombstone(parent: &mut KillHandle,
                              blk: &fn(Option<~fn() -> bool>) -> ~fn() -> bool) {
@@ -144,6 +221,8 @@ impl Death {
            kill_handle: Some(KillHandle::new()),
            watching_parent: None,
            on_exit: None,
+            unkillable: 0,
+            wont_sleep: 0,
        }
    }

@@ -153,11 +232,22 @@ impl Death {
            kill_handle: Some(KillHandle::new()),
            watching_parent: self.kill_handle.clone(),
            on_exit: None,
+            unkillable: 0,
+            wont_sleep: 0,
        }
    }

    /// Collect failure exit codes from children and propagate them to a parent.
    pub fn collect_failure(&mut self, mut success: bool) {
+        // This may run after the task has already failed, so even though the
+        // task appears to need to be killed, the scheduler should not fail us
+        // when we block to unwrap.
+        // (XXX: Another less-elegant reason for doing this is so that the use
+        // of the LittleLock in reparent_children_to doesn't need to access the
+        // unkillable flag in the kill_handle, since we'll have removed it.)
+        rtassert!(self.unkillable == 0);
+        self.unkillable = 1;
+
        // Step 1. Decide if we need to collect child failures synchronously.
        do self.on_exit.take_map |on_exit| {
            if success {
@@ -191,6 +281,64 @@ impl Death {
                parent_handle.notify_immediate_failure();
            }
        };
+
+        // Can't use allow_kill directly; that would require the kill handle.
+        rtassert!(self.unkillable == 1);
+        self.unkillable = 0;
+    }
+
+    /// Enter a possibly-nested unkillable section of code.
+    /// All calls must be paired with a subsequent call to allow_kill.
+    #[inline]
+    pub fn inhibit_kill(&mut self, already_failing: bool) {
+        if self.unkillable == 0 {
+            rtassert!(self.kill_handle.is_some());
+            self.kill_handle.get_mut_ref().inhibit_kill(already_failing);
+        }
+        self.unkillable += 1;
+    }
+
+    /// Exit a possibly-nested unkillable section of code.
+    /// All calls must be paired with a preceding call to inhibit_kill.
+    #[inline]
+    pub fn allow_kill(&mut self, already_failing: bool) {
+        rtassert!(self.unkillable != 0);
+        self.unkillable -= 1;
+        if self.unkillable == 0 {
+            rtassert!(self.kill_handle.is_some());
+            self.kill_handle.get_mut_ref().allow_kill(already_failing);
+        }
+    }
+
+    /// Enter a possibly-nested "atomic" section of code. Just for assertions.
+    /// All calls must be paired with a subsequent call to allow_yield.
+    #[inline]
+    pub fn inhibit_yield(&mut self) {
+        self.wont_sleep += 1;
+    }
+
+    /// Exit a possibly-nested "atomic" section of code. Just for assertions.
+    /// All calls must be paired with a preceding call to inhibit_yield.
+    #[inline]
+    pub fn allow_yield(&mut self) {
+        rtassert!(self.wont_sleep != 0);
+        self.wont_sleep -= 1;
+    }
+
+    /// Ensure that the task is allowed to become descheduled.
+    #[inline]
+    pub fn assert_may_sleep(&self) {
+        if self.wont_sleep != 0 {
+            rtabort!("illegal atomic-sleep: can't deschedule inside atomically()");
+        }
+    }
+}
+
+impl Drop for Death {
+    fn drop(&self) {
+        // Mustn't be in an atomic or unkillable section at task death.
+        rtassert!(self.unkillable == 0);
+        rtassert!(self.wont_sleep == 0);
    }
}
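The Death half of the patch turns the single-shot KillHandle calls into re-entrant sections by counting nesting depth: only the outermost inhibit_kill/allow_kill pair touches the shared flag, and wont_sleep exists purely so assert_may_sleep can catch illegal descheduling. A stripped-down sketch of that counter pattern follows; the DeathCounters type is hypothetical and carries no real kill handle.

// Sketch only: the real Death also owns a KillHandle and exit-code state.
struct DeathCounters {
    unkillable: usize, // nesting depth of unkillable sections (0 == killable)
    wont_sleep: usize, // nesting depth of atomic sections (0 == may sleep)
}

impl DeathCounters {
    fn inhibit_kill(&mut self) {
        if self.unkillable == 0 {
            // Outermost entry: this is where the real code defers kill
            // signals via KillHandle::inhibit_kill.
        }
        self.unkillable += 1;
    }

    fn allow_kill(&mut self) {
        assert!(self.unkillable != 0, "unbalanced allow_kill");
        self.unkillable -= 1;
        if self.unkillable == 0 {
            // Outermost exit: the real code re-enables kills here via
            // KillHandle::allow_kill.
        }
    }

    fn inhibit_yield(&mut self) { self.wont_sleep += 1; }

    fn allow_yield(&mut self) {
        assert!(self.wont_sleep != 0, "unbalanced allow_yield");
        self.wont_sleep -= 1;
    }

    fn assert_may_sleep(&self) {
        assert!(self.wont_sleep == 0, "can't deschedule inside atomically()");
    }
}

fn main() {
    let mut d = DeathCounters { unkillable: 0, wont_sleep: 0 };
    d.inhibit_kill();
    d.inhibit_kill(); // nested section: no extra flag traffic
    d.allow_kill();
    d.allow_kill();   // back to killable at depth zero
    d.inhibit_yield();
    d.allow_yield();
    d.assert_may_sleep(); // fine: not inside an atomic section
}

The pairing discipline the doc comments demand (every inhibit matched by an allow) is exactly what the Drop assertions at the end of the patch enforce at task death.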