From 5dd1583c57fbee9a07ac1111858871c241a24c50 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 22 Oct 2013 15:09:23 -0700 Subject: [PATCH 1/2] Make some more rt components public Primarily this makes the Scheduler and all of its related interfaces public. The reason for doing this is that currently any extern event loops had no access to the scheduler at all. This allows third-party event loops to manipulate the scheduler, along with allowing the uv event loop to live inside of its own crate. --- src/libstd/rt/mod.rs | 22 ++++++++++------------ src/libstd/rt/sched.rs | 16 +++++----------- src/libstd/select.rs | 4 +++- src/libstd/task/mod.rs | 4 ++-- src/libstd/task/spawn.rs | 10 +++++++--- src/libstd/unstable/mod.rs | 2 +- 6 files changed, 28 insertions(+), 30 deletions(-) diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index eaaf8c4328191..21fdf0e50a112 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -87,15 +87,13 @@ pub use self::util::set_exit_status; // method... pub use self::util::default_sched_threads; +// Re-export of the functionality in the kill module +pub use self::kill::{KillHandle, BlockedTask}; + // XXX: these probably shouldn't be public... #[doc(hidden)] pub mod shouldnt_be_public { - pub use super::sched::Scheduler; - pub use super::kill::KillHandle; - pub use super::thread::Thread; - pub use super::work_queue::WorkQueue; pub use super::select::SelectInner; - pub use super::rtio::EventLoop; pub use super::select::{SelectInner, SelectPortInner}; pub use super::local_ptr::maybe_tls_key; } @@ -116,13 +114,13 @@ pub mod task; mod kill; /// The coroutine task scheduler, built on the `io` event loop. -mod sched; +pub mod sched; /// Synchronous I/O. pub mod io; /// The EventLoop and internal synchronous I/O interface. -mod rtio; +pub mod rtio; /// libuv and default rtio implementation. pub mod uv; @@ -132,10 +130,10 @@ pub mod uv; pub mod local; /// A parallel work-stealing deque. 
-mod work_queue; +pub mod work_queue; /// A parallel queue. -mod message_queue; +pub mod message_queue; /// A mostly lock-free multi-producer, single consumer queue. mod mpsc_queue; @@ -144,7 +142,7 @@ mod mpsc_queue; mod mpmc_bounded_queue; /// A parallel data structure for tracking sleeping schedulers. -mod sleeper_list; +pub mod sleeper_list; /// Stack segments and caching. pub mod stack; @@ -153,7 +151,7 @@ pub mod stack; mod context; /// Bindings to system threading libraries. -mod thread; +pub mod thread; /// The runtime configuration, read from environment variables. pub mod env; @@ -327,7 +325,7 @@ fn run_(main: ~fn(), use_main_sched: bool) -> int { // waking up schedulers for work stealing; since this is a // non-work-stealing scheduler it should not be adding itself // to the list. - main_handle.send_shutdown(); + main_handle.send(Shutdown); Some(main_sched) } else { None diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index d44264befc16f..3ee822ced2dd4 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -542,7 +542,7 @@ impl Scheduler { match this.sleeper_list.casual_pop() { Some(handle) => { - let mut handle = handle; + let mut handle = handle; handle.send(Wake) } None => { (/* pass */) } @@ -818,12 +818,6 @@ impl SchedHandle { self.queue.push(msg); self.remote.fire(); } - pub fn send_task_from_friend(&mut self, friend: ~Task) { - self.send(TaskFromFriend(friend)); - } - pub fn send_shutdown(&mut self) { - self.send(Shutdown); - } } struct CleanupJob { @@ -1266,15 +1260,15 @@ mod test { use comm::{GenericPort, GenericChan}; do run_in_mt_newsched_task { - let (end_port, end_chan) = oneshot(); + let (end_port, end_chan) = oneshot(); let n_tasks = 10; let token = 2000; - let (p, ch1) = stream(); + let (p, ch1) = stream(); let mut p = p; - ch1.send((token, end_chan)); - let mut i = 2; + ch1.send((token, end_chan)); + let mut i = 2; while i <= n_tasks { let (next_p, ch) = stream(); let imm_i = i; diff --git a/src/libstd/select.rs 
b/src/libstd/select.rs index 75b09187f04cc..f5dc98c57b651 100644 --- a/src/libstd/select.rs +++ b/src/libstd/select.rs @@ -18,7 +18,9 @@ use option::*; // use either::{Either, Left, Right}; // use rt::kill::BlockedTask; use rt::local::Local; -use rt::shouldnt_be_public::{EventLoop, Scheduler, SelectInner, SelectPortInner}; +use rt::rtio::EventLoop; +use rt::sched::Scheduler; +use rt::shouldnt_be_public::{SelectInner, SelectPortInner}; use task; use unstable::finally::Finally; use vec::{OwnedVector, MutableVector}; diff --git a/src/libstd/task/mod.rs b/src/libstd/task/mod.rs index 8efa185bbbdd1..023ba6f7108a2 100644 --- a/src/libstd/task/mod.rs +++ b/src/libstd/task/mod.rs @@ -578,7 +578,7 @@ pub fn deschedule() { //! Yield control to the task scheduler use rt::local::Local; - use rt::shouldnt_be_public::Scheduler; + use rt::sched::Scheduler; // FIXME(#7544): Optimize this, since we know we won't block. let sched: ~Scheduler = Local::take(); @@ -1094,7 +1094,7 @@ fn test_try_fail() { #[cfg(test)] fn get_sched_id() -> int { - do Local::borrow |sched: &mut ::rt::shouldnt_be_public::Scheduler| { + do Local::borrow |sched: &mut ::rt::sched::Scheduler| { sched.sched_id() as int } } diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs index 235e67048f678..2cda38f8a30ba 100644 --- a/src/libstd/task/spawn.rs +++ b/src/libstd/task/spawn.rs @@ -82,7 +82,11 @@ use hashmap::{HashSet, HashSetMoveIterator}; use local_data; use rt::in_green_task_context; use rt::local::Local; -use rt::shouldnt_be_public::{Scheduler, KillHandle, WorkQueue, Thread, EventLoop}; +use rt::sched::Scheduler; +use rt::KillHandle; +use rt::work_queue::WorkQueue; +use rt::rtio::EventLoop; +use rt::thread::Thread; use rt::task::{Task, Sched}; use rt::task::{UnwindReasonLinked, UnwindReasonStr}; use rt::task::{UnwindResult, Success, Failure}; @@ -627,7 +631,7 @@ pub fn spawn_raw(mut opts: TaskOpts, f: ~fn()) { let mut new_sched_handle = new_sched.make_handle(); // Allow the scheduler to exit when 
the pinned task exits - new_sched_handle.send_shutdown(); + new_sched_handle.send(Shutdown); // Pin the new task to the new scheduler let new_task = if opts.watched { @@ -665,7 +669,7 @@ pub fn spawn_raw(mut opts: TaskOpts, f: ~fn()) { debug!("enqueing join_task"); // Now tell the original scheduler to join with this thread // by scheduling a thread-joining task on the original scheduler - orig_sched_handle.send_task_from_friend(join_task); + orig_sched_handle.send(TaskFromFriend(join_task)); // NB: We can't simply send a message from here to another task // because this code isn't running in a task and message passing doesn't diff --git a/src/libstd/unstable/mod.rs b/src/libstd/unstable/mod.rs index 835b16996b5f2..484ddde6d3ccd 100644 --- a/src/libstd/unstable/mod.rs +++ b/src/libstd/unstable/mod.rs @@ -37,7 +37,7 @@ a normal large stack. */ pub fn run_in_bare_thread(f: ~fn()) { use cell::Cell; - use rt::shouldnt_be_public::Thread; + use rt::thread::Thread; let f_cell = Cell::new(f); let (port, chan) = comm::stream(); From 201cab84e8f12ec73131ac4908e6779b277449a2 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 22 Oct 2013 15:13:18 -0700 Subject: [PATCH 2/2] Move rust's uv implementation to its own crate There are a few reasons that this is a desirable move to take: 1. Proof of concept that a third party event loop is possible 2. Clear separation of responsibility between rt::io and the uv-backend 3. Enforce in the future that the event loop is "pluggable" and replaceable Here's a quick summary of the points of this pull request which make this possible: * Two new lang items were introduced: event_loop, and event_loop_factory. The idea of a "factory" is to define a function which can be called with no arguments and will return the new event loop as a trait object. This factory is emitted to the crate map when building an executable.
The factory doesn't have to exist, and when it doesn't then an empty slot is in the crate map and a basic event loop with no I/O support is provided to the runtime. * When building an executable, then the rustuv crate will be linked by default (providing a default implementation of the event loop) via a similar method to injecting a dependency on libstd. This is currently the only location where the rustuv crate is ever linked. * There is a new #[no_uv] attribute (implied by #[no_std]) which denies implicitly linking to rustuv by default Closes #5019 --- Makefile.in | 23 +- mk/clean.mk | 3 + mk/host.mk | 18 +- mk/install.mk | 6 + mk/rt.mk | 2 +- mk/stage0.mk | 21 +- mk/target.mk | 11 + mk/tests.mk | 14 +- src/etc/snapshot.py | 4 + src/librustc/front/std_inject.rs | 29 +- src/librustc/middle/lang_items.rs | 199 ++- src/librustc/middle/trans/base.rs | 62 +- src/librustuv/addrinfo.rs | 273 ++++ src/librustuv/async.rs | 83 + src/librustuv/file.rs | 647 ++++++++ src/librustuv/idle.rs | 137 ++ src/librustuv/macros.rs | 36 + src/librustuv/net.rs | 851 ++++++++++ src/librustuv/pipe.rs | 98 ++ src/librustuv/process.rs | 202 +++ src/librustuv/rustuv.rs | 424 +++++ src/librustuv/signal.rs | 72 + src/librustuv/timer.rs | 157 ++ src/librustuv/tty.rs | 83 + src/librustuv/uvio.rs | 2526 +++++++++++++++++++++++++++++ src/librustuv/uvll.rs | 1174 ++++++++++++++ src/libstd/rt/crate_map.rs | 38 +- src/libstd/rt/io/net/tcp.rs | 6 +- src/libstd/rt/mod.rs | 32 +- src/libstd/rt/sched.rs | 1 - src/libstd/rt/test.rs | 7 +- src/libstd/rt/uv/uvio.rs | 6 + src/libstd/std.rs | 7 +- src/libstd/task/spawn.rs | 14 +- 34 files changed, 7105 insertions(+), 161 deletions(-) create mode 100644 src/librustuv/addrinfo.rs create mode 100644 src/librustuv/async.rs create mode 100644 src/librustuv/file.rs create mode 100644 src/librustuv/idle.rs create mode 100644 src/librustuv/macros.rs create mode 100644 src/librustuv/net.rs create mode 100644 src/librustuv/pipe.rs create mode 100644 
src/librustuv/process.rs create mode 100644 src/librustuv/rustuv.rs create mode 100644 src/librustuv/signal.rs create mode 100644 src/librustuv/timer.rs create mode 100644 src/librustuv/tty.rs create mode 100644 src/librustuv/uvio.rs create mode 100644 src/librustuv/uvll.rs diff --git a/Makefile.in b/Makefile.in index 4d2fcacc10834..e4b08b2546ae8 100644 --- a/Makefile.in +++ b/Makefile.in @@ -221,6 +221,7 @@ CFG_LIBRUSTC_$(1) :=$(call CFG_LIB_NAME_$(1),rustc) CFG_LIBSYNTAX_$(1) :=$(call CFG_LIB_NAME_$(1),syntax) CFG_LIBRUSTPKG_$(1) :=$(call CFG_LIB_NAME_$(1),rustpkg) CFG_LIBRUSTDOC_$(1) :=$(call CFG_LIB_NAME_$(1),rustdoc) +CFG_LIBRUSTUV_$(1) :=$(call CFG_LIB_NAME_$(1),rustuv) EXTRALIB_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),extra) STDLIB_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),std) @@ -228,12 +229,14 @@ LIBRUSTC_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),rustc) LIBSYNTAX_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),syntax) LIBRUSTPKG_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),rustpkg) LIBRUSTDOC_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),rustdoc) +LIBRUSTUV_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),rustuv) EXTRALIB_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),extra) STDLIB_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),std) LIBRUSTC_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),rustc) LIBSYNTAX_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),syntax) LIBRUSTPKG_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),rustpkg) LIBRUSTDOC_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),rustdoc) +LIBRUSTUV_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),rustuv) endef @@ -289,6 +292,14 @@ EXTRALIB_CRATE := $(S)src/libextra/extra.rs EXTRALIB_INPUTS := $(wildcard $(addprefix $(S)src/libextra/, \ *.rs */*.rs)) +###################################################################### +# Rust UV library variables +###################################################################### + +LIBRUSTUV_CRATE := $(S)src/librustuv/rustuv.rs +LIBRUSTUV_INPUTS := $(wildcard $(addprefix $(S)src/librustuv/, \ + *.rs */*.rs)) + 
###################################################################### # rustc crate variables ###################################################################### @@ -410,6 +421,11 @@ else $$(HLIB$(1)_H_$(3))/$(CFG_LIBRUSTC_$(3)) TLIBRUSTC_DEFAULT$(1)_T_$(2)_H_$(3) = \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(2)) + + HLIBRUSTUV_DEFAULT$(1)_H_$(3) = \ + $$(HLIB$(1)_H_$(3))/$(CFG_LIBRUSTUV_$(3)) + TLIBRUSTUV_DEFAULT$(1)_T_$(2)_H_$(3) = \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)) endif # Preqrequisites for using the stageN compiler @@ -421,6 +437,7 @@ HSREQ$(1)_H_$(3) = \ $$(HEXTRALIB_DEFAULT$(1)_H_$(3)) \ $$(HLIBSYNTAX_DEFAULT$(1)_H_$(3)) \ $$(HLIBRUSTC_DEFAULT$(1)_H_$(3)) \ + $$(HLIBRUSTUV_DEFAULT$(1)_H_$(3)) \ $$(MKFILE_DEPS) # Prerequisites for using the stageN compiler to build target artifacts @@ -433,7 +450,8 @@ TSREQ$(1)_T_$(2)_H_$(3) = \ SREQ$(1)_T_$(2)_H_$(3) = \ $$(TSREQ$(1)_T_$(2)_H_$(3)) \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_STDLIB_$(2)) \ - $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_EXTRALIB_$(2)) + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_EXTRALIB_$(2)) \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)) # Prerequisites for a working stageN compiler and libraries, for a specific target CSREQ$(1)_T_$(2)_H_$(3) = \ @@ -447,7 +465,8 @@ CSREQ$(1)_T_$(2)_H_$(3) = \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBSYNTAX_$(2)) \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(2)) \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTPKG_$(2)) \ - $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTDOC_$(2)) + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTDOC_$(2)) \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)) ifeq ($(1),0) # Don't run the the stage0 compiler under valgrind - that ship has sailed diff --git a/mk/clean.mk b/mk/clean.mk index 09a80c5507bca..a46e7fa4f3ff8 100644 --- a/mk/clean.mk +++ b/mk/clean.mk @@ -73,6 +73,7 @@ clean$(1)_H_$(2): $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_RUNTIME_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_STDLIB_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_EXTRALIB_$(2)) + $(Q)rm 
-f $$(HLIB$(1)_H_$(2))/$(CFG_LIBRUSTUV_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_LIBRUSTC_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_LIBSYNTAX_$(2)) $(Q)rm -f $$(HLIB$(1)_H_$(2))/$(STDLIB_GLOB_$(2)) @@ -103,10 +104,12 @@ clean$(1)_T_$(2)_H_$(3): $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_RUNTIME_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_STDLIB_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_EXTRALIB_$(2)) + $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBSYNTAX_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(STDLIB_GLOB_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(EXTRALIB_GLOB_$(2)) + $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBRUSTUV_GLOB_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBRUSTC_GLOB_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBSYNTAX_GLOB_$(2)) $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBRUSTPKG_GLOB_$(2)) diff --git a/mk/host.mk b/mk/host.mk index ecf84967e5edf..537e737a29af6 100644 --- a/mk/host.mk +++ b/mk/host.mk @@ -30,6 +30,7 @@ $$(HBIN$(2)_H_$(4))/rustc$$(X_$(4)): \ $$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTC_$(4)) \ $$(HSTDLIB_DEFAULT$(2)_H_$(4)) \ $$(HEXTRALIB_DEFAULT$(2)_H_$(4)) \ + $$(HLIBRUSTUV_DEFAULT$(2)_H_$(4)) \ | $$(HBIN$(2)_H_$(4))/ @$$(call E, cp: $$@) @@ -42,6 +43,7 @@ $$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTC_$(4)): \ $$(HLIB$(2)_H_$(4))/$(CFG_RUSTLLVM_$(4)) \ $$(HSTDLIB_DEFAULT$(2)_H_$(4)) \ $$(HEXTRALIB_DEFAULT$(2)_H_$(4)) \ + $$(HLIBRUSTUV_DEFAULT$(2)_H_$(4)) \ | $$(HLIB$(2)_H_$(4))/ @$$(call E, cp: $$@) @@ -58,6 +60,7 @@ $$(HLIB$(2)_H_$(4))/$(CFG_LIBSYNTAX_$(4)): \ $$(HLIB$(2)_H_$(4))/$(CFG_RUSTLLVM_$(4)) \ $$(HSTDLIB_DEFAULT$(2)_H_$(4)) \ $$(HEXTRALIB_DEFAULT$(2)_H_$(4)) \ + $$(HLIBRUSTUV_DEFAULT$(2)_H_$(4)) \ | $$(HLIB$(2)_H_$(4))/ @$$(call E, cp: $$@) $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBSYNTAX_GLOB_$(4)),$$(notdir $$@)) @@ -80,7 +83,7 @@ $$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)): \ @$$(call E, cp: 
$$@) $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(STDLIB_GLOB_$(4)),$$(notdir $$@)) $$(Q)cp $$< $$@ -# Subtle: We do not let the shell expand $(STDLIB_DSYM_GLOB) directly rather +# Subtle: We do not let the shell expand $$(STDLIB_DSYM_GLOB) directly rather # we use Make's $$(wildcard) facility. The reason is that, on mac, when using # USE_SNAPSHOT_STDLIB, we copy the std.dylib file out of the snapshot. # In that case, there is no .dSYM file. Annoyingly, bash then refuses to expand @@ -105,6 +108,19 @@ $$(HLIB$(2)_H_$(4))/$(CFG_EXTRALIB_$(4)): \ $$(HLIB$(2)_H_$(4)) $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(EXTRALIB_GLOB_$(4)),$$(notdir $$@)) +$$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTUV_$(4)): \ + $$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_LIBRUSTUV_$(4)) \ + $$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)) \ + $$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \ + | $$(HLIB$(2)_H_$(4))/ + @$$(call E, cp: $$@) + $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_GLOB_$(4)),$$(notdir $$@)) + $$(Q)cp $$< $$@ + $$(Q)cp -R $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBRUSTUV_GLOB_$(4)) \ + $$(wildcard $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBRUSTUV_DSYM_GLOB_$(4))) \ + $$(HLIB$(2)_H_$(4)) + $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_GLOB_$(4)),$$(notdir $$@)) + $$(HLIB$(2)_H_$(4))/libstd.rlib: \ $$(TLIB$(1)_T_$(4)_H_$(3))/libstd.rlib \ $$(HLIB$(2)_H_$(4))/$$(CFG_RUNTIME_$(4)) \ diff --git a/mk/install.mk b/mk/install.mk index 6ebcfeeefae73..16af433e57518 100644 --- a/mk/install.mk +++ b/mk/install.mk @@ -91,6 +91,7 @@ install-target-$(1)-host-$(2): $$(TSREQ$$(ISTAGE)_T_$(1)_H_$(2)) $$(SREQ$$(ISTAG $$(Q)$$(call INSTALL_LIB,$$(CFG_RUNTIME_$(1))) $$(Q)$$(call INSTALL_LIB,$$(STDLIB_GLOB_$(1))) $$(Q)$$(call INSTALL_LIB,$$(EXTRALIB_GLOB_$(1))) + $$(Q)$$(call INSTALL_LIB,$$(LIBRUSTUV_GLOB_$(1))) $$(Q)$$(call INSTALL_LIB,libmorestack.a) endef @@ -104,6 +105,7 @@ install-target-$(1)-host-$(2): $$(CSREQ$$(ISTAGE)_T_$(1)_H_$(2)) $$(Q)$$(call INSTALL_LIB,$$(CFG_RUSTLLVM_$(1))) 
$$(Q)$$(call INSTALL_LIB,$$(STDLIB_GLOB_$(1))) $$(Q)$$(call INSTALL_LIB,$$(EXTRALIB_GLOB_$(1))) + $$(Q)$$(call INSTALL_LIB,$$(LIBRUSTUV_GLOB_$(1))) $$(Q)$$(call INSTALL_LIB,$$(LIBRUSTC_GLOB_$(1))) $$(Q)$$(call INSTALL_LIB,$$(LIBSYNTAX_GLOB_$(1))) $$(Q)$$(call INSTALL_LIB,$$(LIBRUSTPKG_GLOB_$(1))) @@ -143,6 +145,7 @@ install-host: $(CSREQ$(ISTAGE)_T_$(CFG_BUILD_TRIPLE)_H_$(CFG_BUILD_TRIPLE)) $(Q)$(call INSTALL,$(HB2),$(PHB),rustdoc$(X_$(CFG_BUILD_TRIPLE))) $(Q)$(call INSTALL_LIB,$(STDLIB_GLOB_$(CFG_BUILD_TRIPLE))) $(Q)$(call INSTALL_LIB,$(EXTRALIB_GLOB_$(CFG_BUILD_TRIPLE))) + $(Q)$(call INSTALL_LIB,$(LIBRUSTUV_GLOB_$(CFG_BUILD_TRIPLE))) $(Q)$(call INSTALL_LIB,$(LIBRUSTC_GLOB_$(CFG_BUILD_TRIPLE))) $(Q)$(call INSTALL_LIB,$(LIBSYNTAX_GLOB_$(CFG_BUILD_TRIPLE))) $(Q)$(call INSTALL_LIB,$(LIBRUSTPKG_GLOB_$(CFG_BUILD_TRIPLE))) @@ -168,6 +171,7 @@ uninstall: $(Q)for i in \ $(call HOST_LIB_FROM_HL_GLOB,$(STDLIB_GLOB_$(CFG_BUILD_TRIPLE))) \ $(call HOST_LIB_FROM_HL_GLOB,$(EXTRALIB_GLOB_$(CFG_BUILD_TRIPLE))) \ + $(call HOST_LIB_FROM_HL_GLOB,$(LIBRUSTUV_GLOB_$(CFG_BUILD_TRIPLE))) \ $(call HOST_LIB_FROM_HL_GLOB,$(LIBRUSTC_GLOB_$(CFG_BUILD_TRIPLE))) \ $(call HOST_LIB_FROM_HL_GLOB,$(LIBSYNTAX_GLOB_$(CFG_BUILD_TRIPLE))) \ $(call HOST_LIB_FROM_HL_GLOB,$(LIBRUSTPKG_GLOB_$(CFG_BUILD_TRIPLE))) \ @@ -230,6 +234,7 @@ install-runtime-target-$(1)-host-$(2): $$(TSREQ$$(ISTAGE)_T_$(1)_H_$(2)) $$(SREQ $(Q)$(call ADB_PUSH,$$(TL$(1)$(2))/$$(CFG_RUNTIME_$(1)),$(CFG_RUNTIME_PUSH_DIR)) $(Q)$(call ADB_PUSH,$$(TL$(1)$(2))/$$(STDLIB_GLOB_$(1)),$(CFG_RUNTIME_PUSH_DIR)) $(Q)$(call ADB_PUSH,$$(TL$(1)$(2))/$$(EXTRALIB_GLOB_$(1)),$(CFG_RUNTIME_PUSH_DIR)) + $(Q)$(call ADB_PUSH,$$(TL$(1)$(2))/$$(LIBRUSTUV_GLOB_$(1)),$(CFG_RUNTIME_PUSH_DIR)) endef define INSTALL_RUNTIME_TARGET_CLEANUP_N @@ -238,6 +243,7 @@ install-runtime-target-$(1)-cleanup: $(Q)$(call ADB_SHELL,rm,$(CFG_RUNTIME_PUSH_DIR)/$(CFG_RUNTIME_$(1))) $(Q)$(call ADB_SHELL,rm,$(CFG_RUNTIME_PUSH_DIR)/$(STDLIB_GLOB_$(1))) $(Q)$(call 
ADB_SHELL,rm,$(CFG_RUNTIME_PUSH_DIR)/$(EXTRALIB_GLOB_$(1))) + $(Q)$(call ADB_SHELL,rm,$(CFG_RUNTIME_PUSH_DIR)/$(LIBRUSTUV_GLOB_$(1))) endef $(eval $(call INSTALL_RUNTIME_TARGET_N,arm-linux-androideabi,$(CFG_BUILD_TRIPLE))) diff --git a/mk/rt.mk b/mk/rt.mk index f1f46975e47bb..c919509cbab72 100644 --- a/mk/rt.mk +++ b/mk/rt.mk @@ -148,7 +148,7 @@ $$(RT_BUILD_DIR_$(1)_$(2))/$(CFG_RUNTIME_$(1)): $$(RUNTIME_OBJS_$(1)_$(2)) $$(MK # These could go in rt.mk or rustllvm.mk, they're needed for both. -# This regexp has a single $, escaped twice +# This regexp has a single $$ escaped twice $(1)/%.bsd.def: %.def.in $$(MKFILE_DEPS) @$$(call E, def: $$@) $$(Q)echo "{" > $$@ diff --git a/mk/stage0.mk b/mk/stage0.mk index c1b0242bbc63c..a3d3446298b51 100644 --- a/mk/stage0.mk +++ b/mk/stage0.mk @@ -42,6 +42,11 @@ $(HLIB0_H_$(CFG_BUILD_TRIPLE))/$(CFG_EXTRALIB_$(CFG_BUILD_TRIPLE)): \ | $(HLIB0_H_$(CFG_BUILD_TRIPLE))/ $(Q)touch $@ +$(HLIB0_H_$(CFG_BUILD_TRIPLE))/$(CFG_LIBRUSTUV_$(CFG_BUILD_TRIPLE)): \ + $(HBIN0_H_$(CFG_BUILD_TRIPLE))/rustc$(X_$(CFG_BUILD_TRIPLE)) \ + | $(HLIB0_H_$(CFG_BUILD_TRIPLE))/ + $(Q)touch $@ + $(HLIB0_H_$(CFG_BUILD_TRIPLE))/$(CFG_LIBRUSTC_$(CFG_BUILD_TRIPLE)): \ $(HBIN0_H_$(CFG_BUILD_TRIPLE))/rustc$(X_$(CFG_BUILD_TRIPLE)) \ | $(HLIB0_H_$(CFG_BUILD_TRIPLE))/ @@ -81,9 +86,9 @@ $$(HLIB0_H_$(1))/$(CFG_STDLIB_$(1)): \ $$(TLIB$(2)_T_$(1)_H_$(3))/$(CFG_STDLIB_$(1)) \ | $(HLIB0_H_$(1))/ @$$(call E, cp: $$@) - $$(call CHECK_FOR_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(EXTRALIB_GLOB_$(4)),$$(notdir $$@)) + $$(call CHECK_FOR_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(STDLIB_GLOB_$(4)),$$(notdir $$@)) $$(Q)cp $$(TLIB$(2)_T_$(1)_H_$(3))/$(STDLIB_GLOB_$(1)) $$@ - $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(EXTRALIB_GLOB_$(4)),$$(notdir $$@)) + $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(STDLIB_GLOB_$(4)),$$(notdir $$@)) $$(HLIB0_H_$(1))/$(CFG_EXTRALIB_$(1)): \ $$(TLIB$(2)_T_$(1)_H_$(3))/$(CFG_EXTRALIB_$(1)) \ @@ -93,6 +98,18 @@ 
$$(HLIB0_H_$(1))/$(CFG_EXTRALIB_$(1)): \ $$(Q)cp $$(TLIB$(2)_T_$(1)_H_$(3))/$(EXTRALIB_GLOB_$(1)) $$@ $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(EXTRALIB_GLOB_$(4)),$$(notdir $$@)) +$$(HLIB0_H_$(1))/$(CFG_LIBRUSTUV_$(1)): + touch $$@ +# NOTE: this should get uncommented after a snapshot and the rule above this can +# get deleted, right now we're not expecting a librustuv in a snapshot. +# $$(HLIB0_H_$(1))/$(CFG_LIBRUSTUV_$(1)): \ +# $$(TLIB$(2)_T_$(1)_H_$(3))/$(CFG_LIBRUSTUV_$(1)) \ +# | $(HLIB0_H_$(1))/ +# @$$(call E, cp: $$@) +# $$(call CHECK_FOR_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_GLOB_$(4)),$$(notdir $$@)) +# $$(Q)cp $$(TLIB$(2)_T_$(1)_H_$(3))/$(LIBRUSTUV_GLOB_$(1)) $$@ +# $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_GLOB_$(4)),$$(notdir $$@)) + $$(HLIB0_H_$(1))/$(CFG_LIBRUSTC_$(1)): \ $$(TLIB$(2)_T_$(1)_H_$(3))/$(CFG_LIBRUSTC_$(1)) \ | $(HLIB0_H_$(1))/ diff --git a/mk/target.mk b/mk/target.mk index cd0b78dfe6d53..e71ca152e9a81 100644 --- a/mk/target.mk +++ b/mk/target.mk @@ -73,11 +73,22 @@ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_EXTRALIB_$(2)): \ $$(STAGE$(1)_T_$(2)_H_$(3)) $$(WFLAGS_ST$(1)) --out-dir $$(@D) $$< && touch $$@ $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(EXTRALIB_GLOB_$(2)),$$(notdir $$@)) +$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)): \ + $$(LIBRUSTUV_CRATE) $$(LIBRUSTUV_INPUTS) \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_STDLIB_$(2)) \ + $$(TSREQ$(1)_T_$(2)_H_$(3)) \ + | $$(TLIB$(1)_T_$(2)_H_$(3))/ + @$$(call E, compile_and_link: $$@) + $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_GLOB_$(2)),$$(notdir $$@)) + $$(STAGE$(1)_T_$(2)_H_$(3)) $$(WFLAGS_ST$(1)) --out-dir $$(@D) $$< && touch $$@ + $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_GLOB_$(2)),$$(notdir $$@)) + $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBSYNTAX_$(3)): \ $$(LIBSYNTAX_CRATE) $$(LIBSYNTAX_INPUTS) \ $$(TSREQ$(1)_T_$(2)_H_$(3)) \ $$(TSTDLIB_DEFAULT$(1)_T_$(2)_H_$(3)) \ 
$$(TEXTRALIB_DEFAULT$(1)_T_$(2)_H_$(3)) \ + $$(TLIBRUSTUV_DEFAULT$(1)_T_$(2)_H_$(3)) \ | $$(TLIB$(1)_T_$(2)_H_$(3))/ @$$(call E, compile_and_link: $$@) $$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBSYNTAX_GLOB_$(2)),$$(notdir $$@)) diff --git a/mk/tests.mk b/mk/tests.mk index d6fc5ecb8e5af..6aec4b81d0a49 100644 --- a/mk/tests.mk +++ b/mk/tests.mk @@ -14,7 +14,7 @@ ###################################################################### # The names of crates that must be tested -TEST_TARGET_CRATES = std extra +TEST_TARGET_CRATES = std extra rustuv TEST_HOST_CRATES = rustpkg rustc rustdoc syntax TEST_CRATES = $(TEST_TARGET_CRATES) $(TEST_HOST_CRATES) @@ -164,6 +164,8 @@ $(info check: android device test dir $(CFG_ADB_TEST_DIR) ready \ $(CFG_ADB_TEST_DIR)) \ $(shell adb push $(TLIB2_T_arm-linux-androideabi_H_$(CFG_BUILD_TRIPLE))/$(EXTRALIB_GLOB_arm-linux-androideabi) \ $(CFG_ADB_TEST_DIR)) \ + $(shell adb push $(TLIB2_T_arm-linux-androideabi_H_$(CFG_BUILD_TRIPLE))/$(LIBRUSTUV_GLOB_arm-linux-androideabi) \ + $(CFG_ADB_TEST_DIR)) \ ) else CFG_ADB_TEST_DIR= @@ -189,6 +191,7 @@ check-test: cleantestlibs cleantmptestlogs all check-stage2-rfail check-lite: cleantestlibs cleantmptestlogs \ check-stage2-std check-stage2-extra check-stage2-rpass \ + check-stage2-rustuv \ check-stage2-rustpkg \ check-stage2-rfail check-stage2-cfail $(Q)$(CFG_PYTHON) $(S)src/etc/check-summary.py tmp/*.log @@ -333,7 +336,8 @@ define TEST_RUNNER # test crates without rebuilding std and extra first ifeq ($(NO_REBUILD),) STDTESTDEP_$(1)_$(2)_$(3) = $$(SREQ$(1)_T_$(2)_H_$(3)) \ - $$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_EXTRALIB_$(2)) + $$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_EXTRALIB_$(2)) \ + $$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_LIBRUSTUV_$(2)) else STDTESTDEP_$(1)_$(2)_$(3) = endif @@ -350,6 +354,12 @@ $(3)/stage$(1)/test/extratest-$(2)$$(X_$(2)): \ @$$(call E, compile_and_link: $$@) $$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test +$(3)/stage$(1)/test/rustuvtest-$(2)$$(X_$(2)): \ + $$(LIBRUSTUV_CRATE) 
$$(LIBRUSTUV_INPUTS) \ + $$(STDTESTDEP_$(1)_$(2)_$(3)) + @$$(call E, compile_and_link: $$@) + $$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test + $(3)/stage$(1)/test/syntaxtest-$(2)$$(X_$(2)): \ $$(LIBSYNTAX_CRATE) $$(LIBSYNTAX_INPUTS) \ $$(STDTESTDEP_$(1)_$(2)_$(3)) diff --git a/src/etc/snapshot.py b/src/etc/snapshot.py index 687f0139f6139..e7057defd7750 100644 --- a/src/etc/snapshot.py +++ b/src/etc/snapshot.py @@ -30,6 +30,7 @@ def scrub(b): "lib/libextra-*.so", "lib/librustc-*.so", "lib/libsyntax-*.so", + "lib/librustuv-*.so", "lib/librustrt.so", "lib/librustllvm.so"], "macos": ["bin/rustc", @@ -37,6 +38,7 @@ def scrub(b): "lib/libextra-*.dylib", "lib/librustc-*.dylib", "lib/libsyntax-*.dylib", + "lib/librustuv-*.dylib", "lib/librustrt.dylib", "lib/librustllvm.dylib"], "winnt": ["bin/rustc.exe", @@ -44,6 +46,7 @@ def scrub(b): "bin/extra-*.dll", "bin/rustc-*.dll", "bin/syntax-*.dll", + "bin/rustuv-*.dll", "bin/rustrt.dll", "bin/rustllvm.dll"], "freebsd": ["bin/rustc", @@ -51,6 +54,7 @@ def scrub(b): "lib/libextra-*.so", "lib/librustc-*.so", "lib/libsyntax-*.so", + "lib/librustuv-*.so", "lib/librustrt.so", "lib/librustllvm.so"] } diff --git a/src/librustc/front/std_inject.rs b/src/librustc/front/std_inject.rs index 2253f151ddf59..b34829bf47fc1 100644 --- a/src/librustc/front/std_inject.rs +++ b/src/librustc/front/std_inject.rs @@ -35,6 +35,10 @@ fn use_std(crate: &ast::Crate) -> bool { !attr::contains_name(crate.attrs, "no_std") } +fn use_uv(crate: &ast::Crate) -> bool { + !attr::contains_name(crate.attrs, "no_uv") +} + fn no_prelude(attrs: &[ast::Attribute]) -> bool { attr::contains_name(attrs, "no_implicit_prelude") } @@ -53,19 +57,30 @@ struct StandardLibraryInjector { impl fold::ast_fold for StandardLibraryInjector { fn fold_crate(&self, crate: ast::Crate) -> ast::Crate { let version = STD_VERSION.to_managed(); - let vi1 = ast::view_item { + let vers_item = attr::mk_name_value_item_str(@"vers", version); + let mut vis = ~[ast::view_item { node: 
ast::view_item_extern_mod(self.sess.ident_of("std"), None, - ~[], + ~[vers_item.clone()], ast::DUMMY_NODE_ID), - attrs: ~[ - attr::mk_attr(attr::mk_name_value_item_str(@"vers", version)) - ], + attrs: ~[], vis: ast::private, span: dummy_sp() - }; + }]; + + if use_uv(&crate) && !*self.sess.building_library { + vis.push(ast::view_item { + node: ast::view_item_extern_mod(self.sess.ident_of("rustuv"), + None, + ~[vers_item], + ast::DUMMY_NODE_ID), + attrs: ~[], + vis: ast::private, + span: dummy_sp() + }); + } - let vis = vec::append(~[vi1], crate.module.view_items); + vis.push_all(crate.module.view_items); let mut new_module = ast::_mod { view_items: vis, ..crate.module.clone() diff --git a/src/librustc/middle/lang_items.rs b/src/librustc/middle/lang_items.rs index 6c15dc019ea34..e8d6a7267dfb6 100644 --- a/src/librustc/middle/lang_items.rs +++ b/src/librustc/middle/lang_items.rs @@ -24,10 +24,9 @@ use driver::session::Session; use metadata::csearch::each_lang_item; use metadata::cstore::iter_crate_data; use middle::ty::{BuiltinBound, BoundFreeze, BoundSend, BoundSized}; -use syntax::ast::{Crate, DefId, MetaItem}; +use syntax::ast; use syntax::ast_util::local_def; use syntax::attr::AttrMetaMethods; -use syntax::ast::{item}; use syntax::visit; use syntax::visit::Visitor; @@ -81,20 +80,22 @@ pub enum LangItem { TyDescStructLangItem, // 36 TyVisitorTraitLangItem, // 37 OpaqueStructLangItem, // 38 + + EventLoopFactoryLangItem, // 39 } pub struct LanguageItems { - items: [Option, ..39] + items: [Option, ..40] } impl LanguageItems { pub fn new() -> LanguageItems { LanguageItems { - items: [ None, ..39 ] + items: [ None, ..40 ] } } - pub fn items<'a>(&'a self) -> Enumerate>> { + pub fn items<'a>(&'a self) -> Enumerate>> { self.items.iter().enumerate() } @@ -145,13 +146,15 @@ impl LanguageItems { 37 => "ty_visitor", 38 => "opaque", + 39 => "event_loop_factory", + _ => "???" } } // FIXME #4621: Method macros sure would be nice here. 
- pub fn require(&self, it: LangItem) -> Result { + pub fn require(&self, it: LangItem) -> Result { match self.items[it as uint] { Some(id) => Ok(id), None => Err(format!("requires `{}` lang_item", @@ -159,7 +162,7 @@ impl LanguageItems { } } - pub fn to_builtin_kind(&self, id: DefId) -> Option { + pub fn to_builtin_kind(&self, id: ast::DefId) -> Option { if Some(id) == self.freeze_trait() { Some(BoundFreeze) } else if Some(id) == self.send_trait() { @@ -171,162 +174,166 @@ impl LanguageItems { } } - pub fn freeze_trait(&self) -> Option { + pub fn freeze_trait(&self) -> Option { self.items[FreezeTraitLangItem as uint] } - pub fn send_trait(&self) -> Option { + pub fn send_trait(&self) -> Option { self.items[SendTraitLangItem as uint] } - pub fn sized_trait(&self) -> Option { + pub fn sized_trait(&self) -> Option { self.items[SizedTraitLangItem as uint] } - pub fn drop_trait(&self) -> Option { + pub fn drop_trait(&self) -> Option { self.items[DropTraitLangItem as uint] } - pub fn add_trait(&self) -> Option { + pub fn add_trait(&self) -> Option { self.items[AddTraitLangItem as uint] } - pub fn sub_trait(&self) -> Option { + pub fn sub_trait(&self) -> Option { self.items[SubTraitLangItem as uint] } - pub fn mul_trait(&self) -> Option { + pub fn mul_trait(&self) -> Option { self.items[MulTraitLangItem as uint] } - pub fn div_trait(&self) -> Option { + pub fn div_trait(&self) -> Option { self.items[DivTraitLangItem as uint] } - pub fn rem_trait(&self) -> Option { + pub fn rem_trait(&self) -> Option { self.items[RemTraitLangItem as uint] } - pub fn neg_trait(&self) -> Option { + pub fn neg_trait(&self) -> Option { self.items[NegTraitLangItem as uint] } - pub fn not_trait(&self) -> Option { + pub fn not_trait(&self) -> Option { self.items[NotTraitLangItem as uint] } - pub fn bitxor_trait(&self) -> Option { + pub fn bitxor_trait(&self) -> Option { self.items[BitXorTraitLangItem as uint] } - pub fn bitand_trait(&self) -> Option { + pub fn bitand_trait(&self) -> Option { 
self.items[BitAndTraitLangItem as uint] } - pub fn bitor_trait(&self) -> Option { + pub fn bitor_trait(&self) -> Option { self.items[BitOrTraitLangItem as uint] } - pub fn shl_trait(&self) -> Option { + pub fn shl_trait(&self) -> Option { self.items[ShlTraitLangItem as uint] } - pub fn shr_trait(&self) -> Option { + pub fn shr_trait(&self) -> Option { self.items[ShrTraitLangItem as uint] } - pub fn index_trait(&self) -> Option { + pub fn index_trait(&self) -> Option { self.items[IndexTraitLangItem as uint] } - pub fn eq_trait(&self) -> Option { + pub fn eq_trait(&self) -> Option { self.items[EqTraitLangItem as uint] } - pub fn ord_trait(&self) -> Option { + pub fn ord_trait(&self) -> Option { self.items[OrdTraitLangItem as uint] } - pub fn str_eq_fn(&self) -> Option { + pub fn str_eq_fn(&self) -> Option { self.items[StrEqFnLangItem as uint] } - pub fn uniq_str_eq_fn(&self) -> Option { + pub fn uniq_str_eq_fn(&self) -> Option { self.items[UniqStrEqFnLangItem as uint] } - pub fn fail_fn(&self) -> Option { + pub fn fail_fn(&self) -> Option { self.items[FailFnLangItem as uint] } - pub fn fail_bounds_check_fn(&self) -> Option { + pub fn fail_bounds_check_fn(&self) -> Option { self.items[FailBoundsCheckFnLangItem as uint] } - pub fn exchange_malloc_fn(&self) -> Option { + pub fn exchange_malloc_fn(&self) -> Option { self.items[ExchangeMallocFnLangItem as uint] } - pub fn closure_exchange_malloc_fn(&self) -> Option { + pub fn closure_exchange_malloc_fn(&self) -> Option { self.items[ClosureExchangeMallocFnLangItem as uint] } - pub fn exchange_free_fn(&self) -> Option { + pub fn exchange_free_fn(&self) -> Option { self.items[ExchangeFreeFnLangItem as uint] } - pub fn malloc_fn(&self) -> Option { + pub fn malloc_fn(&self) -> Option { self.items[MallocFnLangItem as uint] } - pub fn free_fn(&self) -> Option { + pub fn free_fn(&self) -> Option { self.items[FreeFnLangItem as uint] } - pub fn borrow_as_imm_fn(&self) -> Option { + pub fn borrow_as_imm_fn(&self) -> Option { 
self.items[BorrowAsImmFnLangItem as uint] } - pub fn borrow_as_mut_fn(&self) -> Option { + pub fn borrow_as_mut_fn(&self) -> Option { self.items[BorrowAsMutFnLangItem as uint] } - pub fn return_to_mut_fn(&self) -> Option { + pub fn return_to_mut_fn(&self) -> Option { self.items[ReturnToMutFnLangItem as uint] } - pub fn check_not_borrowed_fn(&self) -> Option { + pub fn check_not_borrowed_fn(&self) -> Option { self.items[CheckNotBorrowedFnLangItem as uint] } - pub fn strdup_uniq_fn(&self) -> Option { + pub fn strdup_uniq_fn(&self) -> Option { self.items[StrDupUniqFnLangItem as uint] } - pub fn record_borrow_fn(&self) -> Option { + pub fn record_borrow_fn(&self) -> Option { self.items[RecordBorrowFnLangItem as uint] } - pub fn unrecord_borrow_fn(&self) -> Option { + pub fn unrecord_borrow_fn(&self) -> Option { self.items[UnrecordBorrowFnLangItem as uint] } - pub fn start_fn(&self) -> Option { + pub fn start_fn(&self) -> Option { self.items[StartFnLangItem as uint] } - pub fn ty_desc(&self) -> Option { + pub fn ty_desc(&self) -> Option { self.items[TyDescStructLangItem as uint] } - pub fn ty_visitor(&self) -> Option { + pub fn ty_visitor(&self) -> Option { self.items[TyVisitorTraitLangItem as uint] } - pub fn opaque(&self) -> Option { + pub fn opaque(&self) -> Option { self.items[OpaqueStructLangItem as uint] } + pub fn event_loop_factory(&self) -> Option { + self.items[EventLoopFactoryLangItem as uint] + } } -struct LanguageItemCollector<'self> { +struct LanguageItemCollector { items: LanguageItems, - crate: &'self Crate, session: Session, item_refs: HashMap<&'static str, uint>, } struct LanguageItemVisitor<'self> { - this: *mut LanguageItemCollector<'self>, + this: &'self mut LanguageItemCollector, } impl<'self> Visitor<()> for LanguageItemVisitor<'self> { - - fn visit_item(&mut self, item:@item, _:()) { - - for attribute in item.attrs.iter() { - unsafe { - (*self.this).match_and_collect_meta_item( - local_def(item.id), - attribute.node.value - ); + fn 
visit_item(&mut self, item: @ast::item, _: ()) { + match extract(item.attrs) { + Some(value) => { + let item_index = self.this.item_refs.find_equiv(&value).map(|x| *x); + + match item_index { + Some(item_index) => { + self.this.collect_item(item_index, local_def(item.id)) } + None => {} } + } + None => {} + } visit::walk_item(self, item, ()); } } -impl<'self> LanguageItemCollector<'self> { - pub fn new<'a>(crate: &'a Crate, session: Session) - -> LanguageItemCollector<'a> { +impl LanguageItemCollector { + pub fn new(session: Session) -> LanguageItemCollector { let mut item_refs = HashMap::new(); item_refs.insert("freeze", FreezeTraitLangItem as uint); @@ -374,27 +381,16 @@ impl<'self> LanguageItemCollector<'self> { item_refs.insert("ty_desc", TyDescStructLangItem as uint); item_refs.insert("ty_visitor", TyVisitorTraitLangItem as uint); item_refs.insert("opaque", OpaqueStructLangItem as uint); + item_refs.insert("event_loop_factory", EventLoopFactoryLangItem as uint); LanguageItemCollector { - crate: crate, session: session, items: LanguageItems::new(), item_refs: item_refs } } - pub fn match_and_collect_meta_item(&mut self, - item_def_id: DefId, - meta_item: &MetaItem) { - match meta_item.name_str_pair() { - Some((key, value)) => { - self.match_and_collect_item(item_def_id, key, value); - } - None => {} // skip - } - } - - pub fn collect_item(&mut self, item_index: uint, item_def_id: DefId) { + pub fn collect_item(&mut self, item_index: uint, item_def_id: ast::DefId) { // Check for duplicates. match self.items.items[item_index] { Some(original_def_id) if original_def_id != item_def_id => { @@ -410,33 +406,9 @@ impl<'self> LanguageItemCollector<'self> { self.items.items[item_index] = Some(item_def_id); } - pub fn match_and_collect_item(&mut self, - item_def_id: DefId, - key: &str, - value: @str) { - if "lang" != key { - return; // Didn't match. 
- } - - let item_index = self.item_refs.find_equiv(&value).map(|x| *x); - // prevent borrow checker from considering ^~~~~~~~~~~ - // self to be borrowed (annoying) - - match item_index { - Some(item_index) => { - self.collect_item(item_index, item_def_id); - } - None => { - // Didn't match. - return; - } - } - } - - pub fn collect_local_language_items(&mut self) { - let this: *mut LanguageItemCollector = &mut *self; - let mut v = LanguageItemVisitor { this: this }; - visit::walk_crate(&mut v, self.crate, ()); + pub fn collect_local_language_items(&mut self, crate: &ast::Crate) { + let mut v = LanguageItemVisitor { this: self }; + visit::walk_crate(&mut v, crate, ()); } pub fn collect_external_language_items(&mut self) { @@ -444,24 +416,37 @@ impl<'self> LanguageItemCollector<'self> { do iter_crate_data(crate_store) |crate_number, _crate_metadata| { do each_lang_item(crate_store, crate_number) |node_id, item_index| { - let def_id = DefId { crate: crate_number, node: node_id }; + let def_id = ast::DefId { crate: crate_number, node: node_id }; self.collect_item(item_index, def_id); true }; } } - pub fn collect(&mut self) { - self.collect_local_language_items(); + pub fn collect(&mut self, crate: &ast::Crate) { + self.collect_local_language_items(crate); self.collect_external_language_items(); } } -pub fn collect_language_items(crate: &Crate, +pub fn extract(attrs: &[ast::Attribute]) -> Option<@str> { + for attribute in attrs.iter() { + match attribute.name_str_pair() { + Some((key, value)) if "lang" == key => { + return Some(value); + } + Some(*) | None => {} + } + } + + return None; +} + +pub fn collect_language_items(crate: &ast::Crate, session: Session) -> LanguageItems { - let mut collector = LanguageItemCollector::new(crate, session); - collector.collect(); + let mut collector = LanguageItemCollector::new(session); + collector.collect(crate); let LanguageItemCollector { items, _ } = collector; session.abort_if_errors(); items diff --git 
a/src/librustc/middle/trans/base.rs b/src/librustc/middle/trans/base.rs index 74f9e2114e10c..665aaad5f47e0 100644 --- a/src/librustc/middle/trans/base.rs +++ b/src/librustc/middle/trans/base.rs @@ -2596,16 +2596,36 @@ pub fn get_item_val(ccx: @mut CrateContext, id: ast::NodeId) -> ValueRef { foreign::register_foreign_item_fn(ccx, abis, &path, ni) } ast::foreign_item_static(*) => { - let ident = foreign::link_name(ccx, ni); - unsafe { - let g = do ident.with_c_str |buf| { - let ty = type_of(ccx, ty); - llvm::LLVMAddGlobal(ccx.llmod, ty.to_ref(), buf) - }; - if attr::contains_name(ni.attrs, "weak_linkage") { - lib::llvm::SetLinkage(g, lib::llvm::ExternalWeakLinkage); + // Treat the crate map static specially in order to + // a weak-linkage-like functionality where it's + // dynamically resolved at runtime. If we're + // building a library, then we declare the static + // with weak linkage, but if we're building a + // library then we've already declared the crate map + // so use that instead. 
+ if attr::contains_name(ni.attrs, "crate_map") { + if *ccx.sess.building_library { + let s = "_rust_crate_map_toplevel"; + let g = unsafe { do s.with_c_str |buf| { + let ty = type_of(ccx, ty); + llvm::LLVMAddGlobal(ccx.llmod, + ty.to_ref(), buf) + } }; + lib::llvm::SetLinkage(g, + lib::llvm::ExternalWeakLinkage); + g + } else { + ccx.crate_map + } + } else { + let ident = foreign::link_name(ccx, ni); + unsafe { + do ident.with_c_str |buf| { + let ty = type_of(ccx, ty); + llvm::LLVMAddGlobal(ccx.llmod, + ty.to_ref(), buf) + } } - g } } } @@ -2929,7 +2949,12 @@ pub fn decl_crate_map(sess: session::Session, mapmeta: LinkMeta, let sym_name = ~"_rust_crate_map_" + mapname; let slicetype = Type::struct_([int_type, int_type], false); - let maptype = Type::struct_([Type::i32(), slicetype, slicetype], false); + let maptype = Type::struct_([ + Type::i32(), // version + slicetype, // child modules + slicetype, // sub crate-maps + int_type.ptr_to(), // event loop factory + ], false); let map = do sym_name.with_c_str |buf| { unsafe { llvm::LLVMAddGlobal(llmod, maptype.to_ref(), buf) @@ -2964,6 +2989,20 @@ pub fn fill_crate_map(ccx: &mut CrateContext, map: ValueRef) { subcrates.push(p2i(ccx, cr)); i += 1; } + let event_loop_factory = if !*ccx.sess.building_library { + match ccx.tcx.lang_items.event_loop_factory() { + Some(did) => unsafe { + let name = csearch::get_symbol(ccx.sess.cstore, did); + let global = do name.with_c_str |buf| { + llvm::LLVMAddGlobal(ccx.llmod, ccx.int_type.to_ref(), buf) + }; + global + }, + None => C_null(ccx.int_type.ptr_to()) + } + } else { + C_null(ccx.int_type.ptr_to()) + }; unsafe { let maptype = Type::array(&ccx.int_type, subcrates.len() as u64); let vec_elements = do "_crate_map_child_vectors".with_c_str |buf| { @@ -2983,7 +3022,8 @@ pub fn fill_crate_map(ccx: &mut CrateContext, map: ValueRef) { C_struct([ p2i(ccx, vec_elements), C_uint(ccx, subcrates.len()) - ], false) + ], false), + event_loop_factory, ], false)); } } diff --git 
a/src/librustuv/addrinfo.rs b/src/librustuv/addrinfo.rs new file mode 100644 index 0000000000000..09736749997be --- /dev/null +++ b/src/librustuv/addrinfo.rs @@ -0,0 +1,273 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::cast::transmute; +use std::cell::Cell; +use std::libc::{c_int, c_void}; +use std::ptr::null; +use ai = std::rt::io::net::addrinfo; + +use uvll; +use uvll::UV_GETADDRINFO; +use super::{Loop, UvError, NativeHandle, status_to_maybe_uv_error}; +use net; + +type GetAddrInfoCallback = ~fn(GetAddrInfoRequest, &net::UvAddrInfo, Option); + +pub struct GetAddrInfoRequest(*uvll::uv_getaddrinfo_t); + +pub struct RequestData { + priv getaddrinfo_cb: Option, +} + +impl GetAddrInfoRequest { + pub fn new() -> GetAddrInfoRequest { + let req = unsafe { uvll::malloc_req(UV_GETADDRINFO) }; + assert!(req.is_not_null()); + let mut req: GetAddrInfoRequest = NativeHandle::from_native_handle(req); + req.install_req_data(); + return req; + } + + pub fn getaddrinfo(&mut self, loop_: &Loop, node: Option<&str>, + service: Option<&str>, hints: Option, + cb: GetAddrInfoCallback) { + + assert!(node.is_some() || service.is_some()); + + let (c_node, c_node_ptr) = match node { + Some(n) => { + let c_node = n.to_c_str(); + let c_node_ptr = c_node.with_ref(|r| r); + (Some(c_node), c_node_ptr) + } + None => (None, null()) + }; + + let (c_service, c_service_ptr) = match service { + Some(s) => { + let c_service = s.to_c_str(); + let c_service_ptr = c_service.with_ref(|r| r); + (Some(c_service), c_service_ptr) + } + None => (None, null()) + }; + + let cb = Cell::new(cb); + let wrapper_cb: GetAddrInfoCallback = |req, addrinfo, err| { + // Capture some heap values that 
need to stay alive for the + // getaddrinfo call + let _ = &c_node; + let _ = &c_service; + + let cb = cb.take(); + cb(req, addrinfo, err) + }; + + let hint = hints.map(|hint| { + let mut flags = 0; + do each_ai_flag |cval, aival| { + if hint.flags & (aival as uint) != 0 { + flags |= cval as i32; + } + } + /* XXX: do we really want to support these? + let socktype = match hint.socktype { + Some(ai::Stream) => uvll::rust_SOCK_STREAM(), + Some(ai::Datagram) => uvll::rust_SOCK_DGRAM(), + Some(ai::Raw) => uvll::rust_SOCK_RAW(), + None => 0, + }; + let protocol = match hint.protocol { + Some(ai::UDP) => uvll::rust_IPPROTO_UDP(), + Some(ai::TCP) => uvll::rust_IPPROTO_TCP(), + _ => 0, + }; + */ + let socktype = 0; + let protocol = 0; + + uvll::addrinfo { + ai_flags: flags, + ai_family: hint.family as c_int, + ai_socktype: socktype, + ai_protocol: protocol, + ai_addrlen: 0, + ai_canonname: null(), + ai_addr: null(), + ai_next: null(), + } + }); + let hint_ptr = hint.as_ref().map_default(null(), |x| x as *uvll::addrinfo); + + self.get_req_data().getaddrinfo_cb = Some(wrapper_cb); + + unsafe { + assert!(0 == uvll::getaddrinfo(loop_.native_handle(), + self.native_handle(), + getaddrinfo_cb, + c_node_ptr, + c_service_ptr, + hint_ptr)); + } + + extern "C" fn getaddrinfo_cb(req: *uvll::uv_getaddrinfo_t, + status: c_int, + res: *uvll::addrinfo) { + let mut req: GetAddrInfoRequest = NativeHandle::from_native_handle(req); + let err = status_to_maybe_uv_error(status); + let addrinfo = net::UvAddrInfo(res); + let data = req.get_req_data(); + (*data.getaddrinfo_cb.get_ref())(req, &addrinfo, err); + unsafe { + uvll::freeaddrinfo(res); + } + } + } + + fn get_loop(&self) -> Loop { + unsafe { + Loop { + handle: uvll::get_loop_from_fs_req(self.native_handle()) + } + } + } + + fn install_req_data(&mut self) { + let req = self.native_handle() as *uvll::uv_getaddrinfo_t; + let data = ~RequestData { + getaddrinfo_cb: None + }; + unsafe { + let data = transmute::<~RequestData, *c_void>(data); + 
uvll::set_data_for_req(req, data); + } + } + + fn get_req_data<'r>(&'r mut self) -> &'r mut RequestData { + unsafe { + let data = uvll::get_data_for_req(self.native_handle()); + let data = transmute::<&*c_void, &mut ~RequestData>(&data); + return &mut **data; + } + } + + fn delete(self) { + unsafe { + let data = uvll::get_data_for_req(self.native_handle()); + let _data = transmute::<*c_void, ~RequestData>(data); + uvll::set_data_for_req(self.native_handle(), null::<()>()); + uvll::free_req(self.native_handle()); + } + } +} + +fn each_ai_flag(_f: &fn(c_int, ai::Flag)) { + /* XXX: do we really want to support these? + unsafe { + f(uvll::rust_AI_ADDRCONFIG(), ai::AddrConfig); + f(uvll::rust_AI_ALL(), ai::All); + f(uvll::rust_AI_CANONNAME(), ai::CanonName); + f(uvll::rust_AI_NUMERICHOST(), ai::NumericHost); + f(uvll::rust_AI_NUMERICSERV(), ai::NumericServ); + f(uvll::rust_AI_PASSIVE(), ai::Passive); + f(uvll::rust_AI_V4MAPPED(), ai::V4Mapped); + } + */ +} + +// Traverse the addrinfo linked list, producing a vector of Rust socket addresses +pub fn accum_addrinfo(addr: &net::UvAddrInfo) -> ~[ai::Info] { + unsafe { + let &net::UvAddrInfo(addr) = addr; + let mut addr = addr; + + let mut addrs = ~[]; + loop { + let uvaddr = net::sockaddr_to_UvSocketAddr((*addr).ai_addr); + let rustaddr = net::uv_socket_addr_to_socket_addr(uvaddr); + + let mut flags = 0; + do each_ai_flag |cval, aival| { + if (*addr).ai_flags & cval != 0 { + flags |= aival as uint; + } + } + + /* XXX: do we really want to support these + let protocol = match (*addr).ai_protocol { + p if p == uvll::rust_IPPROTO_UDP() => Some(ai::UDP), + p if p == uvll::rust_IPPROTO_TCP() => Some(ai::TCP), + _ => None, + }; + let socktype = match (*addr).ai_socktype { + p if p == uvll::rust_SOCK_STREAM() => Some(ai::Stream), + p if p == uvll::rust_SOCK_DGRAM() => Some(ai::Datagram), + p if p == uvll::rust_SOCK_RAW() => Some(ai::Raw), + _ => None, + }; + */ + let protocol = None; + let socktype = None; + + addrs.push(ai::Info { 
+ address: rustaddr, + family: (*addr).ai_family as uint, + socktype: socktype, + protocol: protocol, + flags: flags, + }); + if (*addr).ai_next.is_not_null() { + addr = (*addr).ai_next; + } else { + break; + } + } + + return addrs; + } +} + +impl NativeHandle<*uvll::uv_getaddrinfo_t> for GetAddrInfoRequest { + fn from_native_handle(handle: *uvll::uv_getaddrinfo_t) -> GetAddrInfoRequest { + GetAddrInfoRequest(handle) + } + fn native_handle(&self) -> *uvll::uv_getaddrinfo_t { + match self { &GetAddrInfoRequest(ptr) => ptr } + } +} + +#[cfg(test)] +mod test { + use Loop; + use std::rt::io::net::ip::{SocketAddr, Ipv4Addr}; + use super::*; + + #[test] + fn getaddrinfo_test() { + let mut loop_ = Loop::new(); + let mut req = GetAddrInfoRequest::new(); + do req.getaddrinfo(&loop_, Some("localhost"), None, None) |_, addrinfo, _| { + let sockaddrs = accum_addrinfo(addrinfo); + let mut found_local = false; + let local_addr = &SocketAddr { + ip: Ipv4Addr(127, 0, 0, 1), + port: 0 + }; + for addr in sockaddrs.iter() { + found_local = found_local || addr.address == *local_addr; + } + assert!(found_local); + } + loop_.run(); + loop_.close(); + req.delete(); + } +} diff --git a/src/librustuv/async.rs b/src/librustuv/async.rs new file mode 100644 index 0000000000000..4a1858ee03672 --- /dev/null +++ b/src/librustuv/async.rs @@ -0,0 +1,83 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::libc::c_int; + +use uvll; +use super::{Watcher, Loop, NativeHandle, AsyncCallback, WatcherInterop}; +use super::status_to_maybe_uv_error; + +pub struct AsyncWatcher(*uvll::uv_async_t); +impl Watcher for AsyncWatcher { } + +impl AsyncWatcher { + pub fn new(loop_: &mut Loop, cb: AsyncCallback) -> AsyncWatcher { + unsafe { + let handle = uvll::malloc_handle(uvll::UV_ASYNC); + assert!(handle.is_not_null()); + let mut watcher: AsyncWatcher = NativeHandle::from_native_handle(handle); + watcher.install_watcher_data(); + let data = watcher.get_watcher_data(); + data.async_cb = Some(cb); + assert_eq!(0, uvll::async_init(loop_.native_handle(), handle, async_cb)); + return watcher; + } + + extern fn async_cb(handle: *uvll::uv_async_t, status: c_int) { + let mut watcher: AsyncWatcher = NativeHandle::from_native_handle(handle); + let status = status_to_maybe_uv_error(status); + let data = watcher.get_watcher_data(); + let cb = data.async_cb.get_ref(); + (*cb)(watcher, status); + } + } + + pub fn send(&mut self) { + unsafe { + let handle = self.native_handle(); + uvll::async_send(handle); + } + } +} + +impl NativeHandle<*uvll::uv_async_t> for AsyncWatcher { + fn from_native_handle(handle: *uvll::uv_async_t) -> AsyncWatcher { + AsyncWatcher(handle) + } + fn native_handle(&self) -> *uvll::uv_async_t { + match self { &AsyncWatcher(ptr) => ptr } + } +} + +#[cfg(test)] +mod test { + + use super::*; + use Loop; + use std::unstable::run_in_bare_thread; + use std::rt::thread::Thread; + use std::cell::Cell; + + #[test] + fn smoke_test() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + let watcher = AsyncWatcher::new(&mut loop_, |w, _| w.close(||()) ); + let watcher_cell = Cell::new(watcher); + let thread = do Thread::start { + let mut watcher = watcher_cell.take(); + watcher.send(); + }; + loop_.run(); + loop_.close(); + thread.join(); + } + } +} diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs new file mode 100644 index 0000000000000..575226f79028b --- 
/dev/null +++ b/src/librustuv/file.rs @@ -0,0 +1,647 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::ptr::null; +use std::c_str; +use std::c_str::CString; +use std::libc::c_void; +use std::cast::transmute; +use std::libc; +use std::libc::{c_int}; + +use super::{Request, NativeHandle, Loop, FsCallback, Buf, + status_to_maybe_uv_error, UvError}; +use uvll; +use uvll::*; + +pub struct FsRequest(*uvll::uv_fs_t); +impl Request for FsRequest {} + +pub struct RequestData { + priv complete_cb: Option +} + +impl FsRequest { + pub fn new() -> FsRequest { + let fs_req = unsafe { malloc_req(UV_FS) }; + assert!(fs_req.is_not_null()); + let fs_req: FsRequest = NativeHandle::from_native_handle(fs_req); + fs_req + } + + pub fn open(self, loop_: &Loop, path: &CString, flags: int, mode: int, + cb: FsCallback) { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let ret = path.with_ref(|p| unsafe { + uvll::fs_open(loop_.native_handle(), + self.native_handle(), p, flags, mode, complete_cb_ptr) + }); + assert_eq!(ret, 0); + } + + pub fn open_sync(self, loop_: &Loop, path: &CString, + flags: int, mode: int) -> Result { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(None) + }; + let result = path.with_ref(|p| unsafe { + uvll::fs_open(loop_.native_handle(), + self.native_handle(), p, flags, mode, complete_cb_ptr) + }); + self.sync_cleanup(result) + } + + pub fn unlink(self, loop_: &Loop, path: &CString, cb: FsCallback) { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let ret = path.with_ref(|p| unsafe { + uvll::fs_unlink(loop_.native_handle(), + self.native_handle(), p, 
complete_cb_ptr) + }); + assert_eq!(ret, 0); + } + + pub fn unlink_sync(self, loop_: &Loop, path: &CString) + -> Result { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(None) + }; + let result = path.with_ref(|p| unsafe { + uvll::fs_unlink(loop_.native_handle(), + self.native_handle(), p, complete_cb_ptr) + }); + self.sync_cleanup(result) + } + + pub fn stat(self, loop_: &Loop, path: &CString, cb: FsCallback) { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let ret = path.with_ref(|p| unsafe { + uvll::fs_stat(loop_.native_handle(), + self.native_handle(), p, complete_cb_ptr) + }); + assert_eq!(ret, 0); + } + + pub fn write(self, loop_: &Loop, fd: c_int, buf: Buf, offset: i64, cb: FsCallback) { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let base_ptr = buf.base as *c_void; + let len = buf.len as uint; + let ret = unsafe { + uvll::fs_write(loop_.native_handle(), self.native_handle(), + fd, base_ptr, + len, offset, complete_cb_ptr) + }; + assert_eq!(ret, 0); + } + pub fn write_sync(self, loop_: &Loop, fd: c_int, buf: Buf, offset: i64) + -> Result { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(None) + }; + let base_ptr = buf.base as *c_void; + let len = buf.len as uint; + let result = unsafe { + uvll::fs_write(loop_.native_handle(), self.native_handle(), + fd, base_ptr, + len, offset, complete_cb_ptr) + }; + self.sync_cleanup(result) + } + + pub fn read(self, loop_: &Loop, fd: c_int, buf: Buf, offset: i64, cb: FsCallback) { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let buf_ptr = buf.base as *c_void; + let len = buf.len as uint; + let ret = unsafe { + uvll::fs_read(loop_.native_handle(), self.native_handle(), + fd, buf_ptr, + len, offset, complete_cb_ptr) + }; + assert_eq!(ret, 0); + } + pub fn read_sync(self, loop_: &Loop, fd: c_int, buf: Buf, offset: i64) + -> Result { + let complete_cb_ptr = { + let mut 
me = self; + me.req_boilerplate(None) + }; + let buf_ptr = buf.base as *c_void; + let len = buf.len as uint; + let result = unsafe { + uvll::fs_read(loop_.native_handle(), self.native_handle(), + fd, buf_ptr, + len, offset, complete_cb_ptr) + }; + self.sync_cleanup(result) + } + + pub fn close(self, loop_: &Loop, fd: c_int, cb: FsCallback) { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let ret = unsafe { + uvll::fs_close(loop_.native_handle(), self.native_handle(), + fd, complete_cb_ptr) + }; + assert_eq!(ret, 0); + } + pub fn close_sync(self, loop_: &Loop, fd: c_int) -> Result { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(None) + }; + let result = unsafe { + uvll::fs_close(loop_.native_handle(), self.native_handle(), + fd, complete_cb_ptr) + }; + self.sync_cleanup(result) + } + + pub fn mkdir(self, loop_: &Loop, path: &CString, mode: int, cb: FsCallback) { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let ret = path.with_ref(|p| unsafe { + uvll::fs_mkdir(loop_.native_handle(), + self.native_handle(), p, mode, complete_cb_ptr) + }); + assert_eq!(ret, 0); + } + + pub fn rmdir(self, loop_: &Loop, path: &CString, cb: FsCallback) { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let ret = path.with_ref(|p| unsafe { + uvll::fs_rmdir(loop_.native_handle(), + self.native_handle(), p, complete_cb_ptr) + }); + assert_eq!(ret, 0); + } + + pub fn readdir(self, loop_: &Loop, path: &CString, + flags: c_int, cb: FsCallback) { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let ret = path.with_ref(|p| unsafe { + uvll::fs_readdir(loop_.native_handle(), + self.native_handle(), p, flags, complete_cb_ptr) + }); + assert_eq!(ret, 0); + } + + // accessors/utility funcs + fn sync_cleanup(self, result: c_int) + -> Result { + self.cleanup_and_delete(); + match status_to_maybe_uv_error(result as i32) { + Some(err) 
=> Err(err), + None => Ok(result) + } + } + fn req_boilerplate(&mut self, cb: Option) -> *u8 { + let result = match cb { + Some(_) => { + compl_cb as *u8 + }, + None => 0 as *u8 + }; + self.install_req_data(cb); + result + } + pub fn install_req_data(&mut self, cb: Option) { + let fs_req = (self.native_handle()) as *uvll::uv_write_t; + let data = ~RequestData { + complete_cb: cb + }; + unsafe { + let data = transmute::<~RequestData, *c_void>(data); + uvll::set_data_for_req(fs_req, data); + } + } + + fn get_req_data<'r>(&'r mut self) -> &'r mut RequestData { + unsafe { + let data = uvll::get_data_for_req((self.native_handle())); + let data = transmute::<&*c_void, &mut ~RequestData>(&data); + &mut **data + } + } + + pub fn get_result(&mut self) -> c_int { + unsafe { + uvll::get_result_from_fs_req(self.native_handle()) + } + } + + pub fn get_loop(&self) -> Loop { + unsafe { Loop{handle:uvll::get_loop_from_fs_req(self.native_handle())} } + } + + pub fn get_stat(&self) -> uv_stat_t { + let stat = uv_stat_t::new(); + unsafe { uvll::populate_stat(self.native_handle(), &stat); } + stat + } + + pub fn get_ptr(&self) -> *libc::c_void { + unsafe { + uvll::get_ptr_from_fs_req(self.native_handle()) + } + } + + pub fn each_path(&mut self, f: &fn(&CString)) { + let ptr = self.get_ptr(); + match self.get_result() { + n if (n <= 0) => {} + n => { + let n_len = n as uint; + // we pass in the len that uv tells us is there + // for the entries and we don't continue past that.. + // it appears that sometimes the multistring isn't + // correctly delimited and we stray into garbage memory? 
+ // in any case, passing Some(n_len) fixes it and ensures + // good results + unsafe { + c_str::from_c_multistring(ptr as *libc::c_char, + Some(n_len), f); + } + } + } + } + + fn cleanup_and_delete(self) { + unsafe { + let data = uvll::get_data_for_req(self.native_handle()); + let _data = transmute::<*c_void, ~RequestData>(data); + uvll::set_data_for_req(self.native_handle(), null::<()>()); + uvll::fs_req_cleanup(self.native_handle()); + free_req(self.native_handle() as *c_void) + } + } +} + +impl NativeHandle<*uvll::uv_fs_t> for FsRequest { + fn from_native_handle(handle: *uvll:: uv_fs_t) -> FsRequest { + FsRequest(handle) + } + fn native_handle(&self) -> *uvll::uv_fs_t { + match self { &FsRequest(ptr) => ptr } + } +} + +fn sync_cleanup(result: int) + -> Result { + match status_to_maybe_uv_error(result as i32) { + Some(err) => Err(err), + None => Ok(result) + } +} + +extern fn compl_cb(req: *uv_fs_t) { + let mut req: FsRequest = NativeHandle::from_native_handle(req); + // pull the user cb out of the req data + let cb = { + let data = req.get_req_data(); + assert!(data.complete_cb.is_some()); + // option dance, option dance. oooooh yeah. + data.complete_cb.take_unwrap() + }; + // in uv_fs_open calls, the result will be the fd in the + // case of success, otherwise it's -1 indicating an error + let result = req.get_result(); + let status = status_to_maybe_uv_error(result); + // we have a req and status, call the user cb.. + // only giving the user a ref to the FsRequest, as we + // have to clean it up, afterwards (and they aren't really + // reusable, anyways + cb(&mut req, status); + // clean up the req (and its data!) 
after calling the user cb + req.cleanup_and_delete(); +} + +#[cfg(test)] +mod test { + use super::*; + //use std::rt::test::*; + use std::libc::{STDOUT_FILENO}; + use std::vec; + use std::str; + use std::unstable::run_in_bare_thread; + use super::super::{Loop, Buf, slice_to_uv_buf}; + use std::libc::{O_CREAT, O_RDWR, O_RDONLY, S_IWUSR, S_IRUSR}; + + #[test] + fn file_test_full_simple() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + let create_flags = O_RDWR | O_CREAT; + let read_flags = O_RDONLY; + // 0644 BZZT! WRONG! 0600! See below. + let mode = S_IWUSR |S_IRUSR; + // these aren't defined in std::libc :( + //map_mode(S_IRGRP) | + //map_mode(S_IROTH); + let path_str = "./tmp/file_full_simple.txt"; + let write_val = "hello".as_bytes().to_owned(); + let write_buf = slice_to_uv_buf(write_val); + let write_buf_ptr: *Buf = &write_buf; + let read_buf_len = 1028; + let read_mem = vec::from_elem(read_buf_len, 0u8); + let read_buf = slice_to_uv_buf(read_mem); + let read_buf_ptr: *Buf = &read_buf; + let open_req = FsRequest::new(); + do open_req.open(&loop_, &path_str.to_c_str(), create_flags as int, + mode as int) |req, uverr| { + assert!(uverr.is_none()); + let fd = req.get_result(); + let buf = unsafe { *write_buf_ptr }; + let write_req = FsRequest::new(); + do write_req.write(&req.get_loop(), fd, buf, -1) |req, uverr| { + let close_req = FsRequest::new(); + do close_req.close(&req.get_loop(), fd) |req, _| { + assert!(uverr.is_none()); + let loop_ = req.get_loop(); + let open_req = FsRequest::new(); + do open_req.open(&loop_, &path_str.to_c_str(), + read_flags as int,0) |req, uverr| { + assert!(uverr.is_none()); + let loop_ = req.get_loop(); + let fd = req.get_result(); + let read_buf = unsafe { *read_buf_ptr }; + let read_req = FsRequest::new(); + do read_req.read(&loop_, fd, read_buf, 0) |req, uverr| { + assert!(uverr.is_none()); + let loop_ = req.get_loop(); + // we know nread >=0 because uverr is none.. 
+ let nread = req.get_result() as uint; + // nread == 0 would be EOF + if nread > 0 { + let read_str = unsafe { + let read_buf = *read_buf_ptr; + str::from_utf8( + vec::from_buf( + read_buf.base, nread)) + }; + assert!(read_str == ~"hello"); + let close_req = FsRequest::new(); + do close_req.close(&loop_, fd) |req,uverr| { + assert!(uverr.is_none()); + let loop_ = &req.get_loop(); + let unlink_req = FsRequest::new(); + do unlink_req.unlink(loop_, + &path_str.to_c_str()) + |_,uverr| { + assert!(uverr.is_none()); + }; + }; + }; + }; + }; + }; + }; + }; + loop_.run(); + loop_.close(); + } + } + + #[test] + fn file_test_full_simple_sync() { + do run_in_bare_thread { + // setup + let mut loop_ = Loop::new(); + let create_flags = O_RDWR | + O_CREAT; + let read_flags = O_RDONLY; + // 0644 + let mode = S_IWUSR | + S_IRUSR; + //S_IRGRP | + //S_IROTH; + let path_str = "./tmp/file_full_simple_sync.txt"; + let write_val = "hello".as_bytes().to_owned(); + let write_buf = slice_to_uv_buf(write_val); + // open/create + let open_req = FsRequest::new(); + let result = open_req.open_sync(&loop_, &path_str.to_c_str(), + create_flags as int, mode as int); + assert!(result.is_ok()); + let fd = result.unwrap(); + // write + let write_req = FsRequest::new(); + let result = write_req.write_sync(&loop_, fd, write_buf, -1); + assert!(result.is_ok()); + // close + let close_req = FsRequest::new(); + let result = close_req.close_sync(&loop_, fd); + assert!(result.is_ok()); + // re-open + let open_req = FsRequest::new(); + let result = open_req.open_sync(&loop_, &path_str.to_c_str(), + read_flags as int,0); + assert!(result.is_ok()); + let len = 1028; + let fd = result.unwrap(); + // read + let read_mem: ~[u8] = vec::from_elem(len, 0u8); + let buf = slice_to_uv_buf(read_mem); + let read_req = FsRequest::new(); + let result = read_req.read_sync(&loop_, fd, buf, 0); + assert!(result.is_ok()); + let nread = result.unwrap(); + // nread == 0 would be EOF.. 
we know it's >= zero because otherwise + // the above assert would fail + if nread > 0 { + let read_str = str::from_utf8( + read_mem.slice(0, nread as uint)); + assert!(read_str == ~"hello"); + // close + let close_req = FsRequest::new(); + let result = close_req.close_sync(&loop_, fd); + assert!(result.is_ok()); + // unlink + let unlink_req = FsRequest::new(); + let result = unlink_req.unlink_sync(&loop_, &path_str.to_c_str()); + assert!(result.is_ok()); + } else { fail!("nread was 0.. wudn't expectin' that."); } + loop_.close(); + } + } + + fn naive_print(loop_: &Loop, input: &str) { + let write_val = input.as_bytes(); + let write_buf = slice_to_uv_buf(write_val); + let write_req = FsRequest::new(); + write_req.write_sync(loop_, STDOUT_FILENO, write_buf, -1); + } + + #[test] + fn file_test_write_to_stdout() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + naive_print(&loop_, "zanzibar!\n"); + loop_.run(); + loop_.close(); + }; + } + #[test] + fn file_test_stat_simple() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + let path = "./tmp/file_test_stat_simple.txt"; + let create_flags = O_RDWR | + O_CREAT; + let mode = S_IWUSR | + S_IRUSR; + let write_val = "hello".as_bytes().to_owned(); + let write_buf = slice_to_uv_buf(write_val); + let write_buf_ptr: *Buf = &write_buf; + let open_req = FsRequest::new(); + do open_req.open(&loop_, &path.to_c_str(), create_flags as int, + mode as int) |req, uverr| { + assert!(uverr.is_none()); + let fd = req.get_result(); + let buf = unsafe { *write_buf_ptr }; + let write_req = FsRequest::new(); + do write_req.write(&req.get_loop(), fd, buf, 0) |req, uverr| { + assert!(uverr.is_none()); + let loop_ = req.get_loop(); + let stat_req = FsRequest::new(); + do stat_req.stat(&loop_, &path.to_c_str()) |req, uverr| { + assert!(uverr.is_none()); + let loop_ = req.get_loop(); + let stat = req.get_stat(); + let sz: uint = stat.st_size as uint; + assert!(sz > 0); + let close_req = FsRequest::new(); + do 
close_req.close(&loop_, fd) |req, uverr| { + assert!(uverr.is_none()); + let loop_ = req.get_loop(); + let unlink_req = FsRequest::new(); + do unlink_req.unlink(&loop_, + &path.to_c_str()) |req,uverr| { + assert!(uverr.is_none()); + let loop_ = req.get_loop(); + let stat_req = FsRequest::new(); + do stat_req.stat(&loop_, + &path.to_c_str()) |_, uverr| { + // should cause an error because the + // file doesn't exist anymore + assert!(uverr.is_some()); + }; + }; + }; + }; + }; + }; + loop_.run(); + loop_.close(); + } + } + + #[test] + fn file_test_mk_rm_dir() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + let path = "./tmp/mk_rm_dir"; + let mode = S_IWUSR | + S_IRUSR; + let mkdir_req = FsRequest::new(); + do mkdir_req.mkdir(&loop_, &path.to_c_str(), + mode as int) |req,uverr| { + assert!(uverr.is_none()); + let loop_ = req.get_loop(); + let stat_req = FsRequest::new(); + do stat_req.stat(&loop_, &path.to_c_str()) |req, uverr| { + assert!(uverr.is_none()); + let loop_ = req.get_loop(); + let stat = req.get_stat(); + naive_print(&loop_, format!("{:?}", stat)); + assert!(stat.is_dir()); + let rmdir_req = FsRequest::new(); + do rmdir_req.rmdir(&loop_, &path.to_c_str()) |req,uverr| { + assert!(uverr.is_none()); + let loop_ = req.get_loop(); + let stat_req = FsRequest::new(); + do stat_req.stat(&loop_, &path.to_c_str()) |_req, uverr| { + assert!(uverr.is_some()); + } + } + } + } + loop_.run(); + loop_.close(); + } + } + #[test] + fn file_test_mkdir_chokes_on_double_create() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + let path = "./tmp/double_create_dir"; + let mode = S_IWUSR | + S_IRUSR; + let mkdir_req = FsRequest::new(); + do mkdir_req.mkdir(&loop_, &path.to_c_str(), mode as int) |req,uverr| { + assert!(uverr.is_none()); + let loop_ = req.get_loop(); + let mkdir_req = FsRequest::new(); + do mkdir_req.mkdir(&loop_, &path.to_c_str(), + mode as int) |req,uverr| { + assert!(uverr.is_some()); + let loop_ = req.get_loop(); + let _stat = 
req.get_stat(); + let rmdir_req = FsRequest::new(); + do rmdir_req.rmdir(&loop_, &path.to_c_str()) |req,uverr| { + assert!(uverr.is_none()); + let _loop = req.get_loop(); + } + } + } + loop_.run(); + loop_.close(); + } + } + #[test] + fn file_test_rmdir_chokes_on_nonexistant_path() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + let path = "./tmp/never_existed_dir"; + let rmdir_req = FsRequest::new(); + do rmdir_req.rmdir(&loop_, &path.to_c_str()) |_req, uverr| { + assert!(uverr.is_some()); + } + loop_.run(); + loop_.close(); + } + } +} diff --git a/src/librustuv/idle.rs b/src/librustuv/idle.rs new file mode 100644 index 0000000000000..4f606b5f01f8a --- /dev/null +++ b/src/librustuv/idle.rs @@ -0,0 +1,137 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::libc::c_int; + +use uvll; +use super::{Watcher, Loop, NativeHandle, IdleCallback, status_to_maybe_uv_error}; + +pub struct IdleWatcher(*uvll::uv_idle_t); +impl Watcher for IdleWatcher { } + +impl IdleWatcher { + pub fn new(loop_: &mut Loop) -> IdleWatcher { + unsafe { + let handle = uvll::malloc_handle(uvll::UV_IDLE); + assert!(handle.is_not_null()); + assert_eq!(uvll::idle_init(loop_.native_handle(), handle), 0); + let mut watcher: IdleWatcher = NativeHandle::from_native_handle(handle); + watcher.install_watcher_data(); + return watcher + } + } + + pub fn start(&mut self, cb: IdleCallback) { + { + let data = self.get_watcher_data(); + data.idle_cb = Some(cb); + } + + unsafe { + assert_eq!(uvll::idle_start(self.native_handle(), idle_cb), 0) + } + } + + pub fn restart(&mut self) { + unsafe { + assert!(self.get_watcher_data().idle_cb.is_some()); + assert_eq!(uvll::idle_start(self.native_handle(), idle_cb), 0) + } + } + + pub fn stop(&mut self) { + // NB: Not resetting the Rust idle_cb to None here because `stop` is + // likely called from *within* the idle callback, causing a use after + // free + + unsafe { + assert_eq!(uvll::idle_stop(self.native_handle()), 0); + } + } +} + +impl NativeHandle<*uvll::uv_idle_t> for IdleWatcher { + fn from_native_handle(handle: *uvll::uv_idle_t) -> IdleWatcher { + IdleWatcher(handle) + } + fn native_handle(&self) -> *uvll::uv_idle_t { + match self { &IdleWatcher(ptr) => ptr } + } +} + +extern fn idle_cb(handle: *uvll::uv_idle_t, status: c_int) { + let mut idle_watcher: IdleWatcher = NativeHandle::from_native_handle(handle); + let data = idle_watcher.get_watcher_data(); + let cb: &IdleCallback = data.idle_cb.get_ref(); + let status = status_to_maybe_uv_error(status); + (*cb)(idle_watcher, status); +} + +#[cfg(test)] +mod test { + + use Loop; + use super::*; + use std::unstable::run_in_bare_thread; + + #[test] + #[ignore(reason = "valgrind - loop destroyed before watcher?")] + fn idle_new_then_close() { + do 
run_in_bare_thread { + let mut loop_ = Loop::new(); + let idle_watcher = { IdleWatcher::new(&mut loop_) }; + idle_watcher.close(||()); + } + } + + #[test] + fn idle_smoke_test() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + let mut idle_watcher = { IdleWatcher::new(&mut loop_) }; + let mut count = 10; + let count_ptr: *mut int = &mut count; + do idle_watcher.start |idle_watcher, status| { + let mut idle_watcher = idle_watcher; + assert!(status.is_none()); + if unsafe { *count_ptr == 10 } { + idle_watcher.stop(); + idle_watcher.close(||()); + } else { + unsafe { *count_ptr = *count_ptr + 1; } + } + } + loop_.run(); + loop_.close(); + assert_eq!(count, 10); + } + } + + #[test] + fn idle_start_stop_start() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + let mut idle_watcher = { IdleWatcher::new(&mut loop_) }; + do idle_watcher.start |idle_watcher, status| { + let mut idle_watcher = idle_watcher; + assert!(status.is_none()); + idle_watcher.stop(); + do idle_watcher.start |idle_watcher, status| { + assert!(status.is_none()); + let mut idle_watcher = idle_watcher; + idle_watcher.stop(); + idle_watcher.close(||()); + } + } + loop_.run(); + loop_.close(); + } + } +} diff --git a/src/librustuv/macros.rs b/src/librustuv/macros.rs new file mode 100644 index 0000000000000..cbbed316d83df --- /dev/null +++ b/src/librustuv/macros.rs @@ -0,0 +1,36 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[macro_escape]; + +use std::fmt; + +macro_rules! uverrln ( + ($($arg:tt)*) => ( { + format_args!(::macros::dumb_println, $($arg)*) + } ) +) + +// Some basic logging. Enabled by passing `--cfg uvdebug` to the libstd build. +macro_rules! 
uvdebug ( + ($($arg:tt)*) => ( { + if cfg!(uvdebug) { + uverrln!($($arg)*) + } + }) +) + +pub fn dumb_println(args: &fmt::Arguments) { + use std::rt::io::native::stdio::stderr; + use std::rt::io::Writer; + + let mut out = stderr(); + fmt::writeln(&mut out as &mut Writer, args); +} diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs new file mode 100644 index 0000000000000..0aaa931c9475e --- /dev/null +++ b/src/librustuv/net.rs @@ -0,0 +1,851 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::libc::{size_t, ssize_t, c_int, c_void, c_uint}; +use std::vec; +use std::str; +use std::rt::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr}; + +use uvll; +use uvll::*; +use super::{AllocCallback, ConnectionCallback, ReadCallback, UdpReceiveCallback, + UdpSendCallback, Loop, Watcher, Request, UvError, Buf, NativeHandle, + status_to_maybe_uv_error, empty_buf}; + +pub struct UvAddrInfo(*uvll::addrinfo); + +pub enum UvSocketAddr { + UvIpv4SocketAddr(*sockaddr_in), + UvIpv6SocketAddr(*sockaddr_in6), +} + +pub fn sockaddr_to_UvSocketAddr(addr: *uvll::sockaddr) -> UvSocketAddr { + unsafe { + assert!((is_ip4_addr(addr) || is_ip6_addr(addr))); + assert!(!(is_ip4_addr(addr) && is_ip6_addr(addr))); + match addr { + _ if is_ip4_addr(addr) => UvIpv4SocketAddr(addr as *uvll::sockaddr_in), + _ if is_ip6_addr(addr) => UvIpv6SocketAddr(addr as *uvll::sockaddr_in6), + _ => fail!(), + } + } +} + +fn socket_addr_as_uv_socket_addr(addr: SocketAddr, f: &fn(UvSocketAddr) -> T) -> T { + let malloc = match addr.ip { + Ipv4Addr(*) => malloc_ip4_addr, + Ipv6Addr(*) => malloc_ip6_addr, + }; + let wrap = match addr.ip { + Ipv4Addr(*) => UvIpv4SocketAddr, + Ipv6Addr(*) => UvIpv6SocketAddr, 
+ }; + let free = match addr.ip { + Ipv4Addr(*) => free_ip4_addr, + Ipv6Addr(*) => free_ip6_addr, + }; + + let addr = unsafe { malloc(addr.ip.to_str(), addr.port as int) }; + do (|| { + f(wrap(addr)) + }).finally { + unsafe { free(addr) }; + } +} + +fn uv_socket_addr_as_socket_addr(addr: UvSocketAddr, f: &fn(SocketAddr) -> T) -> T { + let ip_size = match addr { + UvIpv4SocketAddr(*) => 4/*groups of*/ * 3/*digits separated by*/ + 3/*periods*/, + UvIpv6SocketAddr(*) => 8/*groups of*/ * 4/*hex digits separated by*/ + 7 /*colons*/, + }; + let ip_name = { + let buf = vec::from_elem(ip_size + 1 /*null terminated*/, 0u8); + unsafe { + let buf_ptr = vec::raw::to_ptr(buf); + match addr { + UvIpv4SocketAddr(addr) => uvll::ip4_name(addr, buf_ptr, ip_size as size_t), + UvIpv6SocketAddr(addr) => uvll::ip6_name(addr, buf_ptr, ip_size as size_t), + } + }; + buf + }; + let ip_port = unsafe { + let port = match addr { + UvIpv4SocketAddr(addr) => uvll::ip4_port(addr), + UvIpv6SocketAddr(addr) => uvll::ip6_port(addr), + }; + port as u16 + }; + let ip_str = str::from_utf8_slice(ip_name).trim_right_chars(&'\x00'); + let ip_addr = FromStr::from_str(ip_str).unwrap(); + + // finally run the closure + f(SocketAddr { ip: ip_addr, port: ip_port }) +} + +pub fn uv_socket_addr_to_socket_addr(addr: UvSocketAddr) -> SocketAddr { + use std::util; + uv_socket_addr_as_socket_addr(addr, util::id) +} + +#[cfg(test)] +#[test] +fn test_ip4_conversion() { + use std::rt; + let ip4 = rt::test::next_test_ip4(); + assert_eq!(ip4, socket_addr_as_uv_socket_addr(ip4, uv_socket_addr_to_socket_addr)); +} + +#[cfg(test)] +#[test] +fn test_ip6_conversion() { + use std::rt; + let ip6 = rt::test::next_test_ip6(); + assert_eq!(ip6, socket_addr_as_uv_socket_addr(ip6, uv_socket_addr_to_socket_addr)); +} + +// uv_stream_t is the parent class of uv_tcp_t, uv_pipe_t, uv_tty_t +// and uv_file_t +pub struct StreamWatcher(*uvll::uv_stream_t); +impl Watcher for StreamWatcher { } + +impl StreamWatcher { + pub fn 
read_start(&mut self, alloc: AllocCallback, cb: ReadCallback) { + unsafe { + match uvll::read_start(self.native_handle(), alloc_cb, read_cb) { + 0 => { + let data = self.get_watcher_data(); + data.alloc_cb = Some(alloc); + data.read_cb = Some(cb); + } + n => { + cb(*self, 0, empty_buf(), Some(UvError(n))) + } + } + } + + extern fn alloc_cb(stream: *uvll::uv_stream_t, suggested_size: size_t) -> Buf { + let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(stream); + let alloc_cb = stream_watcher.get_watcher_data().alloc_cb.get_ref(); + return (*alloc_cb)(suggested_size as uint); + } + + extern fn read_cb(stream: *uvll::uv_stream_t, nread: ssize_t, buf: Buf) { + uvdebug!("buf addr: {}", buf.base); + uvdebug!("buf len: {}", buf.len); + let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(stream); + let cb = stream_watcher.get_watcher_data().read_cb.get_ref(); + let status = status_to_maybe_uv_error(nread as c_int); + (*cb)(stream_watcher, nread as int, buf, status); + } + } + + pub fn read_stop(&mut self) { + // It would be nice to drop the alloc and read callbacks here, + // but read_stop may be called from inside one of them and we + // would end up freeing the in-use environment + let handle = self.native_handle(); + unsafe { assert_eq!(uvll::read_stop(handle), 0); } + } + + pub fn write(&mut self, buf: Buf, cb: ConnectionCallback) { + let req = WriteRequest::new(); + return unsafe { + match uvll::write(req.native_handle(), self.native_handle(), + [buf], write_cb) { + 0 => { + let data = self.get_watcher_data(); + assert!(data.write_cb.is_none()); + data.write_cb = Some(cb); + } + n => { + req.delete(); + cb(*self, Some(UvError(n))) + } + } + }; + + extern fn write_cb(req: *uvll::uv_write_t, status: c_int) { + let write_request: WriteRequest = NativeHandle::from_native_handle(req); + let mut stream_watcher = write_request.stream(); + write_request.delete(); + let cb = stream_watcher.get_watcher_data().write_cb.take_unwrap(); + 
let status = status_to_maybe_uv_error(status); + cb(stream_watcher, status); + } + } + + + pub fn listen(&mut self, cb: ConnectionCallback) -> Result<(), UvError> { + { + let data = self.get_watcher_data(); + assert!(data.connect_cb.is_none()); + data.connect_cb = Some(cb); + } + + return unsafe { + static BACKLOG: c_int = 128; // XXX should be configurable + match uvll::listen(self.native_handle(), BACKLOG, connection_cb) { + 0 => Ok(()), + n => Err(UvError(n)) + } + }; + + extern fn connection_cb(handle: *uvll::uv_stream_t, status: c_int) { + uvdebug!("connection_cb"); + let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(handle); + let cb = stream_watcher.get_watcher_data().connect_cb.get_ref(); + let status = status_to_maybe_uv_error(status); + (*cb)(stream_watcher, status); + } + } + + pub fn accept(&mut self, stream: StreamWatcher) { + let self_handle = self.native_handle() as *c_void; + let stream_handle = stream.native_handle() as *c_void; + assert_eq!(0, unsafe { uvll::accept(self_handle, stream_handle) } ); + } +} + +impl NativeHandle<*uvll::uv_stream_t> for StreamWatcher { + fn from_native_handle(handle: *uvll::uv_stream_t) -> StreamWatcher { + StreamWatcher(handle) + } + fn native_handle(&self) -> *uvll::uv_stream_t { + match self { &StreamWatcher(ptr) => ptr } + } +} + +pub struct TcpWatcher(*uvll::uv_tcp_t); +impl Watcher for TcpWatcher { } + +impl TcpWatcher { + pub fn new(loop_: &Loop) -> TcpWatcher { + unsafe { + let handle = malloc_handle(UV_TCP); + assert!(handle.is_not_null()); + assert_eq!(0, uvll::tcp_init(loop_.native_handle(), handle)); + let mut watcher: TcpWatcher = NativeHandle::from_native_handle(handle); + watcher.install_watcher_data(); + return watcher; + } + } + + pub fn bind(&mut self, address: SocketAddr) -> Result<(), UvError> { + do socket_addr_as_uv_socket_addr(address) |addr| { + let result = unsafe { + match addr { + UvIpv4SocketAddr(addr) => uvll::tcp_bind(self.native_handle(), addr), + 
UvIpv6SocketAddr(addr) => uvll::tcp_bind6(self.native_handle(), addr), + } + }; + match result { + 0 => Ok(()), + _ => Err(UvError(result)), + } + } + } + + pub fn connect(&mut self, address: SocketAddr, cb: ConnectionCallback) { + unsafe { + assert!(self.get_watcher_data().connect_cb.is_none()); + self.get_watcher_data().connect_cb = Some(cb); + + let connect_handle = ConnectRequest::new().native_handle(); + uvdebug!("connect_t: {}", connect_handle); + do socket_addr_as_uv_socket_addr(address) |addr| { + let result = match addr { + UvIpv4SocketAddr(addr) => uvll::tcp_connect(connect_handle, + self.native_handle(), addr, connect_cb), + UvIpv6SocketAddr(addr) => uvll::tcp_connect6(connect_handle, + self.native_handle(), addr, connect_cb), + }; + assert_eq!(0, result); + } + + extern fn connect_cb(req: *uvll::uv_connect_t, status: c_int) { + uvdebug!("connect_t: {}", req); + let connect_request: ConnectRequest = NativeHandle::from_native_handle(req); + let mut stream_watcher = connect_request.stream(); + connect_request.delete(); + let cb = stream_watcher.get_watcher_data().connect_cb.take_unwrap(); + let status = status_to_maybe_uv_error(status); + cb(stream_watcher, status); + } + } + } + + pub fn as_stream(&self) -> StreamWatcher { + NativeHandle::from_native_handle(self.native_handle() as *uvll::uv_stream_t) + } +} + +impl NativeHandle<*uvll::uv_tcp_t> for TcpWatcher { + fn from_native_handle(handle: *uvll::uv_tcp_t) -> TcpWatcher { + TcpWatcher(handle) + } + fn native_handle(&self) -> *uvll::uv_tcp_t { + match self { &TcpWatcher(ptr) => ptr } + } +} + +pub struct UdpWatcher(*uvll::uv_udp_t); +impl Watcher for UdpWatcher { } + +impl UdpWatcher { + pub fn new(loop_: &Loop) -> UdpWatcher { + unsafe { + let handle = malloc_handle(UV_UDP); + assert!(handle.is_not_null()); + assert_eq!(0, uvll::udp_init(loop_.native_handle(), handle)); + let mut watcher: UdpWatcher = NativeHandle::from_native_handle(handle); + watcher.install_watcher_data(); + return watcher; + } + } 
+ + pub fn bind(&mut self, address: SocketAddr) -> Result<(), UvError> { + do socket_addr_as_uv_socket_addr(address) |addr| { + let result = unsafe { + match addr { + UvIpv4SocketAddr(addr) => uvll::udp_bind(self.native_handle(), addr, 0u32), + UvIpv6SocketAddr(addr) => uvll::udp_bind6(self.native_handle(), addr, 0u32), + } + }; + match result { + 0 => Ok(()), + _ => Err(UvError(result)), + } + } + } + + pub fn recv_start(&mut self, alloc: AllocCallback, cb: UdpReceiveCallback) { + { + let data = self.get_watcher_data(); + data.alloc_cb = Some(alloc); + data.udp_recv_cb = Some(cb); + } + + unsafe { uvll::udp_recv_start(self.native_handle(), alloc_cb, recv_cb); } + + extern fn alloc_cb(handle: *uvll::uv_udp_t, suggested_size: size_t) -> Buf { + let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); + let alloc_cb = udp_watcher.get_watcher_data().alloc_cb.get_ref(); + return (*alloc_cb)(suggested_size as uint); + } + + extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf, + addr: *uvll::sockaddr, flags: c_uint) { + // When there's no data to read the recv callback can be a no-op. + // This can happen if read returns EAGAIN/EWOULDBLOCK. By ignoring + // this we just drop back to kqueue and wait for the next callback. 
+ if nread == 0 { + return; + } + + uvdebug!("buf addr: {}", buf.base); + uvdebug!("buf len: {}", buf.len); + let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); + let cb = udp_watcher.get_watcher_data().udp_recv_cb.get_ref(); + let status = status_to_maybe_uv_error(nread as c_int); + let addr = uv_socket_addr_to_socket_addr(sockaddr_to_UvSocketAddr(addr)); + (*cb)(udp_watcher, nread as int, buf, addr, flags as uint, status); + } + } + + pub fn recv_stop(&mut self) { + unsafe { uvll::udp_recv_stop(self.native_handle()); } + } + + pub fn send(&mut self, buf: Buf, address: SocketAddr, cb: UdpSendCallback) { + { + let data = self.get_watcher_data(); + assert!(data.udp_send_cb.is_none()); + data.udp_send_cb = Some(cb); + } + + let req = UdpSendRequest::new(); + do socket_addr_as_uv_socket_addr(address) |addr| { + let result = unsafe { + match addr { + UvIpv4SocketAddr(addr) => uvll::udp_send(req.native_handle(), + self.native_handle(), [buf], addr, send_cb), + UvIpv6SocketAddr(addr) => uvll::udp_send6(req.native_handle(), + self.native_handle(), [buf], addr, send_cb), + } + }; + assert_eq!(0, result); + } + + extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) { + let send_request: UdpSendRequest = NativeHandle::from_native_handle(req); + let mut udp_watcher = send_request.handle(); + send_request.delete(); + let cb = udp_watcher.get_watcher_data().udp_send_cb.take_unwrap(); + let status = status_to_maybe_uv_error(status); + cb(udp_watcher, status); + } + } +} + +impl NativeHandle<*uvll::uv_udp_t> for UdpWatcher { + fn from_native_handle(handle: *uvll::uv_udp_t) -> UdpWatcher { + UdpWatcher(handle) + } + fn native_handle(&self) -> *uvll::uv_udp_t { + match self { &UdpWatcher(ptr) => ptr } + } +} + +// uv_connect_t is a subclass of uv_req_t +pub struct ConnectRequest(*uvll::uv_connect_t); +impl Request for ConnectRequest { } + +impl ConnectRequest { + + pub fn new() -> ConnectRequest { + let connect_handle = unsafe { 
malloc_req(UV_CONNECT) }; + assert!(connect_handle.is_not_null()); + ConnectRequest(connect_handle as *uvll::uv_connect_t) + } + + fn stream(&self) -> StreamWatcher { + unsafe { + let stream_handle = uvll::get_stream_handle_from_connect_req(self.native_handle()); + NativeHandle::from_native_handle(stream_handle) + } + } + + fn delete(self) { + unsafe { free_req(self.native_handle() as *c_void) } + } +} + +impl NativeHandle<*uvll::uv_connect_t> for ConnectRequest { + fn from_native_handle(handle: *uvll:: uv_connect_t) -> ConnectRequest { + ConnectRequest(handle) + } + fn native_handle(&self) -> *uvll::uv_connect_t { + match self { &ConnectRequest(ptr) => ptr } + } +} + +pub struct WriteRequest(*uvll::uv_write_t); + +impl Request for WriteRequest { } + +impl WriteRequest { + pub fn new() -> WriteRequest { + let write_handle = unsafe { malloc_req(UV_WRITE) }; + assert!(write_handle.is_not_null()); + WriteRequest(write_handle as *uvll::uv_write_t) + } + + pub fn stream(&self) -> StreamWatcher { + unsafe { + let stream_handle = uvll::get_stream_handle_from_write_req(self.native_handle()); + NativeHandle::from_native_handle(stream_handle) + } + } + + pub fn delete(self) { + unsafe { free_req(self.native_handle() as *c_void) } + } +} + +impl NativeHandle<*uvll::uv_write_t> for WriteRequest { + fn from_native_handle(handle: *uvll:: uv_write_t) -> WriteRequest { + WriteRequest(handle) + } + fn native_handle(&self) -> *uvll::uv_write_t { + match self { &WriteRequest(ptr) => ptr } + } +} + +pub struct UdpSendRequest(*uvll::uv_udp_send_t); +impl Request for UdpSendRequest { } + +impl UdpSendRequest { + pub fn new() -> UdpSendRequest { + let send_handle = unsafe { malloc_req(UV_UDP_SEND) }; + assert!(send_handle.is_not_null()); + UdpSendRequest(send_handle as *uvll::uv_udp_send_t) + } + + pub fn handle(&self) -> UdpWatcher { + let send_request_handle = unsafe { + uvll::get_udp_handle_from_send_req(self.native_handle()) + }; + 
NativeHandle::from_native_handle(send_request_handle) + } + + pub fn delete(self) { + unsafe { free_req(self.native_handle() as *c_void) } + } +} + +impl NativeHandle<*uvll::uv_udp_send_t> for UdpSendRequest { + fn from_native_handle(handle: *uvll::uv_udp_send_t) -> UdpSendRequest { + UdpSendRequest(handle) + } + fn native_handle(&self) -> *uvll::uv_udp_send_t { + match self { &UdpSendRequest(ptr) => ptr } + } +} + +#[cfg(test)] +mod test { + use super::*; + use std::util::ignore; + use std::cell::Cell; + use std::vec; + use std::unstable::run_in_bare_thread; + use std::rt::thread::Thread; + use std::rt::test::*; + use super::super::{Loop, AllocCallback}; + use super::super::{vec_from_uv_buf, vec_to_uv_buf, slice_to_uv_buf}; + + #[test] + fn connect_close_ip4() { + do run_in_bare_thread() { + let mut loop_ = Loop::new(); + let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; + // Connect to a port where nobody is listening + let addr = next_test_ip4(); + do tcp_watcher.connect(addr) |stream_watcher, status| { + uvdebug!("tcp_watcher.connect!"); + assert!(status.is_some()); + assert_eq!(status.unwrap().name(), ~"ECONNREFUSED"); + stream_watcher.close(||()); + } + loop_.run(); + loop_.close(); + } + } + + #[test] + fn connect_close_ip6() { + do run_in_bare_thread() { + let mut loop_ = Loop::new(); + let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; + // Connect to a port where nobody is listening + let addr = next_test_ip6(); + do tcp_watcher.connect(addr) |stream_watcher, status| { + uvdebug!("tcp_watcher.connect!"); + assert!(status.is_some()); + assert_eq!(status.unwrap().name(), ~"ECONNREFUSED"); + stream_watcher.close(||()); + } + loop_.run(); + loop_.close(); + } + } + + #[test] + fn udp_bind_close_ip4() { + do run_in_bare_thread() { + let mut loop_ = Loop::new(); + let mut udp_watcher = { UdpWatcher::new(&mut loop_) }; + let addr = next_test_ip4(); + udp_watcher.bind(addr); + udp_watcher.close(||()); + loop_.run(); + loop_.close(); + } + } + + #[test] + 
fn udp_bind_close_ip6() { + do run_in_bare_thread() { + let mut loop_ = Loop::new(); + let mut udp_watcher = { UdpWatcher::new(&mut loop_) }; + let addr = next_test_ip6(); + udp_watcher.bind(addr); + udp_watcher.close(||()); + loop_.run(); + loop_.close(); + } + } + + #[test] + fn listen_ip4() { + do run_in_bare_thread() { + static MAX: int = 10; + let mut loop_ = Loop::new(); + let mut server_tcp_watcher = { TcpWatcher::new(&mut loop_) }; + let addr = next_test_ip4(); + server_tcp_watcher.bind(addr); + let loop_ = loop_; + uvdebug!("listening"); + let mut stream = server_tcp_watcher.as_stream(); + let res = do stream.listen |mut server_stream_watcher, status| { + uvdebug!("listened!"); + assert!(status.is_none()); + let mut loop_ = loop_; + let client_tcp_watcher = TcpWatcher::new(&mut loop_); + let mut client_tcp_watcher = client_tcp_watcher.as_stream(); + server_stream_watcher.accept(client_tcp_watcher); + let count_cell = Cell::new(0); + let server_stream_watcher = server_stream_watcher; + uvdebug!("starting read"); + let alloc: AllocCallback = |size| { + vec_to_uv_buf(vec::from_elem(size, 0u8)) + }; + do client_tcp_watcher.read_start(alloc) |stream_watcher, nread, buf, status| { + + uvdebug!("i'm reading!"); + let buf = vec_from_uv_buf(buf); + let mut count = count_cell.take(); + if status.is_none() { + uvdebug!("got {} bytes", nread); + let buf = buf.unwrap(); + for byte in buf.slice(0, nread as uint).iter() { + assert!(*byte == count as u8); + uvdebug!("{}", *byte as uint); + count += 1; + } + } else { + assert_eq!(count, MAX); + do stream_watcher.close { + server_stream_watcher.close(||()); + } + } + count_cell.put_back(count); + } + }; + + assert!(res.is_ok()); + + let client_thread = do Thread::start { + uvdebug!("starting client thread"); + let mut loop_ = Loop::new(); + let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; + do tcp_watcher.connect(addr) |mut stream_watcher, status| { + uvdebug!("connecting"); + assert!(status.is_none()); + let msg = 
~[0, 1, 2, 3, 4, 5, 6 ,7 ,8, 9]; + let buf = slice_to_uv_buf(msg); + let msg_cell = Cell::new(msg); + do stream_watcher.write(buf) |stream_watcher, status| { + uvdebug!("writing"); + assert!(status.is_none()); + let msg_cell = Cell::new(msg_cell.take()); + stream_watcher.close(||ignore(msg_cell.take())); + } + } + loop_.run(); + loop_.close(); + }; + + let mut loop_ = loop_; + loop_.run(); + loop_.close(); + client_thread.join(); + }; + } + + #[test] + fn listen_ip6() { + do run_in_bare_thread() { + static MAX: int = 10; + let mut loop_ = Loop::new(); + let mut server_tcp_watcher = { TcpWatcher::new(&mut loop_) }; + let addr = next_test_ip6(); + server_tcp_watcher.bind(addr); + let loop_ = loop_; + uvdebug!("listening"); + let mut stream = server_tcp_watcher.as_stream(); + let res = do stream.listen |mut server_stream_watcher, status| { + uvdebug!("listened!"); + assert!(status.is_none()); + let mut loop_ = loop_; + let client_tcp_watcher = TcpWatcher::new(&mut loop_); + let mut client_tcp_watcher = client_tcp_watcher.as_stream(); + server_stream_watcher.accept(client_tcp_watcher); + let count_cell = Cell::new(0); + let server_stream_watcher = server_stream_watcher; + uvdebug!("starting read"); + let alloc: AllocCallback = |size| { + vec_to_uv_buf(vec::from_elem(size, 0u8)) + }; + do client_tcp_watcher.read_start(alloc) + |stream_watcher, nread, buf, status| { + + uvdebug!("i'm reading!"); + let buf = vec_from_uv_buf(buf); + let mut count = count_cell.take(); + if status.is_none() { + uvdebug!("got {} bytes", nread); + let buf = buf.unwrap(); + let r = buf.slice(0, nread as uint); + for byte in r.iter() { + assert!(*byte == count as u8); + uvdebug!("{}", *byte as uint); + count += 1; + } + } else { + assert_eq!(count, MAX); + do stream_watcher.close { + server_stream_watcher.close(||()); + } + } + count_cell.put_back(count); + } + }; + assert!(res.is_ok()); + + let client_thread = do Thread::start { + uvdebug!("starting client thread"); + let mut loop_ = 
Loop::new(); + let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; + do tcp_watcher.connect(addr) |mut stream_watcher, status| { + uvdebug!("connecting"); + assert!(status.is_none()); + let msg = ~[0, 1, 2, 3, 4, 5, 6 ,7 ,8, 9]; + let buf = slice_to_uv_buf(msg); + let msg_cell = Cell::new(msg); + do stream_watcher.write(buf) |stream_watcher, status| { + uvdebug!("writing"); + assert!(status.is_none()); + let msg_cell = Cell::new(msg_cell.take()); + stream_watcher.close(||ignore(msg_cell.take())); + } + } + loop_.run(); + loop_.close(); + }; + + let mut loop_ = loop_; + loop_.run(); + loop_.close(); + client_thread.join(); + } + } + + #[test] + fn udp_recv_ip4() { + do run_in_bare_thread() { + static MAX: int = 10; + let mut loop_ = Loop::new(); + let server_addr = next_test_ip4(); + let client_addr = next_test_ip4(); + + let mut server = UdpWatcher::new(&loop_); + assert!(server.bind(server_addr).is_ok()); + + uvdebug!("starting read"); + let alloc: AllocCallback = |size| { + vec_to_uv_buf(vec::from_elem(size, 0u8)) + }; + + do server.recv_start(alloc) |mut server, nread, buf, src, flags, status| { + server.recv_stop(); + uvdebug!("i'm reading!"); + assert!(status.is_none()); + assert_eq!(flags, 0); + assert_eq!(src, client_addr); + + let buf = vec_from_uv_buf(buf); + let mut count = 0; + uvdebug!("got {} bytes", nread); + + let buf = buf.unwrap(); + for &byte in buf.slice(0, nread as uint).iter() { + assert!(byte == count as u8); + uvdebug!("{}", byte as uint); + count += 1; + } + assert_eq!(count, MAX); + + server.close(||{}); + } + + let thread = do Thread::start { + let mut loop_ = Loop::new(); + let mut client = UdpWatcher::new(&loop_); + assert!(client.bind(client_addr).is_ok()); + let msg = ~[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let buf = slice_to_uv_buf(msg); + do client.send(buf, server_addr) |client, status| { + uvdebug!("writing"); + assert!(status.is_none()); + client.close(||{}); + } + + loop_.run(); + loop_.close(); + }; + + loop_.run(); + 
loop_.close(); + thread.join(); + } + } + + #[test] + fn udp_recv_ip6() { + do run_in_bare_thread() { + static MAX: int = 10; + let mut loop_ = Loop::new(); + let server_addr = next_test_ip6(); + let client_addr = next_test_ip6(); + + let mut server = UdpWatcher::new(&loop_); + assert!(server.bind(server_addr).is_ok()); + + uvdebug!("starting read"); + let alloc: AllocCallback = |size| { + vec_to_uv_buf(vec::from_elem(size, 0u8)) + }; + + do server.recv_start(alloc) |mut server, nread, buf, src, flags, status| { + server.recv_stop(); + uvdebug!("i'm reading!"); + assert!(status.is_none()); + assert_eq!(flags, 0); + assert_eq!(src, client_addr); + + let buf = vec_from_uv_buf(buf); + let mut count = 0; + uvdebug!("got {} bytes", nread); + + let buf = buf.unwrap(); + for &byte in buf.slice(0, nread as uint).iter() { + assert!(byte == count as u8); + uvdebug!("{}", byte as uint); + count += 1; + } + assert_eq!(count, MAX); + + server.close(||{}); + } + + let thread = do Thread::start { + let mut loop_ = Loop::new(); + let mut client = UdpWatcher::new(&loop_); + assert!(client.bind(client_addr).is_ok()); + let msg = ~[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let buf = slice_to_uv_buf(msg); + do client.send(buf, server_addr) |client, status| { + uvdebug!("writing"); + assert!(status.is_none()); + client.close(||{}); + } + + loop_.run(); + loop_.close(); + }; + + loop_.run(); + loop_.close(); + thread.join(); + } + } +} diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs new file mode 100644 index 0000000000000..b453da0cc9ea2 --- /dev/null +++ b/src/librustuv/pipe.rs @@ -0,0 +1,98 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::libc; +use std::c_str::CString; + +use super::{Loop, UvError, Watcher, NativeHandle, status_to_maybe_uv_error}; +use super::ConnectionCallback; +use net; +use uvll; + +pub struct Pipe(*uvll::uv_pipe_t); + +impl Watcher for Pipe {} + +impl Pipe { + pub fn new(loop_: &Loop, ipc: bool) -> Pipe { + unsafe { + let handle = uvll::malloc_handle(uvll::UV_NAMED_PIPE); + assert!(handle.is_not_null()); + let ipc = ipc as libc::c_int; + assert_eq!(uvll::pipe_init(loop_.native_handle(), handle, ipc), 0); + let mut ret: Pipe = + NativeHandle::from_native_handle(handle); + ret.install_watcher_data(); + ret + } + } + + pub fn as_stream(&self) -> net::StreamWatcher { + net::StreamWatcher(**self as *uvll::uv_stream_t) + } + + #[fixed_stack_segment] #[inline(never)] + pub fn open(&mut self, file: libc::c_int) -> Result<(), UvError> { + match unsafe { uvll::pipe_open(self.native_handle(), file) } { + 0 => Ok(()), + n => Err(UvError(n)) + } + } + + #[fixed_stack_segment] #[inline(never)] + pub fn bind(&mut self, name: &CString) -> Result<(), UvError> { + do name.with_ref |name| { + match unsafe { uvll::pipe_bind(self.native_handle(), name) } { + 0 => Ok(()), + n => Err(UvError(n)) + } + } + } + + #[fixed_stack_segment] #[inline(never)] + pub fn connect(&mut self, name: &CString, cb: ConnectionCallback) { + { + let data = self.get_watcher_data(); + assert!(data.connect_cb.is_none()); + data.connect_cb = Some(cb); + } + + let connect = net::ConnectRequest::new(); + let name = do name.with_ref |p| { p }; + + unsafe { + uvll::pipe_connect(connect.native_handle(), + self.native_handle(), + name, + connect_cb) + } + + extern "C" fn connect_cb(req: *uvll::uv_connect_t, status: libc::c_int) { + let connect_request: net::ConnectRequest = + NativeHandle::from_native_handle(req); + let mut stream_watcher = connect_request.stream(); + connect_request.delete(); + + let cb = stream_watcher.get_watcher_data().connect_cb.take_unwrap(); + let status = status_to_maybe_uv_error(status); + 
cb(stream_watcher, status); + } + } + +} + +impl NativeHandle<*uvll::uv_pipe_t> for Pipe { + fn from_native_handle(handle: *uvll::uv_pipe_t) -> Pipe { + Pipe(handle) + } + fn native_handle(&self) -> *uvll::uv_pipe_t { + match self { &Pipe(ptr) => ptr } + } +} diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs new file mode 100644 index 0000000000000..2d746e329f44a --- /dev/null +++ b/src/librustuv/process.rs @@ -0,0 +1,202 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::cell::Cell; +use std::libc; +use std::ptr; +use std::vec; +use std::rt::io::process::*; + +use super::{Watcher, Loop, NativeHandle, UvError}; +use super::{status_to_maybe_uv_error, ExitCallback}; +use uvio::{UvPipeStream, UvUnboundPipe}; +use uvll; + +/// A process wraps the handle of the underlying uv_process_t. +pub struct Process(*uvll::uv_process_t); + +impl Watcher for Process {} + +impl Process { + /// Creates a new process, ready to spawn inside an event loop + pub fn new() -> Process { + let handle = unsafe { uvll::malloc_handle(uvll::UV_PROCESS) }; + assert!(handle.is_not_null()); + let mut ret: Process = NativeHandle::from_native_handle(handle); + ret.install_watcher_data(); + return ret; + } + + /// Spawn a new process inside the specified event loop. + /// + /// The `config` variable will be passed down to libuv, and the `exit_cb` + /// will be run only once, when the process exits. + /// + /// Returns either the corresponding process object or an error which + /// occurred. 
+ pub fn spawn(&mut self, loop_: &Loop, config: ProcessConfig, + exit_cb: ExitCallback) + -> Result<~[Option<~UvPipeStream>], UvError> + { + let cwd = config.cwd.map(|s| s.to_c_str()); + + extern fn on_exit(p: *uvll::uv_process_t, + exit_status: libc::c_int, + term_signal: libc::c_int) { + let mut p: Process = NativeHandle::from_native_handle(p); + let err = match exit_status { + 0 => None, + _ => status_to_maybe_uv_error(-1) + }; + p.get_watcher_data().exit_cb.take_unwrap()(p, + exit_status as int, + term_signal as int, + err); + } + + let io = config.io; + let mut stdio = vec::with_capacity::(io.len()); + let mut ret_io = vec::with_capacity(io.len()); + unsafe { + vec::raw::set_len(&mut stdio, io.len()); + for (slot, other) in stdio.iter().zip(io.iter()) { + let io = set_stdio(slot as *uvll::uv_stdio_container_t, other, + loop_); + ret_io.push(io); + } + } + + let exit_cb = Cell::new(exit_cb); + let ret_io = Cell::new(ret_io); + do with_argv(config.program, config.args) |argv| { + do with_env(config.env) |envp| { + let options = uvll::uv_process_options_t { + exit_cb: on_exit, + file: unsafe { *argv }, + args: argv, + env: envp, + cwd: match cwd { + Some(ref cwd) => cwd.with_ref(|p| p), + None => ptr::null(), + }, + flags: 0, + stdio_count: stdio.len() as libc::c_int, + stdio: stdio.as_imm_buf(|p, _| p), + uid: 0, + gid: 0, + }; + + match unsafe { + uvll::spawn(loop_.native_handle(), **self, options) + } { + 0 => { + (*self).get_watcher_data().exit_cb = Some(exit_cb.take()); + Ok(ret_io.take()) + } + err => Err(UvError(err)) + } + } + } + } + + /// Sends a signal to this process. 
+ /// + /// This is a wrapper around `uv_process_kill` + pub fn kill(&self, signum: int) -> Result<(), UvError> { + match unsafe { + uvll::process_kill(self.native_handle(), signum as libc::c_int) + } { + 0 => Ok(()), + err => Err(UvError(err)) + } + } + + /// Returns the process id of a spawned process + pub fn pid(&self) -> libc::pid_t { + unsafe { uvll::process_pid(**self) as libc::pid_t } + } +} + +unsafe fn set_stdio(dst: *uvll::uv_stdio_container_t, + io: &StdioContainer, + loop_: &Loop) -> Option<~UvPipeStream> { + match *io { + Ignored => { + uvll::set_stdio_container_flags(dst, uvll::STDIO_IGNORE); + None + } + InheritFd(fd) => { + uvll::set_stdio_container_flags(dst, uvll::STDIO_INHERIT_FD); + uvll::set_stdio_container_fd(dst, fd); + None + } + CreatePipe(readable, writable) => { + let mut flags = uvll::STDIO_CREATE_PIPE as libc::c_int; + if readable { + flags |= uvll::STDIO_READABLE_PIPE as libc::c_int; + } + if writable { + flags |= uvll::STDIO_WRITABLE_PIPE as libc::c_int; + } + let pipe = UvUnboundPipe::new(loop_); + let handle = pipe.pipe.as_stream().native_handle(); + uvll::set_stdio_container_flags(dst, flags); + uvll::set_stdio_container_stream(dst, handle); + Some(~UvPipeStream::new(pipe)) + } + } +} + +/// Converts the program and arguments to the argv array expected by libuv +fn with_argv(prog: &str, args: &[~str], f: &fn(**libc::c_char) -> T) -> T { + // First, allocation space to put all the C-strings (we need to have + // ownership of them somewhere + let mut c_strs = vec::with_capacity(args.len() + 1); + c_strs.push(prog.to_c_str()); + for arg in args.iter() { + c_strs.push(arg.to_c_str()); + } + + // Next, create the char** array + let mut c_args = vec::with_capacity(c_strs.len() + 1); + for s in c_strs.iter() { + c_args.push(s.with_ref(|p| p)); + } + c_args.push(ptr::null()); + c_args.as_imm_buf(|buf, _| f(buf)) +} + +/// Converts the environment to the env array expected by libuv +fn with_env(env: Option<&[(~str, ~str)]>, f: 
&fn(**libc::c_char) -> T) -> T { + let env = match env { + Some(s) => s, + None => { return f(ptr::null()); } + }; + // As with argv, create some temporary storage and then the actual array + let mut envp = vec::with_capacity(env.len()); + for &(ref key, ref value) in env.iter() { + envp.push(format!("{}={}", *key, *value).to_c_str()); + } + let mut c_envp = vec::with_capacity(envp.len() + 1); + for s in envp.iter() { + c_envp.push(s.with_ref(|p| p)); + } + c_envp.push(ptr::null()); + c_envp.as_imm_buf(|buf, _| f(buf)) +} + +impl NativeHandle<*uvll::uv_process_t> for Process { + fn from_native_handle(handle: *uvll::uv_process_t) -> Process { + Process(handle) + } + fn native_handle(&self) -> *uvll::uv_process_t { + match self { &Process(ptr) => ptr } + } +} diff --git a/src/librustuv/rustuv.rs b/src/librustuv/rustuv.rs new file mode 100644 index 0000000000000..f483cfb0c658d --- /dev/null +++ b/src/librustuv/rustuv.rs @@ -0,0 +1,424 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/*! + +Bindings to libuv, along with the default implementation of `std::rt::rtio`. + +UV types consist of the event loop (Loop), Watchers, Requests and +Callbacks. + +Watchers and Requests encapsulate pointers to uv *handles*, which have +subtyping relationships with each other. This subtyping is reflected +in the bindings with explicit or implicit coercions. For example, an +upcast from TcpWatcher to StreamWatcher is done with +`tcp_watcher.as_stream()`. In other cases a callback on a specific +type of watcher will be passed a watcher of a supertype. 
+ +Currently all use of Request types (connect/write requests) are +encapsulated in the bindings and don't need to be dealt with by the +caller. + +# Safety note + +Due to the complex lifecycle of uv handles, as well as compiler bugs, +this module is not memory safe and requires explicit memory management, +via `close` and `delete` methods. + +*/ + +#[link(name = "rustuv", + vers = "0.9-pre", + uuid = "f3719011-0459-9b86-b11c-29265c0d0864", + url = "https://github.com/mozilla/rust/tree/master/src/librustuv")]; + +#[license = "MIT/ASL2"]; +#[crate_type = "lib"]; + +#[feature(macro_rules, globs)]; + +use std::str::raw::from_c_str; +use std::vec; +use std::ptr; +use std::str; +use std::libc::{c_void, c_int, size_t, malloc, free}; +use std::cast::transmute; +use std::ptr::null; +use std::unstable::finally::Finally; +use std::rt::io::net::ip::SocketAddr; +use std::rt::io::signal::Signum; + +use std::rt::io::IoError; + +//#[cfg(test)] use unstable::run_in_bare_thread; + +pub use self::file::{FsRequest}; +pub use self::net::{StreamWatcher, TcpWatcher, UdpWatcher}; +pub use self::idle::IdleWatcher; +pub use self::timer::TimerWatcher; +pub use self::async::AsyncWatcher; +pub use self::process::Process; +pub use self::pipe::Pipe; +pub use self::signal::SignalWatcher; + +mod macros; + +/// The implementation of `rtio` for libuv +pub mod uvio; + +/// C bindings to libuv +pub mod uvll; + +pub mod file; +pub mod net; +pub mod idle; +pub mod timer; +pub mod async; +pub mod addrinfo; +pub mod process; +pub mod pipe; +pub mod tty; +pub mod signal; + +/// XXX: Loop(*handle) is buggy with destructors. Normal structs +/// with dtors may not be destructured, but tuple structs can, +/// but the results are not correct. 
+pub struct Loop { + priv handle: *uvll::uv_loop_t +} + +pub struct Handle(*uvll::uv_handle_t); + +impl Watcher for Handle {} +impl NativeHandle<*uvll::uv_handle_t> for Handle { + fn from_native_handle(h: *uvll::uv_handle_t) -> Handle { Handle(h) } + fn native_handle(&self) -> *uvll::uv_handle_t { **self } +} + +/// The trait implemented by uv 'watchers' (handles). Watchers are +/// non-owning wrappers around the uv handles and are not completely +/// safe - there may be multiple instances for a single underlying +/// handle. Watchers are generally created, then `start`ed, `stop`ed +/// and `close`ed, but due to their complex life cycle may not be +/// entirely memory safe if used in unanticipated patterns. +pub trait Watcher { } + +pub trait Request { } + +/// A type that wraps a native handle +pub trait NativeHandle { + fn from_native_handle(T) -> Self; + fn native_handle(&self) -> T; +} + +impl Loop { + pub fn new() -> Loop { + let handle = unsafe { uvll::loop_new() }; + assert!(handle.is_not_null()); + NativeHandle::from_native_handle(handle) + } + + pub fn run(&mut self) { + unsafe { uvll::run(self.native_handle()) }; + } + + pub fn close(&mut self) { + unsafe { uvll::loop_delete(self.native_handle()) }; + } +} + +impl NativeHandle<*uvll::uv_loop_t> for Loop { + fn from_native_handle(handle: *uvll::uv_loop_t) -> Loop { + Loop { handle: handle } + } + fn native_handle(&self) -> *uvll::uv_loop_t { + self.handle + } +} + +// XXX: The uv alloc callback also has a *uv_handle_t arg +pub type AllocCallback = ~fn(uint) -> Buf; +pub type ReadCallback = ~fn(StreamWatcher, int, Buf, Option); +pub type NullCallback = ~fn(); +pub type IdleCallback = ~fn(IdleWatcher, Option); +pub type ConnectionCallback = ~fn(StreamWatcher, Option); +pub type FsCallback = ~fn(&mut FsRequest, Option); +// first int is exit_status, second is term_signal +pub type ExitCallback = ~fn(Process, int, int, Option); +pub type TimerCallback = ~fn(TimerWatcher, Option); +pub type AsyncCallback = 
~fn(AsyncWatcher, Option); +pub type UdpReceiveCallback = ~fn(UdpWatcher, int, Buf, SocketAddr, uint, Option); +pub type UdpSendCallback = ~fn(UdpWatcher, Option); +pub type SignalCallback = ~fn(SignalWatcher, Signum); + + +/// Callbacks used by StreamWatchers, set as custom data on the foreign handle. +/// XXX: Would be better not to have all watchers allocate room for all callback types. +struct WatcherData { + read_cb: Option, + write_cb: Option, + connect_cb: Option, + close_cb: Option, + alloc_cb: Option, + idle_cb: Option, + timer_cb: Option, + async_cb: Option, + udp_recv_cb: Option, + udp_send_cb: Option, + exit_cb: Option, + signal_cb: Option, +} + +pub trait WatcherInterop { + fn event_loop(&self) -> Loop; + fn install_watcher_data(&mut self); + fn get_watcher_data<'r>(&'r mut self) -> &'r mut WatcherData; + fn drop_watcher_data(&mut self); + fn close(self, cb: NullCallback); + fn close_async(self); +} + +impl> WatcherInterop for W { + /// Get the uv event loop from a Watcher + fn event_loop(&self) -> Loop { + unsafe { + let handle = self.native_handle(); + let loop_ = uvll::get_loop_for_uv_handle(handle); + NativeHandle::from_native_handle(loop_) + } + } + + fn install_watcher_data(&mut self) { + unsafe { + let data = ~WatcherData { + read_cb: None, + write_cb: None, + connect_cb: None, + close_cb: None, + alloc_cb: None, + idle_cb: None, + timer_cb: None, + async_cb: None, + udp_recv_cb: None, + udp_send_cb: None, + exit_cb: None, + signal_cb: None, + }; + let data = transmute::<~WatcherData, *c_void>(data); + uvll::set_data_for_uv_handle(self.native_handle(), data); + } + } + + fn get_watcher_data<'r>(&'r mut self) -> &'r mut WatcherData { + unsafe { + let data = uvll::get_data_for_uv_handle(self.native_handle()); + let data = transmute::<&*c_void, &mut ~WatcherData>(&data); + return &mut **data; + } + } + + fn drop_watcher_data(&mut self) { + unsafe { + let data = uvll::get_data_for_uv_handle(self.native_handle()); + let _data = transmute::<*c_void, 
~WatcherData>(data); + uvll::set_data_for_uv_handle(self.native_handle(), null::<()>()); + } + } + + fn close(self, cb: NullCallback) { + let mut this = self; + { + let data = this.get_watcher_data(); + assert!(data.close_cb.is_none()); + data.close_cb = Some(cb); + } + + unsafe { uvll::close(this.native_handle(), close_cb); } + + extern fn close_cb(handle: *uvll::uv_handle_t) { + let mut h: Handle = NativeHandle::from_native_handle(handle); + h.get_watcher_data().close_cb.take_unwrap()(); + h.drop_watcher_data(); + unsafe { uvll::free_handle(handle as *c_void) } + } + } + + fn close_async(self) { + unsafe { uvll::close(self.native_handle(), close_cb); } + + extern fn close_cb(handle: *uvll::uv_handle_t) { + let mut h: Handle = NativeHandle::from_native_handle(handle); + h.drop_watcher_data(); + unsafe { uvll::free_handle(handle as *c_void) } + } + } +} + +// XXX: Need to define the error constants like EOF so they can be +// compared to the UvError type + +pub struct UvError(c_int); + +impl UvError { + pub fn name(&self) -> ~str { + unsafe { + let inner = match self { &UvError(a) => a }; + let name_str = uvll::err_name(inner); + assert!(name_str.is_not_null()); + from_c_str(name_str) + } + } + + pub fn desc(&self) -> ~str { + unsafe { + let inner = match self { &UvError(a) => a }; + let desc_str = uvll::strerror(inner); + assert!(desc_str.is_not_null()); + from_c_str(desc_str) + } + } + + pub fn is_eof(&self) -> bool { + **self == uvll::EOF + } +} + +impl ToStr for UvError { + fn to_str(&self) -> ~str { + format!("{}: {}", self.name(), self.desc()) + } +} + +#[test] +fn error_smoke_test() { + let err: UvError = UvError(uvll::EOF); + assert_eq!(err.to_str(), ~"EOF: end of file"); +} + +pub fn uv_error_to_io_error(uverr: UvError) -> IoError { + unsafe { + // Importing error constants + use uvll::*; + use std::rt::io::*; + + // uv error descriptions are static + let c_desc = uvll::strerror(*uverr); + let desc = str::raw::c_str_to_static_slice(c_desc); + + let kind = 
match *uverr { + UNKNOWN => OtherIoError, + OK => OtherIoError, + EOF => EndOfFile, + EACCES => PermissionDenied, + ECONNREFUSED => ConnectionRefused, + ECONNRESET => ConnectionReset, + ENOTCONN => NotConnected, + EPIPE => BrokenPipe, + ECONNABORTED => ConnectionAborted, + err => { + uvdebug!("uverr.code {}", err as int); + // XXX: Need to map remaining uv error types + OtherIoError + } + }; + + IoError { + kind: kind, + desc: desc, + detail: None + } + } +} + +/// Given a uv handle, convert a callback status to a UvError +pub fn status_to_maybe_uv_error(status: c_int) -> Option +{ + if status >= 0 { + None + } else { + Some(UvError(status)) + } +} + +/// The uv buffer type +pub type Buf = uvll::uv_buf_t; + +pub fn empty_buf() -> Buf { + uvll::uv_buf_t { + base: null(), + len: 0, + } +} + +/// Borrow a slice to a Buf +pub fn slice_to_uv_buf(v: &[u8]) -> Buf { + let data = vec::raw::to_ptr(v); + unsafe { uvll::buf_init(data, v.len()) } +} + +// XXX: Do these conversions without copying + +/// Transmute an owned vector to a Buf +pub fn vec_to_uv_buf(v: ~[u8]) -> Buf { + #[fixed_stack_segment]; #[inline(never)]; + + unsafe { + let data = malloc(v.len() as size_t) as *u8; + assert!(data.is_not_null()); + do v.as_imm_buf |b, l| { + let data = data as *mut u8; + ptr::copy_memory(data, b, l) + } + uvll::buf_init(data, v.len()) + } +} + +/// Transmute a Buf that was once a ~[u8] back to ~[u8] +pub fn vec_from_uv_buf(buf: Buf) -> Option<~[u8]> { + #[fixed_stack_segment]; #[inline(never)]; + + if !(buf.len == 0 && buf.base.is_null()) { + let v = unsafe { vec::from_buf(buf.base, buf.len as uint) }; + unsafe { free(buf.base as *c_void) }; + return Some(v); + } else { + // No buffer + uvdebug!("No buffer!"); + return None; + } +} +/* +#[test] +fn test_slice_to_uv_buf() { + let slice = [0, .. 
20]; + let buf = slice_to_uv_buf(slice); + + assert!(buf.len == 20); + + unsafe { + let base = transmute::<*u8, *mut u8>(buf.base); + (*base) = 1; + (*ptr::mut_offset(base, 1)) = 2; + } + + assert!(slice[0] == 1); + assert!(slice[1] == 2); +} + + +#[test] +fn loop_smoke_test() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + loop_.run(); + loop_.close(); + } +} +*/ diff --git a/src/librustuv/signal.rs b/src/librustuv/signal.rs new file mode 100644 index 0000000000000..3fcf449959dba --- /dev/null +++ b/src/librustuv/signal.rs @@ -0,0 +1,72 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::cast; +use std::libc::c_int; +use std::rt::io::signal::Signum; + +use super::{Loop, NativeHandle, SignalCallback, UvError, Watcher}; +use uvll; + +pub struct SignalWatcher(*uvll::uv_signal_t); + +impl Watcher for SignalWatcher { } + +impl SignalWatcher { + pub fn new(loop_: &mut Loop) -> SignalWatcher { + unsafe { + let handle = uvll::malloc_handle(uvll::UV_SIGNAL); + assert!(handle.is_not_null()); + assert!(0 == uvll::signal_init(loop_.native_handle(), handle)); + let mut watcher: SignalWatcher = NativeHandle::from_native_handle(handle); + watcher.install_watcher_data(); + return watcher; + } + } + + pub fn start(&mut self, signum: Signum, callback: SignalCallback) + -> Result<(), UvError> + { + return unsafe { + match uvll::signal_start(self.native_handle(), signal_cb, + signum as c_int) { + 0 => { + let data = self.get_watcher_data(); + data.signal_cb = Some(callback); + Ok(()) + } + n => Err(UvError(n)), + } + }; + + extern fn signal_cb(handle: *uvll::uv_signal_t, signum: c_int) { + let mut watcher: SignalWatcher = 
NativeHandle::from_native_handle(handle); + let data = watcher.get_watcher_data(); + let cb = data.signal_cb.get_ref(); + (*cb)(watcher, unsafe { cast::transmute(signum as int) }); + } + } + + pub fn stop(&mut self) { + unsafe { + uvll::signal_stop(self.native_handle()); + } + } +} + +impl NativeHandle<*uvll::uv_signal_t> for SignalWatcher { + fn from_native_handle(handle: *uvll::uv_signal_t) -> SignalWatcher { + SignalWatcher(handle) + } + + fn native_handle(&self) -> *uvll::uv_signal_t { + match self { &SignalWatcher(ptr) => ptr } + } +} diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs new file mode 100644 index 0000000000000..9a693f6a27d35 --- /dev/null +++ b/src/librustuv/timer.rs @@ -0,0 +1,157 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::libc::c_int; + +use uvll; +use super::{Watcher, Loop, NativeHandle, TimerCallback, status_to_maybe_uv_error}; + +pub struct TimerWatcher(*uvll::uv_timer_t); +impl Watcher for TimerWatcher { } + +impl TimerWatcher { + pub fn new(loop_: &mut Loop) -> TimerWatcher { + unsafe { + let handle = uvll::malloc_handle(uvll::UV_TIMER); + assert!(handle.is_not_null()); + assert!(0 == uvll::timer_init(loop_.native_handle(), handle)); + let mut watcher: TimerWatcher = NativeHandle::from_native_handle(handle); + watcher.install_watcher_data(); + return watcher; + } + } + + pub fn start(&mut self, timeout: u64, repeat: u64, cb: TimerCallback) { + { + let data = self.get_watcher_data(); + data.timer_cb = Some(cb); + } + + unsafe { + uvll::timer_start(self.native_handle(), timer_cb, timeout, repeat); + } + + extern fn timer_cb(handle: *uvll::uv_timer_t, status: c_int) { + let mut watcher: TimerWatcher = NativeHandle::from_native_handle(handle); + let data = watcher.get_watcher_data(); + let cb = data.timer_cb.get_ref(); + let status = status_to_maybe_uv_error(status); + (*cb)(watcher, status); + } + } + + pub fn stop(&mut self) { + unsafe { + uvll::timer_stop(self.native_handle()); + } + } +} + +impl NativeHandle<*uvll::uv_timer_t> for TimerWatcher { + fn from_native_handle(handle: *uvll::uv_timer_t) -> TimerWatcher { + TimerWatcher(handle) + } + fn native_handle(&self) -> *uvll::uv_idle_t { + match self { &TimerWatcher(ptr) => ptr } + } +} + +#[cfg(test)] +mod test { + use super::*; + use Loop; + use std::unstable::run_in_bare_thread; + + #[test] + fn smoke_test() { + do run_in_bare_thread { + let mut count = 0; + let count_ptr: *mut int = &mut count; + let mut loop_ = Loop::new(); + let mut timer = TimerWatcher::new(&mut loop_); + do timer.start(10, 0) |timer, status| { + assert!(status.is_none()); + unsafe { *count_ptr += 1 }; + timer.close(||()); + } + loop_.run(); + loop_.close(); + assert!(count == 1); + } + } + + #[test] + fn start_twice() { + do 
run_in_bare_thread { + let mut count = 0; + let count_ptr: *mut int = &mut count; + let mut loop_ = Loop::new(); + let mut timer = TimerWatcher::new(&mut loop_); + do timer.start(10, 0) |timer, status| { + let mut timer = timer; + assert!(status.is_none()); + unsafe { *count_ptr += 1 }; + do timer.start(10, 0) |timer, status| { + assert!(status.is_none()); + unsafe { *count_ptr += 1 }; + timer.close(||()); + } + } + loop_.run(); + loop_.close(); + assert!(count == 2); + } + } + + #[test] + fn repeat_stop() { + do run_in_bare_thread { + let mut count = 0; + let count_ptr: *mut int = &mut count; + let mut loop_ = Loop::new(); + let mut timer = TimerWatcher::new(&mut loop_); + do timer.start(1, 2) |timer, status| { + assert!(status.is_none()); + unsafe { + *count_ptr += 1; + + if *count_ptr == 10 { + + // Stop the timer and do something else + let mut timer = timer; + timer.stop(); + // Freeze timer so it can be captured + let timer = timer; + + let mut loop_ = timer.event_loop(); + let mut timer2 = TimerWatcher::new(&mut loop_); + do timer2.start(10, 0) |timer2, _| { + + *count_ptr += 1; + + timer2.close(||()); + + // Restart the original timer + let mut timer = timer; + do timer.start(1, 0) |timer, _| { + *count_ptr += 1; + timer.close(||()); + } + } + } + }; + } + loop_.run(); + loop_.close(); + assert!(count == 12); + } + } + +} diff --git a/src/librustuv/tty.rs b/src/librustuv/tty.rs new file mode 100644 index 0000000000000..65ba09376c14d --- /dev/null +++ b/src/librustuv/tty.rs @@ -0,0 +1,83 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::libc; + +use super::{Watcher, Loop, NativeHandle, UvError}; +use net; +use uvll; + +/// A process wraps the handle of the underlying uv_process_t. +pub struct TTY(*uvll::uv_tty_t); + +impl Watcher for TTY {} + +impl TTY { + #[fixed_stack_segment] #[inline(never)] + pub fn new(loop_: &Loop, fd: libc::c_int, readable: bool) -> + Result + { + let handle = unsafe { uvll::malloc_handle(uvll::UV_TTY) }; + assert!(handle.is_not_null()); + + let ret = unsafe { + uvll::tty_init(loop_.native_handle(), handle, fd as libc::c_int, + readable as libc::c_int) + }; + match ret { + 0 => { + let mut ret: TTY = NativeHandle::from_native_handle(handle); + ret.install_watcher_data(); + Ok(ret) + } + n => { + unsafe { uvll::free_handle(handle); } + Err(UvError(n)) + } + } + } + + pub fn as_stream(&self) -> net::StreamWatcher { + net::StreamWatcher(**self as *uvll::uv_stream_t) + } + + #[fixed_stack_segment] #[inline(never)] + pub fn set_mode(&self, raw: bool) -> Result<(), UvError> { + let raw = raw as libc::c_int; + match unsafe { uvll::tty_set_mode(self.native_handle(), raw) } { + 0 => Ok(()), + n => Err(UvError(n)) + } + } + + #[fixed_stack_segment] #[inline(never)] #[allow(unused_mut)] + pub fn get_winsize(&self) -> Result<(int, int), UvError> { + let mut width: libc::c_int = 0; + let mut height: libc::c_int = 0; + let widthptr: *libc::c_int = &width; + let heightptr: *libc::c_int = &width; + + match unsafe { uvll::tty_get_winsize(self.native_handle(), + widthptr, heightptr) } { + 0 => Ok((width as int, height as int)), + n => Err(UvError(n)) + } + } +} + +impl NativeHandle<*uvll::uv_tty_t> for TTY { + fn from_native_handle(handle: *uvll::uv_tty_t) -> TTY { + TTY(handle) + } + fn native_handle(&self) -> *uvll::uv_tty_t { + match self { &TTY(ptr) => ptr } + } +} + diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs new file mode 100644 index 0000000000000..8b80a24a1b48f --- /dev/null +++ b/src/librustuv/uvio.rs @@ -0,0 +1,2526 @@ +// Copyright 2013 The Rust Project 
Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::c_str::{ToCStr, CString}; +use std::cast::transmute; +use std::cast; +use std::cell::Cell; +use std::clone::Clone; +use std::comm::{SendDeferred, SharedChan, Port, PortOne, GenericChan}; +use std::libc::{c_int, c_uint, c_void, pid_t}; +use std::ops::Drop; +use std::option::*; +use std::ptr; +use std::str; +use std::result::*; +use std::rt::io::IoError; +use std::rt::io::net::ip::{SocketAddr, IpAddr}; +use std::rt::io::{standard_error, OtherIoError, SeekStyle, SeekSet, SeekCur, + SeekEnd}; +use std::rt::io::process::ProcessConfig; +use std::rt::BlockedTask; +use std::rt::local::Local; +use std::rt::rtio::*; +use std::rt::sched::{Scheduler, SchedHandle}; +use std::rt::tube::Tube; +use std::rt::task::Task; +use std::unstable::sync::Exclusive; +use std::path::{GenericPath, Path}; +use std::libc::{lseek, off_t, O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, + O_WRONLY, S_IRUSR, S_IWUSR, S_IRWXU}; +use std::rt::io::{FileMode, FileAccess, OpenOrCreate, Open, Create, + CreateOrTruncate, Append, Truncate, Read, Write, ReadWrite, + FileStat}; +use std::rt::io::signal::Signum; +use std::task; +use ai = std::rt::io::net::addrinfo; + +#[cfg(test)] use std::container::Container; +#[cfg(test)] use std::unstable::run_in_bare_thread; +#[cfg(test)] use std::rt::test::{spawntask, + next_test_ip4, + run_in_mt_newsched_task}; +#[cfg(test)] use std::iter::{Iterator, range}; +#[cfg(test)] use std::rt::comm::oneshot; + +use super::*; +use idle::IdleWatcher; +use net::{UvIpv4SocketAddr, UvIpv6SocketAddr}; +use addrinfo::{GetAddrInfoRequest, accum_addrinfo}; + +// XXX we should not be calling uvll functions in here. 
+ +trait HomingIO { + + fn home<'r>(&'r mut self) -> &'r mut SchedHandle; + + /// This function will move tasks to run on their home I/O scheduler. Note + /// that this function does *not* pin the task to the I/O scheduler, but + /// rather it simply moves it to running on the I/O scheduler. + fn go_to_IO_home(&mut self) -> uint { + use std::rt::sched::RunOnce; + + let current_sched_id = do Local::borrow |sched: &mut Scheduler| { + sched.sched_id() + }; + + // Only need to invoke a context switch if we're not on the right + // scheduler. + if current_sched_id != self.home().sched_id { + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + /* FIXME(#8674) if the task was already killed then wake + * will return None. In that case, the home pointer will + * never be set. + * + * RESOLUTION IDEA: Since the task is dead, we should + * just abort the IO action. + */ + do task.wake().map |task| { + self.home().send(RunOnce(task)); + }; + } + } + } + + self.home().sched_id + } + + // XXX: dummy self parameter + fn restore_original_home(_: Option, io_home: uint) { + // It would truly be a sad day if we had moved off the home I/O + // scheduler while we were doing I/O. + assert_eq!(Local::borrow(|sched: &mut Scheduler| sched.sched_id()), + io_home); + + // If we were a homed task, then we must send ourselves back to the + // original scheduler. 
Otherwise, we can just return and keep running + if !Task::on_appropriate_sched() { + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + do task.wake().map |task| { + Scheduler::run_task(task); + }; + } + } + } + } + + fn home_for_io(&mut self, io: &fn(&mut Self) -> A) -> A { + let home = self.go_to_IO_home(); + let a = io(self); // do IO + HomingIO::restore_original_home(None::, home); + a // return the result of the IO + } + + fn home_for_io_consume(self, io: &fn(Self) -> A) -> A { + let mut this = self; + let home = this.go_to_IO_home(); + let a = io(this); // do IO + HomingIO::restore_original_home(None::, home); + a // return the result of the IO + } + + fn home_for_io_with_sched(&mut self, io_sched: &fn(&mut Self, ~Scheduler) -> A) -> A { + let home = self.go_to_IO_home(); + let a = do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + io_sched(self, scheduler) // do IO and scheduling action + }; + HomingIO::restore_original_home(None::, home); + a // return result of IO + } +} + +// get a handle for the current scheduler +macro_rules! 
get_handle_to_current_scheduler( + () => (do Local::borrow |sched: &mut Scheduler| { sched.make_handle() }) +) + +enum SocketNameKind { + TcpPeer, + Tcp, + Udp +} + +fn socket_name>(sk: SocketNameKind, + handle: U) -> Result { + let getsockname = match sk { + TcpPeer => uvll::tcp_getpeername, + Tcp => uvll::tcp_getsockname, + Udp => uvll::udp_getsockname, + }; + + // Allocate a sockaddr_storage + // since we don't know if it's ipv4 or ipv6 + let r_addr = unsafe { uvll::malloc_sockaddr_storage() }; + + let r = unsafe { + getsockname(handle.native_handle() as *c_void, r_addr as *uvll::sockaddr_storage) + }; + + if r != 0 { + let status = status_to_maybe_uv_error(r); + return Err(uv_error_to_io_error(status.unwrap())); + } + + let addr = unsafe { + if uvll::is_ip6_addr(r_addr as *uvll::sockaddr) { + net::uv_socket_addr_to_socket_addr(UvIpv6SocketAddr(r_addr as *uvll::sockaddr_in6)) + } else { + net::uv_socket_addr_to_socket_addr(UvIpv4SocketAddr(r_addr as *uvll::sockaddr_in)) + } + }; + + unsafe { uvll::free_sockaddr_storage(r_addr); } + + Ok(addr) + +} + +// Obviously an Event Loop is always home. 
+pub struct UvEventLoop { + priv uvio: UvIoFactory +} + +impl UvEventLoop { + pub fn new() -> UvEventLoop { + UvEventLoop { + uvio: UvIoFactory(Loop::new()) + } + } +} + +impl Drop for UvEventLoop { + fn drop(&mut self) { + self.uvio.uv_loop().close(); + } +} + +impl EventLoop for UvEventLoop { + fn run(&mut self) { + self.uvio.uv_loop().run(); + } + + fn callback(&mut self, f: ~fn()) { + let mut idle_watcher = IdleWatcher::new(self.uvio.uv_loop()); + do idle_watcher.start |mut idle_watcher, status| { + assert!(status.is_none()); + idle_watcher.stop(); + idle_watcher.close(||()); + f(); + } + } + + fn pausible_idle_callback(&mut self) -> ~PausibleIdleCallback { + let idle_watcher = IdleWatcher::new(self.uvio.uv_loop()); + ~UvPausibleIdleCallback { + watcher: idle_watcher, + idle_flag: false, + closed: false + } as ~PausibleIdleCallback + } + + fn remote_callback(&mut self, f: ~fn()) -> ~RemoteCallback { + ~UvRemoteCallback::new(self.uvio.uv_loop(), f) as ~RemoteCallback + } + + fn io<'a>(&'a mut self, f: &fn(&'a mut IoFactory)) { + f(&mut self.uvio as &mut IoFactory) + } +} + +#[cfg(not(stage0), not(test))] +#[lang = "event_loop_factory"] +pub extern "C" fn new_loop() -> ~EventLoop { + ~UvEventLoop::new() as ~EventLoop +} + +pub struct UvPausibleIdleCallback { + priv watcher: IdleWatcher, + priv idle_flag: bool, + priv closed: bool +} + +impl PausibleIdleCallback for UvPausibleIdleCallback { + #[inline] + fn start(&mut self, f: ~fn()) { + do self.watcher.start |_idle_watcher, _status| { + f(); + }; + self.idle_flag = true; + } + #[inline] + fn pause(&mut self) { + if self.idle_flag == true { + self.watcher.stop(); + self.idle_flag = false; + } + } + #[inline] + fn resume(&mut self) { + if self.idle_flag == false { + self.watcher.restart(); + self.idle_flag = true; + } + } + #[inline] + fn close(&mut self) { + self.pause(); + if !self.closed { + self.closed = true; + self.watcher.close(||{}); + } + } +} + +#[test] +fn test_callback_run_once() { + do 
run_in_bare_thread { + let mut event_loop = UvEventLoop::new(); + let mut count = 0; + let count_ptr: *mut int = &mut count; + do event_loop.callback { + unsafe { *count_ptr += 1 } + } + event_loop.run(); + assert_eq!(count, 1); + } +} + +// The entire point of async is to call into a loop from other threads so it does not need to home. +pub struct UvRemoteCallback { + // The uv async handle for triggering the callback + priv async: AsyncWatcher, + // A flag to tell the callback to exit, set from the dtor. This is + // almost never contested - only in rare races with the dtor. + priv exit_flag: Exclusive +} + +impl UvRemoteCallback { + pub fn new(loop_: &mut Loop, f: ~fn()) -> UvRemoteCallback { + let exit_flag = Exclusive::new(false); + let exit_flag_clone = exit_flag.clone(); + let async = do AsyncWatcher::new(loop_) |watcher, status| { + assert!(status.is_none()); + + // The synchronization logic here is subtle. To review, + // the uv async handle type promises that, after it is + // triggered the remote callback is definitely called at + // least once. UvRemoteCallback needs to maintain those + // semantics while also shutting down cleanly from the + // dtor. In our case that means that, when the + // UvRemoteCallback dtor calls `async.send()`, here `f` is + // always called later. + + // In the dtor both the exit flag is set and the async + // callback fired under a lock. Here, before calling `f`, + // we take the lock and check the flag. Because we are + // checking the flag before calling `f`, and the flag is + // set under the same lock as the send, then if the flag + // is set then we're guaranteed to call `f` after the + // final send. + + // If the check was done after `f()` then there would be a + // period between that call and the check where the dtor + // could be called in the other thread, missing the final + // callback while still destroying the handle. 
+ + let should_exit = unsafe { + exit_flag_clone.with_imm(|&should_exit| should_exit) + }; + + f(); + + if should_exit { + watcher.close(||()); + } + + }; + UvRemoteCallback { + async: async, + exit_flag: exit_flag + } + } +} + +impl RemoteCallback for UvRemoteCallback { + fn fire(&mut self) { self.async.send() } +} + +impl Drop for UvRemoteCallback { + fn drop(&mut self) { + unsafe { + let this: &mut UvRemoteCallback = cast::transmute_mut(self); + do this.exit_flag.with |should_exit| { + // NB: These two things need to happen atomically. Otherwise + // the event handler could wake up due to a *previous* + // signal and see the exit flag, destroying the handle + // before the final send. + *should_exit = true; + this.async.send(); + } + } + } +} + +#[cfg(test)] +mod test_remote { + use std::cell::Cell; + use std::rt::test::*; + use std::rt::thread::Thread; + use std::rt::tube::Tube; + use std::rt::rtio::EventLoop; + use std::rt::local::Local; + use std::rt::sched::Scheduler; + + #[test] + fn test_uv_remote() { + do run_in_mt_newsched_task { + let mut tube = Tube::new(); + let tube_clone = tube.clone(); + let remote_cell = Cell::new_empty(); + do Local::borrow |sched: &mut Scheduler| { + let tube_clone = tube_clone.clone(); + let tube_clone_cell = Cell::new(tube_clone); + let remote = do sched.event_loop.remote_callback { + // This could be called multiple times + if !tube_clone_cell.is_empty() { + tube_clone_cell.take().send(1); + } + }; + remote_cell.put_back(remote); + } + let thread = do Thread::start { + remote_cell.take().fire(); + }; + + assert!(tube.recv() == 1); + thread.join(); + } + } +} + +pub struct UvIoFactory(Loop); + +impl UvIoFactory { + pub fn uv_loop<'a>(&'a mut self) -> &'a mut Loop { + match self { &UvIoFactory(ref mut ptr) => ptr } + } +} + +/// Helper for a variety of simple uv_fs_* functions that +/// have no ret val +fn uv_fs_helper(loop_: &mut Loop, path: &CString, + cb: ~fn(&mut FsRequest, &mut Loop, &CString, + ~fn(&FsRequest, Option))) + 
-> Result<(), IoError> { + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + let path_cell = Cell::new(path); + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + let mut new_req = FsRequest::new(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + let path = path_cell.take(); + do cb(&mut new_req, loop_, path) |_, err| { + let res = match err { + None => Ok(()), + Some(err) => Err(uv_error_to_io_error(err)) + }; + unsafe { (*result_cell_ptr).put_back(res); } + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + }; + } + } + assert!(!result_cell.is_empty()); + return result_cell.take(); +} + +impl IoFactory for UvIoFactory { + // Connect to an address and return a new stream + // NB: This blocks the task waiting on the connection. + // It would probably be better to return a future + fn tcp_connect(&mut self, addr: SocketAddr) -> Result<~RtioTcpStream, IoError> { + // Create a cell in the task to hold the result. We will fill + // the cell before resuming the task. 
+ let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + + // Block this task and take ownership, switch to scheduler context + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + + let mut tcp = TcpWatcher::new(self.uv_loop()); + let task_cell = Cell::new(task); + + // Wait for a connection + do tcp.connect(addr) |stream, status| { + match status { + None => { + let tcp = NativeHandle::from_native_handle(stream.native_handle()); + let home = get_handle_to_current_scheduler!(); + let res = Ok(~UvTcpStream { watcher: tcp, home: home } + as ~RtioTcpStream); + + // Store the stream in the task's stack + unsafe { (*result_cell_ptr).put_back(res); } + + // Context switch + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + Some(_) => { + let task_cell = Cell::new(task_cell.take()); + do stream.close { + let res = Err(uv_error_to_io_error(status.unwrap())); + unsafe { (*result_cell_ptr).put_back(res); } + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + } + } + } + } + + assert!(!result_cell.is_empty()); + return result_cell.take(); + } + + fn tcp_bind(&mut self, addr: SocketAddr) -> Result<~RtioTcpListener, IoError> { + let mut watcher = TcpWatcher::new(self.uv_loop()); + match watcher.bind(addr) { + Ok(_) => { + let home = get_handle_to_current_scheduler!(); + Ok(~UvTcpListener::new(watcher, home) as ~RtioTcpListener) + } + Err(uverr) => { + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do watcher.as_stream().close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + Err(uv_error_to_io_error(uverr)) + } + } + } + } + + fn 
udp_bind(&mut self, addr: SocketAddr) -> Result<~RtioUdpSocket, IoError> { + let mut watcher = UdpWatcher::new(self.uv_loop()); + match watcher.bind(addr) { + Ok(_) => { + let home = get_handle_to_current_scheduler!(); + Ok(~UvUdpSocket { watcher: watcher, home: home } as ~RtioUdpSocket) + } + Err(uverr) => { + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do watcher.close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + Err(uv_error_to_io_error(uverr)) + } + } + } + } + + fn timer_init(&mut self) -> Result<~RtioTimer, IoError> { + let watcher = TimerWatcher::new(self.uv_loop()); + let home = get_handle_to_current_scheduler!(); + Ok(~UvTimer::new(watcher, home) as ~RtioTimer) + } + + fn fs_from_raw_fd(&mut self, fd: c_int, close: CloseBehavior) -> ~RtioFileStream { + let loop_ = Loop {handle: self.uv_loop().native_handle()}; + let home = get_handle_to_current_scheduler!(); + ~UvFileStream::new(loop_, fd, close, home) as ~RtioFileStream + } + + fn fs_open(&mut self, path: &CString, fm: FileMode, fa: FileAccess) + -> Result<~RtioFileStream, IoError> { + let mut flags = match fm { + Open => 0, + Create => O_CREAT, + OpenOrCreate => O_CREAT, + Append => O_APPEND, + Truncate => O_TRUNC, + CreateOrTruncate => O_TRUNC | O_CREAT + }; + flags = match fa { + Read => flags | O_RDONLY, + Write => flags | O_WRONLY, + ReadWrite => flags | O_RDWR + }; + let create_mode = match fm { + Create|OpenOrCreate|CreateOrTruncate => + S_IRUSR | S_IWUSR, + _ => 0 + }; + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + let path_cell = Cell::new(path); + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + let open_req = file::FsRequest::new(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = 
Cell::new(task); + let path = path_cell.take(); + do open_req.open(self.uv_loop(), path, flags as int, create_mode as int) + |req,err| { + if err.is_none() { + let loop_ = Loop {handle: req.get_loop().native_handle()}; + let home = get_handle_to_current_scheduler!(); + let fd = req.get_result() as c_int; + let fs = ~UvFileStream::new( + loop_, fd, CloseSynchronously, home) as ~RtioFileStream; + let res = Ok(fs); + unsafe { (*result_cell_ptr).put_back(res); } + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } else { + let res = Err(uv_error_to_io_error(err.unwrap())); + unsafe { (*result_cell_ptr).put_back(res); } + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + }; + }; + }; + assert!(!result_cell.is_empty()); + return result_cell.take(); + } + + fn fs_unlink(&mut self, path: &CString) -> Result<(), IoError> { + do uv_fs_helper(self.uv_loop(), path) |unlink_req, l, p, cb| { + do unlink_req.unlink(l, p) |req, err| { + cb(req, err) + }; + } + } + fn fs_stat(&mut self, path: &CString) -> Result { + use str::StrSlice; + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + let path_cell = Cell::new(path); + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + let stat_req = file::FsRequest::new(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + let path = path_cell.take(); + // Don't pick up the null byte + let slice = path.as_bytes().slice(0, path.len()); + let path_instance = Cell::new(Path::new(slice)); + do stat_req.stat(self.uv_loop(), path) |req,err| { + let res = match err { + None => { + let stat = req.get_stat(); + Ok(FileStat { + path: path_instance.take(), + is_file: stat.is_file(), + is_dir: stat.is_dir(), + device: stat.st_dev, + mode: stat.st_mode, + inode: stat.st_ino, + size: stat.st_size, + created: stat.st_ctim.tv_sec 
as u64, + modified: stat.st_mtim.tv_sec as u64, + accessed: stat.st_atim.tv_sec as u64 + }) + }, + Some(e) => { + Err(uv_error_to_io_error(e)) + } + }; + unsafe { (*result_cell_ptr).put_back(res); } + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + }; + }; + }; + assert!(!result_cell.is_empty()); + return result_cell.take(); + } + + fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>, + hint: Option) -> Result<~[ai::Info], IoError> { + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + let host_ptr: *Option<&str> = &host; + let servname_ptr: *Option<&str> = &servname; + let hint_ptr: *Option = &hint; + let addrinfo_req = GetAddrInfoRequest::new(); + let addrinfo_req_cell = Cell::new(addrinfo_req); + + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + let mut addrinfo_req = addrinfo_req_cell.take(); + unsafe { + do addrinfo_req.getaddrinfo(self.uv_loop(), + *host_ptr, *servname_ptr, + *hint_ptr) |_, addrinfo, err| { + let res = match err { + None => Ok(accum_addrinfo(addrinfo)), + Some(err) => Err(uv_error_to_io_error(err)) + }; + (*result_cell_ptr).put_back(res); + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + } + } + addrinfo_req.delete(); + assert!(!result_cell.is_empty()); + return result_cell.take(); + } + fn fs_mkdir(&mut self, path: &CString) -> Result<(), IoError> { + let mode = S_IRWXU as int; + do uv_fs_helper(self.uv_loop(), path) |mkdir_req, l, p, cb| { + do mkdir_req.mkdir(l, p, mode as int) |req, err| { + cb(req, err) + }; + } + } + fn fs_rmdir(&mut self, path: &CString) -> Result<(), IoError> { + do uv_fs_helper(self.uv_loop(), path) |rmdir_req, l, p, cb| { + do rmdir_req.rmdir(l, p) |req, err| { + cb(req, err) + }; + } + } + fn 
fs_readdir(&mut self, path: &CString, flags: c_int) -> + Result<~[Path], IoError> { + use str::StrSlice; + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + let path_cell = Cell::new(path); + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + let stat_req = file::FsRequest::new(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + let path = path_cell.take(); + // Don't pick up the null byte + let slice = path.as_bytes().slice(0, path.len()); + let path_parent = Cell::new(Path::new(slice)); + do stat_req.readdir(self.uv_loop(), path, flags) |req,err| { + let parent = path_parent.take(); + let res = match err { + None => { + let mut paths = ~[]; + do req.each_path |rel_path| { + let p = rel_path.as_bytes(); + paths.push(parent.join(p.slice_to(rel_path.len()))); + } + Ok(paths) + }, + Some(e) => { + Err(uv_error_to_io_error(e)) + } + }; + unsafe { (*result_cell_ptr).put_back(res); } + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + }; + }; + }; + assert!(!result_cell.is_empty()); + return result_cell.take(); + } + + fn spawn(&mut self, config: ProcessConfig) + -> Result<(~RtioProcess, ~[Option<~RtioPipe>]), IoError> + { + // Sadly, we must create the UvProcess before we actually call uv_spawn + // so that the exit_cb can close over it and notify it when the process + // has exited. + let mut ret = ~UvProcess { + process: Process::new(), + home: None, + exit_status: None, + term_signal: None, + exit_error: None, + descheduled: None, + }; + let ret_ptr = unsafe { + *cast::transmute::<&~UvProcess, &*mut UvProcess>(&ret) + }; + + // The purpose of this exit callback is to record the data about the + // exit and then wake up the task which may be waiting for the process + // to exit. 
This is all performed in the current io-loop, and the + // implementation of UvProcess ensures that reading these fields always + // occurs on the current io-loop. + let exit_cb: ExitCallback = |_, exit_status, term_signal, error| { + unsafe { + assert!((*ret_ptr).exit_status.is_none()); + (*ret_ptr).exit_status = Some(exit_status); + (*ret_ptr).term_signal = Some(term_signal); + (*ret_ptr).exit_error = error; + match (*ret_ptr).descheduled.take() { + Some(task) => { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task); + } + None => {} + } + } + }; + + match ret.process.spawn(self.uv_loop(), config, exit_cb) { + Ok(io) => { + // Only now do we actually get a handle to this scheduler. + ret.home = Some(get_handle_to_current_scheduler!()); + Ok((ret as ~RtioProcess, + io.move_iter().map(|p| p.map(|p| p as ~RtioPipe)).collect())) + } + Err(uverr) => { + // We still need to close the process handle we created, but + // that's taken care for us in the destructor of UvProcess + Err(uv_error_to_io_error(uverr)) + } + } + } + + fn unix_bind(&mut self, path: &CString) -> + Result<~RtioUnixListener, IoError> { + let mut pipe = UvUnboundPipe::new(self.uv_loop()); + match pipe.pipe.bind(path) { + Ok(()) => Ok(~UvUnixListener::new(pipe) as ~RtioUnixListener), + Err(e) => Err(uv_error_to_io_error(e)), + } + } + + fn unix_connect(&mut self, path: &CString) -> Result<~RtioPipe, IoError> { + let pipe = UvUnboundPipe::new(self.uv_loop()); + let mut rawpipe = pipe.pipe; + + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + let pipe_cell = Cell::new(pipe); + let pipe_cell_ptr: *Cell = &pipe_cell; + + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do rawpipe.connect(path) |_stream, err| { + let res = match err { + None => { + let pipe = unsafe { (*pipe_cell_ptr).take() }; + Ok(~UvPipeStream::new(pipe) as ~RtioPipe) + 
} + Some(e) => Err(uv_error_to_io_error(e)), + }; + unsafe { (*result_cell_ptr).put_back(res); } + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + + assert!(!result_cell.is_empty()); + return result_cell.take(); + } + + fn tty_open(&mut self, fd: c_int, readable: bool) + -> Result<~RtioTTY, IoError> { + match tty::TTY::new(self.uv_loop(), fd, readable) { + Ok(tty) => Ok(~UvTTY { + home: get_handle_to_current_scheduler!(), + tty: tty, + fd: fd, + } as ~RtioTTY), + Err(e) => Err(uv_error_to_io_error(e)) + } + } + + fn pipe_open(&mut self, fd: c_int) -> Result<~RtioPipe, IoError> { + let mut pipe = UvUnboundPipe::new(self.uv_loop()); + match pipe.pipe.open(fd) { + Ok(()) => Ok(~UvPipeStream::new(pipe) as ~RtioPipe), + Err(e) => Err(uv_error_to_io_error(e)) + } + } + + fn signal(&mut self, signum: Signum, channel: SharedChan) + -> Result<~RtioSignal, IoError> { + let watcher = SignalWatcher::new(self.uv_loop()); + let home = get_handle_to_current_scheduler!(); + let mut signal = ~UvSignal::new(watcher, home); + match signal.watcher.start(signum, |_, _| channel.send_deferred(signum)) { + Ok(()) => Ok(signal as ~RtioSignal), + Err(e) => Err(uv_error_to_io_error(e)), + } + } +} + +pub struct UvTcpListener { + priv watcher : TcpWatcher, + priv home: SchedHandle, +} + +impl HomingIO for UvTcpListener { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl UvTcpListener { + fn new(watcher: TcpWatcher, home: SchedHandle) -> UvTcpListener { + UvTcpListener { watcher: watcher, home: home } + } +} + +impl Drop for UvTcpListener { + fn drop(&mut self) { + do self.home_for_io_with_sched |self_, scheduler| { + do scheduler.deschedule_running_task_and_then |_, task| { + let task = Cell::new(task); + do self_.watcher.as_stream().close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task.take()); + } + } + } + } +} + +impl RtioSocket for 
UvTcpListener { + fn socket_name(&mut self) -> Result { + do self.home_for_io |self_| { + socket_name(Tcp, self_.watcher) + } + } +} + +impl RtioTcpListener for UvTcpListener { + fn listen(~self) -> Result<~RtioTcpAcceptor, IoError> { + do self.home_for_io_consume |self_| { + let acceptor = ~UvTcpAcceptor::new(self_); + let incoming = Cell::new(acceptor.incoming.clone()); + let mut stream = acceptor.listener.watcher.as_stream(); + let res = do stream.listen |mut server, status| { + do incoming.with_mut_ref |incoming| { + let inc = match status { + Some(_) => Err(standard_error(OtherIoError)), + None => { + let inc = TcpWatcher::new(&server.event_loop()); + // first accept call in the callback guarenteed to succeed + server.accept(inc.as_stream()); + let home = get_handle_to_current_scheduler!(); + Ok(~UvTcpStream { watcher: inc, home: home } + as ~RtioTcpStream) + } + }; + incoming.send(inc); + } + }; + match res { + Ok(()) => Ok(acceptor as ~RtioTcpAcceptor), + Err(e) => Err(uv_error_to_io_error(e)), + } + } + } +} + +pub struct UvTcpAcceptor { + priv listener: UvTcpListener, + priv incoming: Tube>, +} + +impl HomingIO for UvTcpAcceptor { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } +} + +impl UvTcpAcceptor { + fn new(listener: UvTcpListener) -> UvTcpAcceptor { + UvTcpAcceptor { listener: listener, incoming: Tube::new() } + } +} + +impl RtioSocket for UvTcpAcceptor { + fn socket_name(&mut self) -> Result { + do self.home_for_io |self_| { + socket_name(Tcp, self_.listener.watcher) + } + } +} + +fn accept_simultaneously(stream: StreamWatcher, a: int) -> Result<(), IoError> { + let r = unsafe { + uvll::tcp_simultaneous_accepts(stream.native_handle(), a as c_int) + }; + + match status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } +} + +impl RtioTcpAcceptor for UvTcpAcceptor { + fn accept(&mut self) -> Result<~RtioTcpStream, IoError> { + do self.home_for_io |self_| { + self_.incoming.recv() + } 
+ } + + fn accept_simultaneously(&mut self) -> Result<(), IoError> { + do self.home_for_io |self_| { + accept_simultaneously(self_.listener.watcher.as_stream(), 1) + } + } + + fn dont_accept_simultaneously(&mut self) -> Result<(), IoError> { + do self.home_for_io |self_| { + accept_simultaneously(self_.listener.watcher.as_stream(), 0) + } + } +} + +fn read_stream(mut watcher: StreamWatcher, + scheduler: ~Scheduler, + buf: &mut [u8]) -> Result { + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + + let buf_ptr: *&mut [u8] = &buf; + do scheduler.deschedule_running_task_and_then |_sched, task| { + let task_cell = Cell::new(task); + // XXX: We shouldn't reallocate these callbacks every + // call to read + let alloc: AllocCallback = |_| unsafe { + slice_to_uv_buf(*buf_ptr) + }; + do watcher.read_start(alloc) |mut watcher, nread, _buf, status| { + + // Stop reading so that no read callbacks are + // triggered before the user calls `read` again. + // XXX: Is there a performance impact to calling + // stop here? 
+ watcher.read_stop(); + + let result = if status.is_none() { + assert!(nread >= 0); + Ok(nread as uint) + } else { + Err(uv_error_to_io_error(status.unwrap())) + }; + + unsafe { (*result_cell_ptr).put_back(result); } + + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + + assert!(!result_cell.is_empty()); + result_cell.take() +} + +fn write_stream(mut watcher: StreamWatcher, + scheduler: ~Scheduler, + buf: &[u8]) -> Result<(), IoError> { + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + let buf_ptr: *&[u8] = &buf; + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; + do watcher.write(buf) |_watcher, status| { + let result = if status.is_none() { + Ok(()) + } else { + Err(uv_error_to_io_error(status.unwrap())) + }; + + unsafe { (*result_cell_ptr).put_back(result); } + + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + + assert!(!result_cell.is_empty()); + result_cell.take() +} + +pub struct UvUnboundPipe { + pipe: Pipe, + priv home: SchedHandle, +} + +impl UvUnboundPipe { + /// Creates a new unbound pipe homed to the current scheduler, placed on the + /// specified event loop + pub fn new(loop_: &Loop) -> UvUnboundPipe { + UvUnboundPipe { + pipe: Pipe::new(loop_, false), + home: get_handle_to_current_scheduler!(), + } + } +} + +impl HomingIO for UvUnboundPipe { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl Drop for UvUnboundPipe { + fn drop(&mut self) { + do self.home_for_io |self_| { + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do self_.pipe.close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + } + } +} + 
+pub struct UvPipeStream { + priv inner: UvUnboundPipe, +} + +impl UvPipeStream { + pub fn new(inner: UvUnboundPipe) -> UvPipeStream { + UvPipeStream { inner: inner } + } +} + +impl RtioPipe for UvPipeStream { + fn read(&mut self, buf: &mut [u8]) -> Result { + do self.inner.home_for_io_with_sched |self_, scheduler| { + read_stream(self_.pipe.as_stream(), scheduler, buf) + } + } + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + do self.inner.home_for_io_with_sched |self_, scheduler| { + write_stream(self_.pipe.as_stream(), scheduler, buf) + } + } +} + +pub struct UvTcpStream { + priv watcher: TcpWatcher, + priv home: SchedHandle, +} + +impl HomingIO for UvTcpStream { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl Drop for UvTcpStream { + fn drop(&mut self) { + do self.home_for_io_with_sched |self_, scheduler| { + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do self_.watcher.as_stream().close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + } + } +} + +impl RtioSocket for UvTcpStream { + fn socket_name(&mut self) -> Result { + do self.home_for_io |self_| { + socket_name(Tcp, self_.watcher) + } + } +} + +impl RtioTcpStream for UvTcpStream { + fn read(&mut self, buf: &mut [u8]) -> Result { + do self.home_for_io_with_sched |self_, scheduler| { + read_stream(self_.watcher.as_stream(), scheduler, buf) + } + } + + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + do self.home_for_io_with_sched |self_, scheduler| { + write_stream(self_.watcher.as_stream(), scheduler, buf) + } + } + + fn peer_name(&mut self) -> Result { + do self.home_for_io |self_| { + socket_name(TcpPeer, self_.watcher) + } + } + + fn control_congestion(&mut self) -> Result<(), IoError> { + do self.home_for_io |self_| { + let r = unsafe { uvll::tcp_nodelay(self_.watcher.native_handle(), 0 as c_int) }; + + match 
status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } + } + } + + fn nodelay(&mut self) -> Result<(), IoError> { + do self.home_for_io |self_| { + let r = unsafe { uvll::tcp_nodelay(self_.watcher.native_handle(), 1 as c_int) }; + + match status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } + } + } + + fn keepalive(&mut self, delay_in_seconds: uint) -> Result<(), IoError> { + do self.home_for_io |self_| { + let r = unsafe { + uvll::tcp_keepalive(self_.watcher.native_handle(), 1 as c_int, + delay_in_seconds as c_uint) + }; + + match status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } + } + } + + fn letdie(&mut self) -> Result<(), IoError> { + do self.home_for_io |self_| { + let r = unsafe { + uvll::tcp_keepalive(self_.watcher.native_handle(), 0 as c_int, 0 as c_uint) + }; + + match status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } + } + } +} + +pub struct UvUdpSocket { + priv watcher: UdpWatcher, + priv home: SchedHandle, +} + +impl HomingIO for UvUdpSocket { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl Drop for UvUdpSocket { + fn drop(&mut self) { + do self.home_for_io_with_sched |self_, scheduler| { + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do self_.watcher.close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + } + } +} + +impl RtioSocket for UvUdpSocket { + fn socket_name(&mut self) -> Result { + do self.home_for_io |self_| { + socket_name(Udp, self_.watcher) + } + } +} + +impl RtioUdpSocket for UvUdpSocket { + fn recvfrom(&mut self, buf: &mut [u8]) -> Result<(uint, SocketAddr), IoError> { + do self.home_for_io_with_sched |self_, scheduler| { + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; 
+ + let buf_ptr: *&mut [u8] = &buf; + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + let alloc: AllocCallback = |_| unsafe { slice_to_uv_buf(*buf_ptr) }; + do self_.watcher.recv_start(alloc) |mut watcher, nread, _buf, addr, flags, status| { + let _ = flags; // /XXX add handling for partials? + + watcher.recv_stop(); + + let result = match status { + None => { + assert!(nread >= 0); + Ok((nread as uint, addr)) + } + Some(err) => Err(uv_error_to_io_error(err)), + }; + + unsafe { (*result_cell_ptr).put_back(result); } + + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + + assert!(!result_cell.is_empty()); + result_cell.take() + } + } + + fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> Result<(), IoError> { + do self.home_for_io_with_sched |self_, scheduler| { + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + let buf_ptr: *&[u8] = &buf; + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; + do self_.watcher.send(buf, dst) |_watcher, status| { + + let result = match status { + None => Ok(()), + Some(err) => Err(uv_error_to_io_error(err)), + }; + + unsafe { (*result_cell_ptr).put_back(result); } + + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + + assert!(!result_cell.is_empty()); + result_cell.take() + } + } + + fn join_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { + do self.home_for_io |self_| { + let r = unsafe { + do multi.to_str().with_c_str |m_addr| { + uvll::udp_set_membership(self_.watcher.native_handle(), m_addr, + ptr::null(), uvll::UV_JOIN_GROUP) + } + }; + + match status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } + } + } + + fn leave_multicast(&mut self, multi: IpAddr) -> Result<(), 
IoError> { + do self.home_for_io |self_| { + let r = unsafe { + do multi.to_str().with_c_str |m_addr| { + uvll::udp_set_membership(self_.watcher.native_handle(), m_addr, + ptr::null(), uvll::UV_LEAVE_GROUP) + } + }; + + match status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } + } + } + + fn loop_multicast_locally(&mut self) -> Result<(), IoError> { + do self.home_for_io |self_| { + + let r = unsafe { + uvll::udp_set_multicast_loop(self_.watcher.native_handle(), 1 as c_int) + }; + + match status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } + } + } + + fn dont_loop_multicast_locally(&mut self) -> Result<(), IoError> { + do self.home_for_io |self_| { + + let r = unsafe { + uvll::udp_set_multicast_loop(self_.watcher.native_handle(), 0 as c_int) + }; + + match status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } + } + } + + fn multicast_time_to_live(&mut self, ttl: int) -> Result<(), IoError> { + do self.home_for_io |self_| { + + let r = unsafe { + uvll::udp_set_multicast_ttl(self_.watcher.native_handle(), ttl as c_int) + }; + + match status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } + } + } + + fn time_to_live(&mut self, ttl: int) -> Result<(), IoError> { + do self.home_for_io |self_| { + + let r = unsafe { + uvll::udp_set_ttl(self_.watcher.native_handle(), ttl as c_int) + }; + + match status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } + } + } + + fn hear_broadcasts(&mut self) -> Result<(), IoError> { + do self.home_for_io |self_| { + + let r = unsafe { + uvll::udp_set_broadcast(self_.watcher.native_handle(), 1 as c_int) + }; + + match status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } + } + } + + fn ignore_broadcasts(&mut self) -> Result<(), IoError> { + do self.home_for_io |self_| { + + let r 
= unsafe { + uvll::udp_set_broadcast(self_.watcher.native_handle(), 0 as c_int) + }; + + match status_to_maybe_uv_error(r) { + Some(err) => Err(uv_error_to_io_error(err)), + None => Ok(()) + } + } + } +} + +pub struct UvTimer { + priv watcher: timer::TimerWatcher, + priv home: SchedHandle, +} + +impl HomingIO for UvTimer { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl UvTimer { + fn new(w: timer::TimerWatcher, home: SchedHandle) -> UvTimer { + UvTimer { watcher: w, home: home } + } +} + +impl Drop for UvTimer { + fn drop(&mut self) { + do self.home_for_io_with_sched |self_, scheduler| { + uvdebug!("closing UvTimer"); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do self_.watcher.close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + } + } +} + +impl RtioTimer for UvTimer { + fn sleep(&mut self, msecs: u64) { + do self.home_for_io_with_sched |self_, scheduler| { + do scheduler.deschedule_running_task_and_then |_sched, task| { + uvdebug!("sleep: entered scheduler context"); + let task_cell = Cell::new(task); + do self_.watcher.start(msecs, 0) |_, status| { + assert!(status.is_none()); + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + self_.watcher.stop(); + } + } + + fn oneshot(&mut self, msecs: u64) -> PortOne<()> { + use std::comm::oneshot; + + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + do self.home_for_io |self_| { + let chan = Cell::new(chan.take()); + do self_.watcher.start(msecs, 0) |_, status| { + assert!(status.is_none()); + assert!(!chan.is_empty()); + chan.take().send_deferred(()); + } + } + + return port; + } + + fn period(&mut self, msecs: u64) -> Port<()> { + use std::comm::stream; + + let (port, chan) = stream(); + let chan = Cell::new(chan); + do self.home_for_io |self_| { + let chan = Cell::new(chan.take()); + 
do self_.watcher.start(msecs, msecs) |_, status| { + assert!(status.is_none()); + do chan.with_ref |chan| { + chan.send_deferred(()); + } + } + } + + return port; + } +} + +pub struct UvFileStream { + priv loop_: Loop, + priv fd: c_int, + priv close: CloseBehavior, + priv home: SchedHandle, +} + +impl HomingIO for UvFileStream { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl UvFileStream { + fn new(loop_: Loop, fd: c_int, close: CloseBehavior, + home: SchedHandle) -> UvFileStream { + UvFileStream { + loop_: loop_, + fd: fd, + close: close, + home: home, + } + } + fn base_read(&mut self, buf: &mut [u8], offset: i64) -> Result { + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + let buf_ptr: *&mut [u8] = &buf; + do self.home_for_io_with_sched |self_, scheduler| { + do scheduler.deschedule_running_task_and_then |_, task| { + let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; + let task_cell = Cell::new(task); + let read_req = file::FsRequest::new(); + do read_req.read(&self_.loop_, self_.fd, buf, offset) |req, uverr| { + let res = match uverr { + None => Ok(req.get_result() as int), + Some(err) => Err(uv_error_to_io_error(err)) + }; + unsafe { (*result_cell_ptr).put_back(res); } + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + } + result_cell.take() + } + fn base_write(&mut self, buf: &[u8], offset: i64) -> Result<(), IoError> { + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + let buf_ptr: *&[u8] = &buf; + do self.home_for_io_with_sched |self_, scheduler| { + do scheduler.deschedule_running_task_and_then |_, task| { + let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; + let task_cell = Cell::new(task); + let write_req = file::FsRequest::new(); + do write_req.write(&self_.loop_, self_.fd, buf, offset) |_, uverr| { + let res = match uverr { + None => Ok(()), + Some(err) => 
Err(uv_error_to_io_error(err)) + }; + unsafe { (*result_cell_ptr).put_back(res); } + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + } + result_cell.take() + } + fn seek_common(&mut self, pos: i64, whence: c_int) -> + Result{ + #[fixed_stack_segment]; #[inline(never)]; + unsafe { + match lseek(self.fd, pos as off_t, whence) { + -1 => { + Err(IoError { + kind: OtherIoError, + desc: "Failed to lseek.", + detail: None + }) + }, + n => Ok(n as u64) + } + } + } +} + +impl Drop for UvFileStream { + fn drop(&mut self) { + match self.close { + DontClose => {} + CloseAsynchronously => { + let close_req = file::FsRequest::new(); + do close_req.close(&self.loop_, self.fd) |_,_| {} + } + CloseSynchronously => { + do self.home_for_io_with_sched |self_, scheduler| { + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + let close_req = file::FsRequest::new(); + do close_req.close(&self_.loop_, self_.fd) |_,_| { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + } + } + } + } +} + +impl RtioFileStream for UvFileStream { + fn read(&mut self, buf: &mut [u8]) -> Result { + self.base_read(buf, -1) + } + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + self.base_write(buf, -1) + } + fn pread(&mut self, buf: &mut [u8], offset: u64) -> Result { + self.base_read(buf, offset as i64) + } + fn pwrite(&mut self, buf: &[u8], offset: u64) -> Result<(), IoError> { + self.base_write(buf, offset as i64) + } + fn seek(&mut self, pos: i64, whence: SeekStyle) -> Result { + use std::libc::{SEEK_SET, SEEK_CUR, SEEK_END}; + let whence = match whence { + SeekSet => SEEK_SET, + SeekCur => SEEK_CUR, + SeekEnd => SEEK_END + }; + self.seek_common(pos, whence) + } + fn tell(&self) -> Result { + use std::libc::SEEK_CUR; + // this is temporary + let self_ = unsafe { cast::transmute::<&UvFileStream, &mut UvFileStream>(self) }; 
+ self_.seek_common(0, SEEK_CUR) + } + fn flush(&mut self) -> Result<(), IoError> { + Ok(()) + } +} + +pub struct UvProcess { + priv process: process::Process, + + // Sadly, this structure must be created before we return it, so in that + // brief interim the `home` is None. + priv home: Option, + + // All None until the process exits (exit_error may stay None) + priv exit_status: Option, + priv term_signal: Option, + priv exit_error: Option, + + // Used to store which task to wake up from the exit_cb + priv descheduled: Option, +} + +impl HomingIO for UvProcess { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.home.get_mut_ref() } +} + +impl Drop for UvProcess { + fn drop(&mut self) { + let close = |self_: &mut UvProcess| { + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task = Cell::new(task); + do self_.process.close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task.take()); + } + } + }; + + // If home is none, then this process never actually successfully + // spawned, so there's no need to switch event loops + if self.home.is_none() { + close(self) + } else { + self.home_for_io(close) + } + } +} + +impl RtioProcess for UvProcess { + fn id(&self) -> pid_t { + self.process.pid() + } + + fn kill(&mut self, signal: int) -> Result<(), IoError> { + do self.home_for_io |self_| { + match self_.process.kill(signal) { + Ok(()) => Ok(()), + Err(uverr) => Err(uv_error_to_io_error(uverr)) + } + } + } + + fn wait(&mut self) -> int { + // Make sure (on the home scheduler) that we have an exit status listed + do self.home_for_io |self_| { + match self_.exit_status { + Some(*) => {} + None => { + // If there's no exit code previously listed, then the + // process's exit callback has yet to be invoked. We just + // need to deschedule ourselves and wait to be reawoken. 
+ let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + assert!(self_.descheduled.is_none()); + self_.descheduled = Some(task); + } + assert!(self_.exit_status.is_some()); + } + } + } + + self.exit_status.unwrap() + } +} + +pub struct UvUnixListener { + priv inner: UvUnboundPipe +} + +impl HomingIO for UvUnixListener { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.inner.home() } +} + +impl UvUnixListener { + fn new(pipe: UvUnboundPipe) -> UvUnixListener { + UvUnixListener { inner: pipe } + } +} + +impl RtioUnixListener for UvUnixListener { + fn listen(~self) -> Result<~RtioUnixAcceptor, IoError> { + do self.home_for_io_consume |self_| { + let acceptor = ~UvUnixAcceptor::new(self_); + let incoming = Cell::new(acceptor.incoming.clone()); + let mut stream = acceptor.listener.inner.pipe.as_stream(); + let res = do stream.listen |mut server, status| { + do incoming.with_mut_ref |incoming| { + let inc = match status { + Some(e) => Err(uv_error_to_io_error(e)), + None => { + let pipe = UvUnboundPipe::new(&server.event_loop()); + server.accept(pipe.pipe.as_stream()); + Ok(~UvPipeStream::new(pipe) as ~RtioPipe) + } + }; + incoming.send(inc); + } + }; + match res { + Ok(()) => Ok(acceptor as ~RtioUnixAcceptor), + Err(e) => Err(uv_error_to_io_error(e)), + } + } + } +} + +pub struct UvTTY { + tty: tty::TTY, + home: SchedHandle, + fd: c_int, +} + +impl HomingIO for UvTTY { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl Drop for UvTTY { + fn drop(&mut self) { + // TTY handles are used for the logger in a task, so this destructor is + // run when a task is destroyed. When a task is being destroyed, a local + // scheduler isn't available, so we can't do the normal "take the + // scheduler and resume once close is done". Instead close operations on + // a TTY are asynchronous. 
+ self.tty.close_async(); + } +} + +impl RtioTTY for UvTTY { + fn read(&mut self, buf: &mut [u8]) -> Result { + do self.home_for_io_with_sched |self_, scheduler| { + read_stream(self_.tty.as_stream(), scheduler, buf) + } + } + + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + do self.home_for_io_with_sched |self_, scheduler| { + write_stream(self_.tty.as_stream(), scheduler, buf) + } + } + + fn set_raw(&mut self, raw: bool) -> Result<(), IoError> { + do self.home_for_io |self_| { + match self_.tty.set_mode(raw) { + Ok(p) => Ok(p), Err(e) => Err(uv_error_to_io_error(e)) + } + } + } + + fn get_winsize(&mut self) -> Result<(int, int), IoError> { + do self.home_for_io |self_| { + match self_.tty.get_winsize() { + Ok(p) => Ok(p), Err(e) => Err(uv_error_to_io_error(e)) + } + } + } + + fn isatty(&self) -> bool { + unsafe { uvll::guess_handle(self.fd) == uvll::UV_TTY as c_int } + } +} + +pub struct UvUnixAcceptor { + listener: UvUnixListener, + incoming: Tube>, +} + +impl HomingIO for UvUnixAcceptor { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } +} + +impl UvUnixAcceptor { + fn new(listener: UvUnixListener) -> UvUnixAcceptor { + UvUnixAcceptor { listener: listener, incoming: Tube::new() } + } +} + +impl RtioUnixAcceptor for UvUnixAcceptor { + fn accept(&mut self) -> Result<~RtioPipe, IoError> { + do self.home_for_io |self_| { + self_.incoming.recv() + } + } + + fn accept_simultaneously(&mut self) -> Result<(), IoError> { + do self.home_for_io |self_| { + accept_simultaneously(self_.listener.inner.pipe.as_stream(), 1) + } + } + + fn dont_accept_simultaneously(&mut self) -> Result<(), IoError> { + do self.home_for_io |self_| { + accept_simultaneously(self_.listener.inner.pipe.as_stream(), 0) + } + } +} + +pub struct UvSignal { + watcher: signal::SignalWatcher, + home: SchedHandle, +} + +impl HomingIO for UvSignal { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl UvSignal { + fn new(w: 
signal::SignalWatcher, home: SchedHandle) -> UvSignal { + UvSignal { watcher: w, home: home } + } +} + +impl RtioSignal for UvSignal {} + +impl Drop for UvSignal { + fn drop(&mut self) { + do self.home_for_io_with_sched |self_, scheduler| { + uvdebug!("closing UvSignal"); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do self_.watcher.close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } + } + } + } +} + +// this function is full of lies +unsafe fn local_io() -> &'static mut IoFactory { + do Local::borrow |sched: &mut Scheduler| { + let mut io = None; + sched.event_loop.io(|i| io = Some(i)); + cast::transmute(io.unwrap()) + } +} + +#[test] +fn test_simple_io_no_connect() { + do run_in_mt_newsched_task { + unsafe { + let io = local_io(); + let addr = next_test_ip4(); + let maybe_chan = io.tcp_connect(addr); + assert!(maybe_chan.is_err()); + } + } +} + +#[test] +fn test_simple_udp_io_bind_only() { + do run_in_mt_newsched_task { + unsafe { + let io = local_io(); + let addr = next_test_ip4(); + let maybe_socket = io.udp_bind(addr); + assert!(maybe_socket.is_ok()); + } + } +} + +#[test] +fn test_simple_homed_udp_io_bind_then_move_task_then_home_and_close() { + use std::rt::sleeper_list::SleeperList; + use std::rt::work_queue::WorkQueue; + use std::rt::thread::Thread; + use std::rt::task::Task; + use std::rt::sched::{Shutdown, TaskFromFriend}; + use std::rt::task::UnwindResult; + do run_in_bare_thread { + let sleepers = SleeperList::new(); + let work_queue1 = WorkQueue::new(); + let work_queue2 = WorkQueue::new(); + let queues = ~[work_queue1.clone(), work_queue2.clone()]; + + let loop1 = ~UvEventLoop::new() as ~EventLoop; + let mut sched1 = ~Scheduler::new(loop1, work_queue1, queues.clone(), + sleepers.clone()); + let loop2 = ~UvEventLoop::new() as ~EventLoop; + let mut sched2 = ~Scheduler::new(loop2, work_queue2, queues.clone(), + sleepers.clone()); + + 
let handle1 = Cell::new(sched1.make_handle()); + let handle2 = Cell::new(sched2.make_handle()); + let tasksFriendHandle = Cell::new(sched2.make_handle()); + + let on_exit: ~fn(UnwindResult) = |exit_status| { + handle1.take().send(Shutdown); + handle2.take().send(Shutdown); + assert!(exit_status.is_success()); + }; + + let test_function: ~fn() = || { + let io = unsafe { local_io() }; + let addr = next_test_ip4(); + let maybe_socket = io.udp_bind(addr); + // this socket is bound to this event loop + assert!(maybe_socket.is_ok()); + + // block self on sched1 + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + // unblock task + do task.wake().map |task| { + // send self to sched2 + tasksFriendHandle.take().send(TaskFromFriend(task)); + }; + // sched1 should now sleep since it has nothing else to do + } + } + // sched2 will wake up and get the task + // as we do nothing else, the function ends and the socket goes out of scope + // sched2 will start to run the destructor + // the destructor will first block the task, set it's home as sched1, then enqueue it + // sched2 will dequeue the task, see that it has a home, and send it to sched1 + // sched1 will wake up, exec the close function on the correct loop, and then we're done + }; + + let mut main_task = ~Task::new_root(&mut sched1.stack_pool, None, test_function); + main_task.death.on_exit = Some(on_exit); + let main_task = Cell::new(main_task); + + let null_task = Cell::new(~do Task::new_root(&mut sched2.stack_pool, None) || {}); + + let sched1 = Cell::new(sched1); + let sched2 = Cell::new(sched2); + + let thread1 = do Thread::start { + sched1.take().bootstrap(main_task.take()); + }; + let thread2 = do Thread::start { + sched2.take().bootstrap(null_task.take()); + }; + + thread1.join(); + thread2.join(); + } +} + +#[test] +fn test_simple_homed_udp_io_bind_then_move_handle_then_home_and_close() { + use 
std::rt::sleeper_list::SleeperList; + use std::rt::work_queue::WorkQueue; + use std::rt::thread::Thread; + use std::rt::task::Task; + use std::rt::comm::oneshot; + use std::rt::sched::Shutdown; + use std::rt::task::UnwindResult; + do run_in_bare_thread { + let sleepers = SleeperList::new(); + let work_queue1 = WorkQueue::new(); + let work_queue2 = WorkQueue::new(); + let queues = ~[work_queue1.clone(), work_queue2.clone()]; + + let loop1 = ~UvEventLoop::new() as ~EventLoop; + let mut sched1 = ~Scheduler::new(loop1, work_queue1, queues.clone(), + sleepers.clone()); + let loop2 = ~UvEventLoop::new() as ~EventLoop; + let mut sched2 = ~Scheduler::new(loop2, work_queue2, queues.clone(), + sleepers.clone()); + + let handle1 = Cell::new(sched1.make_handle()); + let handle2 = Cell::new(sched2.make_handle()); + + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + let body1: ~fn() = || { + let io = unsafe { local_io() }; + let addr = next_test_ip4(); + let socket = io.udp_bind(addr); + assert!(socket.is_ok()); + chan.take().send(socket); + }; + + let body2: ~fn() = || { + let socket = port.take().recv(); + assert!(socket.is_ok()); + /* The socket goes out of scope and the destructor is called. 
+ * The destructor: + * - sends itself back to sched1 + * - frees the socket + * - resets the home of the task to whatever it was previously + */ + }; + + let on_exit: ~fn(UnwindResult) = |exit| { + handle1.take().send(Shutdown); + handle2.take().send(Shutdown); + assert!(exit.is_success()); + }; + + let task1 = Cell::new(~Task::new_root(&mut sched1.stack_pool, None, body1)); + + let mut task2 = ~Task::new_root(&mut sched2.stack_pool, None, body2); + task2.death.on_exit = Some(on_exit); + let task2 = Cell::new(task2); + + let sched1 = Cell::new(sched1); + let sched2 = Cell::new(sched2); + + let thread1 = do Thread::start { + sched1.take().bootstrap(task1.take()); + }; + let thread2 = do Thread::start { + sched2.take().bootstrap(task2.take()); + }; + + thread1.join(); + thread2.join(); + } +} + +#[test] +fn test_simple_tcp_server_and_client() { + do run_in_mt_newsched_task { + let addr = next_test_ip4(); + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + // Start the server first so it's listening when we connect + do spawntask { + unsafe { + let io = local_io(); + let listener = io.tcp_bind(addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + chan.take().send(()); + let mut stream = acceptor.accept().unwrap(); + let mut buf = [0, .. 
2048]; + let nread = stream.read(buf).unwrap(); + assert_eq!(nread, 8); + for i in range(0u, nread) { + uvdebug!("{}", buf[i]); + assert_eq!(buf[i], i as u8); + } + } + } + + do spawntask { + unsafe { + port.take().recv(); + let io = local_io(); + let mut stream = io.tcp_connect(addr).unwrap(); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + } + } + } +} + +#[test] +fn test_simple_tcp_server_and_client_on_diff_threads() { + use std::rt::sleeper_list::SleeperList; + use std::rt::work_queue::WorkQueue; + use std::rt::thread::Thread; + use std::rt::task::Task; + use std::rt::sched::{Shutdown}; + use std::rt::task::UnwindResult; + do run_in_bare_thread { + let sleepers = SleeperList::new(); + + let server_addr = next_test_ip4(); + let client_addr = server_addr.clone(); + + let server_work_queue = WorkQueue::new(); + let client_work_queue = WorkQueue::new(); + let queues = ~[server_work_queue.clone(), client_work_queue.clone()]; + + let sloop = ~UvEventLoop::new() as ~EventLoop; + let mut server_sched = ~Scheduler::new(sloop, server_work_queue, + queues.clone(), sleepers.clone()); + let cloop = ~UvEventLoop::new() as ~EventLoop; + let mut client_sched = ~Scheduler::new(cloop, client_work_queue, + queues.clone(), sleepers.clone()); + + let server_handle = Cell::new(server_sched.make_handle()); + let client_handle = Cell::new(client_sched.make_handle()); + + let server_on_exit: ~fn(UnwindResult) = |exit_status| { + server_handle.take().send(Shutdown); + assert!(exit_status.is_success()); + }; + + let client_on_exit: ~fn(UnwindResult) = |exit_status| { + client_handle.take().send(Shutdown); + assert!(exit_status.is_success()); + }; + + let server_fn: ~fn() = || { + let io = unsafe { local_io() }; + let listener = io.tcp_bind(server_addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + let mut stream = acceptor.accept().unwrap(); + let mut buf = [0, .. 
2048]; + let nread = stream.read(buf).unwrap(); + assert_eq!(nread, 8); + for i in range(0u, nread) { + assert_eq!(buf[i], i as u8); + } + }; + + let client_fn: ~fn() = || { + let io = unsafe { local_io() }; + let mut stream = io.tcp_connect(client_addr); + while stream.is_err() { + stream = io.tcp_connect(client_addr); + } + stream.unwrap().write([0, 1, 2, 3, 4, 5, 6, 7]); + }; + + let mut server_task = ~Task::new_root(&mut server_sched.stack_pool, None, server_fn); + server_task.death.on_exit = Some(server_on_exit); + let server_task = Cell::new(server_task); + + let mut client_task = ~Task::new_root(&mut client_sched.stack_pool, None, client_fn); + client_task.death.on_exit = Some(client_on_exit); + let client_task = Cell::new(client_task); + + let server_sched = Cell::new(server_sched); + let client_sched = Cell::new(client_sched); + + let server_thread = do Thread::start { + server_sched.take().bootstrap(server_task.take()); + }; + let client_thread = do Thread::start { + client_sched.take().bootstrap(client_task.take()); + }; + + server_thread.join(); + client_thread.join(); + } +} + +#[test] +fn test_simple_udp_server_and_client() { + do run_in_mt_newsched_task { + let server_addr = next_test_ip4(); + let client_addr = next_test_ip4(); + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + do spawntask { + unsafe { + let io = local_io(); + let mut server_socket = io.udp_bind(server_addr).unwrap(); + chan.take().send(()); + let mut buf = [0, .. 
2048]; + let (nread,src) = server_socket.recvfrom(buf).unwrap(); + assert_eq!(nread, 8); + for i in range(0u, nread) { + uvdebug!("{}", buf[i]); + assert_eq!(buf[i], i as u8); + } + assert_eq!(src, client_addr); + } + } + + do spawntask { + unsafe { + let io = local_io(); + let mut client_socket = io.udp_bind(client_addr).unwrap(); + port.take().recv(); + client_socket.sendto([0, 1, 2, 3, 4, 5, 6, 7], server_addr); + } + } + } +} + +#[test] #[ignore(reason = "busted")] +fn test_read_and_block() { + do run_in_mt_newsched_task { + let addr = next_test_ip4(); + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + do spawntask { + let io = unsafe { local_io() }; + let listener = io.tcp_bind(addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + chan.take().send(()); + let mut stream = acceptor.accept().unwrap(); + let mut buf = [0, .. 2048]; + + let expected = 32; + let mut current = 0; + let mut reads = 0; + + while current < expected { + let nread = stream.read(buf).unwrap(); + for i in range(0u, nread) { + let val = buf[i] as uint; + assert_eq!(val, current % 8); + current += 1; + } + reads += 1; + + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + // Yield to the other task in hopes that it + // will trigger a read callback while we are + // not ready for it + do scheduler.deschedule_running_task_and_then |sched, task| { + let task = Cell::new(task); + sched.enqueue_blocked_task(task.take()); + } + } + } + + // Make sure we had multiple reads + assert!(reads > 1); + } + + do spawntask { + unsafe { + port.take().recv(); + let io = local_io(); + let mut stream = io.tcp_connect(addr).unwrap(); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + } + } + + } +} + +#[test] +fn test_read_read_read() { + do run_in_mt_newsched_task { + let addr = next_test_ip4(); + 
static MAX: uint = 500000; + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + do spawntask { + unsafe { + let io = local_io(); + let listener = io.tcp_bind(addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + chan.take().send(()); + let mut stream = acceptor.accept().unwrap(); + let buf = [1, .. 2048]; + let mut total_bytes_written = 0; + while total_bytes_written < MAX { + stream.write(buf); + total_bytes_written += buf.len(); + } + } + } + + do spawntask { + unsafe { + port.take().recv(); + let io = local_io(); + let mut stream = io.tcp_connect(addr).unwrap(); + let mut buf = [0, .. 2048]; + let mut total_bytes_read = 0; + while total_bytes_read < MAX { + let nread = stream.read(buf).unwrap(); + uvdebug!("read {} bytes", nread); + total_bytes_read += nread; + for i in range(0u, nread) { + assert_eq!(buf[i], 1); + } + } + uvdebug!("read {} bytes total", total_bytes_read); + } + } + } +} + +#[test] +#[ignore(cfg(windows))] // FIXME(#10102) the server never sees the second send +fn test_udp_twice() { + do run_in_mt_newsched_task { + let server_addr = next_test_ip4(); + let client_addr = next_test_ip4(); + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + do spawntask { + unsafe { + let io = local_io(); + let mut client = io.udp_bind(client_addr).unwrap(); + port.take().recv(); + assert!(client.sendto([1], server_addr).is_ok()); + assert!(client.sendto([2], server_addr).is_ok()); + } + } + + do spawntask { + unsafe { + let io = local_io(); + let mut server = io.udp_bind(server_addr).unwrap(); + chan.take().send(()); + let mut buf1 = [0]; + let mut buf2 = [0]; + let (nread1, src1) = server.recvfrom(buf1).unwrap(); + let (nread2, src2) = server.recvfrom(buf2).unwrap(); + assert_eq!(nread1, 1); + assert_eq!(nread2, 1); + assert_eq!(src1, client_addr); + assert_eq!(src2, client_addr); + assert_eq!(buf1[0], 1); + assert_eq!(buf2[0], 2); + } + } + } +} + +#[test] +fn 
test_udp_many_read() { + do run_in_mt_newsched_task { + let server_out_addr = next_test_ip4(); + let server_in_addr = next_test_ip4(); + let client_out_addr = next_test_ip4(); + let client_in_addr = next_test_ip4(); + static MAX: uint = 500_000; + + let (p1, c1) = oneshot(); + let (p2, c2) = oneshot(); + + let first = Cell::new((p1, c2)); + let second = Cell::new((p2, c1)); + + do spawntask { + unsafe { + let io = local_io(); + let mut server_out = io.udp_bind(server_out_addr).unwrap(); + let mut server_in = io.udp_bind(server_in_addr).unwrap(); + let (port, chan) = first.take(); + chan.send(()); + port.recv(); + let msg = [1, .. 2048]; + let mut total_bytes_sent = 0; + let mut buf = [1]; + while buf[0] == 1 { + // send more data + assert!(server_out.sendto(msg, client_in_addr).is_ok()); + total_bytes_sent += msg.len(); + // check if the client has received enough + let res = server_in.recvfrom(buf); + assert!(res.is_ok()); + let (nread, src) = res.unwrap(); + assert_eq!(nread, 1); + assert_eq!(src, client_out_addr); + } + assert!(total_bytes_sent >= MAX); + } + } + + do spawntask { + unsafe { + let io = local_io(); + let mut client_out = io.udp_bind(client_out_addr).unwrap(); + let mut client_in = io.udp_bind(client_in_addr).unwrap(); + let (port, chan) = second.take(); + port.recv(); + chan.send(()); + let mut total_bytes_recv = 0; + let mut buf = [0, .. 
2048]; + while total_bytes_recv < MAX { + // ask for more + assert!(client_out.sendto([1], server_in_addr).is_ok()); + // wait for data + let res = client_in.recvfrom(buf); + assert!(res.is_ok()); + let (nread, src) = res.unwrap(); + assert_eq!(src, server_out_addr); + total_bytes_recv += nread; + for i in range(0u, nread) { + assert_eq!(buf[i], 1); + } + } + // tell the server we're done + assert!(client_out.sendto([0], server_in_addr).is_ok()); + } + } + } +} + +#[test] +fn test_timer_sleep_simple() { + do run_in_mt_newsched_task { + unsafe { + let io = local_io(); + let timer = io.timer_init(); + do timer.map_move |mut t| { t.sleep(1) }; + } + } +} + +fn file_test_uvio_full_simple_impl() { + use std::rt::io::{Open, Create, ReadWrite, Read}; + unsafe { + let io = local_io(); + let write_val = "hello uvio!"; + let path = "./tmp/file_test_uvio_full.txt"; + { + let create_fm = Create; + let create_fa = ReadWrite; + let mut fd = io.fs_open(&path.to_c_str(), create_fm, create_fa).unwrap(); + let write_buf = write_val.as_bytes(); + fd.write(write_buf); + } + { + let ro_fm = Open; + let ro_fa = Read; + let mut fd = io.fs_open(&path.to_c_str(), ro_fm, ro_fa).unwrap(); + let mut read_vec = [0, .. 
1028]; + let nread = fd.read(read_vec).unwrap(); + let read_val = str::from_utf8(read_vec.slice(0, nread as uint)); + assert!(read_val == write_val.to_owned()); + } + io.fs_unlink(&path.to_c_str()); + } +} + +#[test] +fn file_test_uvio_full_simple() { + do run_in_mt_newsched_task { + file_test_uvio_full_simple_impl(); + } +} + +fn uvio_naive_print(input: &str) { + unsafe { + use std::libc::{STDOUT_FILENO}; + let io = local_io(); + { + let mut fd = io.fs_from_raw_fd(STDOUT_FILENO, DontClose); + let write_buf = input.as_bytes(); + fd.write(write_buf); + } + } +} + +#[test] +fn file_test_uvio_write_to_stdout() { + do run_in_mt_newsched_task { + uvio_naive_print("jubilation\n"); + } +} diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs new file mode 100644 index 0000000000000..f80178cfa4c62 --- /dev/null +++ b/src/librustuv/uvll.rs @@ -0,0 +1,1174 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/*! + * Low-level bindings to the libuv library. + * + * This module contains a set of direct, 'bare-metal' wrappers around + * the libuv C-API. + * + * We're not bothering yet to redefine uv's structs as Rust structs + * because they are quite large and change often between versions. + * The maintenance burden is just too high. Instead we use the uv's + * `uv_handle_size` and `uv_req_size` to find the correct size of the + * structs and allocate them on the heap. This can be revisited later. + * + * There are also a collection of helper functions to ease interacting + * with the low-level API. + * + * As new functionality, existent in uv.h, is added to the rust stdlib, + * the mappings should be added in this module. 
+ */ + +#[allow(non_camel_case_types)]; // C types + +use std::libc::{size_t, c_int, c_uint, c_void, c_char, uintptr_t}; +use std::libc::ssize_t; +use std::libc::{malloc, free}; +use std::libc; +use std::ptr; +use std::vec; + +pub use self::errors::*; + +pub static OK: c_int = 0; +pub static EOF: c_int = -4095; +pub static UNKNOWN: c_int = -4094; + +// uv-errno.h redefines error codes for windows, but not for unix... + +#[cfg(windows)] +pub mod errors { + use std::libc::c_int; + + pub static EACCES: c_int = -4093; + pub static ECONNREFUSED: c_int = -4079; + pub static ECONNRESET: c_int = -4078; + pub static ENOTCONN: c_int = -4054; + pub static EPIPE: c_int = -4048; + pub static ECONNABORTED: c_int = -4080; +} +#[cfg(not(windows))] +pub mod errors { + use std::libc; + use std::libc::c_int; + + pub static EACCES: c_int = -libc::EACCES; + pub static ECONNREFUSED: c_int = -libc::ECONNREFUSED; + pub static ECONNRESET: c_int = -libc::ECONNRESET; + pub static ENOTCONN: c_int = -libc::ENOTCONN; + pub static EPIPE: c_int = -libc::EPIPE; + pub static ECONNABORTED: c_int = -libc::ECONNABORTED; +} + +pub static PROCESS_SETUID: c_int = 1 << 0; +pub static PROCESS_SETGID: c_int = 1 << 1; +pub static PROCESS_WINDOWS_VERBATIM_ARGUMENTS: c_int = 1 << 2; +pub static PROCESS_DETACHED: c_int = 1 << 3; +pub static PROCESS_WINDOWS_HIDE: c_int = 1 << 4; + +pub static STDIO_IGNORE: c_int = 0x00; +pub static STDIO_CREATE_PIPE: c_int = 0x01; +pub static STDIO_INHERIT_FD: c_int = 0x02; +pub static STDIO_INHERIT_STREAM: c_int = 0x04; +pub static STDIO_READABLE_PIPE: c_int = 0x10; +pub static STDIO_WRITABLE_PIPE: c_int = 0x20; + +// see libuv/include/uv-unix.h +#[cfg(unix)] +pub struct uv_buf_t { + base: *u8, + len: libc::size_t, +} + +// see libuv/include/uv-win.h +#[cfg(windows)] +pub struct uv_buf_t { + len: u32, + base: *u8, +} + +pub struct uv_process_options_t { + exit_cb: uv_exit_cb, + file: *libc::c_char, + args: **libc::c_char, + env: **libc::c_char, + cwd: *libc::c_char, + flags: 
libc::c_uint, + stdio_count: libc::c_int, + stdio: *uv_stdio_container_t, + uid: uv_uid_t, + gid: uv_gid_t, +} + +// These fields are private because they must be interfaced with through the +// functions below. +pub struct uv_stdio_container_t { + priv flags: libc::c_int, + priv stream: *uv_stream_t, +} + +pub type uv_handle_t = c_void; +pub type uv_loop_t = c_void; +pub type uv_idle_t = c_void; +pub type uv_tcp_t = c_void; +pub type uv_udp_t = c_void; +pub type uv_connect_t = c_void; +pub type uv_connection_t = c_void; +pub type uv_write_t = c_void; +pub type uv_async_t = c_void; +pub type uv_timer_t = c_void; +pub type uv_stream_t = c_void; +pub type uv_fs_t = c_void; +pub type uv_udp_send_t = c_void; +pub type uv_getaddrinfo_t = c_void; +pub type uv_process_t = c_void; +pub type uv_pipe_t = c_void; +pub type uv_tty_t = c_void; +pub type uv_signal_t = c_void; + +pub struct uv_timespec_t { + tv_sec: libc::c_long, + tv_nsec: libc::c_long +} + +pub struct uv_stat_t { + st_dev: libc::uint64_t, + st_mode: libc::uint64_t, + st_nlink: libc::uint64_t, + st_uid: libc::uint64_t, + st_gid: libc::uint64_t, + st_rdev: libc::uint64_t, + st_ino: libc::uint64_t, + st_size: libc::uint64_t, + st_blksize: libc::uint64_t, + st_blocks: libc::uint64_t, + st_flags: libc::uint64_t, + st_gen: libc::uint64_t, + st_atim: uv_timespec_t, + st_mtim: uv_timespec_t, + st_ctim: uv_timespec_t, + st_birthtim: uv_timespec_t +} + +impl uv_stat_t { + pub fn new() -> uv_stat_t { + uv_stat_t { + st_dev: 0, + st_mode: 0, + st_nlink: 0, + st_uid: 0, + st_gid: 0, + st_rdev: 0, + st_ino: 0, + st_size: 0, + st_blksize: 0, + st_blocks: 0, + st_flags: 0, + st_gen: 0, + st_atim: uv_timespec_t { tv_sec: 0, tv_nsec: 0 }, + st_mtim: uv_timespec_t { tv_sec: 0, tv_nsec: 0 }, + st_ctim: uv_timespec_t { tv_sec: 0, tv_nsec: 0 }, + st_birthtim: uv_timespec_t { tv_sec: 0, tv_nsec: 0 } + } + } + pub fn is_file(&self) -> bool { + ((self.st_mode) & libc::S_IFMT as libc::uint64_t) == libc::S_IFREG as libc::uint64_t + } + 
pub fn is_dir(&self) -> bool { + ((self.st_mode) & libc::S_IFMT as libc::uint64_t) == libc::S_IFDIR as libc::uint64_t + } +} + +pub type uv_idle_cb = extern "C" fn(handle: *uv_idle_t, + status: c_int); +pub type uv_alloc_cb = extern "C" fn(stream: *uv_stream_t, + suggested_size: size_t) -> uv_buf_t; +pub type uv_read_cb = extern "C" fn(stream: *uv_stream_t, + nread: ssize_t, + buf: uv_buf_t); +pub type uv_udp_send_cb = extern "C" fn(req: *uv_udp_send_t, + status: c_int); +pub type uv_udp_recv_cb = extern "C" fn(handle: *uv_udp_t, + nread: ssize_t, + buf: uv_buf_t, + addr: *sockaddr, + flags: c_uint); +pub type uv_close_cb = extern "C" fn(handle: *uv_handle_t); +pub type uv_walk_cb = extern "C" fn(handle: *uv_handle_t, + arg: *c_void); +pub type uv_async_cb = extern "C" fn(handle: *uv_async_t, + status: c_int); +pub type uv_connect_cb = extern "C" fn(handle: *uv_connect_t, + status: c_int); +pub type uv_connection_cb = extern "C" fn(handle: *uv_connection_t, + status: c_int); +pub type uv_timer_cb = extern "C" fn(handle: *uv_timer_t, + status: c_int); +pub type uv_write_cb = extern "C" fn(handle: *uv_write_t, + status: c_int); +pub type uv_getaddrinfo_cb = extern "C" fn(req: *uv_getaddrinfo_t, + status: c_int, + res: *addrinfo); +pub type uv_exit_cb = extern "C" fn(handle: *uv_process_t, + exit_status: c_int, + term_signal: c_int); +pub type uv_signal_cb = extern "C" fn(handle: *uv_signal_t, + signum: c_int); + +pub type sockaddr = c_void; +pub type sockaddr_in = c_void; +pub type sockaddr_in6 = c_void; +pub type sockaddr_storage = c_void; + +#[cfg(unix)] +pub type socklen_t = c_int; + +// XXX: This is a standard C type. 
Could probably be defined in libc +#[cfg(target_os = "android")] +#[cfg(target_os = "linux")] +pub struct addrinfo { + ai_flags: c_int, + ai_family: c_int, + ai_socktype: c_int, + ai_protocol: c_int, + ai_addrlen: socklen_t, + ai_addr: *sockaddr, + ai_canonname: *char, + ai_next: *addrinfo +} + +#[cfg(target_os = "macos")] +#[cfg(target_os = "freebsd")] +pub struct addrinfo { + ai_flags: c_int, + ai_family: c_int, + ai_socktype: c_int, + ai_protocol: c_int, + ai_addrlen: socklen_t, + ai_canonname: *char, + ai_addr: *sockaddr, + ai_next: *addrinfo +} + +#[cfg(windows)] +pub struct addrinfo { + ai_flags: c_int, + ai_family: c_int, + ai_socktype: c_int, + ai_protocol: c_int, + ai_addrlen: size_t, + ai_canonname: *char, + ai_addr: *sockaddr, + ai_next: *addrinfo +} + +#[cfg(unix)] pub type uv_uid_t = libc::types::os::arch::posix88::uid_t; +#[cfg(unix)] pub type uv_gid_t = libc::types::os::arch::posix88::gid_t; +#[cfg(windows)] pub type uv_uid_t = libc::c_uchar; +#[cfg(windows)] pub type uv_gid_t = libc::c_uchar; + +#[deriving(Eq)] +pub enum uv_handle_type { + UV_UNKNOWN_HANDLE, + UV_ASYNC, + UV_CHECK, + UV_FS_EVENT, + UV_FS_POLL, + UV_HANDLE, + UV_IDLE, + UV_NAMED_PIPE, + UV_POLL, + UV_PREPARE, + UV_PROCESS, + UV_STREAM, + UV_TCP, + UV_TIMER, + UV_TTY, + UV_UDP, + UV_SIGNAL, + UV_FILE, + UV_HANDLE_TYPE_MAX +} + +#[cfg(unix)] +#[deriving(Eq)] +pub enum uv_req_type { + UV_UNKNOWN_REQ, + UV_REQ, + UV_CONNECT, + UV_WRITE, + UV_SHUTDOWN, + UV_UDP_SEND, + UV_FS, + UV_WORK, + UV_GETADDRINFO, + UV_REQ_TYPE_MAX +} + +// uv_req_type may have additional fields defined by UV_REQ_TYPE_PRIVATE. 
+// See UV_REQ_TYPE_PRIVATE at libuv/include/uv-win.h +#[cfg(windows)] +#[deriving(Eq)] +pub enum uv_req_type { + UV_UNKNOWN_REQ, + UV_REQ, + UV_CONNECT, + UV_WRITE, + UV_SHUTDOWN, + UV_UDP_SEND, + UV_FS, + UV_WORK, + UV_GETADDRINFO, + UV_ACCEPT, + UV_FS_EVENT_REQ, + UV_POLL_REQ, + UV_PROCESS_EXIT, + UV_READ, + UV_UDP_RECV, + UV_WAKEUP, + UV_SIGNAL_REQ, + UV_REQ_TYPE_MAX +} + +#[deriving(Eq)] +pub enum uv_membership { + UV_LEAVE_GROUP, + UV_JOIN_GROUP +} + +pub unsafe fn malloc_handle(handle: uv_handle_type) -> *c_void { + #[fixed_stack_segment]; #[inline(never)]; + + assert!(handle != UV_UNKNOWN_HANDLE && handle != UV_HANDLE_TYPE_MAX); + let size = rust_uv_handle_size(handle as uint); + let p = malloc(size); + assert!(p.is_not_null()); + return p; +} + +pub unsafe fn free_handle(v: *c_void) { + #[fixed_stack_segment]; #[inline(never)]; + + free(v) +} + +pub unsafe fn malloc_req(req: uv_req_type) -> *c_void { + #[fixed_stack_segment]; #[inline(never)]; + + assert!(req != UV_UNKNOWN_REQ && req != UV_REQ_TYPE_MAX); + let size = rust_uv_req_size(req as uint); + let p = malloc(size); + assert!(p.is_not_null()); + return p; +} + +pub unsafe fn free_req(v: *c_void) { + #[fixed_stack_segment]; #[inline(never)]; + + free(v) +} + +#[test] +fn handle_sanity_check() { + #[fixed_stack_segment]; #[inline(never)]; + unsafe { + assert_eq!(UV_HANDLE_TYPE_MAX as uint, rust_uv_handle_type_max()); + } +} + +#[test] +fn request_sanity_check() { + #[fixed_stack_segment]; #[inline(never)]; + unsafe { + assert_eq!(UV_REQ_TYPE_MAX as uint, rust_uv_req_type_max()); + } +} + +// XXX Event loops ignore SIGPIPE by default. 
+pub unsafe fn loop_new() -> *c_void { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_loop_new(); +} + +pub unsafe fn loop_delete(loop_handle: *c_void) { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_loop_delete(loop_handle); +} + +pub unsafe fn run(loop_handle: *c_void) { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_run(loop_handle); +} + +pub unsafe fn close(handle: *T, cb: uv_close_cb) { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_close(handle as *c_void, cb); +} + +pub unsafe fn walk(loop_handle: *c_void, cb: uv_walk_cb, arg: *c_void) { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_walk(loop_handle, cb, arg); +} + +pub unsafe fn idle_init(loop_handle: *uv_loop_t, handle: *uv_idle_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_idle_init(loop_handle, handle) +} + +pub unsafe fn idle_start(handle: *uv_idle_t, cb: uv_idle_cb) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_idle_start(handle, cb) +} + +pub unsafe fn idle_stop(handle: *uv_idle_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_idle_stop(handle) +} + +pub unsafe fn udp_init(loop_handle: *uv_loop_t, handle: *uv_udp_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_udp_init(loop_handle, handle); +} + +pub unsafe fn udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_udp_bind(server, addr, flags); +} + +pub unsafe fn udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_udp_bind6(server, addr, flags); +} + +pub unsafe fn udp_send(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], + addr: *sockaddr_in, cb: uv_udp_send_cb) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + let buf_ptr = vec::raw::to_ptr(buf_in); + let buf_cnt = buf_in.len() as i32; + return rust_uv_udp_send(req, 
handle as *c_void, buf_ptr, buf_cnt, addr, cb); +} + +pub unsafe fn udp_send6(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], + addr: *sockaddr_in6, cb: uv_udp_send_cb) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + let buf_ptr = vec::raw::to_ptr(buf_in); + let buf_cnt = buf_in.len() as i32; + return rust_uv_udp_send6(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); +} + +pub unsafe fn udp_recv_start(server: *uv_udp_t, on_alloc: uv_alloc_cb, + on_recv: uv_udp_recv_cb) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_udp_recv_start(server, on_alloc, on_recv); +} + +pub unsafe fn udp_recv_stop(server: *uv_udp_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_udp_recv_stop(server); +} + +pub unsafe fn get_udp_handle_from_send_req(send_req: *uv_udp_send_t) -> *uv_udp_t { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_get_udp_handle_from_send_req(send_req); +} + +pub unsafe fn udp_getsockname(handle: *uv_udp_t, name: *sockaddr_storage) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_udp_getsockname(handle, name); +} + +pub unsafe fn udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char, + interface_addr: *c_char, membership: uv_membership) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_udp_set_membership(handle, multicast_addr, interface_addr, membership as c_int); +} + +pub unsafe fn udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_udp_set_multicast_loop(handle, on); +} + +pub unsafe fn udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_udp_set_multicast_ttl(handle, ttl); +} + +pub unsafe fn udp_set_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_udp_set_ttl(handle, ttl); +} + +pub unsafe fn 
udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_udp_set_broadcast(handle, on); +} + +pub unsafe fn tcp_init(loop_handle: *c_void, handle: *uv_tcp_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_tcp_init(loop_handle, handle); +} + +pub unsafe fn tcp_connect(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, + addr_ptr: *sockaddr_in, after_connect_cb: uv_connect_cb) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_tcp_connect(connect_ptr, tcp_handle_ptr, after_connect_cb, addr_ptr); +} + +pub unsafe fn tcp_connect6(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, + addr_ptr: *sockaddr_in6, after_connect_cb: uv_connect_cb) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_tcp_connect6(connect_ptr, tcp_handle_ptr, after_connect_cb, addr_ptr); +} + +pub unsafe fn tcp_bind(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_tcp_bind(tcp_server_ptr, addr_ptr); +} + +pub unsafe fn tcp_bind6(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in6) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_tcp_bind6(tcp_server_ptr, addr_ptr); +} + +pub unsafe fn tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_storage) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_tcp_getpeername(tcp_handle_ptr, name); +} + +pub unsafe fn tcp_getsockname(handle: *uv_tcp_t, name: *sockaddr_storage) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_tcp_getsockname(handle, name); +} + +pub unsafe fn tcp_nodelay(handle: *uv_tcp_t, enable: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_tcp_nodelay(handle, enable); +} + +pub unsafe fn tcp_keepalive(handle: *uv_tcp_t, enable: c_int, delay: c_uint) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return 
rust_uv_tcp_keepalive(handle, enable, delay); +} + +pub unsafe fn tcp_simultaneous_accepts(handle: *uv_tcp_t, enable: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_tcp_simultaneous_accepts(handle, enable); +} + +pub unsafe fn listen(stream: *T, backlog: c_int, + cb: uv_connection_cb) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_listen(stream as *c_void, backlog, cb); +} + +pub unsafe fn accept(server: *c_void, client: *c_void) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_accept(server as *c_void, client as *c_void); +} + +pub unsafe fn write(req: *uv_write_t, + stream: *T, + buf_in: &[uv_buf_t], + cb: uv_write_cb) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + let buf_ptr = vec::raw::to_ptr(buf_in); + let buf_cnt = buf_in.len() as i32; + return rust_uv_write(req as *c_void, stream as *c_void, buf_ptr, buf_cnt, cb); +} +pub unsafe fn read_start(stream: *uv_stream_t, + on_alloc: uv_alloc_cb, + on_read: uv_read_cb) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_read_start(stream as *c_void, on_alloc, on_read); +} + +pub unsafe fn read_stop(stream: *uv_stream_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_read_stop(stream as *c_void); +} + +pub unsafe fn strerror(err: c_int) -> *c_char { + #[fixed_stack_segment]; #[inline(never)]; + return rust_uv_strerror(err); +} +pub unsafe fn err_name(err: c_int) -> *c_char { + #[fixed_stack_segment]; #[inline(never)]; + return rust_uv_err_name(err); +} + +pub unsafe fn async_init(loop_handle: *c_void, + async_handle: *uv_async_t, + cb: uv_async_cb) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_async_init(loop_handle, async_handle, cb); +} + +pub unsafe fn async_send(async_handle: *uv_async_t) { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_async_send(async_handle); +} +pub unsafe fn buf_init(input: *u8, len: uint) -> uv_buf_t { 
+ #[fixed_stack_segment]; #[inline(never)]; + + let out_buf = uv_buf_t { base: ptr::null(), len: 0 as size_t }; + let out_buf_ptr = ptr::to_unsafe_ptr(&out_buf); + rust_uv_buf_init(out_buf_ptr, input, len as size_t); + return out_buf; +} + +pub unsafe fn timer_init(loop_ptr: *c_void, timer_ptr: *uv_timer_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_timer_init(loop_ptr, timer_ptr); +} +pub unsafe fn timer_start(timer_ptr: *uv_timer_t, + cb: uv_timer_cb, timeout: u64, + repeat: u64) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_timer_start(timer_ptr, cb, timeout, repeat); +} +pub unsafe fn timer_stop(timer_ptr: *uv_timer_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_timer_stop(timer_ptr); +} + +pub unsafe fn is_ip4_addr(addr: *sockaddr) -> bool { + #[fixed_stack_segment]; #[inline(never)]; + + match rust_uv_is_ipv4_sockaddr(addr) { 0 => false, _ => true } +} + +pub unsafe fn is_ip6_addr(addr: *sockaddr) -> bool { + #[fixed_stack_segment]; #[inline(never)]; + + match rust_uv_is_ipv6_sockaddr(addr) { 0 => false, _ => true } +} + +pub unsafe fn malloc_ip4_addr(ip: &str, port: int) -> *sockaddr_in { + #[fixed_stack_segment]; #[inline(never)]; + do ip.with_c_str |ip_buf| { + rust_uv_ip4_addrp(ip_buf as *u8, port as libc::c_int) + } +} +pub unsafe fn malloc_ip6_addr(ip: &str, port: int) -> *sockaddr_in6 { + #[fixed_stack_segment]; #[inline(never)]; + do ip.with_c_str |ip_buf| { + rust_uv_ip6_addrp(ip_buf as *u8, port as libc::c_int) + } +} + +pub unsafe fn malloc_sockaddr_storage() -> *sockaddr_storage { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_malloc_sockaddr_storage() +} + +pub unsafe fn free_sockaddr_storage(ss: *sockaddr_storage) { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_free_sockaddr_storage(ss); +} + +pub unsafe fn free_ip4_addr(addr: *sockaddr_in) { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_free_ip4_addr(addr); +} + +pub unsafe fn 
free_ip6_addr(addr: *sockaddr_in6) { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_free_ip6_addr(addr); +} + +pub unsafe fn ip4_name(addr: *sockaddr_in, dst: *u8, size: size_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_ip4_name(addr, dst, size); +} + +pub unsafe fn ip6_name(addr: *sockaddr_in6, dst: *u8, size: size_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_ip6_name(addr, dst, size); +} + +pub unsafe fn ip4_port(addr: *sockaddr_in) -> c_uint { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_ip4_port(addr); +} + +pub unsafe fn ip6_port(addr: *sockaddr_in6) -> c_uint { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_ip6_port(addr); +} + +pub unsafe fn fs_open(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, flags: int, mode: int, + cb: *u8) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_fs_open(loop_ptr, req, path, flags as c_int, mode as c_int, cb) +} + +pub unsafe fn fs_unlink(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, + cb: *u8) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_fs_unlink(loop_ptr, req, path, cb) +} +pub unsafe fn fs_write(loop_ptr: *uv_loop_t, req: *uv_fs_t, fd: c_int, buf: *c_void, + len: uint, offset: i64, cb: *u8) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_fs_write(loop_ptr, req, fd, buf, len as c_uint, offset, cb) +} +pub unsafe fn fs_read(loop_ptr: *uv_loop_t, req: *uv_fs_t, fd: c_int, buf: *c_void, + len: uint, offset: i64, cb: *u8) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_fs_read(loop_ptr, req, fd, buf, len as c_uint, offset, cb) +} +pub unsafe fn fs_close(loop_ptr: *uv_loop_t, req: *uv_fs_t, fd: c_int, + cb: *u8) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_fs_close(loop_ptr, req, fd, cb) +} +pub unsafe fn fs_stat(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, cb: *u8) -> c_int { + #[fixed_stack_segment]; 
#[inline(never)]; + + rust_uv_fs_stat(loop_ptr, req, path, cb) +} +pub unsafe fn fs_fstat(loop_ptr: *uv_loop_t, req: *uv_fs_t, fd: c_int, cb: *u8) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_fs_fstat(loop_ptr, req, fd, cb) +} +pub unsafe fn fs_mkdir(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, mode: int, + cb: *u8) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_fs_mkdir(loop_ptr, req, path, mode as c_int, cb) +} +pub unsafe fn fs_rmdir(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, + cb: *u8) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_fs_rmdir(loop_ptr, req, path, cb) +} +pub unsafe fn fs_readdir(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, + flags: c_int, cb: *u8) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_fs_readdir(loop_ptr, req, path, flags, cb) +} +pub unsafe fn populate_stat(req_in: *uv_fs_t, stat_out: *uv_stat_t) { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_populate_uv_stat(req_in, stat_out) +} +pub unsafe fn fs_req_cleanup(req: *uv_fs_t) { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_fs_req_cleanup(req); +} + +pub unsafe fn spawn(loop_ptr: *c_void, result: *uv_process_t, + options: uv_process_options_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + return rust_uv_spawn(loop_ptr, result, options); +} + +pub unsafe fn process_kill(p: *uv_process_t, signum: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + return rust_uv_process_kill(p, signum); +} + +pub unsafe fn process_pid(p: *uv_process_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + return rust_uv_process_pid(p); +} + +pub unsafe fn set_stdio_container_flags(c: *uv_stdio_container_t, + flags: libc::c_int) { + #[fixed_stack_segment]; #[inline(never)]; + rust_set_stdio_container_flags(c, flags); +} + +pub unsafe fn set_stdio_container_fd(c: *uv_stdio_container_t, + fd: libc::c_int) { + #[fixed_stack_segment]; #[inline(never)]; + 
rust_set_stdio_container_fd(c, fd); +} + +pub unsafe fn set_stdio_container_stream(c: *uv_stdio_container_t, + stream: *uv_stream_t) { + #[fixed_stack_segment]; #[inline(never)]; + rust_set_stdio_container_stream(c, stream); +} + +pub unsafe fn pipe_init(loop_ptr: *c_void, p: *uv_pipe_t, ipc: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + rust_uv_pipe_init(loop_ptr, p, ipc) +} + +// data access helpers +pub unsafe fn get_result_from_fs_req(req: *uv_fs_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_get_result_from_fs_req(req) +} +pub unsafe fn get_ptr_from_fs_req(req: *uv_fs_t) -> *libc::c_void { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_get_ptr_from_fs_req(req) +} +pub unsafe fn get_loop_from_fs_req(req: *uv_fs_t) -> *uv_loop_t { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_get_loop_from_fs_req(req) +} +pub unsafe fn get_loop_from_getaddrinfo_req(req: *uv_getaddrinfo_t) -> *uv_loop_t { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_get_loop_from_getaddrinfo_req(req) +} +pub unsafe fn get_loop_for_uv_handle(handle: *T) -> *c_void { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_get_loop_for_uv_handle(handle as *c_void); +} +pub unsafe fn get_stream_handle_from_connect_req(connect: *uv_connect_t) -> *uv_stream_t { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_get_stream_handle_from_connect_req(connect); +} +pub unsafe fn get_stream_handle_from_write_req(write_req: *uv_write_t) -> *uv_stream_t { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_get_stream_handle_from_write_req(write_req); +} +pub unsafe fn get_data_for_uv_loop(loop_ptr: *c_void) -> *c_void { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_get_data_for_uv_loop(loop_ptr) +} +pub unsafe fn set_data_for_uv_loop(loop_ptr: *c_void, data: *c_void) { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_set_data_for_uv_loop(loop_ptr, data); +} +pub unsafe fn 
get_data_for_uv_handle(handle: *T) -> *c_void { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_get_data_for_uv_handle(handle as *c_void); +} +pub unsafe fn set_data_for_uv_handle(handle: *T, data: *U) { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_set_data_for_uv_handle(handle as *c_void, data as *c_void); +} +pub unsafe fn get_data_for_req(req: *T) -> *c_void { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_get_data_for_req(req as *c_void); +} +pub unsafe fn set_data_for_req(req: *T, data: *U) { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_set_data_for_req(req as *c_void, data as *c_void); +} +pub unsafe fn get_base_from_buf(buf: uv_buf_t) -> *u8 { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_get_base_from_buf(buf); +} +pub unsafe fn get_len_from_buf(buf: uv_buf_t) -> size_t { + #[fixed_stack_segment]; #[inline(never)]; + + return rust_uv_get_len_from_buf(buf); +} +pub unsafe fn getaddrinfo(loop_: *uv_loop_t, req: *uv_getaddrinfo_t, + getaddrinfo_cb: uv_getaddrinfo_cb, + node: *c_char, service: *c_char, + hints: *addrinfo) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + return rust_uv_getaddrinfo(loop_, req, getaddrinfo_cb, node, service, hints); +} +pub unsafe fn freeaddrinfo(ai: *addrinfo) { + #[fixed_stack_segment]; #[inline(never)]; + rust_uv_freeaddrinfo(ai); +} +pub unsafe fn pipe_open(pipe: *uv_pipe_t, file: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + rust_uv_pipe_open(pipe, file) +} +pub unsafe fn pipe_bind(pipe: *uv_pipe_t, name: *c_char) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + rust_uv_pipe_bind(pipe, name) +} +pub unsafe fn pipe_connect(req: *uv_connect_t, handle: *uv_pipe_t, + name: *c_char, cb: uv_connect_cb) { + #[fixed_stack_segment]; #[inline(never)]; + rust_uv_pipe_connect(req, handle, name, cb) +} +pub unsafe fn tty_init(loop_ptr: *uv_loop_t, tty: *uv_tty_t, fd: c_int, + readable: c_int) -> c_int { + #[fixed_stack_segment]; 
#[inline(never)]; + rust_uv_tty_init(loop_ptr, tty, fd, readable) +} +pub unsafe fn tty_set_mode(tty: *uv_tty_t, mode: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + rust_uv_tty_set_mode(tty, mode) +} +pub unsafe fn tty_get_winsize(tty: *uv_tty_t, width: *c_int, + height: *c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + rust_uv_tty_get_winsize(tty, width, height) +} +// FIXME(#9613) this should return uv_handle_type, not a c_int +pub unsafe fn guess_handle(fd: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + rust_uv_guess_handle(fd) +} + +pub unsafe fn signal_init(loop_: *uv_loop_t, handle: *uv_signal_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + return rust_uv_signal_init(loop_, handle); +} +pub unsafe fn signal_start(handle: *uv_signal_t, + signal_cb: uv_signal_cb, + signum: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + return rust_uv_signal_start(handle, signal_cb, signum); +} +pub unsafe fn signal_stop(handle: *uv_signal_t) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + return rust_uv_signal_stop(handle); +} + +pub struct uv_err_data { + err_name: ~str, + err_msg: ~str, +} + +extern { + + fn rust_uv_handle_size(type_: uintptr_t) -> size_t; + fn rust_uv_req_size(type_: uintptr_t) -> size_t; + fn rust_uv_handle_type_max() -> uintptr_t; + fn rust_uv_req_type_max() -> uintptr_t; + + // libuv public API + fn rust_uv_loop_new() -> *c_void; + fn rust_uv_loop_delete(lp: *c_void); + fn rust_uv_run(loop_handle: *c_void); + fn rust_uv_close(handle: *c_void, cb: uv_close_cb); + fn rust_uv_walk(loop_handle: *c_void, cb: uv_walk_cb, arg: *c_void); + + fn rust_uv_idle_init(loop_handle: *uv_loop_t, handle: *uv_idle_t) -> c_int; + fn rust_uv_idle_start(handle: *uv_idle_t, cb: uv_idle_cb) -> c_int; + fn rust_uv_idle_stop(handle: *uv_idle_t) -> c_int; + + fn rust_uv_async_send(handle: *uv_async_t); + fn rust_uv_async_init(loop_handle: *c_void, + async_handle: *uv_async_t, + cb: uv_async_cb) 
-> c_int; + fn rust_uv_tcp_init(loop_handle: *c_void, handle_ptr: *uv_tcp_t) -> c_int; + fn rust_uv_buf_init(out_buf: *uv_buf_t, base: *u8, len: size_t); + fn rust_uv_strerror(err: c_int) -> *c_char; + fn rust_uv_err_name(err: c_int) -> *c_char; + fn rust_uv_ip4_addrp(ip: *u8, port: c_int) -> *sockaddr_in; + fn rust_uv_ip6_addrp(ip: *u8, port: c_int) -> *sockaddr_in6; + fn rust_uv_free_ip4_addr(addr: *sockaddr_in); + fn rust_uv_free_ip6_addr(addr: *sockaddr_in6); + fn rust_uv_ip4_name(src: *sockaddr_in, dst: *u8, size: size_t) -> c_int; + fn rust_uv_ip6_name(src: *sockaddr_in6, dst: *u8, size: size_t) -> c_int; + fn rust_uv_ip4_port(src: *sockaddr_in) -> c_uint; + fn rust_uv_ip6_port(src: *sockaddr_in6) -> c_uint; + fn rust_uv_tcp_connect(req: *uv_connect_t, handle: *uv_tcp_t, + cb: uv_connect_cb, + addr: *sockaddr_in) -> c_int; + fn rust_uv_tcp_bind(tcp_server: *uv_tcp_t, addr: *sockaddr_in) -> c_int; + fn rust_uv_tcp_connect6(req: *uv_connect_t, handle: *uv_tcp_t, + cb: uv_connect_cb, + addr: *sockaddr_in6) -> c_int; + fn rust_uv_tcp_bind6(tcp_server: *uv_tcp_t, addr: *sockaddr_in6) -> c_int; + fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_storage) -> c_int; + fn rust_uv_tcp_getsockname(handle: *uv_tcp_t, name: *sockaddr_storage) -> c_int; + fn rust_uv_tcp_nodelay(handle: *uv_tcp_t, enable: c_int) -> c_int; + fn rust_uv_tcp_keepalive(handle: *uv_tcp_t, enable: c_int, delay: c_uint) -> c_int; + fn rust_uv_tcp_simultaneous_accepts(handle: *uv_tcp_t, enable: c_int) -> c_int; + + fn rust_uv_udp_init(loop_handle: *uv_loop_t, handle_ptr: *uv_udp_t) -> c_int; + fn rust_uv_udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int; + fn rust_uv_udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int; + fn rust_uv_udp_send(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, + buf_cnt: c_int, addr: *sockaddr_in, cb: uv_udp_send_cb) -> c_int; + fn rust_uv_udp_send6(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: 
*uv_buf_t, + buf_cnt: c_int, addr: *sockaddr_in6, cb: uv_udp_send_cb) -> c_int; + fn rust_uv_udp_recv_start(server: *uv_udp_t, + on_alloc: uv_alloc_cb, + on_recv: uv_udp_recv_cb) -> c_int; + fn rust_uv_udp_recv_stop(server: *uv_udp_t) -> c_int; + fn rust_uv_get_udp_handle_from_send_req(req: *uv_udp_send_t) -> *uv_udp_t; + fn rust_uv_udp_getsockname(handle: *uv_udp_t, name: *sockaddr_storage) -> c_int; + fn rust_uv_udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char, + interface_addr: *c_char, membership: c_int) -> c_int; + fn rust_uv_udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int; + fn rust_uv_udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int; + fn rust_uv_udp_set_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int; + fn rust_uv_udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int; + + fn rust_uv_is_ipv4_sockaddr(addr: *sockaddr) -> c_int; + fn rust_uv_is_ipv6_sockaddr(addr: *sockaddr) -> c_int; + fn rust_uv_malloc_sockaddr_storage() -> *sockaddr_storage; + fn rust_uv_free_sockaddr_storage(ss: *sockaddr_storage); + + fn rust_uv_listen(stream: *c_void, backlog: c_int, + cb: uv_connection_cb) -> c_int; + fn rust_uv_accept(server: *c_void, client: *c_void) -> c_int; + fn rust_uv_write(req: *c_void, stream: *c_void, buf_in: *uv_buf_t, buf_cnt: c_int, + cb: uv_write_cb) -> c_int; + fn rust_uv_read_start(stream: *c_void, + on_alloc: uv_alloc_cb, + on_read: uv_read_cb) -> c_int; + fn rust_uv_read_stop(stream: *c_void) -> c_int; + fn rust_uv_timer_init(loop_handle: *c_void, timer_handle: *uv_timer_t) -> c_int; + fn rust_uv_timer_start(timer_handle: *uv_timer_t, cb: uv_timer_cb, timeout: libc::uint64_t, + repeat: libc::uint64_t) -> c_int; + fn rust_uv_timer_stop(handle: *uv_timer_t) -> c_int; + fn rust_uv_fs_open(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, + flags: c_int, mode: c_int, cb: *u8) -> c_int; + fn rust_uv_fs_unlink(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, + cb: *u8) -> c_int; + fn rust_uv_fs_write(loop_ptr: *c_void, 
req: *uv_fs_t, fd: c_int, + buf: *c_void, len: c_uint, offset: i64, cb: *u8) -> c_int; + fn rust_uv_fs_read(loop_ptr: *c_void, req: *uv_fs_t, fd: c_int, + buf: *c_void, len: c_uint, offset: i64, cb: *u8) -> c_int; + fn rust_uv_fs_close(loop_ptr: *c_void, req: *uv_fs_t, fd: c_int, + cb: *u8) -> c_int; + fn rust_uv_fs_stat(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, cb: *u8) -> c_int; + fn rust_uv_fs_fstat(loop_ptr: *c_void, req: *uv_fs_t, fd: c_int, cb: *u8) -> c_int; + fn rust_uv_fs_mkdir(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, + mode: c_int, cb: *u8) -> c_int; + fn rust_uv_fs_rmdir(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, + cb: *u8) -> c_int; + fn rust_uv_fs_readdir(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, + flags: c_int, cb: *u8) -> c_int; + fn rust_uv_fs_req_cleanup(req: *uv_fs_t); + fn rust_uv_populate_uv_stat(req_in: *uv_fs_t, stat_out: *uv_stat_t); + fn rust_uv_get_result_from_fs_req(req: *uv_fs_t) -> c_int; + fn rust_uv_get_ptr_from_fs_req(req: *uv_fs_t) -> *libc::c_void; + fn rust_uv_get_loop_from_fs_req(req: *uv_fs_t) -> *uv_loop_t; + fn rust_uv_get_loop_from_getaddrinfo_req(req: *uv_fs_t) -> *uv_loop_t; + + fn rust_uv_get_stream_handle_from_connect_req(connect_req: *uv_connect_t) -> *uv_stream_t; + fn rust_uv_get_stream_handle_from_write_req(write_req: *uv_write_t) -> *uv_stream_t; + fn rust_uv_get_loop_for_uv_handle(handle: *c_void) -> *c_void; + fn rust_uv_get_data_for_uv_loop(loop_ptr: *c_void) -> *c_void; + fn rust_uv_set_data_for_uv_loop(loop_ptr: *c_void, data: *c_void); + fn rust_uv_get_data_for_uv_handle(handle: *c_void) -> *c_void; + fn rust_uv_set_data_for_uv_handle(handle: *c_void, data: *c_void); + fn rust_uv_get_data_for_req(req: *c_void) -> *c_void; + fn rust_uv_set_data_for_req(req: *c_void, data: *c_void); + fn rust_uv_get_base_from_buf(buf: uv_buf_t) -> *u8; + fn rust_uv_get_len_from_buf(buf: uv_buf_t) -> size_t; + fn rust_uv_getaddrinfo(loop_: *uv_loop_t, req: *uv_getaddrinfo_t, + getaddrinfo_cb: 
uv_getaddrinfo_cb, + node: *c_char, service: *c_char, + hints: *addrinfo) -> c_int; + fn rust_uv_freeaddrinfo(ai: *addrinfo); + fn rust_uv_spawn(loop_ptr: *c_void, outptr: *uv_process_t, + options: uv_process_options_t) -> c_int; + fn rust_uv_process_kill(p: *uv_process_t, signum: c_int) -> c_int; + fn rust_uv_process_pid(p: *uv_process_t) -> c_int; + fn rust_set_stdio_container_flags(c: *uv_stdio_container_t, flags: c_int); + fn rust_set_stdio_container_fd(c: *uv_stdio_container_t, fd: c_int); + fn rust_set_stdio_container_stream(c: *uv_stdio_container_t, + stream: *uv_stream_t); + fn rust_uv_pipe_init(loop_ptr: *c_void, p: *uv_pipe_t, ipc: c_int) -> c_int; + + fn rust_uv_pipe_open(pipe: *uv_pipe_t, file: c_int) -> c_int; + fn rust_uv_pipe_bind(pipe: *uv_pipe_t, name: *c_char) -> c_int; + fn rust_uv_pipe_connect(req: *uv_connect_t, handle: *uv_pipe_t, + name: *c_char, cb: uv_connect_cb); + fn rust_uv_tty_init(loop_ptr: *uv_loop_t, tty: *uv_tty_t, fd: c_int, + readable: c_int) -> c_int; + fn rust_uv_tty_set_mode(tty: *uv_tty_t, mode: c_int) -> c_int; + fn rust_uv_tty_get_winsize(tty: *uv_tty_t, width: *c_int, + height: *c_int) -> c_int; + fn rust_uv_guess_handle(fd: c_int) -> c_int; + + // XXX: see comments in addrinfo.rs + // These should all really be constants... 
+ //#[rust_stack] pub fn rust_SOCK_STREAM() -> c_int; + //#[rust_stack] pub fn rust_SOCK_DGRAM() -> c_int; + //#[rust_stack] pub fn rust_SOCK_RAW() -> c_int; + //#[rust_stack] pub fn rust_IPPROTO_UDP() -> c_int; + //#[rust_stack] pub fn rust_IPPROTO_TCP() -> c_int; + //#[rust_stack] pub fn rust_AI_ADDRCONFIG() -> c_int; + //#[rust_stack] pub fn rust_AI_ALL() -> c_int; + //#[rust_stack] pub fn rust_AI_CANONNAME() -> c_int; + //#[rust_stack] pub fn rust_AI_NUMERICHOST() -> c_int; + //#[rust_stack] pub fn rust_AI_NUMERICSERV() -> c_int; + //#[rust_stack] pub fn rust_AI_PASSIVE() -> c_int; + //#[rust_stack] pub fn rust_AI_V4MAPPED() -> c_int; + + fn rust_uv_signal_init(loop_: *uv_loop_t, handle: *uv_signal_t) -> c_int; + fn rust_uv_signal_start(handle: *uv_signal_t, + signal_cb: uv_signal_cb, + signum: c_int) -> c_int; + fn rust_uv_signal_stop(handle: *uv_signal_t) -> c_int; +} diff --git a/src/libstd/rt/crate_map.rs b/src/libstd/rt/crate_map.rs index dd71426938dc4..16b41276788a0 100644 --- a/src/libstd/rt/crate_map.rs +++ b/src/libstd/rt/crate_map.rs @@ -12,6 +12,8 @@ use container::MutableSet; use hashmap::HashSet; use option::{Some, None, Option}; use vec::ImmutableVector; +#[cfg(not(stage0))] +use rt::rtio::EventLoop; // Need to tell the linker on OS X to not barf on undefined symbols // and instead look them up at runtime, which we need to resolve @@ -25,18 +27,32 @@ pub struct ModEntry<'self> { log_level: *mut u32 } +#[cfg(stage0)] pub struct CrateMap<'self> { - priv version: i32, - priv entries: &'self [ModEntry<'self>], - priv children: &'self [&'self CrateMap<'self>] + version: i32, + entries: &'self [ModEntry<'self>], + children: &'self [&'self CrateMap<'self>] +} + +#[cfg(not(stage0))] +pub struct CrateMap<'self> { + version: i32, + entries: &'self [ModEntry<'self>], + children: &'self [&'self CrateMap<'self>], + event_loop_factory: Option ~EventLoop>, } #[cfg(not(windows))] pub fn get_crate_map() -> Option<&'static CrateMap<'static>> { extern { + 
#[cfg(stage0)] #[weak_linkage] #[link_name = "_rust_crate_map_toplevel"] static CRATE_MAP: CrateMap<'static>; + + #[cfg(not(stage0))] + #[crate_map] + static CRATE_MAP: CrateMap<'static>; } let ptr: (*CrateMap) = &'static CRATE_MAP; @@ -108,6 +124,7 @@ pub fn iter_crate_map<'a>(crate_map: &'a CrateMap<'a>, f: &fn(&ModEntry)) { #[cfg(test)] mod tests { + use option::None; use rt::crate_map::{CrateMap, ModEntry, iter_crate_map}; #[test] @@ -121,13 +138,15 @@ mod tests { let child_crate = CrateMap { version: 2, entries: entries, - children: [] + children: [], + event_loop_factory: None, }; let root_crate = CrateMap { version: 2, entries: [], - children: [&child_crate, &child_crate] + children: [&child_crate, &child_crate], + event_loop_factory: None, }; let mut cnt = 0; @@ -150,7 +169,8 @@ mod tests { ModEntry { name: "c::m1", log_level: &mut level2}, ModEntry { name: "c::m2", log_level: &mut level3}, ], - children: [] + children: [], + event_loop_factory: None, }; let child_crate1 = CrateMap { @@ -158,7 +178,8 @@ mod tests { entries: [ ModEntry { name: "t::f1", log_level: &mut 1}, ], - children: [&child_crate2] + children: [&child_crate2], + event_loop_factory: None, }; let root_crate = CrateMap { @@ -166,7 +187,8 @@ mod tests { entries: [ ModEntry { name: "t::f2", log_level: &mut 0}, ], - children: [&child_crate1] + children: [&child_crate1], + event_loop_factory: None, }; let mut cnt = 0; diff --git a/src/libstd/rt/io/net/tcp.rs b/src/libstd/rt/io/net/tcp.rs index 6314c0755a0bc..fb7c6766fd84c 100644 --- a/src/libstd/rt/io/net/tcp.rs +++ b/src/libstd/rt/io/net/tcp.rs @@ -383,7 +383,8 @@ mod test { // on windows assert!(e.kind == ConnectionReset || e.kind == BrokenPipe || - e.kind == ConnectionAborted); + e.kind == ConnectionAborted, + "unknown error: {:?}", e); stop = true; }).inside { stream.write(buf); @@ -420,7 +421,8 @@ mod test { // on windows assert!(e.kind == ConnectionReset || e.kind == BrokenPipe || - e.kind == ConnectionAborted); + e.kind == 
ConnectionAborted, + "unknown error: {:?}", e); stop = true; }).inside { stream.write(buf); diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 21fdf0e50a112..d8d07f140217d 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -68,7 +68,6 @@ use rt::sched::{Scheduler, Shutdown}; use rt::sleeper_list::SleeperList; use rt::task::UnwindResult; use rt::task::{Task, SchedTask, GreenTask, Sched}; -use rt::uv::uvio::UvEventLoop; use unstable::atomics::{AtomicInt, AtomicBool, SeqCst}; use unstable::sync::UnsafeArc; use vec::{OwnedVector, MutableVector, ImmutableVector}; @@ -123,6 +122,7 @@ pub mod io; pub mod rtio; /// libuv and default rtio implementation. +#[cfg(stage0)] pub mod uv; /// The Local trait for types that are accessible via thread-local @@ -287,7 +287,7 @@ fn run_(main: ~fn(), use_main_sched: bool) -> int { rtdebug!("inserting a regular scheduler"); // Every scheduler is driven by an I/O event loop. - let loop_ = ~UvEventLoop::new() as ~rtio::EventLoop; + let loop_ = new_event_loop(); let mut sched = ~Scheduler::new(loop_, work_queue.clone(), work_queues.clone(), @@ -311,7 +311,7 @@ fn run_(main: ~fn(), use_main_sched: bool) -> int { // set. 
let work_queue = WorkQueue::new(); - let main_loop = ~UvEventLoop::new() as ~rtio::EventLoop; + let main_loop = new_event_loop(); let mut main_sched = ~Scheduler::new_special(main_loop, work_queue, work_queues.clone(), @@ -462,3 +462,29 @@ pub fn in_green_task_context() -> bool { } } } + +#[cfg(stage0)] +pub fn new_event_loop() -> ~rtio::EventLoop { + use rt::uv::uvio::UvEventLoop; + ~UvEventLoop::new() as ~rtio::EventLoop +} + +#[cfg(not(stage0))] +pub fn new_event_loop() -> ~rtio::EventLoop { + #[fixed_stack_segment]; #[allow(cstack)]; + + match crate_map::get_crate_map() { + None => {} + Some(map) => { + match map.event_loop_factory { + None => {} + Some(factory) => return factory() + } + } + } + + // If the crate map didn't specify a factory to create an event loop, then + // instead just use a basic event loop missing all I/O services to at least + // get the scheduler running. + return basic::event_loop(); +} diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index 3ee822ced2dd4..4b0333634024b 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -1013,7 +1013,6 @@ mod test { #[test] fn test_schedule_home_states() { - use rt::sleeper_list::SleeperList; use rt::work_queue::WorkQueue; use rt::sched::Shutdown; diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index 5f78b9fc9591e..55a8db89d3c05 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -24,13 +24,12 @@ use rand; use result::{Result, Ok, Err}; use rt::basic; use rt::comm::oneshot; -use rt::rtio::EventLoop; +use rt::new_event_loop; use rt::sched::Scheduler; use rt::sleeper_list::SleeperList; use rt::task::Task; use rt::task::UnwindResult; use rt::thread::Thread; -use rt::uv::uvio::UvEventLoop; use rt::work_queue::WorkQueue; use unstable::{run_in_bare_thread}; use vec::{OwnedVector, MutableVector, ImmutableVector}; @@ -40,7 +39,7 @@ pub fn new_test_uv_sched() -> Scheduler { let queue = WorkQueue::new(); let queues = ~[queue.clone()]; - let mut sched = 
Scheduler::new(~UvEventLoop::new() as ~EventLoop, + let mut sched = Scheduler::new(new_event_loop(), queue, queues, SleeperList::new()); @@ -237,7 +236,7 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { } for i in range(0u, nthreads) { - let loop_ = ~UvEventLoop::new() as ~EventLoop; + let loop_ = new_event_loop(); let mut sched = ~Scheduler::new(loop_, work_queues[i].clone(), work_queues.clone(), diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index a8cde826125e3..497d790b4a9c9 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -237,6 +237,12 @@ impl EventLoop for UvEventLoop { } } +#[cfg(not(stage0))] +#[lang = "event_loop_factory"] +pub extern "C" fn new_loop() -> ~EventLoop { + ~UvEventLoop::new() as ~EventLoop +} + pub struct UvPausibleIdleCallback { priv watcher: IdleWatcher, priv idle_flag: bool, diff --git a/src/libstd/std.rs b/src/libstd/std.rs index 069a390f0103e..3a96cfb11718d 100644 --- a/src/libstd/std.rs +++ b/src/libstd/std.rs @@ -69,8 +69,13 @@ they contained the following prologue: #[deny(non_camel_case_types)]; #[deny(missing_doc)]; +// When testing libstd, bring in libuv as the I/O backend so tests can print +// things and all of the std::rt::io tests have an I/O interface to run on top +// of +#[cfg(test)] extern mod rustuv(vers = "0.9-pre"); + // Make extra accessible for benchmarking -#[cfg(test)] extern mod extra(vers="0.9-pre"); +#[cfg(test)] extern mod extra(vers = "0.9-pre"); // Make std testable by not duplicating lang items. 
See #2912 #[cfg(test)] extern mod realstd(name = "std"); diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs index 2cda38f8a30ba..a08bf8f3147b8 100644 --- a/src/libstd/task/spawn.rs +++ b/src/libstd/task/spawn.rs @@ -80,17 +80,14 @@ use comm::{Chan, GenericChan, oneshot}; use container::MutableMap; use hashmap::{HashSet, HashSetMoveIterator}; use local_data; -use rt::in_green_task_context; use rt::local::Local; -use rt::sched::Scheduler; -use rt::KillHandle; -use rt::work_queue::WorkQueue; -use rt::rtio::EventLoop; -use rt::thread::Thread; +use rt::sched::{Scheduler, Shutdown, TaskFromFriend}; use rt::task::{Task, Sched}; use rt::task::{UnwindReasonLinked, UnwindReasonStr}; use rt::task::{UnwindResult, Success, Failure}; -use rt::uv::uvio::UvEventLoop; +use rt::thread::Thread; +use rt::work_queue::WorkQueue; +use rt::{in_green_task_context, new_event_loop, KillHandle}; use send_str::IntoSendStr; use task::SingleThreaded; use task::TaskOpts; @@ -621,8 +618,7 @@ pub fn spawn_raw(mut opts: TaskOpts, f: ~fn()) { let work_queue = WorkQueue::new(); // Create a new scheduler to hold the new task - let new_loop = ~UvEventLoop::new() as ~EventLoop; - let mut new_sched = ~Scheduler::new_special(new_loop, + let mut new_sched = ~Scheduler::new_special(new_event_loop(), work_queue, (*sched).work_queues.clone(), (*sched).sleeper_list.clone(),