librustuv: Remove all non-proc uses of `do` from libextra and `librustuv`.
Patrick Walton 2013-11-20 15:46:49 -08:00
parent 1eca34de7d
commit a61a3678eb
33 changed files with 456 additions and 449 deletions
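The rewrite is mechanical throughout: every remaining non-`proc` use of the `do` expression, `do f(args) |x| { ... }`, becomes an ordinary call with the closure passed as the last argument, `f(args, |x| { ... })`. A representative before/after, excerpted from the `extra::arc` test hunks below (shown as a fragment, not a self-contained program, since the surrounding test and the `arc` binding are elided):

```rust
// Old `do` call syntax, removed by this commit:
do arc.read |num| {
    assert_eq!(*num, 10);
}

// Equivalent plain closure argument, as rewritten throughout this diff:
arc.read(|num| {
    assert_eq!(*num, 10);
})
```

Zero-argument blocks follow the same pattern, e.g. `do 10.times { ... }` becomes `10.times(|| { ... })`.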

View file

@ -224,11 +224,11 @@ impl<T:Send> MutexArc<T> {
let state = self.x.get();
// Borrowck would complain about this if the function were
// not already unsafe. See borrow_rwlock, far below.
do (&(*state).lock).lock {
(&(*state).lock).lock(|| {
check_poison(true, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data)
}
})
}
/// As unsafe_access(), but with a condvar, as sync::mutex.lock_cond().
@ -237,14 +237,14 @@ impl<T:Send> MutexArc<T> {
blk: |x: &mut T, c: &Condvar| -> U)
-> U {
let state = self.x.get();
do (&(*state).lock).lock_cond |cond| {
(&(*state).lock).lock_cond(|cond| {
check_poison(true, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data,
&Condvar {is_mutex: true,
failed: &mut (*state).failed,
cond: cond })
}
})
}
/**
@ -390,11 +390,11 @@ impl<T:Freeze + Send> RWArc<T> {
pub fn write<U>(&self, blk: |x: &mut T| -> U) -> U {
unsafe {
let state = self.x.get();
do (*borrow_rwlock(state)).write {
(*borrow_rwlock(state)).write(|| {
check_poison(false, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data)
}
})
}
}
@ -405,14 +405,14 @@ impl<T:Freeze + Send> RWArc<T> {
-> U {
unsafe {
let state = self.x.get();
do (*borrow_rwlock(state)).write_cond |cond| {
(*borrow_rwlock(state)).write_cond(|cond| {
check_poison(false, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data,
&Condvar {is_mutex: false,
failed: &mut (*state).failed,
cond: cond})
}
})
}
}
@ -428,10 +428,10 @@ impl<T:Freeze + Send> RWArc<T> {
pub fn read<U>(&self, blk: |x: &T| -> U) -> U {
unsafe {
let state = self.x.get();
do (*state).lock.read {
(*state).lock.read(|| {
check_poison(false, (*state).failed);
blk(&(*state).data)
}
})
}
}
@ -458,14 +458,14 @@ impl<T:Freeze + Send> RWArc<T> {
pub fn write_downgrade<U>(&self, blk: |v: RWWriteMode<T>| -> U) -> U {
unsafe {
let state = self.x.get();
do (*borrow_rwlock(state)).write_downgrade |write_mode| {
(*borrow_rwlock(state)).write_downgrade(|write_mode| {
check_poison(false, (*state).failed);
blk(RWWriteMode {
data: &mut (*state).data,
token: write_mode,
poison: PoisonOnFail(&mut (*state).failed)
})
}
})
}
}
@ -544,9 +544,7 @@ impl<'self, T:Freeze + Send> RWWriteMode<'self, T> {
token: ref token,
poison: _
} => {
do token.write {
blk(data)
}
token.write(|| blk(data))
}
}
}
@ -561,7 +559,7 @@ impl<'self, T:Freeze + Send> RWWriteMode<'self, T> {
token: ref token,
poison: ref poison
} => {
do token.write_cond |cond| {
token.write_cond(|cond| {
unsafe {
let cvar = Condvar {
is_mutex: false,
@ -570,7 +568,7 @@ impl<'self, T:Freeze + Send> RWWriteMode<'self, T> {
};
blk(data, &cvar)
}
}
})
}
}
}
@ -584,7 +582,7 @@ impl<'self, T:Freeze + Send> RWReadMode<'self, T> {
data: data,
token: ref token
} => {
do token.read { blk(data) }
token.read(|| blk(data))
}
}
}
@ -634,19 +632,19 @@ mod tests {
do task::spawn || {
// wait until parent gets in
p.take().recv();
do arc2.access_cond |state, cond| {
arc2.access_cond(|state, cond| {
*state = true;
cond.signal();
}
})
}
do arc.access_cond |state, cond| {
arc.access_cond(|state, cond| {
c.take().send(());
assert!(!*state);
while !*state {
cond.wait();
}
}
})
}
#[test] #[should_fail]
@ -657,19 +655,19 @@ mod tests {
do spawn {
let _ = p.recv();
do arc2.access_cond |one, cond| {
arc2.access_cond(|one, cond| {
cond.signal();
// Parent should fail when it wakes up.
assert_eq!(*one, 0);
}
})
}
do arc.access_cond |one, cond| {
arc.access_cond(|one, cond| {
c.send(());
while *one == 1 {
cond.wait();
}
}
})
}
#[test] #[should_fail]
@ -677,13 +675,13 @@ mod tests {
let arc = ~MutexArc::new(1);
let arc2 = ~arc.clone();
do task::try || {
do arc2.access |one| {
arc2.access(|one| {
assert_eq!(*one, 2);
}
})
};
do arc.access |one| {
arc.access(|one| {
assert_eq!(*one, 1);
}
})
}
#[test] #[should_fail]
@ -692,10 +690,10 @@ mod tests {
let arc2 = ~(&arc).clone();
let (p, c) = comm::stream();
do task::spawn {
do arc2.access |one| {
arc2.access(|one| {
c.send(());
assert!(*one == 2);
}
})
}
let _ = p.recv();
let one = arc.unwrap();
@ -710,11 +708,11 @@ mod tests {
let arc = ~MutexArc::new(1);
let arc2 = ~MutexArc::new(*arc);
do task::spawn || {
do (*arc2).unsafe_access |mutex| {
do (*mutex).access |one| {
(*arc2).unsafe_access(|mutex| {
(*mutex).access(|one| {
assert!(*one == 1);
}
}
})
})
};
}
}
@ -724,13 +722,13 @@ mod tests {
let arc = RWArc::new(1);
let arc2 = arc.clone();
do task::try {
do arc2.write |one| {
arc2.write(|one| {
assert_eq!(*one, 2);
}
})
};
do arc.read |one| {
arc.read(|one| {
assert_eq!(*one, 1);
}
})
}
#[test] #[should_fail]
@ -738,70 +736,70 @@ mod tests {
let arc = RWArc::new(1);
let arc2 = arc.clone();
do task::try {
do arc2.write |one| {
arc2.write(|one| {
assert_eq!(*one, 2);
}
})
};
do arc.write |one| {
arc.write(|one| {
assert_eq!(*one, 1);
}
})
}
#[test] #[should_fail]
fn test_rw_arc_poison_dw() {
let arc = RWArc::new(1);
let arc2 = arc.clone();
do task::try {
do arc2.write_downgrade |mut write_mode| {
do write_mode.write |one| {
arc2.write_downgrade(|mut write_mode| {
write_mode.write(|one| {
assert_eq!(*one, 2);
}
}
})
})
};
do arc.write |one| {
arc.write(|one| {
assert_eq!(*one, 1);
}
})
}
#[test]
fn test_rw_arc_no_poison_rr() {
let arc = RWArc::new(1);
let arc2 = arc.clone();
do task::try {
do arc2.read |one| {
arc2.read(|one| {
assert_eq!(*one, 2);
}
})
};
do arc.read |one| {
arc.read(|one| {
assert_eq!(*one, 1);
}
})
}
#[test]
fn test_rw_arc_no_poison_rw() {
let arc = RWArc::new(1);
let arc2 = arc.clone();
do task::try {
do arc2.read |one| {
arc2.read(|one| {
assert_eq!(*one, 2);
}
})
};
do arc.write |one| {
arc.write(|one| {
assert_eq!(*one, 1);
}
})
}
#[test]
fn test_rw_arc_no_poison_dr() {
let arc = RWArc::new(1);
let arc2 = arc.clone();
do task::try {
do arc2.write_downgrade |write_mode| {
arc2.write_downgrade(|write_mode| {
let read_mode = arc2.downgrade(write_mode);
do read_mode.read |one| {
read_mode.read(|one| {
assert_eq!(*one, 2);
}
}
})
})
};
do arc.write |one| {
arc.write(|one| {
assert_eq!(*one, 1);
}
})
}
#[test]
fn test_rw_arc() {
@ -810,29 +808,29 @@ mod tests {
let (p, c) = comm::stream();
do task::spawn {
do arc2.write |num| {
do 10.times {
arc2.write(|num| {
10.times(|| {
let tmp = *num;
*num = -1;
task::deschedule();
*num = tmp + 1;
}
});
c.send(());
}
})
}
// Readers try to catch the writer in the act
let mut children = ~[];
do 5.times {
5.times(|| {
let arc3 = arc.clone();
let mut builder = task::task();
children.push(builder.future_result());
do builder.spawn {
do arc3.read |num| {
arc3.read(|num| {
assert!(*num >= 0);
})
}
}
}
});
// Wait for children to pass their asserts
for r in children.iter() {
@ -841,9 +839,9 @@ mod tests {
// Wait for writer to finish
p.recv();
do arc.read |num| {
arc.read(|num| {
assert_eq!(*num, 10);
}
})
}
#[test]
fn test_rw_downgrade() {
@ -857,42 +855,42 @@ mod tests {
// Reader tasks
let mut reader_convos = ~[];
do 10.times {
10.times(|| {
let ((rp1, rc1), (rp2, rc2)) = (comm::stream(), comm::stream());
reader_convos.push((rc1, rp2));
let arcn = arc.clone();
do task::spawn {
rp1.recv(); // wait for downgrader to give go-ahead
do arcn.read |state| {
arcn.read(|state| {
assert_eq!(*state, 31337);
rc2.send(());
})
}
}
}
});
// Writer task
let arc2 = arc.clone();
let ((wp1, wc1), (wp2, wc2)) = (comm::stream(), comm::stream());
do task::spawn || {
wp1.recv();
do arc2.write_cond |state, cond| {
arc2.write_cond(|state, cond| {
assert_eq!(*state, 0);
*state = 42;
cond.signal();
}
});
wp1.recv();
do arc2.write |state| {
arc2.write(|state| {
// This shouldn't happen until after the downgrade read
// section, and all other readers, finish.
assert_eq!(*state, 31337);
*state = 42;
}
});
wc2.send(());
}
// Downgrader (us)
do arc.write_downgrade |mut write_mode| {
do write_mode.write_cond |state, cond| {
arc.write_downgrade(|mut write_mode| {
write_mode.write_cond(|state, cond| {
wc1.send(()); // send to another writer who will wake us up
while *state == 0 {
cond.wait();
@ -903,17 +901,17 @@ mod tests {
for &(ref rc, _) in reader_convos.iter() {
rc.send(())
}
}
});
let read_mode = arc.downgrade(write_mode);
do read_mode.read |state| {
read_mode.read(|state| {
// complete handshake with other readers
for &(_, ref rp) in reader_convos.iter() {
rp.recv()
}
wc1.send(()); // tell writer to try again
assert_eq!(*state, 31337);
}
}
});
});
wp2.recv(); // complete handshake with writer
}
@ -934,42 +932,42 @@ mod tests {
// writer task
let xw = x.clone();
do task::spawn {
do xw.write_cond |state, c| {
xw.write_cond(|state, c| {
wc.send(()); // tell downgrader it's ok to go
c.wait();
// The core of the test is here: the condvar reacquire path
// must involve order_lock, so that it cannot race with a reader
// trying to receive the "reader cloud lock hand-off".
*state = false;
}
})
}
wp.recv(); // wait for writer to get in
do x.write_downgrade |mut write_mode| {
do write_mode.write_cond |state, c| {
x.write_downgrade(|mut write_mode| {
write_mode.write_cond(|state, c| {
assert!(*state);
// make writer contend in the cond-reacquire path
c.signal();
}
});
// make a reader task to trigger the "reader cloud lock" handoff
let xr = x.clone();
let (rp, rc) = comm::stream();
do task::spawn {
rc.send(());
do xr.read |_state| { }
xr.read(|_state| { })
}
rp.recv(); // wait for reader task to exist
let read_mode = x.downgrade(write_mode);
do read_mode.read |state| {
read_mode.read(|state| {
// if writer mistakenly got in, make sure it mutates state
// before we assert on it
do 5.times { task::deschedule(); }
5.times(|| task::deschedule());
// make sure writer didn't get in.
assert!(*state);
}
}
})
});
}
#[test]
fn test_rw_write_cond_downgrade_read_race() {
@ -977,6 +975,6 @@ mod tests {
// helped to expose the race nearly 100% of the time... but adding
// deschedules in the intuitively-right locations made it even less likely,
// and I wasn't sure why :( . This is a mediocre "next best" option.
do 8.times { test_rw_write_cond_downgrade_read_race_helper() }
8.times(|| test_rw_write_cond_downgrade_read_race_helper());
}
}

View file

@ -96,12 +96,12 @@ impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
do self.chunks.each |chunk| {
self.chunks.each(|chunk| {
if !chunk.is_pod {
destroy_chunk(chunk);
}
true
};
});
}
}
}
@ -282,10 +282,10 @@ fn test_arena_destructors() {
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
do arena.alloc { @i };
arena.alloc(|| @i);
// Allocate something with funny size and alignment, to keep
// things interesting.
do arena.alloc { [0u8, 1u8, 2u8] };
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}

View file

@ -166,7 +166,9 @@ impl BigBitv {
}
#[inline]
pub fn negate(&mut self) { do self.each_storage |w| { *w = !*w; true }; }
pub fn negate(&mut self) {
self.each_storage(|w| { *w = !*w; true });
}
#[inline]
pub fn union(&mut self, b: &BigBitv, nbits: uint) -> bool {
@ -359,7 +361,9 @@ impl Bitv {
pub fn clear(&mut self) {
match self.rep {
Small(ref mut b) => b.clear(),
Big(ref mut s) => { do s.each_storage() |w| { *w = 0u; true }; }
Big(ref mut s) => {
s.each_storage(|w| { *w = 0u; true });
}
}
}
@ -368,7 +372,9 @@ impl Bitv {
pub fn set_all(&mut self) {
match self.rep {
Small(ref mut b) => b.set_all(),
Big(ref mut s) => { do s.each_storage() |w| { *w = !0u; true }; }
Big(ref mut s) => {
s.each_storage(|w| { *w = !0u; true });
}
}
}
@ -377,7 +383,9 @@ impl Bitv {
pub fn negate(&mut self) {
match self.rep {
Small(ref mut b) => b.negate(),
Big(ref mut s) => { do s.each_storage() |w| { *w = !*w; true }; }
Big(ref mut s) => {
s.each_storage(|w| { *w = !*w; true });
}
}
}
@ -651,10 +659,10 @@ impl BitvSet {
/// Creates a new bit vector set from the given bit vector
pub fn from_bitv(bitv: Bitv) -> BitvSet {
let mut size = 0;
do bitv.ones |_| {
bitv.ones(|_| {
size += 1;
true
};
});
let Bitv{rep, _} = bitv;
match rep {
Big(b) => BitvSet{ size: size, bitv: b },
@ -786,7 +794,7 @@ impl Container for BitvSet {
impl Mutable for BitvSet {
fn clear(&mut self) {
do self.bitv.each_storage |w| { *w = 0; true };
self.bitv.each_storage(|w| { *w = 0; true });
self.size = 0;
}
}
@ -797,9 +805,7 @@ impl Set<uint> for BitvSet {
}
fn is_disjoint(&self, other: &BitvSet) -> bool {
do self.intersection(other) |_| {
false
}
self.intersection(other, |_| false)
}
fn is_subset(&self, other: &BitvSet) -> bool {

View file

@ -114,10 +114,10 @@ impl<T: Send> GenericPort<T> for SyncPort<T> {
}
fn try_recv(&self) -> Option<T> {
do self.duplex_stream.try_recv().map |val| {
self.duplex_stream.try_recv().map(|val| {
self.duplex_stream.try_send(());
val
}
})
}
}

View file

@ -165,14 +165,14 @@ impl<T> DList<T> {
/// Remove the first Node and return it, or None if the list is empty
#[inline]
fn pop_front_node(&mut self) -> Option<~Node<T>> {
do self.list_head.take().map |mut front_node| {
self.list_head.take().map(|mut front_node| {
self.length -= 1;
match front_node.next.take() {
Some(node) => self.list_head = link_with_prev(node, Rawlink::none()),
None => self.list_tail = Rawlink::none()
}
front_node
}
})
}
/// Add a Node last in the list
@ -191,14 +191,14 @@ impl<T> DList<T> {
/// Remove the last Node and return it, or None if the list is empty
#[inline]
fn pop_back_node(&mut self) -> Option<~Node<T>> {
do self.list_tail.resolve().map_default(None) |tail| {
self.list_tail.resolve().map_default(None, |tail| {
self.length -= 1;
self.list_tail = tail.prev;
match tail.prev.resolve() {
None => self.list_head.take(),
Some(tail_prev) => tail_prev.next.take()
}
}
})
}
}
@ -270,9 +270,9 @@ impl<T> DList<T> {
/// If the list is empty, do nothing.
#[inline]
pub fn rotate_forward(&mut self) {
do self.pop_back_node().map |tail| {
self.pop_back_node().map(|tail| {
self.push_front_node(tail)
};
});
}
/// Move the first element to the back of the list.
@ -280,9 +280,9 @@ impl<T> DList<T> {
/// If the list is empty, do nothing.
#[inline]
pub fn rotate_backward(&mut self) {
do self.pop_front_node().map |head| {
self.pop_front_node().map(|head| {
self.push_back_node(head)
};
});
}
/// Add all elements from `other` to the end of the list
@ -444,11 +444,11 @@ impl<'self, A> Iterator<&'self A> for DListIterator<'self, A> {
if self.nelem == 0 {
return None;
}
do self.head.as_ref().map |head| {
self.head.as_ref().map(|head| {
self.nelem -= 1;
self.head = &head.next;
&head.value
}
})
}
#[inline]
@ -464,11 +464,11 @@ impl<'self, A> DoubleEndedIterator<&'self A> for DListIterator<'self, A> {
return None;
}
let tmp = self.tail.resolve_immut(); // FIXME: #3511: shouldn't need variable
do tmp.as_ref().map |prev| {
tmp.as_ref().map(|prev| {
self.nelem -= 1;
self.tail = prev.prev;
&prev.value
}
})
}
}
@ -480,14 +480,14 @@ impl<'self, A> Iterator<&'self mut A> for MutDListIterator<'self, A> {
if self.nelem == 0 {
return None;
}
do self.head.resolve().map |next| {
self.head.resolve().map(|next| {
self.nelem -= 1;
self.head = match next.next {
Some(ref mut node) => Rawlink::some(&mut **node),
None => Rawlink::none(),
};
&mut next.value
}
})
}
#[inline]
@ -502,11 +502,11 @@ impl<'self, A> DoubleEndedIterator<&'self mut A> for MutDListIterator<'self, A>
if self.nelem == 0 {
return None;
}
do self.tail.resolve().map |prev| {
self.tail.resolve().map(|prev| {
self.nelem -= 1;
self.tail = prev.prev;
&mut prev.value
}
})
}
}

View file

@ -546,24 +546,24 @@ pub mod reader {
fn read_option<T>(&mut self, f: |&mut Decoder, bool| -> T) -> T {
debug!("read_option()");
do self.read_enum("Option") |this| {
do this.read_enum_variant(["None", "Some"]) |this, idx| {
self.read_enum("Option", |this| {
this.read_enum_variant(["None", "Some"], |this, idx| {
match idx {
0 => f(this, false),
1 => f(this, true),
_ => fail!(),
}
}
}
})
})
}
fn read_seq<T>(&mut self, f: |&mut Decoder, uint| -> T) -> T {
debug!("read_seq()");
do self.push_doc(EsVec) |d| {
self.push_doc(EsVec, |d| {
let len = d._next_uint(EsVecLen);
debug!(" len={}", len);
f(d, len)
}
})
}
fn read_seq_elt<T>(&mut self, idx: uint, f: |&mut Decoder| -> T)
@ -574,11 +574,11 @@ pub mod reader {
fn read_map<T>(&mut self, f: |&mut Decoder, uint| -> T) -> T {
debug!("read_map()");
do self.push_doc(EsMap) |d| {
self.push_doc(EsMap, |d| {
let len = d._next_uint(EsMapLen);
debug!(" len={}", len);
f(d, len)
}
})
}
fn read_map_elt_key<T>(&mut self, idx: uint, f: |&mut Decoder| -> T)
@ -687,21 +687,21 @@ pub mod writer {
}
pub fn wr_tagged_u64(&mut self, tag_id: uint, v: u64) {
do u64_to_be_bytes(v, 8u) |v| {
u64_to_be_bytes(v, 8u, |v| {
self.wr_tagged_bytes(tag_id, v);
}
})
}
pub fn wr_tagged_u32(&mut self, tag_id: uint, v: u32) {
do u64_to_be_bytes(v as u64, 4u) |v| {
u64_to_be_bytes(v as u64, 4u, |v| {
self.wr_tagged_bytes(tag_id, v);
}
})
}
pub fn wr_tagged_u16(&mut self, tag_id: uint, v: u16) {
do u64_to_be_bytes(v as u64, 2u) |v| {
u64_to_be_bytes(v as u64, 2u, |v| {
self.wr_tagged_bytes(tag_id, v);
}
})
}
pub fn wr_tagged_u8(&mut self, tag_id: uint, v: u8) {
@ -709,21 +709,21 @@ pub mod writer {
}
pub fn wr_tagged_i64(&mut self, tag_id: uint, v: i64) {
do u64_to_be_bytes(v as u64, 8u) |v| {
u64_to_be_bytes(v as u64, 8u, |v| {
self.wr_tagged_bytes(tag_id, v);
}
})
}
pub fn wr_tagged_i32(&mut self, tag_id: uint, v: i32) {
do u64_to_be_bytes(v as u64, 4u) |v| {
u64_to_be_bytes(v as u64, 4u, |v| {
self.wr_tagged_bytes(tag_id, v);
}
})
}
pub fn wr_tagged_i16(&mut self, tag_id: uint, v: i16) {
do u64_to_be_bytes(v as u64, 2u) |v| {
u64_to_be_bytes(v as u64, 2u, |v| {
self.wr_tagged_bytes(tag_id, v);
}
})
}
pub fn wr_tagged_i8(&mut self, tag_id: uint, v: i8) {

View file

@ -47,7 +47,7 @@ static TINFL_FLAG_PARSE_ZLIB_HEADER : c_int = 0x1; // parse zlib header and adle
static TDEFL_WRITE_ZLIB_HEADER : c_int = 0x01000; // write zlib header and adler32 checksum
fn deflate_bytes_internal(bytes: &[u8], flags: c_int) -> ~[u8] {
do bytes.as_imm_buf |b, len| {
bytes.as_imm_buf(|b, len| {
unsafe {
let mut outsz : size_t = 0;
let res =
@ -61,7 +61,7 @@ fn deflate_bytes_internal(bytes: &[u8], flags: c_int) -> ~[u8] {
libc::free(res);
out
}
}
})
}
pub fn deflate_bytes(bytes: &[u8]) -> ~[u8] {
@ -73,7 +73,7 @@ pub fn deflate_bytes_zlib(bytes: &[u8]) -> ~[u8] {
}
fn inflate_bytes_internal(bytes: &[u8], flags: c_int) -> ~[u8] {
do bytes.as_imm_buf |b, len| {
bytes.as_imm_buf(|b, len| {
unsafe {
let mut outsz : size_t = 0;
let res =
@ -87,7 +87,7 @@ fn inflate_bytes_internal(bytes: &[u8], flags: c_int) -> ~[u8] {
libc::free(res);
out
}
}
})
}
pub fn inflate_bytes(bytes: &[u8]) -> ~[u8] {

View file

@ -726,9 +726,9 @@ pub mod groups {
// here we just need to indent the start of the description
let rowlen = row.char_len();
if rowlen < 24 {
do (24 - rowlen).times {
(24 - rowlen).times(|| {
row.push_char(' ')
}
})
} else {
row.push_str(desc_sep)
}
@ -742,10 +742,10 @@ pub mod groups {
// FIXME: #5516 should be graphemes not codepoints
let mut desc_rows = ~[];
do each_split_within(desc_normalized_whitespace, 54) |substr| {
each_split_within(desc_normalized_whitespace, 54, |substr| {
desc_rows.push(substr.to_owned());
true
};
});
// FIXME: #5516 should be graphemes not codepoints
// wrapped description
@ -840,7 +840,7 @@ pub mod groups {
fn test_split_within() {
fn t(s: &str, i: uint, u: &[~str]) {
let mut v = ~[];
do each_split_within(s, i) |s| { v.push(s.to_owned()); true };
each_split_within(s, i, |s| { v.push(s.to_owned()); true });
assert!(v.iter().zip(u.iter()).all(|(a,b)| a == b));
}
t("", 0, []);

View file

@ -310,9 +310,9 @@ impl Pattern {
*/
pub fn matches_path(&self, path: &Path) -> bool {
// FIXME (#9639): This needs to handle non-utf8 paths
do path.as_str().map_default(false) |s| {
path.as_str().map_default(false, |s| {
self.matches(s)
}
})
}
/**
@ -328,9 +328,9 @@ impl Pattern {
*/
pub fn matches_path_with(&self, path: &Path, options: MatchOptions) -> bool {
// FIXME (#9639): This needs to handle non-utf8 paths
do path.as_str().map_default(false) |s| {
path.as_str().map_default(false, |s| {
self.matches_with(s, options)
}
})
}
fn matches_from(&self,

View file

@ -79,9 +79,7 @@ fn escape_str(s: &str) -> ~str {
fn spaces(n: uint) -> ~str {
let mut ss = ~"";
do n.times {
ss.push_str(" ");
}
n.times(|| ss.push_str(" "));
return ss;
}

View file

@ -46,7 +46,7 @@ pub fn from_vec<T:Clone + 'static>(v: &[T]) -> @List<T> {
*/
pub fn foldl<T:Clone,U>(z: T, ls: @List<U>, f: |&T, &U| -> T) -> T {
let mut accum: T = z;
do iter(ls) |elt| { accum = f(&accum, elt);}
iter(ls, |elt| accum = f(&accum, elt));
accum
}
@ -73,9 +73,9 @@ pub fn find<T:Clone>(ls: @List<T>, f: |&T| -> bool) -> Option<T> {
/// Returns true if a list contains an element with the given value
pub fn has<T:Eq>(ls: @List<T>, elt: T) -> bool {
let mut found = false;
do each(ls) |e| {
each(ls, |e| {
if *e == elt { found = true; false } else { true }
};
});
return found;
}

View file

@ -158,12 +158,12 @@ impl Orderable for BigUint {
impl BitAnd<BigUint, BigUint> for BigUint {
fn bitand(&self, other: &BigUint) -> BigUint {
let new_len = num::min(self.data.len(), other.data.len());
let anded = do vec::from_fn(new_len) |i| {
let anded = vec::from_fn(new_len, |i| {
// i will never be less than the size of either data vector
let ai = self.data[i];
let bi = other.data[i];
ai & bi
};
});
return BigUint::new(anded);
}
}
@ -171,11 +171,11 @@ impl BitAnd<BigUint, BigUint> for BigUint {
impl BitOr<BigUint, BigUint> for BigUint {
fn bitor(&self, other: &BigUint) -> BigUint {
let new_len = num::max(self.data.len(), other.data.len());
let ored = do vec::from_fn(new_len) |i| {
let ored = vec::from_fn(new_len, |i| {
let ai = if i < self.data.len() { self.data[i] } else { 0 };
let bi = if i < other.data.len() { other.data[i] } else { 0 };
ai | bi
};
});
return BigUint::new(ored);
}
}
@ -183,11 +183,11 @@ impl BitOr<BigUint, BigUint> for BigUint {
impl BitXor<BigUint, BigUint> for BigUint {
fn bitxor(&self, other: &BigUint) -> BigUint {
let new_len = num::max(self.data.len(), other.data.len());
let xored = do vec::from_fn(new_len) |i| {
let xored = vec::from_fn(new_len, |i| {
let ai = if i < self.data.len() { self.data[i] } else { 0 };
let bi = if i < other.data.len() { other.data[i] } else { 0 };
ai ^ bi
};
});
return BigUint::new(xored);
}
}
@ -230,7 +230,7 @@ impl Add<BigUint, BigUint> for BigUint {
let new_len = num::max(self.data.len(), other.data.len());
let mut carry = 0;
let mut sum = do vec::from_fn(new_len) |i| {
let mut sum = vec::from_fn(new_len, |i| {
let ai = if i < self.data.len() { self.data[i] } else { 0 };
let bi = if i < other.data.len() { other.data[i] } else { 0 };
let (hi, lo) = BigDigit::from_uint(
@ -238,7 +238,7 @@ impl Add<BigUint, BigUint> for BigUint {
);
carry = hi;
lo
};
});
if carry != 0 { sum.push(carry); }
return BigUint::new(sum);
}
@ -249,7 +249,7 @@ impl Sub<BigUint, BigUint> for BigUint {
let new_len = num::max(self.data.len(), other.data.len());
let mut borrow = 0;
let diff = do vec::from_fn(new_len) |i| {
let diff = vec::from_fn(new_len, |i| {
let ai = if i < self.data.len() { self.data[i] } else { 0 };
let bi = if i < other.data.len() { other.data[i] } else { 0 };
let (hi, lo) = BigDigit::from_uint(
@ -262,7 +262,7 @@ impl Sub<BigUint, BigUint> for BigUint {
*/
borrow = if hi == 0 { 1 } else { 0 };
lo
};
});
assert_eq!(borrow, 0); // <=> assert!((self >= other));
return BigUint::new(diff);
@ -306,13 +306,13 @@ impl Mul<BigUint, BigUint> for BigUint {
if n == 1 { return (*a).clone(); }
let mut carry = 0;
let mut prod = do a.data.iter().map |ai| {
let mut prod = a.data.iter().map(|ai| {
let (hi, lo) = BigDigit::from_uint(
(*ai as uint) * (n as uint) + (carry as uint)
);
carry = hi;
lo
}.collect::<~[BigDigit]>();
}).collect::<~[BigDigit]>();
if carry != 0 { prod.push(carry); }
return BigUint::new(prod);
}
@ -504,14 +504,14 @@ impl Integer for BigUint {
impl ToPrimitive for BigUint {
#[inline]
fn to_i64(&self) -> Option<i64> {
do self.to_u64().and_then |n| {
self.to_u64().and_then(|n| {
// If top bit of u64 is set, it's too large to convert to i64.
if n >> 63 == 0 {
Some(n as i64)
} else {
None
}
}
})
}
#[cfg(target_word_size = "32")]
@ -763,13 +763,13 @@ impl BigUint {
if n_bits == 0 || self.is_zero() { return (*self).clone(); }
let mut carry = 0;
let mut shifted = do self.data.iter().map |elem| {
let mut shifted = self.data.iter().map(|elem| {
let (hi, lo) = BigDigit::from_uint(
(*elem as uint) << n_bits | (carry as uint)
);
carry = hi;
lo
}.collect::<~[BigDigit]>();
}).collect::<~[BigDigit]>();
if carry != 0 { shifted.push(carry); }
return BigUint::new(shifted);
}
@ -1182,7 +1182,7 @@ impl ToPrimitive for BigInt {
Plus => self.data.to_i64(),
Zero => Some(0),
Minus => {
do self.data.to_u64().and_then |n| {
self.data.to_u64().and_then(|n| {
let m: u64 = 1 << 63;
if n < m {
Some(-(n as i64))
@ -1191,7 +1191,7 @@ impl ToPrimitive for BigInt {
} else {
None
}
}
})
}
}
}
@ -1210,13 +1210,14 @@ impl FromPrimitive for BigInt {
#[inline]
fn from_i64(n: i64) -> Option<BigInt> {
if n > 0 {
do FromPrimitive::from_u64(n as u64).and_then |n| {
FromPrimitive::from_u64(n as u64).and_then(|n| {
Some(BigInt::from_biguint(Plus, n))
}
})
} else if n < 0 {
do FromPrimitive::from_u64(u64::max_value - (n as u64) + 1).and_then |n| {
FromPrimitive::from_u64(u64::max_value - (n as u64) + 1).and_then(
|n| {
Some(BigInt::from_biguint(Minus, n))
}
})
} else {
Some(Zero::zero())
}
@ -1227,9 +1228,9 @@ impl FromPrimitive for BigInt {
if n == 0 {
Some(Zero::zero())
} else {
do FromPrimitive::from_u64(n).and_then |n| {
FromPrimitive::from_u64(n).and_then(|n| {
Some(BigInt::from_biguint(Plus, n))
}
})
}
}
}
@ -2051,22 +2052,22 @@ mod biguint_tests {
fn test_rand_range() {
let mut rng = task_rng();
do 10.times {
10.times(|| {
assert_eq!(rng.gen_bigint_range(&FromPrimitive::from_uint(236).unwrap(),
&FromPrimitive::from_uint(237).unwrap()),
FromPrimitive::from_uint(236).unwrap());
}
});
let l = FromPrimitive::from_uint(403469000 + 2352).unwrap();
let u = FromPrimitive::from_uint(403469000 + 3513).unwrap();
do 1000.times {
1000.times(|| {
let n: BigUint = rng.gen_biguint_below(&u);
assert!(n < u);
let n: BigUint = rng.gen_biguint_range(&l, &u);
assert!(n >= l);
assert!(n < u);
}
})
}
#[test]
@ -2548,19 +2549,19 @@ mod bigint_tests {
fn test_rand_range() {
let mut rng = task_rng();
do 10.times {
10.times(|| {
assert_eq!(rng.gen_bigint_range(&FromPrimitive::from_uint(236).unwrap(),
&FromPrimitive::from_uint(237).unwrap()),
FromPrimitive::from_uint(236).unwrap());
}
});
fn check(l: BigInt, u: BigInt) {
let mut rng = task_rng();
do 1000.times {
1000.times(|| {
let n: BigInt = rng.gen_bigint_range(&l, &u);
assert!(n >= l);
assert!(n < u);
}
});
}
let l: BigInt = FromPrimitive::from_uint(403469000 + 2352).unwrap();
let u: BigInt = FromPrimitive::from_uint(403469000 + 3513).unwrap();
@ -2614,19 +2615,19 @@ mod bench {
#[bench]
fn factorial_100(bh: &mut BenchHarness) {
do bh.iter { factorial(100); }
bh.iter(|| factorial(100));
}
#[bench]
fn fib_100(bh: &mut BenchHarness) {
do bh.iter { fib(100); }
bh.iter(|| fib(100));
}
#[bench]
fn to_str(bh: &mut BenchHarness) {
let fac = factorial(100);
let fib = fib(100);
do bh.iter { fac.to_str(); }
do bh.iter { fib.to_str(); }
bh.iter(|| fac.to_str());
bh.iter(|| fib.to_str());
}
}

View file

@ -297,12 +297,12 @@ impl<T: FromStr + Clone + Integer + Ord>
return None
}
let a_option: Option<T> = FromStr::from_str(split[0]);
do a_option.and_then |a| {
a_option.and_then(|a| {
let b_option: Option<T> = FromStr::from_str(split[1]);
do b_option.and_then |b| {
b_option.and_then(|b| {
Some(Ratio::new(a.clone(), b.clone()))
}
}
})
})
}
}
impl<T: FromStrRadix + Clone + Integer + Ord>
@ -315,13 +315,13 @@ impl<T: FromStrRadix + Clone + Integer + Ord>
} else {
let a_option: Option<T> = FromStrRadix::from_str_radix(split[0],
radix);
do a_option.and_then |a| {
a_option.and_then(|a| {
let b_option: Option<T> =
FromStrRadix::from_str_radix(split[1], radix);
do b_option.and_then |b| {
b_option.and_then(|b| {
Some(Ratio::new(a.clone(), b.clone()))
}
}
})
})
}
}
}

View file

@ -238,14 +238,14 @@ pub fn parse(s: &str) -> Option<Version> {
}
let s = s.trim();
let mut bad = false;
do bad_parse::cond.trap(|_| { debug!("bad"); bad = true }).inside {
bad_parse::cond.trap(|_| { debug!("bad"); bad = true }).inside(|| {
let v = parse_iter(&mut s.chars());
if bad || v.to_str() != s.to_owned() {
None
} else {
Some(v)
}
}
})
}
#[test]

View file

@ -440,74 +440,74 @@ impl<D:Decoder,T:Decodable<D> + 'static> Decodable<D> for @mut T {
impl<'self, S:Encoder,T:Encodable<S>> Encodable<S> for &'self [T] {
fn encode(&self, s: &mut S) {
do s.emit_seq(self.len()) |s| {
s.emit_seq(self.len(), |s| {
for (i, e) in self.iter().enumerate() {
s.emit_seq_elt(i, |s| e.encode(s))
}
}
})
}
}
impl<S:Encoder,T:Encodable<S>> Encodable<S> for ~[T] {
fn encode(&self, s: &mut S) {
do s.emit_seq(self.len()) |s| {
s.emit_seq(self.len(), |s| {
for (i, e) in self.iter().enumerate() {
s.emit_seq_elt(i, |s| e.encode(s))
}
}
})
}
}
impl<D:Decoder,T:Decodable<D>> Decodable<D> for ~[T] {
fn decode(d: &mut D) -> ~[T] {
do d.read_seq |d, len| {
do vec::from_fn(len) |i| {
d.read_seq(|d, len| {
vec::from_fn(len, |i| {
d.read_seq_elt(i, |d| Decodable::decode(d))
}
}
})
})
}
}
impl<S:Encoder,T:Encodable<S>> Encodable<S> for @[T] {
fn encode(&self, s: &mut S) {
do s.emit_seq(self.len()) |s| {
s.emit_seq(self.len(), |s| {
for (i, e) in self.iter().enumerate() {
s.emit_seq_elt(i, |s| e.encode(s))
}
}
})
}
}
impl<D:Decoder,T:Decodable<D>> Decodable<D> for @[T] {
fn decode(d: &mut D) -> @[T] {
do d.read_seq |d, len| {
do at_vec::from_fn(len) |i| {
d.read_seq(|d, len| {
at_vec::from_fn(len, |i| {
d.read_seq_elt(i, |d| Decodable::decode(d))
}
}
})
})
}
}
impl<S:Encoder,T:Encodable<S>> Encodable<S> for Option<T> {
fn encode(&self, s: &mut S) {
do s.emit_option |s| {
s.emit_option(|s| {
match *self {
None => s.emit_option_none(),
Some(ref v) => s.emit_option_some(|s| v.encode(s)),
}
}
})
}
}
impl<D:Decoder,T:Decodable<D>> Decodable<D> for Option<T> {
fn decode(d: &mut D) -> Option<T> {
do d.read_option |d, b| {
d.read_option(|d, b| {
if b {
Some(Decodable::decode(d))
} else {
None
}
}
})
}
}
@ -515,10 +515,10 @@ impl<S:Encoder,T0:Encodable<S>,T1:Encodable<S>> Encodable<S> for (T0, T1) {
fn encode(&self, s: &mut S) {
match *self {
(ref t0, ref t1) => {
do s.emit_seq(2) |s| {
s.emit_seq(2, |s| {
s.emit_seq_elt(0, |s| t0.encode(s));
s.emit_seq_elt(1, |s| t1.encode(s));
}
})
}
}
}
@ -526,13 +526,13 @@ impl<S:Encoder,T0:Encodable<S>,T1:Encodable<S>> Encodable<S> for (T0, T1) {
impl<D:Decoder,T0:Decodable<D>,T1:Decodable<D>> Decodable<D> for (T0, T1) {
fn decode(d: &mut D) -> (T0, T1) {
do d.read_seq |d, len| {
d.read_seq(|d, len| {
assert_eq!(len, 2);
(
d.read_seq_elt(0, |d| Decodable::decode(d)),
d.read_seq_elt(1, |d| Decodable::decode(d))
)
}
})
}
}
@ -545,11 +545,11 @@ impl<
fn encode(&self, s: &mut S) {
match *self {
(ref t0, ref t1, ref t2) => {
do s.emit_seq(3) |s| {
s.emit_seq(3, |s| {
s.emit_seq_elt(0, |s| t0.encode(s));
s.emit_seq_elt(1, |s| t1.encode(s));
s.emit_seq_elt(2, |s| t2.encode(s));
}
})
}
}
}
@ -562,14 +562,14 @@ impl<
T2: Decodable<D>
> Decodable<D> for (T0, T1, T2) {
fn decode(d: &mut D) -> (T0, T1, T2) {
do d.read_seq |d, len| {
d.read_seq(|d, len| {
assert_eq!(len, 3);
(
d.read_seq_elt(0, |d| Decodable::decode(d)),
d.read_seq_elt(1, |d| Decodable::decode(d)),
d.read_seq_elt(2, |d| Decodable::decode(d))
)
}
})
}
}
@ -583,12 +583,12 @@ impl<
fn encode(&self, s: &mut S) {
match *self {
(ref t0, ref t1, ref t2, ref t3) => {
do s.emit_seq(4) |s| {
s.emit_seq(4, |s| {
s.emit_seq_elt(0, |s| t0.encode(s));
s.emit_seq_elt(1, |s| t1.encode(s));
s.emit_seq_elt(2, |s| t2.encode(s));
s.emit_seq_elt(3, |s| t3.encode(s));
}
})
}
}
}
@ -602,7 +602,7 @@ impl<
T3: Decodable<D>
> Decodable<D> for (T0, T1, T2, T3) {
fn decode(d: &mut D) -> (T0, T1, T2, T3) {
do d.read_seq |d, len| {
d.read_seq(|d, len| {
assert_eq!(len, 4);
(
d.read_seq_elt(0, |d| Decodable::decode(d)),
@ -610,7 +610,7 @@ impl<
d.read_seq_elt(2, |d| Decodable::decode(d)),
d.read_seq_elt(3, |d| Decodable::decode(d))
)
}
})
}
}
@ -625,13 +625,13 @@ impl<
fn encode(&self, s: &mut S) {
match *self {
(ref t0, ref t1, ref t2, ref t3, ref t4) => {
do s.emit_seq(5) |s| {
s.emit_seq(5, |s| {
s.emit_seq_elt(0, |s| t0.encode(s));
s.emit_seq_elt(1, |s| t1.encode(s));
s.emit_seq_elt(2, |s| t2.encode(s));
s.emit_seq_elt(3, |s| t3.encode(s));
s.emit_seq_elt(4, |s| t4.encode(s));
}
})
}
}
}
@ -646,7 +646,7 @@ impl<
T4: Decodable<D>
> Decodable<D> for (T0, T1, T2, T3, T4) {
fn decode(d: &mut D) -> (T0, T1, T2, T3, T4) {
do d.read_seq |d, len| {
d.read_seq(|d, len| {
assert_eq!(len, 5);
(
d.read_seq_elt(0, |d| Decodable::decode(d)),
@ -655,7 +655,7 @@ impl<
d.read_seq_elt(3, |d| Decodable::decode(d)),
d.read_seq_elt(4, |d| Decodable::decode(d))
)
}
})
}
}
@ -664,22 +664,22 @@ impl<
T: Encodable<S>
> Encodable<S> for DList<T> {
fn encode(&self, s: &mut S) {
do s.emit_seq(self.len()) |s| {
s.emit_seq(self.len(), |s| {
for (i, e) in self.iter().enumerate() {
s.emit_seq_elt(i, |s| e.encode(s));
}
}
})
}
}
impl<D:Decoder,T:Decodable<D>> Decodable<D> for DList<T> {
fn decode(d: &mut D) -> DList<T> {
let mut list = DList::new();
do d.read_seq |d, len| {
d.read_seq(|d, len| {
for i in range(0u, len) {
list.push_back(d.read_seq_elt(i, |d| Decodable::decode(d)));
}
}
});
list
}
}
@ -689,22 +689,22 @@ impl<
T: Encodable<S>
> Encodable<S> for RingBuf<T> {
fn encode(&self, s: &mut S) {
do s.emit_seq(self.len()) |s| {
s.emit_seq(self.len(), |s| {
for (i, e) in self.iter().enumerate() {
s.emit_seq_elt(i, |s| e.encode(s));
}
}
})
}
}
impl<D:Decoder,T:Decodable<D>> Decodable<D> for RingBuf<T> {
fn decode(d: &mut D) -> RingBuf<T> {
let mut deque = RingBuf::new();
do d.read_seq |d, len| {
d.read_seq(|d, len| {
for i in range(0u, len) {
deque.push_back(d.read_seq_elt(i, |d| Decodable::decode(d)));
}
}
});
deque
}
}
@ -715,14 +715,14 @@ impl<
V: Encodable<E>
> Encodable<E> for HashMap<K, V> {
fn encode(&self, e: &mut E) {
do e.emit_map(self.len()) |e| {
e.emit_map(self.len(), |e| {
let mut i = 0;
for (key, val) in self.iter() {
e.emit_map_elt_key(i, |e| key.encode(e));
e.emit_map_elt_val(i, |e| val.encode(e));
i += 1;
}
}
})
}
}
@ -732,7 +732,7 @@ impl<
V: Decodable<D>
> Decodable<D> for HashMap<K, V> {
fn decode(d: &mut D) -> HashMap<K, V> {
do d.read_map |d, len| {
d.read_map(|d, len| {
let mut map = HashMap::with_capacity(len);
for i in range(0u, len) {
let key = d.read_map_elt_key(i, |d| Decodable::decode(d));
@ -740,7 +740,7 @@ impl<
map.insert(key, val);
}
map
}
})
}
}
@ -749,13 +749,13 @@ impl<
T: Encodable<S> + Hash + IterBytes + Eq
> Encodable<S> for HashSet<T> {
fn encode(&self, s: &mut S) {
do s.emit_seq(self.len()) |s| {
s.emit_seq(self.len(), |s| {
let mut i = 0;
for e in self.iter() {
s.emit_seq_elt(i, |s| e.encode(s));
i += 1;
}
}
})
}
}
@ -764,13 +764,13 @@ impl<
T: Decodable<D> + Hash + IterBytes + Eq
> Decodable<D> for HashSet<T> {
fn decode(d: &mut D) -> HashSet<T> {
do d.read_seq |d, len| {
d.read_seq(|d, len| {
let mut set = HashSet::with_capacity(len);
for i in range(0u, len) {
set.insert(d.read_seq_elt(i, |d| Decodable::decode(d)));
}
set
}
})
}
}
@ -779,15 +779,15 @@ impl<
V: Encodable<E>
> Encodable<E> for TrieMap<V> {
fn encode(&self, e: &mut E) {
do e.emit_map(self.len()) |e| {
e.emit_map(self.len(), |e| {
let mut i = 0;
do self.each |key, val| {
self.each(|key, val| {
e.emit_map_elt_key(i, |e| key.encode(e));
e.emit_map_elt_val(i, |e| val.encode(e));
i += 1;
true
};
}
});
})
}
}
@ -796,7 +796,7 @@ impl<
V: Decodable<D>
> Decodable<D> for TrieMap<V> {
fn decode(d: &mut D) -> TrieMap<V> {
do d.read_map |d, len| {
d.read_map(|d, len| {
let mut map = TrieMap::new();
for i in range(0u, len) {
let key = d.read_map_elt_key(i, |d| Decodable::decode(d));
@ -804,32 +804,32 @@ impl<
map.insert(key, val);
}
map
}
})
}
}
impl<S: Encoder> Encodable<S> for TrieSet {
fn encode(&self, s: &mut S) {
do s.emit_seq(self.len()) |s| {
s.emit_seq(self.len(), |s| {
let mut i = 0;
do self.each |e| {
self.each(|e| {
s.emit_seq_elt(i, |s| e.encode(s));
i += 1;
true
};
}
});
})
}
}
impl<D: Decoder> Decodable<D> for TrieSet {
fn decode(d: &mut D) -> TrieSet {
do d.read_seq |d, len| {
d.read_seq(|d, len| {
let mut set = TrieSet::new();
for i in range(0u, len) {
set.insert(d.read_seq_elt(i, |d| Decodable::decode(d)));
}
set
}
})
}
}
@ -839,14 +839,14 @@ impl<
V: Encodable<E> + Eq
> Encodable<E> for TreeMap<K, V> {
fn encode(&self, e: &mut E) {
do e.emit_map(self.len()) |e| {
e.emit_map(self.len(), |e| {
let mut i = 0;
for (key, val) in self.iter() {
e.emit_map_elt_key(i, |e| key.encode(e));
e.emit_map_elt_val(i, |e| val.encode(e));
i += 1;
}
}
})
}
}
@ -856,7 +856,7 @@ impl<
V: Decodable<D> + Eq
> Decodable<D> for TreeMap<K, V> {
fn decode(d: &mut D) -> TreeMap<K, V> {
do d.read_map |d, len| {
d.read_map(|d, len| {
let mut map = TreeMap::new();
for i in range(0u, len) {
let key = d.read_map_elt_key(i, |d| Decodable::decode(d));
@ -864,7 +864,7 @@ impl<
map.insert(key, val);
}
map
}
})
}
}
@ -873,13 +873,13 @@ impl<
T: Encodable<S> + Eq + TotalOrd
> Encodable<S> for TreeSet<T> {
fn encode(&self, s: &mut S) {
do s.emit_seq(self.len()) |s| {
s.emit_seq(self.len(), |s| {
let mut i = 0;
for e in self.iter() {
s.emit_seq_elt(i, |s| e.encode(s));
i += 1;
}
}
})
}
}
@ -888,13 +888,13 @@ impl<
T: Decodable<D> + Eq + TotalOrd
> Decodable<D> for TreeSet<T> {
fn decode(d: &mut D) -> TreeSet<T> {
do d.read_seq |d, len| {
d.read_seq(|d, len| {
let mut set = TreeSet::new();
for i in range(0u, len) {
set.insert(d.read_seq_elt(i, |d| Decodable::decode(d)));
}
set
}
})
}
}
@ -909,13 +909,13 @@ pub trait EncoderHelpers {
impl<S:Encoder> EncoderHelpers for S {
fn emit_from_vec<T>(&mut self, v: &[T], f: |&mut S, &T|) {
do self.emit_seq(v.len()) |this| {
self.emit_seq(v.len(), |this| {
for (i, e) in v.iter().enumerate() {
do this.emit_seq_elt(i) |this| {
this.emit_seq_elt(i, |this| {
f(this, e)
})
}
}
}
})
}
}
@ -925,10 +925,10 @@ pub trait DecoderHelpers {
impl<D:Decoder> DecoderHelpers for D {
fn read_to_vec<T>(&mut self, f: |&mut D| -> T) -> ~[T] {
do self.read_seq |this, len| {
do vec::from_fn(len) |i| {
self.read_seq(|this, len| {
vec::from_fn(len, |i| {
this.read_seq_elt(i, |this| f(this))
}
}
})
})
}
}

View file

@ -104,16 +104,16 @@ impl<Q:Send> Sem<Q> {
pub fn acquire(&self) {
unsafe {
let mut waiter_nobe = None;
do (**self).with |state| {
(**self).with(|state| {
state.count -= 1;
if state.count < 0 {
// Create waiter nobe, enqueue ourself, and tell
// outer scope we need to block.
waiter_nobe = Some(state.waiters.wait_end());
}
}
});
// Uncomment if you wish to test for sem races. Not valgrind-friendly.
/* do 1000.times { task::deschedule(); } */
/* 1000.times(|| task::deschedule()); */
// Need to wait outside the exclusive.
if waiter_nobe.is_some() {
let _ = waiter_nobe.unwrap().recv();
@ -123,22 +123,22 @@ impl<Q:Send> Sem<Q> {
pub fn release(&self) {
unsafe {
do (**self).with |state| {
(**self).with(|state| {
state.count += 1;
if state.count <= 0 {
state.waiters.signal();
}
}
})
}
}
pub fn access<U>(&self, blk: || -> U) -> U {
do (|| {
(|| {
self.acquire();
blk()
}).finally {
}).finally(|| {
self.release();
}
})
}
}
@ -147,9 +147,7 @@ impl Sem<~[WaitQueue]> {
fn new_and_signal(count: int, num_condvars: uint)
-> Sem<~[WaitQueue]> {
let mut queues = ~[];
do num_condvars.times {
queues.push(WaitQueue::new());
}
num_condvars.times(|| queues.push(WaitQueue::new()));
Sem::new(count, queues)
}
}
@ -205,7 +203,7 @@ impl<'self> Condvar<'self> {
let mut out_of_bounds = None;
// Release lock, 'atomically' enqueuing ourselves in so doing.
unsafe {
do (**self.sem).with |state| {
(**self.sem).with(|state| {
if condvar_id < state.blocked.len() {
// Drop the lock.
state.count += 1;
@ -218,18 +216,18 @@ impl<'self> Condvar<'self> {
} else {
out_of_bounds = Some(state.blocked.len());
}
}
})
}
// If deschedule checks start getting inserted anywhere, we can be
// killed before or after enqueueing.
do check_cvar_bounds(out_of_bounds, condvar_id, "cond.wait_on()") {
check_cvar_bounds(out_of_bounds, condvar_id, "cond.wait_on()", || {
// Unconditionally "block". (Might not actually block if a
// signaller already sent -- I mean 'unconditionally' in contrast
// with acquire().)
do (|| {
(|| {
let _ = WaitEnd.take_unwrap().recv();
}).finally {
}).finally(|| {
// Reacquire the condvar.
match self.order {
Just(lock) => do lock.access {
@ -239,8 +237,8 @@ impl<'self> Condvar<'self> {
self.sem.acquire();
},
}
}
}
})
})
}
/// Wake up a blocked task. Returns false if there was no blocked task.
@ -251,16 +249,17 @@ impl<'self> Condvar<'self> {
unsafe {
let mut out_of_bounds = None;
let mut result = false;
do (**self.sem).with |state| {
(**self.sem).with(|state| {
if condvar_id < state.blocked.len() {
result = state.blocked[condvar_id].signal();
} else {
out_of_bounds = Some(state.blocked.len());
}
}
do check_cvar_bounds(out_of_bounds, condvar_id, "cond.signal_on()") {
result
}
});
check_cvar_bounds(out_of_bounds,
condvar_id,
"cond.signal_on()",
|| result)
}
}
@ -272,7 +271,7 @@ impl<'self> Condvar<'self> {
let mut out_of_bounds = None;
let mut queue = None;
unsafe {
do (**self.sem).with |state| {
(**self.sem).with(|state| {
if condvar_id < state.blocked.len() {
// To avoid :broadcast_heavy, we make a new waitqueue,
// swap it out with the old one, and broadcast on the
@ -282,11 +281,14 @@ impl<'self> Condvar<'self> {
} else {
out_of_bounds = Some(state.blocked.len());
}
}
do check_cvar_bounds(out_of_bounds, condvar_id, "cond.signal_on()") {
});
check_cvar_bounds(out_of_bounds,
condvar_id,
"cond.signal_on()",
|| {
let queue = queue.take_unwrap();
queue.broadcast()
}
})
}
}
}
@ -315,9 +317,13 @@ impl Sem<~[WaitQueue]> {
// The only other places that condvars get built are rwlock.write_cond()
// and rwlock_write_mode.
pub fn access_cond<U>(&self, blk: |c: &Condvar| -> U) -> U {
do self.access {
blk(&Condvar { sem: self, order: Nothing, token: NonCopyable })
}
self.access(|| {
blk(&Condvar {
sem: self,
order: Nothing,
token: NonCopyable
})
})
}
}
@ -474,17 +480,17 @@ impl RWLock {
*/
pub fn read<U>(&self, blk: || -> U) -> U {
unsafe {
do (&self.order_lock).access {
(&self.order_lock).access(|| {
let state = &mut *self.state.get();
let old_count = state.read_count.fetch_add(1, atomics::Acquire);
if old_count == 0 {
(&self.access_lock).acquire();
state.read_mode = true;
}
}
do (|| {
});
(|| {
blk()
}).finally {
}).finally(|| {
let state = &mut *self.state.get();
assert!(state.read_mode);
let old_count = state.read_count.fetch_sub(1, atomics::Release);
@ -497,7 +503,7 @@ impl RWLock {
// this access MUST NOT go inside the exclusive access.
(&self.access_lock).release();
}
}
})
}
}
@ -507,10 +513,10 @@ impl RWLock {
*/
pub fn write<U>(&self, blk: || -> U) -> U {
(&self.order_lock).acquire();
do (&self.access_lock).access {
(&self.access_lock).access(|| {
(&self.order_lock).release();
blk()
}
})
}
/**
@ -547,12 +553,12 @@ impl RWLock {
// The astute reader will also note that making waking writers use the
// order_lock is better for not starving readers.
(&self.order_lock).acquire();
do (&self.access_lock).access_cond |cond| {
(&self.access_lock).access_cond(|cond| {
(&self.order_lock).release();
let opt_lock = Just(&self.order_lock);
blk(&Condvar { sem: cond.sem, order: opt_lock,
token: NonCopyable })
}
})
}
/**
@ -582,9 +588,9 @@ impl RWLock {
(&self.order_lock).acquire();
(&self.access_lock).acquire();
(&self.order_lock).release();
do (|| {
(|| {
blk(RWLockWriteMode { lock: self, token: NonCopyable })
}).finally {
}).finally(|| {
let writer_or_last_reader;
// Check if we're releasing from read mode or from write mode.
let state = unsafe { &mut *self.state.get() };
@ -609,7 +615,7 @@ impl RWLock {
// Nobody left inside; release the "reader cloud" lock.
(&self.access_lock).release();
}
}
})
}
/// To be called inside of the write_downgrade block.

View file

@ -53,7 +53,7 @@ impl<T> TaskPool<T> {
-> TaskPool<T> {
assert!(n_tasks >= 1);
let channels = do vec::from_fn(n_tasks) |i| {
let channels = vec::from_fn(n_tasks, |i| {
let (port, chan) = comm::stream::<Msg<T>>();
let init_fn = init_fn_factory();
@ -81,7 +81,7 @@ impl<T> TaskPool<T> {
}
chan
};
});
return TaskPool { channels: channels, next_index: 0 };
}

View file

@ -220,9 +220,9 @@ impl<T: Writer> Terminal<T> {
cap = self.ti.strings.find_equiv(&("op"));
}
}
let s = do cap.map_default(Err(~"can't find terminfo capability `sgr0`")) |op| {
let s = cap.map_default(Err(~"can't find terminfo capability `sgr0`"), |op| {
expand(*op, [], &mut Variables::new())
};
});
if s.is_ok() {
self.out.write(s.unwrap());
} else if self.num_colors > 0 {

View file

@ -494,14 +494,14 @@ fn format(val: Param, op: FormatOp, flags: Flags) -> Result<~[u8],~str> {
match op {
FormatDigit => {
let sign = if flags.sign { SignAll } else { SignNeg };
do int_to_str_bytes_common(d, radix, sign) |c| {
int_to_str_bytes_common(d, radix, sign, |c| {
s.push(c);
}
})
}
_ => {
do int_to_str_bytes_common(d as uint, radix, SignNone) |c| {
int_to_str_bytes_common(d as uint, radix, SignNone, |c| {
s.push(c);
}
})
}
};
if flags.precision > s.len() {

View file

@ -176,7 +176,7 @@ pub fn test_main(args: &[~str], tests: ~[TestDescAndFn]) {
// semantics into parallel test runners, which in turn requires a ~[]
// rather than a &[].
pub fn test_main_static(args: &[~str], tests: &[TestDescAndFn]) {
let owned_tests = do tests.map |t| {
let owned_tests = tests.map(|t| {
match t.testfn {
StaticTestFn(f) =>
TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
@ -188,7 +188,7 @@ pub fn test_main_static(args: &[~str], tests: &[TestDescAndFn]) {
fail!("non-static tests passed to test::test_main_static");
}
}
};
});
test_main(args, owned_tests)
}
@ -730,12 +730,12 @@ fn run_tests(opts: &TestOpts,
callback(TeFiltered(filtered_descs));
let (filtered_tests, filtered_benchs_and_metrics) =
do filtered_tests.partition |e| {
filtered_tests.partition(|e| {
match e.testfn {
StaticTestFn(_) | DynTestFn(_) => true,
_ => false
}
};
});
// It's tempting to just spawn all the tests at once, but since we have
// many tests that run in other processes we would be making a big mess.
@ -1044,12 +1044,12 @@ impl MetricMap {
};
let diff : MetricDiff = self.compare_to_old(&old, pct);
let ok = do diff.iter().all() |(_, v)| {
let ok = diff.iter().all(|(_, v)| {
match *v {
Regression(_) => false,
_ => true
}
};
});
if ok {
debug!("rewriting file '{:?}' with updated metrics", p);

View file

@ -394,7 +394,7 @@ impl<'self, T> Iterator<&'self T> for TreeSetIterator<'self, T> {
/// Advance the iterator to the next node (in order). If there are no more nodes, return `None`.
#[inline]
fn next(&mut self) -> Option<&'self T> {
do self.iter.next().map |(value, _)| { value }
self.iter.next().map(|(value, _)| value)
}
}
@ -402,7 +402,7 @@ impl<'self, T> Iterator<&'self T> for TreeSetRevIterator<'self, T> {
/// Advance the iterator to the next node (in order). If there are no more nodes, return `None`.
#[inline]
fn next(&mut self) -> Option<&'self T> {
do self.iter.next().map |(value, _)| { value }
self.iter.next().map(|(value, _)| value)
}
}

View file

@ -373,7 +373,7 @@ impl<'self> Prep<'self> {
None => fail!("missing freshness-function for '{}'", kind),
Some(f) => (*f)(name, val)
};
do self.ctxt.logger.write |lg| {
self.ctxt.logger.write(|lg| {
if fresh {
lg.info(format!("{} {}:{} is fresh",
cat, kind, name));
@ -381,7 +381,7 @@ impl<'self> Prep<'self> {
lg.info(format!("{} {}:{} is not fresh",
cat, kind, name))
}
};
});
fresh
}
@ -411,9 +411,9 @@ impl<'self> Prep<'self> {
debug!("exec_work: looking up {} and {:?}", self.fn_name,
self.declared_inputs);
let cached = do self.ctxt.db.read |db| {
let cached = self.ctxt.db.read(|db| {
db.prepare(self.fn_name, &self.declared_inputs)
};
});
match cached {
Some((ref disc_in, ref disc_out, ref res))
@ -432,7 +432,7 @@ impl<'self> Prep<'self> {
let blk = bo.take_unwrap();
let chan = Cell::new(chan);
// What happens if the task fails?
// XXX: What happens if the task fails?
do task::spawn {
let mut exe = Exec {
discovered_inputs: WorkMap::new(),
@ -467,13 +467,13 @@ impl<'self, T:Send +
WorkFromTask(prep, port) => {
let (exe, v) = port.recv();
let s = json_encode(&v);
do prep.ctxt.db.write |db| {
prep.ctxt.db.write(|db| {
db.cache(prep.fn_name,
&prep.declared_inputs,
&exe.discovered_inputs,
&exe.discovered_outputs,
s);
}
s)
});
v
}
}
@ -507,7 +507,7 @@ fn test() {
RWArc::new(Logger::new()),
Arc::new(TreeMap::new()));
let s = do cx.with_prep("test1") |prep| {
let s = cx.with_prep("test1", |prep| {
let subcx = cx.clone();
let pth = pth.clone();
@ -529,7 +529,7 @@ fn test() {
// FIXME (#9639): This needs to handle non-utf8 paths
out.as_str().unwrap().to_owned()
}
};
});
println(s);
}

View file

@ -55,11 +55,11 @@ impl GetAddrInfoRequest {
let hint = hints.map(|hint| {
let mut flags = 0;
do each_ai_flag |cval, aival| {
each_ai_flag(|cval, aival| {
if hint.flags & (aival as uint) != 0 {
flags |= cval as i32;
}
}
});
let socktype = 0;
let protocol = 0;
@ -86,9 +86,9 @@ impl GetAddrInfoRequest {
req.defuse(); // uv callback now owns this request
let mut cx = Ctx { slot: None, status: 0, addrinfo: None };
do wait_until_woken_after(&mut cx.slot) {
wait_until_woken_after(&mut cx.slot, || {
req.set_data(&cx);
}
});
match cx.status {
0 => Ok(accum_addrinfo(cx.addrinfo.get_ref())),
@ -144,11 +144,11 @@ pub fn accum_addrinfo(addr: &Addrinfo) -> ~[ai::Info] {
let rustaddr = net::sockaddr_to_socket_addr((*addr).ai_addr);
let mut flags = 0;
do each_ai_flag |cval, aival| {
each_ai_flag(|cval, aival| {
if (*addr).ai_flags & cval != 0 {
flags |= aival as uint;
}
}
});
/* XXX: do we really want to support these
let protocol = match (*addr).ai_protocol {

View file

@ -111,14 +111,14 @@ impl RemoteCallback for AsyncWatcher {
impl Drop for AsyncWatcher {
fn drop(&mut self) {
unsafe {
do self.exit_flag.with |should_exit| {
self.exit_flag.with(|should_exit| {
// NB: These two things need to happen atomically. Otherwise
// the event handler could wake up due to a *previous*
// signal and see the exit flag, destroying the handle
// before the final send.
*should_exit = true;
uvll::uv_async_send(self.handle)
}
})
}
}
}

View file

@ -86,13 +86,13 @@ impl FsRequest {
pub fn read(loop_: &Loop, fd: c_int, buf: &mut [u8], offset: i64)
-> Result<int, UvError>
{
do execute(|req, cb| unsafe {
execute(|req, cb| unsafe {
uvll::uv_fs_read(loop_.handle, req,
fd, vec::raw::to_ptr(buf) as *c_void,
buf.len() as size_t, offset, cb)
}).map |req| {
}).map(|req| {
req.get_result() as int
}
})
}
pub fn mkdir(loop_: &Loop, path: &CString, mode: c_int)
@ -142,24 +142,25 @@ impl FsRequest {
let mut paths = ~[];
let path = CString::new(path.with_ref(|p| p), false);
let parent = Path::new(path);
do c_str::from_c_multistring(req.get_ptr() as *libc::c_char,
Some(req.get_result() as uint)) |rel| {
c_str::from_c_multistring(req.get_ptr() as *libc::c_char,
Some(req.get_result() as uint),
|rel| {
let p = rel.as_bytes();
paths.push(parent.join(p.slice_to(rel.len())));
};
});
paths
})
}
pub fn readlink(loop_: &Loop, path: &CString) -> Result<Path, UvError> {
do execute(|req, cb| unsafe {
execute(|req, cb| unsafe {
uvll::uv_fs_readlink(loop_.handle, req,
path.with_ref(|p| p), cb)
}).map |req| {
}).map(|req| {
Path::new(unsafe {
CString::new(req.get_ptr() as *libc::c_char, false)
})
}
})
}
pub fn chown(loop_: &Loop, path: &CString, uid: int, gid: int)
@ -305,16 +306,15 @@ fn execute(f: |*uvll::uv_fs_t, uvll::uv_fs_cb| -> c_int)
0 => {
req.fired = true;
let mut slot = None;
do wait_until_woken_after(&mut slot) {
wait_until_woken_after(&mut slot, || {
unsafe { uvll::set_data_for_req(req.req, &slot) }
}
});
match req.get_result() {
n if n < 0 => Err(UvError(n)),
_ => Ok(req),
}
}
n => Err(UvError(n))
};
extern fn fs_cb(req: *uvll::uv_fs_t) {

View file

@ -138,9 +138,9 @@ pub trait UvHandle<T> {
uvll::uv_close(self.uv_handle() as *uvll::uv_handle_t, close_cb);
uvll::set_data_for_uv_handle(self.uv_handle(), ptr::null::<()>());
do wait_until_woken_after(&mut slot) {
wait_until_woken_after(&mut slot, || {
uvll::set_data_for_uv_handle(self.uv_handle(), &slot);
}
})
}
extern fn close_cb(handle: *uvll::uv_handle_t) {
@ -201,10 +201,10 @@ fn wait_until_woken_after(slot: *mut Option<BlockedTask>, f: ||) {
unsafe {
assert!((*slot).is_none());
let sched: ~Scheduler = Local::take();
do sched.deschedule_running_task_and_then |_, task| {
sched.deschedule_running_task_and_then(|_, task| {
f();
*slot = Some(task);
}
})
}
}
@ -390,7 +390,7 @@ pub fn slice_to_uv_buf(v: &[u8]) -> Buf {
#[cfg(test)]
fn local_loop() -> &'static mut Loop {
unsafe {
cast::transmute(do Local::borrow |sched: &mut Scheduler| {
cast::transmute(Local::borrow(|sched: &mut Scheduler| {
let mut io = None;
do sched.event_loop.io |i| {
let (_vtable, uvio): (uint, &'static mut uvio::UvIoFactory) =
@ -398,7 +398,7 @@ fn local_loop() -> &'static mut Loop {
io = Some(uvio);
}
io.unwrap()
}.uv_loop())
}).uv_loop())
}
}

View file

@ -29,7 +29,7 @@ macro_rules! uvdebug (
// get a handle for the current scheduler
macro_rules! get_handle_to_current_scheduler(
() => (do Local::borrow |sched: &mut Scheduler| { sched.make_handle() })
() => (Local::borrow(|sched: &mut Scheduler| sched.make_handle()))
)
pub fn dumb_println(args: &fmt::Arguments) {

View file

@ -42,11 +42,11 @@ fn socket_addr_as_sockaddr<T>(addr: SocketAddr, f: |*sockaddr| -> T) -> T {
let ip = addr.ip.to_str();
let addr = ip.with_c_str(|p| unsafe { malloc(p, addr.port as c_int) });
do (|| {
(|| {
f(addr)
}).finally {
}).finally(|| {
unsafe { libc::free(addr) };
}
})
}
pub fn sockaddr_to_socket_addr(addr: *sockaddr) -> SocketAddr {
@ -88,9 +88,9 @@ pub fn sockaddr_to_socket_addr(addr: *sockaddr) -> SocketAddr {
fn test_ip4_conversion() {
use std::rt;
let ip4 = rt::test::next_test_ip4();
do socket_addr_as_sockaddr(ip4) |addr| {
socket_addr_as_sockaddr(ip4, |addr| {
assert_eq!(ip4, sockaddr_to_socket_addr(addr));
}
})
}
#[cfg(test)]
@ -98,9 +98,9 @@ fn test_ip4_conversion() {
fn test_ip6_conversion() {
use std::rt;
let ip6 = rt::test::next_test_ip6();
do socket_addr_as_sockaddr(ip6) |addr| {
socket_addr_as_sockaddr(ip6, |addr| {
assert_eq!(ip6, sockaddr_to_socket_addr(addr));
}
})
}
enum SocketNameKind {
@ -176,7 +176,7 @@ impl TcpWatcher {
struct Ctx { status: c_int, task: Option<BlockedTask> }
let tcp = TcpWatcher::new(loop_);
let ret = do socket_addr_as_sockaddr(address) |addr| {
let ret = socket_addr_as_sockaddr(address, |addr| {
let mut req = Request::new(uvll::UV_CONNECT);
let result = unsafe {
uvll::uv_tcp_connect(req.handle, tcp.handle, addr,
@ -196,7 +196,7 @@ impl TcpWatcher {
}
n => Err(UvError(n))
}
};
});
return match ret {
Ok(()) => Ok(tcp),
@ -286,8 +286,7 @@ impl Drop for TcpWatcher {
impl TcpListener {
pub fn bind(loop_: &mut Loop, address: SocketAddr)
-> Result<~TcpListener, UvError>
{
-> Result<~TcpListener, UvError> {
let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) };
assert_eq!(unsafe {
uvll::uv_tcp_init(loop_.handle, handle)
@ -419,8 +418,7 @@ pub struct UdpWatcher {
impl UdpWatcher {
pub fn bind(loop_: &Loop, address: SocketAddr)
-> Result<UdpWatcher, UvError>
{
-> Result<UdpWatcher, UvError> {
let udp = UdpWatcher {
handle: unsafe { uvll::malloc_handle(uvll::UV_UDP) },
home: get_handle_to_current_scheduler!(),
@ -473,9 +471,9 @@ impl rtio::RtioUdpSocket for UdpWatcher {
buf: Some(slice_to_uv_buf(buf)),
result: None,
};
do wait_until_woken_after(&mut cx.task) {
wait_until_woken_after(&mut cx.task, || {
unsafe { uvll::set_data_for_uv_handle(self.handle, &cx) }
}
});
match cx.result.take_unwrap() {
(n, _) if n < 0 =>
Err(uv_error_to_io_error(UvError(n as c_int))),
@ -545,9 +543,9 @@ impl rtio::RtioUdpSocket for UdpWatcher {
0 => {
req.defuse(); // uv callback now owns this request
let mut cx = Ctx { task: None, result: 0 };
do wait_until_woken_after(&mut cx.task) {
wait_until_woken_after(&mut cx.task, || {
req.set_data(&cx);
}
});
match cx.result {
0 => Ok(()),
n => Err(uv_error_to_io_error(UvError(n)))
@ -570,22 +568,22 @@ impl rtio::RtioUdpSocket for UdpWatcher {
fn join_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> {
let _m = self.fire_homing_missile();
status_to_io_result(unsafe {
do multi.to_str().with_c_str |m_addr| {
multi.to_str().with_c_str(|m_addr| {
uvll::uv_udp_set_membership(self.handle,
m_addr, ptr::null(),
uvll::UV_JOIN_GROUP)
}
})
})
}
fn leave_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> {
let _m = self.fire_homing_missile();
status_to_io_result(unsafe {
do multi.to_str().with_c_str |m_addr| {
multi.to_str().with_c_str(|m_addr| {
uvll::uv_udp_set_membership(self.handle,
m_addr, ptr::null(),
uvll::UV_LEAVE_GROUP)
}
})
})
}
@ -1101,11 +1099,11 @@ mod test {
};
unsafe fn local_io() -> &'static mut IoFactory {
do Local::borrow |sched: &mut Scheduler| {
Local::borrow(|sched: &mut Scheduler| {
let mut io = None;
sched.event_loop.io(|i| io = Some(i));
cast::transmute(io.unwrap())
}
})
}
let test_function: proc() = || {
@ -1117,14 +1115,14 @@ mod test {
// block self on sched1
let scheduler: ~Scheduler = Local::take();
do scheduler.deschedule_running_task_and_then |_, task| {
scheduler.deschedule_running_task_and_then(|_, task| {
// unblock task
do task.wake().map |task| {
task.wake().map(|task| {
// send self to sched2
tasksFriendHandle.take().send(TaskFromFriend(task));
};
});
// sched1 should now sleep since it has nothing else to do
}
})
// sched2 will wake up and get the task as we do nothing else,
// the function ends and the socket goes out of scope sched2
// will start to run the destructor the destructor will first

View file

@ -77,7 +77,7 @@ impl PipeWatcher {
let mut req = Request::new(uvll::UV_CONNECT);
let pipe = PipeWatcher::new(loop_, false);
do wait_until_woken_after(&mut cx.task) {
wait_until_woken_after(&mut cx.task, || {
unsafe {
uvll::uv_pipe_connect(req.handle,
pipe.handle(),
@ -86,7 +86,7 @@ impl PipeWatcher {
}
req.set_data(&cx);
req.defuse(); // uv callback now owns this request
}
});
return match cx.result {
0 => Ok(pipe),
n => Err(UvError(n))

View file

@ -57,8 +57,8 @@ impl Process {
}
}
let ret = do with_argv(config.program, config.args) |argv| {
do with_env(config.env) |envp| {
let ret = with_argv(config.program, config.args, |argv| {
with_env(config.env, |envp| {
let options = uvll::uv_process_options_t {
exit_cb: on_exit,
file: unsafe { *argv },
@ -88,8 +88,8 @@ impl Process {
0 => Ok(process.install()),
err => Err(UvError(err)),
}
}
};
})
});
match ret {
Ok(p) => Ok((p, ret_io)),

View file

@ -123,9 +123,9 @@ impl StreamWatcher {
let mut wcx = WriteContext { result: 0, task: None, };
req.defuse(); // uv callback now owns this request
do wait_until_woken_after(&mut wcx.task) {
wait_until_woken_after(&mut wcx.task, || {
req.set_data(&wcx);
}
});
self.last_write_req = Some(Request::wrap(req.handle));
match wcx.result {
0 => Ok(()),

View file

@ -88,10 +88,10 @@ impl RtioTimer for TimerWatcher {
let _f = ForbidUnwind::new("timer");
let sched: ~Scheduler = Local::take();
do sched.deschedule_running_task_and_then |_sched, task| {
sched.deschedule_running_task_and_then(|_sched, task| {
self.action = Some(WakeTask(task));
self.start(msecs, 0);
}
});
self.stop();
}

View file

@ -46,23 +46,23 @@ pub trait HomingIO {
let _f = ForbidUnwind::new("going home");
let current_sched_id = do Local::borrow |sched: &mut Scheduler| {
let current_sched_id = Local::borrow(|sched: &mut Scheduler| {
sched.sched_id()
};
});
// Only need to invoke a context switch if we're not on the right
// scheduler.
if current_sched_id != self.home().sched_id {
let scheduler: ~Scheduler = Local::take();
do scheduler.deschedule_running_task_and_then |_, task| {
do task.wake().map |task| {
scheduler.deschedule_running_task_and_then(|_, task| {
task.wake().map(|task| {
self.home().send(RunOnce(task));
};
});
})
}
}
let current_sched_id = do Local::borrow |sched: &mut Scheduler| {
let current_sched_id = Local::borrow(|sched: &mut Scheduler| {
sched.sched_id()
};
});
assert!(current_sched_id == self.home().sched_id);
self.home().sched_id
@ -114,11 +114,11 @@ impl Drop for HomingMissile {
// original scheduler. Otherwise, we can just return and keep running
if !Task::on_appropriate_sched() {
let scheduler: ~Scheduler = Local::take();
do scheduler.deschedule_running_task_and_then |_, task| {
do task.wake().map |task| {
scheduler.deschedule_running_task_and_then(|_, task| {
task.wake().map(|task| {
Scheduler::run_task(task);
};
}
});
})
}
util::ignore(f);