Customize <FlatMap as Iterator>::fold

`FlatMap` can use internal iteration for its `fold`, which shows a
performance advantage in the new benchmarks:

    test iter::bench_flat_map_chain_ref_sum ... bench:   4,354,111 ns/iter (+/- 108,871)
    test iter::bench_flat_map_chain_sum     ... bench:     468,167 ns/iter (+/- 2,274)
    test iter::bench_flat_map_ref_sum       ... bench:     449,616 ns/iter (+/- 6,257)
    test iter::bench_flat_map_sum           ... bench:     348,010 ns/iter (+/- 1,227)

... where the "ref" benches use `by_ref()`, whose `&mut` iterator falls
back to the default `fold` and so misses the optimization.
So this change shows a decent advantage on its own, but much more when
combined with a `chain` iterator that also optimizes `fold`.
This commit is contained in:
Josh Stone 2017-09-14 13:51:32 -07:00
parent 5dfc84cfa7
commit 61a7703e55
2 changed files with 48 additions and 0 deletions

View file

@ -146,3 +146,41 @@ fn bench_for_each_chain_ref_fold(b: &mut Bencher) {
acc
});
}
/// Baseline: sum a flat-mapped range directly, so the custom
/// `FlatMap::fold` path is exercised via `Sum`.
#[bench]
fn bench_flat_map_sum(b: &mut Bencher) {
    b.iter(|| {
        let total: i64 = (0i64..1000)
            .flat_map(|x| x..x + 1000)
            .map(black_box)
            .sum();
        total
    });
}
/// Same workload as `bench_flat_map_sum`, but summed through `by_ref()`
/// (a `&mut` iterator), which falls back to the default `next()`-driven path.
#[bench]
fn bench_flat_map_ref_sum(b: &mut Bencher) {
    b.iter(|| -> i64 {
        let mut it = (0i64..1000)
            .flat_map(|x| x..x + 1000)
            .map(black_box);
        it.by_ref().sum()
    });
}
/// Flat-map producing a `Chain` of two `Once` iterators per item, summed
/// directly so both `FlatMap` and `Chain` can use their custom `fold`.
#[bench]
fn bench_flat_map_chain_sum(b: &mut Bencher) {
    b.iter(|| {
        let total: i64 = (0i64..1000000)
            .flat_map(|x| once(x).chain(once(x)))
            .map(black_box)
            .sum();
        total
    });
}
/// Same chained workload as `bench_flat_map_chain_sum`, but summed through
/// `by_ref()` so the specialized `fold` implementations are bypassed.
#[bench]
fn bench_flat_map_chain_ref_sum(b: &mut Bencher) {
    b.iter(|| -> i64 {
        let mut it = (0i64..1000000)
            .flat_map(|x| once(x).chain(once(x)))
            .map(black_box);
        it.by_ref().sum()
    });
}

View file

@ -1902,6 +1902,16 @@ impl<I: Iterator, U: IntoIterator, F> Iterator for FlatMap<I, U, F>
_ => (lo, None)
}
}
#[inline]
fn fold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
    where Fold: FnMut(Acc, Self::Item) -> Acc,
{
    // Internal iteration: fold each sub-iterator to completion instead of
    // threading every element through `next()`. Order matches external
    // iteration: leftover front iterator, then each freshly produced
    // sub-iterator, then any leftover back iterator.
    let mut acc = init;
    if let Some(front) = self.frontiter {
        acc = front.fold(acc, &mut fold);
    }
    let mut produce = self.f;
    for item in self.iter {
        acc = produce(item).into_iter().fold(acc, &mut fold);
    }
    if let Some(back) = self.backiter {
        acc = back.fold(acc, &mut fold);
    }
    acc
}
}
#[stable(feature = "rust1", since = "1.0.0")]