author	Nick Piggin <npiggin@suse.de>	2009-09-15 15:34:12 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-09-16 09:18:52 -0400
commit	77fad5e625e56eb31a343ae1d489979fdc61a2aa (patch)
tree	e0b881a38be27d0c4d8523289f51b70ffb98c080
parent	deed62edffe600bc5b379c872d3004116e001b66 (diff)
writeback: improve scalability of bdi writeback work queues
If you're going to do an atomic RMW on each list entry, there's not much
point in all the RCU complexities of the list walking. This is only going
to help the multi-thread case I guess, but it doesn't hurt to do now.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
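The pattern the patch moves to can be shown outside the kernel. Below is a
minimal userspace sketch, with C11 atomics standing in for the kernel's
bitops; all names are illustrative, not the kernel API. Because each
writeback thread owns exactly one bit (wb->nr) in ->seen, splitting the
atomic test_and_clear_bit into a plain test plus a clear of our own bit is
race-free: no other thread ever touches that bit.

	/* Userspace sketch of the change below.  C11 atomics stand in
	 * for test_bit/clear_bit; names are illustrative only. */
	#include <stdatomic.h>
	#include <stddef.h>

	struct work_item {
		atomic_ulong seen;	/* one bit per walker thread */
		struct work_item *next;
	};

	/* Old style: atomic read-modify-write on every entry we pass
	 * over, dirtying the cacheline even for entries we skip. */
	static struct work_item *next_work_rmw(struct work_item *head, int nr)
	{
		unsigned long bit = 1UL << nr;

		for (struct work_item *w = head; w; w = w->next)
			if (atomic_fetch_and(&w->seen, ~bit) & bit)
				return w;
		return NULL;
	}

	/* New style: plain loads while scanning; the single atomic
	 * clear happens only on the entry we actually take. */
	static struct work_item *next_work_test_first(struct work_item *head, int nr)
	{
		unsigned long bit = 1UL << nr;

		for (struct work_item *w = head; w; w = w->next) {
			if (!(atomic_load(&w->seen) & bit))
				continue;	/* read-only: line stays shared */
			atomic_fetch_and(&w->seen, ~bit);
			return w;
		}
		return NULL;
	}

The new-style scan never writes a cacheline it doesn't claim, which is what
makes the lockless RCU list walk worthwhile when several writeback threads
poll the same work_list.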
-rw-r--r--	fs/fs-writeback.c	3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 59c99e729187..6bca6f8176f0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -772,8 +772,9 @@ static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
 	rcu_read_lock();
 
 	list_for_each_entry_rcu(work, &bdi->work_list, list) {
-		if (!test_and_clear_bit(wb->nr, &work->seen))
+		if (!test_bit(wb->nr, &work->seen))
 			continue;
+		clear_bit(wb->nr, &work->seen);
 
 		ret = work;
 		break;