author	Christoph Hellwig <hch@lst.de>	2010-07-06 02:59:53 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-07-06 02:59:53 -0400
commit	83ba7b071f30f7c01f72518ad72d5cd203c27502 (patch)
tree	4737320dcce72cfff4d87d835e4f78428eca7ef5 /mm/backing-dev.c
parent	edadfb10ba35da7253541e4155aa92eff758ebe6 (diff)
writeback: simplify the write back thread queue
First remove items from work_list as soon as we start working on them. This
means we don't have to track any pending or visited state and can get
rid of all the RCU magic freeing the work items - we can simply free
them once the operation has finished. Second use a real completion for
tracking synchronous requests - if the caller sets the completion pointer
we complete it, otherwise use it as a boolean indicator that we can free
the work item directly. Third unify struct wb_writeback_args and struct
bdi_work into a single data structure, wb_writeback_work. Previously we
set all parameters in a struct wb_writeback_args, copied it into a
struct bdi_work, and then copied it again onto the stack to use it
there. Instead, just allocate one structure dynamically or on the stack
and use it all the way through the stack.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
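
For context (not part of the mm/backing-dev.c hunks below), the new model described
above lives in fs/fs-writeback.c. A condensed, slightly simplified sketch of the
unified work item, the dequeue-then-complete-or-free loop, and a synchronous caller;
field lists, error handling, and the exact function signatures are abbreviated:

/* Unified work item: replaces struct wb_writeback_args + struct bdi_work. */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/* Dequeue under the lock, so each item is owned by exactly one thread. */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock(&bdi->wb_lock);
	return work;
}

/* Writeback thread: no RCU freeing, no pending/visited state to track. */
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi)) != NULL) {
		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}
	return wrote;
}

/* Synchronous caller (cf. writeback_inodes_sb): stack work + completion. */
static void writeback_inodes_sb_sketch(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.done		= &done,
	};

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}

Asynchronous callers instead kmalloc() the work item and leave work->done NULL,
so the writeback thread frees it after processing.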
Diffstat (limited to 'mm/backing-dev.c')
-rw-r--r--	mm/backing-dev.c	14
1 files changed, 3 insertions, 11 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6e0b09a1ec2c..123bcef13e51 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -104,15 +104,13 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		   "b_more_io:        %8lu\n"
 		   "bdi_list:         %8u\n"
 		   "state:            %8lx\n"
-		   "wb_mask:          %8lx\n"
-		   "wb_list:          %8u\n"
-		   "wb_cnt:           %8u\n",
+		   "wb_list:          %8u\n",
 		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
 		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
 		   K(bdi_thresh), K(dirty_thresh),
 		   K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
-		   !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
-		   !list_empty(&bdi->wb_list), bdi->wb_cnt);
+		   !list_empty(&bdi->bdi_list), bdi->state,
+		   !list_empty(&bdi->wb_list));
 #undef K
 
 	return 0;
@@ -674,12 +672,6 @@ int bdi_init(struct backing_dev_info *bdi)
 
 	bdi_wb_init(&bdi->wb, bdi);
 
-	/*
-	 * Just one thread support for now, hard code mask and count
-	 */
-	bdi->wb_mask = 1;
-	bdi->wb_cnt = 1;
-
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
 		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
 		if (err)