author		Christoph Hellwig <hch@lst.de>		2010-06-19 17:08:06 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-08-07 12:23:03 -0400
commit		c1955ce32fdb0877b7a1b22feb2669358f65be76
tree		805b4da7168b0b16de890ea248323f3cdf5dc0b7 /mm/backing-dev.c
parent		4c4762d10faf93167b84ee03e4b58dd62012b06f
writeback: remove wb_list
The wb_list member of struct backing_dev_info always has exactly one
element. Just use the direct bdi->wb pointer instead and simplify some
code.

Also remove bdi_task_init, which is now trivial, to prepare for the next
patch.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
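
For readers without the tree handy, here is a minimal, self-contained sketch of the idea behind the patch. The *_sketch types below are hypothetical stand-ins, not the real structures from include/linux/backing-dev.h; the point is only that a list which always holds exactly one element can be replaced by a plain embedded member and direct access.

#include <stdio.h>

/* Hypothetical stand-in for struct bdi_writeback (not the kernel type). */
struct bdi_writeback_sketch {
	int nr_dirty;			/* stand-in for walking the b_dirty list */
};

/* Hypothetical stand-in for struct backing_dev_info (not the kernel type). */
struct backing_dev_info_sketch {
	struct bdi_writeback_sketch wb;	/* exactly one writeback context */
};

/*
 * Before the patch, callers iterated a one-element wb_list to reach this
 * data; afterwards they simply dereference bdi->wb, as done here.
 */
static int count_dirty(const struct backing_dev_info_sketch *bdi)
{
	return bdi->wb.nr_dirty;
}

int main(void)
{
	struct backing_dev_info_sketch bdi = { .wb = { .nr_dirty = 3 } };

	printf("dirty: %d\n", count_dirty(&bdi));
	return 0;
}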
Diffstat (limited to 'mm/backing-dev.c')
-rw-r--r--	mm/backing-dev.c	83
1 file changed, 29 insertions, 54 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 123bcef13e51..6c2a09c8922c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -65,28 +65,21 @@ static void bdi_debug_init(void)
 static int bdi_debug_stats_show(struct seq_file *m, void *v)
 {
 	struct backing_dev_info *bdi = m->private;
-	struct bdi_writeback *wb;
+	struct bdi_writeback *wb = &bdi->wb;
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
 	unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
 	struct inode *inode;
 
-	/*
-	 * inode lock is enough here, the bdi->wb_list is protected by
-	 * RCU on the reader side
-	 */
 	nr_wb = nr_dirty = nr_io = nr_more_io = 0;
 	spin_lock(&inode_lock);
-	list_for_each_entry(wb, &bdi->wb_list, list) {
-		nr_wb++;
-		list_for_each_entry(inode, &wb->b_dirty, i_list)
-			nr_dirty++;
-		list_for_each_entry(inode, &wb->b_io, i_list)
-			nr_io++;
-		list_for_each_entry(inode, &wb->b_more_io, i_list)
-			nr_more_io++;
-	}
+	list_for_each_entry(inode, &wb->b_dirty, i_list)
+		nr_dirty++;
+	list_for_each_entry(inode, &wb->b_io, i_list)
+		nr_io++;
+	list_for_each_entry(inode, &wb->b_more_io, i_list)
+		nr_more_io++;
 	spin_unlock(&inode_lock);
 
 	get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
@@ -98,19 +91,16 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		   "BdiDirtyThresh:   %8lu kB\n"
 		   "DirtyThresh:      %8lu kB\n"
 		   "BackgroundThresh: %8lu kB\n"
-		   "WritebackThreads: %8lu\n"
 		   "b_dirty:          %8lu\n"
 		   "b_io:             %8lu\n"
 		   "b_more_io:        %8lu\n"
 		   "bdi_list:         %8u\n"
-		   "state:            %8lx\n"
-		   "wb_list:          %8u\n",
+		   "state:            %8lx\n",
 		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
 		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
 		   K(bdi_thresh), K(dirty_thresh),
-		   K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
-		   !list_empty(&bdi->bdi_list), bdi->state,
-		   !list_empty(&bdi->wb_list));
+		   K(background_thresh), nr_dirty, nr_io, nr_more_io,
+		   !list_empty(&bdi->bdi_list), bdi->state);
 #undef K
 
 	return 0;
@@ -270,24 +260,6 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 	INIT_LIST_HEAD(&wb->b_more_io);
 }
 
-static void bdi_task_init(struct backing_dev_info *bdi,
-			  struct bdi_writeback *wb)
-{
-	struct task_struct *tsk = current;
-
-	spin_lock(&bdi->wb_lock);
-	list_add_tail_rcu(&wb->list, &bdi->wb_list);
-	spin_unlock(&bdi->wb_lock);
-
-	tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
-	set_freezable();
-
-	/*
-	 * Our parent may run at a different priority, just set us to normal
-	 */
-	set_user_nice(tsk, 0);
-}
-
 static int bdi_start_fn(void *ptr)
 {
 	struct bdi_writeback *wb = ptr;
@@ -301,7 +273,13 @@ static int bdi_start_fn(void *ptr)
 	list_add_rcu(&bdi->bdi_list, &bdi_list);
 	spin_unlock_bh(&bdi_lock);
 
-	bdi_task_init(bdi, wb);
+	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+	set_freezable();
+
+	/*
+	 * Our parent may run at a different priority, just set us to normal
+	 */
+	set_user_nice(current, 0);
 
 	/*
 	 * Clear pending bit and wakeup anybody waiting to tear us down
@@ -312,12 +290,7 @@ static int bdi_start_fn(void *ptr)
 
 	ret = bdi_writeback_task(wb);
 
-	/*
-	 * Remove us from the list
-	 */
-	spin_lock(&bdi->wb_lock);
-	list_del_rcu(&wb->list);
-	spin_unlock(&bdi->wb_lock);
+	wb->task = NULL;
 
 	/*
 	 * Flush any work that raced with us exiting. No new work
@@ -326,7 +299,6 @@ static int bdi_start_fn(void *ptr)
 	if (!list_empty(&bdi->work_list))
 		wb_do_writeback(wb, 1);
 
-	wb->task = NULL;
 	return ret;
 }
 
@@ -391,7 +363,13 @@ static int bdi_forker_task(void *ptr)
 {
 	struct bdi_writeback *me = ptr;
 
-	bdi_task_init(me->bdi, me);
+	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+	set_freezable();
+
+	/*
+	 * Our parent may run at a different priority, just set us to normal
+	 */
+	set_user_nice(current, 0);
 
 	for (;;) {
 		struct backing_dev_info *bdi, *tmp;
@@ -598,8 +576,6 @@ EXPORT_SYMBOL(bdi_register_dev);
  */
 static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 {
-	struct bdi_writeback *wb;
-
 	if (!bdi_cap_writeback_dirty(bdi))
 		return;
 
@@ -615,14 +591,14 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	bdi_remove_from_list(bdi);
 
 	/*
-	 * Finally, kill the kernel threads. We don't need to be RCU
+	 * Finally, kill the kernel thread. We don't need to be RCU
 	 * safe anymore, since the bdi is gone from visibility. Force
 	 * unfreeze of the thread before calling kthread_stop(), otherwise
 	 * it would never exet if it is currently stuck in the refrigerator.
 	 */
-	list_for_each_entry(wb, &bdi->wb_list, list) {
-		thaw_process(wb->task);
-		kthread_stop(wb->task);
+	if (bdi->wb.task) {
+		thaw_process(bdi->wb.task);
+		kthread_stop(bdi->wb.task);
 	}
 }
 
@@ -667,7 +643,6 @@ int bdi_init(struct backing_dev_info *bdi)
 	spin_lock_init(&bdi->wb_lock);
 	INIT_RCU_HEAD(&bdi->rcu_head);
 	INIT_LIST_HEAD(&bdi->bdi_list);
-	INIT_LIST_HEAD(&bdi->wb_list);
 	INIT_LIST_HEAD(&bdi->work_list);
 
 	bdi_wb_init(&bdi->wb, bdi);