Diffstat (limited to 'mm/backing-dev.c')
 -rw-r--r--  mm/backing-dev.c | 427
 1 file changed, 420 insertions(+), 7 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index c86edd244294..5a37e2055717 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -1,8 +1,11 @@
 
 #include <linux/wait.h>
 #include <linux/backing-dev.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
+#include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/writeback.h>
@@ -14,6 +17,7 @@ void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 EXPORT_SYMBOL(default_unplug_io_fn);
 
 struct backing_dev_info default_backing_dev_info = {
+        .name           = "default",
         .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
         .state          = 0,
         .capabilities   = BDI_CAP_MAP_COPY,
@@ -23,6 +27,24 @@ EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
 static struct class *bdi_class;
 
+/*
+ * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
+ * reader side protection for bdi_pending_list. bdi_list has RCU reader side
+ * locking.
+ */
+DEFINE_SPINLOCK(bdi_lock);
+LIST_HEAD(bdi_list);
+LIST_HEAD(bdi_pending_list);
+
+static struct task_struct *sync_supers_tsk;
+static struct timer_list sync_supers_timer;
+
+static int bdi_sync_supers(void *);
+static void sync_supers_timer_fn(unsigned long);
+static void arm_supers_timer(void);
+
+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);
+
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
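
The locking comment above is the invariant the rest of this patch leans on: all updates to bdi_list and bdi_pending_list happen under bdi_lock, bdi_pending_list is also read under bdi_lock, but bdi_list walkers only need RCU. A minimal sketch of the two disciplines (illustrative only, not part of the patch):

        /* Writer side: mutate under bdi_lock, using the _rcu list helpers
         * for bdi_list so that concurrent readers stay safe. */
        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        /* Reader side for bdi_list: an RCU read-side critical section. */
        rcu_read_lock();
        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                /* bdi stays valid here even if a writer unlinks it */
        }
        rcu_read_unlock();

        /* bdi_pending_list has no RCU readers: both sides take bdi_lock. */
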
@@ -37,9 +59,29 @@ static void bdi_debug_init(void)
 static int bdi_debug_stats_show(struct seq_file *m, void *v)
 {
         struct backing_dev_info *bdi = m->private;
+        struct bdi_writeback *wb;
         unsigned long background_thresh;
         unsigned long dirty_thresh;
         unsigned long bdi_thresh;
+        unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
+        struct inode *inode;
+
+        /*
+         * The inode lock is enough here; bdi->wb_list is protected by
+         * RCU on the reader side.
+         */
+        nr_wb = nr_dirty = nr_io = nr_more_io = 0;
+        spin_lock(&inode_lock);
+        list_for_each_entry(wb, &bdi->wb_list, list) {
+                nr_wb++;
+                list_for_each_entry(inode, &wb->b_dirty, i_list)
+                        nr_dirty++;
+                list_for_each_entry(inode, &wb->b_io, i_list)
+                        nr_io++;
+                list_for_each_entry(inode, &wb->b_more_io, i_list)
+                        nr_more_io++;
+        }
+        spin_unlock(&inode_lock);
 
         get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
 
@@ -49,12 +91,22 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
49 "BdiReclaimable: %8lu kB\n" 91 "BdiReclaimable: %8lu kB\n"
50 "BdiDirtyThresh: %8lu kB\n" 92 "BdiDirtyThresh: %8lu kB\n"
51 "DirtyThresh: %8lu kB\n" 93 "DirtyThresh: %8lu kB\n"
52 "BackgroundThresh: %8lu kB\n", 94 "BackgroundThresh: %8lu kB\n"
95 "WritebackThreads: %8lu\n"
96 "b_dirty: %8lu\n"
97 "b_io: %8lu\n"
98 "b_more_io: %8lu\n"
99 "bdi_list: %8u\n"
100 "state: %8lx\n"
101 "wb_mask: %8lx\n"
102 "wb_list: %8u\n"
103 "wb_cnt: %8u\n",
53 (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)), 104 (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
54 (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)), 105 (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
55 K(bdi_thresh), 106 K(bdi_thresh), K(dirty_thresh),
56 K(dirty_thresh), 107 K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
57 K(background_thresh)); 108 !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
109 !list_empty(&bdi->wb_list), bdi->wb_cnt);
58#undef K 110#undef K
59 111
60 return 0; 112 return 0;
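
For reference, the stats file this function backs (registered via bdi_debug_register(), conventionally /sys/kernel/debug/bdi/<dev>/stats) would read roughly as follows after this change. The BdiWriteback line comes from format text just above this hunk; all values are purely illustrative:

        BdiWriteback:          4 kB
        BdiReclaimable:    61824 kB
        BdiDirtyThresh:   204344 kB
        DirtyThresh:      204344 kB
        BackgroundThresh: 102172 kB
        WritebackThreads:        1
        b_dirty:                 3
        b_io:                    0
        b_more_io:               0
        bdi_list:                1
        state:                   8
        wb_mask:                 1
        wb_list:                 1
        wb_cnt:                  1
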
@@ -185,6 +237,13 @@ static int __init default_bdi_init(void)
 {
         int err;
 
+        sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
+        BUG_ON(IS_ERR(sync_supers_tsk));
+
+        init_timer(&sync_supers_timer);
+        setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
+        arm_supers_timer();
+
         err = bdi_init(&default_backing_dev_info);
         if (!err)
                 bdi_register(&default_backing_dev_info, NULL, "default");
@@ -193,6 +252,279 @@ static int __init default_bdi_init(void)
193} 252}
194subsys_initcall(default_bdi_init); 253subsys_initcall(default_bdi_init);
195 254
255static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
256{
257 memset(wb, 0, sizeof(*wb));
258
259 wb->bdi = bdi;
260 wb->last_old_flush = jiffies;
261 INIT_LIST_HEAD(&wb->b_dirty);
262 INIT_LIST_HEAD(&wb->b_io);
263 INIT_LIST_HEAD(&wb->b_more_io);
264}
265
266static void bdi_task_init(struct backing_dev_info *bdi,
267 struct bdi_writeback *wb)
268{
269 struct task_struct *tsk = current;
270
271 spin_lock(&bdi->wb_lock);
272 list_add_tail_rcu(&wb->list, &bdi->wb_list);
273 spin_unlock(&bdi->wb_lock);
274
275 tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
276 set_freezable();
277
278 /*
279 * Our parent may run at a different priority, just set us to normal
280 */
281 set_user_nice(tsk, 0);
282}
283
284static int bdi_start_fn(void *ptr)
285{
286 struct bdi_writeback *wb = ptr;
287 struct backing_dev_info *bdi = wb->bdi;
288 int ret;
289
290 /*
291 * Add us to the active bdi_list
292 */
293 spin_lock_bh(&bdi_lock);
294 list_add_rcu(&bdi->bdi_list, &bdi_list);
295 spin_unlock_bh(&bdi_lock);
296
297 bdi_task_init(bdi, wb);
298
299 /*
300 * Clear pending bit and wakeup anybody waiting to tear us down
301 */
302 clear_bit(BDI_pending, &bdi->state);
303 smp_mb__after_clear_bit();
304 wake_up_bit(&bdi->state, BDI_pending);
305
306 ret = bdi_writeback_task(wb);
307
308 /*
309 * Remove us from the list
310 */
311 spin_lock(&bdi->wb_lock);
312 list_del_rcu(&wb->list);
313 spin_unlock(&bdi->wb_lock);
314
315 /*
316 * Flush any work that raced with us exiting. No new work
317 * will be added, since this bdi isn't discoverable anymore.
318 */
319 if (!list_empty(&bdi->work_list))
320 wb_do_writeback(wb, 1);
321
322 wb->task = NULL;
323 return ret;
324}
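
The clear_bit()/wake_up_bit() sequence in bdi_start_fn() is one half of a bit-wait handshake on BDI_pending, with smp_mb__after_clear_bit() ordering the clear against the waiter's re-check. The sleeping half is bdi_wb_shutdown(), added later in this patch. The pairing, side by side:

        /* flusher thread, once setup is complete (bdi_start_fn above): */
        clear_bit(BDI_pending, &bdi->state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bdi->state, BDI_pending);

        /* teardown path (bdi_wb_shutdown, below): */
        wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
                    TASK_UNINTERRUPTIBLE);
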
+
+int bdi_has_dirty_io(struct backing_dev_info *bdi)
+{
+        return wb_has_dirty_io(&bdi->wb);
+}
+
+static void bdi_flush_io(struct backing_dev_info *bdi)
+{
+        struct writeback_control wbc = {
+                .bdi                    = bdi,
+                .sync_mode              = WB_SYNC_NONE,
+                .older_than_this        = NULL,
+                .range_cyclic           = 1,
+                .nr_to_write            = 1024,
+        };
+
+        writeback_inodes_wbc(&wbc);
+}
+
+/*
+ * kupdated() used to do this. We cannot do it from the bdi_forker_task()
+ * or we risk deadlocking on ->s_umount. The longer term solution would be
+ * to implement sync_supers_bdi() or similar and simply do it from the
+ * bdi writeback tasks individually.
+ */
+static int bdi_sync_supers(void *unused)
+{
+        set_user_nice(current, 0);
+
+        while (!kthread_should_stop()) {
+                set_current_state(TASK_INTERRUPTIBLE);
+                schedule();
+
+                /*
+                 * Do this periodically, like kupdated() did before.
+                 */
+                sync_supers();
+        }
+
+        return 0;
+}
+
+static void arm_supers_timer(void)
+{
+        unsigned long next;
+
+        next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
+        mod_timer(&sync_supers_timer, round_jiffies_up(next));
+}
+
+static void sync_supers_timer_fn(unsigned long unused)
+{
+        wake_up_process(sync_supers_tsk);
+        arm_supers_timer();
+}
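
sync_supers is a classic "timer kicks thread" pair: bdi_sync_supers() sleeps in TASK_INTERRUPTIBLE, while the timer callback wakes it and immediately re-arms itself, so the thread never has to track the period. The same pattern in miniature, with hypothetical names (assumes setup_timer(&tick, tick_fn, 0) and kthread_run(worker_fn, ...) at init, as default_bdi_init() does above):

        static struct task_struct *worker;      /* hypothetical */
        static struct timer_list tick;          /* hypothetical */

        static void tick_fn(unsigned long unused)
        {
                wake_up_process(worker);        /* kick the sleeping thread */
                mod_timer(&tick, jiffies + HZ); /* re-arm for the next period */
        }

        static int worker_fn(void *unused)
        {
                while (!kthread_should_stop()) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();             /* sleep until tick_fn() fires */
                        do_periodic_work();     /* hypothetical payload */
                }
                return 0;
        }
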
380
381static int bdi_forker_task(void *ptr)
382{
383 struct bdi_writeback *me = ptr;
384
385 bdi_task_init(me->bdi, me);
386
387 for (;;) {
388 struct backing_dev_info *bdi, *tmp;
389 struct bdi_writeback *wb;
390
391 /*
392 * Temporary measure, we want to make sure we don't see
393 * dirty data on the default backing_dev_info
394 */
395 if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
396 wb_do_writeback(me, 0);
397
398 spin_lock_bh(&bdi_lock);
399
400 /*
401 * Check if any existing bdi's have dirty data without
402 * a thread registered. If so, set that up.
403 */
404 list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
405 if (bdi->wb.task)
406 continue;
407 if (list_empty(&bdi->work_list) &&
408 !bdi_has_dirty_io(bdi))
409 continue;
410
411 bdi_add_default_flusher_task(bdi);
412 }
413
414 set_current_state(TASK_INTERRUPTIBLE);
415
416 if (list_empty(&bdi_pending_list)) {
417 unsigned long wait;
418
419 spin_unlock_bh(&bdi_lock);
420 wait = msecs_to_jiffies(dirty_writeback_interval * 10);
421 schedule_timeout(wait);
422 try_to_freeze();
423 continue;
424 }
425
426 __set_current_state(TASK_RUNNING);
427
428 /*
429 * This is our real job - check for pending entries in
430 * bdi_pending_list, and create the tasks that got added
431 */
432 bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
433 bdi_list);
434 list_del_init(&bdi->bdi_list);
435 spin_unlock_bh(&bdi_lock);
436
437 wb = &bdi->wb;
438 wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
439 dev_name(bdi->dev));
440 /*
441 * If task creation fails, then readd the bdi to
442 * the pending list and force writeout of the bdi
443 * from this forker thread. That will free some memory
444 * and we can try again.
445 */
446 if (IS_ERR(wb->task)) {
447 wb->task = NULL;
448
449 /*
450 * Add this 'bdi' to the back, so we get
451 * a chance to flush other bdi's to free
452 * memory.
453 */
454 spin_lock_bh(&bdi_lock);
455 list_add_tail(&bdi->bdi_list, &bdi_pending_list);
456 spin_unlock_bh(&bdi_lock);
457
458 bdi_flush_io(bdi);
459 }
460 }
461
462 return 0;
463}
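
Stripped of locking and list handling, the forker loop above is a small state machine. A condensed sketch, using hypothetical helper names for steps the real code inlines:

        for (;;) {
                flush_default_bdi();    /* the "temporary measure" above */
                queue_dirty_bdis();     /* bdi_add_default_flusher_task() */

                if (list_empty(&bdi_pending_list)) {
                        sleep_interruptible(dirty_writeback_interval);
                        continue;       /* nothing pending, doze */
                }

                bdi = pop_first_pending();
                if (!spawn_flusher(bdi))        /* kthread_run(bdi_start_fn) */
                        requeue_and_flush(bdi); /* ENOMEM: write out inline */
        }

Falling back to bdi_flush_io() when kthread_run() fails is what makes the retry safe: writing the bdi out from the forker itself frees memory, improving the odds that the next creation attempt succeeds.
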
+
+static void bdi_add_to_pending(struct rcu_head *head)
+{
+        struct backing_dev_info *bdi;
+
+        bdi = container_of(head, struct backing_dev_info, rcu_head);
+        INIT_LIST_HEAD(&bdi->bdi_list);
+
+        spin_lock(&bdi_lock);
+        list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+        spin_unlock(&bdi_lock);
+
+        /*
+         * We are now on the pending list, wake up bdi_forker_task()
+         * to finish the job and add us back to the active bdi_list
+         */
+        wake_up_process(default_backing_dev_info.wb.task);
+}
+
+/*
+ * Add the default flusher task that gets created for any bdi
+ * that has dirty data pending writeout
+ */
+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
+{
+        if (!bdi_cap_writeback_dirty(bdi))
+                return;
+
+        if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
+                printk(KERN_ERR "bdi %p/%s is not registered!\n",
+                       bdi, bdi->name);
+                return;
+        }
+
+        /*
+         * Check with the helper whether to proceed adding a task. We will
+         * only abort if two or more simultaneous calls to
+         * bdi_add_default_flusher_task() occurred; further additions will
+         * block waiting for previous additions to finish.
+         */
+        if (!test_and_set_bit(BDI_pending, &bdi->state)) {
+                list_del_rcu(&bdi->bdi_list);
+
+                /*
+                 * We must wait for the current RCU period to end before
+                 * moving to the pending list. So schedule that operation
+                 * from an RCU callback.
+                 */
+                call_rcu(&bdi->rcu_head, bdi_add_to_pending);
+        }
+}
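
The call_rcu() step is the subtle part: a list_head must not be re-linked onto bdi_pending_list while earlier RCU readers may still be traversing it on bdi_list, so the re-add is deferred until a grace period has elapsed. Reduced to its RCU skeleton, the move is:

        list_del_rcu(&bdi->bdi_list);           /* readers may still see it */
        call_rcu(&bdi->rcu_head, bdi_add_to_pending); /* re-link after grace */

Using call_rcu() rather than synchronize_rcu() also keeps this function non-blocking, which matters because the forker loop calls it while holding bdi_lock.
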
+
+/*
+ * Remove bdi from bdi_list, and ensure that it is no longer visible
+ */
+static void bdi_remove_from_list(struct backing_dev_info *bdi)
+{
+        spin_lock_bh(&bdi_lock);
+        list_del_rcu(&bdi->bdi_list);
+        spin_unlock_bh(&bdi_lock);
+
+        synchronize_rcu();
+}
+
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                  const char *fmt, ...)
 {
@@ -211,9 +543,33 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                 goto exit;
         }
 
+        spin_lock_bh(&bdi_lock);
+        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+        spin_unlock_bh(&bdi_lock);
+
         bdi->dev = dev;
-        bdi_debug_register(bdi, dev_name(dev));
 
+        /*
+         * Just start the forker thread for our default backing_dev_info,
+         * and add other bdi's to the list. They will get a thread created
+         * on-demand when they need it.
+         */
+        if (bdi_cap_flush_forker(bdi)) {
+                struct bdi_writeback *wb = &bdi->wb;
+
+                wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
+                                        dev_name(dev));
+                if (IS_ERR(wb->task)) {
+                        wb->task = NULL;
+                        ret = -ENOMEM;
+
+                        bdi_remove_from_list(bdi);
+                        goto exit;
+                }
+        }
+
+        bdi_debug_register(bdi, dev_name(dev));
+        set_bit(BDI_registered, &bdi->state);
 exit:
         return ret;
 }
@@ -225,9 +581,40 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
225} 581}
226EXPORT_SYMBOL(bdi_register_dev); 582EXPORT_SYMBOL(bdi_register_dev);
227 583
584/*
585 * Remove bdi from the global list and shutdown any threads we have running
586 */
587static void bdi_wb_shutdown(struct backing_dev_info *bdi)
588{
589 struct bdi_writeback *wb;
590
591 if (!bdi_cap_writeback_dirty(bdi))
592 return;
593
594 /*
595 * If setup is pending, wait for that to complete first
596 */
597 wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
598 TASK_UNINTERRUPTIBLE);
599
600 /*
601 * Make sure nobody finds us on the bdi_list anymore
602 */
603 bdi_remove_from_list(bdi);
604
605 /*
606 * Finally, kill the kernel threads. We don't need to be RCU
607 * safe anymore, since the bdi is gone from visibility.
608 */
609 list_for_each_entry(wb, &bdi->wb_list, list)
610 kthread_stop(wb->task);
611}
612
228void bdi_unregister(struct backing_dev_info *bdi) 613void bdi_unregister(struct backing_dev_info *bdi)
229{ 614{
230 if (bdi->dev) { 615 if (bdi->dev) {
616 if (!bdi_cap_flush_forker(bdi))
617 bdi_wb_shutdown(bdi);
231 bdi_debug_unregister(bdi); 618 bdi_debug_unregister(bdi);
232 device_unregister(bdi->dev); 619 device_unregister(bdi->dev);
233 bdi->dev = NULL; 620 bdi->dev = NULL;
@@ -237,14 +624,26 @@ EXPORT_SYMBOL(bdi_unregister);
 
 int bdi_init(struct backing_dev_info *bdi)
 {
-        int i;
-        int err;
+        int i, err;
 
         bdi->dev = NULL;
 
         bdi->min_ratio = 0;
         bdi->max_ratio = 100;
         bdi->max_prop_frac = PROP_FRAC_BASE;
+        spin_lock_init(&bdi->wb_lock);
+        INIT_RCU_HEAD(&bdi->rcu_head);
+        INIT_LIST_HEAD(&bdi->bdi_list);
+        INIT_LIST_HEAD(&bdi->wb_list);
+        INIT_LIST_HEAD(&bdi->work_list);
+
+        bdi_wb_init(&bdi->wb, bdi);
+
+        /*
+         * Just one thread support for now, hard code mask and count
+         */
+        bdi->wb_mask = 1;
+        bdi->wb_cnt = 1;
 
         for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                 err = percpu_counter_init(&bdi->bdi_stat[i], 0);
@@ -269,6 +668,20 @@ void bdi_destroy(struct backing_dev_info *bdi)
 {
         int i;
 
+        /*
+         * Splice our entries to the default_backing_dev_info, if this
+         * bdi disappears
+         */
+        if (bdi_has_dirty_io(bdi)) {
+                struct bdi_writeback *dst = &default_backing_dev_info.wb;
+
+                spin_lock(&inode_lock);
+                list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
+                list_splice(&bdi->wb.b_io, &dst->b_io);
+                list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
+                spin_unlock(&inode_lock);
+        }
+
         bdi_unregister(bdi);
 
         for (i = 0; i < NR_BDI_STAT_ITEMS; i++)