Diffstat (limited to 'mm/backing-dev.c')
 -rw-r--r--	mm/backing-dev.c	460
 1 file changed, 213 insertions, 247 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 660a87a22511..eaa4a5bbe063 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/writeback.h>
 #include <linux/device.h>
+#include <trace/events/writeback.h>
 
 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
 
@@ -49,8 +50,6 @@ static struct timer_list sync_supers_timer;
 static int bdi_sync_supers(void *);
 static void sync_supers_timer_fn(unsigned long);
 
-static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);
-
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -65,31 +64,25 @@ static void bdi_debug_init(void)
 static int bdi_debug_stats_show(struct seq_file *m, void *v)
 {
 	struct backing_dev_info *bdi = m->private;
-	struct bdi_writeback *wb;
+	struct bdi_writeback *wb = &bdi->wb;
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
 	unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
 	struct inode *inode;
 
-	/*
-	 * inode lock is enough here, the bdi->wb_list is protected by
-	 * RCU on the reader side
-	 */
 	nr_wb = nr_dirty = nr_io = nr_more_io = 0;
 	spin_lock(&inode_lock);
-	list_for_each_entry(wb, &bdi->wb_list, list) {
-		nr_wb++;
-		list_for_each_entry(inode, &wb->b_dirty, i_list)
-			nr_dirty++;
-		list_for_each_entry(inode, &wb->b_io, i_list)
-			nr_io++;
-		list_for_each_entry(inode, &wb->b_more_io, i_list)
-			nr_more_io++;
-	}
+	list_for_each_entry(inode, &wb->b_dirty, i_list)
+		nr_dirty++;
+	list_for_each_entry(inode, &wb->b_io, i_list)
+		nr_io++;
+	list_for_each_entry(inode, &wb->b_more_io, i_list)
+		nr_more_io++;
 	spin_unlock(&inode_lock);
 
-	get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
+	global_dirty_limits(&background_thresh, &dirty_thresh);
+	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 	seq_printf(m,
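
Aside: the K(x) macro above converts a page count to kilobytes by shifting,
x << (PAGE_SHIFT - 10). With the common 4 KiB page size (PAGE_SHIFT = 12)
that is x << 2, i.e. x * 4. A standalone sketch of the arithmetic (the page
size and the sample value are illustrative assumptions, not part of the
patch):

	#include <stdio.h>

	#define PAGE_SHIFT 12				/* assume 4 KiB pages */
	#define K(x) ((x) << (PAGE_SHIFT - 10))		/* pages -> kB, as above */

	int main(void)
	{
		unsigned long writeback_pages = 300;

		/* 300 pages * 4 kB/page = 1200 kB */
		printf("BdiWriteback: %8lu kB\n", K(writeback_pages));
		return 0;
	}
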
@@ -98,21 +91,16 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		   "BdiDirtyThresh:     %8lu kB\n"
 		   "DirtyThresh:        %8lu kB\n"
 		   "BackgroundThresh:   %8lu kB\n"
-		   "WritebackThreads:   %8lu\n"
 		   "b_dirty:            %8lu\n"
 		   "b_io:               %8lu\n"
 		   "b_more_io:          %8lu\n"
 		   "bdi_list:           %8u\n"
-		   "state:              %8lx\n"
-		   "wb_mask:            %8lx\n"
-		   "wb_list:            %8u\n"
-		   "wb_cnt:             %8u\n",
+		   "state:              %8lx\n",
 		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
 		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
 		   K(bdi_thresh), K(dirty_thresh),
-		   K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
-		   !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
-		   !list_empty(&bdi->wb_list), bdi->wb_cnt);
+		   K(background_thresh), nr_dirty, nr_io, nr_more_io,
+		   !list_empty(&bdi->bdi_list), bdi->state);
 #undef K
 
 	return 0;
@@ -249,7 +237,6 @@ static int __init default_bdi_init(void)
 	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
 	BUG_ON(IS_ERR(sync_supers_tsk));
 
-	init_timer(&sync_supers_timer);
 	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
 	bdi_arm_supers_timer();
 
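
Aside: the deleted init_timer() call was redundant rather than wrong.
setup_timer() of this era already performs the init_timer() step itself,
roughly as below (a simplified sketch; the real definition also threads
lockdep annotations through, so treat the exact body as an assumption):

	static inline void setup_timer(struct timer_list *timer,
				       void (*function)(unsigned long),
				       unsigned long data)
	{
		timer->function = function;
		timer->data = data;
		init_timer(timer);
	}
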
@@ -261,77 +248,6 @@ static int __init default_bdi_init(void)
 }
 subsys_initcall(default_bdi_init);
 
-static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
-{
-	memset(wb, 0, sizeof(*wb));
-
-	wb->bdi = bdi;
-	wb->last_old_flush = jiffies;
-	INIT_LIST_HEAD(&wb->b_dirty);
-	INIT_LIST_HEAD(&wb->b_io);
-	INIT_LIST_HEAD(&wb->b_more_io);
-}
-
-static void bdi_task_init(struct backing_dev_info *bdi,
-			  struct bdi_writeback *wb)
-{
-	struct task_struct *tsk = current;
-
-	spin_lock(&bdi->wb_lock);
-	list_add_tail_rcu(&wb->list, &bdi->wb_list);
-	spin_unlock(&bdi->wb_lock);
-
-	tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
-	set_freezable();
-
-	/*
-	 * Our parent may run at a different priority, just set us to normal
-	 */
-	set_user_nice(tsk, 0);
-}
-
-static int bdi_start_fn(void *ptr)
-{
-	struct bdi_writeback *wb = ptr;
-	struct backing_dev_info *bdi = wb->bdi;
-	int ret;
-
-	/*
-	 * Add us to the active bdi_list
-	 */
-	spin_lock_bh(&bdi_lock);
-	list_add_rcu(&bdi->bdi_list, &bdi_list);
-	spin_unlock_bh(&bdi_lock);
-
-	bdi_task_init(bdi, wb);
-
-	/*
-	 * Clear pending bit and wakeup anybody waiting to tear us down
-	 */
-	clear_bit(BDI_pending, &bdi->state);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&bdi->state, BDI_pending);
-
-	ret = bdi_writeback_task(wb);
-
-	/*
-	 * Remove us from the list
-	 */
-	spin_lock(&bdi->wb_lock);
-	list_del_rcu(&wb->list);
-	spin_unlock(&bdi->wb_lock);
-
-	/*
-	 * Flush any work that raced with us exiting. No new work
-	 * will be added, since this bdi isn't discoverable anymore.
-	 */
-	if (!list_empty(&bdi->work_list))
-		wb_do_writeback(wb, 1);
-
-	wb->task = NULL;
-	return ret;
-}
-
 int bdi_has_dirty_io(struct backing_dev_info *bdi)
 {
 	return wb_has_dirty_io(&bdi->wb);
@@ -340,21 +256,20 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
 static void bdi_flush_io(struct backing_dev_info *bdi)
 {
 	struct writeback_control wbc = {
-		.bdi			= bdi,
 		.sync_mode		= WB_SYNC_NONE,
 		.older_than_this	= NULL,
 		.range_cyclic		= 1,
 		.nr_to_write		= 1024,
 	};
 
-	writeback_inodes_wbc(&wbc);
+	writeback_inodes_wb(&bdi->wb, &wbc);
 }
 
 /*
- * kupdated() used to do this. We cannot do it from the bdi_forker_task()
+ * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
  * or we risk deadlocking on ->s_umount. The longer term solution would be
  * to implement sync_supers_bdi() or similar and simply do it from the
- * bdi writeback tasks individually.
+ * bdi writeback threads individually.
  */
 static int bdi_sync_supers(void *unused)
 {
@@ -390,144 +305,198 @@ static void sync_supers_timer_fn(unsigned long unused)
 	bdi_arm_supers_timer();
 }
 
-static int bdi_forker_task(void *ptr)
+static void wakeup_timer_fn(unsigned long data)
+{
+	struct backing_dev_info *bdi = (struct backing_dev_info *)data;
+
+	spin_lock_bh(&bdi->wb_lock);
+	if (bdi->wb.task) {
+		trace_writeback_wake_thread(bdi);
+		wake_up_process(bdi->wb.task);
+	} else {
+		/*
+		 * When bdi threads are inactive for a long time, they are
+		 * killed. In this case we have to wake up the forker thread
+		 * which should create and run the bdi thread.
+		 */
+		trace_writeback_wake_forker_thread(bdi);
+		wake_up_process(default_backing_dev_info.wb.task);
+	}
+	spin_unlock_bh(&bdi->wb_lock);
+}
+
+/*
+ * This function is used when the first inode for this bdi is marked dirty. It
+ * wakes up the corresponding bdi thread which should then take care of the
+ * periodic background write-out of dirty inodes. Since the write-out would
+ * start only 'dirty_writeback_interval' centisecs from now anyway, we just
+ * set up a timer which wakes the bdi thread up later.
+ *
+ * Note, we wouldn't bother setting up the timer, but this function is on the
+ * fast-path (used by '__mark_inode_dirty()'), so we save a few context
+ * switches by delaying the wake-up.
+ */
+void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
+{
+	unsigned long timeout;
+
+	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
+	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
+}
+
+/*
+ * Calculate the longest interval (jiffies) bdi threads are allowed to be
+ * inactive.
+ */
+static unsigned long bdi_longest_inactive(void)
+{
+	unsigned long interval;
+
+	interval = msecs_to_jiffies(dirty_writeback_interval * 10);
+	return max(5UL * 60 * HZ, interval);
+}
+
+static int bdi_forker_thread(void *ptr)
 {
 	struct bdi_writeback *me = ptr;
 
-	bdi_task_init(me->bdi, me);
+	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+	set_freezable();
+
+	/*
+	 * Our parent may run at a different priority, just set us to normal
+	 */
+	set_user_nice(current, 0);
 
 	for (;;) {
-		struct backing_dev_info *bdi, *tmp;
-		struct bdi_writeback *wb;
+		struct task_struct *task = NULL;
+		struct backing_dev_info *bdi;
+		enum {
+			NO_ACTION,   /* Nothing to do */
+			FORK_THREAD, /* Fork bdi thread */
+			KILL_THREAD, /* Kill inactive bdi thread */
+		} action = NO_ACTION;
 
 		/*
 		 * Temporary measure, we want to make sure we don't see
 		 * dirty data on the default backing_dev_info
 		 */
-		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
+		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
+			del_timer(&me->wakeup_timer);
 			wb_do_writeback(me, 0);
+		}
 
 		spin_lock_bh(&bdi_lock);
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		/*
-		 * Check if any existing bdi's have dirty data without
-		 * a thread registered. If so, set that up.
-		 */
-		list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
-			if (bdi->wb.task)
-				continue;
-			if (list_empty(&bdi->work_list) &&
-			    !bdi_has_dirty_io(bdi))
+		list_for_each_entry(bdi, &bdi_list, bdi_list) {
+			bool have_dirty_io;
+
+			if (!bdi_cap_writeback_dirty(bdi) ||
+			    bdi_cap_flush_forker(bdi))
 				continue;
 
-			bdi_add_default_flusher_task(bdi);
-		}
+			WARN(!test_bit(BDI_registered, &bdi->state),
+			     "bdi %p/%s is not registered!\n", bdi, bdi->name);
 
-		set_current_state(TASK_INTERRUPTIBLE);
+			have_dirty_io = !list_empty(&bdi->work_list) ||
+					wb_has_dirty_io(&bdi->wb);
 
-		if (list_empty(&bdi_pending_list)) {
-			unsigned long wait;
+			/*
+			 * If the bdi has work to do, but the thread does not
+			 * exist - create it.
+			 */
+			if (!bdi->wb.task && have_dirty_io) {
+				/*
+				 * Set the pending bit - if someone will try to
+				 * unregister this bdi - it'll wait on this bit.
+				 */
+				set_bit(BDI_pending, &bdi->state);
+				action = FORK_THREAD;
+				break;
+			}
+
+			spin_lock(&bdi->wb_lock);
+
+			/*
+			 * If there is no work to do and the bdi thread was
+			 * inactive long enough - kill it. The wb_lock is taken
+			 * to make sure no-one adds more work to this bdi and
+			 * wakes the bdi thread up.
+			 */
+			if (bdi->wb.task && !have_dirty_io &&
+			    time_after(jiffies, bdi->wb.last_active +
+						bdi_longest_inactive())) {
+				task = bdi->wb.task;
+				bdi->wb.task = NULL;
+				spin_unlock(&bdi->wb_lock);
+				set_bit(BDI_pending, &bdi->state);
+				action = KILL_THREAD;
+				break;
+			}
+			spin_unlock(&bdi->wb_lock);
+		}
+		spin_unlock_bh(&bdi_lock);
 
-			spin_unlock_bh(&bdi_lock);
-			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
-			if (wait)
-				schedule_timeout(wait);
+		/* Keep working if default bdi still has things to do */
+		if (!list_empty(&me->bdi->work_list))
+			__set_current_state(TASK_RUNNING);
+
+		switch (action) {
+		case FORK_THREAD:
+			__set_current_state(TASK_RUNNING);
+			task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s",
+					   dev_name(bdi->dev));
+			if (IS_ERR(task)) {
+				/*
+				 * If thread creation fails, force writeout of
+				 * the bdi from the thread.
+				 */
+				bdi_flush_io(bdi);
+			} else {
+				/*
+				 * The spinlock makes sure we do not lose
+				 * wake-ups when racing with 'bdi_queue_work()'.
+				 */
+				spin_lock_bh(&bdi->wb_lock);
+				bdi->wb.task = task;
+				spin_unlock_bh(&bdi->wb_lock);
+			}
+			break;
+
+		case KILL_THREAD:
+			__set_current_state(TASK_RUNNING);
+			kthread_stop(task);
+			break;
+
+		case NO_ACTION:
+			if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
+				/*
+				 * There is no dirty data. The only thing we
+				 * should now care about is checking for
+				 * inactive bdi threads and killing them. Thus,
+				 * let's sleep for a longer time, save energy
+				 * and be friendly to battery-driven devices.
+				 */
+				schedule_timeout(bdi_longest_inactive());
 			else
-				schedule();
+				schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
 			try_to_freeze();
+			/* Back to the main loop */
 			continue;
 		}
 
-		__set_current_state(TASK_RUNNING);
-
-		/*
-		 * This is our real job - check for pending entries in
-		 * bdi_pending_list, and create the tasks that got added
-		 */
-		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
-				 bdi_list);
-		list_del_init(&bdi->bdi_list);
-		spin_unlock_bh(&bdi_lock);
-
-		wb = &bdi->wb;
-		wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
-					dev_name(bdi->dev));
 		/*
-		 * If task creation fails, then readd the bdi to
-		 * the pending list and force writeout of the bdi
-		 * from this forker thread. That will free some memory
-		 * and we can try again.
+		 * Clear pending bit and wakeup anybody waiting to tear us down.
 		 */
-		if (IS_ERR(wb->task)) {
-			wb->task = NULL;
-
-			/*
-			 * Add this 'bdi' to the back, so we get
-			 * a chance to flush other bdi's to free
-			 * memory.
-			 */
-			spin_lock_bh(&bdi_lock);
-			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
-			spin_unlock_bh(&bdi_lock);
-
-			bdi_flush_io(bdi);
-		}
+		clear_bit(BDI_pending, &bdi->state);
+		smp_mb__after_clear_bit();
+		wake_up_bit(&bdi->state, BDI_pending);
 	}
 
 	return 0;
 }
 
-static void bdi_add_to_pending(struct rcu_head *head)
-{
-	struct backing_dev_info *bdi;
-
-	bdi = container_of(head, struct backing_dev_info, rcu_head);
-	INIT_LIST_HEAD(&bdi->bdi_list);
-
-	spin_lock(&bdi_lock);
-	list_add_tail(&bdi->bdi_list, &bdi_pending_list);
-	spin_unlock(&bdi_lock);
-
-	/*
-	 * We are now on the pending list, wake up bdi_forker_task()
-	 * to finish the job and add us back to the active bdi_list
-	 */
-	wake_up_process(default_backing_dev_info.wb.task);
-}
-
-/*
- * Add the default flusher task that gets created for any bdi
- * that has dirty data pending writeout
- */
-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
-{
-	if (!bdi_cap_writeback_dirty(bdi))
-		return;
-
-	if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
-		printk(KERN_ERR "bdi %p/%s is not registered!\n",
-		       bdi, bdi->name);
-		return;
-	}
-
-	/*
-	 * Check with the helper whether to proceed adding a task. Will only
-	 * abort if two or more simultaneous calls to
-	 * bdi_add_default_flusher_task() occurred; further additions will
-	 * block waiting for previous additions to finish.
-	 */
-	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
-		list_del_rcu(&bdi->bdi_list);
-
-		/*
-		 * We must wait for the current RCU period to end before
-		 * moving to the pending list. So schedule that operation
-		 * from an RCU callback.
-		 */
-		call_rcu(&bdi->rcu_head, bdi_add_to_pending);
-	}
-}
-
 /*
  * Remove bdi from bdi_list, and ensure that it is no longer visible
  */
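
Aside: two details of the forker loop above are worth spelling out.
bdi_longest_inactive() clamps the kill threshold to at least five minutes:
with the default dirty_writeback_interval of 500 centisecs the writeback
period is only 5 seconds, so max(5UL * 60 * HZ, interval) resolves to the
five-minute floor. And the fork/kill choice itself is a pure function of
three inputs. A runnable userspace model of both (HZ, the mock types and
the sample values are illustrative assumptions, not kernel state):

	#include <stdbool.h>
	#include <stdio.h>

	#define HZ 100UL			/* illustrative; kernel configs vary */

	static unsigned long msecs_to_jiffies(unsigned long msecs)
	{
		return msecs * HZ / 1000;	/* exact for HZ = 100 */
	}

	/* Mirrors bdi_longest_inactive(): at least 5 min, or one period. */
	static unsigned long longest_inactive(unsigned long centisecs)
	{
		unsigned long interval = msecs_to_jiffies(centisecs * 10);

		return interval > 5UL * 60 * HZ ? interval : 5UL * 60 * HZ;
	}

	enum action { NO_ACTION, FORK_THREAD, KILL_THREAD };

	struct mock_bdi {			/* only what the decision reads */
		bool has_thread;		/* bdi->wb.task != NULL */
		bool have_dirty_io;		/* queued work or dirty inodes */
		unsigned long last_active;	/* jiffies of last activity */
	};

	static enum action decide(const struct mock_bdi *bdi,
				  unsigned long jiffies, unsigned long grace)
	{
		if (!bdi->has_thread && bdi->have_dirty_io)
			return FORK_THREAD;	/* work but no thread */
		if (bdi->has_thread && !bdi->have_dirty_io &&
		    jiffies > bdi->last_active + grace)
			return KILL_THREAD;	/* idle past the grace period */
		return NO_ACTION;
	}

	int main(void)
	{
		unsigned long grace = longest_inactive(500);	/* default */
		struct mock_bdi busy = { false, true,  0 };
		struct mock_bdi idle = { true,  false, 0 };

		printf("grace: %lu jiffies (%lu s)\n", grace, grace / HZ);
		printf("busy bdi -> %d (1 = fork)\n", decide(&busy, 1000, grace));
		printf("idle bdi -> %d (2 = kill)\n", decide(&idle, 40000, grace));
		return 0;
	}
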
@@ -544,23 +513,16 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...)
 {
 	va_list args;
-	int ret = 0;
 	struct device *dev;
 
 	if (bdi->dev)	/* The driver needs to use separate queues per device */
-		goto exit;
+		return 0;
 
 	va_start(args, fmt);
 	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
 	va_end(args);
-	if (IS_ERR(dev)) {
-		ret = PTR_ERR(dev);
-		goto exit;
-	}
-
-	spin_lock_bh(&bdi_lock);
-	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
-	spin_unlock_bh(&bdi_lock);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
 
 	bdi->dev = dev;
 
@@ -572,21 +534,21 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 	if (bdi_cap_flush_forker(bdi)) {
 		struct bdi_writeback *wb = &bdi->wb;
 
-		wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
+		wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
 						dev_name(dev));
-		if (IS_ERR(wb->task)) {
-			wb->task = NULL;
-			ret = -ENOMEM;
-
-			bdi_remove_from_list(bdi);
-			goto exit;
-		}
+		if (IS_ERR(wb->task))
+			return PTR_ERR(wb->task);
 	}
 
 	bdi_debug_register(bdi, dev_name(dev));
 	set_bit(BDI_registered, &bdi->state);
-exit:
-	return ret;
+
+	spin_lock_bh(&bdi_lock);
+	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+	spin_unlock_bh(&bdi_lock);
+
+	trace_writeback_bdi_register(bdi);
+	return 0;
 }
 EXPORT_SYMBOL(bdi_register);
 
@@ -601,31 +563,29 @@ EXPORT_SYMBOL(bdi_register_dev);
  */
 static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 {
-	struct bdi_writeback *wb;
-
 	if (!bdi_cap_writeback_dirty(bdi))
 		return;
 
 	/*
-	 * If setup is pending, wait for that to complete first
+	 * Make sure nobody finds us on the bdi_list anymore
 	 */
-	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
-			TASK_UNINTERRUPTIBLE);
+	bdi_remove_from_list(bdi);
 
 	/*
-	 * Make sure nobody finds us on the bdi_list anymore
+	 * If setup is pending, wait for that to complete first
 	 */
-	bdi_remove_from_list(bdi);
+	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
+			TASK_UNINTERRUPTIBLE);
 
 	/*
-	 * Finally, kill the kernel threads. We don't need to be RCU
+	 * Finally, kill the kernel thread. We don't need to be RCU
 	 * safe anymore, since the bdi is gone from visibility. Force
 	 * unfreeze of the thread before calling kthread_stop(), otherwise
 	 * it would never exit if it is currently stuck in the refrigerator.
 	 */
-	list_for_each_entry(wb, &bdi->wb_list, list) {
-		thaw_process(wb->task);
-		kthread_stop(wb->task);
+	if (bdi->wb.task) {
+		thaw_process(bdi->wb.task);
+		kthread_stop(bdi->wb.task);
 	}
 }
 
@@ -647,7 +607,9 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
 void bdi_unregister(struct backing_dev_info *bdi)
 {
 	if (bdi->dev) {
+		trace_writeback_bdi_unregister(bdi);
 		bdi_prune_sb(bdi);
+		del_timer_sync(&bdi->wb.wakeup_timer);
 
 		if (!bdi_cap_flush_forker(bdi))
 			bdi_wb_shutdown(bdi);
@@ -658,6 +620,18 @@ void bdi_unregister(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_unregister);
 
+static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
+{
+	memset(wb, 0, sizeof(*wb));
+
+	wb->bdi = bdi;
+	wb->last_old_flush = jiffies;
+	INIT_LIST_HEAD(&wb->b_dirty);
+	INIT_LIST_HEAD(&wb->b_io);
+	INIT_LIST_HEAD(&wb->b_more_io);
+	setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
+}
+
 int bdi_init(struct backing_dev_info *bdi)
 {
 	int i, err;
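
Aside: the setup_timer() call added to bdi_wb_init() uses the pre-4.15 timer
idiom in which the callback receives an unsigned long 'data' that it casts
back to the owning object - exactly what wakeup_timer_fn() does earlier in
this patch. A runnable userspace model of the idiom (the struct layouts and
the manual dispatch stand in for the kernel's timer machinery):

	#include <stdio.h>

	struct timer_list {			/* mock of the kernel struct */
		void (*function)(unsigned long);
		unsigned long data;
	};

	struct backing_dev_info {		/* mock; the real one is larger */
		const char *name;
		struct timer_list wakeup_timer;
	};

	static void wakeup_timer_fn(unsigned long data)
	{
		struct backing_dev_info *bdi = (struct backing_dev_info *)data;

		printf("timer fired for bdi %s\n", bdi->name);
	}

	static void setup_timer(struct timer_list *timer,
				void (*fn)(unsigned long), unsigned long data)
	{
		timer->function = fn;
		timer->data = data;
	}

	int main(void)
	{
		struct backing_dev_info bdi = { "8:0", { 0, 0 } };

		setup_timer(&bdi.wakeup_timer, wakeup_timer_fn,
			    (unsigned long)&bdi);
		/* simulate expiry: the kernel would do this from a softirq */
		bdi.wakeup_timer.function(bdi.wakeup_timer.data);
		return 0;
	}
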
@@ -668,19 +642,11 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->max_ratio = 100;
 	bdi->max_prop_frac = PROP_FRAC_BASE;
 	spin_lock_init(&bdi->wb_lock);
-	INIT_RCU_HEAD(&bdi->rcu_head);
 	INIT_LIST_HEAD(&bdi->bdi_list);
-	INIT_LIST_HEAD(&bdi->wb_list);
 	INIT_LIST_HEAD(&bdi->work_list);
 
 	bdi_wb_init(&bdi->wb, bdi);
 
-	/*
-	 * Just one thread support for now, hard code mask and count
-	 */
-	bdi->wb_mask = 1;
-	bdi->wb_cnt = 1;
-
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
 		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
 		if (err)