Diffstat (limited to 'mm/backing-dev.c')
-rw-r--r--	mm/backing-dev.c	82
1 file changed, 18 insertions, 64 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 72e6eb96efe2..dbc66815a0fe 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -50,8 +50,6 @@ static struct timer_list sync_supers_timer;
 static int bdi_sync_supers(void *);
 static void sync_supers_timer_fn(unsigned long);
 
-static void bdi_add_default_flusher_thread(struct backing_dev_info *bdi);
-
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -331,6 +329,7 @@ static int bdi_forker_thread(void *ptr)
 	set_user_nice(current, 0);
 
 	for (;;) {
+		bool fork = false;
 		struct task_struct *task;
 		struct backing_dev_info *bdi, *tmp;
 
@@ -349,23 +348,30 @@ static int bdi_forker_thread(void *ptr)
 		 * a thread registered. If so, set that up.
 		 */
 		list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
+			if (!bdi_cap_writeback_dirty(bdi))
+				continue;
 			if (bdi->wb.task)
 				continue;
 			if (list_empty(&bdi->work_list) &&
 			    !bdi_has_dirty_io(bdi))
 				continue;
 
-			bdi_add_default_flusher_thread(bdi);
+			WARN(!test_bit(BDI_registered, &bdi->state),
+			     "bdi %p/%s is not registered!\n", bdi, bdi->name);
+
+			list_del_rcu(&bdi->bdi_list);
+			fork = true;
+			break;
 		}
+		spin_unlock_bh(&bdi_lock);
 
 		/* Keep working if default bdi still has things to do */
 		if (!list_empty(&me->bdi->work_list))
 			__set_current_state(TASK_RUNNING);
 
-		if (list_empty(&bdi_pending_list)) {
+		if (!fork) {
 			unsigned long wait;
 
-			spin_unlock_bh(&bdi_lock);
 			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
 			if (wait)
 				schedule_timeout(wait);
@@ -378,13 +384,13 @@ static int bdi_forker_thread(void *ptr)
 		__set_current_state(TASK_RUNNING);
 
 		/*
-		 * This is our real job - check for pending entries in
-		 * bdi_pending_list, and create the threads that got added
+		 * Set the pending bit - if someone will try to unregister this
+		 * bdi - it'll wait on this bit.
 		 */
-		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
-				 bdi_list);
-		list_del_init(&bdi->bdi_list);
-		spin_unlock_bh(&bdi_lock);
+		set_bit(BDI_pending, &bdi->state);
+
+		/* Make sure no one uses the picked bdi */
+		synchronize_rcu();
 
 		task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s",
 				   dev_name(bdi->dev));
@@ -397,7 +403,7 @@ static int bdi_forker_thread(void *ptr)
 		 * flush other bdi's to free memory.
 		 */
 		spin_lock_bh(&bdi_lock);
-		list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+		list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
 		spin_unlock_bh(&bdi_lock);
 
 		bdi_flush_io(bdi);
@@ -408,57 +414,6 @@ static int bdi_forker_thread(void *ptr)
 	return 0;
 }
 
-static void bdi_add_to_pending(struct rcu_head *head)
-{
-	struct backing_dev_info *bdi;
-
-	bdi = container_of(head, struct backing_dev_info, rcu_head);
-	INIT_LIST_HEAD(&bdi->bdi_list);
-
-	spin_lock(&bdi_lock);
-	list_add_tail(&bdi->bdi_list, &bdi_pending_list);
-	spin_unlock(&bdi_lock);
-
-	/*
-	 * We are now on the pending list, wake up bdi_forker_task()
-	 * to finish the job and add us back to the active bdi_list
-	 */
-	wake_up_process(default_backing_dev_info.wb.task);
-}
-
-/*
- * Add the default flusher thread that gets created for any bdi
- * that has dirty data pending writeout
- */
-static void bdi_add_default_flusher_thread(struct backing_dev_info *bdi)
-{
-	if (!bdi_cap_writeback_dirty(bdi))
-		return;
-
-	if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
-		printk(KERN_ERR "bdi %p/%s is not registered!\n",
-		       bdi, bdi->name);
-		return;
-	}
-
-	/*
-	 * Check with the helper whether to proceed adding a thread. Will only
-	 * abort if two or more simultaneous calls to
-	 * bdi_add_default_flusher_thread() occurred; further additions will
-	 * block waiting for previous additions to finish.
-	 */
-	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
-		list_del_rcu(&bdi->bdi_list);
-
-		/*
-		 * We must wait for the current RCU period to end before
-		 * moving to the pending list. So schedule that operation
-		 * from an RCU callback.
-		 */
-		call_rcu(&bdi->rcu_head, bdi_add_to_pending);
-	}
-}
-
 /*
  * Remove bdi from bdi_list, and ensure that it is no longer visible
  */
@@ -599,7 +554,6 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->max_ratio = 100;
 	bdi->max_prop_frac = PROP_FRAC_BASE;
 	spin_lock_init(&bdi->wb_lock);
-	INIT_RCU_HEAD(&bdi->rcu_head);
 	INIT_LIST_HEAD(&bdi->bdi_list);
 	INIT_LIST_HEAD(&bdi->work_list);
 
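Note on the pattern: the patch folds the old bdi_add_default_flusher_thread() / bdi_add_to_pending() handoff into bdi_forker_thread() itself, as a pick-unlink-synchronize-fork-relink sequence on the RCU-protected bdi_list. Below is a minimal kernel-style sketch of that sequence. The item, items_lock, and worker_fn names are hypothetical stand-ins invented for illustration, and error handling is trimmed (kthread_run() can return ERR_PTR); only the RCU list primitives and their ordering mirror the diff.

/*
 * Sketch (hypothetical names): RCU readers may still be walking the
 * list, so an entry is unlinked with list_del_rcu(), a "pending" bit
 * is published, and synchronize_rcu() waits until no reader can still
 * see it before the worker thread is forked and the entry re-linked.
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/kthread.h>

static LIST_HEAD(items);		/* stand-in for bdi_list */
static DEFINE_SPINLOCK(items_lock);	/* stand-in for bdi_lock */

#define ITEM_PENDING	0		/* bit number, like BDI_pending */

struct item {
	struct list_head list;
	unsigned long state;		/* carries the ITEM_PENDING bit */
	struct task_struct *task;
};

static int worker_fn(void *data)
{
	/* worker body would go here; cf. bdi_writeback_thread() */
	return 0;
}

static void pick_and_fork(void)
{
	struct item *it, *tmp, *picked = NULL;

	spin_lock_bh(&items_lock);
	list_for_each_entry_safe(it, tmp, &items, list) {
		if (it->task)
			continue;	/* already has a worker */
		/* unlink; concurrent RCU readers may still see it */
		list_del_rcu(&it->list);
		picked = it;
		break;
	}
	spin_unlock_bh(&items_lock);

	if (!picked)
		return;

	/* anyone trying to tear the item down waits on this bit */
	set_bit(ITEM_PENDING, &picked->state);

	/* wait until no RCU reader can still hold a reference */
	synchronize_rcu();

	picked->task = kthread_run(worker_fn, picked, "worker");

	/* re-publish the entry for RCU readers */
	spin_lock_bh(&items_lock);
	list_add_tail_rcu(&picked->list, &items);
	spin_unlock_bh(&items_lock);
}

The design point the diff makes: the forker thread has nothing useful to do until the grace period ends anyway, so a blocking synchronize_rcu() can replace the old call_rcu() callback and the bdi_pending_list handoff entirely, which is also why bdi_init() no longer needs INIT_RCU_HEAD(&bdi->rcu_head).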
