aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-07-03 15:12:16 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-07-03 15:12:16 -0400
commit1e512b08da88dc2f28afb70406c5a6b2cd7531e4 (patch)
treef713fdaa4c5ee9e817de1ede39f6035ed29e9319 /mm
parent1c65ae63c061c8eb22c780d99ebcd4492743c04e (diff)
parent758dd7fdffd60507624edce34fff122a63163b3f (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "Mainly sending this off now for the writeback fixes, since they fix a
  real regression introduced with the cgroup writeback changes.  The
  NVMe fix could wait for next pull for this series, but it's simple
  enough that we might as well include it.

  This contains:

   - two cgroup writeback fixes from Tejun, fixing a user reported
     issue with luks crypt devices hanging when being closed.

   - NVMe error cleanup fix from Jon Derrick, fixing a case where we'd
     attempt to free an unregistered IRQ"

* 'for-linus' of git://git.kernel.dk/linux-block:
  NVMe: Fix irq freeing when queue_request_irq fails
  writeback: don't drain bdi_writeback_congested on bdi destruction
  writeback: don't embed root bdi_writeback_congested in bdi_writeback
Diffstat (limited to 'mm')
-rw-r--r--mm/backing-dev.c109
1 file changed, 62 insertions(+), 47 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 7756da31b02b..dac5bf59309d 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -287,7 +287,7 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
287#define INIT_BW (100 << (20 - PAGE_SHIFT)) 287#define INIT_BW (100 << (20 - PAGE_SHIFT))
288 288
289static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi, 289static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
290 gfp_t gfp) 290 int blkcg_id, gfp_t gfp)
291{ 291{
292 int i, err; 292 int i, err;
293 293
@@ -311,21 +311,29 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
311 INIT_LIST_HEAD(&wb->work_list); 311 INIT_LIST_HEAD(&wb->work_list);
312 INIT_DELAYED_WORK(&wb->dwork, wb_workfn); 312 INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
313 313
314 wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
315 if (!wb->congested)
316 return -ENOMEM;
317
314 err = fprop_local_init_percpu(&wb->completions, gfp); 318 err = fprop_local_init_percpu(&wb->completions, gfp);
315 if (err) 319 if (err)
316 return err; 320 goto out_put_cong;
317 321
318 for (i = 0; i < NR_WB_STAT_ITEMS; i++) { 322 for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
319 err = percpu_counter_init(&wb->stat[i], 0, gfp); 323 err = percpu_counter_init(&wb->stat[i], 0, gfp);
320 if (err) { 324 if (err)
321 while (--i) 325 goto out_destroy_stat;
322 percpu_counter_destroy(&wb->stat[i]);
323 fprop_local_destroy_percpu(&wb->completions);
324 return err;
325 }
326 } 326 }
327 327
328 return 0; 328 return 0;
329
330out_destroy_stat:
331 while (--i)
332 percpu_counter_destroy(&wb->stat[i]);
333 fprop_local_destroy_percpu(&wb->completions);
334out_put_cong:
335 wb_congested_put(wb->congested);
336 return err;
329} 337}
330 338
331/* 339/*
@@ -361,6 +369,7 @@ static void wb_exit(struct bdi_writeback *wb)
361 percpu_counter_destroy(&wb->stat[i]); 369 percpu_counter_destroy(&wb->stat[i]);
362 370
363 fprop_local_destroy_percpu(&wb->completions); 371 fprop_local_destroy_percpu(&wb->completions);
372 wb_congested_put(wb->congested);
364} 373}
365 374
366#ifdef CONFIG_CGROUP_WRITEBACK 375#ifdef CONFIG_CGROUP_WRITEBACK
@@ -392,9 +401,6 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
392 struct bdi_writeback_congested *new_congested = NULL, *congested; 401 struct bdi_writeback_congested *new_congested = NULL, *congested;
393 struct rb_node **node, *parent; 402 struct rb_node **node, *parent;
394 unsigned long flags; 403 unsigned long flags;
395
396 if (blkcg_id == 1)
397 return &bdi->wb_congested;
398retry: 404retry:
399 spin_lock_irqsave(&cgwb_lock, flags); 405 spin_lock_irqsave(&cgwb_lock, flags);
400 406
@@ -419,7 +425,6 @@ retry:
419 new_congested = NULL; 425 new_congested = NULL;
420 rb_link_node(&congested->rb_node, parent, node); 426 rb_link_node(&congested->rb_node, parent, node);
421 rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree); 427 rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
422 atomic_inc(&bdi->usage_cnt);
423 goto found; 428 goto found;
424 } 429 }
425 430
@@ -450,24 +455,23 @@ found:
450 */ 455 */
451void wb_congested_put(struct bdi_writeback_congested *congested) 456void wb_congested_put(struct bdi_writeback_congested *congested)
452{ 457{
453 struct backing_dev_info *bdi = congested->bdi;
454 unsigned long flags; 458 unsigned long flags;
455 459
456 if (congested->blkcg_id == 1)
457 return;
458
459 local_irq_save(flags); 460 local_irq_save(flags);
460 if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) { 461 if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
461 local_irq_restore(flags); 462 local_irq_restore(flags);
462 return; 463 return;
463 } 464 }
464 465
465 rb_erase(&congested->rb_node, &congested->bdi->cgwb_congested_tree); 466 /* bdi might already have been destroyed leaving @congested unlinked */
467 if (congested->bdi) {
468 rb_erase(&congested->rb_node,
469 &congested->bdi->cgwb_congested_tree);
470 congested->bdi = NULL;
471 }
472
466 spin_unlock_irqrestore(&cgwb_lock, flags); 473 spin_unlock_irqrestore(&cgwb_lock, flags);
467 kfree(congested); 474 kfree(congested);
468
469 if (atomic_dec_and_test(&bdi->usage_cnt))
470 wake_up_all(&cgwb_release_wait);
471} 475}
472 476
473static void cgwb_release_workfn(struct work_struct *work) 477static void cgwb_release_workfn(struct work_struct *work)
@@ -480,7 +484,6 @@ static void cgwb_release_workfn(struct work_struct *work)
480 484
481 css_put(wb->memcg_css); 485 css_put(wb->memcg_css);
482 css_put(wb->blkcg_css); 486 css_put(wb->blkcg_css);
483 wb_congested_put(wb->congested);
484 487
485 fprop_local_destroy_percpu(&wb->memcg_completions); 488 fprop_local_destroy_percpu(&wb->memcg_completions);
486 percpu_ref_exit(&wb->refcnt); 489 percpu_ref_exit(&wb->refcnt);
@@ -541,7 +544,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
541 if (!wb) 544 if (!wb)
542 return -ENOMEM; 545 return -ENOMEM;
543 546
544 ret = wb_init(wb, bdi, gfp); 547 ret = wb_init(wb, bdi, blkcg_css->id, gfp);
545 if (ret) 548 if (ret)
546 goto err_free; 549 goto err_free;
547 550
@@ -553,12 +556,6 @@ static int cgwb_create(struct backing_dev_info *bdi,
553 if (ret) 556 if (ret)
554 goto err_ref_exit; 557 goto err_ref_exit;
555 558
556 wb->congested = wb_congested_get_create(bdi, blkcg_css->id, gfp);
557 if (!wb->congested) {
558 ret = -ENOMEM;
559 goto err_fprop_exit;
560 }
561
562 wb->memcg_css = memcg_css; 559 wb->memcg_css = memcg_css;
563 wb->blkcg_css = blkcg_css; 560 wb->blkcg_css = blkcg_css;
564 INIT_WORK(&wb->release_work, cgwb_release_workfn); 561 INIT_WORK(&wb->release_work, cgwb_release_workfn);
@@ -588,12 +585,10 @@ static int cgwb_create(struct backing_dev_info *bdi,
588 if (ret) { 585 if (ret) {
589 if (ret == -EEXIST) 586 if (ret == -EEXIST)
590 ret = 0; 587 ret = 0;
591 goto err_put_congested; 588 goto err_fprop_exit;
592 } 589 }
593 goto out_put; 590 goto out_put;
594 591
595err_put_congested:
596 wb_congested_put(wb->congested);
597err_fprop_exit: 592err_fprop_exit:
598 fprop_local_destroy_percpu(&wb->memcg_completions); 593 fprop_local_destroy_percpu(&wb->memcg_completions);
599err_ref_exit: 594err_ref_exit:
@@ -662,26 +657,41 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
662 return wb; 657 return wb;
663} 658}
664 659
665static void cgwb_bdi_init(struct backing_dev_info *bdi) 660static int cgwb_bdi_init(struct backing_dev_info *bdi)
666{ 661{
667 bdi->wb.memcg_css = mem_cgroup_root_css; 662 int ret;
668 bdi->wb.blkcg_css = blkcg_root_css; 663
669 bdi->wb_congested.blkcg_id = 1;
670 INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); 664 INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
671 bdi->cgwb_congested_tree = RB_ROOT; 665 bdi->cgwb_congested_tree = RB_ROOT;
672 atomic_set(&bdi->usage_cnt, 1); 666 atomic_set(&bdi->usage_cnt, 1);
667
668 ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
669 if (!ret) {
670 bdi->wb.memcg_css = mem_cgroup_root_css;
671 bdi->wb.blkcg_css = blkcg_root_css;
672 }
673 return ret;
673} 674}
674 675
675static void cgwb_bdi_destroy(struct backing_dev_info *bdi) 676static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
676{ 677{
677 struct radix_tree_iter iter; 678 struct radix_tree_iter iter;
679 struct bdi_writeback_congested *congested, *congested_n;
678 void **slot; 680 void **slot;
679 681
680 WARN_ON(test_bit(WB_registered, &bdi->wb.state)); 682 WARN_ON(test_bit(WB_registered, &bdi->wb.state));
681 683
682 spin_lock_irq(&cgwb_lock); 684 spin_lock_irq(&cgwb_lock);
685
683 radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) 686 radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
684 cgwb_kill(*slot); 687 cgwb_kill(*slot);
688
689 rbtree_postorder_for_each_entry_safe(congested, congested_n,
690 &bdi->cgwb_congested_tree, rb_node) {
691 rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree);
692 congested->bdi = NULL; /* mark @congested unlinked */
693 }
694
685 spin_unlock_irq(&cgwb_lock); 695 spin_unlock_irq(&cgwb_lock);
686 696
687 /* 697 /*
@@ -732,15 +742,28 @@ void wb_blkcg_offline(struct blkcg *blkcg)
732 742
733#else /* CONFIG_CGROUP_WRITEBACK */ 743#else /* CONFIG_CGROUP_WRITEBACK */
734 744
735static void cgwb_bdi_init(struct backing_dev_info *bdi) { } 745static int cgwb_bdi_init(struct backing_dev_info *bdi)
746{
747 int err;
748
749 bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
750 if (!bdi->wb_congested)
751 return -ENOMEM;
752
753 err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
754 if (err) {
755 kfree(bdi->wb_congested);
756 return err;
757 }
758 return 0;
759}
760
736static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { } 761static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
737 762
738#endif /* CONFIG_CGROUP_WRITEBACK */ 763#endif /* CONFIG_CGROUP_WRITEBACK */
739 764
740int bdi_init(struct backing_dev_info *bdi) 765int bdi_init(struct backing_dev_info *bdi)
741{ 766{
742 int err;
743
744 bdi->dev = NULL; 767 bdi->dev = NULL;
745 768
746 bdi->min_ratio = 0; 769 bdi->min_ratio = 0;
@@ -749,15 +772,7 @@ int bdi_init(struct backing_dev_info *bdi)
749 INIT_LIST_HEAD(&bdi->bdi_list); 772 INIT_LIST_HEAD(&bdi->bdi_list);
750 init_waitqueue_head(&bdi->wb_waitq); 773 init_waitqueue_head(&bdi->wb_waitq);
751 774
752 err = wb_init(&bdi->wb, bdi, GFP_KERNEL); 775 return cgwb_bdi_init(bdi);
753 if (err)
754 return err;
755
756 bdi->wb_congested.state = 0;
757 bdi->wb.congested = &bdi->wb_congested;
758
759 cgwb_bdi_init(bdi);
760 return 0;
761} 776}
762EXPORT_SYMBOL(bdi_init); 777EXPORT_SYMBOL(bdi_init);
763 778