Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c  72
1 file changed, 56 insertions(+), 16 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e940972ccd66..2ca9a15db0f7 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -596,8 +596,11 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 	/* insert */
 	spin_lock(&blkcg->lock);
 	swap(blkg, new_blkg);
+
 	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
-	pol->ops.blkio_link_group_fn(q, blkg);
+	list_add(&blkg->q_node[plid], &q->blkg_list[plid]);
+	q->nr_blkgs[plid]++;
+
 	spin_unlock(&blkcg->lock);
 out:
 	blkg_free(new_blkg);
@@ -646,36 +649,69 @@ struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
-void blkg_destroy_all(struct request_queue *q)
+static void blkg_destroy(struct blkio_group *blkg, enum blkio_policy_id plid)
+{
+	struct request_queue *q = blkg->q;
+
+	lockdep_assert_held(q->queue_lock);
+
+	/* Something wrong if we are trying to remove same group twice */
+	WARN_ON_ONCE(list_empty(&blkg->q_node[plid]));
+	list_del_init(&blkg->q_node[plid]);
+
+	WARN_ON_ONCE(q->nr_blkgs[plid] <= 0);
+	q->nr_blkgs[plid]--;
+
+	/*
+	 * Put the reference taken at the time of creation so that when all
+	 * queues are gone, group can be destroyed.
+	 */
+	blkg_put(blkg);
+}
+
+void blkg_destroy_all(struct request_queue *q, enum blkio_policy_id plid,
+		      bool destroy_root)
 {
-	struct blkio_policy_type *pol;
+	struct blkio_group *blkg, *n;
 
 	while (true) {
 		bool done = true;
 
-		spin_lock(&blkio_list_lock);
 		spin_lock_irq(q->queue_lock);
 
-		/*
-		 * clear_queue_fn() might return with non-empty group list
-		 * if it raced cgroup removal and lost.  cgroup removal is
-		 * guaranteed to make forward progress and retrying after a
-		 * while is enough.  This ugliness is scheduled to be
-		 * removed after locking update.
-		 */
-		list_for_each_entry(pol, &blkio_list, list)
-			if (!pol->ops.blkio_clear_queue_fn(q))
+		list_for_each_entry_safe(blkg, n, &q->blkg_list[plid],
+					 q_node[plid]) {
+			/* skip root? */
+			if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
+				continue;
+
+			/*
+			 * If cgroup removal path got to blk_group first
+			 * and removed it from cgroup list, then it will
+			 * take care of destroying cfqg also.
+			 */
+			if (!blkiocg_del_blkio_group(blkg))
+				blkg_destroy(blkg, plid);
+			else
 				done = false;
+		}
 
 		spin_unlock_irq(q->queue_lock);
-		spin_unlock(&blkio_list_lock);
 
+		/*
+		 * Group list may not be empty if we raced cgroup removal
+		 * and lost.  cgroup removal is guaranteed to make forward
+		 * progress and retrying after a while is enough.  This
+		 * ugliness is scheduled to be removed after locking
+		 * update.
+		 */
 		if (done)
 			break;
 
 		msleep(10);	/* just some random duration I like */
 	}
 }
+EXPORT_SYMBOL_GPL(blkg_destroy_all);
 
 static void blkg_rcu_free(struct rcu_head *rcu_head)
 {
@@ -1549,11 +1585,13 @@ static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
 		 * this event.
 		 */
 		spin_lock(&blkio_list_lock);
+		spin_lock_irqsave(q->queue_lock, flags);
 		list_for_each_entry(blkiop, &blkio_list, list) {
 			if (blkiop->plid != blkg->plid)
 				continue;
-			blkiop->ops.blkio_unlink_group_fn(q, blkg);
+			blkg_destroy(blkg, blkiop->plid);
 		}
+		spin_unlock_irqrestore(q->queue_lock, flags);
 		spin_unlock(&blkio_list_lock);
 	} while (1);
 
@@ -1695,12 +1733,14 @@ static void blkcg_bypass_start(void)
 	__acquires(&all_q_mutex)
 {
 	struct request_queue *q;
+	int i;
 
 	mutex_lock(&all_q_mutex);
 
 	list_for_each_entry(q, &all_q_list, all_q_node) {
 		blk_queue_bypass_start(q);
-		blkg_destroy_all(q);
+		for (i = 0; i < BLKIO_NR_POLICIES; i++)
+			blkg_destroy_all(q, i, false);
 	}
 }
 
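Caller-side sketch (illustrative only, not part of the patch): with the reworked interface, per-queue group teardown is driven per policy ID, as in the blkcg_bypass_start() hunk above. The wrapper name example_clear_queue is hypothetical, and blk_queue_bypass_end() is assumed to exist as the counterpart of blk_queue_bypass_start() from the same series.

/*
 * Hypothetical helper (sketch under the assumptions above), showing
 * the new blkg_destroy_all(q, plid, destroy_root) calling convention.
 */
static void example_clear_queue(struct request_queue *q)
{
	int i;

	blk_queue_bypass_start(q);	/* drain and enter bypass mode */

	/*
	 * destroy_root == false: the root group is kept; groups that
	 * raced cgroup removal are retried inside blkg_destroy_all().
	 */
	for (i = 0; i < BLKIO_NR_POLICIES; i++)
		blkg_destroy_all(q, i, false);

	blk_queue_bypass_end(q);	/* leave bypass mode */
}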