author     Tejun Heo <tj@kernel.org>      2012-03-05 16:15:13 -0500
committer  Jens Axboe <axboe@kernel.dk>   2012-03-06 15:27:23 -0500
commit     923adde1be1df57cebd80c563058e503376645e8
tree       7009edf644abdf6b91daaab3967ffa6a8b30633a
parent     5efd611351d1a847c72d74fb12ff4bd187c0cb2c
blkcg: clear all request_queues on blkcg policy [un]registrations
Keep track of all request_queues which have blkcg initialized and turn
on bypass and invoke blkcg_clear_queue() on all before making changes
to blkcg policies.  This is to prepare for moving blkg management into
blkcg core.

Note that this uses more brute force than necessary.  Finer grained
shoot down will be implemented later and given that policy
[un]registration almost never happens on running systems (blk-throtl
can't be built as a module and cfq usually is the builtin default
iosched), this shouldn't be a problem for the time being.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
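For orientation, below is a minimal userspace sketch of the brute-force pattern
described above: every queue that has blkcg initialized sits on a global
all_q_list, and a policy [un]registration first walks that list to force bypass
and drop per-queue group state, then walks it again to lift bypass once the
policy list has been updated.  The structures and function bodies here are
simplified stand-ins, not the kernel implementation; the names only mirror the
patch that follows.

/*
 * Simplified userspace model of the scheme (not kernel code).  The real
 * patch uses a mutex-protected list_head plus blk_queue_bypass_start/end()
 * and blkg_destroy_all(); here those are stand-ins on a singly linked list.
 */
#include <pthread.h>
#include <stdio.h>

struct request_queue {
	int bypass_depth;               /* models blk_queue_bypass_*()  */
	struct request_queue *next;     /* models q->all_q_node linkage */
};

static pthread_mutex_t all_q_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct request_queue *all_q_list;        /* models all_q_list */

/* Force every tracked queue into bypass; returns with all_q_mutex held. */
static void blkcg_bypass_start(void)
{
	struct request_queue *q;

	pthread_mutex_lock(&all_q_mutex);
	for (q = all_q_list; q; q = q->next) {
		q->bypass_depth++;      /* blk_queue_bypass_start(q) */
		/* blkg_destroy_all(q) would drop per-queue blkgs here */
	}
}

/* Lift bypass on every tracked queue and drop all_q_mutex. */
static void blkcg_bypass_end(void)
{
	struct request_queue *q;

	for (q = all_q_list; q; q = q->next)
		q->bypass_depth--;      /* blk_queue_bypass_end(q) */
	pthread_mutex_unlock(&all_q_mutex);
}

int main(void)
{
	struct request_queue q1 = { 0, NULL };
	struct request_queue q2 = { 0, &q1 };

	all_q_list = &q2;       /* blkcg_init_queue() adds each queue */

	blkcg_bypass_start();
	/* ... update the policy list (blkio_list) here ... */
	blkcg_bypass_end();

	printf("bypass depths after the cycle: %d %d\n",
	       q1.bypass_depth, q2.bypass_depth);
	return 0;
}

Compiled with cc -pthread, the depths return to zero after the start/end pair,
mirroring how the patch brackets the blkio_list update with the two walks.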
-rw-r--r--   block/blk-cgroup.c        48
-rw-r--r--   include/linux/blkdev.h     3
2 files changed, 50 insertions(+), 1 deletion(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b302ce1d662b..266c0707d588 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -27,6 +27,9 @@
 static DEFINE_SPINLOCK(blkio_list_lock);
 static LIST_HEAD(blkio_list);
 
+static DEFINE_MUTEX(all_q_mutex);
+static LIST_HEAD(all_q_list);
+
 struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
@@ -1472,9 +1475,20 @@ done:
  */
 int blkcg_init_queue(struct request_queue *q)
 {
+	int ret;
+
 	might_sleep();
 
-	return blk_throtl_init(q);
+	ret = blk_throtl_init(q);
+	if (ret)
+		return ret;
+
+	mutex_lock(&all_q_mutex);
+	INIT_LIST_HEAD(&q->all_q_node);
+	list_add_tail(&q->all_q_node, &all_q_list);
+	mutex_unlock(&all_q_mutex);
+
+	return 0;
 }
 
 /**
@@ -1498,6 +1512,10 @@ void blkcg_drain_queue(struct request_queue *q)
  */
 void blkcg_exit_queue(struct request_queue *q)
 {
+	mutex_lock(&all_q_mutex);
+	list_del_init(&q->all_q_node);
+	mutex_unlock(&all_q_mutex);
+
 	blk_throtl_exit(q);
 }
 
@@ -1543,8 +1561,33 @@ static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	}
 }
 
+static void blkcg_bypass_start(void)
+	__acquires(&all_q_mutex)
+{
+	struct request_queue *q;
+
+	mutex_lock(&all_q_mutex);
+
+	list_for_each_entry(q, &all_q_list, all_q_node) {
+		blk_queue_bypass_start(q);
+		blkg_destroy_all(q);
+	}
+}
+
+static void blkcg_bypass_end(void)
+	__releases(&all_q_mutex)
+{
+	struct request_queue *q;
+
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_queue_bypass_end(q);
+
+	mutex_unlock(&all_q_mutex);
+}
+
 void blkio_policy_register(struct blkio_policy_type *blkiop)
 {
+	blkcg_bypass_start();
 	spin_lock(&blkio_list_lock);
 
 	BUG_ON(blkio_policy[blkiop->plid]);
@@ -1552,11 +1595,13 @@ void blkio_policy_register(struct blkio_policy_type *blkiop)
 	list_add_tail(&blkiop->list, &blkio_list);
 
 	spin_unlock(&blkio_list_lock);
+	blkcg_bypass_end();
 }
 EXPORT_SYMBOL_GPL(blkio_policy_register);
 
 void blkio_policy_unregister(struct blkio_policy_type *blkiop)
 {
+	blkcg_bypass_start();
 	spin_lock(&blkio_list_lock);
 
 	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
@@ -1564,5 +1609,6 @@ void blkio_policy_unregister(struct blkio_policy_type *blkiop)
 	list_del_init(&blkiop->list);
 
 	spin_unlock(&blkio_list_lock);
+	blkcg_bypass_end();
 }
 EXPORT_SYMBOL_GPL(blkio_policy_unregister);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 315db1d91bc4..e8c0bbd06b9a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -397,6 +397,9 @@ struct request_queue {
 	struct bsg_class_device bsg_dev;
 #endif
 
+#ifdef CONFIG_BLK_CGROUP
+	struct list_head all_q_node;
+#endif
 #ifdef CONFIG_BLK_DEV_THROTTLING
 	/* Throttle data */
 	struct throtl_data *td;