author    Tejun Heo <tj@kernel.org>    2014-11-04 13:52:27 -0500
committer Jens Axboe <axboe@fb.com>    2014-11-04 16:49:31 -0500
commit    f3af020b9a8d298022b811a19719df0cf461efa5 (patch)
tree      7468b9e6077738b941c89433d491662c7b8da8b4 /block
parent    ece9c72accdc45c3a9484dacb1125ce572647288 (diff)
blk-mq: make mq_queue_reinit_notify() freeze queues in parallel
q->mq_usage_counter is a percpu_ref which is killed and drained when the
queue is frozen.  On a CPU hotplug event, blk_mq_queue_reinit(), which
involves freezing the queue, is invoked on all existing queues.  Because
percpu_ref killing and draining involve an RCU grace period, doing the
above on one queue after another may take a long time if there are many
queues on the system.

This patch splits out initiation of freezing and waiting for its
completion, and updates blk_mq_queue_reinit_notify() so that the queues
are frozen in parallel instead of one after another.  Note that freezing
and unfreezing are moved from blk_mq_queue_reinit() to
blk_mq_queue_reinit_notify().

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
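[Editor's note] The win comes from overlapping the drain waits.  Below is a
minimal userspace sketch of the same start-all-then-wait-all pattern; it is
not kernel code.  fake_queue, drain_worker(), freeze_start() and
freeze_wait() are hypothetical stand-ins that model the percpu_ref
kill/drain with a worker thread, using sleep(1) in place of an RCU grace
period.  Build with: cc -pthread sketch.c

/*
 * Hypothetical sketch (not from the patch): initiate every freeze
 * first, then wait for each, so the expensive waits overlap.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NQUEUES 4

struct fake_queue {
	pthread_t	drainer;
	pthread_mutex_t	lock;
	pthread_cond_t	drained_cv;
	bool		drained;
};

/* Simulated drain: completes ~1s after being started, independently. */
static void *drain_worker(void *arg)
{
	struct fake_queue *q = arg;

	sleep(1);
	pthread_mutex_lock(&q->lock);
	q->drained = true;
	pthread_cond_signal(&q->drained_cv);
	pthread_mutex_unlock(&q->lock);
	return NULL;
}

/* Analogue of blk_mq_freeze_queue_start(): kick off the drain, no wait. */
static void freeze_start(struct fake_queue *q)
{
	q->drained = false;
	pthread_create(&q->drainer, NULL, drain_worker, q);
}

/* Analogue of blk_mq_freeze_queue_wait(): block until drained. */
static void freeze_wait(struct fake_queue *q)
{
	pthread_mutex_lock(&q->lock);
	while (!q->drained)
		pthread_cond_wait(&q->drained_cv, &q->lock);
	pthread_mutex_unlock(&q->lock);
	pthread_join(q->drainer, NULL);
}

int main(void)
{
	struct fake_queue queues[NQUEUES];

	for (int i = 0; i < NQUEUES; i++) {
		pthread_mutex_init(&queues[i].lock, NULL);
		pthread_cond_init(&queues[i].drained_cv, NULL);
	}

	/*
	 * Start all freezes, then wait for all of them: total time is
	 * ~1 simulated grace period instead of ~NQUEUES of them, which
	 * mirrors what blk_mq_queue_reinit_notify() does after this
	 * patch.
	 */
	for (int i = 0; i < NQUEUES; i++)
		freeze_start(&queues[i]);
	for (int i = 0; i < NQUEUES; i++)
		freeze_wait(&queues[i]);

	printf("all %d queues frozen\n", NQUEUES);
	return 0;
}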
Diffstat (limited to 'block')
 block/blk-mq.c | 41 +++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 33 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 68929bad9a6a..1d016fc9a8b6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -107,11 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
 	wake_up_all(&q->mq_freeze_wq);
 }
 
-/*
- * Guarantee no request is in use, so we can change any data structure of
- * the queue afterward.
- */
-void blk_mq_freeze_queue(struct request_queue *q)
+static void blk_mq_freeze_queue_start(struct request_queue *q)
 {
 	bool freeze;
 
@@ -123,9 +119,23 @@ void blk_mq_freeze_queue(struct request_queue *q)
 		percpu_ref_kill(&q->mq_usage_counter);
 		blk_mq_run_queues(q, false);
 	}
+}
+
+static void blk_mq_freeze_queue_wait(struct request_queue *q)
+{
 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+	blk_mq_freeze_queue_start(q);
+	blk_mq_freeze_queue_wait(q);
+}
+
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
 	bool wake;
@@ -1921,7 +1931,7 @@ void blk_mq_free_queue(struct request_queue *q)
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
 {
-	blk_mq_freeze_queue(q);
+	WARN_ON_ONCE(!q->mq_freeze_depth);
 
 	blk_mq_sysfs_unregister(q);
 
@@ -1936,8 +1946,6 @@ static void blk_mq_queue_reinit(struct request_queue *q)
 	blk_mq_map_swqueue(q);
 
 	blk_mq_sysfs_register(q);
-
-	blk_mq_unfreeze_queue(q);
 }
 
 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
@@ -1956,8 +1964,25 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 		return NOTIFY_OK;
 
 	mutex_lock(&all_q_mutex);
+
+	/*
+	 * We need to freeze and reinit all existing queues.  Freezing
+	 * involves synchronous wait for an RCU grace period and doing it
+	 * one by one may take a long time.  Start freezing all queues in
+	 * one swoop and then wait for the completions so that freezing can
+	 * take place in parallel.
+	 */
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_freeze_queue_start(q);
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_freeze_queue_wait(q);
+
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_queue_reinit(q);
+
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_unfreeze_queue(q);
+
 	mutex_unlock(&all_q_mutex);
 	return NOTIFY_OK;
 }