about summary refs log tree commit diff stats
path: root/block/blk-cgroup.c
diff options
context:
space:
mode:
author: Jens Axboe <axboe@kernel.dk> 2018-11-15 14:22:51 -0500
committer: Jens Axboe <axboe@kernel.dk> 2018-11-16 10:34:06 -0500
commit: 344e9ffcbd1898e1dc04085564a6e05c30ea8199 (patch)
tree: ba71320bc66d1158790acf1cdeedd21d2da9dead /block/blk-cgroup.c
parent: dabcefab45d36ecb5a22f16577bb0f298876a22d (diff)
block: add queue_is_mq() helper
Various spots check for q->mq_ops being non-NULL, but provide a helper to do this instead. Where the ->mq_ops != NULL check is redundant, remove it. Since mq == rq-based now that legacy is gone, get rid of the queue_is_rq_based() and just use queue_is_mq() everywhere. Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r-- block/blk-cgroup.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 0f6b44614165..63d226a084cd 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1324,7 +1324,7 @@ int blkcg_activate_policy(struct request_queue *q,
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 pd_prealloc:
 	if (!pd_prealloc) {
@@ -1363,7 +1363,7 @@ pd_prealloc:
 
 	spin_unlock_irq(&q->queue_lock);
 out_bypass_end:
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
 	if (pd_prealloc)
 		pol->pd_free_fn(pd_prealloc);
@@ -1387,7 +1387,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (!blkcg_policy_enabled(q, pol))
 		return;
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 
 	spin_lock_irq(&q->queue_lock);
@@ -1405,7 +1405,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 
 	spin_unlock_irq(&q->queue_lock);
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);