author	Jason Gunthorpe <jgg@mellanox.com>	2018-09-05 18:21:22 -0400
committer	Jason Gunthorpe <jgg@mellanox.com>	2018-09-05 18:21:22 -0400
commit	2c910cb75e1fe6de52d95c8e32caedd1629a33a5 (patch)
tree	94a0eea6f8cde689d11e7583ddd0a930b8785ab4 /drivers/mmc/core/queue.c
parent	627212c9d49ba2759b699450f5d8f45f73e062fa (diff)
parent	b53b1c08a23eb1091982daacb2122f90a7094a77 (diff)
Merge branch 'uverbs_dev_cleanups' into rdma.git for-next
For dependencies, branch based on rdma.git 'for-rc' of
https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git/

Pull 'uverbs_dev_cleanups' from Leon Romanovsky:

====================
Reuse the char device code interfaces to simplify ib_uverbs_device
creation and destruction. As part of this series, we are also sending a
fix to the cleanup path that was discovered during internal review. The
fix can definitely go to -rc, but it means that this series will depend
on rdma-rc.
====================

* branch 'uverbs_dev_cleanups':
  RDMA/uverbs: Use device.groups to initialize device attributes
  RDMA/uverbs: Use cdev_device_add() instead of cdev_add()
  RDMA/core: Depend on device_add() to add device attributes
  RDMA/uverbs: Fix error cleanup path of ib_uverbs_add_one()

Resolved conflict in ib_device_unregister_sysfs()

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
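The core of the pulled series is a move to two driver-core facilities:
setting device.groups so that device_add() creates (and device teardown
removes) the sysfs attributes, and calling cdev_device_add() so the cdev
and the struct device are registered as one unit. Below is a minimal
sketch of that pattern, assuming a hypothetical "foo" driver; the foo_*
names, abi_version attribute, and foo_fops are illustrative, not taken
from the ib_uverbs code:

	#include <linux/cdev.h>
	#include <linux/device.h>
	#include <linux/fs.h>

	static ssize_t abi_version_show(struct device *dev,
					struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "1\n");
	}
	static DEVICE_ATTR_RO(abi_version);

	static struct attribute *foo_attrs[] = {
		&dev_attr_abi_version.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(foo);

	static const struct file_operations foo_fops = {
		.owner = THIS_MODULE,
	};

	struct foo_device {
		struct device	dev;
		struct cdev	cdev;
	};

	static int foo_device_register(struct foo_device *fdev, dev_t devt)
	{
		int ret;

		device_initialize(&fdev->dev);
		fdev->dev.devt = devt;
		fdev->dev.groups = foo_groups;	/* created by device_add() */
		dev_set_name(&fdev->dev, "foo%d", MINOR(devt));

		cdev_init(&fdev->cdev, &foo_fops);
		fdev->cdev.owner = THIS_MODULE;

		/* registers cdev and device together; pairs with cdev_device_del() */
		ret = cdev_device_add(&fdev->cdev, &fdev->dev);
		if (ret)
			put_device(&fdev->dev);	/* single error path */
		return ret;
	}

Compared with open-coded cdev_add() plus device_add() plus per-attribute
device_create_file() calls, this leaves one error path (put_device()) and
lets the driver core remove the attributes automatically, which is the
kind of cleanup-path simplification the series describes.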
Diffstat (limited to 'drivers/mmc/core/queue.c')
-rw-r--r--	drivers/mmc/core/queue.c	12
1 file changed, 7 insertions, 5 deletions
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 648eb6743ed5..6edffeed9953 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
 	mmc_exit_request(mq->queue, req);
 }
 
-/*
- * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
- * will not be dispatched in parallel.
- */
 static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 				    const struct blk_mq_queue_data *bd)
 {
@@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	spin_lock_irq(q->queue_lock);
 
-	if (mq->recovery_needed) {
+	if (mq->recovery_needed || mq->busy) {
 		spin_unlock_irq(q->queue_lock);
 		return BLK_STS_RESOURCE;
 	}
@@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		break;
 	}
 
+	/* Parallel dispatch of requests is not supported at the moment */
+	mq->busy = true;
+
 	mq->in_flight[issue_type] += 1;
 	get_card = (mmc_tot_in_flight(mq) == 1);
 	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
@@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		mq->in_flight[issue_type] -= 1;
 		if (mmc_tot_in_flight(mq) == 0)
 			put_card = true;
+		mq->busy = false;
 		spin_unlock_irq(q->queue_lock);
 		if (put_card)
 			mmc_put_card(card, &mq->ctx);
+	} else {
+		WRITE_ONCE(mq->busy, false);
 	}
 
 	return ret;
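The change above serializes dispatch with an explicit busy flag rather
than relying on the assumption in the deleted comment (BLK_MQ_F_BLOCKING
plus a single hardware queue): if a request is already being issued,
queue_rq returns BLK_STS_RESOURCE and blk-mq re-dispatches the request
later instead of failing it. A minimal sketch of that guard in
isolation, with hypothetical my_* names rather than the mmc code:

	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
	{
		struct my_queue *mq = hctx->queue->queuedata;
		struct request_queue *q = hctx->queue;
		blk_status_t ret;

		spin_lock_irq(q->queue_lock);
		if (mq->busy) {
			spin_unlock_irq(q->queue_lock);
			return BLK_STS_RESOURCE;	/* blk-mq retries later */
		}
		mq->busy = true;			/* claim the only dispatch slot */
		spin_unlock_irq(q->queue_lock);

		ret = my_issue(mq, bd->rq);		/* may sleep: BLK_MQ_F_BLOCKING */

		spin_lock_irq(q->queue_lock);
		mq->busy = false;			/* release before returning */
		spin_unlock_irq(q->queue_lock);

		return ret;
	}

Note that the actual patch clears the flag under queue_lock only on the
roll-back path, where in_flight is also adjusted, and uses WRITE_ONCE()
on the other path, where the lock is not held.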