 Documentation/scsi/scsi-parameters.txt |   5
 drivers/scsi/Kconfig                   |  12
 drivers/scsi/cxlflash/main.c           |   6
 drivers/scsi/hosts.c                   |  29
 drivers/scsi/lpfc/lpfc_scsi.c          |   2
 drivers/scsi/qedi/qedi_main.c          |   3
 drivers/scsi/qla2xxx/qla_os.c          |  30
 drivers/scsi/scsi.c                    |   5
 drivers/scsi/scsi_debug.c              |   3
 drivers/scsi/scsi_error.c              |   2
 drivers/scsi/scsi_lib.c                | 603
 drivers/scsi/scsi_priv.h               |   1
 drivers/scsi/scsi_scan.c               |  10
 drivers/scsi/scsi_sysfs.c              |   8
 drivers/scsi/ufs/ufshcd.c              |   6
 include/scsi/scsi_host.h               |  18
 include/scsi/scsi_tcq.h                |  14
 17 files changed, 77 insertions(+), 680 deletions(-)
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt
index 92999d4e0cb8..25a4b4cf04a6 100644
--- a/Documentation/scsi/scsi-parameters.txt
+++ b/Documentation/scsi/scsi-parameters.txt
@@ -97,11 +97,6 @@ parameters may be changed at runtime by the command
 			allowing boot to proceed.  none ignores them, expecting
 			user space to do the scan.
 
-	scsi_mod.use_blk_mq=
-			[SCSI] use blk-mq I/O path by default
-			See SCSI_MQ_DEFAULT in drivers/scsi/Kconfig.
-			Format: <y/n>
-
 	sim710=		[SCSI,HW]
 			See header of drivers/scsi/sim710.c.
 
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index f07444d30b21..dfdc6940de2f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -50,18 +50,6 @@ config SCSI_NETLINK
 	default	n
 	depends on NET
 
-config SCSI_MQ_DEFAULT
-	bool "SCSI: use blk-mq I/O path by default"
-	default y
-	depends on SCSI
-	---help---
-	  This option enables the blk-mq based I/O path for SCSI devices by
-	  default.  With this option the scsi_mod.use_blk_mq module/boot
-	  option defaults to Y, without it to N, but it can still be
-	  overridden either way.
-
-	  If unsure say Y.
-
 config SCSI_PROC_FS
 	bool "legacy /proc/scsi/ support"
 	depends on SCSI && PROC_FS
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 6637116529aa..abdc9eac4173 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3088,12 +3088,6 @@ static ssize_t hwq_mode_store(struct device *dev,
 		return -EINVAL;
 	}
 
-	if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
-		dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
-			 "HWQ steering mode.\n");
-		return -EINVAL;
-	}
-
 	afu->hwq_mode = mode;
 
 	return count;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index ea4b0bb0c1cd..cc71136ba300 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -222,18 +222,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 	if (error)
 		goto fail;
 
-	if (shost_use_blk_mq(shost)) {
-		error = scsi_mq_setup_tags(shost);
-		if (error)
-			goto fail;
-	} else {
-		shost->bqt = blk_init_tags(shost->can_queue,
-				shost->hostt->tag_alloc_policy);
-		if (!shost->bqt) {
-			error = -ENOMEM;
-			goto fail;
-		}
-	}
+	error = scsi_mq_setup_tags(shost);
+	if (error)
+		goto fail;
 
 	if (!shost->shost_gendev.parent)
 		shost->shost_gendev.parent = dev ? dev : &platform_bus;
@@ -309,8 +300,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 	pm_runtime_disable(&shost->shost_gendev);
 	pm_runtime_set_suspended(&shost->shost_gendev);
 	pm_runtime_put_noidle(&shost->shost_gendev);
-	if (shost_use_blk_mq(shost))
-		scsi_mq_destroy_tags(shost);
+	scsi_mq_destroy_tags(shost);
 fail:
 	return error;
 }
@@ -344,13 +334,8 @@ static void scsi_host_dev_release(struct device *dev)
 		kfree(dev_name(&shost->shost_dev));
 	}
 
-	if (shost_use_blk_mq(shost)) {
-		if (shost->tag_set.tags)
-			scsi_mq_destroy_tags(shost);
-	} else {
-		if (shost->bqt)
-			blk_free_tags(shost->bqt);
-	}
+	if (shost->tag_set.tags)
+		scsi_mq_destroy_tags(shost);
 
 	kfree(shost->shost_data);
 
@@ -472,8 +457,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	else
 		shost->dma_boundary = 0xffffffff;
 
-	shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq;
-
 	device_initialize(&shost->shost_gendev);
 	dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
 	shost->shost_gendev.bus = &scsi_bus_type;
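
[Editor's note] With the legacy path gone, scsi_add_host_with_dma() sets up a blk-mq tag set for every host. For context, here is a condensed sketch of what scsi_mq_setup_tags() does in the scsi_lib.c of this era — simplified (the real function also folds the inline scatterlist allocation into cmd_size), so treat it as illustrative rather than verbatim:

	/* Condensed, non-verbatim sketch of scsi_mq_setup_tags(). */
	int scsi_mq_setup_tags(struct Scsi_Host *shost)
	{
		/* Per-request payload: midlayer command plus LLD private data. */
		unsigned int cmd_size = sizeof(struct scsi_cmnd) +
					shost->hostt->cmd_size;

		memset(&shost->tag_set, 0, sizeof(shost->tag_set));
		shost->tag_set.ops = &scsi_mq_ops;
		shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
		shost->tag_set.queue_depth = shost->can_queue;
		shost->tag_set.cmd_size = cmd_size;
		shost->tag_set.numa_node = NUMA_NO_NODE;
		shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
			BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
		shost->tag_set.driver_data = shost;

		return blk_mq_alloc_tag_set(&shost->tag_set);
	}

Drivers that want more than one hardware queue set shost->nr_hw_queues before scsi_add_host(), as the qedi, qla2xxx and scsi_debug hunks below now do unconditionally.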
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 4fa6703a9ec9..baed2b891efb 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3914,7 +3914,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
 	uint32_t tag;
 	uint16_t hwq;
 
-	if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
+	if (cmnd) {
 		tag = blk_mq_unique_tag(cmnd->request);
 		hwq = blk_mq_unique_tag_to_hwq(tag);
 
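
[Editor's note] The hwq lookup above leans on blk-mq's "unique tag" encoding, which packs the hardware-queue index into the upper 16 bits of a 32-bit value and the per-queue tag into the lower 16. For reference, the decoding helpers as found in include/linux/blk-mq.h of this era:

	#define BLK_MQ_UNIQUE_TAG_BITS 16
	#define BLK_MQ_UNIQUE_TAG_MASK ((1 << BLK_MQ_UNIQUE_TAG_BITS) - 1)

	u32 blk_mq_unique_tag(struct request *rq);	/* (hwq << 16) | tag */

	static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
	{
		return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
	}

	static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
	{
		return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
	}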
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 105b0e4d7818..311eb22068e1 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -644,8 +644,7 @@ static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
 	qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
 	qedi->max_sqes = QEDI_SQ_SIZE;
 
-	if (shost_use_blk_mq(shost))
-		shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
+	shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
 
 	pci_set_drvdata(pdev, qedi);
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 518f15141170..4ea9f2b4e04f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -857,13 +857,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	}
 
 	if (ha->mqenable) {
-		if (shost_use_blk_mq(vha->host)) {
-			tag = blk_mq_unique_tag(cmd->request);
-			hwq = blk_mq_unique_tag_to_hwq(tag);
-			qpair = ha->queue_pair_map[hwq];
-		} else if (vha->vp_idx && vha->qpair) {
-			qpair = vha->qpair;
-		}
+		tag = blk_mq_unique_tag(cmd->request);
+		hwq = blk_mq_unique_tag_to_hwq(tag);
+		qpair = ha->queue_pair_map[hwq];
 
 		if (qpair)
 			return qla2xxx_mqueuecommand(host, cmd, qpair);
@@ -3153,7 +3149,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto probe_failed;
 	}
 
-	if (ha->mqenable && shost_use_blk_mq(host)) {
+	if (ha->mqenable) {
 		/* number of hardware queues supported by blk/scsi-mq*/
 		host->nr_hw_queues = ha->max_qpairs;
 
@@ -3265,25 +3261,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);
 
 	if (ha->mqenable) {
-		bool mq = false;
 		bool startit = false;
 
-		if (QLA_TGT_MODE_ENABLED()) {
-			mq = true;
+		if (QLA_TGT_MODE_ENABLED())
 			startit = false;
-		}
 
-		if ((ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) &&
-		    shost_use_blk_mq(host)) {
-			mq = true;
+		if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
 			startit = true;
-		}
 
-		if (mq) {
-			/* Create start of day qpairs for Block MQ */
-			for (i = 0; i < ha->max_qpairs; i++)
-				qla2xxx_create_qpair(base_vha, 5, 0, startit);
-		}
+		/* Create start of day qpairs for Block MQ */
+		for (i = 0; i < ha->max_qpairs; i++)
+			qla2xxx_create_qpair(base_vha, 5, 0, startit);
 	}
 
 	if (ha->flags.running_gold_fw)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index fc1356d101b0..7675ff0ca2ea 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -780,11 +780,8 @@ MODULE_LICENSE("GPL");
 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
 
-#ifdef CONFIG_SCSI_MQ_DEFAULT
+/* This should go away in the future, it doesn't do anything anymore */
 bool scsi_use_blk_mq = true;
-#else
-bool scsi_use_blk_mq = false;
-#endif
 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
 
 static int __init init_scsi(void)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 60bcc6df97a9..4740f1e9dd17 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5881,8 +5881,7 @@ static int sdebug_driver_probe(struct device *dev)
 	}
 	/* Decide whether to tell scsi subsystem that we want mq */
 	/* Following should give the same answer for each host */
-	if (shost_use_blk_mq(hpnt))
-		hpnt->nr_hw_queues = submit_queues;
+	hpnt->nr_hw_queues = submit_queues;
 
 	sdbg_host->shost = hpnt;
 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index c736d61b1648..fff128aa9ec2 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -308,7 +308,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
 	 * error handler. In that case we can return immediately as no
 	 * further action is required.
 	 */
-	if (req->q->mq_ops && !blk_mq_mark_complete(req))
+	if (!blk_mq_mark_complete(req))
 		return rtn;
 	if (scsi_abort_command(scmd) != SUCCESS) {
 		set_host_byte(scmd, DID_TIME_OUT);
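
[Editor's note] blk_mq_mark_complete() was exported to the midlayer shortly before this series so that the timeout path can claim a request atomically: if the regular completion path has already flipped the request state, scsi_times_out() returns without starting an abort. Roughly, paraphrased (not verbatim) from the include/linux/blk-mq.h of the same era:

	/* Paraphrased sketch: atomically move the request from IN_FLIGHT
	 * to COMPLETE; returns false if the normal completion path won
	 * the race, in which case no error handling is needed. */
	static inline bool blk_mq_mark_complete(struct request *rq)
	{
		return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
				MQ_RQ_IN_FLIGHT;
	}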
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 8b0345924a92..651be30ba96a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -168,8 +168,6 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
 static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
 {
 	struct scsi_device *device = cmd->device;
-	struct request_queue *q = device->request_queue;
-	unsigned long flags;
 
 	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
 		"Inserting command %p into mlqueue\n", cmd));
@@ -190,26 +188,20 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
 	 * before blk_cleanup_queue() finishes.
 	 */
 	cmd->result = 0;
-	if (q->mq_ops) {
-		/*
-		 * Before a SCSI command is dispatched,
-		 * get_device(&sdev->sdev_gendev) is called and the host,
-		 * target and device busy counters are increased. Since
-		 * requeuing a request causes these actions to be repeated and
-		 * since scsi_device_unbusy() has already been called,
-		 * put_device(&device->sdev_gendev) must still be called. Call
-		 * put_device() after blk_mq_requeue_request() to avoid that
-		 * removal of the SCSI device can start before requeueing has
-		 * happened.
-		 */
-		blk_mq_requeue_request(cmd->request, true);
-		put_device(&device->sdev_gendev);
-		return;
-	}
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_requeue_request(q, cmd->request);
-	kblockd_schedule_work(&device->requeue_work);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	/*
+	 * Before a SCSI command is dispatched,
+	 * get_device(&sdev->sdev_gendev) is called and the host,
+	 * target and device busy counters are increased. Since
+	 * requeuing a request causes these actions to be repeated and
+	 * since scsi_device_unbusy() has already been called,
+	 * put_device(&device->sdev_gendev) must still be called. Call
+	 * put_device() after blk_mq_requeue_request() to avoid that
+	 * removal of the SCSI device can start before requeueing has
+	 * happened.
+	 */
+	blk_mq_requeue_request(cmd->request, true);
+	put_device(&device->sdev_gendev);
 }
 
 /*
@@ -370,10 +362,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
 
 static void scsi_kick_queue(struct request_queue *q)
 {
-	if (q->mq_ops)
-		blk_mq_run_hw_queues(q, false);
-	else
-		blk_run_queue(q);
+	blk_mq_run_hw_queues(q, false);
 }
 
 /*
@@ -534,10 +523,7 @@ static void scsi_run_queue(struct request_queue *q)
 	if (!list_empty(&sdev->host->starved_list))
 		scsi_starved_list_run(sdev->host);
 
-	if (q->mq_ops)
-		blk_mq_run_hw_queues(q, false);
-	else
-		blk_run_queue(q);
+	blk_mq_run_hw_queues(q, false);
 }
 
 void scsi_requeue_run_queue(struct work_struct *work)
@@ -550,42 +536,6 @@ void scsi_requeue_run_queue(struct work_struct *work)
 	scsi_run_queue(q);
 }
 
-/*
- * Function:    scsi_requeue_command()
- *
- * Purpose:     Handle post-processing of completed commands.
- *
- * Arguments:   q       - queue to operate on
- *              cmd     - command that may need to be requeued.
- *
- * Returns:     Nothing
- *
- * Notes:       After command completion, there may be blocks left
- *              over which weren't finished by the previous command
- *              this can be for a number of reasons - the main one is
- *              I/O errors in the middle of the request, in which case
- *              we need to request the blocks that come after the bad
- *              sector.
- * Notes:	Upon return, cmd is a stale pointer.
- */
-static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
-{
-	struct scsi_device *sdev = cmd->device;
-	struct request *req = cmd->request;
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_unprep_request(req);
-	req->special = NULL;
-	scsi_put_command(cmd);
-	blk_requeue_request(q, req);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	scsi_run_queue(q);
-
-	put_device(&sdev->sdev_gendev);
-}
-
 void scsi_run_host_queues(struct Scsi_Host *shost)
 {
 	struct scsi_device *sdev;
@@ -626,42 +576,6 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
 	scsi_del_cmd_from_list(cmd);
 }
 
-/*
- * Function:    scsi_release_buffers()
- *
- * Purpose:     Free resources allocate for a scsi_command.
- *
- * Arguments:   cmd	- command that we are bailing.
- *
- * Lock status: Assumed that no lock is held upon entry.
- *
- * Returns:     Nothing
- *
- * Notes:       In the event that an upper level driver rejects a
- *		command, we must release resources allocated during
- *		the __init_io() function.  Primarily this would involve
- *		the scatter-gather table.
- */
-static void scsi_release_buffers(struct scsi_cmnd *cmd)
-{
-	if (cmd->sdb.table.nents)
-		sg_free_table_chained(&cmd->sdb.table, false);
-
-	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-
-	if (scsi_prot_sg_count(cmd))
-		sg_free_table_chained(&cmd->prot_sdb->table, false);
-}
-
-static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
-{
-	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
-
-	sg_free_table_chained(&bidi_sdb->table, false);
-	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
-	cmd->request->next_rq->special = NULL;
-}
-
 /* Returns false when no more bytes to process, true if there are more */
 static bool scsi_end_request(struct request *req, blk_status_t error,
 		unsigned int bytes, unsigned int bidi_bytes)
@@ -687,37 +601,22 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
 		destroy_rcu_head(&cmd->rcu);
 	}
 
-	if (req->mq_ctx) {
-		/*
-		 * In the MQ case the command gets freed by __blk_mq_end_request,
-		 * so we have to do all cleanup that depends on it earlier.
-		 *
-		 * We also can't kick the queues from irq context, so we
-		 * will have to defer it to a workqueue.
-		 */
-		scsi_mq_uninit_cmd(cmd);
-
-		__blk_mq_end_request(req, error);
-
-		if (scsi_target(sdev)->single_lun ||
-		    !list_empty(&sdev->host->starved_list))
-			kblockd_schedule_work(&sdev->requeue_work);
-		else
-			blk_mq_run_hw_queues(q, true);
-	} else {
-		unsigned long flags;
-
-		if (bidi_bytes)
-			scsi_release_bidi_buffers(cmd);
-		scsi_release_buffers(cmd);
-		scsi_put_command(cmd);
+	/*
+	 * In the MQ case the command gets freed by __blk_mq_end_request,
+	 * so we have to do all cleanup that depends on it earlier.
+	 *
+	 * We also can't kick the queues from irq context, so we
+	 * will have to defer it to a workqueue.
+	 */
+	scsi_mq_uninit_cmd(cmd);
 
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_finish_request(req, error);
-		spin_unlock_irqrestore(q->queue_lock, flags);
+	__blk_mq_end_request(req, error);
 
-		scsi_run_queue(q);
-	}
+	if (scsi_target(sdev)->single_lun ||
+	    !list_empty(&sdev->host->starved_list))
+		kblockd_schedule_work(&sdev->requeue_work);
+	else
+		blk_mq_run_hw_queues(q, true);
 
 	put_device(&sdev->sdev_gendev);
 	return false;
@@ -766,13 +665,7 @@ static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
 		struct request_queue *q)
 {
 	/* A new command will be prepared and issued. */
-	if (q->mq_ops) {
-		scsi_mq_requeue_cmd(cmd);
-	} else {
-		/* Unprep request and put it back at head of the queue. */
-		scsi_release_buffers(cmd);
-		scsi_requeue_command(q, cmd);
-	}
+	scsi_mq_requeue_cmd(cmd);
 }
 
 /* Helper for scsi_io_completion() when special action required. */
@@ -1147,9 +1040,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
  */
 int scsi_init_io(struct scsi_cmnd *cmd)
 {
-	struct scsi_device *sdev = cmd->device;
 	struct request *rq = cmd->request;
-	bool is_mq = (rq->mq_ctx != NULL);
 	int error = BLKPREP_KILL;
 
 	if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
@@ -1160,17 +1051,6 @@ int scsi_init_io(struct scsi_cmnd *cmd)
 		goto err_exit;
 
 	if (blk_bidi_rq(rq)) {
-		if (!rq->q->mq_ops) {
-			struct scsi_data_buffer *bidi_sdb =
-				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
-			if (!bidi_sdb) {
-				error = BLKPREP_DEFER;
-				goto err_exit;
-			}
-
-			rq->next_rq->special = bidi_sdb;
-		}
-
 		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
 		if (error)
 			goto err_exit;
@@ -1210,14 +1090,7 @@ int scsi_init_io(struct scsi_cmnd *cmd)
 
 	return BLKPREP_OK;
 err_exit:
-	if (is_mq) {
-		scsi_mq_free_sgtables(cmd);
-	} else {
-		scsi_release_buffers(cmd);
-		cmd->request->special = NULL;
-		scsi_put_command(cmd);
-		put_device(&sdev->sdev_gendev);
-	}
+	scsi_mq_free_sgtables(cmd);
 	return error;
 }
 EXPORT_SYMBOL(scsi_init_io);
@@ -1423,75 +1296,6 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 	return ret;
 }
 
-static int
-scsi_prep_return(struct request_queue *q, struct request *req, int ret)
-{
-	struct scsi_device *sdev = q->queuedata;
-
-	switch (ret) {
-	case BLKPREP_KILL:
-	case BLKPREP_INVALID:
-		scsi_req(req)->result = DID_NO_CONNECT << 16;
-		/* release the command and kill it */
-		if (req->special) {
-			struct scsi_cmnd *cmd = req->special;
-			scsi_release_buffers(cmd);
-			scsi_put_command(cmd);
-			put_device(&sdev->sdev_gendev);
-			req->special = NULL;
-		}
-		break;
-	case BLKPREP_DEFER:
-		/*
-		 * If we defer, the blk_peek_request() returns NULL, but the
-		 * queue must be restarted, so we schedule a callback to happen
-		 * shortly.
-		 */
-		if (atomic_read(&sdev->device_busy) == 0)
-			blk_delay_queue(q, SCSI_QUEUE_DELAY);
-		break;
-	default:
-		req->rq_flags |= RQF_DONTPREP;
-	}
-
-	return ret;
-}
-
-static int scsi_prep_fn(struct request_queue *q, struct request *req)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-	int ret;
-
-	ret = scsi_prep_state_check(sdev, req);
-	if (ret != BLKPREP_OK)
-		goto out;
-
-	if (!req->special) {
-		/* Bail if we can't get a reference to the device */
-		if (unlikely(!get_device(&sdev->sdev_gendev))) {
-			ret = BLKPREP_DEFER;
-			goto out;
-		}
-
-		scsi_init_command(sdev, cmd);
-		req->special = cmd;
-	}
-
-	cmd->tag = req->tag;
-	cmd->request = req;
-	cmd->prot_op = SCSI_PROT_NORMAL;
-
-	ret = scsi_setup_cmnd(sdev, req);
-out:
-	return scsi_prep_return(q, req, ret);
-}
-
-static void scsi_unprep_fn(struct request_queue *q, struct request *req)
-{
-	scsi_uninit_cmd(blk_mq_rq_to_pdu(req));
-}
-
 /*
  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
  * return 0.
@@ -1511,14 +1315,8 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 	/*
 	 * unblock after device_blocked iterates to zero
 	 */
-	if (atomic_dec_return(&sdev->device_blocked) > 0) {
-		/*
-		 * For the MQ case we take care of this in the caller.
-		 */
-		if (!q->mq_ops)
-			blk_delay_queue(q, SCSI_QUEUE_DELAY);
+	if (atomic_dec_return(&sdev->device_blocked) > 0)
 		goto out_dec;
-	}
 	SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
 			   "unblocking device at zero depth\n"));
 }
@@ -1653,13 +1451,13 @@ out_dec:
  * needs to return 'not busy'. Otherwise, request stacking drivers
  * may hold requests forever.
  */
-static int scsi_lld_busy(struct request_queue *q)
+static bool scsi_mq_lld_busy(struct request_queue *q)
 {
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost;
 
 	if (blk_queue_dying(q))
-		return 0;
+		return false;
 
 	shost = sdev->host;
 
@@ -1670,48 +1468,9 @@ static int scsi_lld_busy(struct request_queue *q)
 	 * in SCSI layer.
 	 */
 	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
-		return 1;
-
-	return 0;
-}
-
-static bool scsi_mq_lld_busy(struct request_queue *q)
-{
-	return scsi_lld_busy(q);
-}
-
-/*
- * Kill a request for a dead device
- */
-static void scsi_kill_request(struct request *req, struct request_queue *q)
-{
-	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-	struct scsi_device *sdev;
-	struct scsi_target *starget;
-	struct Scsi_Host *shost;
-
-	blk_start_request(req);
-
-	scmd_printk(KERN_INFO, cmd, "killing request\n");
-
-	sdev = cmd->device;
-	starget = scsi_target(sdev);
-	shost = sdev->host;
-	scsi_init_cmd_errh(cmd);
-	cmd->result = DID_NO_CONNECT << 16;
-	atomic_inc(&cmd->device->iorequest_cnt);
-
-	/*
-	 * SCSI request completion path will do scsi_device_unbusy(),
-	 * bump busy counts.  To bump the counters, we need to dance
-	 * with the locks as normal issue path does.
-	 */
-	atomic_inc(&sdev->device_busy);
-	atomic_inc(&shost->host_busy);
-	if (starget->can_queue > 0)
-		atomic_inc(&starget->target_busy);
+		return true;
 
-	blk_complete_request(req);
+	return false;
 }
 
 static void scsi_softirq_done(struct request *rq)
@@ -1834,158 +1593,6 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	return 0;
 }
 
-/**
- * scsi_done - Invoke completion on finished SCSI command.
- * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
- * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
- *
- * Description: This function is the mid-level's (SCSI Core) interrupt routine,
- * which regains ownership of the SCSI command (de facto) from a LLDD, and
- * calls blk_complete_request() for further processing.
- *
- * This function is interrupt context safe.
- */
-static void scsi_done(struct scsi_cmnd *cmd)
-{
-	trace_scsi_dispatch_cmd_done(cmd);
-	blk_complete_request(cmd->request);
-}
-
-/*
- * Function:    scsi_request_fn()
- *
- * Purpose:     Main strategy routine for SCSI.
- *
- * Arguments:   q       - Pointer to actual queue.
- *
- * Returns:     Nothing
- *
- * Lock status: request queue lock assumed to be held when called.
- *
- * Note: See sd_zbc.c sd_zbc_write_lock_zone() for write order
- * protection for ZBC disks.
- */
-static void scsi_request_fn(struct request_queue *q)
-	__releases(q->queue_lock)
-	__acquires(q->queue_lock)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct Scsi_Host *shost;
-	struct scsi_cmnd *cmd;
-	struct request *req;
-
-	/*
-	 * To start with, we keep looping until the queue is empty, or until
-	 * the host is no longer able to accept any more requests.
-	 */
-	shost = sdev->host;
-	for (;;) {
-		int rtn;
-		/*
-		 * get next queueable request.  We do this early to make sure
-		 * that the request is fully prepared even if we cannot
-		 * accept it.
-		 */
-		req = blk_peek_request(q);
-		if (!req)
-			break;
-
-		if (unlikely(!scsi_device_online(sdev))) {
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to offline device\n");
-			scsi_kill_request(req, q);
-			continue;
-		}
-
-		if (!scsi_dev_queue_ready(q, sdev))
-			break;
-
-		/*
-		 * Remove the request from the request list.
-		 */
-		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
-			blk_start_request(req);
-
-		spin_unlock_irq(q->queue_lock);
-		cmd = blk_mq_rq_to_pdu(req);
-		if (cmd != req->special) {
-			printk(KERN_CRIT "impossible request in %s.\n"
-					 "please mail a stack trace to "
-					 "linux-scsi@vger.kernel.org\n",
-					 __func__);
-			blk_dump_rq_flags(req, "foo");
-			BUG();
-		}
-
-		/*
-		 * We hit this when the driver is using a host wide
-		 * tag map.  For device level tag maps the queue_depth check
-		 * in the device ready fn would prevent us from trying
-		 * to allocate a tag.  Since the map is a shared host resource
-		 * we add the dev to the starved list so it eventually gets
-		 * a run when a tag is freed.
-		 */
-		if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
-			spin_lock_irq(shost->host_lock);
-			if (list_empty(&sdev->starved_entry))
-				list_add_tail(&sdev->starved_entry,
-					      &shost->starved_list);
-			spin_unlock_irq(shost->host_lock);
-			goto not_ready;
-		}
-
-		if (!scsi_target_queue_ready(shost, sdev))
-			goto not_ready;
-
-		if (!scsi_host_queue_ready(q, shost, sdev))
-			goto host_not_ready;
-
-		if (sdev->simple_tags)
-			cmd->flags |= SCMD_TAGGED;
-		else
-			cmd->flags &= ~SCMD_TAGGED;
-
-		/*
-		 * Finally, initialize any error handling parameters, and set up
-		 * the timers for timeouts.
-		 */
-		scsi_init_cmd_errh(cmd);
-
-		/*
-		 * Dispatch the command to the low-level driver.
-		 */
-		cmd->scsi_done = scsi_done;
-		rtn = scsi_dispatch_cmd(cmd);
-		if (rtn) {
-			scsi_queue_insert(cmd, rtn);
-			spin_lock_irq(q->queue_lock);
-			goto out_delay;
-		}
-		spin_lock_irq(q->queue_lock);
-	}
-
-	return;
-
- host_not_ready:
-	if (scsi_target(sdev)->can_queue > 0)
-		atomic_dec(&scsi_target(sdev)->target_busy);
- not_ready:
-	/*
-	 * lock q, handle tag, requeue req, and decrement device_busy. We
-	 * must return with queue_lock held.
-	 *
-	 * Decrementing device_busy without checking it is OK, as all such
-	 * cases (host limits or settings) should run the queue at some
-	 * later time.
-	 */
-	spin_lock_irq(q->queue_lock);
-	blk_requeue_request(q, req);
-	atomic_dec(&sdev->device_busy);
-out_delay:
-	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
-		blk_delay_queue(q, SCSI_QUEUE_DELAY);
-}
-
 static inline blk_status_t prep_to_mq(int ret)
 {
 	switch (ret) {
@@ -2248,77 +1855,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(__scsi_init_queue);
 
-static int scsi_old_init_rq(struct request_queue *q, struct request *rq,
-		gfp_t gfp)
-{
-	struct Scsi_Host *shost = q->rq_alloc_data;
-	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
-	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
-
-	memset(cmd, 0, sizeof(*cmd));
-
-	if (unchecked_isa_dma)
-		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
-	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, gfp,
-						    NUMA_NO_NODE);
-	if (!cmd->sense_buffer)
-		goto fail;
-	cmd->req.sense = cmd->sense_buffer;
-
-	if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
-		cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
-		if (!cmd->prot_sdb)
-			goto fail_free_sense;
-	}
-
-	return 0;
-
-fail_free_sense:
-	scsi_free_sense_buffer(unchecked_isa_dma, cmd->sense_buffer);
-fail:
-	return -ENOMEM;
-}
-
-static void scsi_old_exit_rq(struct request_queue *q, struct request *rq)
-{
-	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
-
-	if (cmd->prot_sdb)
-		kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
-	scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
-			       cmd->sense_buffer);
-}
-
-struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
-{
-	struct Scsi_Host *shost = sdev->host;
-	struct request_queue *q;
-
-	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
-	if (!q)
-		return NULL;
-	q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
-	q->rq_alloc_data = shost;
-	q->request_fn = scsi_request_fn;
-	q->init_rq_fn = scsi_old_init_rq;
-	q->exit_rq_fn = scsi_old_exit_rq;
-	q->initialize_rq_fn = scsi_initialize_rq;
-
-	if (blk_init_allocated_queue(q) < 0) {
-		blk_cleanup_queue(q);
-		return NULL;
-	}
-
-	__scsi_init_queue(shost, q);
-	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
-	blk_queue_prep_rq(q, scsi_prep_fn);
-	blk_queue_unprep_rq(q, scsi_unprep_fn);
-	blk_queue_softirq_done(q, scsi_softirq_done);
-	blk_queue_rq_timed_out(q, scsi_times_out);
-	blk_queue_lld_busy(q, scsi_lld_busy);
-	return q;
-}
-
 static const struct blk_mq_ops scsi_mq_ops = {
 	.get_budget = scsi_mq_get_budget,
 	.put_budget = scsi_mq_put_budget,
@@ -2386,10 +1922,7 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
 {
 	struct scsi_device *sdev = NULL;
 
-	if (q->mq_ops) {
-		if (q->mq_ops == &scsi_mq_ops)
-			sdev = q->queuedata;
-	} else if (q->request_fn == scsi_request_fn)
+	if (q->mq_ops == &scsi_mq_ops)
 		sdev = q->queuedata;
 	if (!sdev || !get_device(&sdev->sdev_gendev))
 		sdev = NULL;
@@ -2993,39 +2526,6 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
 
 /**
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
- * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
- */
-static int scsi_request_fn_active(struct scsi_device *sdev)
-{
-	struct request_queue *q = sdev->request_queue;
-	int request_fn_active;
-
-	WARN_ON_ONCE(sdev->host->use_blk_mq);
-
-	spin_lock_irq(q->queue_lock);
-	request_fn_active = q->request_fn_active;
-	spin_unlock_irq(q->queue_lock);
-
-	return request_fn_active;
-}
-
-/**
- * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
- * @sdev: SCSI device pointer.
- *
- * Wait until the ongoing shost->hostt->queuecommand() calls that are
- * invoked from scsi_request_fn() have finished.
- */
-static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
-{
-	WARN_ON_ONCE(sdev->host->use_blk_mq);
-
-	while (scsi_request_fn_active(sdev))
-		msleep(20);
-}
-
-/**
  * scsi_device_quiesce - Block user issued commands.
  * @sdev:	scsi device to quiesce.
  *
@@ -3148,7 +2648,6 @@ EXPORT_SYMBOL(scsi_target_resume);
 int scsi_internal_device_block_nowait(struct scsi_device *sdev)
 {
 	struct request_queue *q = sdev->request_queue;
-	unsigned long flags;
 	int err = 0;
 
 	err = scsi_device_set_state(sdev, SDEV_BLOCK);
@@ -3164,14 +2663,7 @@ int scsi_internal_device_block_nowait(struct scsi_device *sdev)
 	 * block layer from calling the midlayer with this device's
 	 * request queue.
 	 */
-	if (q->mq_ops) {
-		blk_mq_quiesce_queue_nowait(q);
-	} else {
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_stop_queue(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
-
+	blk_mq_quiesce_queue_nowait(q);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
@@ -3202,12 +2694,8 @@ static int scsi_internal_device_block(struct scsi_device *sdev)
 
 	mutex_lock(&sdev->state_mutex);
 	err = scsi_internal_device_block_nowait(sdev);
-	if (err == 0) {
-		if (q->mq_ops)
-			blk_mq_quiesce_queue(q);
-		else
-			scsi_wait_for_queuecommand(sdev);
-	}
+	if (err == 0)
+		blk_mq_quiesce_queue(q);
 	mutex_unlock(&sdev->state_mutex);
 
 	return err;
@@ -3216,15 +2704,8 @@ static int scsi_internal_device_block(struct scsi_device *sdev)
 void scsi_start_queue(struct scsi_device *sdev)
 {
 	struct request_queue *q = sdev->request_queue;
-	unsigned long flags;
 
-	if (q->mq_ops) {
-		blk_mq_unquiesce_queue(q);
-	} else {
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_start_queue(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
+	blk_mq_unquiesce_queue(q);
 }
 
 /**
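
[Editor's note] All of the block/unblock plumbing above now funnels into the blk-mq quiesce primitives instead of blk_stop_queue()/blk_start_queue(). A hypothetical caller (the example_* name is illustrative, not kernel code) pauses and resumes dispatch for one device like this:

	/* Hypothetical illustration of the primitives that
	 * scsi_internal_device_block() and scsi_start_queue() now use. */
	static void example_pause_and_resume(struct scsi_device *sdev)
	{
		struct request_queue *q = sdev->request_queue;

		/* Stop new dispatches and wait for in-flight ->queue_rq() calls. */
		blk_mq_quiesce_queue(q);

		/* ...no new commands reach the LLD here... */

		/* Allow dispatch again; the hardware queues are re-run. */
		blk_mq_unquiesce_queue(q);
	}

Note the _nowait variant used by scsi_internal_device_block_nowait() only marks the queue quiesced; the blocking blk_mq_quiesce_queue() also waits out in-flight dispatches, which is why the legacy scsi_wait_for_queuecommand() loop could be deleted.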
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 99f1db5e467e..5f21547b2ad2 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -92,7 +92,6 @@ extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_run_host_queues(struct Scsi_Host *shost);
 extern void scsi_requeue_run_queue(struct work_struct *work);
-extern struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev);
 extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
 extern void scsi_start_queue(struct scsi_device *sdev);
 extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 78ca63dfba4a..dd0d516f65e2 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -266,10 +266,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	 */
 	sdev->borken = 1;
 
-	if (shost_use_blk_mq(shost))
-		sdev->request_queue = scsi_mq_alloc_queue(sdev);
-	else
-		sdev->request_queue = scsi_old_alloc_queue(sdev);
+	sdev->request_queue = scsi_mq_alloc_queue(sdev);
 	if (!sdev->request_queue) {
 		/* release fn is set up in scsi_sysfs_device_initialise, so
 		 * have to free and put manually here */
@@ -280,11 +277,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
 	sdev->request_queue->queuedata = sdev;
 
-	if (!shost_use_blk_mq(sdev->host)) {
-		blk_queue_init_tags(sdev->request_queue,
-				    sdev->host->cmd_per_lun, shost->bqt,
-				    shost->hostt->tag_alloc_policy);
-	}
 	scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
 					sdev->host->cmd_per_lun : 1);
 
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 3aee9464a7bf..6a9040faed00 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -367,7 +367,6 @@ store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
 
-shost_rd_attr(use_blk_mq, "%d\n");
 shost_rd_attr(unique_id, "%u\n");
 shost_rd_attr(cmd_per_lun, "%hd\n");
 shost_rd_attr(can_queue, "%hd\n");
@@ -386,6 +385,13 @@ show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
 }
 static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
 
+static ssize_t
+show_use_blk_mq(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "1\n");
+}
+static DEVICE_ATTR(use_blk_mq, S_IRUGO, show_use_blk_mq, NULL);
+
 static struct attribute *scsi_sysfs_shost_attrs[] = {
 	&dev_attr_use_blk_mq.attr,
 	&dev_attr_unique_id.attr,
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 23d7cca36ff0..fb308ea8e9a5 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -8100,12 +8100,6 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
 		goto out_error;
 	}
 
-	/*
-	 * Do not use blk-mq at this time because blk-mq does not support
-	 * runtime pm.
-	 */
-	host->use_blk_mq = false;
-
 	hba = shost_priv(host);
 	hba->host = host;
 	hba->dev = dev;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 5ea06d310a25..aa760df8c6b3 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -11,7 +11,6 @@
 #include <linux/blk-mq.h>
 #include <scsi/scsi.h>
 
-struct request_queue;
 struct block_device;
 struct completion;
 struct module;
@@ -22,7 +21,6 @@ struct scsi_target;
 struct Scsi_Host;
 struct scsi_host_cmd_pool;
 struct scsi_transport_template;
-struct blk_queue_tags;
 
 
 /*
@@ -547,14 +545,8 @@ struct Scsi_Host {
 	struct scsi_host_template *hostt;
 	struct scsi_transport_template *transportt;
 
-	/*
-	 * Area to keep a shared tag map (if needed, will be
-	 * NULL if not).
-	 */
-	union {
-		struct blk_queue_tag *bqt;
-		struct blk_mq_tag_set tag_set;
-	};
+	/* Area to keep a shared tag map */
+	struct blk_mq_tag_set tag_set;
 
 	atomic_t host_busy;		   /* commands actually active on low-level */
 	atomic_t host_blocked;
@@ -648,7 +640,6 @@ struct Scsi_Host {
 	/* The controller does not support WRITE SAME */
 	unsigned no_write_same:1;
 
-	unsigned use_blk_mq:1;
 	unsigned use_cmd_list:1;
 
 	/* Host responded with short (<36 bytes) INQUIRY result */
@@ -742,11 +733,6 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
 		shost->tmf_in_progress;
 }
 
-static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
-{
-	return shost->use_blk_mq;
-}
-
 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
 extern void scsi_flush_work(struct Scsi_Host *);
 
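
[Editor's note] Because tag_set is no longer one arm of a union with the legacy bqt pointer, it is valid on every registered host, so host-wide iteration needs no mode check. A hypothetical sketch (the example_* names are illustrative, not kernel code) using the stock blk_mq_tagset_busy_iter() helper:

	/* Hypothetical illustration: count in-flight commands on a host
	 * by walking its shared blk-mq tag set. */
	static void example_count_one(struct request *rq, void *data,
				      bool reserved)
	{
		(*(unsigned int *)data)++;
	}

	static unsigned int example_busy_commands(struct Scsi_Host *shost)
	{
		unsigned int count = 0;

		blk_mq_tagset_busy_iter(&shost->tag_set, example_count_one,
					&count);
		return count;
	}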
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index e192a0caa850..6053d46e794e 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -23,19 +23,15 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost,
 		int tag)
 {
 	struct request *req = NULL;
+	u16 hwq;
 
 	if (tag == SCSI_NO_TAG)
 		return NULL;
 
-	if (shost_use_blk_mq(shost)) {
-		u16 hwq = blk_mq_unique_tag_to_hwq(tag);
-
-		if (hwq < shost->tag_set.nr_hw_queues) {
-			req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],
-					       blk_mq_unique_tag_to_tag(tag));
-		}
-	} else {
-		req = blk_map_queue_find_tag(shost->bqt, tag);
+	hwq = blk_mq_unique_tag_to_hwq(tag);
+	if (hwq < shost->tag_set.nr_hw_queues) {
+		req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],
+				       blk_mq_unique_tag_to_tag(tag));
 	}
 
 	if (!req)
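
[Editor's note] For completeness, a hypothetical use of the simplified scsi_host_find_tag() (the example_* name is illustrative, not kernel code): an LLD's completion handler resolving a unique tag back to its command through the host's shared tag set, then handing it to the midlayer:

	/* Hypothetical illustration of a driver completion path. */
	static void example_handle_completion(struct Scsi_Host *shost,
					      u32 unique_tag)
	{
		struct scsi_cmnd *scmd = scsi_host_find_tag(shost, unique_tag);

		if (!scmd)
			return;			/* stale or already-freed tag */

		scmd->scsi_done(scmd);		/* hand back to the midlayer */
	}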