author     Jens Axboe <axboe@kernel.dk>            2018-10-26 11:53:52 -0400
committer  Jens Axboe <axboe@kernel.dk>            2018-11-07 15:42:31 -0500
commit     600335205b8d162891b5ef2e32343f5b8020efd8 (patch)
tree       0f42d3369caf0aa805ef2c48cf4f8aa5bd2e83aa
parent     d0be12274dad242271fb2055275d10b67a0d7649 (diff)
ide: convert to blk-mq
ide-disk and ide-cd have been tested and work just fine; ide-tape and
ide-floppy have not been tested, but they need no changes of their own,
so they should work without issue.

Add a helper function to insert a request from a work queue, since
blk-mq request insertion cannot be done from IRQ context.
Cc: David Miller <davem@davemloft.net>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
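
For reference, the IRQ-safe insertion pattern mentioned in the message reduces
to the sketch below. It is condensed from the ide-io.c and ide-probe.c hunks of
this patch (the rq_list/rq_work fields are added to struct ide_drive_s further
down), so treat it as an illustration rather than a verbatim copy:

/*
 * Condensed sketch of the deferred request insertion added by this patch.
 * ide_insert_request_head() may run in IRQ context, so it only links the
 * request onto a per-drive list and schedules a work item; the work handler
 * then performs the actual blk-mq insertion from process context, where
 * blk_execute_rq_nowait() is allowed to run.
 */
void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned long flags;

	spin_lock_irqsave(&hwif->lock, flags);
	list_add_tail(&rq->queuelist, &drive->rq_list);
	spin_unlock_irqrestore(&hwif->lock, flags);

	kblockd_schedule_work(&drive->rq_work);
}

static void drive_rq_insert_work(struct work_struct *work)
{
	ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	LIST_HEAD(list);

	/* pull everything queued so far off the per-drive list */
	spin_lock_irq(&hwif->lock);
	if (!list_empty(&drive->rq_list))
		list_splice_init(&drive->rq_list, &list);
	spin_unlock_irq(&hwif->lock);

	while (!list_empty(&list)) {
		rq = list_first_entry(&list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL);
	}
}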
-rw-r--r--   drivers/ide/ide-atapi.c |  25
-rw-r--r--   drivers/ide/ide-cd.c    | 175
-rw-r--r--   drivers/ide/ide-disk.c  |   5
-rw-r--r--   drivers/ide/ide-io.c    | 100
-rw-r--r--   drivers/ide/ide-park.c  |   4
-rw-r--r--   drivers/ide/ide-pm.c    |  28
-rw-r--r--   drivers/ide/ide-probe.c |  68
-rw-r--r--   include/linux/ide.h     |  13
8 files changed, 239 insertions, 179 deletions
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 8b2b72b93885..33210bc67618 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -172,8 +172,8 @@ EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
 void ide_prep_sense(ide_drive_t *drive, struct request *rq)
 {
 	struct request_sense *sense = &drive->sense_data;
-	struct request *sense_rq = drive->sense_rq;
-	struct scsi_request *req = scsi_req(sense_rq);
+	struct request *sense_rq;
+	struct scsi_request *req;
 	unsigned int cmd_len, sense_len;
 	int err;
 
@@ -196,9 +196,16 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
 	if (ata_sense_request(rq) || drive->sense_rq_armed)
 		return;
 
+	sense_rq = drive->sense_rq;
+	if (!sense_rq) {
+		sense_rq = blk_mq_alloc_request(drive->queue, REQ_OP_DRV_IN,
+					BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+		drive->sense_rq = sense_rq;
+	}
+	req = scsi_req(sense_rq);
+
 	memset(sense, 0, sizeof(*sense));
 
-	blk_rq_init(rq->q, sense_rq);
 	scsi_req_init(req);
 
 	err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
@@ -207,6 +214,8 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
 		if (printk_ratelimit())
 			printk(KERN_WARNING PFX "%s: failed to map sense "
 					    "buffer\n", drive->name);
+		blk_mq_free_request(sense_rq);
+		drive->sense_rq = NULL;
 		return;
 	}
 
@@ -226,6 +235,8 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
 
 int ide_queue_sense_rq(ide_drive_t *drive, void *special)
 {
+	struct request *sense_rq = drive->sense_rq;
+
 	/* deferred failure from ide_prep_sense() */
 	if (!drive->sense_rq_armed) {
 		printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
@@ -233,12 +244,12 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
 		return -ENOMEM;
 	}
 
-	drive->sense_rq->special = special;
+	sense_rq->special = special;
 	drive->sense_rq_armed = false;
 
 	drive->hwif->rq = NULL;
 
-	elv_add_request(drive->queue, drive->sense_rq, ELEVATOR_INSERT_FRONT);
+	ide_insert_request_head(drive, sense_rq);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
@@ -270,10 +281,8 @@ void ide_retry_pc(ide_drive_t *drive)
 	 */
 	drive->hwif->rq = NULL;
 	ide_requeue_and_plug(drive, failed_rq);
-	if (ide_queue_sense_rq(drive, pc)) {
-		blk_start_request(failed_rq);
+	if (ide_queue_sense_rq(drive, pc))
 		ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(failed_rq));
-	}
 }
 EXPORT_SYMBOL_GPL(ide_retry_pc);
 
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index f9b59d41813f..4ecaf2ace4cb 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -258,11 +258,22 @@ static int ide_cd_breathe(ide_drive_t *drive, struct request *rq)
 		/*
 		 * take a breather
 		 */
-		blk_delay_queue(drive->queue, 1);
+		blk_mq_requeue_request(rq, false);
+		blk_mq_delay_kick_requeue_list(drive->queue, 1);
 		return 1;
 	}
 }
 
+static void ide_cd_free_sense(ide_drive_t *drive)
+{
+	if (!drive->sense_rq)
+		return;
+
+	blk_mq_free_request(drive->sense_rq);
+	drive->sense_rq = NULL;
+	drive->sense_rq_armed = false;
+}
+
 /**
  * Returns:
  * 0: if the request should be continued.
@@ -516,6 +527,82 @@ static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
 	return false;
 }
 
+/* standard prep_rq_fn that builds 10 byte cmds */
+static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
+{
+	int hard_sect = queue_logical_block_size(q);
+	long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
+	unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
+	struct scsi_request *req = scsi_req(rq);
+
+	if (rq_data_dir(rq) == READ)
+		req->cmd[0] = GPCMD_READ_10;
+	else
+		req->cmd[0] = GPCMD_WRITE_10;
+
+	/*
+	 * fill in lba
+	 */
+	req->cmd[2] = (block >> 24) & 0xff;
+	req->cmd[3] = (block >> 16) & 0xff;
+	req->cmd[4] = (block >> 8) & 0xff;
+	req->cmd[5] = block & 0xff;
+
+	/*
+	 * and transfer length
+	 */
+	req->cmd[7] = (blocks >> 8) & 0xff;
+	req->cmd[8] = blocks & 0xff;
+	req->cmd_len = 10;
+	return BLKPREP_OK;
+}
+
+/*
+ * Most of the SCSI commands are supported directly by ATAPI devices.
+ * This transform handles the few exceptions.
+ */
+static int ide_cdrom_prep_pc(struct request *rq)
+{
+	u8 *c = scsi_req(rq)->cmd;
+
+	/* transform 6-byte read/write commands to the 10-byte version */
+	if (c[0] == READ_6 || c[0] == WRITE_6) {
+		c[8] = c[4];
+		c[5] = c[3];
+		c[4] = c[2];
+		c[3] = c[1] & 0x1f;
+		c[2] = 0;
+		c[1] &= 0xe0;
+		c[0] += (READ_10 - READ_6);
+		scsi_req(rq)->cmd_len = 10;
+		return BLKPREP_OK;
+	}
+
+	/*
+	 * it's silly to pretend we understand 6-byte sense commands, just
+	 * reject with ILLEGAL_REQUEST and the caller should take the
+	 * appropriate action
+	 */
+	if (c[0] == MODE_SENSE || c[0] == MODE_SELECT) {
+		scsi_req(rq)->result = ILLEGAL_REQUEST;
+		return BLKPREP_KILL;
+	}
+
+	return BLKPREP_OK;
+}
+
+static int ide_cdrom_prep_fn(ide_drive_t *drive, struct request *rq)
+{
+	if (!blk_rq_is_passthrough(rq)) {
+		scsi_req_init(scsi_req(rq));
+
+		return ide_cdrom_prep_fs(drive->queue, rq);
+	} else if (blk_rq_is_scsi(rq))
+		return ide_cdrom_prep_pc(rq);
+
+	return 0;
+}
+
 static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
@@ -675,7 +762,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 out_end:
 	if (blk_rq_is_scsi(rq) && rc == 0) {
 		scsi_req(rq)->resid_len = 0;
-		blk_end_request_all(rq, BLK_STS_OK);
+		blk_mq_end_request(rq, BLK_STS_OK);
 		hwif->rq = NULL;
 	} else {
 		if (sense && uptodate)
@@ -705,6 +792,8 @@ out_end:
 		if (sense && rc == 2)
 			ide_error(drive, "request sense failure", stat);
 	}
+
+	ide_cd_free_sense(drive);
 	return ide_stopped;
 }
 
@@ -729,7 +818,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
 		 * We may be retrying this request after an error. Fix up any
 		 * weirdness which might be present in the request packet.
 		 */
-		q->prep_rq_fn(q, rq);
+		ide_cdrom_prep_fn(drive, rq);
 	}
 
 	/* fs requests *must* be hardware frame aligned */
@@ -1323,82 +1412,6 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
 	return nslots;
 }
 
-/* standard prep_rq_fn that builds 10 byte cmds */
-static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
-{
-	int hard_sect = queue_logical_block_size(q);
-	long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
-	unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
-	struct scsi_request *req = scsi_req(rq);
-
-	q->initialize_rq_fn(rq);
-
-	if (rq_data_dir(rq) == READ)
-		req->cmd[0] = GPCMD_READ_10;
-	else
-		req->cmd[0] = GPCMD_WRITE_10;
-
-	/*
-	 * fill in lba
-	 */
-	req->cmd[2] = (block >> 24) & 0xff;
-	req->cmd[3] = (block >> 16) & 0xff;
-	req->cmd[4] = (block >> 8) & 0xff;
-	req->cmd[5] = block & 0xff;
-
-	/*
-	 * and transfer length
-	 */
-	req->cmd[7] = (blocks >> 8) & 0xff;
-	req->cmd[8] = blocks & 0xff;
-	req->cmd_len = 10;
-	return BLKPREP_OK;
-}
-
-/*
- * Most of the SCSI commands are supported directly by ATAPI devices.
- * This transform handles the few exceptions.
- */
-static int ide_cdrom_prep_pc(struct request *rq)
-{
-	u8 *c = scsi_req(rq)->cmd;
-
-	/* transform 6-byte read/write commands to the 10-byte version */
-	if (c[0] == READ_6 || c[0] == WRITE_6) {
-		c[8] = c[4];
-		c[5] = c[3];
-		c[4] = c[2];
-		c[3] = c[1] & 0x1f;
-		c[2] = 0;
-		c[1] &= 0xe0;
-		c[0] += (READ_10 - READ_6);
-		scsi_req(rq)->cmd_len = 10;
-		return BLKPREP_OK;
-	}
-
-	/*
-	 * it's silly to pretend we understand 6-byte sense commands, just
-	 * reject with ILLEGAL_REQUEST and the caller should take the
-	 * appropriate action
-	 */
-	if (c[0] == MODE_SENSE || c[0] == MODE_SELECT) {
-		scsi_req(rq)->result = ILLEGAL_REQUEST;
-		return BLKPREP_KILL;
-	}
-
-	return BLKPREP_OK;
-}
-
-static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
-{
-	if (!blk_rq_is_passthrough(rq))
-		return ide_cdrom_prep_fs(q, rq);
-	else if (blk_rq_is_scsi(rq))
-		return ide_cdrom_prep_pc(rq);
-
-	return 0;
-}
-
 struct cd_list_entry {
 	const char *id_model;
 	const char *id_firmware;
@@ -1508,7 +1521,7 @@ static int ide_cdrom_setup(ide_drive_t *drive)
 
 	ide_debug_log(IDE_DBG_PROBE, "enter");
 
-	blk_queue_prep_rq(q, ide_cdrom_prep_fn);
+	drive->prep_rq = ide_cdrom_prep_fn;
 	blk_queue_dma_alignment(q, 31);
 	blk_queue_update_dma_pad(q, 15);
 
@@ -1569,7 +1582,7 @@ static void ide_cd_release(struct device *dev)
 	if (devinfo->handle == drive)
 		unregister_cdrom(devinfo);
 	drive->driver_data = NULL;
-	blk_queue_prep_rq(drive->queue, NULL);
+	drive->prep_rq = NULL;
 	g->private_data = NULL;
 	put_disk(g);
 	kfree(info);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index e3b4e659082d..f8567c8c9dd1 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -427,9 +427,8 @@ static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
 		drive->dev_flags |= IDE_DFLAG_NOHPA;	/* disable HPA on resume */
 }
 
-static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
+static int idedisk_prep_fn(ide_drive_t *drive, struct request *rq)
 {
-	ide_drive_t *drive = q->queuedata;
 	struct ide_cmd *cmd;
 
 	if (req_op(rq) != REQ_OP_FLUSH)
@@ -548,7 +547,7 @@ static void update_flush(ide_drive_t *drive)
 
 	if (barrier) {
 		wc = true;
-		blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
+		drive->prep_rq = idedisk_prep_fn;
 	}
 }
 
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 0d93e0cfbeaf..5093c605c91c 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -67,7 +67,15 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
 		ide_dma_on(drive);
 	}
 
-	return blk_end_request(rq, error, nr_bytes);
+	if (!blk_update_request(rq, error, nr_bytes)) {
+		if (rq == drive->sense_rq)
+			drive->sense_rq = NULL;
+
+		__blk_mq_end_request(rq, error);
+		return 0;
+	}
+
+	return 1;
 }
 EXPORT_SYMBOL_GPL(ide_end_rq);
 
@@ -307,8 +315,6 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 {
 	ide_startstop_t startstop;
 
-	BUG_ON(!(rq->rq_flags & RQF_STARTED));
-
 #ifdef DEBUG
 	printk("%s: start_request: current=0x%08lx\n",
 		drive->hwif->name, (unsigned long) rq);
@@ -320,6 +326,9 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 		goto kill_rq;
 	}
 
+	if (drive->prep_rq && drive->prep_rq(drive, rq))
+		return ide_stopped;
+
 	if (ata_pm_request(rq))
 		ide_check_pm_state(drive, rq);
 
@@ -430,44 +439,38 @@ static inline void ide_unlock_host(struct ide_host *host)
 	}
 }
 
-static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
-{
-	if (rq)
-		blk_requeue_request(q, rq);
-	if (rq || blk_peek_request(q)) {
-		/* Use 3ms as that was the old plug delay */
-		blk_delay_queue(q, 3);
-	}
-}
-
 void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 {
 	struct request_queue *q = drive->queue;
-	unsigned long flags;
 
-	spin_lock_irqsave(q->queue_lock, flags);
-	__ide_requeue_and_plug(q, rq);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	/* Use 3ms as that was the old plug delay */
+	if (rq) {
+		blk_mq_requeue_request(rq, false);
+		blk_mq_delay_kick_requeue_list(q, 3);
+	} else
+		blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
 }
 
 /*
  * Issue a new request to a device.
  */
-void do_ide_request(struct request_queue *q)
+blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
+			  const struct blk_mq_queue_data *bd)
 {
-	ide_drive_t *drive = q->queuedata;
+	ide_drive_t *drive = hctx->queue->queuedata;
 	ide_hwif_t *hwif = drive->hwif;
 	struct ide_host *host = hwif->host;
 	struct request *rq = NULL;
 	ide_startstop_t startstop;
 
-	spin_unlock_irq(q->queue_lock);
-
 	/* HLD do_request() callback might sleep, make sure it's okay */
 	might_sleep();
 
 	if (ide_lock_host(host, hwif))
-		goto plug_device_2;
+		return BLK_STS_DEV_RESOURCE;
+
+	rq = bd->rq;
+	blk_mq_start_request(rq);
 
 	spin_lock_irq(&hwif->lock);
 
@@ -503,21 +506,16 @@ repeat:
 	hwif->cur_dev = drive;
 	drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
 
-	spin_unlock_irq(&hwif->lock);
-	spin_lock_irq(q->queue_lock);
 	/*
 	 * we know that the queue isn't empty, but this can happen
 	 * if the q->prep_rq_fn() decides to kill a request
 	 */
-	if (!rq)
-		rq = blk_fetch_request(drive->queue);
-
-	spin_unlock_irq(q->queue_lock);
-	spin_lock_irq(&hwif->lock);
-
 	if (!rq) {
-		ide_unlock_port(hwif);
-		goto out;
+		rq = bd->rq;
+		if (!rq) {
+			ide_unlock_port(hwif);
+			goto out;
+		}
 	}
 
 	/*
@@ -551,23 +549,24 @@ repeat:
 		if (startstop == ide_stopped) {
 			rq = hwif->rq;
 			hwif->rq = NULL;
-			goto repeat;
+			if (rq)
+				goto repeat;
+			ide_unlock_port(hwif);
+			goto out;
 		}
-	} else
-		goto plug_device;
+	} else {
+plug_device:
+		spin_unlock_irq(&hwif->lock);
+		ide_unlock_host(host);
+		ide_requeue_and_plug(drive, rq);
+		return BLK_STS_OK;
+	}
+
 out:
 	spin_unlock_irq(&hwif->lock);
 	if (rq == NULL)
 		ide_unlock_host(host);
-	spin_lock_irq(q->queue_lock);
-	return;
-
-plug_device:
-	spin_unlock_irq(&hwif->lock);
-	ide_unlock_host(host);
-plug_device_2:
-	spin_lock_irq(q->queue_lock);
-	__ide_requeue_and_plug(q, rq);
+	return BLK_STS_OK;
 }
 
 static int drive_is_ready(ide_drive_t *drive)
@@ -887,3 +886,16 @@ void ide_pad_transfer(ide_drive_t *drive, int write, int len)
 	}
 }
 EXPORT_SYMBOL_GPL(ide_pad_transfer);
+
+void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hwif->lock, flags);
+	list_add_tail(&rq->queuelist, &drive->rq_list);
+	spin_unlock_irqrestore(&hwif->lock, flags);
+
+	kblockd_schedule_work(&drive->rq_work);
+}
+EXPORT_SYMBOL_GPL(ide_insert_request_head);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 622f0edb3945..de9e85cf74d1 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -27,7 +27,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 		spin_unlock_irq(&hwif->lock);
 
 		if (start_queue)
-			blk_run_queue(q);
+			blk_mq_run_hw_queues(q, true);
 		return;
 	}
 	spin_unlock_irq(&hwif->lock);
@@ -54,7 +54,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
 	scsi_req(rq)->cmd_len = 1;
 	ide_req(rq)->type = ATA_PRIV_MISC;
-	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
+	ide_insert_request_head(drive, rq);
 
 out:
 	return;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 59217aa1d1fb..ea10507e5190 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -40,32 +40,20 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 	return ret;
 }
 
-static void ide_end_sync_rq(struct request *rq, blk_status_t error)
-{
-	complete(rq->end_io_data);
-}
-
 static int ide_pm_execute_rq(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	DECLARE_COMPLETION_ONSTACK(wait);
-
-	rq->end_io_data = &wait;
-	rq->end_io = ide_end_sync_rq;
 
 	spin_lock_irq(q->queue_lock);
 	if (unlikely(blk_queue_dying(q))) {
 		rq->rq_flags |= RQF_QUIET;
 		scsi_req(rq)->result = -ENXIO;
-		__blk_end_request_all(rq, BLK_STS_OK);
 		spin_unlock_irq(q->queue_lock);
+		blk_mq_end_request(rq, BLK_STS_OK);
 		return -ENXIO;
 	}
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
-	__blk_run_queue_uncond(q);
 	spin_unlock_irq(q->queue_lock);
-
-	wait_for_completion_io(&wait);
+	blk_execute_rq(q, NULL, rq, true);
 
 	return scsi_req(rq)->result ? -EIO : 0;
 }
@@ -79,6 +67,8 @@ int generic_ide_resume(struct device *dev)
 	struct ide_pm_state rqpm;
 	int err;
 
+	blk_mq_start_stopped_hw_queues(drive->queue, true);
+
 	if (ide_port_acpi(hwif)) {
 		/* call ACPI _PS0 / _STM only once */
 		if ((drive->dn & 1) == 0 || pair == NULL) {
@@ -226,15 +216,14 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
 #endif
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
-		blk_stop_queue(q);
+		blk_mq_stop_hw_queues(q);
 	else
 		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	drive->hwif->rq = NULL;
 
-	if (blk_end_request(rq, BLK_STS_OK, 0))
-		BUG();
+	blk_mq_end_request(rq, BLK_STS_OK);
 }
 
 void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
@@ -260,7 +249,6 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
 		ide_hwif_t *hwif = drive->hwif;
 		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
 		struct request_queue *q = drive->queue;
-		unsigned long flags;
 		int rc;
 #ifdef DEBUG_PM
 		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
@@ -274,8 +262,6 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
 		if (rc)
 			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
 
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_start_queue(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		blk_mq_start_hw_queues(q);
 	}
 }
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 3b75a7b7a284..40384838e439 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -750,6 +750,11 @@ static void ide_initialize_rq(struct request *rq)
 	req->sreq.sense = req->sense;
 }
 
+static const struct blk_mq_ops ide_mq_ops = {
+	.queue_rq		= ide_queue_rq,
+	.initialize_rq_fn	= ide_initialize_rq,
+};
+
 /*
  * init request queue
  */
@@ -759,6 +764,7 @@ static int ide_init_queue(ide_drive_t *drive)
 	ide_hwif_t *hwif = drive->hwif;
 	int max_sectors = 256;
 	int max_sg_entries = PRD_ENTRIES;
+	struct blk_mq_tag_set *set;
 
 	/*
 	 * Our default set up assumes the normal IDE case,
@@ -767,19 +773,26 @@ static int ide_init_queue(ide_drive_t *drive)
 	 * limits and LBA48 we could raise it but as yet
 	 * do not.
 	 */
-	q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif), NULL);
-	if (!q)
+
+	set = &drive->tag_set;
+	set->ops = &ide_mq_ops;
+	set->nr_hw_queues = 1;
+	set->queue_depth = 32;
+	set->reserved_tags = 1;
+	set->cmd_size = sizeof(struct ide_request);
+	set->numa_node = hwif_to_node(hwif);
+	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+	if (blk_mq_alloc_tag_set(set))
 		return 1;
 
-	q->request_fn = do_ide_request;
-	q->initialize_rq_fn = ide_initialize_rq;
-	q->cmd_size = sizeof(struct ide_request);
-	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
-	if (blk_init_allocated_queue(q) < 0) {
-		blk_cleanup_queue(q);
+	q = blk_mq_init_queue(set);
+	if (IS_ERR(q)) {
+		blk_mq_free_tag_set(set);
 		return 1;
 	}
 
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+
 	q->queuedata = drive;
 	blk_queue_segment_boundary(q, 0xffff);
 
@@ -965,8 +978,12 @@ static void drive_release_dev (struct device *dev)
 
 	ide_proc_unregister_device(drive);
 
+	if (drive->sense_rq)
+		blk_mq_free_request(drive->sense_rq);
+
 	blk_cleanup_queue(drive->queue);
 	drive->queue = NULL;
+	blk_mq_free_tag_set(&drive->tag_set);
 
 	drive->dev_flags &= ~IDE_DFLAG_PRESENT;
 
@@ -1133,6 +1150,28 @@ static void ide_port_cable_detect(ide_hwif_t *hwif)
 	}
 }
 
+/*
+ * Deferred request list insertion handler
+ */
+static void drive_rq_insert_work(struct work_struct *work)
+{
+	ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
+	ide_hwif_t *hwif = drive->hwif;
+	struct request *rq;
+	LIST_HEAD(list);
+
+	spin_lock_irq(&hwif->lock);
+	if (!list_empty(&drive->rq_list))
+		list_splice_init(&drive->rq_list, &list);
+	spin_unlock_irq(&hwif->lock);
+
+	while (!list_empty(&list)) {
+		rq = list_first_entry(&list, struct request, queuelist);
+		list_del_init(&rq->queuelist);
+		blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL);
+	}
+}
+
 static const u8 ide_hwif_to_major[] =
     { IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR, IDE4_MAJOR,
       IDE5_MAJOR, IDE6_MAJOR, IDE7_MAJOR, IDE8_MAJOR, IDE9_MAJOR };
@@ -1145,12 +1184,10 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
 	ide_port_for_each_dev(i, drive, hwif) {
 		u8 j = (hwif->index * MAX_DRIVES) + i;
 		u16 *saved_id = drive->id;
-		struct request *saved_sense_rq = drive->sense_rq;
 
 		memset(drive, 0, sizeof(*drive));
 		memset(saved_id, 0, SECTOR_SIZE);
 		drive->id = saved_id;
-		drive->sense_rq = saved_sense_rq;
 
 		drive->media = ide_disk;
 		drive->select = (i << 4) | ATA_DEVICE_OBS;
@@ -1166,6 +1203,9 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
 
 		INIT_LIST_HEAD(&drive->list);
 		init_completion(&drive->gendev_rel_comp);
+
+		INIT_WORK(&drive->rq_work, drive_rq_insert_work);
+		INIT_LIST_HEAD(&drive->rq_list);
 	}
 }
 
@@ -1255,7 +1295,6 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
 	int i;
 
 	ide_port_for_each_dev(i, drive, hwif) {
-		kfree(drive->sense_rq);
 		kfree(drive->id);
 		kfree(drive);
 	}
@@ -1283,17 +1322,10 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
 		if (drive->id == NULL)
 			goto out_free_drive;
 
-		drive->sense_rq = kmalloc(sizeof(struct request) +
-				sizeof(struct ide_request), GFP_KERNEL);
-		if (!drive->sense_rq)
-			goto out_free_id;
-
 		hwif->devices[i] = drive;
 	}
 	return 0;
 
-out_free_id:
-	kfree(drive->id);
 out_free_drive:
 	kfree(drive);
 out_nomem:
diff --git a/include/linux/ide.h b/include/linux/ide.h
index c74b0321922a..079f8bc0b0f4 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -10,7 +10,7 @@
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/ata.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/proc_fs.h>
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
@@ -529,6 +529,10 @@ struct ide_drive_s {
 
 	struct request_queue *queue;	/* request queue */
 
+	int (*prep_rq)(struct ide_drive_s *, struct request *);
+
+	struct blk_mq_tag_set tag_set;
+
 	struct request *rq;		/* current request */
 	void *driver_data;		/* extra driver data */
 	u16 *id;			/* identification info */
@@ -612,6 +616,10 @@ struct ide_drive_s {
 	bool sense_rq_armed;
 	struct request *sense_rq;
 	struct request_sense sense_data;
+
+	/* async sense insertion */
+	struct work_struct rq_work;
+	struct list_head rq_list;
 };
 
 typedef struct ide_drive_s ide_drive_t;
@@ -1089,6 +1097,7 @@ extern int ide_pci_clk;
 
 int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
 void ide_kill_rq(ide_drive_t *, struct request *);
+void ide_insert_request_head(ide_drive_t *, struct request *);
 
 void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
 void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
@@ -1208,7 +1217,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
 
 extern void ide_timer_expiry(struct timer_list *t);
 extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern void do_ide_request(struct request_queue *);
+extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
 
 void ide_init_disk(struct gendisk *, ide_drive_t *);