path: root/drivers/ide/ide-pm.c
author     Jens Axboe <axboe@kernel.dk>    2018-10-26 11:53:52 -0400
committer  Jens Axboe <axboe@kernel.dk>    2018-11-07 15:42:31 -0500
commit     600335205b8d162891b5ef2e32343f5b8020efd8 (patch)
tree       0f42d3369caf0aa805ef2c48cf4f8aa5bd2e83aa /drivers/ide/ide-pm.c
parent     d0be12274dad242271fb2055275d10b67a0d7649 (diff)
ide: convert to blk-mq
ide-disk and ide-cd have been tested as working just fine; ide-tape and
ide-floppy haven't been tested, but they don't require changes, so they
should work without issue.

Add a helper function to insert a request from a work queue, since we
cannot invoke the blk-mq request insertion from IRQ context.

Cc: David Miller <davem@davemloft.net>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
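As a rough illustration of the work-queue insertion the message refers to: under blk-mq a request must not be inserted from IRQ context, so the usual pattern is to park it on a list and let a work item hand it to the block layer from process context. The sketch below shows only that pattern; the names (deferred_rqs, defer_rq_insert, deferred_insert_work) are illustrative and are not necessarily the ones used by the patch itself.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static void deferred_insert_work_fn(struct work_struct *work);

static LIST_HEAD(deferred_rqs);
static DEFINE_SPINLOCK(deferred_lock);
static DECLARE_WORK(deferred_insert_work, deferred_insert_work_fn);

/* Runs in process context, where handing requests to blk-mq is allowed. */
static void deferred_insert_work_fn(struct work_struct *work)
{
        struct request *rq;
        LIST_HEAD(list);

        spin_lock_irq(&deferred_lock);
        list_splice_init(&deferred_rqs, &list);
        spin_unlock_irq(&deferred_lock);

        while (!list_empty(&list)) {
                rq = list_first_entry(&list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                /* Insert at the head and let blk-mq run the queue. */
                blk_execute_rq_nowait(rq->q, NULL, rq, true, NULL);
        }
}

/* Safe to call from IRQ context: only list manipulation and a work kick. */
static void defer_rq_insert(struct request *rq)
{
        unsigned long flags;

        spin_lock_irqsave(&deferred_lock, flags);
        list_add_tail(&rq->queuelist, &deferred_rqs);
        spin_unlock_irqrestore(&deferred_lock, flags);

        kblockd_schedule_work(&deferred_insert_work);
}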
Diffstat (limited to 'drivers/ide/ide-pm.c')
-rw-r--r--  drivers/ide/ide-pm.c  28
1 file changed, 7 insertions(+), 21 deletions(-)
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 59217aa1d1fb..ea10507e5190 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -40,32 +40,20 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
         return ret;
 }
 
-static void ide_end_sync_rq(struct request *rq, blk_status_t error)
-{
-        complete(rq->end_io_data);
-}
-
 static int ide_pm_execute_rq(struct request *rq)
 {
         struct request_queue *q = rq->q;
-        DECLARE_COMPLETION_ONSTACK(wait);
-
-        rq->end_io_data = &wait;
-        rq->end_io = ide_end_sync_rq;
 
         spin_lock_irq(q->queue_lock);
         if (unlikely(blk_queue_dying(q))) {
                 rq->rq_flags |= RQF_QUIET;
                 scsi_req(rq)->result = -ENXIO;
-                __blk_end_request_all(rq, BLK_STS_OK);
                 spin_unlock_irq(q->queue_lock);
+                blk_mq_end_request(rq, BLK_STS_OK);
                 return -ENXIO;
         }
-        __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
-        __blk_run_queue_uncond(q);
         spin_unlock_irq(q->queue_lock);
-
-        wait_for_completion_io(&wait);
+        blk_execute_rq(q, NULL, rq, true);
 
         return scsi_req(rq)->result ? -EIO : 0;
 }
@@ -79,6 +67,8 @@ int generic_ide_resume(struct device *dev)
         struct ide_pm_state rqpm;
         int err;
 
+        blk_mq_start_stopped_hw_queues(drive->queue, true);
+
         if (ide_port_acpi(hwif)) {
                 /* call ACPI _PS0 / _STM only once */
                 if ((drive->dn & 1) == 0 || pair == NULL) {
@@ -226,15 +216,14 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
 #endif
         spin_lock_irqsave(q->queue_lock, flags);
         if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
-                blk_stop_queue(q);
+                blk_mq_stop_hw_queues(q);
         else
                 drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
         spin_unlock_irqrestore(q->queue_lock, flags);
 
         drive->hwif->rq = NULL;
 
-        if (blk_end_request(rq, BLK_STS_OK, 0))
-                BUG();
+        blk_mq_end_request(rq, BLK_STS_OK);
 }
 
 void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
@@ -260,7 +249,6 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
                 ide_hwif_t *hwif = drive->hwif;
                 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
                 struct request_queue *q = drive->queue;
-                unsigned long flags;
                 int rc;
 #ifdef DEBUG_PM
                 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
@@ -274,8 +262,6 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
                 if (rc)
                         printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
 
-                spin_lock_irqsave(q->queue_lock, flags);
-                blk_start_queue(q);
-                spin_unlock_irqrestore(q->queue_lock, flags);
+                blk_mq_start_hw_queues(q);
         }
 }
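For reference, the caller side of ide_pm_execute_rq() under blk-mq boils down to allocating a driver-private request and running it synchronously with blk_execute_rq(), which waits on an internal completion, so the on-stack completion and custom end_io handler removed above are no longer needed. A minimal sketch, assuming an IDE-style queue where scsi_req() is valid; example_issue_pm_resume() and its use of ATA_PRIV_PM_RESUME are illustrative, not code from this patch.

#include <linux/blkdev.h>
#include <linux/ide.h>

static int example_issue_pm_resume(struct request_queue *q)
{
        struct request *rq;
        int err;

        /* Allocate a driver-private request from the blk-mq tag set. */
        rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        ide_req(rq)->type = ATA_PRIV_PM_RESUME;

        /*
         * blk_execute_rq() inserts the request and sleeps until it
         * completes, replacing the old DECLARE_COMPLETION_ONSTACK() /
         * ide_end_sync_rq() pair.
         */
        blk_execute_rq(q, NULL, rq, true);

        err = scsi_req(rq)->result ? -EIO : 0;
        blk_put_request(rq);
        return err;
}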