author     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>    2008-12-29 14:27:30 -0500
committer  Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>    2008-12-29 14:27:30 -0500
commit     1d0bf587df5b17bb93b32d760171417883ca907f
tree       9068aa5b43b06609cdc0b57f6c5aee29069e6d21 /drivers/ide/ide-io.c
parent     44e312310889145b47311a311d3faf2488349116
ide: ide_hwgroup_t.rq doesn't need an ide_lock held
While at it:
- no need to check for hwgroup presence in ide_dump_opcode()
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
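The locking change is easiest to see as a before/after pattern. Below is a minimal user-space sketch of that pattern, not an excerpt from the driver: pthread_spinlock_t stands in for ide_lock (which also serves as the queue lock here), struct hwgroup and struct request are stub types, and end_request_old(), end_request_new() and finish_rq() are illustrative names. The point it models is that reading and clearing hwgroup->rq moves outside the critical section, while the block-layer completion call keeps it.

/*
 * Not kernel code: a small model of the locking change in this patch.
 * pthread_spinlock_t stands in for ide_lock; finish_rq() stands in for
 * the queue-lock-protected block-layer call (__blk_end_request()).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct request { int errors; };
struct hwgroup { struct request *rq; };		/* models ide_hwgroup_t.rq */

static pthread_spinlock_t ide_lock;		/* models ide_lock / queue lock */

/* Stand-in for __blk_end_request(): must run under the queue lock. */
static void finish_rq(struct request *rq)
{
	rq->errors = 0;
	free(rq);
}

/* Before the patch: hwgroup->rq was read and cleared under ide_lock. */
static void end_request_old(struct hwgroup *hwg)
{
	struct request *rq;

	pthread_spin_lock(&ide_lock);
	rq = hwg->rq;				/* pointer access inside the lock */
	hwg->rq = NULL;
	finish_rq(rq);
	pthread_spin_unlock(&ide_lock);
}

/* After the patch: only the block-layer call keeps the lock. */
static void end_request_new(struct hwgroup *hwg)
{
	struct request *rq = hwg->rq;		/* no ide_lock needed */

	hwg->rq = NULL;

	pthread_spin_lock(&ide_lock);
	finish_rq(rq);				/* queue-lock-protected part only */
	pthread_spin_unlock(&ide_lock);
}

int main(void)
{
	struct hwgroup hwg;

	pthread_spin_init(&ide_lock, PTHREAD_PROCESS_PRIVATE);

	hwg.rq = calloc(1, sizeof(*hwg.rq));
	end_request_old(&hwg);

	hwg.rq = calloc(1, sizeof(*hwg.rq));
	end_request_new(&hwg);

	puts("both paths completed");
	pthread_spin_destroy(&ide_lock);
	return 0;
}

In the patch itself the same shuffle appears in ide_end_request(), ide_end_dequeued_request(), ide_complete_pm_request(), ide_end_drive_cmd() and ide_do_drive_cmd(): ide_lock is now taken only around __ide_end_request(), __blk_end_request() and __elv_add_request(), while hwgroup->rq is accessed outside it.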
Diffstat (limited to 'drivers/ide/ide-io.c')
-rw-r--r--   drivers/ide/ide-io.c   40
1 file changed, 19 insertions(+), 21 deletions(-)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 35b625882803..8afe7b15decf 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -107,17 +107,10 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 {
 	unsigned int nr_bytes = nr_sectors << 9;
-	struct request *rq;
+	struct request *rq = drive->hwif->hwgroup->rq;
 	unsigned long flags;
 	int ret = 1;
 
-	/*
-	 * room for locking improvements here, the calls below don't
-	 * need the queue lock held at all
-	 */
-	spin_lock_irqsave(&ide_lock, flags);
-	rq = HWGROUP(drive)->rq;
-
 	if (!nr_bytes) {
 		if (blk_pc_request(rq))
 			nr_bytes = rq->data_len;
@@ -125,9 +118,10 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 			nr_bytes = rq->hard_cur_sectors << 9;
 	}
 
+	spin_lock_irqsave(&ide_lock, flags);
 	ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
-
 	spin_unlock_irqrestore(&ide_lock, flags);
+
 	return ret;
 }
 EXPORT_SYMBOL(ide_end_request);
@@ -245,8 +239,9 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&ide_lock, flags);
 	BUG_ON(!blk_rq_started(rq));
+
+	spin_lock_irqsave(&ide_lock, flags);
 	ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
 	spin_unlock_irqrestore(&ide_lock, flags);
 
@@ -278,7 +273,11 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
 		blk_start_queue(drive->queue);
 	}
-	HWGROUP(drive)->rq = NULL;
+	spin_unlock_irqrestore(&ide_lock, flags);
+
+	drive->hwif->hwgroup->rq = NULL;
+
+	spin_lock_irqsave(&ide_lock, flags);
 	if (__blk_end_request(rq, 0, 0))
 		BUG();
 	spin_unlock_irqrestore(&ide_lock, flags);
@@ -300,12 +299,9 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 
 void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 {
+	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
+	struct request *rq = hwgroup->rq;
 	unsigned long flags;
-	struct request *rq;
-
-	spin_lock_irqsave(&ide_lock, flags);
-	rq = HWGROUP(drive)->rq;
-	spin_unlock_irqrestore(&ide_lock, flags);
 
 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 		ide_task_t *task = (ide_task_t *)rq->special;
@@ -333,15 +329,16 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 		return;
 	}
 
-	spin_lock_irqsave(&ide_lock, flags);
-	HWGROUP(drive)->rq = NULL;
+	hwgroup->rq = NULL;
+
 	rq->errors = err;
+
+	spin_lock_irqsave(&ide_lock, flags);
 	if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
 				       blk_rq_bytes(rq))))
 		BUG();
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
-
 EXPORT_SYMBOL(ide_end_drive_cmd);
 
 static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
@@ -1489,11 +1486,12 @@ out:
 
 void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
 {
+	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
 	unsigned long flags;
-	ide_hwgroup_t *hwgroup = HWGROUP(drive);
 
-	spin_lock_irqsave(&ide_lock, flags);
 	hwgroup->rq = NULL;
+
+	spin_lock_irqsave(&ide_lock, flags);
 	__elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
 	blk_start_queueing(drive->queue);
 	spin_unlock_irqrestore(&ide_lock, flags);