Diffstat (limited to 'drivers/ide/ide-io.c')
 drivers/ide/ide-io.c | 292 ++++++++++---------------------------------------------
 1 files changed, 53 insertions(+), 239 deletions(-)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index cc35d6dbd410..ecacc008fdaf 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -84,11 +84,11 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 		ide_dma_on(drive);
 	}
 
-	if (!__blk_end_request(rq, error, nr_bytes)) {
-		if (dequeue)
-			HWGROUP(drive)->rq = NULL;
-		ret = 0;
-	}
+	if (!blk_end_request(rq, error, nr_bytes))
+		ret = 0;
+
+	if (ret == 0 && dequeue)
+		drive->hwif->hwgroup->rq = NULL;
 
 	return ret;
 }
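
The __blk_end_request() to blk_end_request() switch above is what lets the hunks below drop their explicit locking: in the block-layer API of this era, __blk_end_request() must be called with the queue lock already held, while blk_end_request() acquires q->queue_lock itself. Both return nonzero while bytes remain in the request and 0 once it is fully completed. A minimal sketch of the contrast (q, flags and done are illustrative locals, not names from this diff):

	/* __blk_end_request(): caller is responsible for q->queue_lock */
	spin_lock_irqsave(q->queue_lock, flags);
	done = !__blk_end_request(rq, error, nr_bytes);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* blk_end_request(): takes q->queue_lock internally */
	done = !blk_end_request(rq, error, nr_bytes);
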
@@ -107,16 +107,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 {
 	unsigned int nr_bytes = nr_sectors << 9;
-	struct request *rq;
-	unsigned long flags;
-	int ret = 1;
-
-	/*
-	 * room for locking improvements here, the calls below don't
-	 * need the queue lock held at all
-	 */
-	spin_lock_irqsave(&ide_lock, flags);
-	rq = HWGROUP(drive)->rq;
+	struct request *rq = drive->hwif->hwgroup->rq;
 
 	if (!nr_bytes) {
 		if (blk_pc_request(rq))
@@ -125,105 +116,10 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 		nr_bytes = rq->hard_cur_sectors << 9;
 	}
 
-	ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
-
-	spin_unlock_irqrestore(&ide_lock, flags);
-	return ret;
+	return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
 }
 EXPORT_SYMBOL(ide_end_request);
 
-static void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
-{
-	struct request_pm_state *pm = rq->data;
-
-#ifdef DEBUG_PM
-	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
-	       drive->name, pm->pm_step);
-#endif
-	if (drive->media != ide_disk)
-		return;
-
-	switch (pm->pm_step) {
-	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
-		if (pm->pm_state == PM_EVENT_FREEZE)
-			pm->pm_step = IDE_PM_COMPLETED;
-		else
-			pm->pm_step = IDE_PM_STANDBY;
-		break;
-	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
-		pm->pm_step = IDE_PM_COMPLETED;
-		break;
-	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
-		pm->pm_step = IDE_PM_IDLE;
-		break;
-	case IDE_PM_IDLE:		/* Resume step 2 (idle)*/
-		pm->pm_step = IDE_PM_RESTORE_DMA;
-		break;
-	}
-}
-
-static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
-{
-	struct request_pm_state *pm = rq->data;
-	ide_task_t *args = rq->special;
-
-	memset(args, 0, sizeof(*args));
-
-	switch (pm->pm_step) {
-	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
-		if (drive->media != ide_disk)
-			break;
-		/* Not supported? Switch to next step now. */
-		if (ata_id_flush_enabled(drive->id) == 0 ||
-		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
-			ide_complete_power_step(drive, rq);
-			return ide_stopped;
-		}
-		if (ata_id_flush_ext_enabled(drive->id))
-			args->tf.command = ATA_CMD_FLUSH_EXT;
-		else
-			args->tf.command = ATA_CMD_FLUSH;
-		goto out_do_tf;
-	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
-		args->tf.command = ATA_CMD_STANDBYNOW1;
-		goto out_do_tf;
-	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
-		ide_set_max_pio(drive);
-		/*
-		 * skip IDE_PM_IDLE for ATAPI devices
-		 */
-		if (drive->media != ide_disk)
-			pm->pm_step = IDE_PM_RESTORE_DMA;
-		else
-			ide_complete_power_step(drive, rq);
-		return ide_stopped;
-	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
-		args->tf.command = ATA_CMD_IDLEIMMEDIATE;
-		goto out_do_tf;
-	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
-		/*
-		 * Right now, all we do is call ide_set_dma(drive),
-		 * we could be smarter and check for current xfer_speed
-		 * in struct drive etc...
-		 */
-		if (drive->hwif->dma_ops == NULL)
-			break;
-		/*
-		 * TODO: respect IDE_DFLAG_USING_DMA
-		 */
-		ide_set_dma(drive);
-		break;
-	}
-
-	pm->pm_step = IDE_PM_COMPLETED;
-	return ide_stopped;
-
-out_do_tf:
-	args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-	args->data_phase = TASKFILE_NO_DATA;
-	return do_rw_taskfile(drive, args);
-}
-
 /**
  * ide_end_dequeued_request - complete an IDE I/O
  * @drive: IDE device for the I/O
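
The two helpers deleted above implement the drive power-management step machine; they are removed from ide-io.c here, and in the mainline tree the same code lives on in drivers/ide/ide-pm.c. A rough sketch of the suspend-side transitions they encode, using the field names from the removed code:

	/* Sketch: suspend-side step advance as in the removed
	 * ide_complete_power_step(); PM_EVENT_FREEZE skips standby. */
	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* flush issued; standby next, or done */
		pm->pm_step = (pm->pm_state == PM_EVENT_FREEZE) ?
			IDE_PM_COMPLETED : IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* standby issued; sequence complete */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	}
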
@@ -242,48 +138,12 @@ out_do_tf:
 int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
 			     int uptodate, int nr_sectors)
 {
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&ide_lock, flags);
 	BUG_ON(!blk_rq_started(rq));
-	ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
-	spin_unlock_irqrestore(&ide_lock, flags);
 
-	return ret;
+	return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
 }
 EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
 
-
-/**
- * ide_complete_pm_request - end the current Power Management request
- * @drive: target drive
- * @rq: request
- *
- * This function cleans up the current PM request and stops the queue
- * if necessary.
- */
-static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
-{
-	unsigned long flags;
-
-#ifdef DEBUG_PM
-	printk("%s: completing PM request, %s\n", drive->name,
-	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
-#endif
-	spin_lock_irqsave(&ide_lock, flags);
-	if (blk_pm_suspend_request(rq)) {
-		blk_stop_queue(drive->queue);
-	} else {
-		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
-		blk_start_queue(drive->queue);
-	}
-	HWGROUP(drive)->rq = NULL;
-	if (__blk_end_request(rq, 0, 0))
-		BUG();
-	spin_unlock_irqrestore(&ide_lock, flags);
-}
-
 /**
  * ide_end_drive_cmd - end an explicit drive command
  * @drive: command
@@ -300,19 +160,12 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 
 void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 {
-	unsigned long flags;
-	struct request *rq;
-
-	spin_lock_irqsave(&ide_lock, flags);
-	rq = HWGROUP(drive)->rq;
-	spin_unlock_irqrestore(&ide_lock, flags);
+	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
+	struct request *rq = hwgroup->rq;
 
 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 		ide_task_t *task = (ide_task_t *)rq->special;
 
-		if (rq->errors == 0)
-			rq->errors = !OK_STAT(stat, ATA_DRDY, BAD_STAT);
-
 		if (task) {
 			struct ide_taskfile *tf = &task->tf;
 
@@ -333,15 +186,14 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 		return;
 	}
 
-	spin_lock_irqsave(&ide_lock, flags);
-	HWGROUP(drive)->rq = NULL;
+	hwgroup->rq = NULL;
+
 	rq->errors = err;
-	if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
-				       blk_rq_bytes(rq))))
+
+	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
+				     blk_rq_bytes(rq))))
 		BUG();
-	spin_unlock_irqrestore(&ide_lock, flags);
 }
-
 EXPORT_SYMBOL(ide_end_drive_cmd);
 
 static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
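
A note on the BUG() kept in the hunk above: blk_rq_bytes() is the number of bytes still pending in the request, and blk_end_request() returns nonzero only while bytes remain afterwards. Completing exactly blk_rq_bytes(rq) bytes must therefore finish the request, so a nonzero return here can only mean corrupted request accounting, not a recoverable error. The invariant, as a sketch:

	/* Sketch of the invariant behind the BUG() above. */
	unsigned int left = blk_rq_bytes(rq);	/* everything still pending */

	if (blk_end_request(rq, rq->errors ? -EIO : 0, left))
		BUG();	/* completing 'left' bytes failed to finish rq */
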
@@ -720,40 +572,6 @@ static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
 	}
 }
 
-static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
-{
-	struct request_pm_state *pm = rq->data;
-
-	if (blk_pm_suspend_request(rq) &&
-	    pm->pm_step == IDE_PM_START_SUSPEND)
-		/* Mark drive blocked when starting the suspend sequence. */
-		drive->dev_flags |= IDE_DFLAG_BLOCKED;
-	else if (blk_pm_resume_request(rq) &&
-		 pm->pm_step == IDE_PM_START_RESUME) {
-		/*
-		 * The first thing we do on wakeup is to wait for BSY bit to
-		 * go away (with a looong timeout) as a drive on this hwif may
-		 * just be POSTing itself.
-		 * We do that before even selecting as the "other" device on
-		 * the bus may be broken enough to walk on our toes at this
-		 * point.
-		 */
-		ide_hwif_t *hwif = drive->hwif;
-		int rc;
-#ifdef DEBUG_PM
-		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
-#endif
-		rc = ide_wait_not_busy(hwif, 35000);
-		if (rc)
-			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
-		SELECT_DRIVE(drive);
-		hwif->tp_ops->set_irq(hwif, 1);
-		rc = ide_wait_not_busy(hwif, 100000);
-		if (rc)
-			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
-	}
-}
-
 /**
  * start_request - start of I/O and command issuing for IDE
  *
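
ide_check_pm_state() is the last of the PM helpers dropped from this file (it, too, survives in drivers/ide/ide-pm.c in mainline). Its resume path deserves a condensed sketch because of the ordering it relies on: the long bus-level BSY wait happens before SELECT_DRIVE() precisely because the other device on the cable may still be POSTing:

	/* Sketch of the removed resume sequence: settle the bus first,
	 * then select the drive, re-enable interrupts, wait for the drive. */
	if (ide_wait_not_busy(hwif, 35000))		/* long, bus-level wait */
		printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
	SELECT_DRIVE(drive);
	hwif->tp_ops->set_irq(hwif, 1);			/* clear nIEN */
	if (ide_wait_not_busy(hwif, 100000))		/* drive-level wait */
		printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
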
@@ -927,7 +745,7 @@ repeat:
 
 /*
  * Issue a new request to a drive from hwgroup
- * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
+ * Caller must have already done spin_lock_irqsave(&hwgroup->lock, ..);
  *
  * A hwgroup is a serialized group of IDE interfaces.  Usually there is
  * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
@@ -939,7 +757,7 @@ repeat:
  * possibly along with many other devices.  This is especially common in
  * PCI-based systems with off-board IDE controller cards.
  *
- * The IDE driver uses the single global ide_lock spinlock to protect
+ * The IDE driver uses a per-hwgroup spinlock to protect
  * access to the request queues, and to protect the hwgroup->busy flag.
  *
  * The first thread into the driver for a particular hwgroup sets the
@@ -955,7 +773,7 @@ repeat:
  * will start the next request from the queue.  If no more work remains,
  * the driver will clear the hwgroup->busy flag and exit.
  *
- * The ide_lock (spinlock) is used to protect all access to the
+ * The per-hwgroup spinlock is used to protect all access to the
  * hwgroup->busy flag, but is otherwise not needed for most processing in
  * the driver.  This makes the driver much more friendlier to shared IRQs
  * than previous designs, while remaining 100% (?) SMP safe and capable.
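
The comment block above describes the serialization model that the rest of this diff implements: one spinlock per hwgroup instead of the driver-wide ide_lock, so only ports sharing a hwgroup ever contend. A minimal sketch of the pattern, assuming the lock field this series adds to ide_hwgroup_t:

	/* Sketch: per-hwgroup serialization, replacing the global ide_lock. */
	unsigned long flags;

	spin_lock_irqsave(&hwgroup->lock, flags);
	if (!hwgroup->busy) {
		hwgroup->busy = 1;		/* claim the hwgroup */
		/* ... pick the next drive/request and issue it ... */
	}
	spin_unlock_irqrestore(&hwgroup->lock, flags);
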
@@ -968,7 +786,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 	ide_startstop_t	startstop;
 	int loops = 0;
 
-	/* caller must own ide_lock */
+	/* caller must own hwgroup->lock */
 	BUG_ON(!irqs_disabled());
 
 	while (!hwgroup->busy) {
@@ -1023,12 +841,12 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 		}
 	again:
 		hwif = HWIF(drive);
-		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
+		if (hwif != hwgroup->hwif) {
 			/*
 			 * set nIEN for previous hwif, drives in the
 			 * quirk_list may not like intr setups/cleanups
 			 */
-			if (drive->quirk_list != 1)
+			if (drive->quirk_list == 0)
 				hwif->tp_ops->set_irq(hwif, 0);
 		}
 		hwgroup->hwif = hwif;
@@ -1036,11 +854,6 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
 		drive->service_start = jiffies;
 
-		if (blk_queue_plugged(drive->queue)) {
-			printk(KERN_ERR "ide: huh? queue was plugged!\n");
-			break;
-		}
-
 		/*
 		 * we know that the queue isn't empty, but this can happen
 		 * if the q->prep_rq_fn() decides to kill a request
@@ -1090,11 +903,11 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 		 */
 		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
 			disable_irq_nosync(hwif->irq);
-		spin_unlock(&ide_lock);
+		spin_unlock(&hwgroup->lock);
 		local_irq_enable_in_hardirq();
 		/* allow other IRQs while we start this request */
 		startstop = start_request(drive, rq);
-		spin_lock_irq(&ide_lock);
+		spin_lock_irq(&hwgroup->lock);
 		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
 			enable_irq(hwif->irq);
 		if (startstop == ide_stopped)
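
The hunk above keeps the pre-existing shape of the issue path and only swaps the lock: the hwgroup lock is dropped while the command is actually programmed, with the port's own IRQ masked so its handler cannot race the setup. With a per-hwgroup lock this means a slow start_request() no longer stalls unrelated ports the way it could under one global ide_lock. Schematically:

	/* Sketch: drop the hwgroup lock around hardware access, keeping
	 * this port's IRQ masked so ide_intr() cannot race the setup. */
	if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
		disable_irq_nosync(hwif->irq);
	spin_unlock(&hwgroup->lock);
	local_irq_enable_in_hardirq();
	startstop = start_request(drive, rq);	/* may touch hardware at length */
	spin_lock_irq(&hwgroup->lock);
	if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
		enable_irq(hwif->irq);
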
@@ -1192,7 +1005,7 @@ void ide_timer_expiry (unsigned long data)
 	unsigned long	flags;
 	unsigned long	wait = -1;
 
-	spin_lock_irqsave(&ide_lock, flags);
+	spin_lock_irqsave(&hwgroup->lock, flags);
 
 	if (((handler = hwgroup->handler) == NULL) ||
 	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
@@ -1225,7 +1038,7 @@ void ide_timer_expiry (unsigned long data)
 			hwgroup->timer.expires  = jiffies + wait;
 			hwgroup->req_gen_timer = hwgroup->req_gen;
 			add_timer(&hwgroup->timer);
-			spin_unlock_irqrestore(&ide_lock, flags);
+			spin_unlock_irqrestore(&hwgroup->lock, flags);
 			return;
 		}
 	}
@@ -1235,7 +1048,7 @@ void ide_timer_expiry (unsigned long data)
 			 * the handler() function, which means we need to
 			 * globally mask the specific IRQ:
 			 */
-			spin_unlock(&ide_lock);
+			spin_unlock(&hwgroup->lock);
 			hwif  = HWIF(drive);
 			/* disable_irq_nosync ?? */
 			disable_irq(hwif->irq);
@@ -1259,14 +1072,14 @@ void ide_timer_expiry (unsigned long data)
 					       hwif->tp_ops->read_status(hwif));
 			}
 			drive->service_time = jiffies - drive->service_start;
-			spin_lock_irq(&ide_lock);
+			spin_lock_irq(&hwgroup->lock);
 			enable_irq(hwif->irq);
 			if (startstop == ide_stopped)
 				hwgroup->busy = 0;
 		}
 	}
 	ide_do_request(hwgroup, IDE_NO_IRQ);
-	spin_unlock_irqrestore(&ide_lock, flags);
+	spin_unlock_irqrestore(&hwgroup->lock, flags);
 }
 
 /**
@@ -1359,18 +1172,16 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 {
 	unsigned long flags;
 	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
-	ide_hwif_t *hwif;
+	ide_hwif_t *hwif = hwgroup->hwif;
 	ide_drive_t *drive;
 	ide_handler_t *handler;
 	ide_startstop_t startstop;
+	irqreturn_t irq_ret = IRQ_NONE;
 
-	spin_lock_irqsave(&ide_lock, flags);
-	hwif = hwgroup->hwif;
+	spin_lock_irqsave(&hwgroup->lock, flags);
 
-	if (!ide_ack_intr(hwif)) {
-		spin_unlock_irqrestore(&ide_lock, flags);
-		return IRQ_NONE;
-	}
+	if (!ide_ack_intr(hwif))
+		goto out;
 
 	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
 		/*
@@ -1406,9 +1217,9 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 			(void)hwif->tp_ops->read_status(hwif);
 #endif /* CONFIG_BLK_DEV_IDEPCI */
 		}
-		spin_unlock_irqrestore(&ide_lock, flags);
-		return IRQ_NONE;
+		goto out;
 	}
+
 	drive = hwgroup->drive;
 	if (!drive) {
 		/*
@@ -1417,10 +1228,10 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 		 *
 		 * [Note - this can occur if the drive is hot unplugged]
 		 */
-		spin_unlock_irqrestore(&ide_lock, flags);
-		return IRQ_HANDLED;
+		goto out_handled;
 	}
-	if (!drive_is_ready(drive)) {
+
+	if (!drive_is_ready(drive))
 		/*
 		 * This happens regularly when we share a PCI IRQ with
 		 * another device.  Unfortunately, it can also happen
@@ -1428,9 +1239,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 		 * their status register is up to date.  Hopefully we have
 		 * enough advance overhead that the latter isn't a problem.
 		 */
-		spin_unlock_irqrestore(&ide_lock, flags);
-		return IRQ_NONE;
-	}
+		goto out;
+
 	if (!hwgroup->busy) {
 		hwgroup->busy = 1;	/* paranoia */
 		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
@@ -1438,7 +1248,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	hwgroup->handler = NULL;
 	hwgroup->req_gen++;
 	del_timer(&hwgroup->timer);
-	spin_unlock(&ide_lock);
+	spin_unlock(&hwgroup->lock);
 
 	if (hwif->port_ops && hwif->port_ops->clear_irq)
 		hwif->port_ops->clear_irq(drive);
@@ -1449,7 +1259,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	/* service this interrupt, may set handler for next interrupt */
 	startstop = handler(drive);
 
-	spin_lock_irq(&ide_lock);
+	spin_lock_irq(&hwgroup->lock);
 	/*
 	 * Note that handler() may have set things up for another
 	 * interrupt to occur soon, but it cannot happen until
@@ -1467,8 +1277,11 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 			       "on exit\n", drive->name);
 		}
 	}
-	spin_unlock_irqrestore(&ide_lock, flags);
-	return IRQ_HANDLED;
+out_handled:
+	irq_ret = IRQ_HANDLED;
+out:
+	spin_unlock_irqrestore(&hwgroup->lock, flags);
+	return irq_ret;
 }
 
 /**
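
Taken together, the ide_intr() hunks convert every early "unlock and return" pair into a goto, so the function ends with exactly one unlock site; that single exit is what makes the ide_lock to hwgroup->lock conversion easy to audit. The resulting skeleton, with the servicing details elided:

	/* Sketch: ide_intr() exit discipline after this diff. */
	static irqreturn_t ide_intr_sketch(int irq, void *dev_id)
	{
		ide_hwgroup_t *hwgroup = dev_id;
		irqreturn_t irq_ret = IRQ_NONE;
		unsigned long flags;

		spin_lock_irqsave(&hwgroup->lock, flags);
		if (!ide_ack_intr(hwgroup->hwif))
			goto out;		/* not our interrupt */
		if (hwgroup->drive == NULL)
			goto out_handled;	/* ours, but nothing to service */
		/* ... service the interrupt ... */
	out_handled:
		irq_ret = IRQ_HANDLED;
	out:
		spin_unlock_irqrestore(&hwgroup->lock, flags);
		return irq_ret;
	}
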
@@ -1488,16 +1301,17 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 
 void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
 {
+	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
+	struct request_queue *q = drive->queue;
 	unsigned long flags;
-	ide_hwgroup_t *hwgroup = HWGROUP(drive);
 
-	spin_lock_irqsave(&ide_lock, flags);
 	hwgroup->rq = NULL;
-	__elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
-	blk_start_queueing(drive->queue);
-	spin_unlock_irqrestore(&ide_lock, flags);
-}
 
+	spin_lock_irqsave(q->queue_lock, flags);
+	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	blk_start_queueing(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
 EXPORT_SYMBOL(ide_do_drive_cmd);
 
 void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
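
A closing note on the last hunk: ide_do_drive_cmd() now locks through the block layer's own pointer, q->queue_lock, instead of naming an IDE lock directly. Assuming the queue is created with the hwgroup's spinlock elsewhere in this series (in ide-probe.c, where blk_init_queue_node() is called), q->queue_lock here is the same per-hwgroup lock; a sketch of that assumed setup:

	/* Sketch, assuming ide-probe.c passes the hwgroup lock at queue
	 * creation, so q->queue_lock below is &hwgroup->lock: */
	struct request_queue *q =
		blk_init_queue_node(do_ide_request, &hwgroup->lock, node);

	spin_lock_irqsave(q->queue_lock, flags);	/* per-hwgroup lock */
	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);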