author		Len Brown <len.brown@intel.com>	2009-04-05 02:14:15 -0400
committer	Len Brown <len.brown@intel.com>	2009-04-05 02:14:15 -0400
commit		478c6a43fcbc6c11609f8cee7c7b57223907754f (patch)
tree		a7f7952099da60d33032aed6de9c0c56c9f8779e /drivers/ide/ide-io.c
parent		8a3f257c704e02aee9869decd069a806b45be3f1 (diff)
parent		6bb597507f9839b13498781e481f5458aea33620 (diff)

Merge branch 'linus' into release

Conflicts:
	arch/x86/kernel/cpu/cpufreq/longhaul.c

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/ide/ide-io.c')
-rw-r--r--	drivers/ide/ide-io.c	612
1 file changed, 141 insertions(+), 471 deletions(-)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index a9a6c208288a..1deb6d29b186 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -40,7 +40,6 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/ide.h>
-#include <linux/hdreg.h>
 #include <linux/completion.h>
 #include <linux/reboot.h>
 #include <linux/cdrom.h>
@@ -55,25 +54,9 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
-static int __ide_end_request(ide_drive_t *drive, struct request *rq,
-			     int uptodate, unsigned int nr_bytes, int dequeue)
+int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,
+	       unsigned int nr_bytes)
 {
-	int ret = 1;
-	int error = 0;
-
-	if (uptodate <= 0)
-		error = uptodate ? uptodate : -EIO;
-
-	/*
-	 * if failfast is set on a request, override number of sectors and
-	 * complete the whole request right now
-	 */
-	if (blk_noretry_request(rq) && error)
-		nr_bytes = rq->hard_nr_sectors << 9;
-
-	if (!blk_fs_request(rq) && error && !rq->errors)
-		rq->errors = -EIO;
-
 	/*
 	 * decide whether to reenable DMA -- 3 is a random magic for now,
 	 * if we DMA timeout more than 3 times, just stay in PIO
@@ -84,255 +67,97 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 		ide_dma_on(drive);
 	}
 
-	if (!blk_end_request(rq, error, nr_bytes))
-		ret = 0;
-
-	if (ret == 0 && dequeue)
-		drive->hwif->rq = NULL;
-
-	return ret;
+	return blk_end_request(rq, error, nr_bytes);
 }
+EXPORT_SYMBOL_GPL(ide_end_rq);
 
-/**
- * ide_end_request - complete an IDE I/O
- * @drive: IDE device for the I/O
- * @uptodate:
- * @nr_sectors: number of sectors completed
- *
- * This is our end_request wrapper function. We complete the I/O
- * update random number input and dequeue the request, which if
- * it was tagged may be out of order.
- */
-
-int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
+void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
 {
-	unsigned int nr_bytes = nr_sectors << 9;
-	struct request *rq = drive->hwif->rq;
+	const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
+	struct ide_taskfile *tf = &cmd->tf;
+	struct request *rq = cmd->rq;
+	u8 tf_cmd = tf->command;
 
-	if (!nr_bytes) {
-		if (blk_pc_request(rq))
-			nr_bytes = rq->data_len;
-		else
-			nr_bytes = rq->hard_cur_sectors << 9;
-	}
+	tf->error = err;
+	tf->status = stat;
 
-	return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
-}
-EXPORT_SYMBOL(ide_end_request);
+	if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
+		u8 data[2];
 
-/**
- * ide_end_dequeued_request - complete an IDE I/O
- * @drive: IDE device for the I/O
- * @uptodate:
- * @nr_sectors: number of sectors completed
- *
- * Complete an I/O that is no longer on the request queue. This
- * typically occurs when we pull the request and issue a REQUEST_SENSE.
- * We must still finish the old request but we must not tamper with the
- * queue in the meantime.
- *
- * NOTE: This path does not handle barrier, but barrier is not supported
- * on ide-cd anyway.
- */
-
-int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
-			     int uptodate, int nr_sectors)
-{
-	BUG_ON(!blk_rq_started(rq));
+		tp_ops->input_data(drive, cmd, data, 2);
 
-	return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
-}
-EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
-
-/**
- * ide_end_drive_cmd - end an explicit drive command
- * @drive: command
- * @stat: status bits
- * @err: error bits
- *
- * Clean up after success/failure of an explicit drive command.
- * These get thrown onto the queue so they are synchronized with
- * real I/O operations on the drive.
- *
- * In LBA48 mode we have to read the register set twice to get
- * all the extra information out.
- */
-
-void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	struct request *rq = hwif->rq;
-
-	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
-		ide_task_t *task = (ide_task_t *)rq->special;
-
-		if (task) {
-			struct ide_taskfile *tf = &task->tf;
-
-			tf->error = err;
-			tf->status = stat;
-
-			drive->hwif->tp_ops->tf_read(drive, task);
+		tf->data = data[0];
+		tf->hob_data = data[1];
+	}
 
-			if (task->tf_flags & IDE_TFLAG_DYN)
-				kfree(task);
-		}
-	} else if (blk_pm_request(rq)) {
-		struct request_pm_state *pm = rq->data;
+	tp_ops->tf_read(drive, cmd);
 
-		ide_complete_power_step(drive, rq);
-		if (pm->pm_step == IDE_PM_COMPLETED)
-			ide_complete_pm_request(drive, rq);
-		return;
+	if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
+	    tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
+		if (tf->lbal != 0xc4) {
+			printk(KERN_ERR "%s: head unload failed!\n",
+			       drive->name);
+			ide_tf_dump(drive->name, tf);
+		} else
+			drive->dev_flags |= IDE_DFLAG_PARKED;
 	}
 
-	hwif->rq = NULL;
+	if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
+		memcpy(rq->special, cmd, sizeof(*cmd));
 
-	rq->errors = err;
-
-	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
-				     blk_rq_bytes(rq))))
-		BUG();
+	if (cmd->tf_flags & IDE_TFLAG_DYN)
+		kfree(cmd);
 }
-EXPORT_SYMBOL(ide_end_drive_cmd);
 
-static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
+/* obsolete, blk_rq_bytes() should be used instead */
+unsigned int ide_rq_bytes(struct request *rq)
 {
-	if (rq->rq_disk) {
-		struct ide_driver *drv;
-
-		drv = *(struct ide_driver **)rq->rq_disk->private_data;
-		drv->end_request(drive, 0, 0);
-	} else
-		ide_end_request(drive, 0, 0);
+	if (blk_pc_request(rq))
+		return rq->data_len;
+	else
+		return rq->hard_cur_sectors << 9;
 }
+EXPORT_SYMBOL_GPL(ide_rq_bytes);
 
-static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
+int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
 {
 	ide_hwif_t *hwif = drive->hwif;
+	struct request *rq = hwif->rq;
+	int rc;
 
-	if ((stat & ATA_BUSY) ||
-	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
-		/* other bits are useless when BUSY */
-		rq->errors |= ERROR_RESET;
-	} else if (stat & ATA_ERR) {
-		/* err has different meaning on cdrom and tape */
-		if (err == ATA_ABORTED) {
-			if ((drive->dev_flags & IDE_DFLAG_LBA) &&
-			/* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
-			    hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
-				return ide_stopped;
-		} else if ((err & BAD_CRC) == BAD_CRC) {
-			/* UDMA crc error, just retry the operation */
-			drive->crc_count++;
-		} else if (err & (ATA_BBK | ATA_UNC)) {
-			/* retries won't help these */
-			rq->errors = ERROR_MAX;
-		} else if (err & ATA_TRK0NF) {
-			/* help it find track zero */
-			rq->errors |= ERROR_RECAL;
-		}
-	}
-
-	if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
-	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
-		int nsect = drive->mult_count ? drive->mult_count : 1;
-
-		ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
-	}
-
-	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
-		ide_kill_rq(drive, rq);
-		return ide_stopped;
-	}
-
-	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
-		rq->errors |= ERROR_RESET;
-
-	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
-		++rq->errors;
-		return ide_do_reset(drive);
-	}
-
-	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
-		drive->special.b.recalibrate = 1;
+	/*
+	 * if failfast is set on a request, override number of sectors
+	 * and complete the whole request right now
+	 */
+	if (blk_noretry_request(rq) && error <= 0)
+		nr_bytes = rq->hard_nr_sectors << 9;
 
-	++rq->errors;
+	rc = ide_end_rq(drive, rq, error, nr_bytes);
+	if (rc == 0)
+		hwif->rq = NULL;
 
-	return ide_stopped;
+	return rc;
 }
+EXPORT_SYMBOL(ide_complete_rq);
 
-static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
+void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 {
-	ide_hwif_t *hwif = drive->hwif;
+	u8 drv_req = blk_special_request(rq) && rq->rq_disk;
+	u8 media = drive->media;
 
-	if ((stat & ATA_BUSY) ||
-	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
-		/* other bits are useless when BUSY */
-		rq->errors |= ERROR_RESET;
-	} else {
-		/* add decoding error stuff */
-	}
-
-	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
-		/* force an abort */
-		hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);
+	drive->failed_pc = NULL;
 
-	if (rq->errors >= ERROR_MAX) {
-		ide_kill_rq(drive, rq);
+	if ((media == ide_floppy || media == ide_tape) && drv_req) {
+		rq->errors = 0;
+		ide_complete_rq(drive, 0, blk_rq_bytes(rq));
 	} else {
-		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
-			++rq->errors;
-			return ide_do_reset(drive);
-		}
-		++rq->errors;
-	}
-
-	return ide_stopped;
-}
-
-static ide_startstop_t
-__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
-{
-	if (drive->media == ide_disk)
-		return ide_ata_error(drive, rq, stat, err);
-	return ide_atapi_error(drive, rq, stat, err);
-}
-
-/**
- * ide_error - handle an error on the IDE
- * @drive: drive the error occurred on
- * @msg: message to report
- * @stat: status bits
- *
- * ide_error() takes action based on the error returned by the drive.
- * For normal I/O that may well include retries. We deal with
- * both new-style (taskfile) and old style command handling here.
- * In the case of taskfile command handling there is work left to
- * do
- */
-
-ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
-{
-	struct request *rq;
-	u8 err;
-
-	err = ide_dump_status(drive, msg, stat);
-
-	rq = drive->hwif->rq;
-	if (rq == NULL)
-		return ide_stopped;
-
-	/* retry only "normal" I/O: */
-	if (!blk_fs_request(rq)) {
-		rq->errors = 1;
-		ide_end_drive_cmd(drive, stat, err);
-		return ide_stopped;
+		if (media == ide_tape)
+			rq->errors = IDE_DRV_ERROR_GENERAL;
+		else if (blk_fs_request(rq) == 0 && rq->errors == 0)
+			rq->errors = -EIO;
+		ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
 	}
-
-	return __ide_error(drive, rq, stat, err);
 }
-EXPORT_SYMBOL_GPL(ide_error);
 
 static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
 {
@@ -359,20 +184,20 @@ static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
 static ide_startstop_t ide_disk_special(ide_drive_t *drive)
 {
 	special_t *s = &drive->special;
-	ide_task_t args;
+	struct ide_cmd cmd;
 
-	memset(&args, 0, sizeof(ide_task_t));
-	args.data_phase = TASKFILE_NO_DATA;
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.protocol = ATA_PROT_NODATA;
 
 	if (s->b.set_geometry) {
 		s->b.set_geometry = 0;
-		ide_tf_set_specify_cmd(drive, &args.tf);
+		ide_tf_set_specify_cmd(drive, &cmd.tf);
 	} else if (s->b.recalibrate) {
 		s->b.recalibrate = 0;
-		ide_tf_set_restore_cmd(drive, &args.tf);
+		ide_tf_set_restore_cmd(drive, &cmd.tf);
 	} else if (s->b.set_multmode) {
 		s->b.set_multmode = 0;
-		ide_tf_set_setmult_cmd(drive, &args.tf);
+		ide_tf_set_setmult_cmd(drive, &cmd.tf);
 	} else if (s->all) {
 		int special = s->all;
 		s->all = 0;
@@ -380,10 +205,10 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
 		return ide_stopped;
 	}
 
-	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
+	cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
 			IDE_TFLAG_CUSTOM_HANDLER;
 
-	do_rw_taskfile(drive, &args);
+	do_rw_taskfile(drive, &cmd);
 
 	return ide_started;
 }
@@ -413,33 +238,29 @@ static ide_startstop_t do_special (ide_drive_t *drive)
 	return ide_stopped;
 }
 
-void ide_map_sg(ide_drive_t *drive, struct request *rq)
+void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct scatterlist *sg = hwif->sg_table;
+	struct request *rq = cmd->rq;
 
 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
-		hwif->sg_nents = 1;
+		cmd->sg_nents = 1;
 	} else if (!rq->bio) {
 		sg_init_one(sg, rq->data, rq->data_len);
-		hwif->sg_nents = 1;
-	} else {
-		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
-	}
+		cmd->sg_nents = 1;
+	} else
+		cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
 }
-
 EXPORT_SYMBOL_GPL(ide_map_sg);
 
-void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
+void ide_init_sg_cmd(struct ide_cmd *cmd, unsigned int nr_bytes)
 {
-	ide_hwif_t *hwif = drive->hwif;
-
-	hwif->nsect = hwif->nleft = rq->nr_sectors;
-	hwif->cursg_ofs = 0;
-	hwif->cursg = NULL;
+	cmd->nbytes = cmd->nleft = nr_bytes;
+	cmd->cursg_ofs = 0;
+	cmd->cursg = NULL;
 }
-
 EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
 
 /**
@@ -457,24 +278,15 @@ EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
 static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 					  struct request *rq)
 {
-	ide_hwif_t *hwif = drive->hwif;
-	ide_task_t *task = rq->special;
-
-	if (task) {
-		hwif->data_phase = task->data_phase;
-
-		switch (hwif->data_phase) {
-		case TASKFILE_MULTI_OUT:
-		case TASKFILE_OUT:
-		case TASKFILE_MULTI_IN:
-		case TASKFILE_IN:
-			ide_init_sg_cmd(drive, rq);
-			ide_map_sg(drive, rq);
-		default:
-			break;
+	struct ide_cmd *cmd = rq->special;
+
+	if (cmd) {
+		if (cmd->protocol == ATA_PROT_PIO) {
+			ide_init_sg_cmd(cmd, rq->nr_sectors << 9);
+			ide_map_sg(drive, cmd);
 		}
 
-		return do_rw_taskfile(drive, task);
+		return do_rw_taskfile(drive, cmd);
 	}
 
 	/*
@@ -484,83 +296,26 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 #ifdef DEBUG
 	printk("%s: DRIVE_CMD (null)\n", drive->name);
 #endif
-	ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
-			  ide_read_error(drive));
+	rq->errors = 0;
+	ide_complete_rq(drive, 0, blk_rq_bytes(rq));
 
 	return ide_stopped;
 }
 
-int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
-		       int arg)
-{
-	struct request_queue *q = drive->queue;
-	struct request *rq;
-	int ret = 0;
-
-	if (!(setting->flags & DS_SYNC))
-		return setting->set(drive, arg);
-
-	rq = blk_get_request(q, READ, __GFP_WAIT);
-	rq->cmd_type = REQ_TYPE_SPECIAL;
-	rq->cmd_len = 5;
-	rq->cmd[0] = REQ_DEVSET_EXEC;
-	*(int *)&rq->cmd[1] = arg;
-	rq->special = setting->set;
-
-	if (blk_execute_rq(q, NULL, rq, 0))
-		ret = rq->errors;
-	blk_put_request(rq);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(ide_devset_execute);
-
 static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
 {
 	u8 cmd = rq->cmd[0];
 
-	if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) {
-		ide_task_t task;
-		struct ide_taskfile *tf = &task.tf;
-
-		memset(&task, 0, sizeof(task));
-		if (cmd == REQ_PARK_HEADS) {
-			drive->sleep = *(unsigned long *)rq->special;
-			drive->dev_flags |= IDE_DFLAG_SLEEPING;
-			tf->command = ATA_CMD_IDLEIMMEDIATE;
-			tf->feature = 0x44;
-			tf->lbal = 0x4c;
-			tf->lbam = 0x4e;
-			tf->lbah = 0x55;
-			task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
-		} else /* cmd == REQ_UNPARK_HEADS */
-			tf->command = ATA_CMD_CHK_POWER;
-
-		task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-		task.rq = rq;
-		drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
-		return do_rw_taskfile(drive, &task);
-	}
-
 	switch (cmd) {
+	case REQ_PARK_HEADS:
+	case REQ_UNPARK_HEADS:
+		return ide_do_park_unpark(drive, rq);
 	case REQ_DEVSET_EXEC:
-	{
-		int err, (*setfunc)(ide_drive_t *, int) = rq->special;
-
-		err = setfunc(drive, *(int *)&rq->cmd[1]);
-		if (err)
-			rq->errors = err;
-		else
-			err = 1;
-		ide_end_request(drive, err, 0);
-		return ide_stopped;
-	}
+		return ide_do_devset(drive, rq);
 	case REQ_DRIVE_RESET:
 		return ide_do_reset(drive);
 	default:
-		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
-		ide_end_request(drive, 0, 0);
-		return ide_stopped;
+		BUG();
 	}
 }
 
@@ -593,7 +348,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 	if (blk_pm_request(rq))
 		ide_check_pm_state(drive, rq);
 
-	SELECT_DRIVE(drive);
+	drive->hwif->tp_ops->dev_select(drive);
 	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
 			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
 		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
@@ -620,7 +375,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 		startstop = ide_start_power_step(drive, rq);
 		if (startstop == ide_stopped &&
 		    pm->pm_step == IDE_PM_COMPLETED)
-			ide_complete_pm_request(drive, rq);
+			ide_complete_pm_rq(drive, rq);
 		return startstop;
 	} else if (!rq->rq_disk && blk_special_request(rq))
 		/*
@@ -683,8 +438,8 @@ static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
 	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
 		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
 		if (rc == 0) {
-			/* for atari only */
-			ide_get_lock(ide_intr, hwif);
+			if (host->get_lock)
+				host->get_lock(ide_intr, hwif);
 		}
 	}
 	return rc;
@@ -693,8 +448,8 @@ static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
 static inline void ide_unlock_host(struct ide_host *host)
 {
 	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
-		/* for atari only */
-		ide_release_lock();
+		if (host->release_lock)
+			host->release_lock();
 		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
 	}
 }
@@ -736,11 +491,10 @@ repeat:
 		prev_port = hwif->host->cur_port;
 		hwif->rq = NULL;
 
-		if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
-			if (time_before(drive->sleep, jiffies)) {
-				ide_unlock_port(hwif);
-				goto plug_device;
-			}
+		if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
+		    time_after(drive->sleep, jiffies)) {
+			ide_unlock_port(hwif);
+			goto plug_device;
 		}
 
 		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
@@ -750,7 +504,9 @@ repeat:
 		 * quirk_list may not like intr setups/cleanups
 		 */
 		if (prev_port && prev_port->cur_dev->quirk_list == 0)
-			prev_port->tp_ops->set_irq(prev_port, 0);
+			prev_port->tp_ops->write_devctl(prev_port,
+							ATA_NIEN |
+							ATA_DEVCTL_OBS);
 
 		hwif->host->cur_port = hwif;
 	}
@@ -820,63 +576,6 @@ plug_device_2:
 		blk_plug_device(q);
 }
 
-/*
- * un-busy the port etc, and clear any pending DMA status. we want to
- * retry the current request in pio mode instead of risking tossing it
- * all away
- */
-static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	struct request *rq;
-	ide_startstop_t ret = ide_stopped;
-
-	/*
-	 * end current dma transaction
-	 */
-
-	if (error < 0) {
-		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
-		(void)hwif->dma_ops->dma_end(drive);
-		ret = ide_error(drive, "dma timeout error",
-				hwif->tp_ops->read_status(hwif));
-	} else {
-		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
-		hwif->dma_ops->dma_timeout(drive);
-	}
-
-	/*
-	 * disable dma for now, but remember that we did so because of
-	 * a timeout -- we'll reenable after we finish this next request
-	 * (or rather the first chunk of it) in pio.
-	 */
-	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
-	drive->retry_pio++;
-	ide_dma_off_quietly(drive);
-
-	/*
-	 * un-busy drive etc and make sure request is sane
-	 */
-
-	rq = hwif->rq;
-	if (!rq)
-		goto out;
-
-	hwif->rq = NULL;
-
-	rq->errors = 0;
-
-	if (!rq->bio)
-		goto out;
-
-	rq->sector = rq->bio->bi_sector;
-	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
-	rq->hard_cur_sectors = rq->current_nr_sectors;
-	rq->buffer = bio_data(rq->bio);
-out:
-	return ret;
-}
-
 static void ide_plug_device(ide_drive_t *drive)
 {
 	struct request_queue *q = drive->queue;
@@ -888,6 +587,29 @@ static void ide_plug_device(ide_drive_t *drive)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
+static int drive_is_ready(ide_drive_t *drive)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	u8 stat = 0;
+
+	if (drive->waiting_for_dma)
+		return hwif->dma_ops->dma_test_irq(drive);
+
+	if (hwif->io_ports.ctl_addr &&
+	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
+		stat = hwif->tp_ops->read_altstatus(hwif);
+	else
+		/* Note: this may clear a pending IRQ!! */
+		stat = hwif->tp_ops->read_status(hwif);
+
+	if (stat & ATA_BUSY)
+		/* drive busy: definitely not interrupting */
+		return 0;
+
+	/* drive ready: *might* be interrupting */
+	return 1;
+}
+
 /**
  * ide_timer_expiry - handle lack of an IDE interrupt
  * @data: timer callback magic (hwif)
@@ -940,6 +662,7 @@ void ide_timer_expiry (unsigned long data)
 			}
 		}
 		hwif->handler = NULL;
+		hwif->expiry = NULL;
 		/*
 		 * We need to simulate a real interrupt when invoking
 		 * the handler() function, which means we need to
@@ -955,7 +678,8 @@ void ide_timer_expiry (unsigned long data)
 	} else if (drive_is_ready(drive)) {
 		if (drive->waiting_for_dma)
 			hwif->dma_ops->dma_lost_irq(drive);
-		(void)ide_ack_intr(hwif);
+		if (hwif->ack_intr)
+			hwif->ack_intr(hwif);
 		printk(KERN_WARNING "%s: lost interrupt\n",
 			drive->name);
 		startstop = handler(drive);
@@ -1056,6 +780,7 @@ static void unexpected_intr(int irq, ide_hwif_t *hwif)
 irqreturn_t ide_intr (int irq, void *dev_id)
 {
 	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
+	struct ide_host *host = hwif->host;
 	ide_drive_t *uninitialized_var(drive);
 	ide_handler_t *handler;
 	unsigned long flags;
@@ -1063,14 +788,14 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	irqreturn_t irq_ret = IRQ_NONE;
 	int plug_device = 0;
 
-	if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE) {
-		if (hwif != hwif->host->cur_port)
+	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
+		if (hwif != host->cur_port)
 			goto out_early;
 	}
 
 	spin_lock_irqsave(&hwif->lock, flags);
 
-	if (!ide_ack_intr(hwif))
+	if (hwif->ack_intr && hwif->ack_intr(hwif) == 0)
 		goto out;
 
 	handler = hwif->handler;
@@ -1087,27 +812,19 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 		 *
 		 * For PCI, we cannot tell the difference,
 		 * so in that case we just ignore it and hope it goes away.
-		 *
-		 * FIXME: unexpected_intr should be hwif-> then we can
-		 * remove all the ifdef PCI crap
 		 */
-#ifdef CONFIG_BLK_DEV_IDEPCI
-		if (hwif->chipset != ide_pci)
-#endif /* CONFIG_BLK_DEV_IDEPCI */
-		{
+		if ((host->irq_flags & IRQF_SHARED) == 0) {
 			/*
 			 * Probably not a shared PCI interrupt,
 			 * so we can safely try to do something about it:
 			 */
 			unexpected_intr(irq, hwif);
-#ifdef CONFIG_BLK_DEV_IDEPCI
 		} else {
 			/*
 			 * Whack the status register, just in case
 			 * we have a leftover pending IRQ.
 			 */
 			(void)hwif->tp_ops->read_status(hwif);
-#endif /* CONFIG_BLK_DEV_IDEPCI */
 		}
 		goto out;
 	}
@@ -1125,6 +842,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 		goto out;
 
 	hwif->handler = NULL;
+	hwif->expiry = NULL;
 	hwif->req_gen++;
 	del_timer(&hwif->timer);
 	spin_unlock(&hwif->lock);
@@ -1164,54 +882,6 @@ out_early:
 }
 EXPORT_SYMBOL_GPL(ide_intr);
 
-/**
- * ide_do_drive_cmd - issue IDE special command
- * @drive: device to issue command
- * @rq: request to issue
- *
- * This function issues a special IDE device request
- * onto the request queue.
- *
- * the rq is queued at the head of the request queue, displacing
- * the currently-being-processed request and this function
- * returns immediately without waiting for the new rq to be
- * completed. This is VERY DANGEROUS, and is intended for
- * careful use by the ATAPI tape/cdrom driver code.
- */
-
-void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
-{
-	struct request_queue *q = drive->queue;
-	unsigned long flags;
-
-	drive->hwif->rq = NULL;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(ide_do_drive_cmd);
-
-void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	ide_task_t task;
-
-	memset(&task, 0, sizeof(task));
-	task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
-			IDE_TFLAG_OUT_FEATURE | tf_flags;
-	task.tf.feature = dma;		/* Use PIO/DMA */
-	task.tf.lbam = bcount & 0xff;
-	task.tf.lbah = (bcount >> 8) & 0xff;
-
-	ide_tf_dump(drive->name, &task.tf);
-	hwif->tp_ops->set_irq(hwif, 1);
-	SELECT_MASK(drive, 0);
-	hwif->tp_ops->tf_load(drive, &task);
-}
-
-EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
-
 void ide_pad_transfer(ide_drive_t *drive, int write, int len)
 {
 	ide_hwif_t *hwif = drive->hwif;