Diffstat (limited to 'drivers/ide/ide-dma.c')
-rw-r--r--  drivers/ide/ide-dma.c | 519
1 file changed, 66 insertions(+), 453 deletions(-)
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 3fa07c0aeaa4..fffd11717b2d 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -28,24 +28,13 @@
  * for supplying a Promise UDMA board & WD UDMA drive for this work!
  */
 
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/ide.h>
-#include <linux/delay.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 
-#include <asm/io.h>
-#include <asm/irq.h>
-
-static const struct drive_list_entry drive_whitelist [] = {
-
+static const struct drive_list_entry drive_whitelist[] = {
 	{ "Micropolis 2112A" , NULL },
 	{ "CONNER CTMA 4000" , NULL },
 	{ "CONNER CTT8000-A" , NULL },
@@ -53,8 +42,7 @@ static const struct drive_list_entry drive_whitelist [] = {
 	{ NULL , NULL }
 };
 
-static const struct drive_list_entry drive_blacklist [] = {
-
+static const struct drive_list_entry drive_blacklist[] = {
 	{ "WDC AC11000H" , NULL },
 	{ "WDC AC22100H" , NULL },
 	{ "WDC AC32500H" , NULL },
@@ -94,11 +82,11 @@ static const struct drive_list_entry drive_blacklist [] = {
  * ide_dma_intr - IDE DMA interrupt handler
  * @drive: the drive the interrupt is for
  *
  * Handle an interrupt completing a read/write DMA transfer on an
  * IDE device
  */
 
-ide_startstop_t ide_dma_intr (ide_drive_t *drive)
+ide_startstop_t ide_dma_intr(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	u8 stat = 0, dma_stat = 0;
@@ -106,22 +94,21 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
 	dma_stat = hwif->dma_ops->dma_end(drive);
 	stat = hwif->tp_ops->read_status(hwif);
 
-	if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
+	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
 		if (!dma_stat) {
-			struct request *rq = HWGROUP(drive)->rq;
+			struct request *rq = hwif->hwgroup->rq;
 
 			task_end_request(drive, rq, stat);
 			return ide_stopped;
 		}
-		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
-			drive->name, dma_stat);
+		printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
+			drive->name, __func__, dma_stat);
 	}
 	return ide_error(drive, "dma_intr", stat);
 }
-
 EXPORT_SYMBOL_GPL(ide_dma_intr);
 
-static int ide_dma_good_drive(ide_drive_t *drive)
+int ide_dma_good_drive(ide_drive_t *drive)
 {
 	return ide_in_drive_list(drive->id, drive_whitelist);
 }
@@ -139,7 +126,7 @@ static int ide_dma_good_drive(ide_drive_t *drive)
 
 int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 {
-	ide_hwif_t *hwif = HWIF(drive);
+	ide_hwif_t *hwif = drive->hwif;
 	struct scatterlist *sg = hwif->sg_table;
 
 	ide_map_sg(drive, rq);
@@ -152,106 +139,8 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 	return dma_map_sg(hwif->dev, sg, hwif->sg_nents,
 			  hwif->sg_dma_direction);
 }
-
 EXPORT_SYMBOL_GPL(ide_build_sglist);
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-/**
- * ide_build_dmatable - build IDE DMA table
- *
- * ide_build_dmatable() prepares a dma request. We map the command
- * to get the pci bus addresses of the buffers and then build up
- * the PRD table that the IDE layer wants to be fed. The code
- * knows about the 64K wrap bug in the CS5530.
- *
- * Returns the number of built PRD entries if all went okay,
- * returns 0 otherwise.
- *
- * May also be invoked from trm290.c
- */
-
-int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
-{
-	ide_hwif_t *hwif = HWIF(drive);
-	__le32 *table = (__le32 *)hwif->dmatable_cpu;
-	unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0;
-	unsigned int count = 0;
-	int i;
-	struct scatterlist *sg;
-
-	hwif->sg_nents = i = ide_build_sglist(drive, rq);
-
-	if (!i)
-		return 0;
-
-	sg = hwif->sg_table;
-	while (i) {
-		u32 cur_addr;
-		u32 cur_len;
-
-		cur_addr = sg_dma_address(sg);
-		cur_len = sg_dma_len(sg);
-
-		/*
-		 * Fill in the dma table, without crossing any 64kB boundaries.
-		 * Most hardware requires 16-bit alignment of all blocks,
-		 * but the trm290 requires 32-bit alignment.
-		 */
-
-		while (cur_len) {
-			if (count++ >= PRD_ENTRIES) {
-				printk(KERN_ERR "%s: DMA table too small\n", drive->name);
-				goto use_pio_instead;
-			} else {
-				u32 xcount, bcount = 0x10000 - (cur_addr & 0xffff);
-
-				if (bcount > cur_len)
-					bcount = cur_len;
-				*table++ = cpu_to_le32(cur_addr);
-				xcount = bcount & 0xffff;
-				if (is_trm290)
-					xcount = ((xcount >> 2) - 1) << 16;
-				else if (xcount == 0x0000) {
-					/*
-					 * Most chipsets correctly interpret a length of 0x0000 as 64KB,
-					 * but at least one (e.g. CS5530) misinterprets it as zero (!).
-					 * So here we break the 64KB entry into two 32KB entries instead.
-					 */
-					if (count++ >= PRD_ENTRIES) {
-						printk(KERN_ERR "%s: DMA table too small\n", drive->name);
-						goto use_pio_instead;
-					}
-					*table++ = cpu_to_le32(0x8000);
-					*table++ = cpu_to_le32(cur_addr + 0x8000);
-					xcount = 0x8000;
-				}
-				*table++ = cpu_to_le32(xcount);
-				cur_addr += bcount;
-				cur_len -= bcount;
-			}
-		}
-
-		sg = sg_next(sg);
-		i--;
-	}
-
-	if (count) {
-		if (!is_trm290)
-			*--table |= cpu_to_le32(0x80000000);
-		return count;
-	}
-
-	printk(KERN_ERR "%s: empty DMA table?\n", drive->name);
-
-use_pio_instead:
-	ide_destroy_dmatable(drive);
-
-	return 0; /* revert to PIO for this request */
-}
-
-EXPORT_SYMBOL_GPL(ide_build_dmatable);
-#endif
-
 /**
  * ide_destroy_dmatable - clean up DMA mapping
  * @drive: The drive to unmap
@@ -262,146 +151,30 @@ EXPORT_SYMBOL_GPL(ide_build_dmatable);
  * an oops as only one mapping can be live for each target at a given
  * time.
  */
 
-void ide_destroy_dmatable (ide_drive_t *drive)
+void ide_destroy_dmatable(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 
 	dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
 		     hwif->sg_dma_direction);
 }
-
 EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-/**
- * config_drive_for_dma - attempt to activate IDE DMA
- * @drive: the drive to place in DMA mode
- *
- * If the drive supports at least mode 2 DMA or UDMA of any kind
- * then attempt to place it into DMA mode. Drives that are known to
- * support DMA but predate the DMA properties or that are known
- * to have DMA handling bugs are also set up appropriately based
- * on the good/bad drive lists.
- */
-
-static int config_drive_for_dma (ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	struct hd_driveid *id = drive->id;
-
-	if (drive->media != ide_disk) {
-		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
-			return 0;
-	}
-
-	/*
-	 * Enable DMA on any drive that has
-	 * UltraDMA (mode 0/1/2/3/4/5/6) enabled
-	 */
-	if ((id->field_valid & 4) && ((id->dma_ultra >> 8) & 0x7f))
-		return 1;
-
-	/*
-	 * Enable DMA on any drive that has mode2 DMA
-	 * (multi or single) enabled
-	 */
-	if (id->field_valid & 2)	/* regular DMA */
-		if ((id->dma_mword & 0x404) == 0x404 ||
-		    (id->dma_1word & 0x404) == 0x404)
-			return 1;
-
-	/* Consult the list of known "good" drives */
-	if (ide_dma_good_drive(drive))
-		return 1;
-
-	return 0;
-}
-
-/**
- * dma_timer_expiry - handle a DMA timeout
- * @drive: Drive that timed out
- *
- * An IDE DMA transfer timed out. In the event of an error we ask
- * the driver to resolve the problem, if a DMA transfer is still
- * in progress we continue to wait (arguably we need to add a
- * secondary 'I don't care what the drive thinks' timeout here)
- * Finally if we have an interrupt we let it complete the I/O.
- * But only one time - we clear expiry and if it's still not
- * completed after WAIT_CMD, we error and retry in PIO.
- * This can occur if an interrupt is lost or due to hang or bugs.
- */
-
-static int dma_timer_expiry (ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = HWIF(drive);
-	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
-
-	printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
-		drive->name, dma_stat);
-
-	if ((dma_stat & 0x18) == 0x18)	/* BUSY Stupid Early Timer !! */
-		return WAIT_CMD;
-
-	HWGROUP(drive)->expiry = NULL;	/* one free ride for now */
-
-	/* 1 dmaing, 2 error, 4 intr */
-	if (dma_stat & 2)	/* ERROR */
-		return -1;
-
-	if (dma_stat & 1)	/* DMAing */
-		return WAIT_CMD;
-
-	if (dma_stat & 4)	/* Got an Interrupt */
-		return WAIT_CMD;
-
-	return 0;	/* Status is unknown -- reset the bus */
-}
-
-/**
- * ide_dma_host_set - Enable/disable DMA on a host
- * @drive: drive to control
- *
- * Enable/disable DMA on an IDE controller following generic
- * bus-mastering IDE controller behaviour.
- */
-
-void ide_dma_host_set(ide_drive_t *drive, int on)
-{
-	ide_hwif_t *hwif = HWIF(drive);
-	u8 unit = (drive->select.b.unit & 0x01);
-	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
-
-	if (on)
-		dma_stat |= (1 << (5 + unit));
-	else
-		dma_stat &= ~(1 << (5 + unit));
-
-	if (hwif->host_flags & IDE_HFLAG_MMIO)
-		writeb(dma_stat,
-		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
-	else
-		outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
-}
-
-EXPORT_SYMBOL_GPL(ide_dma_host_set);
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
-
 /**
  * ide_dma_off_quietly - Generic DMA kill
  * @drive: drive to control
  *
  * Turn off the current DMA on this IDE controller.
  */
 
 void ide_dma_off_quietly(ide_drive_t *drive)
 {
-	drive->using_dma = 0;
+	drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
 	ide_toggle_bounce(drive, 0);
 
 	drive->hwif->dma_ops->dma_host_set(drive, 0);
 }
-
 EXPORT_SYMBOL(ide_dma_off_quietly);
 
 /**
@@ -417,7 +190,6 @@ void ide_dma_off(ide_drive_t *drive)
 	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
 	ide_dma_off_quietly(drive);
 }
-
 EXPORT_SYMBOL(ide_dma_off);
 
 /**
@@ -429,179 +201,24 @@ EXPORT_SYMBOL(ide_dma_off);
 
 void ide_dma_on(ide_drive_t *drive)
 {
-	drive->using_dma = 1;
+	drive->dev_flags |= IDE_DFLAG_USING_DMA;
 	ide_toggle_bounce(drive, 1);
 
 	drive->hwif->dma_ops->dma_host_set(drive, 1);
 }
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-/**
- * ide_dma_setup - begin a DMA phase
- * @drive: target device
- *
- * Build an IDE DMA PRD (IDE speak for scatter gather table)
- * and then set up the DMA transfer registers for a device
- * that follows generic IDE PCI DMA behaviour. Controllers can
- * override this function if they need to
- *
- * Returns 0 on success. If a PIO fallback is required then 1
- * is returned.
- */
-
-int ide_dma_setup(ide_drive_t *drive)
+int __ide_dma_bad_drive(ide_drive_t *drive)
 {
-	ide_hwif_t *hwif = drive->hwif;
-	struct request *rq = HWGROUP(drive)->rq;
-	unsigned int reading;
-	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-	u8 dma_stat;
-
-	if (rq_data_dir(rq))
-		reading = 0;
-	else
-		reading = 1 << 3;
-
-	/* fall back to pio! */
-	if (!ide_build_dmatable(drive, rq)) {
-		ide_map_sg(drive, rq);
-		return 1;
-	}
-
-	/* PRD table */
-	if (hwif->host_flags & IDE_HFLAG_MMIO)
-		writel(hwif->dmatable_dma,
-		       (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
-	else
-		outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);
-
-	/* specify r/w */
-	if (mmio)
-		writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-	else
-		outb(reading, hwif->dma_base + ATA_DMA_CMD);
-
-	/* read DMA status for INTR & ERROR flags */
-	dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
-
-	/* clear INTR & ERROR flags */
-	if (mmio)
-		writeb(dma_stat | 6,
-		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
-	else
-		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
-
-	drive->waiting_for_dma = 1;
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ide_dma_setup);
-
-void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
-{
-	/* issue cmd to drive */
-	ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
-}
-EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
-
-void ide_dma_start(ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	u8 dma_cmd;
-
-	/* Note that this is done *after* the cmd has
-	 * been issued to the drive, as per the BM-IDE spec.
-	 * The Promise Ultra33 doesn't work correctly when
-	 * we do this part before issuing the drive cmd.
-	 */
-	if (hwif->host_flags & IDE_HFLAG_MMIO) {
-		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-		/* start DMA */
-		writeb(dma_cmd | 1,
-		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-	} else {
-		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
-		outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD);
-	}
-
-	hwif->dma = 1;
-	wmb();
-}
-
-EXPORT_SYMBOL_GPL(ide_dma_start);
-
-/* returns 1 on error, 0 otherwise */
-int __ide_dma_end (ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-	u8 dma_stat = 0, dma_cmd = 0;
-
-	drive->waiting_for_dma = 0;
-
-	if (mmio) {
-		/* get DMA command mode */
-		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-		/* stop DMA */
-		writeb(dma_cmd & ~1,
-		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-	} else {
-		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
-		outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
-	}
-
-	/* get DMA status */
-	dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
-
-	if (mmio)
-		/* clear the INTR & ERROR bits */
-		writeb(dma_stat | 6,
-		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
-	else
-		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
-
-	/* purge DMA mappings */
-	ide_destroy_dmatable(drive);
-	/* verify good DMA status */
-	hwif->dma = 0;
-	wmb();
-	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
-}
-
-EXPORT_SYMBOL(__ide_dma_end);
-
-/* returns 1 if dma irq issued, 0 otherwise */
-int ide_dma_test_irq(ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = HWIF(drive);
-	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
-
-	/* return 1 if INTR asserted */
-	if ((dma_stat & 4) == 4)
-		return 1;
-	if (!drive->waiting_for_dma)
-		printk(KERN_WARNING "%s: (%s) called while not waiting\n",
-			drive->name, __func__);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ide_dma_test_irq);
-#else
-static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
-
-int __ide_dma_bad_drive (ide_drive_t *drive)
-{
-	struct hd_driveid *id = drive->id;
+	u16 *id = drive->id;
 
 	int blacklist = ide_in_drive_list(id, drive_blacklist);
 	if (blacklist) {
 		printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
-			drive->name, id->model);
+			drive->name, (char *)&id[ATA_ID_PROD]);
 		return blacklist;
 	}
 	return 0;
 }
-
 EXPORT_SYMBOL(__ide_dma_bad_drive);
 
 static const u8 xfer_mode_bases[] = {
@@ -612,21 +229,21 @@ static const u8 xfer_mode_bases[] = {
 
 static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
 {
-	struct hd_driveid *id = drive->id;
+	u16 *id = drive->id;
 	ide_hwif_t *hwif = drive->hwif;
 	const struct ide_port_ops *port_ops = hwif->port_ops;
 	unsigned int mask = 0;
 
-	switch(base) {
+	switch (base) {
 	case XFER_UDMA_0:
-		if ((id->field_valid & 4) == 0)
+		if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
 			break;
 
 		if (port_ops && port_ops->udma_filter)
 			mask = port_ops->udma_filter(drive);
 		else
 			mask = hwif->ultra_mask;
-		mask &= id->dma_ultra;
+		mask &= id[ATA_ID_UDMA_MODES];
 
 		/*
 		 * avoid false cable warning from eighty_ninty_three()
@@ -637,19 +254,19 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
 		}
 		break;
 	case XFER_MW_DMA_0:
-		if ((id->field_valid & 2) == 0)
+		if ((id[ATA_ID_FIELD_VALID] & 2) == 0)
 			break;
 		if (port_ops && port_ops->mdma_filter)
 			mask = port_ops->mdma_filter(drive);
 		else
 			mask = hwif->mwdma_mask;
-		mask &= id->dma_mword;
+		mask &= id[ATA_ID_MWDMA_MODES];
 		break;
 	case XFER_SW_DMA_0:
-		if (id->field_valid & 2) {
-			mask = id->dma_1word & hwif->swdma_mask;
-		} else if (id->tDMA) {
-			u8 mode = id->tDMA;
+		if (id[ATA_ID_FIELD_VALID] & 2) {
+			mask = id[ATA_ID_SWDMA_MODES] & hwif->swdma_mask;
+		} else if (id[ATA_ID_OLD_DMA_MODES] >> 8) {
+			u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;
 
 			/*
 			 * if the mode is valid convert it to the mask
@@ -706,7 +323,8 @@ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
 		/*
 		 * is this correct?
 		 */
-		if (ide_dma_good_drive(drive) && drive->id->eide_dma_time < 150)
+		if (ide_dma_good_drive(drive) &&
+		    drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
 			mode = XFER_MW_DMA_1;
 	}
 
@@ -717,7 +335,6 @@ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
 
 	return mode;
 }
-
 EXPORT_SYMBOL_GPL(ide_find_dma_mode);
 
 static int ide_tune_dma(ide_drive_t *drive)
@@ -725,7 +342,8 @@ static int ide_tune_dma(ide_drive_t *drive)
 	ide_hwif_t *hwif = drive->hwif;
 	u8 speed;
 
-	if (drive->nodma || (drive->id->capability & 1) == 0)
+	if (ata_id_has_dma(drive->id) == 0 ||
+	    (drive->dev_flags & IDE_DFLAG_NODMA))
 		return 0;
 
 	/* consult the list of known "bad" drives */
@@ -767,13 +385,15 @@ static int ide_dma_check(ide_drive_t *drive)
 
 int ide_id_dma_bug(ide_drive_t *drive)
 {
-	struct hd_driveid *id = drive->id;
+	u16 *id = drive->id;
 
-	if (id->field_valid & 4) {
-		if ((id->dma_ultra >> 8) && (id->dma_mword >> 8))
+	if (id[ATA_ID_FIELD_VALID] & 4) {
+		if ((id[ATA_ID_UDMA_MODES] >> 8) &&
+		    (id[ATA_ID_MWDMA_MODES] >> 8))
 			goto err_out;
-	} else if (id->field_valid & 2) {
-		if ((id->dma_mword >> 8) && (id->dma_1word >> 8))
+	} else if (id[ATA_ID_FIELD_VALID] & 2) {
+		if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
+		    (id[ATA_ID_SWDMA_MODES] >> 8))
 			goto err_out;
 	}
 	return 0;
@@ -823,66 +443,59 @@ void ide_check_dma_crc(ide_drive_t *drive)
 	ide_dma_on(drive);
 }
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-void ide_dma_lost_irq (ide_drive_t *drive)
+void ide_dma_lost_irq(ide_drive_t *drive)
 {
-	printk("%s: DMA interrupt recovery\n", drive->name);
+	printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
 }
+EXPORT_SYMBOL_GPL(ide_dma_lost_irq);
 
-EXPORT_SYMBOL(ide_dma_lost_irq);
-
-void ide_dma_timeout (ide_drive_t *drive)
+void ide_dma_timeout(ide_drive_t *drive)
 {
-	ide_hwif_t *hwif = HWIF(drive);
+	ide_hwif_t *hwif = drive->hwif;
 
 	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
 
 	if (hwif->dma_ops->dma_test_irq(drive))
 		return;
 
+	ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));
+
 	hwif->dma_ops->dma_end(drive);
 }
-
-EXPORT_SYMBOL(ide_dma_timeout);
+EXPORT_SYMBOL_GPL(ide_dma_timeout);
 
 void ide_release_dma_engine(ide_hwif_t *hwif)
 {
 	if (hwif->dmatable_cpu) {
-		struct pci_dev *pdev = to_pci_dev(hwif->dev);
+		int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
 
-		pci_free_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
-				    hwif->dmatable_cpu, hwif->dmatable_dma);
+		dma_free_coherent(hwif->dev, prd_size,
+				  hwif->dmatable_cpu, hwif->dmatable_dma);
 		hwif->dmatable_cpu = NULL;
 	}
 }
+EXPORT_SYMBOL_GPL(ide_release_dma_engine);
 
 int ide_allocate_dma_engine(ide_hwif_t *hwif)
 {
-	struct pci_dev *pdev = to_pci_dev(hwif->dev);
+	int prd_size;
 
-	hwif->dmatable_cpu = pci_alloc_consistent(pdev,
-						  PRD_ENTRIES * PRD_BYTES,
-						  &hwif->dmatable_dma);
+	if (hwif->prd_max_nents == 0)
+		hwif->prd_max_nents = PRD_ENTRIES;
+	if (hwif->prd_ent_size == 0)
+		hwif->prd_ent_size = PRD_BYTES;
 
-	if (hwif->dmatable_cpu)
-		return 0;
+	prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
 
-	printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
+	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
+						&hwif->dmatable_dma,
+						GFP_ATOMIC);
+	if (hwif->dmatable_cpu == NULL) {
+		printk(KERN_ERR "%s: unable to allocate PRD table\n",
 			hwif->name);
+		return -ENOMEM;
+	}
 
-	return 1;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
-
-const struct ide_dma_ops sff_dma_ops = {
-	.dma_host_set		= ide_dma_host_set,
-	.dma_setup		= ide_dma_setup,
-	.dma_exec_cmd		= ide_dma_exec_cmd,
-	.dma_start		= ide_dma_start,
-	.dma_end		= __ide_dma_end,
-	.dma_test_irq		= ide_dma_test_irq,
-	.dma_timeout		= ide_dma_timeout,
-	.dma_lost_irq		= ide_dma_lost_irq,
-};
-EXPORT_SYMBOL_GPL(sff_dma_ops);
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */