author     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>    2008-10-13 15:39:47 -0400
committer  Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>    2008-10-13 15:39:47 -0400
commit     db3f99ef7c30d541e4a78931acf2c64abe3e26d1 (patch)
tree       0eb382a6d4cde1e43359c103aa3a7bcdf42a88ef /drivers
parent     14c123f37187aba0b4e0e893a969efc6820c4170 (diff)
ide: cleanup ide-dma.c
- s/HWIF(drive)/drive->hwif/
- s/HWGROUP(drive)/[drive->]hwif->hwgroup/
- fixup error messages in ide_dma_intr() & dma_timer_expiry()
- fix checkpatch.pl errors/warnings

Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
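The first two items amount to replacing the old HWIF()/HWGROUP() accessor macros with explicit pointer dereferences at each call site. As a rough standalone illustration only, the sketch below mimics that before/after with invented stub structs and a main() harness; these stubs are not the real definitions from <linux/ide.h>, they merely mirror the drive -> hwif -> hwgroup linkage the macros hide.

/* Illustrative stubs only -- the real types live in <linux/ide.h>. */
#include <stdio.h>

struct hwgroup_stub { void *rq; };
struct hwif_stub    { struct hwgroup_stub *hwgroup; const char *name; };
struct drive_stub   { struct hwif_stub *hwif; const char *name; };

/* Old-style accessor macros of the kind this cleanup removes. */
#define HWIF(drive)    ((drive)->hwif)
#define HWGROUP(drive) ((drive)->hwif->hwgroup)

int main(void)
{
        struct hwgroup_stub grp   = { .rq = NULL };
        struct hwif_stub    hwif  = { .hwgroup = &grp, .name = "ide0" };
        struct drive_stub   drive = { .hwif = &hwif, .name = "hda" };

        /* before: the indirection is hidden behind macros */
        struct hwif_stub    *h1 = HWIF(&drive);
        struct hwgroup_stub *g1 = HWGROUP(&drive);

        /* after: the dereference is spelled out at the call site */
        struct hwif_stub    *h2 = drive.hwif;
        struct hwgroup_stub *g2 = drive.hwif->hwgroup;

        printf("%s\n", (h1 == h2 && g1 == g2) ? "equivalent" : "mismatch");
        return 0;
}

The error-message fixup follows the same spirit: the hard-coded "dma_intr" and "dma_timer_expiry" strings in the printk() calls give way to __func__, as the hunks below show.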
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/ide/ide-dma.c   88
1 file changed, 38 insertions(+), 50 deletions(-)
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 4d212b867c35..d935a6ec022f 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -33,11 +33,9 @@
 #include <linux/ide.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
-
-static const struct drive_list_entry drive_whitelist [] = {
-
+static const struct drive_list_entry drive_whitelist[] = {
         { "Micropolis 2112A" , NULL },
         { "CONNER CTMA 4000" , NULL },
         { "CONNER CTT8000-A" , NULL },
@@ -45,8 +43,7 @@ static const struct drive_list_entry drive_whitelist [] = {
         { NULL , NULL }
 };
 
-static const struct drive_list_entry drive_blacklist [] = {
-
+static const struct drive_list_entry drive_blacklist[] = {
         { "WDC AC11000H" , NULL },
         { "WDC AC22100H" , NULL },
         { "WDC AC32500H" , NULL },
@@ -86,11 +83,11 @@ static const struct drive_list_entry drive_blacklist [] = {
  * ide_dma_intr - IDE DMA interrupt handler
  * @drive: the drive the interrupt is for
  *
  * Handle an interrupt completing a read/write DMA transfer on an
  * IDE device
  */
 
-ide_startstop_t ide_dma_intr (ide_drive_t *drive)
+ide_startstop_t ide_dma_intr(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = drive->hwif;
         u8 stat = 0, dma_stat = 0;
@@ -100,17 +97,16 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
 
         if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
                 if (!dma_stat) {
-                        struct request *rq = HWGROUP(drive)->rq;
+                        struct request *rq = hwif->hwgroup->rq;
 
                         task_end_request(drive, rq, stat);
                         return ide_stopped;
                 }
-                printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
-                        drive->name, dma_stat);
+                printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
+                        drive->name, __func__, dma_stat);
         }
         return ide_error(drive, "dma_intr", stat);
 }
-
 EXPORT_SYMBOL_GPL(ide_dma_intr);
 
 static int ide_dma_good_drive(ide_drive_t *drive)
@@ -131,7 +127,7 @@ static int ide_dma_good_drive(ide_drive_t *drive)
 
 int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 {
-        ide_hwif_t *hwif = HWIF(drive);
+        ide_hwif_t *hwif = drive->hwif;
         struct scatterlist *sg = hwif->sg_table;
 
         ide_map_sg(drive, rq);
@@ -144,7 +140,6 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
         return dma_map_sg(hwif->dev, sg, hwif->sg_nents,
                           hwif->sg_dma_direction);
 }
-
 EXPORT_SYMBOL_GPL(ide_build_sglist);
 
 #ifdef CONFIG_BLK_DEV_IDEDMA_SFF
@@ -164,10 +159,10 @@ EXPORT_SYMBOL_GPL(ide_build_sglist);
  *
  * May also be invoked from trm290.c
  */
 
-int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
+int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 {
-        ide_hwif_t *hwif = HWIF(drive);
+        ide_hwif_t *hwif = drive->hwif;
         __le32 *table = (__le32 *)hwif->dmatable_cpu;
         unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0;
         unsigned int count = 0;
@@ -241,15 +236,14 @@ EXPORT_SYMBOL_GPL(ide_build_dmatable);
  * an oops as only one mapping can be live for each target at a given
  * time.
  */
 
-void ide_destroy_dmatable (ide_drive_t *drive)
+void ide_destroy_dmatable(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = drive->hwif;
 
         dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
                      hwif->sg_dma_direction);
 }
-
 EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
 
 #ifdef CONFIG_BLK_DEV_IDEDMA_SFF
@@ -263,8 +257,8 @@ EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
  * to have DMA handling bugs are also set up appropriately based
  * on the good/bad drive lists.
  */
 
-static int config_drive_for_dma (ide_drive_t *drive)
+static int config_drive_for_dma(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = drive->hwif;
         u16 *id = drive->id;
@@ -304,26 +298,26 @@ static int config_drive_for_dma (ide_drive_t *drive)
  *
  * An IDE DMA transfer timed out. In the event of an error we ask
  * the driver to resolve the problem, if a DMA transfer is still
  * in progress we continue to wait (arguably we need to add a
  * secondary 'I don't care what the drive thinks' timeout here)
  * Finally if we have an interrupt we let it complete the I/O.
  * But only one time - we clear expiry and if it's still not
  * completed after WAIT_CMD, we error and retry in PIO.
  * This can occur if an interrupt is lost or due to hang or bugs.
  */
 
-static int dma_timer_expiry (ide_drive_t *drive)
+static int dma_timer_expiry(ide_drive_t *drive)
 {
-        ide_hwif_t *hwif = HWIF(drive);
+        ide_hwif_t *hwif = drive->hwif;
         u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
 
-        printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
-                drive->name, dma_stat);
+        printk(KERN_WARNING "%s: %s: DMA status (0x%02x)\n",
+                drive->name, __func__, dma_stat);
 
         if ((dma_stat & 0x18) == 0x18)  /* BUSY Stupid Early Timer !! */
                 return WAIT_CMD;
 
-        HWGROUP(drive)->expiry = NULL;  /* one free ride for now */
+        hwif->hwgroup->expiry = NULL;   /* one free ride for now */
 
         /* 1 dmaing, 2 error, 4 intr */
         if (dma_stat & 2)       /* ERROR */
@@ -348,9 +342,9 @@ static int dma_timer_expiry (ide_drive_t *drive)
 
 void ide_dma_host_set(ide_drive_t *drive, int on)
 {
-        ide_hwif_t *hwif = HWIF(drive);
+        ide_hwif_t *hwif = drive->hwif;
         u8 unit = drive->dn & 1;
         u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
 
         if (on)
                 dma_stat |= (1 << (5 + unit));
@@ -363,7 +357,6 @@ void ide_dma_host_set(ide_drive_t *drive, int on)
         else
                 outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
 }
-
 EXPORT_SYMBOL_GPL(ide_dma_host_set);
 #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
 
@@ -371,7 +364,7 @@ EXPORT_SYMBOL_GPL(ide_dma_host_set);
  * ide_dma_off_quietly - Generic DMA kill
  * @drive: drive to control
  *
  * Turn off the current DMA on this IDE controller.
  */
 
 void ide_dma_off_quietly(ide_drive_t *drive)
@@ -381,7 +374,6 @@ void ide_dma_off_quietly(ide_drive_t *drive)
 
         drive->hwif->dma_ops->dma_host_set(drive, 0);
 }
-
 EXPORT_SYMBOL(ide_dma_off_quietly);
 
 /**
@@ -397,7 +389,6 @@ void ide_dma_off(ide_drive_t *drive)
         printk(KERN_INFO "%s: DMA disabled\n", drive->name);
         ide_dma_off_quietly(drive);
 }
-
 EXPORT_SYMBOL(ide_dma_off);
 
 /**
@@ -426,13 +417,13 @@ void ide_dma_on(ide_drive_t *drive)
  * override this function if they need to
  *
  * Returns 0 on success. If a PIO fallback is required then 1
  * is returned.
  */
 
 int ide_dma_setup(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = drive->hwif;
-        struct request *rq = HWGROUP(drive)->rq;
+        struct request *rq = hwif->hwgroup->rq;
         unsigned int reading;
         u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
         u8 dma_stat;
@@ -474,13 +465,13 @@ int ide_dma_setup(ide_drive_t *drive)
         drive->waiting_for_dma = 1;
         return 0;
 }
-
 EXPORT_SYMBOL_GPL(ide_dma_setup);
 
 void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
 {
         /* issue cmd to drive */
-        ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
+        ide_execute_command(drive, command, &ide_dma_intr, 2 * WAIT_CMD,
+                            dma_timer_expiry);
 }
 EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
 
@@ -506,7 +497,6 @@ void ide_dma_start(ide_drive_t *drive)
 
         wmb();
 }
-
 EXPORT_SYMBOL_GPL(ide_dma_start);
 
 /* returns 1 on error, 0 otherwise */
@@ -550,8 +540,8 @@ EXPORT_SYMBOL_GPL(ide_dma_end);
 /* returns 1 if dma irq issued, 0 otherwise */
 int ide_dma_test_irq(ide_drive_t *drive)
 {
-        ide_hwif_t *hwif = HWIF(drive);
+        ide_hwif_t *hwif = drive->hwif;
         u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
 
         /* return 1 if INTR asserted */
         if ((dma_stat & 4) == 4)
@@ -564,7 +554,7 @@ EXPORT_SYMBOL_GPL(ide_dma_test_irq);
 static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
 #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
 
-int __ide_dma_bad_drive (ide_drive_t *drive)
+int __ide_dma_bad_drive(ide_drive_t *drive)
 {
         u16 *id = drive->id;
 
@@ -576,7 +566,6 @@ int __ide_dma_bad_drive (ide_drive_t *drive)
         }
         return 0;
 }
-
 EXPORT_SYMBOL(__ide_dma_bad_drive);
 
 static const u8 xfer_mode_bases[] = {
@@ -592,7 +581,7 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
         const struct ide_port_ops *port_ops = hwif->port_ops;
         unsigned int mask = 0;
 
-        switch(base) {
+        switch (base) {
         case XFER_UDMA_0:
                 if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
                         break;
@@ -693,7 +682,6 @@ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
 
         return mode;
 }
-
 EXPORT_SYMBOL_GPL(ide_find_dma_mode);
 
 static int ide_tune_dma(ide_drive_t *drive)
@@ -810,7 +798,7 @@ EXPORT_SYMBOL_GPL(ide_dma_lost_irq);
 
 void ide_dma_timeout(ide_drive_t *drive)
 {
-        ide_hwif_t *hwif = HWIF(drive);
+        ide_hwif_t *hwif = drive->hwif;
 
         printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
 