author     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>   2007-05-09 18:01:08 -0400
committer  Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>   2007-05-09 18:01:08 -0400
commit     2d5eaa6dd744a641e75503232a01f52d0768884c
tree       0736bd00ea3bd032d601d0a676c998cb043b877a /drivers/ide/pci/serverworks.c
parent     18137207236285989dfc0ee7f929b954199228f3
ide: rework the code for selecting the best DMA transfer mode (v3)
Depends on the "ide: fix UDMA/MWDMA/SWDMA masks" patch.

* add ide_hwif_t.udma_filter hook for filtering UDMA mask
  (use it in alim15x3, hpt366, siimage and serverworks drivers)
* add ide_max_dma_mode() for finding best DMA mode for the device
  (loosely based on some older libata-core.c code)
* convert ide_dma_speed() users to use ide_max_dma_mode()
* make ide_rate_filter() take "ide_drive_t *drive" as an argument instead
  of "u8 mode" and teach it how to use the UDMA mask to do filtering
* use ide_rate_filter() in hpt366 driver
* remove no longer needed ide_dma_speed() and *_ratemask()
* unexport eighty_ninty_three()

v2:
* rename ->filter_udma_mask to ->udma_filter
  [ Suggested by Sergei Shtylyov <sshtylyov@ru.mvista.com>. ]

v3:
* updated for scc_pata driver (fixes XFER_UDMA_6 filtering for user-space
  originated transfer mode change requests when 100MHz clock is used)

Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
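To make the new scheme concrete: a driver's ->udma_filter hook returns a bitmask of UDMA modes the host is willing to run (bit N set means UDMA mode N is allowed, so 0x07 covers UDMA0-2/ATA-33 and 0x1f covers UDMA0-4/ATA-66, as in the serverworks conversion below), and the core then selects the highest mode permitted by both that mask and the drive. The user-space sketch below only illustrates that selection idea; it is not the kernel's ide_max_dma_mode()/ide_rate_filter() code, and best_udma_mode(), the example masks and the use of the XFER_UDMA_0 base value here are purely illustrative.

/*
 * Illustrative sketch only -- not the kernel implementation.
 * Picks the highest UDMA mode allowed by both the host's filter
 * mask and the drive's own mask, mirroring the idea behind the
 * new ->udma_filter / ide_max_dma_mode() split.
 */
#include <stdio.h>

#define XFER_UDMA_0	0x40	/* ATA "UDMA mode 0" transfer-mode value */

static unsigned char best_udma_mode(unsigned char host_mask,
				    unsigned char drive_mask)
{
	unsigned char mask = host_mask & drive_mask;
	int bit;

	for (bit = 7; bit >= 0; bit--)
		if (mask & (1 << bit))
			return XFER_UDMA_0 + bit;

	return 0;	/* no common UDMA mode: fall back to MWDMA/PIO */
}

int main(void)
{
	/* e.g. a 0x1f filter (UDMA0-4) against a drive advertising UDMA0-5 (0x3f) */
	unsigned char mode = best_udma_mode(0x1f, 0x3f);

	printf("selected mode 0x%02x (UDMA%d)\n", mode, mode - XFER_UDMA_0);
	return 0;
}

With the masks above this prints 0x44, i.e. UDMA4 (ATA-66), which is the cap the 0x1f filter is meant to enforce.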
Diffstat (limited to 'drivers/ide/pci/serverworks.c')
-rw-r--r--  drivers/ide/pci/serverworks.c  31
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index dbcd37a0c652..2fa6d92d16cc 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -65,16 +65,16 @@ static int check_in_drive_lists (ide_drive_t *drive, const char **list)
 	return 0;
 }
 
-static u8 svwks_ratemask (ide_drive_t *drive)
+static u8 svwks_udma_filter(ide_drive_t *drive)
 {
 	struct pci_dev *dev = HWIF(drive)->pci_dev;
-	u8 mode = 0;
+	u8 mask = 0;
 
 	if (!svwks_revision)
 		pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision);
 
 	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
-		return 2;
+		return 0x1f;
 	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
 		u32 reg = 0;
 		if (isa_dev)
@@ -86,25 +86,31 @@ static u8 svwks_ratemask (ide_drive_t *drive)
 		if(drive->media == ide_disk)
 			return 0;
 		/* Check the OSB4 DMA33 enable bit */
-		return ((reg & 0x00004000) == 0x00004000) ? 1 : 0;
+		return ((reg & 0x00004000) == 0x00004000) ? 0x07 : 0;
 	} else if (svwks_revision < SVWKS_CSB5_REVISION_NEW) {
-		return 1;
+		return 0x07;
 	} else if (svwks_revision >= SVWKS_CSB5_REVISION_NEW) {
-		u8 btr = 0;
+		u8 btr = 0, mode;
 		pci_read_config_byte(dev, 0x5A, &btr);
 		mode = btr & 0x3;
-		if (!eighty_ninty_three(drive))
-			mode = min(mode, (u8)1);
+
 		/* If someone decides to do UDMA133 on CSB5 the same
 		   issue will bite so be inclusive */
 		if (mode > 2 && check_in_drive_lists(drive, svwks_bad_ata100))
 			mode = 2;
+
+		switch(mode) {
+		case 2:	 mask = 0x1f; break;
+		case 1:	 mask = 0x07; break;
+		default: mask = 0x00; break;
+		}
 	}
 	if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
 	     (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) &&
 	    (!(PCI_FUNC(dev->devfn) & 1)))
-		mode = 2;
-	return mode;
+		mask = 0x1f;
+
+	return mask;
 }
 
 static u8 svwks_csb_check (struct pci_dev *dev)
@@ -141,7 +147,7 @@ static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 	if (xferspeed == 255)	/* PIO auto-tuning */
 		speed = XFER_PIO_0 + pio;
 	else
-		speed = ide_rate_filter(svwks_ratemask(drive), xferspeed);
+		speed = ide_rate_filter(drive, xferspeed);
 
 	/* If we are about to put a disk into UDMA mode we screwed up.
 	   Our code assumes we never _ever_ do this on an OSB4 */
@@ -304,7 +310,7 @@ static void svwks_tune_drive (ide_drive_t *drive, u8 pio)
 
 static int config_chipset_for_dma (ide_drive_t *drive)
 {
-	u8 speed = ide_dma_speed(drive, svwks_ratemask(drive));
+	u8 speed = ide_max_dma_mode(drive);
 
 	if (!(speed))
 		speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5, NULL);
@@ -500,6 +506,7 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
 
 	hwif->tuneproc = &svwks_tune_drive;
 	hwif->speedproc = &svwks_tune_chipset;
+	hwif->udma_filter = &svwks_udma_filter;
 
 	hwif->atapi_dma = 1;
 