Diffstat (limited to 'drivers/ide/pci/cmd64x.c')
-rw-r--r--  drivers/ide/pci/cmd64x.c | 138
1 file changed, 78 insertions(+), 60 deletions(-)
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 8baccfef237f..006fb62656bc 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -223,7 +223,7 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
         (void) pci_write_config_byte(dev, pciU, regU);
 }
 
-static int cmd648_ide_dma_end (ide_drive_t *drive)
+static int cmd648_dma_end(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = HWIF(drive);
         unsigned long base = hwif->dma_base - (hwif->channel * 8);
@@ -239,7 +239,7 @@ static int cmd648_ide_dma_end (ide_drive_t *drive)
         return err;
 }
 
-static int cmd64x_ide_dma_end (ide_drive_t *drive)
+static int cmd64x_dma_end(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = HWIF(drive);
         struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -256,7 +256,7 @@ static int cmd64x_ide_dma_end (ide_drive_t *drive)
         return err;
 }
 
-static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
+static int cmd648_dma_test_irq(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = HWIF(drive);
         unsigned long base = hwif->dma_base - (hwif->channel * 8);
@@ -279,7 +279,7 @@ static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
         return 0;
 }
 
-static int cmd64x_ide_dma_test_irq (ide_drive_t *drive)
+static int cmd64x_dma_test_irq(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = HWIF(drive);
         struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -310,7 +310,7 @@ static int cmd64x_ide_dma_test_irq (ide_drive_t *drive)
  * event order for DMA transfers.
  */
 
-static int cmd646_1_ide_dma_end (ide_drive_t *drive)
+static int cmd646_1_dma_end(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = HWIF(drive);
         u8 dma_stat = 0, dma_cmd = 0;
@@ -370,7 +370,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
         return 0;
 }
 
-static u8 __devinit ata66_cmd64x(ide_hwif_t *hwif)
+static u8 __devinit cmd64x_cable_detect(ide_hwif_t *hwif)
 {
         struct pci_dev *dev = to_pci_dev(hwif->dev);
         u8 bmidecsr = 0, mask = hwif->channel ? 0x02 : 0x01;
@@ -385,60 +385,52 @@ static u8 __devinit ata66_cmd64x(ide_hwif_t *hwif)
         }
 }
 
-static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
-{
-        struct pci_dev *dev = to_pci_dev(hwif->dev);
-
-        hwif->set_pio_mode = &cmd64x_set_pio_mode;
-        hwif->set_dma_mode = &cmd64x_set_dma_mode;
-
-        hwif->cable_detect = ata66_cmd64x;
+static const struct ide_port_ops cmd64x_port_ops = {
+        .set_pio_mode = cmd64x_set_pio_mode,
+        .set_dma_mode = cmd64x_set_dma_mode,
+        .cable_detect = cmd64x_cable_detect,
+};
 
-        if (!hwif->dma_base)
-                return;
+static const struct ide_dma_ops cmd64x_dma_ops = {
+        .dma_host_set = ide_dma_host_set,
+        .dma_setup = ide_dma_setup,
+        .dma_exec_cmd = ide_dma_exec_cmd,
+        .dma_start = ide_dma_start,
+        .dma_end = cmd64x_dma_end,
+        .dma_test_irq = cmd64x_dma_test_irq,
+        .dma_lost_irq = ide_dma_lost_irq,
+        .dma_timeout = ide_dma_timeout,
+};
 
-        /*
-         * UltraDMA only supported on PCI646U and PCI646U2, which
-         * correspond to revisions 0x03, 0x05 and 0x07 respectively.
-         * Actually, although the CMD tech support people won't
-         * tell me the details, the 0x03 revision cannot support
-         * UDMA correctly without hardware modifications, and even
-         * then it only works with Quantum disks due to some
-         * hold time assumptions in the 646U part which are fixed
-         * in the 646U2.
-         *
-         * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
-         */
-        if (dev->device == PCI_DEVICE_ID_CMD_646 && dev->revision < 5)
-                hwif->ultra_mask = 0x00;
+static const struct ide_dma_ops cmd646_rev1_dma_ops = {
+        .dma_host_set = ide_dma_host_set,
+        .dma_setup = ide_dma_setup,
+        .dma_exec_cmd = ide_dma_exec_cmd,
+        .dma_start = ide_dma_start,
+        .dma_end = cmd646_1_dma_end,
+        .dma_test_irq = ide_dma_test_irq,
+        .dma_lost_irq = ide_dma_lost_irq,
+        .dma_timeout = ide_dma_timeout,
+};
 
-        switch (dev->device) {
-        case PCI_DEVICE_ID_CMD_648:
-        case PCI_DEVICE_ID_CMD_649:
-        alt_irq_bits:
-                hwif->ide_dma_end = &cmd648_ide_dma_end;
-                hwif->ide_dma_test_irq = &cmd648_ide_dma_test_irq;
-                break;
-        case PCI_DEVICE_ID_CMD_646:
-                if (dev->revision == 0x01) {
-                        hwif->ide_dma_end = &cmd646_1_ide_dma_end;
-                        break;
-                } else if (dev->revision >= 0x03)
-                        goto alt_irq_bits;
-                /* fall thru */
-        default:
-                hwif->ide_dma_end = &cmd64x_ide_dma_end;
-                hwif->ide_dma_test_irq = &cmd64x_ide_dma_test_irq;
-                break;
-        }
-}
+static const struct ide_dma_ops cmd648_dma_ops = {
+        .dma_host_set = ide_dma_host_set,
+        .dma_setup = ide_dma_setup,
+        .dma_exec_cmd = ide_dma_exec_cmd,
+        .dma_start = ide_dma_start,
+        .dma_end = cmd648_dma_end,
+        .dma_test_irq = cmd648_dma_test_irq,
+        .dma_lost_irq = ide_dma_lost_irq,
+        .dma_timeout = ide_dma_timeout,
+};
 
 static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
         {       /* 0 */
                 .name = "CMD643",
                 .init_chipset = init_chipset_cmd64x,
-                .init_hwif = init_hwif_cmd64x,
                 .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
+                .port_ops = &cmd64x_port_ops,
+                .dma_ops = &cmd64x_dma_ops,
                 .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
                               IDE_HFLAG_ABUSE_PREFETCH,
                 .pio_mask = ATA_PIO5,
@@ -447,9 +439,10 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
         },{     /* 1 */
                 .name = "CMD646",
                 .init_chipset = init_chipset_cmd64x,
-                .init_hwif = init_hwif_cmd64x,
                 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
                 .chipset = ide_cmd646,
+                .port_ops = &cmd64x_port_ops,
+                .dma_ops = &cmd648_dma_ops,
                 .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
                 .pio_mask = ATA_PIO5,
                 .mwdma_mask = ATA_MWDMA2,
@@ -457,8 +450,9 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
         },{     /* 2 */
                 .name = "CMD648",
                 .init_chipset = init_chipset_cmd64x,
-                .init_hwif = init_hwif_cmd64x,
                 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
+                .port_ops = &cmd64x_port_ops,
+                .dma_ops = &cmd648_dma_ops,
                 .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
                 .pio_mask = ATA_PIO5,
                 .mwdma_mask = ATA_MWDMA2,
@@ -466,8 +460,9 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
         },{     /* 3 */
                 .name = "CMD649",
                 .init_chipset = init_chipset_cmd64x,
-                .init_hwif = init_hwif_cmd64x,
                 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
+                .port_ops = &cmd64x_port_ops,
+                .dma_ops = &cmd648_dma_ops,
                 .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
                 .pio_mask = ATA_PIO5,
                 .mwdma_mask = ATA_MWDMA2,
@@ -482,12 +477,35 @@ static int __devinit cmd64x_init_one(struct pci_dev *dev, const struct pci_devic
 
         d = cmd64x_chipsets[idx];
 
-        /*
-         * The original PCI0646 didn't have the primary channel enable bit,
-         * it appeared starting with PCI0646U (i.e. revision ID 3).
-         */
-        if (idx == 1 && dev->revision < 3)
-                d.enablebits[0].reg = 0;
+        if (idx == 1) {
+                /*
+                 * UltraDMA only supported on PCI646U and PCI646U2, which
+                 * correspond to revisions 0x03, 0x05 and 0x07 respectively.
+                 * Actually, although the CMD tech support people won't
+                 * tell me the details, the 0x03 revision cannot support
+                 * UDMA correctly without hardware modifications, and even
+                 * then it only works with Quantum disks due to some
+                 * hold time assumptions in the 646U part which are fixed
+                 * in the 646U2.
+                 *
+                 * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
+                 */
+                if (dev->revision < 5) {
+                        d.udma_mask = 0x00;
+                        /*
+                         * The original PCI0646 didn't have the primary
+                         * channel enable bit, it appeared starting with
+                         * PCI0646U (i.e. revision ID 3).
+                         */
+                        if (dev->revision < 3) {
+                                d.enablebits[0].reg = 0;
+                                if (dev->revision == 1)
+                                        d.dma_ops = &cmd646_rev1_dma_ops;
+                                else
+                                        d.dma_ops = &cmd64x_dma_ops;
+                        }
+                }
+        }
 
         return ide_setup_pci_device(dev, &d);
 }
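
Note on the pattern this diff adopts: the per-hwif hook assignments previously done in init_hwif_cmd64x() are replaced by shared, read-only ops tables (cmd64x_port_ops plus one struct ide_dma_ops per chip variant), and cmd64x_init_one() only adjusts the copied ide_port_info for the PCI0646 revisions that need different hooks. Below is a minimal, self-contained sketch of the const ops-table idea; all names in it (demo_dma_ops, demo_648_ops, the stub functions) are hypothetical illustrations, not kernel APIs.

        /*
         * Sketch of the const ops-table pattern (hypothetical names only;
         * the real driver uses struct ide_dma_ops and the cmd64x hooks).
         */
        #include <stdio.h>

        struct demo_dma_ops {
                int (*dma_end)(int channel);      /* per-chip "end transfer" hook */
                int (*dma_test_irq)(int channel); /* per-chip "was it our IRQ?" hook */
        };

        static int generic_dma_end(int channel)
        {
                printf("generic dma_end, channel %d\n", channel);
                return 0;
        }

        static int chip648_dma_test_irq(int channel)
        {
                printf("chip-specific dma_test_irq, channel %d\n", channel);
                return 1;
        }

        /* One shared, read-only table per chip family, like cmd648_dma_ops above. */
        static const struct demo_dma_ops demo_648_ops = {
                .dma_end      = generic_dma_end,
                .dma_test_irq = chip648_dma_test_irq,
        };

        int main(void)
        {
                /* A probe routine would pick the table by device ID/revision. */
                const struct demo_dma_ops *ops = &demo_648_ops;

                ops->dma_end(0);
                ops->dma_test_irq(0);
                return 0;
        }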