Diffstat (limited to 'drivers/ide/ide-dma.c')
-rw-r--r--  drivers/ide/ide-dma.c | 140
 1 file changed, 31 insertions(+), 109 deletions(-)
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 8757e5ef6c95..c352cf27b6e7 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -102,7 +102,7 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
 {
 	u8 stat = 0, dma_stat = 0;
 
-	dma_stat = HWIF(drive)->ide_dma_end(drive);
+	dma_stat = drive->hwif->dma_ops->dma_end(drive);
 	stat = ide_read_status(drive);
 
 	if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
@@ -394,7 +394,7 @@ void ide_dma_off_quietly(ide_drive_t *drive)
 	drive->using_dma = 0;
 	ide_toggle_bounce(drive, 0);
 
-	drive->hwif->dma_host_set(drive, 0);
+	drive->hwif->dma_ops->dma_host_set(drive, 0);
 }
 
 EXPORT_SYMBOL(ide_dma_off_quietly);
@@ -427,7 +427,7 @@ void ide_dma_on(ide_drive_t *drive)
 	drive->using_dma = 1;
 	ide_toggle_bounce(drive, 1);
 
-	drive->hwif->dma_host_set(drive, 1);
+	drive->hwif->dma_ops->dma_host_set(drive, 1);
 }
 
 #ifdef CONFIG_BLK_DEV_IDEDMA_SFF
@@ -482,11 +482,12 @@ int ide_dma_setup(ide_drive_t *drive)
 
 EXPORT_SYMBOL_GPL(ide_dma_setup);
 
-static void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
+void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
 {
 	/* issue cmd to drive */
 	ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
 }
+EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
 
 void ide_dma_start(ide_drive_t *drive)
 {
@@ -532,7 +533,7 @@ int __ide_dma_end (ide_drive_t *drive)
 EXPORT_SYMBOL(__ide_dma_end);
 
 /* returns 1 if dma irq issued, 0 otherwise */
-static int __ide_dma_test_irq(ide_drive_t *drive)
+int ide_dma_test_irq(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = HWIF(drive);
 	u8 dma_stat = hwif->INB(hwif->dma_status);
@@ -542,9 +543,10 @@ static int __ide_dma_test_irq(ide_drive_t *drive)
 		return 1;
 	if (!drive->waiting_for_dma)
 		printk(KERN_WARNING "%s: (%s) called while not waiting\n",
-			drive->name, __FUNCTION__);
+			drive->name, __func__);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ide_dma_test_irq);
 #else
 static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
 #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
@@ -574,6 +576,7 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
 {
 	struct hd_driveid *id = drive->id;
 	ide_hwif_t *hwif = drive->hwif;
+	const struct ide_port_ops *port_ops = hwif->port_ops;
 	unsigned int mask = 0;
 
 	switch(base) {
@@ -581,8 +584,8 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
 		if ((id->field_valid & 4) == 0)
 			break;
 
-		if (hwif->udma_filter)
-			mask = hwif->udma_filter(drive);
+		if (port_ops && port_ops->udma_filter)
+			mask = port_ops->udma_filter(drive);
 		else
 			mask = hwif->ultra_mask;
 		mask &= id->dma_ultra;
@@ -598,8 +601,8 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
 	case XFER_MW_DMA_0:
 		if ((id->field_valid & 2) == 0)
 			break;
-		if (hwif->mdma_filter)
-			mask = hwif->mdma_filter(drive);
+		if (port_ops && port_ops->mdma_filter)
+			mask = port_ops->mdma_filter(drive);
 		else
 			mask = hwif->mwdma_mask;
 		mask &= id->dma_mword;
@@ -801,15 +804,15 @@ void ide_dma_timeout (ide_drive_t *drive)
 
 	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
 
-	if (hwif->ide_dma_test_irq(drive))
+	if (hwif->dma_ops->dma_test_irq(drive))
 		return;
 
-	hwif->ide_dma_end(drive);
+	hwif->dma_ops->dma_end(drive);
 }
 
 EXPORT_SYMBOL(ide_dma_timeout);
 
-static void ide_release_dma_engine(ide_hwif_t *hwif)
+void ide_release_dma_engine(ide_hwif_t *hwif)
 {
 	if (hwif->dmatable_cpu) {
 		struct pci_dev *pdev = to_pci_dev(hwif->dev);
@@ -820,28 +823,7 @@ static void ide_release_dma_engine(ide_hwif_t *hwif)
 	}
 }
 
-static int ide_release_iomio_dma(ide_hwif_t *hwif)
-{
-	release_region(hwif->dma_base, 8);
-	if (hwif->extra_ports)
-		release_region(hwif->extra_base, hwif->extra_ports);
-	return 1;
-}
-
-/*
- * Needed for allowing full modular support of ide-driver
- */
-int ide_release_dma(ide_hwif_t *hwif)
-{
-	ide_release_dma_engine(hwif);
-
-	if (hwif->mmio)
-		return 1;
-	else
-		return ide_release_iomio_dma(hwif);
-}
-
-static int ide_allocate_dma_engine(ide_hwif_t *hwif)
+int ide_allocate_dma_engine(ide_hwif_t *hwif)
 {
 	struct pci_dev *pdev = to_pci_dev(hwif->dev);
 
@@ -853,65 +835,25 @@ static int ide_allocate_dma_engine(ide_hwif_t *hwif)
 		return 0;
 
 	printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
-			hwif->cds->name);
+			hwif->name);
 
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
 
-static int ide_mapped_mmio_dma(ide_hwif_t *hwif, unsigned long base)
-{
-	printk(KERN_INFO "    %s: MMIO-DMA ", hwif->name);
-
-	return 0;
-}
-
-static int ide_iomio_dma(ide_hwif_t *hwif, unsigned long base)
-{
-	printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx",
-	       hwif->name, base, base + 7);
-
-	if (!request_region(base, 8, hwif->name)) {
-		printk(" -- Error, ports in use.\n");
-		return 1;
-	}
-
-	if (hwif->cds->extra) {
-		hwif->extra_base = base + (hwif->channel ? 8 : 16);
-
-		if (!hwif->mate || !hwif->mate->extra_ports) {
-			if (!request_region(hwif->extra_base,
-					    hwif->cds->extra, hwif->cds->name)) {
-				printk(" -- Error, extra ports in use.\n");
-				release_region(base, 8);
-				return 1;
-			}
-			hwif->extra_ports = hwif->cds->extra;
-		}
-	}
-
-	return 0;
-}
-
-static int ide_dma_iobase(ide_hwif_t *hwif, unsigned long base)
-{
-	if (hwif->mmio)
-		return ide_mapped_mmio_dma(hwif, base);
-
-	return ide_iomio_dma(hwif, base);
-}
+static const struct ide_dma_ops sff_dma_ops = {
+	.dma_host_set		= ide_dma_host_set,
+	.dma_setup		= ide_dma_setup,
+	.dma_exec_cmd		= ide_dma_exec_cmd,
+	.dma_start		= ide_dma_start,
+	.dma_end		= __ide_dma_end,
+	.dma_test_irq		= ide_dma_test_irq,
+	.dma_timeout		= ide_dma_timeout,
+	.dma_lost_irq		= ide_dma_lost_irq,
+};
 
 void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
 {
-	u8 dma_stat;
-
-	if (ide_dma_iobase(hwif, base))
-		return;
-
-	if (ide_allocate_dma_engine(hwif)) {
-		ide_release_dma(hwif);
-		return;
-	}
-
 	hwif->dma_base = base;
 
 	if (!hwif->dma_command)
@@ -925,27 +867,7 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
 	if (!hwif->dma_prdtable)
 		hwif->dma_prdtable = hwif->dma_base + 4;
 
-	if (!hwif->dma_host_set)
-		hwif->dma_host_set = &ide_dma_host_set;
-	if (!hwif->dma_setup)
-		hwif->dma_setup = &ide_dma_setup;
-	if (!hwif->dma_exec_cmd)
-		hwif->dma_exec_cmd = &ide_dma_exec_cmd;
-	if (!hwif->dma_start)
-		hwif->dma_start = &ide_dma_start;
-	if (!hwif->ide_dma_end)
-		hwif->ide_dma_end = &__ide_dma_end;
-	if (!hwif->ide_dma_test_irq)
-		hwif->ide_dma_test_irq = &__ide_dma_test_irq;
-	if (!hwif->dma_timeout)
-		hwif->dma_timeout = &ide_dma_timeout;
-	if (!hwif->dma_lost_irq)
-		hwif->dma_lost_irq = &ide_dma_lost_irq;
-
-	dma_stat = hwif->INB(hwif->dma_status);
-	printk(KERN_CONT ", BIOS settings: %s:%s, %s:%s\n",
-	       hwif->drives[0].name, (dma_stat & 0x20) ? "DMA" : "PIO",
-	       hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "PIO");
+	hwif->dma_ops = &sff_dma_ops;
 }
 
 EXPORT_SYMBOL_GPL(ide_setup_dma);
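
For reference, the net effect of this patch is that the per-hwif dma_* method pointers are replaced by a shared, const struct ide_dma_ops attached via hwif->dma_ops (the SFF-8038i defaults are collected in sff_dma_ops above). Below is a minimal sketch of how a host driver could supply its own operations after this change; the myhw_* names and the init hook are hypothetical and only the ide_dma_ops fields and the generic helpers shown in the patch (ide_dma_setup(), __ide_dma_end(), ide_dma_test_irq(), ...) come from this file. Whether a given generic helper can be referenced directly from another driver depends on it being exported, as ide_dma_exec_cmd() and ide_dma_test_irq() now are.

/* Hypothetical example, not part of the patch. */
#include <linux/ide.h>

/* Controller-specific hooks (hypothetical). */
static void myhw_dma_host_set(ide_drive_t *drive, int on)
{
	/* enable or disable DMA for this device in controller registers */
}

static int myhw_dma_end(ide_drive_t *drive)
{
	/* e.g. quiesce controller-specific state, then use the SFF helper */
	return __ide_dma_end(drive);
}

/* Mirror sff_dma_ops, overriding only what the hardware needs. */
static const struct ide_dma_ops myhw_dma_ops = {
	.dma_host_set	= myhw_dma_host_set,
	.dma_setup	= ide_dma_setup,
	.dma_exec_cmd	= ide_dma_exec_cmd,
	.dma_start	= ide_dma_start,
	.dma_end	= myhw_dma_end,
	.dma_test_irq	= ide_dma_test_irq,
	.dma_timeout	= ide_dma_timeout,
	.dma_lost_irq	= ide_dma_lost_irq,
};

/* Hypothetical init hook: a single assignment replaces the old
 * "if (!hwif->dma_*) hwif->dma_* = &ide_dma_*;" boilerplate. */
static void myhw_init_hwif_dma(ide_hwif_t *hwif)
{
	hwif->dma_ops = &myhw_dma_ops;
}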