 drivers/ide/ide-dma.c         | 31
 drivers/ide/ide.c             |  3
 drivers/ide/mips/au1xxx-ide.c |  7
 drivers/ide/pci/scc_pata.c    | 14
 drivers/ide/pci/sgiioc4.c     | 15
 include/linux/ide.h           | 17
 6 files changed, 47 insertions(+), 40 deletions(-)
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 244b61b573ce..3f949b5db353 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -844,36 +844,43 @@ void ide_dma_timeout(ide_drive_t *drive)
 }
 EXPORT_SYMBOL_GPL(ide_dma_timeout);
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
 void ide_release_dma_engine(ide_hwif_t *hwif)
 {
 	if (hwif->dmatable_cpu) {
-		struct pci_dev *pdev = to_pci_dev(hwif->dev);
+		int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
 
-		pci_free_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
-				    hwif->dmatable_cpu, hwif->dmatable_dma);
+		dma_free_coherent(hwif->dev, prd_size,
+				  hwif->dmatable_cpu, hwif->dmatable_dma);
 		hwif->dmatable_cpu = NULL;
 	}
 }
+EXPORT_SYMBOL_GPL(ide_release_dma_engine);
 
 int ide_allocate_dma_engine(ide_hwif_t *hwif)
 {
-	struct pci_dev *pdev = to_pci_dev(hwif->dev);
+	int prd_size;
 
-	hwif->dmatable_cpu = pci_alloc_consistent(pdev,
-						  PRD_ENTRIES * PRD_BYTES,
-						  &hwif->dmatable_dma);
+	if (hwif->prd_max_nents == 0)
+		hwif->prd_max_nents = PRD_ENTRIES;
+	if (hwif->prd_ent_size == 0)
+		hwif->prd_ent_size = PRD_BYTES;
 
-	if (hwif->dmatable_cpu)
-		return 0;
+	prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
 
-	printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
+	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
+						&hwif->dmatable_dma,
+						GFP_ATOMIC);
+	if (hwif->dmatable_cpu == NULL) {
+		printk(KERN_ERR "%s: unable to allocate PRD table\n",
 	       hwif->name);
+		return -ENOMEM;
+	}
 
-	return 1;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
 
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
 const struct ide_dma_ops sff_dma_ops = {
 	.dma_host_set		= ide_dma_host_set,
 	.dma_setup		= ide_dma_setup,
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index a498245dc213..083783e851d1 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -227,8 +227,7 @@ void ide_unregister(ide_hwif_t *hwif)
 	kfree(hwif->sg_table);
 	unregister_blkdev(hwif->major, hwif->name);
 
-	if (hwif->dma_base)
-		ide_release_dma_engine(hwif);
+	ide_release_dma_engine(hwif);
 
 	mutex_unlock(&ide_cfg_mtx);
 }
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index f9e88cfec827..0ec8fd1e4dcb 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -427,10 +427,9 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
 							     NUM_DESCRIPTORS);
 	auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
 							     NUM_DESCRIPTORS);
 
-	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
-						PRD_ENTRIES * PRD_BYTES,	/* 1 Page */
-						&hwif->dmatable_dma, GFP_KERNEL);
+	/* FIXME: check return value */
+	(void)ide_allocate_dma_engine(hwif);
 
 	au1xxx_dbdma_start( auide->tx_chan );
 	au1xxx_dbdma_start( auide->rx_chan );
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 3e75bf5f5e37..9ce1d8059921 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -821,6 +821,12 @@ static void __devinit init_iops_scc(ide_hwif_t *hwif)
 	init_mmio_iops_scc(hwif);
 }
 
+static int __devinit scc_init_dma(ide_hwif_t *hwif,
+				  const struct ide_port_info *d)
+{
+	return ide_allocate_dma_engine(hwif);
+}
+
 static u8 scc_cable_detect(ide_hwif_t *hwif)
 {
 	return ATA_CBL_PATA80;
@@ -885,6 +891,7 @@ static const struct ide_dma_ops scc_dma_ops = {
   {							\
 	.name		= name_str,			\
 	.init_iops	= init_iops_scc,		\
+	.init_dma	= scc_init_dma,			\
 	.init_hwif	= init_hwif_scc,		\
 	.tp_ops		= &scc_tp_ops,			\
 	.port_ops	= &scc_port_ops,		\
@@ -922,13 +929,6 @@ static void __devexit scc_remove(struct pci_dev *dev)
 {
 	struct scc_ports *ports = pci_get_drvdata(dev);
 	struct ide_host *host = ports->host;
-	ide_hwif_t *hwif = host->ports[0];
-
-	if (hwif->dmatable_cpu) {
-		pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
-				    hwif->dmatable_cpu, hwif->dmatable_dma);
-		hwif->dmatable_cpu = NULL;
-	}
 
 	ide_host_remove(host);
 
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 84cd986810cf..dd634541ce36 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -357,14 +357,13 @@ ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
 	}
 	hwif->dma_base = (unsigned long) virt_dma_base;
 
-	hwif->dmatable_cpu = pci_alloc_consistent(dev,
-					  IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
-					  &hwif->dmatable_dma);
+	hwif->sg_max_nents = IOC4_PRD_ENTRIES;
 
-	if (!hwif->dmatable_cpu)
-		goto dma_pci_alloc_failure;
+	hwif->prd_max_nents = IOC4_PRD_ENTRIES;
+	hwif->prd_ent_size = IOC4_PRD_BYTES;
 
-	hwif->sg_max_nents = IOC4_PRD_ENTRIES;
+	if (ide_allocate_dma_engine(hwif))
+		goto dma_pci_alloc_failure;
 
 	pad = pci_alloc_consistent(dev, IOC4_IDE_CACHELINE_SIZE,
 				   (dma_addr_t *)&hwif->extra_base);
@@ -373,8 +372,8 @@ ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
 		return 0;
 	}
 
-	pci_free_consistent(dev, IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
-			    hwif->dmatable_cpu, hwif->dmatable_dma);
+	ide_release_dma_engine(hwif);
+
 	printk(KERN_ERR "%s(%s) -- ERROR: Unable to allocate DMA maps\n",
 	       __func__, hwif->name);
 	printk(KERN_INFO "%s: changing from DMA to PIO mode", hwif->name);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 39aaff8ff457..8121aa9240c4 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -788,6 +788,12 @@ typedef struct hwif_s {
 	unsigned int	*dmatable_cpu;
 	/* dma physical region descriptor table (dma view) */
 	dma_addr_t	dmatable_dma;
+
+	/* maximum number of PRD table entries */
+	int prd_max_nents;
+	/* PRD entry size in bytes */
+	int prd_ent_size;
+
 	/* Scatter-gather list used to build the above */
 	struct scatterlist *sg_table;
 	int sg_max_nents;		/* Maximum number of entries in it */
@@ -1423,14 +1429,14 @@ int ide_set_dma(ide_drive_t *);
 void ide_check_dma_crc(ide_drive_t *);
 ide_startstop_t ide_dma_intr(ide_drive_t *);
 
+int ide_allocate_dma_engine(ide_hwif_t *);
+void ide_release_dma_engine(ide_hwif_t *);
+
 int ide_build_sglist(ide_drive_t *, struct request *);
 void ide_destroy_dmatable(ide_drive_t *);
 
 #ifdef CONFIG_BLK_DEV_IDEDMA_SFF
 extern int ide_build_dmatable(ide_drive_t *, struct request *);
-int ide_allocate_dma_engine(ide_hwif_t *);
-void ide_release_dma_engine(ide_hwif_t *);
-
 void ide_dma_host_set(ide_drive_t *, int);
 extern int ide_dma_setup(ide_drive_t *);
 void ide_dma_exec_cmd(ide_drive_t *, u8);
@@ -1453,11 +1459,8 @@ static inline void ide_dma_on(ide_drive_t *drive) { ; }
 static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
 static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
 static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
-#endif /* CONFIG_BLK_DEV_IDEDMA */
-
-#ifndef CONFIG_BLK_DEV_IDEDMA_SFF
 static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
-#endif
+#endif /* CONFIG_BLK_DEV_IDEDMA */
 
 #ifdef CONFIG_BLK_DEV_IDEACPI
 extern int ide_acpi_exec_tfs(ide_drive_t *drive);