author     Tejun Heo <tj@kernel.org>            2010-05-10 15:41:32 -0400
committer  Jeff Garzik <jgarzik@redhat.com>     2010-05-17 22:49:07 -0400
commit     9f2f72107ff621fdf3066e5a1b5ecb03ee587ebc (patch)
tree       266c1371cb706c941ebaebd097380beccd530041 /drivers/ata
parent     6bc0d390dd3dfb7be4325ad0685ae8ec2baf15a4 (diff)
libata-sff: reorder SFF/BMDMA functions
Reorder functions such that SFF and BMDMA functions are grouped.
While at it, s/BMDMA/SFF in a few comments where it actually meant
SFF.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/libata-sff.c  448
1 file changed, 226 insertions(+), 222 deletions(-)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 0e2c972292c..6400e875139 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -71,26 +71,6 @@ const struct ata_port_operations ata_sff_port_ops = {
 };
 EXPORT_SYMBOL_GPL(ata_sff_port_ops);
 
-const struct ata_port_operations ata_bmdma_port_ops = {
-	.inherits		= &ata_sff_port_ops,
-
-	.mode_filter		= ata_bmdma_mode_filter,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-};
-EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
-
-const struct ata_port_operations ata_bmdma32_port_ops = {
-	.inherits		= &ata_bmdma_port_ops,
-
-	.sff_data_xfer		= ata_sff_data_xfer32,
-	.port_start		= ata_sff_port_start32,
-};
-EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
-
 /**
  * ata_fill_sg - Fill PCI IDE PRD table
  * @qc: Metadata associated with taskfile to be transferred
@@ -1750,7 +1730,7 @@ unsigned int ata_sff_host_intr(struct ata_port *ap,
 		goto idle_irq;
 	}
 
-	/* ack bmdma irq events */
+	/* clear irq events */
 	ap->ops->sff_irq_clear(ap);
 
 	ata_sff_hsm_move(ap, qc, status, 0);
@@ -1904,7 +1884,7 @@ EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
  * ata_sff_freeze - Freeze SFF controller port
  * @ap: port to freeze
  *
- * Freeze BMDMA controller port.
+ * Freeze SFF controller port.
  *
  * LOCKING:
  * Inherited from caller.
@@ -2533,208 +2513,8 @@ void ata_sff_std_ports(struct ata_ioports *ioaddr)
 }
 EXPORT_SYMBOL_GPL(ata_sff_std_ports);
 
-unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
-				    unsigned long xfer_mask)
-{
-	/* Filter out DMA modes if the device has been configured by
-	   the BIOS as PIO only */
-
-	if (adev->link->ap->ioaddr.bmdma_addr == NULL)
-		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
-	return xfer_mask;
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
-
-/**
- * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_bmdma_setup(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
-	u8 dmactl;
-
-	/* load PRD table addr. */
-	mb();	/* make sure PRD table writes are visible to controller */
-	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
-
-	/* specify data direction, triple-check start bit is clear */
-	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
-	if (!rw)
-		dmactl |= ATA_DMA_WR;
-	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
-	/* issue r/w command */
-	ap->ops->sff_exec_command(ap, &qc->tf);
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_setup);
-
-/**
- * ata_bmdma_start - Start a PCI IDE BMDMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_bmdma_start(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	u8 dmactl;
-
-	/* start host DMA transaction */
-	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
-	/* Strictly, one may wish to issue an ioread8() here, to
-	 * flush the mmio write.  However, control also passes
-	 * to the hardware at this point, and it will interrupt
-	 * us when we are to resume control.  So, in effect,
-	 * we don't care when the mmio write flushes.
-	 * Further, a read of the DMA status register _immediately_
-	 * following the write may not be what certain flaky hardware
-	 * is expecting, so I think it is best to not add a readb()
-	 * without first checking all the MMIO ATA cards/mobos.
-	 * Or maybe I'm just being paranoid.
-	 *
-	 * FIXME: The posting of this write means I/O starts are
-	 * unnecessarily delayed for MMIO
-	 */
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_start);
-
-/**
- * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
- * @qc: Command we are ending DMA for
- *
- * Clears the ATA_DMA_START flag in the dma control register
- *
- * May be used as the bmdma_stop() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_bmdma_stop(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
-	/* clear start/stop bit */
-	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
-		 mmio + ATA_DMA_CMD);
-
-	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_sff_dma_pause(ap);
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_stop);
-
-/**
- * ata_bmdma_status - Read PCI IDE BMDMA status
- * @ap: Port associated with this ATA transaction.
- *
- * Read and return BMDMA status register.
- *
- * May be used as the bmdma_status() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-u8 ata_bmdma_status(struct ata_port *ap)
-{
-	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_status);
-
 #ifdef CONFIG_PCI
 
-/**
- * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
- * @pdev: PCI device
- *
- * Some PCI ATA devices report simplex mode but in fact can be told to
- * enter non simplex mode. This implements the necessary logic to
- * perform the task on such devices. Calling it on other devices will
- * have -undefined- behaviour.
- */
-int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
-{
-	unsigned long bmdma = pci_resource_start(pdev, 4);
-	u8 simplex;
-
-	if (bmdma == 0)
-		return -ENOENT;
-
-	simplex = inb(bmdma + 0x02);
-	outb(simplex & 0x60, bmdma + 0x02);
-	simplex = inb(bmdma + 0x02);
-	if (simplex & 0x80)
-		return -EOPNOTSUPP;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
-
-/**
- * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
- * @host: target ATA host
- *
- * Acquire PCI BMDMA resources and initialize @host accordingly.
- *
- * LOCKING:
- * Inherited from calling layer (may sleep).
- *
- * RETURNS:
- * 0 on success, -errno otherwise.
- */
-int ata_pci_bmdma_init(struct ata_host *host)
-{
-	struct device *gdev = host->dev;
-	struct pci_dev *pdev = to_pci_dev(gdev);
-	int i, rc;
-
-	/* No BAR4 allocation: No DMA */
-	if (pci_resource_start(pdev, 4) == 0)
-		return 0;
-
-	/* TODO: If we get no DMA mask we should fall back to PIO */
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		return rc;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		return rc;
-
-	/* request and iomap DMA region */
-	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
-	if (rc) {
-		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
-		return -ENOMEM;
-	}
-	host->iomap = pcim_iomap_table(pdev);
-
-	for (i = 0; i < 2; i++) {
-		struct ata_port *ap = host->ports[i];
-		void __iomem *bmdma = host->iomap[4] + 8 * i;
-
-		if (ata_port_is_dummy(ap))
-			continue;
-
-		ap->ioaddr.bmdma_addr = bmdma;
-		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
-		    (ioread8(bmdma + 2) & 0x80))
-			host->flags |= ATA_HOST_SIMPLEX;
-
-		ata_port_desc(ap, "bmdma 0x%llx",
-			      (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
-
 static int ata_resources_present(struct pci_dev *pdev, int port)
 {
 	int i;
@@ -3059,3 +2839,227 @@ out:
 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
 
 #endif /* CONFIG_PCI */
+
+const struct ata_port_operations ata_bmdma_port_ops = {
+	.inherits		= &ata_sff_port_ops,
+
+	.mode_filter		= ata_bmdma_mode_filter,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+};
+EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
+
+const struct ata_port_operations ata_bmdma32_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+
+	.sff_data_xfer		= ata_sff_data_xfer32,
+	.port_start		= ata_sff_port_start32,
+};
+EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
+
+unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
+				    unsigned long xfer_mask)
+{
+	/* Filter out DMA modes if the device has been configured by
+	   the BIOS as PIO only */
+
+	if (adev->link->ap->ioaddr.bmdma_addr == NULL)
+		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+	return xfer_mask;
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
+
+/**
+ * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+
+	/* load PRD table addr. */
+	mb();	/* make sure PRD table writes are visible to controller */
+	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_setup);
+
+/**
+ * ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* Strictly, one may wish to issue an ioread8() here, to
+	 * flush the mmio write.  However, control also passes
+	 * to the hardware at this point, and it will interrupt
+	 * us when we are to resume control.  So, in effect,
+	 * we don't care when the mmio write flushes.
+	 * Further, a read of the DMA status register _immediately_
+	 * following the write may not be what certain flaky hardware
+	 * is expecting, so I think it is best to not add a readb()
+	 * without first checking all the MMIO ATA cards/mobos.
+	 * Or maybe I'm just being paranoid.
+	 *
+	 * FIXME: The posting of this write means I/O starts are
+	 * unnecessarily delayed for MMIO
+	 */
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_start);
+
+/**
+ * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
+ * @qc: Command we are ending DMA for
+ *
+ * Clears the ATA_DMA_START flag in the dma control register
+ *
+ * May be used as the bmdma_stop() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	/* clear start/stop bit */
+	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
+		 mmio + ATA_DMA_CMD);
+
+	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+	ata_sff_dma_pause(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_stop);
+
+/**
+ * ata_bmdma_status - Read PCI IDE BMDMA status
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Read and return BMDMA status register.
+ *
+ * May be used as the bmdma_status() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+u8 ata_bmdma_status(struct ata_port *ap)
+{
+	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_status);
+
+#ifdef CONFIG_PCI
+
+/**
+ * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
+ * @pdev: PCI device
+ *
+ * Some PCI ATA devices report simplex mode but in fact can be told to
+ * enter non simplex mode. This implements the necessary logic to
+ * perform the task on such devices. Calling it on other devices will
+ * have -undefined- behaviour.
+ */
+int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
+{
+	unsigned long bmdma = pci_resource_start(pdev, 4);
+	u8 simplex;
+
+	if (bmdma == 0)
+		return -ENOENT;
+
+	simplex = inb(bmdma + 0x02);
+	outb(simplex & 0x60, bmdma + 0x02);
+	simplex = inb(bmdma + 0x02);
+	if (simplex & 0x80)
+		return -EOPNOTSUPP;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
+
+/**
+ * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
+ * @host: target ATA host
+ *
+ * Acquire PCI BMDMA resources and initialize @host accordingly.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_pci_bmdma_init(struct ata_host *host)
+{
+	struct device *gdev = host->dev;
+	struct pci_dev *pdev = to_pci_dev(gdev);
+	int i, rc;
+
+	/* No BAR4 allocation: No DMA */
+	if (pci_resource_start(pdev, 4) == 0)
+		return 0;
+
+	/* TODO: If we get no DMA mask we should fall back to PIO */
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	/* request and iomap DMA region */
+	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
+	if (rc) {
+		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
+		return -ENOMEM;
+	}
+	host->iomap = pcim_iomap_table(pdev);
+
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+		void __iomem *bmdma = host->iomap[4] + 8 * i;
+
+		if (ata_port_is_dummy(ap))
+			continue;
+
+		ap->ioaddr.bmdma_addr = bmdma;
+		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
+		    (ioread8(bmdma + 2) & 0x80))
+			host->flags |= ATA_HOST_SIMPLEX;
+
+		ata_port_desc(ap, "bmdma 0x%llx",
+			      (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
+
+#endif /* CONFIG_PCI */
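
For context, a low-level PATA driver consumes the exports being regrouped here by inheriting ata_bmdma_port_ops and overriding only the slots its hardware needs; everything else falls through to the ata_sff_*/ata_bmdma_* helpers above. The following is a minimal sketch against the libata API of this era (four-argument ata_pci_sff_init_one()); the my_* names and the vendor/device ID are hypothetical and not part of this patch:

/* Hypothetical example driver; not part of this commit. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>

/* Hypothetical per-chip timing hook; a real driver would program
 * controller-specific MWDMA/UDMA timing registers here. */
static void my_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
}

static struct scsi_host_template my_sht = {
	ATA_BMDMA_SHT("my_pata"),
};

/* Inherit the BMDMA ops regrouped by this patch; only .set_dmamode
 * is overridden, everything else falls through to libata-sff.c. */
static struct ata_port_operations my_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.set_dmamode	= my_set_dmamode,
};

static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &my_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	/* ata_pci_sff_init_one() maps the BARs, calls
	 * ata_pci_bmdma_init() when BAR4 is present, and
	 * registers the host. */
	return ata_pci_sff_init_one(pdev, ppi, &my_sht, NULL);
}

static const struct pci_device_id my_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical ID */
	{ }
};
MODULE_DEVICE_TABLE(pci, my_ids);

static struct pci_driver my_pci_driver = {
	.name		= "my_pata",
	.id_table	= my_ids,
	.probe		= my_init_one,
	.remove		= ata_pci_remove_one,
};

static int __init my_init(void)
{
	return pci_register_driver(&my_pci_driver);
}

static void __exit my_exit(void)
{
	pci_unregister_driver(&my_pci_driver);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Because .inherits chains through ata_bmdma_port_ops to ata_sff_port_ops, such a driver picks up the mode filter, the bmdma_setup/start/stop/status methods, and the SFF taskfile handlers without naming any of them, which is why grouping those functions together in libata-sff.c makes the layering easier to follow.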