Diffstat (limited to 'drivers/ata/pata_bf54x.c')
 drivers/ata/pata_bf54x.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
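
Note: the hunks below replace the driver's ata_for_each_sg() loops with the generic for_each_sg() scatterlist iterator, driven by qc->sg, qc->n_elem and a new loop counter si. The following is a minimal, self-contained sketch of that iteration pattern, not code from the patch; bfin_program_sg() is a hypothetical helper named only for illustration.

/*
 * Sketch of the for_each_sg() pattern used in the hunks below.
 * bfin_program_sg() is hypothetical; it only shows how qc->sg and
 * qc->n_elem drive the iterator.
 */
#include <linux/libata.h>
#include <linux/scatterlist.h>

static void bfin_program_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	unsigned int si;	/* per-iteration index required by for_each_sg() */

	/* Walk every DMA-mapped segment of the queued command. */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);	/* bus address of segment */
		unsigned int len = sg_dma_len(sg);	/* segment length in bytes */

		pr_debug("sg %u: addr=0x%llx len=%u\n",
			 si, (unsigned long long)addr, len);
	}
}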
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 41cd921082ba..a32e3c44a606 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -832,6 +832,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
 {
 	unsigned short config = WDSIZE_16;
 	struct scatterlist *sg;
+	unsigned int si;
 
 	pr_debug("in atapi dma setup\n");
 	/* Program the ATA_CTRL register with dir */
@@ -839,7 +840,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
 	/* fill the ATAPI DMA controller */
 	set_dma_config(CH_ATAPI_TX, config);
 	set_dma_x_modify(CH_ATAPI_TX, 2);
-	ata_for_each_sg(sg, qc) {
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
 		set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
 	}
@@ -848,7 +849,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
 	/* fill the ATAPI DMA controller */
 	set_dma_config(CH_ATAPI_RX, config);
 	set_dma_x_modify(CH_ATAPI_RX, 2);
-	ata_for_each_sg(sg, qc) {
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
 		set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
 	}
@@ -867,6 +868,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
 	struct scatterlist *sg;
+	unsigned int si;
 
 	pr_debug("in atapi dma start\n");
 	if (!(ap->udma_mask || ap->mwdma_mask))
@@ -881,7 +883,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 	 * data cache is enabled. Otherwise, this loop
 	 * is an empty loop and optimized out.
 	 */
-	ata_for_each_sg(sg, qc) {
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		flush_dcache_range(sg_dma_address(sg),
 			sg_dma_address(sg) + sg_dma_len(sg));
 	}
@@ -910,7 +912,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
 
 	/* Set transfer length to buffer len */
-	ata_for_each_sg(sg, qc) {
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
 	}
 
@@ -932,6 +934,7 @@ static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg;
+	unsigned int si;
 
 	pr_debug("in atapi dma stop\n");
 	if (!(ap->udma_mask || ap->mwdma_mask))
@@ -950,7 +953,7 @@ static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
 	 * data cache is enabled. Otherwise, this loop
 	 * is an empty loop and optimized out.
 	 */
-	ata_for_each_sg(sg, qc) {
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		invalidate_dcache_range(
 			sg_dma_address(sg),
 			sg_dma_address(sg)