diff options
author | Sonic Zhang <sonic.zhang@analog.com> | 2012-01-04 01:06:51 -0500 |
---|---|---|
committer | Jeff Garzik <jgarzik@redhat.com> | 2012-01-17 20:49:28 -0500 |
commit | 81b0287d341535ac722de891b19f7c49212ac91c (patch) | |
tree | 72c3b28073c3b021061f87e29aafddaaf9d3650b /drivers/ata/pata_bf54x.c | |
parent | 93272b132a72450dfc16f13d32223fe47aaf5061 (diff) |
[libata] pata_bf54x: Support sg list in bmdma transfer.
BF54x on-chip ATAPI controller allows maximum 0x1fffe bytes to be transferred
in one ATAPI transfer. So, set the max sg_tablesize to 4.
Signed-off-by: Sonic Zhang <sonic.zhang@analog.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/ata/pata_bf54x.c')
-rw-r--r-- | drivers/ata/pata_bf54x.c | 167 |
1 files changed, 88 insertions, 79 deletions
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index d6a4677fdf71..1e65842e2ca7 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c | |||
@@ -251,6 +251,8 @@ static const u32 udma_tenvmin = 20; | |||
251 | static const u32 udma_tackmin = 20; | 251 | static const u32 udma_tackmin = 20; |
252 | static const u32 udma_tssmin = 50; | 252 | static const u32 udma_tssmin = 50; |
253 | 253 | ||
254 | #define BFIN_MAX_SG_SEGMENTS 4 | ||
255 | |||
254 | /** | 256 | /** |
255 | * | 257 | * |
256 | * Function: num_clocks_min | 258 | * Function: num_clocks_min |
@@ -829,79 +831,61 @@ static void bfin_set_devctl(struct ata_port *ap, u8 ctl) | |||
829 | 831 | ||
830 | static void bfin_bmdma_setup(struct ata_queued_cmd *qc) | 832 | static void bfin_bmdma_setup(struct ata_queued_cmd *qc) |
831 | { | 833 | { |
832 | unsigned short config = WDSIZE_16; | 834 | struct ata_port *ap = qc->ap; |
835 | struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd; | ||
836 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
837 | unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN; | ||
833 | struct scatterlist *sg; | 838 | struct scatterlist *sg; |
834 | unsigned int si; | 839 | unsigned int si; |
840 | unsigned int channel; | ||
841 | unsigned int dir; | ||
842 | unsigned int size = 0; | ||
835 | 843 | ||
836 | dev_dbg(qc->ap->dev, "in atapi dma setup\n"); | 844 | dev_dbg(qc->ap->dev, "in atapi dma setup\n"); |
837 | /* Program the ATA_CTRL register with dir */ | 845 | /* Program the ATA_CTRL register with dir */ |
838 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | 846 | if (qc->tf.flags & ATA_TFLAG_WRITE) { |
839 | /* fill the ATAPI DMA controller */ | 847 | channel = CH_ATAPI_TX; |
840 | set_dma_config(CH_ATAPI_TX, config); | 848 | dir = DMA_TO_DEVICE; |
841 | set_dma_x_modify(CH_ATAPI_TX, 2); | ||
842 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | ||
843 | set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg)); | ||
844 | set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1); | ||
845 | } | ||
846 | } else { | 849 | } else { |
850 | channel = CH_ATAPI_RX; | ||
851 | dir = DMA_FROM_DEVICE; | ||
847 | config |= WNR; | 852 | config |= WNR; |
848 | /* fill the ATAPI DMA controller */ | ||
849 | set_dma_config(CH_ATAPI_RX, config); | ||
850 | set_dma_x_modify(CH_ATAPI_RX, 2); | ||
851 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | ||
852 | set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg)); | ||
853 | set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1); | ||
854 | } | ||
855 | } | 853 | } |
856 | } | ||
857 | 854 | ||
858 | /** | 855 | dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir); |
859 | * bfin_bmdma_start - Start an IDE DMA transaction | ||
860 | * @qc: Info associated with this ATA transaction. | ||
861 | * | ||
862 | * Note: Original code is ata_bmdma_start(). | ||
863 | */ | ||
864 | 856 | ||
865 | static void bfin_bmdma_start(struct ata_queued_cmd *qc) | 857 | /* fill the ATAPI DMA controller */ |
866 | { | 858 | for_each_sg(qc->sg, sg, qc->n_elem, si) { |
867 | struct ata_port *ap = qc->ap; | 859 | dma_desc_cpu[si].start_addr = sg_dma_address(sg); |
868 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | 860 | dma_desc_cpu[si].cfg = config; |
869 | struct scatterlist *sg; | 861 | dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1; |
870 | unsigned int si; | 862 | dma_desc_cpu[si].x_modify = 2; |
863 | size += sg_dma_len(sg); | ||
864 | } | ||
871 | 865 | ||
872 | dev_dbg(qc->ap->dev, "in atapi dma start\n"); | 866 | /* Set the last descriptor to stop mode */ |
873 | if (!(ap->udma_mask || ap->mwdma_mask)) | 867 | dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE); |
874 | return; | ||
875 | 868 | ||
876 | /* start ATAPI DMA controller*/ | 869 | flush_dcache_range((unsigned int)dma_desc_cpu, |
877 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | 870 | (unsigned int)dma_desc_cpu + |
878 | /* | 871 | qc->n_elem * sizeof(struct dma_desc_array)); |
879 | * On blackfin arch, uncacheable memory is not | ||
880 | * allocated with flag GFP_DMA. DMA buffer from | ||
881 | * common kenel code should be flushed if WB | ||
882 | * data cache is enabled. Otherwise, this loop | ||
883 | * is an empty loop and optimized out. | ||
884 | */ | ||
885 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | ||
886 | flush_dcache_range(sg_dma_address(sg), | ||
887 | sg_dma_address(sg) + sg_dma_len(sg)); | ||
888 | } | ||
889 | enable_dma(CH_ATAPI_TX); | ||
890 | dev_dbg(qc->ap->dev, "enable udma write\n"); | ||
891 | 872 | ||
892 | /* Send ATA DMA write command */ | 873 | /* Enable ATA DMA operation*/ |
893 | bfin_exec_command(ap, &qc->tf); | 874 | set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma); |
875 | set_dma_x_count(channel, 0); | ||
876 | set_dma_x_modify(channel, 0); | ||
877 | set_dma_config(channel, config); | ||
878 | |||
879 | SSYNC(); | ||
880 | |||
881 | /* Send ATA DMA command */ | ||
882 | bfin_exec_command(ap, &qc->tf); | ||
894 | 883 | ||
884 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | ||
895 | /* set ATA DMA write direction */ | 885 | /* set ATA DMA write direction */ |
896 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | 886 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) |
897 | | XFER_DIR)); | 887 | | XFER_DIR)); |
898 | } else { | 888 | } else { |
899 | enable_dma(CH_ATAPI_RX); | ||
900 | dev_dbg(qc->ap->dev, "enable udma read\n"); | ||
901 | |||
902 | /* Send ATA DMA read command */ | ||
903 | bfin_exec_command(ap, &qc->tf); | ||
904 | |||
905 | /* set ATA DMA read direction */ | 889 | /* set ATA DMA read direction */ |
906 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | 890 | ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) |
907 | & ~XFER_DIR)); | 891 | & ~XFER_DIR)); |
@@ -913,12 +897,28 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc) | |||
913 | /* Set ATAPI state machine contorl in terminate sequence */ | 897 | /* Set ATAPI state machine contorl in terminate sequence */ |
914 | ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM); | 898 | ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM); |
915 | 899 | ||
916 | /* Set transfer length to buffer len */ | 900 | /* Set transfer length to the total size of sg buffers */ |
917 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | 901 | ATAPI_SET_XFER_LEN(base, size >> 1); |
918 | ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1)); | 902 | } |
919 | } | ||
920 | 903 | ||
921 | /* Enable ATA DMA operation*/ | 904 | /** |
905 | * bfin_bmdma_start - Start an IDE DMA transaction | ||
906 | * @qc: Info associated with this ATA transaction. | ||
907 | * | ||
908 | * Note: Original code is ata_bmdma_start(). | ||
909 | */ | ||
910 | |||
911 | static void bfin_bmdma_start(struct ata_queued_cmd *qc) | ||
912 | { | ||
913 | struct ata_port *ap = qc->ap; | ||
914 | void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; | ||
915 | |||
916 | dev_dbg(qc->ap->dev, "in atapi dma start\n"); | ||
917 | |||
918 | if (!(ap->udma_mask || ap->mwdma_mask)) | ||
919 | return; | ||
920 | |||
921 | /* start ATAPI transfer*/ | ||
922 | if (ap->udma_mask) | 922 | if (ap->udma_mask) |
923 | ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | 923 | ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) |
924 | | ULTRA_START); | 924 | | ULTRA_START); |
@@ -935,34 +935,23 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc) | |||
935 | static void bfin_bmdma_stop(struct ata_queued_cmd *qc) | 935 | static void bfin_bmdma_stop(struct ata_queued_cmd *qc) |
936 | { | 936 | { |
937 | struct ata_port *ap = qc->ap; | 937 | struct ata_port *ap = qc->ap; |
938 | struct scatterlist *sg; | 938 | unsigned int dir; |
939 | unsigned int si; | ||
940 | 939 | ||
941 | dev_dbg(qc->ap->dev, "in atapi dma stop\n"); | 940 | dev_dbg(qc->ap->dev, "in atapi dma stop\n"); |
941 | |||
942 | if (!(ap->udma_mask || ap->mwdma_mask)) | 942 | if (!(ap->udma_mask || ap->mwdma_mask)) |
943 | return; | 943 | return; |
944 | 944 | ||
945 | /* stop ATAPI DMA controller*/ | 945 | /* stop ATAPI DMA controller*/ |
946 | if (qc->tf.flags & ATA_TFLAG_WRITE) | 946 | if (qc->tf.flags & ATA_TFLAG_WRITE) { |
947 | dir = DMA_TO_DEVICE; | ||
947 | disable_dma(CH_ATAPI_TX); | 948 | disable_dma(CH_ATAPI_TX); |
948 | else { | 949 | } else { |
950 | dir = DMA_FROM_DEVICE; | ||
949 | disable_dma(CH_ATAPI_RX); | 951 | disable_dma(CH_ATAPI_RX); |
950 | if (ap->hsm_task_state & HSM_ST_LAST) { | ||
951 | /* | ||
952 | * On blackfin arch, uncacheable memory is not | ||
953 | * allocated with flag GFP_DMA. DMA buffer from | ||
954 | * common kenel code should be invalidated if | ||
955 | * data cache is enabled. Otherwise, this loop | ||
956 | * is an empty loop and optimized out. | ||
957 | */ | ||
958 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | ||
959 | invalidate_dcache_range( | ||
960 | sg_dma_address(sg), | ||
961 | sg_dma_address(sg) | ||
962 | + sg_dma_len(sg)); | ||
963 | } | ||
964 | } | ||
965 | } | 952 | } |
953 | |||
954 | dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir); | ||
966 | } | 955 | } |
967 | 956 | ||
968 | /** | 957 | /** |
@@ -1260,6 +1249,11 @@ static void bfin_port_stop(struct ata_port *ap) | |||
1260 | { | 1249 | { |
1261 | dev_dbg(ap->dev, "in atapi port stop\n"); | 1250 | dev_dbg(ap->dev, "in atapi port stop\n"); |
1262 | if (ap->udma_mask != 0 || ap->mwdma_mask != 0) { | 1251 | if (ap->udma_mask != 0 || ap->mwdma_mask != 0) { |
1252 | dma_free_coherent(ap->dev, | ||
1253 | BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array), | ||
1254 | ap->bmdma_prd, | ||
1255 | ap->bmdma_prd_dma); | ||
1256 | |||
1263 | free_dma(CH_ATAPI_RX); | 1257 | free_dma(CH_ATAPI_RX); |
1264 | free_dma(CH_ATAPI_TX); | 1258 | free_dma(CH_ATAPI_TX); |
1265 | } | 1259 | } |
@@ -1271,14 +1265,29 @@ static int bfin_port_start(struct ata_port *ap) | |||
1271 | if (!(ap->udma_mask || ap->mwdma_mask)) | 1265 | if (!(ap->udma_mask || ap->mwdma_mask)) |
1272 | return 0; | 1266 | return 0; |
1273 | 1267 | ||
1268 | ap->bmdma_prd = dma_alloc_coherent(ap->dev, | ||
1269 | BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array), | ||
1270 | &ap->bmdma_prd_dma, | ||
1271 | GFP_KERNEL); | ||
1272 | |||
1273 | if (ap->bmdma_prd == NULL) { | ||
1274 | dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n"); | ||
1275 | goto out; | ||
1276 | } | ||
1277 | |||
1274 | if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) { | 1278 | if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) { |
1275 | if (request_dma(CH_ATAPI_TX, | 1279 | if (request_dma(CH_ATAPI_TX, |
1276 | "BFIN ATAPI TX DMA") >= 0) | 1280 | "BFIN ATAPI TX DMA") >= 0) |
1277 | return 0; | 1281 | return 0; |
1278 | 1282 | ||
1279 | free_dma(CH_ATAPI_RX); | 1283 | free_dma(CH_ATAPI_RX); |
1284 | dma_free_coherent(ap->dev, | ||
1285 | BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array), | ||
1286 | ap->bmdma_prd, | ||
1287 | ap->bmdma_prd_dma); | ||
1280 | } | 1288 | } |
1281 | 1289 | ||
1290 | out: | ||
1282 | ap->udma_mask = 0; | 1291 | ap->udma_mask = 0; |
1283 | ap->mwdma_mask = 0; | 1292 | ap->mwdma_mask = 0; |
1284 | dev_err(ap->dev, "Unable to request ATAPI DMA!" | 1293 | dev_err(ap->dev, "Unable to request ATAPI DMA!" |
@@ -1400,7 +1409,7 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance) | |||
1400 | 1409 | ||
1401 | static struct scsi_host_template bfin_sht = { | 1410 | static struct scsi_host_template bfin_sht = { |
1402 | ATA_BASE_SHT(DRV_NAME), | 1411 | ATA_BASE_SHT(DRV_NAME), |
1403 | .sg_tablesize = SG_NONE, | 1412 | .sg_tablesize = BFIN_MAX_SG_SEGMENTS, |
1404 | .dma_boundary = ATA_DMA_BOUNDARY, | 1413 | .dma_boundary = ATA_DMA_BOUNDARY, |
1405 | }; | 1414 | }; |
1406 | 1415 | ||