Diffstat (limited to 'drivers')

-rw-r--r--  drivers/ata/ata_piix.c          |   7
-rw-r--r--  drivers/ata/libata-core.c       |   2
-rw-r--r--  drivers/ata/libata-transport.c  |   1
-rw-r--r--  drivers/ata/pata_bf54x.c        | 167
-rw-r--r--  drivers/ata/sata_fsl.c          |  11

5 files changed, 109 insertions, 79 deletions
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 69ac373c72ab..fdf27b9fce43 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1117,6 +1117,13 @@ static int piix_broken_suspend(void)
                 },
         },
         {
+                .ident = "Satellite Pro A120",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
+                },
+        },
+        {
                 .ident = "Portege M500",
                 .matches = {
                         DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
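Note: the hunk above only adds one more entry to the DMI quirk table consulted by piix_broken_suspend(). As a rough standalone model of how such a vendor/product table is matched (the struct, the exact-string comparison and main() are illustrative assumptions, not the kernel's dmi_check_system() machinery):

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's dmi_system_id/DMI_MATCH tables. */
struct suspend_quirk {
        const char *ident;
        const char *sys_vendor;
        const char *product_name;
};

static const struct suspend_quirk piix_broken_suspend_quirks[] = {
        { "Satellite Pro A120", "TOSHIBA", "Satellite Pro A120" },
        { NULL, NULL, NULL }    /* sentinel */
};

/* Return 1 if the firmware-reported vendor/product hits a quirk entry. */
static int piix_has_broken_suspend(const char *vendor, const char *product)
{
        const struct suspend_quirk *q;

        for (q = piix_broken_suspend_quirks; q->ident; q++)
                if (strcmp(vendor, q->sys_vendor) == 0 &&
                    strcmp(product, q->product_name) == 0)
                        return 1;
        return 0;
}

int main(void)
{
        printf("%d\n", piix_has_broken_suspend("TOSHIBA", "Satellite Pro A120"));
        return 0;
}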
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 11c9aea4f4f7..c06e0ec11556 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4125,6 +4125,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
          * device and controller are SATA.
          */
         { "PIONEER DVD-RW DVRTD08",  NULL, ATA_HORKAGE_NOSETXFER },
+        { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
+        { "PIONEER DVD-RW DVR-215",  NULL, ATA_HORKAGE_NOSETXFER },
         { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
         { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
 
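Note: the two new PIONEER models extend the NOSETXFER blacklist, so libata skips the SET XFER MODE command for those drives. Below is a minimal standalone sketch of a model-string lookup returning horkage flags; the flag value, exact-match comparison and main() are assumptions for illustration, and the real table also carries a firmware-revision field (NULL here, meaning any revision):

#include <stdio.h>
#include <string.h>

#define ATA_HORKAGE_NOSETXFER   (1U << 14)      /* value assumed for illustration */

struct blacklist_entry {
        const char *model_num;
        unsigned int horkage;
};

static const struct blacklist_entry blacklist[] = {
        { "PIONEER DVD-RW DVRTD08",  ATA_HORKAGE_NOSETXFER },
        { "PIONEER DVD-RW DVRTD08A", ATA_HORKAGE_NOSETXFER },
        { "PIONEER DVD-RW DVR-215",  ATA_HORKAGE_NOSETXFER },
        { "PIONEER DVD-RW DVR-212D", ATA_HORKAGE_NOSETXFER },
        { "PIONEER DVD-RW DVR-216D", ATA_HORKAGE_NOSETXFER },
        { NULL, 0 }     /* sentinel */
};

/* Look up horkage flags for an IDENTIFY model string (exact match here;
 * the kernel's lookup also consults the firmware revision, omitted). */
static unsigned int lookup_horkage(const char *model)
{
        const struct blacklist_entry *e;

        for (e = blacklist; e->model_num; e++)
                if (strcmp(model, e->model_num) == 0)
                        return e->horkage;
        return 0;
}

int main(void)
{
        printf("%#x\n", lookup_horkage("PIONEER DVD-RW DVR-215"));
        return 0;
}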
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 9a7f0ea565df..74aaee30e264 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -291,6 +291,7 @@ int ata_tport_add(struct device *parent,
                 goto tport_err;
         }
 
+        device_enable_async_suspend(dev);
         pm_runtime_set_active(dev);
         pm_runtime_enable(dev);
 
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index d6a4677fdf71..1e65842e2ca7 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -251,6 +251,8 @@ static const u32 udma_tenvmin = 20;
 static const u32 udma_tackmin = 20;
 static const u32 udma_tssmin = 50;
 
+#define BFIN_MAX_SG_SEGMENTS 4
+
 /**
  *
  * Function: num_clocks_min
@@ -829,79 +831,61 @@ static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
 
 static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
 {
-        unsigned short config = WDSIZE_16;
+        struct ata_port *ap = qc->ap;
+        struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd;
+        void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+        unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN;
         struct scatterlist *sg;
         unsigned int si;
+        unsigned int channel;
+        unsigned int dir;
+        unsigned int size = 0;
 
         dev_dbg(qc->ap->dev, "in atapi dma setup\n");
         /* Program the ATA_CTRL register with dir */
         if (qc->tf.flags & ATA_TFLAG_WRITE) {
-                /* fill the ATAPI DMA controller */
-                set_dma_config(CH_ATAPI_TX, config);
-                set_dma_x_modify(CH_ATAPI_TX, 2);
-                for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                        set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
-                        set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
-                }
+                channel = CH_ATAPI_TX;
+                dir = DMA_TO_DEVICE;
         } else {
+                channel = CH_ATAPI_RX;
+                dir = DMA_FROM_DEVICE;
                 config |= WNR;
-                /* fill the ATAPI DMA controller */
-                set_dma_config(CH_ATAPI_RX, config);
-                set_dma_x_modify(CH_ATAPI_RX, 2);
-                for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                        set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
-                        set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
-                }
         }
-}
 
-/**
- * bfin_bmdma_start - Start an IDE DMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * Note: Original code is ata_bmdma_start().
- */
+        dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir);
 
-static void bfin_bmdma_start(struct ata_queued_cmd *qc)
-{
-        struct ata_port *ap = qc->ap;
-        void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
-        struct scatterlist *sg;
-        unsigned int si;
+        /* fill the ATAPI DMA controller */
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
+                dma_desc_cpu[si].start_addr = sg_dma_address(sg);
+                dma_desc_cpu[si].cfg = config;
+                dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1;
+                dma_desc_cpu[si].x_modify = 2;
+                size += sg_dma_len(sg);
+        }
 
-        dev_dbg(qc->ap->dev, "in atapi dma start\n");
-        if (!(ap->udma_mask || ap->mwdma_mask))
-                return;
+        /* Set the last descriptor to stop mode */
+        dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE);
 
-        /* start ATAPI DMA controller*/
-        if (qc->tf.flags & ATA_TFLAG_WRITE) {
-                /*
-                 * On blackfin arch, uncacheable memory is not
-                 * allocated with flag GFP_DMA. DMA buffer from
-                 * common kenel code should be flushed if WB
-                 * data cache is enabled. Otherwise, this loop
-                 * is an empty loop and optimized out.
-                 */
-                for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                        flush_dcache_range(sg_dma_address(sg),
-                                sg_dma_address(sg) + sg_dma_len(sg));
-                }
-                enable_dma(CH_ATAPI_TX);
-                dev_dbg(qc->ap->dev, "enable udma write\n");
+        flush_dcache_range((unsigned int)dma_desc_cpu,
+                (unsigned int)dma_desc_cpu +
+                        qc->n_elem * sizeof(struct dma_desc_array));
 
-                /* Send ATA DMA write command */
-                bfin_exec_command(ap, &qc->tf);
+        /* Enable ATA DMA operation*/
+        set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma);
+        set_dma_x_count(channel, 0);
+        set_dma_x_modify(channel, 0);
+        set_dma_config(channel, config);
+
+        SSYNC();
+
+        /* Send ATA DMA command */
+        bfin_exec_command(ap, &qc->tf);
 
+        if (qc->tf.flags & ATA_TFLAG_WRITE) {
                 /* set ATA DMA write direction */
                 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
                         | XFER_DIR));
         } else {
-                enable_dma(CH_ATAPI_RX);
-                dev_dbg(qc->ap->dev, "enable udma read\n");
-
-                /* Send ATA DMA read command */
-                bfin_exec_command(ap, &qc->tf);
-
                 /* set ATA DMA read direction */
                 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
                         & ~XFER_DIR));
@@ -913,12 +897,28 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
         /* Set ATAPI state machine contorl in terminate sequence */
         ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
 
-        /* Set transfer length to buffer len */
-        for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
-        }
+        /* Set transfer length to the total size of sg buffers */
+        ATAPI_SET_XFER_LEN(base, size >> 1);
+}
 
-        /* Enable ATA DMA operation*/
+/**
+ * bfin_bmdma_start - Start an IDE DMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * Note: Original code is ata_bmdma_start().
+ */
+
+static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+{
+        struct ata_port *ap = qc->ap;
+        void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+        dev_dbg(qc->ap->dev, "in atapi dma start\n");
+
+        if (!(ap->udma_mask || ap->mwdma_mask))
+                return;
+
+        /* start ATAPI transfer*/
         if (ap->udma_mask)
                 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
                         | ULTRA_START);
@@ -935,34 +935,23 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
 {
         struct ata_port *ap = qc->ap;
-        struct scatterlist *sg;
-        unsigned int si;
+        unsigned int dir;
 
         dev_dbg(qc->ap->dev, "in atapi dma stop\n");
+
         if (!(ap->udma_mask || ap->mwdma_mask))
                 return;
 
         /* stop ATAPI DMA controller*/
-        if (qc->tf.flags & ATA_TFLAG_WRITE)
+        if (qc->tf.flags & ATA_TFLAG_WRITE) {
+                dir = DMA_TO_DEVICE;
                 disable_dma(CH_ATAPI_TX);
-        else {
+        } else {
+                dir = DMA_FROM_DEVICE;
                 disable_dma(CH_ATAPI_RX);
-                if (ap->hsm_task_state & HSM_ST_LAST) {
-                        /*
-                         * On blackfin arch, uncacheable memory is not
-                         * allocated with flag GFP_DMA. DMA buffer from
-                         * common kenel code should be invalidated if
-                         * data cache is enabled. Otherwise, this loop
-                         * is an empty loop and optimized out.
-                         */
-                        for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                                invalidate_dcache_range(
-                                        sg_dma_address(sg),
-                                        sg_dma_address(sg)
-                                        + sg_dma_len(sg));
-                        }
-                }
         }
+
+        dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir);
 }
 
 /**
@@ -1260,6 +1249,11 @@ static void bfin_port_stop(struct ata_port *ap)
 {
         dev_dbg(ap->dev, "in atapi port stop\n");
         if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
+                dma_free_coherent(ap->dev,
+                        BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+                        ap->bmdma_prd,
+                        ap->bmdma_prd_dma);
+
                 free_dma(CH_ATAPI_RX);
                 free_dma(CH_ATAPI_TX);
         }
@@ -1271,14 +1265,29 @@ static int bfin_port_start(struct ata_port *ap)
         if (!(ap->udma_mask || ap->mwdma_mask))
                 return 0;
 
+        ap->bmdma_prd = dma_alloc_coherent(ap->dev,
+                BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+                &ap->bmdma_prd_dma,
+                GFP_KERNEL);
+
+        if (ap->bmdma_prd == NULL) {
+                dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n");
+                goto out;
+        }
+
         if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
                 if (request_dma(CH_ATAPI_TX,
                         "BFIN ATAPI TX DMA") >= 0)
                         return 0;
 
                 free_dma(CH_ATAPI_RX);
+                dma_free_coherent(ap->dev,
+                        BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+                        ap->bmdma_prd,
+                        ap->bmdma_prd_dma);
         }
 
+out:
         ap->udma_mask = 0;
         ap->mwdma_mask = 0;
         dev_err(ap->dev, "Unable to request ATAPI DMA!"
@@ -1400,7 +1409,7 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
 
 static struct scsi_host_template bfin_sht = {
         ATA_BASE_SHT(DRV_NAME),
-        .sg_tablesize = SG_NONE,
+        .sg_tablesize = BFIN_MAX_SG_SEGMENTS,
         .dma_boundary = ATA_DMA_BOUNDARY,
 };
 
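Note: taken together, the pata_bf54x hunks switch bfin_bmdma_setup() from reprogramming the ATAPI DMA channel once per scatter/gather segment to building a descriptor array (up to BFIN_MAX_SG_SEGMENTS entries, allocated with dma_alloc_coherent() in bfin_port_start()) that the engine walks on its own via set_dma_curr_desc_addr(). The standalone sketch below models only the descriptor-building step; the struct layout, flag values and main() are illustrative, not the real Blackfin DMA definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the Blackfin dma_desc_array layout and flags. */
#define DMAFLOW_ARRAY   0x4000
#define NDSIZE_5        0x0500
#define WDSIZE_16       0x0004
#define DMAEN           0x0001
#define FLOW_NDSIZE_MASK        (DMAFLOW_ARRAY | NDSIZE_5)

struct desc {
        uint32_t start_addr;
        uint16_t cfg;
        uint16_t x_count;
        int16_t  x_modify;
};

struct seg {
        uint32_t addr;
        uint32_t len;   /* bytes */
};

/* One descriptor per s/g segment; clear the flow bits on the last one so
 * the engine stops after the final segment. Returns the total byte count,
 * which the driver passes to ATAPI_SET_XFER_LEN() as a 16-bit word count. */
static uint32_t build_desc_array(struct desc *d, const struct seg *sg, int n)
{
        uint16_t cfg = DMAFLOW_ARRAY | NDSIZE_5 | WDSIZE_16 | DMAEN;
        uint32_t total = 0;
        int i;

        for (i = 0; i < n; i++) {
                d[i].start_addr = sg[i].addr;
                d[i].cfg = cfg;
                d[i].x_count = sg[i].len >> 1;  /* 16-bit words */
                d[i].x_modify = 2;
                total += sg[i].len;
        }
        d[n - 1].cfg &= ~FLOW_NDSIZE_MASK;      /* stop mode on last descriptor */
        return total;
}

int main(void)
{
        struct seg sg[2] = { { 0x1000, 512 }, { 0x2000, 1024 } };
        struct desc d[2];

        printf("total bytes: %u\n", (unsigned int)build_desc_array(d, sg, 2));
        return 0;
}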
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 5a2c95ba050a..0120b0d1e9a5 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -140,6 +140,7 @@ enum {
          */
         HCONTROL_ONLINE_PHY_RST = (1 << 31),
         HCONTROL_FORCE_OFFLINE = (1 << 30),
+        HCONTROL_LEGACY = (1 << 28),
         HCONTROL_PARITY_PROT_MOD = (1 << 14),
         HCONTROL_DPATH_PARITY = (1 << 12),
         HCONTROL_SNOOP_ENABLE = (1 << 10),
@@ -1223,6 +1224,10 @@ static int sata_fsl_init_controller(struct ata_host *host)
          * part of the port_start() callback
          */
 
+        /* sata controller to operate in enterprise mode */
+        temp = ioread32(hcr_base + HCONTROL);
+        iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL);
+
         /* ack. any pending IRQs for this controller/port */
         temp = ioread32(hcr_base + HSTATUS);
         if (temp & 0x3F)
@@ -1421,6 +1426,12 @@ static int sata_fsl_resume(struct platform_device *op)
         /* Recovery the CHBA register in host controller cmd register set */
         iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
 
+        iowrite32((ioread32(hcr_base + HCONTROL)
+                | HCONTROL_ONLINE_PHY_RST
+                | HCONTROL_SNOOP_ENABLE
+                | HCONTROL_PMP_ATTACHED),
+                hcr_base + HCONTROL);
+
         ata_host_resume(host);
         return 0;
 }
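Note: the sata_fsl hunks define HCONTROL_LEGACY, clear it during controller init so the controller runs in enterprise mode, and re-assert the PHY-reset, snoop and PMP bits in HCONTROL on resume. A standalone model of that read-modify-write pattern follows; the register is emulated as a plain variable, the function names are made up for the sketch, and the HCONTROL_PMP_ATTACHED bit position is assumed:

#include <stdint.h>
#include <stdio.h>

#define HCONTROL_ONLINE_PHY_RST (1u << 31)
#define HCONTROL_LEGACY         (1u << 28)
#define HCONTROL_SNOOP_ENABLE   (1u << 10)
#define HCONTROL_PMP_ATTACHED   (1u << 9)       /* bit position assumed */

/* Stand-in for the memory-mapped HCONTROL register; the driver itself
 * accesses it with ioread32()/iowrite32() at hcr_base + HCONTROL. */
static uint32_t hcontrol;

/* init: drop legacy mode so the controller operates in enterprise mode */
static void init_hcontrol(void)
{
        hcontrol &= ~HCONTROL_LEGACY;
}

/* resume: re-assert the bits the suspend/power-down may have cleared */
static void resume_hcontrol(void)
{
        hcontrol |= HCONTROL_ONLINE_PHY_RST |
                    HCONTROL_SNOOP_ENABLE |
                    HCONTROL_PMP_ATTACHED;
}

int main(void)
{
        hcontrol = HCONTROL_LEGACY;     /* pretend reset-time default */
        init_hcontrol();
        resume_hcontrol();
        printf("HCONTROL = %#x\n", (unsigned int)hcontrol);
        return 0;
}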