diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-13 19:42:16 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-13 19:42:16 -0400 |
commit | a1480a166dd509f25f90e824411cb488fa9fff7e (patch) | |
tree | f90633d00164827e002e79b8d26c130c0db25f33 /drivers/ata | |
parent | 45141eeafefdb8998d2ab1f87c2afe0457059b47 (diff) | |
parent | c54c719b558e0eb3ba60b1390aeb47ed25ff4352 (diff) |
Merge branch 'for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata
Pull libata updates from Tejun Heo:
- Hannes's patchset implements support for better error reporting
introduced by the new ATA command spec.
- the deprecated pci_ dma API usages have been replaced by dma_ ones.
- a bunch of hardware specific updates and some cleanups.
* 'for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata:
ata: remove deprecated use of pci api
ahci: st: st_configure_oob must be called after IP is clocked.
ahci: st: Update the ahci_st DT documentation
ahci: st: Update the DT example for how to obtain the PHY.
sata_dwc_460ex: indent an if statement
libata: Add tracepoints
libata-eh: Set 'information' field for autosense
libata: Implement support for sense data reporting
libata: Implement NCQ autosense
libata: use status bit definitions in ata_dump_status()
ide,ata: Rename ATA_IDX to ATA_SENSE
libata: whitespace fixes in ata_to_sense_error()
libata: whitespace cleanup in ata_get_cmd_descript()
libata: use READ_LOG_DMA_EXT
libata: remove ATA_FLAG_LOWTAG
sata_dwc_460ex: re-use hsdev->dev instead of dwc_dev
sata_dwc_460ex: move to generic DMA driver
sata_dwc_460ex: join messages back
sata: xgene: add ACPI support for APM X-Gene SATA ports
ata: sata_mv: add proper definitions for LP_PHY_CTL register values
Diffstat (limited to 'drivers/ata')
31 files changed, 673 insertions, 815 deletions
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index ae41107afc1f..b67e995179a9 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile | |||
@@ -111,7 +111,8 @@ obj-$(CONFIG_ATA_GENERIC) += ata_generic.o | |||
111 | # Should be last libata driver | 111 | # Should be last libata driver |
112 | obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o | 112 | obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o |
113 | 113 | ||
114 | libata-y := libata-core.o libata-scsi.o libata-eh.o libata-transport.o | 114 | libata-y := libata-core.o libata-scsi.o libata-eh.o \ |
115 | libata-transport.o libata-trace.o | ||
115 | libata-$(CONFIG_ATA_SFF) += libata-sff.o | 116 | libata-$(CONFIG_ATA_SFF) += libata-sff.o |
116 | libata-$(CONFIG_SATA_PMP) += libata-pmp.o | 117 | libata-$(CONFIG_SATA_PMP) += libata-pmp.o |
117 | libata-$(CONFIG_ATA_ACPI) += libata-acpi.o | 118 | libata-$(CONFIG_ATA_ACPI) += libata-acpi.o |
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index c962886d7e71..12489ce863c4 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c | |||
@@ -181,10 +181,10 @@ static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) | |||
181 | int rc; | 181 | int rc; |
182 | 182 | ||
183 | if (using_dac && | 183 | if (using_dac && |
184 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | 184 | !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { |
185 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 185 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); |
186 | if (rc) { | 186 | if (rc) { |
187 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 187 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
188 | if (rc) { | 188 | if (rc) { |
189 | dev_err(&pdev->dev, | 189 | dev_err(&pdev->dev, |
190 | "64-bit DMA enable failed\n"); | 190 | "64-bit DMA enable failed\n"); |
@@ -192,12 +192,12 @@ static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) | |||
192 | } | 192 | } |
193 | } | 193 | } |
194 | } else { | 194 | } else { |
195 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 195 | rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
196 | if (rc) { | 196 | if (rc) { |
197 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); | 197 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); |
198 | return rc; | 198 | return rc; |
199 | } | 199 | } |
200 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 200 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
201 | if (rc) { | 201 | if (rc) { |
202 | dev_err(&pdev->dev, | 202 | dev_err(&pdev->dev, |
203 | "32-bit consistent DMA enable failed\n"); | 203 | "32-bit consistent DMA enable failed\n"); |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 33bb06e006c9..c7a92a743ed0 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -738,10 +738,10 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) | |||
738 | return 0; | 738 | return 0; |
739 | 739 | ||
740 | if (using_dac && | 740 | if (using_dac && |
741 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | 741 | !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { |
742 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 742 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); |
743 | if (rc) { | 743 | if (rc) { |
744 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 744 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
745 | if (rc) { | 745 | if (rc) { |
746 | dev_err(&pdev->dev, | 746 | dev_err(&pdev->dev, |
747 | "64-bit DMA enable failed\n"); | 747 | "64-bit DMA enable failed\n"); |
@@ -749,12 +749,12 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) | |||
749 | } | 749 | } |
750 | } | 750 | } |
751 | } else { | 751 | } else { |
752 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 752 | rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
753 | if (rc) { | 753 | if (rc) { |
754 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); | 754 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); |
755 | return rc; | 755 | return rc; |
756 | } | 756 | } |
757 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 757 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
758 | if (rc) { | 758 | if (rc) { |
759 | dev_err(&pdev->dev, | 759 | dev_err(&pdev->dev, |
760 | "32-bit consistent DMA enable failed\n"); | 760 | "32-bit consistent DMA enable failed\n"); |
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c index bc971af262e7..ea0ff005b86c 100644 --- a/drivers/ata/ahci_st.c +++ b/drivers/ata/ahci_st.c | |||
@@ -68,8 +68,6 @@ static int st_ahci_deassert_resets(struct device *dev) | |||
68 | } | 68 | } |
69 | } | 69 | } |
70 | 70 | ||
71 | st_ahci_configure_oob(drv_data->hpriv->mmio); | ||
72 | |||
73 | if (drv_data->sw_rst) { | 71 | if (drv_data->sw_rst) { |
74 | err = reset_control_deassert(drv_data->sw_rst); | 72 | err = reset_control_deassert(drv_data->sw_rst); |
75 | if (err) { | 73 | if (err) { |
@@ -172,6 +170,8 @@ static int st_ahci_probe(struct platform_device *pdev) | |||
172 | if (err) | 170 | if (err) |
173 | return err; | 171 | return err; |
174 | 172 | ||
173 | st_ahci_configure_oob(drv_data->hpriv->mmio); | ||
174 | |||
175 | err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, | 175 | err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, |
176 | &ahci_platform_sht); | 176 | &ahci_platform_sht); |
177 | if (err) { | 177 | if (err) { |
@@ -222,6 +222,8 @@ static int st_ahci_resume(struct device *dev) | |||
222 | return err; | 222 | return err; |
223 | } | 223 | } |
224 | 224 | ||
225 | st_ahci_configure_oob(drv_data->hpriv->mmio); | ||
226 | |||
225 | return ahci_platform_resume_host(dev); | 227 | return ahci_platform_resume_host(dev); |
226 | } | 228 | } |
227 | #endif | 229 | #endif |
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index 2e8bb603e447..2b78510d94dd 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c | |||
@@ -22,6 +22,7 @@ | |||
22 | * NOTE: PM support is not currently available. | 22 | * NOTE: PM support is not currently available. |
23 | * | 23 | * |
24 | */ | 24 | */ |
25 | #include <linux/acpi.h> | ||
25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
26 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
27 | #include <linux/ahci_platform.h> | 28 | #include <linux/ahci_platform.h> |
@@ -718,6 +719,14 @@ disable_resources: | |||
718 | return rc; | 719 | return rc; |
719 | } | 720 | } |
720 | 721 | ||
722 | #ifdef CONFIG_ACPI | ||
723 | static const struct acpi_device_id xgene_ahci_acpi_match[] = { | ||
724 | { "APMC0D0D", }, | ||
725 | { } | ||
726 | }; | ||
727 | MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match); | ||
728 | #endif | ||
729 | |||
721 | static const struct of_device_id xgene_ahci_of_match[] = { | 730 | static const struct of_device_id xgene_ahci_of_match[] = { |
722 | {.compatible = "apm,xgene-ahci"}, | 731 | {.compatible = "apm,xgene-ahci"}, |
723 | {}, | 732 | {}, |
@@ -730,6 +739,7 @@ static struct platform_driver xgene_ahci_driver = { | |||
730 | .driver = { | 739 | .driver = { |
731 | .name = DRV_NAME, | 740 | .name = DRV_NAME, |
732 | .of_match_table = xgene_ahci_of_match, | 741 | .of_match_table = xgene_ahci_of_match, |
742 | .acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match), | ||
733 | }, | 743 | }, |
734 | }; | 744 | }; |
735 | 745 | ||
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 23dac3babfe3..f6cb1f1b30b7 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -70,6 +70,9 @@ | |||
70 | #include <linux/pm_runtime.h> | 70 | #include <linux/pm_runtime.h> |
71 | #include <linux/platform_device.h> | 71 | #include <linux/platform_device.h> |
72 | 72 | ||
73 | #define CREATE_TRACE_POINTS | ||
74 | #include <trace/events/libata.h> | ||
75 | |||
73 | #include "libata.h" | 76 | #include "libata.h" |
74 | #include "libata-transport.h" | 77 | #include "libata-transport.h" |
75 | 78 | ||
@@ -691,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) | |||
691 | * RETURNS: | 694 | * RETURNS: |
692 | * Block address read from @tf. | 695 | * Block address read from @tf. |
693 | */ | 696 | */ |
694 | u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) | 697 | u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev) |
695 | { | 698 | { |
696 | u64 block = 0; | 699 | u64 block = 0; |
697 | 700 | ||
698 | if (tf->flags & ATA_TFLAG_LBA) { | 701 | if (!dev || tf->flags & ATA_TFLAG_LBA) { |
699 | if (tf->flags & ATA_TFLAG_LBA48) { | 702 | if (tf->flags & ATA_TFLAG_LBA48) { |
700 | block |= (u64)tf->hob_lbah << 40; | 703 | block |= (u64)tf->hob_lbah << 40; |
701 | block |= (u64)tf->hob_lbam << 32; | 704 | block |= (u64)tf->hob_lbam << 32; |
@@ -2144,6 +2147,24 @@ static int ata_dev_config_ncq(struct ata_device *dev, | |||
2144 | return 0; | 2147 | return 0; |
2145 | } | 2148 | } |
2146 | 2149 | ||
2150 | static void ata_dev_config_sense_reporting(struct ata_device *dev) | ||
2151 | { | ||
2152 | unsigned int err_mask; | ||
2153 | |||
2154 | if (!ata_id_has_sense_reporting(dev->id)) | ||
2155 | return; | ||
2156 | |||
2157 | if (ata_id_sense_reporting_enabled(dev->id)) | ||
2158 | return; | ||
2159 | |||
2160 | err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1); | ||
2161 | if (err_mask) { | ||
2162 | ata_dev_dbg(dev, | ||
2163 | "failed to enable Sense Data Reporting, Emask 0x%x\n", | ||
2164 | err_mask); | ||
2165 | } | ||
2166 | } | ||
2167 | |||
2147 | /** | 2168 | /** |
2148 | * ata_dev_configure - Configure the specified ATA/ATAPI device | 2169 | * ata_dev_configure - Configure the specified ATA/ATAPI device |
2149 | * @dev: Target device to configure | 2170 | * @dev: Target device to configure |
@@ -2366,7 +2387,7 @@ int ata_dev_configure(struct ata_device *dev) | |||
2366 | dev->devslp_timing[i] = sata_setting[j]; | 2387 | dev->devslp_timing[i] = sata_setting[j]; |
2367 | } | 2388 | } |
2368 | } | 2389 | } |
2369 | 2390 | ata_dev_config_sense_reporting(dev); | |
2370 | dev->cdb_len = 16; | 2391 | dev->cdb_len = 16; |
2371 | } | 2392 | } |
2372 | 2393 | ||
@@ -4897,6 +4918,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
4897 | */ | 4918 | */ |
4898 | if (unlikely(ata_tag_internal(qc->tag))) { | 4919 | if (unlikely(ata_tag_internal(qc->tag))) { |
4899 | fill_result_tf(qc); | 4920 | fill_result_tf(qc); |
4921 | trace_ata_qc_complete_internal(qc); | ||
4900 | __ata_qc_complete(qc); | 4922 | __ata_qc_complete(qc); |
4901 | return; | 4923 | return; |
4902 | } | 4924 | } |
@@ -4907,6 +4929,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
4907 | */ | 4929 | */ |
4908 | if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { | 4930 | if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { |
4909 | fill_result_tf(qc); | 4931 | fill_result_tf(qc); |
4932 | trace_ata_qc_complete_failed(qc); | ||
4910 | ata_qc_schedule_eh(qc); | 4933 | ata_qc_schedule_eh(qc); |
4911 | return; | 4934 | return; |
4912 | } | 4935 | } |
@@ -4917,6 +4940,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
4917 | if (qc->flags & ATA_QCFLAG_RESULT_TF) | 4940 | if (qc->flags & ATA_QCFLAG_RESULT_TF) |
4918 | fill_result_tf(qc); | 4941 | fill_result_tf(qc); |
4919 | 4942 | ||
4943 | trace_ata_qc_complete_done(qc); | ||
4920 | /* Some commands need post-processing after successful | 4944 | /* Some commands need post-processing after successful |
4921 | * completion. | 4945 | * completion. |
4922 | */ | 4946 | */ |
@@ -5064,7 +5088,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc) | |||
5064 | } | 5088 | } |
5065 | 5089 | ||
5066 | ap->ops->qc_prep(qc); | 5090 | ap->ops->qc_prep(qc); |
5067 | 5091 | trace_ata_qc_issue(qc); | |
5068 | qc->err_mask |= ap->ops->qc_issue(qc); | 5092 | qc->err_mask |= ap->ops->qc_issue(qc); |
5069 | if (unlikely(qc->err_mask)) | 5093 | if (unlikely(qc->err_mask)) |
5070 | goto err; | 5094 | goto err; |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index d2029a462e2c..07f41be38fbe 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -46,6 +46,7 @@ | |||
46 | 46 | ||
47 | #include <linux/libata.h> | 47 | #include <linux/libata.h> |
48 | 48 | ||
49 | #include <trace/events/libata.h> | ||
49 | #include "libata.h" | 50 | #include "libata.h" |
50 | 51 | ||
51 | enum { | 52 | enum { |
@@ -1510,13 +1511,18 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log, | |||
1510 | DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page); | 1511 | DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page); |
1511 | 1512 | ||
1512 | ata_tf_init(dev, &tf); | 1513 | ata_tf_init(dev, &tf); |
1513 | tf.command = ATA_CMD_READ_LOG_EXT; | 1514 | if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id)) { |
1515 | tf.command = ATA_CMD_READ_LOG_DMA_EXT; | ||
1516 | tf.protocol = ATA_PROT_DMA; | ||
1517 | } else { | ||
1518 | tf.command = ATA_CMD_READ_LOG_EXT; | ||
1519 | tf.protocol = ATA_PROT_PIO; | ||
1520 | } | ||
1514 | tf.lbal = log; | 1521 | tf.lbal = log; |
1515 | tf.lbam = page; | 1522 | tf.lbam = page; |
1516 | tf.nsect = sectors; | 1523 | tf.nsect = sectors; |
1517 | tf.hob_nsect = sectors >> 8; | 1524 | tf.hob_nsect = sectors >> 8; |
1518 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; | 1525 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; |
1519 | tf.protocol = ATA_PROT_PIO; | ||
1520 | 1526 | ||
1521 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, | 1527 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, |
1522 | buf, sectors * ATA_SECT_SIZE, 0); | 1528 | buf, sectors * ATA_SECT_SIZE, 0); |
@@ -1575,6 +1581,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev, | |||
1575 | tf->hob_lbah = buf[10]; | 1581 | tf->hob_lbah = buf[10]; |
1576 | tf->nsect = buf[12]; | 1582 | tf->nsect = buf[12]; |
1577 | tf->hob_nsect = buf[13]; | 1583 | tf->hob_nsect = buf[13]; |
1584 | if (ata_id_has_ncq_autosense(dev->id)) | ||
1585 | tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16]; | ||
1578 | 1586 | ||
1579 | return 0; | 1587 | return 0; |
1580 | } | 1588 | } |
@@ -1611,6 +1619,70 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) | |||
1611 | } | 1619 | } |
1612 | 1620 | ||
1613 | /** | 1621 | /** |
1622 | * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT | ||
1623 | * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to | ||
1624 | * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) | ||
1625 | * @dfl_sense_key: default sense key to use | ||
1626 | * | ||
1627 | * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK | ||
1628 | * SENSE. This function is EH helper. | ||
1629 | * | ||
1630 | * LOCKING: | ||
1631 | * Kernel thread context (may sleep). | ||
1632 | * | ||
1633 | * RETURNS: | ||
1634 | * encoded sense data on success, 0 on failure or if sense data | ||
1635 | * is not available. | ||
1636 | */ | ||
1637 | static u32 ata_eh_request_sense(struct ata_queued_cmd *qc, | ||
1638 | struct scsi_cmnd *cmd) | ||
1639 | { | ||
1640 | struct ata_device *dev = qc->dev; | ||
1641 | struct ata_taskfile tf; | ||
1642 | unsigned int err_mask; | ||
1643 | |||
1644 | if (!cmd) | ||
1645 | return 0; | ||
1646 | |||
1647 | DPRINTK("ATA request sense\n"); | ||
1648 | ata_dev_warn(dev, "request sense\n"); | ||
1649 | if (!ata_id_sense_reporting_enabled(dev->id)) { | ||
1650 | ata_dev_warn(qc->dev, "sense data reporting disabled\n"); | ||
1651 | return 0; | ||
1652 | } | ||
1653 | ata_tf_init(dev, &tf); | ||
1654 | |||
1655 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | ||
1656 | tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; | ||
1657 | tf.command = ATA_CMD_REQ_SENSE_DATA; | ||
1658 | tf.protocol = ATA_PROT_NODATA; | ||
1659 | |||
1660 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); | ||
1661 | /* | ||
1662 | * ACS-4 states: | ||
1663 | * The device may set the SENSE DATA AVAILABLE bit to one in the | ||
1664 | * STATUS field and clear the ERROR bit to zero in the STATUS field | ||
1665 | * to indicate that the command returned completion without an error | ||
1666 | * and the sense data described in table 306 is available. | ||
1667 | * | ||
1668 | * IOW the 'ATA_SENSE' bit might not be set even though valid | ||
1669 | * sense data is available. | ||
1670 | * So check for both. | ||
1671 | */ | ||
1672 | if ((tf.command & ATA_SENSE) || | ||
1673 | tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) { | ||
1674 | ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal); | ||
1675 | qc->flags |= ATA_QCFLAG_SENSE_VALID; | ||
1676 | ata_dev_warn(dev, "sense data %02x/%02x/%02x\n", | ||
1677 | tf.lbah, tf.lbam, tf.lbal); | ||
1678 | } else { | ||
1679 | ata_dev_warn(dev, "request sense failed stat %02x emask %x\n", | ||
1680 | tf.command, err_mask); | ||
1681 | } | ||
1682 | return err_mask; | ||
1683 | } | ||
1684 | |||
1685 | /** | ||
1614 | * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE | 1686 | * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE |
1615 | * @dev: device to perform REQUEST_SENSE to | 1687 | * @dev: device to perform REQUEST_SENSE to |
1616 | * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) | 1688 | * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) |
@@ -1772,6 +1844,19 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) | |||
1772 | memcpy(&qc->result_tf, &tf, sizeof(tf)); | 1844 | memcpy(&qc->result_tf, &tf, sizeof(tf)); |
1773 | qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; | 1845 | qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; |
1774 | qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; | 1846 | qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; |
1847 | if (qc->result_tf.auxiliary) { | ||
1848 | char sense_key, asc, ascq; | ||
1849 | |||
1850 | sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; | ||
1851 | asc = (qc->result_tf.auxiliary >> 8) & 0xff; | ||
1852 | ascq = qc->result_tf.auxiliary & 0xff; | ||
1853 | ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n", | ||
1854 | sense_key, asc, ascq); | ||
1855 | ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq); | ||
1856 | ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf); | ||
1857 | qc->flags |= ATA_QCFLAG_SENSE_VALID; | ||
1858 | } | ||
1859 | |||
1775 | ehc->i.err_mask &= ~AC_ERR_DEV; | 1860 | ehc->i.err_mask &= ~AC_ERR_DEV; |
1776 | } | 1861 | } |
1777 | 1862 | ||
@@ -1801,6 +1886,27 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, | |||
1801 | return ATA_EH_RESET; | 1886 | return ATA_EH_RESET; |
1802 | } | 1887 | } |
1803 | 1888 | ||
1889 | /* | ||
1890 | * Sense data reporting does not work if the | ||
1891 | * device fault bit is set. | ||
1892 | */ | ||
1893 | if ((stat & ATA_SENSE) && !(stat & ATA_DF) && | ||
1894 | !(qc->flags & ATA_QCFLAG_SENSE_VALID)) { | ||
1895 | if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { | ||
1896 | tmp = ata_eh_request_sense(qc, qc->scsicmd); | ||
1897 | if (tmp) | ||
1898 | qc->err_mask |= tmp; | ||
1899 | else | ||
1900 | ata_scsi_set_sense_information(qc->scsicmd, tf); | ||
1901 | } else { | ||
1902 | ata_dev_warn(qc->dev, "sense data available but port frozen\n"); | ||
1903 | } | ||
1904 | } | ||
1905 | |||
1906 | /* Set by NCQ autosense or request sense above */ | ||
1907 | if (qc->flags & ATA_QCFLAG_SENSE_VALID) | ||
1908 | return 0; | ||
1909 | |||
1804 | if (stat & (ATA_ERR | ATA_DF)) | 1910 | if (stat & (ATA_ERR | ATA_DF)) |
1805 | qc->err_mask |= AC_ERR_DEV; | 1911 | qc->err_mask |= AC_ERR_DEV; |
1806 | else | 1912 | else |
@@ -2186,6 +2292,7 @@ static void ata_eh_link_autopsy(struct ata_link *link) | |||
2186 | all_err_mask |= qc->err_mask; | 2292 | all_err_mask |= qc->err_mask; |
2187 | if (qc->flags & ATA_QCFLAG_IO) | 2293 | if (qc->flags & ATA_QCFLAG_IO) |
2188 | eflags |= ATA_EFLAG_IS_IO; | 2294 | eflags |= ATA_EFLAG_IS_IO; |
2295 | trace_ata_eh_link_autopsy_qc(qc); | ||
2189 | } | 2296 | } |
2190 | 2297 | ||
2191 | /* enforce default EH actions */ | 2298 | /* enforce default EH actions */ |
@@ -2220,7 +2327,7 @@ static void ata_eh_link_autopsy(struct ata_link *link) | |||
2220 | eflags |= ATA_EFLAG_DUBIOUS_XFER; | 2327 | eflags |= ATA_EFLAG_DUBIOUS_XFER; |
2221 | ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); | 2328 | ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); |
2222 | } | 2329 | } |
2223 | 2330 | trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); | |
2224 | DPRINTK("EXIT\n"); | 2331 | DPRINTK("EXIT\n"); |
2225 | } | 2332 | } |
2226 | 2333 | ||
@@ -2289,27 +2396,27 @@ const char *ata_get_cmd_descript(u8 command) | |||
2289 | const char *text; | 2396 | const char *text; |
2290 | } cmd_descr[] = { | 2397 | } cmd_descr[] = { |
2291 | { ATA_CMD_DEV_RESET, "DEVICE RESET" }, | 2398 | { ATA_CMD_DEV_RESET, "DEVICE RESET" }, |
2292 | { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, | 2399 | { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, |
2293 | { ATA_CMD_STANDBY, "STANDBY" }, | 2400 | { ATA_CMD_STANDBY, "STANDBY" }, |
2294 | { ATA_CMD_IDLE, "IDLE" }, | 2401 | { ATA_CMD_IDLE, "IDLE" }, |
2295 | { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, | 2402 | { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, |
2296 | { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, | 2403 | { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, |
2297 | { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" }, | 2404 | { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" }, |
2298 | { ATA_CMD_NOP, "NOP" }, | 2405 | { ATA_CMD_NOP, "NOP" }, |
2299 | { ATA_CMD_FLUSH, "FLUSH CACHE" }, | 2406 | { ATA_CMD_FLUSH, "FLUSH CACHE" }, |
2300 | { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, | 2407 | { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, |
2301 | { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, | 2408 | { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, |
2302 | { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, | 2409 | { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, |
2303 | { ATA_CMD_SERVICE, "SERVICE" }, | 2410 | { ATA_CMD_SERVICE, "SERVICE" }, |
2304 | { ATA_CMD_READ, "READ DMA" }, | 2411 | { ATA_CMD_READ, "READ DMA" }, |
2305 | { ATA_CMD_READ_EXT, "READ DMA EXT" }, | 2412 | { ATA_CMD_READ_EXT, "READ DMA EXT" }, |
2306 | { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, | 2413 | { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, |
2307 | { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, | 2414 | { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, |
2308 | { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, | 2415 | { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, |
2309 | { ATA_CMD_WRITE, "WRITE DMA" }, | 2416 | { ATA_CMD_WRITE, "WRITE DMA" }, |
2310 | { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, | 2417 | { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, |
2311 | { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, | 2418 | { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, |
2312 | { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, | 2419 | { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, |
2313 | { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, | 2420 | { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, |
2314 | { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, | 2421 | { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, |
2315 | { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, | 2422 | { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, |
@@ -2325,7 +2432,7 @@ const char *ata_get_cmd_descript(u8 command) | |||
2325 | { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, | 2432 | { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, |
2326 | { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, | 2433 | { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, |
2327 | { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, | 2434 | { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, |
2328 | { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, | 2435 | { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, |
2329 | { ATA_CMD_SET_FEATURES, "SET FEATURES" }, | 2436 | { ATA_CMD_SET_FEATURES, "SET FEATURES" }, |
2330 | { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, | 2437 | { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, |
2331 | { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, | 2438 | { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, |
@@ -2342,12 +2449,12 @@ const char *ata_get_cmd_descript(u8 command) | |||
2342 | { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, | 2449 | { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, |
2343 | { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, | 2450 | { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, |
2344 | { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, | 2451 | { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, |
2345 | { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, | 2452 | { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, |
2346 | { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, | 2453 | { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, |
2347 | { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, | 2454 | { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, |
2348 | { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, | 2455 | { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, |
2349 | { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, | 2456 | { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, |
2350 | { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, | 2457 | { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, |
2351 | { ATA_CMD_PMP_READ, "READ BUFFER" }, | 2458 | { ATA_CMD_PMP_READ, "READ BUFFER" }, |
2352 | { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" }, | 2459 | { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" }, |
2353 | { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, | 2460 | { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, |
@@ -2364,12 +2471,12 @@ const char *ata_get_cmd_descript(u8 command) | |||
2364 | { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, | 2471 | { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, |
2365 | { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, | 2472 | { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, |
2366 | { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, | 2473 | { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, |
2367 | { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, | 2474 | { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, |
2368 | { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, | 2475 | { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, |
2369 | { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, | 2476 | { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, |
2370 | { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, | 2477 | { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, |
2371 | { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, | 2478 | { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, |
2372 | { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, | 2479 | { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, |
2373 | { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" }, | 2480 | { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" }, |
2374 | { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" }, | 2481 | { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" }, |
2375 | { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, | 2482 | { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, |
@@ -2543,14 +2650,15 @@ static void ata_eh_link_report(struct ata_link *link) | |||
2543 | 2650 | ||
2544 | #ifdef CONFIG_ATA_VERBOSE_ERROR | 2651 | #ifdef CONFIG_ATA_VERBOSE_ERROR |
2545 | if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | | 2652 | if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | |
2546 | ATA_ERR)) { | 2653 | ATA_SENSE | ATA_ERR)) { |
2547 | if (res->command & ATA_BUSY) | 2654 | if (res->command & ATA_BUSY) |
2548 | ata_dev_err(qc->dev, "status: { Busy }\n"); | 2655 | ata_dev_err(qc->dev, "status: { Busy }\n"); |
2549 | else | 2656 | else |
2550 | ata_dev_err(qc->dev, "status: { %s%s%s%s}\n", | 2657 | ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", |
2551 | res->command & ATA_DRDY ? "DRDY " : "", | 2658 | res->command & ATA_DRDY ? "DRDY " : "", |
2552 | res->command & ATA_DF ? "DF " : "", | 2659 | res->command & ATA_DF ? "DF " : "", |
2553 | res->command & ATA_DRQ ? "DRQ " : "", | 2660 | res->command & ATA_DRQ ? "DRQ " : "", |
2661 | res->command & ATA_SENSE ? "SENSE " : "", | ||
2554 | res->command & ATA_ERR ? "ERR " : ""); | 2662 | res->command & ATA_ERR ? "ERR " : ""); |
2555 | } | 2663 | } |
2556 | 2664 | ||
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index b061ba2c31d8..3131adcc1f87 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -270,13 +270,28 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR, | |||
270 | ata_scsi_park_show, ata_scsi_park_store); | 270 | ata_scsi_park_show, ata_scsi_park_store); |
271 | EXPORT_SYMBOL_GPL(dev_attr_unload_heads); | 271 | EXPORT_SYMBOL_GPL(dev_attr_unload_heads); |
272 | 272 | ||
273 | static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) | 273 | void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) |
274 | { | 274 | { |
275 | if (!cmd) | ||
276 | return; | ||
277 | |||
275 | cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; | 278 | cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; |
276 | 279 | ||
277 | scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq); | 280 | scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq); |
278 | } | 281 | } |
279 | 282 | ||
283 | void ata_scsi_set_sense_information(struct scsi_cmnd *cmd, | ||
284 | const struct ata_taskfile *tf) | ||
285 | { | ||
286 | u64 information; | ||
287 | |||
288 | if (!cmd) | ||
289 | return; | ||
290 | |||
291 | information = ata_tf_read_block(tf, NULL); | ||
292 | scsi_set_sense_information(cmd->sense_buffer, information); | ||
293 | } | ||
294 | |||
280 | static ssize_t | 295 | static ssize_t |
281 | ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr, | 296 | ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr, |
282 | const char *buf, size_t count) | 297 | const char *buf, size_t count) |
@@ -799,26 +814,27 @@ static void ata_dump_status(unsigned id, struct ata_taskfile *tf) | |||
799 | if (stat & ATA_BUSY) { | 814 | if (stat & ATA_BUSY) { |
800 | printk("Busy }\n"); /* Data is not valid in this case */ | 815 | printk("Busy }\n"); /* Data is not valid in this case */ |
801 | } else { | 816 | } else { |
802 | if (stat & 0x40) printk("DriveReady "); | 817 | if (stat & ATA_DRDY) printk("DriveReady "); |
803 | if (stat & 0x20) printk("DeviceFault "); | 818 | if (stat & ATA_DF) printk("DeviceFault "); |
804 | if (stat & 0x10) printk("SeekComplete "); | 819 | if (stat & ATA_DSC) printk("SeekComplete "); |
805 | if (stat & 0x08) printk("DataRequest "); | 820 | if (stat & ATA_DRQ) printk("DataRequest "); |
806 | if (stat & 0x04) printk("CorrectedError "); | 821 | if (stat & ATA_CORR) printk("CorrectedError "); |
807 | if (stat & 0x02) printk("Index "); | 822 | if (stat & ATA_SENSE) printk("Sense "); |
808 | if (stat & 0x01) printk("Error "); | 823 | if (stat & ATA_ERR) printk("Error "); |
809 | printk("}\n"); | 824 | printk("}\n"); |
810 | 825 | ||
811 | if (err) { | 826 | if (err) { |
812 | printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err); | 827 | printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err); |
813 | if (err & 0x04) printk("DriveStatusError "); | 828 | if (err & ATA_ABORTED) printk("DriveStatusError "); |
814 | if (err & 0x80) { | 829 | if (err & ATA_ICRC) { |
815 | if (err & 0x04) printk("BadCRC "); | 830 | if (err & ATA_ABORTED) |
831 | printk("BadCRC "); | ||
816 | else printk("Sector "); | 832 | else printk("Sector "); |
817 | } | 833 | } |
818 | if (err & 0x40) printk("UncorrectableError "); | 834 | if (err & ATA_UNC) printk("UncorrectableError "); |
819 | if (err & 0x10) printk("SectorIdNotFound "); | 835 | if (err & ATA_IDNF) printk("SectorIdNotFound "); |
820 | if (err & 0x02) printk("TrackZeroNotFound "); | 836 | if (err & ATA_TRK0NF) printk("TrackZeroNotFound "); |
821 | if (err & 0x01) printk("AddrMarkNotFound "); | 837 | if (err & ATA_AMNF) printk("AddrMarkNotFound "); |
822 | printk("}\n"); | 838 | printk("}\n"); |
823 | } | 839 | } |
824 | } | 840 | } |
@@ -849,40 +865,59 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, | |||
849 | /* Based on the 3ware driver translation table */ | 865 | /* Based on the 3ware driver translation table */ |
850 | static const unsigned char sense_table[][4] = { | 866 | static const unsigned char sense_table[][4] = { |
851 | /* BBD|ECC|ID|MAR */ | 867 | /* BBD|ECC|ID|MAR */ |
852 | {0xd1, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command | 868 | {0xd1, ABORTED_COMMAND, 0x00, 0x00}, |
869 | // Device busy Aborted command | ||
853 | /* BBD|ECC|ID */ | 870 | /* BBD|ECC|ID */ |
854 | {0xd0, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command | 871 | {0xd0, ABORTED_COMMAND, 0x00, 0x00}, |
872 | // Device busy Aborted command | ||
855 | /* ECC|MC|MARK */ | 873 | /* ECC|MC|MARK */ |
856 | {0x61, HARDWARE_ERROR, 0x00, 0x00}, // Device fault Hardware error | 874 | {0x61, HARDWARE_ERROR, 0x00, 0x00}, |
875 | // Device fault Hardware error | ||
857 | /* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */ | 876 | /* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */ |
858 | {0x84, ABORTED_COMMAND, 0x47, 0x00}, // Data CRC error SCSI parity error | 877 | {0x84, ABORTED_COMMAND, 0x47, 0x00}, |
878 | // Data CRC error SCSI parity error | ||
859 | /* MC|ID|ABRT|TRK0|MARK */ | 879 | /* MC|ID|ABRT|TRK0|MARK */ |
860 | {0x37, NOT_READY, 0x04, 0x00}, // Unit offline Not ready | 880 | {0x37, NOT_READY, 0x04, 0x00}, |
881 | // Unit offline Not ready | ||
861 | /* MCR|MARK */ | 882 | /* MCR|MARK */ |
862 | {0x09, NOT_READY, 0x04, 0x00}, // Unrecovered disk error Not ready | 883 | {0x09, NOT_READY, 0x04, 0x00}, |
884 | // Unrecovered disk error Not ready | ||
863 | /* Bad address mark */ | 885 | /* Bad address mark */ |
864 | {0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field | 886 | {0x01, MEDIUM_ERROR, 0x13, 0x00}, |
865 | /* TRK0 */ | 887 | // Address mark not found for data field |
866 | {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error | 888 | /* TRK0 - Track 0 not found */ |
889 | {0x02, HARDWARE_ERROR, 0x00, 0x00}, | ||
890 | // Hardware error | ||
867 | /* Abort: 0x04 is not translated here, see below */ | 891 | /* Abort: 0x04 is not translated here, see below */ |
868 | /* Media change request */ | 892 | /* Media change request */ |
869 | {0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline | 893 | {0x08, NOT_READY, 0x04, 0x00}, |
870 | /* SRV/IDNF */ | 894 | // FIXME: faking offline |
871 | {0x10, ILLEGAL_REQUEST, 0x21, 0x00}, // ID not found Logical address out of range | 895 | /* SRV/IDNF - ID not found */ |
872 | /* MC */ | 896 | {0x10, ILLEGAL_REQUEST, 0x21, 0x00}, |
873 | {0x20, UNIT_ATTENTION, 0x28, 0x00}, // Media Changed Not ready to ready change, medium may have changed | 897 | // Logical address out of range |
874 | /* ECC */ | 898 | /* MC - Media Changed */ |
875 | {0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error | 899 | {0x20, UNIT_ATTENTION, 0x28, 0x00}, |
900 | // Not ready to ready change, medium may have changed | ||
901 | /* ECC - Uncorrectable ECC error */ | ||
902 | {0x40, MEDIUM_ERROR, 0x11, 0x04}, | ||
903 | // Unrecovered read error | ||
876 | /* BBD - block marked bad */ | 904 | /* BBD - block marked bad */ |
877 | {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error | 905 | {0x80, MEDIUM_ERROR, 0x11, 0x04}, |
906 | // Block marked bad Medium error, unrecovered read error | ||
878 | {0xFF, 0xFF, 0xFF, 0xFF}, // END mark | 907 | {0xFF, 0xFF, 0xFF, 0xFF}, // END mark |
879 | }; | 908 | }; |
880 | static const unsigned char stat_table[][4] = { | 909 | static const unsigned char stat_table[][4] = { |
881 | /* Must be first because BUSY means no other bits valid */ | 910 | /* Must be first because BUSY means no other bits valid */ |
882 | {0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now | 911 | {0x80, ABORTED_COMMAND, 0x47, 0x00}, |
883 | {0x20, HARDWARE_ERROR, 0x44, 0x00}, // Device fault, internal target failure | 912 | // Busy, fake parity for now |
884 | {0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now | 913 | {0x40, ILLEGAL_REQUEST, 0x21, 0x04}, |
885 | {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered | 914 | // Device ready, unaligned write command |
915 | {0x20, HARDWARE_ERROR, 0x44, 0x00}, | ||
916 | // Device fault, internal target failure | ||
917 | {0x08, ABORTED_COMMAND, 0x47, 0x00}, | ||
918 | // Timed out in xfer, fake parity for now | ||
919 | {0x04, RECOVERED_ERROR, 0x11, 0x00}, | ||
920 | // Recovered ECC error Medium error, recovered | ||
886 | {0xFF, 0xFF, 0xFF, 0xFF}, // END mark | 921 | {0xFF, 0xFF, 0xFF, 0xFF}, // END mark |
887 | }; | 922 | }; |
888 | 923 | ||
@@ -1757,7 +1792,9 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) | |||
1757 | ((cdb[2] & 0x20) || need_sense)) { | 1792 | ((cdb[2] & 0x20) || need_sense)) { |
1758 | ata_gen_passthru_sense(qc); | 1793 | ata_gen_passthru_sense(qc); |
1759 | } else { | 1794 | } else { |
1760 | if (!need_sense) { | 1795 | if (qc->flags & ATA_QCFLAG_SENSE_VALID) { |
1796 | cmd->result = SAM_STAT_CHECK_CONDITION; | ||
1797 | } else if (!need_sense) { | ||
1761 | cmd->result = SAM_STAT_GOOD; | 1798 | cmd->result = SAM_STAT_GOOD; |
1762 | } else { | 1799 | } else { |
1763 | /* TODO: decide which descriptor format to use | 1800 | /* TODO: decide which descriptor format to use |
@@ -4240,10 +4277,7 @@ int ata_sas_allocate_tag(struct ata_port *ap) | |||
4240 | unsigned int i, tag; | 4277 | unsigned int i, tag; |
4241 | 4278 | ||
4242 | for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) { | 4279 | for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) { |
4243 | if (ap->flags & ATA_FLAG_LOWTAG) | 4280 | tag = tag < max_queue ? tag : 0; |
4244 | tag = 1; | ||
4245 | else | ||
4246 | tag = tag < max_queue ? tag : 0; | ||
4247 | 4281 | ||
4248 | /* the last tag is reserved for internal command. */ | 4282 | /* the last tag is reserved for internal command. */ |
4249 | if (tag == ATA_TAG_INTERNAL) | 4283 | if (tag == ATA_TAG_INTERNAL) |
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 2e86e3b85266..cdf6215a9a22 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
@@ -3220,11 +3220,11 @@ void ata_pci_bmdma_init(struct ata_host *host) | |||
3220 | * ->sff_irq_clear method. Try to initialize bmdma_addr | 3220 | * ->sff_irq_clear method. Try to initialize bmdma_addr |
3221 | * regardless of dma masks. | 3221 | * regardless of dma masks. |
3222 | */ | 3222 | */ |
3223 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 3223 | rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); |
3224 | if (rc) | 3224 | if (rc) |
3225 | ata_bmdma_nodma(host, "failed to set dma mask"); | 3225 | ata_bmdma_nodma(host, "failed to set dma mask"); |
3226 | if (!rc) { | 3226 | if (!rc) { |
3227 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | 3227 | rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); |
3228 | if (rc) | 3228 | if (rc) |
3229 | ata_bmdma_nodma(host, | 3229 | ata_bmdma_nodma(host, |
3230 | "failed to set consistent dma mask"); | 3230 | "failed to set consistent dma mask"); |
diff --git a/drivers/ata/libata-trace.c b/drivers/ata/libata-trace.c new file mode 100644 index 000000000000..fd30b8c10cf5 --- /dev/null +++ b/drivers/ata/libata-trace.c | |||
@@ -0,0 +1,151 @@ | |||
1 | /* | ||
2 | * libata-trace.c - trace functions for libata | ||
3 | * | ||
4 | * Copyright 2015 Hannes Reinecke | ||
5 | * Copyright 2015 SUSE Linux GmbH | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; see the file COPYING. If not, write to | ||
19 | * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/trace_seq.h> | ||
24 | #include <trace/events/libata.h> | ||
25 | |||
26 | const char * | ||
27 | libata_trace_parse_status(struct trace_seq *p, unsigned char status) | ||
28 | { | ||
29 | const char *ret = trace_seq_buffer_ptr(p); | ||
30 | |||
31 | trace_seq_printf(p, "{ "); | ||
32 | if (status & ATA_BUSY) | ||
33 | trace_seq_printf(p, "BUSY "); | ||
34 | if (status & ATA_DRDY) | ||
35 | trace_seq_printf(p, "DRDY "); | ||
36 | if (status & ATA_DF) | ||
37 | trace_seq_printf(p, "DF "); | ||
38 | if (status & ATA_DSC) | ||
39 | trace_seq_printf(p, "DSC "); | ||
40 | if (status & ATA_DRQ) | ||
41 | trace_seq_printf(p, "DRQ "); | ||
42 | if (status & ATA_CORR) | ||
43 | trace_seq_printf(p, "CORR "); | ||
44 | if (status & ATA_SENSE) | ||
45 | trace_seq_printf(p, "SENSE "); | ||
46 | if (status & ATA_ERR) | ||
47 | trace_seq_printf(p, "ERR "); | ||
48 | trace_seq_putc(p, '}'); | ||
49 | trace_seq_putc(p, 0); | ||
50 | |||
51 | return ret; | ||
52 | } | ||
53 | |||
54 | const char * | ||
55 | libata_trace_parse_eh_action(struct trace_seq *p, unsigned int eh_action) | ||
56 | { | ||
57 | const char *ret = trace_seq_buffer_ptr(p); | ||
58 | |||
59 | trace_seq_printf(p, "%x", eh_action); | ||
60 | if (eh_action) { | ||
61 | trace_seq_printf(p, "{ "); | ||
62 | if (eh_action & ATA_EH_REVALIDATE) | ||
63 | trace_seq_printf(p, "REVALIDATE "); | ||
64 | if (eh_action & (ATA_EH_SOFTRESET | ATA_EH_HARDRESET)) | ||
65 | trace_seq_printf(p, "RESET "); | ||
66 | else if (eh_action & ATA_EH_SOFTRESET) | ||
67 | trace_seq_printf(p, "SOFTRESET "); | ||
68 | else if (eh_action & ATA_EH_HARDRESET) | ||
69 | trace_seq_printf(p, "HARDRESET "); | ||
70 | if (eh_action & ATA_EH_ENABLE_LINK) | ||
71 | trace_seq_printf(p, "ENABLE_LINK "); | ||
72 | if (eh_action & ATA_EH_PARK) | ||
73 | trace_seq_printf(p, "PARK "); | ||
74 | trace_seq_putc(p, '}'); | ||
75 | } | ||
76 | trace_seq_putc(p, 0); | ||
77 | |||
78 | return ret; | ||
79 | } | ||
80 | |||
81 | const char * | ||
82 | libata_trace_parse_eh_err_mask(struct trace_seq *p, unsigned int eh_err_mask) | ||
83 | { | ||
84 | const char *ret = trace_seq_buffer_ptr(p); | ||
85 | |||
86 | trace_seq_printf(p, "%x", eh_err_mask); | ||
87 | if (eh_err_mask) { | ||
88 | trace_seq_printf(p, "{ "); | ||
89 | if (eh_err_mask & AC_ERR_DEV) | ||
90 | trace_seq_printf(p, "DEV "); | ||
91 | if (eh_err_mask & AC_ERR_HSM) | ||
92 | trace_seq_printf(p, "HSM "); | ||
93 | if (eh_err_mask & AC_ERR_TIMEOUT) | ||
94 | trace_seq_printf(p, "TIMEOUT "); | ||
95 | if (eh_err_mask & AC_ERR_MEDIA) | ||
96 | trace_seq_printf(p, "MEDIA "); | ||
97 | if (eh_err_mask & AC_ERR_ATA_BUS) | ||
98 | trace_seq_printf(p, "ATA_BUS "); | ||
99 | if (eh_err_mask & AC_ERR_HOST_BUS) | ||
100 | trace_seq_printf(p, "HOST_BUS "); | ||
101 | if (eh_err_mask & AC_ERR_SYSTEM) | ||
102 | trace_seq_printf(p, "SYSTEM "); | ||
103 | if (eh_err_mask & AC_ERR_INVALID) | ||
104 | trace_seq_printf(p, "INVALID "); | ||
105 | if (eh_err_mask & AC_ERR_OTHER) | ||
106 | trace_seq_printf(p, "OTHER "); | ||
107 | if (eh_err_mask & AC_ERR_NODEV_HINT) | ||
108 | trace_seq_printf(p, "NODEV_HINT "); | ||
109 | if (eh_err_mask & AC_ERR_NCQ) | ||
110 | trace_seq_printf(p, "NCQ "); | ||
111 | trace_seq_putc(p, '}'); | ||
112 | } | ||
113 | trace_seq_putc(p, 0); | ||
114 | |||
115 | return ret; | ||
116 | } | ||
117 | |||
118 | const char * | ||
119 | libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags) | ||
120 | { | ||
121 | const char *ret = trace_seq_buffer_ptr(p); | ||
122 | |||
123 | trace_seq_printf(p, "%x", qc_flags); | ||
124 | if (qc_flags) { | ||
125 | trace_seq_printf(p, "{ "); | ||
126 | if (qc_flags & ATA_QCFLAG_ACTIVE) | ||
127 | trace_seq_printf(p, "ACTIVE "); | ||
128 | if (qc_flags & ATA_QCFLAG_DMAMAP) | ||
129 | trace_seq_printf(p, "DMAMAP "); | ||
130 | if (qc_flags & ATA_QCFLAG_IO) | ||
131 | trace_seq_printf(p, "IO "); | ||
132 | if (qc_flags & ATA_QCFLAG_RESULT_TF) | ||
133 | trace_seq_printf(p, "RESULT_TF "); | ||
134 | if (qc_flags & ATA_QCFLAG_CLEAR_EXCL) | ||
135 | trace_seq_printf(p, "CLEAR_EXCL "); | ||
136 | if (qc_flags & ATA_QCFLAG_QUIET) | ||
137 | trace_seq_printf(p, "QUIET "); | ||
138 | if (qc_flags & ATA_QCFLAG_RETRY) | ||
139 | trace_seq_printf(p, "RETRY "); | ||
140 | if (qc_flags & ATA_QCFLAG_FAILED) | ||
141 | trace_seq_printf(p, "FAILED "); | ||
142 | if (qc_flags & ATA_QCFLAG_SENSE_VALID) | ||
143 | trace_seq_printf(p, "SENSE_VALID "); | ||
144 | if (qc_flags & ATA_QCFLAG_EH_SCHEDULED) | ||
145 | trace_seq_printf(p, "EH_SCHEDULED "); | ||
146 | trace_seq_putc(p, '}'); | ||
147 | } | ||
148 | trace_seq_putc(p, 0); | ||
149 | |||
150 | return ret; | ||
151 | } | ||
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index f840ca18a7c0..a998a175f9f1 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h | |||
@@ -67,7 +67,8 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag); | |||
67 | extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, | 67 | extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, |
68 | u64 block, u32 n_block, unsigned int tf_flags, | 68 | u64 block, u32 n_block, unsigned int tf_flags, |
69 | unsigned int tag); | 69 | unsigned int tag); |
70 | extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev); | 70 | extern u64 ata_tf_read_block(const struct ata_taskfile *tf, |
71 | struct ata_device *dev); | ||
71 | extern unsigned ata_exec_internal(struct ata_device *dev, | 72 | extern unsigned ata_exec_internal(struct ata_device *dev, |
72 | struct ata_taskfile *tf, const u8 *cdb, | 73 | struct ata_taskfile *tf, const u8 *cdb, |
73 | int dma_dir, void *buf, unsigned int buflen, | 74 | int dma_dir, void *buf, unsigned int buflen, |
@@ -137,6 +138,9 @@ extern int ata_scsi_add_hosts(struct ata_host *host, | |||
137 | struct scsi_host_template *sht); | 138 | struct scsi_host_template *sht); |
138 | extern void ata_scsi_scan_host(struct ata_port *ap, int sync); | 139 | extern void ata_scsi_scan_host(struct ata_port *ap, int sync); |
139 | extern int ata_scsi_offline_dev(struct ata_device *dev); | 140 | extern int ata_scsi_offline_dev(struct ata_device *dev); |
141 | extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq); | ||
142 | extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd, | ||
143 | const struct ata_taskfile *tf); | ||
140 | extern void ata_scsi_media_change_notify(struct ata_device *dev); | 144 | extern void ata_scsi_media_change_notify(struct ata_device *dev); |
141 | extern void ata_scsi_hotplug(struct work_struct *work); | 145 | extern void ata_scsi_hotplug(struct work_struct *work); |
142 | extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); | 146 | extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); |
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c index a705cfca90f7..3ea50dc5ea47 100644 --- a/drivers/ata/pata_atp867x.c +++ b/drivers/ata/pata_atp867x.c | |||
@@ -475,11 +475,11 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host) | |||
475 | 475 | ||
476 | atp867x_fixup(host); | 476 | atp867x_fixup(host); |
477 | 477 | ||
478 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 478 | rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); |
479 | if (rc) | 479 | if (rc) |
480 | return rc; | 480 | return rc; |
481 | 481 | ||
482 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | 482 | rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); |
483 | return rc; | 483 | return rc; |
484 | } | 484 | } |
485 | 485 | ||
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index d65cb9d2fa8c..4cb24070cc2d 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c | |||
@@ -164,11 +164,11 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
164 | return -ENODEV; | 164 | return -ENODEV; |
165 | } | 165 | } |
166 | 166 | ||
167 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { | 167 | if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { |
168 | printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n"); | 168 | printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n"); |
169 | return -ENODEV; | 169 | return -ENODEV; |
170 | } | 170 | } |
171 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { | 171 | if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { |
172 | printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n"); | 172 | printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n"); |
173 | return -ENODEV; | 173 | return -ENODEV; |
174 | } | 174 | } |
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c index d019cdd5bc9f..b2fc023783b1 100644 --- a/drivers/ata/pata_hpt3x3.c +++ b/drivers/ata/pata_hpt3x3.c | |||
@@ -221,10 +221,10 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
221 | if (rc) | 221 | if (rc) |
222 | return rc; | 222 | return rc; |
223 | host->iomap = pcim_iomap_table(pdev); | 223 | host->iomap = pcim_iomap_table(pdev); |
224 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 224 | rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); |
225 | if (rc) | 225 | if (rc) |
226 | return rc; | 226 | return rc; |
227 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | 227 | rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); |
228 | if (rc) | 228 | if (rc) |
229 | return rc; | 229 | return rc; |
230 | 230 | ||
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c index efb272da8567..633aa2934a18 100644 --- a/drivers/ata/pata_ninja32.c +++ b/drivers/ata/pata_ninja32.c | |||
@@ -122,10 +122,10 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
122 | return rc; | 122 | return rc; |
123 | 123 | ||
124 | host->iomap = pcim_iomap_table(dev); | 124 | host->iomap = pcim_iomap_table(dev); |
125 | rc = pci_set_dma_mask(dev, ATA_DMA_MASK); | 125 | rc = dma_set_mask(&dev->dev, ATA_DMA_MASK); |
126 | if (rc) | 126 | if (rc) |
127 | return rc; | 127 | return rc; |
128 | rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK); | 128 | rc = dma_set_coherent_mask(&dev->dev, ATA_DMA_MASK); |
129 | if (rc) | 129 | if (rc) |
130 | return rc; | 130 | return rc; |
131 | pci_set_master(dev); | 131 | pci_set_master(dev); |
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index dca8251b1aea..d9ef9e276225 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c | |||
@@ -730,11 +730,11 @@ static int pdc2027x_init_one(struct pci_dev *pdev, | |||
730 | return rc; | 730 | return rc; |
731 | host->iomap = pcim_iomap_table(pdev); | 731 | host->iomap = pcim_iomap_table(pdev); |
732 | 732 | ||
733 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 733 | rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); |
734 | if (rc) | 734 | if (rc) |
735 | return rc; | 735 | return rc; |
736 | 736 | ||
737 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | 737 | rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); |
738 | if (rc) | 738 | if (rc) |
739 | return rc; | 739 | return rc; |
740 | 740 | ||
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index 7f4cb76ed9fa..5cd60d6388ec 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c | |||
@@ -1029,10 +1029,10 @@ static int scc_host_init(struct ata_host *host) | |||
1029 | if (rc) | 1029 | if (rc) |
1030 | return rc; | 1030 | return rc; |
1031 | 1031 | ||
1032 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 1032 | rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); |
1033 | if (rc) | 1033 | if (rc) |
1034 | return rc; | 1034 | return rc; |
1035 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | 1035 | rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); |
1036 | if (rc) | 1036 | if (rc) |
1037 | return rc; | 1037 | return rc; |
1038 | 1038 | ||
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index f597edccedec..c14071be4f55 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c | |||
@@ -374,10 +374,10 @@ static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
374 | host->iomap = pcim_iomap_table(pdev); | 374 | host->iomap = pcim_iomap_table(pdev); |
375 | 375 | ||
376 | /* Setup DMA masks */ | 376 | /* Setup DMA masks */ |
377 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 377 | rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); |
378 | if (rc) | 378 | if (rc) |
379 | return rc; | 379 | return rc; |
380 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | 380 | rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); |
381 | if (rc) | 381 | if (rc) |
382 | return rc; | 382 | return rc; |
383 | pci_set_master(pdev); | 383 | pci_set_master(pdev); |
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index f10631beffa8..64d682c6ee57 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c | |||
@@ -593,12 +593,12 @@ static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base) | |||
593 | { | 593 | { |
594 | int rc; | 594 | int rc; |
595 | 595 | ||
596 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 596 | rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
597 | if (rc) { | 597 | if (rc) { |
598 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); | 598 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); |
599 | return rc; | 599 | return rc; |
600 | } | 600 | } |
601 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 601 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
602 | if (rc) { | 602 | if (rc) { |
603 | dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); | 603 | dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); |
604 | return rc; | 604 | return rc; |
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index fdb0f2879ea7..902034991517 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c | |||
@@ -36,11 +36,16 @@ | |||
36 | #include <linux/platform_device.h> | 36 | #include <linux/platform_device.h> |
37 | #include <linux/libata.h> | 37 | #include <linux/libata.h> |
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | |||
39 | #include "libata.h" | 40 | #include "libata.h" |
40 | 41 | ||
41 | #include <scsi/scsi_host.h> | 42 | #include <scsi/scsi_host.h> |
42 | #include <scsi/scsi_cmnd.h> | 43 | #include <scsi/scsi_cmnd.h> |
43 | 44 | ||
45 | /* Supported DMA engine drivers */ | ||
46 | #include <linux/platform_data/dma-dw.h> | ||
47 | #include <linux/dma/dw.h> | ||
48 | |||
44 | /* These two are defined in "libata.h" */ | 49 | /* These two are defined in "libata.h" */ |
45 | #undef DRV_NAME | 50 | #undef DRV_NAME |
46 | #undef DRV_VERSION | 51 | #undef DRV_VERSION |
@@ -60,153 +65,9 @@ | |||
60 | #define NO_IRQ 0 | 65 | #define NO_IRQ 0 |
61 | #endif | 66 | #endif |
62 | 67 | ||
63 | /* SATA DMA driver Globals */ | ||
64 | #define DMA_NUM_CHANS 1 | ||
65 | #define DMA_NUM_CHAN_REGS 8 | ||
66 | |||
67 | /* SATA DMA Register definitions */ | ||
68 | #define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/ | 68 | #define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/ |
69 | 69 | ||
70 | struct dmareg { | ||
71 | u32 low; /* Low bits 0-31 */ | ||
72 | u32 high; /* High bits 32-63 */ | ||
73 | }; | ||
74 | |||
75 | /* DMA Per Channel registers */ | ||
76 | struct dma_chan_regs { | ||
77 | struct dmareg sar; /* Source Address */ | ||
78 | struct dmareg dar; /* Destination address */ | ||
79 | struct dmareg llp; /* Linked List Pointer */ | ||
80 | struct dmareg ctl; /* Control */ | ||
81 | struct dmareg sstat; /* Source Status not implemented in core */ | ||
82 | struct dmareg dstat; /* Destination Status not implemented in core*/ | ||
83 | struct dmareg sstatar; /* Source Status Address not impl in core */ | ||
84 | struct dmareg dstatar; /* Destination Status Address not implemente */ | ||
85 | struct dmareg cfg; /* Config */ | ||
86 | struct dmareg sgr; /* Source Gather */ | ||
87 | struct dmareg dsr; /* Destination Scatter */ | ||
88 | }; | ||
89 | |||
90 | /* Generic Interrupt Registers */ | ||
91 | struct dma_interrupt_regs { | ||
92 | struct dmareg tfr; /* Transfer Interrupt */ | ||
93 | struct dmareg block; /* Block Interrupt */ | ||
94 | struct dmareg srctran; /* Source Transfer Interrupt */ | ||
95 | struct dmareg dsttran; /* Dest Transfer Interrupt */ | ||
96 | struct dmareg error; /* Error */ | ||
97 | }; | ||
98 | |||
99 | struct ahb_dma_regs { | ||
100 | struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS]; | ||
101 | struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */ | ||
102 | struct dma_interrupt_regs interrupt_status; /* Interrupt Status */ | ||
103 | struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */ | ||
104 | struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */ | ||
105 | struct dmareg statusInt; /* Interrupt combined*/ | ||
106 | struct dmareg rq_srcreg; /* Src Trans Req */ | ||
107 | struct dmareg rq_dstreg; /* Dst Trans Req */ | ||
108 | struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req*/ | ||
109 | struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req*/ | ||
110 | struct dmareg rq_lst_srcreg; /* Last Src Trans Req*/ | ||
111 | struct dmareg rq_lst_dstreg; /* Last Dst Trans Req*/ | ||
112 | struct dmareg dma_cfg; /* DMA Config */ | ||
113 | struct dmareg dma_chan_en; /* DMA Channel Enable*/ | ||
114 | struct dmareg dma_id; /* DMA ID */ | ||
115 | struct dmareg dma_test; /* DMA Test */ | ||
116 | struct dmareg res1; /* reserved */ | ||
117 | struct dmareg res2; /* reserved */ | ||
118 | /* | ||
119 | * DMA Comp Params | ||
120 | * Param 6 = dma_param[0], Param 5 = dma_param[1], | ||
121 | * Param 4 = dma_param[2] ... | ||
122 | */ | ||
123 | struct dmareg dma_params[6]; | ||
124 | }; | ||
125 | |||
126 | /* Data structure for linked list item */ | ||
127 | struct lli { | ||
128 | u32 sar; /* Source Address */ | ||
129 | u32 dar; /* Destination address */ | ||
130 | u32 llp; /* Linked List Pointer */ | ||
131 | struct dmareg ctl; /* Control */ | ||
132 | struct dmareg dstat; /* Destination Status */ | ||
133 | }; | ||
134 | |||
135 | enum { | ||
136 | SATA_DWC_DMAC_LLI_SZ = (sizeof(struct lli)), | ||
137 | SATA_DWC_DMAC_LLI_NUM = 256, | ||
138 | SATA_DWC_DMAC_LLI_TBL_SZ = (SATA_DWC_DMAC_LLI_SZ * \ | ||
139 | SATA_DWC_DMAC_LLI_NUM), | ||
140 | SATA_DWC_DMAC_TWIDTH_BYTES = 4, | ||
141 | SATA_DWC_DMAC_CTRL_TSIZE_MAX = (0x00000800 * \ | ||
142 | SATA_DWC_DMAC_TWIDTH_BYTES), | ||
143 | }; | ||
144 | |||
145 | /* DMA Register Operation Bits */ | ||
146 | enum { | ||
147 | DMA_EN = 0x00000001, /* Enable AHB DMA */ | ||
148 | DMA_CTL_LLP_SRCEN = 0x10000000, /* Blk chain enable Src */ | ||
149 | DMA_CTL_LLP_DSTEN = 0x08000000, /* Blk chain enable Dst */ | ||
150 | }; | ||
151 | |||
152 | #define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */ | ||
153 | #define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */ | ||
154 | /* Enable channel */ | ||
155 | #define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \ | ||
156 | ((0x000000001 << (ch)) << 8)) | ||
157 | /* Disable channel */ | ||
158 | #define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8)) | ||
159 | /* Transfer Type & Flow Controller */ | ||
160 | #define DMA_CTL_TTFC(type) (((type) & 0x7) << 20) | ||
161 | #define DMA_CTL_SMS(num) (((num) & 0x3) << 25) /* Src Master Select */ | ||
162 | #define DMA_CTL_DMS(num) (((num) & 0x3) << 23)/* Dst Master Select */ | ||
163 | /* Src Burst Transaction Length */ | ||
164 | #define DMA_CTL_SRC_MSIZE(size) (((size) & 0x7) << 14) | ||
165 | /* Dst Burst Transaction Length */ | ||
166 | #define DMA_CTL_DST_MSIZE(size) (((size) & 0x7) << 11) | ||
167 | /* Source Transfer Width */ | ||
168 | #define DMA_CTL_SRC_TRWID(size) (((size) & 0x7) << 4) | ||
169 | /* Destination Transfer Width */ | ||
170 | #define DMA_CTL_DST_TRWID(size) (((size) & 0x7) << 1) | ||
171 | |||
172 | /* Assign HW handshaking interface (x) to destination / source peripheral */ | ||
173 | #define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11) | ||
174 | #define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7) | ||
175 | #define DMA_CFG_HW_CH_PRIOR(int_num) (((int_num) & 0xF) << 5) | ||
176 | #define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master)) | ||
177 | |||
178 | /* | ||
179 | * This define is used to set block chaining disabled in the control low | ||
180 | * register. It is already in little endian format so it can be &'d dirctly. | ||
181 | * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN)) | ||
182 | */ | ||
183 | enum { | 70 | enum { |
184 | DMA_CTL_LLP_DISABLE_LE32 = 0xffffffe7, | ||
185 | DMA_CTL_TTFC_P2M_DMAC = 0x00000002, /* Per to mem, DMAC cntr */ | ||
186 | DMA_CTL_TTFC_M2P_PER = 0x00000003, /* Mem to per, peripheral cntr */ | ||
187 | DMA_CTL_SINC_INC = 0x00000000, /* Source Address Increment */ | ||
188 | DMA_CTL_SINC_DEC = 0x00000200, | ||
189 | DMA_CTL_SINC_NOCHANGE = 0x00000400, | ||
190 | DMA_CTL_DINC_INC = 0x00000000, /* Destination Address Increment */ | ||
191 | DMA_CTL_DINC_DEC = 0x00000080, | ||
192 | DMA_CTL_DINC_NOCHANGE = 0x00000100, | ||
193 | DMA_CTL_INT_EN = 0x00000001, /* Interrupt Enable */ | ||
194 | |||
195 | /* Channel Configuration Register high bits */ | ||
196 | DMA_CFG_FCMOD_REQ = 0x00000001, /* Flow Control - request based */ | ||
197 | DMA_CFG_PROTCTL = (0x00000003 << 2),/* Protection Control */ | ||
198 | |||
199 | /* Channel Configuration Register low bits */ | ||
200 | DMA_CFG_RELD_DST = 0x80000000, /* Reload Dest / Src Addr */ | ||
201 | DMA_CFG_RELD_SRC = 0x40000000, | ||
202 | DMA_CFG_HS_SELSRC = 0x00000800, /* Software handshake Src/ Dest */ | ||
203 | DMA_CFG_HS_SELDST = 0x00000400, | ||
204 | DMA_CFG_FIFOEMPTY = (0x00000001 << 9), /* FIFO Empty bit */ | ||
205 | |||
206 | /* Channel Linked List Pointer Register */ | ||
207 | DMA_LLP_AHBMASTER1 = 0, /* List Master Select */ | ||
208 | DMA_LLP_AHBMASTER2 = 1, | ||
209 | |||
210 | SATA_DWC_MAX_PORTS = 1, | 71 | SATA_DWC_MAX_PORTS = 1, |
211 | 72 | ||
212 | SATA_DWC_SCR_OFFSET = 0x24, | 73 | SATA_DWC_SCR_OFFSET = 0x24, |
@@ -287,7 +148,7 @@ struct sata_dwc_device { | |||
287 | struct ata_host *host; | 148 | struct ata_host *host; |
288 | u8 __iomem *reg_base; | 149 | u8 __iomem *reg_base; |
289 | struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ | 150 | struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ |
290 | int irq_dma; | 151 | struct dw_dma_chip *dma; |
291 | }; | 152 | }; |
292 | 153 | ||
293 | #define SATA_DWC_QCMD_MAX 32 | 154 | #define SATA_DWC_QCMD_MAX 32 |
@@ -295,10 +156,13 @@ struct sata_dwc_device { | |||
295 | struct sata_dwc_device_port { | 156 | struct sata_dwc_device_port { |
296 | struct sata_dwc_device *hsdev; | 157 | struct sata_dwc_device *hsdev; |
297 | int cmd_issued[SATA_DWC_QCMD_MAX]; | 158 | int cmd_issued[SATA_DWC_QCMD_MAX]; |
298 | struct lli *llit[SATA_DWC_QCMD_MAX]; /* DMA LLI table */ | ||
299 | dma_addr_t llit_dma[SATA_DWC_QCMD_MAX]; | ||
300 | u32 dma_chan[SATA_DWC_QCMD_MAX]; | ||
301 | int dma_pending[SATA_DWC_QCMD_MAX]; | 159 | int dma_pending[SATA_DWC_QCMD_MAX]; |
160 | |||
161 | /* DMA info */ | ||
162 | struct dw_dma_slave *dws; | ||
163 | struct dma_chan *chan; | ||
164 | struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX]; | ||
165 | u32 dma_interrupt_count; | ||
302 | }; | 166 | }; |
303 | 167 | ||
304 | /* | 168 | /* |
@@ -330,14 +194,17 @@ struct sata_dwc_host_priv { | |||
330 | void __iomem *scr_addr_sstatus; | 194 | void __iomem *scr_addr_sstatus; |
331 | u32 sata_dwc_sactive_issued ; | 195 | u32 sata_dwc_sactive_issued ; |
332 | u32 sata_dwc_sactive_queued ; | 196 | u32 sata_dwc_sactive_queued ; |
333 | u32 dma_interrupt_count; | ||
334 | struct ahb_dma_regs *sata_dma_regs; | ||
335 | struct device *dwc_dev; | ||
336 | int dma_channel; | ||
337 | }; | 197 | }; |
338 | 198 | ||
339 | static struct sata_dwc_host_priv host_pvt; | 199 | static struct sata_dwc_host_priv host_pvt; |
340 | 200 | ||
201 | static struct dw_dma_slave sata_dwc_dma_dws = { | ||
202 | .src_id = 0, | ||
203 | .dst_id = 0, | ||
204 | .src_master = 0, | ||
205 | .dst_master = 1, | ||
206 | }; | ||
207 | |||
341 | /* | 208 | /* |
342 | * Prototypes | 209 | * Prototypes |
343 | */ | 210 | */ |
@@ -347,12 +214,6 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, | |||
347 | static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); | 214 | static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); |
348 | static void sata_dwc_port_stop(struct ata_port *ap); | 215 | static void sata_dwc_port_stop(struct ata_port *ap); |
349 | static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag); | 216 | static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag); |
350 | static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq); | ||
351 | static void dma_dwc_exit(struct sata_dwc_device *hsdev); | ||
352 | static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems, | ||
353 | struct lli *lli, dma_addr_t dma_lli, | ||
354 | void __iomem *addr, int dir); | ||
355 | static void dma_dwc_xfer_start(int dma_ch); | ||
356 | 217 | ||
357 | static const char *get_prot_descript(u8 protocol) | 218 | static const char *get_prot_descript(u8 protocol) |
358 | { | 219 | { |
@@ -390,90 +251,23 @@ static const char *get_dma_dir_descript(int dma_dir) | |||
390 | } | 251 | } |
391 | } | 252 | } |
392 | 253 | ||
393 | static void sata_dwc_tf_dump(struct ata_taskfile *tf) | 254 | static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf) |
394 | { | 255 | { |
395 | dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:" | 256 | dev_vdbg(ap->dev, |
396 | "0x%lx device: %x\n", tf->command, | 257 | "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n", |
397 | get_prot_descript(tf->protocol), tf->flags, tf->device); | 258 | tf->command, get_prot_descript(tf->protocol), tf->flags, |
398 | dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x " | 259 | tf->device); |
399 | "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal, | 260 | dev_vdbg(ap->dev, |
400 | tf->lbam, tf->lbah); | 261 | "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n", |
401 | dev_vdbg(host_pvt.dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x " | 262 | tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah); |
402 | "hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n", | 263 | dev_vdbg(ap->dev, |
264 | "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n", | ||
403 | tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, | 265 | tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, |
404 | tf->hob_lbah); | 266 | tf->hob_lbah); |
405 | } | 267 | } |
406 | 268 | ||
407 | /* | 269 | static void dma_dwc_xfer_done(void *hsdev_instance) |
408 | * Function: get_burst_length_encode | ||
409 | * arguments: datalength: length in bytes of data | ||
410 | * returns value to be programmed in register corresponding to data length | ||
411 | * This value is effectively the log(base 2) of the length | ||
412 | */ | ||
413 | static int get_burst_length_encode(int datalength) | ||
414 | { | ||
415 | int items = datalength >> 2; /* div by 4 to get lword count */ | ||
416 | |||
417 | if (items >= 64) | ||
418 | return 5; | ||
419 | |||
420 | if (items >= 32) | ||
421 | return 4; | ||
422 | |||
423 | if (items >= 16) | ||
424 | return 3; | ||
425 | |||
426 | if (items >= 8) | ||
427 | return 2; | ||
428 | |||
429 | if (items >= 4) | ||
430 | return 1; | ||
431 | |||
432 | return 0; | ||
433 | } | ||
434 | |||
435 | static void clear_chan_interrupts(int c) | ||
436 | { | 270 | { |
437 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.tfr.low), | ||
438 | DMA_CHANNEL(c)); | ||
439 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.block.low), | ||
440 | DMA_CHANNEL(c)); | ||
441 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.srctran.low), | ||
442 | DMA_CHANNEL(c)); | ||
443 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.dsttran.low), | ||
444 | DMA_CHANNEL(c)); | ||
445 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.error.low), | ||
446 | DMA_CHANNEL(c)); | ||
447 | } | ||
448 | |||
449 | /* | ||
450 | * Function: dma_request_channel | ||
451 | * arguments: None | ||
452 | * returns channel number if available else -1 | ||
453 | * This function assigns the next available DMA channel from the list to the | ||
454 | * requester | ||
455 | */ | ||
456 | static int dma_request_channel(void) | ||
457 | { | ||
458 | /* Check if the channel is not currently in use */ | ||
459 | if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) & | ||
460 | DMA_CHANNEL(host_pvt.dma_channel))) | ||
461 | return host_pvt.dma_channel; | ||
462 | dev_err(host_pvt.dwc_dev, "%s Channel %d is currently in use\n", | ||
463 | __func__, host_pvt.dma_channel); | ||
464 | return -1; | ||
465 | } | ||
466 | |||
467 | /* | ||
468 | * Function: dma_dwc_interrupt | ||
469 | * arguments: irq, dev_id, pt_regs | ||
470 | * returns channel number if available else -1 | ||
471 | * Interrupt Handler for DW AHB SATA DMA | ||
472 | */ | ||
473 | static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance) | ||
474 | { | ||
475 | int chan; | ||
476 | u32 tfr_reg, err_reg; | ||
477 | unsigned long flags; | 271 | unsigned long flags; |
478 | struct sata_dwc_device *hsdev = hsdev_instance; | 272 | struct sata_dwc_device *hsdev = hsdev_instance; |
479 | struct ata_host *host = (struct ata_host *)hsdev->host; | 273 | struct ata_host *host = (struct ata_host *)hsdev->host; |
@@ -487,341 +281,65 @@ static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance) | |||
487 | hsdevp = HSDEVP_FROM_AP(ap); | 281 | hsdevp = HSDEVP_FROM_AP(ap); |
488 | tag = ap->link.active_tag; | 282 | tag = ap->link.active_tag; |
489 | 283 | ||
490 | tfr_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.tfr\ | ||
491 | .low)); | ||
492 | err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error\ | ||
493 | .low)); | ||
494 | |||
495 | dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n", | ||
496 | tfr_reg, err_reg, hsdevp->dma_pending[tag], port); | ||
497 | |||
498 | chan = host_pvt.dma_channel; | ||
499 | if (chan >= 0) { | ||
500 | /* Check for end-of-transfer interrupt. */ | ||
501 | if (tfr_reg & DMA_CHANNEL(chan)) { | ||
502 | /* | ||
503 | * Each DMA command produces 2 interrupts. Only | ||
504 | * complete the command after both interrupts have been | ||
505 | * seen. (See sata_dwc_isr()) | ||
506 | */ | ||
507 | host_pvt.dma_interrupt_count++; | ||
508 | sata_dwc_clear_dmacr(hsdevp, tag); | ||
509 | |||
510 | if (hsdevp->dma_pending[tag] == | ||
511 | SATA_DWC_DMA_PENDING_NONE) { | ||
512 | dev_err(ap->dev, "DMA not pending eot=0x%08x " | ||
513 | "err=0x%08x tag=0x%02x pending=%d\n", | ||
514 | tfr_reg, err_reg, tag, | ||
515 | hsdevp->dma_pending[tag]); | ||
516 | } | ||
517 | |||
518 | if ((host_pvt.dma_interrupt_count % 2) == 0) | ||
519 | sata_dwc_dma_xfer_complete(ap, 1); | ||
520 | |||
521 | /* Clear the interrupt */ | ||
522 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\ | ||
523 | .tfr.low), | ||
524 | DMA_CHANNEL(chan)); | ||
525 | } | ||
526 | |||
527 | /* Check for error interrupt. */ | ||
528 | if (err_reg & DMA_CHANNEL(chan)) { | ||
529 | /* TODO Need error handler ! */ | ||
530 | dev_err(ap->dev, "error interrupt err_reg=0x%08x\n", | ||
531 | err_reg); | ||
532 | |||
533 | /* Clear the interrupt. */ | ||
534 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\ | ||
535 | .error.low), | ||
536 | DMA_CHANNEL(chan)); | ||
537 | } | ||
538 | } | ||
539 | spin_unlock_irqrestore(&host->lock, flags); | ||
540 | return IRQ_HANDLED; | ||
541 | } | ||
542 | |||
543 | /* | ||
544 | * Function: dma_request_interrupts | ||
545 | * arguments: hsdev | ||
546 | * returns status | ||
547 | * This function registers ISR for a particular DMA channel interrupt | ||
548 | */ | ||
549 | static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq) | ||
550 | { | ||
551 | int retval = 0; | ||
552 | int chan = host_pvt.dma_channel; | ||
553 | |||
554 | if (chan >= 0) { | ||
555 | /* Unmask error interrupt */ | ||
556 | out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low, | ||
557 | DMA_ENABLE_CHAN(chan)); | ||
558 | |||
559 | /* Unmask end-of-transfer interrupt */ | ||
560 | out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.tfr.low, | ||
561 | DMA_ENABLE_CHAN(chan)); | ||
562 | } | ||
563 | |||
564 | retval = request_irq(irq, dma_dwc_interrupt, 0, "SATA DMA", hsdev); | ||
565 | if (retval) { | ||
566 | dev_err(host_pvt.dwc_dev, "%s: could not get IRQ %d\n", | ||
567 | __func__, irq); | ||
568 | return -ENODEV; | ||
569 | } | ||
570 | |||
571 | /* Mark this interrupt as requested */ | ||
572 | hsdev->irq_dma = irq; | ||
573 | return 0; | ||
574 | } | ||
575 | |||
576 | /* | ||
577 | * Function: map_sg_to_lli | ||
578 | * The Synopsis driver has a comment proposing that better performance | ||
579 | * is possible by only enabling interrupts on the last item in the linked list. | ||
580 | * However, it seems that could be a problem if an error happened on one of the | ||
581 | * first items. The transfer would halt, but no error interrupt would occur. | ||
582 | * Currently this function sets interrupts enabled for each linked list item: | ||
583 | * DMA_CTL_INT_EN. | ||
584 | */ | ||
585 | static int map_sg_to_lli(struct scatterlist *sg, int num_elems, | ||
586 | struct lli *lli, dma_addr_t dma_lli, | ||
587 | void __iomem *dmadr_addr, int dir) | ||
588 | { | ||
589 | int i, idx = 0; | ||
590 | int fis_len = 0; | ||
591 | dma_addr_t next_llp; | ||
592 | int bl; | ||
593 | int sms_val, dms_val; | ||
594 | |||
595 | sms_val = 0; | ||
596 | dms_val = 1 + host_pvt.dma_channel; | ||
597 | dev_dbg(host_pvt.dwc_dev, | ||
598 | "%s: sg=%p nelem=%d lli=%p dma_lli=0x%pad dmadr=0x%p\n", | ||
599 | __func__, sg, num_elems, lli, &dma_lli, dmadr_addr); | ||
600 | |||
601 | bl = get_burst_length_encode(AHB_DMA_BRST_DFLT); | ||
602 | |||
603 | for (i = 0; i < num_elems; i++, sg++) { | ||
604 | u32 addr, offset; | ||
605 | u32 sg_len, len; | ||
606 | |||
607 | addr = (u32) sg_dma_address(sg); | ||
608 | sg_len = sg_dma_len(sg); | ||
609 | |||
610 | dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len" | ||
611 | "=%d\n", __func__, i, addr, sg_len); | ||
612 | |||
613 | while (sg_len) { | ||
614 | if (idx >= SATA_DWC_DMAC_LLI_NUM) { | ||
615 | /* The LLI table is not large enough. */ | ||
616 | dev_err(host_pvt.dwc_dev, "LLI table overrun " | ||
617 | "(idx=%d)\n", idx); | ||
618 | break; | ||
619 | } | ||
620 | len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ? | ||
621 | SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len; | ||
622 | |||
623 | offset = addr & 0xffff; | ||
624 | if ((offset + sg_len) > 0x10000) | ||
625 | len = 0x10000 - offset; | ||
626 | |||
627 | /* | ||
628 | * Make sure a LLI block is not created that will span | ||
629 | * 8K max FIS boundary. If the block spans such a FIS | ||
630 | * boundary, there is a chance that a DMA burst will | ||
631 | * cross that boundary -- this results in an error in | ||
632 | * the host controller. | ||
633 | */ | ||
634 | if (fis_len + len > 8192) { | ||
635 | dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len=" | ||
636 | "%d(0x%x) len=%d(0x%x)\n", fis_len, | ||
637 | fis_len, len, len); | ||
638 | len = 8192 - fis_len; | ||
639 | fis_len = 0; | ||
640 | } else { | ||
641 | fis_len += len; | ||
642 | } | ||
643 | if (fis_len == 8192) | ||
644 | fis_len = 0; | ||
645 | |||
646 | /* | ||
647 | * Set DMA addresses and lower half of control register | ||
648 | * based on direction. | ||
649 | */ | ||
650 | if (dir == DMA_FROM_DEVICE) { | ||
651 | lli[idx].dar = cpu_to_le32(addr); | ||
652 | lli[idx].sar = cpu_to_le32((u32)dmadr_addr); | ||
653 | |||
654 | lli[idx].ctl.low = cpu_to_le32( | ||
655 | DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) | | ||
656 | DMA_CTL_SMS(sms_val) | | ||
657 | DMA_CTL_DMS(dms_val) | | ||
658 | DMA_CTL_SRC_MSIZE(bl) | | ||
659 | DMA_CTL_DST_MSIZE(bl) | | ||
660 | DMA_CTL_SINC_NOCHANGE | | ||
661 | DMA_CTL_SRC_TRWID(2) | | ||
662 | DMA_CTL_DST_TRWID(2) | | ||
663 | DMA_CTL_INT_EN | | ||
664 | DMA_CTL_LLP_SRCEN | | ||
665 | DMA_CTL_LLP_DSTEN); | ||
666 | } else { /* DMA_TO_DEVICE */ | ||
667 | lli[idx].sar = cpu_to_le32(addr); | ||
668 | lli[idx].dar = cpu_to_le32((u32)dmadr_addr); | ||
669 | |||
670 | lli[idx].ctl.low = cpu_to_le32( | ||
671 | DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) | | ||
672 | DMA_CTL_SMS(dms_val) | | ||
673 | DMA_CTL_DMS(sms_val) | | ||
674 | DMA_CTL_SRC_MSIZE(bl) | | ||
675 | DMA_CTL_DST_MSIZE(bl) | | ||
676 | DMA_CTL_DINC_NOCHANGE | | ||
677 | DMA_CTL_SRC_TRWID(2) | | ||
678 | DMA_CTL_DST_TRWID(2) | | ||
679 | DMA_CTL_INT_EN | | ||
680 | DMA_CTL_LLP_SRCEN | | ||
681 | DMA_CTL_LLP_DSTEN); | ||
682 | } | ||
683 | |||
684 | dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: " | ||
685 | "0x%08x val: 0x%08x\n", __func__, | ||
686 | len, DMA_CTL_BLK_TS(len / 4)); | ||
687 | |||
688 | /* Program the LLI CTL high register */ | ||
689 | lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS\ | ||
690 | (len / 4)); | ||
691 | |||
692 | /* Program the next pointer. The next pointer must be | ||
693 | * the physical address, not the virtual address. | ||
694 | */ | ||
695 | next_llp = (dma_lli + ((idx + 1) * sizeof(struct \ | ||
696 | lli))); | ||
697 | |||
698 | /* The last 2 bits encode the list master select. */ | ||
699 | next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2); | ||
700 | |||
701 | lli[idx].llp = cpu_to_le32(next_llp); | ||
702 | idx++; | ||
703 | sg_len -= len; | ||
704 | addr += len; | ||
705 | } | ||
706 | } | ||
707 | |||
708 | /* | 284 | /* |
709 | * The last next ptr has to be zero and the last control low register | 285 | * Each DMA command produces 2 interrupts. Only |
710 | * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source | 286 | * complete the command after both interrupts have been |
711 | * and destination enable) set back to 0 (disabled.) This is what tells | 287 | * seen. (See sata_dwc_isr()) |
712 | * the core that this is the last item in the linked list. | ||
713 | */ | 288 | */ |
714 | if (idx) { | 289 | hsdevp->dma_interrupt_count++; |
715 | lli[idx-1].llp = 0x00000000; | 290 | sata_dwc_clear_dmacr(hsdevp, tag); |
716 | lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32; | ||
717 | 291 | ||
718 | /* Flush cache to memory */ | 292 | if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { |
719 | dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx), | 293 | dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n", |
720 | DMA_BIDIRECTIONAL); | 294 | tag, hsdevp->dma_pending[tag]); |
721 | } | 295 | } |
722 | 296 | ||
723 | return idx; | 297 | if ((hsdevp->dma_interrupt_count % 2) == 0) |
724 | } | 298 | sata_dwc_dma_xfer_complete(ap, 1); |
725 | 299 | ||
726 | /* | 300 | spin_unlock_irqrestore(&host->lock, flags); |
727 | * Function: dma_dwc_xfer_start | ||
728 | * arguments: Channel number | ||
729 | * Return : None | ||
730 | * Enables the DMA channel | ||
731 | */ | ||
732 | static void dma_dwc_xfer_start(int dma_ch) | ||
733 | { | ||
734 | /* Enable the DMA channel */ | ||
735 | out_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low), | ||
736 | in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) | | ||
737 | DMA_ENABLE_CHAN(dma_ch)); | ||
738 | } | 301 | } |
739 | 302 | ||
740 | static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems, | 303 | static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc) |
741 | struct lli *lli, dma_addr_t dma_lli, | ||
742 | void __iomem *addr, int dir) | ||
743 | { | 304 | { |
744 | int dma_ch; | 305 | struct ata_port *ap = qc->ap; |
745 | int num_lli; | 306 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); |
746 | /* Acquire DMA channel */ | 307 | struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); |
747 | dma_ch = dma_request_channel(); | 308 | dma_addr_t addr = (dma_addr_t)&hsdev->sata_dwc_regs->dmadr; |
748 | if (dma_ch == -1) { | 309 | struct dma_slave_config sconf; |
749 | dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n", | 310 | struct dma_async_tx_descriptor *desc; |
750 | __func__); | 311 | |
751 | return -EAGAIN; | 312 | if (qc->dma_dir == DMA_DEV_TO_MEM) { |
313 | sconf.src_addr = addr; | ||
314 | sconf.device_fc = true; | ||
315 | } else { /* DMA_MEM_TO_DEV */ | ||
316 | sconf.dst_addr = addr; | ||
317 | sconf.device_fc = false; | ||
752 | } | 318 | } |
753 | 319 | ||
754 | /* Convert SG list to linked list of items (LLIs) for AHB DMA */ | 320 | sconf.direction = qc->dma_dir; |
755 | num_lli = map_sg_to_lli(sg, num_elems, lli, dma_lli, addr, dir); | 321 | sconf.src_maxburst = AHB_DMA_BRST_DFLT; |
756 | 322 | sconf.dst_maxburst = AHB_DMA_BRST_DFLT; | |
757 | dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d lli: %p dma_lli:" | 323 | sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
758 | " 0x%0xlx addr: %p lli count: %d\n", __func__, sg, num_elems, | 324 | sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
759 | lli, (u32)dma_lli, addr, num_lli); | ||
760 | |||
761 | clear_chan_interrupts(dma_ch); | ||
762 | 325 | ||
763 | /* Program the CFG register. */ | 326 | dmaengine_slave_config(hsdevp->chan, &sconf); |
764 | out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high), | ||
765 | DMA_CFG_HW_HS_SRC(dma_ch) | DMA_CFG_HW_HS_DEST(dma_ch) | | ||
766 | DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ); | ||
767 | out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low), | ||
768 | DMA_CFG_HW_CH_PRIOR(dma_ch)); | ||
769 | 327 | ||
770 | /* Program the address of the linked list */ | 328 | /* Convert SG list to linked list of items (LLIs) for AHB DMA */ |
771 | out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low), | 329 | desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem, |
772 | DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2)); | 330 | qc->dma_dir, |
773 | 331 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | |
774 | /* Program the CTL register with src enable / dst enable */ | ||
775 | out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low), | ||
776 | DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN); | ||
777 | return dma_ch; | ||
778 | } | ||
779 | |||
780 | /* | ||
781 | * Function: dma_dwc_exit | ||
782 | * arguments: None | ||
783 | * returns status | ||
784 | * This function exits the SATA DMA driver | ||
785 | */ | ||
786 | static void dma_dwc_exit(struct sata_dwc_device *hsdev) | ||
787 | { | ||
788 | dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__); | ||
789 | if (host_pvt.sata_dma_regs) { | ||
790 | iounmap((void __iomem *)host_pvt.sata_dma_regs); | ||
791 | host_pvt.sata_dma_regs = NULL; | ||
792 | } | ||
793 | |||
794 | if (hsdev->irq_dma) { | ||
795 | free_irq(hsdev->irq_dma, hsdev); | ||
796 | hsdev->irq_dma = 0; | ||
797 | } | ||
798 | } | ||
799 | |||
800 | /* | ||
801 | * Function: dma_dwc_init | ||
802 | * arguments: hsdev | ||
803 | * returns status | ||
804 | * This function initializes the SATA DMA driver | ||
805 | */ | ||
806 | static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) | ||
807 | { | ||
808 | int err; | ||
809 | 332 | ||
810 | err = dma_request_interrupts(hsdev, irq); | 333 | if (!desc) |
811 | if (err) { | 334 | return NULL; |
812 | dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns" | ||
813 | " %d\n", __func__, err); | ||
814 | return err; | ||
815 | } | ||
816 | 335 | ||
817 | /* Enabe DMA */ | 336 | desc->callback = dma_dwc_xfer_done; |
818 | out_le32(&(host_pvt.sata_dma_regs->dma_cfg.low), DMA_EN); | 337 | desc->callback_param = hsdev; |
819 | 338 | ||
820 | dev_notice(host_pvt.dwc_dev, "DMA initialized\n"); | 339 | dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pad\n", |
821 | dev_dbg(host_pvt.dwc_dev, "SATA DMA registers=0x%p\n", host_pvt.\ | 340 | __func__, qc->sg, qc->n_elem, &addr); |
822 | sata_dma_regs); | ||
823 | 341 | ||
824 | return 0; | 342 | return desc; |
825 | } | 343 | } |
826 | 344 | ||
827 | static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) | 345 | static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) |
@@ -891,21 +409,18 @@ static void sata_dwc_error_intr(struct ata_port *ap, | |||
891 | struct ata_queued_cmd *qc; | 409 | struct ata_queued_cmd *qc; |
892 | u32 serror; | 410 | u32 serror; |
893 | u8 status, tag; | 411 | u8 status, tag; |
894 | u32 err_reg; | ||
895 | 412 | ||
896 | ata_ehi_clear_desc(ehi); | 413 | ata_ehi_clear_desc(ehi); |
897 | 414 | ||
898 | serror = core_scr_read(SCR_ERROR); | 415 | serror = core_scr_read(SCR_ERROR); |
899 | status = ap->ops->sff_check_status(ap); | 416 | status = ap->ops->sff_check_status(ap); |
900 | 417 | ||
901 | err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.\ | ||
902 | low)); | ||
903 | tag = ap->link.active_tag; | 418 | tag = ap->link.active_tag; |
904 | 419 | ||
905 | dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x " | 420 | dev_err(ap->dev, |
906 | "dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n", | 421 | "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d", |
907 | __func__, serror, intpr, status, host_pvt.dma_interrupt_count, | 422 | __func__, serror, intpr, status, hsdevp->dma_interrupt_count, |
908 | hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg); | 423 | hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]); |
909 | 424 | ||
910 | /* Clear error register and interrupt bit */ | 425 | /* Clear error register and interrupt bit */ |
911 | clear_serror(); | 426 | clear_serror(); |
@@ -1003,8 +518,9 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) | |||
1003 | 518 | ||
1004 | /* DEV interrupt w/ no active qc? */ | 519 | /* DEV interrupt w/ no active qc? */ |
1005 | if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { | 520 | if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { |
1006 | dev_err(ap->dev, "%s interrupt with no active qc " | 521 | dev_err(ap->dev, |
1007 | "qc=%p\n", __func__, qc); | 522 | "%s interrupt with no active qc qc=%p\n", |
523 | __func__, qc); | ||
1008 | ap->ops->sff_check_status(ap); | 524 | ap->ops->sff_check_status(ap); |
1009 | handled = 1; | 525 | handled = 1; |
1010 | goto DONE; | 526 | goto DONE; |
@@ -1031,16 +547,16 @@ DRVSTILLBUSY: | |||
1031 | * operation done interrupt. The command should be | 547 | * operation done interrupt. The command should be |
1032 | * completed only after both interrupts are seen. | 548 | * completed only after both interrupts are seen. |
1033 | */ | 549 | */ |
1034 | host_pvt.dma_interrupt_count++; | 550 | hsdevp->dma_interrupt_count++; |
1035 | if (hsdevp->dma_pending[tag] == \ | 551 | if (hsdevp->dma_pending[tag] == \ |
1036 | SATA_DWC_DMA_PENDING_NONE) { | 552 | SATA_DWC_DMA_PENDING_NONE) { |
1037 | dev_err(ap->dev, "%s: DMA not pending " | 553 | dev_err(ap->dev, |
1038 | "intpr=0x%08x status=0x%08x pending" | 554 | "%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n", |
1039 | "=%d\n", __func__, intpr, status, | 555 | __func__, intpr, status, |
1040 | hsdevp->dma_pending[tag]); | 556 | hsdevp->dma_pending[tag]); |
1041 | } | 557 | } |
1042 | 558 | ||
1043 | if ((host_pvt.dma_interrupt_count % 2) == 0) | 559 | if ((hsdevp->dma_interrupt_count % 2) == 0) |
1044 | sata_dwc_dma_xfer_complete(ap, 1); | 560 | sata_dwc_dma_xfer_complete(ap, 1); |
1045 | } else if (ata_is_pio(qc->tf.protocol)) { | 561 | } else if (ata_is_pio(qc->tf.protocol)) { |
1046 | ata_sff_hsm_move(ap, qc, status, 0); | 562 | ata_sff_hsm_move(ap, qc, status, 0); |
@@ -1068,17 +584,17 @@ DRVSTILLBUSY: | |||
1068 | 584 | ||
1069 | if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \ | 585 | if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \ |
1070 | tag_mask > 1) { | 586 | tag_mask > 1) { |
1071 | dev_dbg(ap->dev, "%s NCQ:sactive=0x%08x sactive_issued=0x%08x" | 587 | dev_dbg(ap->dev, |
1072 | "tag_mask=0x%08x\n", __func__, sactive, | 588 | "%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n", |
1073 | host_pvt.sata_dwc_sactive_issued, tag_mask); | 589 | __func__, sactive, host_pvt.sata_dwc_sactive_issued, |
590 | tag_mask); | ||
1074 | } | 591 | } |
1075 | 592 | ||
1076 | if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \ | 593 | if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \ |
1077 | (host_pvt.sata_dwc_sactive_issued)) { | 594 | (host_pvt.sata_dwc_sactive_issued)) { |
1078 | dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x " | 595 | dev_warn(ap->dev, |
1079 | "(host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask" | 596 | "Bad tag mask? sactive=0x%08x (host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask=0x%08x\n", |
1080 | "=0x%08x\n", sactive, host_pvt.sata_dwc_sactive_issued, | 597 | sactive, host_pvt.sata_dwc_sactive_issued, tag_mask); |
1081 | tag_mask); | ||
1082 | } | 598 | } |
1083 | 599 | ||
1084 | /* read just to clear ... not bad if currently still busy */ | 600 | /* read just to clear ... not bad if currently still busy */ |
@@ -1114,12 +630,12 @@ DRVSTILLBUSY: | |||
1114 | dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, | 630 | dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, |
1115 | get_prot_descript(qc->tf.protocol)); | 631 | get_prot_descript(qc->tf.protocol)); |
1116 | if (ata_is_dma(qc->tf.protocol)) { | 632 | if (ata_is_dma(qc->tf.protocol)) { |
1117 | host_pvt.dma_interrupt_count++; | 633 | hsdevp->dma_interrupt_count++; |
1118 | if (hsdevp->dma_pending[tag] == \ | 634 | if (hsdevp->dma_pending[tag] == \ |
1119 | SATA_DWC_DMA_PENDING_NONE) | 635 | SATA_DWC_DMA_PENDING_NONE) |
1120 | dev_warn(ap->dev, "%s: DMA not pending?\n", | 636 | dev_warn(ap->dev, "%s: DMA not pending?\n", |
1121 | __func__); | 637 | __func__); |
1122 | if ((host_pvt.dma_interrupt_count % 2) == 0) | 638 | if ((hsdevp->dma_interrupt_count % 2) == 0) |
1123 | sata_dwc_dma_xfer_complete(ap, 1); | 639 | sata_dwc_dma_xfer_complete(ap, 1); |
1124 | } else { | 640 | } else { |
1125 | if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) | 641 | if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) |
@@ -1142,8 +658,9 @@ STILLBUSY: | |||
1142 | */ | 658 | */ |
1143 | sactive2 = core_scr_read(SCR_ACTIVE); | 659 | sactive2 = core_scr_read(SCR_ACTIVE); |
1144 | if (sactive2 != sactive) { | 660 | if (sactive2 != sactive) { |
1145 | dev_dbg(ap->dev, "More completed - sactive=0x%x sactive2" | 661 | dev_dbg(ap->dev, |
1146 | "=0x%x\n", sactive, sactive2); | 662 | "More completed - sactive=0x%x sactive2=0x%x\n", |
663 | sactive, sactive2); | ||
1147 | } | 664 | } |
1148 | handled = 1; | 665 | handled = 1; |
1149 | 666 | ||
@@ -1169,11 +686,10 @@ static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag) | |||
1169 | * This should not happen, it indicates the driver is out of | 686 | * This should not happen, it indicates the driver is out of |
1170 | * sync. If it does happen, clear dmacr anyway. | 687 | * sync. If it does happen, clear dmacr anyway. |
1171 | */ | 688 | */ |
1172 | dev_err(host_pvt.dwc_dev, "%s DMA protocol RX and" | 689 | dev_err(hsdev->dev, |
1173 | "TX DMA not pending tag=0x%02x pending=%d" | 690 | "%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n", |
1174 | " dmacr: 0x%08x\n", __func__, tag, | 691 | __func__, tag, hsdevp->dma_pending[tag], |
1175 | hsdevp->dma_pending[tag], | 692 | in_le32(&hsdev->sata_dwc_regs->dmacr)); |
1176 | in_le32(&(hsdev->sata_dwc_regs->dmacr))); | ||
1177 | out_le32(&(hsdev->sata_dwc_regs->dmacr), | 693 | out_le32(&(hsdev->sata_dwc_regs->dmacr), |
1178 | SATA_DWC_DMACR_TXRXCH_CLEAR); | 694 | SATA_DWC_DMACR_TXRXCH_CLEAR); |
1179 | } | 695 | } |
@@ -1195,8 +711,9 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) | |||
1195 | 711 | ||
1196 | #ifdef DEBUG_NCQ | 712 | #ifdef DEBUG_NCQ |
1197 | if (tag > 0) { | 713 | if (tag > 0) { |
1198 | dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s " | 714 | dev_info(ap->dev, |
1199 | "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command, | 715 | "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n", |
716 | __func__, qc->tag, qc->tf.command, | ||
1200 | get_dma_dir_descript(qc->dma_dir), | 717 | get_dma_dir_descript(qc->dma_dir), |
1201 | get_prot_descript(qc->tf.protocol), | 718 | get_prot_descript(qc->tf.protocol), |
1202 | in_le32(&(hsdev->sata_dwc_regs->dmacr))); | 719 | in_le32(&(hsdev->sata_dwc_regs->dmacr))); |
@@ -1205,8 +722,9 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) | |||
1205 | 722 | ||
1206 | if (ata_is_dma(qc->tf.protocol)) { | 723 | if (ata_is_dma(qc->tf.protocol)) { |
1207 | if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { | 724 | if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { |
1208 | dev_err(ap->dev, "%s DMA protocol RX and TX DMA not " | 725 | dev_err(ap->dev, |
1209 | "pending dmacr: 0x%08x\n", __func__, | 726 | "%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n", |
727 | __func__, | ||
1210 | in_le32(&(hsdev->sata_dwc_regs->dmacr))); | 728 | in_le32(&(hsdev->sata_dwc_regs->dmacr))); |
1211 | } | 729 | } |
1212 | 730 | ||
@@ -1232,9 +750,9 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, | |||
1232 | dev_err(ap->dev, "TX DMA PENDING\n"); | 750 | dev_err(ap->dev, "TX DMA PENDING\n"); |
1233 | else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) | 751 | else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) |
1234 | dev_err(ap->dev, "RX DMA PENDING\n"); | 752 | dev_err(ap->dev, "RX DMA PENDING\n"); |
1235 | dev_dbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u:" | 753 | dev_dbg(ap->dev, |
1236 | " protocol=%d\n", qc->tf.command, status, ap->print_id, | 754 | "QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n", |
1237 | qc->tf.protocol); | 755 | qc->tf.command, status, ap->print_id, qc->tf.protocol); |
1238 | 756 | ||
1239 | /* clear active bit */ | 757 | /* clear active bit */ |
1240 | mask = (~(qcmd_tag_to_mask(tag))); | 758 | mask = (~(qcmd_tag_to_mask(tag))); |
@@ -1260,11 +778,23 @@ static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev) | |||
1260 | */ | 778 | */ |
1261 | out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS); | 779 | out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS); |
1262 | 780 | ||
1263 | dev_dbg(host_pvt.dwc_dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", | 781 | dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", |
1264 | __func__, in_le32(&hsdev->sata_dwc_regs->intmr), | 782 | __func__, in_le32(&hsdev->sata_dwc_regs->intmr), |
1265 | in_le32(&hsdev->sata_dwc_regs->errmr)); | 783 | in_le32(&hsdev->sata_dwc_regs->errmr)); |
1266 | } | 784 | } |
1267 | 785 | ||
786 | static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param) | ||
787 | { | ||
788 | struct sata_dwc_device_port *hsdevp = param; | ||
789 | struct dw_dma_slave *dws = hsdevp->dws; | ||
790 | |||
791 | if (dws->dma_dev != chan->device->dev) | ||
792 | return false; | ||
793 | |||
794 | chan->private = dws; | ||
795 | return true; | ||
796 | } | ||
797 | |||
1268 | static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base) | 798 | static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base) |
1269 | { | 799 | { |
1270 | port->cmd_addr = (void __iomem *)base + 0x00; | 800 | port->cmd_addr = (void __iomem *)base + 0x00; |
@@ -1299,6 +829,7 @@ static int sata_dwc_port_start(struct ata_port *ap) | |||
1299 | struct sata_dwc_device *hsdev; | 829 | struct sata_dwc_device *hsdev; |
1300 | struct sata_dwc_device_port *hsdevp = NULL; | 830 | struct sata_dwc_device_port *hsdevp = NULL; |
1301 | struct device *pdev; | 831 | struct device *pdev; |
832 | dma_cap_mask_t mask; | ||
1302 | int i; | 833 | int i; |
1303 | 834 | ||
1304 | hsdev = HSDEV_FROM_AP(ap); | 835 | hsdev = HSDEV_FROM_AP(ap); |
@@ -1322,29 +853,27 @@ static int sata_dwc_port_start(struct ata_port *ap) | |||
1322 | } | 853 | } |
1323 | hsdevp->hsdev = hsdev; | 854 | hsdevp->hsdev = hsdev; |
1324 | 855 | ||
856 | hsdevp->dws = &sata_dwc_dma_dws; | ||
857 | hsdevp->dws->dma_dev = hsdev->dev; | ||
858 | |||
859 | dma_cap_zero(mask); | ||
860 | dma_cap_set(DMA_SLAVE, mask); | ||
861 | |||
862 | /* Acquire DMA channel */ | ||
863 | hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp); | ||
864 | if (!hsdevp->chan) { | ||
865 | dev_err(hsdev->dev, "%s: dma channel unavailable\n", | ||
866 | __func__); | ||
867 | err = -EAGAIN; | ||
868 | goto CLEANUP_ALLOC; | ||
869 | } | ||
870 | |||
1325 | for (i = 0; i < SATA_DWC_QCMD_MAX; i++) | 871 | for (i = 0; i < SATA_DWC_QCMD_MAX; i++) |
1326 | hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; | 872 | hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; |
1327 | 873 | ||
1328 | ap->bmdma_prd = NULL; /* set these so libata doesn't use them */ | 874 | ap->bmdma_prd = NULL; /* set these so libata doesn't use them */ |
1329 | ap->bmdma_prd_dma = 0; | 875 | ap->bmdma_prd_dma = 0; |
1330 | 876 | ||
1331 | /* | ||
1332 | * DMA - Assign scatter gather LLI table. We can't use the libata | ||
1333 | * version since it's PRD is IDE PCI specific. | ||
1334 | */ | ||
1335 | for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { | ||
1336 | hsdevp->llit[i] = dma_alloc_coherent(pdev, | ||
1337 | SATA_DWC_DMAC_LLI_TBL_SZ, | ||
1338 | &(hsdevp->llit_dma[i]), | ||
1339 | GFP_ATOMIC); | ||
1340 | if (!hsdevp->llit[i]) { | ||
1341 | dev_err(ap->dev, "%s: dma_alloc_coherent failed\n", | ||
1342 | __func__); | ||
1343 | err = -ENOMEM; | ||
1344 | goto CLEANUP_ALLOC; | ||
1345 | } | ||
1346 | } | ||
1347 | |||
1348 | if (ap->port_no == 0) { | 877 | if (ap->port_no == 0) { |
1349 | dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", | 878 | dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", |
1350 | __func__); | 879 | __func__); |
@@ -1373,22 +902,14 @@ CLEANUP: | |||
1373 | 902 | ||
1374 | static void sata_dwc_port_stop(struct ata_port *ap) | 903 | static void sata_dwc_port_stop(struct ata_port *ap) |
1375 | { | 904 | { |
1376 | int i; | ||
1377 | struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); | ||
1378 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); | 905 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); |
1379 | 906 | ||
1380 | dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id); | 907 | dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id); |
1381 | 908 | ||
1382 | if (hsdevp && hsdev) { | 909 | dmaengine_terminate_all(hsdevp->chan); |
1383 | /* deallocate LLI table */ | 910 | dma_release_channel(hsdevp->chan); |
1384 | for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { | ||
1385 | dma_free_coherent(ap->host->dev, | ||
1386 | SATA_DWC_DMAC_LLI_TBL_SZ, | ||
1387 | hsdevp->llit[i], hsdevp->llit_dma[i]); | ||
1388 | } | ||
1389 | 911 | ||
1390 | kfree(hsdevp); | 912 | kfree(hsdevp); |
1391 | } | ||
1392 | ap->private_data = NULL; | 913 | ap->private_data = NULL; |
1393 | } | 914 | } |
1394 | 915 | ||
@@ -1444,12 +965,12 @@ static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc) | |||
1444 | static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) | 965 | static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) |
1445 | { | 966 | { |
1446 | int start_dma; | 967 | int start_dma; |
1447 | u32 reg, dma_chan; | 968 | u32 reg; |
1448 | struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc); | 969 | struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc); |
1449 | struct ata_port *ap = qc->ap; | 970 | struct ata_port *ap = qc->ap; |
1450 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); | 971 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); |
972 | struct dma_async_tx_descriptor *desc = hsdevp->desc[tag]; | ||
1451 | int dir = qc->dma_dir; | 973 | int dir = qc->dma_dir; |
1452 | dma_chan = hsdevp->dma_chan[tag]; | ||
1453 | 974 | ||
1454 | if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) { | 975 | if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) { |
1455 | start_dma = 1; | 976 | start_dma = 1; |
@@ -1458,16 +979,17 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) | |||
1458 | else | 979 | else |
1459 | hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX; | 980 | hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX; |
1460 | } else { | 981 | } else { |
1461 | dev_err(ap->dev, "%s: Command not pending cmd_issued=%d " | 982 | dev_err(ap->dev, |
1462 | "(tag=%d) DMA NOT started\n", __func__, | 983 | "%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n", |
1463 | hsdevp->cmd_issued[tag], tag); | 984 | __func__, hsdevp->cmd_issued[tag], tag); |
1464 | start_dma = 0; | 985 | start_dma = 0; |
1465 | } | 986 | } |
1466 | 987 | ||
1467 | dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s " | 988 | dev_dbg(ap->dev, |
1468 | "start_dma? %x\n", __func__, qc, tag, qc->tf.command, | 989 | "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n", |
990 | __func__, qc, tag, qc->tf.command, | ||
1469 | get_dma_dir_descript(qc->dma_dir), start_dma); | 991 | get_dma_dir_descript(qc->dma_dir), start_dma); |
1470 | sata_dwc_tf_dump(&(qc->tf)); | 992 | sata_dwc_tf_dump(ap, &qc->tf); |
1471 | 993 | ||
1472 | if (start_dma) { | 994 | if (start_dma) { |
1473 | reg = core_scr_read(SCR_ERROR); | 995 | reg = core_scr_read(SCR_ERROR); |
@@ -1484,7 +1006,8 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) | |||
1484 | SATA_DWC_DMACR_RXCHEN); | 1006 | SATA_DWC_DMACR_RXCHEN); |
1485 | 1007 | ||
1486 | /* Enable AHB DMA transfer on the specified channel */ | 1008 | /* Enable AHB DMA transfer on the specified channel */ |
1487 | dma_dwc_xfer_start(dma_chan); | 1009 | dmaengine_submit(desc); |
1010 | dma_async_issue_pending(hsdevp->chan); | ||
1488 | } | 1011 | } |
1489 | } | 1012 | } |
1490 | 1013 | ||
@@ -1510,26 +1033,21 @@ static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc) | |||
1510 | */ | 1033 | */ |
1511 | static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag) | 1034 | static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag) |
1512 | { | 1035 | { |
1513 | struct scatterlist *sg = qc->sg; | 1036 | struct dma_async_tx_descriptor *desc; |
1514 | struct ata_port *ap = qc->ap; | 1037 | struct ata_port *ap = qc->ap; |
1515 | int dma_chan; | ||
1516 | struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); | ||
1517 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); | 1038 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); |
1518 | 1039 | ||
1519 | dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n", | 1040 | dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n", |
1520 | __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir), | 1041 | __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir), |
1521 | qc->n_elem); | 1042 | qc->n_elem); |
1522 | 1043 | ||
1523 | dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag], | 1044 | desc = dma_dwc_xfer_setup(qc); |
1524 | hsdevp->llit_dma[tag], | 1045 | if (!desc) { |
1525 | (void __iomem *)&hsdev->sata_dwc_regs->dmadr, | 1046 | dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns NULL\n", |
1526 | qc->dma_dir); | 1047 | __func__); |
1527 | if (dma_chan < 0) { | ||
1528 | dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n", | ||
1529 | __func__, dma_chan); | ||
1530 | return; | 1048 | return; |
1531 | } | 1049 | } |
1532 | hsdevp->dma_chan[tag] = dma_chan; | 1050 | hsdevp->desc[tag] = desc; |
1533 | } | 1051 | } |
1534 | 1052 | ||
1535 | static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) | 1053 | static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) |
@@ -1540,8 +1058,8 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) | |||
1540 | 1058 | ||
1541 | #ifdef DEBUG_NCQ | 1059 | #ifdef DEBUG_NCQ |
1542 | if (qc->tag > 0 || ap->link.sactive > 1) | 1060 | if (qc->tag > 0 || ap->link.sactive > 1) |
1543 | dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d " | 1061 | dev_info(ap->dev, |
1544 | "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", | 1062 | "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", |
1545 | __func__, ap->print_id, qc->tf.command, | 1063 | __func__, ap->print_id, qc->tf.command, |
1546 | ata_get_cmd_descript(qc->tf.command), | 1064 | ata_get_cmd_descript(qc->tf.command), |
1547 | qc->tag, get_prot_descript(qc->tf.protocol), | 1065 | qc->tag, get_prot_descript(qc->tf.protocol), |
@@ -1557,9 +1075,9 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) | |||
1557 | sactive |= (0x00000001 << tag); | 1075 | sactive |= (0x00000001 << tag); |
1558 | core_scr_write(SCR_ACTIVE, sactive); | 1076 | core_scr_write(SCR_ACTIVE, sactive); |
1559 | 1077 | ||
1560 | dev_dbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x " | 1078 | dev_dbg(qc->ap->dev, |
1561 | "sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive, | 1079 | "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n", |
1562 | sactive); | 1080 | __func__, tag, qc->ap->link.sactive, sactive); |
1563 | 1081 | ||
1564 | ap->ops->sff_tf_load(ap, &qc->tf); | 1082 | ap->ops->sff_tf_load(ap, &qc->tf); |
1565 | sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag, | 1083 | sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag, |
@@ -1673,7 +1191,6 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1673 | struct ata_port_info pi = sata_dwc_port_info[0]; | 1191 | struct ata_port_info pi = sata_dwc_port_info[0]; |
1674 | const struct ata_port_info *ppi[] = { &pi, NULL }; | 1192 | const struct ata_port_info *ppi[] = { &pi, NULL }; |
1675 | struct device_node *np = ofdev->dev.of_node; | 1193 | struct device_node *np = ofdev->dev.of_node; |
1676 | u32 dma_chan; | ||
1677 | 1194 | ||
1678 | /* Allocate DWC SATA device */ | 1195 | /* Allocate DWC SATA device */ |
1679 | host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); | 1196 | host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); |
@@ -1683,18 +1200,11 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1683 | 1200 | ||
1684 | host->private_data = hsdev; | 1201 | host->private_data = hsdev; |
1685 | 1202 | ||
1686 | if (of_property_read_u32(np, "dma-channel", &dma_chan)) { | ||
1687 | dev_warn(&ofdev->dev, "no dma-channel property set." | ||
1688 | " Use channel 0\n"); | ||
1689 | dma_chan = 0; | ||
1690 | } | ||
1691 | host_pvt.dma_channel = dma_chan; | ||
1692 | |||
1693 | /* Ioremap SATA registers */ | 1203 | /* Ioremap SATA registers */ |
1694 | base = of_iomap(np, 0); | 1204 | base = of_iomap(np, 0); |
1695 | if (!base) { | 1205 | if (!base) { |
1696 | dev_err(&ofdev->dev, "ioremap failed for SATA register" | 1206 | dev_err(&ofdev->dev, |
1697 | " address\n"); | 1207 | "ioremap failed for SATA register address\n"); |
1698 | return -ENODEV; | 1208 | return -ENODEV; |
1699 | } | 1209 | } |
1700 | hsdev->reg_base = base; | 1210 | hsdev->reg_base = base; |
@@ -1716,27 +1226,29 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1716 | idr, ver[0], ver[1], ver[2]); | 1226 | idr, ver[0], ver[1], ver[2]); |
1717 | 1227 | ||
1718 | /* Get SATA DMA interrupt number */ | 1228 | /* Get SATA DMA interrupt number */ |
1719 | irq = irq_of_parse_and_map(np, 1); | 1229 | hsdev->dma->irq = irq_of_parse_and_map(np, 1); |
1720 | if (irq == NO_IRQ) { | 1230 | if (hsdev->dma->irq == NO_IRQ) { |
1721 | dev_err(&ofdev->dev, "no SATA DMA irq\n"); | 1231 | dev_err(&ofdev->dev, "no SATA DMA irq\n"); |
1722 | err = -ENODEV; | 1232 | err = -ENODEV; |
1723 | goto error_iomap; | 1233 | goto error_iomap; |
1724 | } | 1234 | } |
1725 | 1235 | ||
1726 | /* Get physical SATA DMA register base address */ | 1236 | /* Get physical SATA DMA register base address */ |
1727 | host_pvt.sata_dma_regs = (void *)of_iomap(np, 1); | 1237 | hsdev->dma->regs = of_iomap(np, 1); |
1728 | if (!(host_pvt.sata_dma_regs)) { | 1238 | if (!hsdev->dma->regs) { |
1729 | dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" | 1239 | dev_err(&ofdev->dev, |
1730 | " address\n"); | 1240 | "ioremap failed for AHBDMA register address\n"); |
1731 | err = -ENODEV; | 1241 | err = -ENODEV; |
1732 | goto error_iomap; | 1242 | goto error_iomap; |
1733 | } | 1243 | } |
1734 | 1244 | ||
1735 | /* Save dev for later use in dev_xxx() routines */ | 1245 | /* Save dev for later use in dev_xxx() routines */ |
1736 | host_pvt.dwc_dev = &ofdev->dev; | 1246 | hsdev->dev = &ofdev->dev; |
1247 | |||
1248 | hsdev->dma->dev = &ofdev->dev; | ||
1737 | 1249 | ||
1738 | /* Initialize AHB DMAC */ | 1250 | /* Initialize AHB DMAC */ |
1739 | err = dma_dwc_init(hsdev, irq); | 1251 | err = dw_dma_probe(hsdev->dma, NULL); |
1740 | if (err) | 1252 | if (err) |
1741 | goto error_dma_iomap; | 1253 | goto error_dma_iomap; |
1742 | 1254 | ||
@@ -1765,9 +1277,9 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1765 | 1277 | ||
1766 | error_out: | 1278 | error_out: |
1767 | /* Free SATA DMA resources */ | 1279 | /* Free SATA DMA resources */ |
1768 | dma_dwc_exit(hsdev); | 1280 | dw_dma_remove(hsdev->dma); |
1769 | error_dma_iomap: | 1281 | error_dma_iomap: |
1770 | iounmap((void __iomem *)host_pvt.sata_dma_regs); | 1282 | iounmap(hsdev->dma->regs); |
1771 | error_iomap: | 1283 | error_iomap: |
1772 | iounmap(base); | 1284 | iounmap(base); |
1773 | return err; | 1285 | return err; |
@@ -1782,9 +1294,9 @@ static int sata_dwc_remove(struct platform_device *ofdev) | |||
1782 | ata_host_detach(host); | 1294 | ata_host_detach(host); |
1783 | 1295 | ||
1784 | /* Free SATA DMA resources */ | 1296 | /* Free SATA DMA resources */ |
1785 | dma_dwc_exit(hsdev); | 1297 | dw_dma_remove(hsdev->dma); |
1786 | 1298 | ||
1787 | iounmap((void __iomem *)host_pvt.sata_dma_regs); | 1299 | iounmap(hsdev->dma->regs); |
1788 | iounmap(hsdev->reg_base); | 1300 | iounmap(hsdev->reg_base); |
1789 | dev_dbg(&ofdev->dev, "done\n"); | 1301 | dev_dbg(&ofdev->dev, "done\n"); |
1790 | return 0; | 1302 | return 0; |
@@ -1809,5 +1321,5 @@ module_platform_driver(sata_dwc_driver); | |||
1809 | 1321 | ||
1810 | MODULE_LICENSE("GPL"); | 1322 | MODULE_LICENSE("GPL"); |
1811 | MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>"); | 1323 | MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>"); |
1812 | MODULE_DESCRIPTION("DesignWare Cores SATA controller low lever driver"); | 1324 | MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver"); |
1813 | MODULE_VERSION(DRV_VERSION); | 1325 | MODULE_VERSION(DRV_VERSION); |
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index 069827826b20..e81a8217f1ff 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c | |||
@@ -856,13 +856,13 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
856 | } | 856 | } |
857 | 857 | ||
858 | /* Set dma_mask. This devices doesn't support 64bit addressing. */ | 858 | /* Set dma_mask. This devices doesn't support 64bit addressing. */ |
859 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 859 | rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
860 | if (rc) { | 860 | if (rc) { |
861 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); | 861 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); |
862 | return rc; | 862 | return rc; |
863 | } | 863 | } |
864 | 864 | ||
865 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 865 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
866 | if (rc) { | 866 | if (rc) { |
867 | dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); | 867 | dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); |
868 | return rc; | 868 | return rc; |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index f8c33e3772b8..bd74ee555278 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -306,6 +306,11 @@ enum { | |||
306 | MV5_PHY_CTL = 0x0C, | 306 | MV5_PHY_CTL = 0x0C, |
307 | SATA_IFCFG = 0x050, | 307 | SATA_IFCFG = 0x050, |
308 | LP_PHY_CTL = 0x058, | 308 | LP_PHY_CTL = 0x058, |
309 | LP_PHY_CTL_PIN_PU_PLL = (1 << 0), | ||
310 | LP_PHY_CTL_PIN_PU_RX = (1 << 1), | ||
311 | LP_PHY_CTL_PIN_PU_TX = (1 << 2), | ||
312 | LP_PHY_CTL_GEN_TX_3G = (1 << 5), | ||
313 | LP_PHY_CTL_GEN_RX_3G = (1 << 9), | ||
309 | 314 | ||
310 | MV_M2_PREAMP_MASK = 0x7e0, | 315 | MV_M2_PREAMP_MASK = 0x7e0, |
311 | 316 | ||
@@ -1391,10 +1396,17 @@ static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) | |||
1391 | /* | 1396 | /* |
1392 | * Set PHY speed according to SControl speed. | 1397 | * Set PHY speed according to SControl speed. |
1393 | */ | 1398 | */ |
1394 | if ((val & 0xf0) == 0x10) | 1399 | u32 lp_phy_val = |
1395 | writelfl(0x7, lp_phy_addr); | 1400 | LP_PHY_CTL_PIN_PU_PLL | |
1396 | else | 1401 | LP_PHY_CTL_PIN_PU_RX | |
1397 | writelfl(0x227, lp_phy_addr); | 1402 | LP_PHY_CTL_PIN_PU_TX; |
1403 | |||
1404 | if ((val & 0xf0) != 0x10) | ||
1405 | lp_phy_val |= | ||
1406 | LP_PHY_CTL_GEN_TX_3G | | ||
1407 | LP_PHY_CTL_GEN_RX_3G; | ||
1408 | |||
1409 | writelfl(lp_phy_val, lp_phy_addr); | ||
1398 | } | 1410 | } |
1399 | } | 1411 | } |
1400 | writelfl(val, addr); | 1412 | writelfl(val, addr); |
@@ -4308,10 +4320,10 @@ static int pci_go_64(struct pci_dev *pdev) | |||
4308 | { | 4320 | { |
4309 | int rc; | 4321 | int rc; |
4310 | 4322 | ||
4311 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | 4323 | if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { |
4312 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 4324 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); |
4313 | if (rc) { | 4325 | if (rc) { |
4314 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 4326 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
4315 | if (rc) { | 4327 | if (rc) { |
4316 | dev_err(&pdev->dev, | 4328 | dev_err(&pdev->dev, |
4317 | "64-bit DMA enable failed\n"); | 4329 | "64-bit DMA enable failed\n"); |
@@ -4319,12 +4331,12 @@ static int pci_go_64(struct pci_dev *pdev) | |||
4319 | } | 4331 | } |
4320 | } | 4332 | } |
4321 | } else { | 4333 | } else { |
4322 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 4334 | rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
4323 | if (rc) { | 4335 | if (rc) { |
4324 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); | 4336 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); |
4325 | return rc; | 4337 | return rc; |
4326 | } | 4338 | } |
4327 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 4339 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
4328 | if (rc) { | 4340 | if (rc) { |
4329 | dev_err(&pdev->dev, | 4341 | dev_err(&pdev->dev, |
4330 | "32-bit consistent DMA enable failed\n"); | 4342 | "32-bit consistent DMA enable failed\n"); |
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 1db6f5ce5e89..7ece85f43020 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
@@ -756,10 +756,10 @@ static int nv_adma_slave_config(struct scsi_device *sdev) | |||
756 | blk_queue_bounce_limit(sdev1->request_queue, | 756 | blk_queue_bounce_limit(sdev1->request_queue, |
757 | ATA_DMA_MASK); | 757 | ATA_DMA_MASK); |
758 | 758 | ||
759 | pci_set_dma_mask(pdev, ATA_DMA_MASK); | 759 | dma_set_mask(&pdev->dev, ATA_DMA_MASK); |
760 | } else { | 760 | } else { |
761 | /** This shouldn't fail as it was set to this value before */ | 761 | /** This shouldn't fail as it was set to this value before */ |
762 | pci_set_dma_mask(pdev, pp->adma_dma_mask); | 762 | dma_set_mask(&pdev->dev, pp->adma_dma_mask); |
763 | if (sdev0) | 763 | if (sdev0) |
764 | blk_queue_bounce_limit(sdev0->request_queue, | 764 | blk_queue_bounce_limit(sdev0->request_queue, |
765 | pp->adma_dma_mask); | 765 | pp->adma_dma_mask); |
@@ -1133,10 +1133,10 @@ static int nv_adma_port_start(struct ata_port *ap) | |||
1133 | 1133 | ||
1134 | /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and | 1134 | /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and |
1135 | pad buffers */ | 1135 | pad buffers */ |
1136 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 1136 | rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
1137 | if (rc) | 1137 | if (rc) |
1138 | return rc; | 1138 | return rc; |
1139 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 1139 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
1140 | if (rc) | 1140 | if (rc) |
1141 | return rc; | 1141 | return rc; |
1142 | 1142 | ||
@@ -1161,8 +1161,8 @@ static int nv_adma_port_start(struct ata_port *ap) | |||
1161 | These are allowed to fail since we store the value that ends up | 1161 | These are allowed to fail since we store the value that ends up |
1162 | being used to set as the bounce limit in slave_config later if | 1162 | being used to set as the bounce limit in slave_config later if |
1163 | needed. */ | 1163 | needed. */ |
1164 | pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | 1164 | dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); |
1165 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 1165 | dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); |
1166 | pp->adma_dma_mask = *dev->dma_mask; | 1166 | pp->adma_dma_mask = *dev->dma_mask; |
1167 | 1167 | ||
1168 | mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, | 1168 | mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, |
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index 3638887476f6..0fa211e2831c 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c | |||
@@ -1246,10 +1246,10 @@ static int pdc_ata_init_one(struct pci_dev *pdev, | |||
1246 | /* initialize adapter */ | 1246 | /* initialize adapter */ |
1247 | pdc_host_init(host); | 1247 | pdc_host_init(host); |
1248 | 1248 | ||
1249 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 1249 | rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); |
1250 | if (rc) | 1250 | if (rc) |
1251 | return rc; | 1251 | return rc; |
1252 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | 1252 | rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); |
1253 | if (rc) | 1253 | if (rc) |
1254 | return rc; | 1254 | return rc; |
1255 | 1255 | ||
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index 9a6bd4cd29a0..af987a4f33d1 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c | |||
@@ -557,10 +557,10 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base) | |||
557 | int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT); | 557 | int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT); |
558 | 558 | ||
559 | if (have_64bit_bus && | 559 | if (have_64bit_bus && |
560 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | 560 | !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { |
561 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 561 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); |
562 | if (rc) { | 562 | if (rc) { |
563 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 563 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
564 | if (rc) { | 564 | if (rc) { |
565 | dev_err(&pdev->dev, | 565 | dev_err(&pdev->dev, |
566 | "64-bit DMA enable failed\n"); | 566 | "64-bit DMA enable failed\n"); |
@@ -568,12 +568,12 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base) | |||
568 | } | 568 | } |
569 | } | 569 | } |
570 | } else { | 570 | } else { |
571 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 571 | rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
572 | if (rc) { | 572 | if (rc) { |
573 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); | 573 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); |
574 | return rc; | 574 | return rc; |
575 | } | 575 | } |
576 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 576 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
577 | if (rc) { | 577 | if (rc) { |
578 | dev_err(&pdev->dev, | 578 | dev_err(&pdev->dev, |
579 | "32-bit consistent DMA enable failed\n"); | 579 | "32-bit consistent DMA enable failed\n"); |
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 40b76b2d18c6..dea6edcbf145 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c | |||
@@ -770,10 +770,10 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
770 | return rc; | 770 | return rc; |
771 | host->iomap = pcim_iomap_table(pdev); | 771 | host->iomap = pcim_iomap_table(pdev); |
772 | 772 | ||
773 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 773 | rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); |
774 | if (rc) | 774 | if (rc) |
775 | return rc; | 775 | return rc; |
776 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | 776 | rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); |
777 | if (rc) | 777 | if (rc) |
778 | return rc; | 778 | return rc; |
779 | 779 | ||
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index ba2667fa0528..4b1995e2d044 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c | |||
@@ -246,7 +246,7 @@ enum { | |||
246 | /* host flags */ | 246 | /* host flags */ |
247 | SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | | 247 | SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | |
248 | ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | | 248 | ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | |
249 | ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG, | 249 | ATA_FLAG_AN | ATA_FLAG_PMP, |
250 | SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ | 250 | SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ |
251 | 251 | ||
252 | IRQ_STAT_4PORTS = 0xf, | 252 | IRQ_STAT_4PORTS = 0xf, |
@@ -1312,10 +1312,10 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1312 | host->iomap = iomap; | 1312 | host->iomap = iomap; |
1313 | 1313 | ||
1314 | /* configure and activate the device */ | 1314 | /* configure and activate the device */ |
1315 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | 1315 | if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { |
1316 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 1316 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); |
1317 | if (rc) { | 1317 | if (rc) { |
1318 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 1318 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
1319 | if (rc) { | 1319 | if (rc) { |
1320 | dev_err(&pdev->dev, | 1320 | dev_err(&pdev->dev, |
1321 | "64-bit DMA enable failed\n"); | 1321 | "64-bit DMA enable failed\n"); |
@@ -1323,12 +1323,12 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1323 | } | 1323 | } |
1324 | } | 1324 | } |
1325 | } else { | 1325 | } else { |
1326 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 1326 | rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
1327 | if (rc) { | 1327 | if (rc) { |
1328 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); | 1328 | dev_err(&pdev->dev, "32-bit DMA enable failed\n"); |
1329 | return rc; | 1329 | return rc; |
1330 | } | 1330 | } |
1331 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 1331 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
1332 | if (rc) { | 1332 | if (rc) { |
1333 | dev_err(&pdev->dev, | 1333 | dev_err(&pdev->dev, |
1334 | "32-bit consistent DMA enable failed\n"); | 1334 | "32-bit consistent DMA enable failed\n"); |
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index c630fa812624..ff8307b30ff0 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c | |||
@@ -496,10 +496,10 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
496 | ata_port_pbar_desc(ap, 5, offset, "port"); | 496 | ata_port_pbar_desc(ap, 5, offset, "port"); |
497 | } | 497 | } |
498 | 498 | ||
499 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 499 | rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); |
500 | if (rc) | 500 | if (rc) |
501 | return rc; | 501 | return rc; |
502 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | 502 | rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); |
503 | if (rc) | 503 | if (rc) |
504 | return rc; | 504 | return rc; |
505 | 505 | ||
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index 39b5de60a1f9..3a18a8a719b4 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c | |||
@@ -1476,10 +1476,10 @@ static int pdc_sata_init_one(struct pci_dev *pdev, | |||
1476 | } | 1476 | } |
1477 | 1477 | ||
1478 | /* configure and activate */ | 1478 | /* configure and activate */ |
1479 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 1479 | rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); |
1480 | if (rc) | 1480 | if (rc) |
1481 | return rc; | 1481 | return rc; |
1482 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | 1482 | rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); |
1483 | if (rc) | 1483 | if (rc) |
1484 | return rc; | 1484 | return rc; |
1485 | 1485 | ||
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 47bf89464cef..17d31fc009ab 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c | |||
@@ -502,10 +502,10 @@ static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) | |||
502 | for (i = 0; i < host->n_ports; i++) | 502 | for (i = 0; i < host->n_ports; i++) |
503 | vt6421_init_addrs(host->ports[i]); | 503 | vt6421_init_addrs(host->ports[i]); |
504 | 504 | ||
505 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 505 | rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); |
506 | if (rc) | 506 | if (rc) |
507 | return rc; | 507 | return rc; |
508 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | 508 | rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); |
509 | if (rc) | 509 | if (rc) |
510 | return rc; | 510 | return rc; |
511 | 511 | ||
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index 29e847aac34b..183eb52085df 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c | |||
@@ -387,10 +387,10 @@ static int vsc_sata_init_one(struct pci_dev *pdev, | |||
387 | /* | 387 | /* |
388 | * Use 32 bit DMA mask, because 64 bit address support is poor. | 388 | * Use 32 bit DMA mask, because 64 bit address support is poor. |
389 | */ | 389 | */ |
390 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 390 | rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
391 | if (rc) | 391 | if (rc) |
392 | return rc; | 392 | return rc; |
393 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 393 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
394 | if (rc) | 394 | if (rc) |
395 | return rc; | 395 | return rc; |
396 | 396 | ||