aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBrice Goglin <Brice.Goglin@inria.fr>2013-08-02 15:18:03 -0400
committerDan Williams <djbw@fb.com>2013-08-23 01:57:39 -0400
commitc4dcf0e2dd7e06db0c5c3f396b2e2b9ce1f6d19f (patch)
tree8a3d52af6e05bfe2703188a71c8c01b9bc4ba95f
parente03bc654f85604bcd5304debb597f398d1d03778 (diff)
ioatdma: disable RAID on non-Atom platforms and reenable unaligned copies
Disable RAID on non-Atom platform and remove related fixups such as the 64-byte alignment restriction on legacy DMA operations (introduced in commit f26df1a1 as a workaround for silicon errata).

Signed-off-by: Brice Goglin <Brice.Goglin@inria.fr>
Acked-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Jon Mason <jon.mason@intel.com>
Signed-off-by: Dan Williams <djbw@fb.com>
-rw-r--r--drivers/dma/ioat/dma_v3.c24
1 file changed, 1 insertion(+), 23 deletions(-)
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b642e035579b..c94e0d210667 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1775,15 +1775,12 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
-	if (is_xeon_cb32(pdev))
-		dma->copy_align = 6;
-
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
 	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
 
-	if (is_bwd_noraid(pdev))
+	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
 		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
 
 	/* dca is incompatible with raid operations */
@@ -1793,7 +1790,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	if (device->cap & IOAT_CAP_XOR) {
 		is_raid_device = true;
 		dma->max_xor = 8;
-		dma->xor_align = 6;
 
 		dma_cap_set(DMA_XOR, dma->cap_mask);
 		dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1812,13 +1808,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 
 		if (device->cap & IOAT_CAP_RAID16SS) {
 			dma_set_maxpq(dma, 16, 0);
-			dma->pq_align = 0;
 		} else {
 			dma_set_maxpq(dma, 8, 0);
-			if (is_xeon_cb32(pdev))
-				dma->pq_align = 6;
-			else
-				dma->pq_align = 0;
 		}
 
 		if (!(device->cap & IOAT_CAP_XOR)) {
@@ -1829,13 +1820,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 
 			if (device->cap & IOAT_CAP_RAID16SS) {
 				dma->max_xor = 16;
-				dma->xor_align = 0;
 			} else {
 				dma->max_xor = 8;
-				if (is_xeon_cb32(pdev))
-					dma->xor_align = 6;
-				else
-					dma->xor_align = 0;
 			}
 		}
 	}
@@ -1844,14 +1830,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	device->cleanup_fn = ioat3_cleanup_event;
 	device->timer_fn = ioat3_timer_event;
 
-	if (is_xeon_cb32(pdev)) {
-		dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
-		dma->device_prep_dma_xor_val = NULL;
-
-		dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
-		dma->device_prep_dma_pq_val = NULL;
-	}
-
 	/* starting with CB3.3 super extended descriptors are supported */
 	if (device->cap & IOAT_CAP_RAID16SS) {
 		char pool_name[14];