aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma
diff options
context:
space:
mode:
authorDave Jiang <dave.jiang@intel.com>2013-03-26 18:43:15 -0400
committerVinod Koul <vinod.koul@intel.com>2013-04-15 00:21:20 -0400
commiteceec44ecd7f3285468a684e7216df2316b178f3 (patch)
tree9607c250ac6d2a9415877f768fc427c40de8f453 /drivers/dma
parent3f09ede4237fe4691ac687c6c43cb4c1a530777b (diff)
ioatdma: skip silicon bug workaround for pq_align for cb3.3
The alignment workaround is only necessary for cb3.2 or earlier platforms. Signed-off-by: Dave Jiang <dave.jiang@intel.com> Acked-by: Dan Williams <djbw@fb.com> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/ioat/dma_v3.c12
1 file changed, 10 insertions, 2 deletions
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 9628ba2ff70c..cf97e3f16924 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1521,10 +1521,14 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
1521 dma_cap_set(DMA_XOR_VAL, dma->cap_mask); 1521 dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
1522 dma->device_prep_dma_xor_val = ioat3_prep_xor_val; 1522 dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
1523 } 1523 }
1524
1524 if (cap & IOAT_CAP_PQ) { 1525 if (cap & IOAT_CAP_PQ) {
1525 is_raid_device = true; 1526 is_raid_device = true;
1526 dma_set_maxpq(dma, 8, 0); 1527 dma_set_maxpq(dma, 8, 0);
1527 dma->pq_align = 6; 1528 if (is_xeon_cb32(pdev))
1529 dma->pq_align = 6;
1530 else
1531 dma->pq_align = 0;
1528 1532
1529 dma_cap_set(DMA_PQ, dma->cap_mask); 1533 dma_cap_set(DMA_PQ, dma->cap_mask);
1530 dma->device_prep_dma_pq = ioat3_prep_pq; 1534 dma->device_prep_dma_pq = ioat3_prep_pq;
@@ -1534,7 +1538,10 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
1534 1538
1535 if (!(cap & IOAT_CAP_XOR)) { 1539 if (!(cap & IOAT_CAP_XOR)) {
1536 dma->max_xor = 8; 1540 dma->max_xor = 8;
1537 dma->xor_align = 6; 1541 if (is_xeon_cb32(pdev))
1542 dma->xor_align = 6;
1543 else
1544 dma->xor_align = 0;
1538 1545
1539 dma_cap_set(DMA_XOR, dma->cap_mask); 1546 dma_cap_set(DMA_XOR, dma->cap_mask);
1540 dma->device_prep_dma_xor = ioat3_prep_pqxor; 1547 dma->device_prep_dma_xor = ioat3_prep_pqxor;
@@ -1543,6 +1550,7 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
1543 dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; 1550 dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
1544 } 1551 }
1545 } 1552 }
1553
1546 if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) { 1554 if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
1547 dma_cap_set(DMA_MEMSET, dma->cap_mask); 1555 dma_cap_set(DMA_MEMSET, dma->cap_mask);
1548 dma->device_prep_dma_memset = ioat3_prep_memset_lock; 1556 dma->device_prep_dma_memset = ioat3_prep_memset_lock;