about summary refs log tree commit diff stats
path: root/drivers/dma
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2010-05-01 18:22:56 -0400
committerDan Williams <dan.j.williams@intel.com>2010-05-01 18:22:56 -0400
commit2adfc550b6d9646301c810643bc309fa49375987 (patch)
tree1e1fe80772dc588be2dc294d8b9f371caec3f0b0 /drivers/dma
parent074cc47679f8b0931d7d5384e95822d82768f149 (diff)
ioat3: disable cacheline-unaligned transfers for raid operations
There are cases where cacheline-unaligned raid operations can hang the dma channel. Simply disable these operations by increasing the alignment constraints published to async_tx. The raid456 driver always issues page aligned requests, so the only in-kernel user of the ioatdma driver that is affected by this change is dmatest. Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/ioat/dma_v3.c6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 8b573fac2a25..e2a23952172d 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1175,7 +1175,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1175 if (cap & IOAT_CAP_XOR) { 1175 if (cap & IOAT_CAP_XOR) {
1176 is_raid_device = true; 1176 is_raid_device = true;
1177 dma->max_xor = 8; 1177 dma->max_xor = 8;
1178 dma->xor_align = 2; 1178 dma->xor_align = 6;
1179 1179
1180 dma_cap_set(DMA_XOR, dma->cap_mask); 1180 dma_cap_set(DMA_XOR, dma->cap_mask);
1181 dma->device_prep_dma_xor = ioat3_prep_xor; 1181 dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1186,7 +1186,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1186 if (cap & IOAT_CAP_PQ) { 1186 if (cap & IOAT_CAP_PQ) {
1187 is_raid_device = true; 1187 is_raid_device = true;
1188 dma_set_maxpq(dma, 8, 0); 1188 dma_set_maxpq(dma, 8, 0);
1189 dma->pq_align = 2; 1189 dma->pq_align = 6;
1190 1190
1191 dma_cap_set(DMA_PQ, dma->cap_mask); 1191 dma_cap_set(DMA_PQ, dma->cap_mask);
1192 dma->device_prep_dma_pq = ioat3_prep_pq; 1192 dma->device_prep_dma_pq = ioat3_prep_pq;
@@ -1196,7 +1196,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1196 1196
1197 if (!(cap & IOAT_CAP_XOR)) { 1197 if (!(cap & IOAT_CAP_XOR)) {
1198 dma->max_xor = 8; 1198 dma->max_xor = 8;
1199 dma->xor_align = 2; 1199 dma->xor_align = 6;
1200 1200
1201 dma_cap_set(DMA_XOR, dma->cap_mask); 1201 dma_cap_set(DMA_XOR, dma->cap_mask);
1202 dma->device_prep_dma_xor = ioat3_prep_pqxor; 1202 dma->device_prep_dma_xor = ioat3_prep_pqxor;