Diffstat (limited to 'drivers/dma/ioat/dma_v3.c')
 drivers/dma/ioat/dma_v3.c | 35 +++++++++++++++++++++++++++------
 1 file changed, 29 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 895f869d6c2c..32eae38291e5 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1265,9 +1265,17 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	op = IOAT_OP_XOR;
 
 	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, dest_dma))
+		goto dma_unmap;
+
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
 	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
 				      DMA_PREP_INTERRUPT);
@@ -1298,7 +1306,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 		goto dma_unmap;
 	}
 
-	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
 		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
 
@@ -1313,6 +1320,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	}
 	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 
+	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
 	/* skip validate if the capability is not present */
 	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
 		goto free_resources;
@@ -1327,8 +1336,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	xor_val_result = 1;
 
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
 					  &xor_val_result, DMA_PREP_INTERRUPT);
@@ -1374,8 +1388,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
 	xor_val_result = 0;
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
 					  &xor_val_result, DMA_PREP_INTERRUPT);
@@ -1417,14 +1436,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 		goto free_resources;
 dma_unmap:
 	if (op == IOAT_OP_XOR) {
-		dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (dest_dma != DMA_ERROR_CODE)
+			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
 		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
-				       DMA_TO_DEVICE);
+			if (dma_srcs[i] != DMA_ERROR_CODE)
+				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+					       DMA_TO_DEVICE);
 	} else if (op == IOAT_OP_XOR_VAL) {
 		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
-				       DMA_TO_DEVICE);
+			if (dma_srcs[i] != DMA_ERROR_CODE)
+				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+					       DMA_TO_DEVICE);
 	}
 free_resources:
 	dma->device_free_chan_resources(dma_chan);
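The pattern this patch repeats in each hunk is worth stating on its own: dma_map_page() can fail, so every result must be checked with dma_mapping_error(), and the error path must be able to tell mapped entries from unmapped ones. Pre-filling the address array with DMA_ERROR_CODE in a separate loop guarantees that a failure partway through the mapping loop leaves all unvisited slots holding the sentinel, which is what lets the shared dma_unmap label unwind safely. Below is a minimal self-contained sketch of that pattern; map_srcs_checked() is a hypothetical helper invented for illustration, not part of the patch, and it assumes an architecture that defines DMA_ERROR_CODE (as x86 did when this patch was written; the constant has since been removed from the kernel).

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Hypothetical helper sketching the map-with-unwind pattern used in the
 * patch: map cnt pages for DMA, bail out and unmap on the first failure.
 */
static int map_srcs_checked(struct device *dev, struct page **pages,
			    dma_addr_t *dma_srcs, int cnt)
{
	int i;

	/* Pre-fill with the sentinel so cleanup can tell mapped from unmapped. */
	for (i = 0; i < cnt; i++)
		dma_srcs[i] = DMA_ERROR_CODE;

	for (i = 0; i < cnt; i++) {
		dma_srcs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		/* dma_map_page() can fail; the result must always be checked. */
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto unmap;
	}
	return 0;

unmap:
	/* Unmap only the entries that were actually mapped. */
	for (i = 0; i < cnt; i++)
		if (dma_srcs[i] != DMA_ERROR_CODE)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	return -ENOMEM;
}

On success every dma_srcs[i] holds a valid bus address; on failure nothing stays mapped. That all-or-nothing invariant is exactly what the patch's dma_unmap label relies on when a mapping or a prep call fails midway through the self test.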