author		Prarit Bhargava <prarit@redhat.com>	2014-10-23 07:38:29 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2014-12-09 04:22:05 -0500
commit		4ff2fd839ce86b1e2b1fed543886930a493de588 (patch)
tree		be622a66a211ecb79f0ac51535ab76fc89badf1f /drivers/dma/ioat
parent		754416e10beb067e0bb473e00bf210c6f268e666 (diff)
dmaengine: ioatdma: fix dma mapping errors
Several systems are showing the following stack trace:

WARNING: CPU: 0 PID: 2352 at lib/dma-debug.c:1140 check_unmap+0x4ee/0x9e0()
ioatdma 0000:00:04.0: DMA-API: device driver failed to check map error[device address=0x0000000465bad000] [size=4096 bytes] [mapped as page]
Modules linked in: ioatdma(E+) nfsv3 rpcsec_gss_krb5 nfsv4 dns_resolver nfs fscache cfg80211 rfkill x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel kvm crct10dif_pclmul crc32_pclmul crc32c_intel cdc_ether ses ghash_clmulni_intel usbnet mii enclosure aesni_intel lrw gf128mul glue_helper iTCO_wdt shpchp ablk_helper iTCO_vendor_support cryptd pcspkr ipmi_devintf sb_edac lpc_ich edac_core mfd_core ipmi_si i2c_i801 wmi ipmi_msghandler nfsd auth_rpcgss nfs_acl lockd sunrpc xfs libcrc32c sd_mod crc_t10dif crct10dif_common mgag200 syscopyarea sysfillrect sysimgblt drm_kms_helper ttm igb drm ptp pps_core dca i2c_algo_bit i2ccore megaraid_sas dm_mirror dm_region_hash dm_log dm_mod [last unloaded: ioatdma]
CPU: 0 PID: 2352 Comm: insmod Tainted: G E 3.17.0-rc4+ #14
Hardware name: HP ProLiant m300 Server Cartridge/, BIOS H02 01/30/2014
 0000000000000009 ffff88007994b7d8 ffffffff816e7225 ffff88007994b820
 ffff88007994b810 ffffffff8107e51d ffff88045fc56c00 ffff88046643ee90
 ffffffff8338ccd0 0000000000000286 ffffffff81956629 ffff88007994b870
Call Trace:
 [<ffffffff816e7225>] dump_stack+0x4d/0x66
 [<ffffffff8107e51d>] warn_slowpath_common+0x7d/0xa0
 [<ffffffff8107e58c>] warn_slowpath_fmt+0x4c/0x50
 [<ffffffff81381e6e>] check_unmap+0x4ee/0x9e0
 [<ffffffff813823bf>] debug_dma_unmap_page+0x5f/0x70
 [<ffffffffa04546d8>] ioat_xor_val_self_test+0x498/0xcf0 [ioatdma]
 [<ffffffff81204f0a>] ? kfree+0xda/0x2b0
 [<ffffffffa044d510>] ? ioat_dma_setup_interrupts+0x120/0x2d0 [ioatdma]
 [<ffffffffa0454f4e>] ioat3_dma_self_test+0x1e/0x30 [ioatdma]
 [<ffffffffa044f904>] ioat_probe+0xf4/0x110 [ioatdma]
 [<ffffffffa04550f8>] ioat3_dma_probe+0x198/0x3a0 [ioatdma]
 [<ffffffffa044d18e>] ioat_pci_probe+0x11e/0x1b0 [ioatdma]
 [<ffffffff81393a15>] local_pci_probe+0x45/0xa0
 [<ffffffff81394be5>] ? pci_match_device+0xe5/0x110
 [<ffffffff81394d29>] pci_device_probe+0xd9/0x130
 [<ffffffff81462860>] driver_probe_device+0x90/0x3c0
 [<ffffffff81462c63>] __driver_attach+0x93/0xa0
 [<ffffffff81462bd0>] ? __device_attach+0x40/0x40
 [<ffffffff8146080b>] bus_for_each_dev+0x6b/0xb0
 [<ffffffff814622ce>] driver_attach+0x1e/0x20
 [<ffffffff81461ed8>] bus_add_driver+0x188/0x260
 [<ffffffffa0423000>] ? 0xffffffffa0423000
 [<ffffffff81463734>] driver_register+0x64/0xf0
 [<ffffffff813933a0>] __pci_register_driver+0x60/0x70
 [<ffffffffa0423089>] ioat_init_module+0x89/0x1000 [ioatdma]
 [<ffffffff8100212c>] do_one_initcall+0xbc/0x200
 [<ffffffff811e8b22>] ? __vunmap+0xd2/0x120
 [<ffffffff8111e73c>] load_module+0x14ec/0x1b50
 [<ffffffff81119970>] ? store_uevent+0x40/0x40
 [<ffffffff8111ef36>] SyS_finit_module+0x86/0xb0
 [<ffffffff816f1469>] system_call_fastpath+0x16/0x1b
---[ end trace 1052ccbbc3db4d08 ]---
Mapped at:
 [<ffffffff81380be1>] debug_dma_map_page+0x91/0x140
 [<ffffffffa045440e>] ioat_xor_val_self_test+0x1ce/0xcf0 [ioatdma]
 [<ffffffffa0454f4e>] ioat3_dma_self_test+0x1e/0x30 [ioatdma]
 [<ffffffffa044f904>] ioat_probe+0xf4/0x110 [ioatdma]
 [<ffffffffa04550f8>] ioat3_dma_probe+0x198/0x3a0 [ioatdma]

This happens because the current ioatdma DMA test code does not check the return value of its dma_map_page() calls with dma_mapping_error(). In addition, it was noticed that the mapping for the variable dest_dma is unmapped before its last use.
This patch fixes these errors by initializing the dma_srcs[] array to DMA_ERROR_CODE, checking each mapping with dma_mapping_error(), and moving the unmap of dest_dma after its last use.

Signed-off-by: Prarit Bhargava <prarit@redhat.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
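For reference, the shape of the fix is the standard DMA-API error-checking pattern: every dma_map_page() result is tested with dma_mapping_error() before use, and the unwind path only unmaps addresses that were actually mapped. The sketch below is illustrative only and is not the driver's code; the names map_test_srcs(), NSRC, src_pages and dest are hypothetical, and it assumes an architecture that defines the DMA_ERROR_CODE sentinel (as x86 did when this patch was written).

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/errno.h>

#define NSRC 8	/* hypothetical source count, analogous to IOAT_NUM_SRC_TEST */

/* Illustrative only: map a destination page and NSRC source pages,
 * checking every mapping and unwinding cleanly on failure. */
static int map_test_srcs(struct device *dev, struct page *dest,
			 struct page **src_pages, dma_addr_t *dest_dma,
			 dma_addr_t *dma_srcs)
{
	int i;

	*dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dest_dma))
		return -ENOMEM;

	/* Pre-fill with the error sentinel so the unwind path can tell
	 * mapped entries from never-mapped ones, as the patch does. */
	for (i = 0; i < NSRC; i++)
		dma_srcs[i] = DMA_ERROR_CODE;

	for (i = 0; i < NSRC; i++) {
		dma_srcs[i] = dma_map_page(dev, src_pages[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto unmap;
	}
	return 0;

unmap:
	for (i = 0; i < NSRC; i++)
		if (dma_srcs[i] != DMA_ERROR_CODE)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	dma_unmap_page(dev, *dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	return -ENOMEM;
}

The sentinel pre-fill matters because, in the driver, the dma_unmap label is shared by several failure points and therefore cannot assume every entry of dma_srcs[] was successfully mapped.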
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--	drivers/dma/ioat/dma_v3.c	35
1 file changed, 29 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 895f869d6c2c..32eae38291e5 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1265,9 +1265,17 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	op = IOAT_OP_XOR;
 
 	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, dest_dma))
+		goto dma_unmap;
+
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
 	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
 				      DMA_PREP_INTERRUPT);
@@ -1298,7 +1306,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 		goto dma_unmap;
 	}
 
-	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
 		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
 
@@ -1313,6 +1320,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	}
 	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 
+	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
 	/* skip validate if the capability is not present */
 	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
 		goto free_resources;
@@ -1327,8 +1336,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	xor_val_result = 1;
 
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
 					  &xor_val_result, DMA_PREP_INTERRUPT);
@@ -1374,8 +1388,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
 	xor_val_result = 0;
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
 					  &xor_val_result, DMA_PREP_INTERRUPT);
@@ -1417,14 +1436,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	goto free_resources;
 dma_unmap:
 	if (op == IOAT_OP_XOR) {
-		dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (dest_dma != DMA_ERROR_CODE)
+			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
 		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
-				       DMA_TO_DEVICE);
+			if (dma_srcs[i] != DMA_ERROR_CODE)
+				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+					       DMA_TO_DEVICE);
 	} else if (op == IOAT_OP_XOR_VAL) {
 		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
-				       DMA_TO_DEVICE);
+			if (dma_srcs[i] != DMA_ERROR_CODE)
+				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+					       DMA_TO_DEVICE);
 	}
 free_resources:
 	dma->device_free_chan_resources(dma_chan);