diff options
author | Dan Williams <dan.j.williams@intel.com> | 2009-08-29 22:12:40 -0400 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2009-08-29 22:12:40 -0400 |
commit | f6dbf651615900646fe0ba1ef5ce1027e5b4748d (patch) | |
tree | a78d096174765ce893dddfd6fed9e5e92d45aaaa /drivers/dma | |
parent | 7bf649aee8ac93ecc280f8745dcf8ec19d7b9fb1 (diff) |
iop-adma: P+Q self test
Even though the intent is to extend dmatest with P+Q tests there is
still value in having an always-on sanity check to prevent an
unintentionally broken driver from registering.
This depends on raid6_pq.ko for verification, the side effect being that
PQ-capable channels will fail to register when raid6 is disabled.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/iop-adma.c | 182 |
1 files changed, 181 insertions, 1 deletions
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 5a0f4fe2ee6e..f4c59e59f6cb 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/platform_device.h> | 31 | #include <linux/platform_device.h> |
32 | #include <linux/memory.h> | 32 | #include <linux/memory.h> |
33 | #include <linux/ioport.h> | 33 | #include <linux/ioport.h> |
34 | #include <linux/raid/pq.h> | ||
34 | 35 | ||
35 | #include <mach/adma.h> | 36 | #include <mach/adma.h> |
36 | 37 | ||
@@ -1267,6 +1268,170 @@ out: | |||
1267 | return err; | 1268 | return err; |
1268 | } | 1269 | } |
1269 | 1270 | ||
#ifdef CONFIG_MD_RAID6_PQ
/*
 * iop_adma_pq_zero_sum_self_test - always-on sanity check for the P+Q
 * and P+Q-validate capabilities before the channel registers.
 *
 * Generates a hardware P/Q syndrome over IOP_ADMA_NUM_SRC_TEST source
 * pages and compares it against a software syndrome produced by
 * raid6_call.gen_syndrome().  Then exercises the zero-sum (validate)
 * operation twice: once over the software-generated P/Q, expecting a
 * clean result, and once after corrupting P and Q, expecting both
 * SUM_CHECK_P_RESULT and SUM_CHECK_Q_RESULT to be reported.
 *
 * @device: iop-adma device whose first channel is tested
 *
 * Returns 0 on success, -ENOMEM if page allocation fails, or -ENODEV
 * on any test failure so that a broken engine is not published.
 */
static int __devinit
iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
{
	/* combined sources, software pq results, and extra hw pq results */
	struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
	/* ptr to the extra hw pq buffers defined above */
	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
	/* address conversion buffers (dma_map / page_address) */
	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
	dma_addr_t pq_dest[2];

	int i;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u32 zero_sum_result;
	int err = 0;
	struct device *dev;

	dev_dbg(device->common.dev, "%s\n", __func__);

	for (i = 0; i < ARRAY_SIZE(pq); i++) {
		pq[i] = alloc_page(GFP_KERNEL);
		if (!pq[i]) {
			/* unwind the pages allocated so far */
			while (i--)
				__free_page(pq[i]);
			return -ENOMEM;
		}
	}

	/* fill in src buffers with a distinct byte pattern per source */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
		pq_sw[i] = page_address(pq[i]);
		memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
	}
	/* software p and q destinations follow the sources */
	pq_sw[i] = page_address(pq[i]);
	pq_sw[i+1] = page_address(pq[i+1]);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dev = dma_chan->device->dev;

	/* initialize the dests */
	memset(page_address(pq_hw[0]), 0 , PAGE_SIZE);
	memset(page_address(pq_hw[1]), 0 , PAGE_SIZE);

	/* test pq */
	pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
	pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
				  IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
				  PAGE_SIZE,
				  DMA_PREP_INTERRUPT |
				  DMA_CTRL_ACK);
	/* prep returns NULL when no descriptor is available */
	if (!tx) {
		dev_err(dev, "Self-test pq prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_err(dev, "Self-test pq timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* generate the reference syndrome in software */
	raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);

	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
		   page_address(pq_hw[0]), PAGE_SIZE) != 0) {
		dev_err(dev, "Self-test p failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
		   page_address(pq_hw[1]), PAGE_SIZE) != 0) {
		dev_err(dev, "Self-test q failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test correct zero sum using the software generated pq values */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	/* seed with all-ones so a pass must actively clear the result */
	zero_sum_result = ~0;
	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
				      pq_src, IOP_ADMA_NUM_SRC_TEST,
				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
	if (!tx) {
		dev_err(dev, "Self-test pq-zero-sum prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
			zero_sum_result);
		err = -ENODEV;
		goto free_resources;
	}

	/* test incorrect zero sum: corrupt both p and q */
	i = IOP_ADMA_NUM_SRC_TEST;
	memset(pq_sw[i] + 100, 0, 100);
	memset(pq_sw[i+1] + 200, 0, 200);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	/* seed with zero so a failure must actively set the mismatch bits */
	zero_sum_result = 0;
	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
				      pq_src, IOP_ADMA_NUM_SRC_TEST,
				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
	if (!tx) {
		dev_err(dev, "Self-test !pq-zero-sum prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
		dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
			zero_sum_result);
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	i = ARRAY_SIZE(pq);
	while (i--)
		__free_page(pq[i]);
	return err;
}
#endif
1434 | |||
1270 | static int __devexit iop_adma_remove(struct platform_device *dev) | 1435 | static int __devexit iop_adma_remove(struct platform_device *dev) |
1271 | { | 1436 | { |
1272 | struct iop_adma_device *device = platform_get_drvdata(dev); | 1437 | struct iop_adma_device *device = platform_get_drvdata(dev); |
@@ -1417,13 +1582,28 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1417 | } | 1582 | } |
1418 | 1583 | ||
1419 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) || | 1584 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) || |
1420 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { | 1585 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { |
1421 | ret = iop_adma_xor_val_self_test(adev); | 1586 | ret = iop_adma_xor_val_self_test(adev); |
1422 | dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); | 1587 | dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); |
1423 | if (ret) | 1588 | if (ret) |
1424 | goto err_free_iop_chan; | 1589 | goto err_free_iop_chan; |
1425 | } | 1590 | } |
1426 | 1591 | ||
1592 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && | ||
1593 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { | ||
1594 | #ifdef CONFIG_MD_RAID6_PQ | ||
1595 | ret = iop_adma_pq_zero_sum_self_test(adev); | ||
1596 | dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); | ||
1597 | #else | ||
1598 | /* can not test raid6, so do not publish capability */ | ||
1599 | dma_cap_clear(DMA_PQ, dma_dev->cap_mask); | ||
1600 | dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask); | ||
1601 | ret = 0; | ||
1602 | #endif | ||
1603 | if (ret) | ||
1604 | goto err_free_iop_chan; | ||
1605 | } | ||
1606 | |||
1427 | dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " | 1607 | dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " |
1428 | "( %s%s%s%s%s%s%s%s%s%s)\n", | 1608 | "( %s%s%s%s%s%s%s%s%s%s)\n", |
1429 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", | 1609 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", |