author    Adrian Hunter <adrian.hunter@nokia.com>  2010-09-10 04:33:45 -0400
committer Chris Ball <cjb@laptop.org>              2010-10-23 09:11:12 -0400
commit    c8c8c1bdbe585d9159c4585216451faa4c35e4ec
tree      5c641664baba07f1f50b2415ca70a2b2ecda344a
parent    632cf92a72fecda096d0f4608eaefb2c7392b744
mmc_test: fix performance tests that go over max_blk_count

The host controller driver limits I/O transfers to a maximum transfer size,
maximum block count, maximum segment size and maximum segment count. The
performance tests were not obeying these limits, which meant they would not
work with some drivers. This patch fixes that.

Signed-off-by: Adrian Hunter <adrian.hunter@nokia.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
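
To make the fix concrete, here is a minimal, illustrative sketch (not part of
the patch) of how the driver-advertised limits in struct mmc_host fold into a
single maximum transfer size; the patch computes the same value inline in
mmc_test_area_init(), and the helper name used here is hypothetical:

#include <linux/mmc/host.h>

/*
 * Hypothetical helper: clamp a test-area size to what the host driver
 * can accept in one request.  Mirrors the max_tfr computation the patch
 * adds to mmc_test_area_init().
 */
static unsigned int mmc_test_max_transfer_size(struct mmc_host *host,
                                               unsigned int area_sz)
{
        unsigned int max_tfr = area_sz;

        /* At most max_blk_count blocks of 512 bytes per request. */
        if (max_tfr >> 9 > host->max_blk_count)
                max_tfr = host->max_blk_count << 9;

        /* At most max_req_size bytes per request. */
        if (max_tfr > host->max_req_size)
                max_tfr = host->max_req_size;

        /*
         * The scatterlist may hold at most max_segs segments, each of
         * at most max_seg_size bytes.
         */
        if (max_tfr / host->max_seg_size > host->max_segs)
                max_tfr = host->max_segs * host->max_seg_size;

        return max_tfr;
}

Every transfer the performance tests issue is then kept at or below this
value (test->area.max_tfr in the diff below), so the tests exercise the
largest request the driver can actually handle instead of exceeding
max_blk_count.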
Diffstat (limited to 'drivers/mmc/card/mmc_test.c')
 drivers/mmc/card/mmc_test.c | 183 ++++++++++++++++++++++++++-------------
 1 file changed, 135 insertions(+), 48 deletions(-)
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b992725ecbc9..7a38ae9754f6 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -56,7 +56,9 @@ struct mmc_test_mem {
  * struct mmc_test_area - information for performance tests.
  * @max_sz: test area size (in bytes)
  * @dev_addr: address on card at which to do performance tests
- * @max_segs: maximum segments in scatterlist @sg
+ * @max_tfr: maximum transfer size allowed by driver (in bytes)
+ * @max_segs: maximum segments allowed by driver in scatterlist @sg
+ * @max_seg_sz: maximum segment size allowed by driver
  * @blocks: number of (512 byte) blocks currently mapped by @sg
  * @sg_len: length of currently mapped scatterlist @sg
  * @mem: allocated memory
@@ -65,7 +67,9 @@ struct mmc_test_mem {
 struct mmc_test_area {
         unsigned long max_sz;
         unsigned int dev_addr;
+        unsigned int max_tfr;
         unsigned int max_segs;
+        unsigned int max_seg_sz;
         unsigned int blocks;
         unsigned int sg_len;
         struct mmc_test_mem *mem;
@@ -245,13 +249,18 @@ static void mmc_test_free_mem(struct mmc_test_mem *mem)
 
 /*
  * Allocate a lot of memory, preferrably max_sz but at least min_sz.  In case
- * there isn't much memory do not exceed 1/16th total lowmem pages.
+ * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
+ * not exceed a maximum number of segments and try not to make segments much
+ * bigger than maximum segment size.
  */
 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
-                                               unsigned long max_sz)
+                                               unsigned long max_sz,
+                                               unsigned int max_segs,
+                                               unsigned int max_seg_sz)
 {
         unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
         unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
+        unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
         unsigned long page_cnt = 0;
         unsigned long limit = nr_free_buffer_pages() >> 4;
         struct mmc_test_mem *mem;
@@ -261,11 +270,17 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
         if (max_page_cnt < min_page_cnt)
                 max_page_cnt = min_page_cnt;
 
+        if (max_seg_page_cnt > max_page_cnt)
+                max_seg_page_cnt = max_page_cnt;
+
+        if (max_segs > max_page_cnt)
+                max_segs = max_page_cnt;
+
         mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
         if (!mem)
                 return NULL;
 
-        mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_page_cnt,
+        mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
                            GFP_KERNEL);
         if (!mem->arr)
                 goto out_free;
@@ -276,7 +291,7 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
                 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
                                 __GFP_NORETRY;
 
-                order = get_order(max_page_cnt << PAGE_SHIFT);
+                order = get_order(max_seg_page_cnt << PAGE_SHIFT);
                 while (1) {
                         page = alloc_pages(flags, order);
                         if (page || !order)
@@ -293,6 +308,11 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
                 mem->cnt += 1;
                 if (max_page_cnt <= (1UL << order))
                         break;
+                if (mem->cnt >= max_segs) {
+                        if (page_cnt < min_page_cnt)
+                                goto out_free;
+                        break;
+                }
                 max_page_cnt -= 1UL << order;
                 page_cnt += 1UL << order;
         }
@@ -310,7 +330,8 @@ out_free:
  */
 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
                            struct scatterlist *sglist, int repeat,
-                           unsigned int max_segs, unsigned int *sg_len)
+                           unsigned int max_segs, unsigned int max_seg_sz,
+                           unsigned int *sg_len)
 {
         struct scatterlist *sg = NULL;
         unsigned int i;
@@ -322,8 +343,10 @@ static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
         for (i = 0; i < mem->cnt; i++) {
                 unsigned long len = PAGE_SIZE << mem->arr[i].order;
 
-                if (sz < len)
+                if (len > sz)
                         len = sz;
+                if (len > max_seg_sz)
+                        len = max_seg_sz;
                 if (sg)
                         sg = sg_next(sg);
                 else
@@ -355,6 +378,7 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
                                        unsigned long sz,
                                        struct scatterlist *sglist,
                                        unsigned int max_segs,
+                                       unsigned int max_seg_sz,
                                        unsigned int *sg_len)
 {
         struct scatterlist *sg = NULL;
@@ -365,7 +389,7 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
         sg_init_table(sglist, max_segs);
 
         *sg_len = 0;
-        while (sz && i) {
+        while (sz) {
                 base = page_address(mem->arr[--i].page);
                 cnt = 1 << mem->arr[i].order;
                 while (sz && cnt) {
@@ -374,7 +398,9 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
                                 continue;
                         last_addr = addr;
                         len = PAGE_SIZE;
-                        if (sz < len)
+                        if (len > max_seg_sz)
+                                len = max_seg_sz;
+                        if (len > sz)
                                 len = sz;
                         if (sg)
                                 sg = sg_next(sg);
@@ -386,6 +412,8 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
                         sz -= len;
                         *sg_len += 1;
                 }
+                if (i == 0)
+                        i = mem->cnt;
         }
 
         if (sg)
@@ -1215,16 +1243,22 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
                              int max_scatter)
 {
         struct mmc_test_area *t = &test->area;
+        int err;
 
         t->blocks = sz >> 9;
 
         if (max_scatter) {
-                return mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
-                                                   t->max_segs, &t->sg_len);
-        } else {
-                return mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
-                                       &t->sg_len);
+                err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
+                                                  t->max_segs, t->max_seg_sz,
+                                                  &t->sg_len);
+        } else {
+                err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
+                                      t->max_seg_sz, &t->sg_len);
         }
+        if (err)
+                printk(KERN_INFO "%s: Failed to map sg list\n",
+                       mmc_hostname(test->card->host));
+        return err;
 }
 
 /*
@@ -1249,6 +1283,22 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
         struct timespec ts1, ts2;
         int ret;
 
+        /*
+         * In the case of a maximally scattered transfer, the maximum transfer
+         * size is further limited by using PAGE_SIZE segments.
+         */
+        if (max_scatter) {
+                struct mmc_test_area *t = &test->area;
+                unsigned long max_tfr;
+
+                if (t->max_seg_sz >= PAGE_SIZE)
+                        max_tfr = t->max_segs * PAGE_SIZE;
+                else
+                        max_tfr = t->max_segs * t->max_seg_sz;
+                if (sz > max_tfr)
+                        sz = max_tfr;
+        }
+
         ret = mmc_test_area_map(test, sz, max_scatter);
         if (ret)
                 return ret;
@@ -1274,7 +1324,7 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
  */
 static int mmc_test_area_fill(struct mmc_test_card *test)
 {
-        return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+        return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
                                 1, 0, 0);
 }
 
@@ -1328,16 +1378,29 @@ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
                 t->max_sz = TEST_AREA_MAX_SIZE;
         else
                 t->max_sz = (unsigned long)test->card->pref_erase << 9;
+
+        t->max_segs = test->card->host->max_segs;
+        t->max_seg_sz = test->card->host->max_seg_size;
+
+        t->max_tfr = t->max_sz;
+        if (t->max_tfr >> 9 > test->card->host->max_blk_count)
+                t->max_tfr = test->card->host->max_blk_count << 9;
+        if (t->max_tfr > test->card->host->max_req_size)
+                t->max_tfr = test->card->host->max_req_size;
+        if (t->max_tfr / t->max_seg_sz > t->max_segs)
+                t->max_tfr = t->max_segs * t->max_seg_sz;
+
         /*
          * Try to allocate enough memory for the whole area. Less is OK
          * because the same memory can be mapped into the scatterlist more than
-         * once.
+         * once. Also, take into account the limits imposed on scatterlist
+         * segments by the host driver.
          */
-        t->mem = mmc_test_alloc_mem(min_sz, t->max_sz);
+        t->mem = mmc_test_alloc_mem(min_sz, t->max_sz, t->max_segs,
+                                    t->max_seg_sz);
         if (!t->mem)
                 return -ENOMEM;
 
-        t->max_segs = DIV_ROUND_UP(t->max_sz, PAGE_SIZE);
         t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
         if (!t->sg) {
                 ret = -ENOMEM;
@@ -1401,7 +1464,7 @@ static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
 static int mmc_test_best_performance(struct mmc_test_card *test, int write,
                                      int max_scatter)
 {
-        return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+        return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
                                 write, max_scatter, 1);
 }
 
@@ -1446,12 +1509,13 @@ static int mmc_test_profile_read_perf(struct mmc_test_card *test)
         unsigned int dev_addr;
         int ret;
 
-        for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+        for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
                 dev_addr = test->area.dev_addr + (sz >> 9);
                 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
                 if (ret)
                         return ret;
         }
+        sz = test->area.max_tfr;
         dev_addr = test->area.dev_addr;
         return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
 }
@@ -1468,7 +1532,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
         ret = mmc_test_area_erase(test);
         if (ret)
                 return ret;
-        for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+        for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
                 dev_addr = test->area.dev_addr + (sz >> 9);
                 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
                 if (ret)
@@ -1477,6 +1541,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
         ret = mmc_test_area_erase(test);
         if (ret)
                 return ret;
+        sz = test->area.max_tfr;
         dev_addr = test->area.dev_addr;
         return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
 }
@@ -1516,29 +1581,63 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
         return 0;
 }
 
+static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
+{
+        unsigned int dev_addr, i, cnt;
+        struct timespec ts1, ts2;
+        int ret;
+
+        cnt = test->area.max_sz / sz;
+        dev_addr = test->area.dev_addr;
+        getnstimeofday(&ts1);
+        for (i = 0; i < cnt; i++) {
+                ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
+                if (ret)
+                        return ret;
+                dev_addr += (sz >> 9);
+        }
+        getnstimeofday(&ts2);
+        mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+        return 0;
+}
+
 /*
  * Consecutive read performance by transfer size.
  */
 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
 {
         unsigned long sz;
+        int ret;
+
+        for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+                ret = mmc_test_seq_read_perf(test, sz);
+                if (ret)
+                        return ret;
+        }
+        sz = test->area.max_tfr;
+        return mmc_test_seq_read_perf(test, sz);
+}
+
+static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
+{
         unsigned int dev_addr, i, cnt;
         struct timespec ts1, ts2;
         int ret;
 
-        for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
-                cnt = test->area.max_sz / sz;
-                dev_addr = test->area.dev_addr;
-                getnstimeofday(&ts1);
-                for (i = 0; i < cnt; i++) {
-                        ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
-                        if (ret)
-                                return ret;
-                        dev_addr += (sz >> 9);
-                }
-                getnstimeofday(&ts2);
-                mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
-        }
+        ret = mmc_test_area_erase(test);
+        if (ret)
+                return ret;
+        cnt = test->area.max_sz / sz;
+        dev_addr = test->area.dev_addr;
+        getnstimeofday(&ts1);
+        for (i = 0; i < cnt; i++) {
+                ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
+                if (ret)
+                        return ret;
+                dev_addr += (sz >> 9);
+        }
+        getnstimeofday(&ts2);
+        mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
         return 0;
 }
 
@@ -1548,27 +1647,15 @@ static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
 {
         unsigned long sz;
-        unsigned int dev_addr, i, cnt;
-        struct timespec ts1, ts2;
         int ret;
 
-        for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
-                ret = mmc_test_area_erase(test);
+        for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+                ret = mmc_test_seq_write_perf(test, sz);
                 if (ret)
                         return ret;
-                cnt = test->area.max_sz / sz;
-                dev_addr = test->area.dev_addr;
-                getnstimeofday(&ts1);
-                for (i = 0; i < cnt; i++) {
-                        ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
-                        if (ret)
-                                return ret;
-                        dev_addr += (sz >> 9);
-                }
-                getnstimeofday(&ts2);
-                mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
         }
-        return 0;
+        sz = test->area.max_tfr;
+        return mmc_test_seq_write_perf(test, sz);
 }
 
 /*