Diffstat (limited to 'drivers/mmc/card/mmc_test.c')

 drivers/mmc/card/mmc_test.c | 811 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 809 insertions(+), 2 deletions(-)
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 445d7db2277e..5dd8576b5c18 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 
 #include <linux/scatterlist.h>
+#include <linux/swap.h>		/* For nr_free_buffer_pages() */
 
 #define RESULT_OK		0
 #define RESULT_FAIL		1
@@ -25,6 +26,60 @@
 #define BUFFER_ORDER		2
 #define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
 
+/*
+ * Limit the test area size to the maximum MMC HC erase group size. Note that
+ * the maximum SD allocation unit size is just 4MiB.
+ */
+#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
+
+/**
+ * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
+ * @page: first page in the allocation
+ * @order: order of the number of pages allocated
+ */
+struct mmc_test_pages {
+	struct page *page;
+	unsigned int order;
+};
+
+/**
+ * struct mmc_test_mem - allocated memory.
+ * @arr: array of allocations
+ * @cnt: number of allocations
+ */
+struct mmc_test_mem {
+	struct mmc_test_pages *arr;
+	unsigned int cnt;
+};
+
+/**
+ * struct mmc_test_area - information for performance tests.
+ * @max_sz: test area size (in bytes)
+ * @dev_addr: address on card at which to do performance tests
+ * @max_segs: maximum segments in scatterlist @sg
+ * @blocks: number of (512 byte) blocks currently mapped by @sg
+ * @sg_len: length of currently mapped scatterlist @sg
+ * @mem: allocated memory
+ * @sg: scatterlist
+ */
+struct mmc_test_area {
+	unsigned long max_sz;
+	unsigned int dev_addr;
+	unsigned int max_segs;
+	unsigned int blocks;
+	unsigned int sg_len;
+	struct mmc_test_mem *mem;
+	struct scatterlist *sg;
+};
+
+/**
+ * struct mmc_test_card - test information.
+ * @card: card under test
+ * @scratch: transfer buffer
+ * @buffer: transfer buffer
+ * @highmem: buffer for highmem tests
+ * @area: information for performance tests
+ */
 struct mmc_test_card {
 	struct mmc_card *card;
 
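
The three structures added above fit together: struct mmc_test_mem describes a set of page allocations that back the test area, and struct mmc_test_area records how that memory is currently mapped for a transfer. As a rough illustration of what a struct mmc_test_mem describes (a hypothetical helper, not part of the patch):

/* Illustration only, not in the patch: total bytes described by a
 * struct mmc_test_mem. Each entry covers PAGE_SIZE << order bytes of
 * physically contiguous memory.
 */
static unsigned long mmc_test_mem_bytes(const struct mmc_test_mem *mem)
{
	unsigned long bytes = 0;
	unsigned int i;

	for (i = 0; i < mem->cnt; i++)
		bytes += PAGE_SIZE << mem->arr[i].order;
	return bytes;
}
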
@@ -33,6 +88,7 @@ struct mmc_test_card {
 #ifdef CONFIG_HIGHMEM
 	struct page *highmem;
 #endif
+	struct mmc_test_area area;
 };
 
 /*******************************************************************/
@@ -97,6 +153,12 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
 	mmc_set_data_timeout(mrq->data, test->card);
 }
 
+static int mmc_test_busy(struct mmc_command *cmd)
+{
+	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
+		(R1_CURRENT_STATE(cmd->resp[0]) == 7);
+}
+
 /*
  * Wait for the card to finish the busy state
  */
@@ -117,13 +179,13 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
 		if (ret)
 			break;
 
-		if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
+		if (!busy && mmc_test_busy(&cmd)) {
 			busy = 1;
 			printk(KERN_INFO "%s: Warning: Host did not "
 				"wait for busy state to end.\n",
 				mmc_hostname(test->card->host));
 		}
-	} while (!(cmd.resp[0] & R1_READY_FOR_DATA));
+	} while (mmc_test_busy(&cmd));
 
 	return ret;
 }
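
The new mmc_test_busy() helper tightens the busy check used by mmc_test_wait_busy(): the card is still busy not only while R1_READY_FOR_DATA is clear, but also while its current state is 7, the programming state. A minimal sketch of that status decoding, using the R1 bit layout as reflected in include/linux/mmc/mmc.h (the EX_ names below are illustrative and not from the patch):

/* Sketch, not from the patch: R1 card status fields used by mmc_test_busy(). */
#define EX_R1_READY_FOR_DATA	(1 << 8)			/* bit 8 */
#define EX_R1_CURRENT_STATE(x)	(((x) & 0x00001E00) >> 9)	/* bits 12:9 */

static int example_card_is_busy(unsigned int r1_status)
{
	/* Busy while not ready for data or still in the programming state. */
	return !(r1_status & EX_R1_READY_FOR_DATA) ||
		EX_R1_CURRENT_STATE(r1_status) == 7;
}
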
@@ -170,6 +232,248 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
 	return 0;
 }
 
+static void mmc_test_free_mem(struct mmc_test_mem *mem)
+{
+	if (!mem)
+		return;
+	while (mem->cnt--)
+		__free_pages(mem->arr[mem->cnt].page,
+			     mem->arr[mem->cnt].order);
+	kfree(mem->arr);
+	kfree(mem);
+}
+
+/*
+ * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
+ * there isn't much memory do not exceed 1/16th total lowmem pages.
+ */
+static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
+					       unsigned long max_sz)
+{
+	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
+	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
+	unsigned long page_cnt = 0;
+	unsigned long limit = nr_free_buffer_pages() >> 4;
+	struct mmc_test_mem *mem;
+
+	if (max_page_cnt > limit)
+		max_page_cnt = limit;
+	if (max_page_cnt < min_page_cnt)
+		max_page_cnt = min_page_cnt;
+
+	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
+	if (!mem)
+		return NULL;
+
+	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_page_cnt,
+			   GFP_KERNEL);
+	if (!mem->arr)
+		goto out_free;
+
+	while (max_page_cnt) {
+		struct page *page;
+		unsigned int order;
+		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
+				__GFP_NORETRY;
+
+		order = get_order(max_page_cnt << PAGE_SHIFT);
+		while (1) {
+			page = alloc_pages(flags, order);
+			if (page || !order)
+				break;
+			order -= 1;
+		}
+		if (!page) {
+			if (page_cnt < min_page_cnt)
+				goto out_free;
+			break;
+		}
+		mem->arr[mem->cnt].page = page;
+		mem->arr[mem->cnt].order = order;
+		mem->cnt += 1;
+		if (max_page_cnt <= (1UL << order))
+			break;
+		max_page_cnt -= 1UL << order;
+		page_cnt += 1UL << order;
+	}
+
+	return mem;
+
+out_free:
+	mmc_test_free_mem(mem);
+	return NULL;
+}
+
+/*
+ * Map memory into a scatterlist. Optionally allow the same memory to be
+ * mapped more than once.
+ */
+static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
+			   struct scatterlist *sglist, int repeat,
+			   unsigned int max_segs, unsigned int *sg_len)
+{
+	struct scatterlist *sg = NULL;
+	unsigned int i;
+
+	sg_init_table(sglist, max_segs);
+
+	*sg_len = 0;
+	do {
+		for (i = 0; i < mem->cnt; i++) {
+			unsigned long len = PAGE_SIZE << mem->arr[i].order;
+
+			if (sz < len)
+				len = sz;
+			if (sg)
+				sg = sg_next(sg);
+			else
+				sg = sglist;
+			if (!sg)
+				return -EINVAL;
+			sg_set_page(sg, mem->arr[i].page, len, 0);
+			sz -= len;
+			*sg_len += 1;
+			if (!sz)
+				break;
+		}
+	} while (sz && repeat);
+
+	if (sz)
+		return -EINVAL;
+
+	if (sg)
+		sg_mark_end(sg);
+
+	return 0;
+}
+
+/*
+ * Map memory into a scatterlist so that no pages are contiguous. Allow the
+ * same memory to be mapped more than once.
+ */
+static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
+				       unsigned long sz,
+				       struct scatterlist *sglist,
+				       unsigned int max_segs,
+				       unsigned int *sg_len)
+{
+	struct scatterlist *sg = NULL;
+	unsigned int i = mem->cnt, cnt;
+	unsigned long len;
+	void *base, *addr, *last_addr = NULL;
+
+	sg_init_table(sglist, max_segs);
+
+	*sg_len = 0;
+	while (sz && i) {
+		base = page_address(mem->arr[--i].page);
+		cnt = 1 << mem->arr[i].order;
+		while (sz && cnt) {
+			addr = base + PAGE_SIZE * --cnt;
+			if (last_addr && last_addr + PAGE_SIZE == addr)
+				continue;
+			last_addr = addr;
+			len = PAGE_SIZE;
+			if (sz < len)
+				len = sz;
+			if (sg)
+				sg = sg_next(sg);
+			else
+				sg = sglist;
+			if (!sg)
+				return -EINVAL;
+			sg_set_page(sg, virt_to_page(addr), len, 0);
+			sz -= len;
+			*sg_len += 1;
+		}
+	}
+
+	if (sg)
+		sg_mark_end(sg);
+
+	return 0;
+}
+
+/*
+ * Calculate transfer rate in bytes per second.
+ */
+static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
+{
+	uint64_t ns;
+
+	ns = ts->tv_sec;
+	ns *= 1000000000;
+	ns += ts->tv_nsec;
+
+	bytes *= 1000000000;
+
+	while (ns > UINT_MAX) {
+		bytes >>= 1;
+		ns >>= 1;
+	}
+
+	if (!ns)
+		return 0;
+
+	do_div(bytes, (uint32_t)ns);
+
+	return bytes;
+}
+
+/*
+ * Print the transfer rate.
+ */
+static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
+				struct timespec *ts1, struct timespec *ts2)
+{
+	unsigned int rate, sectors = bytes >> 9;
+	struct timespec ts;
+
+	ts = timespec_sub(*ts2, *ts1);
+
+	rate = mmc_test_rate(bytes, &ts);
+
+	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+			 "seconds (%u kB/s, %u KiB/s)\n",
+			 mmc_hostname(test->card->host), sectors, sectors >> 1,
+			 (sectors == 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
+			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
+}
+
+/*
+ * Print the average transfer rate.
+ */
+static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
+				    unsigned int count, struct timespec *ts1,
+				    struct timespec *ts2)
+{
+	unsigned int rate, sectors = bytes >> 9;
+	uint64_t tot = bytes * count;
+	struct timespec ts;
+
+	ts = timespec_sub(*ts2, *ts1);
+
+	rate = mmc_test_rate(tot, &ts);
+
+	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
+			 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
+			 mmc_hostname(test->card->host), count, sectors, count,
+			 sectors >> 1, (sectors == 1 ? ".5" : ""),
+			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
+			 rate / 1000, rate / 1024);
+}
+
+/*
+ * Return the card size in sectors.
+ */
+static unsigned int mmc_test_capacity(struct mmc_card *card)
+{
+	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
+		return card->ext_csd.sectors;
+	else
+		return card->csd.capacity << (card->csd.read_blkbits - 9);
+}
+
 /*******************************************************************/
 /* Test preparation and cleanup                                    */
 /*******************************************************************/
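
Of the helpers added by this hunk, mmc_test_rate() is the least obvious: it computes bytes * 1e9 / ns without a 64-by-64 division (which do_div() does not provide) by halving both numerator and denominator until the nanosecond count fits in 32 bits, preserving the ratio to sufficient accuracy. A standalone user-space sketch of the same arithmetic (illustrative only, names are not from the patch):

/* Sketch, not from the patch: the scaling trick used by mmc_test_rate(). */
#include <stdint.h>
#include <limits.h>

static unsigned int example_rate(uint64_t bytes, uint64_t ns)
{
	bytes *= 1000000000ULL;		/* bytes per ns -> bytes per second */

	while (ns > UINT_MAX) {		/* keep the divisor within 32 bits */
		bytes >>= 1;
		ns >>= 1;
	}
	if (!ns)
		return 0;
	return (unsigned int)(bytes / (uint32_t)ns);
}
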
@@ -893,8 +1197,419 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test)
 	return 0;
 }
 
+#else
+
+static int mmc_test_no_highmem(struct mmc_test_card *test)
+{
+	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
+	       mmc_hostname(test->card->host));
+	return 0;
+}
+
 #endif /* CONFIG_HIGHMEM */
 
+/*
+ * Map sz bytes so that it can be transferred.
+ */
+static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
+			     int max_scatter)
+{
+	struct mmc_test_area *t = &test->area;
+
+	t->blocks = sz >> 9;
+
+	if (max_scatter) {
+		return mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
+						   t->max_segs, &t->sg_len);
+	} else {
+		return mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
+				       &t->sg_len);
+	}
+}
+
+/*
+ * Transfer bytes mapped by mmc_test_area_map().
+ */
+static int mmc_test_area_transfer(struct mmc_test_card *test,
+				  unsigned int dev_addr, int write)
+{
+	struct mmc_test_area *t = &test->area;
+
+	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
+					t->blocks, 512, write);
+}
+
+/*
+ * Map and transfer bytes.
+ */
+static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
+			    unsigned int dev_addr, int write, int max_scatter,
+			    int timed)
+{
+	struct timespec ts1, ts2;
+	int ret;
+
+	ret = mmc_test_area_map(test, sz, max_scatter);
+	if (ret)
+		return ret;
+
+	if (timed)
+		getnstimeofday(&ts1);
+
+	ret = mmc_test_area_transfer(test, dev_addr, write);
+	if (ret)
+		return ret;
+
+	if (timed)
+		getnstimeofday(&ts2);
+
+	if (timed)
+		mmc_test_print_rate(test, sz, &ts1, &ts2);
+
+	return 0;
+}
+
+/*
+ * Write the test area entirely.
+ */
+static int mmc_test_area_fill(struct mmc_test_card *test)
+{
+	return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+				1, 0, 0);
+}
+
+/*
+ * Erase the test area entirely.
+ */
+static int mmc_test_area_erase(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+
+	if (!mmc_can_erase(test->card))
+		return 0;
+
+	return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
+			 MMC_ERASE_ARG);
+}
+
+/*
+ * Cleanup struct mmc_test_area.
+ */
+static int mmc_test_area_cleanup(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+
+	kfree(t->sg);
+	mmc_test_free_mem(t->mem);
+
+	return 0;
+}
+
+/*
+ * Initialize an area for testing large transfers. The size of the area is the
+ * preferred erase size, which is a good size for optimal transfer speed. Note
+ * that it is typically 4MiB for modern cards. The test area is set to the
+ * middle of the card because cards may have different characteristics at the
+ * front (for FAT file system optimization). Optionally, the area is erased
+ * (if the card supports it) which may improve write performance. Optionally,
+ * the area is filled with data for subsequent read tests.
+ */
+static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long min_sz = 64 * 1024;
+	int ret;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
+		t->max_sz = TEST_AREA_MAX_SIZE;
+	else
+		t->max_sz = (unsigned long)test->card->pref_erase << 9;
+	/*
+	 * Try to allocate enough memory for the whole area. Less is OK
+	 * because the same memory can be mapped into the scatterlist more than
+	 * once.
+	 */
+	t->mem = mmc_test_alloc_mem(min_sz, t->max_sz);
+	if (!t->mem)
+		return -ENOMEM;
+
+	t->max_segs = DIV_ROUND_UP(t->max_sz, PAGE_SIZE);
+	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
+	if (!t->sg) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	t->dev_addr = mmc_test_capacity(test->card) / 2;
+	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
+
+	if (erase) {
+		ret = mmc_test_area_erase(test);
+		if (ret)
+			goto out_free;
+	}
+
+	if (fill) {
+		ret = mmc_test_area_fill(test);
+		if (ret)
+			goto out_free;
+	}
+
+	return 0;
+
+out_free:
+	mmc_test_area_cleanup(test);
+	return ret;
+}
+
+/*
+ * Prepare for large transfers. Do not erase the test area.
+ */
+static int mmc_test_area_prepare(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 0, 0);
+}
+
+/*
+ * Prepare for large transfers. Do erase the test area.
+ */
+static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 1, 0);
+}
+
+/*
+ * Prepare for large transfers. Erase and fill the test area.
+ */
+static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 1, 1);
+}
+
+/*
+ * Test best-case performance. Best-case performance is expected from
+ * a single large transfer.
+ *
+ * An additional option (max_scatter) allows the measurement of the same
+ * transfer but with no contiguous pages in the scatter list. This tests
+ * the efficiency of DMA to handle scattered pages.
+ */
+static int mmc_test_best_performance(struct mmc_test_card *test, int write,
+				     int max_scatter)
+{
+	return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+				write, max_scatter, 1);
+}
+
+/*
+ * Best-case read performance.
+ */
+static int mmc_test_best_read_performance(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 0, 0);
+}
+
+/*
+ * Best-case write performance.
+ */
+static int mmc_test_best_write_performance(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 1, 0);
+}
+
+/*
+ * Best-case read performance into scattered pages.
+ */
+static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 0, 1);
+}
+
+/*
+ * Best-case write performance from scattered pages.
+ */
+static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 1, 1);
+}
+
+/*
+ * Single read performance by transfer size.
+ */
+static int mmc_test_profile_read_perf(struct mmc_test_card *test)
+{
+	unsigned long sz;
+	unsigned int dev_addr;
+	int ret;
+
+	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+		dev_addr = test->area.dev_addr + (sz >> 9);
+		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+		if (ret)
+			return ret;
+	}
+	dev_addr = test->area.dev_addr;
+	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+}
+
+/*
+ * Single write performance by transfer size.
+ */
+static int mmc_test_profile_write_perf(struct mmc_test_card *test)
+{
+	unsigned long sz;
+	unsigned int dev_addr;
+	int ret;
+
+	ret = mmc_test_area_erase(test);
+	if (ret)
+		return ret;
+	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+		dev_addr = test->area.dev_addr + (sz >> 9);
+		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+		if (ret)
+			return ret;
+	}
+	ret = mmc_test_area_erase(test);
+	if (ret)
+		return ret;
+	dev_addr = test->area.dev_addr;
+	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+}
+
+/*
+ * Single trim performance by transfer size.
+ */
+static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
+{
+	unsigned long sz;
+	unsigned int dev_addr;
+	struct timespec ts1, ts2;
+	int ret;
+
+	if (!mmc_can_trim(test->card))
+		return RESULT_UNSUP_CARD;
+
+	if (!mmc_can_erase(test->card))
+		return RESULT_UNSUP_HOST;
+
+	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+		dev_addr = test->area.dev_addr + (sz >> 9);
+		getnstimeofday(&ts1);
+		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+		if (ret)
+			return ret;
+		getnstimeofday(&ts2);
+		mmc_test_print_rate(test, sz, &ts1, &ts2);
+	}
+	dev_addr = test->area.dev_addr;
+	getnstimeofday(&ts1);
+	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+	if (ret)
+		return ret;
+	getnstimeofday(&ts2);
+	mmc_test_print_rate(test, sz, &ts1, &ts2);
+	return 0;
+}
+
+/*
+ * Consecutive read performance by transfer size.
+ */
+static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
+{
+	unsigned long sz;
+	unsigned int dev_addr, i, cnt;
+	struct timespec ts1, ts2;
+	int ret;
+
+	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+		cnt = test->area.max_sz / sz;
+		dev_addr = test->area.dev_addr;
+		getnstimeofday(&ts1);
+		for (i = 0; i < cnt; i++) {
+			ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
+			if (ret)
+				return ret;
+			dev_addr += (sz >> 9);
+		}
+		getnstimeofday(&ts2);
+		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	}
+	return 0;
+}
+
+/*
+ * Consecutive write performance by transfer size.
+ */
+static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
+{
+	unsigned long sz;
+	unsigned int dev_addr, i, cnt;
+	struct timespec ts1, ts2;
+	int ret;
+
+	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+		ret = mmc_test_area_erase(test);
+		if (ret)
+			return ret;
+		cnt = test->area.max_sz / sz;
+		dev_addr = test->area.dev_addr;
+		getnstimeofday(&ts1);
+		for (i = 0; i < cnt; i++) {
+			ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
+			if (ret)
+				return ret;
+			dev_addr += (sz >> 9);
+		}
+		getnstimeofday(&ts2);
+		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	}
+	return 0;
+}
+
+/*
+ * Consecutive trim performance by transfer size.
+ */
+static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
+{
+	unsigned long sz;
+	unsigned int dev_addr, i, cnt;
+	struct timespec ts1, ts2;
+	int ret;
+
+	if (!mmc_can_trim(test->card))
+		return RESULT_UNSUP_CARD;
+
+	if (!mmc_can_erase(test->card))
+		return RESULT_UNSUP_HOST;
+
+	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+		ret = mmc_test_area_erase(test);
+		if (ret)
+			return ret;
+		ret = mmc_test_area_fill(test);
+		if (ret)
+			return ret;
+		cnt = test->area.max_sz / sz;
+		dev_addr = test->area.dev_addr;
+		getnstimeofday(&ts1);
+		for (i = 0; i < cnt; i++) {
+			ret = mmc_erase(test->card, dev_addr, sz >> 9,
+					MMC_TRIM_ARG);
+			if (ret)
+				return ret;
+			dev_addr += (sz >> 9);
+		}
+		getnstimeofday(&ts2);
+		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	}
+	return 0;
+}
+
 static const struct mmc_test_case mmc_test_cases[] = {
 	{
 		.name = "Basic write (no data verification)",
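
A detail worth noting in mmc_test_area_init() above: the test area is placed at the middle of the card and then aligned down to a multiple of the area size expressed in 512-byte sectors, so that it sits on an erase-group-friendly boundary. A small sketch of that address arithmetic (illustrative only, not code from the patch):

/* Sketch, not from the patch: how mmc_test_area_init() picks dev_addr. */
static unsigned int example_area_start(unsigned int capacity_sectors,
				       unsigned long area_bytes)
{
	unsigned int area_sectors = area_bytes >> 9;	/* bytes -> sectors */
	unsigned int addr = capacity_sectors / 2;	/* middle of the card */

	return addr - (addr % area_sectors);		/* align to area size */
}
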
@@ -1040,8 +1755,100 @@ static const struct mmc_test_case mmc_test_cases[] = {
 		.cleanup = mmc_test_cleanup,
 	},
 
+#else
+
+	{
+		.name = "Highmem write",
+		.run = mmc_test_no_highmem,
+	},
+
+	{
+		.name = "Highmem read",
+		.run = mmc_test_no_highmem,
+	},
+
+	{
+		.name = "Multi-block highmem write",
+		.run = mmc_test_no_highmem,
+	},
+
+	{
+		.name = "Multi-block highmem read",
+		.run = mmc_test_no_highmem,
+	},
+
 #endif /* CONFIG_HIGHMEM */
 
+	{
+		.name = "Best-case read performance",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_best_read_performance,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Best-case write performance",
+		.prepare = mmc_test_area_prepare_erase,
+		.run = mmc_test_best_write_performance,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Best-case read performance into scattered pages",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_best_read_perf_max_scatter,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Best-case write performance from scattered pages",
+		.prepare = mmc_test_area_prepare_erase,
+		.run = mmc_test_best_write_perf_max_scatter,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Single read performance by transfer size",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_profile_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Single write performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Single trim performance by transfer size",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_profile_trim_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Consecutive read performance by transfer size",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_profile_seq_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Consecutive write performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_seq_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Consecutive trim performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_seq_trim_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
 };
 
 static DEFINE_MUTEX(mmc_test_lock);
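
For context, each entry added to mmc_test_cases pairs an optional prepare and cleanup hook with a run function; the new performance tests reuse the mmc_test_area_prepare*() and mmc_test_area_cleanup() helpers shown earlier. The skeleton below only illustrates that per-test ordering; it is not code from the patch, and the driver's existing runner additionally records and reports each result:

/* Sketch, not from the patch: the lifecycle implied by the table above. */
static int example_run_one_case(struct mmc_test_card *test,
				const struct mmc_test_case *tc)
{
	int ret = 0;

	if (tc->prepare) {
		ret = tc->prepare(test);	/* e.g. allocate/fill the area */
		if (ret)
			return ret;
	}

	ret = tc->run(test);			/* RESULT_OK, RESULT_FAIL, ... */

	if (tc->cleanup)
		tc->cleanup(test);		/* free scatterlist and memory */

	return ret;
}
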