author	Adrian Hunter <adrian.hunter@nokia.com>	2010-08-11 17:17:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-12 11:43:30 -0400
commit	64f7120d890b892ed2c82c87bed958902e809075 (patch)
tree	b90a6254f7436d3883787d21849b0f4eb4505e73 /drivers/mmc
parent	4980454868af4b2f84f1f15f4b76512eecb37e1d (diff)
mmc_test: add performance tests
mmc_test provides tests aimed at testing SD/MMC hosts.  This patch adds
performance tests.  It is advantageous to have performance tests in a
kernel module like mmc_test for the following reasons:

	- transfer times can be measured very accurately
	- arbitrarily large transfers are possible
	- the effect of contiguous vs scattered pages can be determined

The new tests are:

	23. Best-case read performance
	24. Best-case write performance
	25. Best-case read performance into scattered pages
	26. Best-case write performance from scattered pages
	27. Single read performance by transfer size
	28. Single write performance by transfer size
	29. Single trim performance by transfer size
	30. Consecutive read performance by transfer size
	31. Consecutive write performance by transfer size
	32. Consecutive trim performance by transfer size

Signed-off-by: Adrian Hunter <adrian.hunter@nokia.com>
Cc: <linux-mmc@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/mmc')
-rw-r--r--	drivers/mmc/card/mmc_test.c	793
1 files changed, 791 insertions, 2 deletions
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 445d7db2277e..197f3877b017 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -25,6 +25,54 @@
 #define BUFFER_ORDER	2
 #define BUFFER_SIZE	(PAGE_SIZE << BUFFER_ORDER)
 
+/**
+ * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
+ * @page: first page in the allocation
+ * @order: order of the number of pages allocated
+ */
+struct mmc_test_pages {
+	struct page *page;
+	unsigned int order;
+};
+
+/**
+ * struct mmc_test_mem - allocated memory.
+ * @arr: array of allocations
+ * @cnt: number of allocations
+ */
+struct mmc_test_mem {
+	struct mmc_test_pages *arr;
+	unsigned int cnt;
+};
+
+/**
+ * struct mmc_test_area - information for performance tests.
+ * @dev_addr: address on card at which to do performance tests
+ * @max_sz: test area size (in bytes)
+ * @max_segs: maximum segments in scatterlist @sg
+ * @blocks: number of (512 byte) blocks currently mapped by @sg
+ * @sg_len: length of currently mapped scatterlist @sg
+ * @mem: allocated memory
+ * @sg: scatterlist
+ */
+struct mmc_test_area {
+	unsigned int dev_addr;
+	unsigned int max_sz;
+	unsigned int max_segs;
+	unsigned int blocks;
+	unsigned int sg_len;
+	struct mmc_test_mem *mem;
+	struct scatterlist *sg;
+};
+
+/**
+ * struct mmc_test_card - test information.
+ * @card: card under test
+ * @scratch: transfer buffer
+ * @buffer: transfer buffer
+ * @highmem: buffer for highmem tests
+ * @area: information for performance tests
+ */
 struct mmc_test_card {
 	struct mmc_card	*card;
 
@@ -33,6 +81,7 @@ struct mmc_test_card {
 #ifdef CONFIG_HIGHMEM
 	struct page *highmem;
 #endif
+	struct mmc_test_area	area;
 };
 
 /*******************************************************************/
@@ -97,6 +146,12 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
 	mmc_set_data_timeout(mrq->data, test->card);
 }
 
+static int mmc_test_busy(struct mmc_command *cmd)
+{
+	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
+	       (R1_CURRENT_STATE(cmd->resp[0]) == 7);
+}
+
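/*
 * Illustrative note (not part of this patch): in the R1 card status,
 * CURRENT_STATE 7 is the 'prg' (programming) state, so the helper above
 * treats the card as busy both while it is not ready for data and while
 * it is still programming previously written data.
 */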
 /*
  * Wait for the card to finish the busy state
  */
@@ -117,13 +172,13 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
 		if (ret)
 			break;
 
-		if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
+		if (!busy && mmc_test_busy(&cmd)) {
 			busy = 1;
 			printk(KERN_INFO "%s: Warning: Host did not "
 				"wait for busy state to end.\n",
 				mmc_hostname(test->card->host));
 		}
-	} while (!(cmd.resp[0] & R1_READY_FOR_DATA));
+	} while (mmc_test_busy(&cmd));
 
 	return ret;
 }
@@ -170,6 +225,246 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
 	return 0;
 }
 
+static void mmc_test_free_mem(struct mmc_test_mem *mem)
+{
+	if (!mem)
+		return;
+	while (mem->cnt--)
+		__free_pages(mem->arr[mem->cnt].page,
+			     mem->arr[mem->cnt].order);
+	kfree(mem->arr);
+	kfree(mem);
+}
+
+/*
+ * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
+ * there isn't much memory do not exceed 1/16th total RAM.
+ */
+static struct mmc_test_mem *mmc_test_alloc_mem(unsigned int min_sz,
+					       unsigned int max_sz)
+{
+	unsigned int max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
+	unsigned int min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
+	unsigned int page_cnt = 0;
+	struct mmc_test_mem *mem;
+	struct sysinfo si;
+
+	si_meminfo(&si);
+	if (max_page_cnt > si.totalram >> 4)
+		max_page_cnt = si.totalram >> 4;
+	if (max_page_cnt < min_page_cnt)
+		max_page_cnt = min_page_cnt;
+
+	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
+	if (!mem)
+		return NULL;
+
+	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_page_cnt,
+			   GFP_KERNEL);
+	if (!mem->arr)
+		goto out_free;
+
+	while (max_page_cnt) {
+		struct page *page;
+		unsigned int order;
+		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
+			      __GFP_NORETRY;
+
+		order = get_order(max_page_cnt << PAGE_SHIFT);
+		while (1) {
+			page = alloc_pages(flags, order);
+			if (page || !order)
+				break;
+			order -= 1;
+		}
+		if (!page) {
+			if (page_cnt < min_page_cnt)
+				goto out_free;
+			break;
+		}
+		mem->arr[mem->cnt].page = page;
+		mem->arr[mem->cnt].order = order;
+		mem->cnt += 1;
+		max_page_cnt -= 1 << order;
+		page_cnt += 1 << order;
+	}
+
+	return mem;
+
+out_free:
+	mmc_test_free_mem(mem);
+	return NULL;
+}
+
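/*
 * Illustration (not part of this patch): expected use of the two helpers
 * above, with hypothetical sizes.  Because alloc_pages() may fail at high
 * orders, the memory can come back as many small chunks, and the total may
 * be anywhere between min_sz and max_sz.
 */
static int __maybe_unused mmc_test_mem_example(void)
{
	struct mmc_test_mem *mem;

	/* Ask for 4 MiB but accept as little as 64 KiB. */
	mem = mmc_test_alloc_mem(64 * 1024, 4 * 1024 * 1024);
	if (!mem)
		return -ENOMEM;

	/* ... map mem into a scatterlist and perform transfers ... */

	mmc_test_free_mem(mem);
	return 0;
}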
+/*
+ * Map memory into a scatterlist.  Optionally allow the same memory to be
+ * mapped more than once.
+ */
+static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned int sz,
+			   struct scatterlist *sglist, int repeat,
+			   unsigned int max_segs, unsigned int *sg_len)
+{
+	struct scatterlist *sg = NULL;
+	unsigned int i;
+
+	sg_init_table(sglist, max_segs);
+
+	*sg_len = 0;
+	do {
+		for (i = 0; i < mem->cnt; i++) {
+			unsigned int len = PAGE_SIZE << mem->arr[i].order;
+
+			if (sz < len)
+				len = sz;
+			if (sg)
+				sg = sg_next(sg);
+			else
+				sg = sglist;
+			if (!sg)
+				return -EINVAL;
+			sg_set_page(sg, mem->arr[i].page, len, 0);
+			sz -= len;
+			*sg_len += 1;
+			if (!sz)
+				break;
+		}
+	} while (sz && repeat);
+
+	if (sz)
+		return -EINVAL;
+
+	if (sg)
+		sg_mark_end(sg);
+
+	return 0;
+}
+
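/*
 * Illustration (not part of this patch), hypothetical sizes: mapping a
 * transfer larger than the allocated memory.  With repeat set, the
 * allocation is cycled through the scatterlist until sz is covered, so an
 * 8 MiB transfer can be built from, say, 1 MiB of real memory, assuming
 * max_segs provides enough scatterlist entries.
 */
static int __maybe_unused mmc_test_map_sg_example(struct mmc_test_mem *mem,
						  struct scatterlist *sglist,
						  unsigned int max_segs)
{
	unsigned int sg_len;

	return mmc_test_map_sg(mem, 8 * 1024 * 1024, sglist, 1, max_segs,
			       &sg_len);
}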
+/*
+ * Map memory into a scatterlist so that no pages are contiguous.  Allow the
+ * same memory to be mapped more than once.
+ */
+static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
+				       unsigned int sz,
+				       struct scatterlist *sglist,
+				       unsigned int max_segs,
+				       unsigned int *sg_len)
+{
+	struct scatterlist *sg = NULL;
+	unsigned int i = mem->cnt, cnt, len;
+	void *base, *addr, *last_addr = NULL;
+
+	sg_init_table(sglist, max_segs);
+
+	*sg_len = 0;
+	while (sz && i) {
+		base = page_address(mem->arr[--i].page);
+		cnt = 1 << mem->arr[i].order;
+		while (sz && cnt) {
+			addr = base + PAGE_SIZE * --cnt;
+			if (last_addr && last_addr + PAGE_SIZE == addr)
+				continue;
+			last_addr = addr;
+			len = PAGE_SIZE;
+			if (sz < len)
+				len = sz;
+			if (sg)
+				sg = sg_next(sg);
+			else
+				sg = sglist;
+			if (!sg)
+				return -EINVAL;
+			sg_set_page(sg, virt_to_page(addr), len, 0);
+			sz -= len;
+			*sg_len += 1;
+		}
+	}
+
+	if (sg)
+		sg_mark_end(sg);
+
+	return 0;
+}
+
+/*
+ * Calculate transfer rate in bytes per second.
+ */
+static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
+{
+	uint64_t ns;
+
+	ns = ts->tv_sec;
+	ns *= 1000000000;
+	ns += ts->tv_nsec;
+
+	bytes *= 1000000000;
+
+	while (ns > UINT_MAX) {
+		bytes >>= 1;
+		ns >>= 1;
+	}
+
+	if (!ns)
+		return 0;
+
+	do_div(bytes, (uint32_t)ns);
+
+	return bytes;
+}
+
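/*
 * Worked example (not part of this patch), hypothetical numbers: 4 MiB
 * transferred in 0.25 s gives ns = 250000000, which already fits in 32
 * bits, so no scaling is needed and
 * do_div(4194304 * 1000000000, 250000000) yields 16777216 bytes/sec,
 * i.e. 16 MiB/s.  For very long times, bytes and ns are halved together
 * until ns fits the 32-bit divisor that do_div() requires, trading a
 * little precision for a valid division.
 */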
+/*
+ * Print the transfer rate.
+ */
+static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
+				struct timespec *ts1, struct timespec *ts2)
+{
+	unsigned int rate, sectors = bytes >> 9;
+	struct timespec ts;
+
+	ts = timespec_sub(*ts2, *ts1);
+
+	rate = mmc_test_rate(bytes, &ts);
+
+	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+			 "seconds (%u kB/s, %u KiB/s)\n",
+			 mmc_hostname(test->card->host), sectors, sectors >> 1,
+			 (sectors == 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
+			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
+}
+
+/*
+ * Print the average transfer rate.
+ */
+static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
+				    unsigned int count, struct timespec *ts1,
+				    struct timespec *ts2)
+{
+	unsigned int rate, sectors = bytes >> 9;
+	uint64_t tot = bytes * count;
+	struct timespec ts;
+
+	ts = timespec_sub(*ts2, *ts1);
+
+	rate = mmc_test_rate(tot, &ts);
+
+	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
+			 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
+			 mmc_hostname(test->card->host), count, sectors, count,
+			 sectors >> 1, (sectors == 1 ? ".5" : ""),
+			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
+			 rate / 1000, rate / 1024);
+}
+
+/*
+ * Return the card size in sectors.
+ */
+static unsigned int mmc_test_capacity(struct mmc_card *card)
+{
+	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
+		return card->ext_csd.sectors;
+	else
+		return card->csd.capacity << (card->csd.read_blkbits - 9);
+}
+
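/*
 * Worked example (not part of this patch), hypothetical values: a
 * standard-capacity card with csd.capacity = 1048576 and
 * csd.read_blkbits = 10 has 1048576 << (10 - 9) = 2097152 sectors of
 * 512 bytes, i.e. 1 GiB.  Block-addressed (high-capacity) MMC instead
 * reports the sector count directly in ext_csd.sectors.
 */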
 /*******************************************************************/
 /*  Test preparation and cleanup                                   */
 /*******************************************************************/
@@ -893,8 +1188,410 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test)
 	return 0;
 }
 
+#else
+
+static int mmc_test_no_highmem(struct mmc_test_card *test)
+{
+	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
+	       mmc_hostname(test->card->host));
+	return 0;
+}
+
 #endif /* CONFIG_HIGHMEM */
 
+/*
+ * Map sz bytes so that it can be transferred.
+ */
+static int mmc_test_area_map(struct mmc_test_card *test, unsigned int sz,
+			     int max_scatter)
+{
+	struct mmc_test_area *t = &test->area;
+
+	t->blocks = sz >> 9;
+
+	if (max_scatter) {
+		return mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
+						   t->max_segs, &t->sg_len);
+	} else {
+		return mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
+				       &t->sg_len);
+	}
+}
+
+/*
+ * Transfer bytes mapped by mmc_test_area_map().
+ */
+static int mmc_test_area_transfer(struct mmc_test_card *test,
+				  unsigned int dev_addr, int write)
+{
+	struct mmc_test_area *t = &test->area;
+
+	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
+					t->blocks, 512, write);
+}
+
+/*
+ * Map and transfer bytes.
+ */
+static int mmc_test_area_io(struct mmc_test_card *test, unsigned int sz,
+			    unsigned int dev_addr, int write, int max_scatter,
+			    int timed)
+{
+	struct timespec ts1, ts2;
+	int ret;
+
+	ret = mmc_test_area_map(test, sz, max_scatter);
+	if (ret)
+		return ret;
+
+	if (timed)
+		getnstimeofday(&ts1);
+
+	ret = mmc_test_area_transfer(test, dev_addr, write);
+	if (ret)
+		return ret;
+
+	if (timed)
+		getnstimeofday(&ts2);
+
+	if (timed)
+		mmc_test_print_rate(test, sz, &ts1, &ts2);
+
+	return 0;
+}
+
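/*
 * Illustration (not part of this patch), hypothetical values: a timed,
 * normally-mapped 1 MiB read from the start of the test area would be
 *
 *	ret = mmc_test_area_io(test, 1024 * 1024, test->area.dev_addr,
 *			       0, 0, 1);
 *
 * which maps the buffer, runs the transfer, and prints the measured rate.
 */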
+/*
+ * Write the test area entirely.
+ */
+static int mmc_test_area_fill(struct mmc_test_card *test)
+{
+	return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+				1, 0, 0);
+}
+
+/*
+ * Erase the test area entirely.
+ */
+static int mmc_test_area_erase(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+
+	if (!mmc_can_erase(test->card))
+		return 0;
+
+	return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
+			 MMC_ERASE_ARG);
+}
+
+/*
+ * Cleanup struct mmc_test_area.
+ */
+static int mmc_test_area_cleanup(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+
+	kfree(t->sg);
+	mmc_test_free_mem(t->mem);
+
+	return 0;
+}
+
+/*
+ * Initialize an area for testing large transfers.  The size of the area is
+ * the preferred erase size, which is a good size for optimal transfer speed.
+ * Note that it is typically 4 MiB for modern cards.  The test area is set to
+ * the middle of the card because cards may have different characteristics at
+ * the front (for FAT file system optimization).  Optionally, the area is
+ * erased (if the card supports it), which may improve write performance.
+ * Optionally, the area is filled with data for subsequent read tests.
+ */
+static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned int min_sz = 64 * 1024;
+	int ret;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	/*
+	 * Try to allocate enough memory for the whole area.  Less is OK
+	 * because the same memory can be mapped into the scatterlist more
+	 * than once.
+	 */
+	t->max_sz = test->card->pref_erase << 9;
+	t->mem = mmc_test_alloc_mem(min_sz, t->max_sz);
+	if (!t->mem)
+		return -ENOMEM;
+
+	t->max_segs = DIV_ROUND_UP(t->max_sz, PAGE_SIZE);
+	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
+	if (!t->sg) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	t->dev_addr = mmc_test_capacity(test->card) / 2;
+	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
+
+	if (erase) {
+		ret = mmc_test_area_erase(test);
+		if (ret)
+			goto out_free;
+	}
+
+	if (fill) {
+		ret = mmc_test_area_fill(test);
+		if (ret)
+			goto out_free;
+	}
+
+	return 0;
+
+out_free:
+	mmc_test_area_cleanup(test);
+	return ret;
+}
+
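/*
 * Worked example (not part of this patch), hypothetical values: for a
 * 2 GiB card (4194304 sectors) and max_sz = 4 MiB (8192 sectors), the
 * midpoint 2097152 is already a multiple of 8192, so dev_addr stays
 * 2097152.  For a card of 3911680 sectors, the midpoint 1955840 rounds
 * down to 1955840 - (1955840 % 8192) = 1949696, aligning the test area
 * to an erase-size boundary.
 */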
+/*
+ * Prepare for large transfers.  Do not erase the test area.
+ */
+static int mmc_test_area_prepare(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 0, 0);
+}
+
+/*
+ * Prepare for large transfers.  Do erase the test area.
+ */
+static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 1, 0);
+}
+
+/*
+ * Prepare for large transfers.  Erase and fill the test area.
+ */
+static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 1, 1);
+}
+
+/*
+ * Test best-case performance.  Best-case performance is expected from
+ * a single large transfer.
+ *
+ * An additional option (max_scatter) allows the measurement of the same
+ * transfer but with no contiguous pages in the scatter list.  This tests
+ * the efficiency of DMA to handle scattered pages.
+ */
+static int mmc_test_best_performance(struct mmc_test_card *test, int write,
+				     int max_scatter)
+{
+	return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+				write, max_scatter, 1);
+}
+
+/*
+ * Best-case read performance.
+ */
+static int mmc_test_best_read_performance(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 0, 0);
+}
+
+/*
+ * Best-case write performance.
+ */
+static int mmc_test_best_write_performance(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 1, 0);
+}
+
+/*
+ * Best-case read performance into scattered pages.
+ */
+static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 0, 1);
+}
+
+/*
+ * Best-case write performance from scattered pages.
+ */
+static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 1, 1);
+}
+
+/*
+ * Single read performance by transfer size.
+ */
+static int mmc_test_profile_read_perf(struct mmc_test_card *test)
+{
+	unsigned int sz, dev_addr;
+	int ret;
+
+	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+		dev_addr = test->area.dev_addr + (sz >> 9);
+		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+		if (ret)
+			return ret;
+	}
+	dev_addr = test->area.dev_addr;
+	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+}
+
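/*
 * Illustration (not part of this patch): with max_sz = 4 MiB, the loop
 * above times single reads of 512, 1 KiB, 2 KiB, ... 2 MiB at staggered
 * addresses, and the final call times one full 4 MiB read at the start
 * of the test area.  The write and trim profiles below step through the
 * same progression of sizes.
 */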
+/*
+ * Single write performance by transfer size.
+ */
+static int mmc_test_profile_write_perf(struct mmc_test_card *test)
+{
+	unsigned int sz, dev_addr;
+	int ret;
+
+	ret = mmc_test_area_erase(test);
+	if (ret)
+		return ret;
+	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+		dev_addr = test->area.dev_addr + (sz >> 9);
+		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+		if (ret)
+			return ret;
+	}
+	ret = mmc_test_area_erase(test);
+	if (ret)
+		return ret;
+	dev_addr = test->area.dev_addr;
+	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+}
+
+/*
+ * Single trim performance by transfer size.
+ */
+static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
+{
+	unsigned int sz, dev_addr;
+	struct timespec ts1, ts2;
+	int ret;
+
+	if (!mmc_can_trim(test->card))
+		return RESULT_UNSUP_CARD;
+
+	if (!mmc_can_erase(test->card))
+		return RESULT_UNSUP_HOST;
+
+	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+		dev_addr = test->area.dev_addr + (sz >> 9);
+		getnstimeofday(&ts1);
+		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+		if (ret)
+			return ret;
+		getnstimeofday(&ts2);
+		mmc_test_print_rate(test, sz, &ts1, &ts2);
+	}
+	dev_addr = test->area.dev_addr;
+	getnstimeofday(&ts1);
+	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+	if (ret)
+		return ret;
+	getnstimeofday(&ts2);
+	mmc_test_print_rate(test, sz, &ts1, &ts2);
+	return 0;
+}
+
+/*
+ * Consecutive read performance by transfer size.
+ */
+static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
+{
+	unsigned int sz, dev_addr, i, cnt;
+	struct timespec ts1, ts2;
+	int ret;
+
+	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+		cnt = test->area.max_sz / sz;
+		dev_addr = test->area.dev_addr;
+		getnstimeofday(&ts1);
+		for (i = 0; i < cnt; i++) {
+			ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
+			if (ret)
+				return ret;
+			dev_addr += (sz >> 9);
+		}
+		getnstimeofday(&ts2);
+		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	}
+	return 0;
+}
+
+/*
+ * Consecutive write performance by transfer size.
+ */
+static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
+{
+	unsigned int sz, dev_addr, i, cnt;
+	struct timespec ts1, ts2;
+	int ret;
+
+	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+		ret = mmc_test_area_erase(test);
+		if (ret)
+			return ret;
+		cnt = test->area.max_sz / sz;
+		dev_addr = test->area.dev_addr;
+		getnstimeofday(&ts1);
+		for (i = 0; i < cnt; i++) {
+			ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
+			if (ret)
+				return ret;
+			dev_addr += (sz >> 9);
+		}
+		getnstimeofday(&ts2);
+		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	}
+	return 0;
+}
+
+/*
+ * Consecutive trim performance by transfer size.
+ */
+static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
+{
+	unsigned int sz, dev_addr, i, cnt;
+	struct timespec ts1, ts2;
+	int ret;
+
+	if (!mmc_can_trim(test->card))
+		return RESULT_UNSUP_CARD;
+
+	if (!mmc_can_erase(test->card))
+		return RESULT_UNSUP_HOST;
+
+	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+		ret = mmc_test_area_erase(test);
+		if (ret)
+			return ret;
+		ret = mmc_test_area_fill(test);
+		if (ret)
+			return ret;
+		cnt = test->area.max_sz / sz;
+		dev_addr = test->area.dev_addr;
+		getnstimeofday(&ts1);
+		for (i = 0; i < cnt; i++) {
+			ret = mmc_erase(test->card, dev_addr, sz >> 9,
+					MMC_TRIM_ARG);
+			if (ret)
+				return ret;
+			dev_addr += (sz >> 9);
+		}
+		getnstimeofday(&ts2);
+		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	}
+	return 0;
+}
+
 static const struct mmc_test_case mmc_test_cases[] = {
 	{
 		.name = "Basic write (no data verification)",
@@ -1040,8 +1737,100 @@ static const struct mmc_test_case mmc_test_cases[] = {
 		.cleanup = mmc_test_cleanup,
 	},
 
+#else
+
+	{
+		.name = "Highmem write",
+		.run = mmc_test_no_highmem,
+	},
+
+	{
+		.name = "Highmem read",
+		.run = mmc_test_no_highmem,
+	},
+
+	{
+		.name = "Multi-block highmem write",
+		.run = mmc_test_no_highmem,
+	},
+
+	{
+		.name = "Multi-block highmem read",
+		.run = mmc_test_no_highmem,
+	},
+
 #endif /* CONFIG_HIGHMEM */
 
+	{
+		.name = "Best-case read performance",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_best_read_performance,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Best-case write performance",
+		.prepare = mmc_test_area_prepare_erase,
+		.run = mmc_test_best_write_performance,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Best-case read performance into scattered pages",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_best_read_perf_max_scatter,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Best-case write performance from scattered pages",
+		.prepare = mmc_test_area_prepare_erase,
+		.run = mmc_test_best_write_perf_max_scatter,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Single read performance by transfer size",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_profile_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Single write performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Single trim performance by transfer size",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_profile_trim_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Consecutive read performance by transfer size",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_profile_seq_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Consecutive write performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_seq_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Consecutive trim performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_seq_trim_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
 };
 
 static DEFINE_MUTEX(mmc_test_lock);