author     Subhash Jadavani <subhashj@codeaurora.org>   2012-06-07 06:16:58 -0400
committer  Chris Ball <cjb@laptop.org>                  2012-07-10 23:04:05 -0400
commit     ecf8b5d0a378a0f922ffca27bd0a9101f246fa11
tree       ad2460168d74b2571fff4043042d4567d6d8b600
parent     6af9e96e052a6d1a760c60cb340c5a6584cb92db
mmc: block: replace __blk_end_request() with blk_end_request()
To complete any block request, the MMC block driver currently calls:

        spin_lock_irq(queue)
        __blk_end_request()
        spin_unlock_irq(queue)
But analyzing the sources of latency in the kernel with ftrace shows
that __blk_end_request() at times takes up to 6.5 ms with the spinlock
held and IRQs disabled. __blk_end_request() calls a couple of
functions, and the ftrace output shows that blk_update_bidi_request()
alone accounts for almost 6 ms of that.
There are two functions for ending the current request:
__blk_end_request() and blk_end_request(). Both do the same thing,
except that blk_end_request() does not hold the spinlock while calling
blk_update_bidi_request().
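For reference, this is roughly how block/blk-core.c implements the two
paths in kernels of this era; a simplified sketch of
blk_end_bidi_request() and __blk_end_bidi_request() (which back
blk_end_request() and __blk_end_request() respectively), not the
verbatim source:

/*
 * blk_end_request() path: the bookkeeping in blk_update_bidi_request()
 * runs without the queue lock; the lock is taken only around the final
 * blk_finish_request() step.
 */
static bool blk_end_bidi_request(struct request *rq, int error,
                                 unsigned int nr_bytes,
                                 unsigned int bidi_bytes)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
                return true;    /* request not fully completed yet */

        spin_lock_irqsave(q->queue_lock, flags);
        blk_finish_request(rq, error);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return false;
}

/*
 * __blk_end_request() path: the caller must already hold the queue
 * lock with IRQs off, so blk_update_bidi_request() runs entirely
 * under it.
 */
static bool __blk_end_bidi_request(struct request *rq, int error,
                                   unsigned int nr_bytes,
                                   unsigned int bidi_bytes)
{
        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
                return true;

        blk_finish_request(rq, error);
        return false;
}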
This patch replaces all __blk_end_request() calls with
blk_end_request() and __blk_end_request_all() calls with
blk_end_request_all().
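Concretely, at each call site the change looks like this (the same
pattern recurs throughout the diff below):

        /* Before: the whole completion runs under the lock, IRQs off. */
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        /* After: blk_end_request() runs blk_update_bidi_request()
         * unlocked and takes the lock only to finish the request. */
        blk_end_request(req, err, blk_rq_bytes(req));

(&md->lock is the spinlock the driver registers as the request queue's
lock, so blk_end_request() still takes the same lock internally, just
for a much shorter window.)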
Testing done: 20 processes doing concurrent reads/writes to an SD card
and eMMC. This test ran for almost a day on a multicore system with no
errors observed.
This change is not meant to improve MMC throughput; it is about being
fair to other threads/interrupts in the system. By holding the spinlock
with interrupts disabled for long durations, we keep other
threads/interrupts from running at all. In fact, a slight performance
degradation at the filesystem level can be expected: since we no longer
hold the spinlock during blk_update_bidi_request(), the mmcqd thread
may be preempted by a higher-priority thread or by any interrupt in the
system.
These are the performance numbers (100MB file write) with eMMC running
in DDR mode:

Without this patch:
  Name of the Test     Value   Unit
  LMDD Read Test       53.79   MBPS
  LMDD Write Test      18.86   MBPS
  IOZONE Read Test     51.65   MBPS
  IOZONE Write Test    24.36   MBPS

With this patch:
  Name of the Test     Value   Unit
  LMDD Read Test       52.94   MBPS
  LMDD Write Test      16.70   MBPS
  IOZONE Read Test     52.08   MBPS
  IOZONE Write Test    23.29   MBPS
Read numbers are fine. Write numbers are a bit down (especially LMDD
write), perhaps because write requests normally have a large transfer
size, which means that while mmcqd is executing
blk_update_bidi_request() it can be interrupted by interrupts or a
higher-priority thread.
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
Reviewed-by: Namjae Jeon <linkinjeon@gmail.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
 drivers/mmc/card/block.c | 36 +++++++++++---------------------------
 1 file changed, 9 insertions(+), 27 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 276d21ce6bc1..f1c84decb192 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -850,9 +850,7 @@ out:
                goto retry;
        if (!err)
                mmc_blk_reset_success(md, type);
-       spin_lock_irq(&md->lock);
-       __blk_end_request(req, err, blk_rq_bytes(req));
-       spin_unlock_irq(&md->lock);
+       blk_end_request(req, err, blk_rq_bytes(req));
 
        return err ? 0 : 1;
 }
@@ -934,9 +932,7 @@ out_retry:
        if (!err)
                mmc_blk_reset_success(md, type);
 out:
-       spin_lock_irq(&md->lock);
-       __blk_end_request(req, err, blk_rq_bytes(req));
-       spin_unlock_irq(&md->lock);
+       blk_end_request(req, err, blk_rq_bytes(req));
 
        return err ? 0 : 1;
 }
@@ -951,9 +947,7 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
        if (ret)
                ret = -EIO;
 
-       spin_lock_irq(&md->lock);
-       __blk_end_request_all(req, ret);
-       spin_unlock_irq(&md->lock);
+       blk_end_request_all(req, ret);
 
        return ret ? 0 : 1;
 }
@@ -1252,14 +1246,10 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 
                blocks = mmc_sd_num_wr_blocks(card);
                if (blocks != (u32)-1) {
-                       spin_lock_irq(&md->lock);
-                       ret = __blk_end_request(req, 0, blocks << 9);
-                       spin_unlock_irq(&md->lock);
+                       ret = blk_end_request(req, 0, blocks << 9);
                }
        } else {
-               spin_lock_irq(&md->lock);
-               ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
-               spin_unlock_irq(&md->lock);
+               ret = blk_end_request(req, 0, brq->data.bytes_xfered);
        }
        return ret;
 }
@@ -1311,10 +1301,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
                         * A block was successfully transferred.
                         */
                        mmc_blk_reset_success(md, type);
-                       spin_lock_irq(&md->lock);
-                       ret = __blk_end_request(req, 0,
+                       ret = blk_end_request(req, 0,
                                                brq->data.bytes_xfered);
-                       spin_unlock_irq(&md->lock);
                        /*
                         * If the blk_end_request function returns non-zero even
                         * though all data has been transferred and no errors
@@ -1364,10 +1352,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
                         * time, so we only reach here after trying to
                         * read a single sector.
                         */
-                       spin_lock_irq(&md->lock);
-                       ret = __blk_end_request(req, -EIO,
+                       ret = blk_end_request(req, -EIO,
                                                brq->data.blksz);
-                       spin_unlock_irq(&md->lock);
                        if (!ret)
                                goto start_new_req;
                        break;
@@ -1388,12 +1374,10 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
        return 1;
 
 cmd_abort:
-       spin_lock_irq(&md->lock);
        if (mmc_card_removed(card))
                req->cmd_flags |= REQ_QUIET;
        while (ret)
-               ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
-       spin_unlock_irq(&md->lock);
+               ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
 
 start_new_req:
        if (rqc) {
@@ -1417,9 +1401,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        ret = mmc_blk_part_switch(card, md);
        if (ret) {
                if (req) {
-                       spin_lock_irq(&md->lock);
-                       __blk_end_request_all(req, -EIO);
-                       spin_unlock_irq(&md->lock);
+                       blk_end_request_all(req, -EIO);
                }
                ret = 0;
                goto out;