author		Alex Dubov <oakad@yahoo.com>	2007-04-13 13:04:38 -0400
committer	Pierre Ossman <drzeus@drzeus.cx>	2007-05-01 07:04:12 -0400
commit		14d836e7499c53a1f6a65086c3d11600e871a971 (patch)
tree		37d1cb767422bd498a13654ecabd6f891b27b0e3 /drivers/mmc/mmc_block.c
parent		dc87c3985e9b442c60994308a96f887579addc39 (diff)
mmc: cull sg list to match mmc request size
The MMC layer may impose additional limits on request size beyond those of
the block layer. Culling the sg list to match the adjusted request size
simplifies the handling of such cases in the low-level driver, allowing it
to skip block count checks while processing sg entries.
(fixes for wbsd and sdhci by Pierre Ossman)
Signed-off-by: Alex Dubov <oakad@yahoo.com>
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
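
For context, here is a minimal userspace sketch of the culling loop this
patch introduces. Plain structs stand in for the kernel's struct
scatterlist and struct mmc_data; the names seg and cull_sg are
illustrative only, not kernel API.

/*
 * Sketch of the sg-culling logic: trim a mapped segment list so it
 * covers exactly data_size bytes, as the patch below does when the
 * MMC layer accepts fewer blocks than the block layer mapped.
 */
#include <stdio.h>

struct seg {
	int length;		/* stand-in for a scatterlist entry's size */
};

/* Trim the list to cover exactly data_size bytes; return the new length. */
static int cull_sg(struct seg *sg, int sg_len, int data_size)
{
	int sg_pos;

	for (sg_pos = 0; sg_pos < sg_len; sg_pos++) {
		data_size -= sg[sg_pos].length;
		if (data_size <= 0) {
			/* data_size now holds the non-positive overshoot;
			 * adding it shrinks this entry so the list ends
			 * exactly on the request boundary. */
			sg[sg_pos].length += data_size;
			sg_pos++;
			break;
		}
	}
	return sg_pos;
}

int main(void)
{
	struct seg sg[] = { { 4096 }, { 4096 }, { 4096 } };
	/* The block layer mapped 12288 bytes, but the host only accepted
	 * 12 blocks of 512 bytes = 6144 bytes. */
	int len = cull_sg(sg, 3, 12 * 512);

	printf("sg_len=%d, last entry=%d bytes\n", len, sg[len - 1].length);
	/* prints: sg_len=2, last entry=2048 bytes */
	return 0;
}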
Diffstat (limited to 'drivers/mmc/mmc_block.c')
-rw-r--r--	drivers/mmc/mmc_block.c	16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 86439a0bb271..95b0da6abe87 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -223,7 +223,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request brq;
-	int ret = 1;
+	int ret = 1, sg_pos, data_size;
 
 	if (mmc_card_claim_host(card))
 		goto flush_queue;
@@ -283,6 +283,20 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	brq.data.sg = mq->sg;
 	brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);
 
+	if (brq.data.blocks !=
+	    (req->nr_sectors >> (md->block_bits - 9))) {
+		data_size = brq.data.blocks * brq.data.blksz;
+		for (sg_pos = 0; sg_pos < brq.data.sg_len; sg_pos++) {
+			data_size -= mq->sg[sg_pos].length;
+			if (data_size <= 0) {
+				mq->sg[sg_pos].length += data_size;
+				sg_pos++;
+				break;
+			}
+		}
+		brq.data.sg_len = sg_pos;
+	}
+
 	mmc_wait_for_req(card->host, &brq.mrq);
 	if (brq.cmd.error) {
 		printk(KERN_ERR "%s: error %d sending read/write command\n",
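
A note on the new guard, as a worked example: the shift against 512-byte
sectors implies md->block_bits is log2 of the card block size, so
req->nr_sectors >> (md->block_bits - 9) converts the block-layer request
length from sectors into card blocks. With 2048-byte blocks (block_bits =
11), a 16-sector request is 16 >> 2 = 4 blocks. When brq.data.blocks has
been reduced below that, the loop subtracts each sg entry's length from
the byte budget data_size; on the entry that crosses the boundary,
data_size is zero or negative, so adding it back shrinks that entry to
end exactly at the budget, and sg_len is truncated to sg_pos.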