author     Pierre Ossman <drzeus@drzeus.cx>        2006-10-06 03:44:03 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-10-06 11:53:41 -0400
commit     ec5a19dd935eb3793e1f6ed491e8035b3d7b1df9
tree       931d2ffce2fe444f26b51b076c3472110a3e5a8a   /drivers/mmc
parent     45e02b5b52e43aa7a674f64ca3470b9ee51fc9fd
[PATCH] mmc: multi sector write transfers
SD cards extend the protocol by allowing the host to query the card for how many
blocks were successfully stored on the medium. This allows us to safely write
multi-block chunks at once.
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
Cc: Russell King <rmk@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
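
The core of the change is a helper that, after a failed multi-block write, issues the SD application command SEND_NUM_WR_BLKS (ACMD22) and reads back a 4-byte, big-endian count of the blocks the card actually programmed. The sketch below is condensed from the mmc_sd_num_wr_blocks() helper added in this patch: the function name here is illustrative, the data timeout setup is omitted for brevity, and it relies on the same headers mmc_block.c already includes (linux/mmc/card.h, linux/mmc/host.h, the protocol definitions, and linux/scatterlist.h) plus the 2.6.18-era MMC core calls mmc_wait_for_cmd() and mmc_wait_for_req().

/*
 * Condensed sketch of the ACMD22 query; see mmc_sd_num_wr_blocks() in
 * the diff below for the full version, including data timeout setup.
 * Assumes the same includes as drivers/mmc/mmc_block.c in this tree.
 */
static u32 sd_query_written_blocks(struct mmc_card *card)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	struct scatterlist sg;
	u32 blocks;

	/* CMD55: tell the card the next command is application specific. */
	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (mmc_wait_for_cmd(card->host, &cmd, 0) != MMC_ERR_NONE ||
	    !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	/* ACMD22: the card answers with a single 4-byte data block. */
	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	memset(&data, 0, sizeof(struct mmc_data));
	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, &blocks, 4);

	memset(&mrq, 0, sizeof(struct mmc_request));
	mrq.cmd = &cmd;
	mrq.data = &data;
	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE)
		return (u32)-1;

	/* The count arrives big-endian; convert to host byte order. */
	return ntohl(blocks);
}

The block layer can then acknowledge blocks << 9 bytes of the failed request (or blocks << md->block_bits when the card supports partial writes) before erroring out the remainder, which is exactly what the second half of the diff does in the cmd_err path.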
Diffstat (limited to 'drivers/mmc')
-rw-r--r--   drivers/mmc/mmc_block.c   104
1 file changed, 95 insertions, 9 deletions
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index c1293f1bda8..f9027c8db79 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -28,6 +28,7 @@
 #include <linux/kdev_t.h>
 #include <linux/blkdev.h>
 #include <linux/mutex.h>
+#include <linux/scatterlist.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -154,6 +155,71 @@ static int mmc_blk_prep_rq(struct mmc_queue *mq, struct request *req)
 	return stat;
 }
 
+static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
+{
+	int err;
+	u32 blocks;
+
+	struct mmc_request mrq;
+	struct mmc_command cmd;
+	struct mmc_data data;
+	unsigned int timeout_us;
+
+	struct scatterlist sg;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_APP_CMD;
+	cmd.arg = card->rca << 16;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD))
+		return (u32)-1;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
+	cmd.arg = 0;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	memset(&data, 0, sizeof(struct mmc_data));
+
+	data.timeout_ns = card->csd.tacc_ns * 100;
+	data.timeout_clks = card->csd.tacc_clks * 100;
+
+	timeout_us = data.timeout_ns / 1000;
+	timeout_us += data.timeout_clks * 1000 /
+		(card->host->ios.clock / 1000);
+
+	if (timeout_us > 100000) {
+		data.timeout_ns = 100000000;
+		data.timeout_clks = 0;
+	}
+
+	data.blksz = 4;
+	data.blocks = 1;
+	data.flags = MMC_DATA_READ;
+	data.sg = &sg;
+	data.sg_len = 1;
+
+	memset(&mrq, 0, sizeof(struct mmc_request));
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+
+	sg_init_one(&sg, &blocks, 4);
+
+	mmc_wait_for_req(card->host, &mrq);
+
+	if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE)
+		return (u32)-1;
+
+	blocks = ntohl(blocks);
+
+	return blocks;
+}
+
 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -184,10 +250,13 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 	/*
 	 * If the host doesn't support multiple block writes, force
-	 * block writes to single block.
+	 * block writes to single block. SD cards are excepted from
+	 * this rule as they support querying the number of
+	 * successfully written sectors.
 	 */
 	if (rq_data_dir(req) != READ &&
-	    !(card->host->caps & MMC_CAP_MULTIWRITE))
+	    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
+	    !mmc_card_sd(card))
 		brq.data.blocks = 1;
 
 	if (brq.data.blocks > 1) {
@@ -276,24 +345,41 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	return 1;
 
  cmd_err:
-	mmc_card_release_host(card);
-
 	ret = 1;
 
 	/*
-	 * For writes and where the host claims to support proper
-	 * error reporting, we first ok the successful blocks.
+	 * If this is an SD card and we're writing, we can first
+	 * mark the known good sectors as ok.
+	 *
+	 * If the card is not SD, we can still ok written sectors
+	 * if the controller can do proper error reporting.
 	 *
 	 * For reads we just fail the entire chunk as that should
 	 * be safe in all cases.
 	 */
-	if (rq_data_dir(req) != READ &&
-	    (card->host->caps & MMC_CAP_MULTIWRITE)) {
+	if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
+		u32 blocks;
+		unsigned int bytes;
+
+		blocks = mmc_sd_num_wr_blocks(card);
+		if (blocks != (u32)-1) {
+			if (card->csd.write_partial)
+				bytes = blocks << md->block_bits;
+			else
+				bytes = blocks << 9;
+			spin_lock_irq(&md->lock);
+			ret = end_that_request_chunk(req, 1, bytes);
+			spin_unlock_irq(&md->lock);
+		}
+	} else if (rq_data_dir(req) != READ &&
+		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
 		spin_lock_irq(&md->lock);
 		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	}
 
+	mmc_card_release_host(card);
+
 	spin_lock_irq(&md->lock);
 	while (ret) {
 		ret = end_that_request_chunk(req, 0,