author    Paul Mackerras <paulus@samba.org>  2006-09-13 17:07:18 -0400
committer Paul Mackerras <paulus@samba.org>  2006-09-13 17:07:18 -0400
commit    c547fc28ab3e8716076fdaf4bd0260c5d63a18f7 (patch)
tree      34af1fa64a63618660187ae58ad182665a1861ef /drivers/mmc
parent    3dd836a56de0d4f049438412959b905e1db4666e (diff)
parent    63b98080daa35f0d682db04f4fb7ada010888752 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/imxmmc.c    | 69
-rw-r--r--  drivers/mmc/mmc.c       | 55
-rw-r--r--  drivers/mmc/mmc_block.c | 60
3 files changed, 93 insertions(+), 91 deletions(-)
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 7ca9e95bdf89..fb6565b98f32 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -91,6 +91,8 @@ struct imxmci_host {
 	int dma_allocated;
 
 	unsigned char actual_bus_width;
+
+	int prev_cmd_code;
 };
 
 #define IMXMCI_PEND_IRQ_b	0
@@ -248,16 +250,14 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
 	 * partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
 	 * This is required for SCR read at least.
 	 */
-	if (datasz < 64) {
+	if (datasz < 512) {
 		host->dma_size = datasz;
 		if (data->flags & MMC_DATA_READ) {
 			host->dma_dir = DMA_FROM_DEVICE;
 
 			/* Hack to enable read SCR */
-			if(datasz < 16) {
-				MMC_NOB = 1;
-				MMC_BLK_LEN = 16;
-			}
+			MMC_NOB = 1;
+			MMC_BLK_LEN = 512;
 		} else {
 			host->dma_dir = DMA_TO_DEVICE;
 		}
@@ -409,6 +409,9 @@ static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
+	if(req && req->cmd)
+		host->prev_cmd_code = req->cmd->opcode;
+
 	host->req = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
@@ -553,7 +556,6 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
 {
 	int i;
 	int burst_len;
-	int flush_len;
 	int trans_done = 0;
 	unsigned int stat = *pstat;
 
@@ -566,44 +568,43 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
 	dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
 		stat);
 
+	udelay(20);	/* required for clocks < 8MHz*/
+
 	if(host->dma_dir == DMA_FROM_DEVICE) {
 		imxmci_busy_wait_for_status(host, &stat,
 				STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE,
-				20, "imxmci_cpu_driven_data read");
+				50, "imxmci_cpu_driven_data read");
 
 		while((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
-		      (host->data_cnt < host->dma_size)) {
-			if(burst_len >= host->dma_size - host->data_cnt) {
-				flush_len = burst_len;
-				burst_len = host->dma_size - host->data_cnt;
-				flush_len -= burst_len;
-				host->data_cnt = host->dma_size;
-				trans_done = 1;
-			} else {
-				flush_len = 0;
-				host->data_cnt += burst_len;
-			}
+		      (host->data_cnt < 512)) {
+
+			udelay(20);	/* required for clocks < 8MHz*/
 
 			for(i = burst_len; i>=2 ; i-=2) {
-				*(host->data_ptr++) = MMC_BUFFER_ACCESS;
-				udelay(20);	/* required for clocks < 8MHz*/
+				u16 data;
+				data = MMC_BUFFER_ACCESS;
+				udelay(10);	/* required for clocks < 8MHz*/
+				if(host->data_cnt+2 <= host->dma_size) {
+					*(host->data_ptr++) = data;
+				} else {
+					if(host->data_cnt < host->dma_size)
+						*(u8*)(host->data_ptr) = data;
+				}
+				host->data_cnt += 2;
 			}
 
-			if(i == 1)
-				*(u8*)(host->data_ptr) = MMC_BUFFER_ACCESS;
-
 			stat = MMC_STATUS;
 
-			/* Flush extra bytes from FIFO */
-			while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){
-				i = MMC_BUFFER_ACCESS;
-				stat = MMC_STATUS;
-				stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */
-			}
-
-			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read burst %d STATUS = 0x%x\n",
-				burst_len, stat);
+			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
+				host->data_cnt, burst_len, stat);
 		}
+
+		if((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
+			trans_done = 1;
+
+		if(host->dma_size & 0x1ff)
+			stat &= ~STATUS_CRC_READ_ERR;
+
 	} else {
 		imxmci_busy_wait_for_status(host, &stat,
 				STATUS_APPL_BUFF_FE,
@@ -692,8 +693,8 @@ static void imxmci_tasklet_fnc(unsigned long data)
 			what, stat, MMC_INT_MASK);
 		dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
 			MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
-		dev_err(mmc_dev(host->mmc), "CMD%d, bus %d-bit, dma_size = 0x%x\n",
-			host->cmd?host->cmd->opcode:0, 1<<host->actual_bus_width, host->dma_size);
+		dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
+			host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size);
 	}
 
 	if(!host->present || timeout)
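
Note on the reworked read loop above: it drops the separate flush pass entirely. Every 16-bit word is always popped from the FIFO, and only the bytes that still fit inside host->dma_size are stored. Below is a minimal standalone sketch of that bounded-drain pattern; fifo_pop() is a hypothetical stand-in for the MMC_BUFFER_ACCESS register read, and little-endian byte order is assumed.

#include <stdint.h>
#include <stddef.h>

/*
 * Sketch of the bounded FIFO drain: pop full 16-bit words
 * unconditionally, but clamp stores to dma_size so an odd trailing
 * byte cannot overrun the buffer. fifo_pop() is hypothetical.
 */
extern uint16_t fifo_pop(void);

static size_t drain_burst(uint8_t *buf, size_t data_cnt, size_t dma_size,
			  size_t burst_len)
{
	size_t i;

	for (i = burst_len; i >= 2; i -= 2) {
		uint16_t data = fifo_pop();	/* word leaves the FIFO either way */

		if (data_cnt + 2 <= dma_size) {		/* both bytes fit */
			buf[data_cnt] = data & 0xff;
			buf[data_cnt + 1] = data >> 8;
		} else if (data_cnt < dma_size) {	/* keep low byte only */
			buf[data_cnt] = data & 0xff;
		}
		data_cnt += 2;	/* count what was popped, not what was kept */
	}
	return data_cnt;
}

Counting popped bytes rather than stored bytes matters here because the 512-byte hardware block must be drained completely even when the request (an SCR read, for example) is shorter.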
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 33525bdf2ab6..74eaaee66de0 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -247,6 +247,55 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
 
 EXPORT_SYMBOL(mmc_wait_for_app_cmd);
 
+/**
+ * mmc_set_data_timeout - set the timeout for a data command
+ * @data: data phase for command
+ * @card: the MMC card associated with the data transfer
+ * @write: flag to differentiate reads from writes
+ */
+void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
+	int write)
+{
+	unsigned int mult;
+
+	/*
+	 * SD cards use a 100 multiplier rather than 10
+	 */
+	mult = mmc_card_sd(card) ? 100 : 10;
+
+	/*
+	 * Scale up the multiplier (and therefore the timeout) by
+	 * the r2w factor for writes.
+	 */
+	if (write)
+		mult <<= card->csd.r2w_factor;
+
+	data->timeout_ns = card->csd.tacc_ns * mult;
+	data->timeout_clks = card->csd.tacc_clks * mult;
+
+	/*
+	 * SD cards also have an upper limit on the timeout.
+	 */
+	if (mmc_card_sd(card)) {
+		unsigned int timeout_us, limit_us;
+
+		timeout_us = data->timeout_ns / 1000;
+		timeout_us += data->timeout_clks * 1000 /
+			(card->host->ios.clock / 1000);
+
+		if (write)
+			limit_us = 250000;
+		else
+			limit_us = 100000;
+
+		if (timeout_us > limit_us) {
+			data->timeout_ns = limit_us * 1000;
+			data->timeout_clks = 0;
+		}
+	}
+}
+EXPORT_SYMBOL(mmc_set_data_timeout);
+
 static int mmc_select_card(struct mmc_host *host, struct mmc_card *card);
 
 /**
@@ -908,11 +957,9 @@ static void mmc_read_scrs(struct mmc_host *host)
 {
 	int err;
 	struct mmc_card *card;
-
 	struct mmc_request mrq;
 	struct mmc_command cmd;
 	struct mmc_data data;
-
 	struct scatterlist sg;
 
 	list_for_each_entry(card, &host->cards, node) {
@@ -947,8 +994,8 @@ static void mmc_read_scrs(struct mmc_host *host)
 
 		memset(&data, 0, sizeof(struct mmc_data));
 
-		data.timeout_ns = card->csd.tacc_ns * 10;
-		data.timeout_clks = card->csd.tacc_clks * 10;
+		mmc_set_data_timeout(&data, card, 0);
+
 		data.blksz_bits = 3;
 		data.blksz = 1 << 3;
 		data.blocks = 1;
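
Net effect of the mmc.c change: data-phase setup no longer touches tacc_ns/tacc_clks directly. A minimal sketch of a read setup against the new helper, mirroring the SCR read above (the function name and field values here are illustrative, not part of the patch):

#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>

/*
 * Illustrative caller of the new helper: fill in the data phase and
 * let mmc_set_data_timeout() derive timeout_ns/timeout_clks from the
 * card's CSD.
 */
static void setup_scr_read(struct mmc_data *data, struct mmc_card *card,
			   struct scatterlist *sg)
{
	memset(data, 0, sizeof(struct mmc_data));

	mmc_set_data_timeout(data, card, 0);	/* 0 => read limits apply */

	data->blksz_bits = 3;		/* 8-byte SCR block */
	data->blksz = 1 << 3;
	data->blocks = 1;
	data->flags = MMC_DATA_READ;
	data->sg = sg;
	data->sg_len = 1;
}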
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 115cc21094b9..a0e0dad1b419 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -30,6 +30,7 @@
 #include <linux/mutex.h>
 
 #include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
 #include <linux/mmc/protocol.h>
 
 #include <asm/system.h>
@@ -171,8 +172,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 	brq.cmd.arg = req->sector << 9;
 	brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
-	brq.data.timeout_ns = card->csd.tacc_ns * 10;
-	brq.data.timeout_clks = card->csd.tacc_clks * 10;
 	brq.data.blksz_bits = md->block_bits;
 	brq.data.blksz = 1 << md->block_bits;
 	brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
@@ -180,6 +179,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	brq.stop.arg = 0;
 	brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
 
+	mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
+
 	if (rq_data_dir(req) == READ) {
 		brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
 		brq.data.flags |= MMC_DATA_READ;
@@ -187,12 +188,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.cmd.opcode = MMC_WRITE_BLOCK;
 		brq.data.flags |= MMC_DATA_WRITE;
 		brq.data.blocks = 1;
-
-		/*
-		 * Scale up the timeout by the r2w factor
-		 */
-		brq.data.timeout_ns <<= card->csd.r2w_factor;
-		brq.data.timeout_clks <<= card->csd.r2w_factor;
 	}
 
 	if (brq.data.blocks > 1) {
@@ -324,52 +319,11 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	md->read_only = mmc_blk_readonly(card);
 
 	/*
-	 * Figure out a workable block size. MMC cards have:
-	 * - two block sizes, one for read and one for write.
-	 * - may support partial reads and/or writes
-	 *   (allows block sizes smaller than specified)
-	 */
-	md->block_bits = card->csd.read_blkbits;
-	if (card->csd.write_blkbits != card->csd.read_blkbits) {
-		if (card->csd.write_blkbits < card->csd.read_blkbits &&
-		    card->csd.read_partial) {
-			/*
-			 * write block size is smaller than read block
-			 * size, but we support partial reads, so choose
-			 * the smaller write block size.
-			 */
-			md->block_bits = card->csd.write_blkbits;
-		} else if (card->csd.write_blkbits > card->csd.read_blkbits &&
-			   card->csd.write_partial) {
-			/*
-			 * read block size is smaller than write block
-			 * size, but we support partial writes. Use read
-			 * block size.
-			 */
-		} else {
-			/*
-			 * We don't support this configuration for writes.
-			 */
-			printk(KERN_ERR "%s: unable to select block size for "
-				"writing (rb%u wb%u rp%u wp%u)\n",
-				mmc_card_id(card),
-				1 << card->csd.read_blkbits,
-				1 << card->csd.write_blkbits,
-				card->csd.read_partial,
-				card->csd.write_partial);
-			md->read_only = 1;
-		}
-	}
-
-	/*
-	 * Refuse to allow block sizes smaller than 512 bytes.
+	 * Both SD and MMC specifications state (although a bit
+	 * unclearly in the MMC case) that a block size of 512
+	 * bytes must always be supported by the card.
 	 */
-	if (md->block_bits < 9) {
-		printk(KERN_ERR "%s: unable to support block size %u\n",
-			mmc_card_id(card), 1 << md->block_bits);
-		ret = -EINVAL;
-		goto err_kfree;
-	}
+	md->block_bits = 9;
 
 	md->disk = alloc_disk(1 << MMC_SHIFT);
 	if (md->disk == NULL) {
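
As a worked example of what mmc_blk_issue_rq() now gets from the helper instead of the deleted open-coded shifts: for a write, the base multiplier is scaled by 2^r2w_factor before being applied to the CSD access time. A standalone sketch with made-up CSD numbers:

#include <stdio.h>

/*
 * Illustration of the write-timeout arithmetic now done by
 * mmc_set_data_timeout(). The CSD values below are made up; the
 * 10x/100x multipliers and the r2w scaling come from the patch.
 */
int main(void)
{
	unsigned int tacc_ns = 1000000;		/* illustrative CSD TAAC: 1 ms */
	unsigned int tacc_clks = 0;		/* illustrative CSD NSAC */
	unsigned int r2w_factor = 4;		/* illustrative CSD R2W_FACTOR */
	unsigned int mult = 10;			/* MMC base multiplier (SD: 100) */

	mult <<= r2w_factor;			/* write: 10 << 4 = 160 */

	printf("timeout_ns   = %u\n", tacc_ns * mult);	/* 160000000, i.e. 160 ms */
	printf("timeout_clks = %u\n", tacc_clks * mult);
	return 0;
}

For an SD card the helper would additionally clamp the result to the fixed limits from the patch, 250 ms for writes and 100 ms for reads.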