aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/mmc')
-rw-r--r--drivers/mmc/at91_mci.c6
-rw-r--r--drivers/mmc/au1xmmc.c5
-rw-r--r--drivers/mmc/imxmmc.c72
-rw-r--r--drivers/mmc/mmc.c59
-rw-r--r--drivers/mmc/mmc_block.c130
-rw-r--r--drivers/mmc/mmc_queue.c3
-rw-r--r--drivers/mmc/mmci.c22
-rw-r--r--drivers/mmc/omap.c51
-rw-r--r--drivers/mmc/pxamci.c1
-rw-r--r--drivers/mmc/sdhci.c598
-rw-r--r--drivers/mmc/sdhci.h34
-rw-r--r--drivers/mmc/wbsd.c14
12 files changed, 642 insertions, 353 deletions
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index 3228516b7d19..cb142a66098c 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -53,7 +53,6 @@
53 Gets the status of the write protect pin, if available. 53 Gets the status of the write protect pin, if available.
54*/ 54*/
55 55
56#include <linux/config.h>
57#include <linux/module.h> 56#include <linux/module.h>
58#include <linux/moduleparam.h> 57#include <linux/moduleparam.h>
59#include <linux/init.h> 58#include <linux/init.h>
@@ -823,6 +822,7 @@ static int at91_mci_probe(struct platform_device *pdev)
823 mmc->f_min = 375000; 822 mmc->f_min = 375000;
824 mmc->f_max = 25000000; 823 mmc->f_max = 25000000;
825 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 824 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
825 mmc->caps = MMC_CAP_BYTEBLOCK;
826 826
827 host = mmc_priv(mmc); 827 host = mmc_priv(mmc);
828 host->mmc = mmc; 828 host->mmc = mmc;
@@ -851,7 +851,7 @@ static int at91_mci_probe(struct platform_device *pdev)
851 /* 851 /*
852 * Allocate the MCI interrupt 852 * Allocate the MCI interrupt
853 */ 853 */
854 ret = request_irq(AT91_ID_MCI, at91_mci_irq, SA_SHIRQ, DRIVER_NAME, host); 854 ret = request_irq(AT91RM9200_ID_MCI, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
855 if (ret) { 855 if (ret) {
856 printk(KERN_ERR "Failed to request MCI interrupt\n"); 856 printk(KERN_ERR "Failed to request MCI interrupt\n");
857 clk_disable(mci_clk); 857 clk_disable(mci_clk);
@@ -907,7 +907,7 @@ static int at91_mci_remove(struct platform_device *pdev)
907 907
908 mmc_remove_host(mmc); 908 mmc_remove_host(mmc);
909 at91_mci_disable(); 909 at91_mci_disable();
910 free_irq(AT91_ID_MCI, host); 910 free_irq(AT91RM9200_ID_MCI, host);
911 mmc_free_host(mmc); 911 mmc_free_host(mmc);
912 912
913 clk_disable(mci_clk); /* Disable the peripheral clock */ 913 clk_disable(mci_clk); /* Disable the peripheral clock */
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 5dc4bee7abeb..61268da13957 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -34,7 +34,6 @@
34 * So we use the timer to check the status manually. 34 * So we use the timer to check the status manually.
35 */ 35 */
36 36
37#include <linux/config.h>
38#include <linux/module.h> 37#include <linux/module.h>
39#include <linux/init.h> 38#include <linux/init.h>
40#include <linux/platform_device.h> 39#include <linux/platform_device.h>
@@ -732,7 +731,7 @@ static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
732 } 731 }
733} 732}
734 733
735static void au1xmmc_dma_callback(int irq, void *dev_id, struct pt_regs *regs) 734static void au1xmmc_dma_callback(int irq, void *dev_id)
736{ 735{
737 struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id; 736 struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id;
738 737
@@ -887,7 +886,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
887 int i, ret = 0; 886 int i, ret = 0;
888 887
889 /* THe interrupt is shared among all controllers */ 888 /* THe interrupt is shared among all controllers */
890 ret = request_irq(AU1100_SD_IRQ, au1xmmc_irq, SA_INTERRUPT, "MMC", 0); 889 ret = request_irq(AU1100_SD_IRQ, au1xmmc_irq, IRQF_DISABLED, "MMC", 0);
891 890
892 if (ret) { 891 if (ret) {
893 printk(DRIVER_NAME "ERROR: Couldn't get int %d: %d\n", 892 printk(DRIVER_NAME "ERROR: Couldn't get int %d: %d\n",
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 5c62f4e6ad06..1b79dd271aae 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -25,7 +25,6 @@
25 * deficiencies 25 * deficiencies
26 * 26 *
27 */ 27 */
28#include <linux/config.h>
29 28
30#ifdef CONFIG_MMC_DEBUG 29#ifdef CONFIG_MMC_DEBUG
31#define DEBUG 30#define DEBUG
@@ -92,6 +91,8 @@ struct imxmci_host {
92 int dma_allocated; 91 int dma_allocated;
93 92
94 unsigned char actual_bus_width; 93 unsigned char actual_bus_width;
94
95 int prev_cmd_code;
95}; 96};
96 97
97#define IMXMCI_PEND_IRQ_b 0 98#define IMXMCI_PEND_IRQ_b 0
@@ -249,16 +250,14 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
249 * partial FIFO fills and reads. The length has to be rounded up to burst size multiple. 250 * partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
250 * This is required for SCR read at least. 251 * This is required for SCR read at least.
251 */ 252 */
252 if (datasz < 64) { 253 if (datasz < 512) {
253 host->dma_size = datasz; 254 host->dma_size = datasz;
254 if (data->flags & MMC_DATA_READ) { 255 if (data->flags & MMC_DATA_READ) {
255 host->dma_dir = DMA_FROM_DEVICE; 256 host->dma_dir = DMA_FROM_DEVICE;
256 257
257 /* Hack to enable read SCR */ 258 /* Hack to enable read SCR */
258 if(datasz < 16) { 259 MMC_NOB = 1;
259 MMC_NOB = 1; 260 MMC_BLK_LEN = 512;
260 MMC_BLK_LEN = 16;
261 }
262 } else { 261 } else {
263 host->dma_dir = DMA_TO_DEVICE; 262 host->dma_dir = DMA_TO_DEVICE;
264 } 263 }
@@ -410,6 +409,9 @@ static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *
410 409
411 spin_unlock_irqrestore(&host->lock, flags); 410 spin_unlock_irqrestore(&host->lock, flags);
412 411
412 if(req && req->cmd)
413 host->prev_cmd_code = req->cmd->opcode;
414
413 host->req = NULL; 415 host->req = NULL;
414 host->cmd = NULL; 416 host->cmd = NULL;
415 host->data = NULL; 417 host->data = NULL;
@@ -554,7 +556,6 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
554{ 556{
555 int i; 557 int i;
556 int burst_len; 558 int burst_len;
557 int flush_len;
558 int trans_done = 0; 559 int trans_done = 0;
559 unsigned int stat = *pstat; 560 unsigned int stat = *pstat;
560 561
@@ -567,44 +568,43 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
567 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n", 568 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
568 stat); 569 stat);
569 570
571 udelay(20); /* required for clocks < 8MHz*/
572
570 if(host->dma_dir == DMA_FROM_DEVICE) { 573 if(host->dma_dir == DMA_FROM_DEVICE) {
571 imxmci_busy_wait_for_status(host, &stat, 574 imxmci_busy_wait_for_status(host, &stat,
572 STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE, 575 STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE,
573 20, "imxmci_cpu_driven_data read"); 576 50, "imxmci_cpu_driven_data read");
574 577
575 while((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) && 578 while((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
576 (host->data_cnt < host->dma_size)) { 579 (host->data_cnt < 512)) {
577 if(burst_len >= host->dma_size - host->data_cnt) { 580
578 flush_len = burst_len; 581 udelay(20); /* required for clocks < 8MHz*/
579 burst_len = host->dma_size - host->data_cnt;
580 flush_len -= burst_len;
581 host->data_cnt = host->dma_size;
582 trans_done = 1;
583 } else {
584 flush_len = 0;
585 host->data_cnt += burst_len;
586 }
587 582
588 for(i = burst_len; i>=2 ; i-=2) { 583 for(i = burst_len; i>=2 ; i-=2) {
589 *(host->data_ptr++) = MMC_BUFFER_ACCESS; 584 u16 data;
590 udelay(20); /* required for clocks < 8MHz*/ 585 data = MMC_BUFFER_ACCESS;
586 udelay(10); /* required for clocks < 8MHz*/
587 if(host->data_cnt+2 <= host->dma_size) {
588 *(host->data_ptr++) = data;
589 } else {
590 if(host->data_cnt < host->dma_size)
591 *(u8*)(host->data_ptr) = data;
592 }
593 host->data_cnt += 2;
591 } 594 }
592 595
593 if(i == 1)
594 *(u8*)(host->data_ptr) = MMC_BUFFER_ACCESS;
595
596 stat = MMC_STATUS; 596 stat = MMC_STATUS;
597 597
598 /* Flush extra bytes from FIFO */ 598 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
599 while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){ 599 host->data_cnt, burst_len, stat);
600 i = MMC_BUFFER_ACCESS;
601 stat = MMC_STATUS;
602 stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */
603 }
604
605 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read burst %d STATUS = 0x%x\n",
606 burst_len, stat);
607 } 600 }
601
602 if((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
603 trans_done = 1;
604
605 if(host->dma_size & 0x1ff)
606 stat &= ~STATUS_CRC_READ_ERR;
607
608 } else { 608 } else {
609 imxmci_busy_wait_for_status(host, &stat, 609 imxmci_busy_wait_for_status(host, &stat,
610 STATUS_APPL_BUFF_FE, 610 STATUS_APPL_BUFF_FE,
@@ -693,8 +693,8 @@ static void imxmci_tasklet_fnc(unsigned long data)
693 what, stat, MMC_INT_MASK); 693 what, stat, MMC_INT_MASK);
694 dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n", 694 dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
695 MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma)); 695 MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
696 dev_err(mmc_dev(host->mmc), "CMD%d, bus %d-bit, dma_size = 0x%x\n", 696 dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
697 host->cmd?host->cmd->opcode:0, 1<<host->actual_bus_width, host->dma_size); 697 host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size);
698 } 698 }
699 699
700 if(!host->present || timeout) 700 if(!host->present || timeout)
@@ -956,7 +956,7 @@ static int imxmci_probe(struct platform_device *pdev)
956 mmc->f_min = 150000; 956 mmc->f_min = 150000;
957 mmc->f_max = CLK_RATE/2; 957 mmc->f_max = CLK_RATE/2;
958 mmc->ocr_avail = MMC_VDD_32_33; 958 mmc->ocr_avail = MMC_VDD_32_33;
959 mmc->caps |= MMC_CAP_4_BIT_DATA; 959 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_BYTEBLOCK;
960 960
961 /* MMC core transfer sizes tunable parameters */ 961 /* MMC core transfer sizes tunable parameters */
962 mmc->max_hw_segs = 64; 962 mmc->max_hw_segs = 64;
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 6201f3086a02..5b9caa7978d3 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -9,7 +9,6 @@
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12#include <linux/config.h>
13#include <linux/module.h> 12#include <linux/module.h>
14#include <linux/init.h> 13#include <linux/init.h>
15#include <linux/interrupt.h> 14#include <linux/interrupt.h>
@@ -129,7 +128,7 @@ static void mmc_wait_done(struct mmc_request *mrq)
129 128
130int mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) 129int mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
131{ 130{
132 DECLARE_COMPLETION(complete); 131 DECLARE_COMPLETION_ONSTACK(complete);
133 132
134 mrq->done_data = &complete; 133 mrq->done_data = &complete;
135 mrq->done = mmc_wait_done; 134 mrq->done = mmc_wait_done;
@@ -248,6 +247,55 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
248 247
249EXPORT_SYMBOL(mmc_wait_for_app_cmd); 248EXPORT_SYMBOL(mmc_wait_for_app_cmd);
250 249
250/**
251 * mmc_set_data_timeout - set the timeout for a data command
252 * @data: data phase for command
253 * @card: the MMC card associated with the data transfer
254 * @write: flag to differentiate reads from writes
255 */
256void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
257 int write)
258{
259 unsigned int mult;
260
261 /*
262 * SD cards use a 100 multiplier rather than 10
263 */
264 mult = mmc_card_sd(card) ? 100 : 10;
265
266 /*
267 * Scale up the multiplier (and therefore the timeout) by
268 * the r2w factor for writes.
269 */
270 if (write)
271 mult <<= card->csd.r2w_factor;
272
273 data->timeout_ns = card->csd.tacc_ns * mult;
274 data->timeout_clks = card->csd.tacc_clks * mult;
275
276 /*
277 * SD cards also have an upper limit on the timeout.
278 */
279 if (mmc_card_sd(card)) {
280 unsigned int timeout_us, limit_us;
281
282 timeout_us = data->timeout_ns / 1000;
283 timeout_us += data->timeout_clks * 1000 /
284 (card->host->ios.clock / 1000);
285
286 if (write)
287 limit_us = 250000;
288 else
289 limit_us = 100000;
290
291 if (timeout_us > limit_us) {
292 data->timeout_ns = limit_us * 1000;
293 data->timeout_clks = 0;
294 }
295 }
296}
297EXPORT_SYMBOL(mmc_set_data_timeout);
298
251static int mmc_select_card(struct mmc_host *host, struct mmc_card *card); 299static int mmc_select_card(struct mmc_host *host, struct mmc_card *card);
252 300
253/** 301/**
@@ -909,11 +957,9 @@ static void mmc_read_scrs(struct mmc_host *host)
909{ 957{
910 int err; 958 int err;
911 struct mmc_card *card; 959 struct mmc_card *card;
912
913 struct mmc_request mrq; 960 struct mmc_request mrq;
914 struct mmc_command cmd; 961 struct mmc_command cmd;
915 struct mmc_data data; 962 struct mmc_data data;
916
917 struct scatterlist sg; 963 struct scatterlist sg;
918 964
919 list_for_each_entry(card, &host->cards, node) { 965 list_for_each_entry(card, &host->cards, node) {
@@ -948,9 +994,8 @@ static void mmc_read_scrs(struct mmc_host *host)
948 994
949 memset(&data, 0, sizeof(struct mmc_data)); 995 memset(&data, 0, sizeof(struct mmc_data));
950 996
951 data.timeout_ns = card->csd.tacc_ns * 10; 997 mmc_set_data_timeout(&data, card, 0);
952 data.timeout_clks = card->csd.tacc_clks * 10; 998
953 data.blksz_bits = 3;
954 data.blksz = 1 << 3; 999 data.blksz = 1 << 3;
955 data.blocks = 1; 1000 data.blocks = 1;
956 data.flags = MMC_DATA_READ; 1001 data.flags = MMC_DATA_READ;
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 587458b370b9..db0e8ad439a5 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -27,11 +27,12 @@
27#include <linux/hdreg.h> 27#include <linux/hdreg.h>
28#include <linux/kdev_t.h> 28#include <linux/kdev_t.h>
29#include <linux/blkdev.h> 29#include <linux/blkdev.h>
30#include <linux/devfs_fs_kernel.h>
31#include <linux/mutex.h> 30#include <linux/mutex.h>
32 31
33#include <linux/mmc/card.h> 32#include <linux/mmc/card.h>
33#include <linux/mmc/host.h>
34#include <linux/mmc/protocol.h> 34#include <linux/mmc/protocol.h>
35#include <linux/mmc/host.h>
35 36
36#include <asm/system.h> 37#include <asm/system.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
@@ -165,6 +166,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
165 do { 166 do {
166 struct mmc_blk_request brq; 167 struct mmc_blk_request brq;
167 struct mmc_command cmd; 168 struct mmc_command cmd;
169 u32 readcmd, writecmd;
168 170
169 memset(&brq, 0, sizeof(struct mmc_blk_request)); 171 memset(&brq, 0, sizeof(struct mmc_blk_request));
170 brq.mrq.cmd = &brq.cmd; 172 brq.mrq.cmd = &brq.cmd;
@@ -172,35 +174,39 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
172 174
173 brq.cmd.arg = req->sector << 9; 175 brq.cmd.arg = req->sector << 9;
174 brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 176 brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
175 brq.data.timeout_ns = card->csd.tacc_ns * 10;
176 brq.data.timeout_clks = card->csd.tacc_clks * 10;
177 brq.data.blksz_bits = md->block_bits;
178 brq.data.blksz = 1 << md->block_bits; 177 brq.data.blksz = 1 << md->block_bits;
179 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); 178 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
180 brq.stop.opcode = MMC_STOP_TRANSMISSION; 179 brq.stop.opcode = MMC_STOP_TRANSMISSION;
181 brq.stop.arg = 0; 180 brq.stop.arg = 0;
182 brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC; 181 brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
183 182
184 if (rq_data_dir(req) == READ) { 183 mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
185 brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
186 brq.data.flags |= MMC_DATA_READ;
187 } else {
188 brq.cmd.opcode = MMC_WRITE_BLOCK;
189 brq.data.flags |= MMC_DATA_WRITE;
190 brq.data.blocks = 1;
191 184
192 /* 185 /*
193 * Scale up the timeout by the r2w factor 186 * If the host doesn't support multiple block writes, force
194 */ 187 * block writes to single block.
195 brq.data.timeout_ns <<= card->csd.r2w_factor; 188 */
196 brq.data.timeout_clks <<= card->csd.r2w_factor; 189 if (rq_data_dir(req) != READ &&
197 } 190 !(card->host->caps & MMC_CAP_MULTIWRITE))
191 brq.data.blocks = 1;
198 192
199 if (brq.data.blocks > 1) { 193 if (brq.data.blocks > 1) {
200 brq.data.flags |= MMC_DATA_MULTI; 194 brq.data.flags |= MMC_DATA_MULTI;
201 brq.mrq.stop = &brq.stop; 195 brq.mrq.stop = &brq.stop;
196 readcmd = MMC_READ_MULTIPLE_BLOCK;
197 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
202 } else { 198 } else {
203 brq.mrq.stop = NULL; 199 brq.mrq.stop = NULL;
200 readcmd = MMC_READ_SINGLE_BLOCK;
201 writecmd = MMC_WRITE_BLOCK;
202 }
203
204 if (rq_data_dir(req) == READ) {
205 brq.cmd.opcode = readcmd;
206 brq.data.flags |= MMC_DATA_READ;
207 } else {
208 brq.cmd.opcode = writecmd;
209 brq.data.flags |= MMC_DATA_WRITE;
204 } 210 }
205 211
206 brq.data.sg = mq->sg; 212 brq.data.sg = mq->sg;
@@ -225,27 +231,29 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
225 goto cmd_err; 231 goto cmd_err;
226 } 232 }
227 233
228 do { 234 if (rq_data_dir(req) != READ) {
229 int err; 235 do {
230 236 int err;
231 cmd.opcode = MMC_SEND_STATUS; 237
232 cmd.arg = card->rca << 16; 238 cmd.opcode = MMC_SEND_STATUS;
233 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 239 cmd.arg = card->rca << 16;
234 err = mmc_wait_for_cmd(card->host, &cmd, 5); 240 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
235 if (err) { 241 err = mmc_wait_for_cmd(card->host, &cmd, 5);
236 printk(KERN_ERR "%s: error %d requesting status\n", 242 if (err) {
237 req->rq_disk->disk_name, err); 243 printk(KERN_ERR "%s: error %d requesting status\n",
238 goto cmd_err; 244 req->rq_disk->disk_name, err);
239 } 245 goto cmd_err;
240 } while (!(cmd.resp[0] & R1_READY_FOR_DATA)); 246 }
247 } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
241 248
242#if 0 249#if 0
243 if (cmd.resp[0] & ~0x00000900) 250 if (cmd.resp[0] & ~0x00000900)
244 printk(KERN_ERR "%s: status = %08x\n", 251 printk(KERN_ERR "%s: status = %08x\n",
245 req->rq_disk->disk_name, cmd.resp[0]); 252 req->rq_disk->disk_name, cmd.resp[0]);
246 if (mmc_decode_status(cmd.resp)) 253 if (mmc_decode_status(cmd.resp))
247 goto cmd_err; 254 goto cmd_err;
248#endif 255#endif
256 }
249 257
250 /* 258 /*
251 * A block was successfully transferred. 259 * A block was successfully transferred.
@@ -325,52 +333,11 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
325 md->read_only = mmc_blk_readonly(card); 333 md->read_only = mmc_blk_readonly(card);
326 334
327 /* 335 /*
328 * Figure out a workable block size. MMC cards have: 336 * Both SD and MMC specifications state (although a bit
329 * - two block sizes, one for read and one for write. 337 * unclearly in the MMC case) that a block size of 512
330 * - may support partial reads and/or writes 338 * bytes must always be supported by the card.
331 * (allows block sizes smaller than specified)
332 */
333 md->block_bits = card->csd.read_blkbits;
334 if (card->csd.write_blkbits != card->csd.read_blkbits) {
335 if (card->csd.write_blkbits < card->csd.read_blkbits &&
336 card->csd.read_partial) {
337 /*
338 * write block size is smaller than read block
339 * size, but we support partial reads, so choose
340 * the smaller write block size.
341 */
342 md->block_bits = card->csd.write_blkbits;
343 } else if (card->csd.write_blkbits > card->csd.read_blkbits &&
344 card->csd.write_partial) {
345 /*
346 * read block size is smaller than write block
347 * size, but we support partial writes. Use read
348 * block size.
349 */
350 } else {
351 /*
352 * We don't support this configuration for writes.
353 */
354 printk(KERN_ERR "%s: unable to select block size for "
355 "writing (rb%u wb%u rp%u wp%u)\n",
356 mmc_card_id(card),
357 1 << card->csd.read_blkbits,
358 1 << card->csd.write_blkbits,
359 card->csd.read_partial,
360 card->csd.write_partial);
361 md->read_only = 1;
362 }
363 }
364
365 /*
366 * Refuse to allow block sizes smaller than 512 bytes.
367 */ 339 */
368 if (md->block_bits < 9) { 340 md->block_bits = 9;
369 printk(KERN_ERR "%s: unable to support block size %u\n",
370 mmc_card_id(card), 1 << md->block_bits);
371 ret = -EINVAL;
372 goto err_kfree;
373 }
374 341
375 md->disk = alloc_disk(1 << MMC_SHIFT); 342 md->disk = alloc_disk(1 << MMC_SHIFT);
376 if (md->disk == NULL) { 343 if (md->disk == NULL) {
@@ -409,7 +376,6 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
409 */ 376 */
410 377
411 sprintf(md->disk->disk_name, "mmcblk%d", devidx); 378 sprintf(md->disk->disk_name, "mmcblk%d", devidx);
412 sprintf(md->disk->devfs_name, "mmc/blk%d", devidx);
413 379
414 blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits); 380 blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);
415 381
@@ -555,7 +521,6 @@ static int __init mmc_blk_init(void)
555 if (major == 0) 521 if (major == 0)
556 major = res; 522 major = res;
557 523
558 devfs_mk_dir("mmc");
559 return mmc_register_driver(&mmc_driver); 524 return mmc_register_driver(&mmc_driver);
560 525
561 out: 526 out:
@@ -565,7 +530,6 @@ static int __init mmc_blk_init(void)
565static void __exit mmc_blk_exit(void) 530static void __exit mmc_blk_exit(void)
566{ 531{
567 mmc_unregister_driver(&mmc_driver); 532 mmc_unregister_driver(&mmc_driver);
568 devfs_remove("mmc");
569 unregister_blkdev(major, "mmc"); 533 unregister_blkdev(major, "mmc");
570} 534}
571 535
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
index 0b9682e9a357..74f8cdeeff0f 100644
--- a/drivers/mmc/mmc_queue.c
+++ b/drivers/mmc/mmc_queue.c
@@ -79,7 +79,8 @@ static int mmc_queue_thread(void *d)
79 spin_lock_irq(q->queue_lock); 79 spin_lock_irq(q->queue_lock);
80 set_current_state(TASK_INTERRUPTIBLE); 80 set_current_state(TASK_INTERRUPTIBLE);
81 if (!blk_queue_plugged(q)) 81 if (!blk_queue_plugged(q))
82 mq->req = req = elv_next_request(q); 82 req = elv_next_request(q);
83 mq->req = req;
83 spin_unlock_irq(q->queue_lock); 84 spin_unlock_irq(q->queue_lock);
84 85
85 if (!req) { 86 if (!req) {
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index da8e4d7339cc..2b5a0cc9ea56 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -7,7 +7,6 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/config.h>
11#include <linux/module.h> 10#include <linux/module.h>
12#include <linux/moduleparam.h> 11#include <linux/moduleparam.h>
13#include <linux/init.h> 12#include <linux/init.h>
@@ -70,12 +69,13 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
70 unsigned int datactrl, timeout, irqmask; 69 unsigned int datactrl, timeout, irqmask;
71 unsigned long long clks; 70 unsigned long long clks;
72 void __iomem *base; 71 void __iomem *base;
72 int blksz_bits;
73 73
74 DBG(host, "blksz %04x blks %04x flags %08x\n", 74 DBG(host, "blksz %04x blks %04x flags %08x\n",
75 1 << data->blksz_bits, data->blocks, data->flags); 75 data->blksz, data->blocks, data->flags);
76 76
77 host->data = data; 77 host->data = data;
78 host->size = data->blocks << data->blksz_bits; 78 host->size = data->blksz;
79 host->data_xfered = 0; 79 host->data_xfered = 0;
80 80
81 mmci_init_sg(host, data); 81 mmci_init_sg(host, data);
@@ -89,7 +89,10 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
89 writel(timeout, base + MMCIDATATIMER); 89 writel(timeout, base + MMCIDATATIMER);
90 writel(host->size, base + MMCIDATALENGTH); 90 writel(host->size, base + MMCIDATALENGTH);
91 91
92 datactrl = MCI_DPSM_ENABLE | data->blksz_bits << 4; 92 blksz_bits = ffs(data->blksz) - 1;
93 BUG_ON(1 << blksz_bits != data->blksz);
94
95 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
93 if (data->flags & MMC_DATA_READ) { 96 if (data->flags & MMC_DATA_READ) {
94 datactrl |= MCI_DPSM_DIRECTION; 97 datactrl |= MCI_DPSM_DIRECTION;
95 irqmask = MCI_RXFIFOHALFFULLMASK; 98 irqmask = MCI_RXFIFOHALFFULLMASK;
@@ -146,7 +149,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
146 unsigned int status) 149 unsigned int status)
147{ 150{
148 if (status & MCI_DATABLOCKEND) { 151 if (status & MCI_DATABLOCKEND) {
149 host->data_xfered += 1 << data->blksz_bits; 152 host->data_xfered += data->blksz;
150 } 153 }
151 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 154 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
152 if (status & MCI_DATACRCFAIL) 155 if (status & MCI_DATACRCFAIL)
@@ -506,6 +509,7 @@ static int mmci_probe(struct amba_device *dev, void *id)
506 mmc->f_min = (host->mclk + 511) / 512; 509 mmc->f_min = (host->mclk + 511) / 512;
507 mmc->f_max = min(host->mclk, fmax); 510 mmc->f_max = min(host->mclk, fmax);
508 mmc->ocr_avail = plat->ocr_mask; 511 mmc->ocr_avail = plat->ocr_mask;
512 mmc->caps = MMC_CAP_MULTIWRITE;
509 513
510 /* 514 /*
511 * We can do SGIO 515 * We can do SGIO
@@ -532,11 +536,11 @@ static int mmci_probe(struct amba_device *dev, void *id)
532 writel(0, host->base + MMCIMASK1); 536 writel(0, host->base + MMCIMASK1);
533 writel(0xfff, host->base + MMCICLEAR); 537 writel(0xfff, host->base + MMCICLEAR);
534 538
535 ret = request_irq(dev->irq[0], mmci_irq, SA_SHIRQ, DRIVER_NAME " (cmd)", host); 539 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
536 if (ret) 540 if (ret)
537 goto unmap; 541 goto unmap;
538 542
539 ret = request_irq(dev->irq[1], mmci_pio_irq, SA_SHIRQ, DRIVER_NAME " (pio)", host); 543 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
540 if (ret) 544 if (ret)
541 goto irq0_free; 545 goto irq0_free;
542 546
@@ -546,9 +550,9 @@ static int mmci_probe(struct amba_device *dev, void *id)
546 550
547 mmc_add_host(mmc); 551 mmc_add_host(mmc);
548 552
549 printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%08lx irq %d,%d\n", 553 printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
550 mmc_hostname(mmc), amba_rev(dev), amba_config(dev), 554 mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
551 dev->res.start, dev->irq[0], dev->irq[1]); 555 (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
552 556
553 init_timer(&host->timer); 557 init_timer(&host->timer);
554 host->timer.data = (unsigned long)host; 558 host->timer.data = (unsigned long)host;
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c
index c25244b3657b..52c9e52e6b78 100644
--- a/drivers/mmc/omap.c
+++ b/drivers/mmc/omap.c
@@ -11,7 +11,6 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/config.h>
15#include <linux/module.h> 14#include <linux/module.h>
16#include <linux/moduleparam.h> 15#include <linux/moduleparam.h>
17#include <linux/init.h> 16#include <linux/init.h>
@@ -61,6 +60,7 @@ struct mmc_omap_host {
61 unsigned char id; /* 16xx chips have 2 MMC blocks */ 60 unsigned char id; /* 16xx chips have 2 MMC blocks */
62 struct clk * iclk; 61 struct clk * iclk;
63 struct clk * fclk; 62 struct clk * fclk;
63 struct resource *res;
64 void __iomem *base; 64 void __iomem *base;
65 int irq; 65 int irq;
66 unsigned char bus_mode; 66 unsigned char bus_mode;
@@ -340,8 +340,6 @@ static void
340mmc_omap_xfer_data(struct mmc_omap_host *host, int write) 340mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
341{ 341{
342 int n; 342 int n;
343 void __iomem *reg;
344 u16 *p;
345 343
346 if (host->buffer_bytes_left == 0) { 344 if (host->buffer_bytes_left == 0) {
347 host->sg_idx++; 345 host->sg_idx++;
@@ -658,12 +656,12 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
658 struct mmc_data *mmcdat = host->data; 656 struct mmc_data *mmcdat = host->data;
659 657
660 if (unlikely(host->dma_ch < 0)) { 658 if (unlikely(host->dma_ch < 0)) {
661 dev_err(mmc_dev(host->mmc), "DMA callback while DMA not 659 dev_err(mmc_dev(host->mmc),
662 enabled\n"); 660 "DMA callback while DMA not enabled\n");
663 return; 661 return;
664 } 662 }
665 /* FIXME: We really should do something to _handle_ the errors */ 663 /* FIXME: We really should do something to _handle_ the errors */
666 if (ch_status & OMAP_DMA_TOUT_IRQ) { 664 if (ch_status & OMAP1_DMA_TOUT_IRQ) {
667 dev_err(mmc_dev(host->mmc),"DMA timeout\n"); 665 dev_err(mmc_dev(host->mmc),"DMA timeout\n");
668 return; 666 return;
669 } 667 }
@@ -973,20 +971,20 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
973 struct omap_mmc_conf *minfo = pdev->dev.platform_data; 971 struct omap_mmc_conf *minfo = pdev->dev.platform_data;
974 struct mmc_host *mmc; 972 struct mmc_host *mmc;
975 struct mmc_omap_host *host = NULL; 973 struct mmc_omap_host *host = NULL;
974 struct resource *r;
976 int ret = 0; 975 int ret = 0;
976 int irq;
977 977
978 if (platform_get_resource(pdev, IORESOURCE_MEM, 0) || 978 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
979 platform_get_irq(pdev, IORESOURCE_IRQ, 0)) { 979 irq = platform_get_irq(pdev, 0);
980 dev_err(&pdev->dev, "mmc_omap_probe: invalid resource type\n"); 980 if (!r || irq < 0)
981 return -ENODEV; 981 return -ENXIO;
982 }
983 982
984 if (!request_mem_region(pdev->resource[0].start, 983 r = request_mem_region(pdev->resource[0].start,
985 pdev->resource[0].end - pdev->resource[0].start + 1, 984 pdev->resource[0].end - pdev->resource[0].start + 1,
986 pdev->name)) { 985 pdev->name);
987 dev_dbg(&pdev->dev, "request_mem_region failed\n"); 986 if (!r)
988 return -EBUSY; 987 return -EBUSY;
989 }
990 988
991 mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev); 989 mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
992 if (!mmc) { 990 if (!mmc) {
@@ -1003,6 +1001,8 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1003 host->dma_timer.data = (unsigned long) host; 1001 host->dma_timer.data = (unsigned long) host;
1004 1002
1005 host->id = pdev->id; 1003 host->id = pdev->id;
1004 host->res = r;
1005 host->irq = irq;
1006 1006
1007 if (cpu_is_omap24xx()) { 1007 if (cpu_is_omap24xx()) {
1008 host->iclk = clk_get(&pdev->dev, "mmc_ick"); 1008 host->iclk = clk_get(&pdev->dev, "mmc_ick");
@@ -1032,19 +1032,16 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1032 host->dma_ch = -1; 1032 host->dma_ch = -1;
1033 1033
1034 host->irq = pdev->resource[1].start; 1034 host->irq = pdev->resource[1].start;
1035 host->base = ioremap(pdev->res.start, SZ_4K); 1035 host->base = (void __iomem*)IO_ADDRESS(r->start);
1036 if (!host->base) {
1037 ret = -ENOMEM;
1038 goto out;
1039 }
1040
1041 if (minfo->wire4)
1042 mmc->caps |= MMC_CAP_4_BIT_DATA;
1043 1036
1044 mmc->ops = &mmc_omap_ops; 1037 mmc->ops = &mmc_omap_ops;
1045 mmc->f_min = 400000; 1038 mmc->f_min = 400000;
1046 mmc->f_max = 24000000; 1039 mmc->f_max = 24000000;
1047 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34; 1040 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1041 mmc->caps = MMC_CAP_BYTEBLOCK;
1042
1043 if (minfo->wire4)
1044 mmc->caps |= MMC_CAP_4_BIT_DATA;
1048 1045
1049 /* Use scatterlist DMA to reduce per-transfer costs. 1046 /* Use scatterlist DMA to reduce per-transfer costs.
1050 * NOTE max_seg_size assumption that small blocks aren't 1047 * NOTE max_seg_size assumption that small blocks aren't
@@ -1057,8 +1054,8 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1057 1054
1058 if (host->power_pin >= 0) { 1055 if (host->power_pin >= 0) {
1059 if ((ret = omap_request_gpio(host->power_pin)) != 0) { 1056 if ((ret = omap_request_gpio(host->power_pin)) != 0) {
1060 dev_err(mmc_dev(host->mmc), "Unable to get GPIO 1057 dev_err(mmc_dev(host->mmc),
1061 pin for MMC power\n"); 1058 "Unable to get GPIO pin for MMC power\n");
1062 goto out; 1059 goto out;
1063 } 1060 }
1064 omap_set_gpio_direction(host->power_pin, 0); 1061 omap_set_gpio_direction(host->power_pin, 0);
@@ -1086,7 +1083,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1086 1083
1087 omap_set_gpio_direction(host->switch_pin, 1); 1084 omap_set_gpio_direction(host->switch_pin, 1);
1088 ret = request_irq(OMAP_GPIO_IRQ(host->switch_pin), 1085 ret = request_irq(OMAP_GPIO_IRQ(host->switch_pin),
1089 mmc_omap_switch_irq, SA_TRIGGER_RISING, DRIVER_NAME, host); 1086 mmc_omap_switch_irq, IRQF_TRIGGER_RISING, DRIVER_NAME, host);
1090 if (ret) { 1087 if (ret) {
1091 dev_warn(mmc_dev(host->mmc), "Unable to get IRQ for MMC cover switch\n"); 1088 dev_warn(mmc_dev(host->mmc), "Unable to get IRQ for MMC cover switch\n");
1092 omap_free_gpio(host->switch_pin); 1089 omap_free_gpio(host->switch_pin);
@@ -1100,7 +1097,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1100 device_remove_file(&pdev->dev, &dev_attr_cover_switch); 1097 device_remove_file(&pdev->dev, &dev_attr_cover_switch);
1101 } 1098 }
1102 if (ret) { 1099 if (ret) {
1103 dev_wan(mmc_dev(host->mmc), "Unable to create sysfs attributes\n"); 1100 dev_warn(mmc_dev(host->mmc), "Unable to create sysfs attributes\n");
1104 free_irq(OMAP_GPIO_IRQ(host->switch_pin), host); 1101 free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
1105 omap_free_gpio(host->switch_pin); 1102 omap_free_gpio(host->switch_pin);
1106 host->switch_pin = -1; 1103 host->switch_pin = -1;
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index b49368fd96b8..ef350908478c 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -16,7 +16,6 @@
16 * 1 and 3 byte data transfers not supported 16 * 1 and 3 byte data transfers not supported
17 * max block length up to 1023 17 * max block length up to 1023
18 */ 18 */
19#include <linux/config.h>
20#include <linux/module.h> 19#include <linux/module.h>
21#include <linux/init.h> 20#include <linux/init.h>
22#include <linux/ioport.h> 21#include <linux/ioport.h>
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index 8e9100bd57ef..fdfc3838dd79 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -8,12 +8,6 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11 /*
12 * Note that PIO transfer is rather crappy atm. The buffer full/empty
13 * interrupts aren't reliable so we currently transfer the entire buffer
14 * directly. Patches to solve the problem are welcome.
15 */
16
17#include <linux/delay.h> 11#include <linux/delay.h>
18#include <linux/highmem.h> 12#include <linux/highmem.h>
19#include <linux/pci.h> 13#include <linux/pci.h>
@@ -27,16 +21,50 @@
27#include "sdhci.h" 21#include "sdhci.h"
28 22
29#define DRIVER_NAME "sdhci" 23#define DRIVER_NAME "sdhci"
30#define DRIVER_VERSION "0.11" 24#define DRIVER_VERSION "0.12"
31 25
32#define BUGMAIL "<sdhci-devel@list.drzeus.cx>" 26#define BUGMAIL "<sdhci-devel@list.drzeus.cx>"
33 27
34#define DBG(f, x...) \ 28#define DBG(f, x...) \
35 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x) 29 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
36 30
31static unsigned int debug_nodma = 0;
32static unsigned int debug_forcedma = 0;
33static unsigned int debug_quirks = 0;
34
35#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
36#define SDHCI_QUIRK_FORCE_DMA (1<<1)
37
37static const struct pci_device_id pci_ids[] __devinitdata = { 38static const struct pci_device_id pci_ids[] __devinitdata = {
38 /* handle any SD host controller */ 39 {
39 {PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)}, 40 .vendor = PCI_VENDOR_ID_RICOH,
41 .device = PCI_DEVICE_ID_RICOH_R5C822,
42 .subvendor = PCI_VENDOR_ID_IBM,
43 .subdevice = PCI_ANY_ID,
44 .driver_data = SDHCI_QUIRK_CLOCK_BEFORE_RESET |
45 SDHCI_QUIRK_FORCE_DMA,
46 },
47
48 {
49 .vendor = PCI_VENDOR_ID_RICOH,
50 .device = PCI_DEVICE_ID_RICOH_R5C822,
51 .subvendor = PCI_ANY_ID,
52 .subdevice = PCI_ANY_ID,
53 .driver_data = SDHCI_QUIRK_FORCE_DMA,
54 },
55
56 {
57 .vendor = PCI_VENDOR_ID_TI,
58 .device = PCI_DEVICE_ID_TI_XX21_XX11_SD,
59 .subvendor = PCI_ANY_ID,
60 .subdevice = PCI_ANY_ID,
61 .driver_data = SDHCI_QUIRK_FORCE_DMA,
62 },
63
64 { /* Generic SD host controller */
65 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
66 },
67
40 { /* end: all zeroes */ }, 68 { /* end: all zeroes */ },
41}; 69};
42 70
@@ -94,12 +122,27 @@ static void sdhci_dumpregs(struct sdhci_host *host)
94 122
95static void sdhci_reset(struct sdhci_host *host, u8 mask) 123static void sdhci_reset(struct sdhci_host *host, u8 mask)
96{ 124{
125 unsigned long timeout;
126
97 writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET); 127 writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET);
98 128
99 if (mask & SDHCI_RESET_ALL) { 129 if (mask & SDHCI_RESET_ALL)
100 host->clock = 0; 130 host->clock = 0;
101 131
102 mdelay(50); 132 /* Wait max 100 ms */
133 timeout = 100;
134
135 /* hw clears the bit when it's done */
136 while (readb(host->ioaddr + SDHCI_SOFTWARE_RESET) & mask) {
137 if (timeout == 0) {
138 printk(KERN_ERR "%s: Reset 0x%x never completed. "
139 "Please report this to " BUGMAIL ".\n",
140 mmc_hostname(host->mmc), (int)mask);
141 sdhci_dumpregs(host);
142 return;
143 }
144 timeout--;
145 mdelay(1);
103 } 146 }
104} 147}
105 148
@@ -109,13 +152,15 @@ static void sdhci_init(struct sdhci_host *host)
109 152
110 sdhci_reset(host, SDHCI_RESET_ALL); 153 sdhci_reset(host, SDHCI_RESET_ALL);
111 154
112 intmask = ~(SDHCI_INT_CARD_INT | SDHCI_INT_BUF_EMPTY | SDHCI_INT_BUF_FULL); 155 intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
156 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
157 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
158 SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
159 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
160 SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE;
113 161
114 writel(intmask, host->ioaddr + SDHCI_INT_ENABLE); 162 writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
115 writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE); 163 writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
116
117 /* This is unknown magic. */
118 writeb(0xE, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
119} 164}
120 165
121static void sdhci_activate_led(struct sdhci_host *host) 166static void sdhci_activate_led(struct sdhci_host *host)
@@ -172,79 +217,96 @@ static inline int sdhci_next_sg(struct sdhci_host* host)
172 return host->num_sg; 217 return host->num_sg;
173} 218}
174 219
175static void sdhci_transfer_pio(struct sdhci_host *host) 220static void sdhci_read_block_pio(struct sdhci_host *host)
176{ 221{
222 int blksize, chunk_remain;
223 u32 data;
177 char *buffer; 224 char *buffer;
178 u32 mask; 225 int size;
179 int bytes, size;
180 unsigned long max_jiffies;
181 226
182 BUG_ON(!host->data); 227 DBG("PIO reading\n");
183 228
184 if (host->num_sg == 0) 229 blksize = host->data->blksz;
185 return; 230 chunk_remain = 0;
186 231 data = 0;
187 bytes = 0;
188 if (host->data->flags & MMC_DATA_READ)
189 mask = SDHCI_DATA_AVAILABLE;
190 else
191 mask = SDHCI_SPACE_AVAILABLE;
192 232
193 buffer = sdhci_kmap_sg(host) + host->offset; 233 buffer = sdhci_kmap_sg(host) + host->offset;
194 234
195 /* Transfer shouldn't take more than 5 s */ 235 while (blksize) {
196 max_jiffies = jiffies + HZ * 5; 236 if (chunk_remain == 0) {
237 data = readl(host->ioaddr + SDHCI_BUFFER);
238 chunk_remain = min(blksize, 4);
239 }
197 240
198 while (host->size > 0) { 241 size = min(host->size, host->remain);
199 if (time_after(jiffies, max_jiffies)) { 242 size = min(size, chunk_remain);
200 printk(KERN_ERR "%s: PIO transfer stalled. "
201 "Please report this to "
202 BUGMAIL ".\n", mmc_hostname(host->mmc));
203 sdhci_dumpregs(host);
204 243
205 sdhci_kunmap_sg(host); 244 chunk_remain -= size;
245 blksize -= size;
246 host->offset += size;
247 host->remain -= size;
248 host->size -= size;
249 while (size) {
250 *buffer = data & 0xFF;
251 buffer++;
252 data >>= 8;
253 size--;
254 }
206 255
207 host->data->error = MMC_ERR_FAILED; 256 if (host->remain == 0) {
208 sdhci_finish_data(host); 257 sdhci_kunmap_sg(host);
209 return; 258 if (sdhci_next_sg(host) == 0) {
259 BUG_ON(blksize != 0);
260 return;
261 }
262 buffer = sdhci_kmap_sg(host);
210 } 263 }
264 }
211 265
212 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask)) 266 sdhci_kunmap_sg(host);
213 continue; 267}
214 268
215 size = min(host->size, host->remain); 269static void sdhci_write_block_pio(struct sdhci_host *host)
270{
271 int blksize, chunk_remain;
272 u32 data;
273 char *buffer;
274 int bytes, size;
216 275
217 if (size >= 4) { 276 DBG("PIO writing\n");
218 if (host->data->flags & MMC_DATA_READ)
219 *(u32*)buffer = readl(host->ioaddr + SDHCI_BUFFER);
220 else
221 writel(*(u32*)buffer, host->ioaddr + SDHCI_BUFFER);
222 size = 4;
223 } else if (size >= 2) {
224 if (host->data->flags & MMC_DATA_READ)
225 *(u16*)buffer = readw(host->ioaddr + SDHCI_BUFFER);
226 else
227 writew(*(u16*)buffer, host->ioaddr + SDHCI_BUFFER);
228 size = 2;
229 } else {
230 if (host->data->flags & MMC_DATA_READ)
231 *(u8*)buffer = readb(host->ioaddr + SDHCI_BUFFER);
232 else
233 writeb(*(u8*)buffer, host->ioaddr + SDHCI_BUFFER);
234 size = 1;
235 }
236 277
237 buffer += size; 278 blksize = host->data->blksz;
279 chunk_remain = 4;
280 data = 0;
281
282 bytes = 0;
283 buffer = sdhci_kmap_sg(host) + host->offset;
284
285 while (blksize) {
286 size = min(host->size, host->remain);
287 size = min(size, chunk_remain);
288
289 chunk_remain -= size;
290 blksize -= size;
238 host->offset += size; 291 host->offset += size;
239 host->remain -= size; 292 host->remain -= size;
240
241 bytes += size;
242 host->size -= size; 293 host->size -= size;
294 while (size) {
295 data >>= 8;
296 data |= (u32)*buffer << 24;
297 buffer++;
298 size--;
299 }
300
301 if (chunk_remain == 0) {
302 writel(data, host->ioaddr + SDHCI_BUFFER);
303 chunk_remain = min(blksize, 4);
304 }
243 305
244 if (host->remain == 0) { 306 if (host->remain == 0) {
245 sdhci_kunmap_sg(host); 307 sdhci_kunmap_sg(host);
246 if (sdhci_next_sg(host) == 0) { 308 if (sdhci_next_sg(host) == 0) {
247 DBG("PIO transfer: %d bytes\n", bytes); 309 BUG_ON(blksize != 0);
248 return; 310 return;
249 } 311 }
250 buffer = sdhci_kmap_sg(host); 312 buffer = sdhci_kmap_sg(host);
@@ -252,38 +314,87 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
252 } 314 }
253 315
254 sdhci_kunmap_sg(host); 316 sdhci_kunmap_sg(host);
317}
318
319static void sdhci_transfer_pio(struct sdhci_host *host)
320{
321 u32 mask;
322
323 BUG_ON(!host->data);
324
325 if (host->size == 0)
326 return;
327
328 if (host->data->flags & MMC_DATA_READ)
329 mask = SDHCI_DATA_AVAILABLE;
330 else
331 mask = SDHCI_SPACE_AVAILABLE;
332
333 while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
334 if (host->data->flags & MMC_DATA_READ)
335 sdhci_read_block_pio(host);
336 else
337 sdhci_write_block_pio(host);
255 338
256 DBG("PIO transfer: %d bytes\n", bytes); 339 if (host->size == 0)
340 break;
341
342 BUG_ON(host->num_sg == 0);
343 }
344
345 DBG("PIO transfer complete.\n");
257} 346}
258 347
259static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) 348static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
260{ 349{
261 u16 mode; 350 u8 count;
351 unsigned target_timeout, current_timeout;
262 352
263 WARN_ON(host->data); 353 WARN_ON(host->data);
264 354
265 if (data == NULL) { 355 if (data == NULL)
266 writew(0, host->ioaddr + SDHCI_TRANSFER_MODE);
267 return; 356 return;
268 }
269 357
270 DBG("blksz %04x blks %04x flags %08x\n", 358 DBG("blksz %04x blks %04x flags %08x\n",
271 data->blksz, data->blocks, data->flags); 359 data->blksz, data->blocks, data->flags);
272 DBG("tsac %d ms nsac %d clk\n", 360 DBG("tsac %d ms nsac %d clk\n",
273 data->timeout_ns / 1000000, data->timeout_clks); 361 data->timeout_ns / 1000000, data->timeout_clks);
274 362
275 mode = SDHCI_TRNS_BLK_CNT_EN; 363 /* Sanity checks */
276 if (data->blocks > 1) 364 BUG_ON(data->blksz * data->blocks > 524288);
277 mode |= SDHCI_TRNS_MULTI; 365 BUG_ON(data->blksz > host->max_block);
278 if (data->flags & MMC_DATA_READ) 366 BUG_ON(data->blocks > 65535);
279 mode |= SDHCI_TRNS_READ;
280 if (host->flags & SDHCI_USE_DMA)
281 mode |= SDHCI_TRNS_DMA;
282 367
283 writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE); 368 /* timeout in us */
369 target_timeout = data->timeout_ns / 1000 +
370 data->timeout_clks / host->clock;
284 371
285 writew(data->blksz, host->ioaddr + SDHCI_BLOCK_SIZE); 372 /*
286 writew(data->blocks, host->ioaddr + SDHCI_BLOCK_COUNT); 373 * Figure out needed cycles.
374 * We do this in steps in order to fit inside a 32 bit int.
375 * The first step is the minimum timeout, which will have a
376 * minimum resolution of 6 bits:
377 * (1) 2^13*1000 > 2^22,
378 * (2) host->timeout_clk < 2^16
379 * =>
380 * (1) / (2) > 2^6
381 */
382 count = 0;
383 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
384 while (current_timeout < target_timeout) {
385 count++;
386 current_timeout <<= 1;
387 if (count >= 0xF)
388 break;
389 }
390
391 if (count >= 0xF) {
392 printk(KERN_WARNING "%s: Too large timeout requested!\n",
393 mmc_hostname(host->mmc));
394 count = 0xE;
395 }
396
397 writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
287 398
288 if (host->flags & SDHCI_USE_DMA) { 399 if (host->flags & SDHCI_USE_DMA) {
289 int count; 400 int count;
@@ -302,12 +413,37 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
302 host->offset = 0; 413 host->offset = 0;
303 host->remain = host->cur_sg->length; 414 host->remain = host->cur_sg->length;
304 } 415 }
416
417 /* We do not handle DMA boundaries, so set it to max (512 KiB) */
418 writew(SDHCI_MAKE_BLKSZ(7, data->blksz),
419 host->ioaddr + SDHCI_BLOCK_SIZE);
420 writew(data->blocks, host->ioaddr + SDHCI_BLOCK_COUNT);
421}
422
423static void sdhci_set_transfer_mode(struct sdhci_host *host,
424 struct mmc_data *data)
425{
426 u16 mode;
427
428 WARN_ON(host->data);
429
430 if (data == NULL)
431 return;
432
433 mode = SDHCI_TRNS_BLK_CNT_EN;
434 if (data->blocks > 1)
435 mode |= SDHCI_TRNS_MULTI;
436 if (data->flags & MMC_DATA_READ)
437 mode |= SDHCI_TRNS_READ;
438 if (host->flags & SDHCI_USE_DMA)
439 mode |= SDHCI_TRNS_DMA;
440
441 writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
305} 442}
306 443
307static void sdhci_finish_data(struct sdhci_host *host) 444static void sdhci_finish_data(struct sdhci_host *host)
308{ 445{
309 struct mmc_data *data; 446 struct mmc_data *data;
310 u32 intmask;
311 u16 blocks; 447 u16 blocks;
312 448
313 BUG_ON(!host->data); 449 BUG_ON(!host->data);
@@ -318,14 +454,6 @@ static void sdhci_finish_data(struct sdhci_host *host)
318 if (host->flags & SDHCI_USE_DMA) { 454 if (host->flags & SDHCI_USE_DMA) {
319 pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len, 455 pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len,
320 (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); 456 (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
321 } else {
322 intmask = readl(host->ioaddr + SDHCI_SIGNAL_ENABLE);
323 intmask &= ~(SDHCI_INT_BUF_EMPTY | SDHCI_INT_BUF_FULL);
324 writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
325
326 intmask = readl(host->ioaddr + SDHCI_INT_ENABLE);
327 intmask &= ~(SDHCI_INT_BUF_EMPTY | SDHCI_INT_BUF_FULL);
328 writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
329 } 457 }
330 458
331 /* 459 /*
@@ -342,9 +470,7 @@ static void sdhci_finish_data(struct sdhci_host *host)
342 "though there were blocks left. Please report this " 470 "though there were blocks left. Please report this "
343 "to " BUGMAIL ".\n", mmc_hostname(host->mmc)); 471 "to " BUGMAIL ".\n", mmc_hostname(host->mmc));
344 data->error = MMC_ERR_FAILED; 472 data->error = MMC_ERR_FAILED;
345 } 473 } else if (host->size != 0) {
346
347 if (host->size != 0) {
348 printk(KERN_ERR "%s: %d bytes were left untransferred. " 474 printk(KERN_ERR "%s: %d bytes were left untransferred. "
349 "Please report this to " BUGMAIL ".\n", 475 "Please report this to " BUGMAIL ".\n",
350 mmc_hostname(host->mmc), host->size); 476 mmc_hostname(host->mmc), host->size);
@@ -371,27 +497,38 @@ static void sdhci_finish_data(struct sdhci_host *host)
371static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) 497static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
372{ 498{
373 int flags; 499 int flags;
374 u32 present; 500 u32 mask;
375 unsigned long max_jiffies; 501 unsigned long timeout;
376 502
377 WARN_ON(host->cmd); 503 WARN_ON(host->cmd);
378 504
379 DBG("Sending cmd (%x)\n", cmd->opcode); 505 DBG("Sending cmd (%x)\n", cmd->opcode);
380 506
381 /* Wait max 10 ms */ 507 /* Wait max 10 ms */
382 max_jiffies = jiffies + (HZ + 99)/100; 508 timeout = 10;
383 do { 509
384 if (time_after(jiffies, max_jiffies)) { 510 mask = SDHCI_CMD_INHIBIT;
511 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
512 mask |= SDHCI_DATA_INHIBIT;
513
514 /* We shouldn't wait for data inihibit for stop commands, even
515 though they might use busy signaling */
516 if (host->mrq->data && (cmd == host->mrq->data->stop))
517 mask &= ~SDHCI_DATA_INHIBIT;
518
519 while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
520 if (timeout == 0) {
385 printk(KERN_ERR "%s: Controller never released " 521 printk(KERN_ERR "%s: Controller never released "
386 "inhibit bits. Please report this to " 522 "inhibit bit(s). Please report this to "
387 BUGMAIL ".\n", mmc_hostname(host->mmc)); 523 BUGMAIL ".\n", mmc_hostname(host->mmc));
388 sdhci_dumpregs(host); 524 sdhci_dumpregs(host);
389 cmd->error = MMC_ERR_FAILED; 525 cmd->error = MMC_ERR_FAILED;
390 tasklet_schedule(&host->finish_tasklet); 526 tasklet_schedule(&host->finish_tasklet);
391 return; 527 return;
392 } 528 }
393 present = readl(host->ioaddr + SDHCI_PRESENT_STATE); 529 timeout--;
394 } while (present & (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)); 530 mdelay(1);
531 }
395 532
396 mod_timer(&host->timer, jiffies + 10 * HZ); 533 mod_timer(&host->timer, jiffies + 10 * HZ);
397 534
@@ -401,6 +538,8 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
401 538
402 writel(cmd->arg, host->ioaddr + SDHCI_ARGUMENT); 539 writel(cmd->arg, host->ioaddr + SDHCI_ARGUMENT);
403 540
541 sdhci_set_transfer_mode(host, cmd->data);
542
404 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 543 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
405 printk(KERN_ERR "%s: Unsupported response type! " 544 printk(KERN_ERR "%s: Unsupported response type! "
406 "Please report this to " BUGMAIL ".\n", 545 "Please report this to " BUGMAIL ".\n",
@@ -426,7 +565,7 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
426 if (cmd->data) 565 if (cmd->data)
427 flags |= SDHCI_CMD_DATA; 566 flags |= SDHCI_CMD_DATA;
428 567
429 writel(SDHCI_MAKE_CMD(cmd->opcode, flags), 568 writew(SDHCI_MAKE_CMD(cmd->opcode, flags),
430 host->ioaddr + SDHCI_COMMAND); 569 host->ioaddr + SDHCI_COMMAND);
431} 570}
432 571
@@ -456,31 +595,9 @@ static void sdhci_finish_command(struct sdhci_host *host)
456 595
457 DBG("Ending cmd (%x)\n", host->cmd->opcode); 596 DBG("Ending cmd (%x)\n", host->cmd->opcode);
458 597
459 if (host->cmd->data) { 598 if (host->cmd->data)
460 u32 intmask;
461
462 host->data = host->cmd->data; 599 host->data = host->cmd->data;
463 600 else
464 if (!(host->flags & SDHCI_USE_DMA)) {
465 /*
466 * Don't enable the interrupts until now to make sure we
467 * get stable handling of the FIFO.
468 */
469 intmask = readl(host->ioaddr + SDHCI_INT_ENABLE);
470 intmask |= SDHCI_INT_BUF_EMPTY | SDHCI_INT_BUF_FULL;
471 writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
472
473 intmask = readl(host->ioaddr + SDHCI_SIGNAL_ENABLE);
474 intmask |= SDHCI_INT_BUF_EMPTY | SDHCI_INT_BUF_FULL;
475 writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
476
477 /*
478 * The buffer interrupts are to unreliable so we
479 * start the transfer immediatly.
480 */
481 sdhci_transfer_pio(host);
482 }
483 } else
484 tasklet_schedule(&host->finish_tasklet); 601 tasklet_schedule(&host->finish_tasklet);
485 602
486 host->cmd = NULL; 603 host->cmd = NULL;
@@ -490,7 +607,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
490{ 607{
491 int div; 608 int div;
492 u16 clk; 609 u16 clk;
493 unsigned long max_jiffies; 610 unsigned long timeout;
494 611
495 if (clock == host->clock) 612 if (clock == host->clock)
496 return; 613 return;
@@ -511,17 +628,19 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
511 writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL); 628 writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);
512 629
513 /* Wait max 10 ms */ 630 /* Wait max 10 ms */
514 max_jiffies = jiffies + (HZ + 99)/100; 631 timeout = 10;
515 do { 632 while (!((clk = readw(host->ioaddr + SDHCI_CLOCK_CONTROL))
516 if (time_after(jiffies, max_jiffies)) { 633 & SDHCI_CLOCK_INT_STABLE)) {
634 if (timeout == 0) {
517 printk(KERN_ERR "%s: Internal clock never stabilised. " 635 printk(KERN_ERR "%s: Internal clock never stabilised. "
518 "Please report this to " BUGMAIL ".\n", 636 "Please report this to " BUGMAIL ".\n",
519 mmc_hostname(host->mmc)); 637 mmc_hostname(host->mmc));
520 sdhci_dumpregs(host); 638 sdhci_dumpregs(host);
521 return; 639 return;
522 } 640 }
523 clk = readw(host->ioaddr + SDHCI_CLOCK_CONTROL); 641 timeout--;
524 } while (!(clk & SDHCI_CLOCK_INT_STABLE)); 642 mdelay(1);
643 }
525 644
526 clk |= SDHCI_CLOCK_CARD_EN; 645 clk |= SDHCI_CLOCK_CARD_EN;
527 writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL); 646 writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);
@@ -530,6 +649,46 @@ out:
530 host->clock = clock; 649 host->clock = clock;
531} 650}
532 651
652static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
653{
654 u8 pwr;
655
656 if (host->power == power)
657 return;
658
659 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
660
661 if (power == (unsigned short)-1)
662 goto out;
663
664 pwr = SDHCI_POWER_ON;
665
666 switch (power) {
667 case MMC_VDD_170:
668 case MMC_VDD_180:
669 case MMC_VDD_190:
670 pwr |= SDHCI_POWER_180;
671 break;
672 case MMC_VDD_290:
673 case MMC_VDD_300:
674 case MMC_VDD_310:
675 pwr |= SDHCI_POWER_300;
676 break;
677 case MMC_VDD_320:
678 case MMC_VDD_330:
679 case MMC_VDD_340:
680 pwr |= SDHCI_POWER_330;
681 break;
682 default:
683 BUG();
684 }
685
686 writeb(pwr, host->ioaddr + SDHCI_POWER_CONTROL);
687
688out:
689 host->power = power;
690}
691
533/*****************************************************************************\ 692/*****************************************************************************\
534 * * 693 * *
535 * MMC callbacks * 694 * MMC callbacks *
@@ -576,17 +735,15 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
576 */ 735 */
577 if (ios->power_mode == MMC_POWER_OFF) { 736 if (ios->power_mode == MMC_POWER_OFF) {
578 writel(0, host->ioaddr + SDHCI_SIGNAL_ENABLE); 737 writel(0, host->ioaddr + SDHCI_SIGNAL_ENABLE);
579 spin_unlock_irqrestore(&host->lock, flags);
580 sdhci_init(host); 738 sdhci_init(host);
581 spin_lock_irqsave(&host->lock, flags);
582 } 739 }
583 740
584 sdhci_set_clock(host, ios->clock); 741 sdhci_set_clock(host, ios->clock);
585 742
586 if (ios->power_mode == MMC_POWER_OFF) 743 if (ios->power_mode == MMC_POWER_OFF)
587 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); 744 sdhci_set_power(host, -1);
588 else 745 else
589 writeb(0xFF, host->ioaddr + SDHCI_POWER_CONTROL); 746 sdhci_set_power(host, ios->vdd);
590 747
591 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); 748 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
592 if (ios->bus_width == MMC_BUS_WIDTH_4) 749 if (ios->bus_width == MMC_BUS_WIDTH_4)
@@ -679,6 +836,19 @@ static void sdhci_tasklet_finish(unsigned long param)
679 if ((mrq->cmd->error != MMC_ERR_NONE) || 836 if ((mrq->cmd->error != MMC_ERR_NONE) ||
680 (mrq->data && ((mrq->data->error != MMC_ERR_NONE) || 837 (mrq->data && ((mrq->data->error != MMC_ERR_NONE) ||
681 (mrq->data->stop && (mrq->data->stop->error != MMC_ERR_NONE))))) { 838 (mrq->data->stop && (mrq->data->stop->error != MMC_ERR_NONE))))) {
839
840 /* Some controllers need this kick or reset won't work here */
841 if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
842 unsigned int clock;
843
844 /* This is to force an update */
845 clock = host->clock;
846 host->clock = 0;
847 sdhci_set_clock(host, clock);
848 }
849
850 /* Spec says we should do both at the same time, but Ricoh
851 controllers do not like that. */
682 sdhci_reset(host, SDHCI_RESET_CMD); 852 sdhci_reset(host, SDHCI_RESET_CMD);
683 sdhci_reset(host, SDHCI_RESET_DATA); 853 sdhci_reset(host, SDHCI_RESET_DATA);
684 } 854 }
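
The error path above now honours SDHCI_QUIRK_CLOCK_BEFORE_RESET: some controllers ignore a reset unless the SD clock has just been reprogrammed, so the cached host->clock is zeroed to defeat the "already at this frequency" short-circuit in sdhci_set_clock() and force a real register write before the CMD and DATA resets (issued separately because Ricoh parts dislike the combined form). The cache-busting idiom in isolation (struct ctrl, set_clock() and reprogram_clock() are illustrative, not driver symbols):

struct ctrl {
        unsigned int clock;                     /* cached value, used to skip redundant writes */
};

static void set_clock(struct ctrl *c, unsigned int hz)
{
        if (c->clock == hz)
                return;                         /* normally a no-op for the same frequency */
        /* ... program the divider here ... */
        c->clock = hz;
}

/* Force the hardware write even though the target frequency is unchanged. */
static void reprogram_clock(struct ctrl *c)
{
        unsigned int hz = c->clock;

        c->clock = 0;                           /* invalidate the cached value */
        set_clock(c, hz);                       /* now takes the real programming path */
}
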
@@ -793,7 +963,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
793 if (host->data->error != MMC_ERR_NONE) 963 if (host->data->error != MMC_ERR_NONE)
794 sdhci_finish_data(host); 964 sdhci_finish_data(host);
795 else { 965 else {
796 if (intmask & (SDHCI_INT_BUF_FULL | SDHCI_INT_BUF_EMPTY)) 966 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
797 sdhci_transfer_pio(host); 967 sdhci_transfer_pio(host);
798 968
799 if (intmask & SDHCI_INT_DATA_END) 969 if (intmask & SDHCI_INT_DATA_END)
@@ -818,50 +988,44 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id, struct pt_regs *regs)
818 988
819 DBG("*** %s got interrupt: 0x%08x\n", host->slot_descr, intmask); 989 DBG("*** %s got interrupt: 0x%08x\n", host->slot_descr, intmask);
820 990
821 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) 991 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
992 writel(intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE),
993 host->ioaddr + SDHCI_INT_STATUS);
822 tasklet_schedule(&host->card_tasklet); 994 tasklet_schedule(&host->card_tasklet);
995 }
823 996
824 if (intmask & SDHCI_INT_CMD_MASK) { 997 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
825 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
826 998
999 if (intmask & SDHCI_INT_CMD_MASK) {
827 writel(intmask & SDHCI_INT_CMD_MASK, 1000 writel(intmask & SDHCI_INT_CMD_MASK,
828 host->ioaddr + SDHCI_INT_STATUS); 1001 host->ioaddr + SDHCI_INT_STATUS);
1002 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
829 } 1003 }
830 1004
831 if (intmask & SDHCI_INT_DATA_MASK) { 1005 if (intmask & SDHCI_INT_DATA_MASK) {
832 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
833
834 writel(intmask & SDHCI_INT_DATA_MASK, 1006 writel(intmask & SDHCI_INT_DATA_MASK,
835 host->ioaddr + SDHCI_INT_STATUS); 1007 host->ioaddr + SDHCI_INT_STATUS);
1008 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
836 } 1009 }
837 1010
838 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK); 1011 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
839 1012
840 if (intmask & SDHCI_INT_CARD_INT) {
841 printk(KERN_ERR "%s: Unexpected card interrupt. Please "
842 "report this to " BUGMAIL ".\n",
843 mmc_hostname(host->mmc));
844 sdhci_dumpregs(host);
845 }
846
847 if (intmask & SDHCI_INT_BUS_POWER) { 1013 if (intmask & SDHCI_INT_BUS_POWER) {
848 printk(KERN_ERR "%s: Unexpected bus power interrupt. Please " 1014 printk(KERN_ERR "%s: Card is consuming too much power!\n",
849 "report this to " BUGMAIL ".\n",
850 mmc_hostname(host->mmc)); 1015 mmc_hostname(host->mmc));
851 sdhci_dumpregs(host); 1016 writel(SDHCI_INT_BUS_POWER, host->ioaddr + SDHCI_INT_STATUS);
852 } 1017 }
853 1018
854 if (intmask & SDHCI_INT_ACMD12ERR) { 1019 intmask &= SDHCI_INT_BUS_POWER;
855 printk(KERN_ERR "%s: Unexpected auto CMD12 error. Please " 1020
1021 if (intmask) {
1022 printk(KERN_ERR "%s: Unexpected interrupt 0x%08x. Please "
856 "report this to " BUGMAIL ".\n", 1023 "report this to " BUGMAIL ".\n",
857 mmc_hostname(host->mmc)); 1024 mmc_hostname(host->mmc), intmask);
858 sdhci_dumpregs(host); 1025 sdhci_dumpregs(host);
859 1026
860 writew(~0, host->ioaddr + SDHCI_ACMD12_ERR);
861 }
862
863 if (intmask)
864 writel(intmask, host->ioaddr + SDHCI_INT_STATUS); 1027 writel(intmask, host->ioaddr + SDHCI_INT_STATUS);
1028 }
865 1029
866 result = IRQ_HANDLED; 1030 result = IRQ_HANDLED;
867 1031
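
The reworked interrupt handler acknowledges each group by writing its bits back to SDHCI_INT_STATUS before dispatching it (card-detect, then command, then data), masks the card, command and data groups out of intmask as it goes, gives bus-power overcurrent its own "Card is consuming too much power!" message, and reports whatever survives as a single "Unexpected interrupt 0x%08x" line instead of the old per-bit special cases. A condensed, compilable sketch of that mask-as-you-go structure (the GRP_* values and the stub handlers are illustrative):

#include <stdint.h>
#include <stdio.h>

#define GRP_CARD 0x000000C0u   /* insert | remove, as in sdhci.h below */
#define GRP_CMD  0x000F0001u   /* illustrative grouping */
#define GRP_DATA 0x0070003Au   /* illustrative grouping */

static void ack(uint32_t bits)         { printf("ack 0x%08x\n", (unsigned)bits); }  /* stands in for the SDHCI_INT_STATUS write-back */
static void handle_card(void)          { puts("schedule card tasklet"); }
static void handle_cmd(uint32_t bits)  { printf("cmd irq 0x%08x\n", (unsigned)bits); }
static void handle_data(uint32_t bits) { printf("data irq 0x%08x\n", (unsigned)bits); }

static void irq_dispatch(uint32_t status)
{
        if (status & GRP_CARD) {
                ack(status & GRP_CARD);          /* clear before deferring to the tasklet */
                handle_card();
        }
        status &= ~GRP_CARD;

        if (status & GRP_CMD) {
                ack(status & GRP_CMD);           /* ack first, then run the handler */
                handle_cmd(status & GRP_CMD);
        }
        if (status & GRP_DATA) {
                ack(status & GRP_DATA);
                handle_data(status & GRP_DATA);
        }
        status &= ~(GRP_CMD | GRP_DATA);

        if (status) {                            /* anything left over is unexpected */
                printf("unexpected irq 0x%08x\n", (unsigned)status);
                ack(status);
        }
}
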
@@ -954,6 +1118,7 @@ static int sdhci_resume (struct pci_dev *pdev)
954static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) 1118static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
955{ 1119{
956 int ret; 1120 int ret;
1121 unsigned int version;
957 struct sdhci_chip *chip; 1122 struct sdhci_chip *chip;
958 struct mmc_host *mmc; 1123 struct mmc_host *mmc;
959 struct sdhci_host *host; 1124 struct sdhci_host *host;
@@ -985,6 +1150,16 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
985 return -ENODEV; 1150 return -ENODEV;
986 } 1151 }
987 1152
1153 if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
1154 printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n");
1155 return -ENODEV;
1156 }
1157
1158 if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
1159 printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n");
1160 return -ENODEV;
1161 }
1162
988 mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev); 1163 mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev);
989 if (!mmc) 1164 if (!mmc)
990 return -ENOMEM; 1165 return -ENOMEM;
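
Before allocating an mmc_host the probe now checks the low byte of pdev->class, which for an SDHCI function is the programming interface: 0x00 is PIO-only, 0x01 additionally exposes the DMA interface, 0x02 is a vendor-specific interface, and anything higher is unknown; the last two cases are refused with -ENODEV. A minimal decode of that byte (classify_interface() and the enum are illustrative):

#define PCI_SDHCI_IFPIO    0x00
#define PCI_SDHCI_IFDMA    0x01
#define PCI_SDHCI_IFVENDOR 0x02

enum sdhci_if { IF_PIO, IF_DMA, IF_REJECT };

/* The programming interface lives in the low byte of the PCI class word. */
static enum sdhci_if classify_interface(unsigned int pci_class)
{
        switch (pci_class & 0x0000FF) {
        case PCI_SDHCI_IFPIO:
                return IF_PIO;
        case PCI_SDHCI_IFDMA:
                return IF_DMA;
        default:
                return IF_REJECT;   /* vendor-specific or unknown: don't bind */
        }
}
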
@@ -1012,9 +1187,30 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1012 goto release; 1187 goto release;
1013 } 1188 }
1014 1189
1190 sdhci_reset(host, SDHCI_RESET_ALL);
1191
1192 version = readw(host->ioaddr + SDHCI_HOST_VERSION);
1193 version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
1194 if (version != 0) {
1195 printk(KERN_ERR "%s: Unknown controller version (%d). "
1196 "You may experience problems.\n", host->slot_descr,
1197 version);
1198 }
1199
1015 caps = readl(host->ioaddr + SDHCI_CAPABILITIES); 1200 caps = readl(host->ioaddr + SDHCI_CAPABILITIES);
1016 1201
1017 if ((caps & SDHCI_CAN_DO_DMA) && ((pdev->class & 0x0000FF) == 0x01)) 1202 if (debug_nodma)
1203 DBG("DMA forced off\n");
1204 else if (debug_forcedma) {
1205 DBG("DMA forced on\n");
1206 host->flags |= SDHCI_USE_DMA;
1207 } else if (chip->quirks & SDHCI_QUIRK_FORCE_DMA)
1208 host->flags |= SDHCI_USE_DMA;
1209 else if ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA)
1210 DBG("Controller doesn't have DMA interface\n");
1211 else if (!(caps & SDHCI_CAN_DO_DMA))
1212 DBG("Controller doesn't have DMA capability\n");
1213 else
1018 host->flags |= SDHCI_USE_DMA; 1214 host->flags |= SDHCI_USE_DMA;
1019 1215
1020 if (host->flags & SDHCI_USE_DMA) { 1216 if (host->flags & SDHCI_USE_DMA) {
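
After the full reset and the spec-version sanity check, whether SDHCI_USE_DMA gets set follows a short precedence chain instead of a single capability test: the debug_nodma module parameter wins, then debug_forcedma, then the SDHCI_QUIRK_FORCE_DMA quirk, and only then do the PCI programming interface and the SDHCI_CAN_DO_DMA capability bit get a say. The same decision as a stand-alone predicate (want_dma() and the quirk bit value are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define QUIRK_FORCE_DMA  (1UL << 0)    /* illustrative quirk bit assignment */
#define CAN_DO_DMA       0x00400000    /* capability bit, as in sdhci.h */
#define IFDMA            0x01          /* PCI programming interface with DMA */

static bool want_dma(bool nodma, bool forcedma, unsigned long quirks,
                     unsigned int pci_class, uint32_t caps)
{
        if (nodma)
                return false;                       /* operator said no */
        if (forcedma || (quirks & QUIRK_FORCE_DMA))
                return true;                        /* operator or quirk said yes */
        if ((pci_class & 0xFF) != IFDMA)
                return false;                       /* controller has no DMA interface */
        return (caps & CAN_DO_DMA) != 0;            /* finally, trust the capability bit */
}
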
@@ -1030,17 +1226,58 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1030 else /* XXX: Hack to get MMC layer to avoid highmem */ 1226 else /* XXX: Hack to get MMC layer to avoid highmem */
1031 pdev->dma_mask = 0; 1227 pdev->dma_mask = 0;
1032 1228
1033 host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 1229 host->max_clk =
1230 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
1231 if (host->max_clk == 0) {
1232 printk(KERN_ERR "%s: Hardware doesn't specify base clock "
1233 "frequency.\n", host->slot_descr);
1234 ret = -ENODEV;
1235 goto unmap;
1236 }
1034 host->max_clk *= 1000000; 1237 host->max_clk *= 1000000;
1035 1238
1239 host->timeout_clk =
1240 (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
1241 if (host->timeout_clk == 0) {
1242 printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
1243 "frequency.\n", host->slot_descr);
1244 ret = -ENODEV;
1245 goto unmap;
1246 }
1247 if (caps & SDHCI_TIMEOUT_CLK_UNIT)
1248 host->timeout_clk *= 1000;
1249
1250 host->max_block = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
1251 if (host->max_block >= 3) {
1252 printk(KERN_ERR "%s: Invalid maximum block size.\n",
1253 host->slot_descr);
1254 ret = -ENODEV;
1255 goto unmap;
1256 }
1257 host->max_block = 512 << host->max_block;
1258
1036 /* 1259 /*
1037 * Set host parameters. 1260 * Set host parameters.
1038 */ 1261 */
1039 mmc->ops = &sdhci_ops; 1262 mmc->ops = &sdhci_ops;
1040 mmc->f_min = host->max_clk / 256; 1263 mmc->f_min = host->max_clk / 256;
1041 mmc->f_max = host->max_clk; 1264 mmc->f_max = host->max_clk;
1042 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34; 1265 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;
1043 mmc->caps = MMC_CAP_4_BIT_DATA; 1266
1267 mmc->ocr_avail = 0;
1268 if (caps & SDHCI_CAN_VDD_330)
1269 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
1270 else if (caps & SDHCI_CAN_VDD_300)
1271 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
1272 else if (caps & SDHCI_CAN_VDD_180)
1273 mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19;
1274
1275 if (mmc->ocr_avail == 0) {
1276 printk(KERN_ERR "%s: Hardware doesn't report any "
1277 "support voltages.\n", host->slot_descr);
1278 ret = -ENODEV;
1279 goto unmap;
1280 }
1044 1281
1045 spin_lock_init(&host->lock); 1282 spin_lock_init(&host->lock);
1046 1283
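
The probe now derives its limits from the capabilities register rather than hard-coding them: the base clock in MHz from bits 8-13, the timeout clock plus its kHz/MHz unit flag, the maximum block size encoded as 512 << n (n > 2 rejected), and the supported voltage windows which seed mmc->ocr_avail; a zero in any mandatory field, or no usable voltage, aborts with -ENODEV. A compact decode of the numeric fields using the masks added to sdhci.h below (struct caps_info and decode_caps() are illustrative):

#include <stdint.h>

#define TIMEOUT_CLK_MASK  0x0000003F
#define TIMEOUT_CLK_UNIT  0x00000080   /* set: value is in MHz, clear: kHz */
#define CLOCK_BASE_MASK   0x00003F00
#define CLOCK_BASE_SHIFT  8
#define MAX_BLOCK_MASK    0x00030000
#define MAX_BLOCK_SHIFT   16

struct caps_info {
        unsigned int max_clk_hz;
        unsigned int timeout_clk_khz;
        unsigned int max_block_bytes;
};

/* Returns 0 on success, -1 if a mandatory field is unspecified or invalid. */
static int decode_caps(uint32_t caps, struct caps_info *out)
{
        unsigned int base = (caps & CLOCK_BASE_MASK) >> CLOCK_BASE_SHIFT;
        unsigned int tmo  = caps & TIMEOUT_CLK_MASK;
        unsigned int blk  = (caps & MAX_BLOCK_MASK) >> MAX_BLOCK_SHIFT;

        if (base == 0 || tmo == 0 || blk >= 3)
                return -1;

        out->max_clk_hz      = base * 1000000;
        out->timeout_clk_khz = (caps & TIMEOUT_CLK_UNIT) ? tmo * 1000 : tmo;
        out->max_block_bytes = 512u << blk;          /* 512, 1024 or 2048 bytes */
        return 0;
}
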
@@ -1054,10 +1291,10 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1054 mmc->max_phys_segs = 16; 1291 mmc->max_phys_segs = 16;
1055 1292
1056 /* 1293 /*
1057 * Maximum number of sectors in one transfer. Limited by sector 1294 * Maximum number of sectors in one transfer. Limited by DMA boundary
1058 * count register. 1295 * size (512KiB), which means (512 KiB/512=) 1024 entries.
1059 */ 1296 */
1060 mmc->max_sectors = 0x3FFF; 1297 mmc->max_sectors = 1024;
1061 1298
1062 /* 1299 /*
1063 * Maximum segment size. Could be one segment with the maximum number 1300 * Maximum segment size. Could be one segment with the maximum number
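
The rewritten comment a few rows up records where the new max_sectors comes from: transfers are bounded by the controller's 512 KiB DMA boundary rather than the 16-bit sector-count register, and 512 KiB divided by a 512-byte sector gives 1024. The same arithmetic spelled out (constant names are illustrative):

#define DMA_BOUNDARY_BYTES (512 * 1024)   /* 512 KiB SDMA boundary */
#define SECTOR_BYTES       512

enum { MAX_SECTORS = DMA_BOUNDARY_BYTES / SECTOR_BYTES };   /* 524288 / 512 = 1024 */
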
@@ -1075,10 +1312,10 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1075 1312
1076 setup_timer(&host->timer, sdhci_timeout_timer, (long)host); 1313 setup_timer(&host->timer, sdhci_timeout_timer, (long)host);
1077 1314
1078 ret = request_irq(host->irq, sdhci_irq, SA_SHIRQ, 1315 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1079 host->slot_descr, host); 1316 host->slot_descr, host);
1080 if (ret) 1317 if (ret)
1081 goto unmap; 1318 goto untasklet;
1082 1319
1083 sdhci_init(host); 1320 sdhci_init(host);
1084 1321
@@ -1097,10 +1334,10 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1097 1334
1098 return 0; 1335 return 0;
1099 1336
1100unmap: 1337untasklet:
1101 tasklet_kill(&host->card_tasklet); 1338 tasklet_kill(&host->card_tasklet);
1102 tasklet_kill(&host->finish_tasklet); 1339 tasklet_kill(&host->finish_tasklet);
1103 1340unmap:
1104 iounmap(host->ioaddr); 1341 iounmap(host->ioaddr);
1105release: 1342release:
1106 pci_release_region(pdev, host->bar); 1343 pci_release_region(pdev, host->bar);
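
The relabelled error path keeps the goto ladder matched to the new setup order: request_irq() now runs after the tasklets exist, so its failure jumps to the new untasklet label, which kills both tasklets before falling through to unmap and release. The general unwind shape, for reference (all names below are illustrative stand-ins):

static int  acquire_regs(void)     { return 0; }   /* e.g. map the registers */
static int  acquire_tasklets(void) { return 0; }   /* e.g. set up the tasklets */
static int  acquire_irq(void)      { return 0; }   /* e.g. request the interrupt */
static void release_tasklets(void) { }
static void release_regs(void)     { }

static int probe_like(void)
{
        if (acquire_regs())
                return -1;
        if (acquire_tasklets())
                goto err_regs;
        if (acquire_irq())
                goto err_tasklets;
        return 0;

err_tasklets:
        release_tasklets();             /* the new "untasklet" step */
err_regs:
        release_regs();                 /* then the "unmap"/"release" steps */
        return -1;
}
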
@@ -1144,13 +1381,18 @@ static int __devinit sdhci_probe(struct pci_dev *pdev,
1144 const struct pci_device_id *ent) 1381 const struct pci_device_id *ent)
1145{ 1382{
1146 int ret, i; 1383 int ret, i;
1147 u8 slots; 1384 u8 slots, rev;
1148 struct sdhci_chip *chip; 1385 struct sdhci_chip *chip;
1149 1386
1150 BUG_ON(pdev == NULL); 1387 BUG_ON(pdev == NULL);
1151 BUG_ON(ent == NULL); 1388 BUG_ON(ent == NULL);
1152 1389
1153 DBG("found at %s\n", pci_name(pdev)); 1390 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);
1391
1392 printk(KERN_INFO DRIVER_NAME
1393 ": SDHCI controller found at %s [%04x:%04x] (rev %x)\n",
1394 pci_name(pdev), (int)pdev->vendor, (int)pdev->device,
1395 (int)rev);
1154 1396
1155 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); 1397 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
1156 if (ret) 1398 if (ret)
@@ -1173,6 +1415,10 @@ static int __devinit sdhci_probe(struct pci_dev *pdev,
1173 } 1415 }
1174 1416
1175 chip->pdev = pdev; 1417 chip->pdev = pdev;
1418 chip->quirks = ent->driver_data;
1419
1420 if (debug_quirks)
1421 chip->quirks = debug_quirks;
1176 1422
1177 chip->num_slots = slots; 1423 chip->num_slots = slots;
1178 pci_set_drvdata(pdev, chip); 1424 pci_set_drvdata(pdev, chip);
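
Per-device quirks arrive through the standard PCI id-table mechanism: the driver_data field of the matching pci_device_id entry carries a bitmask that the probe copies into chip->quirks, and a non-zero debug_quirks module parameter overrides the whole mask for experimentation. A minimal sketch of the pattern (the table entry, ids and bit value are illustrative placeholders):

#include <stdint.h>

#define QUIRK_CLOCK_BEFORE_RESET (1UL << 0)   /* illustrative bit assignment */

struct id_entry {
        uint16_t      vendor, device;         /* placeholder ids below */
        unsigned long driver_data;            /* quirk bits for this device */
};

static const struct id_entry ids[] = {
        { 0x1234, 0xabcd, QUIRK_CLOCK_BEFORE_RESET },
        { 0, 0, 0 }                           /* terminator */
};

static unsigned long pick_quirks(const struct id_entry *ent,
                                 unsigned long debug_quirks)
{
        /* A non-zero debug_quirks module parameter overrides the table. */
        return debug_quirks ? debug_quirks : ent->driver_data;
}
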
@@ -1251,7 +1497,15 @@ static void __exit sdhci_drv_exit(void)
1251module_init(sdhci_drv_init); 1497module_init(sdhci_drv_init);
1252module_exit(sdhci_drv_exit); 1498module_exit(sdhci_drv_exit);
1253 1499
1500module_param(debug_nodma, uint, 0444);
1501module_param(debug_forcedma, uint, 0444);
1502module_param(debug_quirks, uint, 0444);
1503
1254MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); 1504MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
1255MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver"); 1505MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver");
1256MODULE_VERSION(DRIVER_VERSION); 1506MODULE_VERSION(DRIVER_VERSION);
1257MODULE_LICENSE("GPL"); 1507MODULE_LICENSE("GPL");
1508
1509MODULE_PARM_DESC(debug_nodma, "Forcefully disable DMA transfers. (default 0)");
1510MODULE_PARM_DESC(debug_forcedma, "Forcefully enable DMA transfers. (default 0)");
1511MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
diff --git a/drivers/mmc/sdhci.h b/drivers/mmc/sdhci.h
index 3b270ef486b4..f2453343f783 100644
--- a/drivers/mmc/sdhci.h
+++ b/drivers/mmc/sdhci.h
@@ -12,6 +12,10 @@
12 * PCI registers 12 * PCI registers
13 */ 13 */
14 14
15#define PCI_SDHCI_IFPIO 0x00
16#define PCI_SDHCI_IFDMA 0x01
17#define PCI_SDHCI_IFVENDOR 0x02
18
15#define PCI_SLOT_INFO 0x40 /* 8 bits */ 19#define PCI_SLOT_INFO 0x40 /* 8 bits */
16#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) 20#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
17#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 21#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
@@ -23,6 +27,7 @@
23#define SDHCI_DMA_ADDRESS 0x00 27#define SDHCI_DMA_ADDRESS 0x00
24 28
25#define SDHCI_BLOCK_SIZE 0x04 29#define SDHCI_BLOCK_SIZE 0x04
30#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
26 31
27#define SDHCI_BLOCK_COUNT 0x06 32#define SDHCI_BLOCK_COUNT 0x06
28 33
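
SDHCI_MAKE_BLKSZ packs the two fields of the block-size register: bits 0-11 carry the transfer block size and bits 12-14 the SDMA buffer-boundary code, where code 7 selects the largest (512 KiB) boundary. A worked expansion (the sample values are chosen for illustration):

#include <assert.h>
#include <stdint.h>

#define MAKE_BLKSZ(dma, blksz) ((((dma) & 0x7) << 12) | ((blksz) & 0xFFF))

int main(void)
{
        uint16_t reg = MAKE_BLKSZ(7, 512);   /* boundary code 7, 512-byte blocks */

        assert(reg == 0x7200);               /* (0x7 << 12) | 0x200 */
        return 0;
}
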
@@ -67,6 +72,10 @@
67#define SDHCI_CTRL_4BITBUS 0x02 72#define SDHCI_CTRL_4BITBUS 0x02
68 73
69#define SDHCI_POWER_CONTROL 0x29 74#define SDHCI_POWER_CONTROL 0x29
75#define SDHCI_POWER_ON 0x01
76#define SDHCI_POWER_180 0x0A
77#define SDHCI_POWER_300 0x0C
78#define SDHCI_POWER_330 0x0E
70 79
71#define SDHCI_BLOCK_GAP_CONTROL 0x2A 80#define SDHCI_BLOCK_GAP_CONTROL 0x2A
72 81
@@ -91,8 +100,8 @@
91#define SDHCI_INT_RESPONSE 0x00000001 100#define SDHCI_INT_RESPONSE 0x00000001
92#define SDHCI_INT_DATA_END 0x00000002 101#define SDHCI_INT_DATA_END 0x00000002
93#define SDHCI_INT_DMA_END 0x00000008 102#define SDHCI_INT_DMA_END 0x00000008
94#define SDHCI_INT_BUF_EMPTY 0x00000010 103#define SDHCI_INT_SPACE_AVAIL 0x00000010
95#define SDHCI_INT_BUF_FULL 0x00000020 104#define SDHCI_INT_DATA_AVAIL 0x00000020
96#define SDHCI_INT_CARD_INSERT 0x00000040 105#define SDHCI_INT_CARD_INSERT 0x00000040
97#define SDHCI_INT_CARD_REMOVE 0x00000080 106#define SDHCI_INT_CARD_REMOVE 0x00000080
98#define SDHCI_INT_CARD_INT 0x00000100 107#define SDHCI_INT_CARD_INT 0x00000100
@@ -112,7 +121,7 @@
112#define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \ 121#define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
113 SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX) 122 SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
114#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \ 123#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
115 SDHCI_INT_BUF_EMPTY | SDHCI_INT_BUF_FULL | \ 124 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
116 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ 125 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
117 SDHCI_INT_DATA_END_BIT) 126 SDHCI_INT_DATA_END_BIT)
118 127
@@ -121,9 +130,17 @@
121/* 3E-3F reserved */ 130/* 3E-3F reserved */
122 131
123#define SDHCI_CAPABILITIES 0x40 132#define SDHCI_CAPABILITIES 0x40
124#define SDHCI_CAN_DO_DMA 0x00400000 133#define SDHCI_TIMEOUT_CLK_MASK 0x0000003F
134#define SDHCI_TIMEOUT_CLK_SHIFT 0
135#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080
125#define SDHCI_CLOCK_BASE_MASK 0x00003F00 136#define SDHCI_CLOCK_BASE_MASK 0x00003F00
126#define SDHCI_CLOCK_BASE_SHIFT 8 137#define SDHCI_CLOCK_BASE_SHIFT 8
138#define SDHCI_MAX_BLOCK_MASK 0x00030000
139#define SDHCI_MAX_BLOCK_SHIFT 16
140#define SDHCI_CAN_DO_DMA 0x00400000
141#define SDHCI_CAN_VDD_330 0x01000000
142#define SDHCI_CAN_VDD_300 0x02000000
143#define SDHCI_CAN_VDD_180 0x04000000
127 144
128/* 44-47 reserved for more caps */ 145/* 44-47 reserved for more caps */
129 146
@@ -136,6 +153,10 @@
136#define SDHCI_SLOT_INT_STATUS 0xFC 153#define SDHCI_SLOT_INT_STATUS 0xFC
137 154
138#define SDHCI_HOST_VERSION 0xFE 155#define SDHCI_HOST_VERSION 0xFE
156#define SDHCI_VENDOR_VER_MASK 0xFF00
157#define SDHCI_VENDOR_VER_SHIFT 8
158#define SDHCI_SPEC_VER_MASK 0x00FF
159#define SDHCI_SPEC_VER_SHIFT 0
139 160
140struct sdhci_chip; 161struct sdhci_chip;
141 162
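
The host-version register at 0xFE splits into a vendor revision in the high byte and the SDHCI specification version in the low byte, which is what the probe's "Unknown controller version" warning inspects. A worked split (the sample register value is made up):

#include <assert.h>
#include <stdint.h>

#define VENDOR_VER_MASK  0xFF00
#define VENDOR_VER_SHIFT 8
#define SPEC_VER_MASK    0x00FF
#define SPEC_VER_SHIFT   0

int main(void)
{
        uint16_t version = 0x2300;   /* hypothetical host-version register value */

        assert(((version & VENDOR_VER_MASK) >> VENDOR_VER_SHIFT) == 0x23);
        assert(((version & SPEC_VER_MASK) >> SPEC_VER_SHIFT) == 0x00);
        return 0;
}
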
@@ -149,8 +170,11 @@ struct sdhci_host {
149#define SDHCI_USE_DMA (1<<0) 170#define SDHCI_USE_DMA (1<<0)
150 171
151 unsigned int max_clk; /* Max possible freq (MHz) */ 172 unsigned int max_clk; /* Max possible freq (MHz) */
173 unsigned int timeout_clk; /* Timeout freq (KHz) */
174 unsigned int max_block; /* Max block size (bytes) */
152 175
153 unsigned int clock; /* Current clock (MHz) */ 176 unsigned int clock; /* Current clock (MHz) */
177 unsigned short power; /* Current voltage */
154 178
155 struct mmc_request *mrq; /* Current request */ 179 struct mmc_request *mrq; /* Current request */
156 struct mmc_command *cmd; /* Current command */ 180 struct mmc_command *cmd; /* Current command */
@@ -180,6 +204,8 @@ struct sdhci_host {
180struct sdhci_chip { 204struct sdhci_chip {
181 struct pci_dev *pdev; 205 struct pci_dev *pdev;
182 206
207 unsigned long quirks;
208
183 int num_slots; /* Slots on controller */ 209 int num_slots; /* Slots on controller */
184 struct sdhci_host *hosts[0]; /* Pointers to hosts */ 210 struct sdhci_host *hosts[0]; /* Pointers to hosts */
185}; 211};
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 8167332d4013..6435a6822ad3 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -21,7 +21,6 @@
21 * - On APIC systems the FIFO empty interrupt is sometimes lost. 21 * - On APIC systems the FIFO empty interrupt is sometimes lost.
22 */ 22 */
23 23
24#include <linux/config.h>
25#include <linux/module.h> 24#include <linux/module.h>
26#include <linux/moduleparam.h> 25#include <linux/moduleparam.h>
27#include <linux/init.h> 26#include <linux/init.h>
@@ -42,7 +41,7 @@
42#include "wbsd.h" 41#include "wbsd.h"
43 42
44#define DRIVER_NAME "wbsd" 43#define DRIVER_NAME "wbsd"
45#define DRIVER_VERSION "1.5" 44#define DRIVER_VERSION "1.6"
46 45
47#define DBG(x...) \ 46#define DBG(x...) \
48 pr_debug(DRIVER_NAME ": " x) 47 pr_debug(DRIVER_NAME ": " x)
@@ -1324,7 +1323,7 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
1324 mmc->f_min = 375000; 1323 mmc->f_min = 375000;
1325 mmc->f_max = 24000000; 1324 mmc->f_max = 24000000;
1326 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1325 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1327 mmc->caps = MMC_CAP_4_BIT_DATA; 1326 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;
1328 1327
1329 spin_lock_init(&host->lock); 1328 spin_lock_init(&host->lock);
1330 1329
@@ -1440,13 +1439,13 @@ static int __devinit wbsd_scan(struct wbsd_host *host)
1440 1439
1441static int __devinit wbsd_request_region(struct wbsd_host *host, int base) 1440static int __devinit wbsd_request_region(struct wbsd_host *host, int base)
1442{ 1441{
1443 if (io & 0x7) 1442 if (base & 0x7)
1444 return -EINVAL; 1443 return -EINVAL;
1445 1444
1446 if (!request_region(base, 8, DRIVER_NAME)) 1445 if (!request_region(base, 8, DRIVER_NAME))
1447 return -EIO; 1446 return -EIO;
1448 1447
1449 host->base = io; 1448 host->base = base;
1450 1449
1451 return 0; 1450 return 0;
1452} 1451}
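
The wbsd_request_region() change is a plain bug fix: the function tested and stored the io module parameter instead of its own base argument, so a caller-supplied base address was never validated or recorded. The validation itself is just 8-byte alignment, since the chip claims eight consecutive I/O ports; in isolation (is_valid_base() is an illustrative name):

#include <stdbool.h>

/* The controller occupies 8 consecutive I/O ports, so the base address
 * must sit on an 8-byte boundary. */
static bool is_valid_base(int base)
{
        return (base & 0x7) == 0;
}
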
@@ -1554,7 +1553,7 @@ static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq)
1554 * Allocate interrupt. 1553 * Allocate interrupt.
1555 */ 1554 */
1556 1555
1557 ret = request_irq(irq, wbsd_irq, SA_SHIRQ, DRIVER_NAME, host); 1556 ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host);
1558 if (ret) 1557 if (ret)
1559 return ret; 1558 return ret;
1560 1559
@@ -1774,7 +1773,7 @@ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
1774 /* 1773 /*
1775 * Request resources. 1774 * Request resources.
1776 */ 1775 */
1777 ret = wbsd_request_resources(host, io, irq, dma); 1776 ret = wbsd_request_resources(host, base, irq, dma);
1778 if (ret) { 1777 if (ret) {
1779 wbsd_release_resources(host); 1778 wbsd_release_resources(host);
1780 wbsd_free_mmc(dev); 1779 wbsd_free_mmc(dev);
@@ -1862,6 +1861,7 @@ static void __devexit wbsd_shutdown(struct device *dev, int pnp)
1862 1861
1863static int __devinit wbsd_probe(struct platform_device *dev) 1862static int __devinit wbsd_probe(struct platform_device *dev)
1864{ 1863{
1864 /* Use the module parameters for resources */
1865 return wbsd_init(&dev->dev, io, irq, dma, 0); 1865 return wbsd_init(&dev->dev, io, irq, dma, 0);
1866} 1866}
1867 1867