aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2008-07-16 18:11:07 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-07-16 18:17:52 -0400
commit8a0ca91e1db5de5eb5b18cfa919d52ff8be375af (patch)
treebd3a1564940d27ae7f6229089db1283ff2a636c8 /drivers/mmc
parent9c1be0c4712fe760d8969427ef91107e9c062d91 (diff)
parentc43d8636971c39da993e94082fd65bfff421618e (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc: (68 commits) sdio_uart: Fix SDIO break control to now return success or an error mmc: host driver for Ricoh Bay1Controllers sdio: sdio_io.c Fix sparse warnings sdio: fix the use of hard coded timeout value. mmc: OLPC: update vdd/powerup quirk comment mmc: fix spares errors of sdhci.c mmc: remove multiwrite capability wbsd: fix bad dma_addr_t conversion atmel-mci: Driver for Atmel on-chip MMC controllers mmc: fix sdio_io sparse errors mmc: wbsd.c fix shadowing of 'dma' variable MMC: S3C24XX: Refuse incorrectly aligned transfers MMC: S3C24XX: Add maintainer entry MMC: S3C24XX: Update error debugging. MMC: S3C24XX: Add media presence test to request handling. MMC: S3C24XX: Fix use of msecs where jiffies are needed MMC: S3C24XX: Add MODULE_ALIAS() entries for the platform devices MMC: S3C24XX: Fix s3c2410_dma_request() return code check. MMC: S3C24XX: Allow card-detect on non-IRQ capable pin MMC: S3C24XX: Ensure host->mrq->data is valid ... Manually fixed up bogus executable bits on drivers/mmc/core/sdio_io.c and include/linux/mmc/sdio_func.h when merging.
Diffstat (limited to 'drivers/mmc')
-rw-r--r--drivers/mmc/card/block.c60
-rw-r--r--drivers/mmc/card/mmc_test.c569
-rw-r--r--drivers/mmc/card/sdio_uart.c9
-rw-r--r--drivers/mmc/core/core.c41
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/sd.c6
-rw-r--r--drivers/mmc/core/sdio_cis.c6
-rw-r--r--drivers/mmc/core/sdio_io.c167
-rw-r--r--drivers/mmc/host/Kconfig50
-rw-r--r--drivers/mmc/host/Makefile4
-rw-r--r--drivers/mmc/host/at91_mci.c257
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h91
-rw-r--r--drivers/mmc/host/atmel-mci.c981
-rw-r--r--drivers/mmc/host/au1xmmc.c792
-rw-r--r--drivers/mmc/host/au1xmmc.h96
-rw-r--r--drivers/mmc/host/imxmmc.c9
-rw-r--r--drivers/mmc/host/mmc_spi.c33
-rw-r--r--drivers/mmc/host/mmci.c1
-rw-r--r--drivers/mmc/host/omap.c2
-rw-r--r--drivers/mmc/host/pxamci.c9
-rw-r--r--drivers/mmc/host/s3cmci.c1446
-rw-r--r--drivers/mmc/host/s3cmci.h70
-rw-r--r--drivers/mmc/host/sdhci-pci.c732
-rw-r--r--drivers/mmc/host/sdhci.c994
-rw-r--r--drivers/mmc/host/sdhci.h120
-rw-r--r--drivers/mmc/host/sdricoh_cs.c575
-rw-r--r--drivers/mmc/host/tifm_sd.c2
-rw-r--r--drivers/mmc/host/wbsd.c38
28 files changed, 5889 insertions, 1273 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f9ad960d7c1a..66e5a5487c20 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2,7 +2,7 @@
2 * Block driver for media (i.e., flash cards) 2 * Block driver for media (i.e., flash cards)
3 * 3 *
4 * Copyright 2002 Hewlett-Packard Company 4 * Copyright 2002 Hewlett-Packard Company
5 * Copyright 2005-2007 Pierre Ossman 5 * Copyright 2005-2008 Pierre Ossman
6 * 6 *
7 * Use consistent with the GNU GPL is permitted, 7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is 8 * provided that this copyright notice is
@@ -237,17 +237,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
237 if (brq.data.blocks > card->host->max_blk_count) 237 if (brq.data.blocks > card->host->max_blk_count)
238 brq.data.blocks = card->host->max_blk_count; 238 brq.data.blocks = card->host->max_blk_count;
239 239
240 /*
241 * If the host doesn't support multiple block writes, force
242 * block writes to single block. SD cards are excepted from
243 * this rule as they support querying the number of
244 * successfully written sectors.
245 */
246 if (rq_data_dir(req) != READ &&
247 !(card->host->caps & MMC_CAP_MULTIWRITE) &&
248 !mmc_card_sd(card))
249 brq.data.blocks = 1;
250
251 if (brq.data.blocks > 1) { 240 if (brq.data.blocks > 1) {
252 /* SPI multiblock writes terminate using a special 241 /* SPI multiblock writes terminate using a special
253 * token, not a STOP_TRANSMISSION request. 242 * token, not a STOP_TRANSMISSION request.
@@ -296,22 +285,24 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
296 285
297 mmc_queue_bounce_post(mq); 286 mmc_queue_bounce_post(mq);
298 287
288 /*
289 * Check for errors here, but don't jump to cmd_err
290 * until later as we need to wait for the card to leave
291 * programming mode even when things go wrong.
292 */
299 if (brq.cmd.error) { 293 if (brq.cmd.error) {
300 printk(KERN_ERR "%s: error %d sending read/write command\n", 294 printk(KERN_ERR "%s: error %d sending read/write command\n",
301 req->rq_disk->disk_name, brq.cmd.error); 295 req->rq_disk->disk_name, brq.cmd.error);
302 goto cmd_err;
303 } 296 }
304 297
305 if (brq.data.error) { 298 if (brq.data.error) {
306 printk(KERN_ERR "%s: error %d transferring data\n", 299 printk(KERN_ERR "%s: error %d transferring data\n",
307 req->rq_disk->disk_name, brq.data.error); 300 req->rq_disk->disk_name, brq.data.error);
308 goto cmd_err;
309 } 301 }
310 302
311 if (brq.stop.error) { 303 if (brq.stop.error) {
312 printk(KERN_ERR "%s: error %d sending stop command\n", 304 printk(KERN_ERR "%s: error %d sending stop command\n",
313 req->rq_disk->disk_name, brq.stop.error); 305 req->rq_disk->disk_name, brq.stop.error);
314 goto cmd_err;
315 } 306 }
316 307
317 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { 308 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
@@ -344,6 +335,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
344#endif 335#endif
345 } 336 }
346 337
338 if (brq.cmd.error || brq.data.error || brq.stop.error)
339 goto cmd_err;
340
347 /* 341 /*
348 * A block was successfully transferred. 342 * A block was successfully transferred.
349 */ 343 */
@@ -362,30 +356,32 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
362 * mark the known good sectors as ok. 356 * mark the known good sectors as ok.
363 * 357 *
364 * If the card is not SD, we can still ok written sectors 358 * If the card is not SD, we can still ok written sectors
365 * if the controller can do proper error reporting. 359 * as reported by the controller (which might be less than
360 * the real number of written sectors, but never more).
366 * 361 *
367 * For reads we just fail the entire chunk as that should 362 * For reads we just fail the entire chunk as that should
368 * be safe in all cases. 363 * be safe in all cases.
369 */ 364 */
370 if (rq_data_dir(req) != READ && mmc_card_sd(card)) { 365 if (rq_data_dir(req) != READ) {
371 u32 blocks; 366 if (mmc_card_sd(card)) {
372 unsigned int bytes; 367 u32 blocks;
373 368 unsigned int bytes;
374 blocks = mmc_sd_num_wr_blocks(card); 369
375 if (blocks != (u32)-1) { 370 blocks = mmc_sd_num_wr_blocks(card);
376 if (card->csd.write_partial) 371 if (blocks != (u32)-1) {
377 bytes = blocks << md->block_bits; 372 if (card->csd.write_partial)
378 else 373 bytes = blocks << md->block_bits;
379 bytes = blocks << 9; 374 else
375 bytes = blocks << 9;
376 spin_lock_irq(&md->lock);
377 ret = __blk_end_request(req, 0, bytes);
378 spin_unlock_irq(&md->lock);
379 }
380 } else {
380 spin_lock_irq(&md->lock); 381 spin_lock_irq(&md->lock);
381 ret = __blk_end_request(req, 0, bytes); 382 ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
382 spin_unlock_irq(&md->lock); 383 spin_unlock_irq(&md->lock);
383 } 384 }
384 } else if (rq_data_dir(req) != READ &&
385 (card->host->caps & MMC_CAP_MULTIWRITE)) {
386 spin_lock_irq(&md->lock);
387 ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
388 spin_unlock_irq(&md->lock);
389 } 385 }
390 386
391 mmc_release_host(card->host); 387 mmc_release_host(card->host);
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index ffadee549a41..d6b9b486417c 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/mmc/card/mmc_test.c 2 * linux/drivers/mmc/card/mmc_test.c
3 * 3 *
4 * Copyright 2007 Pierre Ossman 4 * Copyright 2007-2008 Pierre Ossman
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -26,13 +26,17 @@
26struct mmc_test_card { 26struct mmc_test_card {
27 struct mmc_card *card; 27 struct mmc_card *card;
28 28
29 u8 scratch[BUFFER_SIZE];
29 u8 *buffer; 30 u8 *buffer;
30}; 31};
31 32
32/*******************************************************************/ 33/*******************************************************************/
33/* Helper functions */ 34/* General helper functions */
34/*******************************************************************/ 35/*******************************************************************/
35 36
37/*
38 * Configure correct block size in card
39 */
36static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) 40static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
37{ 41{
38 struct mmc_command cmd; 42 struct mmc_command cmd;
@@ -48,117 +52,61 @@ static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
48 return 0; 52 return 0;
49} 53}
50 54
51static int __mmc_test_transfer(struct mmc_test_card *test, int write, 55/*
52 unsigned broken_xfer, u8 *buffer, unsigned addr, 56 * Fill in the mmc_request structure given a set of transfer parameters.
53 unsigned blocks, unsigned blksz) 57 */
58static void mmc_test_prepare_mrq(struct mmc_test_card *test,
59 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
60 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
54{ 61{
55 int ret, busy; 62 BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
56
57 struct mmc_request mrq;
58 struct mmc_command cmd;
59 struct mmc_command stop;
60 struct mmc_data data;
61
62 struct scatterlist sg;
63
64 memset(&mrq, 0, sizeof(struct mmc_request));
65
66 mrq.cmd = &cmd;
67 mrq.data = &data;
68
69 memset(&cmd, 0, sizeof(struct mmc_command));
70 63
71 if (broken_xfer) { 64 if (blocks > 1) {
72 if (blocks > 1) { 65 mrq->cmd->opcode = write ?
73 cmd.opcode = write ? 66 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
74 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
75 } else {
76 cmd.opcode = MMC_SEND_STATUS;
77 }
78 } else { 67 } else {
79 if (blocks > 1) { 68 mrq->cmd->opcode = write ?
80 cmd.opcode = write ? 69 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
81 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
82 } else {
83 cmd.opcode = write ?
84 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
85 }
86 } 70 }
87 71
88 if (broken_xfer && blocks == 1) 72 mrq->cmd->arg = dev_addr;
89 cmd.arg = test->card->rca << 16; 73 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
90 else
91 cmd.arg = addr;
92 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
93 74
94 memset(&stop, 0, sizeof(struct mmc_command)); 75 if (blocks == 1)
95 76 mrq->stop = NULL;
96 if (!broken_xfer && (blocks > 1)) { 77 else {
97 stop.opcode = MMC_STOP_TRANSMISSION; 78 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
98 stop.arg = 0; 79 mrq->stop->arg = 0;
99 stop.flags = MMC_RSP_R1B | MMC_CMD_AC; 80 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
100
101 mrq.stop = &stop;
102 } 81 }
103 82
104 memset(&data, 0, sizeof(struct mmc_data)); 83 mrq->data->blksz = blksz;
105 84 mrq->data->blocks = blocks;
106 data.blksz = blksz; 85 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
107 data.blocks = blocks; 86 mrq->data->sg = sg;
108 data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; 87 mrq->data->sg_len = sg_len;
109 data.sg = &sg;
110 data.sg_len = 1;
111
112 sg_init_one(&sg, buffer, blocks * blksz);
113
114 mmc_set_data_timeout(&data, test->card);
115 88
116 mmc_wait_for_req(test->card->host, &mrq); 89 mmc_set_data_timeout(mrq->data, test->card);
117 90}
118 ret = 0;
119
120 if (broken_xfer) {
121 if (!ret && cmd.error)
122 ret = cmd.error;
123 if (!ret && data.error == 0)
124 ret = RESULT_FAIL;
125 if (!ret && data.error != -ETIMEDOUT)
126 ret = data.error;
127 if (!ret && stop.error)
128 ret = stop.error;
129 if (blocks > 1) {
130 if (!ret && data.bytes_xfered > blksz)
131 ret = RESULT_FAIL;
132 } else {
133 if (!ret && data.bytes_xfered > 0)
134 ret = RESULT_FAIL;
135 }
136 } else {
137 if (!ret && cmd.error)
138 ret = cmd.error;
139 if (!ret && data.error)
140 ret = data.error;
141 if (!ret && stop.error)
142 ret = stop.error;
143 if (!ret && data.bytes_xfered != blocks * blksz)
144 ret = RESULT_FAIL;
145 }
146 91
147 if (ret == -EINVAL) 92/*
148 ret = RESULT_UNSUP_HOST; 93 * Wait for the card to finish the busy state
94 */
95static int mmc_test_wait_busy(struct mmc_test_card *test)
96{
97 int ret, busy;
98 struct mmc_command cmd;
149 99
150 busy = 0; 100 busy = 0;
151 do { 101 do {
152 int ret2;
153
154 memset(&cmd, 0, sizeof(struct mmc_command)); 102 memset(&cmd, 0, sizeof(struct mmc_command));
155 103
156 cmd.opcode = MMC_SEND_STATUS; 104 cmd.opcode = MMC_SEND_STATUS;
157 cmd.arg = test->card->rca << 16; 105 cmd.arg = test->card->rca << 16;
158 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 106 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
159 107
160 ret2 = mmc_wait_for_cmd(test->card->host, &cmd, 0); 108 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
161 if (ret2) 109 if (ret)
162 break; 110 break;
163 111
164 if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) { 112 if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
@@ -172,14 +120,57 @@ static int __mmc_test_transfer(struct mmc_test_card *test, int write,
172 return ret; 120 return ret;
173} 121}
174 122
175static int mmc_test_transfer(struct mmc_test_card *test, int write, 123/*
176 u8 *buffer, unsigned addr, unsigned blocks, unsigned blksz) 124 * Transfer a single sector of kernel addressable data
125 */
126static int mmc_test_buffer_transfer(struct mmc_test_card *test,
127 u8 *buffer, unsigned addr, unsigned blksz, int write)
177{ 128{
178 return __mmc_test_transfer(test, write, 0, buffer, 129 int ret;
179 addr, blocks, blksz); 130
131 struct mmc_request mrq;
132 struct mmc_command cmd;
133 struct mmc_command stop;
134 struct mmc_data data;
135
136 struct scatterlist sg;
137
138 memset(&mrq, 0, sizeof(struct mmc_request));
139 memset(&cmd, 0, sizeof(struct mmc_command));
140 memset(&data, 0, sizeof(struct mmc_data));
141 memset(&stop, 0, sizeof(struct mmc_command));
142
143 mrq.cmd = &cmd;
144 mrq.data = &data;
145 mrq.stop = &stop;
146
147 sg_init_one(&sg, buffer, blksz);
148
149 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
150
151 mmc_wait_for_req(test->card->host, &mrq);
152
153 if (cmd.error)
154 return cmd.error;
155 if (data.error)
156 return data.error;
157
158 ret = mmc_test_wait_busy(test);
159 if (ret)
160 return ret;
161
162 return 0;
180} 163}
181 164
182static int mmc_test_prepare_verify(struct mmc_test_card *test, int write) 165/*******************************************************************/
166/* Test preparation and cleanup */
167/*******************************************************************/
168
169/*
170 * Fill the first couple of sectors of the card with known data
171 * so that bad reads/writes can be detected
172 */
173static int __mmc_test_prepare(struct mmc_test_card *test, int write)
183{ 174{
184 int ret, i; 175 int ret, i;
185 176
@@ -188,15 +179,14 @@ static int mmc_test_prepare_verify(struct mmc_test_card *test, int write)
188 return ret; 179 return ret;
189 180
190 if (write) 181 if (write)
191 memset(test->buffer, 0xDF, BUFFER_SIZE); 182 memset(test->buffer, 0xDF, 512);
192 else { 183 else {
193 for (i = 0;i < BUFFER_SIZE;i++) 184 for (i = 0;i < 512;i++)
194 test->buffer[i] = i; 185 test->buffer[i] = i;
195 } 186 }
196 187
197 for (i = 0;i < BUFFER_SIZE / 512;i++) { 188 for (i = 0;i < BUFFER_SIZE / 512;i++) {
198 ret = mmc_test_transfer(test, 1, test->buffer + i * 512, 189 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1);
199 i * 512, 1, 512);
200 if (ret) 190 if (ret)
201 return ret; 191 return ret;
202 } 192 }
@@ -204,41 +194,218 @@ static int mmc_test_prepare_verify(struct mmc_test_card *test, int write)
204 return 0; 194 return 0;
205} 195}
206 196
207static int mmc_test_prepare_verify_write(struct mmc_test_card *test) 197static int mmc_test_prepare_write(struct mmc_test_card *test)
198{
199 return __mmc_test_prepare(test, 1);
200}
201
202static int mmc_test_prepare_read(struct mmc_test_card *test)
203{
204 return __mmc_test_prepare(test, 0);
205}
206
207static int mmc_test_cleanup(struct mmc_test_card *test)
208{ 208{
209 return mmc_test_prepare_verify(test, 1); 209 int ret, i;
210
211 ret = mmc_test_set_blksize(test, 512);
212 if (ret)
213 return ret;
214
215 memset(test->buffer, 0, 512);
216
217 for (i = 0;i < BUFFER_SIZE / 512;i++) {
218 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1);
219 if (ret)
220 return ret;
221 }
222
223 return 0;
210} 224}
211 225
212static int mmc_test_prepare_verify_read(struct mmc_test_card *test) 226/*******************************************************************/
227/* Test execution helpers */
228/*******************************************************************/
229
230/*
231 * Modifies the mmc_request to perform the "short transfer" tests
232 */
233static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
234 struct mmc_request *mrq, int write)
213{ 235{
214 return mmc_test_prepare_verify(test, 0); 236 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
237
238 if (mrq->data->blocks > 1) {
239 mrq->cmd->opcode = write ?
240 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
241 mrq->stop = NULL;
242 } else {
243 mrq->cmd->opcode = MMC_SEND_STATUS;
244 mrq->cmd->arg = test->card->rca << 16;
245 }
215} 246}
216 247
217static int mmc_test_verified_transfer(struct mmc_test_card *test, int write, 248/*
218 u8 *buffer, unsigned addr, unsigned blocks, unsigned blksz) 249 * Checks that a normal transfer didn't have any errors
250 */
251static int mmc_test_check_result(struct mmc_test_card *test,
252 struct mmc_request *mrq)
219{ 253{
220 int ret, i, sectors; 254 int ret;
221 255
222 /* 256 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
223 * It is assumed that the above preparation has been done. 257
224 */ 258 ret = 0;
225 259
226 memset(test->buffer, 0, BUFFER_SIZE); 260 if (!ret && mrq->cmd->error)
261 ret = mrq->cmd->error;
262 if (!ret && mrq->data->error)
263 ret = mrq->data->error;
264 if (!ret && mrq->stop && mrq->stop->error)
265 ret = mrq->stop->error;
266 if (!ret && mrq->data->bytes_xfered !=
267 mrq->data->blocks * mrq->data->blksz)
268 ret = RESULT_FAIL;
269
270 if (ret == -EINVAL)
271 ret = RESULT_UNSUP_HOST;
272
273 return ret;
274}
275
276/*
277 * Checks that a "short transfer" behaved as expected
278 */
279static int mmc_test_check_broken_result(struct mmc_test_card *test,
280 struct mmc_request *mrq)
281{
282 int ret;
283
284 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
285
286 ret = 0;
287
288 if (!ret && mrq->cmd->error)
289 ret = mrq->cmd->error;
290 if (!ret && mrq->data->error == 0)
291 ret = RESULT_FAIL;
292 if (!ret && mrq->data->error != -ETIMEDOUT)
293 ret = mrq->data->error;
294 if (!ret && mrq->stop && mrq->stop->error)
295 ret = mrq->stop->error;
296 if (mrq->data->blocks > 1) {
297 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
298 ret = RESULT_FAIL;
299 } else {
300 if (!ret && mrq->data->bytes_xfered > 0)
301 ret = RESULT_FAIL;
302 }
303
304 if (ret == -EINVAL)
305 ret = RESULT_UNSUP_HOST;
306
307 return ret;
308}
309
310/*
311 * Tests a basic transfer with certain parameters
312 */
313static int mmc_test_simple_transfer(struct mmc_test_card *test,
314 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
315 unsigned blocks, unsigned blksz, int write)
316{
317 struct mmc_request mrq;
318 struct mmc_command cmd;
319 struct mmc_command stop;
320 struct mmc_data data;
321
322 memset(&mrq, 0, sizeof(struct mmc_request));
323 memset(&cmd, 0, sizeof(struct mmc_command));
324 memset(&data, 0, sizeof(struct mmc_data));
325 memset(&stop, 0, sizeof(struct mmc_command));
326
327 mrq.cmd = &cmd;
328 mrq.data = &data;
329 mrq.stop = &stop;
330
331 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
332 blocks, blksz, write);
333
334 mmc_wait_for_req(test->card->host, &mrq);
335
336 mmc_test_wait_busy(test);
337
338 return mmc_test_check_result(test, &mrq);
339}
340
341/*
342 * Tests a transfer where the card will fail completely or partly
343 */
344static int mmc_test_broken_transfer(struct mmc_test_card *test,
345 unsigned blocks, unsigned blksz, int write)
346{
347 struct mmc_request mrq;
348 struct mmc_command cmd;
349 struct mmc_command stop;
350 struct mmc_data data;
351
352 struct scatterlist sg;
353
354 memset(&mrq, 0, sizeof(struct mmc_request));
355 memset(&cmd, 0, sizeof(struct mmc_command));
356 memset(&data, 0, sizeof(struct mmc_data));
357 memset(&stop, 0, sizeof(struct mmc_command));
358
359 mrq.cmd = &cmd;
360 mrq.data = &data;
361 mrq.stop = &stop;
362
363 sg_init_one(&sg, test->buffer, blocks * blksz);
364
365 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
366 mmc_test_prepare_broken_mrq(test, &mrq, write);
367
368 mmc_wait_for_req(test->card->host, &mrq);
369
370 mmc_test_wait_busy(test);
371
372 return mmc_test_check_broken_result(test, &mrq);
373}
374
375/*
376 * Does a complete transfer test where data is also validated
377 *
378 * Note: mmc_test_prepare() must have been done before this call
379 */
380static int mmc_test_transfer(struct mmc_test_card *test,
381 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
382 unsigned blocks, unsigned blksz, int write)
383{
384 int ret, i;
385 unsigned long flags;
227 386
228 if (write) { 387 if (write) {
229 for (i = 0;i < blocks * blksz;i++) 388 for (i = 0;i < blocks * blksz;i++)
230 buffer[i] = i; 389 test->scratch[i] = i;
390 } else {
391 memset(test->scratch, 0, BUFFER_SIZE);
231 } 392 }
393 local_irq_save(flags);
394 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
395 local_irq_restore(flags);
232 396
233 ret = mmc_test_set_blksize(test, blksz); 397 ret = mmc_test_set_blksize(test, blksz);
234 if (ret) 398 if (ret)
235 return ret; 399 return ret;
236 400
237 ret = mmc_test_transfer(test, write, buffer, addr, blocks, blksz); 401 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
402 blocks, blksz, write);
238 if (ret) 403 if (ret)
239 return ret; 404 return ret;
240 405
241 if (write) { 406 if (write) {
407 int sectors;
408
242 ret = mmc_test_set_blksize(test, 512); 409 ret = mmc_test_set_blksize(test, 512);
243 if (ret) 410 if (ret)
244 return ret; 411 return ret;
@@ -253,9 +420,9 @@ static int mmc_test_verified_transfer(struct mmc_test_card *test, int write,
253 memset(test->buffer, 0, sectors * 512); 420 memset(test->buffer, 0, sectors * 512);
254 421
255 for (i = 0;i < sectors;i++) { 422 for (i = 0;i < sectors;i++) {
256 ret = mmc_test_transfer(test, 0, 423 ret = mmc_test_buffer_transfer(test,
257 test->buffer + i * 512, 424 test->buffer + i * 512,
258 addr + i * 512, 1, 512); 425 dev_addr + i * 512, 512, 0);
259 if (ret) 426 if (ret)
260 return ret; 427 return ret;
261 } 428 }
@@ -270,8 +437,11 @@ static int mmc_test_verified_transfer(struct mmc_test_card *test, int write,
270 return RESULT_FAIL; 437 return RESULT_FAIL;
271 } 438 }
272 } else { 439 } else {
440 local_irq_save(flags);
441 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
442 local_irq_restore(flags);
273 for (i = 0;i < blocks * blksz;i++) { 443 for (i = 0;i < blocks * blksz;i++) {
274 if (buffer[i] != (u8)i) 444 if (test->scratch[i] != (u8)i)
275 return RESULT_FAIL; 445 return RESULT_FAIL;
276 } 446 }
277 } 447 }
@@ -279,26 +449,6 @@ static int mmc_test_verified_transfer(struct mmc_test_card *test, int write,
279 return 0; 449 return 0;
280} 450}
281 451
282static int mmc_test_cleanup_verify(struct mmc_test_card *test)
283{
284 int ret, i;
285
286 ret = mmc_test_set_blksize(test, 512);
287 if (ret)
288 return ret;
289
290 memset(test->buffer, 0, BUFFER_SIZE);
291
292 for (i = 0;i < BUFFER_SIZE / 512;i++) {
293 ret = mmc_test_transfer(test, 1, test->buffer + i * 512,
294 i * 512, 1, 512);
295 if (ret)
296 return ret;
297 }
298
299 return 0;
300}
301
302/*******************************************************************/ 452/*******************************************************************/
303/* Tests */ 453/* Tests */
304/*******************************************************************/ 454/*******************************************************************/
@@ -314,12 +464,15 @@ struct mmc_test_case {
314static int mmc_test_basic_write(struct mmc_test_card *test) 464static int mmc_test_basic_write(struct mmc_test_card *test)
315{ 465{
316 int ret; 466 int ret;
467 struct scatterlist sg;
317 468
318 ret = mmc_test_set_blksize(test, 512); 469 ret = mmc_test_set_blksize(test, 512);
319 if (ret) 470 if (ret)
320 return ret; 471 return ret;
321 472
322 ret = mmc_test_transfer(test, 1, test->buffer, 0, 1, 512); 473 sg_init_one(&sg, test->buffer, 512);
474
475 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
323 if (ret) 476 if (ret)
324 return ret; 477 return ret;
325 478
@@ -329,12 +482,15 @@ static int mmc_test_basic_write(struct mmc_test_card *test)
329static int mmc_test_basic_read(struct mmc_test_card *test) 482static int mmc_test_basic_read(struct mmc_test_card *test)
330{ 483{
331 int ret; 484 int ret;
485 struct scatterlist sg;
332 486
333 ret = mmc_test_set_blksize(test, 512); 487 ret = mmc_test_set_blksize(test, 512);
334 if (ret) 488 if (ret)
335 return ret; 489 return ret;
336 490
337 ret = mmc_test_transfer(test, 0, test->buffer, 0, 1, 512); 491 sg_init_one(&sg, test->buffer, 512);
492
493 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
338 if (ret) 494 if (ret)
339 return ret; 495 return ret;
340 496
@@ -344,8 +500,11 @@ static int mmc_test_basic_read(struct mmc_test_card *test)
344static int mmc_test_verify_write(struct mmc_test_card *test) 500static int mmc_test_verify_write(struct mmc_test_card *test)
345{ 501{
346 int ret; 502 int ret;
503 struct scatterlist sg;
504
505 sg_init_one(&sg, test->buffer, 512);
347 506
348 ret = mmc_test_verified_transfer(test, 1, test->buffer, 0, 1, 512); 507 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
349 if (ret) 508 if (ret)
350 return ret; 509 return ret;
351 510
@@ -355,8 +514,11 @@ static int mmc_test_verify_write(struct mmc_test_card *test)
355static int mmc_test_verify_read(struct mmc_test_card *test) 514static int mmc_test_verify_read(struct mmc_test_card *test)
356{ 515{
357 int ret; 516 int ret;
517 struct scatterlist sg;
518
519 sg_init_one(&sg, test->buffer, 512);
358 520
359 ret = mmc_test_verified_transfer(test, 0, test->buffer, 0, 1, 512); 521 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
360 if (ret) 522 if (ret)
361 return ret; 523 return ret;
362 524
@@ -367,6 +529,7 @@ static int mmc_test_multi_write(struct mmc_test_card *test)
367{ 529{
368 int ret; 530 int ret;
369 unsigned int size; 531 unsigned int size;
532 struct scatterlist sg;
370 533
371 if (test->card->host->max_blk_count == 1) 534 if (test->card->host->max_blk_count == 1)
372 return RESULT_UNSUP_HOST; 535 return RESULT_UNSUP_HOST;
@@ -379,8 +542,9 @@ static int mmc_test_multi_write(struct mmc_test_card *test)
379 if (size < 1024) 542 if (size < 1024)
380 return RESULT_UNSUP_HOST; 543 return RESULT_UNSUP_HOST;
381 544
382 ret = mmc_test_verified_transfer(test, 1, test->buffer, 0, 545 sg_init_one(&sg, test->buffer, size);
383 size / 512, 512); 546
547 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
384 if (ret) 548 if (ret)
385 return ret; 549 return ret;
386 550
@@ -391,6 +555,7 @@ static int mmc_test_multi_read(struct mmc_test_card *test)
391{ 555{
392 int ret; 556 int ret;
393 unsigned int size; 557 unsigned int size;
558 struct scatterlist sg;
394 559
395 if (test->card->host->max_blk_count == 1) 560 if (test->card->host->max_blk_count == 1)
396 return RESULT_UNSUP_HOST; 561 return RESULT_UNSUP_HOST;
@@ -403,8 +568,9 @@ static int mmc_test_multi_read(struct mmc_test_card *test)
403 if (size < 1024) 568 if (size < 1024)
404 return RESULT_UNSUP_HOST; 569 return RESULT_UNSUP_HOST;
405 570
406 ret = mmc_test_verified_transfer(test, 0, test->buffer, 0, 571 sg_init_one(&sg, test->buffer, size);
407 size / 512, 512); 572
573 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
408 if (ret) 574 if (ret)
409 return ret; 575 return ret;
410 576
@@ -414,13 +580,14 @@ static int mmc_test_multi_read(struct mmc_test_card *test)
414static int mmc_test_pow2_write(struct mmc_test_card *test) 580static int mmc_test_pow2_write(struct mmc_test_card *test)
415{ 581{
416 int ret, i; 582 int ret, i;
583 struct scatterlist sg;
417 584
418 if (!test->card->csd.write_partial) 585 if (!test->card->csd.write_partial)
419 return RESULT_UNSUP_CARD; 586 return RESULT_UNSUP_CARD;
420 587
421 for (i = 1; i < 512;i <<= 1) { 588 for (i = 1; i < 512;i <<= 1) {
422 ret = mmc_test_verified_transfer(test, 1, 589 sg_init_one(&sg, test->buffer, i);
423 test->buffer, 0, 1, i); 590 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
424 if (ret) 591 if (ret)
425 return ret; 592 return ret;
426 } 593 }
@@ -431,13 +598,14 @@ static int mmc_test_pow2_write(struct mmc_test_card *test)
431static int mmc_test_pow2_read(struct mmc_test_card *test) 598static int mmc_test_pow2_read(struct mmc_test_card *test)
432{ 599{
433 int ret, i; 600 int ret, i;
601 struct scatterlist sg;
434 602
435 if (!test->card->csd.read_partial) 603 if (!test->card->csd.read_partial)
436 return RESULT_UNSUP_CARD; 604 return RESULT_UNSUP_CARD;
437 605
438 for (i = 1; i < 512;i <<= 1) { 606 for (i = 1; i < 512;i <<= 1) {
439 ret = mmc_test_verified_transfer(test, 0, 607 sg_init_one(&sg, test->buffer, i);
440 test->buffer, 0, 1, i); 608 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
441 if (ret) 609 if (ret)
442 return ret; 610 return ret;
443 } 611 }
@@ -448,13 +616,14 @@ static int mmc_test_pow2_read(struct mmc_test_card *test)
448static int mmc_test_weird_write(struct mmc_test_card *test) 616static int mmc_test_weird_write(struct mmc_test_card *test)
449{ 617{
450 int ret, i; 618 int ret, i;
619 struct scatterlist sg;
451 620
452 if (!test->card->csd.write_partial) 621 if (!test->card->csd.write_partial)
453 return RESULT_UNSUP_CARD; 622 return RESULT_UNSUP_CARD;
454 623
455 for (i = 3; i < 512;i += 7) { 624 for (i = 3; i < 512;i += 7) {
456 ret = mmc_test_verified_transfer(test, 1, 625 sg_init_one(&sg, test->buffer, i);
457 test->buffer, 0, 1, i); 626 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
458 if (ret) 627 if (ret)
459 return ret; 628 return ret;
460 } 629 }
@@ -465,13 +634,14 @@ static int mmc_test_weird_write(struct mmc_test_card *test)
465static int mmc_test_weird_read(struct mmc_test_card *test) 634static int mmc_test_weird_read(struct mmc_test_card *test)
466{ 635{
467 int ret, i; 636 int ret, i;
637 struct scatterlist sg;
468 638
469 if (!test->card->csd.read_partial) 639 if (!test->card->csd.read_partial)
470 return RESULT_UNSUP_CARD; 640 return RESULT_UNSUP_CARD;
471 641
472 for (i = 3; i < 512;i += 7) { 642 for (i = 3; i < 512;i += 7) {
473 ret = mmc_test_verified_transfer(test, 0, 643 sg_init_one(&sg, test->buffer, i);
474 test->buffer, 0, 1, i); 644 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
475 if (ret) 645 if (ret)
476 return ret; 646 return ret;
477 } 647 }
@@ -482,10 +652,11 @@ static int mmc_test_weird_read(struct mmc_test_card *test)
482static int mmc_test_align_write(struct mmc_test_card *test) 652static int mmc_test_align_write(struct mmc_test_card *test)
483{ 653{
484 int ret, i; 654 int ret, i;
655 struct scatterlist sg;
485 656
486 for (i = 1;i < 4;i++) { 657 for (i = 1;i < 4;i++) {
487 ret = mmc_test_verified_transfer(test, 1, test->buffer + i, 658 sg_init_one(&sg, test->buffer + i, 512);
488 0, 1, 512); 659 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
489 if (ret) 660 if (ret)
490 return ret; 661 return ret;
491 } 662 }
@@ -496,10 +667,11 @@ static int mmc_test_align_write(struct mmc_test_card *test)
496static int mmc_test_align_read(struct mmc_test_card *test) 667static int mmc_test_align_read(struct mmc_test_card *test)
497{ 668{
498 int ret, i; 669 int ret, i;
670 struct scatterlist sg;
499 671
500 for (i = 1;i < 4;i++) { 672 for (i = 1;i < 4;i++) {
501 ret = mmc_test_verified_transfer(test, 0, test->buffer + i, 673 sg_init_one(&sg, test->buffer + i, 512);
502 0, 1, 512); 674 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
503 if (ret) 675 if (ret)
504 return ret; 676 return ret;
505 } 677 }
@@ -511,6 +683,7 @@ static int mmc_test_align_multi_write(struct mmc_test_card *test)
511{ 683{
512 int ret, i; 684 int ret, i;
513 unsigned int size; 685 unsigned int size;
686 struct scatterlist sg;
514 687
515 if (test->card->host->max_blk_count == 1) 688 if (test->card->host->max_blk_count == 1)
516 return RESULT_UNSUP_HOST; 689 return RESULT_UNSUP_HOST;
@@ -524,8 +697,8 @@ static int mmc_test_align_multi_write(struct mmc_test_card *test)
524 return RESULT_UNSUP_HOST; 697 return RESULT_UNSUP_HOST;
525 698
526 for (i = 1;i < 4;i++) { 699 for (i = 1;i < 4;i++) {
527 ret = mmc_test_verified_transfer(test, 1, test->buffer + i, 700 sg_init_one(&sg, test->buffer + i, size);
528 0, size / 512, 512); 701 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
529 if (ret) 702 if (ret)
530 return ret; 703 return ret;
531 } 704 }
@@ -537,6 +710,7 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test)
537{ 710{
538 int ret, i; 711 int ret, i;
539 unsigned int size; 712 unsigned int size;
713 struct scatterlist sg;
540 714
541 if (test->card->host->max_blk_count == 1) 715 if (test->card->host->max_blk_count == 1)
542 return RESULT_UNSUP_HOST; 716 return RESULT_UNSUP_HOST;
@@ -550,8 +724,8 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test)
550 return RESULT_UNSUP_HOST; 724 return RESULT_UNSUP_HOST;
551 725
552 for (i = 1;i < 4;i++) { 726 for (i = 1;i < 4;i++) {
553 ret = mmc_test_verified_transfer(test, 0, test->buffer + i, 727 sg_init_one(&sg, test->buffer + i, size);
554 0, size / 512, 512); 728 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
555 if (ret) 729 if (ret)
556 return ret; 730 return ret;
557 } 731 }
@@ -567,7 +741,7 @@ static int mmc_test_xfersize_write(struct mmc_test_card *test)
567 if (ret) 741 if (ret)
568 return ret; 742 return ret;
569 743
570 ret = __mmc_test_transfer(test, 1, 1, test->buffer, 0, 1, 512); 744 ret = mmc_test_broken_transfer(test, 1, 512, 1);
571 if (ret) 745 if (ret)
572 return ret; 746 return ret;
573 747
@@ -582,7 +756,7 @@ static int mmc_test_xfersize_read(struct mmc_test_card *test)
582 if (ret) 756 if (ret)
583 return ret; 757 return ret;
584 758
585 ret = __mmc_test_transfer(test, 0, 1, test->buffer, 0, 1, 512); 759 ret = mmc_test_broken_transfer(test, 1, 512, 0);
586 if (ret) 760 if (ret)
587 return ret; 761 return ret;
588 762
@@ -600,7 +774,7 @@ static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
600 if (ret) 774 if (ret)
601 return ret; 775 return ret;
602 776
603 ret = __mmc_test_transfer(test, 1, 1, test->buffer, 0, 2, 512); 777 ret = mmc_test_broken_transfer(test, 2, 512, 1);
604 if (ret) 778 if (ret)
605 return ret; 779 return ret;
606 780
@@ -618,7 +792,7 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
618 if (ret) 792 if (ret)
619 return ret; 793 return ret;
620 794
621 ret = __mmc_test_transfer(test, 0, 1, test->buffer, 0, 2, 512); 795 ret = mmc_test_broken_transfer(test, 2, 512, 0);
622 if (ret) 796 if (ret)
623 return ret; 797 return ret;
624 798
@@ -638,86 +812,86 @@ static const struct mmc_test_case mmc_test_cases[] = {
638 812
639 { 813 {
640 .name = "Basic write (with data verification)", 814 .name = "Basic write (with data verification)",
641 .prepare = mmc_test_prepare_verify_write, 815 .prepare = mmc_test_prepare_write,
642 .run = mmc_test_verify_write, 816 .run = mmc_test_verify_write,
643 .cleanup = mmc_test_cleanup_verify, 817 .cleanup = mmc_test_cleanup,
644 }, 818 },
645 819
646 { 820 {
647 .name = "Basic read (with data verification)", 821 .name = "Basic read (with data verification)",
648 .prepare = mmc_test_prepare_verify_read, 822 .prepare = mmc_test_prepare_read,
649 .run = mmc_test_verify_read, 823 .run = mmc_test_verify_read,
650 .cleanup = mmc_test_cleanup_verify, 824 .cleanup = mmc_test_cleanup,
651 }, 825 },
652 826
653 { 827 {
654 .name = "Multi-block write", 828 .name = "Multi-block write",
655 .prepare = mmc_test_prepare_verify_write, 829 .prepare = mmc_test_prepare_write,
656 .run = mmc_test_multi_write, 830 .run = mmc_test_multi_write,
657 .cleanup = mmc_test_cleanup_verify, 831 .cleanup = mmc_test_cleanup,
658 }, 832 },
659 833
660 { 834 {
661 .name = "Multi-block read", 835 .name = "Multi-block read",
662 .prepare = mmc_test_prepare_verify_read, 836 .prepare = mmc_test_prepare_read,
663 .run = mmc_test_multi_read, 837 .run = mmc_test_multi_read,
664 .cleanup = mmc_test_cleanup_verify, 838 .cleanup = mmc_test_cleanup,
665 }, 839 },
666 840
667 { 841 {
668 .name = "Power of two block writes", 842 .name = "Power of two block writes",
669 .prepare = mmc_test_prepare_verify_write, 843 .prepare = mmc_test_prepare_write,
670 .run = mmc_test_pow2_write, 844 .run = mmc_test_pow2_write,
671 .cleanup = mmc_test_cleanup_verify, 845 .cleanup = mmc_test_cleanup,
672 }, 846 },
673 847
674 { 848 {
675 .name = "Power of two block reads", 849 .name = "Power of two block reads",
676 .prepare = mmc_test_prepare_verify_read, 850 .prepare = mmc_test_prepare_read,
677 .run = mmc_test_pow2_read, 851 .run = mmc_test_pow2_read,
678 .cleanup = mmc_test_cleanup_verify, 852 .cleanup = mmc_test_cleanup,
679 }, 853 },
680 854
681 { 855 {
682 .name = "Weird sized block writes", 856 .name = "Weird sized block writes",
683 .prepare = mmc_test_prepare_verify_write, 857 .prepare = mmc_test_prepare_write,
684 .run = mmc_test_weird_write, 858 .run = mmc_test_weird_write,
685 .cleanup = mmc_test_cleanup_verify, 859 .cleanup = mmc_test_cleanup,
686 }, 860 },
687 861
688 { 862 {
689 .name = "Weird sized block reads", 863 .name = "Weird sized block reads",
690 .prepare = mmc_test_prepare_verify_read, 864 .prepare = mmc_test_prepare_read,
691 .run = mmc_test_weird_read, 865 .run = mmc_test_weird_read,
692 .cleanup = mmc_test_cleanup_verify, 866 .cleanup = mmc_test_cleanup,
693 }, 867 },
694 868
695 { 869 {
696 .name = "Badly aligned write", 870 .name = "Badly aligned write",
697 .prepare = mmc_test_prepare_verify_write, 871 .prepare = mmc_test_prepare_write,
698 .run = mmc_test_align_write, 872 .run = mmc_test_align_write,
699 .cleanup = mmc_test_cleanup_verify, 873 .cleanup = mmc_test_cleanup,
700 }, 874 },
701 875
702 { 876 {
703 .name = "Badly aligned read", 877 .name = "Badly aligned read",
704 .prepare = mmc_test_prepare_verify_read, 878 .prepare = mmc_test_prepare_read,
705 .run = mmc_test_align_read, 879 .run = mmc_test_align_read,
706 .cleanup = mmc_test_cleanup_verify, 880 .cleanup = mmc_test_cleanup,
707 }, 881 },
708 882
709 { 883 {
710 .name = "Badly aligned multi-block write", 884 .name = "Badly aligned multi-block write",
711 .prepare = mmc_test_prepare_verify_write, 885 .prepare = mmc_test_prepare_write,
712 .run = mmc_test_align_multi_write, 886 .run = mmc_test_align_multi_write,
713 .cleanup = mmc_test_cleanup_verify, 887 .cleanup = mmc_test_cleanup,
714 }, 888 },
715 889
716 { 890 {
717 .name = "Badly aligned multi-block read", 891 .name = "Badly aligned multi-block read",
718 .prepare = mmc_test_prepare_verify_read, 892 .prepare = mmc_test_prepare_read,
719 .run = mmc_test_align_multi_read, 893 .run = mmc_test_align_multi_read,
720 .cleanup = mmc_test_cleanup_verify, 894 .cleanup = mmc_test_cleanup,
721 }, 895 },
722 896
723 { 897 {
@@ -743,7 +917,7 @@ static const struct mmc_test_case mmc_test_cases[] = {
743 917
744static struct mutex mmc_test_lock; 918static struct mutex mmc_test_lock;
745 919
746static void mmc_test_run(struct mmc_test_card *test) 920static void mmc_test_run(struct mmc_test_card *test, int testcase)
747{ 921{
748 int i, ret; 922 int i, ret;
749 923
@@ -753,6 +927,9 @@ static void mmc_test_run(struct mmc_test_card *test)
753 mmc_claim_host(test->card->host); 927 mmc_claim_host(test->card->host);
754 928
755 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { 929 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
930 if (testcase && ((i + 1) != testcase))
931 continue;
932
756 printk(KERN_INFO "%s: Test case %d. %s...\n", 933 printk(KERN_INFO "%s: Test case %d. %s...\n",
757 mmc_hostname(test->card->host), i + 1, 934 mmc_hostname(test->card->host), i + 1,
758 mmc_test_cases[i].name); 935 mmc_test_cases[i].name);
@@ -824,9 +1001,12 @@ static ssize_t mmc_test_store(struct device *dev,
824{ 1001{
825 struct mmc_card *card; 1002 struct mmc_card *card;
826 struct mmc_test_card *test; 1003 struct mmc_test_card *test;
1004 int testcase;
827 1005
828 card = container_of(dev, struct mmc_card, dev); 1006 card = container_of(dev, struct mmc_card, dev);
829 1007
1008 testcase = simple_strtol(buf, NULL, 10);
1009
830 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); 1010 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
831 if (!test) 1011 if (!test)
832 return -ENOMEM; 1012 return -ENOMEM;
@@ -836,7 +1016,7 @@ static ssize_t mmc_test_store(struct device *dev,
836 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 1016 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
837 if (test->buffer) { 1017 if (test->buffer) {
838 mutex_lock(&mmc_test_lock); 1018 mutex_lock(&mmc_test_lock);
839 mmc_test_run(test); 1019 mmc_test_run(test, testcase);
840 mutex_unlock(&mmc_test_lock); 1020 mutex_unlock(&mmc_test_lock);
841 } 1021 }
842 1022
@@ -852,6 +1032,9 @@ static int mmc_test_probe(struct mmc_card *card)
852{ 1032{
853 int ret; 1033 int ret;
854 1034
1035 if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD))
1036 return -ENODEV;
1037
855 mutex_init(&mmc_test_lock); 1038 mutex_init(&mmc_test_lock);
856 1039
857 ret = device_create_file(&card->dev, &dev_attr_test); 1040 ret = device_create_file(&card->dev, &dev_attr_test);
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index eeea84c309e6..78ad48718ab0 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -885,12 +885,14 @@ static void sdio_uart_set_termios(struct tty_struct *tty, struct ktermios *old_t
885 sdio_uart_release_func(port); 885 sdio_uart_release_func(port);
886} 886}
887 887
888static void sdio_uart_break_ctl(struct tty_struct *tty, int break_state) 888static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
889{ 889{
890 struct sdio_uart_port *port = tty->driver_data; 890 struct sdio_uart_port *port = tty->driver_data;
891 int result;
891 892
892 if (sdio_uart_claim_func(port) != 0) 893 result = sdio_uart_claim_func(port);
893 return; 894 if (result != 0)
895 return result;
894 896
895 if (break_state == -1) 897 if (break_state == -1)
896 port->lcr |= UART_LCR_SBC; 898 port->lcr |= UART_LCR_SBC;
@@ -899,6 +901,7 @@ static void sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
899 sdio_out(port, UART_LCR, port->lcr); 901 sdio_out(port, UART_LCR, port->lcr);
900 902
901 sdio_uart_release_func(port); 903 sdio_uart_release_func(port);
904 return 0;
902} 905}
903 906
904static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file) 907static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 01ced4c5a61d..3ee5b8c3b5ce 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. 5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. 6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. 7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
@@ -295,6 +295,33 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
295EXPORT_SYMBOL(mmc_set_data_timeout); 295EXPORT_SYMBOL(mmc_set_data_timeout);
296 296
297/** 297/**
298 * mmc_align_data_size - pads a transfer size to a more optimal value
299 * @card: the MMC card associated with the data transfer
300 * @sz: original transfer size
301 *
302 * Pads the original data size with a number of extra bytes in
303 * order to avoid controller bugs and/or performance hits
304 * (e.g. some controllers revert to PIO for certain sizes).
305 *
306 * Returns the improved size, which might be unmodified.
307 *
308 * Note that this function is only relevant when issuing a
309 * single scatter gather entry.
310 */
311unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
312{
313 /*
314 * FIXME: We don't have a system for the controller to tell
315 * the core about its problems yet, so for now we just 32-bit
316 * align the size.
317 */
318 sz = ((sz + 3) / 4) * 4;
319
320 return sz;
321}
322EXPORT_SYMBOL(mmc_align_data_size);
323
324/**
298 * __mmc_claim_host - exclusively claim a host 325 * __mmc_claim_host - exclusively claim a host
299 * @host: mmc host to claim 326 * @host: mmc host to claim
300 * @abort: whether or not the operation should be aborted 327 * @abort: whether or not the operation should be aborted
@@ -638,6 +665,9 @@ void mmc_rescan(struct work_struct *work)
638 */ 665 */
639 mmc_bus_put(host); 666 mmc_bus_put(host);
640 667
668 if (host->ops->get_cd && host->ops->get_cd(host) == 0)
669 goto out;
670
641 mmc_claim_host(host); 671 mmc_claim_host(host);
642 672
643 mmc_power_up(host); 673 mmc_power_up(host);
@@ -652,7 +682,7 @@ void mmc_rescan(struct work_struct *work)
652 if (!err) { 682 if (!err) {
653 if (mmc_attach_sdio(host, ocr)) 683 if (mmc_attach_sdio(host, ocr))
654 mmc_power_off(host); 684 mmc_power_off(host);
655 return; 685 goto out;
656 } 686 }
657 687
658 /* 688 /*
@@ -662,7 +692,7 @@ void mmc_rescan(struct work_struct *work)
662 if (!err) { 692 if (!err) {
663 if (mmc_attach_sd(host, ocr)) 693 if (mmc_attach_sd(host, ocr))
664 mmc_power_off(host); 694 mmc_power_off(host);
665 return; 695 goto out;
666 } 696 }
667 697
668 /* 698 /*
@@ -672,7 +702,7 @@ void mmc_rescan(struct work_struct *work)
672 if (!err) { 702 if (!err) {
673 if (mmc_attach_mmc(host, ocr)) 703 if (mmc_attach_mmc(host, ocr))
674 mmc_power_off(host); 704 mmc_power_off(host);
675 return; 705 goto out;
676 } 706 }
677 707
678 mmc_release_host(host); 708 mmc_release_host(host);
@@ -683,6 +713,9 @@ void mmc_rescan(struct work_struct *work)
683 713
684 mmc_bus_put(host); 714 mmc_bus_put(host);
685 } 715 }
716out:
717 if (host->caps & MMC_CAP_NEEDS_POLL)
718 mmc_schedule_delayed_work(&host->detect, HZ);
686} 719}
687 720
688void mmc_start_host(struct mmc_host *host) 721void mmc_start_host(struct mmc_host *host)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 3da29eef8f7d..fdd7c760be8c 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -288,7 +288,7 @@ static struct device_type mmc_type = {
288/* 288/*
289 * Handle the detection and initialisation of a card. 289 * Handle the detection and initialisation of a card.
290 * 290 *
291 * In the case of a resume, "curcard" will contain the card 291 * In the case of a resume, "oldcard" will contain the card
292 * we're trying to reinitialise. 292 * we're trying to reinitialise.
293 */ 293 */
294static int mmc_init_card(struct mmc_host *host, u32 ocr, 294static int mmc_init_card(struct mmc_host *host, u32 ocr,
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 7ef3b15c5e3d..26fc098d77cd 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -326,7 +326,7 @@ static struct device_type sd_type = {
326/* 326/*
327 * Handle the detection and initialisation of a card. 327 * Handle the detection and initialisation of a card.
328 * 328 *
329 * In the case of a resume, "curcard" will contain the card 329 * In the case of a resume, "oldcard" will contain the card
330 * we're trying to reinitialise. 330 * we're trying to reinitialise.
331 */ 331 */
332static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, 332static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
@@ -494,13 +494,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
494 * Check if read-only switch is active. 494 * Check if read-only switch is active.
495 */ 495 */
496 if (!oldcard) { 496 if (!oldcard) {
497 if (!host->ops->get_ro) { 497 if (!host->ops->get_ro || host->ops->get_ro(host) < 0) {
498 printk(KERN_WARNING "%s: host does not " 498 printk(KERN_WARNING "%s: host does not "
499 "support reading read-only " 499 "support reading read-only "
500 "switch. assuming write-enable.\n", 500 "switch. assuming write-enable.\n",
501 mmc_hostname(host)); 501 mmc_hostname(host));
502 } else { 502 } else {
503 if (host->ops->get_ro(host)) 503 if (host->ops->get_ro(host) > 0)
504 mmc_card_set_readonly(card); 504 mmc_card_set_readonly(card);
505 } 505 }
506 } 506 }
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index d5e51b1c7b3f..956bd7677502 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -129,6 +129,12 @@ static int cistpl_funce_func(struct sdio_func *func,
129 /* TPLFE_MAX_BLK_SIZE */ 129 /* TPLFE_MAX_BLK_SIZE */
130 func->max_blksize = buf[12] | (buf[13] << 8); 130 func->max_blksize = buf[12] | (buf[13] << 8);
131 131
132 /* TPLFE_ENABLE_TIMEOUT_VAL, present in ver 1.1 and above */
133 if (vsn > SDIO_SDIO_REV_1_00)
134 func->enable_timeout = (buf[28] | (buf[29] << 8)) * 10;
135 else
136 func->enable_timeout = jiffies_to_msecs(HZ);
137
132 return 0; 138 return 0;
133} 139}
134 140
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 625b92ce9cef..f61fc2d4cd0a 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/mmc/core/sdio_io.c 2 * linux/drivers/mmc/core/sdio_io.c
3 * 3 *
4 * Copyright 2007 Pierre Ossman 4 * Copyright 2007-2008 Pierre Ossman
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -76,11 +76,7 @@ int sdio_enable_func(struct sdio_func *func)
76 if (ret) 76 if (ret)
77 goto err; 77 goto err;
78 78
79 /* 79 timeout = jiffies + msecs_to_jiffies(func->enable_timeout);
80 * FIXME: This should timeout based on information in the CIS,
81 * but we don't have card to parse that yet.
82 */
83 timeout = jiffies + HZ;
84 80
85 while (1) { 81 while (1) {
86 ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IORx, 0, &reg); 82 ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IORx, 0, &reg);
@@ -167,10 +163,8 @@ int sdio_set_block_size(struct sdio_func *func, unsigned blksz)
167 return -EINVAL; 163 return -EINVAL;
168 164
169 if (blksz == 0) { 165 if (blksz == 0) {
170 blksz = min(min( 166 blksz = min(func->max_blksize, func->card->host->max_blk_size);
171 func->max_blksize, 167 blksz = min(blksz, 512u);
172 func->card->host->max_blk_size),
173 512u);
174 } 168 }
175 169
176 ret = mmc_io_rw_direct(func->card, 1, 0, 170 ret = mmc_io_rw_direct(func->card, 1, 0,
@@ -186,9 +180,116 @@ int sdio_set_block_size(struct sdio_func *func, unsigned blksz)
186 func->cur_blksize = blksz; 180 func->cur_blksize = blksz;
187 return 0; 181 return 0;
188} 182}
189
190EXPORT_SYMBOL_GPL(sdio_set_block_size); 183EXPORT_SYMBOL_GPL(sdio_set_block_size);
191 184
185/*
186 * Calculate the maximum byte mode transfer size
187 */
188static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
189{
190 unsigned mval = min(func->card->host->max_seg_size,
191 func->card->host->max_blk_size);
192 mval = min(mval, func->max_blksize);
193 return min(mval, 512u); /* maximum size for byte mode */
194}
195
196/**
197 * sdio_align_size - pads a transfer size to a more optimal value
198 * @func: SDIO function
199 * @sz: original transfer size
200 *
201 * Pads the original data size with a number of extra bytes in
202 * order to avoid controller bugs and/or performance hits
203 * (e.g. some controllers revert to PIO for certain sizes).
204 *
205 * If possible, it will also adjust the size so that it can be
206 * handled in just a single request.
207 *
208 * Returns the improved size, which might be unmodified.
209 */
210unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
211{
212 unsigned int orig_sz;
213 unsigned int blk_sz, byte_sz;
214 unsigned chunk_sz;
215
216 orig_sz = sz;
217
218 /*
219 * Do a first check with the controller, in case it
220 * wants to increase the size up to a point where it
221 * might need more than one block.
222 */
223 sz = mmc_align_data_size(func->card, sz);
224
225 /*
226 * If we can still do this with just a byte transfer, then
227 * we're done.
228 */
229 if (sz <= sdio_max_byte_size(func))
230 return sz;
231
232 if (func->card->cccr.multi_block) {
233 /*
234 * Check if the transfer is already block aligned
235 */
236 if ((sz % func->cur_blksize) == 0)
237 return sz;
238
239 /*
240 * Realign it so that it can be done with one request,
241 * and recheck if the controller still likes it.
242 */
243 blk_sz = ((sz + func->cur_blksize - 1) /
244 func->cur_blksize) * func->cur_blksize;
245 blk_sz = mmc_align_data_size(func->card, blk_sz);
246
247 /*
248 * This value is only good if it is still just
249 * one request.
250 */
251 if ((blk_sz % func->cur_blksize) == 0)
252 return blk_sz;
253
254 /*
255 * We failed to do one request, but at least try to
256 * pad the remainder properly.
257 */
258 byte_sz = mmc_align_data_size(func->card,
259 sz % func->cur_blksize);
260 if (byte_sz <= sdio_max_byte_size(func)) {
261 blk_sz = sz / func->cur_blksize;
262 return blk_sz * func->cur_blksize + byte_sz;
263 }
264 } else {
265 /*
266 * We need multiple requests, so first check that the
267 * controller can handle the chunk size;
268 */
269 chunk_sz = mmc_align_data_size(func->card,
270 sdio_max_byte_size(func));
271 if (chunk_sz == sdio_max_byte_size(func)) {
272 /*
273 * Fix up the size of the remainder (if any)
274 */
275 byte_sz = orig_sz % chunk_sz;
276 if (byte_sz) {
277 byte_sz = mmc_align_data_size(func->card,
278 byte_sz);
279 }
280
281 return (orig_sz / chunk_sz) * chunk_sz + byte_sz;
282 }
283 }
284
285 /*
286 * The controller is simply incapable of transferring the size
287 * we want in decent manner, so just return the original size.
288 */
289 return orig_sz;
290}
291EXPORT_SYMBOL_GPL(sdio_align_size);
292
192/* Split an arbitrarily sized data transfer into several 293/* Split an arbitrarily sized data transfer into several
193 * IO_RW_EXTENDED commands. */ 294 * IO_RW_EXTENDED commands. */
194static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, 295static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
@@ -199,14 +300,13 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
199 int ret; 300 int ret;
200 301
201 /* Do the bulk of the transfer using block mode (if supported). */ 302 /* Do the bulk of the transfer using block mode (if supported). */
202 if (func->card->cccr.multi_block) { 303 if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) {
203 /* Blocks per command is limited by host count, host transfer 304 /* Blocks per command is limited by host count, host transfer
204 * size (we only use a single sg entry) and the maximum for 305 * size (we only use a single sg entry) and the maximum for
205 * IO_RW_EXTENDED of 511 blocks. */ 306 * IO_RW_EXTENDED of 511 blocks. */
206 max_blocks = min(min( 307 max_blocks = min(func->card->host->max_blk_count,
207 func->card->host->max_blk_count, 308 func->card->host->max_seg_size / func->cur_blksize);
208 func->card->host->max_seg_size / func->cur_blksize), 309 max_blocks = min(max_blocks, 511u);
209 511u);
210 310
211 while (remainder > func->cur_blksize) { 311 while (remainder > func->cur_blksize) {
212 unsigned blocks; 312 unsigned blocks;
@@ -231,11 +331,7 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
231 331
232 /* Write the remainder using byte mode. */ 332 /* Write the remainder using byte mode. */
233 while (remainder > 0) { 333 while (remainder > 0) {
234 size = remainder; 334 size = min(remainder, sdio_max_byte_size(func));
235 if (size > func->cur_blksize)
236 size = func->cur_blksize;
237 if (size > 512)
238 size = 512; /* maximum size for byte mode */
239 335
240 ret = mmc_io_rw_extended(func->card, write, func->num, addr, 336 ret = mmc_io_rw_extended(func->card, write, func->num, addr,
241 incr_addr, buf, 1, size); 337 incr_addr, buf, 1, size);
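To see what the rewritten helper does with a large request, the split can be reproduced in ordinary C: block-mode chunks capped by the host and IO_RW_EXTENDED limits, followed by a byte-mode tail. The limits below are example values, not those of any specific controller.

#include <stdio.h>

/* Example limits; real drivers get these from the host and the card. */
#define CUR_BLKSIZE	512u
#define MAX_BLOCKS	511u	/* IO_RW_EXTENDED block-count ceiling */
#define MAX_BYTE_SIZE	512u	/* stand-in for sdio_max_byte_size() */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int remainder = 300000;	/* bytes requested */

	/* Bulk of the transfer in block mode, as in sdio_io_rw_ext_helper() */
	while (remainder > CUR_BLKSIZE) {
		unsigned int blocks = min_u(remainder / CUR_BLKSIZE, MAX_BLOCKS);
		unsigned int size = blocks * CUR_BLKSIZE;

		printf("block mode: %u blocks (%u bytes)\n", blocks, size);
		remainder -= size;
	}

	/* Remainder in byte mode */
	while (remainder > 0) {
		unsigned int size = min_u(remainder, MAX_BYTE_SIZE);

		printf("byte mode: %u bytes\n", size);
		remainder -= size;
	}
	return 0;
}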
@@ -260,11 +356,10 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
260 * function. If there is a problem reading the address, 0xff 356 * function. If there is a problem reading the address, 0xff
261 * is returned and @err_ret will contain the error code. 357 * is returned and @err_ret will contain the error code.
262 */ 358 */
263unsigned char sdio_readb(struct sdio_func *func, unsigned int addr, 359u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
264 int *err_ret)
265{ 360{
266 int ret; 361 int ret;
267 unsigned char val; 362 u8 val;
268 363
269 BUG_ON(!func); 364 BUG_ON(!func);
270 365
@@ -293,8 +388,7 @@ EXPORT_SYMBOL_GPL(sdio_readb);
293 * function. @err_ret will contain the status of the actual 388 * function. @err_ret will contain the status of the actual
294 * transfer. 389 * transfer.
295 */ 390 */
296void sdio_writeb(struct sdio_func *func, unsigned char b, unsigned int addr, 391void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
297 int *err_ret)
298{ 392{
299 int ret; 393 int ret;
300 394
@@ -355,7 +449,6 @@ int sdio_readsb(struct sdio_func *func, void *dst, unsigned int addr,
355{ 449{
356 return sdio_io_rw_ext_helper(func, 0, addr, 0, dst, count); 450 return sdio_io_rw_ext_helper(func, 0, addr, 0, dst, count);
357} 451}
358
359EXPORT_SYMBOL_GPL(sdio_readsb); 452EXPORT_SYMBOL_GPL(sdio_readsb);
360 453
361/** 454/**
@@ -385,8 +478,7 @@ EXPORT_SYMBOL_GPL(sdio_writesb);
385 * function. If there is a problem reading the address, 0xffff 478 * function. If there is a problem reading the address, 0xffff
386 * is returned and @err_ret will contain the error code. 479 * is returned and @err_ret will contain the error code.
387 */ 480 */
388unsigned short sdio_readw(struct sdio_func *func, unsigned int addr, 481u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret)
389 int *err_ret)
390{ 482{
391 int ret; 483 int ret;
392 484
@@ -400,7 +492,7 @@ unsigned short sdio_readw(struct sdio_func *func, unsigned int addr,
400 return 0xFFFF; 492 return 0xFFFF;
401 } 493 }
402 494
403 return le16_to_cpu(*(u16*)func->tmpbuf); 495 return le16_to_cpup((__le16 *)func->tmpbuf);
404} 496}
405EXPORT_SYMBOL_GPL(sdio_readw); 497EXPORT_SYMBOL_GPL(sdio_readw);
406 498
@@ -415,12 +507,11 @@ EXPORT_SYMBOL_GPL(sdio_readw);
415 * function. @err_ret will contain the status of the actual 507 * function. @err_ret will contain the status of the actual
416 * transfer. 508 * transfer.
417 */ 509 */
418void sdio_writew(struct sdio_func *func, unsigned short b, unsigned int addr, 510void sdio_writew(struct sdio_func *func, u16 b, unsigned int addr, int *err_ret)
419 int *err_ret)
420{ 511{
421 int ret; 512 int ret;
422 513
423 *(u16*)func->tmpbuf = cpu_to_le16(b); 514 *(__le16 *)func->tmpbuf = cpu_to_le16(b);
424 515
425 ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 2); 516 ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 2);
426 if (err_ret) 517 if (err_ret)
@@ -439,8 +530,7 @@ EXPORT_SYMBOL_GPL(sdio_writew);
439 * 0xffffffff is returned and @err_ret will contain the error 530 * 0xffffffff is returned and @err_ret will contain the error
440 * code. 531 * code.
441 */ 532 */
442unsigned long sdio_readl(struct sdio_func *func, unsigned int addr, 533u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret)
443 int *err_ret)
444{ 534{
445 int ret; 535 int ret;
446 536
@@ -454,7 +544,7 @@ unsigned long sdio_readl(struct sdio_func *func, unsigned int addr,
454 return 0xFFFFFFFF; 544 return 0xFFFFFFFF;
455 } 545 }
456 546
457 return le32_to_cpu(*(u32*)func->tmpbuf); 547 return le32_to_cpup((__le32 *)func->tmpbuf);
458} 548}
459EXPORT_SYMBOL_GPL(sdio_readl); 549EXPORT_SYMBOL_GPL(sdio_readl);
460 550
@@ -469,12 +559,11 @@ EXPORT_SYMBOL_GPL(sdio_readl);
469 * function. @err_ret will contain the status of the actual 559 * function. @err_ret will contain the status of the actual
470 * transfer. 560 * transfer.
471 */ 561 */
472void sdio_writel(struct sdio_func *func, unsigned long b, unsigned int addr, 562void sdio_writel(struct sdio_func *func, u32 b, unsigned int addr, int *err_ret)
473 int *err_ret)
474{ 563{
475 int ret; 564 int ret;
476 565
477 *(u32*)func->tmpbuf = cpu_to_le32(b); 566 *(__le32 *)func->tmpbuf = cpu_to_le32(b);
478 567
479 ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 4); 568 ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 4);
480 if (err_ret) 569 if (err_ret)
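With the accessors now typed as u8/u16/u32, a caller checks the status written through err_ret after each access; every accessor stores its result there, 0 on success. The sketch below talks to an invented device with made-up register offsets.

#include <linux/types.h>
#include <linux/mmc/sdio_func.h>

#define EXAMPLE_REG_CTRL	0x10	/* invented offsets, for illustration only */
#define EXAMPLE_REG_STATUS	0x14

static int example_reset_device(struct sdio_func *func)
{
	u32 status = 0;
	int err = 0;

	sdio_claim_host(func);

	sdio_writel(func, 0x1, EXAMPLE_REG_CTRL, &err);
	if (!err)
		status = sdio_readl(func, EXAMPLE_REG_STATUS, &err);

	sdio_release_host(func);

	if (err)
		return err;

	return (status & 0x1) ? 0 : -EIO;
}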
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index dead61754ad7..dc6f2579f85c 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -26,18 +26,31 @@ config MMC_PXA
26 26
27config MMC_SDHCI 27config MMC_SDHCI
28 tristate "Secure Digital Host Controller Interface support" 28 tristate "Secure Digital Host Controller Interface support"
29 depends on PCI 29 depends on HAS_DMA
30 help 30 help
31 This select the generic Secure Digital Host Controller Interface. 31 This selects the generic Secure Digital Host Controller Interface.
32 It is used by manufacturers such as Texas Instruments(R), Ricoh(R) 32 It is used by manufacturers such as Texas Instruments(R), Ricoh(R)
33 and Toshiba(R). Most controllers found in laptops are of this type. 33 and Toshiba(R). Most controllers found in laptops are of this type.
34
35 If you have a controller with this interface, say Y or M here. You
36 also need to enable an appropriate bus interface.
37
38 If unsure, say N.
39
40config MMC_SDHCI_PCI
41 tristate "SDHCI support on PCI bus"
42 depends on MMC_SDHCI && PCI
43 help
44 This selects the PCI Secure Digital Host Controller Interface.
45 Most controllers found today are PCI devices.
46
34 If you have a controller with this interface, say Y or M here. 47 If you have a controller with this interface, say Y or M here.
35 48
36 If unsure, say N. 49 If unsure, say N.
37 50
38config MMC_RICOH_MMC 51config MMC_RICOH_MMC
39 tristate "Ricoh MMC Controller Disabler (EXPERIMENTAL)" 52 tristate "Ricoh MMC Controller Disabler (EXPERIMENTAL)"
40 depends on PCI && EXPERIMENTAL && MMC_SDHCI 53 depends on MMC_SDHCI_PCI
41 help 54 help
42 This selects the disabler for the Ricoh MMC Controller. This 55 This selects the disabler for the Ricoh MMC Controller. This
43 proprietary controller is unnecessary because the SDHCI driver 56 proprietary controller is unnecessary because the SDHCI driver
@@ -91,6 +104,16 @@ config MMC_AT91
91 104
92 If unsure, say N. 105 If unsure, say N.
93 106
107config MMC_ATMELMCI
108 tristate "Atmel Multimedia Card Interface support"
109 depends on AVR32
110 help
111 This selects the Atmel Multimedia Card Interface driver. If
112 you have an AT32 (AVR32) platform with a Multimedia Card
113 slot, say Y or M here.
114
115 If unsure, say N.
116
94config MMC_IMX 117config MMC_IMX
95 tristate "Motorola i.MX Multimedia Card Interface support" 118 tristate "Motorola i.MX Multimedia Card Interface support"
96 depends on ARCH_IMX 119 depends on ARCH_IMX
@@ -130,3 +153,24 @@ config MMC_SPI
130 153
131 If unsure, or if your system has no SPI master driver, say N. 154 If unsure, or if your system has no SPI master driver, say N.
132 155
156config MMC_S3C
157 tristate "Samsung S3C SD/MMC Card Interface support"
158 depends on ARCH_S3C2410 && MMC
159 help
160 This selects a driver for the MCI interface found in
161 Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs.
162 If you have a board based on one of those and a MMC/SD
163 slot, say Y or M here.
164
165 If unsure, say N.
166
167config MMC_SDRICOH_CS
168 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)"
169 depends on EXPERIMENTAL && MMC && PCI && PCMCIA
170 help
171 Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA
172 card whenever you insert a MMC or SD card into the card slot.
173
174 To compile this driver as a module, choose M here: the
175 module will be called sdricoh_cs.
176
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3877c87e6da2..db52eebfb50e 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -10,11 +10,15 @@ obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
10obj-$(CONFIG_MMC_PXA) += pxamci.o 10obj-$(CONFIG_MMC_PXA) += pxamci.o
11obj-$(CONFIG_MMC_IMX) += imxmmc.o 11obj-$(CONFIG_MMC_IMX) += imxmmc.o
12obj-$(CONFIG_MMC_SDHCI) += sdhci.o 12obj-$(CONFIG_MMC_SDHCI) += sdhci.o
13obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
13obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o 14obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
14obj-$(CONFIG_MMC_WBSD) += wbsd.o 15obj-$(CONFIG_MMC_WBSD) += wbsd.o
15obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 16obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
16obj-$(CONFIG_MMC_OMAP) += omap.o 17obj-$(CONFIG_MMC_OMAP) += omap.o
17obj-$(CONFIG_MMC_AT91) += at91_mci.o 18obj-$(CONFIG_MMC_AT91) += at91_mci.o
19obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
18obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o 20obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
19obj-$(CONFIG_MMC_SPI) += mmc_spi.o 21obj-$(CONFIG_MMC_SPI) += mmc_spi.o
22obj-$(CONFIG_MMC_S3C) += s3cmci.o
23obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
20 24
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 8979ad330a4d..f15e2064305c 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -125,9 +125,72 @@ struct at91mci_host
125 125
126 /* Latest in the scatterlist that has been enabled for transfer */ 126 /* Latest in the scatterlist that has been enabled for transfer */
127 int transfer_index; 127 int transfer_index;
128
129 /* Timer for timeouts */
130 struct timer_list timer;
128}; 131};
129 132
130/* 133/*
134 * Reset the controller and restore most of the state
135 */
136static void at91_reset_host(struct at91mci_host *host)
137{
138 unsigned long flags;
139 u32 mr;
140 u32 sdcr;
141 u32 dtor;
142 u32 imr;
143
144 local_irq_save(flags);
145 imr = at91_mci_read(host, AT91_MCI_IMR);
146
147 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
148
149 /* save current state */
150 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
151 sdcr = at91_mci_read(host, AT91_MCI_SDCR);
152 dtor = at91_mci_read(host, AT91_MCI_DTOR);
153
154 /* reset the controller */
155 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
156
157 /* restore state */
158 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
159 at91_mci_write(host, AT91_MCI_MR, mr);
160 at91_mci_write(host, AT91_MCI_SDCR, sdcr);
161 at91_mci_write(host, AT91_MCI_DTOR, dtor);
162 at91_mci_write(host, AT91_MCI_IER, imr);
163
164 /* make sure sdio interrupts will fire */
165 at91_mci_read(host, AT91_MCI_SR);
166
167 local_irq_restore(flags);
168}
169
170static void at91_timeout_timer(unsigned long data)
171{
172 struct at91mci_host *host;
173
174 host = (struct at91mci_host *)data;
175
176 if (host->request) {
177 dev_err(host->mmc->parent, "Timeout waiting end of packet\n");
178
179 if (host->cmd && host->cmd->data) {
180 host->cmd->data->error = -ETIMEDOUT;
181 } else {
182 if (host->cmd)
183 host->cmd->error = -ETIMEDOUT;
184 else
185 host->request->cmd->error = -ETIMEDOUT;
186 }
187
188 at91_reset_host(host);
189 mmc_request_done(host->mmc, host->request);
190 }
191}
192
193/*
131 * Copy from sg to a dma block - used for transfers 194 * Copy from sg to a dma block - used for transfers
132 */ 195 */
133static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data) 196static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
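The timeout machinery added above follows the usual kernel timer pattern of the period. Stripped of the controller details, and with an invented my_host structure standing in for at91mci_host, the lifecycle the driver now relies on looks roughly like this.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_host {				/* stand-in for struct at91mci_host */
	struct timer_list timer;
};

static void my_timeout(unsigned long data)
{
	/* cast 'data' back to the host, reset the controller, mark the
	 * request -ETIMEDOUT and call mmc_request_done(), as above */
}

static void my_probe(struct my_host *host)
{
	setup_timer(&host->timer, my_timeout, (unsigned long)host);
}

static void my_request_start(struct my_host *host)
{
	mod_timer(&host->timer, jiffies + HZ);	/* fire one second from now */
}

static void my_request_done(struct my_host *host)
{
	del_timer(&host->timer);		/* completed in time, disarm */
}

static void my_remove(struct my_host *host)
{
	del_timer_sync(&host->timer);		/* wait out a running handler */
}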
@@ -135,9 +198,14 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
135 unsigned int len, i, size; 198 unsigned int len, i, size;
136 unsigned *dmabuf = host->buffer; 199 unsigned *dmabuf = host->buffer;
137 200
138 size = host->total_length; 201 size = data->blksz * data->blocks;
139 len = data->sg_len; 202 len = data->sg_len;
140 203
204 /* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */
205 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
206 if (host->total_length == 12)
207 memset(dmabuf, 0, 12);
208
141 /* 209 /*
142 * Just loop through all entries. Size might not 210 * Just loop through all entries. Size might not
143 * be the entire list though so make sure that 211 * be the entire list though so make sure that
@@ -159,9 +227,10 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
159 227
160 for (index = 0; index < (amount / 4); index++) 228 for (index = 0; index < (amount / 4); index++)
161 *dmabuf++ = swab32(sgbuffer[index]); 229 *dmabuf++ = swab32(sgbuffer[index]);
162 } 230 } else {
163 else
164 memcpy(dmabuf, sgbuffer, amount); 231 memcpy(dmabuf, sgbuffer, amount);
232 dmabuf += amount;
233 }
165 234
166 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); 235 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
167 236
@@ -233,11 +302,11 @@ static void at91_mci_pre_dma_read(struct at91mci_host *host)
233 302
234 if (i == 0) { 303 if (i == 0) {
235 at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address); 304 at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
236 at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4); 305 at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
237 } 306 }
238 else { 307 else {
239 at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address); 308 at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
240 at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4); 309 at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
241 } 310 }
242 } 311 }
243 312
@@ -277,8 +346,6 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
277 346
278 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE); 347 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);
279 348
280 data->bytes_xfered += sg->length;
281
282 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ 349 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
283 unsigned int *buffer; 350 unsigned int *buffer;
284 int index; 351 int index;
@@ -294,6 +361,8 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
294 } 361 }
295 362
296 flush_dcache_page(sg_page(sg)); 363 flush_dcache_page(sg_page(sg));
364
365 data->bytes_xfered += sg->length;
297 } 366 }
298 367
299 /* Is there another transfer to trigger? */ 368 /* Is there another transfer to trigger? */
@@ -334,10 +403,32 @@ static void at91_mci_handle_transmitted(struct at91mci_host *host)
334 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE); 403 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
335 } else 404 } else
336 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); 405 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
406}
407
408/*
409 * Update bytes tranfered count during a write operation
410 */
411static void at91_mci_update_bytes_xfered(struct at91mci_host *host)
412{
413 struct mmc_data *data;
414
415 /* always deal with the effective request (and not the current cmd) */
416
417 if (host->request->cmd && host->request->cmd->error != 0)
418 return;
337 419
338 data->bytes_xfered = host->total_length; 420 if (host->request->data) {
421 data = host->request->data;
422 if (data->flags & MMC_DATA_WRITE) {
423 /* card is in IDLE mode now */
424 pr_debug("-> bytes_xfered %d, total_length = %d\n",
425 data->bytes_xfered, host->total_length);
426 data->bytes_xfered = data->blksz * data->blocks;
427 }
428 }
339} 429}
340 430
431
341/*Handle after command sent ready*/ 432/*Handle after command sent ready*/
342static int at91_mci_handle_cmdrdy(struct at91mci_host *host) 433static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
343{ 434{
@@ -350,8 +441,7 @@ static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
350 } else return 1; 441 } else return 1;
351 } else if (host->cmd->data->flags & MMC_DATA_WRITE) { 442 } else if (host->cmd->data->flags & MMC_DATA_WRITE) {
352 /*After sendding multi-block-write command, start DMA transfer*/ 443 /*After sendding multi-block-write command, start DMA transfer*/
353 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE); 444 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE);
354 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
355 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 445 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
356 } 446 }
357 447
@@ -430,11 +520,19 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
430 520
431 if (data) { 521 if (data) {
432 522
433 if ( data->blksz & 0x3 ) { 523 if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) {
434 pr_debug("Unsupported block size\n"); 524 if (data->blksz & 0x3) {
435 cmd->error = -EINVAL; 525 pr_debug("Unsupported block size\n");
436 mmc_request_done(host->mmc, host->request); 526 cmd->error = -EINVAL;
437 return; 527 mmc_request_done(host->mmc, host->request);
528 return;
529 }
530 if (data->flags & MMC_DATA_STREAM) {
531 pr_debug("Stream commands not supported\n");
532 cmd->error = -EINVAL;
533 mmc_request_done(host->mmc, host->request);
534 return;
535 }
438 } 536 }
439 537
440 block_length = data->blksz; 538 block_length = data->blksz;
@@ -481,8 +579,16 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
481 ier = AT91_MCI_CMDRDY; 579 ier = AT91_MCI_CMDRDY;
482 } else { 580 } else {
483 /* zero block length and PDC mode */ 581 /* zero block length and PDC mode */
484 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; 582 mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff;
485 at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE); 583 mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0;
584 mr |= (block_length << 16);
585 mr |= AT91_MCI_PDCMODE;
586 at91_mci_write(host, AT91_MCI_MR, mr);
587
588 if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261()))
589 at91_mci_write(host, AT91_MCI_BLKR,
590 AT91_MCI_BLKR_BCNT(blocks) |
591 AT91_MCI_BLKR_BLKLEN(block_length));
486 592
487 /* 593 /*
488 * Disable the PDC controller 594 * Disable the PDC controller
@@ -508,6 +614,13 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
508 * Handle a write 614 * Handle a write
509 */ 615 */
510 host->total_length = block_length * blocks; 616 host->total_length = block_length * blocks;
617 /*
618 * AT91SAM926[0/3] Data Write Operation and
619 * number of bytes erratum
620 */
621 if (cpu_is_at91sam9260 () || cpu_is_at91sam9263())
622 if (host->total_length < 12)
623 host->total_length = 12;
511 host->buffer = dma_alloc_coherent(NULL, 624 host->buffer = dma_alloc_coherent(NULL,
512 host->total_length, 625 host->total_length,
513 &host->physical_address, GFP_KERNEL); 626 &host->physical_address, GFP_KERNEL);
@@ -517,7 +630,9 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
517 pr_debug("Transmitting %d bytes\n", host->total_length); 630 pr_debug("Transmitting %d bytes\n", host->total_length);
518 631
519 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address); 632 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
520 at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4); 633 at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ?
634 host->total_length : host->total_length / 4);
635
521 ier = AT91_MCI_CMDRDY; 636 ier = AT91_MCI_CMDRDY;
522 } 637 }
523 } 638 }
@@ -552,20 +667,26 @@ static void at91_mci_process_next(struct at91mci_host *host)
552 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) { 667 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
553 host->flags |= FL_SENT_STOP; 668 host->flags |= FL_SENT_STOP;
554 at91_mci_send_command(host, host->request->stop); 669 at91_mci_send_command(host, host->request->stop);
555 } 670 } else {
556 else 671 del_timer(&host->timer);
672 /* the at91rm9200 mci controller hangs after some transfers,
673 * and the workaround is to reset it after each transfer.
674 */
675 if (cpu_is_at91rm9200())
676 at91_reset_host(host);
557 mmc_request_done(host->mmc, host->request); 677 mmc_request_done(host->mmc, host->request);
678 }
558} 679}
559 680
560/* 681/*
561 * Handle a command that has been completed 682 * Handle a command that has been completed
562 */ 683 */
563static void at91_mci_completed_command(struct at91mci_host *host) 684static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status)
564{ 685{
565 struct mmc_command *cmd = host->cmd; 686 struct mmc_command *cmd = host->cmd;
566 unsigned int status; 687 struct mmc_data *data = cmd->data;
567 688
568 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); 689 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
569 690
570 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0)); 691 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
571 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1)); 692 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
@@ -577,25 +698,34 @@ static void at91_mci_completed_command(struct at91mci_host *host)
577 host->buffer = NULL; 698 host->buffer = NULL;
578 } 699 }
579 700
580 status = at91_mci_read(host, AT91_MCI_SR); 701 pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n",
581 702 status, at91_mci_read(host, AT91_MCI_SR),
582 pr_debug("Status = %08X [%08X %08X %08X %08X]\n", 703 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
583 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
584 704
585 if (status & AT91_MCI_ERRORS) { 705 if (status & AT91_MCI_ERRORS) {
586 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) { 706 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
587 cmd->error = 0; 707 cmd->error = 0;
588 } 708 }
589 else { 709 else {
590 if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE)) 710 if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) {
591 cmd->error = -ETIMEDOUT; 711 if (data) {
592 else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE)) 712 if (status & AT91_MCI_DTOE)
593 cmd->error = -EILSEQ; 713 data->error = -ETIMEDOUT;
594 else 714 else if (status & AT91_MCI_DCRCE)
595 cmd->error = -EIO; 715 data->error = -EILSEQ;
716 }
717 } else {
718 if (status & AT91_MCI_RTOE)
719 cmd->error = -ETIMEDOUT;
720 else if (status & AT91_MCI_RCRCE)
721 cmd->error = -EILSEQ;
722 else
723 cmd->error = -EIO;
724 }
596 725
597 pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n", 726 pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n",
598 cmd->error, cmd->opcode, cmd->retries); 727 cmd->error, data ? data->error : 0,
728 cmd->opcode, cmd->retries);
599 } 729 }
600 } 730 }
601 else 731 else
@@ -613,6 +743,8 @@ static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
613 host->request = mrq; 743 host->request = mrq;
614 host->flags = 0; 744 host->flags = 0;
615 745
746 mod_timer(&host->timer, jiffies + HZ);
747
616 at91_mci_process_next(host); 748 at91_mci_process_next(host);
617} 749}
618 750
@@ -736,6 +868,7 @@ static irqreturn_t at91_mci_irq(int irq, void *devid)
736 868
737 if (int_status & AT91_MCI_NOTBUSY) { 869 if (int_status & AT91_MCI_NOTBUSY) {
738 pr_debug("Card is ready\n"); 870 pr_debug("Card is ready\n");
871 at91_mci_update_bytes_xfered(host);
739 completed = 1; 872 completed = 1;
740 } 873 }
741 874
@@ -744,9 +877,21 @@ static irqreturn_t at91_mci_irq(int irq, void *devid)
744 877
745 if (int_status & AT91_MCI_BLKE) { 878 if (int_status & AT91_MCI_BLKE) {
746 pr_debug("Block transfer has ended\n"); 879 pr_debug("Block transfer has ended\n");
747 completed = 1; 880 if (host->request->data && host->request->data->blocks > 1) {
881 /* multi block write : complete multi write
882 * command and send stop */
883 completed = 1;
884 } else {
885 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
886 }
748 } 887 }
749 888
889 if (int_status & AT91_MCI_SDIOIRQA)
890 mmc_signal_sdio_irq(host->mmc);
891
892 if (int_status & AT91_MCI_SDIOIRQB)
893 mmc_signal_sdio_irq(host->mmc);
894
750 if (int_status & AT91_MCI_TXRDY) 895 if (int_status & AT91_MCI_TXRDY)
751 pr_debug("Ready to transmit\n"); 896 pr_debug("Ready to transmit\n");
752 897
@@ -761,10 +906,10 @@ static irqreturn_t at91_mci_irq(int irq, void *devid)
761 906
762 if (completed) { 907 if (completed) {
763 pr_debug("Completed command\n"); 908 pr_debug("Completed command\n");
764 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); 909 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
765 at91_mci_completed_command(host); 910 at91_mci_completed_command(host, int_status);
766 } else 911 } else
767 at91_mci_write(host, AT91_MCI_IDR, int_status); 912 at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
768 913
769 return IRQ_HANDLED; 914 return IRQ_HANDLED;
770} 915}
@@ -793,25 +938,33 @@ static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
793 938
794static int at91_mci_get_ro(struct mmc_host *mmc) 939static int at91_mci_get_ro(struct mmc_host *mmc)
795{ 940{
796 int read_only = 0;
797 struct at91mci_host *host = mmc_priv(mmc); 941 struct at91mci_host *host = mmc_priv(mmc);
798 942
799 if (host->board->wp_pin) { 943 if (host->board->wp_pin)
800 read_only = gpio_get_value(host->board->wp_pin); 944 return !!gpio_get_value(host->board->wp_pin);
801 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc), 945 /*
802 (read_only ? "read-only" : "read-write") ); 946 * Board doesn't support read only detection; let the mmc core
803 } 947 * decide what to do.
804 else { 948 */
805 printk(KERN_WARNING "%s: host does not support reading read-only " 949 return -ENOSYS;
806 "switch. Assuming write-enable.\n", mmc_hostname(mmc)); 950}
807 } 951
808 return read_only; 952static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable)
953{
954 struct at91mci_host *host = mmc_priv(mmc);
955
956 pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc),
957 host->board->slot_b ? 'B':'A', enable ? "enable" : "disable");
958 at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR,
959 host->board->slot_b ? AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA);
960
809} 961}
810 962
811static const struct mmc_host_ops at91_mci_ops = { 963static const struct mmc_host_ops at91_mci_ops = {
812 .request = at91_mci_request, 964 .request = at91_mci_request,
813 .set_ios = at91_mci_set_ios, 965 .set_ios = at91_mci_set_ios,
814 .get_ro = at91_mci_get_ro, 966 .get_ro = at91_mci_get_ro,
967 .enable_sdio_irq = at91_mci_enable_sdio_irq,
815}; 968};
816 969
817/* 970/*
@@ -842,6 +995,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
842 mmc->f_min = 375000; 995 mmc->f_min = 375000;
843 mmc->f_max = 25000000; 996 mmc->f_max = 25000000;
844 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 997 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
998 mmc->caps = MMC_CAP_SDIO_IRQ;
845 999
846 mmc->max_blk_size = 4095; 1000 mmc->max_blk_size = 4095;
847 mmc->max_blk_count = mmc->max_req_size; 1001 mmc->max_blk_count = mmc->max_req_size;
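On the other side of the new MMC_CAP_SDIO_IRQ / .enable_sdio_irq support, an SDIO function driver asks the core for card interrupts with sdio_claim_irq(). This is a generic sketch rather than code from this merge, and the function names are invented.

#include <linux/mmc/sdio_func.h>

static void example_sdio_irq(struct sdio_func *func)
{
	/* runs with the host claimed; read and acknowledge the
	 * function's interrupt status registers here */
}

static int example_enable_irq(struct sdio_func *func)
{
	int ret;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (!ret) {
		ret = sdio_claim_irq(func, example_sdio_irq);
		if (ret)
			sdio_disable_func(func);
	}

	sdio_release_host(func);
	return ret;
}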
@@ -935,6 +1089,8 @@ static int __init at91_mci_probe(struct platform_device *pdev)
935 1089
936 mmc_add_host(mmc); 1090 mmc_add_host(mmc);
937 1091
1092 setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);
1093
938 /* 1094 /*
939 * monitor card insertion/removal if we can 1095 * monitor card insertion/removal if we can
940 */ 1096 */
@@ -995,6 +1151,7 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
995 } 1151 }
996 1152
997 at91_mci_disable(host); 1153 at91_mci_disable(host);
1154 del_timer_sync(&host->timer);
998 mmc_remove_host(mmc); 1155 mmc_remove_host(mmc);
999 free_irq(host->irq, host); 1156 free_irq(host->irq, host);
1000 1157
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
new file mode 100644
index 000000000000..a9a5657706c6
--- /dev/null
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -0,0 +1,91 @@
1/*
2 * Atmel MultiMedia Card Interface driver
3 *
4 * Copyright (C) 2004-2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __DRIVERS_MMC_ATMEL_MCI_H__
11#define __DRIVERS_MMC_ATMEL_MCI_H__
12
13/* MCI Register Definitions */
14#define MCI_CR 0x0000 /* Control */
15# define MCI_CR_MCIEN ( 1 << 0) /* MCI Enable */
16# define MCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */
17# define MCI_CR_SWRST ( 1 << 7) /* Software Reset */
18#define MCI_MR 0x0004 /* Mode */
19# define MCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */
20# define MCI_MR_RDPROOF ( 1 << 11) /* Read Proof */
21# define MCI_MR_WRPROOF ( 1 << 12) /* Write Proof */
22#define MCI_DTOR 0x0008 /* Data Timeout */
23# define MCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */
24# define MCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */
25#define MCI_SDCR 0x000c /* SD Card / SDIO */
26# define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */
27# define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot B */
28# define MCI_SDCBUS_1BIT ( 0 << 7) /* 1-bit data bus */
29# define MCI_SDCBUS_4BIT ( 1 << 7) /* 4-bit data bus */
30#define MCI_ARGR 0x0010 /* Command Argument */
31#define MCI_CMDR 0x0014 /* Command */
32# define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */
33# define MCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */
34# define MCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */
35# define MCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */
36# define MCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */
37# define MCI_CMDR_SPCMD_SYNC ( 2 << 8) /* Synchronized command */
38# define MCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */
39# define MCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */
40# define MCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */
41# define MCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */
42# define MCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */
43# define MCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */
44# define MCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */
45# define MCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */
46# define MCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */
47# define MCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */
48# define MCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */
49# define MCI_CMDR_STREAM ( 2 << 19) /* MMC Stream transfer */
50# define MCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */
51# define MCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */
52# define MCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */
53# define MCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */
54#define MCI_BLKR 0x0018 /* Block */
55# define MCI_BCNT(x) ((x) << 0) /* Data Block Count */
56# define MCI_BLKLEN(x) ((x) << 16) /* Data Block Length */
57#define MCI_RSPR 0x0020 /* Response 0 */
58#define MCI_RSPR1 0x0024 /* Response 1 */
59#define MCI_RSPR2 0x0028 /* Response 2 */
60#define MCI_RSPR3 0x002c /* Response 3 */
61#define MCI_RDR 0x0030 /* Receive Data */
62#define MCI_TDR 0x0034 /* Transmit Data */
63#define MCI_SR 0x0040 /* Status */
64#define MCI_IER 0x0044 /* Interrupt Enable */
65#define MCI_IDR 0x0048 /* Interrupt Disable */
66#define MCI_IMR 0x004c /* Interrupt Mask */
67# define MCI_CMDRDY ( 1 << 0) /* Command Ready */
68# define MCI_RXRDY ( 1 << 1) /* Receiver Ready */
69# define MCI_TXRDY ( 1 << 2) /* Transmitter Ready */
70# define MCI_BLKE ( 1 << 3) /* Data Block Ended */
71# define MCI_DTIP ( 1 << 4) /* Data Transfer In Progress */
72# define MCI_NOTBUSY ( 1 << 5) /* Data Not Busy */
73# define MCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A */
74# define MCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */
75# define MCI_RINDE ( 1 << 16) /* Response Index Error */
76# define MCI_RDIRE ( 1 << 17) /* Response Direction Error */
77# define MCI_RCRCE ( 1 << 18) /* Response CRC Error */
78# define MCI_RENDE ( 1 << 19) /* Response End Bit Error */
79# define MCI_RTOE ( 1 << 20) /* Response Time-Out Error */
80# define MCI_DCRCE ( 1 << 21) /* Data CRC Error */
81# define MCI_DTOE ( 1 << 22) /* Data Time-Out Error */
82# define MCI_OVRE ( 1 << 30) /* RX Overrun Error */
83# define MCI_UNRE ( 1 << 31) /* TX Underrun Error */
84
85/* Register access macros */
86#define mci_readl(port,reg) \
87 __raw_readl((port)->regs + MCI_##reg)
88#define mci_writel(port,reg,value) \
89 __raw_writel((value), (port)->regs + MCI_##reg)
90
91#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
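The access macros paste the short register name onto the MCI_ prefix, so call sites name registers without it. A hypothetical helper (not from this merge) makes the expansion explicit; the comments show what the preprocessor produces.

static inline u32 example_ack_cmdrdy(struct atmel_mci *host)
{
	/* enable the command-ready interrupt, then sample the status register */
	mci_writel(host, IER, MCI_CMDRDY);	/* __raw_writel(MCI_CMDRDY, host->regs + MCI_IER) */
	return mci_readl(host, SR);		/* __raw_readl(host->regs + MCI_SR) */
}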
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
new file mode 100644
index 000000000000..cce873c5a149
--- /dev/null
+++ b/drivers/mmc/host/atmel-mci.c
@@ -0,0 +1,981 @@
1/*
2 * Atmel MultiMedia Card Interface driver
3 *
4 * Copyright (C) 2004-2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/blkdev.h>
11#include <linux/clk.h>
12#include <linux/device.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/ioport.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/scatterlist.h>
19
20#include <linux/mmc/host.h>
21
22#include <asm/atmel-mci.h>
23#include <asm/io.h>
24#include <asm/unaligned.h>
25
26#include <asm/arch/board.h>
27#include <asm/arch/gpio.h>
28
29#include "atmel-mci-regs.h"
30
31#define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE)
32
33enum {
34 EVENT_CMD_COMPLETE = 0,
35 EVENT_DATA_ERROR,
36 EVENT_DATA_COMPLETE,
37 EVENT_STOP_SENT,
38 EVENT_STOP_COMPLETE,
39 EVENT_XFER_COMPLETE,
40};
41
42struct atmel_mci {
43 struct mmc_host *mmc;
44 void __iomem *regs;
45
46 struct scatterlist *sg;
47 unsigned int pio_offset;
48
49 struct mmc_request *mrq;
50 struct mmc_command *cmd;
51 struct mmc_data *data;
52
53 u32 cmd_status;
54 u32 data_status;
55 u32 stop_status;
56 u32 stop_cmdr;
57
58 u32 mode_reg;
59 u32 sdc_reg;
60
61 struct tasklet_struct tasklet;
62 unsigned long pending_events;
63 unsigned long completed_events;
64
65 int present;
66 int detect_pin;
67 int wp_pin;
68
69 /* For detect pin debouncing */
70 struct timer_list detect_timer;
71
72 unsigned long bus_hz;
73 unsigned long mapbase;
74 struct clk *mck;
75 struct platform_device *pdev;
76};
77
78#define atmci_is_completed(host, event) \
79 test_bit(event, &host->completed_events)
80#define atmci_test_and_clear_pending(host, event) \
81 test_and_clear_bit(event, &host->pending_events)
82#define atmci_test_and_set_completed(host, event) \
83 test_and_set_bit(event, &host->completed_events)
84#define atmci_set_completed(host, event) \
85 set_bit(event, &host->completed_events)
86#define atmci_set_pending(host, event) \
87 set_bit(event, &host->pending_events)
88#define atmci_clear_pending(host, event) \
89 clear_bit(event, &host->pending_events)
90
91
92static void atmci_enable(struct atmel_mci *host)
93{
94 clk_enable(host->mck);
95 mci_writel(host, CR, MCI_CR_MCIEN);
96 mci_writel(host, MR, host->mode_reg);
97 mci_writel(host, SDCR, host->sdc_reg);
98}
99
100static void atmci_disable(struct atmel_mci *host)
101{
102 mci_writel(host, CR, MCI_CR_SWRST);
103
104 /* Stall until write is complete, then disable the bus clock */
105 mci_readl(host, SR);
106 clk_disable(host->mck);
107}
108
109static inline unsigned int ns_to_clocks(struct atmel_mci *host,
110 unsigned int ns)
111{
112 return (ns * (host->bus_hz / 1000000) + 999) / 1000;
113}
114
115static void atmci_set_timeout(struct atmel_mci *host,
116 struct mmc_data *data)
117{
118 static unsigned dtomul_to_shift[] = {
119 0, 4, 7, 8, 10, 12, 16, 20
120 };
121 unsigned timeout;
122 unsigned dtocyc;
123 unsigned dtomul;
124
125 timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks;
126
127 for (dtomul = 0; dtomul < 8; dtomul++) {
128 unsigned shift = dtomul_to_shift[dtomul];
129 dtocyc = (timeout + (1 << shift) - 1) >> shift;
130 if (dtocyc < 15)
131 break;
132 }
133
134 if (dtomul >= 8) {
135 dtomul = 7;
136 dtocyc = 15;
137 }
138
139 dev_vdbg(&host->mmc->class_dev, "setting timeout to %u cycles\n",
140 dtocyc << dtomul_to_shift[dtomul]);
141 mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc)));
142}
143
144/*
145 * Return mask with command flags to be enabled for this command.
146 */
147static u32 atmci_prepare_command(struct mmc_host *mmc,
148 struct mmc_command *cmd)
149{
150 struct mmc_data *data;
151 u32 cmdr;
152
153 cmd->error = -EINPROGRESS;
154
155 cmdr = MCI_CMDR_CMDNB(cmd->opcode);
156
157 if (cmd->flags & MMC_RSP_PRESENT) {
158 if (cmd->flags & MMC_RSP_136)
159 cmdr |= MCI_CMDR_RSPTYP_136BIT;
160 else
161 cmdr |= MCI_CMDR_RSPTYP_48BIT;
162 }
163
164 /*
165 * This should really be MAXLAT_5 for CMD2 and ACMD41, but
166 * it's too difficult to determine whether this is an ACMD or
167 * not. Better make it 64.
168 */
169 cmdr |= MCI_CMDR_MAXLAT_64CYC;
170
171 if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
172 cmdr |= MCI_CMDR_OPDCMD;
173
174 data = cmd->data;
175 if (data) {
176 cmdr |= MCI_CMDR_START_XFER;
177 if (data->flags & MMC_DATA_STREAM)
178 cmdr |= MCI_CMDR_STREAM;
179 else if (data->blocks > 1)
180 cmdr |= MCI_CMDR_MULTI_BLOCK;
181 else
182 cmdr |= MCI_CMDR_BLOCK;
183
184 if (data->flags & MMC_DATA_READ)
185 cmdr |= MCI_CMDR_TRDIR_READ;
186 }
187
188 return cmdr;
189}
190
191static void atmci_start_command(struct atmel_mci *host,
192 struct mmc_command *cmd,
193 u32 cmd_flags)
194{
195 /* Must read host->cmd after testing event flags */
196 smp_rmb();
197 WARN_ON(host->cmd);
198 host->cmd = cmd;
199
200 dev_vdbg(&host->mmc->class_dev,
201 "start command: ARGR=0x%08x CMDR=0x%08x\n",
202 cmd->arg, cmd_flags);
203
204 mci_writel(host, ARGR, cmd->arg);
205 mci_writel(host, CMDR, cmd_flags);
206}
207
208static void send_stop_cmd(struct mmc_host *mmc, struct mmc_data *data)
209{
210 struct atmel_mci *host = mmc_priv(mmc);
211
212 atmci_start_command(host, data->stop, host->stop_cmdr);
213 mci_writel(host, IER, MCI_CMDRDY);
214}
215
216static void atmci_request_end(struct mmc_host *mmc, struct mmc_request *mrq)
217{
218 struct atmel_mci *host = mmc_priv(mmc);
219
220 WARN_ON(host->cmd || host->data);
221 host->mrq = NULL;
222
223 atmci_disable(host);
224
225 mmc_request_done(mmc, mrq);
226}
227
228/*
229 * Returns a mask of interrupt flags to be enabled after the whole
230 * request has been prepared.
231 */
232static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data)
233{
234 struct atmel_mci *host = mmc_priv(mmc);
235 u32 iflags;
236
237 data->error = -EINPROGRESS;
238
239 WARN_ON(host->data);
240 host->sg = NULL;
241 host->data = data;
242
243 mci_writel(host, BLKR, MCI_BCNT(data->blocks)
244 | MCI_BLKLEN(data->blksz));
245 dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n",
246 MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
247
248 iflags = ATMCI_DATA_ERROR_FLAGS;
249 host->sg = data->sg;
250 host->pio_offset = 0;
251 if (data->flags & MMC_DATA_READ)
252 iflags |= MCI_RXRDY;
253 else
254 iflags |= MCI_TXRDY;
255
256 return iflags;
257}
258
259static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
260{
261 struct atmel_mci *host = mmc_priv(mmc);
262 struct mmc_data *data;
263 struct mmc_command *cmd;
264 u32 iflags;
265 u32 cmdflags = 0;
266
267 iflags = mci_readl(host, IMR);
268 if (iflags)
269 dev_warn(&mmc->class_dev, "WARNING: IMR=0x%08x\n",
270 mci_readl(host, IMR));
271
272 WARN_ON(host->mrq != NULL);
273
274 /*
275 * We may "know" the card is gone even though there's still an
276 * electrical connection. If so, we really need to communicate
277 * this to the MMC core since there won't be any more
278 * interrupts as the card is completely removed. Otherwise,
279 * the MMC core might believe the card is still there even
280 * though the card was just removed very slowly.
281 */
282 if (!host->present) {
283 mrq->cmd->error = -ENOMEDIUM;
284 mmc_request_done(mmc, mrq);
285 return;
286 }
287
288 host->mrq = mrq;
289 host->pending_events = 0;
290 host->completed_events = 0;
291
292 atmci_enable(host);
293
294 /* We don't support multiple blocks of weird lengths. */
295 data = mrq->data;
296 if (data) {
297 if (data->blocks > 1 && data->blksz & 3)
298 goto fail;
299 atmci_set_timeout(host, data);
300 }
301
302 iflags = MCI_CMDRDY;
303 cmd = mrq->cmd;
304 cmdflags = atmci_prepare_command(mmc, cmd);
305 atmci_start_command(host, cmd, cmdflags);
306
307 if (data)
308 iflags |= atmci_submit_data(mmc, data);
309
310 if (mrq->stop) {
311 host->stop_cmdr = atmci_prepare_command(mmc, mrq->stop);
312 host->stop_cmdr |= MCI_CMDR_STOP_XFER;
313 if (!(data->flags & MMC_DATA_WRITE))
314 host->stop_cmdr |= MCI_CMDR_TRDIR_READ;
315 if (data->flags & MMC_DATA_STREAM)
316 host->stop_cmdr |= MCI_CMDR_STREAM;
317 else
318 host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK;
319 }
320
321 /*
322 * We could have enabled interrupts earlier, but I suspect
323 * that would open up a nice can of interesting race
324 * conditions (e.g. command and data complete, but stop not
325 * prepared yet.)
326 */
327 mci_writel(host, IER, iflags);
328
329 return;
330
331fail:
332 atmci_disable(host);
333 host->mrq = NULL;
334 mrq->cmd->error = -EINVAL;
335 mmc_request_done(mmc, mrq);
336}
337
338static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
339{
340 struct atmel_mci *host = mmc_priv(mmc);
341
342 if (ios->clock) {
343 u32 clkdiv;
344
345 /* Set clock rate */
346 clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * ios->clock) - 1;
347 if (clkdiv > 255) {
348 dev_warn(&mmc->class_dev,
349 "clock %u too slow; using %lu\n",
350 ios->clock, host->bus_hz / (2 * 256));
351 clkdiv = 255;
352 }
353
354 host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF
355 | MCI_MR_RDPROOF;
356 }
357
358 switch (ios->bus_width) {
359 case MMC_BUS_WIDTH_1:
360 host->sdc_reg = 0;
361 break;
362 case MMC_BUS_WIDTH_4:
363 host->sdc_reg = MCI_SDCBUS_4BIT;
364 break;
365 }
366
367 switch (ios->power_mode) {
368 case MMC_POWER_ON:
369 /* Send init sequence (74 clock cycles) */
370 atmci_enable(host);
371 mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT);
372 while (!(mci_readl(host, SR) & MCI_CMDRDY))
373 cpu_relax();
374 atmci_disable(host);
375 break;
376 default:
377 /*
378 * TODO: None of the currently available AVR32-based
379 * boards allow MMC power to be turned off. Implement
380 * power control when this can be tested properly.
381 */
382 break;
383 }
384}
385
386static int atmci_get_ro(struct mmc_host *mmc)
387{
388 int read_only = 0;
389 struct atmel_mci *host = mmc_priv(mmc);
390
391 if (host->wp_pin >= 0) {
392 read_only = gpio_get_value(host->wp_pin);
393 dev_dbg(&mmc->class_dev, "card is %s\n",
394 read_only ? "read-only" : "read-write");
395 } else {
396 dev_dbg(&mmc->class_dev,
397 "no pin for checking read-only switch."
398 " Assuming write-enable.\n");
399 }
400
401 return read_only;
402}
403
404static struct mmc_host_ops atmci_ops = {
405 .request = atmci_request,
406 .set_ios = atmci_set_ios,
407 .get_ro = atmci_get_ro,
408};
409
410static void atmci_command_complete(struct atmel_mci *host,
411 struct mmc_command *cmd, u32 status)
412{
413 /* Read the response from the card (up to 16 bytes) */
414 cmd->resp[0] = mci_readl(host, RSPR);
415 cmd->resp[1] = mci_readl(host, RSPR);
416 cmd->resp[2] = mci_readl(host, RSPR);
417 cmd->resp[3] = mci_readl(host, RSPR);
418
419 if (status & MCI_RTOE)
420 cmd->error = -ETIMEDOUT;
421 else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE))
422 cmd->error = -EILSEQ;
423 else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE))
424 cmd->error = -EIO;
425 else
426 cmd->error = 0;
427
428 if (cmd->error) {
429 dev_dbg(&host->mmc->class_dev,
430 "command error: status=0x%08x\n", status);
431
432 if (cmd->data) {
433 host->data = NULL;
434 mci_writel(host, IDR, MCI_NOTBUSY
435 | MCI_TXRDY | MCI_RXRDY
436 | ATMCI_DATA_ERROR_FLAGS);
437 }
438 }
439}
440
441static void atmci_detect_change(unsigned long data)
442{
443 struct atmel_mci *host = (struct atmel_mci *)data;
444 struct mmc_request *mrq = host->mrq;
445 int present;
446
447 /*
448 * atmci_remove() sets detect_pin to -1 before freeing the
449 * interrupt. We must not re-enable the interrupt if it has
450 * been freed.
451 */
452 smp_rmb();
453 if (host->detect_pin < 0)
454 return;
455
456 enable_irq(gpio_to_irq(host->detect_pin));
457 present = !gpio_get_value(host->detect_pin);
458
459 dev_vdbg(&host->pdev->dev, "detect change: %d (was %d)\n",
460 present, host->present);
461
462 if (present != host->present) {
463 dev_dbg(&host->mmc->class_dev, "card %s\n",
464 present ? "inserted" : "removed");
465 host->present = present;
466
467 /* Reset controller if card is gone */
468 if (!present) {
469 mci_writel(host, CR, MCI_CR_SWRST);
470 mci_writel(host, IDR, ~0UL);
471 mci_writel(host, CR, MCI_CR_MCIEN);
472 }
473
474 /* Clean up queue if present */
475 if (mrq) {
476 /*
477 * Reset controller to terminate any ongoing
478 * commands or data transfers.
479 */
480 mci_writel(host, CR, MCI_CR_SWRST);
481
482 if (!atmci_is_completed(host, EVENT_CMD_COMPLETE))
483 mrq->cmd->error = -ENOMEDIUM;
484
485 if (mrq->data && !atmci_is_completed(host,
486 EVENT_DATA_COMPLETE)) {
487 host->data = NULL;
488 mrq->data->error = -ENOMEDIUM;
489 }
490 if (mrq->stop && !atmci_is_completed(host,
491 EVENT_STOP_COMPLETE))
492 mrq->stop->error = -ENOMEDIUM;
493
494 host->cmd = NULL;
495 atmci_request_end(host->mmc, mrq);
496 }
497
498 mmc_detect_change(host->mmc, 0);
499 }
500}
501
502static void atmci_tasklet_func(unsigned long priv)
503{
504 struct mmc_host *mmc = (struct mmc_host *)priv;
505 struct atmel_mci *host = mmc_priv(mmc);
506 struct mmc_request *mrq = host->mrq;
507 struct mmc_data *data = host->data;
508
509 dev_vdbg(&mmc->class_dev,
510 "tasklet: pending/completed/mask %lx/%lx/%x\n",
511 host->pending_events, host->completed_events,
512 mci_readl(host, IMR));
513
514 if (atmci_test_and_clear_pending(host, EVENT_CMD_COMPLETE)) {
515 /*
516 * host->cmd must be set to NULL before the interrupt
517 * handler sees EVENT_CMD_COMPLETE
518 */
519 host->cmd = NULL;
520 smp_wmb();
521 atmci_set_completed(host, EVENT_CMD_COMPLETE);
522 atmci_command_complete(host, mrq->cmd, host->cmd_status);
523
524 if (!mrq->cmd->error && mrq->stop
525 && atmci_is_completed(host, EVENT_XFER_COMPLETE)
526 && !atmci_test_and_set_completed(host,
527 EVENT_STOP_SENT))
528 send_stop_cmd(host->mmc, mrq->data);
529 }
530 if (atmci_test_and_clear_pending(host, EVENT_STOP_COMPLETE)) {
531 /*
532 * host->cmd must be set to NULL before the interrupt
533 * handler sees EVENT_STOP_COMPLETE
534 */
535 host->cmd = NULL;
536 smp_wmb();
537 atmci_set_completed(host, EVENT_STOP_COMPLETE);
538 atmci_command_complete(host, mrq->stop, host->stop_status);
539 }
540 if (atmci_test_and_clear_pending(host, EVENT_DATA_ERROR)) {
541 u32 status = host->data_status;
542
543 dev_vdbg(&mmc->class_dev, "data error: status=%08x\n", status);
544
545 atmci_set_completed(host, EVENT_DATA_ERROR);
546 atmci_set_completed(host, EVENT_DATA_COMPLETE);
547
548 if (status & MCI_DTOE) {
549 dev_dbg(&mmc->class_dev,
550 "data timeout error\n");
551 data->error = -ETIMEDOUT;
552 } else if (status & MCI_DCRCE) {
553 dev_dbg(&mmc->class_dev, "data CRC error\n");
554 data->error = -EILSEQ;
555 } else {
556 dev_dbg(&mmc->class_dev,
557 "data FIFO error (status=%08x)\n",
558 status);
559 data->error = -EIO;
560 }
561
562 if (host->present && data->stop
563 && atmci_is_completed(host, EVENT_CMD_COMPLETE)
564 && !atmci_test_and_set_completed(
565 host, EVENT_STOP_SENT))
566 send_stop_cmd(host->mmc, data);
567
568 host->data = NULL;
569 }
570 if (atmci_test_and_clear_pending(host, EVENT_DATA_COMPLETE)) {
571 atmci_set_completed(host, EVENT_DATA_COMPLETE);
572
573 if (!atmci_is_completed(host, EVENT_DATA_ERROR)) {
574 data->bytes_xfered = data->blocks * data->blksz;
575 data->error = 0;
576 }
577
578 host->data = NULL;
579 }
580
581 if (host->mrq && !host->cmd && !host->data)
582 atmci_request_end(mmc, host->mrq);
583}
584
585static void atmci_read_data_pio(struct atmel_mci *host)
586{
587 struct scatterlist *sg = host->sg;
588 void *buf = sg_virt(sg);
589 unsigned int offset = host->pio_offset;
590 struct mmc_data *data = host->data;
591 u32 value;
592 u32 status;
593 unsigned int nbytes = 0;
594
595 do {
596 value = mci_readl(host, RDR);
597 if (likely(offset + 4 <= sg->length)) {
598 put_unaligned(value, (u32 *)(buf + offset));
599
600 offset += 4;
601 nbytes += 4;
602
603 if (offset == sg->length) {
604 host->sg = sg = sg_next(sg);
605 if (!sg)
606 goto done;
607
608 offset = 0;
609 buf = sg_virt(sg);
610 }
611 } else {
612 unsigned int remaining = sg->length - offset;
613 memcpy(buf + offset, &value, remaining);
614 nbytes += remaining;
615
616 flush_dcache_page(sg_page(sg));
617 host->sg = sg = sg_next(sg);
618 if (!sg)
619 goto done;
620
621 offset = 4 - remaining;
622 buf = sg_virt(sg);
623 memcpy(buf, (u8 *)&value + remaining, offset);
624 nbytes += offset;
625 }
626
627 status = mci_readl(host, SR);
628 if (status & ATMCI_DATA_ERROR_FLAGS) {
629 mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY
630 | ATMCI_DATA_ERROR_FLAGS));
631 host->data_status = status;
632 atmci_set_pending(host, EVENT_DATA_ERROR);
633 tasklet_schedule(&host->tasklet);
634 break;
635 }
636 } while (status & MCI_RXRDY);
637
638 host->pio_offset = offset;
639 data->bytes_xfered += nbytes;
640
641 return;
642
643done:
644 mci_writel(host, IDR, MCI_RXRDY);
645 mci_writel(host, IER, MCI_NOTBUSY);
646 data->bytes_xfered += nbytes;
647 atmci_set_completed(host, EVENT_XFER_COMPLETE);
648 if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE)
649 && !atmci_test_and_set_completed(host, EVENT_STOP_SENT))
650 send_stop_cmd(host->mmc, data);
651}
652
653static void atmci_write_data_pio(struct atmel_mci *host)
654{
655 struct scatterlist *sg = host->sg;
656 void *buf = sg_virt(sg);
657 unsigned int offset = host->pio_offset;
658 struct mmc_data *data = host->data;
659 u32 value;
660 u32 status;
661 unsigned int nbytes = 0;
662
663 do {
664 if (likely(offset + 4 <= sg->length)) {
665 value = get_unaligned((u32 *)(buf + offset));
666 mci_writel(host, TDR, value);
667
668 offset += 4;
669 nbytes += 4;
670 if (offset == sg->length) {
671 host->sg = sg = sg_next(sg);
672 if (!sg)
673 goto done;
674
675 offset = 0;
676 buf = sg_virt(sg);
677 }
678 } else {
679 unsigned int remaining = sg->length - offset;
680
681 value = 0;
682 memcpy(&value, buf + offset, remaining);
683 nbytes += remaining;
684
685 host->sg = sg = sg_next(sg);
686 if (!sg) {
687 mci_writel(host, TDR, value);
688 goto done;
689 }
690
691 offset = 4 - remaining;
692 buf = sg_virt(sg);
693 memcpy((u8 *)&value + remaining, buf, offset);
694 mci_writel(host, TDR, value);
695 nbytes += offset;
696 }
697
698 status = mci_readl(host, SR);
699 if (status & ATMCI_DATA_ERROR_FLAGS) {
700 mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY
701 | ATMCI_DATA_ERROR_FLAGS));
702 host->data_status = status;
703 atmci_set_pending(host, EVENT_DATA_ERROR);
704 tasklet_schedule(&host->tasklet);
705 break;
706 }
707 } while (status & MCI_TXRDY);
708
709 host->pio_offset = offset;
710 data->bytes_xfered += nbytes;
711
712 return;
713
714done:
715 mci_writel(host, IDR, MCI_TXRDY);
716 mci_writel(host, IER, MCI_NOTBUSY);
717 data->bytes_xfered += nbytes;
718 atmci_set_completed(host, EVENT_XFER_COMPLETE);
719 if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE)
720 && !atmci_test_and_set_completed(host, EVENT_STOP_SENT))
721 send_stop_cmd(host->mmc, data);
722}
723
724static void atmci_cmd_interrupt(struct mmc_host *mmc, u32 status)
725{
726 struct atmel_mci *host = mmc_priv(mmc);
727
728 mci_writel(host, IDR, MCI_CMDRDY);
729
730 if (atmci_is_completed(host, EVENT_STOP_SENT)) {
731 host->stop_status = status;
732 atmci_set_pending(host, EVENT_STOP_COMPLETE);
733 } else {
734 host->cmd_status = status;
735 atmci_set_pending(host, EVENT_CMD_COMPLETE);
736 }
737
738 tasklet_schedule(&host->tasklet);
739}
740
741static irqreturn_t atmci_interrupt(int irq, void *dev_id)
742{
743 struct mmc_host *mmc = dev_id;
744 struct atmel_mci *host = mmc_priv(mmc);
745 u32 status, mask, pending;
746 unsigned int pass_count = 0;
747
748 spin_lock(&mmc->lock);
749
750 do {
751 status = mci_readl(host, SR);
752 mask = mci_readl(host, IMR);
753 pending = status & mask;
754 if (!pending)
755 break;
756
757 if (pending & ATMCI_DATA_ERROR_FLAGS) {
758 mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS
759 | MCI_RXRDY | MCI_TXRDY);
760 pending &= mci_readl(host, IMR);
761 host->data_status = status;
762 atmci_set_pending(host, EVENT_DATA_ERROR);
763 tasklet_schedule(&host->tasklet);
764 }
765 if (pending & MCI_NOTBUSY) {
766 mci_writel(host, IDR, (MCI_NOTBUSY
767 | ATMCI_DATA_ERROR_FLAGS));
768 atmci_set_pending(host, EVENT_DATA_COMPLETE);
769 tasklet_schedule(&host->tasklet);
770 }
771 if (pending & MCI_RXRDY)
772 atmci_read_data_pio(host);
773 if (pending & MCI_TXRDY)
774 atmci_write_data_pio(host);
775
776 if (pending & MCI_CMDRDY)
777 atmci_cmd_interrupt(mmc, status);
778 } while (pass_count++ < 5);
779
780 spin_unlock(&mmc->lock);
781
782 return pass_count ? IRQ_HANDLED : IRQ_NONE;
783}
784
785static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
786{
787 struct mmc_host *mmc = dev_id;
788 struct atmel_mci *host = mmc_priv(mmc);
789
790 /*
791 * Disable interrupts until the pin has stabilized and check
792 * the state then. Use mod_timer() since we may be in the
793 * middle of the timer routine when this interrupt triggers.
794 */
795 disable_irq_nosync(irq);
796 mod_timer(&host->detect_timer, jiffies + msecs_to_jiffies(20));
797
798 return IRQ_HANDLED;
799}
800
801static int __init atmci_probe(struct platform_device *pdev)
802{
803 struct mci_platform_data *pdata;
804 struct atmel_mci *host;
805 struct mmc_host *mmc;
806 struct resource *regs;
807 int irq;
808 int ret;
809
810 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
811 if (!regs)
812 return -ENXIO;
813 pdata = pdev->dev.platform_data;
814 if (!pdata)
815 return -ENXIO;
816 irq = platform_get_irq(pdev, 0);
817 if (irq < 0)
818 return irq;
819
820 mmc = mmc_alloc_host(sizeof(struct atmel_mci), &pdev->dev);
821 if (!mmc)
822 return -ENOMEM;
823
824 host = mmc_priv(mmc);
825 host->pdev = pdev;
826 host->mmc = mmc;
827 host->detect_pin = pdata->detect_pin;
828 host->wp_pin = pdata->wp_pin;
829
830 host->mck = clk_get(&pdev->dev, "mci_clk");
831 if (IS_ERR(host->mck)) {
832 ret = PTR_ERR(host->mck);
833 goto err_clk_get;
834 }
835
836 ret = -ENOMEM;
837 host->regs = ioremap(regs->start, regs->end - regs->start + 1);
838 if (!host->regs)
839 goto err_ioremap;
840
841 clk_enable(host->mck);
842 mci_writel(host, CR, MCI_CR_SWRST);
843 host->bus_hz = clk_get_rate(host->mck);
844 clk_disable(host->mck);
845
846 host->mapbase = regs->start;
847
848 mmc->ops = &atmci_ops;
849 mmc->f_min = (host->bus_hz + 511) / 512;
850 mmc->f_max = host->bus_hz / 2;
851 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
852 mmc->caps |= MMC_CAP_4_BIT_DATA;
853
854 mmc->max_hw_segs = 64;
855 mmc->max_phys_segs = 64;
856 mmc->max_req_size = 32768 * 512;
857 mmc->max_blk_size = 32768;
858 mmc->max_blk_count = 512;
859
860 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)mmc);
861
862 ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, mmc);
863 if (ret)
864 goto err_request_irq;
865
866 /* Assume card is present if we don't have a detect pin */
867 host->present = 1;
868 if (host->detect_pin >= 0) {
869 if (gpio_request(host->detect_pin, "mmc_detect")) {
870 dev_dbg(&mmc->class_dev, "no detect pin available\n");
871 host->detect_pin = -1;
872 } else {
873 host->present = !gpio_get_value(host->detect_pin);
874 }
875 }
876 if (host->wp_pin >= 0) {
877 if (gpio_request(host->wp_pin, "mmc_wp")) {
878 dev_dbg(&mmc->class_dev, "no WP pin available\n");
879 host->wp_pin = -1;
880 }
881 }
882
883 platform_set_drvdata(pdev, host);
884
885 mmc_add_host(mmc);
886
887 if (host->detect_pin >= 0) {
888 setup_timer(&host->detect_timer, atmci_detect_change,
889 (unsigned long)host);
890
891 ret = request_irq(gpio_to_irq(host->detect_pin),
892 atmci_detect_interrupt,
893 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
894 "mmc-detect", mmc);
895 if (ret) {
896 dev_dbg(&mmc->class_dev,
897 "could not request IRQ %d for detect pin\n",
898 gpio_to_irq(host->detect_pin));
899 gpio_free(host->detect_pin);
900 host->detect_pin = -1;
901 }
902 }
903
904 dev_info(&mmc->class_dev,
905 "Atmel MCI controller at 0x%08lx irq %d\n",
906 host->mapbase, irq);
907
908 return 0;
909
910err_request_irq:
911 iounmap(host->regs);
912err_ioremap:
913 clk_put(host->mck);
914err_clk_get:
915 mmc_free_host(mmc);
916 return ret;
917}
918
919static int __exit atmci_remove(struct platform_device *pdev)
920{
921 struct atmel_mci *host = platform_get_drvdata(pdev);
922
923 platform_set_drvdata(pdev, NULL);
924
925 if (host) {
926 if (host->detect_pin >= 0) {
927 int pin = host->detect_pin;
928
929 /* Make sure the timer doesn't enable the interrupt */
930 host->detect_pin = -1;
931 smp_wmb();
932
933 free_irq(gpio_to_irq(pin), host->mmc);
934 del_timer_sync(&host->detect_timer);
935 gpio_free(pin);
936 }
937
938 mmc_remove_host(host->mmc);
939
940 clk_enable(host->mck);
941 mci_writel(host, IDR, ~0UL);
942 mci_writel(host, CR, MCI_CR_MCIDIS);
943 mci_readl(host, SR);
944 clk_disable(host->mck);
945
946 if (host->wp_pin >= 0)
947 gpio_free(host->wp_pin);
948
949 free_irq(platform_get_irq(pdev, 0), host->mmc);
950 iounmap(host->regs);
951
952 clk_put(host->mck);
953
954 mmc_free_host(host->mmc);
955 }
956 return 0;
957}
958
959static struct platform_driver atmci_driver = {
960 .remove = __exit_p(atmci_remove),
961 .driver = {
962 .name = "atmel_mci",
963 },
964};
965
966static int __init atmci_init(void)
967{
968 return platform_driver_probe(&atmci_driver, atmci_probe);
969}
970
971static void __exit atmci_exit(void)
972{
973 platform_driver_unregister(&atmci_driver);
974}
975
976module_init(atmci_init);
977module_exit(atmci_exit);
978
979MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
980MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
981MODULE_LICENSE("GPL v2");
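A note on the f_min/f_max values chosen in atmci_probe() above: they follow from the controller's clock divider, assuming the usual Atmel MCI relation MCICK = MCK / (2 * (CLKDIV + 1)) with an 8-bit CLKDIV, hence a largest divisor of 512 and the round-up by 511. That divider width is an assumption of this note, not spelled out in the diff; the arithmetic itself is easy to check stand-alone:

    #include <stdio.h>

    /* Stand-alone check of the f_min/f_max arithmetic for an example
     * bus clock of 50 MHz. */
    int main(void)
    {
            unsigned long bus_hz = 50000000;
            unsigned long f_min = (bus_hz + 511) / 512;  /* round up */
            unsigned long f_max = bus_hz / 2;

            /* Prints f_min = 97657 Hz, f_max = 25000000 Hz. */
            printf("f_min = %lu Hz, f_max = %lu Hz\n", f_min, f_max);
            return 0;
    }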
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index cc5f7bc546af..3f15eb204895 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -21,7 +21,7 @@
21 * published by the Free Software Foundation. 21 * published by the Free Software Foundation.
22 */ 22 */
23 23
24/* Why is a timer used to detect insert events? 24/* Why don't we use the SD controllers' carddetect feature?
25 * 25 *
26 * From the AU1100 MMC application guide: 26 * From the AU1100 MMC application guide:
27 * If the Au1100-based design is intended to support both MultiMediaCards 27 * If the Au1100-based design is intended to support both MultiMediaCards
@@ -30,8 +30,6 @@
30 * In doing so, a MMC card never enters SPI-mode communications, 30 * In doing so, a MMC card never enters SPI-mode communications,
31 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective 31 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
32 * (the low to high transition will not occur). 32 * (the low to high transition will not occur).
33 *
34 * So we use the timer to check the status manually.
35 */ 33 */
36 34
37#include <linux/module.h> 35#include <linux/module.h>
@@ -41,51 +39,110 @@
41#include <linux/interrupt.h> 39#include <linux/interrupt.h>
42#include <linux/dma-mapping.h> 40#include <linux/dma-mapping.h>
43#include <linux/scatterlist.h> 41#include <linux/scatterlist.h>
44 42#include <linux/leds.h>
45#include <linux/mmc/host.h> 43#include <linux/mmc/host.h>
44
46#include <asm/io.h> 45#include <asm/io.h>
47#include <asm/mach-au1x00/au1000.h> 46#include <asm/mach-au1x00/au1000.h>
48#include <asm/mach-au1x00/au1xxx_dbdma.h> 47#include <asm/mach-au1x00/au1xxx_dbdma.h>
49#include <asm/mach-au1x00/au1100_mmc.h> 48#include <asm/mach-au1x00/au1100_mmc.h>
50 49
51#include <au1xxx.h>
52#include "au1xmmc.h"
53
54#define DRIVER_NAME "au1xxx-mmc" 50#define DRIVER_NAME "au1xxx-mmc"
55 51
56/* Set this to enable special debugging macros */ 52/* Set this to enable special debugging macros */
53/* #define DEBUG */
57 54
58#ifdef DEBUG 55#ifdef DEBUG
59#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args) 56#define DBG(fmt, idx, args...) \
57 printk(KERN_DEBUG "au1xmmc(%d): DEBUG: " fmt, idx, ##args)
60#else 58#else
61#define DBG(fmt, idx, args...) 59#define DBG(fmt, idx, args...) do {} while (0)
62#endif 60#endif
63 61
64const struct { 62/* Hardware definitions */
63#define AU1XMMC_DESCRIPTOR_COUNT 1
64#define AU1XMMC_DESCRIPTOR_SIZE 2048
65
66#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
67 MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
68 MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)
69
70/* This gives us a hard value for the stop command that we can write directly
71 * to the command register.
72 */
73#define STOP_CMD \
74 (SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO)
75
76/* This is the set of interrupts that we configure by default. */
77#define AU1XMMC_INTERRUPTS \
78 (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT | \
79 SD_CONFIG_CR | SD_CONFIG_I)
80
 81 /* The poll event (looking for insert/remove events) runs twice a second. */
82#define AU1XMMC_DETECT_TIMEOUT (HZ/2)
83
84struct au1xmmc_host {
85 struct mmc_host *mmc;
86 struct mmc_request *mrq;
87
88 u32 flags;
65 u32 iobase; 89 u32 iobase;
66 u32 tx_devid, rx_devid; 90 u32 clock;
67 u16 bcsrpwr; 91 u32 bus_width;
68 u16 bcsrstatus; 92 u32 power_mode;
69 u16 wpstatus;
70} au1xmmc_card_table[] = {
71 { SD0_BASE, DSCR_CMD0_SDMS_TX0, DSCR_CMD0_SDMS_RX0,
72 BCSR_BOARD_SD0PWR, BCSR_INT_SD0INSERT, BCSR_STATUS_SD0WP },
73#ifndef CONFIG_MIPS_DB1200
74 { SD1_BASE, DSCR_CMD0_SDMS_TX1, DSCR_CMD0_SDMS_RX1,
75 BCSR_BOARD_DS1PWR, BCSR_INT_SD1INSERT, BCSR_STATUS_SD1WP }
76#endif
77};
78 93
79#define AU1XMMC_CONTROLLER_COUNT (ARRAY_SIZE(au1xmmc_card_table)) 94 int status;
80 95
81/* This array stores pointers for the hosts (used by the IRQ handler) */ 96 struct {
82struct au1xmmc_host *au1xmmc_hosts[AU1XMMC_CONTROLLER_COUNT]; 97 int len;
83static int dma = 1; 98 int dir;
99 } dma;
84 100
85#ifdef MODULE 101 struct {
86module_param(dma, bool, 0); 102 int index;
87MODULE_PARM_DESC(dma, "Use DMA engine for data transfers (0 = disabled)"); 103 int offset;
88#endif 104 int len;
105 } pio;
106
107 u32 tx_chan;
108 u32 rx_chan;
109
110 int irq;
111
112 struct tasklet_struct finish_task;
113 struct tasklet_struct data_task;
114 struct au1xmmc_platform_data *platdata;
115 struct platform_device *pdev;
116 struct resource *ioarea;
117};
118
119/* Status flags used by the host structure */
120#define HOST_F_XMIT 0x0001
121#define HOST_F_RECV 0x0002
122#define HOST_F_DMA 0x0010
123#define HOST_F_ACTIVE 0x0100
124#define HOST_F_STOP 0x1000
125
126#define HOST_S_IDLE 0x0001
127#define HOST_S_CMD 0x0002
128#define HOST_S_DATA 0x0003
129#define HOST_S_STOP 0x0004
130
131/* Easy access macros */
132#define HOST_STATUS(h) ((h)->iobase + SD_STATUS)
133#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG)
134#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE)
135#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT)
136#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT)
137#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG)
138#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE)
139#define HOST_CMD(h) ((h)->iobase + SD_CMD)
140#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2)
141#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT)
142#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG)
143
144#define DMA_CHANNEL(h) \
145 (((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)
89 146
90static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask) 147static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
91{ 148{
@@ -119,14 +176,13 @@ static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
119 176
120static inline void SEND_STOP(struct au1xmmc_host *host) 177static inline void SEND_STOP(struct au1xmmc_host *host)
121{ 178{
122 179 u32 config2;
123 /* We know the value of CONFIG2, so avoid a read we don't need */
124 u32 mask = SD_CONFIG2_EN;
125 180
126 WARN_ON(host->status != HOST_S_DATA); 181 WARN_ON(host->status != HOST_S_DATA);
127 host->status = HOST_S_STOP; 182 host->status = HOST_S_STOP;
128 183
129 au_writel(mask | SD_CONFIG2_DF, HOST_CONFIG2(host)); 184 config2 = au_readl(HOST_CONFIG2(host));
185 au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
130 au_sync(); 186 au_sync();
131 187
 132 /* Send the stop command */ 188
@@ -135,35 +191,36 @@ static inline void SEND_STOP(struct au1xmmc_host *host)
135 191
136static void au1xmmc_set_power(struct au1xmmc_host *host, int state) 192static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
137{ 193{
138 194 if (host->platdata && host->platdata->set_power)
139 u32 val = au1xmmc_card_table[host->id].bcsrpwr; 195 host->platdata->set_power(host->mmc, state);
140
141 bcsr->board &= ~val;
142 if (state) bcsr->board |= val;
143
144 au_sync_delay(1);
145} 196}
146 197
147static inline int au1xmmc_card_inserted(struct au1xmmc_host *host) 198static int au1xmmc_card_inserted(struct mmc_host *mmc)
148{ 199{
149 return (bcsr->sig_status & au1xmmc_card_table[host->id].bcsrstatus) 200 struct au1xmmc_host *host = mmc_priv(mmc);
150 ? 1 : 0; 201
202 if (host->platdata && host->platdata->card_inserted)
203 return !!host->platdata->card_inserted(host->mmc);
204
205 return -ENOSYS;
151} 206}
152 207
153static int au1xmmc_card_readonly(struct mmc_host *mmc) 208static int au1xmmc_card_readonly(struct mmc_host *mmc)
154{ 209{
155 struct au1xmmc_host *host = mmc_priv(mmc); 210 struct au1xmmc_host *host = mmc_priv(mmc);
156 return (bcsr->status & au1xmmc_card_table[host->id].wpstatus) 211
157 ? 1 : 0; 212 if (host->platdata && host->platdata->card_readonly)
213 return !!host->platdata->card_readonly(mmc);
214
215 return -ENOSYS;
158} 216}
159 217
160static void au1xmmc_finish_request(struct au1xmmc_host *host) 218static void au1xmmc_finish_request(struct au1xmmc_host *host)
161{ 219{
162
163 struct mmc_request *mrq = host->mrq; 220 struct mmc_request *mrq = host->mrq;
164 221
165 host->mrq = NULL; 222 host->mrq = NULL;
166 host->flags &= HOST_F_ACTIVE; 223 host->flags &= HOST_F_ACTIVE | HOST_F_DMA;
167 224
168 host->dma.len = 0; 225 host->dma.len = 0;
169 host->dma.dir = 0; 226 host->dma.dir = 0;
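The hunks above replace the hard-coded BCSR board accesses with callbacks supplied through struct au1xmmc_platform_data, returning -ENOSYS when a board provides none so the core can fall back to its own defaults. A board file would wire the hooks up roughly as below; the GPIO numbers and function names are made up for illustration, and the callback prototypes are assumed from how the driver calls them (they live in au1100_mmc.h, which is not part of this diff).

    /* Hypothetical board support for au1xmmc; GPIO numbers are placeholders. */
    static int myboard_card_inserted(void *mmc_host)
    {
            return !gpio_get_value(200);   /* active-low card detect */
    }

    static int myboard_card_readonly(void *mmc_host)
    {
            return gpio_get_value(201);    /* high = write-protect switch set */
    }

    static struct au1xmmc_platform_data myboard_mmc_data = {
            .card_inserted = myboard_card_inserted,
            .card_readonly = myboard_card_readonly,
    };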
@@ -174,8 +231,6 @@ static void au1xmmc_finish_request(struct au1xmmc_host *host)
174 231
175 host->status = HOST_S_IDLE; 232 host->status = HOST_S_IDLE;
176 233
177 bcsr->disk_leds |= (1 << 8);
178
179 mmc_request_done(host->mmc, mrq); 234 mmc_request_done(host->mmc, mrq);
180} 235}
181 236
@@ -235,18 +290,14 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
235 au_sync(); 290 au_sync();
236 291
237 /* Wait for the command to go on the line */ 292 /* Wait for the command to go on the line */
238 293 while (au_readl(HOST_CMD(host)) & SD_CMD_GO)
239 while(1) { 294 /* nop */;
240 if (!(au_readl(HOST_CMD(host)) & SD_CMD_GO))
241 break;
242 }
243 295
244 /* Wait for the command to come back */ 296 /* Wait for the command to come back */
245
246 if (wait) { 297 if (wait) {
247 u32 status = au_readl(HOST_STATUS(host)); 298 u32 status = au_readl(HOST_STATUS(host));
248 299
249 while(!(status & SD_STATUS_CR)) 300 while (!(status & SD_STATUS_CR))
250 status = au_readl(HOST_STATUS(host)); 301 status = au_readl(HOST_STATUS(host));
251 302
252 /* Clear the CR status */ 303 /* Clear the CR status */
@@ -260,12 +311,11 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
260 311
261static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) 312static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
262{ 313{
263
264 struct mmc_request *mrq = host->mrq; 314 struct mmc_request *mrq = host->mrq;
265 struct mmc_data *data; 315 struct mmc_data *data;
266 u32 crc; 316 u32 crc;
267 317
268 WARN_ON(host->status != HOST_S_DATA && host->status != HOST_S_STOP); 318 WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP));
269 319
270 if (host->mrq == NULL) 320 if (host->mrq == NULL)
271 return; 321 return;
@@ -276,15 +326,13 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
276 status = au_readl(HOST_STATUS(host)); 326 status = au_readl(HOST_STATUS(host));
277 327
278 /* The transaction is really over when the SD_STATUS_DB bit is clear */ 328 /* The transaction is really over when the SD_STATUS_DB bit is clear */
279 329 while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
280 while((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
281 status = au_readl(HOST_STATUS(host)); 330 status = au_readl(HOST_STATUS(host));
282 331
283 data->error = 0; 332 data->error = 0;
284 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir); 333 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
285 334
286 /* Process any errors */ 335 /* Process any errors */
287
288 crc = (status & (SD_STATUS_WC | SD_STATUS_RC)); 336 crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
289 if (host->flags & HOST_F_XMIT) 337 if (host->flags & HOST_F_XMIT)
290 crc |= ((status & 0x07) == 0x02) ? 0 : 1; 338 crc |= ((status & 0x07) == 0x02) ? 0 : 1;
@@ -299,16 +347,16 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
299 347
300 if (!data->error) { 348 if (!data->error) {
301 if (host->flags & HOST_F_DMA) { 349 if (host->flags & HOST_F_DMA) {
350#ifdef CONFIG_SOC_AU1200 /* DBDMA */
302 u32 chan = DMA_CHANNEL(host); 351 u32 chan = DMA_CHANNEL(host);
303 352
304 chan_tab_t *c = *((chan_tab_t **) chan); 353 chan_tab_t *c = *((chan_tab_t **)chan);
305 au1x_dma_chan_t *cp = c->chan_ptr; 354 au1x_dma_chan_t *cp = c->chan_ptr;
306 data->bytes_xfered = cp->ddma_bytecnt; 355 data->bytes_xfered = cp->ddma_bytecnt;
307 } 356#endif
308 else 357 } else
309 data->bytes_xfered = 358 data->bytes_xfered =
310 (data->blocks * data->blksz) - 359 (data->blocks * data->blksz) - host->pio.len;
311 host->pio.len;
312 } 360 }
313 361
314 au1xmmc_finish_request(host); 362 au1xmmc_finish_request(host);
@@ -316,7 +364,7 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
316 364
317static void au1xmmc_tasklet_data(unsigned long param) 365static void au1xmmc_tasklet_data(unsigned long param)
318{ 366{
319 struct au1xmmc_host *host = (struct au1xmmc_host *) param; 367 struct au1xmmc_host *host = (struct au1xmmc_host *)param;
320 368
321 u32 status = au_readl(HOST_STATUS(host)); 369 u32 status = au_readl(HOST_STATUS(host));
322 au1xmmc_data_complete(host, status); 370 au1xmmc_data_complete(host, status);
@@ -326,11 +374,10 @@ static void au1xmmc_tasklet_data(unsigned long param)
326 374
327static void au1xmmc_send_pio(struct au1xmmc_host *host) 375static void au1xmmc_send_pio(struct au1xmmc_host *host)
328{ 376{
329 377 struct mmc_data *data;
330 struct mmc_data *data = 0; 378 int sg_len, max, count;
331 int sg_len, max, count = 0; 379 unsigned char *sg_ptr, val;
332 unsigned char *sg_ptr; 380 u32 status;
333 u32 status = 0;
334 struct scatterlist *sg; 381 struct scatterlist *sg;
335 382
336 data = host->mrq->data; 383 data = host->mrq->data;
@@ -345,14 +392,12 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
345 /* This is the space left inside the buffer */ 392 /* This is the space left inside the buffer */
346 sg_len = data->sg[host->pio.index].length - host->pio.offset; 393 sg_len = data->sg[host->pio.index].length - host->pio.offset;
347 394
348 /* Check to if we need less then the size of the sg_buffer */ 395 /* Check if we need less than the size of the sg_buffer */
349
350 max = (sg_len > host->pio.len) ? host->pio.len : sg_len; 396 max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
351 if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER; 397 if (max > AU1XMMC_MAX_TRANSFER)
352 398 max = AU1XMMC_MAX_TRANSFER;
353 for(count = 0; count < max; count++ ) {
354 unsigned char val;
355 399
400 for (count = 0; count < max; count++) {
356 status = au_readl(HOST_STATUS(host)); 401 status = au_readl(HOST_STATUS(host));
357 402
358 if (!(status & SD_STATUS_TH)) 403 if (!(status & SD_STATUS_TH))
@@ -360,7 +405,7 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
360 405
361 val = *sg_ptr++; 406 val = *sg_ptr++;
362 407
363 au_writel((unsigned long) val, HOST_TXPORT(host)); 408 au_writel((unsigned long)val, HOST_TXPORT(host));
364 au_sync(); 409 au_sync();
365 } 410 }
366 411
@@ -384,11 +429,10 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
384 429
385static void au1xmmc_receive_pio(struct au1xmmc_host *host) 430static void au1xmmc_receive_pio(struct au1xmmc_host *host)
386{ 431{
387 432 struct mmc_data *data;
388 struct mmc_data *data = 0; 433 int max, count, sg_len = 0;
389 int sg_len = 0, max = 0, count = 0; 434 unsigned char *sg_ptr = NULL;
390 unsigned char *sg_ptr = 0; 435 u32 status, val;
391 u32 status = 0;
392 struct scatterlist *sg; 436 struct scatterlist *sg;
393 437
394 data = host->mrq->data; 438 data = host->mrq->data;
@@ -405,33 +449,33 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
405 /* This is the space left inside the buffer */ 449 /* This is the space left inside the buffer */
406 sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset; 450 sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
407 451
408 /* Check to if we need less then the size of the sg_buffer */ 452 /* Check if we need less than the size of the sg_buffer */
409 if (sg_len < max) max = sg_len; 453 if (sg_len < max)
454 max = sg_len;
410 } 455 }
411 456
412 if (max > AU1XMMC_MAX_TRANSFER) 457 if (max > AU1XMMC_MAX_TRANSFER)
413 max = AU1XMMC_MAX_TRANSFER; 458 max = AU1XMMC_MAX_TRANSFER;
414 459
415 for(count = 0; count < max; count++ ) { 460 for (count = 0; count < max; count++) {
416 u32 val;
417 status = au_readl(HOST_STATUS(host)); 461 status = au_readl(HOST_STATUS(host));
418 462
419 if (!(status & SD_STATUS_NE)) 463 if (!(status & SD_STATUS_NE))
420 break; 464 break;
421 465
422 if (status & SD_STATUS_RC) { 466 if (status & SD_STATUS_RC) {
423 DBG("RX CRC Error [%d + %d].\n", host->id, 467 DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
424 host->pio.len, count); 468 host->pio.len, count);
425 break; 469 break;
426 } 470 }
427 471
428 if (status & SD_STATUS_RO) { 472 if (status & SD_STATUS_RO) {
429 DBG("RX Overrun [%d + %d]\n", host->id, 473 DBG("RX Overrun [%d + %d]\n", host->pdev->id,
430 host->pio.len, count); 474 host->pio.len, count);
431 break; 475 break;
432 } 476 }
433 else if (status & SD_STATUS_RU) { 477 else if (status & SD_STATUS_RU) {
434 DBG("RX Underrun [%d + %d]\n", host->id, 478 DBG("RX Underrun [%d + %d]\n", host->pdev->id,
435 host->pio.len, count); 479 host->pio.len, count);
436 break; 480 break;
437 } 481 }
@@ -439,7 +483,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
439 val = au_readl(HOST_RXPORT(host)); 483 val = au_readl(HOST_RXPORT(host));
440 484
441 if (sg_ptr) 485 if (sg_ptr)
442 *sg_ptr++ = (unsigned char) (val & 0xFF); 486 *sg_ptr++ = (unsigned char)(val & 0xFF);
443 } 487 }
444 488
445 host->pio.len -= count; 489 host->pio.len -= count;
@@ -451,7 +495,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
451 } 495 }
452 496
453 if (host->pio.len == 0) { 497 if (host->pio.len == 0) {
454 //IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); 498 /* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
455 IRQ_OFF(host, SD_CONFIG_NE); 499 IRQ_OFF(host, SD_CONFIG_NE);
456 500
457 if (host->flags & HOST_F_STOP) 501 if (host->flags & HOST_F_STOP)
@@ -461,17 +505,15 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
461 } 505 }
462} 506}
463 507
464/* static void au1xmmc_cmd_complete 508/* This is called when a command has been completed - grab the response
465 This is called when a command has been completed - grab the response 509 * and check for errors. Then start the data transfer if it is indicated.
466 and check for errors. Then start the data transfer if it is indicated. 510 */
467*/
468
469static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) 511static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
470{ 512{
471
472 struct mmc_request *mrq = host->mrq; 513 struct mmc_request *mrq = host->mrq;
473 struct mmc_command *cmd; 514 struct mmc_command *cmd;
474 int trans; 515 u32 r[4];
516 int i, trans;
475 517
476 if (!host->mrq) 518 if (!host->mrq)
477 return; 519 return;
@@ -481,9 +523,6 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
481 523
482 if (cmd->flags & MMC_RSP_PRESENT) { 524 if (cmd->flags & MMC_RSP_PRESENT) {
483 if (cmd->flags & MMC_RSP_136) { 525 if (cmd->flags & MMC_RSP_136) {
484 u32 r[4];
485 int i;
486
487 r[0] = au_readl(host->iobase + SD_RESP3); 526 r[0] = au_readl(host->iobase + SD_RESP3);
488 r[1] = au_readl(host->iobase + SD_RESP2); 527 r[1] = au_readl(host->iobase + SD_RESP2);
489 r[2] = au_readl(host->iobase + SD_RESP1); 528 r[2] = au_readl(host->iobase + SD_RESP1);
@@ -491,10 +530,9 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
491 530
492 /* The CRC is omitted from the response, so really 531 /* The CRC is omitted from the response, so really
 493 * we only got 120 bits, but the engine expects 532
494 * 128 bits, so we have to shift things up 533 * 128 bits, so we have to shift things up.
495 */ 534 */
496 535 for (i = 0; i < 4; i++) {
497 for(i = 0; i < 4; i++) {
498 cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8; 536 cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
499 if (i != 3) 537 if (i != 3)
500 cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24; 538 cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
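The loop above rebuilds the 128-bit response the core expects from registers that each carry only 24 useful bits once the CRC byte is dropped: the low 24 bits of one register are shifted up and the top byte of the next register fills the gap. A small user-space demo with arbitrary test patterns makes the byte movement visible:

    #include <stdio.h>

    int main(void)
    {
            unsigned int r[4] = { 0x00112233, 0x44556677, 0x8899aabb, 0xccddeeff };
            unsigned int resp[4];
            int i;

            for (i = 0; i < 4; i++) {
                    resp[i] = (r[i] & 0x00ffffff) << 8;
                    if (i != 3)
                            resp[i] |= (r[i + 1] & 0xff000000) >> 24;
                    printf("resp[%d] = 0x%08x\n", i, resp[i]);
            }
            /* Prints 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00. */
            return 0;
    }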
@@ -505,22 +543,20 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
505 * our response omits the CRC, our data ends up 543 * our response omits the CRC, our data ends up
506 * being shifted 8 bits to the right. In this case, 544 * being shifted 8 bits to the right. In this case,
507 * that means that the OSR data starts at bit 31, 545 * that means that the OSR data starts at bit 31,
508 * so we can just read RESP0 and return that 546 * so we can just read RESP0 and return that.
509 */ 547 */
510 cmd->resp[0] = au_readl(host->iobase + SD_RESP0); 548 cmd->resp[0] = au_readl(host->iobase + SD_RESP0);
511 } 549 }
512 } 550 }
513 551
514 /* Figure out errors */ 552 /* Figure out errors */
515
516 if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC)) 553 if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
517 cmd->error = -EILSEQ; 554 cmd->error = -EILSEQ;
518 555
519 trans = host->flags & (HOST_F_XMIT | HOST_F_RECV); 556 trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);
520 557
521 if (!trans || cmd->error) { 558 if (!trans || cmd->error) {
522 559 IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
523 IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA|SD_CONFIG_RF);
524 tasklet_schedule(&host->finish_task); 560 tasklet_schedule(&host->finish_task);
525 return; 561 return;
526 } 562 }
@@ -528,6 +564,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
528 host->status = HOST_S_DATA; 564 host->status = HOST_S_DATA;
529 565
530 if (host->flags & HOST_F_DMA) { 566 if (host->flags & HOST_F_DMA) {
567#ifdef CONFIG_SOC_AU1200 /* DBDMA */
531 u32 channel = DMA_CHANNEL(host); 568 u32 channel = DMA_CHANNEL(host);
532 569
533 /* Start the DMA as soon as the buffer gets something in it */ 570 /* Start the DMA as soon as the buffer gets something in it */
@@ -540,23 +577,21 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
540 } 577 }
541 578
542 au1xxx_dbdma_start(channel); 579 au1xxx_dbdma_start(channel);
580#endif
543 } 581 }
544} 582}
545 583
546static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate) 584static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
547{ 585{
548
549 unsigned int pbus = get_au1x00_speed(); 586 unsigned int pbus = get_au1x00_speed();
550 unsigned int divisor; 587 unsigned int divisor;
551 u32 config; 588 u32 config;
552 589
553 /* From databook: 590 /* From databook:
554 divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1 591 * divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1
555 */ 592 */
556
557 pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2); 593 pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2);
558 pbus /= 2; 594 pbus /= 2;
559
560 divisor = ((pbus / rate) / 2) - 1; 595 divisor = ((pbus / rate) / 2) - 1;
561 596
562 config = au_readl(HOST_CONFIG(host)); 597 config = au_readl(HOST_CONFIG(host));
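The databook formula quoted above is easy to sanity-check with concrete numbers. Taking a 396 MHz core clock, a SYS_POWERCTRL divisor field of 0 and the 400 kHz identification clock as inputs (all three values are illustrative, not taken from this diff):

    #include <stdio.h>

    int main(void)
    {
            unsigned int cpuclock = 396000000;  /* example core clock */
            unsigned int powerctrl = 0;         /* SYS_POWERCTRL & 0x3 */
            unsigned int rate = 400000;         /* requested card clock */
            unsigned int pbus, divisor;

            pbus = cpuclock / (powerctrl + 2);  /* peripheral bus clock */
            pbus /= 2;
            divisor = ((pbus / rate) / 2) - 1;

            /* pbus = 99000000, divisor = 122 for these inputs. */
            printf("pbus = %u Hz, divisor = %u\n", pbus, divisor);
            return 0;
    }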
@@ -568,15 +603,11 @@ static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
568 au_sync(); 603 au_sync();
569} 604}
570 605
571static int 606static int au1xmmc_prepare_data(struct au1xmmc_host *host,
572au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) 607 struct mmc_data *data)
573{ 608{
574
575 int datalen = data->blocks * data->blksz; 609 int datalen = data->blocks * data->blksz;
576 610
577 if (dma != 0)
578 host->flags |= HOST_F_DMA;
579
580 if (data->flags & MMC_DATA_READ) 611 if (data->flags & MMC_DATA_READ)
581 host->flags |= HOST_F_RECV; 612 host->flags |= HOST_F_RECV;
582 else 613 else
@@ -596,12 +627,13 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
596 au_writel(data->blksz - 1, HOST_BLKSIZE(host)); 627 au_writel(data->blksz - 1, HOST_BLKSIZE(host));
597 628
598 if (host->flags & HOST_F_DMA) { 629 if (host->flags & HOST_F_DMA) {
630#ifdef CONFIG_SOC_AU1200 /* DBDMA */
599 int i; 631 int i;
600 u32 channel = DMA_CHANNEL(host); 632 u32 channel = DMA_CHANNEL(host);
601 633
602 au1xxx_dbdma_stop(channel); 634 au1xxx_dbdma_stop(channel);
603 635
604 for(i = 0; i < host->dma.len; i++) { 636 for (i = 0; i < host->dma.len; i++) {
605 u32 ret = 0, flags = DDMA_FLAGS_NOIE; 637 u32 ret = 0, flags = DDMA_FLAGS_NOIE;
606 struct scatterlist *sg = &data->sg[i]; 638 struct scatterlist *sg = &data->sg[i];
607 int sg_len = sg->length; 639 int sg_len = sg->length;
@@ -611,23 +643,21 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
611 if (i == host->dma.len - 1) 643 if (i == host->dma.len - 1)
612 flags = DDMA_FLAGS_IE; 644 flags = DDMA_FLAGS_IE;
613 645
614 if (host->flags & HOST_F_XMIT){ 646 if (host->flags & HOST_F_XMIT) {
615 ret = au1xxx_dbdma_put_source_flags(channel, 647 ret = au1xxx_dbdma_put_source_flags(channel,
616 (void *) sg_virt(sg), len, flags); 648 (void *)sg_virt(sg), len, flags);
617 } 649 } else {
618 else { 650 ret = au1xxx_dbdma_put_dest_flags(channel,
619 ret = au1xxx_dbdma_put_dest_flags(channel, 651 (void *)sg_virt(sg), len, flags);
620 (void *) sg_virt(sg),
621 len, flags);
622 } 652 }
623 653
624 if (!ret) 654 if (!ret)
625 goto dataerr; 655 goto dataerr;
626 656
627 datalen -= len; 657 datalen -= len;
628 } 658 }
629 } 659#endif
630 else { 660 } else {
631 host->pio.index = 0; 661 host->pio.index = 0;
632 host->pio.offset = 0; 662 host->pio.offset = 0;
633 host->pio.len = datalen; 663 host->pio.len = datalen;
@@ -636,25 +666,21 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
636 IRQ_ON(host, SD_CONFIG_TH); 666 IRQ_ON(host, SD_CONFIG_TH);
637 else 667 else
638 IRQ_ON(host, SD_CONFIG_NE); 668 IRQ_ON(host, SD_CONFIG_NE);
639 //IRQ_ON(host, SD_CONFIG_RA|SD_CONFIG_RF); 669 /* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */
640 } 670 }
641 671
642 return 0; 672 return 0;
643 673
644 dataerr: 674dataerr:
645 dma_unmap_sg(mmc_dev(host->mmc),data->sg,data->sg_len,host->dma.dir); 675 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
676 host->dma.dir);
646 return -ETIMEDOUT; 677 return -ETIMEDOUT;
647} 678}
648 679
649/* static void au1xmmc_request 680/* This actually starts a command or data transaction */
650 This actually starts a command or data transaction
651*/
652
653static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq) 681static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
654{ 682{
655
656 struct au1xmmc_host *host = mmc_priv(mmc); 683 struct au1xmmc_host *host = mmc_priv(mmc);
657 unsigned int flags = 0;
658 int ret = 0; 684 int ret = 0;
659 685
660 WARN_ON(irqs_disabled()); 686 WARN_ON(irqs_disabled());
@@ -663,11 +689,15 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
663 host->mrq = mrq; 689 host->mrq = mrq;
664 host->status = HOST_S_CMD; 690 host->status = HOST_S_CMD;
665 691
666 bcsr->disk_leds &= ~(1 << 8); 692 /* fail request immediately if no card is present */
693 if (0 == au1xmmc_card_inserted(mmc)) {
694 mrq->cmd->error = -ENOMEDIUM;
695 au1xmmc_finish_request(host);
696 return;
697 }
667 698
668 if (mrq->data) { 699 if (mrq->data) {
669 FLUSH_FIFO(host); 700 FLUSH_FIFO(host);
670 flags = mrq->data->flags;
671 ret = au1xmmc_prepare_data(host, mrq->data); 701 ret = au1xmmc_prepare_data(host, mrq->data);
672 } 702 }
673 703
@@ -682,7 +712,6 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
682 712
683static void au1xmmc_reset_controller(struct au1xmmc_host *host) 713static void au1xmmc_reset_controller(struct au1xmmc_host *host)
684{ 714{
685
686 /* Apply the clock */ 715 /* Apply the clock */
687 au_writel(SD_ENABLE_CE, HOST_ENABLE(host)); 716 au_writel(SD_ENABLE_CE, HOST_ENABLE(host));
688 au_sync_delay(1); 717 au_sync_delay(1);
@@ -712,9 +741,10 @@ static void au1xmmc_reset_controller(struct au1xmmc_host *host)
712} 741}
713 742
714 743
715static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios) 744static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
716{ 745{
717 struct au1xmmc_host *host = mmc_priv(mmc); 746 struct au1xmmc_host *host = mmc_priv(mmc);
747 u32 config2;
718 748
719 if (ios->power_mode == MMC_POWER_OFF) 749 if (ios->power_mode == MMC_POWER_OFF)
720 au1xmmc_set_power(host, 0); 750 au1xmmc_set_power(host, 0);
@@ -726,21 +756,18 @@ static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
726 au1xmmc_set_clock(host, ios->clock); 756 au1xmmc_set_clock(host, ios->clock);
727 host->clock = ios->clock; 757 host->clock = ios->clock;
728 } 758 }
729}
730
731static void au1xmmc_dma_callback(int irq, void *dev_id)
732{
733 struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id;
734
735 /* Avoid spurious interrupts */
736 759
737 if (!host->mrq) 760 config2 = au_readl(HOST_CONFIG2(host));
738 return; 761 switch (ios->bus_width) {
739 762 case MMC_BUS_WIDTH_4:
740 if (host->flags & HOST_F_STOP) 763 config2 |= SD_CONFIG2_WB;
741 SEND_STOP(host); 764 break;
742 765 case MMC_BUS_WIDTH_1:
743 tasklet_schedule(&host->data_task); 766 config2 &= ~SD_CONFIG2_WB;
767 break;
768 }
769 au_writel(config2, HOST_CONFIG2(host));
770 au_sync();
744} 771}
745 772
746#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT) 773#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
@@ -749,245 +776,354 @@ static void au1xmmc_dma_callback(int irq, void *dev_id)
749 776
750static irqreturn_t au1xmmc_irq(int irq, void *dev_id) 777static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
751{ 778{
752 779 struct au1xmmc_host *host = dev_id;
753 u32 status; 780 u32 status;
754 int i, ret = 0;
755
756 disable_irq(AU1100_SD_IRQ);
757 781
758 for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { 782 status = au_readl(HOST_STATUS(host));
759 struct au1xmmc_host * host = au1xmmc_hosts[i];
760 u32 handled = 1;
761 783
762 status = au_readl(HOST_STATUS(host)); 784 if (!(status & SD_STATUS_I))
785 return IRQ_NONE; /* not ours */
763 786
764 if (host->mrq && (status & STATUS_TIMEOUT)) { 787 if (status & SD_STATUS_SI) /* SDIO */
765 if (status & SD_STATUS_RAT) 788 mmc_signal_sdio_irq(host->mmc);
766 host->mrq->cmd->error = -ETIMEDOUT;
767 789
768 else if (status & SD_STATUS_DT) 790 if (host->mrq && (status & STATUS_TIMEOUT)) {
769 host->mrq->data->error = -ETIMEDOUT; 791 if (status & SD_STATUS_RAT)
792 host->mrq->cmd->error = -ETIMEDOUT;
793 else if (status & SD_STATUS_DT)
794 host->mrq->data->error = -ETIMEDOUT;
770 795
771 /* In PIO mode, interrupts might still be enabled */ 796 /* In PIO mode, interrupts might still be enabled */
772 IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH); 797 IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);
773 798
774 //IRQ_OFF(host, SD_CONFIG_TH|SD_CONFIG_RA|SD_CONFIG_RF); 799 /* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */
775 tasklet_schedule(&host->finish_task); 800 tasklet_schedule(&host->finish_task);
776 } 801 }
777#if 0 802#if 0
778 else if (status & SD_STATUS_DD) { 803 else if (status & SD_STATUS_DD) {
779 804 /* Sometimes we get a DD before a NE in PIO mode */
780 /* Sometimes we get a DD before a NE in PIO mode */ 805 if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE))
781 806 au1xmmc_receive_pio(host);
782 if (!(host->flags & HOST_F_DMA) && 807 else {
783 (status & SD_STATUS_NE)) 808 au1xmmc_data_complete(host, status);
784 au1xmmc_receive_pio(host); 809 /* tasklet_schedule(&host->data_task); */
785 else {
786 au1xmmc_data_complete(host, status);
787 //tasklet_schedule(&host->data_task);
788 }
789 } 810 }
811 }
790#endif 812#endif
791 else if (status & (SD_STATUS_CR)) { 813 else if (status & SD_STATUS_CR) {
792 if (host->status == HOST_S_CMD) 814 if (host->status == HOST_S_CMD)
793 au1xmmc_cmd_complete(host,status); 815 au1xmmc_cmd_complete(host, status);
794 } 816
795 else if (!(host->flags & HOST_F_DMA)) { 817 } else if (!(host->flags & HOST_F_DMA)) {
796 if ((host->flags & HOST_F_XMIT) && 818 if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT))
797 (status & STATUS_DATA_OUT)) 819 au1xmmc_send_pio(host);
798 au1xmmc_send_pio(host); 820 else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN))
799 else if ((host->flags & HOST_F_RECV) && 821 au1xmmc_receive_pio(host);
800 (status & STATUS_DATA_IN)) 822
801 au1xmmc_receive_pio(host); 823 } else if (status & 0x203F3C70) {
802 } 824 DBG("Unhandled status %8.8x\n", host->pdev->id,
803 else if (status & 0x203FBC70) { 825 status);
804 DBG("Unhandled status %8.8x\n", host->id, status);
805 handled = 0;
806 }
807
808 au_writel(status, HOST_STATUS(host));
809 au_sync();
810
811 ret |= handled;
812 } 826 }
813 827
814 enable_irq(AU1100_SD_IRQ); 828 au_writel(status, HOST_STATUS(host));
815 return ret; 829 au_sync();
830
831 return IRQ_HANDLED;
816} 832}
817 833
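Worth noting about the rewritten handler above: since the probe code now requests the line with IRQF_SHARED, returning IRQ_NONE when SD_STATUS_I is clear is what lets the other SD controller on the same line get a look at the interrupt. The generic shape of such a handler, with placeholder names rather than this driver's, is:

    /* Sketch of a shared-IRQ handler: claim the interrupt only when the
     * device's own "interrupt pending" bit is set. */
    static irqreturn_t example_irq(int irq, void *dev_id)
    {
            struct example_dev *dev = dev_id;
            u32 status = readl(dev->regs + EXAMPLE_STATUS);

            if (!(status & EXAMPLE_STATUS_PENDING))
                    return IRQ_NONE;        /* another device raised it */

            /* ... handle the event ... */

            writel(status, dev->regs + EXAMPLE_STATUS);  /* acknowledge */
            return IRQ_HANDLED;
    }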
818static void au1xmmc_poll_event(unsigned long arg) 834#ifdef CONFIG_SOC_AU1200
819{ 835/* 8bit memory DMA device */
820 struct au1xmmc_host *host = (struct au1xmmc_host *) arg; 836static dbdev_tab_t au1xmmc_mem_dbdev = {
837 .dev_id = DSCR_CMD0_ALWAYS,
838 .dev_flags = DEV_FLAGS_ANYUSE,
839 .dev_tsize = 0,
840 .dev_devwidth = 8,
841 .dev_physaddr = 0x00000000,
842 .dev_intlevel = 0,
843 .dev_intpolarity = 0,
844};
845static int memid;
821 846
822 int card = au1xmmc_card_inserted(host); 847static void au1xmmc_dbdma_callback(int irq, void *dev_id)
823 int controller = (host->flags & HOST_F_ACTIVE) ? 1 : 0; 848{
849 struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id;
824 850
825 if (card != controller) { 851 /* Avoid spurious interrupts */
826 host->flags &= ~HOST_F_ACTIVE; 852 if (!host->mrq)
827 if (card) host->flags |= HOST_F_ACTIVE; 853 return;
828 mmc_detect_change(host->mmc, 0);
829 }
830 854
831 if (host->mrq != NULL) { 855 if (host->flags & HOST_F_STOP)
832 u32 status = au_readl(HOST_STATUS(host)); 856 SEND_STOP(host);
833 DBG("PENDING - %8.8x\n", host->id, status);
834 }
835 857
836 mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT); 858 tasklet_schedule(&host->data_task);
837} 859}
838 860
839static dbdev_tab_t au1xmmc_mem_dbdev = 861static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
840{
841 DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 8, 0x00000000, 0, 0
842};
843
844static void au1xmmc_init_dma(struct au1xmmc_host *host)
845{ 862{
863 struct resource *res;
864 int txid, rxid;
865
866 res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0);
867 if (!res)
868 return -ENODEV;
869 txid = res->start;
870
871 res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1);
872 if (!res)
873 return -ENODEV;
874 rxid = res->start;
875
876 if (!memid)
877 return -ENODEV;
878
879 host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid,
880 au1xmmc_dbdma_callback, (void *)host);
881 if (!host->tx_chan) {
882 dev_err(&host->pdev->dev, "cannot allocate TX DMA\n");
883 return -ENODEV;
884 }
846 885
847 u32 rxchan, txchan; 886 host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid,
848 887 au1xmmc_dbdma_callback, (void *)host);
849 int txid = au1xmmc_card_table[host->id].tx_devid; 888 if (!host->rx_chan) {
850 int rxid = au1xmmc_card_table[host->id].rx_devid; 889 dev_err(&host->pdev->dev, "cannot allocate RX DMA\n");
890 au1xxx_dbdma_chan_free(host->tx_chan);
891 return -ENODEV;
892 }
851 893
852 /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride 894 au1xxx_dbdma_set_devwidth(host->tx_chan, 8);
853 of 8 bits. And since devices are shared, we need to create 895 au1xxx_dbdma_set_devwidth(host->rx_chan, 8);
854 our own to avoid freaking out other devices
855 */
856 896
857 int memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); 897 au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT);
898 au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);
858 899
859 txchan = au1xxx_dbdma_chan_alloc(memid, txid, 900 /* DBDMA is good to go */
860 au1xmmc_dma_callback, (void *) host); 901 host->flags |= HOST_F_DMA;
861 902
862 rxchan = au1xxx_dbdma_chan_alloc(rxid, memid, 903 return 0;
863 au1xmmc_dma_callback, (void *) host); 904}
864 905
865 au1xxx_dbdma_set_devwidth(txchan, 8); 906static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)
866 au1xxx_dbdma_set_devwidth(rxchan, 8); 907{
908 if (host->flags & HOST_F_DMA) {
909 host->flags &= ~HOST_F_DMA;
910 au1xxx_dbdma_chan_free(host->tx_chan);
911 au1xxx_dbdma_chan_free(host->rx_chan);
912 }
913}
914#endif
867 915
868 au1xxx_dbdma_ring_alloc(txchan, AU1XMMC_DESCRIPTOR_COUNT); 916static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en)
869 au1xxx_dbdma_ring_alloc(rxchan, AU1XMMC_DESCRIPTOR_COUNT); 917{
918 struct au1xmmc_host *host = mmc_priv(mmc);
870 919
871 host->tx_chan = txchan; 920 if (en)
872 host->rx_chan = rxchan; 921 IRQ_ON(host, SD_CONFIG_SI);
922 else
923 IRQ_OFF(host, SD_CONFIG_SI);
873} 924}
874 925
875static const struct mmc_host_ops au1xmmc_ops = { 926static const struct mmc_host_ops au1xmmc_ops = {
876 .request = au1xmmc_request, 927 .request = au1xmmc_request,
877 .set_ios = au1xmmc_set_ios, 928 .set_ios = au1xmmc_set_ios,
878 .get_ro = au1xmmc_card_readonly, 929 .get_ro = au1xmmc_card_readonly,
930 .get_cd = au1xmmc_card_inserted,
931 .enable_sdio_irq = au1xmmc_enable_sdio_irq,
879}; 932};
880 933
881static int __devinit au1xmmc_probe(struct platform_device *pdev) 934static int __devinit au1xmmc_probe(struct platform_device *pdev)
882{ 935{
936 struct mmc_host *mmc;
937 struct au1xmmc_host *host;
938 struct resource *r;
939 int ret;
940
941 mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
942 if (!mmc) {
943 dev_err(&pdev->dev, "no memory for mmc_host\n");
944 ret = -ENOMEM;
945 goto out0;
946 }
883 947
884 int i, ret = 0; 948 host = mmc_priv(mmc);
885 949 host->mmc = mmc;
886 /* THe interrupt is shared among all controllers */ 950 host->platdata = pdev->dev.platform_data;
887 ret = request_irq(AU1100_SD_IRQ, au1xmmc_irq, IRQF_DISABLED, "MMC", 0); 951 host->pdev = pdev;
888 952
889 if (ret) { 953 ret = -ENODEV;
890 printk(DRIVER_NAME "ERROR: Couldn't get int %d: %d\n", 954 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
891 AU1100_SD_IRQ, ret); 955 if (!r) {
892 return -ENXIO; 956 dev_err(&pdev->dev, "no mmio defined\n");
957 goto out1;
893 } 958 }
894 959
895 disable_irq(AU1100_SD_IRQ); 960 host->ioarea = request_mem_region(r->start, r->end - r->start + 1,
961 pdev->name);
962 if (!host->ioarea) {
963 dev_err(&pdev->dev, "mmio already in use\n");
964 goto out1;
965 }
896 966
897 for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { 967 host->iobase = (unsigned long)ioremap(r->start, 0x3c);
898 struct mmc_host *mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev); 968 if (!host->iobase) {
899 struct au1xmmc_host *host = 0; 969 dev_err(&pdev->dev, "cannot remap mmio\n");
970 goto out2;
971 }
900 972
901 if (!mmc) { 973 r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
902 printk(DRIVER_NAME "ERROR: no mem for host %d\n", i); 974 if (!r) {
903 au1xmmc_hosts[i] = 0; 975 dev_err(&pdev->dev, "no IRQ defined\n");
904 continue; 976 goto out3;
905 } 977 }
906 978
907 mmc->ops = &au1xmmc_ops; 979 host->irq = r->start;
980 /* IRQ is shared among both SD controllers */
981 ret = request_irq(host->irq, au1xmmc_irq, IRQF_SHARED,
982 DRIVER_NAME, host);
983 if (ret) {
984 dev_err(&pdev->dev, "cannot grab IRQ\n");
985 goto out3;
986 }
908 987
909 mmc->f_min = 450000; 988 mmc->ops = &au1xmmc_ops;
910 mmc->f_max = 24000000;
911 989
912 mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; 990 mmc->f_min = 450000;
913 mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; 991 mmc->f_max = 24000000;
914 992
915 mmc->max_blk_size = 2048; 993 mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
916 mmc->max_blk_count = 512; 994 mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;
917 995
918 mmc->ocr_avail = AU1XMMC_OCR; 996 mmc->max_blk_size = 2048;
997 mmc->max_blk_count = 512;
919 998
920 host = mmc_priv(mmc); 999 mmc->ocr_avail = AU1XMMC_OCR;
921 host->mmc = mmc; 1000 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
922 1001
923 host->id = i; 1002 host->status = HOST_S_IDLE;
924 host->iobase = au1xmmc_card_table[host->id].iobase;
925 host->clock = 0;
926 host->power_mode = MMC_POWER_OFF;
927 1003
928 host->flags = au1xmmc_card_inserted(host) ? HOST_F_ACTIVE : 0; 1004 /* board-specific carddetect setup, if any */
929 host->status = HOST_S_IDLE; 1005 if (host->platdata && host->platdata->cd_setup) {
1006 ret = host->platdata->cd_setup(mmc, 1);
1007 if (ret) {
1008 dev_warn(&pdev->dev, "board CD setup failed\n");
1009 mmc->caps |= MMC_CAP_NEEDS_POLL;
1010 }
1011 } else
1012 mmc->caps |= MMC_CAP_NEEDS_POLL;
930 1013
931 init_timer(&host->timer); 1014 tasklet_init(&host->data_task, au1xmmc_tasklet_data,
1015 (unsigned long)host);
932 1016
933 host->timer.function = au1xmmc_poll_event; 1017 tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
934 host->timer.data = (unsigned long) host; 1018 (unsigned long)host);
935 host->timer.expires = jiffies + AU1XMMC_DETECT_TIMEOUT;
936 1019
937 tasklet_init(&host->data_task, au1xmmc_tasklet_data, 1020#ifdef CONFIG_SOC_AU1200
938 (unsigned long) host); 1021 ret = au1xmmc_dbdma_init(host);
1022 if (ret)
1023 printk(KERN_INFO DRIVER_NAME ": DBDMA init failed; using PIO\n");
1024#endif
939 1025
940 tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, 1026#ifdef CONFIG_LEDS_CLASS
941 (unsigned long) host); 1027 if (host->platdata && host->platdata->led) {
1028 struct led_classdev *led = host->platdata->led;
1029 led->name = mmc_hostname(mmc);
1030 led->brightness = LED_OFF;
1031 led->default_trigger = mmc_hostname(mmc);
1032 ret = led_classdev_register(mmc_dev(mmc), led);
1033 if (ret)
1034 goto out5;
1035 }
1036#endif
942 1037
943 spin_lock_init(&host->lock); 1038 au1xmmc_reset_controller(host);
944 1039
945 if (dma != 0) 1040 ret = mmc_add_host(mmc);
946 au1xmmc_init_dma(host); 1041 if (ret) {
1042 dev_err(&pdev->dev, "cannot add mmc host\n");
1043 goto out6;
1044 }
947 1045
948 au1xmmc_reset_controller(host); 1046 platform_set_drvdata(pdev, mmc);
949 1047
950 mmc_add_host(mmc); 1048 printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X"
951 au1xmmc_hosts[i] = host; 1049 " (mode=%s)\n", pdev->id, host->iobase,
1050 host->flags & HOST_F_DMA ? "dma" : "pio");
952 1051
953 add_timer(&host->timer); 1052 return 0; /* all ok */
954 1053
955 printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X (mode=%s)\n", 1054out6:
956 host->id, host->iobase, dma ? "dma" : "pio"); 1055#ifdef CONFIG_LEDS_CLASS
957 } 1056 if (host->platdata && host->platdata->led)
1057 led_classdev_unregister(host->platdata->led);
1058out5:
1059#endif
1060 au_writel(0, HOST_ENABLE(host));
1061 au_writel(0, HOST_CONFIG(host));
1062 au_writel(0, HOST_CONFIG2(host));
1063 au_sync();
958 1064
959 enable_irq(AU1100_SD_IRQ); 1065#ifdef CONFIG_SOC_AU1200
1066 au1xmmc_dbdma_shutdown(host);
1067#endif
960 1068
961 return 0; 1069 tasklet_kill(&host->data_task);
1070 tasklet_kill(&host->finish_task);
1071
1072 if (host->platdata && host->platdata->cd_setup &&
1073 !(mmc->caps & MMC_CAP_NEEDS_POLL))
1074 host->platdata->cd_setup(mmc, 0);
1075
1076 free_irq(host->irq, host);
1077out3:
1078 iounmap((void *)host->iobase);
1079out2:
1080 release_resource(host->ioarea);
1081 kfree(host->ioarea);
1082out1:
1083 mmc_free_host(mmc);
1084out0:
1085 return ret;
962} 1086}
963 1087
964static int __devexit au1xmmc_remove(struct platform_device *pdev) 1088static int __devexit au1xmmc_remove(struct platform_device *pdev)
965{ 1089{
1090 struct mmc_host *mmc = platform_get_drvdata(pdev);
1091 struct au1xmmc_host *host;
1092
1093 if (mmc) {
1094 host = mmc_priv(mmc);
966 1095
967 int i; 1096 mmc_remove_host(mmc);
968 1097
969 disable_irq(AU1100_SD_IRQ); 1098#ifdef CONFIG_LEDS_CLASS
1099 if (host->platdata && host->platdata->led)
1100 led_classdev_unregister(host->platdata->led);
1101#endif
970 1102
971 for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { 1103 if (host->platdata && host->platdata->cd_setup &&
972 struct au1xmmc_host *host = au1xmmc_hosts[i]; 1104 !(mmc->caps & MMC_CAP_NEEDS_POLL))
973 if (!host) continue; 1105 host->platdata->cd_setup(mmc, 0);
1106
1107 au_writel(0, HOST_ENABLE(host));
1108 au_writel(0, HOST_CONFIG(host));
1109 au_writel(0, HOST_CONFIG2(host));
1110 au_sync();
974 1111
975 tasklet_kill(&host->data_task); 1112 tasklet_kill(&host->data_task);
976 tasklet_kill(&host->finish_task); 1113 tasklet_kill(&host->finish_task);
977 1114
978 del_timer_sync(&host->timer); 1115#ifdef CONFIG_SOC_AU1200
1116 au1xmmc_dbdma_shutdown(host);
1117#endif
979 au1xmmc_set_power(host, 0); 1118 au1xmmc_set_power(host, 0);
980 1119
981 mmc_remove_host(host->mmc); 1120 free_irq(host->irq, host);
982 1121 iounmap((void *)host->iobase);
983 au1xxx_dbdma_chan_free(host->tx_chan); 1122 release_resource(host->ioarea);
984 au1xxx_dbdma_chan_free(host->rx_chan); 1123 kfree(host->ioarea);
985 1124
986 au_writel(0x0, HOST_ENABLE(host)); 1125 mmc_free_host(mmc);
987 au_sync();
988 } 1126 }
989
990 free_irq(AU1100_SD_IRQ, 0);
991 return 0; 1127 return 0;
992} 1128}
993 1129
@@ -1004,21 +1140,31 @@ static struct platform_driver au1xmmc_driver = {
1004 1140
1005static int __init au1xmmc_init(void) 1141static int __init au1xmmc_init(void)
1006{ 1142{
1143#ifdef CONFIG_SOC_AU1200
1144 /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
1145 * of 8 bits. And since devices are shared, we need to create
1146 * our own to avoid freaking out other devices.
1147 */
1148 memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
1149 if (!memid)
1150 printk(KERN_ERR "au1xmmc: cannot add memory dbdma dev\n");
1151#endif
1007 return platform_driver_register(&au1xmmc_driver); 1152 return platform_driver_register(&au1xmmc_driver);
1008} 1153}
1009 1154
1010static void __exit au1xmmc_exit(void) 1155static void __exit au1xmmc_exit(void)
1011{ 1156{
1157#ifdef CONFIG_SOC_AU1200
1158 if (memid)
1159 au1xxx_ddma_del_device(memid);
1160#endif
1012 platform_driver_unregister(&au1xmmc_driver); 1161 platform_driver_unregister(&au1xmmc_driver);
1013} 1162}
1014 1163
1015module_init(au1xmmc_init); 1164module_init(au1xmmc_init);
1016module_exit(au1xmmc_exit); 1165module_exit(au1xmmc_exit);
1017 1166
1018#ifdef MODULE
1019MODULE_AUTHOR("Advanced Micro Devices, Inc"); 1167MODULE_AUTHOR("Advanced Micro Devices, Inc");
1020MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX"); 1168MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
1021MODULE_LICENSE("GPL"); 1169MODULE_LICENSE("GPL");
1022MODULE_ALIAS("platform:au1xxx-mmc"); 1170MODULE_ALIAS("platform:au1xxx-mmc");
1023#endif
1024
diff --git a/drivers/mmc/host/au1xmmc.h b/drivers/mmc/host/au1xmmc.h
deleted file mode 100644
index 341cbdf0baca..000000000000
--- a/drivers/mmc/host/au1xmmc.h
+++ /dev/null
@@ -1,96 +0,0 @@
1#ifndef _AU1XMMC_H_
2#define _AU1XMMC_H_
3
4/* Hardware definitions */
5
6#define AU1XMMC_DESCRIPTOR_COUNT 1
7#define AU1XMMC_DESCRIPTOR_SIZE 2048
8
9#define AU1XMMC_OCR ( MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
10 MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
11 MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)
12
13/* Easy access macros */
14
15#define HOST_STATUS(h) ((h)->iobase + SD_STATUS)
16#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG)
17#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE)
18#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT)
19#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT)
20#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG)
21#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE)
22#define HOST_CMD(h) ((h)->iobase + SD_CMD)
23#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2)
24#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT)
25#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG)
26
27#define DMA_CHANNEL(h) \
28 ( ((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)
29
30/* This gives us a hard value for the stop command that we can write directly
31 * to the command register
32 */
33
34#define STOP_CMD (SD_CMD_RT_1B|SD_CMD_CT_7|(0xC << SD_CMD_CI_SHIFT)|SD_CMD_GO)
35
36/* This is the set of interrupts that we configure by default */
37
38#if 0
39#define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_DD | \
40 SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I)
41#endif
42
43#define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | \
44 SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I)
45/* The poll event (looking for insert/remove events runs twice a second */
46#define AU1XMMC_DETECT_TIMEOUT (HZ/2)
47
48struct au1xmmc_host {
49 struct mmc_host *mmc;
50 struct mmc_request *mrq;
51
52 u32 id;
53
54 u32 flags;
55 u32 iobase;
56 u32 clock;
57 u32 bus_width;
58 u32 power_mode;
59
60 int status;
61
62 struct {
63 int len;
64 int dir;
65 } dma;
66
67 struct {
68 int index;
69 int offset;
70 int len;
71 } pio;
72
73 u32 tx_chan;
74 u32 rx_chan;
75
76 struct timer_list timer;
77 struct tasklet_struct finish_task;
78 struct tasklet_struct data_task;
79
80 spinlock_t lock;
81};
82
83/* Status flags used by the host structure */
84
85#define HOST_F_XMIT 0x0001
86#define HOST_F_RECV 0x0002
87#define HOST_F_DMA 0x0010
88#define HOST_F_ACTIVE 0x0100
89#define HOST_F_STOP 0x1000
90
91#define HOST_S_IDLE 0x0001
92#define HOST_S_CMD 0x0002
93#define HOST_S_DATA 0x0003
94#define HOST_S_STOP 0x0004
95
96#endif
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index eed211b2ac70..5e880c0f1349 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -892,9 +892,12 @@ static int imxmci_get_ro(struct mmc_host *mmc)
892 struct imxmci_host *host = mmc_priv(mmc); 892 struct imxmci_host *host = mmc_priv(mmc);
893 893
894 if (host->pdata && host->pdata->get_ro) 894 if (host->pdata && host->pdata->get_ro)
895 return host->pdata->get_ro(mmc_dev(mmc)); 895 return !!host->pdata->get_ro(mmc_dev(mmc));
896 /* Host doesn't support read only detection so assume writeable */ 896 /*
897 return 0; 897 * Board doesn't support read only detection; let the mmc core
898 * decide what to do.
899 */
900 return -ENOSYS;
898} 901}
899 902
900 903
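The same pattern recurs in the mmc_spi and pxamci hunks below: ->get_ro() (and the new ->get_cd()) return 0 or 1 when the board supplies a hook, and a negative errno such as -ENOSYS when it does not, so the mmc core decides what to do instead of the driver silently reporting "writable". In outline, with the host and pdata names purely illustrative:

    static int example_get_ro(struct mmc_host *mmc)
    {
            struct example_host *host = mmc_priv(mmc);

            if (host->pdata && host->pdata->get_ro)
                    return !!host->pdata->get_ro(mmc_dev(mmc));  /* 0 or 1 */

            return -ENOSYS;  /* no hook: let the core decide */
    }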
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 35508584ac2a..41cc63360e43 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1126,16 +1126,28 @@ static int mmc_spi_get_ro(struct mmc_host *mmc)
1126 struct mmc_spi_host *host = mmc_priv(mmc); 1126 struct mmc_spi_host *host = mmc_priv(mmc);
1127 1127
1128 if (host->pdata && host->pdata->get_ro) 1128 if (host->pdata && host->pdata->get_ro)
1129 return host->pdata->get_ro(mmc->parent); 1129 return !!host->pdata->get_ro(mmc->parent);
1130 /* board doesn't support read only detection; assume writeable */ 1130 /*
1131 return 0; 1131 * Board doesn't support read only detection; let the mmc core
1132 * decide what to do.
1133 */
1134 return -ENOSYS;
1132} 1135}
1133 1136
1137static int mmc_spi_get_cd(struct mmc_host *mmc)
1138{
1139 struct mmc_spi_host *host = mmc_priv(mmc);
1140
1141 if (host->pdata && host->pdata->get_cd)
1142 return !!host->pdata->get_cd(mmc->parent);
1143 return -ENOSYS;
1144}
1134 1145
1135static const struct mmc_host_ops mmc_spi_ops = { 1146static const struct mmc_host_ops mmc_spi_ops = {
1136 .request = mmc_spi_request, 1147 .request = mmc_spi_request,
1137 .set_ios = mmc_spi_set_ios, 1148 .set_ios = mmc_spi_set_ios,
1138 .get_ro = mmc_spi_get_ro, 1149 .get_ro = mmc_spi_get_ro,
1150 .get_cd = mmc_spi_get_cd,
1139}; 1151};
1140 1152
1141 1153
@@ -1240,10 +1252,7 @@ static int mmc_spi_probe(struct spi_device *spi)
1240 mmc->ops = &mmc_spi_ops; 1252 mmc->ops = &mmc_spi_ops;
1241 mmc->max_blk_size = MMC_SPI_BLOCKSIZE; 1253 mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
1242 1254
1243 /* As long as we keep track of the number of successfully 1255 mmc->caps = MMC_CAP_SPI;
1244 * transmitted blocks, we're good for multiwrite.
1245 */
1246 mmc->caps = MMC_CAP_SPI | MMC_CAP_MULTIWRITE;
1247 1256
1248 /* SPI doesn't need the lowspeed device identification thing for 1257 /* SPI doesn't need the lowspeed device identification thing for
1249 * MMC or SD cards, since it never comes up in open drain mode. 1258 * MMC or SD cards, since it never comes up in open drain mode.
@@ -1319,17 +1328,23 @@ static int mmc_spi_probe(struct spi_device *spi)
1319 goto fail_glue_init; 1328 goto fail_glue_init;
1320 } 1329 }
1321 1330
1331 /* pass platform capabilities, if any */
1332 if (host->pdata)
1333 mmc->caps |= host->pdata->caps;
1334
1322 status = mmc_add_host(mmc); 1335 status = mmc_add_host(mmc);
1323 if (status != 0) 1336 if (status != 0)
1324 goto fail_add_host; 1337 goto fail_add_host;
1325 1338
1326 dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n", 1339 dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
1327 mmc->class_dev.bus_id, 1340 mmc->class_dev.bus_id,
1328 host->dma_dev ? "" : ", no DMA", 1341 host->dma_dev ? "" : ", no DMA",
1329 (host->pdata && host->pdata->get_ro) 1342 (host->pdata && host->pdata->get_ro)
1330 ? "" : ", no WP", 1343 ? "" : ", no WP",
1331 (host->pdata && host->pdata->setpower) 1344 (host->pdata && host->pdata->setpower)
1332 ? "" : ", no poweroff"); 1345 ? "" : ", no poweroff",
1346 (mmc->caps & MMC_CAP_NEEDS_POLL)
1347 ? ", cd polling" : "");
1333 return 0; 1348 return 0;
1334 1349
1335fail_add_host: 1350fail_add_host:
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index da5fecad74d9..696cf3647ceb 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -535,7 +535,6 @@ static int mmci_probe(struct amba_device *dev, void *id)
535 mmc->f_min = (host->mclk + 511) / 512; 535 mmc->f_min = (host->mclk + 511) / 512;
536 mmc->f_max = min(host->mclk, fmax); 536 mmc->f_max = min(host->mclk, fmax);
537 mmc->ocr_avail = plat->ocr_mask; 537 mmc->ocr_avail = plat->ocr_mask;
538 mmc->caps = MMC_CAP_MULTIWRITE;
539 538
540 /* 539 /*
541 * We can do SGIO 540 * We can do SGIO
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 549517c35675..dbc26eb6a89e 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1317,7 +1317,7 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
1317 1317
1318 host->slots[id] = slot; 1318 host->slots[id] = slot;
1319 1319
1320 mmc->caps = MMC_CAP_MULTIWRITE; 1320 mmc->caps = 0;
1321 if (host->pdata->conf.wire4) 1321 if (host->pdata->conf.wire4)
1322 mmc->caps |= MMC_CAP_4_BIT_DATA; 1322 mmc->caps |= MMC_CAP_4_BIT_DATA;
1323 1323
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index d89475d36988..d39f59738866 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -374,9 +374,12 @@ static int pxamci_get_ro(struct mmc_host *mmc)
374 struct pxamci_host *host = mmc_priv(mmc); 374 struct pxamci_host *host = mmc_priv(mmc);
375 375
376 if (host->pdata && host->pdata->get_ro) 376 if (host->pdata && host->pdata->get_ro)
377 return host->pdata->get_ro(mmc_dev(mmc)); 377 return !!host->pdata->get_ro(mmc_dev(mmc));
378 /* Host doesn't support read only detection so assume writeable */ 378 /*
379 return 0; 379 * Board doesn't support read only detection; let the mmc core
380 * decide what to do.
381 */
382 return -ENOSYS;
380} 383}
381 384
382static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 385static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
new file mode 100644
index 000000000000..6a1e4994b724
--- /dev/null
+++ b/drivers/mmc/host/s3cmci.c
@@ -0,0 +1,1446 @@
1/*
 2 * linux/drivers/mmc/s3cmci.c - Samsung S3C MCI driver
3 *
4 * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/dma-mapping.h>
13#include <linux/clk.h>
14#include <linux/mmc/host.h>
15#include <linux/platform_device.h>
16#include <linux/irq.h>
17#include <linux/io.h>
18
19#include <asm/dma.h>
20
21#include <asm/arch/regs-sdi.h>
22#include <asm/arch/regs-gpio.h>
23
24#include <asm/plat-s3c24xx/mci.h>
25
26#include "s3cmci.h"
27
28#define DRIVER_NAME "s3c-mci"
29
30enum dbg_channels {
31 dbg_err = (1 << 0),
32 dbg_debug = (1 << 1),
33 dbg_info = (1 << 2),
34 dbg_irq = (1 << 3),
35 dbg_sg = (1 << 4),
36 dbg_dma = (1 << 5),
37 dbg_pio = (1 << 6),
38 dbg_fail = (1 << 7),
39 dbg_conf = (1 << 8),
40};
41
42static const int dbgmap_err = dbg_err | dbg_fail;
43static const int dbgmap_info = dbg_info | dbg_conf;
44static const int dbgmap_debug = dbg_debug;
45
46#define dbg(host, channels, args...) \
47 do { \
48 if (dbgmap_err & channels) \
49 dev_err(&host->pdev->dev, args); \
50 else if (dbgmap_info & channels) \
51 dev_info(&host->pdev->dev, args); \
52 else if (dbgmap_debug & channels) \
53 dev_dbg(&host->pdev->dev, args); \
54 } while (0)
55
56#define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1)
57
58static struct s3c2410_dma_client s3cmci_dma_client = {
59 .name = "s3c-mci",
60};
61
62static void finalize_request(struct s3cmci_host *host);
63static void s3cmci_send_request(struct mmc_host *mmc);
64static void s3cmci_reset(struct s3cmci_host *host);
65
66#ifdef CONFIG_MMC_DEBUG
67
68static void dbg_dumpregs(struct s3cmci_host *host, char *prefix)
69{
70 u32 con, pre, cmdarg, cmdcon, cmdsta, r0, r1, r2, r3, timer, bsize;
71 u32 datcon, datcnt, datsta, fsta, imask;
72
73 con = readl(host->base + S3C2410_SDICON);
74 pre = readl(host->base + S3C2410_SDIPRE);
75 cmdarg = readl(host->base + S3C2410_SDICMDARG);
76 cmdcon = readl(host->base + S3C2410_SDICMDCON);
77 cmdsta = readl(host->base + S3C2410_SDICMDSTAT);
78 r0 = readl(host->base + S3C2410_SDIRSP0);
79 r1 = readl(host->base + S3C2410_SDIRSP1);
80 r2 = readl(host->base + S3C2410_SDIRSP2);
81 r3 = readl(host->base + S3C2410_SDIRSP3);
82 timer = readl(host->base + S3C2410_SDITIMER);
83 bsize = readl(host->base + S3C2410_SDIBSIZE);
84 datcon = readl(host->base + S3C2410_SDIDCON);
85 datcnt = readl(host->base + S3C2410_SDIDCNT);
86 datsta = readl(host->base + S3C2410_SDIDSTA);
87 fsta = readl(host->base + S3C2410_SDIFSTA);
88 imask = readl(host->base + host->sdiimsk);
89
90 dbg(host, dbg_debug, "%s CON:[%08x] PRE:[%08x] TMR:[%08x]\n",
91 prefix, con, pre, timer);
92
93 dbg(host, dbg_debug, "%s CCON:[%08x] CARG:[%08x] CSTA:[%08x]\n",
94 prefix, cmdcon, cmdarg, cmdsta);
95
96 dbg(host, dbg_debug, "%s DCON:[%08x] FSTA:[%08x]"
97 " DSTA:[%08x] DCNT:[%08x]\n",
98 prefix, datcon, fsta, datsta, datcnt);
99
100 dbg(host, dbg_debug, "%s R0:[%08x] R1:[%08x]"
101 " R2:[%08x] R3:[%08x]\n",
102 prefix, r0, r1, r2, r3);
103}
104
105static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd,
106 int stop)
107{
108 snprintf(host->dbgmsg_cmd, 300,
 109 "#%u%s op:%i arg:0x%08x flags:0x%08x retries:%u",
110 host->ccnt, (stop ? " (STOP)" : ""),
111 cmd->opcode, cmd->arg, cmd->flags, cmd->retries);
112
113 if (cmd->data) {
114 snprintf(host->dbgmsg_dat, 300,
115 "#%u bsize:%u blocks:%u bytes:%u",
116 host->dcnt, cmd->data->blksz,
117 cmd->data->blocks,
118 cmd->data->blocks * cmd->data->blksz);
119 } else {
120 host->dbgmsg_dat[0] = '\0';
121 }
122}
123
124static void dbg_dumpcmd(struct s3cmci_host *host, struct mmc_command *cmd,
125 int fail)
126{
127 unsigned int dbglvl = fail ? dbg_fail : dbg_debug;
128
129 if (!cmd)
130 return;
131
132 if (cmd->error == 0) {
133 dbg(host, dbglvl, "CMD[OK] %s R0:0x%08x\n",
134 host->dbgmsg_cmd, cmd->resp[0]);
135 } else {
136 dbg(host, dbglvl, "CMD[ERR %i] %s Status:%s\n",
137 cmd->error, host->dbgmsg_cmd, host->status);
138 }
139
140 if (!cmd->data)
141 return;
142
143 if (cmd->data->error == 0) {
144 dbg(host, dbglvl, "DAT[OK] %s\n", host->dbgmsg_dat);
145 } else {
146 dbg(host, dbglvl, "DAT[ERR %i] %s DCNT:0x%08x\n",
147 cmd->data->error, host->dbgmsg_dat,
148 readl(host->base + S3C2410_SDIDCNT));
149 }
150}
151#else
152static void dbg_dumpcmd(struct s3cmci_host *host,
153 struct mmc_command *cmd, int fail) { }
154
155static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd,
156 int stop) { }
157
158static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { }
159
160#endif /* CONFIG_MMC_DEBUG */
161
162static inline u32 enable_imask(struct s3cmci_host *host, u32 imask)
163{
164 u32 newmask;
165
166 newmask = readl(host->base + host->sdiimsk);
167 newmask |= imask;
168
169 writel(newmask, host->base + host->sdiimsk);
170
171 return newmask;
172}
173
174static inline u32 disable_imask(struct s3cmci_host *host, u32 imask)
175{
176 u32 newmask;
177
178 newmask = readl(host->base + host->sdiimsk);
179 newmask &= ~imask;
180
181 writel(newmask, host->base + host->sdiimsk);
182
183 return newmask;
184}
185
186static inline void clear_imask(struct s3cmci_host *host)
187{
188 writel(0, host->base + host->sdiimsk);
189}
190
191static inline int get_data_buffer(struct s3cmci_host *host,
192 u32 *words, u32 **pointer)
193{
194 struct scatterlist *sg;
195
196 if (host->pio_active == XFER_NONE)
197 return -EINVAL;
198
199 if ((!host->mrq) || (!host->mrq->data))
200 return -EINVAL;
201
202 if (host->pio_sgptr >= host->mrq->data->sg_len) {
203 dbg(host, dbg_debug, "no more buffers (%i/%i)\n",
204 host->pio_sgptr, host->mrq->data->sg_len);
205 return -EBUSY;
206 }
207 sg = &host->mrq->data->sg[host->pio_sgptr];
208
209 *words = sg->length >> 2;
210 *pointer = sg_virt(sg);
211
212 host->pio_sgptr++;
213
214 dbg(host, dbg_sg, "new buffer (%i/%i)\n",
215 host->pio_sgptr, host->mrq->data->sg_len);
216
217 return 0;
218}
219
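/* The SDIFSTA register reports the FIFO fill level in bytes. fifo_count()
 * converts that count to 32-bit words, and fifo_free() derives the remaining
 * space from the 64-byte FIFO depth implied by the "63 - fifostat"
 * arithmetic. */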
220static inline u32 fifo_count(struct s3cmci_host *host)
221{
222 u32 fifostat = readl(host->base + S3C2410_SDIFSTA);
223
224 fifostat &= S3C2410_SDIFSTA_COUNTMASK;
225 return fifostat >> 2;
226}
227
228static inline u32 fifo_free(struct s3cmci_host *host)
229{
230 u32 fifostat = readl(host->base + S3C2410_SDIFSTA);
231
232 fifostat &= S3C2410_SDIFSTA_COUNTMASK;
233 return (63 - fifostat) >> 2;
234}
235
236static void do_pio_read(struct s3cmci_host *host)
237{
238 int res;
239 u32 fifo;
240 void __iomem *from_ptr;
241
 242 /* restore the real prescaler; it may have been slowed down to work around read timeouts */
243 writel(host->prescaler, host->base + S3C2410_SDIPRE);
244
245 from_ptr = host->base + host->sdidata;
246
247 while ((fifo = fifo_count(host))) {
248 if (!host->pio_words) {
249 res = get_data_buffer(host, &host->pio_words,
250 &host->pio_ptr);
251 if (res) {
252 host->pio_active = XFER_NONE;
253 host->complete_what = COMPLETION_FINALIZE;
254
255 dbg(host, dbg_pio, "pio_read(): "
256 "complete (no more data).\n");
257 return;
258 }
259
260 dbg(host, dbg_pio,
261 "pio_read(): new target: [%i]@[%p]\n",
262 host->pio_words, host->pio_ptr);
263 }
264
265 dbg(host, dbg_pio,
266 "pio_read(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n",
267 fifo, host->pio_words,
268 readl(host->base + S3C2410_SDIDCNT));
269
270 if (fifo > host->pio_words)
271 fifo = host->pio_words;
272
273 host->pio_words -= fifo;
274 host->pio_count += fifo;
275
276 while (fifo--)
277 *(host->pio_ptr++) = readl(from_ptr);
278 }
279
280 if (!host->pio_words) {
281 res = get_data_buffer(host, &host->pio_words, &host->pio_ptr);
282 if (res) {
283 dbg(host, dbg_pio,
284 "pio_read(): complete (no more buffers).\n");
285 host->pio_active = XFER_NONE;
286 host->complete_what = COMPLETION_FINALIZE;
287
288 return;
289 }
290 }
291
292 enable_imask(host,
293 S3C2410_SDIIMSK_RXFIFOHALF | S3C2410_SDIIMSK_RXFIFOLAST);
294}
295
296static void do_pio_write(struct s3cmci_host *host)
297{
298 void __iomem *to_ptr;
299 int res;
300 u32 fifo;
301
302 to_ptr = host->base + host->sdidata;
303
304 while ((fifo = fifo_free(host))) {
305 if (!host->pio_words) {
306 res = get_data_buffer(host, &host->pio_words,
307 &host->pio_ptr);
308 if (res) {
309 dbg(host, dbg_pio,
310 "pio_write(): complete (no more data).\n");
311 host->pio_active = XFER_NONE;
312
313 return;
314 }
315
316 dbg(host, dbg_pio,
317 "pio_write(): new source: [%i]@[%p]\n",
318 host->pio_words, host->pio_ptr);
319
320 }
321
322 if (fifo > host->pio_words)
323 fifo = host->pio_words;
324
325 host->pio_words -= fifo;
326 host->pio_count += fifo;
327
328 while (fifo--)
329 writel(*(host->pio_ptr++), to_ptr);
330 }
331
332 enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
333}
334
335static void pio_tasklet(unsigned long data)
336{
337 struct s3cmci_host *host = (struct s3cmci_host *) data;
338
339
340 disable_irq(host->irq);
341
342 if (host->pio_active == XFER_WRITE)
343 do_pio_write(host);
344
345 if (host->pio_active == XFER_READ)
346 do_pio_read(host);
347
348 if (host->complete_what == COMPLETION_FINALIZE) {
349 clear_imask(host);
350 if (host->pio_active != XFER_NONE) {
351 dbg(host, dbg_err, "unfinished %s "
352 "- pio_count:[%u] pio_words:[%u]\n",
353 (host->pio_active == XFER_READ) ? "read" : "write",
354 host->pio_count, host->pio_words);
355
356 if (host->mrq->data)
357 host->mrq->data->error = -EINVAL;
358 }
359
360 finalize_request(host);
361 } else
362 enable_irq(host->irq);
363}
364
365/*
366 * ISR for SDI Interface IRQ
367 * Communication between driver and ISR works as follows:
368 * host->mrq points to current request
369 * host->complete_what Indicates when the request is considered done
370 * COMPLETION_CMDSENT when the command was sent
371 * COMPLETION_RSPFIN when a response was received
372 * COMPLETION_XFERFINISH when the data transfer is finished
373 * COMPLETION_XFERFINISH_RSPFIN both of the above.
374 * host->complete_request is the completion-object the driver waits for
375 *
376 * 1) Driver sets up host->mrq and host->complete_what
377 * 2) Driver prepares the transfer
378 * 3) Driver enables interrupts
379 * 4) Driver starts transfer
 380 * 5) Driver waits for host->complete_request
 381 * 6) ISR checks the request status (errors and success)
 382 * 7) ISR sets host->mrq->cmd->error and host->mrq->data->error
 383 * 8) ISR completes host->complete_request
 384 * 9) ISR disables interrupts
 385 * 10) Driver wakes up and takes care of the request
 386 *
 387 * Note: the "->error" fields are expected to be set to 0 by mmc.c before
 388 * the request is issued - therefore they are only set when an error
 389 * condition comes up
390 */
391
392static irqreturn_t s3cmci_irq(int irq, void *dev_id)
393{
394 struct s3cmci_host *host = dev_id;
395 struct mmc_command *cmd;
396 u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk;
397 u32 mci_cclear, mci_dclear;
398 unsigned long iflags;
399
400 spin_lock_irqsave(&host->complete_lock, iflags);
401
402 mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
403 mci_dsta = readl(host->base + S3C2410_SDIDSTA);
404 mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
405 mci_fsta = readl(host->base + S3C2410_SDIFSTA);
406 mci_imsk = readl(host->base + host->sdiimsk);
407 mci_cclear = 0;
408 mci_dclear = 0;
409
410 if ((host->complete_what == COMPLETION_NONE) ||
411 (host->complete_what == COMPLETION_FINALIZE)) {
412 host->status = "nothing to complete";
413 clear_imask(host);
414 goto irq_out;
415 }
416
417 if (!host->mrq) {
418 host->status = "no active mrq";
419 clear_imask(host);
420 goto irq_out;
421 }
422
423 cmd = host->cmd_is_stop ? host->mrq->stop : host->mrq->cmd;
424
425 if (!cmd) {
426 host->status = "no active cmd";
427 clear_imask(host);
428 goto irq_out;
429 }
430
431 if (!host->dodma) {
432 if ((host->pio_active == XFER_WRITE) &&
433 (mci_fsta & S3C2410_SDIFSTA_TFDET)) {
434
435 disable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
436 tasklet_schedule(&host->pio_tasklet);
437 host->status = "pio tx";
438 }
439
440 if ((host->pio_active == XFER_READ) &&
441 (mci_fsta & S3C2410_SDIFSTA_RFDET)) {
442
443 disable_imask(host,
444 S3C2410_SDIIMSK_RXFIFOHALF |
445 S3C2410_SDIIMSK_RXFIFOLAST);
446
447 tasklet_schedule(&host->pio_tasklet);
448 host->status = "pio rx";
449 }
450 }
451
452 if (mci_csta & S3C2410_SDICMDSTAT_CMDTIMEOUT) {
453 dbg(host, dbg_err, "CMDSTAT: error CMDTIMEOUT\n");
454 cmd->error = -ETIMEDOUT;
455 host->status = "error: command timeout";
456 goto fail_transfer;
457 }
458
459 if (mci_csta & S3C2410_SDICMDSTAT_CMDSENT) {
460 if (host->complete_what == COMPLETION_CMDSENT) {
461 host->status = "ok: command sent";
462 goto close_transfer;
463 }
464
465 mci_cclear |= S3C2410_SDICMDSTAT_CMDSENT;
466 }
467
468 if (mci_csta & S3C2410_SDICMDSTAT_CRCFAIL) {
469 if (cmd->flags & MMC_RSP_CRC) {
470 if (host->mrq->cmd->flags & MMC_RSP_136) {
471 dbg(host, dbg_irq,
472 "fixup: ignore CRC fail with long rsp\n");
473 } else {
474 /* note, we used to fail the transfer
475 * here, but it seems that this is just
476 * the hardware getting it wrong.
477 *
478 * cmd->error = -EILSEQ;
479 * host->status = "error: bad command crc";
480 * goto fail_transfer;
481 */
482 }
483 }
484
485 mci_cclear |= S3C2410_SDICMDSTAT_CRCFAIL;
486 }
487
488 if (mci_csta & S3C2410_SDICMDSTAT_RSPFIN) {
489 if (host->complete_what == COMPLETION_RSPFIN) {
490 host->status = "ok: command response received";
491 goto close_transfer;
492 }
493
494 if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
495 host->complete_what = COMPLETION_XFERFINISH;
496
497 mci_cclear |= S3C2410_SDICMDSTAT_RSPFIN;
498 }
499
500 /* errors handled after this point are only relevant
501 when a data transfer is in progress */
502
503 if (!cmd->data)
504 goto clear_status_bits;
505
506 /* Check for FIFO failure */
507 if (host->is2440) {
508 if (mci_fsta & S3C2440_SDIFSTA_FIFOFAIL) {
509 dbg(host, dbg_err, "FIFO failure\n");
510 host->mrq->data->error = -EILSEQ;
511 host->status = "error: 2440 fifo failure";
512 goto fail_transfer;
513 }
514 } else {
515 if (mci_dsta & S3C2410_SDIDSTA_FIFOFAIL) {
516 dbg(host, dbg_err, "FIFO failure\n");
517 cmd->data->error = -EILSEQ;
518 host->status = "error: fifo failure";
519 goto fail_transfer;
520 }
521 }
522
523 if (mci_dsta & S3C2410_SDIDSTA_RXCRCFAIL) {
524 dbg(host, dbg_err, "bad data crc (outgoing)\n");
525 cmd->data->error = -EILSEQ;
526 host->status = "error: bad data crc (outgoing)";
527 goto fail_transfer;
528 }
529
530 if (mci_dsta & S3C2410_SDIDSTA_CRCFAIL) {
531 dbg(host, dbg_err, "bad data crc (incoming)\n");
532 cmd->data->error = -EILSEQ;
533 host->status = "error: bad data crc (incoming)";
534 goto fail_transfer;
535 }
536
537 if (mci_dsta & S3C2410_SDIDSTA_DATATIMEOUT) {
538 dbg(host, dbg_err, "data timeout\n");
539 cmd->data->error = -ETIMEDOUT;
540 host->status = "error: data timeout";
541 goto fail_transfer;
542 }
543
544 if (mci_dsta & S3C2410_SDIDSTA_XFERFINISH) {
545 if (host->complete_what == COMPLETION_XFERFINISH) {
546 host->status = "ok: data transfer completed";
547 goto close_transfer;
548 }
549
550 if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
551 host->complete_what = COMPLETION_RSPFIN;
552
553 mci_dclear |= S3C2410_SDIDSTA_XFERFINISH;
554 }
555
556clear_status_bits:
557 writel(mci_cclear, host->base + S3C2410_SDICMDSTAT);
558 writel(mci_dclear, host->base + S3C2410_SDIDSTA);
559
560 goto irq_out;
561
562fail_transfer:
563 host->pio_active = XFER_NONE;
564
565close_transfer:
566 host->complete_what = COMPLETION_FINALIZE;
567
568 clear_imask(host);
569 tasklet_schedule(&host->pio_tasklet);
570
571 goto irq_out;
572
573irq_out:
574 dbg(host, dbg_irq,
575 "csta:0x%08x dsta:0x%08x fsta:0x%08x dcnt:0x%08x status:%s.\n",
576 mci_csta, mci_dsta, mci_fsta, mci_dcnt, host->status);
577
578 spin_unlock_irqrestore(&host->complete_lock, iflags);
579 return IRQ_HANDLED;
580
581}
582
583/*
584 * ISR for the CardDetect Pin
 585 */
586
587static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id)
588{
589 struct s3cmci_host *host = (struct s3cmci_host *)dev_id;
590
591 dbg(host, dbg_irq, "card detect\n");
592
593 mmc_detect_change(host->mmc, msecs_to_jiffies(500));
594
595 return IRQ_HANDLED;
596}
597
598void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch, void *buf_id,
599 int size, enum s3c2410_dma_buffresult result)
600{
601 struct s3cmci_host *host = buf_id;
602 unsigned long iflags;
603 u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt;
604
605 mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
606 mci_dsta = readl(host->base + S3C2410_SDIDSTA);
607 mci_fsta = readl(host->base + S3C2410_SDIFSTA);
608 mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
609
610 BUG_ON(!host->mrq);
611 BUG_ON(!host->mrq->data);
612 BUG_ON(!host->dmatogo);
613
614 spin_lock_irqsave(&host->complete_lock, iflags);
615
616 if (result != S3C2410_RES_OK) {
617 dbg(host, dbg_fail, "DMA FAILED: csta=0x%08x dsta=0x%08x "
618 "fsta=0x%08x dcnt:0x%08x result:0x%08x toGo:%u\n",
619 mci_csta, mci_dsta, mci_fsta,
620 mci_dcnt, result, host->dmatogo);
621
622 goto fail_request;
623 }
624
625 host->dmatogo--;
626 if (host->dmatogo) {
627 dbg(host, dbg_dma, "DMA DONE Size:%i DSTA:[%08x] "
628 "DCNT:[%08x] toGo:%u\n",
629 size, mci_dsta, mci_dcnt, host->dmatogo);
630
631 goto out;
632 }
633
634 dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n",
635 size, mci_dsta, mci_dcnt);
636
637 host->complete_what = COMPLETION_FINALIZE;
638
639out:
640 tasklet_schedule(&host->pio_tasklet);
641 spin_unlock_irqrestore(&host->complete_lock, iflags);
642 return;
643
644fail_request:
645 host->mrq->data->error = -EINVAL;
646 host->complete_what = COMPLETION_FINALIZE;
647 writel(0, host->base + host->sdiimsk);
648 goto out;
649
650}
651
652static void finalize_request(struct s3cmci_host *host)
653{
654 struct mmc_request *mrq = host->mrq;
655 struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
656 int debug_as_failure = 0;
657
658 if (host->complete_what != COMPLETION_FINALIZE)
659 return;
660
661 if (!mrq)
662 return;
663
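	/* For successful DMA transfers, wait until the DMA completion
	 * callback has run before finalizing the request. */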
664 if (cmd->data && (cmd->error == 0) &&
665 (cmd->data->error == 0)) {
666 if (host->dodma && (!host->dma_complete)) {
667 dbg(host, dbg_dma, "DMA Missing!\n");
668 return;
669 }
670 }
671
672 /* Read response from controller. */
673 cmd->resp[0] = readl(host->base + S3C2410_SDIRSP0);
674 cmd->resp[1] = readl(host->base + S3C2410_SDIRSP1);
675 cmd->resp[2] = readl(host->base + S3C2410_SDIRSP2);
676 cmd->resp[3] = readl(host->base + S3C2410_SDIRSP3);
677
678 writel(host->prescaler, host->base + S3C2410_SDIPRE);
679
680 if (cmd->error)
681 debug_as_failure = 1;
682
683 if (cmd->data && cmd->data->error)
684 debug_as_failure = 1;
685
686 dbg_dumpcmd(host, cmd, debug_as_failure);
687
688 /* Cleanup controller */
689 writel(0, host->base + S3C2410_SDICMDARG);
690 writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
691 writel(0, host->base + S3C2410_SDICMDCON);
692 writel(0, host->base + host->sdiimsk);
693
694 if (cmd->data && cmd->error)
695 cmd->data->error = cmd->error;
696
697 if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) {
698 host->cmd_is_stop = 1;
699 s3cmci_send_request(host->mmc);
700 return;
701 }
702
703 /* If we have no data transfer we are finished here */
704 if (!mrq->data)
705 goto request_done;
706
 707 /* Calculate the number of bytes transferred if there was no error */
708 if (mrq->data->error == 0) {
709 mrq->data->bytes_xfered =
710 (mrq->data->blocks * mrq->data->blksz);
711 } else {
712 mrq->data->bytes_xfered = 0;
713 }
714
 715 /* If we had an error while transferring data we flush the
 716 * DMA channel and the FIFO to clear out any garbage. */
717 if (mrq->data->error != 0) {
718 if (host->dodma)
719 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
720
721 if (host->is2440) {
722 /* Clear failure register and reset fifo. */
723 writel(S3C2440_SDIFSTA_FIFORESET |
724 S3C2440_SDIFSTA_FIFOFAIL,
725 host->base + S3C2410_SDIFSTA);
726 } else {
727 u32 mci_con;
728
729 /* reset fifo */
730 mci_con = readl(host->base + S3C2410_SDICON);
731 mci_con |= S3C2410_SDICON_FIFORESET;
732
733 writel(mci_con, host->base + S3C2410_SDICON);
734 }
735 }
736
737request_done:
738 host->complete_what = COMPLETION_NONE;
739 host->mrq = NULL;
740 mmc_request_done(host->mmc, mrq);
741}
742
743
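/* Configure the DMA channel for the given transfer direction. The one-off
 * channel configuration and the last direction are cached in static
 * variables, so repeated calls only reprogram the device address when the
 * direction actually changes. */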
744void s3cmci_dma_setup(struct s3cmci_host *host, enum s3c2410_dmasrc source)
745{
746 static enum s3c2410_dmasrc last_source = -1;
747 static int setup_ok;
748
749 if (last_source == source)
750 return;
751
752 last_source = source;
753
754 s3c2410_dma_devconfig(host->dma, source, 3,
755 host->mem->start + host->sdidata);
756
757 if (!setup_ok) {
758 s3c2410_dma_config(host->dma, 4,
759 (S3C2410_DCON_HWTRIG | S3C2410_DCON_CH0_SDI));
760 s3c2410_dma_set_buffdone_fn(host->dma,
761 s3cmci_dma_done_callback);
762 s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART);
763 setup_ok = 1;
764 }
765}
766
767static void s3cmci_send_command(struct s3cmci_host *host,
768 struct mmc_command *cmd)
769{
770 u32 ccon, imsk;
771
772 imsk = S3C2410_SDIIMSK_CRCSTATUS | S3C2410_SDIIMSK_CMDTIMEOUT |
773 S3C2410_SDIIMSK_RESPONSEND | S3C2410_SDIIMSK_CMDSENT |
774 S3C2410_SDIIMSK_RESPONSECRC;
775
776 enable_imask(host, imsk);
777
778 if (cmd->data)
779 host->complete_what = COMPLETION_XFERFINISH_RSPFIN;
780 else if (cmd->flags & MMC_RSP_PRESENT)
781 host->complete_what = COMPLETION_RSPFIN;
782 else
783 host->complete_what = COMPLETION_CMDSENT;
784
785 writel(cmd->arg, host->base + S3C2410_SDICMDARG);
786
787 ccon = cmd->opcode & S3C2410_SDICMDCON_INDEX;
788 ccon |= S3C2410_SDICMDCON_SENDERHOST | S3C2410_SDICMDCON_CMDSTART;
789
790 if (cmd->flags & MMC_RSP_PRESENT)
791 ccon |= S3C2410_SDICMDCON_WAITRSP;
792
793 if (cmd->flags & MMC_RSP_136)
794 ccon |= S3C2410_SDICMDCON_LONGRSP;
795
796 writel(ccon, host->base + S3C2410_SDICMDCON);
797}
798
799static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
800{
801 u32 dcon, imsk, stoptries = 3;
802
803 /* write DCON register */
804
805 if (!data) {
806 writel(0, host->base + S3C2410_SDIDCON);
807 return 0;
808 }
809
810 if ((data->blksz & 3) != 0) {
 811 /* We cannot deal with unaligned blocks when more than
 812 * one block is being transferred. */
813
814 if (data->blocks > 1)
815 return -EINVAL;
816
817 /* No support yet for non-word block transfers. */
818 return -EINVAL;
819 }
820
821 while (readl(host->base + S3C2410_SDIDSTA) &
822 (S3C2410_SDIDSTA_TXDATAON | S3C2410_SDIDSTA_RXDATAON)) {
823
824 dbg(host, dbg_err,
 825 "mci_setup_data() transfer still in progress.\n");
826
827 writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
828 s3cmci_reset(host);
829
830 if ((stoptries--) == 0) {
831 dbg_dumpregs(host, "DRF");
832 return -EINVAL;
833 }
834 }
835
836 dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK;
837
838 if (host->dodma)
839 dcon |= S3C2410_SDIDCON_DMAEN;
840
841 if (host->bus_width == MMC_BUS_WIDTH_4)
842 dcon |= S3C2410_SDIDCON_WIDEBUS;
843
844 if (!(data->flags & MMC_DATA_STREAM))
845 dcon |= S3C2410_SDIDCON_BLOCKMODE;
846
847 if (data->flags & MMC_DATA_WRITE) {
848 dcon |= S3C2410_SDIDCON_TXAFTERRESP;
849 dcon |= S3C2410_SDIDCON_XFER_TXSTART;
850 }
851
852 if (data->flags & MMC_DATA_READ) {
853 dcon |= S3C2410_SDIDCON_RXAFTERCMD;
854 dcon |= S3C2410_SDIDCON_XFER_RXSTART;
855 }
856
857 if (host->is2440) {
858 dcon |= S3C2440_SDIDCON_DS_WORD;
859 dcon |= S3C2440_SDIDCON_DATSTART;
860 }
861
862 writel(dcon, host->base + S3C2410_SDIDCON);
863
864 /* write BSIZE register */
865
866 writel(data->blksz, host->base + S3C2410_SDIBSIZE);
867
868 /* add to IMASK register */
869 imsk = S3C2410_SDIIMSK_FIFOFAIL | S3C2410_SDIIMSK_DATACRC |
870 S3C2410_SDIIMSK_DATATIMEOUT | S3C2410_SDIIMSK_DATAFINISH;
871
872 enable_imask(host, imsk);
873
874 /* write TIMER register */
875
876 if (host->is2440) {
877 writel(0x007FFFFF, host->base + S3C2410_SDITIMER);
878 } else {
879 writel(0x0000FFFF, host->base + S3C2410_SDITIMER);
880
881 /* FIX: set slow clock to prevent timeouts on read */
882 if (data->flags & MMC_DATA_READ)
883 writel(0xFF, host->base + S3C2410_SDIPRE);
884 }
885
886 return 0;
887}
888
889#define BOTH_DIR (MMC_DATA_WRITE | MMC_DATA_READ)
890
891static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data)
892{
893 int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
894
895 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
896
897 host->pio_sgptr = 0;
898 host->pio_words = 0;
899 host->pio_count = 0;
900 host->pio_active = rw ? XFER_WRITE : XFER_READ;
901
902 if (rw) {
903 do_pio_write(host);
904 enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
905 } else {
906 enable_imask(host, S3C2410_SDIIMSK_RXFIFOHALF
907 | S3C2410_SDIIMSK_RXFIFOLAST);
908 }
909
910 return 0;
911}
912
913static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
914{
915 int dma_len, i;
916 int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
917
918 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
919
920 s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW);
921 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
922
923 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
924 (rw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
925
926 if (dma_len == 0)
927 return -ENOMEM;
928
929 host->dma_complete = 0;
930 host->dmatogo = dma_len;
931
932 for (i = 0; i < dma_len; i++) {
933 int res;
934
935 dbg(host, dbg_dma, "enqueue %i:%u@%u\n", i,
936 sg_dma_address(&data->sg[i]),
937 sg_dma_len(&data->sg[i]));
938
939 res = s3c2410_dma_enqueue(host->dma, (void *) host,
940 sg_dma_address(&data->sg[i]),
941 sg_dma_len(&data->sg[i]));
942
943 if (res) {
944 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
945 return -EBUSY;
946 }
947 }
948
949 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_START);
950
951 return 0;
952}
953
954static void s3cmci_send_request(struct mmc_host *mmc)
955{
956 struct s3cmci_host *host = mmc_priv(mmc);
957 struct mmc_request *mrq = host->mrq;
958 struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
959
960 host->ccnt++;
961 prepare_dbgmsg(host, cmd, host->cmd_is_stop);
962
 963 /* Clear command, data and fifo status registers.
 964 * The FIFO clear is only necessary on the 2440, but doesn't hurt on the 2410.
 965 */
966 writel(0xFFFFFFFF, host->base + S3C2410_SDICMDSTAT);
967 writel(0xFFFFFFFF, host->base + S3C2410_SDIDSTA);
968 writel(0xFFFFFFFF, host->base + S3C2410_SDIFSTA);
969
970 if (cmd->data) {
971 int res = s3cmci_setup_data(host, cmd->data);
972
973 host->dcnt++;
974
975 if (res) {
976 dbg(host, dbg_err, "setup data error %d\n", res);
977 cmd->error = res;
978 cmd->data->error = res;
979
980 mmc_request_done(mmc, mrq);
981 return;
982 }
983
984 if (host->dodma)
985 res = s3cmci_prepare_dma(host, cmd->data);
986 else
987 res = s3cmci_prepare_pio(host, cmd->data);
988
989 if (res) {
990 dbg(host, dbg_err, "data prepare error %d\n", res);
991 cmd->error = res;
992 cmd->data->error = res;
993
994 mmc_request_done(mmc, mrq);
995 return;
996 }
997 }
998
999 /* Send command */
1000 s3cmci_send_command(host, cmd);
1001
1002 /* Enable Interrupt */
1003 enable_irq(host->irq);
1004}
1005
1006static int s3cmci_card_present(struct s3cmci_host *host)
1007{
1008 struct s3c24xx_mci_pdata *pdata = host->pdata;
1009 int ret;
1010
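	/* Without a card-detect GPIO we cannot tell whether a card is
	 * present; return -ENOSYS so the caller treats the slot as
	 * populated. */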
1011 if (pdata->gpio_detect == 0)
1012 return -ENOSYS;
1013
1014 ret = s3c2410_gpio_getpin(pdata->gpio_detect) ? 0 : 1;
1015 return ret ^ pdata->detect_invert;
1016}
1017
1018static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1019{
1020 struct s3cmci_host *host = mmc_priv(mmc);
1021
1022 host->status = "mmc request";
1023 host->cmd_is_stop = 0;
1024 host->mrq = mrq;
1025
1026 if (s3cmci_card_present(host) == 0) {
1027 dbg(host, dbg_err, "%s: no medium present\n", __func__);
1028 host->mrq->cmd->error = -ENOMEDIUM;
1029 mmc_request_done(mmc, mrq);
1030 } else
1031 s3cmci_send_request(mmc);
1032}
1033
1034static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1035{
1036 struct s3cmci_host *host = mmc_priv(mmc);
1037 u32 mci_psc, mci_con;
1038
1039 /* Set the power state */
1040
1041 mci_con = readl(host->base + S3C2410_SDICON);
1042
1043 switch (ios->power_mode) {
1044 case MMC_POWER_ON:
1045 case MMC_POWER_UP:
1046 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_SDCLK);
1047 s3c2410_gpio_cfgpin(S3C2410_GPE6, S3C2410_GPE6_SDCMD);
1048 s3c2410_gpio_cfgpin(S3C2410_GPE7, S3C2410_GPE7_SDDAT0);
1049 s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1);
1050 s3c2410_gpio_cfgpin(S3C2410_GPE9, S3C2410_GPE9_SDDAT2);
1051 s3c2410_gpio_cfgpin(S3C2410_GPE10, S3C2410_GPE10_SDDAT3);
1052
1053 if (host->pdata->set_power)
1054 host->pdata->set_power(ios->power_mode, ios->vdd);
1055
1056 if (!host->is2440)
1057 mci_con |= S3C2410_SDICON_FIFORESET;
1058
1059 break;
1060
1061 case MMC_POWER_OFF:
1062 default:
1063 s3c2410_gpio_setpin(S3C2410_GPE5, 0);
1064 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_OUTP);
1065
1066 if (host->is2440)
1067 mci_con |= S3C2440_SDICON_SDRESET;
1068
1069 if (host->pdata->set_power)
1070 host->pdata->set_power(ios->power_mode, ios->vdd);
1071
1072 break;
1073 }
1074
1075 /* Set clock */
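	/* The card clock is clk_rate / (clk_div * (prescaler + 1)); pick the
	 * smallest prescaler whose resulting rate does not exceed the
	 * requested clock. */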
1076 for (mci_psc = 0; mci_psc < 255; mci_psc++) {
1077 host->real_rate = host->clk_rate / (host->clk_div*(mci_psc+1));
1078
1079 if (host->real_rate <= ios->clock)
1080 break;
1081 }
1082
1083 if (mci_psc > 255)
1084 mci_psc = 255;
1085
1086 host->prescaler = mci_psc;
1087 writel(host->prescaler, host->base + S3C2410_SDIPRE);
1088
1089 /* If requested clock is 0, real_rate will be 0, too */
1090 if (ios->clock == 0)
1091 host->real_rate = 0;
1092
1093 /* Set CLOCK_ENABLE */
1094 if (ios->clock)
1095 mci_con |= S3C2410_SDICON_CLOCKTYPE;
1096 else
1097 mci_con &= ~S3C2410_SDICON_CLOCKTYPE;
1098
1099 writel(mci_con, host->base + S3C2410_SDICON);
1100
1101 if ((ios->power_mode == MMC_POWER_ON) ||
1102 (ios->power_mode == MMC_POWER_UP)) {
1103 dbg(host, dbg_conf, "running at %lukHz (requested: %ukHz).\n",
1104 host->real_rate/1000, ios->clock/1000);
1105 } else {
1106 dbg(host, dbg_conf, "powered down.\n");
1107 }
1108
1109 host->bus_width = ios->bus_width;
1110}
1111
1112static void s3cmci_reset(struct s3cmci_host *host)
1113{
1114 u32 con = readl(host->base + S3C2410_SDICON);
1115
1116 con |= S3C2440_SDICON_SDRESET;
1117 writel(con, host->base + S3C2410_SDICON);
1118}
1119
1120static int s3cmci_get_ro(struct mmc_host *mmc)
1121{
1122 struct s3cmci_host *host = mmc_priv(mmc);
1123 struct s3c24xx_mci_pdata *pdata = host->pdata;
1124 int ret;
1125
1126 if (pdata->gpio_wprotect == 0)
1127 return 0;
1128
1129 ret = s3c2410_gpio_getpin(pdata->gpio_wprotect);
1130
1131 if (pdata->wprotect_invert)
1132 ret = !ret;
1133
1134 return ret;
1135}
1136
1137static struct mmc_host_ops s3cmci_ops = {
1138 .request = s3cmci_request,
1139 .set_ios = s3cmci_set_ios,
1140 .get_ro = s3cmci_get_ro,
1141};
1142
1143static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
 1144 /* This is currently here to avoid a number of if (host->pdata)
 1145 * checks. Any fields left zeroed ensure that reasonable defaults are picked. */
1146};
1147
1148static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1149{
1150 struct s3cmci_host *host;
1151 struct mmc_host *mmc;
1152 int ret;
1153
1154 mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
1155 if (!mmc) {
1156 ret = -ENOMEM;
1157 goto probe_out;
1158 }
1159
1160 host = mmc_priv(mmc);
1161 host->mmc = mmc;
1162 host->pdev = pdev;
1163 host->is2440 = is2440;
1164
1165 host->pdata = pdev->dev.platform_data;
1166 if (!host->pdata) {
1167 pdev->dev.platform_data = &s3cmci_def_pdata;
1168 host->pdata = &s3cmci_def_pdata;
1169 }
1170
1171 spin_lock_init(&host->complete_lock);
1172 tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host);
1173
1174 if (is2440) {
1175 host->sdiimsk = S3C2440_SDIIMSK;
1176 host->sdidata = S3C2440_SDIDATA;
1177 host->clk_div = 1;
1178 } else {
1179 host->sdiimsk = S3C2410_SDIIMSK;
1180 host->sdidata = S3C2410_SDIDATA;
1181 host->clk_div = 2;
1182 }
1183
1184 host->dodma = 0;
1185 host->complete_what = COMPLETION_NONE;
1186 host->pio_active = XFER_NONE;
1187
1188 host->dma = S3CMCI_DMA;
1189
1190 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1191 if (!host->mem) {
1192 dev_err(&pdev->dev,
 1193 "failed to get io memory region resource.\n");
1194
1195 ret = -ENOENT;
1196 goto probe_free_host;
1197 }
1198
1199 host->mem = request_mem_region(host->mem->start,
1200 RESSIZE(host->mem), pdev->name);
1201
1202 if (!host->mem) {
1203 dev_err(&pdev->dev, "failed to request io memory region.\n");
1204 ret = -ENOENT;
1205 goto probe_free_host;
1206 }
1207
1208 host->base = ioremap(host->mem->start, RESSIZE(host->mem));
1209 if (host->base == 0) {
1210 dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
1211 ret = -EINVAL;
1212 goto probe_free_mem_region;
1213 }
1214
1215 host->irq = platform_get_irq(pdev, 0);
1216 if (host->irq == 0) {
 1217 dev_err(&pdev->dev, "failed to get interrupt resource.\n");
1218 ret = -EINVAL;
1219 goto probe_iounmap;
1220 }
1221
1222 if (request_irq(host->irq, s3cmci_irq, 0, DRIVER_NAME, host)) {
1223 dev_err(&pdev->dev, "failed to request mci interrupt.\n");
1224 ret = -ENOENT;
1225 goto probe_iounmap;
1226 }
1227
 1228 /* We get spurious interrupts even when we have set the IMSK
 1229 * register to ignore everything, so use disable_irq() to
 1230 * ensure we don't lock the system with un-serviceable requests. */
1231
1232 disable_irq(host->irq);
1233
1234 host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect);
1235
1236 if (host->irq_cd >= 0) {
1237 if (request_irq(host->irq_cd, s3cmci_irq_cd,
1238 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
1239 DRIVER_NAME, host)) {
1240 dev_err(&pdev->dev, "can't get card detect irq.\n");
1241 ret = -ENOENT;
1242 goto probe_free_irq;
1243 }
1244 } else {
1245 dev_warn(&pdev->dev, "host detect has no irq available\n");
1246 s3c2410_gpio_cfgpin(host->pdata->gpio_detect,
1247 S3C2410_GPIO_INPUT);
1248 }
1249
1250 if (host->pdata->gpio_wprotect)
1251 s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect,
1252 S3C2410_GPIO_INPUT);
1253
1254 if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL) < 0) {
1255 dev_err(&pdev->dev, "unable to get DMA channel.\n");
1256 ret = -EBUSY;
1257 goto probe_free_irq_cd;
1258 }
1259
1260 host->clk = clk_get(&pdev->dev, "sdi");
1261 if (IS_ERR(host->clk)) {
1262 dev_err(&pdev->dev, "failed to find clock source.\n");
1263 ret = PTR_ERR(host->clk);
1264 host->clk = NULL;
1265 goto probe_free_host;
1266 }
1267
1268 ret = clk_enable(host->clk);
1269 if (ret) {
1270 dev_err(&pdev->dev, "failed to enable clock source.\n");
1271 goto clk_free;
1272 }
1273
1274 host->clk_rate = clk_get_rate(host->clk);
1275
1276 mmc->ops = &s3cmci_ops;
1277 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1278 mmc->caps = MMC_CAP_4_BIT_DATA;
1279 mmc->f_min = host->clk_rate / (host->clk_div * 256);
1280 mmc->f_max = host->clk_rate / host->clk_div;
1281
1282 if (host->pdata->ocr_avail)
1283 mmc->ocr_avail = host->pdata->ocr_avail;
1284
1285 mmc->max_blk_count = 4095;
1286 mmc->max_blk_size = 4095;
1287 mmc->max_req_size = 4095 * 512;
1288 mmc->max_seg_size = mmc->max_req_size;
1289
1290 mmc->max_phys_segs = 128;
1291 mmc->max_hw_segs = 128;
1292
1293 dbg(host, dbg_debug,
1294 "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%u.\n",
1295 (host->is2440?"2440":""),
1296 host->base, host->irq, host->irq_cd, host->dma);
1297
1298 ret = mmc_add_host(mmc);
1299 if (ret) {
1300 dev_err(&pdev->dev, "failed to add mmc host.\n");
1301 goto free_dmabuf;
1302 }
1303
1304 platform_set_drvdata(pdev, mmc);
1305 dev_info(&pdev->dev, "initialisation done.\n");
1306
1307 return 0;
1308
1309 free_dmabuf:
1310 clk_disable(host->clk);
1311
1312 clk_free:
1313 clk_put(host->clk);
1314
1315 probe_free_irq_cd:
1316 if (host->irq_cd >= 0)
1317 free_irq(host->irq_cd, host);
1318
1319 probe_free_irq:
1320 free_irq(host->irq, host);
1321
1322 probe_iounmap:
1323 iounmap(host->base);
1324
1325 probe_free_mem_region:
1326 release_mem_region(host->mem->start, RESSIZE(host->mem));
1327
1328 probe_free_host:
1329 mmc_free_host(mmc);
1330 probe_out:
1331 return ret;
1332}
1333
1334static int __devexit s3cmci_remove(struct platform_device *pdev)
1335{
1336 struct mmc_host *mmc = platform_get_drvdata(pdev);
1337 struct s3cmci_host *host = mmc_priv(mmc);
1338
1339 mmc_remove_host(mmc);
1340
1341 clk_disable(host->clk);
1342 clk_put(host->clk);
1343
1344 tasklet_disable(&host->pio_tasklet);
1345 s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client);
1346
1347 if (host->irq_cd >= 0)
1348 free_irq(host->irq_cd, host);
1349 free_irq(host->irq, host);
1350
1351 iounmap(host->base);
1352 release_mem_region(host->mem->start, RESSIZE(host->mem));
1353
1354 mmc_free_host(mmc);
1355 return 0;
1356}
1357
1358static int __devinit s3cmci_probe_2410(struct platform_device *dev)
1359{
1360 return s3cmci_probe(dev, 0);
1361}
1362
1363static int __devinit s3cmci_probe_2412(struct platform_device *dev)
1364{
1365 return s3cmci_probe(dev, 1);
1366}
1367
1368static int __devinit s3cmci_probe_2440(struct platform_device *dev)
1369{
1370 return s3cmci_probe(dev, 1);
1371}
1372
1373#ifdef CONFIG_PM
1374
1375static int s3cmci_suspend(struct platform_device *dev, pm_message_t state)
1376{
1377 struct mmc_host *mmc = platform_get_drvdata(dev);
1378
1379 return mmc_suspend_host(mmc, state);
1380}
1381
1382static int s3cmci_resume(struct platform_device *dev)
1383{
1384 struct mmc_host *mmc = platform_get_drvdata(dev);
1385
1386 return mmc_resume_host(mmc);
1387}
1388
1389#else /* CONFIG_PM */
1390#define s3cmci_suspend NULL
1391#define s3cmci_resume NULL
1392#endif /* CONFIG_PM */
1393
1394
1395static struct platform_driver s3cmci_driver_2410 = {
1396 .driver.name = "s3c2410-sdi",
1397 .driver.owner = THIS_MODULE,
1398 .probe = s3cmci_probe_2410,
1399 .remove = __devexit_p(s3cmci_remove),
1400 .suspend = s3cmci_suspend,
1401 .resume = s3cmci_resume,
1402};
1403
1404static struct platform_driver s3cmci_driver_2412 = {
1405 .driver.name = "s3c2412-sdi",
1406 .driver.owner = THIS_MODULE,
1407 .probe = s3cmci_probe_2412,
1408 .remove = __devexit_p(s3cmci_remove),
1409 .suspend = s3cmci_suspend,
1410 .resume = s3cmci_resume,
1411};
1412
1413static struct platform_driver s3cmci_driver_2440 = {
1414 .driver.name = "s3c2440-sdi",
1415 .driver.owner = THIS_MODULE,
1416 .probe = s3cmci_probe_2440,
1417 .remove = __devexit_p(s3cmci_remove),
1418 .suspend = s3cmci_suspend,
1419 .resume = s3cmci_resume,
1420};
1421
1422
1423static int __init s3cmci_init(void)
1424{
1425 platform_driver_register(&s3cmci_driver_2410);
1426 platform_driver_register(&s3cmci_driver_2412);
1427 platform_driver_register(&s3cmci_driver_2440);
1428 return 0;
1429}
1430
1431static void __exit s3cmci_exit(void)
1432{
1433 platform_driver_unregister(&s3cmci_driver_2410);
1434 platform_driver_unregister(&s3cmci_driver_2412);
1435 platform_driver_unregister(&s3cmci_driver_2440);
1436}
1437
1438module_init(s3cmci_init);
1439module_exit(s3cmci_exit);
1440
1441MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
1442MODULE_LICENSE("GPL v2");
1443MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>");
1444MODULE_ALIAS("platform:s3c2410-sdi");
1445MODULE_ALIAS("platform:s3c2412-sdi");
1446MODULE_ALIAS("platform:s3c2440-sdi");
diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h
new file mode 100644
index 000000000000..37d9c60010c9
--- /dev/null
+++ b/drivers/mmc/host/s3cmci.h
@@ -0,0 +1,70 @@
1/*
 2 * linux/drivers/mmc/host/s3cmci.h - Samsung S3C MCI driver
3 *
4 * Copyright (C) 2004-2006 Thomas Kleffel, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/* FIXME: DMA Resource management ?! */
12#define S3CMCI_DMA 0
13
14enum s3cmci_waitfor {
15 COMPLETION_NONE,
16 COMPLETION_FINALIZE,
17 COMPLETION_CMDSENT,
18 COMPLETION_RSPFIN,
19 COMPLETION_XFERFINISH,
20 COMPLETION_XFERFINISH_RSPFIN,
21};
22
23struct s3cmci_host {
24 struct platform_device *pdev;
25 struct s3c24xx_mci_pdata *pdata;
26 struct mmc_host *mmc;
27 struct resource *mem;
28 struct clk *clk;
29 void __iomem *base;
30 int irq;
31 int irq_cd;
32 int dma;
33
34 unsigned long clk_rate;
35 unsigned long clk_div;
36 unsigned long real_rate;
37 u8 prescaler;
38
39 int is2440;
40 unsigned sdiimsk;
41 unsigned sdidata;
42 int dodma;
43 int dmatogo;
44
45 struct mmc_request *mrq;
46 int cmd_is_stop;
47
48 spinlock_t complete_lock;
49 enum s3cmci_waitfor complete_what;
50
51 int dma_complete;
52
53 u32 pio_sgptr;
54 u32 pio_words;
55 u32 pio_count;
56 u32 *pio_ptr;
57#define XFER_NONE 0
58#define XFER_READ 1
59#define XFER_WRITE 2
60 u32 pio_active;
61
62 int bus_width;
63
64 char dbgmsg_cmd[301];
65 char dbgmsg_dat[301];
66 char *status;
67
68 unsigned int ccnt, dcnt;
69 struct tasklet_struct pio_tasklet;
70};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
new file mode 100644
index 000000000000..deb607c52c0d
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -0,0 +1,732 @@
1/* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
2 *
3 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 *
10 * Thanks to the following companies for their support:
11 *
12 * - JMicron (hardware and technical support)
13 */
14
15#include <linux/delay.h>
16#include <linux/highmem.h>
17#include <linux/pci.h>
18#include <linux/dma-mapping.h>
19
20#include <linux/mmc/host.h>
21
22#include <asm/scatterlist.h>
23#include <asm/io.h>
24
25#include "sdhci.h"
26
27/*
28 * PCI registers
29 */
30
31#define PCI_SDHCI_IFPIO 0x00
32#define PCI_SDHCI_IFDMA 0x01
33#define PCI_SDHCI_IFVENDOR 0x02
34
35#define PCI_SLOT_INFO 0x40 /* 8 bits */
36#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
37#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
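/* The slot information register encodes the number of slots minus one in
 * bits [6:4] and the index of the first BAR mapping SDHCI registers in
 * bits [2:0]. */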
38
39#define MAX_SLOTS 8
40
41struct sdhci_pci_chip;
42struct sdhci_pci_slot;
43
44struct sdhci_pci_fixes {
45 unsigned int quirks;
46
47 int (*probe)(struct sdhci_pci_chip*);
48
49 int (*probe_slot)(struct sdhci_pci_slot*);
50 void (*remove_slot)(struct sdhci_pci_slot*, int);
51
52 int (*suspend)(struct sdhci_pci_chip*,
53 pm_message_t);
54 int (*resume)(struct sdhci_pci_chip*);
55};
56
57struct sdhci_pci_slot {
58 struct sdhci_pci_chip *chip;
59 struct sdhci_host *host;
60
61 int pci_bar;
62};
63
64struct sdhci_pci_chip {
65 struct pci_dev *pdev;
66
67 unsigned int quirks;
68 const struct sdhci_pci_fixes *fixes;
69
70 int num_slots; /* Slots on controller */
71 struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
72};
73
74
75/*****************************************************************************\
76 * *
77 * Hardware specific quirk handling *
78 * *
79\*****************************************************************************/
80
81static int ricoh_probe(struct sdhci_pci_chip *chip)
82{
83 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
84 chip->quirks |= SDHCI_QUIRK_CLOCK_BEFORE_RESET;
85
86 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)
87 chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
88
89 return 0;
90}
91
92static const struct sdhci_pci_fixes sdhci_ricoh = {
93 .probe = ricoh_probe,
94 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR,
95};
96
97static const struct sdhci_pci_fixes sdhci_ene_712 = {
98 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
99 SDHCI_QUIRK_BROKEN_DMA,
100};
101
102static const struct sdhci_pci_fixes sdhci_ene_714 = {
103 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
104 SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
105 SDHCI_QUIRK_BROKEN_DMA,
106};
107
108static const struct sdhci_pci_fixes sdhci_cafe = {
109 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
110 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
111};
112
113static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
114{
115 u8 scratch;
116 int ret;
117
118 ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
119 if (ret)
120 return ret;
121
122 /*
123 * Turn PMOS on [bit 0], set over current detection to 2.4 V
124 * [bit 1:2] and enable over current debouncing [bit 6].
125 */
126 if (on)
127 scratch |= 0x47;
128 else
129 scratch &= ~0x47;
130
131 ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
132 if (ret)
133 return ret;
134
135 return 0;
136}
137
138static int jmicron_probe(struct sdhci_pci_chip *chip)
139{
140 int ret;
141
142 if (chip->pdev->revision == 0) {
143 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
144 SDHCI_QUIRK_32BIT_DMA_SIZE |
145 SDHCI_QUIRK_32BIT_ADMA_SIZE |
146 SDHCI_QUIRK_RESET_AFTER_REQUEST;
147 }
148
149 /*
150 * JMicron chips can have two interfaces to the same hardware
151 * in order to work around limitations in Microsoft's driver.
152 * We need to make sure we only bind to one of them.
153 *
154 * This code assumes two things:
155 *
156 * 1. The PCI code adds subfunctions in order.
157 *
158 * 2. The MMC interface has a lower subfunction number
159 * than the SD interface.
160 */
161 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) {
162 struct pci_dev *sd_dev;
163
164 sd_dev = NULL;
165 while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
166 PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) {
167 if ((PCI_SLOT(chip->pdev->devfn) ==
168 PCI_SLOT(sd_dev->devfn)) &&
169 (chip->pdev->bus == sd_dev->bus))
170 break;
171 }
172
173 if (sd_dev) {
174 pci_dev_put(sd_dev);
175 dev_info(&chip->pdev->dev, "Refusing to bind to "
176 "secondary interface.\n");
177 return -ENODEV;
178 }
179 }
180
181 /*
182 * JMicron chips need a bit of a nudge to enable the power
183 * output pins.
184 */
185 ret = jmicron_pmos(chip, 1);
186 if (ret) {
187 dev_err(&chip->pdev->dev, "Failure enabling card power\n");
188 return ret;
189 }
190
191 return 0;
192}
193
194static void jmicron_enable_mmc(struct sdhci_host *host, int on)
195{
196 u8 scratch;
197
198 scratch = readb(host->ioaddr + 0xC0);
199
200 if (on)
201 scratch |= 0x01;
202 else
203 scratch &= ~0x01;
204
205 writeb(scratch, host->ioaddr + 0xC0);
206}
207
208static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
209{
210 if (slot->chip->pdev->revision == 0) {
211 u16 version;
212
213 version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
214 version = (version & SDHCI_VENDOR_VER_MASK) >>
215 SDHCI_VENDOR_VER_SHIFT;
216
217 /*
218 * Older versions of the chip have lots of nasty glitches
219 * in the ADMA engine. It's best just to avoid it
220 * completely.
221 */
222 if (version < 0xAC)
223 slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
224 }
225
226 /*
227 * The secondary interface requires a bit set to get the
228 * interrupts.
229 */
230 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
231 jmicron_enable_mmc(slot->host, 1);
232
233 return 0;
234}
235
236static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
237{
238 if (dead)
239 return;
240
241 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
242 jmicron_enable_mmc(slot->host, 0);
243}
244
245static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
246{
247 int i;
248
249 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
250 for (i = 0;i < chip->num_slots;i++)
251 jmicron_enable_mmc(chip->slots[i]->host, 0);
252 }
253
254 return 0;
255}
256
257static int jmicron_resume(struct sdhci_pci_chip *chip)
258{
259 int ret, i;
260
261 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
262 for (i = 0;i < chip->num_slots;i++)
263 jmicron_enable_mmc(chip->slots[i]->host, 1);
264 }
265
266 ret = jmicron_pmos(chip, 1);
267 if (ret) {
268 dev_err(&chip->pdev->dev, "Failure enabling card power\n");
269 return ret;
270 }
271
272 return 0;
273}
274
275static const struct sdhci_pci_fixes sdhci_jmicron = {
276 .probe = jmicron_probe,
277
278 .probe_slot = jmicron_probe_slot,
279 .remove_slot = jmicron_remove_slot,
280
281 .suspend = jmicron_suspend,
282 .resume = jmicron_resume,
283};
284
285static const struct pci_device_id pci_ids[] __devinitdata = {
286 {
287 .vendor = PCI_VENDOR_ID_RICOH,
288 .device = PCI_DEVICE_ID_RICOH_R5C822,
289 .subvendor = PCI_ANY_ID,
290 .subdevice = PCI_ANY_ID,
291 .driver_data = (kernel_ulong_t)&sdhci_ricoh,
292 },
293
294 {
295 .vendor = PCI_VENDOR_ID_ENE,
296 .device = PCI_DEVICE_ID_ENE_CB712_SD,
297 .subvendor = PCI_ANY_ID,
298 .subdevice = PCI_ANY_ID,
299 .driver_data = (kernel_ulong_t)&sdhci_ene_712,
300 },
301
302 {
303 .vendor = PCI_VENDOR_ID_ENE,
304 .device = PCI_DEVICE_ID_ENE_CB712_SD_2,
305 .subvendor = PCI_ANY_ID,
306 .subdevice = PCI_ANY_ID,
307 .driver_data = (kernel_ulong_t)&sdhci_ene_712,
308 },
309
310 {
311 .vendor = PCI_VENDOR_ID_ENE,
312 .device = PCI_DEVICE_ID_ENE_CB714_SD,
313 .subvendor = PCI_ANY_ID,
314 .subdevice = PCI_ANY_ID,
315 .driver_data = (kernel_ulong_t)&sdhci_ene_714,
316 },
317
318 {
319 .vendor = PCI_VENDOR_ID_ENE,
320 .device = PCI_DEVICE_ID_ENE_CB714_SD_2,
321 .subvendor = PCI_ANY_ID,
322 .subdevice = PCI_ANY_ID,
323 .driver_data = (kernel_ulong_t)&sdhci_ene_714,
324 },
325
326 {
327 .vendor = PCI_VENDOR_ID_MARVELL,
328 .device = PCI_DEVICE_ID_MARVELL_CAFE_SD,
329 .subvendor = PCI_ANY_ID,
330 .subdevice = PCI_ANY_ID,
331 .driver_data = (kernel_ulong_t)&sdhci_cafe,
332 },
333
334 {
335 .vendor = PCI_VENDOR_ID_JMICRON,
336 .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD,
337 .subvendor = PCI_ANY_ID,
338 .subdevice = PCI_ANY_ID,
339 .driver_data = (kernel_ulong_t)&sdhci_jmicron,
340 },
341
342 {
343 .vendor = PCI_VENDOR_ID_JMICRON,
344 .device = PCI_DEVICE_ID_JMICRON_JMB38X_MMC,
345 .subvendor = PCI_ANY_ID,
346 .subdevice = PCI_ANY_ID,
347 .driver_data = (kernel_ulong_t)&sdhci_jmicron,
348 },
349
350 { /* Generic SD host controller */
351 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
352 },
353
354 { /* end: all zeroes */ },
355};
356
357MODULE_DEVICE_TABLE(pci, pci_ids);
358
359/*****************************************************************************\
360 * *
361 * SDHCI core callbacks *
362 * *
363\*****************************************************************************/
364
365static int sdhci_pci_enable_dma(struct sdhci_host *host)
366{
367 struct sdhci_pci_slot *slot;
368 struct pci_dev *pdev;
369 int ret;
370
371 slot = sdhci_priv(host);
372 pdev = slot->chip->pdev;
373
374 if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
375 ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
376 (host->flags & SDHCI_USE_DMA)) {
377 dev_warn(&pdev->dev, "Will use DMA mode even though HW "
378 "doesn't fully claim to support it.\n");
379 }
380
381 ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
382 if (ret)
383 return ret;
384
385 pci_set_master(pdev);
386
387 return 0;
388}
389
390static struct sdhci_ops sdhci_pci_ops = {
391 .enable_dma = sdhci_pci_enable_dma,
392};
393
394/*****************************************************************************\
395 * *
396 * Suspend/resume *
397 * *
398\*****************************************************************************/
399
400#ifdef CONFIG_PM
401
402static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
403{
404 struct sdhci_pci_chip *chip;
405 struct sdhci_pci_slot *slot;
406 int i, ret;
407
408 chip = pci_get_drvdata(pdev);
409 if (!chip)
410 return 0;
411
412 for (i = 0;i < chip->num_slots;i++) {
413 slot = chip->slots[i];
414 if (!slot)
415 continue;
416
417 ret = sdhci_suspend_host(slot->host, state);
418
419 if (ret) {
420 for (i--;i >= 0;i--)
421 sdhci_resume_host(chip->slots[i]->host);
422 return ret;
423 }
424 }
425
426 if (chip->fixes && chip->fixes->suspend) {
427 ret = chip->fixes->suspend(chip, state);
428 if (ret) {
429 for (i = chip->num_slots - 1;i >= 0;i--)
430 sdhci_resume_host(chip->slots[i]->host);
431 return ret;
432 }
433 }
434
435 pci_save_state(pdev);
436 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
437 pci_disable_device(pdev);
438 pci_set_power_state(pdev, pci_choose_state(pdev, state));
439
440 return 0;
441}
442
443static int sdhci_pci_resume (struct pci_dev *pdev)
444{
445 struct sdhci_pci_chip *chip;
446 struct sdhci_pci_slot *slot;
447 int i, ret;
448
449 chip = pci_get_drvdata(pdev);
450 if (!chip)
451 return 0;
452
453 pci_set_power_state(pdev, PCI_D0);
454 pci_restore_state(pdev);
455 ret = pci_enable_device(pdev);
456 if (ret)
457 return ret;
458
459 if (chip->fixes && chip->fixes->resume) {
460 ret = chip->fixes->resume(chip);
461 if (ret)
462 return ret;
463 }
464
465 for (i = 0;i < chip->num_slots;i++) {
466 slot = chip->slots[i];
467 if (!slot)
468 continue;
469
470 ret = sdhci_resume_host(slot->host);
471 if (ret)
472 return ret;
473 }
474
475 return 0;
476}
477
478#else /* CONFIG_PM */
479
480#define sdhci_pci_suspend NULL
481#define sdhci_pci_resume NULL
482
483#endif /* CONFIG_PM */
484
485/*****************************************************************************\
486 * *
487 * Device probing/removal *
488 * *
489\*****************************************************************************/
490
491static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
492 struct pci_dev *pdev, struct sdhci_pci_chip *chip, int bar)
493{
494 struct sdhci_pci_slot *slot;
495 struct sdhci_host *host;
496
497 resource_size_t addr;
498
499 int ret;
500
501 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
502 dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
503 return ERR_PTR(-ENODEV);
504 }
505
506 if (pci_resource_len(pdev, bar) != 0x100) {
507 dev_err(&pdev->dev, "Invalid iomem size. You may "
508 "experience problems.\n");
509 }
510
511 if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
512 dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
513 return ERR_PTR(-ENODEV);
514 }
515
516 if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
517 dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
518 return ERR_PTR(-ENODEV);
519 }
520
521 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot));
522 if (IS_ERR(host)) {
523 ret = PTR_ERR(host);
524 goto unmap;
525 }
526
527 slot = sdhci_priv(host);
528
529 slot->chip = chip;
530 slot->host = host;
531 slot->pci_bar = bar;
532
533 host->hw_name = "PCI";
534 host->ops = &sdhci_pci_ops;
535 host->quirks = chip->quirks;
536
537 host->irq = pdev->irq;
538
539 ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc));
540 if (ret) {
541 dev_err(&pdev->dev, "cannot request region\n");
542 return ERR_PTR(ret);
543 }
544
545 addr = pci_resource_start(pdev, bar);
546 host->ioaddr = ioremap_nocache(addr, pci_resource_len(pdev, bar));
547 if (!host->ioaddr) {
548 dev_err(&pdev->dev, "failed to remap registers\n");
549 goto release;
550 }
551
552 if (chip->fixes && chip->fixes->probe_slot) {
553 ret = chip->fixes->probe_slot(slot);
554 if (ret)
555 goto unmap;
556 }
557
558 ret = sdhci_add_host(host);
559 if (ret)
560 goto remove;
561
562 return slot;
563
564remove:
565 if (chip->fixes && chip->fixes->remove_slot)
566 chip->fixes->remove_slot(slot, 0);
567
568unmap:
569 iounmap(host->ioaddr);
570
571release:
572 pci_release_region(pdev, bar);
573 sdhci_free_host(host);
574
575 return ERR_PTR(ret);
576}
577
578static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
579{
580 int dead;
581 u32 scratch;
582
583 dead = 0;
584 scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
585 if (scratch == (u32)-1)
586 dead = 1;
587
588 sdhci_remove_host(slot->host, dead);
589
590 if (slot->chip->fixes && slot->chip->fixes->remove_slot)
591 slot->chip->fixes->remove_slot(slot, dead);
592
593 pci_release_region(slot->chip->pdev, slot->pci_bar);
594
595 sdhci_free_host(slot->host);
596}
597
598static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
599 const struct pci_device_id *ent)
600{
601 struct sdhci_pci_chip *chip;
602 struct sdhci_pci_slot *slot;
603
604 u8 slots, rev, first_bar;
605 int ret, i;
606
607 BUG_ON(pdev == NULL);
608 BUG_ON(ent == NULL);
609
610 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);
611
612 dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
613 (int)pdev->vendor, (int)pdev->device, (int)rev);
614
615 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
616 if (ret)
617 return ret;
618
619 slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
620 dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
621 if (slots == 0)
622 return -ENODEV;
623
624 BUG_ON(slots > MAX_SLOTS);
625
626 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
627 if (ret)
628 return ret;
629
630 first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
631
632 if (first_bar > 5) {
633 dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
634 return -ENODEV;
635 }
636
637 ret = pci_enable_device(pdev);
638 if (ret)
639 return ret;
640
641 chip = kzalloc(sizeof(struct sdhci_pci_chip), GFP_KERNEL);
642 if (!chip) {
643 ret = -ENOMEM;
644 goto err;
645 }
646
647 chip->pdev = pdev;
648 chip->fixes = (const struct sdhci_pci_fixes*)ent->driver_data;
649 if (chip->fixes)
650 chip->quirks = chip->fixes->quirks;
651 chip->num_slots = slots;
652
653 pci_set_drvdata(pdev, chip);
654
655 if (chip->fixes && chip->fixes->probe) {
656 ret = chip->fixes->probe(chip);
657 if (ret)
658 goto free;
659 }
660
661 for (i = 0;i < slots;i++) {
662 slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i);
663 if (IS_ERR(slot)) {
664 for (i--;i >= 0;i--)
665 sdhci_pci_remove_slot(chip->slots[i]);
666 ret = PTR_ERR(slot);
667 goto free;
668 }
669
670 chip->slots[i] = slot;
671 }
672
673 return 0;
674
675free:
676 pci_set_drvdata(pdev, NULL);
677 kfree(chip);
678
679err:
680 pci_disable_device(pdev);
681 return ret;
682}
683
684static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
685{
686 int i;
687 struct sdhci_pci_chip *chip;
688
689 chip = pci_get_drvdata(pdev);
690
691 if (chip) {
692 for (i = 0;i < chip->num_slots; i++)
693 sdhci_pci_remove_slot(chip->slots[i]);
694
695 pci_set_drvdata(pdev, NULL);
696 kfree(chip);
697 }
698
699 pci_disable_device(pdev);
700}
701
702static struct pci_driver sdhci_driver = {
703 .name = "sdhci-pci",
704 .id_table = pci_ids,
705 .probe = sdhci_pci_probe,
706 .remove = __devexit_p(sdhci_pci_remove),
707 .suspend = sdhci_pci_suspend,
708 .resume = sdhci_pci_resume,
709};
710
711/*****************************************************************************\
712 * *
713 * Driver init/exit *
714 * *
715\*****************************************************************************/
716
717static int __init sdhci_drv_init(void)
718{
719 return pci_register_driver(&sdhci_driver);
720}
721
722static void __exit sdhci_drv_exit(void)
723{
724 pci_unregister_driver(&sdhci_driver);
725}
726
727module_init(sdhci_drv_init);
728module_exit(sdhci_drv_exit);
729
730MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
731MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
732MODULE_LICENSE("GPL");
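
The slot probe and removal code above is the template for any bus glue written against the new split core: allocate a host with room for bus-private data via sdhci_alloc_host(), fill in hw_name, ops, quirks, irq and ioaddr, then register with sdhci_add_host() and tear down with sdhci_remove_host()/sdhci_free_host(). A minimal, hypothetical glue probe using only that exported API might look roughly as follows (the myhw_* names are illustrative, not part of this patch):

struct myhw_slot {
	void __iomem *regs;		/* whatever the glue needs to remember */
};

static const struct sdhci_ops myhw_sdhci_ops = {
	/* .enable_dma may stay NULL if the bus needs no DMA setup */
};

static int myhw_probe(struct device *dev, void __iomem *ioaddr, int irq)
{
	struct sdhci_host *host;
	struct myhw_slot *slot;
	int ret;

	host = sdhci_alloc_host(dev, sizeof(struct myhw_slot));
	if (IS_ERR(host))
		return PTR_ERR(host);

	slot = sdhci_priv(host);	/* private area behind struct sdhci_host */
	slot->regs = ioaddr;

	host->hw_name = "MYHW";
	host->ops = &myhw_sdhci_ops;
	host->quirks = 0;		/* e.g. SDHCI_QUIRK_32BIT_DMA_ADDR */
	host->irq = irq;
	host->ioaddr = ioaddr;

	ret = sdhci_add_host(host);
	if (ret)
		sdhci_free_host(host);
	return ret;
}

The private area returned by sdhci_priv() sits directly behind struct sdhci_host, which is how sdhci-pci keeps its struct sdhci_pci_slot per registered host.
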
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b413aa6c246b..17701c3da733 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -15,7 +15,7 @@
15 15
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/highmem.h> 17#include <linux/highmem.h>
18#include <linux/pci.h> 18#include <linux/io.h>
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/scatterlist.h> 20#include <linux/scatterlist.h>
21 21
@@ -32,135 +32,6 @@
32 32
33static unsigned int debug_quirks = 0; 33static unsigned int debug_quirks = 0;
34 34
35/*
36 * Different quirks to handle when the hardware deviates from a strict
37 * interpretation of the SDHCI specification.
38 */
39
40/* Controller doesn't honor resets unless we touch the clock register */
41#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
42/* Controller has bad caps bits, but really supports DMA */
43#define SDHCI_QUIRK_FORCE_DMA (1<<1)
44/* Controller doesn't like to be reset when there is no card inserted. */
45#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
46/* Controller doesn't like clearing the power reg before a change */
47#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
48/* Controller has flaky internal state so reset it on each ios change */
49#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
50/* Controller has an unusable DMA engine */
51#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
52/* Controller can only DMA from 32-bit aligned addresses */
53#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<6)
54/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
55#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<7)
56/* Controller needs to be reset after each request to stay stable */
57#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<8)
58/* Controller needs voltage and power writes to happen separately */
59#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<9)
60/* Controller has an off-by-one issue with timeout value */
61#define SDHCI_QUIRK_INCR_TIMEOUT_CONTROL (1<<10)
62
63static const struct pci_device_id pci_ids[] __devinitdata = {
64 {
65 .vendor = PCI_VENDOR_ID_RICOH,
66 .device = PCI_DEVICE_ID_RICOH_R5C822,
67 .subvendor = PCI_VENDOR_ID_IBM,
68 .subdevice = PCI_ANY_ID,
69 .driver_data = SDHCI_QUIRK_CLOCK_BEFORE_RESET |
70 SDHCI_QUIRK_FORCE_DMA,
71 },
72
73 {
74 .vendor = PCI_VENDOR_ID_RICOH,
75 .device = PCI_DEVICE_ID_RICOH_R5C822,
76 .subvendor = PCI_VENDOR_ID_SAMSUNG,
77 .subdevice = PCI_ANY_ID,
78 .driver_data = SDHCI_QUIRK_FORCE_DMA |
79 SDHCI_QUIRK_NO_CARD_NO_RESET,
80 },
81
82 {
83 .vendor = PCI_VENDOR_ID_RICOH,
84 .device = PCI_DEVICE_ID_RICOH_R5C822,
85 .subvendor = PCI_ANY_ID,
86 .subdevice = PCI_ANY_ID,
87 .driver_data = SDHCI_QUIRK_FORCE_DMA,
88 },
89
90 {
91 .vendor = PCI_VENDOR_ID_TI,
92 .device = PCI_DEVICE_ID_TI_XX21_XX11_SD,
93 .subvendor = PCI_ANY_ID,
94 .subdevice = PCI_ANY_ID,
95 .driver_data = SDHCI_QUIRK_FORCE_DMA,
96 },
97
98 {
99 .vendor = PCI_VENDOR_ID_ENE,
100 .device = PCI_DEVICE_ID_ENE_CB712_SD,
101 .subvendor = PCI_ANY_ID,
102 .subdevice = PCI_ANY_ID,
103 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
104 SDHCI_QUIRK_BROKEN_DMA,
105 },
106
107 {
108 .vendor = PCI_VENDOR_ID_ENE,
109 .device = PCI_DEVICE_ID_ENE_CB712_SD_2,
110 .subvendor = PCI_ANY_ID,
111 .subdevice = PCI_ANY_ID,
112 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
113 SDHCI_QUIRK_BROKEN_DMA,
114 },
115
116 {
117 .vendor = PCI_VENDOR_ID_ENE,
118 .device = PCI_DEVICE_ID_ENE_CB714_SD,
119 .subvendor = PCI_ANY_ID,
120 .subdevice = PCI_ANY_ID,
121 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
122 SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
123 SDHCI_QUIRK_BROKEN_DMA,
124 },
125
126 {
127 .vendor = PCI_VENDOR_ID_ENE,
128 .device = PCI_DEVICE_ID_ENE_CB714_SD_2,
129 .subvendor = PCI_ANY_ID,
130 .subdevice = PCI_ANY_ID,
131 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
132 SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
133 SDHCI_QUIRK_BROKEN_DMA,
134 },
135
136 {
137 .vendor = PCI_VENDOR_ID_MARVELL,
138 .device = PCI_DEVICE_ID_MARVELL_CAFE_SD,
139 .subvendor = PCI_ANY_ID,
140 .subdevice = PCI_ANY_ID,
141 .driver_data = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
142 SDHCI_QUIRK_INCR_TIMEOUT_CONTROL,
143 },
144
145 {
146 .vendor = PCI_VENDOR_ID_JMICRON,
147 .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD,
148 .subvendor = PCI_ANY_ID,
149 .subdevice = PCI_ANY_ID,
150 .driver_data = SDHCI_QUIRK_32BIT_DMA_ADDR |
151 SDHCI_QUIRK_32BIT_DMA_SIZE |
152 SDHCI_QUIRK_RESET_AFTER_REQUEST,
153 },
154
155 { /* Generic SD host controller */
156 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
157 },
158
159 { /* end: all zeroes */ },
160};
161
162MODULE_DEVICE_TABLE(pci, pci_ids);
163
164static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *); 35static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
165static void sdhci_finish_data(struct sdhci_host *); 36static void sdhci_finish_data(struct sdhci_host *);
166 37
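
The quirk bits and the PCI ID table deleted here do not go away: the quirks move into sdhci.h as per-host flags (see the sdhci.h diff below), and device matching moves to sdhci-pci.c, where driver_data now points at a struct sdhci_pci_fixes instead of carrying a raw quirk mask. struct sdhci_pci_fixes itself is not shown in this section, so the sketch below infers its fields (.quirks plus optional .probe/.probe_slot/.remove_slot hooks) from how sdhci_pci_probe() uses chip->fixes; treat the exact layout as an assumption:

/* Sketch only: field names inferred from the sdhci-pci.c code shown earlier. */
static const struct sdhci_pci_fixes sdhci_jmicron = {
	.quirks		= SDHCI_QUIRK_32BIT_DMA_ADDR |
			  SDHCI_QUIRK_32BIT_DMA_SIZE |
			  SDHCI_QUIRK_RESET_AFTER_REQUEST,
	/* optional .probe, .probe_slot and .remove_slot hooks go here */
};

static const struct pci_device_id pci_ids[] = {
	{
		.vendor		= PCI_VENDOR_ID_JMICRON,
		.device		= PCI_DEVICE_ID_JMICRON_JMB38X_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
	},
	{ /* Generic SD host controller */
		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
	},
	{ /* end: all zeroes */ },
};
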
@@ -215,7 +86,7 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
215{ 86{
216 unsigned long timeout; 87 unsigned long timeout;
217 88
218 if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 89 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
219 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & 90 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
220 SDHCI_CARD_PRESENT)) 91 SDHCI_CARD_PRESENT))
221 return; 92 return;
@@ -253,7 +124,8 @@ static void sdhci_init(struct sdhci_host *host)
253 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | 124 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
254 SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT | 125 SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
255 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | 126 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
256 SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE; 127 SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
128 SDHCI_INT_ADMA_ERROR;
257 129
258 writel(intmask, host->ioaddr + SDHCI_INT_ENABLE); 130 writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
259 writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE); 131 writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
@@ -443,23 +315,226 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
443 DBG("PIO transfer complete.\n"); 315 DBG("PIO transfer complete.\n");
444} 316}
445 317
446static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) 318static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
447{ 319{
448 u8 count; 320 local_irq_save(*flags);
449 unsigned target_timeout, current_timeout; 321 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
322}
450 323
451 WARN_ON(host->data); 324static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
325{
326 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
327 local_irq_restore(*flags);
328}
452 329
453 if (data == NULL) 330static int sdhci_adma_table_pre(struct sdhci_host *host,
454 return; 331 struct mmc_data *data)
332{
333 int direction;
455 334
456 /* Sanity checks */ 335 u8 *desc;
457 BUG_ON(data->blksz * data->blocks > 524288); 336 u8 *align;
458 BUG_ON(data->blksz > host->mmc->max_blk_size); 337 dma_addr_t addr;
459 BUG_ON(data->blocks > 65535); 338 dma_addr_t align_addr;
339 int len, offset;
460 340
461 host->data = data; 341 struct scatterlist *sg;
462 host->data_early = 0; 342 int i;
343 char *buffer;
344 unsigned long flags;
345
346 /*
347 * The spec does not specify endianness of descriptor table.
348 * We currently guess that it is LE.
349 */
350
351 if (data->flags & MMC_DATA_READ)
352 direction = DMA_FROM_DEVICE;
353 else
354 direction = DMA_TO_DEVICE;
355
356 /*
357 * The ADMA descriptor table is mapped further down as we
358 * need to fill it with data first.
359 */
360
361 host->align_addr = dma_map_single(mmc_dev(host->mmc),
362 host->align_buffer, 128 * 4, direction);
363 if (dma_mapping_error(host->align_addr))
364 goto fail;
365 BUG_ON(host->align_addr & 0x3);
366
367 host->sg_count = dma_map_sg(mmc_dev(host->mmc),
368 data->sg, data->sg_len, direction);
369 if (host->sg_count == 0)
370 goto unmap_align;
371
372 desc = host->adma_desc;
373 align = host->align_buffer;
374
375 align_addr = host->align_addr;
376
377 for_each_sg(data->sg, sg, host->sg_count, i) {
378 addr = sg_dma_address(sg);
379 len = sg_dma_len(sg);
380
381 /*
382 * The SDHCI specification states that ADMA
383 * addresses must be 32-bit aligned. If they
384 * aren't, then we use a bounce buffer for
385 * the (up to three) bytes that screw up the
386 * alignment.
387 */
388 offset = (4 - (addr & 0x3)) & 0x3;
389 if (offset) {
390 if (data->flags & MMC_DATA_WRITE) {
391 buffer = sdhci_kmap_atomic(sg, &flags);
392 memcpy(align, buffer, offset);
393 sdhci_kunmap_atomic(buffer, &flags);
394 }
395
396 desc[7] = (align_addr >> 24) & 0xff;
397 desc[6] = (align_addr >> 16) & 0xff;
398 desc[5] = (align_addr >> 8) & 0xff;
399 desc[4] = (align_addr >> 0) & 0xff;
400
401 BUG_ON(offset > 65536);
402
403 desc[3] = (offset >> 8) & 0xff;
404 desc[2] = (offset >> 0) & 0xff;
405
406 desc[1] = 0x00;
407 desc[0] = 0x21; /* tran, valid */
408
409 align += 4;
410 align_addr += 4;
411
412 desc += 8;
413
414 addr += offset;
415 len -= offset;
416 }
417
418 desc[7] = (addr >> 24) & 0xff;
419 desc[6] = (addr >> 16) & 0xff;
420 desc[5] = (addr >> 8) & 0xff;
421 desc[4] = (addr >> 0) & 0xff;
422
423 BUG_ON(len > 65536);
424
425 desc[3] = (len >> 8) & 0xff;
426 desc[2] = (len >> 0) & 0xff;
427
428 desc[1] = 0x00;
429 desc[0] = 0x21; /* tran, valid */
430
431 desc += 8;
432
433 /*
434 * If this triggers then we have a calculation bug
435 * somewhere. :/
436 */
437 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
438 }
439
440 /*
441 * Add a terminating entry.
442 */
443 desc[7] = 0;
444 desc[6] = 0;
445 desc[5] = 0;
446 desc[4] = 0;
447
448 desc[3] = 0;
449 desc[2] = 0;
450
451 desc[1] = 0x00;
452 desc[0] = 0x03; /* nop, end, valid */
453
454 /*
455 * Resync align buffer as we might have changed it.
456 */
457 if (data->flags & MMC_DATA_WRITE) {
458 dma_sync_single_for_device(mmc_dev(host->mmc),
459 host->align_addr, 128 * 4, direction);
460 }
461
462 host->adma_addr = dma_map_single(mmc_dev(host->mmc),
463 host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
 464 	if (dma_mapping_error(host->adma_addr))
465 goto unmap_entries;
466 BUG_ON(host->adma_addr & 0x3);
467
468 return 0;
469
470unmap_entries:
471 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
472 data->sg_len, direction);
473unmap_align:
474 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
475 128 * 4, direction);
476fail:
477 return -EINVAL;
478}
479
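
Each entry that sdhci_adma_table_pre() emits above is an 8-byte ADMA2 descriptor, written byte by byte because, as the comment notes, the table's endianness is only guessed to be little endian: byte 0 carries the attributes (0x21 = valid "tran" entry, 0x03 = valid "nop" entry that ends the table), bytes 2-3 the 16-bit length, and bytes 4-7 the 32-bit buffer address. A small helper equivalent to that byte-poking, shown purely to make the layout explicit:

/* Illustration of the descriptor layout assembled above (assumed LE). */
static void sdhci_adma_write_desc(u8 *desc, u32 addr, u16 len, u8 attr)
{
	desc[0] = attr;			/* 0x21 = tran+valid, 0x03 = nop+end+valid */
	desc[1] = 0x00;
	desc[2] = (len >> 0) & 0xff;	/* 16-bit transfer length */
	desc[3] = (len >> 8) & 0xff;
	desc[4] = (addr >> 0) & 0xff;	/* 32-bit data address */
	desc[5] = (addr >> 8) & 0xff;
	desc[6] = (addr >> 16) & 0xff;
	desc[7] = (addr >> 24) & 0xff;
}

With this, the bounce-buffer case above amounts to one descriptor for the up-to-three unaligned bytes (address align_addr, length offset) followed by a regular entry for the remaining, now aligned, part of the segment.
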
480static void sdhci_adma_table_post(struct sdhci_host *host,
481 struct mmc_data *data)
482{
483 int direction;
484
485 struct scatterlist *sg;
486 int i, size;
487 u8 *align;
488 char *buffer;
489 unsigned long flags;
490
491 if (data->flags & MMC_DATA_READ)
492 direction = DMA_FROM_DEVICE;
493 else
494 direction = DMA_TO_DEVICE;
495
496 dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
497 (128 * 2 + 1) * 4, DMA_TO_DEVICE);
498
499 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
500 128 * 4, direction);
501
502 if (data->flags & MMC_DATA_READ) {
503 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
504 data->sg_len, direction);
505
506 align = host->align_buffer;
507
508 for_each_sg(data->sg, sg, host->sg_count, i) {
509 if (sg_dma_address(sg) & 0x3) {
510 size = 4 - (sg_dma_address(sg) & 0x3);
511
512 buffer = sdhci_kmap_atomic(sg, &flags);
513 memcpy(buffer, align, size);
514 sdhci_kunmap_atomic(buffer, &flags);
515
516 align += 4;
517 }
518 }
519 }
520
521 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
522 data->sg_len, direction);
523}
524
525static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
526{
527 u8 count;
528 unsigned target_timeout, current_timeout;
529
530 /*
531 * If the host controller provides us with an incorrect timeout
532 * value, just skip the check and use 0xE. The hardware may take
533 * longer to time out, but that's much better than having a too-short
534 * timeout value.
535 */
536 if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL))
537 return 0xE;
463 538
464 /* timeout in us */ 539 /* timeout in us */
465 target_timeout = data->timeout_ns / 1000 + 540 target_timeout = data->timeout_ns / 1000 +
@@ -484,52 +559,158 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
484 break; 559 break;
485 } 560 }
486 561
487 /*
488 * Compensate for an off-by-one error in the CaFe hardware; otherwise,
489 * a too-small count gives us interrupt timeouts.
490 */
491 if ((host->chip->quirks & SDHCI_QUIRK_INCR_TIMEOUT_CONTROL))
492 count++;
493
494 if (count >= 0xF) { 562 if (count >= 0xF) {
495 printk(KERN_WARNING "%s: Too large timeout requested!\n", 563 printk(KERN_WARNING "%s: Too large timeout requested!\n",
496 mmc_hostname(host->mmc)); 564 mmc_hostname(host->mmc));
497 count = 0xE; 565 count = 0xE;
498 } 566 }
499 567
568 return count;
569}
570
571static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
572{
573 u8 count;
574 u8 ctrl;
575 int ret;
576
577 WARN_ON(host->data);
578
579 if (data == NULL)
580 return;
581
582 /* Sanity checks */
583 BUG_ON(data->blksz * data->blocks > 524288);
584 BUG_ON(data->blksz > host->mmc->max_blk_size);
585 BUG_ON(data->blocks > 65535);
586
587 host->data = data;
588 host->data_early = 0;
589
590 count = sdhci_calc_timeout(host, data);
500 writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL); 591 writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
501 592
502 if (host->flags & SDHCI_USE_DMA) 593 if (host->flags & SDHCI_USE_DMA)
503 host->flags |= SDHCI_REQ_USE_DMA; 594 host->flags |= SDHCI_REQ_USE_DMA;
504 595
505 if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && 596 /*
506 (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) && 597 * FIXME: This doesn't account for merging when mapping the
507 ((data->blksz * data->blocks) & 0x3))) { 598 * scatterlist.
508 DBG("Reverting to PIO because of transfer size (%d)\n", 599 */
509 data->blksz * data->blocks); 600 if (host->flags & SDHCI_REQ_USE_DMA) {
510 host->flags &= ~SDHCI_REQ_USE_DMA; 601 int broken, i;
602 struct scatterlist *sg;
603
604 broken = 0;
605 if (host->flags & SDHCI_USE_ADMA) {
606 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
607 broken = 1;
608 } else {
609 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
610 broken = 1;
611 }
612
613 if (unlikely(broken)) {
614 for_each_sg(data->sg, sg, data->sg_len, i) {
615 if (sg->length & 0x3) {
616 DBG("Reverting to PIO because of "
617 "transfer size (%d)\n",
618 sg->length);
619 host->flags &= ~SDHCI_REQ_USE_DMA;
620 break;
621 }
622 }
623 }
511 } 624 }
512 625
513 /* 626 /*
514 * The assumption here being that alignment is the same after 627 * The assumption here being that alignment is the same after
515 * translation to device address space. 628 * translation to device address space.
516 */ 629 */
517 if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && 630 if (host->flags & SDHCI_REQ_USE_DMA) {
518 (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && 631 int broken, i;
519 (data->sg->offset & 0x3))) { 632 struct scatterlist *sg;
520 DBG("Reverting to PIO because of bad alignment\n"); 633
521 host->flags &= ~SDHCI_REQ_USE_DMA; 634 broken = 0;
635 if (host->flags & SDHCI_USE_ADMA) {
636 /*
637 * As we use 3 byte chunks to work around
638 * alignment problems, we need to check this
639 * quirk.
640 */
641 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
642 broken = 1;
643 } else {
644 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
645 broken = 1;
646 }
647
648 if (unlikely(broken)) {
649 for_each_sg(data->sg, sg, data->sg_len, i) {
650 if (sg->offset & 0x3) {
651 DBG("Reverting to PIO because of "
652 "bad alignment\n");
653 host->flags &= ~SDHCI_REQ_USE_DMA;
654 break;
655 }
656 }
657 }
522 } 658 }
523 659
524 if (host->flags & SDHCI_REQ_USE_DMA) { 660 if (host->flags & SDHCI_REQ_USE_DMA) {
525 int count; 661 if (host->flags & SDHCI_USE_ADMA) {
662 ret = sdhci_adma_table_pre(host, data);
663 if (ret) {
664 /*
665 * This only happens when someone fed
666 * us an invalid request.
667 */
668 WARN_ON(1);
669 host->flags &= ~SDHCI_USE_DMA;
670 } else {
671 writel(host->adma_addr,
672 host->ioaddr + SDHCI_ADMA_ADDRESS);
673 }
674 } else {
675 int sg_cnt;
676
677 sg_cnt = dma_map_sg(mmc_dev(host->mmc),
678 data->sg, data->sg_len,
679 (data->flags & MMC_DATA_READ) ?
680 DMA_FROM_DEVICE :
681 DMA_TO_DEVICE);
682 if (sg_cnt == 0) {
683 /*
684 * This only happens when someone fed
685 * us an invalid request.
686 */
687 WARN_ON(1);
688 host->flags &= ~SDHCI_USE_DMA;
689 } else {
 690 				WARN_ON(sg_cnt != 1);
691 writel(sg_dma_address(data->sg),
692 host->ioaddr + SDHCI_DMA_ADDRESS);
693 }
694 }
695 }
526 696
527 count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len, 697 /*
528 (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); 698 * Always adjust the DMA selection as some controllers
529 BUG_ON(count != 1); 699 * (e.g. JMicron) can't do PIO properly when the selection
700 * is ADMA.
701 */
702 if (host->version >= SDHCI_SPEC_200) {
703 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
704 ctrl &= ~SDHCI_CTRL_DMA_MASK;
705 if ((host->flags & SDHCI_REQ_USE_DMA) &&
706 (host->flags & SDHCI_USE_ADMA))
707 ctrl |= SDHCI_CTRL_ADMA32;
708 else
709 ctrl |= SDHCI_CTRL_SDMA;
710 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
711 }
530 712
531 writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS); 713 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
532 } else {
533 host->cur_sg = data->sg; 714 host->cur_sg = data->sg;
534 host->num_sg = data->sg_len; 715 host->num_sg = data->sg_len;
535 716
@@ -567,7 +748,6 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
567static void sdhci_finish_data(struct sdhci_host *host) 748static void sdhci_finish_data(struct sdhci_host *host)
568{ 749{
569 struct mmc_data *data; 750 struct mmc_data *data;
570 u16 blocks;
571 751
572 BUG_ON(!host->data); 752 BUG_ON(!host->data);
573 753
@@ -575,25 +755,26 @@ static void sdhci_finish_data(struct sdhci_host *host)
575 host->data = NULL; 755 host->data = NULL;
576 756
577 if (host->flags & SDHCI_REQ_USE_DMA) { 757 if (host->flags & SDHCI_REQ_USE_DMA) {
578 pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len, 758 if (host->flags & SDHCI_USE_ADMA)
579 (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); 759 sdhci_adma_table_post(host, data);
760 else {
761 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
762 data->sg_len, (data->flags & MMC_DATA_READ) ?
763 DMA_FROM_DEVICE : DMA_TO_DEVICE);
764 }
580 } 765 }
581 766
582 /* 767 /*
583 * Controller doesn't count down when in single block mode. 768 * The specification states that the block count register must
769 * be updated, but it does not specify at what point in the
770 * data flow. That makes the register entirely useless to read
771 * back so we have to assume that nothing made it to the card
772 * in the event of an error.
584 */ 773 */
585 if (data->blocks == 1) 774 if (data->error)
586 blocks = (data->error == 0) ? 0 : 1; 775 data->bytes_xfered = 0;
587 else 776 else
588 blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT); 777 data->bytes_xfered = data->blksz * data->blocks;
589 data->bytes_xfered = data->blksz * (data->blocks - blocks);
590
591 if (!data->error && blocks) {
592 printk(KERN_ERR "%s: Controller signalled completion even "
593 "though there were blocks left.\n",
594 mmc_hostname(host->mmc));
595 data->error = -EIO;
596 }
597 778
598 if (data->stop) { 779 if (data->stop) {
599 /* 780 /*
@@ -775,7 +956,7 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
775 * Spec says that we should clear the power reg before setting 956 * Spec says that we should clear the power reg before setting
776 * a new value. Some controllers don't seem to like this though. 957 * a new value. Some controllers don't seem to like this though.
777 */ 958 */
778 if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 959 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
779 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); 960 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
780 961
781 pwr = SDHCI_POWER_ON; 962 pwr = SDHCI_POWER_ON;
@@ -797,10 +978,10 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
797 } 978 }
798 979
799 /* 980 /*
800 * At least the CaFe chip gets confused if we set the voltage 981 * At least the Marvell CaFe chip gets confused if we set the voltage
801 * and set turn on power at the same time, so set the voltage first. 982 * and set turn on power at the same time, so set the voltage first.
802 */ 983 */
803 if ((host->chip->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) 984 if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
804 writeb(pwr & ~SDHCI_POWER_ON, 985 writeb(pwr & ~SDHCI_POWER_ON,
805 host->ioaddr + SDHCI_POWER_CONTROL); 986 host->ioaddr + SDHCI_POWER_CONTROL);
806 987
@@ -833,7 +1014,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
833 1014
834 host->mrq = mrq; 1015 host->mrq = mrq;
835 1016
836 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { 1017 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)
1018 || (host->flags & SDHCI_DEVICE_DEAD)) {
837 host->mrq->cmd->error = -ENOMEDIUM; 1019 host->mrq->cmd->error = -ENOMEDIUM;
838 tasklet_schedule(&host->finish_tasklet); 1020 tasklet_schedule(&host->finish_tasklet);
839 } else 1021 } else
@@ -853,6 +1035,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
853 1035
854 spin_lock_irqsave(&host->lock, flags); 1036 spin_lock_irqsave(&host->lock, flags);
855 1037
1038 if (host->flags & SDHCI_DEVICE_DEAD)
1039 goto out;
1040
856 /* 1041 /*
857 * Reset the chip on each power off. 1042 * Reset the chip on each power off.
858 * Should clear out any weird states. 1043 * Should clear out any weird states.
@@ -888,9 +1073,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
888 * signalling timeout and CRC errors even on CMD0. Resetting 1073 * signalling timeout and CRC errors even on CMD0. Resetting
889 * it on each ios seems to solve the problem. 1074 * it on each ios seems to solve the problem.
890 */ 1075 */
891 if(host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) 1076 if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
892 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 1077 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
893 1078
1079out:
894 mmiowb(); 1080 mmiowb();
895 spin_unlock_irqrestore(&host->lock, flags); 1081 spin_unlock_irqrestore(&host->lock, flags);
896} 1082}
@@ -905,7 +1091,10 @@ static int sdhci_get_ro(struct mmc_host *mmc)
905 1091
906 spin_lock_irqsave(&host->lock, flags); 1092 spin_lock_irqsave(&host->lock, flags);
907 1093
908 present = readl(host->ioaddr + SDHCI_PRESENT_STATE); 1094 if (host->flags & SDHCI_DEVICE_DEAD)
1095 present = 0;
1096 else
1097 present = readl(host->ioaddr + SDHCI_PRESENT_STATE);
909 1098
910 spin_unlock_irqrestore(&host->lock, flags); 1099 spin_unlock_irqrestore(&host->lock, flags);
911 1100
@@ -922,6 +1111,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
922 1111
923 spin_lock_irqsave(&host->lock, flags); 1112 spin_lock_irqsave(&host->lock, flags);
924 1113
1114 if (host->flags & SDHCI_DEVICE_DEAD)
1115 goto out;
1116
925 ier = readl(host->ioaddr + SDHCI_INT_ENABLE); 1117 ier = readl(host->ioaddr + SDHCI_INT_ENABLE);
926 1118
927 ier &= ~SDHCI_INT_CARD_INT; 1119 ier &= ~SDHCI_INT_CARD_INT;
@@ -931,6 +1123,7 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
931 writel(ier, host->ioaddr + SDHCI_INT_ENABLE); 1123 writel(ier, host->ioaddr + SDHCI_INT_ENABLE);
932 writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE); 1124 writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
933 1125
1126out:
934 mmiowb(); 1127 mmiowb();
935 1128
936 spin_unlock_irqrestore(&host->lock, flags); 1129 spin_unlock_irqrestore(&host->lock, flags);
@@ -996,13 +1189,14 @@ static void sdhci_tasklet_finish(unsigned long param)
996 * The controller needs a reset of internal state machines 1189 * The controller needs a reset of internal state machines
997 * upon error conditions. 1190 * upon error conditions.
998 */ 1191 */
999 if (mrq->cmd->error || 1192 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
1000 (mrq->data && (mrq->data->error || 1193 (mrq->cmd->error ||
1001 (mrq->data->stop && mrq->data->stop->error))) || 1194 (mrq->data && (mrq->data->error ||
1002 (host->chip->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) { 1195 (mrq->data->stop && mrq->data->stop->error))) ||
1196 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
1003 1197
1004 /* Some controllers need this kick or reset won't work here */ 1198 /* Some controllers need this kick or reset won't work here */
1005 if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { 1199 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
1006 unsigned int clock; 1200 unsigned int clock;
1007 1201
1008 /* This is to force an update */ 1202 /* This is to force an update */
@@ -1116,6 +1310,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1116 host->data->error = -ETIMEDOUT; 1310 host->data->error = -ETIMEDOUT;
1117 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) 1311 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
1118 host->data->error = -EILSEQ; 1312 host->data->error = -EILSEQ;
1313 else if (intmask & SDHCI_INT_ADMA_ERROR)
1314 host->data->error = -EIO;
1119 1315
1120 if (host->data->error) 1316 if (host->data->error)
1121 sdhci_finish_data(host); 1317 sdhci_finish_data(host);
@@ -1234,218 +1430,167 @@ out:
1234 1430
1235#ifdef CONFIG_PM 1431#ifdef CONFIG_PM
1236 1432
1237static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state) 1433int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1238{ 1434{
1239 struct sdhci_chip *chip; 1435 int ret;
1240 int i, ret;
1241
1242 chip = pci_get_drvdata(pdev);
1243 if (!chip)
1244 return 0;
1245
1246 DBG("Suspending...\n");
1247
1248 for (i = 0;i < chip->num_slots;i++) {
1249 if (!chip->hosts[i])
1250 continue;
1251 ret = mmc_suspend_host(chip->hosts[i]->mmc, state);
1252 if (ret) {
1253 for (i--;i >= 0;i--)
1254 mmc_resume_host(chip->hosts[i]->mmc);
1255 return ret;
1256 }
1257 }
1258
1259 pci_save_state(pdev);
1260 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
1261 1436
1262 for (i = 0;i < chip->num_slots;i++) { 1437 ret = mmc_suspend_host(host->mmc, state);
1263 if (!chip->hosts[i]) 1438 if (ret)
1264 continue; 1439 return ret;
1265 free_irq(chip->hosts[i]->irq, chip->hosts[i]);
1266 }
1267 1440
1268 pci_disable_device(pdev); 1441 free_irq(host->irq, host);
1269 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1270 1442
1271 return 0; 1443 return 0;
1272} 1444}
1273 1445
1274static int sdhci_resume (struct pci_dev *pdev) 1446EXPORT_SYMBOL_GPL(sdhci_suspend_host);
1275{
1276 struct sdhci_chip *chip;
1277 int i, ret;
1278 1447
1279 chip = pci_get_drvdata(pdev); 1448int sdhci_resume_host(struct sdhci_host *host)
1280 if (!chip) 1449{
1281 return 0; 1450 int ret;
1282 1451
1283 DBG("Resuming...\n"); 1452 if (host->flags & SDHCI_USE_DMA) {
1453 if (host->ops->enable_dma)
1454 host->ops->enable_dma(host);
1455 }
1284 1456
1285 pci_set_power_state(pdev, PCI_D0); 1457 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1286 pci_restore_state(pdev); 1458 mmc_hostname(host->mmc), host);
1287 ret = pci_enable_device(pdev);
1288 if (ret) 1459 if (ret)
1289 return ret; 1460 return ret;
1290 1461
1291 for (i = 0;i < chip->num_slots;i++) { 1462 sdhci_init(host);
1292 if (!chip->hosts[i]) 1463 mmiowb();
1293 continue; 1464
1294 if (chip->hosts[i]->flags & SDHCI_USE_DMA) 1465 ret = mmc_resume_host(host->mmc);
1295 pci_set_master(pdev); 1466 if (ret)
1296 ret = request_irq(chip->hosts[i]->irq, sdhci_irq, 1467 return ret;
1297 IRQF_SHARED, mmc_hostname(chip->hosts[i]->mmc),
1298 chip->hosts[i]);
1299 if (ret)
1300 return ret;
1301 sdhci_init(chip->hosts[i]);
1302 mmiowb();
1303 ret = mmc_resume_host(chip->hosts[i]->mmc);
1304 if (ret)
1305 return ret;
1306 }
1307 1468
1308 return 0; 1469 return 0;
1309} 1470}
1310 1471
1311#else /* CONFIG_PM */ 1472EXPORT_SYMBOL_GPL(sdhci_resume_host);
1312
1313#define sdhci_suspend NULL
1314#define sdhci_resume NULL
1315 1473
1316#endif /* CONFIG_PM */ 1474#endif /* CONFIG_PM */
1317 1475
1318/*****************************************************************************\ 1476/*****************************************************************************\
1319 * * 1477 * *
1320 * Device probing/removal * 1478 * Device allocation/registration *
1321 * * 1479 * *
1322\*****************************************************************************/ 1480\*****************************************************************************/
1323 1481
1324static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) 1482struct sdhci_host *sdhci_alloc_host(struct device *dev,
1483 size_t priv_size)
1325{ 1484{
1326 int ret;
1327 unsigned int version;
1328 struct sdhci_chip *chip;
1329 struct mmc_host *mmc; 1485 struct mmc_host *mmc;
1330 struct sdhci_host *host; 1486 struct sdhci_host *host;
1331 1487
1332 u8 first_bar; 1488 WARN_ON(dev == NULL);
1333 unsigned int caps;
1334
1335 chip = pci_get_drvdata(pdev);
1336 BUG_ON(!chip);
1337
1338 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
1339 if (ret)
1340 return ret;
1341
1342 first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
1343
1344 if (first_bar > 5) {
1345 printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n");
1346 return -ENODEV;
1347 }
1348
1349 if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) {
1350 printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n");
1351 return -ENODEV;
1352 }
1353
1354 if (pci_resource_len(pdev, first_bar + slot) != 0x100) {
1355 printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. "
1356 "You may experience problems.\n");
1357 }
1358
1359 if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
1360 printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n");
1361 return -ENODEV;
1362 }
1363
1364 if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
1365 printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n");
1366 return -ENODEV;
1367 }
1368 1489
1369 mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev); 1490 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
1370 if (!mmc) 1491 if (!mmc)
1371 return -ENOMEM; 1492 return ERR_PTR(-ENOMEM);
1372 1493
1373 host = mmc_priv(mmc); 1494 host = mmc_priv(mmc);
1374 host->mmc = mmc; 1495 host->mmc = mmc;
1375 1496
1376 host->chip = chip; 1497 return host;
1377 chip->hosts[slot] = host; 1498}
1378 1499
1379 host->bar = first_bar + slot; 1500EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1380 1501
1381 host->addr = pci_resource_start(pdev, host->bar); 1502int sdhci_add_host(struct sdhci_host *host)
1382 host->irq = pdev->irq; 1503{
1504 struct mmc_host *mmc;
1505 unsigned int caps;
1506 int ret;
1383 1507
1384 DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq); 1508 WARN_ON(host == NULL);
1509 if (host == NULL)
1510 return -EINVAL;
1385 1511
1386 ret = pci_request_region(pdev, host->bar, mmc_hostname(mmc)); 1512 mmc = host->mmc;
1387 if (ret)
1388 goto free;
1389 1513
1390 host->ioaddr = ioremap_nocache(host->addr, 1514 if (debug_quirks)
1391 pci_resource_len(pdev, host->bar)); 1515 host->quirks = debug_quirks;
1392 if (!host->ioaddr) {
1393 ret = -ENOMEM;
1394 goto release;
1395 }
1396 1516
1397 sdhci_reset(host, SDHCI_RESET_ALL); 1517 sdhci_reset(host, SDHCI_RESET_ALL);
1398 1518
1399 version = readw(host->ioaddr + SDHCI_HOST_VERSION); 1519 host->version = readw(host->ioaddr + SDHCI_HOST_VERSION);
1400 version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 1520 host->version = (host->version & SDHCI_SPEC_VER_MASK)
1401 if (version > 1) { 1521 >> SDHCI_SPEC_VER_SHIFT;
1522 if (host->version > SDHCI_SPEC_200) {
1402 printk(KERN_ERR "%s: Unknown controller version (%d). " 1523 printk(KERN_ERR "%s: Unknown controller version (%d). "
1403 "You may experience problems.\n", mmc_hostname(mmc), 1524 "You may experience problems.\n", mmc_hostname(mmc),
1404 version); 1525 host->version);
1405 } 1526 }
1406 1527
1407 caps = readl(host->ioaddr + SDHCI_CAPABILITIES); 1528 caps = readl(host->ioaddr + SDHCI_CAPABILITIES);
1408 1529
1409 if (chip->quirks & SDHCI_QUIRK_FORCE_DMA) 1530 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
1410 host->flags |= SDHCI_USE_DMA; 1531 host->flags |= SDHCI_USE_DMA;
1411 else if (!(caps & SDHCI_CAN_DO_DMA)) 1532 else if (!(caps & SDHCI_CAN_DO_DMA))
1412 DBG("Controller doesn't have DMA capability\n"); 1533 DBG("Controller doesn't have DMA capability\n");
1413 else 1534 else
1414 host->flags |= SDHCI_USE_DMA; 1535 host->flags |= SDHCI_USE_DMA;
1415 1536
1416 if ((chip->quirks & SDHCI_QUIRK_BROKEN_DMA) && 1537 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
1417 (host->flags & SDHCI_USE_DMA)) { 1538 (host->flags & SDHCI_USE_DMA)) {
1418 DBG("Disabling DMA as it is marked broken\n"); 1539 DBG("Disabling DMA as it is marked broken\n");
1419 host->flags &= ~SDHCI_USE_DMA; 1540 host->flags &= ~SDHCI_USE_DMA;
1420 } 1541 }
1421 1542
1422 if (((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && 1543 if (host->flags & SDHCI_USE_DMA) {
1423 (host->flags & SDHCI_USE_DMA)) { 1544 if ((host->version >= SDHCI_SPEC_200) &&
1424 printk(KERN_WARNING "%s: Will use DMA " 1545 (caps & SDHCI_CAN_DO_ADMA2))
1425 "mode even though HW doesn't fully " 1546 host->flags |= SDHCI_USE_ADMA;
1426 "claim to support it.\n", mmc_hostname(mmc)); 1547 }
1548
1549 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
1550 (host->flags & SDHCI_USE_ADMA)) {
1551 DBG("Disabling ADMA as it is marked broken\n");
1552 host->flags &= ~SDHCI_USE_ADMA;
1427 } 1553 }
1428 1554
1429 if (host->flags & SDHCI_USE_DMA) { 1555 if (host->flags & SDHCI_USE_DMA) {
1430 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 1556 if (host->ops->enable_dma) {
1431 printk(KERN_WARNING "%s: No suitable DMA available. " 1557 if (host->ops->enable_dma(host)) {
1432 "Falling back to PIO.\n", mmc_hostname(mmc)); 1558 printk(KERN_WARNING "%s: No suitable DMA "
1433 host->flags &= ~SDHCI_USE_DMA; 1559 "available. Falling back to PIO.\n",
1560 mmc_hostname(mmc));
1561 host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
1562 }
1434 } 1563 }
1435 } 1564 }
1436 1565
1437 if (host->flags & SDHCI_USE_DMA) 1566 if (host->flags & SDHCI_USE_ADMA) {
1438 pci_set_master(pdev); 1567 /*
1439 else /* XXX: Hack to get MMC layer to avoid highmem */ 1568 * We need to allocate descriptors for all sg entries
1440 pdev->dma_mask = 0; 1569 * (128) and potentially one alignment transfer for
1570 * each of those entries.
1571 */
1572 host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
1573 host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
1574 if (!host->adma_desc || !host->align_buffer) {
1575 kfree(host->adma_desc);
1576 kfree(host->align_buffer);
1577 printk(KERN_WARNING "%s: Unable to allocate ADMA "
1578 "buffers. Falling back to standard DMA.\n",
1579 mmc_hostname(mmc));
1580 host->flags &= ~SDHCI_USE_ADMA;
1581 }
1582 }
1583
1584 /* XXX: Hack to get MMC layer to avoid highmem */
1585 if (!(host->flags & SDHCI_USE_DMA))
1586 mmc_dev(host->mmc)->dma_mask = NULL;
1441 1587
1442 host->max_clk = 1588 host->max_clk =
1443 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 1589 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
1444 if (host->max_clk == 0) { 1590 if (host->max_clk == 0) {
1445 printk(KERN_ERR "%s: Hardware doesn't specify base clock " 1591 printk(KERN_ERR "%s: Hardware doesn't specify base clock "
1446 "frequency.\n", mmc_hostname(mmc)); 1592 "frequency.\n", mmc_hostname(mmc));
1447 ret = -ENODEV; 1593 return -ENODEV;
1448 goto unmap;
1449 } 1594 }
1450 host->max_clk *= 1000000; 1595 host->max_clk *= 1000000;
1451 1596
@@ -1454,8 +1599,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1454 if (host->timeout_clk == 0) { 1599 if (host->timeout_clk == 0) {
1455 printk(KERN_ERR "%s: Hardware doesn't specify timeout clock " 1600 printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
1456 "frequency.\n", mmc_hostname(mmc)); 1601 "frequency.\n", mmc_hostname(mmc));
1457 ret = -ENODEV; 1602 return -ENODEV;
1458 goto unmap;
1459 } 1603 }
1460 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 1604 if (caps & SDHCI_TIMEOUT_CLK_UNIT)
1461 host->timeout_clk *= 1000; 1605 host->timeout_clk *= 1000;
@@ -1466,7 +1610,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1466 mmc->ops = &sdhci_ops; 1610 mmc->ops = &sdhci_ops;
1467 mmc->f_min = host->max_clk / 256; 1611 mmc->f_min = host->max_clk / 256;
1468 mmc->f_max = host->max_clk; 1612 mmc->f_max = host->max_clk;
1469 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_SDIO_IRQ; 1613 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1470 1614
1471 if (caps & SDHCI_CAN_DO_HISPD) 1615 if (caps & SDHCI_CAN_DO_HISPD)
1472 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1616 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
@@ -1482,20 +1626,22 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1482 if (mmc->ocr_avail == 0) { 1626 if (mmc->ocr_avail == 0) {
1483 printk(KERN_ERR "%s: Hardware doesn't report any " 1627 printk(KERN_ERR "%s: Hardware doesn't report any "
1484 "support voltages.\n", mmc_hostname(mmc)); 1628 "support voltages.\n", mmc_hostname(mmc));
1485 ret = -ENODEV; 1629 return -ENODEV;
1486 goto unmap;
1487 } 1630 }
1488 1631
1489 spin_lock_init(&host->lock); 1632 spin_lock_init(&host->lock);
1490 1633
1491 /* 1634 /*
1492 * Maximum number of segments. Hardware cannot do scatter lists. 1635 * Maximum number of segments. Depends on if the hardware
1636 * can do scatter/gather or not.
1493 */ 1637 */
1494 if (host->flags & SDHCI_USE_DMA) 1638 if (host->flags & SDHCI_USE_ADMA)
1639 mmc->max_hw_segs = 128;
1640 else if (host->flags & SDHCI_USE_DMA)
1495 mmc->max_hw_segs = 1; 1641 mmc->max_hw_segs = 1;
1496 else 1642 else /* PIO */
1497 mmc->max_hw_segs = 16; 1643 mmc->max_hw_segs = 128;
1498 mmc->max_phys_segs = 16; 1644 mmc->max_phys_segs = 128;
1499 1645
1500 /* 1646 /*
1501 * Maximum number of sectors in one transfer. Limited by DMA boundary 1647 * Maximum number of sectors in one transfer. Limited by DMA boundary
@@ -1505,9 +1651,13 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1505 1651
1506 /* 1652 /*
1507 * Maximum segment size. Could be one segment with the maximum number 1653 * Maximum segment size. Could be one segment with the maximum number
1508 * of bytes. 1654 * of bytes. When doing hardware scatter/gather, each entry cannot
1655 * be larger than 64 KiB though.
1509 */ 1656 */
1510 mmc->max_seg_size = mmc->max_req_size; 1657 if (host->flags & SDHCI_USE_ADMA)
1658 mmc->max_seg_size = 65536;
1659 else
1660 mmc->max_seg_size = mmc->max_req_size;
1511 1661
1512 /* 1662 /*
1513 * Maximum block size. This varies from controller to controller and 1663 * Maximum block size. This varies from controller to controller and
@@ -1553,7 +1703,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1553 host->led.default_trigger = mmc_hostname(mmc); 1703 host->led.default_trigger = mmc_hostname(mmc);
1554 host->led.brightness_set = sdhci_led_control; 1704 host->led.brightness_set = sdhci_led_control;
1555 1705
1556 ret = led_classdev_register(&pdev->dev, &host->led); 1706 ret = led_classdev_register(mmc_dev(mmc), &host->led);
1557 if (ret) 1707 if (ret)
1558 goto reset; 1708 goto reset;
1559#endif 1709#endif
@@ -1562,8 +1712,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1562 1712
1563 mmc_add_host(mmc); 1713 mmc_add_host(mmc);
1564 1714
1565 printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n", 1715 printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
1566 mmc_hostname(mmc), host->addr, host->irq, 1716 mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id,
1717 (host->flags & SDHCI_USE_ADMA)?"A":"",
1567 (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); 1718 (host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
1568 1719
1569 return 0; 1720 return 0;
@@ -1576,35 +1727,40 @@ reset:
1576untasklet: 1727untasklet:
1577 tasklet_kill(&host->card_tasklet); 1728 tasklet_kill(&host->card_tasklet);
1578 tasklet_kill(&host->finish_tasklet); 1729 tasklet_kill(&host->finish_tasklet);
1579unmap:
1580 iounmap(host->ioaddr);
1581release:
1582 pci_release_region(pdev, host->bar);
1583free:
1584 mmc_free_host(mmc);
1585 1730
1586 return ret; 1731 return ret;
1587} 1732}
1588 1733
1589static void sdhci_remove_slot(struct pci_dev *pdev, int slot) 1734EXPORT_SYMBOL_GPL(sdhci_add_host);
1735
1736void sdhci_remove_host(struct sdhci_host *host, int dead)
1590{ 1737{
1591 struct sdhci_chip *chip; 1738 unsigned long flags;
1592 struct mmc_host *mmc;
1593 struct sdhci_host *host;
1594 1739
1595 chip = pci_get_drvdata(pdev); 1740 if (dead) {
1596 host = chip->hosts[slot]; 1741 spin_lock_irqsave(&host->lock, flags);
1597 mmc = host->mmc; 1742
1743 host->flags |= SDHCI_DEVICE_DEAD;
1744
1745 if (host->mrq) {
1746 printk(KERN_ERR "%s: Controller removed during "
 1747 				"transfer!\n", mmc_hostname(host->mmc));
1598 1748
1599 chip->hosts[slot] = NULL; 1749 host->mrq->cmd->error = -ENOMEDIUM;
1750 tasklet_schedule(&host->finish_tasklet);
1751 }
1752
1753 spin_unlock_irqrestore(&host->lock, flags);
1754 }
1600 1755
1601 mmc_remove_host(mmc); 1756 mmc_remove_host(host->mmc);
1602 1757
1603#ifdef CONFIG_LEDS_CLASS 1758#ifdef CONFIG_LEDS_CLASS
1604 led_classdev_unregister(&host->led); 1759 led_classdev_unregister(&host->led);
1605#endif 1760#endif
1606 1761
1607 sdhci_reset(host, SDHCI_RESET_ALL); 1762 if (!dead)
1763 sdhci_reset(host, SDHCI_RESET_ALL);
1608 1764
1609 free_irq(host->irq, host); 1765 free_irq(host->irq, host);
1610 1766
@@ -1613,106 +1769,21 @@ static void sdhci_remove_slot(struct pci_dev *pdev, int slot)
1613 tasklet_kill(&host->card_tasklet); 1769 tasklet_kill(&host->card_tasklet);
1614 tasklet_kill(&host->finish_tasklet); 1770 tasklet_kill(&host->finish_tasklet);
1615 1771
1616 iounmap(host->ioaddr); 1772 kfree(host->adma_desc);
1617 1773 kfree(host->align_buffer);
1618 pci_release_region(pdev, host->bar);
1619 1774
1620 mmc_free_host(mmc); 1775 host->adma_desc = NULL;
1776 host->align_buffer = NULL;
1621} 1777}
1622 1778
1623static int __devinit sdhci_probe(struct pci_dev *pdev, 1779EXPORT_SYMBOL_GPL(sdhci_remove_host);
1624 const struct pci_device_id *ent)
1625{
1626 int ret, i;
1627 u8 slots, rev;
1628 struct sdhci_chip *chip;
1629
1630 BUG_ON(pdev == NULL);
1631 BUG_ON(ent == NULL);
1632 1780
1633 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); 1781void sdhci_free_host(struct sdhci_host *host)
1634
1635 printk(KERN_INFO DRIVER_NAME
1636 ": SDHCI controller found at %s [%04x:%04x] (rev %x)\n",
1637 pci_name(pdev), (int)pdev->vendor, (int)pdev->device,
1638 (int)rev);
1639
1640 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
1641 if (ret)
1642 return ret;
1643
1644 slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
1645 DBG("found %d slot(s)\n", slots);
1646 if (slots == 0)
1647 return -ENODEV;
1648
1649 ret = pci_enable_device(pdev);
1650 if (ret)
1651 return ret;
1652
1653 chip = kzalloc(sizeof(struct sdhci_chip) +
1654 sizeof(struct sdhci_host*) * slots, GFP_KERNEL);
1655 if (!chip) {
1656 ret = -ENOMEM;
1657 goto err;
1658 }
1659
1660 chip->pdev = pdev;
1661 chip->quirks = ent->driver_data;
1662
1663 if (debug_quirks)
1664 chip->quirks = debug_quirks;
1665
1666 chip->num_slots = slots;
1667 pci_set_drvdata(pdev, chip);
1668
1669 for (i = 0;i < slots;i++) {
1670 ret = sdhci_probe_slot(pdev, i);
1671 if (ret) {
1672 for (i--;i >= 0;i--)
1673 sdhci_remove_slot(pdev, i);
1674 goto free;
1675 }
1676 }
1677
1678 return 0;
1679
1680free:
1681 pci_set_drvdata(pdev, NULL);
1682 kfree(chip);
1683
1684err:
1685 pci_disable_device(pdev);
1686 return ret;
1687}
1688
1689static void __devexit sdhci_remove(struct pci_dev *pdev)
1690{ 1782{
1691 int i; 1783 mmc_free_host(host->mmc);
1692 struct sdhci_chip *chip;
1693
1694 chip = pci_get_drvdata(pdev);
1695
1696 if (chip) {
1697 for (i = 0;i < chip->num_slots;i++)
1698 sdhci_remove_slot(pdev, i);
1699
1700 pci_set_drvdata(pdev, NULL);
1701
1702 kfree(chip);
1703 }
1704
1705 pci_disable_device(pdev);
1706} 1784}
1707 1785
1708static struct pci_driver sdhci_driver = { 1786EXPORT_SYMBOL_GPL(sdhci_free_host);
1709 .name = DRIVER_NAME,
1710 .id_table = pci_ids,
1711 .probe = sdhci_probe,
1712 .remove = __devexit_p(sdhci_remove),
1713 .suspend = sdhci_suspend,
1714 .resume = sdhci_resume,
1715};
1716 1787
1717/*****************************************************************************\ 1788/*****************************************************************************\
1718 * * 1789 * *
@@ -1726,14 +1797,11 @@ static int __init sdhci_drv_init(void)
1726 ": Secure Digital Host Controller Interface driver\n"); 1797 ": Secure Digital Host Controller Interface driver\n");
1727 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 1798 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
1728 1799
1729 return pci_register_driver(&sdhci_driver); 1800 return 0;
1730} 1801}
1731 1802
1732static void __exit sdhci_drv_exit(void) 1803static void __exit sdhci_drv_exit(void)
1733{ 1804{
1734 DBG("Exiting\n");
1735
1736 pci_unregister_driver(&sdhci_driver);
1737} 1805}
1738 1806
1739module_init(sdhci_drv_init); 1807module_init(sdhci_drv_init);
@@ -1742,7 +1810,7 @@ module_exit(sdhci_drv_exit);
1742module_param(debug_quirks, uint, 0444); 1810module_param(debug_quirks, uint, 0444);
1743 1811
1744MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); 1812MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
1745MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver"); 1813MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
1746MODULE_LICENSE("GPL"); 1814MODULE_LICENSE("GPL");
1747 1815
1748MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 1816MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
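
Since the suspend/resume rework earlier in this diff, the core no longer walks PCI slots itself; it only exports sdhci_suspend_host() and sdhci_resume_host() for the glue to call from its own power-management hooks (the sdhci_pci_suspend/sdhci_pci_resume functions referenced in sdhci-pci.c are not shown in this section). A hypothetical sketch of how glue code would wrap them for a single host:

/* Hypothetical glue PM wrappers around the newly exported helpers. */
static int myglue_suspend_one(struct sdhci_host *host, pm_message_t state)
{
	int ret;

	ret = sdhci_suspend_host(host, state);	/* suspends the MMC core, frees the IRQ */
	if (ret)
		return ret;

	/* bus-specific work (save state, gate clocks, arm wakeup) goes here */
	return 0;
}

static int myglue_resume_one(struct sdhci_host *host)
{
	/* undo the bus-specific suspend work first, then let the core
	 * re-request the IRQ, run sdhci_init() and resume the MMC core */
	return sdhci_resume_host(host);
}
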
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 299118de8933..5bb355281765 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -10,18 +10,6 @@
10 */ 10 */
11 11
12/* 12/*
13 * PCI registers
14 */
15
16#define PCI_SDHCI_IFPIO 0x00
17#define PCI_SDHCI_IFDMA 0x01
18#define PCI_SDHCI_IFVENDOR 0x02
19
20#define PCI_SLOT_INFO 0x40 /* 8 bits */
21#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
22#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
23
24/*
25 * Controller registers 13 * Controller registers
26 */ 14 */
27 15
@@ -72,6 +60,11 @@
72#define SDHCI_CTRL_LED 0x01 60#define SDHCI_CTRL_LED 0x01
73#define SDHCI_CTRL_4BITBUS 0x02 61#define SDHCI_CTRL_4BITBUS 0x02
74#define SDHCI_CTRL_HISPD 0x04 62#define SDHCI_CTRL_HISPD 0x04
63#define SDHCI_CTRL_DMA_MASK 0x18
64#define SDHCI_CTRL_SDMA 0x00
65#define SDHCI_CTRL_ADMA1 0x08
66#define SDHCI_CTRL_ADMA32 0x10
67#define SDHCI_CTRL_ADMA64 0x18
75 68
76#define SDHCI_POWER_CONTROL 0x29 69#define SDHCI_POWER_CONTROL 0x29
77#define SDHCI_POWER_ON 0x01 70#define SDHCI_POWER_ON 0x01
@@ -117,6 +110,7 @@
117#define SDHCI_INT_DATA_END_BIT 0x00400000 110#define SDHCI_INT_DATA_END_BIT 0x00400000
118#define SDHCI_INT_BUS_POWER 0x00800000 111#define SDHCI_INT_BUS_POWER 0x00800000
119#define SDHCI_INT_ACMD12ERR 0x01000000 112#define SDHCI_INT_ACMD12ERR 0x01000000
113#define SDHCI_INT_ADMA_ERROR 0x02000000
120 114
121#define SDHCI_INT_NORMAL_MASK 0x00007FFF 115#define SDHCI_INT_NORMAL_MASK 0x00007FFF
122#define SDHCI_INT_ERROR_MASK 0xFFFF8000 116#define SDHCI_INT_ERROR_MASK 0xFFFF8000
@@ -140,11 +134,14 @@
140#define SDHCI_CLOCK_BASE_SHIFT 8 134#define SDHCI_CLOCK_BASE_SHIFT 8
141#define SDHCI_MAX_BLOCK_MASK 0x00030000 135#define SDHCI_MAX_BLOCK_MASK 0x00030000
142#define SDHCI_MAX_BLOCK_SHIFT 16 136#define SDHCI_MAX_BLOCK_SHIFT 16
137#define SDHCI_CAN_DO_ADMA2 0x00080000
138#define SDHCI_CAN_DO_ADMA1 0x00100000
143#define SDHCI_CAN_DO_HISPD 0x00200000 139#define SDHCI_CAN_DO_HISPD 0x00200000
144#define SDHCI_CAN_DO_DMA 0x00400000 140#define SDHCI_CAN_DO_DMA 0x00400000
145#define SDHCI_CAN_VDD_330 0x01000000 141#define SDHCI_CAN_VDD_330 0x01000000
146#define SDHCI_CAN_VDD_300 0x02000000 142#define SDHCI_CAN_VDD_300 0x02000000
147#define SDHCI_CAN_VDD_180 0x04000000 143#define SDHCI_CAN_VDD_180 0x04000000
144#define SDHCI_CAN_64BIT 0x10000000
148 145
149/* 44-47 reserved for more caps */ 146/* 44-47 reserved for more caps */
150 147
@@ -152,7 +149,16 @@
152 149
153/* 4C-4F reserved for more max current */ 150/* 4C-4F reserved for more max current */
154 151
155/* 50-FB reserved */ 152#define SDHCI_SET_ACMD12_ERROR 0x50
153#define SDHCI_SET_INT_ERROR 0x52
154
155#define SDHCI_ADMA_ERROR 0x54
156
157/* 55-57 reserved */
158
159#define SDHCI_ADMA_ADDRESS 0x58
160
161/* 60-FB reserved */
156 162
157#define SDHCI_SLOT_INT_STATUS 0xFC 163#define SDHCI_SLOT_INT_STATUS 0xFC
158 164
@@ -161,11 +167,50 @@
161#define SDHCI_VENDOR_VER_SHIFT 8 167#define SDHCI_VENDOR_VER_SHIFT 8
162#define SDHCI_SPEC_VER_MASK 0x00FF 168#define SDHCI_SPEC_VER_MASK 0x00FF
163#define SDHCI_SPEC_VER_SHIFT 0 169#define SDHCI_SPEC_VER_SHIFT 0
170#define SDHCI_SPEC_100 0
171#define SDHCI_SPEC_200 1
164 172
165struct sdhci_chip; 173struct sdhci_ops;
166 174
167struct sdhci_host { 175struct sdhci_host {
168 struct sdhci_chip *chip; 176 /* Data set by hardware interface driver */
177 const char *hw_name; /* Hardware bus name */
178
179 unsigned int quirks; /* Deviations from spec. */
180
181/* Controller doesn't honor resets unless we touch the clock register */
182#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
183/* Controller has bad caps bits, but really supports DMA */
184#define SDHCI_QUIRK_FORCE_DMA (1<<1)
185/* Controller doesn't like to be reset when there is no card inserted. */
186#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
187/* Controller doesn't like clearing the power reg before a change */
188#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
189/* Controller has flaky internal state so reset it on each ios change */
190#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
191/* Controller has an unusable DMA engine */
192#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
193/* Controller has an unusable ADMA engine */
194#define SDHCI_QUIRK_BROKEN_ADMA (1<<6)
195/* Controller can only DMA from 32-bit aligned addresses */
196#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7)
197/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
198#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8)
199/* Controller can only ADMA chunks that are a multiple of 32 bits */
200#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9)
201/* Controller needs to be reset after each request to stay stable */
202#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10)
203/* Controller needs voltage and power writes to happen separately */
204#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11)
205/* Controller provides an incorrect timeout value for transfers */
206#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
207
208 int irq; /* Device IRQ */
209 void __iomem * ioaddr; /* Mapped address */
210
211 const struct sdhci_ops *ops; /* Low level hw interface */
212
213 /* Internal data */
169 struct mmc_host *mmc; /* MMC structure */ 214 struct mmc_host *mmc; /* MMC structure */
170 215
171#ifdef CONFIG_LEDS_CLASS 216#ifdef CONFIG_LEDS_CLASS
@@ -176,7 +221,11 @@ struct sdhci_host {
176 221
177 int flags; /* Host attributes */ 222 int flags; /* Host attributes */
178#define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */ 223#define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */
179#define SDHCI_REQ_USE_DMA (1<<1) /* Use DMA for this req. */ 224#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
225#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
226#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
227
228 unsigned int version; /* SDHCI spec. version */
180 229
181 unsigned int max_clk; /* Max possible freq (MHz) */ 230 unsigned int max_clk; /* Max possible freq (MHz) */
182 unsigned int timeout_clk; /* Timeout freq (KHz) */ 231 unsigned int timeout_clk; /* Timeout freq (KHz) */
@@ -194,22 +243,41 @@ struct sdhci_host {
194 int offset; /* Offset into current sg */ 243 int offset; /* Offset into current sg */
195 int remain; /* Bytes left in current */ 244 int remain; /* Bytes left in current */
196 245
197 int irq; /* Device IRQ */ 246 int sg_count; /* Mapped sg entries */
198 int bar; /* PCI BAR index */ 247
199 unsigned long addr; /* Bus address */ 248 u8 *adma_desc; /* ADMA descriptor table */
200 void __iomem * ioaddr; /* Mapped address */ 249 u8 *align_buffer; /* Bounce buffer */
250
251 dma_addr_t adma_addr; /* Mapped ADMA descr. table */
252 dma_addr_t align_addr; /* Mapped bounce buffer */
201 253
202 struct tasklet_struct card_tasklet; /* Tasklet structures */ 254 struct tasklet_struct card_tasklet; /* Tasklet structures */
203 struct tasklet_struct finish_tasklet; 255 struct tasklet_struct finish_tasklet;
204 256
205 struct timer_list timer; /* Timer for timeouts */ 257 struct timer_list timer; /* Timer for timeouts */
206};
207 258
208struct sdhci_chip { 259 unsigned long private[0] ____cacheline_aligned;
209 struct pci_dev *pdev; 260};
210 261
211 unsigned long quirks;
212 262
213 int num_slots; /* Slots on controller */ 263struct sdhci_ops {
214 struct sdhci_host *hosts[0]; /* Pointers to hosts */ 264 int (*enable_dma)(struct sdhci_host *host);
215}; 265};
266
267
268extern struct sdhci_host *sdhci_alloc_host(struct device *dev,
269 size_t priv_size);
270extern void sdhci_free_host(struct sdhci_host *host);
271
272static inline void *sdhci_priv(struct sdhci_host *host)
273{
274 return (void *)host->private;
275}
276
277extern int sdhci_add_host(struct sdhci_host *host);
278extern void sdhci_remove_host(struct sdhci_host *host, int dead);
279
280#ifdef CONFIG_PM
281extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
282extern int sdhci_resume_host(struct sdhci_host *host);
283#endif
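
The only operation in struct sdhci_ops so far is enable_dma(), which takes over the pci_set_master()/pci_set_dma_mask() work that the old monolithic probe did inline (visible in the code removed from sdhci.c above). The real sdhci-pci implementation is not included in this section, so the following is a sketch reconstructed from that removed code and should be read as an assumption, not the actual driver:

/* Sketch of a PCI enable_dma hook, modelled on the removed in-core code. */
static int sdhci_pci_enable_dma(struct sdhci_host *host)
{
	struct pci_dev *pdev = to_pci_dev(mmc_dev(host->mmc));
	int ret;

	ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (ret)
		return ret;

	pci_set_master(pdev);
	return 0;
}

static const struct sdhci_ops sdhci_pci_ops = {
	.enable_dma	= sdhci_pci_enable_dma,
};
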
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
new file mode 100644
index 000000000000..f99e9f721629
--- /dev/null
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -0,0 +1,575 @@
1/*
2 * sdricoh_cs.c - driver for Ricoh Secure Digital Card Readers that can be
3 * found on some Ricoh RL5c476 II cardbus bridge
4 *
5 * Copyright (C) 2006 - 2008 Sascha Sommer <saschasommer@freenet.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 */
22
23/*
24#define DEBUG
25#define VERBOSE_DEBUG
26*/
27#include <linux/delay.h>
28#include <linux/highmem.h>
29#include <linux/pci.h>
30#include <linux/ioport.h>
31#include <linux/scatterlist.h>
32#include <linux/version.h>
33
34#include <pcmcia/cs_types.h>
35#include <pcmcia/cs.h>
36#include <pcmcia/cistpl.h>
37#include <pcmcia/ds.h>
38#include <linux/io.h>
39
40#include <linux/mmc/host.h>
41
42#define DRIVER_NAME "sdricoh_cs"
43
44static unsigned int switchlocked;
45
46/* i/o region */
47#define SDRICOH_PCI_REGION 0
48#define SDRICOH_PCI_REGION_SIZE 0x1000
49
50/* registers */
51#define R104_VERSION 0x104
52#define R200_CMD 0x200
53#define R204_CMD_ARG 0x204
54#define R208_DATAIO 0x208
55#define R20C_RESP 0x20c
56#define R21C_STATUS 0x21c
57#define R2E0_INIT 0x2e0
58#define R2E4_STATUS_RESP 0x2e4
59#define R2F0_RESET 0x2f0
60#define R224_MODE 0x224
61#define R226_BLOCKSIZE 0x226
62#define R228_POWER 0x228
63#define R230_DATA 0x230
64
65/* flags for the R21C_STATUS register */
66#define STATUS_CMD_FINISHED 0x00000001
67#define STATUS_TRANSFER_FINISHED 0x00000004
68#define STATUS_CARD_INSERTED 0x00000020
69#define STATUS_CARD_LOCKED 0x00000080
70#define STATUS_CMD_TIMEOUT 0x00400000
71#define STATUS_READY_TO_READ 0x01000000
72#define STATUS_READY_TO_WRITE 0x02000000
73#define STATUS_BUSY 0x40000000
74
75/* timeouts */
76#define INIT_TIMEOUT 100
77#define CMD_TIMEOUT 100000
78#define TRANSFER_TIMEOUT 100000
79#define BUSY_TIMEOUT 32767
80
81/* list of supported pcmcia devices */
82static struct pcmcia_device_id pcmcia_ids[] = {
83 /* vendor and device strings followed by their crc32 hashes */
84 PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed,
85 0xc3901202),
86 PCMCIA_DEVICE_NULL,
87};
88
89MODULE_DEVICE_TABLE(pcmcia, pcmcia_ids);
90
91/* mmc privdata */
92struct sdricoh_host {
93 struct device *dev;
94 struct mmc_host *mmc; /* MMC structure */
95 unsigned char __iomem *iobase;
96 struct pci_dev *pci_dev;
97 int app_cmd;
98};
99
100/***************** register i/o helper functions *****************************/
101
102static inline unsigned int sdricoh_readl(struct sdricoh_host *host,
103 unsigned int reg)
104{
105 unsigned int value = readl(host->iobase + reg);
106 dev_vdbg(host->dev, "rl %x 0x%x\n", reg, value);
107 return value;
108}
109
110static inline void sdricoh_writel(struct sdricoh_host *host, unsigned int reg,
111 unsigned int value)
112{
113 writel(value, host->iobase + reg);
114 dev_vdbg(host->dev, "wl %x 0x%x\n", reg, value);
115
116}
117
118static inline unsigned int sdricoh_readw(struct sdricoh_host *host,
119 unsigned int reg)
120{
121 unsigned int value = readw(host->iobase + reg);
122	dev_vdbg(host->dev, "rw %x 0x%x\n", reg, value);
123 return value;
124}
125
126static inline void sdricoh_writew(struct sdricoh_host *host, unsigned int reg,
127 unsigned short value)
128{
129 writew(value, host->iobase + reg);
130 dev_vdbg(host->dev, "ww %x 0x%x\n", reg, value);
131}
132
133static inline unsigned int sdricoh_readb(struct sdricoh_host *host,
134 unsigned int reg)
135{
136 unsigned int value = readb(host->iobase + reg);
137 dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value);
138 return value;
139}
140
141static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted,
142 unsigned int timeout){
143 unsigned int loop;
144 unsigned int status = 0;
145 struct device *dev = host->dev;
146 for (loop = 0; loop < timeout; loop++) {
147 status = sdricoh_readl(host, R21C_STATUS);
148 sdricoh_writel(host, R2E4_STATUS_RESP, status);
149 if (status & wanted)
150 break;
151 }
152
153 if (loop == timeout) {
154 dev_err(dev, "query_status: timeout waiting for %x\n", wanted);
155 return -ETIMEDOUT;
156 }
157
158 /* do not do this check in the loop as some commands fail otherwise */
159 if (status & 0x7F0000) {
160 dev_err(dev, "waiting for status bit %x failed\n", wanted);
161 return -EINVAL;
162 }
163 return 0;
164
165}
166
167static int sdricoh_mmc_cmd(struct sdricoh_host *host, unsigned char opcode,
168 unsigned int arg)
169{
170 unsigned int status;
171 int result = 0;
172 unsigned int loop = 0;
173 /* reset status reg? */
174 sdricoh_writel(host, R21C_STATUS, 0x18);
175 /* fill parameters */
176 sdricoh_writel(host, R204_CMD_ARG, arg);
177 sdricoh_writel(host, R200_CMD, (0x10000 << 8) | opcode);
178 /* wait for command completion */
179 if (opcode) {
180 for (loop = 0; loop < CMD_TIMEOUT; loop++) {
181 status = sdricoh_readl(host, R21C_STATUS);
182 sdricoh_writel(host, R2E4_STATUS_RESP, status);
183 if (status & STATUS_CMD_FINISHED)
184 break;
185 }
186		/* don't check the timeout bit inside the loop as it is
187		   not always reset correctly
188		*/
189 if (loop == CMD_TIMEOUT || status & STATUS_CMD_TIMEOUT)
190 result = -ETIMEDOUT;
191
192 }
193
194 return result;
195
196}
197
198static int sdricoh_reset(struct sdricoh_host *host)
199{
200 dev_dbg(host->dev, "reset\n");
201 sdricoh_writel(host, R2F0_RESET, 0x10001);
202 sdricoh_writel(host, R2E0_INIT, 0x10000);
203 if (sdricoh_readl(host, R2E0_INIT) != 0x10000)
204 return -EIO;
205 sdricoh_writel(host, R2E0_INIT, 0x10007);
206
207 sdricoh_writel(host, R224_MODE, 0x2000000);
208 sdricoh_writel(host, R228_POWER, 0xe0);
209
210
211 /* status register ? */
212 sdricoh_writel(host, R21C_STATUS, 0x18);
213
214 return 0;
215}
216
217static int sdricoh_blockio(struct sdricoh_host *host, int read,
218 u8 *buf, int len)
219{
220 int size;
221 u32 data = 0;
222 /* wait until the data is available */
223 if (read) {
224 if (sdricoh_query_status(host, STATUS_READY_TO_READ,
225 TRANSFER_TIMEOUT))
226 return -ETIMEDOUT;
227 sdricoh_writel(host, R21C_STATUS, 0x18);
228 /* read data */
229 while (len) {
230 data = sdricoh_readl(host, R230_DATA);
231 size = min(len, 4);
232 len -= size;
233 while (size) {
234 *buf = data & 0xFF;
235 buf++;
236 data >>= 8;
237 size--;
238 }
239 }
240 } else {
241 if (sdricoh_query_status(host, STATUS_READY_TO_WRITE,
242 TRANSFER_TIMEOUT))
243 return -ETIMEDOUT;
244 sdricoh_writel(host, R21C_STATUS, 0x18);
245 /* write data */
246 while (len) {
247 size = min(len, 4);
248 len -= size;
249 while (size) {
250 data >>= 8;
251 data |= (u32)*buf << 24;
252 buf++;
253 size--;
254 }
255 sdricoh_writel(host, R230_DATA, data);
256 }
257 }
258
259 if (len)
260 return -EIO;
261
262 return 0;
263}
264
265static void sdricoh_request(struct mmc_host *mmc, struct mmc_request *mrq)
266{
267 struct sdricoh_host *host = mmc_priv(mmc);
268 struct mmc_command *cmd = mrq->cmd;
269 struct mmc_data *data = cmd->data;
270 struct device *dev = host->dev;
271 unsigned char opcode = cmd->opcode;
272 int i;
273
274 dev_dbg(dev, "=============================\n");
275 dev_dbg(dev, "sdricoh_request opcode=%i\n", opcode);
276
277 sdricoh_writel(host, R21C_STATUS, 0x18);
278
279 /* MMC_APP_CMDs need some special handling */
280 if (host->app_cmd) {
281 opcode |= 64;
282 host->app_cmd = 0;
283 } else if (opcode == 55)
284 host->app_cmd = 1;
285
286 /* read/write commands seem to require this */
287 if (data) {
288 sdricoh_writew(host, R226_BLOCKSIZE, data->blksz);
289 sdricoh_writel(host, R208_DATAIO, 0);
290 }
291
292 cmd->error = sdricoh_mmc_cmd(host, opcode, cmd->arg);
293
294 /* read response buffer */
295 if (cmd->flags & MMC_RSP_PRESENT) {
296 if (cmd->flags & MMC_RSP_136) {
297 /* CRC is stripped so we need to do some shifting. */
298 for (i = 0; i < 4; i++) {
299 cmd->resp[i] =
300 sdricoh_readl(host,
301 R20C_RESP + (3 - i) * 4) << 8;
302 if (i != 3)
303 cmd->resp[i] |=
304 sdricoh_readb(host, R20C_RESP +
305 (3 - i) * 4 - 1);
306 }
307 } else
308 cmd->resp[0] = sdricoh_readl(host, R20C_RESP);
309 }
310
311 /* transfer data */
312 if (data && cmd->error == 0) {
313 dev_dbg(dev, "transfer: blksz %i blocks %i sg_len %i "
314 "sg length %i\n", data->blksz, data->blocks,
315 data->sg_len, data->sg->length);
316
317 /* enter data reading mode */
318 sdricoh_writel(host, R21C_STATUS, 0x837f031e);
319 for (i = 0; i < data->blocks; i++) {
320 size_t len = data->blksz;
321 u8 *buf;
322 struct page *page;
323 int result;
324 page = sg_page(data->sg);
325
326 buf = kmap(page) + data->sg->offset + (len * i);
327 result =
328 sdricoh_blockio(host,
329 data->flags & MMC_DATA_READ, buf, len);
330 kunmap(page);
331 flush_dcache_page(page);
332 if (result) {
333 dev_err(dev, "sdricoh_request: cmd %i "
334 "block transfer failed\n", cmd->opcode);
335 cmd->error = result;
336 break;
337 } else
338 data->bytes_xfered += len;
339 }
340
341 sdricoh_writel(host, R208_DATAIO, 1);
342
343 if (sdricoh_query_status(host, STATUS_TRANSFER_FINISHED,
344 TRANSFER_TIMEOUT)) {
345 dev_err(dev, "sdricoh_request: transfer end error\n");
346 cmd->error = -EINVAL;
347 }
348 }
349 /* FIXME check busy flag */
350
351 mmc_request_done(mmc, mrq);
352 dev_dbg(dev, "=============================\n");
353}
354
355static void sdricoh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
356{
357 struct sdricoh_host *host = mmc_priv(mmc);
358 dev_dbg(host->dev, "set_ios\n");
359
360 if (ios->power_mode == MMC_POWER_ON) {
361 sdricoh_writel(host, R228_POWER, 0xc0e0);
362
363 if (ios->bus_width == MMC_BUS_WIDTH_4) {
364 sdricoh_writel(host, R224_MODE, 0x2000300);
365 sdricoh_writel(host, R228_POWER, 0x40e0);
366 } else {
367 sdricoh_writel(host, R224_MODE, 0x2000340);
368 }
369
370 } else if (ios->power_mode == MMC_POWER_UP) {
371 sdricoh_writel(host, R224_MODE, 0x2000320);
372 sdricoh_writel(host, R228_POWER, 0xe0);
373 }
374}
375
376static int sdricoh_get_ro(struct mmc_host *mmc)
377{
378 struct sdricoh_host *host = mmc_priv(mmc);
379 unsigned int status;
380
381 status = sdricoh_readl(host, R21C_STATUS);
382 sdricoh_writel(host, R2E4_STATUS_RESP, status);
383
384 /* some notebooks seem to have the locked flag switched */
385 if (switchlocked)
386 return !(status & STATUS_CARD_LOCKED);
387
388 return (status & STATUS_CARD_LOCKED);
389}
390
391static struct mmc_host_ops sdricoh_ops = {
392 .request = sdricoh_request,
393 .set_ios = sdricoh_set_ios,
394 .get_ro = sdricoh_get_ro,
395};
396
397/* initialize the controller and register it with the mmc framework */
398static int sdricoh_init_mmc(struct pci_dev *pci_dev,
399 struct pcmcia_device *pcmcia_dev)
400{
401 int result = 0;
402 void __iomem *iobase = NULL;
403 struct mmc_host *mmc = NULL;
404 struct sdricoh_host *host = NULL;
405 struct device *dev = &pcmcia_dev->dev;
406 /* map iomem */
407 if (pci_resource_len(pci_dev, SDRICOH_PCI_REGION) !=
408 SDRICOH_PCI_REGION_SIZE) {
409 dev_dbg(dev, "unexpected pci resource len\n");
410 return -ENODEV;
411 }
412 iobase =
413 pci_iomap(pci_dev, SDRICOH_PCI_REGION, SDRICOH_PCI_REGION_SIZE);
414 if (!iobase) {
415 dev_err(dev, "unable to map iobase\n");
416 return -ENODEV;
417 }
418 /* check version? */
419 if (readl(iobase + R104_VERSION) != 0x4000) {
420 dev_dbg(dev, "no supported mmc controller found\n");
421 result = -ENODEV;
422 goto err;
423 }
424 /* allocate privdata */
425 mmc = pcmcia_dev->priv =
426 mmc_alloc_host(sizeof(struct sdricoh_host), &pcmcia_dev->dev);
427 if (!mmc) {
428 dev_err(dev, "mmc_alloc_host failed\n");
429 result = -ENOMEM;
430 goto err;
431 }
432 host = mmc_priv(mmc);
433
434 host->iobase = iobase;
435 host->dev = dev;
436 host->pci_dev = pci_dev;
437
438 mmc->ops = &sdricoh_ops;
439
440 /* FIXME: frequency and voltage handling is done by the controller
441 */
442 mmc->f_min = 450000;
443 mmc->f_max = 24000000;
444 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
445 mmc->caps |= MMC_CAP_4_BIT_DATA;
446
447 mmc->max_seg_size = 1024 * 512;
448 mmc->max_blk_size = 512;
449
450	/* reset the controller */
451 if (sdricoh_reset(host)) {
452 dev_dbg(dev, "could not reset\n");
453 result = -EIO;
454 goto err;
455
456 }
457
458 result = mmc_add_host(mmc);
459
460 if (!result) {
461 dev_dbg(dev, "mmc host registered\n");
462 return 0;
463 }
464
465err:
466 if (iobase)
467 iounmap(iobase);
468 if (mmc)
469 mmc_free_host(mmc);
470
471 return result;
472}
473
474/* search for supported mmc controllers */
475static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev)
476{
477 struct pci_dev *pci_dev = NULL;
478
479 dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device"
480 " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]);
481
482	/* search the pci cardbus bridge that contains the mmc controller */
483 /* the io region is already claimed by yenta_socket... */
484 while ((pci_dev =
485 pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
486 pci_dev))) {
487 /* try to init the device */
488 if (!sdricoh_init_mmc(pci_dev, pcmcia_dev)) {
489 dev_info(&pcmcia_dev->dev, "MMC controller found\n");
490 return 0;
491 }
492
493 }
494 dev_err(&pcmcia_dev->dev, "No MMC controller was found.\n");
495 return -ENODEV;
496}
497
498static void sdricoh_pcmcia_detach(struct pcmcia_device *link)
499{
500 struct mmc_host *mmc = link->priv;
501
502 dev_dbg(&link->dev, "detach\n");
503
504 /* remove mmc host */
505 if (mmc) {
506 struct sdricoh_host *host = mmc_priv(mmc);
507 mmc_remove_host(mmc);
508 pci_iounmap(host->pci_dev, host->iobase);
509 pci_dev_put(host->pci_dev);
510 mmc_free_host(mmc);
511 }
512 pcmcia_disable_device(link);
513
514}
515
516#ifdef CONFIG_PM
517static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
518{
519 struct mmc_host *mmc = link->priv;
520 dev_dbg(&link->dev, "suspend\n");
521 mmc_suspend_host(mmc, PMSG_SUSPEND);
522 return 0;
523}
524
525static int sdricoh_pcmcia_resume(struct pcmcia_device *link)
526{
527 struct mmc_host *mmc = link->priv;
528 dev_dbg(&link->dev, "resume\n");
529 sdricoh_reset(mmc_priv(mmc));
530 mmc_resume_host(mmc);
531 return 0;
532}
533#else
534#define sdricoh_pcmcia_suspend NULL
535#define sdricoh_pcmcia_resume NULL
536#endif
537
538static struct pcmcia_driver sdricoh_driver = {
539 .drv = {
540 .name = DRIVER_NAME,
541 },
542 .probe = sdricoh_pcmcia_probe,
543 .remove = sdricoh_pcmcia_detach,
544 .id_table = pcmcia_ids,
545 .suspend = sdricoh_pcmcia_suspend,
546 .resume = sdricoh_pcmcia_resume,
547};
548
549/*****************************************************************************\
550 * *
551 * Driver init/exit *
552 * *
553\*****************************************************************************/
554
555static int __init sdricoh_drv_init(void)
556{
557 return pcmcia_register_driver(&sdricoh_driver);
558}
559
560static void __exit sdricoh_drv_exit(void)
561{
562 pcmcia_unregister_driver(&sdricoh_driver);
563}
564
565module_init(sdricoh_drv_init);
566module_exit(sdricoh_drv_exit);
567
568module_param(switchlocked, uint, 0444);
569
570MODULE_AUTHOR("Sascha Sommer <saschasommer@freenet.de>");
571MODULE_DESCRIPTION("Ricoh PCMCIA Secure Digital Interface driver");
572MODULE_LICENSE("GPL");
573
574MODULE_PARM_DESC(switchlocked, "Switch the card's locked status. "
575	"Use this when unlocked cards are shown as read-only (default 0)");
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 1c14a186f000..13844843e8de 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -973,7 +973,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
973 973
974 mmc->ops = &tifm_sd_ops; 974 mmc->ops = &tifm_sd_ops;
975 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 975 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
976 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; 976 mmc->caps = MMC_CAP_4_BIT_DATA;
977 mmc->f_min = 20000000 / 60; 977 mmc->f_min = 20000000 / 60;
978 mmc->f_max = 24000000; 978 mmc->f_max = 24000000;
979 979
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index c303e7f57ab4..adda37952032 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -68,16 +68,16 @@ static const int unlock_codes[] = { 0x83, 0x87 };
68 68
69static const int valid_ids[] = { 69static const int valid_ids[] = {
70 0x7112, 70 0x7112,
71 }; 71};
72 72
73#ifdef CONFIG_PNP 73#ifdef CONFIG_PNP
74static unsigned int nopnp = 0; 74static unsigned int param_nopnp = 0;
75#else 75#else
76static const unsigned int nopnp = 1; 76static const unsigned int param_nopnp = 1;
77#endif 77#endif
78static unsigned int io = 0x248; 78static unsigned int param_io = 0x248;
79static unsigned int irq = 6; 79static unsigned int param_irq = 6;
80static int dma = 2; 80static int param_dma = 2;
81 81
82/* 82/*
83 * Basic functions 83 * Basic functions
@@ -939,7 +939,7 @@ static int wbsd_get_ro(struct mmc_host *mmc)
939 939
940 spin_unlock_bh(&host->lock); 940 spin_unlock_bh(&host->lock);
941 941
942 return csr & WBSD_WRPT; 942 return !!(csr & WBSD_WRPT);
943} 943}
944 944
945static const struct mmc_host_ops wbsd_ops = { 945static const struct mmc_host_ops wbsd_ops = {
@@ -1219,7 +1219,7 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
1219 mmc->f_min = 375000; 1219 mmc->f_min = 375000;
1220 mmc->f_max = 24000000; 1220 mmc->f_max = 24000000;
1221 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1221 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1222 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; 1222 mmc->caps = MMC_CAP_4_BIT_DATA;
1223 1223
1224 spin_lock_init(&host->lock); 1224 spin_lock_init(&host->lock);
1225 1225
@@ -1420,7 +1420,7 @@ kfree:
1420 1420
1421 dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, 1421 dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
1422 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); 1422 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1423 host->dma_addr = (dma_addr_t)NULL; 1423 host->dma_addr = 0;
1424 1424
1425 kfree(host->dma_buffer); 1425 kfree(host->dma_buffer);
1426 host->dma_buffer = NULL; 1426 host->dma_buffer = NULL;
@@ -1445,7 +1445,7 @@ static void wbsd_release_dma(struct wbsd_host *host)
1445 1445
1446 host->dma = -1; 1446 host->dma = -1;
1447 host->dma_buffer = NULL; 1447 host->dma_buffer = NULL;
1448 host->dma_addr = (dma_addr_t)NULL; 1448 host->dma_addr = 0;
1449} 1449}
1450 1450
1451/* 1451/*
@@ -1765,7 +1765,7 @@ static void __devexit wbsd_shutdown(struct device *dev, int pnp)
1765static int __devinit wbsd_probe(struct platform_device *dev) 1765static int __devinit wbsd_probe(struct platform_device *dev)
1766{ 1766{
1767 /* Use the module parameters for resources */ 1767 /* Use the module parameters for resources */
1768 return wbsd_init(&dev->dev, io, irq, dma, 0); 1768 return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0);
1769} 1769}
1770 1770
1771static int __devexit wbsd_remove(struct platform_device *dev) 1771static int __devexit wbsd_remove(struct platform_device *dev)
@@ -1979,14 +1979,14 @@ static int __init wbsd_drv_init(void)
1979 1979
1980#ifdef CONFIG_PNP 1980#ifdef CONFIG_PNP
1981 1981
1982 if (!nopnp) { 1982 if (!param_nopnp) {
1983 result = pnp_register_driver(&wbsd_pnp_driver); 1983 result = pnp_register_driver(&wbsd_pnp_driver);
1984 if (result < 0) 1984 if (result < 0)
1985 return result; 1985 return result;
1986 } 1986 }
1987#endif /* CONFIG_PNP */ 1987#endif /* CONFIG_PNP */
1988 1988
1989 if (nopnp) { 1989 if (param_nopnp) {
1990 result = platform_driver_register(&wbsd_driver); 1990 result = platform_driver_register(&wbsd_driver);
1991 if (result < 0) 1991 if (result < 0)
1992 return result; 1992 return result;
@@ -2012,12 +2012,12 @@ static void __exit wbsd_drv_exit(void)
2012{ 2012{
2013#ifdef CONFIG_PNP 2013#ifdef CONFIG_PNP
2014 2014
2015 if (!nopnp) 2015 if (!param_nopnp)
2016 pnp_unregister_driver(&wbsd_pnp_driver); 2016 pnp_unregister_driver(&wbsd_pnp_driver);
2017 2017
2018#endif /* CONFIG_PNP */ 2018#endif /* CONFIG_PNP */
2019 2019
2020 if (nopnp) { 2020 if (param_nopnp) {
2021 platform_device_unregister(wbsd_device); 2021 platform_device_unregister(wbsd_device);
2022 2022
2023 platform_driver_unregister(&wbsd_driver); 2023 platform_driver_unregister(&wbsd_driver);
@@ -2029,11 +2029,11 @@ static void __exit wbsd_drv_exit(void)
2029module_init(wbsd_drv_init); 2029module_init(wbsd_drv_init);
2030module_exit(wbsd_drv_exit); 2030module_exit(wbsd_drv_exit);
2031#ifdef CONFIG_PNP 2031#ifdef CONFIG_PNP
2032module_param(nopnp, uint, 0444); 2032module_param_named(nopnp, param_nopnp, uint, 0444);
2033#endif 2033#endif
2034module_param(io, uint, 0444); 2034module_param_named(io, param_io, uint, 0444);
2035module_param(irq, uint, 0444); 2035module_param_named(irq, param_irq, uint, 0444);
2036module_param(dma, int, 0444); 2036module_param_named(dma, param_dma, int, 0444);
2037 2037
2038MODULE_LICENSE("GPL"); 2038MODULE_LICENSE("GPL");
2039MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); 2039MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
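
The wbsd hunks above rename the module-parameter variables (nopnp, io, irq and dma become param_*) while module_param_named() keeps the user-visible names unchanged, so existing "dma=<n>"-style options keep working. A minimal, hypothetical module illustrating the same pattern (example names only, not part of this merge):

/* Hypothetical module showing the module_param_named() rename pattern. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int param_dma = 2;		/* C identifier renamed, parameter still "dma" */

module_param_named(dma, param_dma, int, 0444);
MODULE_PARM_DESC(dma, "DMA channel to use (example module only)");

static int __init example_init(void)
{
	pr_info("example: dma=%d\n", param_dma);
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
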