Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/Kconfig      |   63
-rw-r--r--  drivers/mmc/Makefile     |   22
-rw-r--r--  drivers/mmc/mmc.c        |  914
-rw-r--r--  drivers/mmc/mmc.h        |   16
-rw-r--r--  drivers/mmc/mmc_block.c  |  509
-rw-r--r--  drivers/mmc/mmc_queue.c  |  238
-rw-r--r--  drivers/mmc/mmc_queue.h  |   33
-rw-r--r--  drivers/mmc/mmc_sysfs.c  |  238
-rw-r--r--  drivers/mmc/mmci.c       |  680
-rw-r--r--  drivers/mmc/mmci.h       |  179
-rw-r--r--  drivers/mmc/pxamci.c     |  610
-rw-r--r--  drivers/mmc/pxamci.h     |  124
-rw-r--r--  drivers/mmc/wbsd.c       | 1651
-rw-r--r--  drivers/mmc/wbsd.h       |  178
14 files changed, 5455 insertions, 0 deletions
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
new file mode 100644
index 000000000000..72f2b466b816
--- /dev/null
+++ b/drivers/mmc/Kconfig
@@ -0,0 +1,63 @@
1#
2# MMC subsystem configuration
3#
4
5menu "MMC/SD Card support"
6
7config MMC
8 tristate "MMC support"
9 help
10 MMC is the "multi-media card" bus protocol.
11
12 If you want MMC support, you should say Y here and also
13 to the specific driver for your MMC interface.
14
15config MMC_DEBUG
16 bool "MMC debugging"
17 depends on MMC != n
18 help
19 This is an option for use by developers; most people should
20 say N here. This enables MMC core and driver debugging.
21
22config MMC_BLOCK
23 tristate "MMC block device driver"
24 depends on MMC
25 default y
26 help
27 Say Y here to enable the MMC block device driver support.
28 This provides a block device driver, which you can use to
29 mount the filesystem. Almost everyone who wants MMC support
30 should say Y or M here.
31
32config MMC_ARMMMCI
33 tristate "ARM AMBA Multimedia Card Interface support"
34 depends on ARM_AMBA && MMC
35 help
36 This selects the ARM(R) AMBA(R) PrimeCell Multimedia Card
37 Interface (PL180 and PL181) support. If you have an ARM(R)
38 platform with a Multimedia Card slot, say Y or M here.
39
40 If unsure, say N.
41
42config MMC_PXA
43 tristate "Intel PXA255 Multimedia Card Interface support"
44 depends on ARCH_PXA && MMC
45 help
46 This selects the Intel(R) PXA(R) Multimedia Card Interface.
47 If you have a PXA(R) platform with a Multimedia Card slot,
48 say Y or M here.
49
50 If unsure, say N.
51
52config MMC_WBSD
53 tristate "Winbond W83L51xD SD/MMC Card Interface support"
54 depends on MMC && ISA
55 help
56 This selects the Winbond(R) W83L51xD Secure Digital and
57 Multimedia Card Interface.
58 If you have a machine with an integrated W83L518D or W83L519D
59 SD/MMC card reader, say Y or M here.
60
61 If unsure, say N.
62
63endmenu
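
As a hedged illustration of how these options combine, a .config fragment for a PXA board that builds the stack as modules might read as follows (the option names are the ones defined above; building as modules rather than built-in is an arbitrary choice):

CONFIG_MMC=m
# CONFIG_MMC_DEBUG is not set
CONFIG_MMC_BLOCK=m
CONFIG_MMC_PXA=m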
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
new file mode 100644
index 000000000000..89510c2086c7
--- /dev/null
+++ b/drivers/mmc/Makefile
@@ -0,0 +1,22 @@
1#
2# Makefile for the kernel mmc device drivers.
3#
4
5#
6# Core
7#
8obj-$(CONFIG_MMC) += mmc_core.o
9
10#
11# Media drivers
12#
13obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
14
15#
16# Host drivers
17#
18obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
19obj-$(CONFIG_MMC_PXA) += pxamci.o
20obj-$(CONFIG_MMC_WBSD) += wbsd.o
21
22mmc_core-y := mmc.o mmc_queue.o mmc_sysfs.o
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
new file mode 100644
index 000000000000..e40026328251
--- /dev/null
+++ b/drivers/mmc/mmc.c
@@ -0,0 +1,914 @@
1/*
2 * linux/drivers/mmc/mmc.c
3 *
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/completion.h>
15#include <linux/device.h>
16#include <linux/delay.h>
17#include <linux/pagemap.h>
18#include <linux/err.h>
19
20#include <linux/mmc/card.h>
21#include <linux/mmc/host.h>
22#include <linux/mmc/protocol.h>
23
24#include "mmc.h"
25
26#ifdef CONFIG_MMC_DEBUG
27#define DBG(x...) printk(KERN_DEBUG x)
28#else
29#define DBG(x...) do { } while (0)
30#endif
31
32#define CMD_RETRIES 3
33
34/*
35 * OCR Bit positions to 10s of Vdd mV.
36 */
37static const unsigned short mmc_ocr_bit_to_vdd[] = {
38 150, 155, 160, 165, 170, 180, 190, 200,
39 210, 220, 230, 240, 250, 260, 270, 280,
40 290, 300, 310, 320, 330, 340, 350, 360
41};
42
43static const unsigned int tran_exp[] = {
44 10000, 100000, 1000000, 10000000,
45 0, 0, 0, 0
46};
47
48static const unsigned char tran_mant[] = {
49 0, 10, 12, 13, 15, 20, 25, 30,
50 35, 40, 45, 50, 55, 60, 70, 80,
51};
52
53static const unsigned int tacc_exp[] = {
54 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
55};
56
57static const unsigned int tacc_mant[] = {
58 0, 10, 12, 13, 15, 20, 25, 30,
59 35, 40, 45, 50, 55, 60, 70, 80,
60};
61
62
63/**
64 * mmc_request_done - finish processing an MMC command
65 * @host: MMC host which completed command
66 * @mrq: MMC request which completed
67 *
68 * MMC drivers should call this function when they have completed
69 * their processing of a command. This should be called before the
70 * data part of the command has completed.
71 */
72void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
73{
74 struct mmc_command *cmd = mrq->cmd;
75 int err = mrq->cmd->error;
76 DBG("MMC: req done (%02x): %d: %08x %08x %08x %08x\n", cmd->opcode,
77 err, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
78
79 if (err && cmd->retries) {
80 cmd->retries--;
81 cmd->error = 0;
82 host->ops->request(host, mrq);
83 } else if (mrq->done) {
84 mrq->done(mrq);
85 }
86}
87
88EXPORT_SYMBOL(mmc_request_done);
89
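To make the completion contract concrete, here is a minimal, hypothetical sketch of a host driver's interrupt handler handing a finished command back to the core; the example_host/example_irq names are invented, and mmci.c later in this patch does the equivalent from mmci_request_end():

/* Hypothetical host driver fragment -- illustration only, not part of this patch. */
static irqreturn_t example_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	struct example_host *host = dev_id;	/* assumed driver-private state */
	struct mmc_request *mrq = host->mrq;

	/*
	 * ... read controller status, fill mrq->cmd->resp[] and set
	 * mrq->cmd->error (MMC_ERR_NONE on success) from the hardware ...
	 */

	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);	/* core retries or calls mrq->done() */

	return IRQ_HANDLED;
}
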
90/**
91 * mmc_start_request - start a command on a host
92 * @host: MMC host to start command on
93 * @mrq: MMC request to start
94 *
95 * Queue a command on the specified host. We expect the
96 * caller to be holding the host lock with interrupts disabled.
97 */
98void
99mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
100{
101 DBG("MMC: starting cmd %02x arg %08x flags %08x\n",
102 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
103
104 WARN_ON(host->card_busy == NULL);
105
106 mrq->cmd->error = 0;
107 mrq->cmd->mrq = mrq;
108 if (mrq->data) {
109 mrq->cmd->data = mrq->data;
110 mrq->data->error = 0;
111 mrq->data->mrq = mrq;
112 if (mrq->stop) {
113 mrq->data->stop = mrq->stop;
114 mrq->stop->error = 0;
115 mrq->stop->mrq = mrq;
116 }
117 }
118 host->ops->request(host, mrq);
119}
120
121EXPORT_SYMBOL(mmc_start_request);
122
123static void mmc_wait_done(struct mmc_request *mrq)
124{
125 complete(mrq->done_data);
126}
127
128int mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
129{
130 DECLARE_COMPLETION(complete);
131
132 mrq->done_data = &complete;
133 mrq->done = mmc_wait_done;
134
135 mmc_start_request(host, mrq);
136
137 wait_for_completion(&complete);
138
139 return 0;
140}
141
142EXPORT_SYMBOL(mmc_wait_for_req);
143
144/**
145 * mmc_wait_for_cmd - start a command and wait for completion
146 * @host: MMC host to start command
147 * @cmd: MMC command to start
148 * @retries: maximum number of retries
149 *
150 * Start a new MMC command for a host, and wait for the command
151 * to complete. Return any error that occurred while the command
152 * was executing. Do not attempt to parse the response.
153 */
154int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
155{
156 struct mmc_request mrq;
157
158 BUG_ON(host->card_busy == NULL);
159
160 memset(&mrq, 0, sizeof(struct mmc_request));
161
162 memset(cmd->resp, 0, sizeof(cmd->resp));
163 cmd->retries = retries;
164
165 mrq.cmd = cmd;
166 cmd->data = NULL;
167
168 mmc_wait_for_req(host, &mrq);
169
170 return cmd->error;
171}
172
173EXPORT_SYMBOL(mmc_wait_for_cmd);
174
175
176
177/**
178 * __mmc_claim_host - exclusively claim a host
179 * @host: mmc host to claim
180 * @card: mmc card to claim host for
181 *
182 * Claim a host for a set of operations. If a valid card
183 * is passed and this wasn't the last card selected, select
184 * the card before returning.
185 *
186 * Note: you should use mmc_card_claim_host or mmc_claim_host.
187 */
188int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card)
189{
190 DECLARE_WAITQUEUE(wait, current);
191 unsigned long flags;
192 int err = 0;
193
194 add_wait_queue(&host->wq, &wait);
195 spin_lock_irqsave(&host->lock, flags);
196 while (1) {
197 set_current_state(TASK_UNINTERRUPTIBLE);
198 if (host->card_busy == NULL)
199 break;
200 spin_unlock_irqrestore(&host->lock, flags);
201 schedule();
202 spin_lock_irqsave(&host->lock, flags);
203 }
204 set_current_state(TASK_RUNNING);
205 host->card_busy = card;
206 spin_unlock_irqrestore(&host->lock, flags);
207 remove_wait_queue(&host->wq, &wait);
208
209 if (card != (void *)-1 && host->card_selected != card) {
210 struct mmc_command cmd;
211
212 host->card_selected = card;
213
214 cmd.opcode = MMC_SELECT_CARD;
215 cmd.arg = card->rca << 16;
216 cmd.flags = MMC_RSP_R1;
217
218 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
219 }
220
221 return err;
222}
223
224EXPORT_SYMBOL(__mmc_claim_host);
225
226/**
227 * mmc_release_host - release a host
228 * @host: mmc host to release
229 *
230 * Release a MMC host, allowing others to claim the host
231 * for their operations.
232 */
233void mmc_release_host(struct mmc_host *host)
234{
235 unsigned long flags;
236
237 BUG_ON(host->card_busy == NULL);
238
239 spin_lock_irqsave(&host->lock, flags);
240 host->card_busy = NULL;
241 spin_unlock_irqrestore(&host->lock, flags);
242
243 wake_up(&host->wq);
244}
245
246EXPORT_SYMBOL(mmc_release_host);
247
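Putting the claim/command/release pieces together, a media driver brackets each command with the claim helpers; the sketch below mirrors what mmc_block.c does further on in this patch (example_read_status is an invented name, everything else is the API introduced here):

/* Illustrative only -- modelled on the mmc_block.c code later in this patch. */
static int example_read_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd;
	int err;

	err = mmc_card_claim_host(card);	/* claims the host and selects the card */
	if (err)
		return err;

	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1;

	err = mmc_wait_for_cmd(card->host, &cmd, 3);
	if (err == MMC_ERR_NONE)
		*status = cmd.resp[0];

	mmc_card_release_host(card);
	return err;
}
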
248/*
249 * Ensure that no card is selected.
250 */
251static void mmc_deselect_cards(struct mmc_host *host)
252{
253 struct mmc_command cmd;
254
255 if (host->card_selected) {
256 host->card_selected = NULL;
257
258 cmd.opcode = MMC_SELECT_CARD;
259 cmd.arg = 0;
260 cmd.flags = MMC_RSP_NONE;
261
262 mmc_wait_for_cmd(host, &cmd, 0);
263 }
264}
265
266
267static inline void mmc_delay(unsigned int ms)
268{
269 if (ms < HZ / 1000) {
270 yield();
271 mdelay(ms);
272 } else {
273 msleep_interruptible (ms);
274 }
275}
276
277/*
278 * Mask off any voltages we don't support and select
279 * the lowest voltage
280 */
281static u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
282{
283 int bit;
284
285 ocr &= host->ocr_avail;
286
287 bit = ffs(ocr);
288 if (bit) {
289 bit -= 1;
290
291 ocr = 3 << bit;
292
293 host->ios.vdd = bit;
294 host->ops->set_ios(host, &host->ios);
295 } else {
296 ocr = 0;
297 }
298
299 return ocr;
300}
301
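For example, if the card's OCR and host->ocr_avail overlap in bits 20 and 21, ffs() returns 21, bit becomes 20, the function returns the mask 3 << 20 = 0x00300000 (the lowest supported window and its neighbour) and programs host->ios.vdd = 20.
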
302#define UNSTUFF_BITS(resp,start,size) \
303 ({ \
304 const int __size = size; \
305 const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
306 const int __off = 3 - ((start) / 32); \
307 const int __shft = (start) & 31; \
308 u32 __res; \
309 \
310 __res = resp[__off] >> __shft; \
311 if (__size + __shft > 32) \
312 __res |= resp[__off-1] << ((32 - __shft) % 32); \
313 __res & __mask; \
314 })
315
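UNSTUFF_BITS() treats resp[0..3] as a single 128-bit value with resp[0] holding the most significant word. As a worked case, UNSTUFF_BITS(resp, 126, 2) (the CSD_STRUCTURE field read in mmc_decode_csd below) gives __off = 3 - 126/32 = 0 and __shft = 126 & 31 = 30, so it evaluates to (resp[0] >> 30) & 0x3; a field straddling a 32-bit boundary additionally pulls its upper bits in from resp[__off - 1].
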
316/*
317 * Given the decoded CSD structure, decode the raw CID to our CID structure.
318 */
319static void mmc_decode_cid(struct mmc_card *card)
320{
321 u32 *resp = card->raw_cid;
322
323 memset(&card->cid, 0, sizeof(struct mmc_cid));
324
325 /*
326 * The selection of the format here is guesswork based upon
327 * information people have sent to date.
328 */
329 switch (card->csd.mmca_vsn) {
330 case 0: /* MMC v1.? */
331 case 1: /* MMC v1.4 */
332 card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
333 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
334 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
335 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
336 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
337 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
338 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
339 card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
340 card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
341 card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
342 card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
343 card->cid.month = UNSTUFF_BITS(resp, 12, 4);
344 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
345 break;
346
347 case 2: /* MMC v2.x ? */
348 case 3: /* MMC v3.x ? */
349 card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
350 card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
351 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
352 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
353 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
354 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
355 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
356 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
357 card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
358 card->cid.month = UNSTUFF_BITS(resp, 12, 4);
359 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
360 break;
361
362 default:
363 printk("%s: card has unknown MMCA version %d\n",
364 card->host->host_name, card->csd.mmca_vsn);
365 mmc_card_set_bad(card);
366 break;
367 }
368}
369
370/*
371 * Given a 128-bit response, decode to our card CSD structure.
372 */
373static void mmc_decode_csd(struct mmc_card *card)
374{
375 struct mmc_csd *csd = &card->csd;
376 unsigned int e, m, csd_struct;
377 u32 *resp = card->raw_csd;
378
379 /*
380 * We only understand CSD structure v1.1 and v2.
381 * v2 has extra information in bits 15, 11 and 10.
382 */
383 csd_struct = UNSTUFF_BITS(resp, 126, 2);
384 if (csd_struct != 1 && csd_struct != 2) {
385 printk("%s: unrecognised CSD structure version %d\n",
386 card->host->host_name, csd_struct);
387 mmc_card_set_bad(card);
388 return;
389 }
390
391 csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
392 m = UNSTUFF_BITS(resp, 115, 4);
393 e = UNSTUFF_BITS(resp, 112, 3);
394 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
395 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
396
397 m = UNSTUFF_BITS(resp, 99, 4);
398 e = UNSTUFF_BITS(resp, 96, 3);
399 csd->max_dtr = tran_exp[e] * tran_mant[m];
400 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
401
402 e = UNSTUFF_BITS(resp, 47, 3);
403 m = UNSTUFF_BITS(resp, 62, 12);
404 csd->capacity = (1 + m) << (e + 2);
405
406 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
407}
408
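As a worked example of the capacity arithmetic: a card reporting C_SIZE m = 4095, C_SIZE_MULT e = 7 and READ_BL_LEN = 9 ends up with csd->capacity = (1 + 4095) << (7 + 2) = 2097152 read blocks of 512 bytes, i.e. 1 GiB, which is what mmc_blk_alloc() later hands to set_capacity() alongside a 512-byte hard sector size.
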
409/*
410 * Locate a MMC card on this MMC host given a raw CID.
411 */
412static struct mmc_card *mmc_find_card(struct mmc_host *host, u32 *raw_cid)
413{
414 struct mmc_card *card;
415
416 list_for_each_entry(card, &host->cards, node) {
417 if (memcmp(card->raw_cid, raw_cid, sizeof(card->raw_cid)) == 0)
418 return card;
419 }
420 return NULL;
421}
422
423/*
424 * Allocate a new MMC card, and assign a unique RCA.
425 */
426static struct mmc_card *
427mmc_alloc_card(struct mmc_host *host, u32 *raw_cid, unsigned int *frca)
428{
429 struct mmc_card *card, *c;
430 unsigned int rca = *frca;
431
432 card = kmalloc(sizeof(struct mmc_card), GFP_KERNEL);
433 if (!card)
434 return ERR_PTR(-ENOMEM);
435
436 mmc_init_card(card, host);
437 memcpy(card->raw_cid, raw_cid, sizeof(card->raw_cid));
438
439 again:
440 list_for_each_entry(c, &host->cards, node)
441 if (c->rca == rca) {
442 rca++;
443 goto again;
444 }
445
446 card->rca = rca;
447
448 *frca = rca;
449
450 return card;
451}
452
453/*
454 * Tell attached cards to go to IDLE state
455 */
456static void mmc_idle_cards(struct mmc_host *host)
457{
458 struct mmc_command cmd;
459
460 cmd.opcode = MMC_GO_IDLE_STATE;
461 cmd.arg = 0;
462 cmd.flags = MMC_RSP_NONE;
463
464 mmc_wait_for_cmd(host, &cmd, 0);
465
466 mmc_delay(1);
467}
468
469/*
470 * Apply power to the MMC stack.
471 */
472static void mmc_power_up(struct mmc_host *host)
473{
474 int bit = fls(host->ocr_avail) - 1;
475
476 host->ios.vdd = bit;
477 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
478 host->ios.power_mode = MMC_POWER_UP;
479 host->ops->set_ios(host, &host->ios);
480
481 mmc_delay(1);
482
483 host->ios.clock = host->f_min;
484 host->ios.power_mode = MMC_POWER_ON;
485 host->ops->set_ios(host, &host->ios);
486
487 mmc_delay(2);
488}
489
490static void mmc_power_off(struct mmc_host *host)
491{
492 host->ios.clock = 0;
493 host->ios.vdd = 0;
494 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
495 host->ios.power_mode = MMC_POWER_OFF;
496 host->ops->set_ios(host, &host->ios);
497}
498
499static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
500{
501 struct mmc_command cmd;
502 int i, err = 0;
503
504 cmd.opcode = MMC_SEND_OP_COND;
505 cmd.arg = ocr;
506 cmd.flags = MMC_RSP_R3;
507
508 for (i = 100; i; i--) {
509 err = mmc_wait_for_cmd(host, &cmd, 0);
510 if (err != MMC_ERR_NONE)
511 break;
512
513 if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0)
514 break;
515
516 err = MMC_ERR_TIMEOUT;
517
518 mmc_delay(10);
519 }
520
521 if (rocr)
522 *rocr = cmd.resp[0];
523
524 return err;
525}
526
527/*
528 * Discover cards by requesting their CID. If this command
529 * times out, it is not an error; there are no further cards
530 * to be discovered. Add new cards to the list.
531 *
532 * Create a mmc_card entry for each discovered card, assigning
533 * it an RCA, and save the raw CID for decoding later.
534 */
535static void mmc_discover_cards(struct mmc_host *host)
536{
537 struct mmc_card *card;
538 unsigned int first_rca = 1, err;
539
540 while (1) {
541 struct mmc_command cmd;
542
543 cmd.opcode = MMC_ALL_SEND_CID;
544 cmd.arg = 0;
545 cmd.flags = MMC_RSP_R2;
546
547 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
548 if (err == MMC_ERR_TIMEOUT) {
549 err = MMC_ERR_NONE;
550 break;
551 }
552 if (err != MMC_ERR_NONE) {
553 printk(KERN_ERR "%s: error requesting CID: %d\n",
554 host->host_name, err);
555 break;
556 }
557
558 card = mmc_find_card(host, cmd.resp);
559 if (!card) {
560 card = mmc_alloc_card(host, cmd.resp, &first_rca);
561 if (IS_ERR(card)) {
562 err = PTR_ERR(card);
563 break;
564 }
565 list_add(&card->node, &host->cards);
566 }
567
568 card->state &= ~MMC_STATE_DEAD;
569
570 cmd.opcode = MMC_SET_RELATIVE_ADDR;
571 cmd.arg = card->rca << 16;
572 cmd.flags = MMC_RSP_R1;
573
574 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
575 if (err != MMC_ERR_NONE)
576 mmc_card_set_dead(card);
577 }
578}
579
580static void mmc_read_csds(struct mmc_host *host)
581{
582 struct mmc_card *card;
583
584 list_for_each_entry(card, &host->cards, node) {
585 struct mmc_command cmd;
586 int err;
587
588 if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT))
589 continue;
590
591 cmd.opcode = MMC_SEND_CSD;
592 cmd.arg = card->rca << 16;
593 cmd.flags = MMC_RSP_R2;
594
595 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
596 if (err != MMC_ERR_NONE) {
597 mmc_card_set_dead(card);
598 continue;
599 }
600
601 memcpy(card->raw_csd, cmd.resp, sizeof(card->raw_csd));
602
603 mmc_decode_csd(card);
604 mmc_decode_cid(card);
605 }
606}
607
608static unsigned int mmc_calculate_clock(struct mmc_host *host)
609{
610 struct mmc_card *card;
611 unsigned int max_dtr = host->f_max;
612
613 list_for_each_entry(card, &host->cards, node)
614 if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
615 max_dtr = card->csd.max_dtr;
616
617 DBG("MMC: selected %d.%03dMHz transfer rate\n",
618 max_dtr / 1000000, (max_dtr / 1000) % 1000);
619
620 return max_dtr;
621}
622
623/*
624 * Check whether cards we already know about are still present.
625 * We do this by requesting status, and checking whether a card
626 * responds.
627 *
628 * A request for status does not cause a state change in data
629 * transfer mode.
630 */
631static void mmc_check_cards(struct mmc_host *host)
632{
633 struct list_head *l, *n;
634
635 mmc_deselect_cards(host);
636
637 list_for_each_safe(l, n, &host->cards) {
638 struct mmc_card *card = mmc_list_to_card(l);
639 struct mmc_command cmd;
640 int err;
641
642 cmd.opcode = MMC_SEND_STATUS;
643 cmd.arg = card->rca << 16;
644 cmd.flags = MMC_RSP_R1;
645
646 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
647 if (err == MMC_ERR_NONE)
648 continue;
649
650 mmc_card_set_dead(card);
651 }
652}
653
654static void mmc_setup(struct mmc_host *host)
655{
656 if (host->ios.power_mode != MMC_POWER_ON) {
657 int err;
658 u32 ocr;
659
660 mmc_power_up(host);
661 mmc_idle_cards(host);
662
663 err = mmc_send_op_cond(host, 0, &ocr);
664 if (err != MMC_ERR_NONE)
665 return;
666
667 host->ocr = mmc_select_voltage(host, ocr);
668
669 /*
670 * Since we're changing the OCR value, we seem to
671 * need to tell some cards to go back to the idle
672 * state. We wait 1ms to give cards time to
673 * respond.
674 */
675 if (host->ocr)
676 mmc_idle_cards(host);
677 } else {
678 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
679 host->ios.clock = host->f_min;
680 host->ops->set_ios(host, &host->ios);
681
682 /*
683 * We should remember the OCR mask from the existing
 684 * cards, and detect the new cards' OCR mask, combine
685 * the two and re-select the VDD. However, if we do
686 * change VDD, we should do an idle, and then do a
687 * full re-initialisation. We would need to notify
688 * drivers so that they can re-setup the cards as
689 * well, while keeping their queues at bay.
690 *
691 * For the moment, we take the easy way out - if the
692 * new cards don't like our currently selected VDD,
693 * they drop off the bus.
694 */
695 }
696
697 if (host->ocr == 0)
698 return;
699
700 /*
701 * Send the selected OCR multiple times... until the cards
702 * all get the idea that they should be ready for CMD2.
703 * (My SanDisk card seems to need this.)
704 */
705 mmc_send_op_cond(host, host->ocr, NULL);
706
707 mmc_discover_cards(host);
708
709 /*
710 * Ok, now switch to push-pull mode.
711 */
712 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
713 host->ops->set_ios(host, &host->ios);
714
715 mmc_read_csds(host);
716}
717
718
719/**
720 * mmc_detect_change - process change of state on a MMC socket
721 * @host: host which changed state.
722 *
723 * All we know is that card(s) have been inserted or removed
724 * from the socket(s). We don't know which socket or cards.
725 */
726void mmc_detect_change(struct mmc_host *host)
727{
728 schedule_work(&host->detect);
729}
730
731EXPORT_SYMBOL(mmc_detect_change);
732
733
734static void mmc_rescan(void *data)
735{
736 struct mmc_host *host = data;
737 struct list_head *l, *n;
738
739 mmc_claim_host(host);
740
741 if (host->ios.power_mode == MMC_POWER_ON)
742 mmc_check_cards(host);
743
744 mmc_setup(host);
745
746 if (!list_empty(&host->cards)) {
747 /*
748 * (Re-)calculate the fastest clock rate which the
749 * attached cards and the host support.
750 */
751 host->ios.clock = mmc_calculate_clock(host);
752 host->ops->set_ios(host, &host->ios);
753 }
754
755 mmc_release_host(host);
756
757 list_for_each_safe(l, n, &host->cards) {
758 struct mmc_card *card = mmc_list_to_card(l);
759
760 /*
761 * If this is a new and good card, register it.
762 */
763 if (!mmc_card_present(card) && !mmc_card_dead(card)) {
764 if (mmc_register_card(card))
765 mmc_card_set_dead(card);
766 else
767 mmc_card_set_present(card);
768 }
769
770 /*
771 * If this card is dead, destroy it.
772 */
773 if (mmc_card_dead(card)) {
774 list_del(&card->node);
775 mmc_remove_card(card);
776 }
777 }
778
779 /*
780 * If we discover that there are no cards on the
781 * bus, turn off the clock and power down.
782 */
783 if (list_empty(&host->cards))
784 mmc_power_off(host);
785}
786
787
788/**
789 * mmc_alloc_host - initialise the per-host structure.
790 * @extra: sizeof private data structure
791 * @dev: pointer to host device model structure
792 *
793 * Initialise the per-host structure.
794 */
795struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
796{
797 struct mmc_host *host;
798
799 host = kmalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
800 if (host) {
801 memset(host, 0, sizeof(struct mmc_host) + extra);
802
803 spin_lock_init(&host->lock);
804 init_waitqueue_head(&host->wq);
805 INIT_LIST_HEAD(&host->cards);
806 INIT_WORK(&host->detect, mmc_rescan, host);
807
808 host->dev = dev;
809
810 /*
811 * By default, hosts do not support SGIO or large requests.
812 * They have to set these according to their abilities.
813 */
814 host->max_hw_segs = 1;
815 host->max_phys_segs = 1;
816 host->max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
817 host->max_seg_size = PAGE_CACHE_SIZE;
818 }
819
820 return host;
821}
822
823EXPORT_SYMBOL(mmc_alloc_host);
824
825/**
826 * mmc_add_host - initialise host hardware
827 * @host: mmc host
828 */
829int mmc_add_host(struct mmc_host *host)
830{
831 static unsigned int host_num;
832
833 snprintf(host->host_name, sizeof(host->host_name),
834 "mmc%d", host_num++);
835
836 mmc_power_off(host);
837 mmc_detect_change(host);
838
839 return 0;
840}
841
842EXPORT_SYMBOL(mmc_add_host);
843
844/**
845 * mmc_remove_host - remove host hardware
846 * @host: mmc host
847 *
848 * Unregister and remove all cards associated with this host,
849 * and power down the MMC bus.
850 */
851void mmc_remove_host(struct mmc_host *host)
852{
853 struct list_head *l, *n;
854
855 list_for_each_safe(l, n, &host->cards) {
856 struct mmc_card *card = mmc_list_to_card(l);
857
858 mmc_remove_card(card);
859 }
860
861 mmc_power_off(host);
862}
863
864EXPORT_SYMBOL(mmc_remove_host);
865
866/**
867 * mmc_free_host - free the host structure
868 * @host: mmc host
869 *
870 * Free the host once all references to it have been dropped.
871 */
872void mmc_free_host(struct mmc_host *host)
873{
874 flush_scheduled_work();
875 kfree(host);
876}
877
878EXPORT_SYMBOL(mmc_free_host);
879
880#ifdef CONFIG_PM
881
882/**
883 * mmc_suspend_host - suspend a host
884 * @host: mmc host
885 * @state: suspend mode (PM_SUSPEND_xxx)
886 */
887int mmc_suspend_host(struct mmc_host *host, u32 state)
888{
889 mmc_claim_host(host);
890 mmc_deselect_cards(host);
891 mmc_power_off(host);
892 mmc_release_host(host);
893
894 return 0;
895}
896
897EXPORT_SYMBOL(mmc_suspend_host);
898
899/**
900 * mmc_resume_host - resume a previously suspended host
901 * @host: mmc host
902 */
903int mmc_resume_host(struct mmc_host *host)
904{
905 mmc_detect_change(host);
906
907 return 0;
908}
909
910EXPORT_SYMBOL(mmc_resume_host);
911
912#endif
913
914MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h
new file mode 100644
index 000000000000..b498dffe0b11
--- /dev/null
+++ b/drivers/mmc/mmc.h
@@ -0,0 +1,16 @@
1/*
2 * linux/drivers/mmc/mmc.h
3 *
4 * Copyright (C) 2003 Russell King, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef _MMC_H
11#define _MMC_H
12/* core-internal functions */
13void mmc_init_card(struct mmc_card *card, struct mmc_host *host);
14int mmc_register_card(struct mmc_card *card);
15void mmc_remove_card(struct mmc_card *card);
16#endif
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
new file mode 100644
index 000000000000..b5b4a7b11903
--- /dev/null
+++ b/drivers/mmc/mmc_block.c
@@ -0,0 +1,509 @@
1/*
2 * Block driver for media (i.e., flash cards)
3 *
4 * Copyright 2002 Hewlett-Packard Company
5 *
6 * Use consistent with the GNU GPL is permitted,
7 * provided that this copyright notice is
8 * preserved in its entirety in all copies and derived works.
9 *
10 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
11 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
12 * FITNESS FOR ANY PARTICULAR PURPOSE.
13 *
14 * Many thanks to Alessandro Rubini and Jonathan Corbet!
15 *
16 * Author: Andrew Christian
17 * 28 May 2002
18 */
19#include <linux/moduleparam.h>
20#include <linux/module.h>
21#include <linux/init.h>
22
23#include <linux/sched.h>
24#include <linux/kernel.h>
25#include <linux/fs.h>
26#include <linux/errno.h>
27#include <linux/hdreg.h>
28#include <linux/kdev_t.h>
29#include <linux/blkdev.h>
30#include <linux/devfs_fs_kernel.h>
31
32#include <linux/mmc/card.h>
33#include <linux/mmc/protocol.h>
34
35#include <asm/system.h>
36#include <asm/uaccess.h>
37
38#include "mmc_queue.h"
39
40/*
41 * max 8 partitions per card
42 */
43#define MMC_SHIFT 3
44
45static int major;
46
47/*
48 * There is one mmc_blk_data per slot.
49 */
50struct mmc_blk_data {
51 spinlock_t lock;
52 struct gendisk *disk;
53 struct mmc_queue queue;
54
55 unsigned int usage;
56 unsigned int block_bits;
57};
58
59static DECLARE_MUTEX(open_lock);
60
61static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
62{
63 struct mmc_blk_data *md;
64
65 down(&open_lock);
66 md = disk->private_data;
67 if (md && md->usage == 0)
68 md = NULL;
69 if (md)
70 md->usage++;
71 up(&open_lock);
72
73 return md;
74}
75
76static void mmc_blk_put(struct mmc_blk_data *md)
77{
78 down(&open_lock);
79 md->usage--;
80 if (md->usage == 0) {
81 put_disk(md->disk);
82 mmc_cleanup_queue(&md->queue);
83 kfree(md);
84 }
85 up(&open_lock);
86}
87
88static int mmc_blk_open(struct inode *inode, struct file *filp)
89{
90 struct mmc_blk_data *md;
91 int ret = -ENXIO;
92
93 md = mmc_blk_get(inode->i_bdev->bd_disk);
94 if (md) {
95 if (md->usage == 2)
96 check_disk_change(inode->i_bdev);
97 ret = 0;
98 }
99
100 return ret;
101}
102
103static int mmc_blk_release(struct inode *inode, struct file *filp)
104{
105 struct mmc_blk_data *md = inode->i_bdev->bd_disk->private_data;
106
107 mmc_blk_put(md);
108 return 0;
109}
110
111static int
112mmc_blk_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
113{
114 struct block_device *bdev = inode->i_bdev;
115
116 if (cmd == HDIO_GETGEO) {
117 struct hd_geometry geo;
118
119 memset(&geo, 0, sizeof(struct hd_geometry));
120
121 geo.cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
122 geo.heads = 4;
123 geo.sectors = 16;
124 geo.start = get_start_sect(bdev);
125
126 return copy_to_user((void __user *)arg, &geo, sizeof(geo))
127 ? -EFAULT : 0;
128 }
129
130 return -ENOTTY;
131}
132
133static struct block_device_operations mmc_bdops = {
134 .open = mmc_blk_open,
135 .release = mmc_blk_release,
136 .ioctl = mmc_blk_ioctl,
137 .owner = THIS_MODULE,
138};
139
140struct mmc_blk_request {
141 struct mmc_request mrq;
142 struct mmc_command cmd;
143 struct mmc_command stop;
144 struct mmc_data data;
145};
146
147static int mmc_blk_prep_rq(struct mmc_queue *mq, struct request *req)
148{
149 struct mmc_blk_data *md = mq->data;
150 int stat = BLKPREP_OK;
151
152 /*
153 * If we have no device, we haven't finished initialising.
154 */
155 if (!md || !mq->card) {
156 printk(KERN_ERR "%s: killing request - no device/host\n",
157 req->rq_disk->disk_name);
158 stat = BLKPREP_KILL;
159 }
160
161 return stat;
162}
163
164static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
165{
166 struct mmc_blk_data *md = mq->data;
167 struct mmc_card *card = md->queue.card;
168 int ret;
169
170 if (mmc_card_claim_host(card))
171 goto cmd_err;
172
173 do {
174 struct mmc_blk_request brq;
175 struct mmc_command cmd;
176
177 memset(&brq, 0, sizeof(struct mmc_blk_request));
178 brq.mrq.cmd = &brq.cmd;
179 brq.mrq.data = &brq.data;
180
181 brq.cmd.arg = req->sector << 9;
182 brq.cmd.flags = MMC_RSP_R1;
183 brq.data.timeout_ns = card->csd.tacc_ns * 10;
184 brq.data.timeout_clks = card->csd.tacc_clks * 10;
185 brq.data.blksz_bits = md->block_bits;
186 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
187 brq.stop.opcode = MMC_STOP_TRANSMISSION;
188 brq.stop.arg = 0;
189 brq.stop.flags = MMC_RSP_R1B;
190
191 if (rq_data_dir(req) == READ) {
192 brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
193 brq.data.flags |= MMC_DATA_READ;
194 } else {
195 brq.cmd.opcode = MMC_WRITE_BLOCK;
196 brq.cmd.flags = MMC_RSP_R1B;
197 brq.data.flags |= MMC_DATA_WRITE;
198 brq.data.blocks = 1;
199 }
200 brq.mrq.stop = brq.data.blocks > 1 ? &brq.stop : NULL;
201
202 brq.data.sg = mq->sg;
203 brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);
204
205 mmc_wait_for_req(card->host, &brq.mrq);
206 if (brq.cmd.error) {
207 printk(KERN_ERR "%s: error %d sending read/write command\n",
208 req->rq_disk->disk_name, brq.cmd.error);
209 goto cmd_err;
210 }
211
212 if (brq.data.error) {
213 printk(KERN_ERR "%s: error %d transferring data\n",
214 req->rq_disk->disk_name, brq.data.error);
215 goto cmd_err;
216 }
217
218 if (brq.stop.error) {
219 printk(KERN_ERR "%s: error %d sending stop command\n",
220 req->rq_disk->disk_name, brq.stop.error);
221 goto cmd_err;
222 }
223
224 do {
225 int err;
226
227 cmd.opcode = MMC_SEND_STATUS;
228 cmd.arg = card->rca << 16;
229 cmd.flags = MMC_RSP_R1;
230 err = mmc_wait_for_cmd(card->host, &cmd, 5);
231 if (err) {
232 printk(KERN_ERR "%s: error %d requesting status\n",
233 req->rq_disk->disk_name, err);
234 goto cmd_err;
235 }
236 } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
237
238#if 0
239 if (cmd.resp[0] & ~0x00000900)
240 printk(KERN_ERR "%s: status = %08x\n",
241 req->rq_disk->disk_name, cmd.resp[0]);
242 if (mmc_decode_status(cmd.resp))
243 goto cmd_err;
244#endif
245
246 /*
247 * A block was successfully transferred.
248 */
249 spin_lock_irq(&md->lock);
250 ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
251 if (!ret) {
252 /*
253 * The whole request completed successfully.
254 */
255 add_disk_randomness(req->rq_disk);
256 blkdev_dequeue_request(req);
257 end_that_request_last(req);
258 }
259 spin_unlock_irq(&md->lock);
260 } while (ret);
261
262 mmc_card_release_host(card);
263
264 return 1;
265
266 cmd_err:
267 mmc_card_release_host(card);
268
269 /*
270 * This is a little draconian, but until we get proper
 271 * error handling sorted out here, it's the best we can
272 * do - especially as some hosts have no idea how much
273 * data was transferred before the error occurred.
274 */
275 spin_lock_irq(&md->lock);
276 do {
277 ret = end_that_request_chunk(req, 0,
278 req->current_nr_sectors << 9);
279 } while (ret);
280
281 add_disk_randomness(req->rq_disk);
282 blkdev_dequeue_request(req);
283 end_that_request_last(req);
284 spin_unlock_irq(&md->lock);
285
286 return 0;
287}
288
289#define MMC_NUM_MINORS (256 >> MMC_SHIFT)
290
291static unsigned long dev_use[MMC_NUM_MINORS/(8*sizeof(unsigned long))];
292
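With MMC_SHIFT = 3 this gives MMC_NUM_MINORS = 256 >> 3 = 32, so at most 32 cards can be bound at once, each owning eight consecutive minors (the whole device plus up to seven partitions), and dev_use holds one allocation bit per card.
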
293static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
294{
295 struct mmc_blk_data *md;
296 int devidx, ret;
297
298 devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
299 if (devidx >= MMC_NUM_MINORS)
300 return ERR_PTR(-ENOSPC);
301 __set_bit(devidx, dev_use);
302
303 md = kmalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
304 if (md) {
305 memset(md, 0, sizeof(struct mmc_blk_data));
306
307 md->disk = alloc_disk(1 << MMC_SHIFT);
308 if (md->disk == NULL) {
309 kfree(md);
310 md = ERR_PTR(-ENOMEM);
311 goto out;
312 }
313
314 spin_lock_init(&md->lock);
315 md->usage = 1;
316
317 ret = mmc_init_queue(&md->queue, card, &md->lock);
318 if (ret) {
319 put_disk(md->disk);
320 kfree(md);
321 md = ERR_PTR(ret);
322 goto out;
323 }
324 md->queue.prep_fn = mmc_blk_prep_rq;
325 md->queue.issue_fn = mmc_blk_issue_rq;
326 md->queue.data = md;
327
328 md->disk->major = major;
329 md->disk->first_minor = devidx << MMC_SHIFT;
330 md->disk->fops = &mmc_bdops;
331 md->disk->private_data = md;
332 md->disk->queue = md->queue.queue;
333 md->disk->driverfs_dev = &card->dev;
334
335 /*
336 * As discussed on lkml, GENHD_FL_REMOVABLE should:
337 *
338 * - be set for removable media with permanent block devices
339 * - be unset for removable block devices with permanent media
340 *
341 * Since MMC block devices clearly fall under the second
342 * case, we do not set GENHD_FL_REMOVABLE. Userspace
343 * should use the block device creation/destruction hotplug
344 * messages to tell when the card is present.
345 */
346
347 sprintf(md->disk->disk_name, "mmcblk%d", devidx);
348 sprintf(md->disk->devfs_name, "mmc/blk%d", devidx);
349
350 md->block_bits = card->csd.read_blkbits;
351
352 blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);
353 set_capacity(md->disk, card->csd.capacity);
354 }
355 out:
356 return md;
357}
358
359static int
360mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
361{
362 struct mmc_command cmd;
363 int err;
364
365 mmc_card_claim_host(card);
366 cmd.opcode = MMC_SET_BLOCKLEN;
367 cmd.arg = 1 << card->csd.read_blkbits;
368 cmd.flags = MMC_RSP_R1;
369 err = mmc_wait_for_cmd(card->host, &cmd, 5);
370 mmc_card_release_host(card);
371
372 if (err) {
373 printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
374 md->disk->disk_name, cmd.arg, err);
375 return -EINVAL;
376 }
377
378 return 0;
379}
380
381static int mmc_blk_probe(struct mmc_card *card)
382{
383 struct mmc_blk_data *md;
384 int err;
385
386 if (card->csd.cmdclass & ~0x1ff)
387 return -ENODEV;
388
389 if (card->csd.read_blkbits < 9) {
390 printk(KERN_WARNING "%s: read blocksize too small (%u)\n",
391 mmc_card_id(card), 1 << card->csd.read_blkbits);
392 return -ENODEV;
393 }
394
395 md = mmc_blk_alloc(card);
396 if (IS_ERR(md))
397 return PTR_ERR(md);
398
399 err = mmc_blk_set_blksize(md, card);
400 if (err)
401 goto out;
402
403 printk(KERN_INFO "%s: %s %s %dKiB\n",
404 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
405 (card->csd.capacity << card->csd.read_blkbits) / 1024);
406
407 mmc_set_drvdata(card, md);
408 add_disk(md->disk);
409 return 0;
410
411 out:
412 mmc_blk_put(md);
413
414 return err;
415}
416
417static void mmc_blk_remove(struct mmc_card *card)
418{
419 struct mmc_blk_data *md = mmc_get_drvdata(card);
420
421 if (md) {
422 int devidx;
423
424 del_gendisk(md->disk);
425
426 /*
427 * I think this is needed.
428 */
429 md->disk->queue = NULL;
430
431 devidx = md->disk->first_minor >> MMC_SHIFT;
432 __clear_bit(devidx, dev_use);
433
434 mmc_blk_put(md);
435 }
436 mmc_set_drvdata(card, NULL);
437}
438
439#ifdef CONFIG_PM
440static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
441{
442 struct mmc_blk_data *md = mmc_get_drvdata(card);
443
444 if (md) {
445 mmc_queue_suspend(&md->queue);
446 }
447 return 0;
448}
449
450static int mmc_blk_resume(struct mmc_card *card)
451{
452 struct mmc_blk_data *md = mmc_get_drvdata(card);
453
454 if (md) {
455 mmc_blk_set_blksize(md, card);
456 mmc_queue_resume(&md->queue);
457 }
458 return 0;
459}
460#else
461#define mmc_blk_suspend NULL
462#define mmc_blk_resume NULL
463#endif
464
465static struct mmc_driver mmc_driver = {
466 .drv = {
467 .name = "mmcblk",
468 },
469 .probe = mmc_blk_probe,
470 .remove = mmc_blk_remove,
471 .suspend = mmc_blk_suspend,
472 .resume = mmc_blk_resume,
473};
474
475static int __init mmc_blk_init(void)
476{
477 int res = -ENOMEM;
478
479 res = register_blkdev(major, "mmc");
480 if (res < 0) {
481 printk(KERN_WARNING "Unable to get major %d for MMC media: %d\n",
482 major, res);
483 goto out;
484 }
485 if (major == 0)
486 major = res;
487
488 devfs_mk_dir("mmc");
489 return mmc_register_driver(&mmc_driver);
490
491 out:
492 return res;
493}
494
495static void __exit mmc_blk_exit(void)
496{
497 mmc_unregister_driver(&mmc_driver);
498 devfs_remove("mmc");
499 unregister_blkdev(major, "mmc");
500}
501
502module_init(mmc_blk_init);
503module_exit(mmc_blk_exit);
504
505MODULE_LICENSE("GPL");
506MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
507
508module_param(major, int, 0444);
509MODULE_PARM_DESC(major, "specify the major device number for MMC block driver");
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
new file mode 100644
index 000000000000..0b9682e9a357
--- /dev/null
+++ b/drivers/mmc/mmc_queue.c
@@ -0,0 +1,238 @@
1/*
2 * linux/drivers/mmc/mmc_queue.c
3 *
4 * Copyright (C) 2003 Russell King, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/module.h>
12#include <linux/blkdev.h>
13
14#include <linux/mmc/card.h>
15#include <linux/mmc/host.h>
16#include "mmc_queue.h"
17
18#define MMC_QUEUE_EXIT (1 << 0)
19#define MMC_QUEUE_SUSPENDED (1 << 1)
20
21/*
22 * Prepare a MMC request. Essentially, this means passing the
23 * preparation off to the media driver. The media driver will
24 * create a mmc_io_request in req->special.
25 */
26static int mmc_prep_request(struct request_queue *q, struct request *req)
27{
28 struct mmc_queue *mq = q->queuedata;
29 int ret = BLKPREP_KILL;
30
31 if (req->flags & REQ_SPECIAL) {
32 /*
33 * Special commands already have the command
34 * blocks already setup in req->special.
35 */
36 BUG_ON(!req->special);
37
38 ret = BLKPREP_OK;
39 } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
40 /*
41 * Block I/O requests need translating according
42 * to the protocol.
43 */
44 ret = mq->prep_fn(mq, req);
45 } else {
46 /*
47 * Everything else is invalid.
48 */
49 blk_dump_rq_flags(req, "MMC bad request");
50 }
51
52 if (ret == BLKPREP_OK)
53 req->flags |= REQ_DONTPREP;
54
55 return ret;
56}
57
58static int mmc_queue_thread(void *d)
59{
60 struct mmc_queue *mq = d;
61 struct request_queue *q = mq->queue;
62 DECLARE_WAITQUEUE(wait, current);
63
64 /*
 65 * Mark this thread PF_MEMALLOC|PF_NOFREEZE so it is not put to
 66 * sleep by the process freezer; we handle suspension ourselves.
67 */
68 current->flags |= PF_MEMALLOC|PF_NOFREEZE;
69
70 daemonize("mmcqd");
71
72 complete(&mq->thread_complete);
73
74 down(&mq->thread_sem);
75 add_wait_queue(&mq->thread_wq, &wait);
76 do {
77 struct request *req = NULL;
78
79 spin_lock_irq(q->queue_lock);
80 set_current_state(TASK_INTERRUPTIBLE);
81 if (!blk_queue_plugged(q))
82 mq->req = req = elv_next_request(q);
83 spin_unlock_irq(q->queue_lock);
84
85 if (!req) {
86 if (mq->flags & MMC_QUEUE_EXIT)
87 break;
88 up(&mq->thread_sem);
89 schedule();
90 down(&mq->thread_sem);
91 continue;
92 }
93 set_current_state(TASK_RUNNING);
94
95 mq->issue_fn(mq, req);
96 } while (1);
97 remove_wait_queue(&mq->thread_wq, &wait);
98 up(&mq->thread_sem);
99
100 complete_and_exit(&mq->thread_complete, 0);
101 return 0;
102}
103
104/*
105 * Generic MMC request handler. This is called for any queue on a
106 * particular host. When the host is not busy, we look for a request
107 * on any queue on this host, and attempt to issue it. This may
108 * not be the queue we were asked to process.
109 */
110static void mmc_request(request_queue_t *q)
111{
112 struct mmc_queue *mq = q->queuedata;
113
114 if (!mq->req)
115 wake_up(&mq->thread_wq);
116}
117
118/**
119 * mmc_init_queue - initialise a queue structure.
120 * @mq: mmc queue
121 * @card: mmc card to attach this queue
122 * @lock: queue lock
123 *
124 * Initialise a MMC card request queue.
125 */
126int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
127{
128 struct mmc_host *host = card->host;
129 u64 limit = BLK_BOUNCE_HIGH;
130 int ret;
131
132 if (host->dev->dma_mask && *host->dev->dma_mask)
133 limit = *host->dev->dma_mask;
134
135 mq->card = card;
136 mq->queue = blk_init_queue(mmc_request, lock);
137 if (!mq->queue)
138 return -ENOMEM;
139
140 blk_queue_prep_rq(mq->queue, mmc_prep_request);
141 blk_queue_bounce_limit(mq->queue, limit);
142 blk_queue_max_sectors(mq->queue, host->max_sectors);
143 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
144 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
145 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
146
147 mq->queue->queuedata = mq;
148 mq->req = NULL;
149
150 mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
151 GFP_KERNEL);
152 if (!mq->sg) {
153 ret = -ENOMEM;
154 goto cleanup;
155 }
156
157 init_completion(&mq->thread_complete);
158 init_waitqueue_head(&mq->thread_wq);
159 init_MUTEX(&mq->thread_sem);
160
161 ret = kernel_thread(mmc_queue_thread, mq, CLONE_KERNEL);
162 if (ret >= 0) {
163 wait_for_completion(&mq->thread_complete);
164 init_completion(&mq->thread_complete);
165 ret = 0;
166 goto out;
167 }
168
169 cleanup:
170 kfree(mq->sg);
171 mq->sg = NULL;
172
173 blk_cleanup_queue(mq->queue);
174 out:
175 return ret;
176}
177EXPORT_SYMBOL(mmc_init_queue);
178
179void mmc_cleanup_queue(struct mmc_queue *mq)
180{
181 mq->flags |= MMC_QUEUE_EXIT;
182 wake_up(&mq->thread_wq);
183 wait_for_completion(&mq->thread_complete);
184
185 kfree(mq->sg);
186 mq->sg = NULL;
187
188 blk_cleanup_queue(mq->queue);
189
190 mq->card = NULL;
191}
192EXPORT_SYMBOL(mmc_cleanup_queue);
193
194/**
195 * mmc_queue_suspend - suspend a MMC request queue
196 * @mq: MMC queue to suspend
197 *
198 * Stop the block request queue, and wait for our thread to
199 * complete any outstanding requests. This ensures that we
200 * won't suspend while a request is being processed.
201 */
202void mmc_queue_suspend(struct mmc_queue *mq)
203{
204 request_queue_t *q = mq->queue;
205 unsigned long flags;
206
207 if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
208 mq->flags |= MMC_QUEUE_SUSPENDED;
209
210 spin_lock_irqsave(q->queue_lock, flags);
211 blk_stop_queue(q);
212 spin_unlock_irqrestore(q->queue_lock, flags);
213
214 down(&mq->thread_sem);
215 }
216}
217EXPORT_SYMBOL(mmc_queue_suspend);
218
219/**
220 * mmc_queue_resume - resume a previously suspended MMC request queue
221 * @mq: MMC queue to resume
222 */
223void mmc_queue_resume(struct mmc_queue *mq)
224{
225 request_queue_t *q = mq->queue;
226 unsigned long flags;
227
228 if (mq->flags & MMC_QUEUE_SUSPENDED) {
229 mq->flags &= ~MMC_QUEUE_SUSPENDED;
230
231 up(&mq->thread_sem);
232
233 spin_lock_irqsave(q->queue_lock, flags);
234 blk_start_queue(q);
235 spin_unlock_irqrestore(q->queue_lock, flags);
236 }
237}
238EXPORT_SYMBOL(mmc_queue_resume);
diff --git a/drivers/mmc/mmc_queue.h b/drivers/mmc/mmc_queue.h
new file mode 100644
index 000000000000..7182d2f69b4e
--- /dev/null
+++ b/drivers/mmc/mmc_queue.h
@@ -0,0 +1,33 @@
1#ifndef MMC_QUEUE_H
2#define MMC_QUEUE_H
3
4struct request;
5struct task_struct;
6
7struct mmc_queue {
8 struct mmc_card *card;
9 struct completion thread_complete;
10 wait_queue_head_t thread_wq;
11 struct semaphore thread_sem;
12 unsigned int flags;
13 struct request *req;
14 int (*prep_fn)(struct mmc_queue *, struct request *);
15 int (*issue_fn)(struct mmc_queue *, struct request *);
16 void *data;
17 struct request_queue *queue;
18 struct scatterlist *sg;
19};
20
21struct mmc_io_request {
22 struct request *rq;
23 int num;
24 struct mmc_command selcmd; /* mmc_queue private */
25 struct mmc_command cmd[4]; /* max 4 commands */
26};
27
28extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
29extern void mmc_cleanup_queue(struct mmc_queue *);
30extern void mmc_queue_suspend(struct mmc_queue *);
31extern void mmc_queue_resume(struct mmc_queue *);
32
33#endif
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
new file mode 100644
index 000000000000..29a56e9cd5b3
--- /dev/null
+++ b/drivers/mmc/mmc_sysfs.c
@@ -0,0 +1,238 @@
1/*
2 * linux/drivers/mmc/mmc_sysfs.c
3 *
4 * Copyright (C) 2003 Russell King, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * MMC sysfs/driver model support.
11 */
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/device.h>
15
16#include <linux/mmc/card.h>
17#include <linux/mmc/host.h>
18
19#include "mmc.h"
20
21#define dev_to_mmc_card(d) container_of(d, struct mmc_card, dev)
22#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
23
24#define MMC_ATTR(name, fmt, args...) \
25static ssize_t mmc_##name##_show (struct device *dev, char *buf) \
26{ \
27 struct mmc_card *card = dev_to_mmc_card(dev); \
28 return sprintf(buf, fmt, args); \
29}
30
31MMC_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
32 card->raw_cid[2], card->raw_cid[3]);
33MMC_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
34 card->raw_csd[2], card->raw_csd[3]);
35MMC_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
36MMC_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
37MMC_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
38MMC_ATTR(manfid, "0x%06x\n", card->cid.manfid);
39MMC_ATTR(name, "%s\n", card->cid.prod_name);
40MMC_ATTR(oemid, "0x%04x\n", card->cid.oemid);
41MMC_ATTR(serial, "0x%08x\n", card->cid.serial);
42
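For one concrete case, MMC_ATTR(serial, "0x%08x\n", card->cid.serial) expands to roughly the show routine below, which the MMC_ATTR_RO() wrapper that follows then places in the attribute table:

/* Expansion sketch of MMC_ATTR(serial, ...) -- generated by the macro, shown here for clarity. */
static ssize_t mmc_serial_show(struct device *dev, char *buf)
{
	struct mmc_card *card = dev_to_mmc_card(dev);
	return sprintf(buf, "0x%08x\n", card->cid.serial);
}
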
43#define MMC_ATTR_RO(name) __ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
44
45static struct device_attribute mmc_dev_attrs[] = {
46 MMC_ATTR_RO(cid),
47 MMC_ATTR_RO(csd),
48 MMC_ATTR_RO(date),
49 MMC_ATTR_RO(fwrev),
50 MMC_ATTR_RO(hwrev),
51 MMC_ATTR_RO(manfid),
52 MMC_ATTR_RO(name),
53 MMC_ATTR_RO(oemid),
54 MMC_ATTR_RO(serial),
55 __ATTR_NULL
56};
57
58
59static void mmc_release_card(struct device *dev)
60{
61 struct mmc_card *card = dev_to_mmc_card(dev);
62
63 kfree(card);
64}
65
66/*
67 * This currently matches any MMC driver to any MMC card - drivers
68 * themselves make the decision whether to drive this card in their
69 * probe method. However, we force "bad" cards to fail.
70 */
71static int mmc_bus_match(struct device *dev, struct device_driver *drv)
72{
73 struct mmc_card *card = dev_to_mmc_card(dev);
74 return !mmc_card_bad(card);
75}
76
77static int
78mmc_bus_hotplug(struct device *dev, char **envp, int num_envp, char *buf,
79 int buf_size)
80{
81 struct mmc_card *card = dev_to_mmc_card(dev);
82 char ccc[13];
83 int i = 0;
84
85#define add_env(fmt,val) \
86 ({ \
87 int len, ret = -ENOMEM; \
88 if (i < num_envp) { \
89 envp[i++] = buf; \
90 len = snprintf(buf, buf_size, fmt, val) + 1; \
91 buf_size -= len; \
92 buf += len; \
93 if (buf_size >= 0) \
94 ret = 0; \
95 } \
96 ret; \
97 })
98
99 for (i = 0; i < 12; i++)
100 ccc[i] = card->csd.cmdclass & (1 << i) ? '1' : '0';
101 ccc[12] = '\0';
102
103 i = 0;
104 add_env("MMC_CCC=%s", ccc);
105 add_env("MMC_MANFID=%06x", card->cid.manfid);
106 add_env("MMC_NAME=%s", mmc_card_name(card));
107 add_env("MMC_OEMID=%04x", card->cid.oemid);
108
109 return 0;
110}
111
112static int mmc_bus_suspend(struct device *dev, pm_message_t state)
113{
114 struct mmc_driver *drv = to_mmc_driver(dev->driver);
115 struct mmc_card *card = dev_to_mmc_card(dev);
116 int ret = 0;
117
118 if (dev->driver && drv->suspend)
119 ret = drv->suspend(card, state);
120 return ret;
121}
122
123static int mmc_bus_resume(struct device *dev)
124{
125 struct mmc_driver *drv = to_mmc_driver(dev->driver);
126 struct mmc_card *card = dev_to_mmc_card(dev);
127 int ret = 0;
128
129 if (dev->driver && drv->resume)
130 ret = drv->resume(card);
131 return ret;
132}
133
134static struct bus_type mmc_bus_type = {
135 .name = "mmc",
136 .dev_attrs = mmc_dev_attrs,
137 .match = mmc_bus_match,
138 .hotplug = mmc_bus_hotplug,
139 .suspend = mmc_bus_suspend,
140 .resume = mmc_bus_resume,
141};
142
143
144static int mmc_drv_probe(struct device *dev)
145{
146 struct mmc_driver *drv = to_mmc_driver(dev->driver);
147 struct mmc_card *card = dev_to_mmc_card(dev);
148
149 return drv->probe(card);
150}
151
152static int mmc_drv_remove(struct device *dev)
153{
154 struct mmc_driver *drv = to_mmc_driver(dev->driver);
155 struct mmc_card *card = dev_to_mmc_card(dev);
156
157 drv->remove(card);
158
159 return 0;
160}
161
162
163/**
164 * mmc_register_driver - register a media driver
165 * @drv: MMC media driver
166 */
167int mmc_register_driver(struct mmc_driver *drv)
168{
169 drv->drv.bus = &mmc_bus_type;
170 drv->drv.probe = mmc_drv_probe;
171 drv->drv.remove = mmc_drv_remove;
172 return driver_register(&drv->drv);
173}
174
175EXPORT_SYMBOL(mmc_register_driver);
176
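Any other media driver would hook in the same way; the skeleton below is purely illustrative (the example_* names are invented) and mirrors the mmc_driver that mmc_block.c registers earlier in this patch:

/* Hypothetical media driver skeleton -- illustration only. */
static int example_probe(struct mmc_card *card)
{
	/* claim the card, inspect card->csd, allocate per-card state ... */
	return 0;
}

static void example_remove(struct mmc_card *card)
{
	/* tear down whatever example_probe() set up */
}

static struct mmc_driver example_driver = {
	.drv	= { .name = "mmc_example" },
	.probe	= example_probe,
	.remove	= example_remove,
};

/* A module init function would then simply call mmc_register_driver(&example_driver). */
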
177/**
178 * mmc_unregister_driver - unregister a media driver
179 * @drv: MMC media driver
180 */
181void mmc_unregister_driver(struct mmc_driver *drv)
182{
183 drv->drv.bus = &mmc_bus_type;
184 driver_unregister(&drv->drv);
185}
186
187EXPORT_SYMBOL(mmc_unregister_driver);
188
189
190/*
191 * Internal function. Initialise a MMC card structure.
192 */
193void mmc_init_card(struct mmc_card *card, struct mmc_host *host)
194{
195 memset(card, 0, sizeof(struct mmc_card));
196 card->host = host;
197 device_initialize(&card->dev);
198 card->dev.parent = card->host->dev;
199 card->dev.bus = &mmc_bus_type;
200 card->dev.release = mmc_release_card;
201}
202
203/*
204 * Internal function. Register a new MMC card with the driver model.
205 */
206int mmc_register_card(struct mmc_card *card)
207{
208 snprintf(card->dev.bus_id, sizeof(card->dev.bus_id),
209 "%s:%04x", card->host->host_name, card->rca);
210
211 return device_add(&card->dev);
212}
213
214/*
 215 * Internal function. Unregister an MMC card from the
216 * driver model, and (eventually) free it.
217 */
218void mmc_remove_card(struct mmc_card *card)
219{
220 if (mmc_card_present(card))
221 device_del(&card->dev);
222
223 put_device(&card->dev);
224}
225
226
227static int __init mmc_init(void)
228{
229 return bus_register(&mmc_bus_type);
230}
231
232static void __exit mmc_exit(void)
233{
234 bus_unregister(&mmc_bus_type);
235}
236
237module_init(mmc_init);
238module_exit(mmc_exit);
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
new file mode 100644
index 000000000000..3ee961c206bd
--- /dev/null
+++ b/drivers/mmc/mmci.c
@@ -0,0 +1,680 @@
1/*
2 * linux/drivers/mmc/mmci.c - ARM PrimeCell MMCI PL180/1 driver
3 *
4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/ioport.h>
15#include <linux/device.h>
16#include <linux/interrupt.h>
17#include <linux/delay.h>
18#include <linux/err.h>
19#include <linux/highmem.h>
20#include <linux/mmc/host.h>
21#include <linux/mmc/protocol.h>
22
23#include <asm/io.h>
24#include <asm/irq.h>
25#include <asm/scatterlist.h>
26#include <asm/hardware/amba.h>
27#include <asm/hardware/clock.h>
28#include <asm/mach/mmc.h>
29
30#include "mmci.h"
31
32#define DRIVER_NAME "mmci-pl18x"
33
34#ifdef CONFIG_MMC_DEBUG
35#define DBG(host,fmt,args...) \
36 pr_debug("%s: %s: " fmt, host->mmc->host_name, __func__ , args)
37#else
38#define DBG(host,fmt,args...) do { } while (0)
39#endif
40
41static unsigned int fmax = 515633;
42
43static void
44mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
45{
46 writel(0, host->base + MMCICOMMAND);
47
48 host->mrq = NULL;
49 host->cmd = NULL;
50
51 if (mrq->data)
52 mrq->data->bytes_xfered = host->data_xfered;
53
54 /*
55 * Need to drop the host lock here; mmc_request_done may call
56 * back into the driver...
57 */
58 spin_unlock(&host->lock);
59 mmc_request_done(host->mmc, mrq);
60 spin_lock(&host->lock);
61}
62
63static void mmci_stop_data(struct mmci_host *host)
64{
65 writel(0, host->base + MMCIDATACTRL);
66 writel(0, host->base + MMCIMASK1);
67 host->data = NULL;
68}
69
70static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
71{
72 unsigned int datactrl, timeout, irqmask;
73 void __iomem *base;
74
75 DBG(host, "blksz %04x blks %04x flags %08x\n",
76 1 << data->blksz_bits, data->blocks, data->flags);
77
78 host->data = data;
79 host->size = data->blocks << data->blksz_bits;
80 host->data_xfered = 0;
81
82 mmci_init_sg(host, data);
83
84 timeout = data->timeout_clks +
85 ((unsigned long long)data->timeout_ns * host->cclk) /
86 1000000000ULL;
87
88 base = host->base;
89 writel(timeout, base + MMCIDATATIMER);
90 writel(host->size, base + MMCIDATALENGTH);
91
92 datactrl = MCI_DPSM_ENABLE | data->blksz_bits << 4;
93 if (data->flags & MMC_DATA_READ) {
94 datactrl |= MCI_DPSM_DIRECTION;
95 irqmask = MCI_RXFIFOHALFFULLMASK;
96 } else {
97 /*
98 * We don't actually need to include "FIFO empty" here
 99 * since it's implicit in "FIFO half empty".
100 */
101 irqmask = MCI_TXFIFOHALFEMPTYMASK;
102 }
103
104 writel(datactrl, base + MMCIDATACTRL);
105 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
106 writel(irqmask, base + MMCIMASK1);
107}
108
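To put numbers on the timeout calculation above: if the block driver scales the card's access time to, say, timeout_ns = 10,000,000 (10 ms) and timeout_clks = 1,000, then with a 20 MHz card clock (host->cclk) the value written to MMCIDATATIMER is 1,000 + 10,000,000 * 20,000,000 / 1,000,000,000 = 201,000 clock ticks.
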
109static void
110mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
111{
112 void __iomem *base = host->base;
113
114 DBG(host, "op %02x arg %08x flags %08x\n",
115 cmd->opcode, cmd->arg, cmd->flags);
116
117 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
118 writel(0, base + MMCICOMMAND);
119 udelay(1);
120 }
121
122 c |= cmd->opcode | MCI_CPSM_ENABLE;
123 switch (cmd->flags & MMC_RSP_MASK) {
124 case MMC_RSP_NONE:
125 default:
126 break;
127 case MMC_RSP_LONG:
128 c |= MCI_CPSM_LONGRSP;
129 case MMC_RSP_SHORT:
130 c |= MCI_CPSM_RESPONSE;
131 break;
132 }
133 if (/*interrupt*/0)
134 c |= MCI_CPSM_INTERRUPT;
135
136 host->cmd = cmd;
137
138 writel(cmd->arg, base + MMCIARGUMENT);
139 writel(c, base + MMCICOMMAND);
140}
141
142static void
143mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
144 unsigned int status)
145{
146 if (status & MCI_DATABLOCKEND) {
147 host->data_xfered += 1 << data->blksz_bits;
148 }
149 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
150 if (status & MCI_DATACRCFAIL)
151 data->error = MMC_ERR_BADCRC;
152 else if (status & MCI_DATATIMEOUT)
153 data->error = MMC_ERR_TIMEOUT;
154 else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
155 data->error = MMC_ERR_FIFO;
156 status |= MCI_DATAEND;
157 }
158 if (status & MCI_DATAEND) {
159 mmci_stop_data(host);
160
161 if (!data->stop) {
162 mmci_request_end(host, data->mrq);
163 } else {
164 mmci_start_command(host, data->stop, 0);
165 }
166 }
167}
168
169static void
170mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
171 unsigned int status)
172{
173 void __iomem *base = host->base;
174
175 host->cmd = NULL;
176
177 cmd->resp[0] = readl(base + MMCIRESPONSE0);
178 cmd->resp[1] = readl(base + MMCIRESPONSE1);
179 cmd->resp[2] = readl(base + MMCIRESPONSE2);
180 cmd->resp[3] = readl(base + MMCIRESPONSE3);
181
182 if (status & MCI_CMDTIMEOUT) {
183 cmd->error = MMC_ERR_TIMEOUT;
184 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
185 cmd->error = MMC_ERR_BADCRC;
186 }
187
188 if (!cmd->data || cmd->error != MMC_ERR_NONE) {
189 mmci_request_end(host, cmd->mrq);
190 } else if (!(cmd->data->flags & MMC_DATA_READ)) {
191 mmci_start_data(host, cmd->data);
192 }
193}
194
195static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
196{
197 void __iomem *base = host->base;
198 char *ptr = buffer;
199 u32 status;
200
201 do {
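		/*
		 * MMCIFIFOCNT holds the number of words still to be
		 * transferred, so subtracting it from the bytes we have
		 * left gives the data already waiting in the FIFO.
		 */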
202 int count = host->size - (readl(base + MMCIFIFOCNT) << 2);
203
204 if (count > remain)
205 count = remain;
206
207 if (count <= 0)
208 break;
209
210 readsl(base + MMCIFIFO, ptr, count >> 2);
211
212 ptr += count;
213 remain -= count;
214
215 if (remain == 0)
216 break;
217
218 status = readl(base + MMCISTATUS);
219 } while (status & MCI_RXDATAAVLBL);
220
221 return ptr - buffer;
222}
223
224static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
225{
226 void __iomem *base = host->base;
227 char *ptr = buffer;
228
229 do {
230 unsigned int count, maxcnt;
231
232 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
233 count = min(remain, maxcnt);
234
235 writesl(base + MMCIFIFO, ptr, count >> 2);
236
237 ptr += count;
238 remain -= count;
239
240 if (remain == 0)
241 break;
242
243 status = readl(base + MMCISTATUS);
244 } while (status & MCI_TXFIFOHALFEMPTY);
245
246 return ptr - buffer;
247}
248
249/*
250 * PIO data transfer IRQ handler.
251 */
252static irqreturn_t mmci_pio_irq(int irq, void *dev_id, struct pt_regs *regs)
253{
254 struct mmci_host *host = dev_id;
255 void __iomem *base = host->base;
256 u32 status;
257
258 status = readl(base + MMCISTATUS);
259
260 DBG(host, "irq1 %08x\n", status);
261
262 do {
263 unsigned long flags;
264 unsigned int remain, len;
265 char *buffer;
266
267 /*
268 * For write, we only need to test the half-empty flag
269 * here - if the FIFO is completely empty, then by
270 * definition it is more than half empty.
271 *
272 * For read, check for data available.
273 */
274 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
275 break;
276
277 /*
278 * Map the current scatter buffer.
279 */
280 buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
281 remain = host->sg_ptr->length - host->sg_off;
282
283 len = 0;
284 if (status & MCI_RXACTIVE)
285 len = mmci_pio_read(host, buffer, remain);
286 if (status & MCI_TXACTIVE)
287 len = mmci_pio_write(host, buffer, remain, status);
288
289 /*
290 * Unmap the buffer.
291 */
292 mmci_kunmap_atomic(host, &flags);
293
294 host->sg_off += len;
295 host->size -= len;
296 remain -= len;
297
298 if (remain)
299 break;
300
301 if (!mmci_next_sg(host))
302 break;
303
304 status = readl(base + MMCISTATUS);
305 } while (1);
306
307 /*
308 * If we're nearing the end of the read, switch to
309 * "any data available" mode.
310 */
311 if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
312 writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
313
314 /*
315 * If we run out of data, disable the data IRQs; this
316 * prevents a race where the FIFO becomes empty before
317 * the chip itself has disabled the data path, and
318 * stops us racing with our data end IRQ.
319 */
320 if (host->size == 0) {
321 writel(0, base + MMCIMASK1);
322 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
323 }
324
325 return IRQ_HANDLED;
326}
327
328/*
329 * Handle completion of command and data transfers.
330 */
331static irqreturn_t mmci_irq(int irq, void *dev_id, struct pt_regs *regs)
332{
333 struct mmci_host *host = dev_id;
334 u32 status;
335 int ret = 0;
336
337 spin_lock(&host->lock);
338
339 do {
340 struct mmc_command *cmd;
341 struct mmc_data *data;
342
343 status = readl(host->base + MMCISTATUS);
344 status &= readl(host->base + MMCIMASK0);
345 writel(status, host->base + MMCICLEAR);
346
347 DBG(host, "irq0 %08x\n", status);
348
349 data = host->data;
350 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
351 MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
352 mmci_data_irq(host, data, status);
353
354 cmd = host->cmd;
355 if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
356 mmci_cmd_irq(host, cmd, status);
357
358 ret = 1;
359 } while (status);
360
361 spin_unlock(&host->lock);
362
363 return IRQ_RETVAL(ret);
364}
365
366static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
367{
368 struct mmci_host *host = mmc_priv(mmc);
369
370 WARN_ON(host->mrq != NULL);
371
372 spin_lock_irq(&host->lock);
373
374 host->mrq = mrq;
375
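	/*
	 * Reads have their data path armed before the command is sent;
	 * writes are started from the command-done interrupt instead
	 * (see mmci_cmd_irq()).
	 */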
376 if (mrq->data && mrq->data->flags & MMC_DATA_READ)
377 mmci_start_data(host, mrq->data);
378
379 mmci_start_command(host, mrq->cmd, 0);
380
381 spin_unlock_irq(&host->lock);
382}
383
384static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
385{
386 struct mmci_host *host = mmc_priv(mmc);
387 u32 clk = 0, pwr = 0;
388
389 DBG(host, "clock %uHz busmode %u powermode %u Vdd %u\n",
390 ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
391
392 if (ios->clock) {
393 if (ios->clock >= host->mclk) {
394 clk = MCI_CLK_BYPASS;
395 host->cclk = host->mclk;
396 } else {
397 clk = host->mclk / (2 * ios->clock) - 1;
398 if (clk > 256)
399 clk = 255;
400 host->cclk = host->mclk / (2 * (clk + 1));
401 }
402 clk |= MCI_CLK_ENABLE;
403 }
404
405 if (host->plat->translate_vdd)
406 pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
407
408 switch (ios->power_mode) {
409 case MMC_POWER_OFF:
410 break;
411 case MMC_POWER_UP:
412 pwr |= MCI_PWR_UP;
413 break;
414 case MMC_POWER_ON:
415 pwr |= MCI_PWR_ON;
416 break;
417 }
418
419 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
420 pwr |= MCI_ROD;
421
422 writel(clk, host->base + MMCICLOCK);
423
424 if (host->pwr != pwr) {
425 host->pwr = pwr;
426 writel(pwr, host->base + MMCIPOWER);
427 }
428}
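
/*
 * A minimal sketch (illustrative name, not part of the driver) of the
 * PL18x clock setup in mmci_set_ios() above: the bus clock is
 * mclk / (2 * (divider + 1)), so the divider for a requested rate is
 * mclk / (2 * rate) - 1, clamped to the 8-bit register field. Callers
 * are assumed to handle rate == 0 and rate >= mclk (bypass) separately,
 * as the function above does.
 */
static inline unsigned int mmci_clk_divider(unsigned int mclk,
					    unsigned int rate,
					    unsigned int *cclk)
{
	unsigned int div = mclk / (2 * rate) - 1;

	if (div > 255)
		div = 255;
	*cclk = mclk / (2 * (div + 1));	/* resulting bus clock */
	return div;
}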
429
430static struct mmc_host_ops mmci_ops = {
431 .request = mmci_request,
432 .set_ios = mmci_set_ios,
433};
434
435static void mmci_check_status(unsigned long data)
436{
437 struct mmci_host *host = (struct mmci_host *)data;
438 unsigned int status;
439
440 status = host->plat->status(mmc_dev(host->mmc));
441 if (status ^ host->oldstat)
442 mmc_detect_change(host->mmc);
443
444 host->oldstat = status;
445 mod_timer(&host->timer, jiffies + HZ);
446}
447
448static int mmci_probe(struct amba_device *dev, void *id)
449{
450 struct mmc_platform_data *plat = dev->dev.platform_data;
451 struct mmci_host *host;
452 struct mmc_host *mmc;
453 int ret;
454
455 /* must have platform data */
456 if (!plat) {
457 ret = -EINVAL;
458 goto out;
459 }
460
461 ret = amba_request_regions(dev, DRIVER_NAME);
462 if (ret)
463 goto out;
464
465 mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
466 if (!mmc) {
467 ret = -ENOMEM;
468 goto rel_regions;
469 }
470
471 host = mmc_priv(mmc);
472 host->clk = clk_get(&dev->dev, "MCLK");
473 if (IS_ERR(host->clk)) {
474 ret = PTR_ERR(host->clk);
475 host->clk = NULL;
476 goto host_free;
477 }
478
479 ret = clk_use(host->clk);
480 if (ret)
481 goto clk_free;
482
483 ret = clk_enable(host->clk);
484 if (ret)
485 goto clk_unuse;
486
487 host->plat = plat;
488 host->mclk = clk_get_rate(host->clk);
489 host->mmc = mmc;
490 host->base = ioremap(dev->res.start, SZ_4K);
491 if (!host->base) {
492 ret = -ENOMEM;
493 goto clk_disable;
494 }
495
496 mmc->ops = &mmci_ops;
497 mmc->f_min = (host->mclk + 511) / 512;
498 mmc->f_max = min(host->mclk, fmax);
499 mmc->ocr_avail = plat->ocr_mask;
500
501 /*
502 * We can do SGIO
503 */
504 mmc->max_hw_segs = 16;
505 mmc->max_phys_segs = NR_SG;
506
507 /*
508 * Since we only have a 16-bit data length register, we must
509 * ensure that we don't exceed 2^16-1 bytes in a single request.
510 * Choose 64 (512-byte) sectors as the limit.
511 */
512 mmc->max_sectors = 64;
513
514 /*
515 * Set the maximum segment size. Since we aren't doing DMA
516 * (yet) we are only limited by the data length register.
517 */
518 mmc->max_seg_size = mmc->max_sectors << 9;
519
520 spin_lock_init(&host->lock);
521
522 writel(0, host->base + MMCIMASK0);
523 writel(0, host->base + MMCIMASK1);
524 writel(0xfff, host->base + MMCICLEAR);
525
526 ret = request_irq(dev->irq[0], mmci_irq, SA_SHIRQ, DRIVER_NAME " (cmd)", host);
527 if (ret)
528 goto unmap;
529
530 ret = request_irq(dev->irq[1], mmci_pio_irq, SA_SHIRQ, DRIVER_NAME " (pio)", host);
531 if (ret)
532 goto irq0_free;
533
534 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
535
536 amba_set_drvdata(dev, mmc);
537
538 mmc_add_host(mmc);
539
540 printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%08lx irq %d,%d\n",
541 mmc->host_name, amba_rev(dev), amba_config(dev),
542 dev->res.start, dev->irq[0], dev->irq[1]);
543
544 init_timer(&host->timer);
545 host->timer.data = (unsigned long)host;
546 host->timer.function = mmci_check_status;
547 host->timer.expires = jiffies + HZ;
548 add_timer(&host->timer);
549
550 return 0;
551
552 irq0_free:
553 free_irq(dev->irq[0], host);
554 unmap:
555 iounmap(host->base);
556 clk_disable:
557 clk_disable(host->clk);
558 clk_unuse:
559 clk_unuse(host->clk);
560 clk_free:
561 clk_put(host->clk);
562 host_free:
563 mmc_free_host(mmc);
564 rel_regions:
565 amba_release_regions(dev);
566 out:
567 return ret;
568}
569
570static int mmci_remove(struct amba_device *dev)
571{
572 struct mmc_host *mmc = amba_get_drvdata(dev);
573
574 amba_set_drvdata(dev, NULL);
575
576 if (mmc) {
577 struct mmci_host *host = mmc_priv(mmc);
578
579 del_timer_sync(&host->timer);
580
581 mmc_remove_host(mmc);
582
583 writel(0, host->base + MMCIMASK0);
584 writel(0, host->base + MMCIMASK1);
585
586 writel(0, host->base + MMCICOMMAND);
587 writel(0, host->base + MMCIDATACTRL);
588
589 free_irq(dev->irq[0], host);
590 free_irq(dev->irq[1], host);
591
592 iounmap(host->base);
593 clk_disable(host->clk);
594 clk_unuse(host->clk);
595 clk_put(host->clk);
596
597 mmc_free_host(mmc);
598
599 amba_release_regions(dev);
600 }
601
602 return 0;
603}
604
605#ifdef CONFIG_PM
606static int mmci_suspend(struct amba_device *dev, u32 state)
607{
608 struct mmc_host *mmc = amba_get_drvdata(dev);
609 int ret = 0;
610
611 if (mmc) {
612 struct mmci_host *host = mmc_priv(mmc);
613
614 ret = mmc_suspend_host(mmc, state);
615 if (ret == 0)
616 writel(0, host->base + MMCIMASK0);
617 }
618
619 return ret;
620}
621
622static int mmci_resume(struct amba_device *dev)
623{
624 struct mmc_host *mmc = amba_get_drvdata(dev);
625 int ret = 0;
626
627 if (mmc) {
628 struct mmci_host *host = mmc_priv(mmc);
629
630 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
631
632 ret = mmc_resume_host(mmc);
633 }
634
635 return ret;
636}
637#else
638#define mmci_suspend NULL
639#define mmci_resume NULL
640#endif
641
642static struct amba_id mmci_ids[] = {
643 {
644 .id = 0x00041180,
645 .mask = 0x000fffff,
646 },
647 {
648 .id = 0x00041181,
649 .mask = 0x000fffff,
650 },
651 { 0, 0 },
652};
653
654static struct amba_driver mmci_driver = {
655 .drv = {
656 .name = DRIVER_NAME,
657 },
658 .probe = mmci_probe,
659 .remove = mmci_remove,
660 .suspend = mmci_suspend,
661 .resume = mmci_resume,
662 .id_table = mmci_ids,
663};
664
665static int __init mmci_init(void)
666{
667 return amba_driver_register(&mmci_driver);
668}
669
670static void __exit mmci_exit(void)
671{
672 amba_driver_unregister(&mmci_driver);
673}
674
675module_init(mmci_init);
676module_exit(mmci_exit);
677module_param(fmax, uint, 0444);
678
679MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
680MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/mmci.h b/drivers/mmc/mmci.h
new file mode 100644
index 000000000000..4589bbd68192
--- /dev/null
+++ b/drivers/mmc/mmci.h
@@ -0,0 +1,179 @@
1/*
2 * linux/drivers/mmc/mmci.h - ARM PrimeCell MMCI PL180/1 driver
3 *
4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#define MMCIPOWER 0x000
11#define MCI_PWR_OFF 0x00
12#define MCI_PWR_UP 0x02
13#define MCI_PWR_ON 0x03
14#define MCI_OD (1 << 6)
15#define MCI_ROD (1 << 7)
16
17#define MMCICLOCK 0x004
18#define MCI_CLK_ENABLE (1 << 8)
19#define MCI_CLK_PWRSAVE (1 << 9)
20#define MCI_CLK_BYPASS (1 << 10)
21
22#define MMCIARGUMENT 0x008
23#define MMCICOMMAND 0x00c
24#define MCI_CPSM_RESPONSE (1 << 6)
25#define MCI_CPSM_LONGRSP (1 << 7)
26#define MCI_CPSM_INTERRUPT (1 << 8)
27#define MCI_CPSM_PENDING (1 << 9)
28#define MCI_CPSM_ENABLE (1 << 10)
29
30#define MMCIRESPCMD 0x010
31#define MMCIRESPONSE0 0x014
32#define MMCIRESPONSE1 0x018
33#define MMCIRESPONSE2 0x01c
34#define MMCIRESPONSE3 0x020
35#define MMCIDATATIMER 0x024
36#define MMCIDATALENGTH 0x028
37#define MMCIDATACTRL 0x02c
38#define MCI_DPSM_ENABLE (1 << 0)
39#define MCI_DPSM_DIRECTION (1 << 1)
40#define MCI_DPSM_MODE (1 << 2)
41#define MCI_DPSM_DMAENABLE (1 << 3)
42
43#define MMCIDATACNT 0x030
44#define MMCISTATUS 0x034
45#define MCI_CMDCRCFAIL (1 << 0)
46#define MCI_DATACRCFAIL (1 << 1)
47#define MCI_CMDTIMEOUT (1 << 2)
48#define MCI_DATATIMEOUT (1 << 3)
49#define MCI_TXUNDERRUN (1 << 4)
50#define MCI_RXOVERRUN (1 << 5)
51#define MCI_CMDRESPEND (1 << 6)
52#define MCI_CMDSENT (1 << 7)
53#define MCI_DATAEND (1 << 8)
54#define MCI_DATABLOCKEND (1 << 10)
55#define MCI_CMDACTIVE (1 << 11)
56#define MCI_TXACTIVE (1 << 12)
57#define MCI_RXACTIVE (1 << 13)
58#define MCI_TXFIFOHALFEMPTY (1 << 14)
59#define MCI_RXFIFOHALFFULL (1 << 15)
60#define MCI_TXFIFOFULL (1 << 16)
61#define MCI_RXFIFOFULL (1 << 17)
62#define MCI_TXFIFOEMPTY (1 << 18)
63#define MCI_RXFIFOEMPTY (1 << 19)
64#define MCI_TXDATAAVLBL (1 << 20)
65#define MCI_RXDATAAVLBL (1 << 21)
66
67#define MMCICLEAR 0x038
68#define MCI_CMDCRCFAILCLR (1 << 0)
69#define MCI_DATACRCFAILCLR (1 << 1)
70#define MCI_CMDTIMEOUTCLR (1 << 2)
71#define MCI_DATATIMEOUTCLR (1 << 3)
72#define MCI_TXUNDERRUNCLR (1 << 4)
73#define MCI_RXOVERRUNCLR (1 << 5)
74#define MCI_CMDRESPENDCLR (1 << 6)
75#define MCI_CMDSENTCLR (1 << 7)
76#define MCI_DATAENDCLR (1 << 8)
77#define MCI_DATABLOCKENDCLR (1 << 10)
78
79#define MMCIMASK0 0x03c
80#define MCI_CMDCRCFAILMASK (1 << 0)
81#define MCI_DATACRCFAILMASK (1 << 1)
82#define MCI_CMDTIMEOUTMASK (1 << 2)
83#define MCI_DATATIMEOUTMASK (1 << 3)
84#define MCI_TXUNDERRUNMASK (1 << 4)
85#define MCI_RXOVERRUNMASK (1 << 5)
86#define MCI_CMDRESPENDMASK (1 << 6)
87#define MCI_CMDSENTMASK (1 << 7)
88#define MCI_DATAENDMASK (1 << 8)
89#define MCI_DATABLOCKENDMASK (1 << 10)
90#define MCI_CMDACTIVEMASK (1 << 11)
91#define MCI_TXACTIVEMASK (1 << 12)
92#define MCI_RXACTIVEMASK (1 << 13)
93#define MCI_TXFIFOHALFEMPTYMASK (1 << 14)
94#define MCI_RXFIFOHALFFULLMASK (1 << 15)
95#define MCI_TXFIFOFULLMASK (1 << 16)
96#define MCI_RXFIFOFULLMASK (1 << 17)
97#define MCI_TXFIFOEMPTYMASK (1 << 18)
98#define MCI_RXFIFOEMPTYMASK (1 << 19)
99#define MCI_TXDATAAVLBLMASK (1 << 20)
100#define MCI_RXDATAAVLBLMASK (1 << 21)
101
102#define MMCIMASK1 0x040
103#define MMCIFIFOCNT 0x048
104#define MMCIFIFO 0x080 /* to 0x0bc */
105
106#define MCI_IRQENABLE \
107 (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
108 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
109 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
110
111/*
112 * The size of the FIFO in bytes.
113 */
114#define MCI_FIFOSIZE (16*4)
115
116#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)
117
118#define NR_SG 16
119
120struct clk;
121
122struct mmci_host {
123 void __iomem *base;
124 struct mmc_request *mrq;
125 struct mmc_command *cmd;
126 struct mmc_data *data;
127 struct mmc_host *mmc;
128 struct clk *clk;
129
130 unsigned int data_xfered;
131
132 spinlock_t lock;
133
134 unsigned int mclk;
135 unsigned int cclk;
136 u32 pwr;
137 struct mmc_platform_data *plat;
138
139 struct timer_list timer;
140 unsigned int oldstat;
141
142 unsigned int sg_len;
143
144 /* pio stuff */
145 struct scatterlist *sg_ptr;
146 unsigned int sg_off;
147 unsigned int size;
148};
149
150static inline void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
151{
152 /*
153 * Ideally, we want the higher levels to pass us a scatter list.
154 */
155 host->sg_len = data->sg_len;
156 host->sg_ptr = data->sg;
157 host->sg_off = 0;
158}
159
160static inline int mmci_next_sg(struct mmci_host *host)
161{
162 host->sg_ptr++;
163 host->sg_off = 0;
164 return --host->sg_len;
165}
166
167static inline char *mmci_kmap_atomic(struct mmci_host *host, unsigned long *flags)
168{
169 struct scatterlist *sg = host->sg_ptr;
170
171 local_irq_save(*flags);
172 return kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
173}
174
175static inline void mmci_kunmap_atomic(struct mmci_host *host, unsigned long *flags)
176{
177 kunmap_atomic(host->sg_ptr->page, KM_BIO_SRC_IRQ);
178 local_irq_restore(*flags);
179}
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
new file mode 100644
index 000000000000..f76deedf5355
--- /dev/null
+++ b/drivers/mmc/pxamci.c
@@ -0,0 +1,610 @@
1/*
2 * linux/drivers/mmc/pxa.c - PXA MMCI driver
3 *
4 * Copyright (C) 2003 Russell King, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This hardware is really sick:
11 * - No way to clear interrupts.
12 * - Have to turn off the clock whenever we touch the device.
13 * - Doesn't tell you how many data blocks were transferred.
14 * Yuck!
15 *
16 * 1 and 3 byte data transfers not supported
17 * max block length up to 1023
18 */
19#include <linux/config.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/ioport.h>
23#include <linux/device.h>
24#include <linux/delay.h>
25#include <linux/interrupt.h>
26#include <linux/dma-mapping.h>
27#include <linux/mmc/host.h>
28#include <linux/mmc/protocol.h>
29
30#include <asm/dma.h>
31#include <asm/io.h>
32#include <asm/irq.h>
33#include <asm/scatterlist.h>
34#include <asm/sizes.h>
35
36#include <asm/arch/pxa-regs.h>
37#include <asm/arch/mmc.h>
38
39#include "pxamci.h"
40
41#ifdef CONFIG_MMC_DEBUG
42#define DBG(x...) printk(KERN_DEBUG x)
43#else
44#define DBG(x...) do { } while (0)
45#endif
46
47#define DRIVER_NAME "pxa2xx-mci"
48
49#define NR_SG 1
50
51struct pxamci_host {
52 struct mmc_host *mmc;
53 spinlock_t lock;
54 struct resource *res;
55 void __iomem *base;
56 int irq;
57 int dma;
58 unsigned int clkrt;
59 unsigned int cmdat;
60 unsigned int imask;
61 unsigned int power_mode;
62 struct pxamci_platform_data *pdata;
63
64 struct mmc_request *mrq;
65 struct mmc_command *cmd;
66 struct mmc_data *data;
67
68 dma_addr_t sg_dma;
69 struct pxa_dma_desc *sg_cpu;
70 unsigned int dma_len;
71
72 unsigned int dma_dir;
73};
74
75static inline unsigned int ns_to_clocks(unsigned int ns)
76{
77 return (ns * (CLOCKRATE / 1000000) + 999) / 1000;
78}
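
/*
 * A worked example of the conversion above, assuming the 20MHz
 * CLOCKRATE used outside PXA27x (illustrative only): a 100ms card read
 * timeout gives ns_to_clocks(100000000) == 2000000 MMC clocks, which
 * pxamci_setup_data() below rounds up into units of 256 clocks before
 * writing MMC_RDTO: (2000000 + 255) / 256 == 7813.
 */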
79
80static void pxamci_stop_clock(struct pxamci_host *host)
81{
82 if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
83 unsigned long timeout = 10000;
84 unsigned int v;
85
86 writel(STOP_CLOCK, host->base + MMC_STRPCL);
87
88 do {
89 v = readl(host->base + MMC_STAT);
90 if (!(v & STAT_CLK_EN))
91 break;
92 udelay(1);
93 } while (timeout--);
94
95 if (v & STAT_CLK_EN)
96 dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
97 }
98}
99
100static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
101{
102 unsigned long flags;
103
104 spin_lock_irqsave(&host->lock, flags);
105 host->imask &= ~mask;
106 writel(host->imask, host->base + MMC_I_MASK);
107 spin_unlock_irqrestore(&host->lock, flags);
108}
109
110static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
111{
112 unsigned long flags;
113
114 spin_lock_irqsave(&host->lock, flags);
115 host->imask |= mask;
116 writel(host->imask, host->base + MMC_I_MASK);
117 spin_unlock_irqrestore(&host->lock, flags);
118}
119
120static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
121{
122 unsigned int nob = data->blocks;
123 unsigned int timeout;
124 u32 dcmd;
125 int i;
126
127 host->data = data;
128
129 if (data->flags & MMC_DATA_STREAM)
130 nob = 0xffff;
131
132 writel(nob, host->base + MMC_NOB);
133 writel(1 << data->blksz_bits, host->base + MMC_BLKLEN);
134
135 timeout = ns_to_clocks(data->timeout_ns) + data->timeout_clks;
136 writel((timeout + 255) / 256, host->base + MMC_RDTO);
137
138 if (data->flags & MMC_DATA_READ) {
139 host->dma_dir = DMA_FROM_DEVICE;
140 dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG;
141 DRCMRTXMMC = 0;
142 DRCMRRXMMC = host->dma | DRCMR_MAPVLD;
143 } else {
144 host->dma_dir = DMA_TO_DEVICE;
145 dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC;
146 DRCMRRXMMC = 0;
147 DRCMRTXMMC = host->dma | DRCMR_MAPVLD;
148 }
149
150 dcmd |= DCMD_BURST32 | DCMD_WIDTH1;
151
152 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
153 host->dma_dir);
154
155 for (i = 0; i < host->dma_len; i++) {
156 if (data->flags & MMC_DATA_READ) {
157 host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
158 host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
159 } else {
160 host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
161 host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
162 }
163 host->sg_cpu[i].dcmd = dcmd | sg_dma_len(&data->sg[i]);
164 host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
165 sizeof(struct pxa_dma_desc);
166 }
167 host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
168 wmb();
169
170 DDADR(host->dma) = host->sg_dma;
171 DCSR(host->dma) = DCSR_RUN;
172}
173
174static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
175{
176 WARN_ON(host->cmd != NULL);
177 host->cmd = cmd;
178
179 if (cmd->flags & MMC_RSP_BUSY)
180 cmdat |= CMDAT_BUSY;
181
182 switch (cmd->flags & (MMC_RSP_MASK | MMC_RSP_CRC)) {
183 case MMC_RSP_SHORT | MMC_RSP_CRC:
184 cmdat |= CMDAT_RESP_SHORT;
185 break;
186 case MMC_RSP_SHORT:
187 cmdat |= CMDAT_RESP_R3;
188 break;
189 case MMC_RSP_LONG | MMC_RSP_CRC:
190 cmdat |= CMDAT_RESP_R2;
191 break;
192 default:
193 break;
194 }
195
196 writel(cmd->opcode, host->base + MMC_CMD);
197 writel(cmd->arg >> 16, host->base + MMC_ARGH);
198 writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
199 writel(cmdat, host->base + MMC_CMDAT);
200 writel(host->clkrt, host->base + MMC_CLKRT);
201
202 writel(START_CLOCK, host->base + MMC_STRPCL);
203
204 pxamci_enable_irq(host, END_CMD_RES);
205}
206
207static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
208{
209 DBG("PXAMCI: request done\n");
210 host->mrq = NULL;
211 host->cmd = NULL;
212 host->data = NULL;
213 mmc_request_done(host->mmc, mrq);
214}
215
216static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
217{
218 struct mmc_command *cmd = host->cmd;
219 int i;
220 u32 v;
221
222 if (!cmd)
223 return 0;
224
225 host->cmd = NULL;
226
227 /*
228	 * Did I mention this is sick? We always need to
229 * discard the upper 8 bits of the first 16-bit word.
230 */
231 v = readl(host->base + MMC_RES) & 0xffff;
232 for (i = 0; i < 4; i++) {
233 u32 w1 = readl(host->base + MMC_RES) & 0xffff;
234 u32 w2 = readl(host->base + MMC_RES) & 0xffff;
235 cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
236 v = w2;
237 }
238
239 if (stat & STAT_TIME_OUT_RESPONSE) {
240 cmd->error = MMC_ERR_TIMEOUT;
241 } else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
242#ifdef CONFIG_PXA27x
243 /*
244 * workaround for erratum #42:
245 * Intel PXA27x Family Processor Specification Update Rev 001
246 */
247 if (cmd->opcode == MMC_ALL_SEND_CID ||
248 cmd->opcode == MMC_SEND_CSD ||
249 cmd->opcode == MMC_SEND_CID) {
250 /* a bogus CRC error can appear if the msb of
251 the 15 byte response is a one */
252 if ((cmd->resp[0] & 0x80000000) == 0)
253 cmd->error = MMC_ERR_BADCRC;
254 } else {
255 DBG("ignoring CRC from command %d - *risky*\n",cmd->opcode);
256 }
257#else
258 cmd->error = MMC_ERR_BADCRC;
259#endif
260 }
261
262 pxamci_disable_irq(host, END_CMD_RES);
263 if (host->data && cmd->error == MMC_ERR_NONE) {
264 pxamci_enable_irq(host, DATA_TRAN_DONE);
265 } else {
266 pxamci_finish_request(host, host->mrq);
267 }
268
269 return 1;
270}
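
/*
 * A minimal sketch (not part of the driver; name and array form are
 * illustrative) of the response reassembly in pxamci_cmd_done() above:
 * the controller delivers the response as consecutive 16-bit reads of
 * MMC_RES, and each 32-bit resp[] word is built from the low byte of
 * the previous 16-bit word, one full fresh word and the high byte of
 * the next.
 */
static inline void pxamci_unpack_response(const u16 words[9], u32 resp[4])
{
	u32 v = words[0];	/* the upper 8 bits never reach resp[] */
	int i;

	for (i = 0; i < 4; i++) {
		u32 w1 = words[1 + 2 * i];
		u32 w2 = words[2 + 2 * i];

		/* low byte of v | 16 bits of w1 | high byte of w2 */
		resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}
}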
271
272static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
273{
274 struct mmc_data *data = host->data;
275
276 if (!data)
277 return 0;
278
279 DCSR(host->dma) = 0;
280 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
281 host->dma_dir);
282
283 if (stat & STAT_READ_TIME_OUT)
284 data->error = MMC_ERR_TIMEOUT;
285 else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
286 data->error = MMC_ERR_BADCRC;
287
288 /*
289 * There appears to be a hardware design bug here. There seems to
290 * be no way to find out how much data was transferred to the card.
291 * This means that if there was an error on any block, we mark all
292 * data blocks as being in error.
293 */
294 if (data->error == MMC_ERR_NONE)
295 data->bytes_xfered = data->blocks << data->blksz_bits;
296 else
297 data->bytes_xfered = 0;
298
299 pxamci_disable_irq(host, DATA_TRAN_DONE);
300
301 host->data = NULL;
302 if (host->mrq->stop && data->error == MMC_ERR_NONE) {
303 pxamci_stop_clock(host);
304 pxamci_start_cmd(host, host->mrq->stop, 0);
305 } else {
306 pxamci_finish_request(host, host->mrq);
307 }
308
309 return 1;
310}
311
312static irqreturn_t pxamci_irq(int irq, void *devid, struct pt_regs *regs)
313{
314 struct pxamci_host *host = devid;
315 unsigned int ireg;
316 int handled = 0;
317
318 ireg = readl(host->base + MMC_I_REG);
319
320 DBG("PXAMCI: irq %08x\n", ireg);
321
322 if (ireg) {
323 unsigned stat = readl(host->base + MMC_STAT);
324
325 DBG("PXAMCI: stat %08x\n", stat);
326
327 if (ireg & END_CMD_RES)
328 handled |= pxamci_cmd_done(host, stat);
329 if (ireg & DATA_TRAN_DONE)
330 handled |= pxamci_data_done(host, stat);
331 }
332
333 return IRQ_RETVAL(handled);
334}
335
336static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
337{
338 struct pxamci_host *host = mmc_priv(mmc);
339 unsigned int cmdat;
340
341 WARN_ON(host->mrq != NULL);
342
343 host->mrq = mrq;
344
345 pxamci_stop_clock(host);
346
347 cmdat = host->cmdat;
348 host->cmdat &= ~CMDAT_INIT;
349
350 if (mrq->data) {
351 pxamci_setup_data(host, mrq->data);
352
353 cmdat &= ~CMDAT_BUSY;
354 cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
355 if (mrq->data->flags & MMC_DATA_WRITE)
356 cmdat |= CMDAT_WRITE;
357
358 if (mrq->data->flags & MMC_DATA_STREAM)
359 cmdat |= CMDAT_STREAM;
360 }
361
362 pxamci_start_cmd(host, mrq->cmd, cmdat);
363}
364
365static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
366{
367 struct pxamci_host *host = mmc_priv(mmc);
368
369 DBG("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
370 ios->clock, ios->power_mode, ios->vdd / 100,
371 ios->vdd % 100);
372
373 if (ios->clock) {
374 unsigned int clk = CLOCKRATE / ios->clock;
375 if (CLOCKRATE / clk > ios->clock)
376 clk <<= 1;
377 host->clkrt = fls(clk) - 1;
378 pxa_set_cken(CKEN12_MMC, 1);
379
380 /*
381 * we write clkrt on the next command
382 */
383 } else {
384 pxamci_stop_clock(host);
385 pxa_set_cken(CKEN12_MMC, 0);
386 }
387
388 if (host->power_mode != ios->power_mode) {
389 host->power_mode = ios->power_mode;
390
391 if (host->pdata && host->pdata->setpower)
392 host->pdata->setpower(mmc->dev, ios->vdd);
393
394 if (ios->power_mode == MMC_POWER_ON)
395 host->cmdat |= CMDAT_INIT;
396 }
397
398 DBG("pxamci_set_ios: clkrt = %x cmdat = %x\n",
399 host->clkrt, host->cmdat);
400}
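
/*
 * A minimal sketch (name illustrative, not part of the driver) of the
 * MMC_CLKRT computation in pxamci_set_ios() above: the controller can
 * only divide CLOCKRATE by a power of two, so the 3-bit register takes
 * the log2 of a divider derived from the requested rate.
 */
static inline unsigned int pxamci_calc_clkrt(unsigned int rate)
{
	unsigned int div = CLOCKRATE / rate;

	/* compensate for the integer division rounding the divider down */
	if (CLOCKRATE / div > rate)
		div <<= 1;

	/* e.g. rate 312500Hz -> div 64 -> clkrt 6 */
	return fls(div) - 1;
}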
401
402static struct mmc_host_ops pxamci_ops = {
403 .request = pxamci_request,
404 .set_ios = pxamci_set_ios,
405};
406
407static void pxamci_dma_irq(int dma, void *devid, struct pt_regs *regs)
408{
409 printk(KERN_ERR "DMA%d: IRQ???\n", dma);
410 DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
411}
412
413static irqreturn_t pxamci_detect_irq(int irq, void *devid, struct pt_regs *regs)
414{
415 mmc_detect_change(devid);
416 return IRQ_HANDLED;
417}
418
419static int pxamci_probe(struct device *dev)
420{
421 struct platform_device *pdev = to_platform_device(dev);
422 struct mmc_host *mmc;
423 struct pxamci_host *host = NULL;
424 struct resource *r;
425 int ret, irq;
426
427 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
428 irq = platform_get_irq(pdev, 0);
429 if (!r || irq == NO_IRQ)
430 return -ENXIO;
431
432 r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
433 if (!r)
434 return -EBUSY;
435
436 mmc = mmc_alloc_host(sizeof(struct pxamci_host), dev);
437 if (!mmc) {
438 ret = -ENOMEM;
439 goto out;
440 }
441
442 mmc->ops = &pxamci_ops;
443 mmc->f_min = CLOCKRATE_MIN;
444 mmc->f_max = CLOCKRATE_MAX;
445
446 /*
447 * We can do SG-DMA, but we don't because we never know how much
448 * data we successfully wrote to the card.
449 */
450 mmc->max_phys_segs = NR_SG;
451
452 /*
453 * Our hardware DMA can handle a maximum of one page per SG entry.
454 */
455 mmc->max_seg_size = PAGE_SIZE;
456
457 host = mmc_priv(mmc);
458 host->mmc = mmc;
459 host->dma = -1;
460 host->pdata = pdev->dev.platform_data;
461 mmc->ocr_avail = host->pdata ?
462 host->pdata->ocr_mask :
463 MMC_VDD_32_33|MMC_VDD_33_34;
464
465 host->sg_cpu = dma_alloc_coherent(dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
466 if (!host->sg_cpu) {
467 ret = -ENOMEM;
468 goto out;
469 }
470
471 spin_lock_init(&host->lock);
472 host->res = r;
473 host->irq = irq;
474 host->imask = MMC_I_MASK_ALL;
475
476 host->base = ioremap(r->start, SZ_4K);
477 if (!host->base) {
478 ret = -ENOMEM;
479 goto out;
480 }
481
482 /*
483	 * Ensure that the host controller is shut down, and set it up
484 * with our defaults.
485 */
486 pxamci_stop_clock(host);
487 writel(0, host->base + MMC_SPI);
488 writel(64, host->base + MMC_RESTO);
489 writel(host->imask, host->base + MMC_I_MASK);
490
491 host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
492 pxamci_dma_irq, host);
493 if (host->dma < 0) {
494 ret = -EBUSY;
495 goto out;
496 }
497
498 ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
499 if (ret)
500 goto out;
501
502 dev_set_drvdata(dev, mmc);
503
504 if (host->pdata && host->pdata->init)
505 host->pdata->init(dev, pxamci_detect_irq, mmc);
506
507 mmc_add_host(mmc);
508
509 return 0;
510
511 out:
512 if (host) {
513 if (host->dma >= 0)
514 pxa_free_dma(host->dma);
515 if (host->base)
516 iounmap(host->base);
517 if (host->sg_cpu)
518 dma_free_coherent(dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
519 }
520 if (mmc)
521 mmc_free_host(mmc);
522 release_resource(r);
523 return ret;
524}
525
526static int pxamci_remove(struct device *dev)
527{
528 struct mmc_host *mmc = dev_get_drvdata(dev);
529
530 dev_set_drvdata(dev, NULL);
531
532 if (mmc) {
533 struct pxamci_host *host = mmc_priv(mmc);
534
535 if (host->pdata && host->pdata->exit)
536 host->pdata->exit(dev, mmc);
537
538 mmc_remove_host(mmc);
539
540 pxamci_stop_clock(host);
541 writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
542 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
543 host->base + MMC_I_MASK);
544
545 DRCMRRXMMC = 0;
546 DRCMRTXMMC = 0;
547
548 free_irq(host->irq, host);
549 pxa_free_dma(host->dma);
550 iounmap(host->base);
551 dma_free_coherent(dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
552
553 release_resource(host->res);
554
555 mmc_free_host(mmc);
556 }
557 return 0;
558}
559
560#ifdef CONFIG_PM
561static int pxamci_suspend(struct device *dev, u32 state, u32 level)
562{
563 struct mmc_host *mmc = dev_get_drvdata(dev);
564 int ret = 0;
565
566 if (mmc && level == SUSPEND_DISABLE)
567 ret = mmc_suspend_host(mmc, state);
568
569 return ret;
570}
571
572static int pxamci_resume(struct device *dev, u32 level)
573{
574 struct mmc_host *mmc = dev_get_drvdata(dev);
575 int ret = 0;
576
577 if (mmc && level == RESUME_ENABLE)
578 ret = mmc_resume_host(mmc);
579
580 return ret;
581}
582#else
583#define pxamci_suspend NULL
584#define pxamci_resume NULL
585#endif
586
587static struct device_driver pxamci_driver = {
588 .name = DRIVER_NAME,
589 .bus = &platform_bus_type,
590 .probe = pxamci_probe,
591 .remove = pxamci_remove,
592 .suspend = pxamci_suspend,
593 .resume = pxamci_resume,
594};
595
596static int __init pxamci_init(void)
597{
598 return driver_register(&pxamci_driver);
599}
600
601static void __exit pxamci_exit(void)
602{
603 driver_unregister(&pxamci_driver);
604}
605
606module_init(pxamci_init);
607module_exit(pxamci_exit);
608
609MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
610MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/pxamci.h b/drivers/mmc/pxamci.h
new file mode 100644
index 000000000000..1b163220df2b
--- /dev/null
+++ b/drivers/mmc/pxamci.h
@@ -0,0 +1,124 @@
1#undef MMC_STRPCL
2#undef MMC_STAT
3#undef MMC_CLKRT
4#undef MMC_SPI
5#undef MMC_CMDAT
6#undef MMC_RESTO
7#undef MMC_RDTO
8#undef MMC_BLKLEN
9#undef MMC_NOB
10#undef MMC_PRTBUF
11#undef MMC_I_MASK
12#undef END_CMD_RES
13#undef PRG_DONE
14#undef DATA_TRAN_DONE
15#undef MMC_I_REG
16#undef MMC_CMD
17#undef MMC_ARGH
18#undef MMC_ARGL
19#undef MMC_RES
20#undef MMC_RXFIFO
21#undef MMC_TXFIFO
22
23#define MMC_STRPCL 0x0000
24#define STOP_CLOCK (1 << 0)
25#define START_CLOCK (2 << 0)
26
27#define MMC_STAT 0x0004
28#define STAT_END_CMD_RES (1 << 13)
29#define STAT_PRG_DONE (1 << 12)
30#define STAT_DATA_TRAN_DONE (1 << 11)
31#define STAT_CLK_EN (1 << 8)
32#define STAT_RECV_FIFO_FULL (1 << 7)
33#define STAT_XMIT_FIFO_EMPTY (1 << 6)
34#define STAT_RES_CRC_ERR (1 << 5)
35#define STAT_SPI_READ_ERROR_TOKEN (1 << 4)
36#define STAT_CRC_READ_ERROR (1 << 3)
37#define STAT_CRC_WRITE_ERROR (1 << 2)
38#define STAT_TIME_OUT_RESPONSE (1 << 1)
39#define STAT_READ_TIME_OUT (1 << 0)
40
41#define MMC_CLKRT 0x0008 /* 3 bit */
42
43#define MMC_SPI 0x000c
44#define SPI_CS_ADDRESS (1 << 3)
45#define SPI_CS_EN (1 << 2)
46#define CRC_ON (1 << 1)
47#define SPI_EN (1 << 0)
48
49#define MMC_CMDAT 0x0010
50#define CMDAT_DMAEN (1 << 7)
51#define CMDAT_INIT (1 << 6)
52#define CMDAT_BUSY (1 << 5)
53#define CMDAT_STREAM (1 << 4) /* 1 = stream */
54#define CMDAT_WRITE (1 << 3) /* 1 = write */
55#define CMDAT_DATAEN (1 << 2)
56#define CMDAT_RESP_NONE (0 << 0)
57#define CMDAT_RESP_SHORT (1 << 0)
58#define CMDAT_RESP_R2 (2 << 0)
59#define CMDAT_RESP_R3 (3 << 0)
60
61#define MMC_RESTO 0x0014 /* 7 bit */
62
63#define MMC_RDTO 0x0018 /* 16 bit */
64
65#define MMC_BLKLEN 0x001c /* 10 bit */
66
67#define MMC_NOB 0x0020 /* 16 bit */
68
69#define MMC_PRTBUF 0x0024
70#define BUF_PART_FULL (1 << 0)
71
72#define MMC_I_MASK 0x0028
73
74/*PXA27x MMC interrupts*/
75#define SDIO_SUSPEND_ACK (1 << 12)
76#define SDIO_INT (1 << 11)
77#define RD_STALLED (1 << 10)
78#define RES_ERR (1 << 9)
79#define DAT_ERR (1 << 8)
80#define TINT (1 << 7)
81
82/*PXA2xx MMC interrupts*/
83#define TXFIFO_WR_REQ (1 << 6)
84#define RXFIFO_RD_REQ (1 << 5)
85#define CLK_IS_OFF (1 << 4)
86#define STOP_CMD (1 << 3)
87#define END_CMD_RES (1 << 2)
88#define PRG_DONE (1 << 1)
89#define DATA_TRAN_DONE (1 << 0)
90
91#ifdef CONFIG_PXA27x
92#define MMC_I_MASK_ALL 0x00001fff
93#else
94#define MMC_I_MASK_ALL 0x0000007f
95#endif
96
97#define MMC_I_REG 0x002c
98/* same as MMC_I_MASK */
99
100#define MMC_CMD 0x0030
101
102#define MMC_ARGH 0x0034 /* 16 bit */
103
104#define MMC_ARGL 0x0038 /* 16 bit */
105
106#define MMC_RES 0x003c /* 16 bit */
107
108#define MMC_RXFIFO 0x0040 /* 8 bit */
109
110#define MMC_TXFIFO 0x0044 /* 8 bit */
111
112/*
113 * The base MMC clock rate
114 */
115#ifdef CONFIG_PXA27x
116#define CLOCKRATE_MIN 304688
117#define CLOCKRATE_MAX 19500000
118#else
119#define CLOCKRATE_MIN 312500
120#define CLOCKRATE_MAX 20000000
121#endif
122
123#define CLOCKRATE CLOCKRATE_MAX
124
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
new file mode 100644
index 000000000000..938bca0414e5
--- /dev/null
+++ b/drivers/mmc/wbsd.c
@@ -0,0 +1,1651 @@
1/*
2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
3 *
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 *
11 * Warning!
12 *
13 * Changes to the FIFO system should be done with extreme care since
14 * the hardware is full of bugs related to the FIFO. Known issues are:
15 *
16 * - FIFO size field in FSR is always zero.
17 *
18 * - FIFO interrupts tend not to work as they should. Interrupts are
19 * triggered only for full/empty events, not for threshold values.
20 *
21 * - On APIC systems the FIFO empty interrupt is sometimes lost.
22 */
23
24#include <linux/config.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/device.h>
30#include <linux/interrupt.h>
31#include <linux/delay.h>
32#include <linux/highmem.h>
33#include <linux/mmc/host.h>
34#include <linux/mmc/protocol.h>
35
36#include <asm/io.h>
37#include <asm/dma.h>
38#include <asm/scatterlist.h>
39
40#include "wbsd.h"
41
42#define DRIVER_NAME "wbsd"
43#define DRIVER_VERSION "1.1"
44
45#ifdef CONFIG_MMC_DEBUG
46#define DBG(x...) \
47 printk(KERN_DEBUG DRIVER_NAME ": " x)
48#define DBGF(f, x...) \
49 printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__ , ##x)
50#else
51#define DBG(x...) do { } while (0)
52#define DBGF(x...) do { } while (0)
53#endif
54
55static unsigned int io = 0x248;
56static unsigned int irq = 6;
57static int dma = 2;
58
59#ifdef CONFIG_MMC_DEBUG
60void DBG_REG(int reg, u8 value)
61{
62 int i;
63
64 printk(KERN_DEBUG "wbsd: Register %d: 0x%02X %3d '%c' ",
65 reg, (int)value, (int)value, (value < 0x20)?'.':value);
66
67 for (i = 7;i >= 0;i--)
68 {
69 if (value & (1 << i))
70 printk("x");
71 else
72 printk(".");
73 }
74
75 printk("\n");
76}
77#else
78#define DBG_REG(r, v) do {} while (0)
79#endif
80
81/*
82 * Basic functions
83 */
84
85static inline void wbsd_unlock_config(struct wbsd_host* host)
86{
87 outb(host->unlock_code, host->config);
88 outb(host->unlock_code, host->config);
89}
90
91static inline void wbsd_lock_config(struct wbsd_host* host)
92{
93 outb(LOCK_CODE, host->config);
94}
95
96static inline void wbsd_write_config(struct wbsd_host* host, u8 reg, u8 value)
97{
98 outb(reg, host->config);
99 outb(value, host->config + 1);
100}
101
102static inline u8 wbsd_read_config(struct wbsd_host* host, u8 reg)
103{
104 outb(reg, host->config);
105 return inb(host->config + 1);
106}
107
108static inline void wbsd_write_index(struct wbsd_host* host, u8 index, u8 value)
109{
110 outb(index, host->base + WBSD_IDXR);
111 outb(value, host->base + WBSD_DATAR);
112}
113
114static inline u8 wbsd_read_index(struct wbsd_host* host, u8 index)
115{
116 outb(index, host->base + WBSD_IDXR);
117 return inb(host->base + WBSD_DATAR);
118}
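
/*
 * A small usage sketch of the helpers above (this helper name is
 * illustrative and not part of the driver): most of the code below
 * does read-modify-write cycles through the WBSD_IDXR/WBSD_DATAR pair,
 * e.g. to set bits in the SETUP register.
 */
static inline void wbsd_setup_set(struct wbsd_host* host, u8 bits)
{
	u8 setup = wbsd_read_index(host, WBSD_IDX_SETUP);

	wbsd_write_index(host, WBSD_IDX_SETUP, setup | bits);
}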
119
120/*
121 * Common routines
122 */
123
124static void wbsd_init_device(struct wbsd_host* host)
125{
126 u8 setup, ier;
127
128 /*
129 * Reset chip (SD/MMC part) and fifo.
130 */
131 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
132 setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
133 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
134
135 /*
136 * Read back default clock.
137 */
138 host->clk = wbsd_read_index(host, WBSD_IDX_CLK);
139
140 /*
141 * Power down port.
142 */
143 outb(WBSD_POWER_N, host->base + WBSD_CSR);
144
145 /*
146 * Set maximum timeout.
147 */
148 wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);
149
150 /*
151 * Enable interesting interrupts.
152 */
153 ier = 0;
154 ier |= WBSD_EINT_CARD;
155 ier |= WBSD_EINT_FIFO_THRE;
156 ier |= WBSD_EINT_CCRC;
157 ier |= WBSD_EINT_TIMEOUT;
158 ier |= WBSD_EINT_CRC;
159 ier |= WBSD_EINT_TC;
160
161 outb(ier, host->base + WBSD_EIR);
162
163 /*
164 * Clear interrupts.
165 */
166 inb(host->base + WBSD_ISR);
167}
168
169static void wbsd_reset(struct wbsd_host* host)
170{
171 u8 setup;
172
173 printk(KERN_ERR DRIVER_NAME ": Resetting chip\n");
174
175 /*
176 * Soft reset of chip (SD/MMC part).
177 */
178 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
179 setup |= WBSD_SOFT_RESET;
180 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
181}
182
183static void wbsd_request_end(struct wbsd_host* host, struct mmc_request* mrq)
184{
185 unsigned long dmaflags;
186
187 DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);
188
189 if (host->dma >= 0)
190 {
191 /*
192 * Release ISA DMA controller.
193 */
194 dmaflags = claim_dma_lock();
195 disable_dma(host->dma);
196 clear_dma_ff(host->dma);
197 release_dma_lock(dmaflags);
198
199 /*
200 * Disable DMA on host.
201 */
202 wbsd_write_index(host, WBSD_IDX_DMA, 0);
203 }
204
205 host->mrq = NULL;
206
207 /*
208 * MMC layer might call back into the driver so first unlock.
209 */
210 spin_unlock(&host->lock);
211 mmc_request_done(host->mmc, mrq);
212 spin_lock(&host->lock);
213}
214
215/*
216 * Scatter/gather functions
217 */
218
219static inline void wbsd_init_sg(struct wbsd_host* host, struct mmc_data* data)
220{
221 /*
222 * Get info. about SG list from data structure.
223 */
224 host->cur_sg = data->sg;
225 host->num_sg = data->sg_len;
226
227 host->offset = 0;
228 host->remain = host->cur_sg->length;
229}
230
231static inline int wbsd_next_sg(struct wbsd_host* host)
232{
233 /*
234 * Skip to next SG entry.
235 */
236 host->cur_sg++;
237 host->num_sg--;
238
239 /*
240 * Any entries left?
241 */
242 if (host->num_sg > 0)
243 {
244 host->offset = 0;
245 host->remain = host->cur_sg->length;
246 }
247
248 return host->num_sg;
249}
250
251static inline char* wbsd_kmap_sg(struct wbsd_host* host)
252{
253 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
254 host->cur_sg->offset;
255 return host->mapped_sg;
256}
257
258static inline void wbsd_kunmap_sg(struct wbsd_host* host)
259{
260 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
261}
262
263static inline void wbsd_sg_to_dma(struct wbsd_host* host, struct mmc_data* data)
264{
265 unsigned int len, i, size;
266 struct scatterlist* sg;
267 char* dmabuf = host->dma_buffer;
268 char* sgbuf;
269
270 size = host->size;
271
272 sg = data->sg;
273 len = data->sg_len;
274
275 /*
276	 * Just loop through all entries. The size might not
277	 * cover the entire list, though, so make sure that
278	 * we do not transfer too much.
279 */
280 for (i = 0;i < len;i++)
281 {
282 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
283 if (size < sg[i].length)
284 memcpy(dmabuf, sgbuf, size);
285 else
286 memcpy(dmabuf, sgbuf, sg[i].length);
287 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
288 dmabuf += sg[i].length;
289
290 if (size < sg[i].length)
291 size = 0;
292 else
293 size -= sg[i].length;
294
295 if (size == 0)
296 break;
297 }
298
299 /*
300 * Check that we didn't get a request to transfer
301 * more data than can fit into the SG list.
302 */
303
304 BUG_ON(size != 0);
305
306 host->size -= size;
307}
308
309static inline void wbsd_dma_to_sg(struct wbsd_host* host, struct mmc_data* data)
310{
311 unsigned int len, i, size;
312 struct scatterlist* sg;
313 char* dmabuf = host->dma_buffer;
314 char* sgbuf;
315
316 size = host->size;
317
318 sg = data->sg;
319 len = data->sg_len;
320
321 /*
322	 * Just loop through all entries. The size might not
323	 * cover the entire list, though, so make sure that
324	 * we do not transfer too much.
325 */
326 for (i = 0;i < len;i++)
327 {
328 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
329 if (size < sg[i].length)
330 memcpy(sgbuf, dmabuf, size);
331 else
332 memcpy(sgbuf, dmabuf, sg[i].length);
333 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
334 dmabuf += sg[i].length;
335
336 if (size < sg[i].length)
337 size = 0;
338 else
339 size -= sg[i].length;
340
341 if (size == 0)
342 break;
343 }
344
345 /*
346 * Check that we didn't get a request to transfer
347 * more data than can fit into the SG list.
348 */
349
350 BUG_ON(size != 0);
351
352 host->size -= size;
353}
354
355/*
356 * Command handling
357 */
358
359static inline void wbsd_get_short_reply(struct wbsd_host* host,
360 struct mmc_command* cmd)
361{
362 /*
363 * Correct response type?
364 */
365 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT)
366 {
367 cmd->error = MMC_ERR_INVALID;
368 return;
369 }
370
371 cmd->resp[0] =
372 wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
373 cmd->resp[0] |=
374 wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
375 cmd->resp[0] |=
376 wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
377 cmd->resp[0] |=
378 wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
379 cmd->resp[1] =
380 wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
381}
382
383static inline void wbsd_get_long_reply(struct wbsd_host* host,
384 struct mmc_command* cmd)
385{
386 int i;
387
388 /*
389 * Correct response type?
390 */
391 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG)
392 {
393 cmd->error = MMC_ERR_INVALID;
394 return;
395 }
396
397 for (i = 0;i < 4;i++)
398 {
399 cmd->resp[i] =
400 wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
401 cmd->resp[i] |=
402 wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
403 cmd->resp[i] |=
404 wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
405 cmd->resp[i] |=
406 wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
407 }
408}
409
410static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs);
411
412static void wbsd_send_command(struct wbsd_host* host, struct mmc_command* cmd)
413{
414 int i;
415 u8 status, isr;
416
417 DBGF("Sending cmd (%x)\n", cmd->opcode);
418
419 /*
420 * Clear accumulated ISR. The interrupt routine
421 * will fill this one with events that occur during
422 * transfer.
423 */
424 host->isr = 0;
425
426 /*
427 * Send the command (CRC calculated by host).
428 */
429 outb(cmd->opcode, host->base + WBSD_CMDR);
430 for (i = 3;i >= 0;i--)
431 outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);
432
433 cmd->error = MMC_ERR_NONE;
434
435 /*
436 * Wait for the request to complete.
437 */
438 do {
439 status = wbsd_read_index(host, WBSD_IDX_STATUS);
440 } while (status & WBSD_CARDTRAFFIC);
441
442 /*
443 * Do we expect a reply?
444 */
445 if ((cmd->flags & MMC_RSP_MASK) != MMC_RSP_NONE)
446 {
447 /*
448 * Read back status.
449 */
450 isr = host->isr;
451
452 /* Card removed? */
453 if (isr & WBSD_INT_CARD)
454 cmd->error = MMC_ERR_TIMEOUT;
455 /* Timeout? */
456 else if (isr & WBSD_INT_TIMEOUT)
457 cmd->error = MMC_ERR_TIMEOUT;
458 /* CRC? */
459 else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
460 cmd->error = MMC_ERR_BADCRC;
461 /* All ok */
462 else
463 {
464 if ((cmd->flags & MMC_RSP_MASK) == MMC_RSP_SHORT)
465 wbsd_get_short_reply(host, cmd);
466 else
467 wbsd_get_long_reply(host, cmd);
468 }
469 }
470
471 DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
472}
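
/*
 * A worked example of the byte stream written above (illustrative
 * only): for opcode 17 (READ_SINGLE_BLOCK) with argument 0x00001000,
 * the bytes sent to WBSD_CMDR are 0x11, then the argument MSB first:
 * 0x00, 0x00, 0x10, 0x00.
 */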
473
474/*
475 * Data functions
476 */
477
478static void wbsd_empty_fifo(struct wbsd_host* host)
479{
480 struct mmc_data* data = host->mrq->cmd->data;
481 char* buffer;
482 int i, fsr, fifo;
483
484 /*
485 * Handle excessive data.
486 */
487 if (data->bytes_xfered == host->size)
488 return;
489
490 buffer = wbsd_kmap_sg(host) + host->offset;
491
492 /*
493 * Drain the fifo. This has a tendency to loop longer
494 * than the FIFO length (usually one block).
495 */
496 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY))
497 {
498 /*
499 * The size field in the FSR is broken so we have to
500 * do some guessing.
501 */
502 if (fsr & WBSD_FIFO_FULL)
503 fifo = 16;
504 else if (fsr & WBSD_FIFO_FUTHRE)
505 fifo = 8;
506 else
507 fifo = 1;
508
509 for (i = 0;i < fifo;i++)
510 {
511 *buffer = inb(host->base + WBSD_DFR);
512 buffer++;
513 host->offset++;
514 host->remain--;
515
516 data->bytes_xfered++;
517
518 /*
519 * Transfer done?
520 */
521 if (data->bytes_xfered == host->size)
522 {
523 wbsd_kunmap_sg(host);
524 return;
525 }
526
527 /*
528 * End of scatter list entry?
529 */
530 if (host->remain == 0)
531 {
532 wbsd_kunmap_sg(host);
533
534 /*
535 * Get next entry. Check if last.
536 */
537 if (!wbsd_next_sg(host))
538 {
539 /*
540 * We should never reach this point.
541 * It means that we're trying to
542 * transfer more blocks than can fit
543 * into the scatter list.
544 */
545 BUG_ON(1);
546
547 host->size = data->bytes_xfered;
548
549 return;
550 }
551
552 buffer = wbsd_kmap_sg(host);
553 }
554 }
555 }
556
557 wbsd_kunmap_sg(host);
558
559 /*
560 * This is a very dirty hack to solve a
561 * hardware problem. The chip doesn't trigger
562 * FIFO threshold interrupts properly.
563 */
564 if ((host->size - data->bytes_xfered) < 16)
565 tasklet_schedule(&host->fifo_tasklet);
566}
567
568static void wbsd_fill_fifo(struct wbsd_host* host)
569{
570 struct mmc_data* data = host->mrq->cmd->data;
571 char* buffer;
572 int i, fsr, fifo;
573
574 /*
575 * Check that we aren't being called after the
576	 * entire buffer has been transferred.
577 */
578 if (data->bytes_xfered == host->size)
579 return;
580
581 buffer = wbsd_kmap_sg(host) + host->offset;
582
583 /*
584 * Fill the fifo. This has a tendency to loop longer
585 * than the FIFO length (usually one block).
586 */
587 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL))
588 {
589 /*
590 * The size field in the FSR is broken so we have to
591 * do some guessing.
592 */
593 if (fsr & WBSD_FIFO_EMPTY)
594 fifo = 0;
595 else if (fsr & WBSD_FIFO_EMTHRE)
596 fifo = 8;
597 else
598 fifo = 15;
599
600 for (i = 16;i > fifo;i--)
601 {
602 outb(*buffer, host->base + WBSD_DFR);
603 buffer++;
604 host->offset++;
605 host->remain--;
606
607 data->bytes_xfered++;
608
609 /*
610 * Transfer done?
611 */
612 if (data->bytes_xfered == host->size)
613 {
614 wbsd_kunmap_sg(host);
615 return;
616 }
617
618 /*
619 * End of scatter list entry?
620 */
621 if (host->remain == 0)
622 {
623 wbsd_kunmap_sg(host);
624
625 /*
626 * Get next entry. Check if last.
627 */
628 if (!wbsd_next_sg(host))
629 {
630 /*
631 * We should never reach this point.
632 * It means that we're trying to
633 * transfer more blocks than can fit
634 * into the scatter list.
635 */
636 BUG_ON(1);
637
638 host->size = data->bytes_xfered;
639
640 return;
641 }
642
643 buffer = wbsd_kmap_sg(host);
644 }
645 }
646 }
647
648 wbsd_kunmap_sg(host);
649}
650
651static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
652{
653 u16 blksize;
654 u8 setup;
655 unsigned long dmaflags;
656
657 DBGF("blksz %04x blks %04x flags %08x\n",
658 1 << data->blksz_bits, data->blocks, data->flags);
659 DBGF("tsac %d ms nsac %d clk\n",
660 data->timeout_ns / 1000000, data->timeout_clks);
661
662 /*
663 * Calculate size.
664 */
665 host->size = data->blocks << data->blksz_bits;
666
667 /*
668 * Check timeout values for overflow.
669 * (Yes, some cards cause this value to overflow).
670 */
671 if (data->timeout_ns > 127000000)
672 wbsd_write_index(host, WBSD_IDX_TAAC, 127);
673 else
674 wbsd_write_index(host, WBSD_IDX_TAAC, data->timeout_ns/1000000);
675
676 if (data->timeout_clks > 255)
677 wbsd_write_index(host, WBSD_IDX_NSAC, 255);
678 else
679 wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
680
681 /*
682 * Inform the chip of how large blocks will be
683 * sent. It needs this to determine when to
684 * calculate CRC.
685 *
686 * Space for CRC must be included in the size.
687 */
688 blksize = (1 << data->blksz_bits) + 2;
689
690 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
691 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
692
693 /*
694 * Clear the FIFO. This is needed even for DMA
695 * transfers since the chip still uses the FIFO
696 * internally.
697 */
698 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
699 setup |= WBSD_FIFO_RESET;
700 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
701
702 /*
703 * DMA transfer?
704 */
705 if (host->dma >= 0)
706 {
707 /*
708 * The buffer for DMA is only 64 kB.
709 */
710 BUG_ON(host->size > 0x10000);
711 if (host->size > 0x10000)
712 {
713 data->error = MMC_ERR_INVALID;
714 return;
715 }
716
717 /*
718 * Transfer data from the SG list to
719 * the DMA buffer.
720 */
721 if (data->flags & MMC_DATA_WRITE)
722 wbsd_sg_to_dma(host, data);
723
724 /*
725 * Initialise the ISA DMA controller.
726 */
727 dmaflags = claim_dma_lock();
728 disable_dma(host->dma);
729 clear_dma_ff(host->dma);
730 if (data->flags & MMC_DATA_READ)
731 set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
732 else
733 set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
734 set_dma_addr(host->dma, host->dma_addr);
735 set_dma_count(host->dma, host->size);
736
737 enable_dma(host->dma);
738 release_dma_lock(dmaflags);
739
740 /*
741 * Enable DMA on the host.
742 */
743 wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
744 }
745 else
746 {
747 /*
748 * This flag is used to keep printk
749 * output to a minimum.
750 */
751 host->firsterr = 1;
752
753 /*
754 * Initialise the SG list.
755 */
756 wbsd_init_sg(host, data);
757
758 /*
759 * Turn off DMA.
760 */
761 wbsd_write_index(host, WBSD_IDX_DMA, 0);
762
763 /*
764 * Set up FIFO threshold levels (and fill
765 * buffer if doing a write).
766 */
767 if (data->flags & MMC_DATA_READ)
768 {
769 wbsd_write_index(host, WBSD_IDX_FIFOEN,
770 WBSD_FIFOEN_FULL | 8);
771 }
772 else
773 {
774 wbsd_write_index(host, WBSD_IDX_FIFOEN,
775 WBSD_FIFOEN_EMPTY | 8);
776 wbsd_fill_fifo(host);
777 }
778 }
779
780 data->error = MMC_ERR_NONE;
781}
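
/*
 * A worked example of the block size programming above (illustrative
 * only): for 512-byte blocks, blksize = 512 + 2 = 0x202 once the CRC
 * space is included, so WBSD_IDX_PBSMSB is written with
 * (0x202 >> 4) & 0xF0 == 0x20 and WBSD_IDX_PBSLSB with
 * 0x202 & 0xFF == 0x02.
 */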
782
783static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data)
784{
785 unsigned long dmaflags;
786 int count;
787 u8 status;
788
789 WARN_ON(host->mrq == NULL);
790
791 /*
792 * Send a stop command if needed.
793 */
794 if (data->stop)
795 wbsd_send_command(host, data->stop);
796
797 /*
798 * Wait for the controller to leave data
799 * transfer state.
800 */
801 do
802 {
803 status = wbsd_read_index(host, WBSD_IDX_STATUS);
804 } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
805
806 /*
807 * DMA transfer?
808 */
809 if (host->dma >= 0)
810 {
811 /*
812 * Disable DMA on the host.
813 */
814 wbsd_write_index(host, WBSD_IDX_DMA, 0);
815
816 /*
817	 * Turn off the ISA DMA controller.
818 */
819 dmaflags = claim_dma_lock();
820 disable_dma(host->dma);
821 clear_dma_ff(host->dma);
822 count = get_dma_residue(host->dma);
823 release_dma_lock(dmaflags);
824
825 /*
826 * Any leftover data?
827 */
828 if (count)
829 {
830 printk(KERN_ERR DRIVER_NAME ": Incomplete DMA "
831 "transfer. %d bytes left.\n", count);
832
833 data->error = MMC_ERR_FAILED;
834 }
835 else
836 {
837 /*
838 * Transfer data from DMA buffer to
839 * SG list.
840 */
841 if (data->flags & MMC_DATA_READ)
842 wbsd_dma_to_sg(host, data);
843
844 data->bytes_xfered = host->size;
845 }
846 }
847
848 DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
849
850 wbsd_request_end(host, host->mrq);
851}
852
853/*
854 * MMC Callbacks
855 */
856
857static void wbsd_request(struct mmc_host* mmc, struct mmc_request* mrq)
858{
859 struct wbsd_host* host = mmc_priv(mmc);
860 struct mmc_command* cmd;
861
862 /*
863 * Disable tasklets to avoid a deadlock.
864 */
865 spin_lock_bh(&host->lock);
866
867 BUG_ON(host->mrq != NULL);
868
869 cmd = mrq->cmd;
870
871 host->mrq = mrq;
872
873 /*
874 * If there is no card in the slot then
875	 * time out immediately.
876 */
877 if (!(inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT))
878 {
879 cmd->error = MMC_ERR_TIMEOUT;
880 goto done;
881 }
882
883 /*
884 * Does the request include data?
885 */
886 if (cmd->data)
887 {
888 wbsd_prepare_data(host, cmd->data);
889
890 if (cmd->data->error != MMC_ERR_NONE)
891 goto done;
892 }
893
894 wbsd_send_command(host, cmd);
895
896 /*
897 * If this is a data transfer the request
898 * will be finished after the data has
899	 * been transferred.
900 */
901 if (cmd->data && (cmd->error == MMC_ERR_NONE))
902 {
903 /*
904 * Dirty fix for hardware bug.
905 */
906 if (host->dma == -1)
907 tasklet_schedule(&host->fifo_tasklet);
908
909 spin_unlock_bh(&host->lock);
910
911 return;
912 }
913
914done:
915 wbsd_request_end(host, mrq);
916
917 spin_unlock_bh(&host->lock);
918}
919
920static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
921{
922 struct wbsd_host* host = mmc_priv(mmc);
923 u8 clk, setup, pwr;
924
925 DBGF("clock %uHz busmode %u powermode %u Vdd %u\n",
926 ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
927
928 spin_lock_bh(&host->lock);
929
930 /*
931 * Reset the chip on each power off.
932 * Should clear out any weird states.
933 */
934 if (ios->power_mode == MMC_POWER_OFF)
935 wbsd_init_device(host);
936
937 if (ios->clock >= 24000000)
938 clk = WBSD_CLK_24M;
939 else if (ios->clock >= 16000000)
940 clk = WBSD_CLK_16M;
941 else if (ios->clock >= 12000000)
942 clk = WBSD_CLK_12M;
943 else
944 clk = WBSD_CLK_375K;
945
946 /*
947 * Only write to the clock register when
948 * there is an actual change.
949 */
950 if (clk != host->clk)
951 {
952 wbsd_write_index(host, WBSD_IDX_CLK, clk);
953 host->clk = clk;
954 }
955
956 if (ios->power_mode != MMC_POWER_OFF)
957 {
958 /*
959 * Power up card.
960 */
961 pwr = inb(host->base + WBSD_CSR);
962 pwr &= ~WBSD_POWER_N;
963 outb(pwr, host->base + WBSD_CSR);
964
965 /*
966 * This behaviour is stolen from the
967 * Windows driver. Don't know why, but
968 * it is needed.
969 */
970 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
971 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
972 setup |= WBSD_DAT3_H;
973 else
974 setup &= ~WBSD_DAT3_H;
975 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
976
977 mdelay(1);
978 }
979
980 spin_unlock_bh(&host->lock);
981}
982
983/*
984 * Tasklets
985 */
986
987static inline struct mmc_data* wbsd_get_data(struct wbsd_host* host)
988{
989 WARN_ON(!host->mrq);
990 if (!host->mrq)
991 return NULL;
992
993 WARN_ON(!host->mrq->cmd);
994 if (!host->mrq->cmd)
995 return NULL;
996
997 WARN_ON(!host->mrq->cmd->data);
998 if (!host->mrq->cmd->data)
999 return NULL;
1000
1001 return host->mrq->cmd->data;
1002}
1003
1004static void wbsd_tasklet_card(unsigned long param)
1005{
1006 struct wbsd_host* host = (struct wbsd_host*)param;
1007 u8 csr;
1008
1009 spin_lock(&host->lock);
1010
1011 csr = inb(host->base + WBSD_CSR);
1012 WARN_ON(csr == 0xff);
1013
1014 if (csr & WBSD_CARDPRESENT)
1015 DBG("Card inserted\n");
1016 else
1017 {
1018 DBG("Card removed\n");
1019
1020 if (host->mrq)
1021 {
1022 printk(KERN_ERR DRIVER_NAME
1023 ": Card removed during transfer!\n");
1024 wbsd_reset(host);
1025
1026 host->mrq->cmd->error = MMC_ERR_FAILED;
1027 tasklet_schedule(&host->finish_tasklet);
1028 }
1029 }
1030
1031 /*
1032	 * Unlock first since we might get a callback.
1033 */
1034 spin_unlock(&host->lock);
1035
1036 mmc_detect_change(host->mmc);
1037}
1038
1039static void wbsd_tasklet_fifo(unsigned long param)
1040{
1041 struct wbsd_host* host = (struct wbsd_host*)param;
1042 struct mmc_data* data;
1043
1044 spin_lock(&host->lock);
1045
1046 if (!host->mrq)
1047 goto end;
1048
1049 data = wbsd_get_data(host);
1050 if (!data)
1051 goto end;
1052
1053 if (data->flags & MMC_DATA_WRITE)
1054 wbsd_fill_fifo(host);
1055 else
1056 wbsd_empty_fifo(host);
1057
1058 /*
1059 * Done?
1060 */
1061 if (host->size == data->bytes_xfered)
1062 {
1063 wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
1064 tasklet_schedule(&host->finish_tasklet);
1065 }
1066
1067end:
1068 spin_unlock(&host->lock);
1069}
1070
1071static void wbsd_tasklet_crc(unsigned long param)
1072{
1073 struct wbsd_host* host = (struct wbsd_host*)param;
1074 struct mmc_data* data;
1075
1076 spin_lock(&host->lock);
1077
1078 if (!host->mrq)
1079 goto end;
1080
1081 data = wbsd_get_data(host);
1082 if (!data)
1083 goto end;
1084
1085 DBGF("CRC error\n");
1086
1087 data->error = MMC_ERR_BADCRC;
1088
1089 tasklet_schedule(&host->finish_tasklet);
1090
1091end:
1092 spin_unlock(&host->lock);
1093}
1094
1095static void wbsd_tasklet_timeout(unsigned long param)
1096{
1097 struct wbsd_host* host = (struct wbsd_host*)param;
1098 struct mmc_data* data;
1099
1100 spin_lock(&host->lock);
1101
1102 if (!host->mrq)
1103 goto end;
1104
1105 data = wbsd_get_data(host);
1106 if (!data)
1107 goto end;
1108
1109 DBGF("Timeout\n");
1110
1111 data->error = MMC_ERR_TIMEOUT;
1112
1113 tasklet_schedule(&host->finish_tasklet);
1114
1115end:
1116 spin_unlock(&host->lock);
1117}
1118
1119static void wbsd_tasklet_finish(unsigned long param)
1120{
1121 struct wbsd_host* host = (struct wbsd_host*)param;
1122 struct mmc_data* data;
1123
1124 spin_lock(&host->lock);
1125
1126 WARN_ON(!host->mrq);
1127 if (!host->mrq)
1128 goto end;
1129
1130 data = wbsd_get_data(host);
1131 if (!data)
1132 goto end;
1133
1134 wbsd_finish_data(host, data);
1135
1136end:
1137 spin_unlock(&host->lock);
1138}
1139
1140static void wbsd_tasklet_block(unsigned long param)
1141{
1142 struct wbsd_host* host = (struct wbsd_host*)param;
1143 struct mmc_data* data;
1144
1145 spin_lock(&host->lock);
1146
1147 if ((wbsd_read_index(host, WBSD_IDX_CRCSTATUS) & WBSD_CRC_MASK) !=
1148 WBSD_CRC_OK)
1149 {
1150 data = wbsd_get_data(host);
1151 if (!data)
1152 goto end;
1153
1154 DBGF("CRC error\n");
1155
1156 data->error = MMC_ERR_BADCRC;
1157
1158 tasklet_schedule(&host->finish_tasklet);
1159 }
1160
1161end:
1162 spin_unlock(&host->lock);
1163}
1164
1165/*
1166 * Interrupt handling
1167 */
1168
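/*
 * The handler below only latches the raised ISR bits and defers all real
 * work to tasklets. Card, FIFO and transfer-complete events go through
 * tasklet_schedule(), while CRC, timeout and busy-end conditions use
 * tasklet_hi_schedule() so that they run ahead of the ordinary tasklets.
 */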
1169static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs)
1170{
1171 struct wbsd_host* host = dev_id;
1172 int isr;
1173
1174 isr = inb(host->base + WBSD_ISR);
1175
1176 /*
1177 * Was it actually our hardware that caused the interrupt?
1178 */
1179 if (isr == 0xff || isr == 0x00)
1180 return IRQ_NONE;
1181
1182 host->isr |= isr;
1183
1184 /*
1185 * Schedule tasklets as needed.
1186 */
1187 if (isr & WBSD_INT_CARD)
1188 tasklet_schedule(&host->card_tasklet);
1189 if (isr & WBSD_INT_FIFO_THRE)
1190 tasklet_schedule(&host->fifo_tasklet);
1191 if (isr & WBSD_INT_CRC)
1192 tasklet_hi_schedule(&host->crc_tasklet);
1193 if (isr & WBSD_INT_TIMEOUT)
1194 tasklet_hi_schedule(&host->timeout_tasklet);
1195 if (isr & WBSD_INT_BUSYEND)
1196 tasklet_hi_schedule(&host->block_tasklet);
1197 if (isr & WBSD_INT_TC)
1198 tasklet_schedule(&host->finish_tasklet);
1199
1200 return IRQ_HANDLED;
1201}
1202
1203/*
1204 * Support functions for probe
1205 */
1206
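/*
 * The scan below tries each config port with each unlock code: the
 * unlock code is written twice to the config port, the 16-bit chip id
 * is read back through WBSD_CONF_ID_HI/LO (the data register is the
 * config port + 1) and compared against valid_ids[]. When nothing
 * matches, LOCK_CODE is written to re-lock the chip; on a match the
 * config region stays claimed and is released later by
 * wbsd_release_regions().
 */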
1207static int wbsd_scan(struct wbsd_host* host)
1208{
1209 int i, j, k;
1210 int id;
1211
1212 /*
1213 * Iterate through all ports, all codes to
1214 * find hardware that is in our known list.
1215 */
1216 for (i = 0;i < sizeof(config_ports)/sizeof(int);i++)
1217 {
1218 if (!request_region(config_ports[i], 2, DRIVER_NAME))
1219 continue;
1220
1221 for (j = 0;j < sizeof(unlock_codes)/sizeof(int);j++)
1222 {
1223 id = 0xFFFF;
1224
1225 outb(unlock_codes[j], config_ports[i]);
1226 outb(unlock_codes[j], config_ports[i]);
1227
1228 outb(WBSD_CONF_ID_HI, config_ports[i]);
1229 id = inb(config_ports[i] + 1) << 8;
1230
1231 outb(WBSD_CONF_ID_LO, config_ports[i]);
1232 id |= inb(config_ports[i] + 1);
1233
1234 for (k = 0;k < sizeof(valid_ids)/sizeof(int);k++)
1235 {
1236 if (id == valid_ids[k])
1237 {
1238 host->chip_id = id;
1239 host->config = config_ports[i];
1240				host->unlock_code = unlock_codes[j];
1241
1242 return 0;
1243 }
1244 }
1245
1246 if (id != 0xFFFF)
1247 {
1248 DBG("Unknown hardware (id %x) found at %x\n",
1249 id, config_ports[i]);
1250 }
1251
1252 outb(LOCK_CODE, config_ports[i]);
1253 }
1254
1255 release_region(config_ports[i], 2);
1256 }
1257
1258 return -ENODEV;
1259}
1260
1261static int wbsd_request_regions(struct wbsd_host* host)
1262{
1263 if (io & 0x7)
1264 return -EINVAL;
1265
1266 if (!request_region(io, 8, DRIVER_NAME))
1267 return -EIO;
1268
1269 host->base = io;
1270
1271 return 0;
1272}
1273
1274static void wbsd_release_regions(struct wbsd_host* host)
1275{
1276 if (host->base)
1277 release_region(host->base, 8);
1278
1279 if (host->config)
1280 release_region(host->config, 2);
1281}
1282
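/*
 * ISA DMA puts two constraints on the bounce buffer allocated below: a
 * transfer may not cross a 64 kB physical boundary and the memory must
 * sit below 16 MB. As the buffer itself is 64 kB, the first constraint
 * is met by requiring the buffer to start on a 64 kB boundary. E.g. a
 * buffer at bus address 0x230000 passes both checks, one at 0x234200
 * fails the alignment test and one at 0x1200000 fails the 16 MB limit.
 */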
1283static void wbsd_init_dma(struct wbsd_host* host)
1284{
1285 host->dma = -1;
1286
1287 if (dma < 0)
1288 return;
1289
1290 if (request_dma(dma, DRIVER_NAME))
1291 goto err;
1292
1293 /*
1294 * We need to allocate a special buffer in
1295 * order for ISA to be able to DMA to it.
1296 */
1297 host->dma_buffer = kmalloc(65536,
1298 GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
1299 if (!host->dma_buffer)
1300 goto free;
1301
1302 /*
1303 * Translate the address to a physical address.
1304 */
1305 host->dma_addr = isa_virt_to_bus(host->dma_buffer);
1306
1307 /*
1308	 * ISA DMA cannot cross a 64 kB boundary; the buffer must be 64 kB aligned.
1309 */
1310 if ((host->dma_addr & 0xffff) != 0)
1311 goto kfree;
1312 /*
1313 * ISA cannot access memory above 16 MB.
1314 */
1315 else if (host->dma_addr >= 0x1000000)
1316 goto kfree;
1317
1318 host->dma = dma;
1319
1320 return;
1321
1322kfree:
1323 /*
1324 * If we've gotten here then there is some kind of alignment bug
1325 */
1326 BUG_ON(1);
1327
1328 kfree(host->dma_buffer);
1329 host->dma_buffer = NULL;
1330
1331free:
1332 free_dma(dma);
1333
1334err:
1335 printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
1336 "Falling back on FIFO.\n", dma);
1337}
1338
1339static struct mmc_host_ops wbsd_ops = {
1340 .request = wbsd_request,
1341 .set_ios = wbsd_set_ios,
1342};
1343
1344/*
1345 * Device probe
1346 */
1347
1348static int wbsd_probe(struct device* dev)
1349{
1350 struct wbsd_host* host = NULL;
1351 struct mmc_host* mmc = NULL;
1352 int ret;
1353
1354 /*
1355 * Allocate MMC structure.
1356 */
1357 mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
1358 if (!mmc)
1359 return -ENOMEM;
1360
1361 host = mmc_priv(mmc);
1362 host->mmc = mmc;
1363
1364 /*
1365 * Scan for hardware.
1366 */
1367 ret = wbsd_scan(host);
1368 if (ret)
1369 goto freemmc;
1370
1371 /*
1372 * Reset the chip.
1373 */
1374 wbsd_write_config(host, WBSD_CONF_SWRST, 1);
1375 wbsd_write_config(host, WBSD_CONF_SWRST, 0);
1376
1377 /*
1378 * Allocate I/O ports.
1379 */
1380 ret = wbsd_request_regions(host);
1381 if (ret)
1382 goto release;
1383
1384 /*
1385 * Set host parameters.
1386 */
1387 mmc->ops = &wbsd_ops;
1388 mmc->f_min = 375000;
1389 mmc->f_max = 24000000;
1390 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1391
1392 spin_lock_init(&host->lock);
1393
1394 /*
1395 * Select SD/MMC function.
1396 */
1397 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1398
1399 /*
1400 * Set up card detection.
1401 */
1402 wbsd_write_config(host, WBSD_CONF_PINS, 0x02);
1403
1404 /*
1405 * Configure I/O port.
1406 */
1407 wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
1408 wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);
1409
1410 /*
1411 * Allocate interrupt.
1412 */
1413 ret = request_irq(irq, wbsd_irq, SA_SHIRQ, DRIVER_NAME, host);
1414 if (ret)
1415 goto release;
1416
1417 host->irq = irq;
1418
1419 /*
1420 * Set up tasklets.
1421 */
1422 tasklet_init(&host->card_tasklet, wbsd_tasklet_card, (unsigned long)host);
1423 tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo, (unsigned long)host);
1424 tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc, (unsigned long)host);
1425 tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout, (unsigned long)host);
1426 tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish, (unsigned long)host);
1427 tasklet_init(&host->block_tasklet, wbsd_tasklet_block, (unsigned long)host);
1428
1429 /*
1430 * Configure interrupt.
1431 */
1432 wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);
1433
1434 /*
1435 * Allocate DMA.
1436 */
1437 wbsd_init_dma(host);
1438
1439 /*
1440 * If all went well, then configure DMA.
1441 */
1442 if (host->dma >= 0)
1443 wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);
1444
1445 /*
1446 * Maximum number of segments. Worst case is one sector per segment
1447 * so this will be 64kB/512.
1448 */
1449 mmc->max_hw_segs = 128;
1450 mmc->max_phys_segs = 128;
1451
1452 /*
1453 * Maximum number of sectors in one transfer. Also limited by 64kB
1454 * buffer.
1455 */
1456 mmc->max_sectors = 128;
1457
1458 /*
1459 * Maximum segment size. Could be one segment with the maximum number
1460	 * of sectors.
1461 */
1462 mmc->max_seg_size = mmc->max_sectors * 512;
1463
1464 /*
1465 * Enable chip.
1466 */
1467 wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
1468
1469 /*
1470 * Power up chip.
1471 */
1472 wbsd_write_config(host, WBSD_CONF_POWER, 0x20);
1473
1474 /*
1475 * Power Management stuff. No idea how this works.
1476 * Not tested.
1477 */
1478#ifdef CONFIG_PM
1479 wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
1480#endif
1481
1482 /*
1483 * Reset the chip into a known state.
1484 */
1485 wbsd_init_device(host);
1486
1487 dev_set_drvdata(dev, mmc);
1488
1489 /*
1490 * Add host to MMC layer.
1491 */
1492 mmc_add_host(mmc);
1493
1494 printk(KERN_INFO "%s: W83L51xD id %x at 0x%x irq %d dma %d\n",
1495 mmc->host_name, (int)host->chip_id, (int)host->base,
1496 (int)host->irq, (int)host->dma);
1497
1498 return 0;
1499
1500release:
1501 wbsd_release_regions(host);
1502
1503freemmc:
1504 mmc_free_host(mmc);
1505
1506 return ret;
1507}
1508
1509/*
1510 * Device remove
1511 */
1512
1513static int wbsd_remove(struct device* dev)
1514{
1515 struct mmc_host* mmc = dev_get_drvdata(dev);
1516 struct wbsd_host* host;
1517
1518 if (!mmc)
1519 return 0;
1520
1521 host = mmc_priv(mmc);
1522
1523 /*
1524 * Unregister host with MMC layer.
1525 */
1526 mmc_remove_host(mmc);
1527
1528 /*
1529 * Power down the SD/MMC function.
1530 */
1531 wbsd_unlock_config(host);
1532 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1533 wbsd_write_config(host, WBSD_CONF_ENABLE, 0);
1534 wbsd_lock_config(host);
1535
1536 /*
1537 * Free resources.
1538 */
1539 if (host->dma_buffer)
1540 kfree(host->dma_buffer);
1541
1542 if (host->dma >= 0)
1543 free_dma(host->dma);
1544
1545 free_irq(host->irq, host);
1546
1547 tasklet_kill(&host->card_tasklet);
1548 tasklet_kill(&host->fifo_tasklet);
1549 tasklet_kill(&host->crc_tasklet);
1550 tasklet_kill(&host->timeout_tasklet);
1551 tasklet_kill(&host->finish_tasklet);
1552 tasklet_kill(&host->block_tasklet);
1553
1554 wbsd_release_regions(host);
1555
1556 mmc_free_host(mmc);
1557
1558 return 0;
1559}
1560
1561/*
1562 * Power management
1563 */
1564
1565#ifdef CONFIG_PM
1566static int wbsd_suspend(struct device *dev, u32 state, u32 level)
1567{
1568 DBGF("Not yet supported\n");
1569
1570 return 0;
1571}
1572
1573static int wbsd_resume(struct device *dev, u32 level)
1574{
1575 DBGF("Not yet supported\n");
1576
1577 return 0;
1578}
1579#else
1580#define wbsd_suspend NULL
1581#define wbsd_resume NULL
1582#endif
1583
1584static void wbsd_release(struct device *dev)
1585{
1586}
1587
1588static struct platform_device wbsd_device = {
1589 .name = DRIVER_NAME,
1590 .id = -1,
1591 .dev = {
1592 .release = wbsd_release,
1593 },
1594};
1595
1596static struct device_driver wbsd_driver = {
1597 .name = DRIVER_NAME,
1598 .bus = &platform_bus_type,
1599 .probe = wbsd_probe,
1600 .remove = wbsd_remove,
1601
1602 .suspend = wbsd_suspend,
1603 .resume = wbsd_resume,
1604};
1605
1606/*
1607 * Module loading/unloading
1608 */
1609
1610static int __init wbsd_drv_init(void)
1611{
1612 int result;
1613
1614 printk(KERN_INFO DRIVER_NAME
1615 ": Winbond W83L51xD SD/MMC card interface driver, "
1616 DRIVER_VERSION "\n");
1617 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
1618
1619 result = driver_register(&wbsd_driver);
1620 if (result < 0)
1621 return result;
1622
1623	result = platform_device_register(&wbsd_device);
1624	if (result < 0)
1625	{
		driver_unregister(&wbsd_driver);
		return result;
	}
1626
1627 return 0;
1628}
1629
1630static void __exit wbsd_drv_exit(void)
1631{
1632 platform_device_unregister(&wbsd_device);
1633
1634 driver_unregister(&wbsd_driver);
1635
1636 DBG("unloaded\n");
1637}
1638
1639module_init(wbsd_drv_init);
1640module_exit(wbsd_drv_exit);
1641module_param(io, uint, 0444);
1642module_param(irq, uint, 0444);
1643module_param(dma, int, 0444);
1644
1645MODULE_LICENSE("GPL");
1646MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
1647MODULE_VERSION(DRIVER_VERSION);
1648
1649MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
1650MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
1651MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
diff --git a/drivers/mmc/wbsd.h b/drivers/mmc/wbsd.h
new file mode 100644
index 000000000000..fdc03b56a81f
--- /dev/null
+++ b/drivers/mmc/wbsd.h
@@ -0,0 +1,178 @@
1/*
2 * linux/drivers/mmc/wbsd.h - Winbond W83L51xD SD/MMC driver
3 *
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11const int config_ports[] = { 0x2E, 0x4E };
12const int unlock_codes[] = { 0x83, 0x87 };
13
14const int valid_ids[] = {
15 0x7112,
16 };
17
18#define LOCK_CODE 0xAA
19
20#define WBSD_CONF_SWRST 0x02
21#define WBSD_CONF_DEVICE 0x07
22#define WBSD_CONF_ID_HI 0x20
23#define WBSD_CONF_ID_LO 0x21
24#define WBSD_CONF_POWER 0x22
25#define WBSD_CONF_PME 0x23
26#define WBSD_CONF_PMES 0x24
27
28#define WBSD_CONF_ENABLE 0x30
29#define WBSD_CONF_PORT_HI 0x60
30#define WBSD_CONF_PORT_LO 0x61
31#define WBSD_CONF_IRQ 0x70
32#define WBSD_CONF_DRQ 0x74
33
34#define WBSD_CONF_PINS 0xF0
35
36#define DEVICE_SD 0x03
37
38#define WBSD_CMDR 0x00
39#define WBSD_DFR 0x01
40#define WBSD_EIR 0x02
41#define WBSD_ISR 0x03
42#define WBSD_FSR 0x04
43#define WBSD_IDXR 0x05
44#define WBSD_DATAR 0x06
45#define WBSD_CSR 0x07
46
47#define WBSD_EINT_CARD 0x40
48#define WBSD_EINT_FIFO_THRE 0x20
49#define WBSD_EINT_CCRC 0x10
50#define WBSD_EINT_TIMEOUT 0x08
51#define WBSD_EINT_PROGEND 0x04
52#define WBSD_EINT_CRC 0x02
53#define WBSD_EINT_TC 0x01
54
55#define WBSD_INT_PENDING 0x80
56#define WBSD_INT_CARD 0x40
57#define WBSD_INT_FIFO_THRE 0x20
58#define WBSD_INT_CRC 0x10
59#define WBSD_INT_TIMEOUT 0x08
60#define WBSD_INT_PROGEND 0x04
61#define WBSD_INT_BUSYEND 0x02
62#define WBSD_INT_TC 0x01
63
64#define WBSD_FIFO_EMPTY 0x80
65#define WBSD_FIFO_FULL 0x40
66#define WBSD_FIFO_EMTHRE 0x20
67#define WBSD_FIFO_FUTHRE 0x10
68#define WBSD_FIFO_SZMASK 0x0F
69
70#define WBSD_MSLED 0x20
71#define WBSD_POWER_N 0x10
72#define WBSD_WRPT 0x04
73#define WBSD_CARDPRESENT 0x01
74
75#define WBSD_IDX_CLK 0x01
76#define WBSD_IDX_PBSMSB 0x02
77#define WBSD_IDX_TAAC 0x03
78#define WBSD_IDX_NSAC 0x04
79#define WBSD_IDX_PBSLSB 0x05
80#define WBSD_IDX_SETUP 0x06
81#define WBSD_IDX_DMA 0x07
82#define WBSD_IDX_FIFOEN 0x08
83#define WBSD_IDX_STATUS 0x10
84#define WBSD_IDX_RSPLEN 0x1E
85#define WBSD_IDX_RESP0 0x1F
86#define WBSD_IDX_RESP1 0x20
87#define WBSD_IDX_RESP2 0x21
88#define WBSD_IDX_RESP3 0x22
89#define WBSD_IDX_RESP4 0x23
90#define WBSD_IDX_RESP5 0x24
91#define WBSD_IDX_RESP6 0x25
92#define WBSD_IDX_RESP7 0x26
93#define WBSD_IDX_RESP8 0x27
94#define WBSD_IDX_RESP9 0x28
95#define WBSD_IDX_RESP10 0x29
96#define WBSD_IDX_RESP11 0x2A
97#define WBSD_IDX_RESP12 0x2B
98#define WBSD_IDX_RESP13 0x2C
99#define WBSD_IDX_RESP14 0x2D
100#define WBSD_IDX_RESP15 0x2E
101#define WBSD_IDX_RESP16 0x2F
102#define WBSD_IDX_CRCSTATUS 0x30
103#define WBSD_IDX_ISR 0x3F
104
105#define WBSD_CLK_375K 0x00
106#define WBSD_CLK_12M 0x01
107#define WBSD_CLK_16M 0x02
108#define WBSD_CLK_24M 0x03
109
110#define WBSD_DAT3_H 0x08
111#define WBSD_FIFO_RESET 0x04
112#define WBSD_SOFT_RESET 0x02
113#define WBSD_INC_INDEX 0x01
114
115#define WBSD_DMA_SINGLE 0x02
116#define WBSD_DMA_ENABLE 0x01
117
118#define WBSD_FIFOEN_EMPTY 0x20
119#define WBSD_FIFOEN_FULL 0x10
120#define WBSD_FIFO_THREMASK 0x0F
121
122#define WBSD_BLOCK_READ 0x80
123#define WBSD_BLOCK_WRITE 0x40
124#define WBSD_BUSY 0x20
125#define WBSD_CARDTRAFFIC 0x04
126#define WBSD_SENDCMD 0x02
127#define WBSD_RECVRES 0x01
128
129#define WBSD_RSP_SHORT 0x00
130#define WBSD_RSP_LONG 0x01
131
132#define WBSD_CRC_MASK 0x1F
133#define WBSD_CRC_OK 0x05 /* S010E (00101) */
134#define WBSD_CRC_FAIL 0x0B /* S101E (01011) */
135
136
137struct wbsd_host
138{
139 struct mmc_host* mmc; /* MMC structure */
140
141 spinlock_t lock; /* Mutex */
142
143 struct mmc_request* mrq; /* Current request */
144
145 u8 isr; /* Accumulated ISR */
146
147 struct scatterlist* cur_sg; /* Current SG entry */
148 unsigned int num_sg; /* Number of entries left */
149 void* mapped_sg; /* vaddr of mapped sg */
150
151 unsigned int offset; /* Offset into current entry */
152	unsigned int		remain;		/* Data left in current entry */
153
154 int size; /* Total size of transfer */
155
156 char* dma_buffer; /* ISA DMA buffer */
157 dma_addr_t dma_addr; /* Physical address for same */
158
159 int firsterr; /* See fifo functions */
160
161 u8 clk; /* Current clock speed */
162
163 int config; /* Config port */
164 u8 unlock_code; /* Code to unlock config */
165
166 int chip_id; /* ID of controller */
167
168 int base; /* I/O port base */
169 int irq; /* Interrupt */
170 int dma; /* DMA channel */
171
172 struct tasklet_struct card_tasklet; /* Tasklet structures */
173 struct tasklet_struct fifo_tasklet;
174 struct tasklet_struct crc_tasklet;
175 struct tasklet_struct timeout_tasklet;
176 struct tasklet_struct finish_tasklet;
177 struct tasklet_struct block_tasklet;
178};