author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-02-06 17:54:54 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-02-06 17:54:54 -0500
commit     2442d3109943bafbdfc4f0495e3d10eeedc8390c (patch)
tree       9700491eb6ca418ee226c5d8438464a1a2c87281 /drivers
parent     02aedd69e2ef31b0fca1e8960cb1e7fd0c343110 (diff)
parent     f9d429a2e579ed7c51c49a81265f7e7d2c59c197 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc: (32 commits)
mmc: tifm: replace kmap with page_address
mmc: sdhci: fix voltage ocr
mmc: sdhci: replace kmap with page_address
mmc: wbsd: replace kmap with page_address
mmc: handle pci_enable_device() return value in sdhci
mmc: Proper unclaim in mmc_block
mmc: change wbsd mailing list
mmc: Graceful fallback for fancy features
mmc: Handle wbsd's stupid command list
mmc: Allow host drivers to specify max block count
mmc: Allow host drivers to specify a max block size
tifm_sd: add suspend and resume functionality
tifm_core: add suspend/resume infrastructure for tifm devices
tifm_7xx1: prettify
tifm_7xx1: recognize device 0xac8f as supported
tifm_7xx1: switch from workqueue to kthread
tifm_7xx1: Merge media insert and media remove functions
tifm_7xx1: simplify eject function
Add dummy_signal_irq function to save check in ISR
Remove unused return value from signal_irq callback
...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/misc/tifm_7xx1.c | 402
-rw-r--r--  drivers/misc/tifm_core.c |  65
-rw-r--r--  drivers/mmc/at91_mci.c   |   3
-rw-r--r--  drivers/mmc/au1xmmc.c    |  13
-rw-r--r--  drivers/mmc/imxmmc.c     |   4
-rw-r--r--  drivers/mmc/mmc.c        | 182
-rw-r--r--  drivers/mmc/mmc_block.c  |  15
-rw-r--r--  drivers/mmc/mmc_queue.c  |   2
-rw-r--r--  drivers/mmc/mmc_sysfs.c  |   2
-rw-r--r--  drivers/mmc/mmci.c       |  15
-rw-r--r--  drivers/mmc/omap.c       |   6
-rw-r--r--  drivers/mmc/pxamci.c     |  10
-rw-r--r--  drivers/mmc/sdhci.c      |  91
-rw-r--r--  drivers/mmc/sdhci.h      |   2
-rw-r--r--  drivers/mmc/tifm_sd.c    | 487
-rw-r--r--  drivers/mmc/wbsd.c       | 102
-rw-r--r--  drivers/mmc/wbsd.h       |   1
17 files changed, 847 insertions, 555 deletions
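Several of the patches below replace the old per-host max_sectors limit with three finer-grained fields (max_blk_size, max_blk_count and max_req_size) that host drivers fill in at probe time. As a reader's aid, here is a minimal sketch of how a driver would report them; foo_mmc_probe and its limit values are hypothetical, only the mmc_host field names come from this merge:

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

/* Hypothetical host driver -- illustrates the new limit fields only. */
static int foo_mmc_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc = mmc_alloc_host(0, &pdev->dev);

        if (!mmc)
                return -ENOMEM;

        mmc->max_blk_size  = 2048;  /* largest single data block the controller handles */
        mmc->max_blk_count = 65535; /* e.g. a 16-bit block counter register */
        mmc->max_req_size  = mmc->max_blk_size * mmc->max_blk_count;

        return mmc_add_host(mmc);
}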
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 2ab7add78f94..e21e490fedb0 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -11,66 +11,25 @@
 
 #include <linux/tifm.h>
 #include <linux/dma-mapping.h>
+#include <linux/freezer.h>
 
 #define DRIVER_NAME "tifm_7xx1"
-#define DRIVER_VERSION "0.6"
+#define DRIVER_VERSION "0.7"
 
 static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
 {
-        int cnt;
-        unsigned long flags;
-
-        spin_lock_irqsave(&fm->lock, flags);
-        if (!fm->inhibit_new_cards) {
-                for (cnt = 0; cnt < fm->max_sockets; cnt++) {
-                        if (fm->sockets[cnt] == sock) {
-                                fm->remove_mask |= (1 << cnt);
-                                queue_work(fm->wq, &fm->media_remover);
-                                break;
-                        }
-                }
-        }
-        spin_unlock_irqrestore(&fm->lock, flags);
-}
-
-static void tifm_7xx1_remove_media(struct work_struct *work)
-{
-        struct tifm_adapter *fm =
-                container_of(work, struct tifm_adapter, media_remover);
         unsigned long flags;
-        int cnt;
-        struct tifm_dev *sock;
 
-        if (!class_device_get(&fm->cdev))
-                return;
         spin_lock_irqsave(&fm->lock, flags);
-        for (cnt = 0; cnt < fm->max_sockets; cnt++) {
-                if (fm->sockets[cnt] && (fm->remove_mask & (1 << cnt))) {
-                        printk(KERN_INFO DRIVER_NAME
-                               ": demand removing card from socket %d\n", cnt);
-                        sock = fm->sockets[cnt];
-                        fm->sockets[cnt] = NULL;
-                        fm->remove_mask &= ~(1 << cnt);
-
-                        writel(0x0e00, sock->addr + SOCK_CONTROL);
-
-                        writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
-                               fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
-                        writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
-                               fm->addr + FM_SET_INTERRUPT_ENABLE);
-
-                        spin_unlock_irqrestore(&fm->lock, flags);
-                        device_unregister(&sock->dev);
-                        spin_lock_irqsave(&fm->lock, flags);
-                }
-        }
+        fm->socket_change_set |= 1 << sock->socket_id;
+        wake_up_all(&fm->change_set_notify);
         spin_unlock_irqrestore(&fm->lock, flags);
-        class_device_put(&fm->cdev);
 }
 
 static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
 {
         struct tifm_adapter *fm = dev_id;
+        struct tifm_dev *sock;
         unsigned int irq_status;
         unsigned int sock_irq_status, cnt;
 
@@ -84,42 +43,32 @@ static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id) | |||
84 | if (irq_status & TIFM_IRQ_ENABLE) { | 43 | if (irq_status & TIFM_IRQ_ENABLE) { |
85 | writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | 44 | writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); |
86 | 45 | ||
87 | for (cnt = 0; cnt < fm->max_sockets; cnt++) { | 46 | for (cnt = 0; cnt < fm->num_sockets; cnt++) { |
88 | sock_irq_status = (irq_status >> cnt) & | 47 | sock = fm->sockets[cnt]; |
89 | (TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK); | 48 | sock_irq_status = (irq_status >> cnt) |
90 | 49 | & (TIFM_IRQ_FIFOMASK(1) | |
91 | if (fm->sockets[cnt]) { | 50 | | TIFM_IRQ_CARDMASK(1)); |
92 | if (sock_irq_status && | ||
93 | fm->sockets[cnt]->signal_irq) | ||
94 | sock_irq_status = fm->sockets[cnt]-> | ||
95 | signal_irq(fm->sockets[cnt], | ||
96 | sock_irq_status); | ||
97 | 51 | ||
98 | if (irq_status & (1 << cnt)) | 52 | if (sock && sock_irq_status) |
99 | fm->remove_mask |= 1 << cnt; | 53 | sock->signal_irq(sock, sock_irq_status); |
100 | } else { | ||
101 | if (irq_status & (1 << cnt)) | ||
102 | fm->insert_mask |= 1 << cnt; | ||
103 | } | ||
104 | } | 54 | } |
55 | |||
56 | fm->socket_change_set |= irq_status | ||
57 | & ((1 << fm->num_sockets) - 1); | ||
105 | } | 58 | } |
106 | writel(irq_status, fm->addr + FM_INTERRUPT_STATUS); | 59 | writel(irq_status, fm->addr + FM_INTERRUPT_STATUS); |
107 | 60 | ||
108 | if (!fm->inhibit_new_cards) { | 61 | if (!fm->socket_change_set) |
109 | if (!fm->remove_mask && !fm->insert_mask) { | 62 | writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE); |
110 | writel(TIFM_IRQ_ENABLE, | 63 | else |
111 | fm->addr + FM_SET_INTERRUPT_ENABLE); | 64 | wake_up_all(&fm->change_set_notify); |
112 | } else { | ||
113 | queue_work(fm->wq, &fm->media_remover); | ||
114 | queue_work(fm->wq, &fm->media_inserter); | ||
115 | } | ||
116 | } | ||
117 | 65 | ||
118 | spin_unlock(&fm->lock); | 66 | spin_unlock(&fm->lock); |
119 | return IRQ_HANDLED; | 67 | return IRQ_HANDLED; |
120 | } | 68 | } |
121 | 69 | ||
122 | static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is_x2) | 70 | static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, |
71 | int is_x2) | ||
123 | { | 72 | { |
124 | unsigned int s_state; | 73 | unsigned int s_state; |
125 | int cnt; | 74 | int cnt; |
@@ -127,8 +76,8 @@ static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is | |||
127 | writel(0x0e00, sock_addr + SOCK_CONTROL); | 76 | writel(0x0e00, sock_addr + SOCK_CONTROL); |
128 | 77 | ||
129 | for (cnt = 0; cnt < 100; cnt++) { | 78 | for (cnt = 0; cnt < 100; cnt++) { |
130 | if (!(TIFM_SOCK_STATE_POWERED & | 79 | if (!(TIFM_SOCK_STATE_POWERED |
131 | readl(sock_addr + SOCK_PRESENT_STATE))) | 80 | & readl(sock_addr + SOCK_PRESENT_STATE))) |
132 | break; | 81 | break; |
133 | msleep(10); | 82 | msleep(10); |
134 | } | 83 | } |
@@ -151,8 +100,8 @@ static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is | |||
151 | } | 100 | } |
152 | 101 | ||
153 | for (cnt = 0; cnt < 100; cnt++) { | 102 | for (cnt = 0; cnt < 100; cnt++) { |
154 | if ((TIFM_SOCK_STATE_POWERED & | 103 | if ((TIFM_SOCK_STATE_POWERED |
155 | readl(sock_addr + SOCK_PRESENT_STATE))) | 104 | & readl(sock_addr + SOCK_PRESENT_STATE))) |
156 | break; | 105 | break; |
157 | msleep(10); | 106 | msleep(10); |
158 | } | 107 | } |
@@ -170,130 +119,209 @@ tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num) | |||
170 | return base_addr + ((sock_num + 1) << 10); | 119 | return base_addr + ((sock_num + 1) << 10); |
171 | } | 120 | } |
172 | 121 | ||
173 | static void tifm_7xx1_insert_media(struct work_struct *work) | 122 | static int tifm_7xx1_switch_media(void *data) |
174 | { | 123 | { |
175 | struct tifm_adapter *fm = | 124 | struct tifm_adapter *fm = data; |
176 | container_of(work, struct tifm_adapter, media_inserter); | ||
177 | unsigned long flags; | 125 | unsigned long flags; |
178 | tifm_media_id media_id; | 126 | tifm_media_id media_id; |
179 | char *card_name = "xx"; | 127 | char *card_name = "xx"; |
180 | int cnt, ok_to_register; | 128 | int cnt, rc; |
181 | unsigned int insert_mask; | 129 | struct tifm_dev *sock; |
182 | struct tifm_dev *new_sock = NULL; | 130 | unsigned int socket_change_set; |
183 | 131 | ||
184 | if (!class_device_get(&fm->cdev)) | 132 | while (1) { |
185 | return; | 133 | rc = wait_event_interruptible(fm->change_set_notify, |
186 | spin_lock_irqsave(&fm->lock, flags); | 134 | fm->socket_change_set); |
187 | insert_mask = fm->insert_mask; | 135 | if (rc == -ERESTARTSYS) |
188 | fm->insert_mask = 0; | 136 | try_to_freeze(); |
189 | if (fm->inhibit_new_cards) { | 137 | |
138 | spin_lock_irqsave(&fm->lock, flags); | ||
139 | socket_change_set = fm->socket_change_set; | ||
140 | fm->socket_change_set = 0; | ||
141 | |||
142 | dev_dbg(fm->dev, "checking media set %x\n", | ||
143 | socket_change_set); | ||
144 | |||
145 | if (kthread_should_stop()) | ||
146 | socket_change_set = (1 << fm->num_sockets) - 1; | ||
190 | spin_unlock_irqrestore(&fm->lock, flags); | 147 | spin_unlock_irqrestore(&fm->lock, flags); |
191 | class_device_put(&fm->cdev); | ||
192 | return; | ||
193 | } | ||
194 | spin_unlock_irqrestore(&fm->lock, flags); | ||
195 | 148 | ||
196 | for (cnt = 0; cnt < fm->max_sockets; cnt++) { | 149 | if (!socket_change_set) |
197 | if (!(insert_mask & (1 << cnt))) | ||
198 | continue; | 150 | continue; |
199 | 151 | ||
200 | media_id = tifm_7xx1_toggle_sock_power(tifm_7xx1_sock_addr(fm->addr, cnt), | 152 | spin_lock_irqsave(&fm->lock, flags); |
201 | fm->max_sockets == 2); | 153 | for (cnt = 0; cnt < fm->num_sockets; cnt++) { |
202 | if (media_id) { | 154 | if (!(socket_change_set & (1 << cnt))) |
203 | ok_to_register = 0; | 155 | continue; |
204 | new_sock = tifm_alloc_device(fm, cnt); | 156 | sock = fm->sockets[cnt]; |
205 | if (new_sock) { | 157 | if (sock) { |
206 | new_sock->addr = tifm_7xx1_sock_addr(fm->addr, | ||
207 | cnt); | ||
208 | new_sock->media_id = media_id; | ||
209 | switch (media_id) { | ||
210 | case 1: | ||
211 | card_name = "xd"; | ||
212 | break; | ||
213 | case 2: | ||
214 | card_name = "ms"; | ||
215 | break; | ||
216 | case 3: | ||
217 | card_name = "sd"; | ||
218 | break; | ||
219 | default: | ||
220 | break; | ||
221 | } | ||
222 | snprintf(new_sock->dev.bus_id, BUS_ID_SIZE, | ||
223 | "tifm_%s%u:%u", card_name, fm->id, cnt); | ||
224 | printk(KERN_INFO DRIVER_NAME | 158 | printk(KERN_INFO DRIVER_NAME |
225 | ": %s card detected in socket %d\n", | 159 | ": demand removing card from socket %d\n", |
226 | card_name, cnt); | 160 | cnt); |
161 | fm->sockets[cnt] = NULL; | ||
162 | spin_unlock_irqrestore(&fm->lock, flags); | ||
163 | device_unregister(&sock->dev); | ||
227 | spin_lock_irqsave(&fm->lock, flags); | 164 | spin_lock_irqsave(&fm->lock, flags); |
228 | if (!fm->sockets[cnt]) { | 165 | writel(0x0e00, |
229 | fm->sockets[cnt] = new_sock; | 166 | tifm_7xx1_sock_addr(fm->addr, cnt) |
230 | ok_to_register = 1; | 167 | + SOCK_CONTROL); |
168 | } | ||
169 | if (kthread_should_stop()) | ||
170 | continue; | ||
171 | |||
172 | spin_unlock_irqrestore(&fm->lock, flags); | ||
173 | media_id = tifm_7xx1_toggle_sock_power( | ||
174 | tifm_7xx1_sock_addr(fm->addr, cnt), | ||
175 | fm->num_sockets == 2); | ||
176 | if (media_id) { | ||
177 | sock = tifm_alloc_device(fm); | ||
178 | if (sock) { | ||
179 | sock->addr = tifm_7xx1_sock_addr(fm->addr, | ||
180 | cnt); | ||
181 | sock->media_id = media_id; | ||
182 | sock->socket_id = cnt; | ||
183 | switch (media_id) { | ||
184 | case 1: | ||
185 | card_name = "xd"; | ||
186 | break; | ||
187 | case 2: | ||
188 | card_name = "ms"; | ||
189 | break; | ||
190 | case 3: | ||
191 | card_name = "sd"; | ||
192 | break; | ||
193 | default: | ||
194 | tifm_free_device(&sock->dev); | ||
195 | spin_lock_irqsave(&fm->lock, flags); | ||
196 | continue; | ||
197 | } | ||
198 | snprintf(sock->dev.bus_id, BUS_ID_SIZE, | ||
199 | "tifm_%s%u:%u", card_name, | ||
200 | fm->id, cnt); | ||
201 | printk(KERN_INFO DRIVER_NAME | ||
202 | ": %s card detected in socket %d\n", | ||
203 | card_name, cnt); | ||
204 | if (!device_register(&sock->dev)) { | ||
205 | spin_lock_irqsave(&fm->lock, flags); | ||
206 | if (!fm->sockets[cnt]) { | ||
207 | fm->sockets[cnt] = sock; | ||
208 | sock = NULL; | ||
209 | } | ||
210 | spin_unlock_irqrestore(&fm->lock, flags); | ||
211 | } | ||
212 | if (sock) | ||
213 | tifm_free_device(&sock->dev); | ||
231 | } | 214 | } |
215 | spin_lock_irqsave(&fm->lock, flags); | ||
216 | } | ||
217 | } | ||
218 | |||
219 | if (!kthread_should_stop()) { | ||
220 | writel(TIFM_IRQ_FIFOMASK(socket_change_set) | ||
221 | | TIFM_IRQ_CARDMASK(socket_change_set), | ||
222 | fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | ||
223 | writel(TIFM_IRQ_FIFOMASK(socket_change_set) | ||
224 | | TIFM_IRQ_CARDMASK(socket_change_set), | ||
225 | fm->addr + FM_SET_INTERRUPT_ENABLE); | ||
226 | writel(TIFM_IRQ_ENABLE, | ||
227 | fm->addr + FM_SET_INTERRUPT_ENABLE); | ||
228 | spin_unlock_irqrestore(&fm->lock, flags); | ||
229 | } else { | ||
230 | for (cnt = 0; cnt < fm->num_sockets; cnt++) { | ||
231 | if (fm->sockets[cnt]) | ||
232 | fm->socket_change_set |= 1 << cnt; | ||
233 | } | ||
234 | if (!fm->socket_change_set) { | ||
235 | spin_unlock_irqrestore(&fm->lock, flags); | ||
236 | return 0; | ||
237 | } else { | ||
232 | spin_unlock_irqrestore(&fm->lock, flags); | 238 | spin_unlock_irqrestore(&fm->lock, flags); |
233 | if (!ok_to_register || | ||
234 | device_register(&new_sock->dev)) { | ||
235 | spin_lock_irqsave(&fm->lock, flags); | ||
236 | fm->sockets[cnt] = NULL; | ||
237 | spin_unlock_irqrestore(&fm->lock, | ||
238 | flags); | ||
239 | tifm_free_device(&new_sock->dev); | ||
240 | } | ||
241 | } | 239 | } |
242 | } | 240 | } |
243 | writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt, | ||
244 | fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | ||
245 | writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt, | ||
246 | fm->addr + FM_SET_INTERRUPT_ENABLE); | ||
247 | } | 241 | } |
248 | 242 | return 0; | |
249 | writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE); | ||
250 | class_device_put(&fm->cdev); | ||
251 | } | 243 | } |
252 | 244 | ||
245 | #ifdef CONFIG_PM | ||
246 | |||
253 | static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state) | 247 | static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state) |
254 | { | 248 | { |
255 | struct tifm_adapter *fm = pci_get_drvdata(dev); | 249 | dev_dbg(&dev->dev, "suspending host\n"); |
256 | unsigned long flags; | ||
257 | 250 | ||
258 | spin_lock_irqsave(&fm->lock, flags); | 251 | pci_save_state(dev); |
259 | fm->inhibit_new_cards = 1; | 252 | pci_enable_wake(dev, pci_choose_state(dev, state), 0); |
260 | fm->remove_mask = 0xf; | 253 | pci_disable_device(dev); |
261 | fm->insert_mask = 0; | 254 | pci_set_power_state(dev, pci_choose_state(dev, state)); |
262 | writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | ||
263 | spin_unlock_irqrestore(&fm->lock, flags); | ||
264 | flush_workqueue(fm->wq); | ||
265 | |||
266 | tifm_7xx1_remove_media(&fm->media_remover); | ||
267 | |||
268 | pci_set_power_state(dev, PCI_D3hot); | ||
269 | pci_disable_device(dev); | ||
270 | pci_save_state(dev); | ||
271 | return 0; | 255 | return 0; |
272 | } | 256 | } |
273 | 257 | ||
274 | static int tifm_7xx1_resume(struct pci_dev *dev) | 258 | static int tifm_7xx1_resume(struct pci_dev *dev) |
275 | { | 259 | { |
276 | struct tifm_adapter *fm = pci_get_drvdata(dev); | 260 | struct tifm_adapter *fm = pci_get_drvdata(dev); |
261 | int cnt, rc; | ||
277 | unsigned long flags; | 262 | unsigned long flags; |
263 | tifm_media_id new_ids[fm->num_sockets]; | ||
278 | 264 | ||
265 | pci_set_power_state(dev, PCI_D0); | ||
279 | pci_restore_state(dev); | 266 | pci_restore_state(dev); |
280 | pci_enable_device(dev); | 267 | rc = pci_enable_device(dev); |
281 | pci_set_power_state(dev, PCI_D0); | 268 | if (rc) |
282 | pci_set_master(dev); | 269 | return rc; |
270 | pci_set_master(dev); | ||
283 | 271 | ||
272 | dev_dbg(&dev->dev, "resuming host\n"); | ||
273 | |||
274 | for (cnt = 0; cnt < fm->num_sockets; cnt++) | ||
275 | new_ids[cnt] = tifm_7xx1_toggle_sock_power( | ||
276 | tifm_7xx1_sock_addr(fm->addr, cnt), | ||
277 | fm->num_sockets == 2); | ||
284 | spin_lock_irqsave(&fm->lock, flags); | 278 | spin_lock_irqsave(&fm->lock, flags); |
285 | fm->inhibit_new_cards = 0; | 279 | fm->socket_change_set = 0; |
286 | writel(TIFM_IRQ_SETALL, fm->addr + FM_INTERRUPT_STATUS); | 280 | for (cnt = 0; cnt < fm->num_sockets; cnt++) { |
287 | writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | 281 | if (fm->sockets[cnt]) { |
288 | writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SETALLSOCK, | 282 | if (fm->sockets[cnt]->media_id == new_ids[cnt]) |
289 | fm->addr + FM_SET_INTERRUPT_ENABLE); | 283 | fm->socket_change_set |= 1 << cnt; |
290 | fm->insert_mask = 0xf; | 284 | |
285 | fm->sockets[cnt]->media_id = new_ids[cnt]; | ||
286 | } | ||
287 | } | ||
288 | |||
289 | writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), | ||
290 | fm->addr + FM_SET_INTERRUPT_ENABLE); | ||
291 | if (!fm->socket_change_set) { | ||
292 | spin_unlock_irqrestore(&fm->lock, flags); | ||
293 | return 0; | ||
294 | } else { | ||
295 | fm->socket_change_set = 0; | ||
296 | spin_unlock_irqrestore(&fm->lock, flags); | ||
297 | } | ||
298 | |||
299 | wait_event_timeout(fm->change_set_notify, fm->socket_change_set, HZ); | ||
300 | |||
301 | spin_lock_irqsave(&fm->lock, flags); | ||
302 | writel(TIFM_IRQ_FIFOMASK(fm->socket_change_set) | ||
303 | | TIFM_IRQ_CARDMASK(fm->socket_change_set), | ||
304 | fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | ||
305 | writel(TIFM_IRQ_FIFOMASK(fm->socket_change_set) | ||
306 | | TIFM_IRQ_CARDMASK(fm->socket_change_set), | ||
307 | fm->addr + FM_SET_INTERRUPT_ENABLE); | ||
308 | writel(TIFM_IRQ_ENABLE, | ||
309 | fm->addr + FM_SET_INTERRUPT_ENABLE); | ||
310 | fm->socket_change_set = 0; | ||
311 | |||
291 | spin_unlock_irqrestore(&fm->lock, flags); | 312 | spin_unlock_irqrestore(&fm->lock, flags); |
292 | return 0; | 313 | return 0; |
293 | } | 314 | } |
294 | 315 | ||
316 | #else | ||
317 | |||
318 | #define tifm_7xx1_suspend NULL | ||
319 | #define tifm_7xx1_resume NULL | ||
320 | |||
321 | #endif /* CONFIG_PM */ | ||
322 | |||
295 | static int tifm_7xx1_probe(struct pci_dev *dev, | 323 | static int tifm_7xx1_probe(struct pci_dev *dev, |
296 | const struct pci_device_id *dev_id) | 324 | const struct pci_device_id *dev_id) |
297 | { | 325 | { |
298 | struct tifm_adapter *fm; | 326 | struct tifm_adapter *fm; |
299 | int pci_dev_busy = 0; | 327 | int pci_dev_busy = 0; |
@@ -324,19 +352,18 @@ static int tifm_7xx1_probe(struct pci_dev *dev, | |||
324 | } | 352 | } |
325 | 353 | ||
326 | fm->dev = &dev->dev; | 354 | fm->dev = &dev->dev; |
327 | fm->max_sockets = (dev->device == 0x803B) ? 2 : 4; | 355 | fm->num_sockets = (dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM) |
328 | fm->sockets = kzalloc(sizeof(struct tifm_dev*) * fm->max_sockets, | 356 | ? 4 : 2; |
329 | GFP_KERNEL); | 357 | fm->sockets = kzalloc(sizeof(struct tifm_dev*) * fm->num_sockets, |
358 | GFP_KERNEL); | ||
330 | if (!fm->sockets) | 359 | if (!fm->sockets) |
331 | goto err_out_free; | 360 | goto err_out_free; |
332 | 361 | ||
333 | INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media); | ||
334 | INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media); | ||
335 | fm->eject = tifm_7xx1_eject; | 362 | fm->eject = tifm_7xx1_eject; |
336 | pci_set_drvdata(dev, fm); | 363 | pci_set_drvdata(dev, fm); |
337 | 364 | ||
338 | fm->addr = ioremap(pci_resource_start(dev, 0), | 365 | fm->addr = ioremap(pci_resource_start(dev, 0), |
339 | pci_resource_len(dev, 0)); | 366 | pci_resource_len(dev, 0)); |
340 | if (!fm->addr) | 367 | if (!fm->addr) |
341 | goto err_out_free; | 368 | goto err_out_free; |
342 | 369 | ||
@@ -344,16 +371,15 @@ static int tifm_7xx1_probe(struct pci_dev *dev, | |||
344 | if (rc) | 371 | if (rc) |
345 | goto err_out_unmap; | 372 | goto err_out_unmap; |
346 | 373 | ||
347 | rc = tifm_add_adapter(fm); | 374 | init_waitqueue_head(&fm->change_set_notify); |
375 | rc = tifm_add_adapter(fm, tifm_7xx1_switch_media); | ||
348 | if (rc) | 376 | if (rc) |
349 | goto err_out_irq; | 377 | goto err_out_irq; |
350 | 378 | ||
351 | writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | 379 | writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); |
352 | writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SETALLSOCK, | 380 | writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), |
353 | fm->addr + FM_SET_INTERRUPT_ENABLE); | 381 | fm->addr + FM_SET_INTERRUPT_ENABLE); |
354 | 382 | wake_up_process(fm->media_switcher); | |
355 | fm->insert_mask = 0xf; | ||
356 | |||
357 | return 0; | 383 | return 0; |
358 | 384 | ||
359 | err_out_irq: | 385 | err_out_irq: |
@@ -377,19 +403,15 @@ static void tifm_7xx1_remove(struct pci_dev *dev) | |||
377 | struct tifm_adapter *fm = pci_get_drvdata(dev); | 403 | struct tifm_adapter *fm = pci_get_drvdata(dev); |
378 | unsigned long flags; | 404 | unsigned long flags; |
379 | 405 | ||
406 | writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | ||
407 | mmiowb(); | ||
408 | free_irq(dev->irq, fm); | ||
409 | |||
380 | spin_lock_irqsave(&fm->lock, flags); | 410 | spin_lock_irqsave(&fm->lock, flags); |
381 | fm->inhibit_new_cards = 1; | 411 | fm->socket_change_set = (1 << fm->num_sockets) - 1; |
382 | fm->remove_mask = 0xf; | ||
383 | fm->insert_mask = 0; | ||
384 | writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | ||
385 | spin_unlock_irqrestore(&fm->lock, flags); | 412 | spin_unlock_irqrestore(&fm->lock, flags); |
386 | 413 | ||
387 | flush_workqueue(fm->wq); | 414 | kthread_stop(fm->media_switcher); |
388 | |||
389 | tifm_7xx1_remove_media(&fm->media_remover); | ||
390 | |||
391 | writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | ||
392 | free_irq(dev->irq, fm); | ||
393 | 415 | ||
394 | tifm_remove_adapter(fm); | 416 | tifm_remove_adapter(fm); |
395 | 417 | ||
@@ -404,10 +426,12 @@ static void tifm_7xx1_remove(struct pci_dev *dev) | |||
404 | } | 426 | } |
405 | 427 | ||
406 | static struct pci_device_id tifm_7xx1_pci_tbl [] = { | 428 | static struct pci_device_id tifm_7xx1_pci_tbl [] = { |
407 | { PCI_VENDOR_ID_TI, 0x8033, PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 429 | { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11_FM, PCI_ANY_ID, |
408 | 0 }, /* xx21 - the one I have */ | 430 | PCI_ANY_ID, 0, 0, 0 }, /* xx21 - the one I have */ |
409 | { PCI_VENDOR_ID_TI, 0x803B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 431 | { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12_FM, PCI_ANY_ID, |
410 | 0 }, /* xx12 - should be also supported */ | 432 | PCI_ANY_ID, 0, 0, 0 }, |
433 | { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX20_FM, PCI_ANY_ID, | ||
434 | PCI_ANY_ID, 0, 0, 0 }, | ||
411 | { } | 435 | { } |
412 | }; | 436 | }; |
413 | 437 | ||
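The tifm_7xx1 changes above drop the per-adapter workqueue in favour of a dedicated kthread that sleeps on change_set_notify. Stripped of locking and the actual socket handling, the event-loop pattern looks roughly like this; it is a sketch for orientation, not the driver code itself (see tifm_7xx1_switch_media() above for the real logic):

#include <linux/tifm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>

static int media_switcher(void *data)
{
        struct tifm_adapter *fm = data;

        while (1) {
                /* Sleep until the ISR sets a bit in socket_change_set. */
                if (wait_event_interruptible(fm->change_set_notify,
                                             fm->socket_change_set) == -ERESTARTSYS)
                        try_to_freeze();        /* play nicely with system suspend */

                /* ... take fm->lock, consume socket_change_set,
                 *     register/unregister socket devices ... */

                if (kthread_should_stop())
                        return 0;       /* adapter is being removed */
        }
}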
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index d61df5c3ac36..6b10ebe9d936 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/idr.h> | 14 | #include <linux/idr.h> |
15 | 15 | ||
16 | #define DRIVER_NAME "tifm_core" | 16 | #define DRIVER_NAME "tifm_core" |
17 | #define DRIVER_VERSION "0.6" | 17 | #define DRIVER_VERSION "0.7" |
18 | 18 | ||
19 | static DEFINE_IDR(tifm_adapter_idr); | 19 | static DEFINE_IDR(tifm_adapter_idr); |
20 | static DEFINE_SPINLOCK(tifm_adapter_lock); | 20 | static DEFINE_SPINLOCK(tifm_adapter_lock); |
@@ -60,10 +60,41 @@ static int tifm_uevent(struct device *dev, char **envp, int num_envp, | |||
60 | return 0; | 60 | return 0; |
61 | } | 61 | } |
62 | 62 | ||
63 | #ifdef CONFIG_PM | ||
64 | |||
65 | static int tifm_device_suspend(struct device *dev, pm_message_t state) | ||
66 | { | ||
67 | struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); | ||
68 | struct tifm_driver *drv = fm_dev->drv; | ||
69 | |||
70 | if (drv && drv->suspend) | ||
71 | return drv->suspend(fm_dev, state); | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | static int tifm_device_resume(struct device *dev) | ||
76 | { | ||
77 | struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); | ||
78 | struct tifm_driver *drv = fm_dev->drv; | ||
79 | |||
80 | if (drv && drv->resume) | ||
81 | return drv->resume(fm_dev); | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | #else | ||
86 | |||
87 | #define tifm_device_suspend NULL | ||
88 | #define tifm_device_resume NULL | ||
89 | |||
90 | #endif /* CONFIG_PM */ | ||
91 | |||
63 | static struct bus_type tifm_bus_type = { | 92 | static struct bus_type tifm_bus_type = { |
64 | .name = "tifm", | 93 | .name = "tifm", |
65 | .match = tifm_match, | 94 | .match = tifm_match, |
66 | .uevent = tifm_uevent, | 95 | .uevent = tifm_uevent, |
96 | .suspend = tifm_device_suspend, | ||
97 | .resume = tifm_device_resume | ||
67 | }; | 98 | }; |
68 | 99 | ||
69 | static void tifm_free(struct class_device *cdev) | 100 | static void tifm_free(struct class_device *cdev) |
@@ -71,8 +102,6 @@ static void tifm_free(struct class_device *cdev) | |||
71 | struct tifm_adapter *fm = container_of(cdev, struct tifm_adapter, cdev); | 102 | struct tifm_adapter *fm = container_of(cdev, struct tifm_adapter, cdev); |
72 | 103 | ||
73 | kfree(fm->sockets); | 104 | kfree(fm->sockets); |
74 | if (fm->wq) | ||
75 | destroy_workqueue(fm->wq); | ||
76 | kfree(fm); | 105 | kfree(fm); |
77 | } | 106 | } |
78 | 107 | ||
@@ -101,7 +130,8 @@ void tifm_free_adapter(struct tifm_adapter *fm) | |||
101 | } | 130 | } |
102 | EXPORT_SYMBOL(tifm_free_adapter); | 131 | EXPORT_SYMBOL(tifm_free_adapter); |
103 | 132 | ||
104 | int tifm_add_adapter(struct tifm_adapter *fm) | 133 | int tifm_add_adapter(struct tifm_adapter *fm, |
134 | int (*mediathreadfn)(void *data)) | ||
105 | { | 135 | { |
106 | int rc; | 136 | int rc; |
107 | 137 | ||
@@ -113,10 +143,10 @@ int tifm_add_adapter(struct tifm_adapter *fm) | |||
113 | spin_unlock(&tifm_adapter_lock); | 143 | spin_unlock(&tifm_adapter_lock); |
114 | if (!rc) { | 144 | if (!rc) { |
115 | snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id); | 145 | snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id); |
116 | strncpy(fm->wq_name, fm->cdev.class_id, KOBJ_NAME_LEN); | 146 | fm->media_switcher = kthread_create(mediathreadfn, |
147 | fm, "tifm/%u", fm->id); | ||
117 | 148 | ||
118 | fm->wq = create_singlethread_workqueue(fm->wq_name); | 149 | if (!IS_ERR(fm->media_switcher)) |
119 | if (fm->wq) | ||
120 | return class_device_add(&fm->cdev); | 150 | return class_device_add(&fm->cdev); |
121 | 151 | ||
122 | spin_lock(&tifm_adapter_lock); | 152 | spin_lock(&tifm_adapter_lock); |
@@ -141,27 +171,27 @@ EXPORT_SYMBOL(tifm_remove_adapter); | |||
141 | void tifm_free_device(struct device *dev) | 171 | void tifm_free_device(struct device *dev) |
142 | { | 172 | { |
143 | struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); | 173 | struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); |
144 | if (fm_dev->wq) | ||
145 | destroy_workqueue(fm_dev->wq); | ||
146 | kfree(fm_dev); | 174 | kfree(fm_dev); |
147 | } | 175 | } |
148 | EXPORT_SYMBOL(tifm_free_device); | 176 | EXPORT_SYMBOL(tifm_free_device); |
149 | 177 | ||
150 | struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id) | 178 | static void tifm_dummy_signal_irq(struct tifm_dev *sock, |
179 | unsigned int sock_irq_status) | ||
180 | { | ||
181 | return; | ||
182 | } | ||
183 | |||
184 | struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm) | ||
151 | { | 185 | { |
152 | struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL); | 186 | struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL); |
153 | 187 | ||
154 | if (dev) { | 188 | if (dev) { |
155 | spin_lock_init(&dev->lock); | 189 | spin_lock_init(&dev->lock); |
156 | snprintf(dev->wq_name, KOBJ_NAME_LEN, "tifm%u:%u", fm->id, id); | 190 | |
157 | dev->wq = create_singlethread_workqueue(dev->wq_name); | ||
158 | if (!dev->wq) { | ||
159 | kfree(dev); | ||
160 | return NULL; | ||
161 | } | ||
162 | dev->dev.parent = fm->dev; | 191 | dev->dev.parent = fm->dev; |
163 | dev->dev.bus = &tifm_bus_type; | 192 | dev->dev.bus = &tifm_bus_type; |
164 | dev->dev.release = tifm_free_device; | 193 | dev->dev.release = tifm_free_device; |
194 | dev->signal_irq = tifm_dummy_signal_irq; | ||
165 | } | 195 | } |
166 | return dev; | 196 | return dev; |
167 | } | 197 | } |
@@ -219,6 +249,7 @@ static int tifm_device_remove(struct device *dev) | |||
219 | struct tifm_driver *drv = fm_dev->drv; | 249 | struct tifm_driver *drv = fm_dev->drv; |
220 | 250 | ||
221 | if (drv) { | 251 | if (drv) { |
252 | fm_dev->signal_irq = tifm_dummy_signal_irq; | ||
222 | if (drv->remove) | 253 | if (drv->remove) |
223 | drv->remove(fm_dev); | 254 | drv->remove(fm_dev); |
224 | fm_dev->drv = NULL; | 255 | fm_dev->drv = NULL; |
@@ -233,6 +264,8 @@ int tifm_register_driver(struct tifm_driver *drv) | |||
233 | drv->driver.bus = &tifm_bus_type; | 264 | drv->driver.bus = &tifm_bus_type; |
234 | drv->driver.probe = tifm_device_probe; | 265 | drv->driver.probe = tifm_device_probe; |
235 | drv->driver.remove = tifm_device_remove; | 266 | drv->driver.remove = tifm_device_remove; |
267 | drv->driver.suspend = tifm_device_suspend; | ||
268 | drv->driver.resume = tifm_device_resume; | ||
236 | 269 | ||
237 | return driver_register(&drv->driver); | 270 | return driver_register(&drv->driver); |
238 | } | 271 | } |
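tifm_core now routes bus-level power management to the client drivers through the new suspend/resume members of struct tifm_driver. A hypothetical socket driver would hook them roughly like this; foo_card_* are made-up names, only the callback signatures come from the code above:

#include <linux/tifm.h>

static int foo_card_suspend(struct tifm_dev *sock, pm_message_t state)
{
        /* quiesce the socket and remember any card state */
        return 0;
}

static int foo_card_resume(struct tifm_dev *sock)
{
        /* reprogram the socket, re-detect the card if needed */
        return 0;
}

static struct tifm_driver foo_card_driver = {
        .driver  = { .name = "foo_card" },
        /* .id_table, .probe and .remove omitted from this sketch */
        .suspend = foo_card_suspend,
        .resume  = foo_card_resume,
};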
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index aa152f31851e..2ce50f38e3c7 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -823,6 +823,9 @@ static int __init at91_mci_probe(struct platform_device *pdev)
         mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
         mmc->caps = MMC_CAP_BYTEBLOCK;
 
+        mmc->max_blk_size = 4095;
+        mmc->max_blk_count = mmc->max_req_size;
+
         host = mmc_priv(mmc);
         host->mmc = mmc;
         host->buffer = NULL;
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 800527cf40d5..b834be261ab7 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -152,8 +152,9 @@ static inline int au1xmmc_card_inserted(struct au1xmmc_host *host)
                 ? 1 : 0;
 }
 
-static inline int au1xmmc_card_readonly(struct au1xmmc_host *host)
+static int au1xmmc_card_readonly(struct mmc_host *mmc)
 {
+        struct au1xmmc_host *host = mmc_priv(mmc);
         return (bcsr->status & au1xmmc_card_table[host->id].wpstatus)
                 ? 1 : 0;
 }
@@ -193,6 +194,8 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
         u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
 
         switch (mmc_resp_type(cmd)) {
+        case MMC_RSP_NONE:
+                break;
         case MMC_RSP_R1:
                 mmccmd |= SD_CMD_RT_1;
                 break;
@@ -205,6 +208,10 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
         case MMC_RSP_R3:
                 mmccmd |= SD_CMD_RT_3;
                 break;
+        default:
+                printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
+                        mmc_resp_type(cmd));
+                return MMC_ERR_INVALID;
         }
 
         switch(cmd->opcode) {
@@ -878,6 +885,7 @@ static void au1xmmc_init_dma(struct au1xmmc_host *host)
 static const struct mmc_host_ops au1xmmc_ops = {
         .request = au1xmmc_request,
         .set_ios = au1xmmc_set_ios,
+        .get_ro = au1xmmc_card_readonly,
 };
 
 static int __devinit au1xmmc_probe(struct platform_device *pdev)
@@ -914,6 +922,9 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
         mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
         mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;
 
+        mmc->max_blk_size = 2048;
+        mmc->max_blk_count = 512;
+
         mmc->ocr_avail = AU1XMMC_OCR;
 
         host = mmc_priv(mmc);
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index bfb9ff693208..b060d4bfba29 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -958,8 +958,10 @@ static int imxmci_probe(struct platform_device *pdev)
         /* MMC core transfer sizes tunable parameters */
         mmc->max_hw_segs = 64;
         mmc->max_phys_segs = 64;
-        mmc->max_sectors = 64; /* default 1 << (PAGE_CACHE_SHIFT - 9) */
         mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
+        mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
+        mmc->max_blk_size = 2048;
+        mmc->max_blk_count = 65535;
 
         host = mmc_priv(mmc);
         host->mmc = mmc;
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 6f2a282e2b97..5046a1661342 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -103,11 +103,16 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) | |||
103 | mmc_hostname(host), mrq->cmd->opcode, | 103 | mmc_hostname(host), mrq->cmd->opcode, |
104 | mrq->cmd->arg, mrq->cmd->flags); | 104 | mrq->cmd->arg, mrq->cmd->flags); |
105 | 105 | ||
106 | WARN_ON(host->card_busy == NULL); | 106 | WARN_ON(!host->claimed); |
107 | 107 | ||
108 | mrq->cmd->error = 0; | 108 | mrq->cmd->error = 0; |
109 | mrq->cmd->mrq = mrq; | 109 | mrq->cmd->mrq = mrq; |
110 | if (mrq->data) { | 110 | if (mrq->data) { |
111 | BUG_ON(mrq->data->blksz > host->max_blk_size); | ||
112 | BUG_ON(mrq->data->blocks > host->max_blk_count); | ||
113 | BUG_ON(mrq->data->blocks * mrq->data->blksz > | ||
114 | host->max_req_size); | ||
115 | |||
111 | mrq->cmd->data = mrq->data; | 116 | mrq->cmd->data = mrq->data; |
112 | mrq->data->error = 0; | 117 | mrq->data->error = 0; |
113 | mrq->data->mrq = mrq; | 118 | mrq->data->mrq = mrq; |
@@ -157,7 +162,7 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries | |||
157 | { | 162 | { |
158 | struct mmc_request mrq; | 163 | struct mmc_request mrq; |
159 | 164 | ||
160 | BUG_ON(host->card_busy == NULL); | 165 | BUG_ON(!host->claimed); |
161 | 166 | ||
162 | memset(&mrq, 0, sizeof(struct mmc_request)); | 167 | memset(&mrq, 0, sizeof(struct mmc_request)); |
163 | 168 | ||
@@ -195,7 +200,7 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca, | |||
195 | 200 | ||
196 | int i, err; | 201 | int i, err; |
197 | 202 | ||
198 | BUG_ON(host->card_busy == NULL); | 203 | BUG_ON(!host->claimed); |
199 | BUG_ON(retries < 0); | 204 | BUG_ON(retries < 0); |
200 | 205 | ||
201 | err = MMC_ERR_INVALID; | 206 | err = MMC_ERR_INVALID; |
@@ -289,7 +294,10 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card, | |||
289 | else | 294 | else |
290 | limit_us = 100000; | 295 | limit_us = 100000; |
291 | 296 | ||
292 | if (timeout_us > limit_us) { | 297 | /* |
298 | * SDHC cards always use these fixed values. | ||
299 | */ | ||
300 | if (timeout_us > limit_us || mmc_card_blockaddr(card)) { | ||
293 | data->timeout_ns = limit_us * 1000; | 301 | data->timeout_ns = limit_us * 1000; |
294 | data->timeout_clks = 0; | 302 | data->timeout_clks = 0; |
295 | } | 303 | } |
@@ -320,14 +328,14 @@ int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card) | |||
320 | spin_lock_irqsave(&host->lock, flags); | 328 | spin_lock_irqsave(&host->lock, flags); |
321 | while (1) { | 329 | while (1) { |
322 | set_current_state(TASK_UNINTERRUPTIBLE); | 330 | set_current_state(TASK_UNINTERRUPTIBLE); |
323 | if (host->card_busy == NULL) | 331 | if (!host->claimed) |
324 | break; | 332 | break; |
325 | spin_unlock_irqrestore(&host->lock, flags); | 333 | spin_unlock_irqrestore(&host->lock, flags); |
326 | schedule(); | 334 | schedule(); |
327 | spin_lock_irqsave(&host->lock, flags); | 335 | spin_lock_irqsave(&host->lock, flags); |
328 | } | 336 | } |
329 | set_current_state(TASK_RUNNING); | 337 | set_current_state(TASK_RUNNING); |
330 | host->card_busy = card; | 338 | host->claimed = 1; |
331 | spin_unlock_irqrestore(&host->lock, flags); | 339 | spin_unlock_irqrestore(&host->lock, flags); |
332 | remove_wait_queue(&host->wq, &wait); | 340 | remove_wait_queue(&host->wq, &wait); |
333 | 341 | ||
@@ -353,10 +361,10 @@ void mmc_release_host(struct mmc_host *host) | |||
353 | { | 361 | { |
354 | unsigned long flags; | 362 | unsigned long flags; |
355 | 363 | ||
356 | BUG_ON(host->card_busy == NULL); | 364 | BUG_ON(!host->claimed); |
357 | 365 | ||
358 | spin_lock_irqsave(&host->lock, flags); | 366 | spin_lock_irqsave(&host->lock, flags); |
359 | host->card_busy = NULL; | 367 | host->claimed = 0; |
360 | spin_unlock_irqrestore(&host->lock, flags); | 368 | spin_unlock_irqrestore(&host->lock, flags); |
361 | 369 | ||
362 | wake_up(&host->wq); | 370 | wake_up(&host->wq); |
@@ -372,7 +380,7 @@ static inline void mmc_set_ios(struct mmc_host *host) | |||
372 | mmc_hostname(host), ios->clock, ios->bus_mode, | 380 | mmc_hostname(host), ios->clock, ios->bus_mode, |
373 | ios->power_mode, ios->chip_select, ios->vdd, | 381 | ios->power_mode, ios->chip_select, ios->vdd, |
374 | ios->bus_width); | 382 | ios->bus_width); |
375 | 383 | ||
376 | host->ops->set_ios(host, ios); | 384 | host->ops->set_ios(host, ios); |
377 | } | 385 | } |
378 | 386 | ||
@@ -381,7 +389,7 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card) | |||
381 | int err; | 389 | int err; |
382 | struct mmc_command cmd; | 390 | struct mmc_command cmd; |
383 | 391 | ||
384 | BUG_ON(host->card_busy == NULL); | 392 | BUG_ON(!host->claimed); |
385 | 393 | ||
386 | if (host->card_selected == card) | 394 | if (host->card_selected == card) |
387 | return MMC_ERR_NONE; | 395 | return MMC_ERR_NONE; |
@@ -588,34 +596,65 @@ static void mmc_decode_csd(struct mmc_card *card) | |||
588 | 596 | ||
589 | if (mmc_card_sd(card)) { | 597 | if (mmc_card_sd(card)) { |
590 | csd_struct = UNSTUFF_BITS(resp, 126, 2); | 598 | csd_struct = UNSTUFF_BITS(resp, 126, 2); |
591 | if (csd_struct != 0) { | 599 | |
600 | switch (csd_struct) { | ||
601 | case 0: | ||
602 | m = UNSTUFF_BITS(resp, 115, 4); | ||
603 | e = UNSTUFF_BITS(resp, 112, 3); | ||
604 | csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; | ||
605 | csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; | ||
606 | |||
607 | m = UNSTUFF_BITS(resp, 99, 4); | ||
608 | e = UNSTUFF_BITS(resp, 96, 3); | ||
609 | csd->max_dtr = tran_exp[e] * tran_mant[m]; | ||
610 | csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); | ||
611 | |||
612 | e = UNSTUFF_BITS(resp, 47, 3); | ||
613 | m = UNSTUFF_BITS(resp, 62, 12); | ||
614 | csd->capacity = (1 + m) << (e + 2); | ||
615 | |||
616 | csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); | ||
617 | csd->read_partial = UNSTUFF_BITS(resp, 79, 1); | ||
618 | csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); | ||
619 | csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); | ||
620 | csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); | ||
621 | csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); | ||
622 | csd->write_partial = UNSTUFF_BITS(resp, 21, 1); | ||
623 | break; | ||
624 | case 1: | ||
625 | /* | ||
626 | * This is a block-addressed SDHC card. Most | ||
627 | * interesting fields are unused and have fixed | ||
628 | * values. To avoid getting tripped by buggy cards, | ||
629 | * we assume those fixed values ourselves. | ||
630 | */ | ||
631 | mmc_card_set_blockaddr(card); | ||
632 | |||
633 | csd->tacc_ns = 0; /* Unused */ | ||
634 | csd->tacc_clks = 0; /* Unused */ | ||
635 | |||
636 | m = UNSTUFF_BITS(resp, 99, 4); | ||
637 | e = UNSTUFF_BITS(resp, 96, 3); | ||
638 | csd->max_dtr = tran_exp[e] * tran_mant[m]; | ||
639 | csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); | ||
640 | |||
641 | m = UNSTUFF_BITS(resp, 48, 22); | ||
642 | csd->capacity = (1 + m) << 10; | ||
643 | |||
644 | csd->read_blkbits = 9; | ||
645 | csd->read_partial = 0; | ||
646 | csd->write_misalign = 0; | ||
647 | csd->read_misalign = 0; | ||
648 | csd->r2w_factor = 4; /* Unused */ | ||
649 | csd->write_blkbits = 9; | ||
650 | csd->write_partial = 0; | ||
651 | break; | ||
652 | default: | ||
592 | printk("%s: unrecognised CSD structure version %d\n", | 653 | printk("%s: unrecognised CSD structure version %d\n", |
593 | mmc_hostname(card->host), csd_struct); | 654 | mmc_hostname(card->host), csd_struct); |
594 | mmc_card_set_bad(card); | 655 | mmc_card_set_bad(card); |
595 | return; | 656 | return; |
596 | } | 657 | } |
597 | |||
598 | m = UNSTUFF_BITS(resp, 115, 4); | ||
599 | e = UNSTUFF_BITS(resp, 112, 3); | ||
600 | csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; | ||
601 | csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; | ||
602 | |||
603 | m = UNSTUFF_BITS(resp, 99, 4); | ||
604 | e = UNSTUFF_BITS(resp, 96, 3); | ||
605 | csd->max_dtr = tran_exp[e] * tran_mant[m]; | ||
606 | csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); | ||
607 | |||
608 | e = UNSTUFF_BITS(resp, 47, 3); | ||
609 | m = UNSTUFF_BITS(resp, 62, 12); | ||
610 | csd->capacity = (1 + m) << (e + 2); | ||
611 | |||
612 | csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); | ||
613 | csd->read_partial = UNSTUFF_BITS(resp, 79, 1); | ||
614 | csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); | ||
615 | csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); | ||
616 | csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); | ||
617 | csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); | ||
618 | csd->write_partial = UNSTUFF_BITS(resp, 21, 1); | ||
619 | } else { | 658 | } else { |
620 | /* | 659 | /* |
621 | * We only understand CSD structure v1.1 and v1.2. | 660 | * We only understand CSD structure v1.1 and v1.2. |
@@ -848,6 +887,41 @@ static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) | |||
848 | return err; | 887 | return err; |
849 | } | 888 | } |
850 | 889 | ||
890 | static int mmc_send_if_cond(struct mmc_host *host, u32 ocr, int *rsd2) | ||
891 | { | ||
892 | struct mmc_command cmd; | ||
893 | int err, sd2; | ||
894 | static const u8 test_pattern = 0xAA; | ||
895 | |||
896 | /* | ||
897 | * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND | ||
898 | * before SD_APP_OP_COND. This command will harmlessly fail for | ||
899 | * SD 1.0 cards. | ||
900 | */ | ||
901 | cmd.opcode = SD_SEND_IF_COND; | ||
902 | cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern; | ||
903 | cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR; | ||
904 | |||
905 | err = mmc_wait_for_cmd(host, &cmd, 0); | ||
906 | if (err == MMC_ERR_NONE) { | ||
907 | if ((cmd.resp[0] & 0xFF) == test_pattern) { | ||
908 | sd2 = 1; | ||
909 | } else { | ||
910 | sd2 = 0; | ||
911 | err = MMC_ERR_FAILED; | ||
912 | } | ||
913 | } else { | ||
914 | /* | ||
915 | * Treat errors as SD 1.0 card. | ||
916 | */ | ||
917 | sd2 = 0; | ||
918 | err = MMC_ERR_NONE; | ||
919 | } | ||
920 | if (rsd2) | ||
921 | *rsd2 = sd2; | ||
922 | return err; | ||
923 | } | ||
924 | |||
851 | /* | 925 | /* |
852 | * Discover cards by requesting their CID. If this command | 926 | * Discover cards by requesting their CID. If this command |
853 | * times out, it is not an error; there are no further cards | 927 | * times out, it is not an error; there are no further cards |
@@ -1018,7 +1092,8 @@ static void mmc_process_ext_csds(struct mmc_host *host) | |||
1018 | mmc_wait_for_req(host, &mrq); | 1092 | mmc_wait_for_req(host, &mrq); |
1019 | 1093 | ||
1020 | if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { | 1094 | if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { |
1021 | mmc_card_set_dead(card); | 1095 | printk("%s: unable to read EXT_CSD, performance " |
1096 | "might suffer.\n", mmc_hostname(card->host)); | ||
1022 | continue; | 1097 | continue; |
1023 | } | 1098 | } |
1024 | 1099 | ||
@@ -1034,7 +1109,6 @@ static void mmc_process_ext_csds(struct mmc_host *host) | |||
1034 | printk("%s: card is mmc v4 but doesn't support " | 1109 | printk("%s: card is mmc v4 but doesn't support " |
1035 | "any high-speed modes.\n", | 1110 | "any high-speed modes.\n", |
1036 | mmc_hostname(card->host)); | 1111 | mmc_hostname(card->host)); |
1037 | mmc_card_set_bad(card); | ||
1038 | continue; | 1112 | continue; |
1039 | } | 1113 | } |
1040 | 1114 | ||
@@ -1215,7 +1289,9 @@ static void mmc_read_switch_caps(struct mmc_host *host) | |||
1215 | mmc_wait_for_req(host, &mrq); | 1289 | mmc_wait_for_req(host, &mrq); |
1216 | 1290 | ||
1217 | if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { | 1291 | if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { |
1218 | mmc_card_set_dead(card); | 1292 | printk("%s: unable to read switch capabilities, " |
1293 | "performance might suffer.\n", | ||
1294 | mmc_hostname(card->host)); | ||
1219 | continue; | 1295 | continue; |
1220 | } | 1296 | } |
1221 | 1297 | ||
@@ -1247,12 +1323,8 @@ static void mmc_read_switch_caps(struct mmc_host *host) | |||
1247 | 1323 | ||
1248 | mmc_wait_for_req(host, &mrq); | 1324 | mmc_wait_for_req(host, &mrq); |
1249 | 1325 | ||
1250 | if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { | 1326 | if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE || |
1251 | mmc_card_set_dead(card); | 1327 | (status[16] & 0xF) != 1) { |
1252 | continue; | ||
1253 | } | ||
1254 | |||
1255 | if ((status[16] & 0xF) != 1) { | ||
1256 | printk(KERN_WARNING "%s: Problem switching card " | 1328 | printk(KERN_WARNING "%s: Problem switching card " |
1257 | "into high-speed mode!\n", | 1329 | "into high-speed mode!\n", |
1258 | mmc_hostname(host)); | 1330 | mmc_hostname(host)); |
@@ -1334,6 +1406,10 @@ static void mmc_setup(struct mmc_host *host) | |||
1334 | mmc_power_up(host); | 1406 | mmc_power_up(host); |
1335 | mmc_idle_cards(host); | 1407 | mmc_idle_cards(host); |
1336 | 1408 | ||
1409 | err = mmc_send_if_cond(host, host->ocr_avail, NULL); | ||
1410 | if (err != MMC_ERR_NONE) { | ||
1411 | return; | ||
1412 | } | ||
1337 | err = mmc_send_app_op_cond(host, 0, &ocr); | 1413 | err = mmc_send_app_op_cond(host, 0, &ocr); |
1338 | 1414 | ||
1339 | /* | 1415 | /* |
@@ -1386,10 +1462,21 @@ static void mmc_setup(struct mmc_host *host) | |||
1386 | * all get the idea that they should be ready for CMD2. | 1462 | * all get the idea that they should be ready for CMD2. |
1387 | * (My SanDisk card seems to need this.) | 1463 | * (My SanDisk card seems to need this.) |
1388 | */ | 1464 | */ |
1389 | if (host->mode == MMC_MODE_SD) | 1465 | if (host->mode == MMC_MODE_SD) { |
1390 | mmc_send_app_op_cond(host, host->ocr, NULL); | 1466 | int err, sd2; |
1391 | else | 1467 | err = mmc_send_if_cond(host, host->ocr, &sd2); |
1468 | if (err == MMC_ERR_NONE) { | ||
1469 | /* | ||
1470 | * If SD_SEND_IF_COND indicates an SD 2.0 | ||
1471 | * compliant card and we should set bit 30 | ||
1472 | * of the ocr to indicate that we can handle | ||
1473 | * block-addressed SDHC cards. | ||
1474 | */ | ||
1475 | mmc_send_app_op_cond(host, host->ocr | (sd2 << 30), NULL); | ||
1476 | } | ||
1477 | } else { | ||
1392 | mmc_send_op_cond(host, host->ocr, NULL); | 1478 | mmc_send_op_cond(host, host->ocr, NULL); |
1479 | } | ||
1393 | 1480 | ||
1394 | mmc_discover_cards(host); | 1481 | mmc_discover_cards(host); |
1395 | 1482 | ||
@@ -1519,8 +1606,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) | |||
1519 | */ | 1606 | */ |
1520 | host->max_hw_segs = 1; | 1607 | host->max_hw_segs = 1; |
1521 | host->max_phys_segs = 1; | 1608 | host->max_phys_segs = 1; |
1522 | host->max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); | ||
1523 | host->max_seg_size = PAGE_CACHE_SIZE; | 1609 | host->max_seg_size = PAGE_CACHE_SIZE; |
1610 | |||
1611 | host->max_req_size = PAGE_CACHE_SIZE; | ||
1612 | host->max_blk_size = 512; | ||
1613 | host->max_blk_count = PAGE_CACHE_SIZE / 512; | ||
1524 | } | 1614 | } |
1525 | 1615 | ||
1526 | return host; | 1616 | return host; |
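One practical consequence of the SDHC support added to mmc.c above: for CSD structure version 1 the capacity comes from the 22-bit C_SIZE field as (C_SIZE + 1) * 1024 blocks of 512 bytes (read_blkbits is fixed at 9). The standalone program below, which is illustrative only and uses a made-up example C_SIZE value, shows the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int c_size = 15159; /* hypothetical 22-bit C_SIZE from a nominal 8 GB card */
        unsigned long long blocks = (1ull + c_size) << 10; /* csd->capacity, 512-byte units */
        unsigned long long bytes  = blocks << 9;

        printf("%llu blocks = %llu bytes (~%llu MiB)\n",
               blocks, bytes, bytes >> 20);
        return 0;
}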
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 87713572293f..05ba8ace70e7 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -237,13 +237,17 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                 brq.mrq.cmd = &brq.cmd;
                 brq.mrq.data = &brq.data;
 
-                brq.cmd.arg = req->sector << 9;
+                brq.cmd.arg = req->sector;
+                if (!mmc_card_blockaddr(card))
+                        brq.cmd.arg <<= 9;
                 brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
                 brq.data.blksz = 1 << md->block_bits;
-                brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
                 brq.stop.opcode = MMC_STOP_TRANSMISSION;
                 brq.stop.arg = 0;
                 brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+                brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
+                if (brq.data.blocks > card->host->max_blk_count)
+                        brq.data.blocks = card->host->max_blk_count;
 
                 mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
 
@@ -375,9 +379,10 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                 spin_unlock_irq(&md->lock);
         }
 
+flush_queue:
+
         mmc_card_release_host(card);
 
-flush_queue:
         spin_lock_irq(&md->lock);
         while (ret) {
                 ret = end_that_request_chunk(req, 0,
@@ -494,6 +499,10 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
         struct mmc_command cmd;
         int err;
 
+        /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
+        if (mmc_card_blockaddr(card))
+                return 0;
+
         mmc_card_claim_host(card);
         cmd.opcode = MMC_SET_BLOCKLEN;
         cmd.arg = 1 << md->block_bits;
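The mmc_block change above is the visible half of SDHC addressing: READ/WRITE command arguments stay byte offsets for traditional cards but become sector numbers for block-addressed cards. A tiny standalone illustration (not kernel code, just the same shift):

#include <stdio.h>

static unsigned int cmd_arg(unsigned long sector, int blockaddr)
{
        unsigned int arg = sector;

        if (!blockaddr)
                arg <<= 9;      /* legacy cards take a byte offset */
        return arg;
}

int main(void)
{
        printf("sector 2048: legacy arg=%u, block-addressed arg=%u\n",
               cmd_arg(2048, 0), cmd_arg(2048, 1));
        return 0;
}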
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
index 3e35a43819fb..c27e42645cdb 100644
--- a/drivers/mmc/mmc_queue.c
+++ b/drivers/mmc/mmc_queue.c
@@ -147,7 +147,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
         blk_queue_prep_rq(mq->queue, mmc_prep_request);
         blk_queue_bounce_limit(mq->queue, limit);
-        blk_queue_max_sectors(mq->queue, host->max_sectors);
+        blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
         blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
         blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
         blk_queue_max_segment_size(mq->queue, host->max_seg_size);
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index e334acd045bc..d32698b02d7f 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -199,7 +199,7 @@ void mmc_init_card(struct mmc_card *card, struct mmc_host *host)
         memset(card, 0, sizeof(struct mmc_card));
         card->host = host;
         device_initialize(&card->dev);
-        card->dev.parent = mmc_dev(host);
+        card->dev.parent = mmc_classdev(host);
         card->dev.bus = &mmc_bus_type;
         card->dev.release = mmc_release_card;
 }
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index ccfe6561be24..5941dd951e82 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -524,15 +524,24 @@ static int mmci_probe(struct amba_device *dev, void *id)
         /*
          * Since we only have a 16-bit data length register, we must
          * ensure that we don't exceed 2^16-1 bytes in a single request.
-         * Choose 64 (512-byte) sectors as the limit.
          */
-        mmc->max_sectors = 64;
+        mmc->max_req_size = 65535;
 
         /*
          * Set the maximum segment size. Since we aren't doing DMA
          * (yet) we are only limited by the data length register.
          */
-        mmc->max_seg_size = mmc->max_sectors << 9;
+        mmc->max_seg_size = mmc->max_req_size;
+
+        /*
+         * Block size can be up to 2048 bytes, but must be a power of two.
+         */
+        mmc->max_blk_size = 2048;
+
+        /*
+         * No limit on the number of blocks transferred.
+         */
+        mmc->max_blk_count = mmc->max_req_size;
 
         spin_lock_init(&host->lock);
 
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c index d30540b27614..1e96a2f65022 100644 --- a/drivers/mmc/omap.c +++ b/drivers/mmc/omap.c | |||
@@ -1099,8 +1099,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev) | |||
1099 | */ | 1099 | */ |
1100 | mmc->max_phys_segs = 32; | 1100 | mmc->max_phys_segs = 32; |
1101 | mmc->max_hw_segs = 32; | 1101 | mmc->max_hw_segs = 32; |
1102 | mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */ | 1102 | mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */ |
1103 | mmc->max_seg_size = mmc->max_sectors * 512; | 1103 | mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */ |
1104 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | ||
1105 | mmc->max_seg_size = mmc->max_req_size; | ||
1104 | 1106 | ||
1105 | if (host->power_pin >= 0) { | 1107 | if (host->power_pin >= 0) { |
1106 | if ((ret = omap_request_gpio(host->power_pin)) != 0) { | 1108 | if ((ret = omap_request_gpio(host->power_pin)) != 0) { |
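The OMAP values follow from the register widths quoted in the new comments: BLEN and NBLK are 11-bit fields holding value-minus-one (the "(+1)" above), so each tops out at 2048 and the request cap is simply their product. Illustrative macros, names not taken from the driver:

#define OMAP_MMC_BLK_SIZE_MAX	(1 << 11)	/* 11-bit BLEN, stores len-1: 2048 bytes  */
#define OMAP_MMC_BLK_COUNT_MAX	(1 << 11)	/* 11-bit NBLK, stores cnt-1: 2048 blocks */
#define OMAP_MMC_REQ_SIZE_MAX	(OMAP_MMC_BLK_SIZE_MAX * OMAP_MMC_BLK_COUNT_MAX)	/* 4 MiB */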
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c index 6073d998b11f..9774fc68b61a 100644 --- a/drivers/mmc/pxamci.c +++ b/drivers/mmc/pxamci.c | |||
@@ -450,6 +450,16 @@ static int pxamci_probe(struct platform_device *pdev) | |||
450 | */ | 450 | */ |
451 | mmc->max_seg_size = PAGE_SIZE; | 451 | mmc->max_seg_size = PAGE_SIZE; |
452 | 452 | ||
453 | /* | ||
454 | * Block length register is 10 bits. | ||
455 | */ | ||
456 | mmc->max_blk_size = 1023; | ||
457 | |||
458 | /* | ||
459 | * Block count register is 16 bits. | ||
460 | */ | ||
461 | mmc->max_blk_count = 65535; | ||
462 | |||
453 | host = mmc_priv(mmc); | 463 | host = mmc_priv(mmc); |
454 | host->mmc = mmc; | 464 | host->mmc = mmc; |
455 | host->dma = -1; | 465 | host->dma = -1; |
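The PXA caps read the register widths the other way: the values chosen above correspond to registers that store the value directly, so an n-bit field caps at 2^n - 1 rather than 2^n. A small sketch of the two encodings side by side (helper names are illustrative):

#include <linux/types.h>

/* Register stores the value itself: maximum is (2^bits) - 1.       */
static inline unsigned int reg_direct_max(unsigned int bits)
{
	return (1u << bits) - 1;	/* pxamci block length, 10 bits -> 1023  */
}

/* Register stores value minus one: maximum is 2^bits.              */
static inline unsigned int reg_minus_one_max(unsigned int bits)
{
	return 1u << bits;		/* omap BLEN/NBLK, 11 bits -> 2048       */
}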
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c index c2d13d7e9911..4bf1fea5e2c4 100644 --- a/drivers/mmc/sdhci.c +++ b/drivers/mmc/sdhci.c | |||
@@ -37,6 +37,7 @@ static unsigned int debug_quirks = 0; | |||
37 | #define SDHCI_QUIRK_FORCE_DMA (1<<1) | 37 | #define SDHCI_QUIRK_FORCE_DMA (1<<1) |
38 | /* Controller doesn't like some resets when there is no card inserted. */ | 38 | /* Controller doesn't like some resets when there is no card inserted. */ |
39 | #define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) | 39 | #define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) |
40 | #define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) | ||
40 | 41 | ||
41 | static const struct pci_device_id pci_ids[] __devinitdata = { | 42 | static const struct pci_device_id pci_ids[] __devinitdata = { |
42 | { | 43 | { |
@@ -65,6 +66,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = { | |||
65 | .driver_data = SDHCI_QUIRK_FORCE_DMA, | 66 | .driver_data = SDHCI_QUIRK_FORCE_DMA, |
66 | }, | 67 | }, |
67 | 68 | ||
69 | { | ||
70 | .vendor = PCI_VENDOR_ID_ENE, | ||
71 | .device = PCI_DEVICE_ID_ENE_CB712_SD, | ||
72 | .subvendor = PCI_ANY_ID, | ||
73 | .subdevice = PCI_ANY_ID, | ||
74 | .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE, | ||
75 | }, | ||
76 | |||
68 | { /* Generic SD host controller */ | 77 | { /* Generic SD host controller */ |
69 | PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) | 78 | PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) |
70 | }, | 79 | }, |
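The new table entry ties the ENE CB712 to the SINGLE_POWER_WRITE quirk purely through driver_data. A sketch of the assumed plumbing that turns this into a testable flag, with struct and function names chosen for illustration only:

#include <linux/pci.h>

struct sdhci_chip_sketch {
	unsigned int quirks;	/* SDHCI_QUIRK_* mask */
};

static void sdhci_adopt_quirks(struct sdhci_chip_sketch *chip,
			       const struct pci_device_id *id,
			       unsigned int debug_quirks)
{
	chip->quirks = id->driver_data;
	if (debug_quirks)		/* module-parameter override */
		chip->quirks = debug_quirks;
	/* later: if (chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE) ... */
}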
@@ -197,15 +206,9 @@ static void sdhci_deactivate_led(struct sdhci_host *host) | |||
197 | * * | 206 | * * |
198 | \*****************************************************************************/ | 207 | \*****************************************************************************/ |
199 | 208 | ||
200 | static inline char* sdhci_kmap_sg(struct sdhci_host* host) | 209 | static inline char* sdhci_sg_to_buffer(struct sdhci_host* host) |
201 | { | 210 | { |
202 | host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ); | 211 | return page_address(host->cur_sg->page) + host->cur_sg->offset; |
203 | return host->mapped_sg + host->cur_sg->offset; | ||
204 | } | ||
205 | |||
206 | static inline void sdhci_kunmap_sg(struct sdhci_host* host) | ||
207 | { | ||
208 | kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ); | ||
209 | } | 212 | } |
210 | 213 | ||
211 | static inline int sdhci_next_sg(struct sdhci_host* host) | 214 | static inline int sdhci_next_sg(struct sdhci_host* host) |
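sdhci_sg_to_buffer() (and the matching wbsd change later in this merge) trades kmap_atomic() for page_address(), which is only valid for pages with a permanent kernel mapping. The assumption making that safe is the bounce limit configured in mmc_queue above: buffers handed to PIO already sit in lowmem. Reduced to a self-contained sketch:

#include <linux/mm.h>
#include <asm/scatterlist.h>

/* Valid only for lowmem pages - guaranteed here by blk_queue_bounce_limit(). */
static inline char *sg_virt_sketch(struct scatterlist *sg)
{
	return page_address(sg->page) + sg->offset;
}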
@@ -240,7 +243,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host) | |||
240 | chunk_remain = 0; | 243 | chunk_remain = 0; |
241 | data = 0; | 244 | data = 0; |
242 | 245 | ||
243 | buffer = sdhci_kmap_sg(host) + host->offset; | 246 | buffer = sdhci_sg_to_buffer(host) + host->offset; |
244 | 247 | ||
245 | while (blksize) { | 248 | while (blksize) { |
246 | if (chunk_remain == 0) { | 249 | if (chunk_remain == 0) { |
@@ -264,16 +267,13 @@ static void sdhci_read_block_pio(struct sdhci_host *host) | |||
264 | } | 267 | } |
265 | 268 | ||
266 | if (host->remain == 0) { | 269 | if (host->remain == 0) { |
267 | sdhci_kunmap_sg(host); | ||
268 | if (sdhci_next_sg(host) == 0) { | 270 | if (sdhci_next_sg(host) == 0) { |
269 | BUG_ON(blksize != 0); | 271 | BUG_ON(blksize != 0); |
270 | return; | 272 | return; |
271 | } | 273 | } |
272 | buffer = sdhci_kmap_sg(host); | 274 | buffer = sdhci_sg_to_buffer(host); |
273 | } | 275 | } |
274 | } | 276 | } |
275 | |||
276 | sdhci_kunmap_sg(host); | ||
277 | } | 277 | } |
278 | 278 | ||
279 | static void sdhci_write_block_pio(struct sdhci_host *host) | 279 | static void sdhci_write_block_pio(struct sdhci_host *host) |
@@ -290,7 +290,7 @@ static void sdhci_write_block_pio(struct sdhci_host *host) | |||
290 | data = 0; | 290 | data = 0; |
291 | 291 | ||
292 | bytes = 0; | 292 | bytes = 0; |
293 | buffer = sdhci_kmap_sg(host) + host->offset; | 293 | buffer = sdhci_sg_to_buffer(host) + host->offset; |
294 | 294 | ||
295 | while (blksize) { | 295 | while (blksize) { |
296 | size = min(host->size, host->remain); | 296 | size = min(host->size, host->remain); |
@@ -314,16 +314,13 @@ static void sdhci_write_block_pio(struct sdhci_host *host) | |||
314 | } | 314 | } |
315 | 315 | ||
316 | if (host->remain == 0) { | 316 | if (host->remain == 0) { |
317 | sdhci_kunmap_sg(host); | ||
318 | if (sdhci_next_sg(host) == 0) { | 317 | if (sdhci_next_sg(host) == 0) { |
319 | BUG_ON(blksize != 0); | 318 | BUG_ON(blksize != 0); |
320 | return; | 319 | return; |
321 | } | 320 | } |
322 | buffer = sdhci_kmap_sg(host); | 321 | buffer = sdhci_sg_to_buffer(host); |
323 | } | 322 | } |
324 | } | 323 | } |
325 | |||
326 | sdhci_kunmap_sg(host); | ||
327 | } | 324 | } |
328 | 325 | ||
329 | static void sdhci_transfer_pio(struct sdhci_host *host) | 326 | static void sdhci_transfer_pio(struct sdhci_host *host) |
@@ -372,7 +369,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) | |||
372 | 369 | ||
373 | /* Sanity checks */ | 370 | /* Sanity checks */ |
374 | BUG_ON(data->blksz * data->blocks > 524288); | 371 | BUG_ON(data->blksz * data->blocks > 524288); |
375 | BUG_ON(data->blksz > host->max_block); | 372 | BUG_ON(data->blksz > host->mmc->max_blk_size); |
376 | BUG_ON(data->blocks > 65535); | 373 | BUG_ON(data->blocks > 65535); |
377 | 374 | ||
378 | /* timeout in us */ | 375 | /* timeout in us */ |
@@ -674,10 +671,17 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power) | |||
674 | if (host->power == power) | 671 | if (host->power == power) |
675 | return; | 672 | return; |
676 | 673 | ||
677 | writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); | 674 | if (power == (unsigned short)-1) { |
678 | 675 | writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); | |
679 | if (power == (unsigned short)-1) | ||
680 | goto out; | 676 | goto out; |
677 | } | ||
678 | |||
679 | /* | ||
680 | * Spec says that we should clear the power reg before setting | ||
681 | * a new value. Some controllers don't seem to like this though. | ||
682 | */ | ||
683 | if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) | ||
684 | writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); | ||
681 | 685 | ||
682 | pwr = SDHCI_POWER_ON; | 686 | pwr = SDHCI_POWER_ON; |
683 | 687 | ||
@@ -1109,7 +1113,9 @@ static int sdhci_resume (struct pci_dev *pdev) | |||
1109 | 1113 | ||
1110 | pci_set_power_state(pdev, PCI_D0); | 1114 | pci_set_power_state(pdev, PCI_D0); |
1111 | pci_restore_state(pdev); | 1115 | pci_restore_state(pdev); |
1112 | pci_enable_device(pdev); | 1116 | ret = pci_enable_device(pdev); |
1117 | if (ret) | ||
1118 | return ret; | ||
1113 | 1119 | ||
1114 | for (i = 0;i < chip->num_slots;i++) { | 1120 | for (i = 0;i < chip->num_slots;i++) { |
1115 | if (!chip->hosts[i]) | 1121 | if (!chip->hosts[i]) |
@@ -1274,15 +1280,6 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
1274 | if (caps & SDHCI_TIMEOUT_CLK_UNIT) | 1280 | if (caps & SDHCI_TIMEOUT_CLK_UNIT) |
1275 | host->timeout_clk *= 1000; | 1281 | host->timeout_clk *= 1000; |
1276 | 1282 | ||
1277 | host->max_block = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT; | ||
1278 | if (host->max_block >= 3) { | ||
1279 | printk(KERN_ERR "%s: Invalid maximum block size.\n", | ||
1280 | host->slot_descr); | ||
1281 | ret = -ENODEV; | ||
1282 | goto unmap; | ||
1283 | } | ||
1284 | host->max_block = 512 << host->max_block; | ||
1285 | |||
1286 | /* | 1283 | /* |
1287 | * Set host parameters. | 1284 | * Set host parameters. |
1288 | */ | 1285 | */ |
@@ -1294,9 +1291,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
1294 | mmc->ocr_avail = 0; | 1291 | mmc->ocr_avail = 0; |
1295 | if (caps & SDHCI_CAN_VDD_330) | 1292 | if (caps & SDHCI_CAN_VDD_330) |
1296 | mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; | 1293 | mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; |
1297 | else if (caps & SDHCI_CAN_VDD_300) | 1294 | if (caps & SDHCI_CAN_VDD_300) |
1298 | mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; | 1295 | mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; |
1299 | else if (caps & SDHCI_CAN_VDD_180) | 1296 | if (caps & SDHCI_CAN_VDD_180) |
1300 | mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; | 1297 | mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; |
1301 | 1298 | ||
1302 | if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) { | 1299 | if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) { |
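The else-if to plain-if change above is the "fix voltage ocr" commit from this pull: a controller reporting several voltage ranges previously advertised only the highest one. A worked example with both the 3.3 V and 1.8 V capability bits set (constant names as used in the driver):

static u32 sdhci_ocr_example(unsigned int caps)
{
	u32 ocr_avail = 0;

	if (caps & SDHCI_CAN_VDD_330)
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		ocr_avail |= MMC_VDD_17_18 | MMC_VDD_18_19;

	/* caps = SDHCI_CAN_VDD_330 | SDHCI_CAN_VDD_180:
	 *   old else-if chain -> 3.2-3.4 V only
	 *   new independent ifs -> 3.2-3.4 V and 1.7-1.9 V
	 */
	return ocr_avail;
}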
@@ -1326,15 +1323,33 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
1326 | 1323 | ||
1327 | /* | 1324 | /* |
1328 | * Maximum number of sectors in one transfer. Limited by DMA boundary | 1325 | * Maximum number of sectors in one transfer. Limited by DMA boundary |
1329 | * size (512KiB), which means (512 KiB/512=) 1024 entries. | 1326 | * size (512KiB). |
1330 | */ | 1327 | */ |
1331 | mmc->max_sectors = 1024; | 1328 | mmc->max_req_size = 524288; |
1332 | 1329 | ||
1333 | /* | 1330 | /* |
1334 | * Maximum segment size. Could be one segment with the maximum number | 1331 | * Maximum segment size. Could be one segment with the maximum number |
1335 | * of sectors. | 1332 | * of bytes. |
1333 | */ | ||
1334 | mmc->max_seg_size = mmc->max_req_size; | ||
1335 | |||
1336 | /* | ||
1337 | * Maximum block size. This varies from controller to controller and | ||
1338 | * is specified in the capabilities register. | ||
1339 | */ | ||
1340 | mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT; | ||
1341 | if (mmc->max_blk_size >= 3) { | ||
1342 | printk(KERN_ERR "%s: Invalid maximum block size.\n", | ||
1343 | host->slot_descr); | ||
1344 | ret = -ENODEV; | ||
1345 | goto unmap; | ||
1346 | } | ||
1347 | mmc->max_blk_size = 512 << mmc->max_blk_size; | ||
1348 | |||
1349 | /* | ||
1350 | * Maximum block count. | ||
1336 | */ | 1351 | */ |
1337 | mmc->max_seg_size = mmc->max_sectors * 512; | 1352 | mmc->max_blk_count = 65535; |
1338 | 1353 | ||
1339 | /* | 1354 | /* |
1340 | * Init tasklets. | 1355 | * Init tasklets. |
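The block-size capability moved from the removed host->max_block into mmc->max_blk_size, but the decoding is unchanged: the capability field encodes 512 << n, with n >= 3 reserved. Spelled out as a small helper:

#include <linux/errno.h>

/* Capability field n -> maximum block length:
 *   0 -> 512 bytes, 1 -> 1024 bytes, 2 -> 2048 bytes, 3 -> reserved (-ENODEV)
 */
static int sdhci_decode_max_blk_size(unsigned int n)
{
	if (n >= 3)
		return -ENODEV;
	return 512 << n;
}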
diff --git a/drivers/mmc/sdhci.h b/drivers/mmc/sdhci.h index f9d1a0a6f03a..e324f0a623dc 100644 --- a/drivers/mmc/sdhci.h +++ b/drivers/mmc/sdhci.h | |||
@@ -174,7 +174,6 @@ struct sdhci_host { | |||
174 | 174 | ||
175 | unsigned int max_clk; /* Max possible freq (MHz) */ | 175 | unsigned int max_clk; /* Max possible freq (MHz) */ |
176 | unsigned int timeout_clk; /* Timeout freq (KHz) */ | 176 | unsigned int timeout_clk; /* Timeout freq (KHz) */ |
177 | unsigned int max_block; /* Max block size (bytes) */ | ||
178 | 177 | ||
179 | unsigned int clock; /* Current clock (MHz) */ | 178 | unsigned int clock; /* Current clock (MHz) */ |
180 | unsigned short power; /* Current voltage */ | 179 | unsigned short power; /* Current voltage */ |
@@ -184,7 +183,6 @@ struct sdhci_host { | |||
184 | struct mmc_data *data; /* Current data request */ | 183 | struct mmc_data *data; /* Current data request */ |
185 | 184 | ||
186 | struct scatterlist *cur_sg; /* We're working on this */ | 185 | struct scatterlist *cur_sg; /* We're working on this */ |
187 | char *mapped_sg; /* This is where it's mapped */ | ||
188 | int num_sg; /* Entries left */ | 186 | int num_sg; /* Entries left */ |
189 | int offset; /* Offset into current sg */ | 187 | int offset; /* Offset into current sg */ |
190 | int remain; /* Bytes left in current */ | 188 | int remain; /* Bytes left in current */ |
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c index fa4a52886b97..e65f8a0a9349 100644 --- a/drivers/mmc/tifm_sd.c +++ b/drivers/mmc/tifm_sd.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
19 | #define DRIVER_NAME "tifm_sd" | 19 | #define DRIVER_NAME "tifm_sd" |
20 | #define DRIVER_VERSION "0.6" | 20 | #define DRIVER_VERSION "0.7" |
21 | 21 | ||
22 | static int no_dma = 0; | 22 | static int no_dma = 0; |
23 | static int fixed_timeout = 0; | 23 | static int fixed_timeout = 0; |
@@ -79,7 +79,6 @@ typedef enum { | |||
79 | 79 | ||
80 | enum { | 80 | enum { |
81 | FIFO_RDY = 0x0001, /* hardware dependent value */ | 81 | FIFO_RDY = 0x0001, /* hardware dependent value */ |
82 | HOST_REG = 0x0002, | ||
83 | EJECT = 0x0004, | 82 | EJECT = 0x0004, |
84 | EJECT_DONE = 0x0008, | 83 | EJECT_DONE = 0x0008, |
85 | CARD_BUSY = 0x0010, | 84 | CARD_BUSY = 0x0010, |
@@ -95,46 +94,53 @@ struct tifm_sd { | |||
95 | card_state_t state; | 94 | card_state_t state; |
96 | unsigned int clk_freq; | 95 | unsigned int clk_freq; |
97 | unsigned int clk_div; | 96 | unsigned int clk_div; |
98 | unsigned long timeout_jiffies; // software timeout - 2 sec | 97 | unsigned long timeout_jiffies; |
99 | 98 | ||
99 | struct tasklet_struct finish_tasklet; | ||
100 | struct timer_list timer; | ||
100 | struct mmc_request *req; | 101 | struct mmc_request *req; |
101 | struct work_struct cmd_handler; | 102 | wait_queue_head_t notify; |
102 | struct delayed_work abort_handler; | ||
103 | wait_queue_head_t can_eject; | ||
104 | 103 | ||
105 | size_t written_blocks; | 104 | size_t written_blocks; |
106 | char *buffer; | ||
107 | size_t buffer_size; | 105 | size_t buffer_size; |
108 | size_t buffer_pos; | 106 | size_t buffer_pos; |
109 | 107 | ||
110 | }; | 108 | }; |
111 | 109 | ||
110 | static char* tifm_sd_data_buffer(struct mmc_data *data) | ||
111 | { | ||
112 | return page_address(data->sg->page) + data->sg->offset; | ||
113 | } | ||
114 | |||
112 | static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host, | 115 | static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host, |
113 | unsigned int host_status) | 116 | unsigned int host_status) |
114 | { | 117 | { |
115 | struct mmc_command *cmd = host->req->cmd; | 118 | struct mmc_command *cmd = host->req->cmd; |
116 | unsigned int t_val = 0, cnt = 0; | 119 | unsigned int t_val = 0, cnt = 0; |
120 | char *buffer; | ||
117 | 121 | ||
118 | if (host_status & TIFM_MMCSD_BRS) { | 122 | if (host_status & TIFM_MMCSD_BRS) { |
119 | /* in non-dma rx mode BRS fires when fifo is still not empty */ | 123 | /* in non-dma rx mode BRS fires when fifo is still not empty */ |
120 | if (host->buffer && (cmd->data->flags & MMC_DATA_READ)) { | 124 | if (no_dma && (cmd->data->flags & MMC_DATA_READ)) { |
125 | buffer = tifm_sd_data_buffer(host->req->data); | ||
121 | while (host->buffer_size > host->buffer_pos) { | 126 | while (host->buffer_size > host->buffer_pos) { |
122 | t_val = readl(sock->addr + SOCK_MMCSD_DATA); | 127 | t_val = readl(sock->addr + SOCK_MMCSD_DATA); |
123 | host->buffer[host->buffer_pos++] = t_val & 0xff; | 128 | buffer[host->buffer_pos++] = t_val & 0xff; |
124 | host->buffer[host->buffer_pos++] = | 129 | buffer[host->buffer_pos++] = |
125 | (t_val >> 8) & 0xff; | 130 | (t_val >> 8) & 0xff; |
126 | } | 131 | } |
127 | } | 132 | } |
128 | return 1; | 133 | return 1; |
129 | } else if (host->buffer) { | 134 | } else if (no_dma) { |
135 | buffer = tifm_sd_data_buffer(host->req->data); | ||
130 | if ((cmd->data->flags & MMC_DATA_READ) && | 136 | if ((cmd->data->flags & MMC_DATA_READ) && |
131 | (host_status & TIFM_MMCSD_AF)) { | 137 | (host_status & TIFM_MMCSD_AF)) { |
132 | for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { | 138 | for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { |
133 | t_val = readl(sock->addr + SOCK_MMCSD_DATA); | 139 | t_val = readl(sock->addr + SOCK_MMCSD_DATA); |
134 | if (host->buffer_size > host->buffer_pos) { | 140 | if (host->buffer_size > host->buffer_pos) { |
135 | host->buffer[host->buffer_pos++] = | 141 | buffer[host->buffer_pos++] = |
136 | t_val & 0xff; | 142 | t_val & 0xff; |
137 | host->buffer[host->buffer_pos++] = | 143 | buffer[host->buffer_pos++] = |
138 | (t_val >> 8) & 0xff; | 144 | (t_val >> 8) & 0xff; |
139 | } | 145 | } |
140 | } | 146 | } |
@@ -142,11 +148,12 @@ static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host, | |||
142 | && (host_status & TIFM_MMCSD_AE)) { | 148 | && (host_status & TIFM_MMCSD_AE)) { |
143 | for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { | 149 | for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { |
144 | if (host->buffer_size > host->buffer_pos) { | 150 | if (host->buffer_size > host->buffer_pos) { |
145 | t_val = host->buffer[host->buffer_pos++] & 0x00ff; | 151 | t_val = buffer[host->buffer_pos++] |
146 | t_val |= ((host->buffer[host->buffer_pos++]) << 8) | 152 | & 0x00ff; |
147 | & 0xff00; | 153 | t_val |= ((buffer[host->buffer_pos++]) |
154 | << 8) & 0xff00; | ||
148 | writel(t_val, | 155 | writel(t_val, |
149 | sock->addr + SOCK_MMCSD_DATA); | 156 | sock->addr + SOCK_MMCSD_DATA); |
150 | } | 157 | } |
151 | } | 158 | } |
152 | } | 159 | } |
@@ -206,7 +213,7 @@ static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd) | |||
206 | cmd_mask |= TIFM_MMCSD_READ; | 213 | cmd_mask |= TIFM_MMCSD_READ; |
207 | 214 | ||
208 | dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n", | 215 | dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n", |
209 | cmd->opcode, cmd->arg, cmd_mask); | 216 | cmd->opcode, cmd->arg, cmd_mask); |
210 | 217 | ||
211 | writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH); | 218 | writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH); |
212 | writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW); | 219 | writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW); |
@@ -239,65 +246,78 @@ change_state: | |||
239 | tifm_sd_fetch_resp(cmd, sock); | 246 | tifm_sd_fetch_resp(cmd, sock); |
240 | if (cmd->data) { | 247 | if (cmd->data) { |
241 | host->state = BRS; | 248 | host->state = BRS; |
242 | } else | 249 | } else { |
243 | host->state = READY; | 250 | host->state = READY; |
251 | } | ||
244 | goto change_state; | 252 | goto change_state; |
245 | } | 253 | } |
246 | break; | 254 | break; |
247 | case BRS: | 255 | case BRS: |
248 | if (tifm_sd_transfer_data(sock, host, host_status)) { | 256 | if (tifm_sd_transfer_data(sock, host, host_status)) { |
249 | if (!host->req->stop) { | 257 | if (cmd->data->flags & MMC_DATA_WRITE) { |
250 | if (cmd->data->flags & MMC_DATA_WRITE) { | 258 | host->state = CARD; |
251 | host->state = CARD; | 259 | } else { |
260 | if (no_dma) { | ||
261 | if (host->req->stop) { | ||
262 | tifm_sd_exec(host, host->req->stop); | ||
263 | host->state = SCMD; | ||
264 | } else { | ||
265 | host->state = READY; | ||
266 | } | ||
252 | } else { | 267 | } else { |
253 | host->state = | 268 | host->state = FIFO; |
254 | host->buffer ? READY : FIFO; | ||
255 | } | 269 | } |
256 | goto change_state; | ||
257 | } | 270 | } |
258 | tifm_sd_exec(host, host->req->stop); | 271 | goto change_state; |
259 | host->state = SCMD; | ||
260 | } | 272 | } |
261 | break; | 273 | break; |
262 | case SCMD: | 274 | case SCMD: |
263 | if (host_status & TIFM_MMCSD_EOC) { | 275 | if (host_status & TIFM_MMCSD_EOC) { |
264 | tifm_sd_fetch_resp(host->req->stop, sock); | 276 | tifm_sd_fetch_resp(host->req->stop, sock); |
265 | if (cmd->error) { | 277 | host->state = READY; |
266 | host->state = READY; | ||
267 | } else if (cmd->data->flags & MMC_DATA_WRITE) { | ||
268 | host->state = CARD; | ||
269 | } else { | ||
270 | host->state = host->buffer ? READY : FIFO; | ||
271 | } | ||
272 | goto change_state; | 278 | goto change_state; |
273 | } | 279 | } |
274 | break; | 280 | break; |
275 | case CARD: | 281 | case CARD: |
282 | dev_dbg(&sock->dev, "waiting for CARD, have %zd blocks\n", | ||
283 | host->written_blocks); | ||
276 | if (!(host->flags & CARD_BUSY) | 284 | if (!(host->flags & CARD_BUSY) |
277 | && (host->written_blocks == cmd->data->blocks)) { | 285 | && (host->written_blocks == cmd->data->blocks)) { |
278 | host->state = host->buffer ? READY : FIFO; | 286 | if (no_dma) { |
287 | if (host->req->stop) { | ||
288 | tifm_sd_exec(host, host->req->stop); | ||
289 | host->state = SCMD; | ||
290 | } else { | ||
291 | host->state = READY; | ||
292 | } | ||
293 | } else { | ||
294 | host->state = FIFO; | ||
295 | } | ||
279 | goto change_state; | 296 | goto change_state; |
280 | } | 297 | } |
281 | break; | 298 | break; |
282 | case FIFO: | 299 | case FIFO: |
283 | if (host->flags & FIFO_RDY) { | 300 | if (host->flags & FIFO_RDY) { |
284 | host->state = READY; | ||
285 | host->flags &= ~FIFO_RDY; | 301 | host->flags &= ~FIFO_RDY; |
302 | if (host->req->stop) { | ||
303 | tifm_sd_exec(host, host->req->stop); | ||
304 | host->state = SCMD; | ||
305 | } else { | ||
306 | host->state = READY; | ||
307 | } | ||
286 | goto change_state; | 308 | goto change_state; |
287 | } | 309 | } |
288 | break; | 310 | break; |
289 | case READY: | 311 | case READY: |
290 | queue_work(sock->wq, &host->cmd_handler); | 312 | tasklet_schedule(&host->finish_tasklet); |
291 | return; | 313 | return; |
292 | } | 314 | } |
293 | 315 | ||
294 | queue_delayed_work(sock->wq, &host->abort_handler, | ||
295 | host->timeout_jiffies); | ||
296 | } | 316 | } |
297 | 317 | ||
298 | /* Called from interrupt handler */ | 318 | /* Called from interrupt handler */ |
299 | static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, | 319 | static void tifm_sd_signal_irq(struct tifm_dev *sock, |
300 | unsigned int sock_irq_status) | 320 | unsigned int sock_irq_status) |
301 | { | 321 | { |
302 | struct tifm_sd *host; | 322 | struct tifm_sd *host; |
303 | unsigned int host_status = 0, fifo_status = 0; | 323 | unsigned int host_status = 0, fifo_status = 0; |
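The restructured tifm_sd state machine above defers the stop command until the busy and FIFO phases have completed instead of firing it straight from BRS. Summarised for the common cases, as a hedged reading of the new transitions (DMA unless noted):

/* write + stop (DMA) : CMD -> BRS -> CARD -> FIFO -> SCMD -> READY
 * read  + stop (DMA) : CMD -> BRS -> FIFO -> SCMD -> READY
 * read  + stop (PIO) : CMD -> BRS -> SCMD -> READY   (no_dma skips FIFO)
 * no stop            : the SCMD hop is dropped and the chain ends in READY,
 *                      which schedules finish_tasklet to complete the request.
 */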
@@ -305,7 +325,6 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, | |||
305 | 325 | ||
306 | spin_lock(&sock->lock); | 326 | spin_lock(&sock->lock); |
307 | host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); | 327 | host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); |
308 | cancel_delayed_work(&host->abort_handler); | ||
309 | 328 | ||
310 | if (sock_irq_status & FIFO_EVENT) { | 329 | if (sock_irq_status & FIFO_EVENT) { |
311 | fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); | 330 | fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); |
@@ -318,19 +337,17 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, | |||
318 | host_status = readl(sock->addr + SOCK_MMCSD_STATUS); | 337 | host_status = readl(sock->addr + SOCK_MMCSD_STATUS); |
319 | writel(host_status, sock->addr + SOCK_MMCSD_STATUS); | 338 | writel(host_status, sock->addr + SOCK_MMCSD_STATUS); |
320 | 339 | ||
321 | if (!(host->flags & HOST_REG)) | ||
322 | queue_work(sock->wq, &host->cmd_handler); | ||
323 | if (!host->req) | 340 | if (!host->req) |
324 | goto done; | 341 | goto done; |
325 | 342 | ||
326 | if (host_status & TIFM_MMCSD_ERRMASK) { | 343 | if (host_status & TIFM_MMCSD_ERRMASK) { |
327 | if (host_status & TIFM_MMCSD_CERR) | 344 | if (host_status & TIFM_MMCSD_CERR) |
328 | error_code = MMC_ERR_FAILED; | 345 | error_code = MMC_ERR_FAILED; |
329 | else if (host_status & | 346 | else if (host_status |
330 | (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO)) | 347 | & (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO)) |
331 | error_code = MMC_ERR_TIMEOUT; | 348 | error_code = MMC_ERR_TIMEOUT; |
332 | else if (host_status & | 349 | else if (host_status |
333 | (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC)) | 350 | & (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC)) |
334 | error_code = MMC_ERR_BADCRC; | 351 | error_code = MMC_ERR_BADCRC; |
335 | 352 | ||
336 | writel(TIFM_FIFO_INT_SETALL, | 353 | writel(TIFM_FIFO_INT_SETALL, |
@@ -340,12 +357,11 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, | |||
340 | if (host->req->stop) { | 357 | if (host->req->stop) { |
341 | if (host->state == SCMD) { | 358 | if (host->state == SCMD) { |
342 | host->req->stop->error = error_code; | 359 | host->req->stop->error = error_code; |
343 | } else if(host->state == BRS) { | 360 | } else if (host->state == BRS |
361 | || host->state == CARD | ||
362 | || host->state == FIFO) { | ||
344 | host->req->cmd->error = error_code; | 363 | host->req->cmd->error = error_code; |
345 | tifm_sd_exec(host, host->req->stop); | 364 | tifm_sd_exec(host, host->req->stop); |
346 | queue_delayed_work(sock->wq, | ||
347 | &host->abort_handler, | ||
348 | host->timeout_jiffies); | ||
349 | host->state = SCMD; | 365 | host->state = SCMD; |
350 | goto done; | 366 | goto done; |
351 | } else { | 367 | } else { |
@@ -359,8 +375,8 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, | |||
359 | 375 | ||
360 | if (host_status & TIFM_MMCSD_CB) | 376 | if (host_status & TIFM_MMCSD_CB) |
361 | host->flags |= CARD_BUSY; | 377 | host->flags |= CARD_BUSY; |
362 | if ((host_status & TIFM_MMCSD_EOFB) && | 378 | if ((host_status & TIFM_MMCSD_EOFB) |
363 | (host->flags & CARD_BUSY)) { | 379 | && (host->flags & CARD_BUSY)) { |
364 | host->written_blocks++; | 380 | host->written_blocks++; |
365 | host->flags &= ~CARD_BUSY; | 381 | host->flags &= ~CARD_BUSY; |
366 | } | 382 | } |
@@ -370,22 +386,22 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, | |||
370 | tifm_sd_process_cmd(sock, host, host_status); | 386 | tifm_sd_process_cmd(sock, host, host_status); |
371 | done: | 387 | done: |
372 | dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n", | 388 | dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n", |
373 | host_status, fifo_status); | 389 | host_status, fifo_status); |
374 | spin_unlock(&sock->lock); | 390 | spin_unlock(&sock->lock); |
375 | return sock_irq_status; | ||
376 | } | 391 | } |
377 | 392 | ||
378 | static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd) | 393 | static void tifm_sd_prepare_data(struct tifm_sd *host, struct mmc_command *cmd) |
379 | { | 394 | { |
380 | struct tifm_dev *sock = card->dev; | 395 | struct tifm_dev *sock = host->dev; |
381 | unsigned int dest_cnt; | 396 | unsigned int dest_cnt; |
382 | 397 | ||
383 | /* DMA style IO */ | 398 | /* DMA style IO */ |
384 | 399 | dev_dbg(&sock->dev, "setting dma for %d blocks\n", | |
400 | cmd->data->blocks); | ||
385 | writel(TIFM_FIFO_INT_SETALL, | 401 | writel(TIFM_FIFO_INT_SETALL, |
386 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); | 402 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); |
387 | writel(ilog2(cmd->data->blksz) - 2, | 403 | writel(ilog2(cmd->data->blksz) - 2, |
388 | sock->addr + SOCK_FIFO_PAGE_SIZE); | 404 | sock->addr + SOCK_FIFO_PAGE_SIZE); |
389 | writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL); | 405 | writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL); |
390 | writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); | 406 | writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); |
391 | 407 | ||
@@ -399,7 +415,7 @@ static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd) | |||
399 | if (cmd->data->flags & MMC_DATA_WRITE) { | 415 | if (cmd->data->flags & MMC_DATA_WRITE) { |
400 | writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); | 416 | writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); |
401 | writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN, | 417 | writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN, |
402 | sock->addr + SOCK_DMA_CONTROL); | 418 | sock->addr + SOCK_DMA_CONTROL); |
403 | } else { | 419 | } else { |
404 | writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); | 420 | writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); |
405 | writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL); | 421 | writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL); |
@@ -407,7 +423,7 @@ static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd) | |||
407 | } | 423 | } |
408 | 424 | ||
409 | static void tifm_sd_set_data_timeout(struct tifm_sd *host, | 425 | static void tifm_sd_set_data_timeout(struct tifm_sd *host, |
410 | struct mmc_data *data) | 426 | struct mmc_data *data) |
411 | { | 427 | { |
412 | struct tifm_dev *sock = host->dev; | 428 | struct tifm_dev *sock = host->dev; |
413 | unsigned int data_timeout = data->timeout_clks; | 429 | unsigned int data_timeout = data->timeout_clks; |
@@ -416,22 +432,21 @@ static void tifm_sd_set_data_timeout(struct tifm_sd *host, | |||
416 | return; | 432 | return; |
417 | 433 | ||
418 | data_timeout += data->timeout_ns / | 434 | data_timeout += data->timeout_ns / |
419 | ((1000000000 / host->clk_freq) * host->clk_div); | 435 | ((1000000000UL / host->clk_freq) * host->clk_div); |
420 | data_timeout *= 10; // call it fudge factor for now | ||
421 | 436 | ||
422 | if (data_timeout < 0xffff) { | 437 | if (data_timeout < 0xffff) { |
423 | writel((~TIFM_MMCSD_DPE) & | ||
424 | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG), | ||
425 | sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG); | ||
426 | writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); | 438 | writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); |
439 | writel((~TIFM_MMCSD_DPE) | ||
440 | & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG), | ||
441 | sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG); | ||
427 | } else { | 442 | } else { |
428 | writel(TIFM_MMCSD_DPE | | ||
429 | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG), | ||
430 | sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG); | ||
431 | data_timeout = (data_timeout >> 10) + 1; | 443 | data_timeout = (data_timeout >> 10) + 1; |
432 | if(data_timeout > 0xffff) | 444 | if (data_timeout > 0xffff) |
433 | data_timeout = 0; /* set to unlimited */ | 445 | data_timeout = 0; /* set to unlimited */ |
434 | writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); | 446 | writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); |
447 | writel(TIFM_MMCSD_DPE | ||
448 | | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG), | ||
449 | sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG); | ||
435 | } | 450 | } |
436 | } | 451 | } |
437 | 452 | ||
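The reordering above (program SOCK_MMCSD_DATA_TO before flipping TIFM_MMCSD_DPE) also drops the old "x10 fudge factor". A worked pass through the new arithmetic, with assumed but representative numbers:

/* Assume clk_freq = 20 MHz, clk_div = 61, timeout_ns = 250 ms, timeout_clks = 0:
 *
 *   ns per card clock = (1000000000 / 20000000) * 61 = 3050
 *   data_timeout      = 250000000 / 3050             = 81967 clocks
 *
 * 81967 does not fit below 0xffff, so the prescaled branch runs:
 *   (81967 >> 10) + 1 = 81   is written to SOCK_MMCSD_DATA_TO,
 * with TIFM_MMCSD_DPE set, i.e. roughly 81 * 1024 = 82944 clocks (~253 ms).
 */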
@@ -474,11 +489,10 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
474 | } | 489 | } |
475 | 490 | ||
476 | host->req = mrq; | 491 | host->req = mrq; |
492 | mod_timer(&host->timer, jiffies + host->timeout_jiffies); | ||
477 | host->state = CMD; | 493 | host->state = CMD; |
478 | queue_delayed_work(sock->wq, &host->abort_handler, | ||
479 | host->timeout_jiffies); | ||
480 | writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), | 494 | writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), |
481 | sock->addr + SOCK_CONTROL); | 495 | sock->addr + SOCK_CONTROL); |
482 | tifm_sd_exec(host, mrq->cmd); | 496 | tifm_sd_exec(host, mrq->cmd); |
483 | spin_unlock_irqrestore(&sock->lock, flags); | 497 | spin_unlock_irqrestore(&sock->lock, flags); |
484 | return; | 498 | return; |
@@ -493,9 +507,9 @@ err_out: | |||
493 | mmc_request_done(mmc, mrq); | 507 | mmc_request_done(mmc, mrq); |
494 | } | 508 | } |
495 | 509 | ||
496 | static void tifm_sd_end_cmd(struct work_struct *work) | 510 | static void tifm_sd_end_cmd(unsigned long data) |
497 | { | 511 | { |
498 | struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); | 512 | struct tifm_sd *host = (struct tifm_sd*)data; |
499 | struct tifm_dev *sock = host->dev; | 513 | struct tifm_dev *sock = host->dev; |
500 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 514 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
501 | struct mmc_request *mrq; | 515 | struct mmc_request *mrq; |
@@ -504,6 +518,7 @@ static void tifm_sd_end_cmd(struct work_struct *work) | |||
504 | 518 | ||
505 | spin_lock_irqsave(&sock->lock, flags); | 519 | spin_lock_irqsave(&sock->lock, flags); |
506 | 520 | ||
521 | del_timer(&host->timer); | ||
507 | mrq = host->req; | 522 | mrq = host->req; |
508 | host->req = NULL; | 523 | host->req = NULL; |
509 | host->state = IDLE; | 524 | host->state = IDLE; |
@@ -517,8 +532,8 @@ static void tifm_sd_end_cmd(struct work_struct *work) | |||
517 | r_data = mrq->cmd->data; | 532 | r_data = mrq->cmd->data; |
518 | if (r_data) { | 533 | if (r_data) { |
519 | if (r_data->flags & MMC_DATA_WRITE) { | 534 | if (r_data->flags & MMC_DATA_WRITE) { |
520 | r_data->bytes_xfered = host->written_blocks * | 535 | r_data->bytes_xfered = host->written_blocks |
521 | r_data->blksz; | 536 | * r_data->blksz; |
522 | } else { | 537 | } else { |
523 | r_data->bytes_xfered = r_data->blocks - | 538 | r_data->bytes_xfered = r_data->blocks - |
524 | readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; | 539 | readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; |
@@ -532,7 +547,7 @@ static void tifm_sd_end_cmd(struct work_struct *work) | |||
532 | } | 547 | } |
533 | 548 | ||
534 | writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), | 549 | writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), |
535 | sock->addr + SOCK_CONTROL); | 550 | sock->addr + SOCK_CONTROL); |
536 | 551 | ||
537 | spin_unlock_irqrestore(&sock->lock, flags); | 552 | spin_unlock_irqrestore(&sock->lock, flags); |
538 | mmc_request_done(mmc, mrq); | 553 | mmc_request_done(mmc, mrq); |
@@ -544,15 +559,6 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq) | |||
544 | struct tifm_dev *sock = host->dev; | 559 | struct tifm_dev *sock = host->dev; |
545 | unsigned long flags; | 560 | unsigned long flags; |
546 | struct mmc_data *r_data = mrq->cmd->data; | 561 | struct mmc_data *r_data = mrq->cmd->data; |
547 | char *t_buffer = NULL; | ||
548 | |||
549 | if (r_data) { | ||
550 | t_buffer = kmap(r_data->sg->page); | ||
551 | if (!t_buffer) { | ||
552 | printk(KERN_ERR DRIVER_NAME ": kmap failed\n"); | ||
553 | goto err_out; | ||
554 | } | ||
555 | } | ||
556 | 562 | ||
557 | spin_lock_irqsave(&sock->lock, flags); | 563 | spin_lock_irqsave(&sock->lock, flags); |
558 | if (host->flags & EJECT) { | 564 | if (host->flags & EJECT) { |
@@ -569,15 +575,14 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq) | |||
569 | if (r_data) { | 575 | if (r_data) { |
570 | tifm_sd_set_data_timeout(host, r_data); | 576 | tifm_sd_set_data_timeout(host, r_data); |
571 | 577 | ||
572 | host->buffer = t_buffer + r_data->sg->offset; | 578 | host->buffer_size = mrq->cmd->data->blocks |
573 | host->buffer_size = mrq->cmd->data->blocks * | 579 | * mrq->cmd->data->blksz; |
574 | mrq->cmd->data->blksz; | ||
575 | 580 | ||
576 | writel(TIFM_MMCSD_BUFINT | | 581 | writel(TIFM_MMCSD_BUFINT |
577 | readl(sock->addr + SOCK_MMCSD_INT_ENABLE), | 582 | | readl(sock->addr + SOCK_MMCSD_INT_ENABLE), |
578 | sock->addr + SOCK_MMCSD_INT_ENABLE); | 583 | sock->addr + SOCK_MMCSD_INT_ENABLE); |
579 | writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8) | | 584 | writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8) |
580 | (TIFM_MMCSD_FIFO_SIZE - 1), | 585 | | (TIFM_MMCSD_FIFO_SIZE - 1), |
581 | sock->addr + SOCK_MMCSD_BUFFER_CONFIG); | 586 | sock->addr + SOCK_MMCSD_BUFFER_CONFIG); |
582 | 587 | ||
583 | host->written_blocks = 0; | 588 | host->written_blocks = 0; |
@@ -588,26 +593,22 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq) | |||
588 | } | 593 | } |
589 | 594 | ||
590 | host->req = mrq; | 595 | host->req = mrq; |
596 | mod_timer(&host->timer, jiffies + host->timeout_jiffies); | ||
591 | host->state = CMD; | 597 | host->state = CMD; |
592 | queue_delayed_work(sock->wq, &host->abort_handler, | ||
593 | host->timeout_jiffies); | ||
594 | writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), | 598 | writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), |
595 | sock->addr + SOCK_CONTROL); | 599 | sock->addr + SOCK_CONTROL); |
596 | tifm_sd_exec(host, mrq->cmd); | 600 | tifm_sd_exec(host, mrq->cmd); |
597 | spin_unlock_irqrestore(&sock->lock, flags); | 601 | spin_unlock_irqrestore(&sock->lock, flags); |
598 | return; | 602 | return; |
599 | 603 | ||
600 | err_out: | 604 | err_out: |
601 | if (t_buffer) | ||
602 | kunmap(r_data->sg->page); | ||
603 | |||
604 | mrq->cmd->error = MMC_ERR_TIMEOUT; | 605 | mrq->cmd->error = MMC_ERR_TIMEOUT; |
605 | mmc_request_done(mmc, mrq); | 606 | mmc_request_done(mmc, mrq); |
606 | } | 607 | } |
607 | 608 | ||
608 | static void tifm_sd_end_cmd_nodma(struct work_struct *work) | 609 | static void tifm_sd_end_cmd_nodma(unsigned long data) |
609 | { | 610 | { |
610 | struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); | 611 | struct tifm_sd *host = (struct tifm_sd*)data; |
611 | struct tifm_dev *sock = host->dev; | 612 | struct tifm_dev *sock = host->dev; |
612 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 613 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
613 | struct mmc_request *mrq; | 614 | struct mmc_request *mrq; |
@@ -616,6 +617,7 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work) | |||
616 | 617 | ||
617 | spin_lock_irqsave(&sock->lock, flags); | 618 | spin_lock_irqsave(&sock->lock, flags); |
618 | 619 | ||
620 | del_timer(&host->timer); | ||
619 | mrq = host->req; | 621 | mrq = host->req; |
620 | host->req = NULL; | 622 | host->req = NULL; |
621 | host->state = IDLE; | 623 | host->state = IDLE; |
@@ -633,8 +635,8 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work) | |||
633 | sock->addr + SOCK_MMCSD_INT_ENABLE); | 635 | sock->addr + SOCK_MMCSD_INT_ENABLE); |
634 | 636 | ||
635 | if (r_data->flags & MMC_DATA_WRITE) { | 637 | if (r_data->flags & MMC_DATA_WRITE) { |
636 | r_data->bytes_xfered = host->written_blocks * | 638 | r_data->bytes_xfered = host->written_blocks |
637 | r_data->blksz; | 639 | * r_data->blksz; |
638 | } else { | 640 | } else { |
639 | r_data->bytes_xfered = r_data->blocks - | 641 | r_data->bytes_xfered = r_data->blocks - |
640 | readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; | 642 | readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; |
@@ -642,29 +644,44 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work) | |||
642 | r_data->bytes_xfered += r_data->blksz - | 644 | r_data->bytes_xfered += r_data->blksz - |
643 | readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1; | 645 | readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1; |
644 | } | 646 | } |
645 | host->buffer = NULL; | ||
646 | host->buffer_pos = 0; | 647 | host->buffer_pos = 0; |
647 | host->buffer_size = 0; | 648 | host->buffer_size = 0; |
648 | } | 649 | } |
649 | 650 | ||
650 | writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), | 651 | writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), |
651 | sock->addr + SOCK_CONTROL); | 652 | sock->addr + SOCK_CONTROL); |
652 | 653 | ||
653 | spin_unlock_irqrestore(&sock->lock, flags); | 654 | spin_unlock_irqrestore(&sock->lock, flags); |
654 | 655 | ||
655 | if (r_data) | ||
656 | kunmap(r_data->sg->page); | ||
657 | |||
658 | mmc_request_done(mmc, mrq); | 656 | mmc_request_done(mmc, mrq); |
659 | } | 657 | } |
660 | 658 | ||
661 | static void tifm_sd_abort(struct work_struct *work) | 659 | static void tifm_sd_terminate(struct tifm_sd *host) |
660 | { | ||
661 | struct tifm_dev *sock = host->dev; | ||
662 | unsigned long flags; | ||
663 | |||
664 | writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); | ||
665 | mmiowb(); | ||
666 | spin_lock_irqsave(&sock->lock, flags); | ||
667 | host->flags |= EJECT; | ||
668 | if (host->req) { | ||
669 | writel(TIFM_FIFO_INT_SETALL, | ||
670 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); | ||
671 | writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); | ||
672 | tasklet_schedule(&host->finish_tasklet); | ||
673 | } | ||
674 | spin_unlock_irqrestore(&sock->lock, flags); | ||
675 | } | ||
676 | |||
677 | static void tifm_sd_abort(unsigned long data) | ||
662 | { | 678 | { |
663 | struct tifm_sd *host = | 679 | struct tifm_sd *host = (struct tifm_sd*)data; |
664 | container_of(work, struct tifm_sd, abort_handler.work); | ||
665 | 680 | ||
666 | printk(KERN_ERR DRIVER_NAME | 681 | printk(KERN_ERR DRIVER_NAME |
667 | ": card failed to respond for a long period of time"); | 682 | ": card failed to respond for a long period of time"); |
683 | |||
684 | tifm_sd_terminate(host); | ||
668 | tifm_eject(host->dev); | 685 | tifm_eject(host->dev); |
669 | } | 686 | } |
670 | 687 | ||
@@ -683,9 +700,9 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
683 | writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG), | 700 | writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG), |
684 | sock->addr + SOCK_MMCSD_CONFIG); | 701 | sock->addr + SOCK_MMCSD_CONFIG); |
685 | } else { | 702 | } else { |
686 | writel((~TIFM_MMCSD_4BBUS) & | 703 | writel((~TIFM_MMCSD_4BBUS) |
687 | readl(sock->addr + SOCK_MMCSD_CONFIG), | 704 | & readl(sock->addr + SOCK_MMCSD_CONFIG), |
688 | sock->addr + SOCK_MMCSD_CONFIG); | 705 | sock->addr + SOCK_MMCSD_CONFIG); |
689 | } | 706 | } |
690 | 707 | ||
691 | if (ios->clock) { | 708 | if (ios->clock) { |
@@ -704,23 +721,24 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
704 | if ((20000000 / clk_div1) > (24000000 / clk_div2)) { | 721 | if ((20000000 / clk_div1) > (24000000 / clk_div2)) { |
705 | host->clk_freq = 20000000; | 722 | host->clk_freq = 20000000; |
706 | host->clk_div = clk_div1; | 723 | host->clk_div = clk_div1; |
707 | writel((~TIFM_CTRL_FAST_CLK) & | 724 | writel((~TIFM_CTRL_FAST_CLK) |
708 | readl(sock->addr + SOCK_CONTROL), | 725 | & readl(sock->addr + SOCK_CONTROL), |
709 | sock->addr + SOCK_CONTROL); | 726 | sock->addr + SOCK_CONTROL); |
710 | } else { | 727 | } else { |
711 | host->clk_freq = 24000000; | 728 | host->clk_freq = 24000000; |
712 | host->clk_div = clk_div2; | 729 | host->clk_div = clk_div2; |
713 | writel(TIFM_CTRL_FAST_CLK | | 730 | writel(TIFM_CTRL_FAST_CLK |
714 | readl(sock->addr + SOCK_CONTROL), | 731 | | readl(sock->addr + SOCK_CONTROL), |
715 | sock->addr + SOCK_CONTROL); | 732 | sock->addr + SOCK_CONTROL); |
716 | } | 733 | } |
717 | } else { | 734 | } else { |
718 | host->clk_div = 0; | 735 | host->clk_div = 0; |
719 | } | 736 | } |
720 | host->clk_div &= TIFM_MMCSD_CLKMASK; | 737 | host->clk_div &= TIFM_MMCSD_CLKMASK; |
721 | writel(host->clk_div | ((~TIFM_MMCSD_CLKMASK) & | 738 | writel(host->clk_div |
722 | readl(sock->addr + SOCK_MMCSD_CONFIG)), | 739 | | ((~TIFM_MMCSD_CLKMASK) |
723 | sock->addr + SOCK_MMCSD_CONFIG); | 740 | & readl(sock->addr + SOCK_MMCSD_CONFIG)), |
741 | sock->addr + SOCK_MMCSD_CONFIG); | ||
724 | 742 | ||
725 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) | 743 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) |
726 | host->flags |= OPENDRAIN; | 744 | host->flags |= OPENDRAIN; |
@@ -734,7 +752,7 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
734 | // allow removal. | 752 | // allow removal. |
735 | if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) { | 753 | if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) { |
736 | host->flags |= EJECT_DONE; | 754 | host->flags |= EJECT_DONE; |
737 | wake_up_all(&host->can_eject); | 755 | wake_up_all(&host->notify); |
738 | } | 756 | } |
739 | 757 | ||
740 | spin_unlock_irqrestore(&sock->lock, flags); | 758 | spin_unlock_irqrestore(&sock->lock, flags); |
@@ -762,20 +780,67 @@ static struct mmc_host_ops tifm_sd_ops = { | |||
762 | .get_ro = tifm_sd_ro | 780 | .get_ro = tifm_sd_ro |
763 | }; | 781 | }; |
764 | 782 | ||
765 | static void tifm_sd_register_host(struct work_struct *work) | 783 | static int tifm_sd_initialize_host(struct tifm_sd *host) |
766 | { | 784 | { |
767 | struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); | 785 | int rc; |
786 | unsigned int host_status = 0; | ||
768 | struct tifm_dev *sock = host->dev; | 787 | struct tifm_dev *sock = host->dev; |
769 | struct mmc_host *mmc = tifm_get_drvdata(sock); | ||
770 | unsigned long flags; | ||
771 | 788 | ||
772 | spin_lock_irqsave(&sock->lock, flags); | 789 | writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); |
773 | host->flags |= HOST_REG; | 790 | mmiowb(); |
774 | PREPARE_WORK(&host->cmd_handler, | 791 | host->clk_div = 61; |
775 | no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd); | 792 | host->clk_freq = 20000000; |
776 | spin_unlock_irqrestore(&sock->lock, flags); | 793 | writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL); |
777 | dev_dbg(&sock->dev, "adding host\n"); | 794 | writel(host->clk_div | TIFM_MMCSD_POWER, |
778 | mmc_add_host(mmc); | 795 | sock->addr + SOCK_MMCSD_CONFIG); |
796 | |||
797 | /* wait up to 0.51 sec for reset */ | ||
798 | for (rc = 2; rc <= 256; rc <<= 1) { | ||
799 | if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) { | ||
800 | rc = 0; | ||
801 | break; | ||
802 | } | ||
803 | msleep(rc); | ||
804 | } | ||
805 | |||
806 | if (rc) { | ||
807 | printk(KERN_ERR DRIVER_NAME | ||
808 | ": controller failed to reset\n"); | ||
809 | return -ENODEV; | ||
810 | } | ||
811 | |||
812 | writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS); | ||
813 | writel(host->clk_div | TIFM_MMCSD_POWER, | ||
814 | sock->addr + SOCK_MMCSD_CONFIG); | ||
815 | writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); | ||
816 | |||
817 | // command timeout fixed to 64 clocks for now | ||
818 | writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO); | ||
819 | writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND); | ||
820 | |||
821 | /* INAB should take much less than reset */ | ||
822 | for (rc = 1; rc <= 16; rc <<= 1) { | ||
823 | host_status = readl(sock->addr + SOCK_MMCSD_STATUS); | ||
824 | writel(host_status, sock->addr + SOCK_MMCSD_STATUS); | ||
825 | if (!(host_status & TIFM_MMCSD_ERRMASK) | ||
826 | && (host_status & TIFM_MMCSD_EOC)) { | ||
827 | rc = 0; | ||
828 | break; | ||
829 | } | ||
830 | msleep(rc); | ||
831 | } | ||
832 | |||
833 | if (rc) { | ||
834 | printk(KERN_ERR DRIVER_NAME | ||
835 | ": card not ready - probe failed on initialization\n"); | ||
836 | return -ENODEV; | ||
837 | } | ||
838 | |||
839 | writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK, | ||
840 | sock->addr + SOCK_MMCSD_INT_ENABLE); | ||
841 | mmiowb(); | ||
842 | |||
843 | return 0; | ||
779 | } | 844 | } |
780 | 845 | ||
781 | static int tifm_sd_probe(struct tifm_dev *sock) | 846 | static int tifm_sd_probe(struct tifm_dev *sock) |
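Both polling loops in tifm_sd_initialize_host() reuse the loop variable as the msleep() argument, so the wait grows exponentially and the totals match the comments: 2 + 4 + ... + 256 = 510 ms for reset, 1 + 2 + 4 + 8 + 16 = 31 ms for INAB. The pattern, reduced to a sketch in driver context (register and bit picked for the reset case; names otherwise illustrative):

/* Sketch of the backoff used above: poll, then sleep 'step' ms, doubling.
 * Returns 0 once the bit appears, -ENODEV after the budget is exhausted.
 */
static int tifm_wait_bit_sketch(struct tifm_dev *sock, u32 bit)
{
	int step;

	for (step = 2; step <= 256; step <<= 1) {
		if (bit & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS))
			return 0;
		msleep(step);
	}
	return -ENODEV;		/* up to 2 + 4 + ... + 256 = 510 ms of sleeping */
}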
@@ -784,8 +849,8 @@ static int tifm_sd_probe(struct tifm_dev *sock) | |||
784 | struct tifm_sd *host; | 849 | struct tifm_sd *host; |
785 | int rc = -EIO; | 850 | int rc = -EIO; |
786 | 851 | ||
787 | if (!(TIFM_SOCK_STATE_OCCUPIED & | 852 | if (!(TIFM_SOCK_STATE_OCCUPIED |
788 | readl(sock->addr + SOCK_PRESENT_STATE))) { | 853 | & readl(sock->addr + SOCK_PRESENT_STATE))) { |
789 | printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n"); | 854 | printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n"); |
790 | return rc; | 855 | return rc; |
791 | } | 856 | } |
@@ -795,109 +860,99 @@ static int tifm_sd_probe(struct tifm_dev *sock) | |||
795 | return -ENOMEM; | 860 | return -ENOMEM; |
796 | 861 | ||
797 | host = mmc_priv(mmc); | 862 | host = mmc_priv(mmc); |
798 | host->dev = sock; | ||
799 | host->clk_div = 61; | ||
800 | init_waitqueue_head(&host->can_eject); | ||
801 | INIT_WORK(&host->cmd_handler, tifm_sd_register_host); | ||
802 | INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort); | ||
803 | |||
804 | tifm_set_drvdata(sock, mmc); | 863 | tifm_set_drvdata(sock, mmc); |
805 | sock->signal_irq = tifm_sd_signal_irq; | 864 | host->dev = sock; |
806 | |||
807 | host->clk_freq = 20000000; | ||
808 | host->timeout_jiffies = msecs_to_jiffies(1000); | 865 | host->timeout_jiffies = msecs_to_jiffies(1000); |
809 | 866 | ||
867 | init_waitqueue_head(&host->notify); | ||
868 | tasklet_init(&host->finish_tasklet, | ||
869 | no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd, | ||
870 | (unsigned long)host); | ||
871 | setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host); | ||
872 | |||
810 | tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request; | 873 | tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request; |
811 | mmc->ops = &tifm_sd_ops; | 874 | mmc->ops = &tifm_sd_ops; |
812 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | 875 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; |
813 | mmc->caps = MMC_CAP_4_BIT_DATA; | 876 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; |
814 | mmc->f_min = 20000000 / 60; | 877 | mmc->f_min = 20000000 / 60; |
815 | mmc->f_max = 24000000; | 878 | mmc->f_max = 24000000; |
816 | mmc->max_hw_segs = 1; | 879 | mmc->max_hw_segs = 1; |
817 | mmc->max_phys_segs = 1; | 880 | mmc->max_phys_segs = 1; |
818 | mmc->max_sectors = 127; | 881 | // limited by DMA counter - it's safer to stick with |
819 | mmc->max_seg_size = mmc->max_sectors << 11; //2k maximum hw block length | 882 | // block counter has 11 bits though |
820 | 883 | mmc->max_blk_count = 256; | |
821 | writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); | 884 | // 2k maximum hw block length |
822 | writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL); | 885 | mmc->max_blk_size = 2048; |
823 | writel(host->clk_div | TIFM_MMCSD_POWER, | 886 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; |
824 | sock->addr + SOCK_MMCSD_CONFIG); | 887 | mmc->max_seg_size = mmc->max_req_size; |
888 | sock->signal_irq = tifm_sd_signal_irq; | ||
889 | rc = tifm_sd_initialize_host(host); | ||
825 | 890 | ||
826 | for (rc = 0; rc < 50; rc++) { | 891 | if (!rc) |
827 | /* Wait for reset ack */ | 892 | rc = mmc_add_host(mmc); |
828 | if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) { | 893 | if (rc) |
829 | rc = 0; | 894 | goto out_free_mmc; |
830 | break; | ||
831 | } | ||
832 | msleep(10); | ||
833 | } | ||
834 | 895 | ||
835 | if (rc) { | 896 | return 0; |
836 | printk(KERN_ERR DRIVER_NAME | 897 | out_free_mmc: |
837 | ": card not ready - probe failed\n"); | 898 | mmc_free_host(mmc); |
838 | mmc_free_host(mmc); | 899 | return rc; |
839 | return -ENODEV; | 900 | } |
840 | } | ||
841 | 901 | ||
842 | writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS); | 902 | static void tifm_sd_remove(struct tifm_dev *sock) |
843 | writel(host->clk_div | TIFM_MMCSD_POWER, | 903 | { |
844 | sock->addr + SOCK_MMCSD_CONFIG); | 904 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
845 | writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); | 905 | struct tifm_sd *host = mmc_priv(mmc); |
846 | writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK, | ||
847 | sock->addr + SOCK_MMCSD_INT_ENABLE); | ||
848 | 906 | ||
849 | writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO); // command timeout 64 clocks for now | 907 | del_timer_sync(&host->timer); |
850 | writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND); | 908 | tifm_sd_terminate(host); |
851 | writel(host->clk_div | TIFM_MMCSD_POWER, | 909 | wait_event_timeout(host->notify, host->flags & EJECT_DONE, |
852 | sock->addr + SOCK_MMCSD_CONFIG); | 910 | host->timeout_jiffies); |
911 | tasklet_kill(&host->finish_tasklet); | ||
912 | mmc_remove_host(mmc); | ||
853 | 913 | ||
854 | queue_delayed_work(sock->wq, &host->abort_handler, | 914 | /* The meaning of the bit majority in this constant is unknown. */ |
855 | host->timeout_jiffies); | 915 | writel(0xfff8 & readl(sock->addr + SOCK_CONTROL), |
916 | sock->addr + SOCK_CONTROL); | ||
856 | 917 | ||
857 | return 0; | 918 | tifm_set_drvdata(sock, NULL); |
919 | mmc_free_host(mmc); | ||
858 | } | 920 | } |
859 | 921 | ||
860 | static int tifm_sd_host_is_down(struct tifm_dev *sock) | 922 | #ifdef CONFIG_PM |
923 | |||
924 | static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) | ||
861 | { | 925 | { |
862 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 926 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
863 | struct tifm_sd *host = mmc_priv(mmc); | 927 | int rc; |
864 | unsigned long flags; | ||
865 | int rc = 0; | ||
866 | 928 | ||
867 | spin_lock_irqsave(&sock->lock, flags); | 929 | rc = mmc_suspend_host(mmc, state); |
868 | rc = (host->flags & EJECT_DONE); | 930 | /* The meaning of the bit majority in this constant is unknown. */ |
869 | spin_unlock_irqrestore(&sock->lock, flags); | 931 | writel(0xfff8 & readl(sock->addr + SOCK_CONTROL), |
932 | sock->addr + SOCK_CONTROL); | ||
870 | return rc; | 933 | return rc; |
871 | } | 934 | } |
872 | 935 | ||
873 | static void tifm_sd_remove(struct tifm_dev *sock) | 936 | static int tifm_sd_resume(struct tifm_dev *sock) |
874 | { | 937 | { |
875 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 938 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
876 | struct tifm_sd *host = mmc_priv(mmc); | 939 | struct tifm_sd *host = mmc_priv(mmc); |
877 | unsigned long flags; | ||
878 | 940 | ||
879 | spin_lock_irqsave(&sock->lock, flags); | 941 | if (sock->media_id != FM_SD |
880 | host->flags |= EJECT; | 942 | || tifm_sd_initialize_host(host)) { |
881 | if (host->req) | 943 | tifm_eject(sock); |
882 | queue_work(sock->wq, &host->cmd_handler); | 944 | return 0; |
883 | spin_unlock_irqrestore(&sock->lock, flags); | 945 | } else { |
884 | wait_event_timeout(host->can_eject, tifm_sd_host_is_down(sock), | 946 | return mmc_resume_host(mmc); |
885 | host->timeout_jiffies); | 947 | } |
948 | } | ||
886 | 949 | ||
887 | if (host->flags & HOST_REG) | 950 | #else |
888 | mmc_remove_host(mmc); | ||
889 | 951 | ||
890 | /* The meaning of the bit majority in this constant is unknown. */ | 952 | #define tifm_sd_suspend NULL |
891 | writel(0xfff8 & readl(sock->addr + SOCK_CONTROL), | 953 | #define tifm_sd_resume NULL |
892 | sock->addr + SOCK_CONTROL); | ||
893 | writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); | ||
894 | writel(TIFM_FIFO_INT_SETALL, | ||
895 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); | ||
896 | writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); | ||
897 | 954 | ||
898 | tifm_set_drvdata(sock, NULL); | 955 | #endif /* CONFIG_PM */ |
899 | mmc_free_host(mmc); | ||
900 | } | ||
901 | 956 | ||
902 | static tifm_media_id tifm_sd_id_tbl[] = { | 957 | static tifm_media_id tifm_sd_id_tbl[] = { |
903 | FM_SD, 0 | 958 | FM_SD, 0 |
@@ -910,7 +965,9 @@ static struct tifm_driver tifm_sd_driver = { | |||
910 | }, | 965 | }, |
911 | .id_table = tifm_sd_id_tbl, | 966 | .id_table = tifm_sd_id_tbl, |
912 | .probe = tifm_sd_probe, | 967 | .probe = tifm_sd_probe, |
913 | .remove = tifm_sd_remove | 968 | .remove = tifm_sd_remove, |
969 | .suspend = tifm_sd_suspend, | ||
970 | .resume = tifm_sd_resume | ||
914 | }; | 971 | }; |
915 | 972 | ||
916 | static int __init tifm_sd_init(void) | 973 | static int __init tifm_sd_init(void) |
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c index 7a282672f8e9..a44d8777ab9f 100644 --- a/drivers/mmc/wbsd.c +++ b/drivers/mmc/wbsd.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver | 2 | * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver |
3 | * | 3 | * |
4 | * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved. | 4 | * Copyright (C) 2004-2006 Pierre Ossman, All Rights Reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -272,16 +272,9 @@ static inline int wbsd_next_sg(struct wbsd_host *host) | |||
272 | return host->num_sg; | 272 | return host->num_sg; |
273 | } | 273 | } |
274 | 274 | ||
275 | static inline char *wbsd_kmap_sg(struct wbsd_host *host) | 275 | static inline char *wbsd_sg_to_buffer(struct wbsd_host *host) |
276 | { | 276 | { |
277 | host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) + | 277 | return page_address(host->cur_sg->page) + host->cur_sg->offset; |
278 | host->cur_sg->offset; | ||
279 | return host->mapped_sg; | ||
280 | } | ||
281 | |||
282 | static inline void wbsd_kunmap_sg(struct wbsd_host *host) | ||
283 | { | ||
284 | kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ); | ||
285 | } | 278 | } |
286 | 279 | ||
287 | static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) | 280 | static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) |
@@ -302,12 +295,11 @@ static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) | |||
302 | * we do not transfer too much. | 295 | * we do not transfer too much. |
303 | */ | 296 | */ |
304 | for (i = 0; i < len; i++) { | 297 | for (i = 0; i < len; i++) { |
305 | sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; | 298 | sgbuf = page_address(sg[i].page) + sg[i].offset; |
306 | if (size < sg[i].length) | 299 | if (size < sg[i].length) |
307 | memcpy(dmabuf, sgbuf, size); | 300 | memcpy(dmabuf, sgbuf, size); |
308 | else | 301 | else |
309 | memcpy(dmabuf, sgbuf, sg[i].length); | 302 | memcpy(dmabuf, sgbuf, sg[i].length); |
310 | kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ); | ||
311 | dmabuf += sg[i].length; | 303 | dmabuf += sg[i].length; |
312 | 304 | ||
313 | if (size < sg[i].length) | 305 | if (size < sg[i].length) |
@@ -347,7 +339,7 @@ static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data) | |||
347 | * we do not transfer too much. | 339 | * we do not transfer too much. |
348 | */ | 340 | */ |
349 | for (i = 0; i < len; i++) { | 341 | for (i = 0; i < len; i++) { |
350 | sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; | 342 | sgbuf = page_address(sg[i].page) + sg[i].offset; |
351 | if (size < sg[i].length) | 343 | if (size < sg[i].length) |
352 | memcpy(sgbuf, dmabuf, size); | 344 | memcpy(sgbuf, dmabuf, size); |
353 | else | 345 | else |
@@ -497,7 +489,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host) | |||
497 | if (data->bytes_xfered == host->size) | 489 | if (data->bytes_xfered == host->size) |
498 | return; | 490 | return; |
499 | 491 | ||
500 | buffer = wbsd_kmap_sg(host) + host->offset; | 492 | buffer = wbsd_sg_to_buffer(host) + host->offset; |
501 | 493 | ||
502 | /* | 494 | /* |
503 | * Drain the fifo. This has a tendency to loop longer | 495 | * Drain the fifo. This has a tendency to loop longer |
@@ -526,17 +518,13 @@ static void wbsd_empty_fifo(struct wbsd_host *host) | |||
526 | /* | 518 | /* |
527 | * Transfer done? | 519 | * Transfer done? |
528 | */ | 520 | */ |
529 | if (data->bytes_xfered == host->size) { | 521 | if (data->bytes_xfered == host->size) |
530 | wbsd_kunmap_sg(host); | ||
531 | return; | 522 | return; |
532 | } | ||
533 | 523 | ||
534 | /* | 524 | /* |
535 | * End of scatter list entry? | 525 | * End of scatter list entry? |
536 | */ | 526 | */ |
537 | if (host->remain == 0) { | 527 | if (host->remain == 0) { |
538 | wbsd_kunmap_sg(host); | ||
539 | |||
540 | /* | 528 | /* |
541 | * Get next entry. Check if last. | 529 | * Get next entry. Check if last. |
542 | */ | 530 | */ |
@@ -554,13 +542,11 @@ static void wbsd_empty_fifo(struct wbsd_host *host) | |||
554 | return; | 542 | return; |
555 | } | 543 | } |
556 | 544 | ||
557 | buffer = wbsd_kmap_sg(host); | 545 | buffer = wbsd_sg_to_buffer(host); |
558 | } | 546 | } |
559 | } | 547 | } |
560 | } | 548 | } |
561 | 549 | ||
562 | wbsd_kunmap_sg(host); | ||
563 | |||
564 | /* | 550 | /* |
565 | * This is a very dirty hack to solve a | 551 | * This is a very dirty hack to solve a |
566 | * hardware problem. The chip doesn't trigger | 552 | * hardware problem. The chip doesn't trigger |
@@ -583,7 +569,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host) | |||
583 | if (data->bytes_xfered == host->size) | 569 | if (data->bytes_xfered == host->size) |
584 | return; | 570 | return; |
585 | 571 | ||
586 | buffer = wbsd_kmap_sg(host) + host->offset; | 572 | buffer = wbsd_sg_to_buffer(host) + host->offset; |
587 | 573 | ||
588 | /* | 574 | /* |
589 | * Fill the fifo. This has a tendency to loop longer | 575 | * Fill the fifo. This has a tendency to loop longer |
@@ -612,17 +598,13 @@ static void wbsd_fill_fifo(struct wbsd_host *host) | |||
612 | /* | 598 | /* |
613 | * Transfer done? | 599 | * Transfer done? |
614 | */ | 600 | */ |
615 | if (data->bytes_xfered == host->size) { | 601 | if (data->bytes_xfered == host->size) |
616 | wbsd_kunmap_sg(host); | ||
617 | return; | 602 | return; |
618 | } | ||
619 | 603 | ||
620 | /* | 604 | /* |
621 | * End of scatter list entry? | 605 | * End of scatter list entry? |
622 | */ | 606 | */ |
623 | if (host->remain == 0) { | 607 | if (host->remain == 0) { |
624 | wbsd_kunmap_sg(host); | ||
625 | |||
626 | /* | 608 | /* |
627 | * Get next entry. Check if last. | 609 | * Get next entry. Check if last. |
628 | */ | 610 | */ |
@@ -640,13 +622,11 @@ static void wbsd_fill_fifo(struct wbsd_host *host) | |||
640 | return; | 622 | return; |
641 | } | 623 | } |
642 | 624 | ||
643 | buffer = wbsd_kmap_sg(host); | 625 | buffer = wbsd_sg_to_buffer(host); |
644 | } | 626 | } |
645 | } | 627 | } |
646 | } | 628 | } |
647 | 629 | ||
648 | wbsd_kunmap_sg(host); | ||
649 | |||
650 | /* | 630 | /* |
651 | * The controller stops sending interrupts for | 631 | * The controller stops sending interrupts for |
652 | * 'FIFO empty' under certain conditions. So we | 632 | * 'FIFO empty' under certain conditions. So we |
@@ -910,6 +890,45 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
910 | */ | 890 | */ |
911 | if (cmd->data && (cmd->error == MMC_ERR_NONE)) { | 891 | if (cmd->data && (cmd->error == MMC_ERR_NONE)) { |
912 | /* | 892 | /* |
893 | * The hardware is so delightfully stupid that it has a list | ||
894 | * of "data" commands. If a command isn't on this list, it'll | ||
895 | * just go back to the idle state and won't send any data | ||
896 | * interrupts. | ||
897 | */ | ||
898 | switch (cmd->opcode) { | ||
899 | case 11: | ||
900 | case 17: | ||
901 | case 18: | ||
902 | case 20: | ||
903 | case 24: | ||
904 | case 25: | ||
905 | case 26: | ||
906 | case 27: | ||
907 | case 30: | ||
908 | case 42: | ||
909 | case 56: | ||
910 | break; | ||
911 | |||
912 | /* ACMDs. We don't keep track of state, so we just treat them | ||
913 | * like any other command. */ | ||
914 | case 51: | ||
915 | break; | ||
916 | |||
917 | default: | ||
918 | #ifdef CONFIG_MMC_DEBUG | ||
919 | printk(KERN_WARNING "%s: Data command %d is not " | ||
920 | "supported by this controller.\n", | ||
921 | mmc_hostname(host->mmc), cmd->opcode); | ||
922 | #endif | ||
923 | cmd->data->error = MMC_ERR_INVALID; | ||
924 | |||
925 | if (cmd->data->stop) | ||
926 | wbsd_send_command(host, cmd->data->stop); | ||
927 | |||
928 | goto done; | ||
929 | }; | ||
930 | |||
931 | /* | ||
913 | * Dirty fix for hardware bug. | 932 | * Dirty fix for hardware bug. |
914 | */ | 933 | */ |
915 | if (host->dma == -1) | 934 | if (host->dma == -1) |
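The new switch above hard-codes the set of opcodes the controller is willing to clock data for; anything else drops back to the idle state without data interrupts. As a rough illustration, the same check in table form (the names and layout here are assumptions, not wbsd code):

#include <linux/kernel.h>

/* Opcodes the W83L51xD treats as data commands, per the switch above. */
static const unsigned char example_data_opcodes[] = {
	11, 17, 18, 20, 24, 25, 26, 27, 30, 42, 51, 56,
};

static int example_opcode_has_data(unsigned int opcode)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(example_data_opcodes); i++)
		if (example_data_opcodes[i] == opcode)
			return 1;
	return 0;
}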
@@ -1343,16 +1362,27 @@ static int __devinit wbsd_alloc_mmc(struct device *dev) | |||
1343 | mmc->max_phys_segs = 128; | 1362 | mmc->max_phys_segs = 128; |
1344 | 1363 | ||
1345 | /* | 1364 | /* |
1346 | * Maximum number of sectors in one transfer. Also limited by 64kB | 1365 | * Maximum request size. Also limited by 64KiB buffer. |
1347 | * buffer. | ||
1348 | */ | 1366 | */ |
1349 | mmc->max_sectors = 128; | 1367 | mmc->max_req_size = 65536; |
1350 | 1368 | ||
1351 | /* | 1369 | /* |
1352 | * Maximum segment size. Could be one segment with the maximum number | 1370 | * Maximum segment size. Could be one segment with the maximum number |
1353 | * of segments. | 1371 | * of bytes. |
1372 | */ | ||
1373 | mmc->max_seg_size = mmc->max_req_size; | ||
1374 | |||
1375 | /* | ||
1376 | * Maximum block size. We have 12 bits (= 4095) but have to subtract | ||
1377 | * space for CRC. So the maximum is 4095 - 4*2 = 4087. | ||
1378 | */ | ||
1379 | mmc->max_blk_size = 4087; | ||
1380 | |||
1381 | /* | ||
1382 | * Maximum block count. There is no real limit so the maximum | ||
1383 | * request size will be the only restriction. | ||
1354 | */ | 1384 | */ |
1355 | mmc->max_seg_size = mmc->max_sectors * 512; | 1385 | mmc->max_blk_count = mmc->max_req_size; |
1356 | 1386 | ||
1357 | dev_set_drvdata(dev, mmc); | 1387 | dev_set_drvdata(dev, mmc); |
1358 | 1388 | ||
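For reference, the limits set above follow from the controller's 12-bit block length register: 2^12 - 1 = 4095 bytes, minus 2 bytes of CRC16 on each of the 4 data lines, leaves 4087. A sketch of that arithmetic (macro names are assumptions, for illustration only):

/* Illustration of the 4087-byte limit derived in the comment above. */
#define EXAMPLE_BLK_LEN_BITS	12		/* block length register width */
#define EXAMPLE_CRC_BYTES	(4 * 2)		/* CRC16 per DAT line, 4-bit bus */
#define EXAMPLE_MAX_BLK_SIZE	(((1 << EXAMPLE_BLK_LEN_BITS) - 1) - EXAMPLE_CRC_BYTES)
						/* = 4095 - 8 = 4087 */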
diff --git a/drivers/mmc/wbsd.h b/drivers/mmc/wbsd.h index 6072993f01e3..d06718b0e2ab 100644 --- a/drivers/mmc/wbsd.h +++ b/drivers/mmc/wbsd.h | |||
@@ -154,7 +154,6 @@ struct wbsd_host | |||
154 | 154 | ||
155 | struct scatterlist* cur_sg; /* Current SG entry */ | 155 | struct scatterlist* cur_sg; /* Current SG entry */ |
156 | unsigned int num_sg; /* Number of entries left */ | 156 | unsigned int num_sg; /* Number of entries left */ |
157 | void* mapped_sg; /* vaddr of mapped sg */ | ||
158 | 157 | ||
160 | unsigned int remain; /* Data left in current entry */ | 159 |
160 | unsigned int remain; /* Data left in curren entry */ | 159 | unsigned int remain; /* Data left in curren entry */ |