author     Linus Torvalds <torvalds@linux-foundation.org>  2011-03-17 22:28:15 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-03-17 22:28:15 -0400
commit     0df0914d414a504b975f3cc66ace0c16ef55b7f3 (patch)
tree       c97ffa357943a8b226cdec1b9632c4cede813205 /drivers
parent     6899608533410557e6698cb9d4ff6df553916e98 (diff)
parent     05f689400ea5fa3d71af82f910c8b140f87ad1f3 (diff)
Merge branch 'omap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap-2.6
* 'omap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap-2.6: (258 commits)
omap: zoom: host should not pull up wl1271's irq line
arm: plat-omap: iommu: fix request_mem_region() error path
OMAP2+: Common CPU DIE ID reading code reads wrong registers for OMAP4430
omap4: mux: Remove duplicate mux modes
omap: iovmm: don't check 'da' to set IOVMF_DA_FIXED flag
omap: iovmm: disallow mapping NULL address when IOVMF_DA_ANON is set
omap2+: mux: Fix compile when CONFIG_OMAP_MUX is not selected
omap4: board-omap4panda: Initialise the serial pads
omap3: board-3430sdp: Initialise the serial pads
omap4: board-4430sdp: Initialise the serial pads
omap2+: mux: Add macro for configuring static with omap_hwmod_mux_init
omap2+: mux: Remove the use of IDLE flag
omap2+: Add separate list for dynamic pads to mux
perf: add OMAP support for the new power events
OMAP4: Add IVA OPP entries.
OMAP4: Update Voltage Rail Values for MPU, IVA and CORE
OMAP4: Enable 800 MHz and 1 GHz MPU-OPP
OMAP3+: OPP: Replace voltage values with Macros
OMAP3: wdtimer: Fix CORE idle transition
Watchdog: omap_wdt: add fine grain runtime-pm
...
Fix up various conflicts in
- arch/arm/mach-omap2/board-omap3evm.c
- arch/arm/mach-omap2/clock3xxx_data.c
- arch/arm/mach-omap2/usb-musb.c
- arch/arm/plat-omap/include/plat/usb.h
- drivers/usb/musb/musb_core.h
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/Kconfig                             2
-rw-r--r--   drivers/Makefile                            2
-rw-r--r--   drivers/hwspinlock/Kconfig                 22
-rw-r--r--   drivers/hwspinlock/Makefile                 6
-rw-r--r--   drivers/hwspinlock/hwspinlock_core.c      548
-rw-r--r--   drivers/hwspinlock/hwspinlock_internal.h   61
-rw-r--r--   drivers/hwspinlock/omap_hwspinlock.c      231
-rw-r--r--   drivers/mmc/host/Kconfig                    2
-rw-r--r--   drivers/mmc/host/omap_hsmmc.c              36
-rw-r--r--   drivers/mtd/nand/Kconfig                   17
-rw-r--r--   drivers/mtd/nand/omap2.c                  367
-rw-r--r--   drivers/mtd/onenand/omap2.c                36
-rw-r--r--   drivers/spi/omap2_mcspi.c                 222
-rw-r--r--   drivers/usb/musb/musb_core.c                2
-rw-r--r--   drivers/usb/musb/musb_core.h                4
-rw-r--r--   drivers/usb/musb/musbhsdma.h                2
-rw-r--r--   drivers/usb/otg/isp1301_omap.c              2
-rw-r--r--   drivers/w1/masters/Kconfig                  2
-rw-r--r--   drivers/watchdog/omap_wdt.c                25
19 files changed, 1283 insertions(+), 306 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9bfb71ff3a6a..177c7d156933 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -117,4 +117,6 @@ source "drivers/staging/Kconfig"
 source "drivers/platform/Kconfig"
 
 source "drivers/clk/Kconfig"
+
+source "drivers/hwspinlock/Kconfig"
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index b423bb16c3a8..3f135b6fb014 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -117,3 +117,5 @@ obj-y += platform/
 obj-y				+= ieee802154/
 #common clk code
 obj-y				+= clk/
+
+obj-$(CONFIG_HWSPINLOCK)	+= hwspinlock/
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
new file mode 100644
index 000000000000..eb4af28f8567
--- /dev/null
+++ b/drivers/hwspinlock/Kconfig
@@ -0,0 +1,22 @@
+#
+# Generic HWSPINLOCK framework
+#
+
+config HWSPINLOCK
+	tristate "Generic Hardware Spinlock framework"
+	help
+	  Say y here to support the generic hardware spinlock framework.
+	  You only need to enable this if you have a hardware spinlock module
+	  on your system (usually only relevant if your system has remote slave
+	  coprocessors).
+
+	  If unsure, say N.
+
+config HWSPINLOCK_OMAP
+	tristate "OMAP Hardware Spinlock device"
+	depends on HWSPINLOCK && ARCH_OMAP4
+	help
+	  Say y here to support the OMAP Hardware Spinlock device (first
+	  introduced in OMAP4).
+
+	  If unsure, say N.
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
new file mode 100644
index 000000000000..5729a3f7ed3d
--- /dev/null
+++ b/drivers/hwspinlock/Makefile
@@ -0,0 +1,6 @@
+#
+# Generic Hardware Spinlock framework
+#
+
+obj-$(CONFIG_HWSPINLOCK)	+= hwspinlock_core.o
+obj-$(CONFIG_HWSPINLOCK_OMAP)	+= omap_hwspinlock.o
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
new file mode 100644
index 000000000000..43a62714b4fb
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -0,0 +1,548 @@
+/*
+ * Hardware spinlock framework
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/radix-tree.h>
+#include <linux/hwspinlock.h>
+#include <linux/pm_runtime.h>
+
+#include "hwspinlock_internal.h"
+
+/* radix tree tags */
+#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */
+
+/*
+ * A radix tree is used to maintain the available hwspinlock instances.
+ * The tree associates hwspinlock pointers with their integer key id,
+ * and provides an easy-to-use API which makes the hwspinlock core code
+ * simple and easy to read.
+ *
+ * Radix trees are quick on lookups, and reasonably efficient in terms of
+ * storage, especially with high density usages such as this framework
+ * requires (a continuous range of integer keys, beginning with zero, is
+ * used as the IDs of the hwspinlock instances).
+ *
+ * The radix tree API supports tagging items in the tree, which this
+ * framework uses to mark unused hwspinlock instances (see the
+ * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
+ * tree, looking for an unused hwspinlock instance, is now reduced to a
+ * single radix tree API call.
+ */
+static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
+
+/*
+ * Synchronization of access to the tree is achieved using this spinlock,
+ * as the radix-tree API requires that users provide all synchronization.
+ */
+static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+
+/**
+ * __hwspin_trylock() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ * @mode: controls whether local interrupts are disabled or not
+ * @flags: a pointer where the caller's interrupt state will be saved at (if
+ *         requested)
+ *
+ * This function attempts to lock an hwspinlock, and will immediately
+ * fail if the hwspinlock is already taken.
+ *
+ * Upon a successful return from this function, preemption (and possibly
+ * interrupts) is disabled, so the caller must not sleep, and is advised to
+ * release the hwspinlock as soon as possible. This is required in order to
+ * minimize remote cores polling on the hardware interconnect.
+ *
+ * The user decides whether local interrupts are disabled or not, and if yes,
+ * whether he wants their previous state to be saved. It is up to the user
+ * to choose the appropriate @mode of operation, exactly the same way users
+ * should decide between spin_trylock, spin_trylock_irq and
+ * spin_trylock_irqsave.
+ *
+ * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
+ * the hwspinlock was already taken.
+ * This function will never sleep.
+ */
+int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+	int ret;
+
+	BUG_ON(!hwlock);
+	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+
+	/*
+	 * This spin_lock{_irq, _irqsave} serves three purposes:
+	 *
+	 * 1. Disable preemption, in order to minimize the period of time
+	 *    in which the hwspinlock is taken. This is important in order
+	 *    to minimize the possible polling on the hardware interconnect
+	 *    by a remote user of this lock.
+	 * 2. Make the hwspinlock SMP-safe (so we can take it from
+	 *    additional contexts on the local host).
+	 * 3. Ensure that in_atomic/might_sleep checks catch potential
+	 *    problems with hwspinlock usage (e.g. scheduler checks like
+	 *    'scheduling while atomic' etc.)
+	 */
+	if (mode == HWLOCK_IRQSTATE)
+		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
+	else if (mode == HWLOCK_IRQ)
+		ret = spin_trylock_irq(&hwlock->lock);
+	else
+		ret = spin_trylock(&hwlock->lock);
+
+	/* is lock already taken by another context on the local cpu ? */
+	if (!ret)
+		return -EBUSY;
+
+	/* try to take the hwspinlock device */
+	ret = hwlock->ops->trylock(hwlock);
+
+	/* if hwlock is already taken, undo spin_trylock_* and exit */
+	if (!ret) {
+		if (mode == HWLOCK_IRQSTATE)
+			spin_unlock_irqrestore(&hwlock->lock, *flags);
+		else if (mode == HWLOCK_IRQ)
+			spin_unlock_irq(&hwlock->lock);
+		else
+			spin_unlock(&hwlock->lock);
+
+		return -EBUSY;
+	}
+
+	/*
+	 * We can be sure the other core's memory operations
+	 * are observable to us only _after_ we successfully take
+	 * the hwspinlock, and we must make sure that subsequent memory
+	 * operations (both reads and writes) will not be reordered before
+	 * we actually took the hwspinlock.
+	 *
+	 * Note: the implicit memory barrier of the spinlock above is too
+	 * early, so we need this additional explicit memory barrier.
+	 */
+	mb();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__hwspin_trylock);
+
+/**
+ * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @timeout: timeout value in msecs
+ * @mode: mode which controls whether local interrupts are disabled or not
+ * @flags: a pointer to where the caller's interrupt state will be saved at (if
+ *         requested)
+ *
+ * This function locks the given @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up after @timeout msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption is disabled
+ * (and possibly local interrupts, too), so the caller must not sleep,
+ * and is advised to release the hwspinlock as soon as possible.
+ * This is required in order to minimize remote cores polling on the
+ * hardware interconnect.
+ *
+ * The user decides whether local interrupts are disabled or not, and if yes,
+ * whether he wants their previous state to be saved. It is up to the user
+ * to choose the appropriate @mode of operation, exactly the same way users
+ * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
+ * busy after @timeout msecs). The function will never sleep.
+ */
+int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
+					int mode, unsigned long *flags)
+{
+	int ret;
+	unsigned long expire;
+
+	expire = msecs_to_jiffies(to) + jiffies;
+
+	for (;;) {
+		/* Try to take the hwspinlock */
+		ret = __hwspin_trylock(hwlock, mode, flags);
+		if (ret != -EBUSY)
+			break;
+
+		/*
+		 * The lock is already taken, let's check if the user wants
+		 * us to try again
+		 */
+		if (time_is_before_eq_jiffies(expire))
+			return -ETIMEDOUT;
+
+		/*
+		 * Allow platform-specific relax handlers to prevent
+		 * hogging the interconnect (no sleeping, though)
+		 */
+		if (hwlock->ops->relax)
+			hwlock->ops->relax(hwlock);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
+
+/**
+ * __hwspin_unlock() - unlock a specific hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ * @mode: controls whether local interrupts need to be restored or not
+ * @flags: previous caller's interrupt state to restore (if requested)
+ *
+ * This function will unlock a specific hwspinlock, enable preemption and
+ * (possibly) enable interrupts or restore their previous state.
+ * @hwlock must be already locked before calling this function: it is a bug
+ * to call unlock on a @hwlock that is already unlocked.
+ *
+ * The user decides whether local interrupts should be enabled or not, and
+ * if yes, whether he wants their previous state to be restored. It is up
+ * to the user to choose the appropriate @mode of operation, exactly the
+ * same way users decide between spin_unlock, spin_unlock_irq and
+ * spin_unlock_irqrestore.
+ *
+ * The function will never sleep.
+ */
+void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+	BUG_ON(!hwlock);
+	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+
+	/*
+	 * We must make sure that memory operations (both reads and writes),
+	 * done before unlocking the hwspinlock, will not be reordered
+	 * after the lock is released.
+	 *
+	 * That's the purpose of this explicit memory barrier.
+	 *
+	 * Note: the memory barrier induced by the spin_unlock below is too
+	 * late; the other core is going to access memory soon after it will
+	 * take the hwspinlock, and by then we want to be sure our memory
+	 * operations are already observable.
+	 */
+	mb();
+
+	hwlock->ops->unlock(hwlock);
+
+	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
+	if (mode == HWLOCK_IRQSTATE)
+		spin_unlock_irqrestore(&hwlock->lock, *flags);
+	else if (mode == HWLOCK_IRQ)
+		spin_unlock_irq(&hwlock->lock);
+	else
+		spin_unlock(&hwlock->lock);
+}
+EXPORT_SYMBOL_GPL(__hwspin_unlock);
+
+/**
+ * hwspin_lock_register() - register a new hw spinlock
+ * @hwlock: hwspinlock to register.
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to register a new hwspinlock instance.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context.
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_register(struct hwspinlock *hwlock)
+{
+	struct hwspinlock *tmp;
+	int ret;
+
+	if (!hwlock || !hwlock->ops ||
+			!hwlock->ops->trylock || !hwlock->ops->unlock) {
+		pr_err("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	spin_lock_init(&hwlock->lock);
+
+	spin_lock(&hwspinlock_tree_lock);
+
+	ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
+	if (ret)
+		goto out;
+
+	/* mark this hwspinlock as available */
+	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+							HWSPINLOCK_UNUSED);
+
+	/* self-sanity check which should never fail */
+	WARN_ON(tmp != hwlock);
+
+out:
+	spin_unlock(&hwspinlock_tree_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_register);
+
+/**
+ * hwspin_lock_unregister() - unregister an hw spinlock
+ * @id: index of the specific hwspinlock to unregister
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to unregister an existing (and unused) hwspinlock.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context.
+ *
+ * Returns the address of hwspinlock @id on success, or NULL on failure
+ */
+struct hwspinlock *hwspin_lock_unregister(unsigned int id)
+{
+	struct hwspinlock *hwlock = NULL;
+	int ret;
+
+	spin_lock(&hwspinlock_tree_lock);
+
+	/* make sure the hwspinlock is not in use (tag is set) */
+	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
+	if (ret == 0) {
+		pr_err("hwspinlock %d still in use (or not present)\n", id);
+		goto out;
+	}
+
+	hwlock = radix_tree_delete(&hwspinlock_tree, id);
+	if (!hwlock) {
+		pr_err("failed to delete hwspinlock %d\n", id);
+		goto out;
+	}
+
+out:
+	spin_unlock(&hwspinlock_tree_lock);
+	return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
+
+/**
+ * __hwspin_lock_request() - tag an hwspinlock as used and power it up
+ *
+ * This is an internal function that prepares an hwspinlock instance
+ * before it is given to the user. The function assumes that
+ * hwspinlock_tree_lock is taken.
+ *
+ * Returns 0 or positive to indicate success, and a negative value to
+ * indicate an error (with the appropriate error code)
+ */
+static int __hwspin_lock_request(struct hwspinlock *hwlock)
+{
+	struct hwspinlock *tmp;
+	int ret;
+
+	/* prevent underlying implementation from being removed */
+	if (!try_module_get(hwlock->owner)) {
+		dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
+		return -EINVAL;
+	}
+
+	/* notify PM core that power is now needed */
+	ret = pm_runtime_get_sync(hwlock->dev);
+	if (ret < 0) {
+		dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
+		return ret;
+	}
+
+	/* mark hwspinlock as used, should not fail */
+	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
+							HWSPINLOCK_UNUSED);
+
+	/* self-sanity check that should never fail */
+	WARN_ON(tmp != hwlock);
+
+	return ret;
+}
+
+/**
+ * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
+ * @hwlock: a valid hwspinlock instance
+ *
+ * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
+ */
+int hwspin_lock_get_id(struct hwspinlock *hwlock)
+{
+	if (!hwlock) {
+		pr_err("invalid hwlock\n");
+		return -EINVAL;
+	}
+
+	return hwlock->id;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
+
+/**
+ * hwspin_lock_request() - request an hwspinlock
+ *
+ * This function should be called by users of the hwspinlock device,
+ * in order to dynamically assign them an unused hwspinlock.
+ * Usually the user of this lock will then have to communicate the lock's id
+ * to the remote core before it can be used for synchronization (to get the
+ * id of a given hwlock, use hwspin_lock_get_id()).
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *hwspin_lock_request(void)
+{
+	struct hwspinlock *hwlock;
+	int ret;
+
+	spin_lock(&hwspinlock_tree_lock);
+
+	/* look for an unused lock */
+	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
+						0, 1, HWSPINLOCK_UNUSED);
+	if (ret == 0) {
+		pr_warn("a free hwspinlock is not available\n");
+		hwlock = NULL;
+		goto out;
+	}
+
+	/* sanity check that should never fail */
+	WARN_ON(ret > 1);
+
+	/* mark as used and power up */
+	ret = __hwspin_lock_request(hwlock);
+	if (ret < 0)
+		hwlock = NULL;
+
+out:
+	spin_unlock(&hwspinlock_tree_lock);
+	return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_request);
+
+/**
+ * hwspin_lock_request_specific() - request for a specific hwspinlock
+ * @id: index of the specific hwspinlock that is requested
+ *
+ * This function should be called by users of the hwspinlock module,
+ * in order to assign them a specific hwspinlock.
+ * Usually early board code will be calling this function in order to
+ * reserve specific hwspinlock ids for predefined purposes.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
+{
+	struct hwspinlock *hwlock;
+	int ret;
+
+	spin_lock(&hwspinlock_tree_lock);
+
+	/* make sure this hwspinlock exists */
+	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
+	if (!hwlock) {
+		pr_warn("hwspinlock %u does not exist\n", id);
+		goto out;
+	}
+
+	/* sanity check (this shouldn't happen) */
+	WARN_ON(hwlock->id != id);
+
+	/* make sure this hwspinlock is unused */
+	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
+	if (ret == 0) {
+		pr_warn("hwspinlock %u is already in use\n", id);
+		hwlock = NULL;
+		goto out;
+	}
+
+	/* mark as used and power up */
+	ret = __hwspin_lock_request(hwlock);
+	if (ret < 0)
+		hwlock = NULL;
+
+out:
+	spin_unlock(&hwspinlock_tree_lock);
+	return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
+
+/**
+ * hwspin_lock_free() - free a specific hwspinlock
+ * @hwlock: the specific hwspinlock to free
+ *
+ * This function marks @hwlock as free again.
+ * Should only be called with an @hwlock that was retrieved from
+ * an earlier call to hwspin_lock_request{_specific}.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_free(struct hwspinlock *hwlock)
+{
+	struct hwspinlock *tmp;
+	int ret;
+
+	if (!hwlock) {
+		pr_err("invalid hwlock\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&hwspinlock_tree_lock);
+
+	/* make sure the hwspinlock is used */
+	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
+							HWSPINLOCK_UNUSED);
+	if (ret == 1) {
+		dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
+		dump_stack();
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* notify the underlying device that power is not needed */
+	ret = pm_runtime_put(hwlock->dev);
+	if (ret < 0)
+		goto out;
+
+	/* mark this hwspinlock as available */
+	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+							HWSPINLOCK_UNUSED);
+
+	/* sanity check (this shouldn't happen) */
+	WARN_ON(tmp != hwlock);
+
+	module_put(hwlock->owner);
+
+out:
+	spin_unlock(&hwspinlock_tree_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_free);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock interface");
+MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
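The exported __hwspin_trylock/__hwspin_lock_timeout/__hwspin_unlock primitives above are normally reached through static inline wrappers in <linux/hwspinlock.h>, which is added elsewhere in this series and so does not appear in this drivers-only diffstat. A minimal usage sketch calling the exported functions directly; the 100 msec timeout and the shared-memory use case are illustrative assumptions, not part of this commit:

	#include <linux/hwspinlock.h>

	static int update_shared_area(void)
	{
		struct hwspinlock *hwlock;
		unsigned long flags;
		int ret;

		/* dynamically assign one of the unused hwspinlocks */
		hwlock = hwspin_lock_request();
		if (!hwlock)
			return -EBUSY;

		/*
		 * Busy-wait up to 100 msecs with local interrupts saved and
		 * disabled (HWLOCK_IRQSTATE), mirroring spin_lock_irqsave().
		 */
		ret = __hwspin_lock_timeout(hwlock, 100, HWLOCK_IRQSTATE, &flags);
		if (!ret) {
			/* ... touch memory shared with the remote core; no sleeping ... */
			__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, &flags);
		}

		hwspin_lock_free(hwlock);
		return ret;
	}

Before such a lock is useful, its id (hwspin_lock_get_id()) would be communicated to the remote core out of band, since both sides must agree on which hardware lock protects the resource.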
diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
new file mode 100644
index 000000000000..69935e6b93e5
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -0,0 +1,61 @@
+/*
+ * Hardware spinlocks internal header
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HWSPINLOCK_HWSPINLOCK_H
+#define __HWSPINLOCK_HWSPINLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/device.h>
+
+/**
+ * struct hwspinlock_ops - platform-specific hwspinlock handlers
+ *
+ * @trylock: make a single attempt to take the lock. returns 0 on
+ *	     failure and true on success. may _not_ sleep.
+ * @unlock:  release the lock. always succeed. may _not_ sleep.
+ * @relax:   optional, platform-specific relax handler, called by hwspinlock
+ *	     core while spinning on a lock, between two successive
+ *	     invocations of @trylock. may _not_ sleep.
+ */
+struct hwspinlock_ops {
+	int (*trylock)(struct hwspinlock *lock);
+	void (*unlock)(struct hwspinlock *lock);
+	void (*relax)(struct hwspinlock *lock);
+};
+
+/**
+ * struct hwspinlock - this struct represents a single hwspinlock instance
+ *
+ * @dev: underlying device, will be used to invoke runtime PM api
+ * @ops: platform-specific hwspinlock handlers
+ * @id: a global, unique, system-wide, index of the lock.
+ * @lock: initialized and used by hwspinlock core
+ * @owner: underlying implementation module, used to maintain module ref count
+ *
+ * Note: currently simplicity was opted for, but later we can squeeze some
+ * memory bytes by grouping the dev, ops and owner members in a single
+ * per-platform struct, and have all hwspinlocks point at it.
+ */
+struct hwspinlock {
+	struct device *dev;
+	const struct hwspinlock_ops *ops;
+	int id;
+	spinlock_t lock;
+	struct module *owner;
+};
+
+#endif /* __HWSPINLOCK_HWSPINLOCK_H */
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
new file mode 100644
index 000000000000..a8f02734c026
--- /dev/null
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -0,0 +1,231 @@
+/*
+ * OMAP hardware spinlock driver
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Simon Que <sque@ti.com>
+ *          Hari Kanigeri <h-kanigeri2@ti.com>
+ *          Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/hwspinlock.h>
+#include <linux/platform_device.h>
+
+#include "hwspinlock_internal.h"
+
+/* Spinlock register offsets */
+#define SYSSTATUS_OFFSET		0x0014
+#define LOCK_BASE_OFFSET		0x0800
+
+#define SPINLOCK_NUMLOCKS_BIT_OFFSET	(24)
+
+/* Possible values of SPINLOCK_LOCK_REG */
+#define SPINLOCK_NOTTAKEN		(0)	/* free */
+#define SPINLOCK_TAKEN			(1)	/* locked */
+
+#define to_omap_hwspinlock(lock)	\
+	container_of(lock, struct omap_hwspinlock, lock)
+
+struct omap_hwspinlock {
+	struct hwspinlock lock;
+	void __iomem *addr;
+};
+
+struct omap_hwspinlock_state {
+	int num_locks;			/* Total number of locks in system */
+	void __iomem *io_base;		/* Mapped base address */
+};
+
+static int omap_hwspinlock_trylock(struct hwspinlock *lock)
+{
+	struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+
+	/* attempt to acquire the lock by reading its value */
+	return (SPINLOCK_NOTTAKEN == readl(omap_lock->addr));
+}
+
+static void omap_hwspinlock_unlock(struct hwspinlock *lock)
+{
+	struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+
+	/* release the lock by writing 0 to it */
+	writel(SPINLOCK_NOTTAKEN, omap_lock->addr);
+}
+
+/*
+ * relax the OMAP interconnect while spinning on it.
+ *
+ * The specs recommend that the retry delay time be just over half of
+ * the time that a requester is expected to hold the lock.
+ *
+ * The number below is taken from a hardware specs example; obviously
+ * it is somewhat arbitrary.
+ */
+static void omap_hwspinlock_relax(struct hwspinlock *lock)
+{
+	ndelay(50);
+}
+
+static const struct hwspinlock_ops omap_hwspinlock_ops = {
+	.trylock = omap_hwspinlock_trylock,
+	.unlock = omap_hwspinlock_unlock,
+	.relax = omap_hwspinlock_relax,
+};
+
+static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
+{
+	struct omap_hwspinlock *omap_lock;
+	struct omap_hwspinlock_state *state;
+	struct hwspinlock *lock;
+	struct resource *res;
+	void __iomem *io_base;
+	int i, ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	io_base = ioremap(res->start, resource_size(res));
+	if (!io_base) {
+		ret = -ENOMEM;
+		goto free_state;
+	}
+
+	/* Determine number of locks */
+	i = readl(io_base + SYSSTATUS_OFFSET);
+	i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET;
+
+	/* one of the four lsb's must be set, and nothing else */
+	if (hweight_long(i & 0xf) != 1 || i > 8) {
+		ret = -EINVAL;
+		goto iounmap_base;
+	}
+
+	state->num_locks = i * 32;
+	state->io_base = io_base;
+
+	platform_set_drvdata(pdev, state);
+
+	/*
+	 * runtime PM will make sure the clock of this module is
+	 * enabled iff at least one lock is requested
+	 */
+	pm_runtime_enable(&pdev->dev);
+
+	for (i = 0; i < state->num_locks; i++) {
+		omap_lock = kzalloc(sizeof(*omap_lock), GFP_KERNEL);
+		if (!omap_lock) {
+			ret = -ENOMEM;
+			goto free_locks;
+		}
+
+		omap_lock->lock.dev = &pdev->dev;
+		omap_lock->lock.owner = THIS_MODULE;
+		omap_lock->lock.id = i;
+		omap_lock->lock.ops = &omap_hwspinlock_ops;
+		omap_lock->addr = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
+
+		ret = hwspin_lock_register(&omap_lock->lock);
+		if (ret) {
+			kfree(omap_lock);
+			goto free_locks;
+		}
+	}
+
+	return 0;
+
+free_locks:
+	while (--i >= 0) {
+		lock = hwspin_lock_unregister(i);
+		/* this shouldn't happen, but let's give our best effort */
+		if (!lock) {
+			dev_err(&pdev->dev, "%s: cleanups failed\n", __func__);
+			continue;
+		}
+		omap_lock = to_omap_hwspinlock(lock);
+		kfree(omap_lock);
+	}
+	pm_runtime_disable(&pdev->dev);
+iounmap_base:
+	iounmap(io_base);
+free_state:
+	kfree(state);
+	return ret;
+}
+
+static int omap_hwspinlock_remove(struct platform_device *pdev)
+{
+	struct omap_hwspinlock_state *state = platform_get_drvdata(pdev);
+	struct hwspinlock *lock;
+	struct omap_hwspinlock *omap_lock;
+	int i;
+
+	for (i = 0; i < state->num_locks; i++) {
+		lock = hwspin_lock_unregister(i);
+		/* this shouldn't happen at this point. if it does, at least
+		 * don't continue with the remove */
+		if (!lock) {
+			dev_err(&pdev->dev, "%s: failed on %d\n", __func__, i);
+			return -EBUSY;
+		}
+
+		omap_lock = to_omap_hwspinlock(lock);
+		kfree(omap_lock);
+	}
+
+	pm_runtime_disable(&pdev->dev);
+	iounmap(state->io_base);
+	kfree(state);
+
+	return 0;
+}
+
+static struct platform_driver omap_hwspinlock_driver = {
+	.probe = omap_hwspinlock_probe,
+	.remove = omap_hwspinlock_remove,
+	.driver = {
+		.name = "omap_hwspinlock",
+	},
+};
+
+static int __init omap_hwspinlock_init(void)
+{
+	return platform_driver_register(&omap_hwspinlock_driver);
+}
+/* board init code might need to reserve hwspinlocks for predefined purposes */
+postcore_initcall(omap_hwspinlock_init);
+
+static void __exit omap_hwspinlock_exit(void)
+{
+	platform_driver_unregister(&omap_hwspinlock_driver);
+}
+module_exit(omap_hwspinlock_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock driver for OMAP");
+MODULE_AUTHOR("Simon Que <sque@ti.com>");
+MODULE_AUTHOR("Hari Kanigeri <h-kanigeri2@ti.com>");
+MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
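As the comment above postcore_initcall() notes, board code may reserve specific locks once the driver has registered them. A sketch of such a reservation; the lock id, the names, and the initcall level are hypothetical and depend on when the board actually registers the omap_hwspinlock platform device:

	#include <linux/hwspinlock.h>

	/* hypothetical lock id agreed upon with the remote coprocessor firmware */
	#define IPC_HWSPINLOCK_ID	0

	static struct hwspinlock *ipc_hwlock;

	static int __init board_reserve_ipc_hwlock(void)
	{
		/* returns NULL if the id is absent or already taken */
		ipc_hwlock = hwspin_lock_request_specific(IPC_HWSPINLOCK_ID);
		return ipc_hwlock ? 0 : -EBUSY;
	}
	arch_initcall(board_reserve_ipc_hwlock);	/* runs after postcore_initcall */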
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index afe8c6fa166a..54f91321749a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -225,7 +225,7 @@ config MMC_OMAP
 
 config MMC_OMAP_HS
 	tristate "TI OMAP High Speed Multimedia Card Interface support"
-	depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
+	depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
 	help
 	  This selects the TI OMAP High Speed Multimedia card Interface.
 	  If you have an OMAP2430 or OMAP3 board or OMAP4 board with a
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 078fdf11af03..158c0ee53b2c 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -118,7 +118,7 @@
 
 #define MMC_TIMEOUT_MS		20
 #define OMAP_MMC_MASTER_CLOCK	96000000
-#define DRIVER_NAME		"mmci-omap-hs"
+#define DRIVER_NAME		"omap_hsmmc"
 
 /* Timeouts for entering power saving states on inactivity, msec */
 #define OMAP_MMC_DISABLED_TIMEOUT	100
@@ -260,7 +260,7 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on,
 	return ret;
 }
 
-static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
+static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on,
 				   int vdd)
 {
 	struct omap_hsmmc_host *host =
@@ -316,6 +316,12 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
 	return ret;
 }
 
+static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on,
+				  int vdd)
+{
+	return 0;
+}
+
 static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
 				  int vdd, int cardsleep)
 {
@@ -326,7 +332,7 @@ static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
 	return regulator_set_mode(host->vcc, mode);
 }
 
-static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
+static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep,
 				   int vdd, int cardsleep)
 {
 	struct omap_hsmmc_host *host =
@@ -365,6 +371,12 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
 	return regulator_enable(host->vcc_aux);
 }
 
+static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep,
+				  int vdd, int cardsleep)
+{
+	return 0;
+}
+
 static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
 {
 	struct regulator *reg;
@@ -379,10 +391,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
 		break;
 	case OMAP_MMC2_DEVID:
 	case OMAP_MMC3_DEVID:
+	case OMAP_MMC5_DEVID:
 		/* Off-chip level shifting, or none */
-		mmc_slot(host).set_power = omap_hsmmc_23_set_power;
-		mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep;
+		mmc_slot(host).set_power = omap_hsmmc_235_set_power;
+		mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep;
 		break;
+	case OMAP_MMC4_DEVID:
+		mmc_slot(host).set_power = omap_hsmmc_4_set_power;
+		mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep;
 	default:
 		pr_err("MMC%d configuration not supported!\n", host->id);
 		return -EINVAL;
@@ -1555,7 +1571,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		break;
 	}
 
-	if (host->id == OMAP_MMC1_DEVID) {
+	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
 		/* Only MMC1 can interface at 3V without some flavor
 		 * of external transceiver; but they all handle 1.8V.
 		 */
@@ -1647,7 +1663,7 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
 	u32 hctl, capa, value;
 
 	/* Only MMC1 supports 3.0V */
-	if (host->id == OMAP_MMC1_DEVID) {
+	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
 		hctl = SDVS30;
 		capa = VS30 | VS18;
 	} else {
@@ -2101,14 +2117,14 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
 	/* we start off in DISABLED state */
 	host->dpm_state = DISABLED;
 
-	if (mmc_host_enable(host->mmc) != 0) {
+	if (clk_enable(host->iclk) != 0) {
 		clk_put(host->iclk);
 		clk_put(host->fclk);
 		goto err1;
 	}
 
-	if (clk_enable(host->iclk) != 0) {
-		mmc_host_disable(host->mmc);
+	if (mmc_host_enable(host->mmc) != 0) {
+		clk_disable(host->iclk);
 		clk_put(host->iclk);
 		clk_put(host->fclk);
 		goto err1;
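The two set_ios/conf_bus_power hunks above stop keying dual-voltage support off the controller instance (host->id == OMAP_MMC1_DEVID) and instead test a capability flag in the platform data, so the board wiring is declared explicitly. A sketch of the board side, assuming OMAP_HSMMC_SUPPORTS_DUAL_VOLT is the flag this series introduces in <plat/mmc.h>; the slot and regulator details are elided:

	/* board code: MMC1 is wired to a dual-voltage (1.8 V / 3.0 V) supply */
	static struct omap_mmc_platform_data mmc1_data = {
		.nr_slots		= 1,
		.controller_flags	= OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
		/* ... slot and regulator setup ... */
	};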
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 450afc5df0bd..4f6c06f16328 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -106,23 +106,6 @@ config MTD_NAND_OMAP2
 	help
 	  Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
 
-config MTD_NAND_OMAP_PREFETCH
-	bool "GPMC prefetch support for NAND Flash device"
-	depends on MTD_NAND_OMAP2
-	default y
-	help
-	  The NAND device can be accessed for Read/Write using GPMC PREFETCH engine
-	  to improve the performance.
-
-config MTD_NAND_OMAP_PREFETCH_DMA
-	depends on MTD_NAND_OMAP_PREFETCH
-	bool "DMA mode"
-	default n
-	help
-	  The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode
-	  or in DMA interrupt mode.
-	  Say y for DMA mode or MPU mode will be used
-
 config MTD_NAND_IDS
 	tristate
 
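With these two symbols removed, the prefetch engine's transfer mode is no longer a build-time choice; each board selects it at runtime through its NAND platform data. A sketch under the assumption that this series adds an xfer_type field and the NAND_OMAP_* transfer-mode values to <plat/nand.h>:

	#include <plat/nand.h>

	/* board code: use the GPMC prefetch engine in DMA mode */
	static struct omap_nand_platform_data board_nand_data = {
		.cs		= 0,
		.xfer_type	= NAND_OMAP_PREFETCH_DMA,	/* or NAND_OMAP_PREFETCH_IRQ */
	};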
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 28af71c61834..7b8f1fffc528 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
12 | #include <linux/dma-mapping.h> | 12 | #include <linux/dma-mapping.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/interrupt.h> | ||
14 | #include <linux/jiffies.h> | 15 | #include <linux/jiffies.h> |
15 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
16 | #include <linux/mtd/mtd.h> | 17 | #include <linux/mtd/mtd.h> |
@@ -24,6 +25,7 @@ | |||
24 | #include <plat/nand.h> | 25 | #include <plat/nand.h> |
25 | 26 | ||
26 | #define DRIVER_NAME "omap2-nand" | 27 | #define DRIVER_NAME "omap2-nand" |
28 | #define OMAP_NAND_TIMEOUT_MS 5000 | ||
27 | 29 | ||
28 | #define NAND_Ecc_P1e (1 << 0) | 30 | #define NAND_Ecc_P1e (1 << 0) |
29 | #define NAND_Ecc_P2e (1 << 1) | 31 | #define NAND_Ecc_P2e (1 << 1) |
@@ -96,26 +98,19 @@ | |||
96 | static const char *part_probes[] = { "cmdlinepart", NULL }; | 98 | static const char *part_probes[] = { "cmdlinepart", NULL }; |
97 | #endif | 99 | #endif |
98 | 100 | ||
99 | #ifdef CONFIG_MTD_NAND_OMAP_PREFETCH | 101 | /* oob info generated runtime depending on ecc algorithm and layout selected */ |
100 | static int use_prefetch = 1; | 102 | static struct nand_ecclayout omap_oobinfo; |
101 | 103 | /* Define some generic bad / good block scan pattern which are used | |
102 | /* "modprobe ... use_prefetch=0" etc */ | 104 | * while scanning a device for factory marked good / bad blocks |
103 | module_param(use_prefetch, bool, 0); | 105 | */ |
104 | MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH"); | 106 | static uint8_t scan_ff_pattern[] = { 0xff }; |
105 | 107 | static struct nand_bbt_descr bb_descrip_flashbased = { | |
106 | #ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA | 108 | .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, |
107 | static int use_dma = 1; | 109 | .offs = 0, |
110 | .len = 1, | ||
111 | .pattern = scan_ff_pattern, | ||
112 | }; | ||
108 | 113 | ||
109 | /* "modprobe ... use_dma=0" etc */ | ||
110 | module_param(use_dma, bool, 0); | ||
111 | MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); | ||
112 | #else | ||
113 | static const int use_dma; | ||
114 | #endif | ||
115 | #else | ||
116 | const int use_prefetch; | ||
117 | static const int use_dma; | ||
118 | #endif | ||
119 | 114 | ||
120 | struct omap_nand_info { | 115 | struct omap_nand_info { |
121 | struct nand_hw_control controller; | 116 | struct nand_hw_control controller; |
@@ -129,6 +124,13 @@ struct omap_nand_info { | |||
129 | unsigned long phys_base; | 124 | unsigned long phys_base; |
130 | struct completion comp; | 125 | struct completion comp; |
131 | int dma_ch; | 126 | int dma_ch; |
127 | int gpmc_irq; | ||
128 | enum { | ||
129 | OMAP_NAND_IO_READ = 0, /* read */ | ||
130 | OMAP_NAND_IO_WRITE, /* write */ | ||
131 | } iomode; | ||
132 | u_char *buf; | ||
133 | int buf_len; | ||
132 | }; | 134 | }; |
133 | 135 | ||
134 | /** | 136 | /** |
@@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len) | |||
256 | } | 258 | } |
257 | 259 | ||
258 | /* configure and start prefetch transfer */ | 260 | /* configure and start prefetch transfer */ |
259 | ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0); | 261 | ret = gpmc_prefetch_enable(info->gpmc_cs, |
262 | PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0); | ||
260 | if (ret) { | 263 | if (ret) { |
261 | /* PFPW engine is busy, use cpu copy method */ | 264 | /* PFPW engine is busy, use cpu copy method */ |
262 | if (info->nand.options & NAND_BUSWIDTH_16) | 265 | if (info->nand.options & NAND_BUSWIDTH_16) |
@@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd, | |||
288 | { | 291 | { |
289 | struct omap_nand_info *info = container_of(mtd, | 292 | struct omap_nand_info *info = container_of(mtd, |
290 | struct omap_nand_info, mtd); | 293 | struct omap_nand_info, mtd); |
291 | uint32_t pref_count = 0, w_count = 0; | 294 | uint32_t w_count = 0; |
292 | int i = 0, ret = 0; | 295 | int i = 0, ret = 0; |
293 | u16 *p; | 296 | u16 *p; |
297 | unsigned long tim, limit; | ||
294 | 298 | ||
295 | /* take care of subpage writes */ | 299 | /* take care of subpage writes */ |
296 | if (len % 2 != 0) { | 300 | if (len % 2 != 0) { |
@@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd, | |||
300 | } | 304 | } |
301 | 305 | ||
302 | /* configure and start prefetch transfer */ | 306 | /* configure and start prefetch transfer */ |
303 | ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1); | 307 | ret = gpmc_prefetch_enable(info->gpmc_cs, |
308 | PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1); | ||
304 | if (ret) { | 309 | if (ret) { |
305 | /* PFPW engine is busy, use cpu copy method */ | 310 | /* PFPW engine is busy, use cpu copy method */ |
306 | if (info->nand.options & NAND_BUSWIDTH_16) | 311 | if (info->nand.options & NAND_BUSWIDTH_16) |
@@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd, | |||
316 | iowrite16(*p++, info->nand.IO_ADDR_W); | 321 | iowrite16(*p++, info->nand.IO_ADDR_W); |
317 | } | 322 | } |
318 | /* wait for data to flushed-out before reset the prefetch */ | 323 | /* wait for data to flushed-out before reset the prefetch */ |
319 | do { | 324 | tim = 0; |
320 | pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT); | 325 | limit = (loops_per_jiffy * |
321 | } while (pref_count); | 326 | msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); |
327 | while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) | ||
328 | cpu_relax(); | ||
329 | |||
322 | /* disable and stop the PFPW engine */ | 330 | /* disable and stop the PFPW engine */ |
323 | gpmc_prefetch_reset(info->gpmc_cs); | 331 | gpmc_prefetch_reset(info->gpmc_cs); |
324 | } | 332 | } |
325 | } | 333 | } |
326 | 334 | ||
327 | #ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA | ||
328 | /* | 335 | /* |
329 | * omap_nand_dma_cb: callback on the completion of dma transfer | 336 | * omap_nand_dma_cb: callback on the completion of dma transfer |
330 | * @lch: logical channel | 337 | * @lch: logical channel |
@@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, | |||
348 | { | 355 | { |
349 | struct omap_nand_info *info = container_of(mtd, | 356 | struct omap_nand_info *info = container_of(mtd, |
350 | struct omap_nand_info, mtd); | 357 | struct omap_nand_info, mtd); |
351 | uint32_t prefetch_status = 0; | ||
352 | enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : | 358 | enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : |
353 | DMA_FROM_DEVICE; | 359 | DMA_FROM_DEVICE; |
354 | dma_addr_t dma_addr; | 360 | dma_addr_t dma_addr; |
355 | int ret; | 361 | int ret; |
362 | unsigned long tim, limit; | ||
356 | 363 | ||
357 | /* The fifo depth is 64 bytes. We have a sync at each frame and frame | 364 | /* The fifo depth is 64 bytes max. |
358 | * length is 64 bytes. | 365 | * But configure the FIFO-threahold to 32 to get a sync at each frame |
366 | * and frame length is 32 bytes. | ||
359 | */ | 367 | */ |
360 | int buf_len = len >> 6; | 368 | int buf_len = len >> 6; |
361 | 369 | ||
@@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, | |||
396 | OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); | 404 | OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); |
397 | } | 405 | } |
398 | /* configure and start prefetch transfer */ | 406 | /* configure and start prefetch transfer */ |
399 | ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write); | 407 | ret = gpmc_prefetch_enable(info->gpmc_cs, |
408 | PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); | ||
400 | if (ret) | 409 | if (ret) |
401 | /* PFPW engine is busy, use cpu copy methode */ | 410 | /* PFPW engine is busy, use cpu copy method */ |
402 | goto out_copy; | 411 | goto out_copy; |
403 | 412 | ||
404 | init_completion(&info->comp); | 413 | init_completion(&info->comp); |
@@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, | |||
407 | 416 | ||
408 | /* setup and start DMA using dma_addr */ | 417 | /* setup and start DMA using dma_addr */ |
409 | wait_for_completion(&info->comp); | 418 | wait_for_completion(&info->comp); |
419 | tim = 0; | ||
420 | limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); | ||
421 | while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) | ||
422 | cpu_relax(); | ||
410 | 423 | ||
411 | do { | ||
412 | prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); | ||
413 | } while (prefetch_status); | ||
414 | /* disable and stop the PFPW engine */ | 424 | /* disable and stop the PFPW engine */ |
415 | gpmc_prefetch_reset(info->gpmc_cs); | 425 | gpmc_prefetch_reset(info->gpmc_cs); |
416 | 426 | ||
@@ -426,14 +436,6 @@ out_copy: | |||
426 | : omap_write_buf8(mtd, (u_char *) addr, len); | 436 | : omap_write_buf8(mtd, (u_char *) addr, len); |
427 | return 0; | 437 | return 0; |
428 | } | 438 | } |
429 | #else | ||
430 | static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {} | ||
431 | static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, | ||
432 | unsigned int len, int is_write) | ||
433 | { | ||
434 | return 0; | ||
435 | } | ||
436 | #endif | ||
437 | 439 | ||
438 | /** | 440 | /** |
439 | * omap_read_buf_dma_pref - read data from NAND controller into buffer | 441 | * omap_read_buf_dma_pref - read data from NAND controller into buffer |
@@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd, | |||
466 | omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1); | 468 | omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1); |
467 | } | 469 | } |
468 | 470 | ||
471 | /* | ||
472 | * omap_nand_irq - GMPC irq handler | ||
473 | * @this_irq: gpmc irq number | ||
474 | * @dev: omap_nand_info structure pointer is passed here | ||
475 | */ | ||
476 | static irqreturn_t omap_nand_irq(int this_irq, void *dev) | ||
477 | { | ||
478 | struct omap_nand_info *info = (struct omap_nand_info *) dev; | ||
479 | u32 bytes; | ||
480 | u32 irq_stat; | ||
481 | |||
482 | irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS); | ||
483 | bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); | ||
484 | bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */ | ||
485 | if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */ | ||
486 | if (irq_stat & 0x2) | ||
487 | goto done; | ||
488 | |||
489 | if (info->buf_len && (info->buf_len < bytes)) | ||
490 | bytes = info->buf_len; | ||
491 | else if (!info->buf_len) | ||
492 | bytes = 0; | ||
493 | iowrite32_rep(info->nand.IO_ADDR_W, | ||
494 | (u32 *)info->buf, bytes >> 2); | ||
495 | info->buf = info->buf + bytes; | ||
496 | info->buf_len -= bytes; | ||
497 | |||
498 | } else { | ||
499 | ioread32_rep(info->nand.IO_ADDR_R, | ||
500 | (u32 *)info->buf, bytes >> 2); | ||
501 | info->buf = info->buf + bytes; | ||
502 | |||
503 | if (irq_stat & 0x2) | ||
504 | goto done; | ||
505 | } | ||
506 | gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat); | ||
507 | |||
508 | return IRQ_HANDLED; | ||
509 | |||
510 | done: | ||
511 | complete(&info->comp); | ||
512 | /* disable irq */ | ||
513 | gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0); | ||
514 | |||
515 | /* clear status */ | ||
516 | gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat); | ||
517 | |||
518 | return IRQ_HANDLED; | ||
519 | } | ||
520 | |||
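The irq-driven paths added below rest on a completion handshake: the buffer routine primes a struct completion and sleeps, and this handler calls complete() once the terminal-count event (bit 1 of the irq status) fires. Stripped to its essentials, the pattern looks like this sketch (the names are illustrative, not driver symbols):

#include <linux/completion.h>
#include <linux/interrupt.h>

struct xfer_ctx {
	struct completion comp;		/* signalled from the ISR */
};

static irqreturn_t xfer_irq(int irq, void *dev_id)
{
	struct xfer_ctx *ctx = dev_id;

	/* ...fill or drain the FIFO, as omap_nand_irq() does above... */
	complete(&ctx->comp);		/* wake the sleeping caller */
	return IRQ_HANDLED;
}

static void start_and_wait(struct xfer_ctx *ctx)
{
	init_completion(&ctx->comp);	/* must happen before the irq can fire */
	/* ...start the transfer and enable the FIFO/count events... */
	wait_for_completion(&ctx->comp); /* sleeps until the ISR completes us */
}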
521 | /* | ||
522 | * omap_read_buf_irq_pref - read data from NAND controller into buffer | ||
523 | * @mtd: MTD device structure | ||
524 | * @buf: buffer to store date | ||
525 | * @len: number of bytes to read | ||
526 | */ | ||
527 | static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len) | ||
528 | { | ||
529 | struct omap_nand_info *info = container_of(mtd, | ||
530 | struct omap_nand_info, mtd); | ||
531 | int ret = 0; | ||
532 | |||
533 | if (len <= mtd->oobsize) { | ||
534 | omap_read_buf_pref(mtd, buf, len); | ||
535 | return; | ||
536 | } | ||
537 | |||
538 | info->iomode = OMAP_NAND_IO_READ; | ||
539 | info->buf = buf; | ||
540 | init_completion(&info->comp); | ||
541 | |||
542 | /* configure and start prefetch transfer */ | ||
543 | ret = gpmc_prefetch_enable(info->gpmc_cs, | ||
544 | PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0); | ||
545 | if (ret) | ||
546 | /* PFPW engine is busy, use cpu copy method */ | ||
547 | goto out_copy; | ||
548 | |||
549 | info->buf_len = len; | ||
550 | /* enable irq */ | ||
551 | gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, | ||
552 | (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT)); | ||
553 | |||
554 | /* waiting for read to complete */ | ||
555 | wait_for_completion(&info->comp); | ||
556 | |||
557 | /* disable and stop the PFPW engine */ | ||
558 | gpmc_prefetch_reset(info->gpmc_cs); | ||
559 | return; | ||
560 | |||
561 | out_copy: | ||
562 | if (info->nand.options & NAND_BUSWIDTH_16) | ||
563 | omap_read_buf16(mtd, buf, len); | ||
564 | else | ||
565 | omap_read_buf8(mtd, buf, len); | ||
566 | } | ||
567 | |||
568 | /* | ||
569 | * omap_write_buf_irq_pref - write buffer to NAND controller | ||
570 | * @mtd: MTD device structure | ||
571 | * @buf: data buffer | ||
572 | * @len: number of bytes to write | ||
573 | */ | ||
574 | static void omap_write_buf_irq_pref(struct mtd_info *mtd, | ||
575 | const u_char *buf, int len) | ||
576 | { | ||
577 | struct omap_nand_info *info = container_of(mtd, | ||
578 | struct omap_nand_info, mtd); | ||
579 | int ret = 0; | ||
580 | unsigned long tim, limit; | ||
581 | |||
582 | if (len <= mtd->oobsize) { | ||
583 | omap_write_buf_pref(mtd, buf, len); | ||
584 | return; | ||
585 | } | ||
586 | |||
587 | info->iomode = OMAP_NAND_IO_WRITE; | ||
588 | info->buf = (u_char *) buf; | ||
589 | init_completion(&info->comp); | ||
590 | |||
591 | /* configure and start prefetch transfer : size=24 */ | ||
592 | ret = gpmc_prefetch_enable(info->gpmc_cs, | ||
593 | (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1); | ||
594 | if (ret) | ||
595 | /* PFPW engine is busy, use cpu copy method */ | ||
596 | goto out_copy; | ||
597 | |||
598 | info->buf_len = len; | ||
599 | /* enable irq */ | ||
600 | gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, | ||
601 | (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT)); | ||
602 | |||
603 | /* waiting for write to complete */ | ||
604 | wait_for_completion(&info->comp); | ||
605 | /* wait for data to be flushed out before resetting the prefetch */ | ||
606 | tim = 0; | ||
607 | limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); | ||
608 | while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) | ||
609 | cpu_relax(); | ||
610 | |||
611 | /* disable and stop the PFPW engine */ | ||
612 | gpmc_prefetch_reset(info->gpmc_cs); | ||
613 | return; | ||
614 | |||
615 | out_copy: | ||
616 | if (info->nand.options & NAND_BUSWIDTH_16) | ||
617 | omap_write_buf16(mtd, buf, len); | ||
618 | else | ||
619 | omap_write_buf8(mtd, buf, len); | ||
620 | } | ||
621 | |||
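Taken together, the three transfer paths now program three different FIFO thresholds. Assuming PREFETCH_FIFOTHRESHOLD_MAX is the full 64-byte FIFO depth (as the "fifo depth is 64 bytes max" comment earlier suggests), the arithmetic works out as below; the 24-byte value is the "size=24" mentioned in the write path's comment:

#define PREFETCH_FIFOTHRESHOLD_MAX	64	/* assumed: full FIFO depth */

static const unsigned int thr_dma_polled = PREFETCH_FIFOTHRESHOLD_MAX;		 /* 64 */
static const unsigned int thr_irq_read	 = PREFETCH_FIFOTHRESHOLD_MAX / 2;	 /* 32 */
static const unsigned int thr_irq_write	 = (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8; /* 24 */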
469 | /** | 622 | /** |
470 | * omap_verify_buf - Verify chip data against buffer | 623 | * omap_verify_buf - Verify chip data against buffer |
471 | * @mtd: MTD device structure | 624 | * @mtd: MTD device structure |
@@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len) | |||
487 | return 0; | 640 | return 0; |
488 | } | 641 | } |
489 | 642 | ||
490 | #ifdef CONFIG_MTD_NAND_OMAP_HWECC | ||
491 | |||
492 | /** | 643 | /** |
493 | * gen_true_ecc - This function will generate true ECC value | 644 | * gen_true_ecc - This function will generate true ECC value |
494 | * @ecc_buf: buffer to store ecc code | 645 | * @ecc_buf: buffer to store ecc code |
@@ -708,8 +859,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode) | |||
708 | gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size); | 859 | gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size); |
709 | } | 860 | } |
710 | 861 | ||
711 | #endif | ||
712 | |||
713 | /** | 862 | /** |
714 | * omap_wait - wait until the command is done | 863 | * omap_wait - wait until the command is done |
715 | * @mtd: MTD device structure | 864 | * @mtd: MTD device structure |
@@ -779,6 +928,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
779 | struct omap_nand_info *info; | 928 | struct omap_nand_info *info; |
780 | struct omap_nand_platform_data *pdata; | 929 | struct omap_nand_platform_data *pdata; |
781 | int err; | 930 | int err; |
931 | int i, offset; | ||
782 | 932 | ||
783 | pdata = pdev->dev.platform_data; | 933 | pdata = pdev->dev.platform_data; |
784 | if (pdata == NULL) { | 934 | if (pdata == NULL) { |
@@ -804,7 +954,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
804 | info->mtd.name = dev_name(&pdev->dev); | 954 | info->mtd.name = dev_name(&pdev->dev); |
805 | info->mtd.owner = THIS_MODULE; | 955 | info->mtd.owner = THIS_MODULE; |
806 | 956 | ||
807 | info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0; | 957 | info->nand.options = pdata->devsize; |
808 | info->nand.options |= NAND_SKIP_BBTSCAN; | 958 | info->nand.options |= NAND_SKIP_BBTSCAN; |
809 | 959 | ||
810 | /* NAND write protect off */ | 960 | /* NAND write protect off */ |
@@ -842,28 +992,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
842 | info->nand.chip_delay = 50; | 992 | info->nand.chip_delay = 50; |
843 | } | 993 | } |
844 | 994 | ||
845 | if (use_prefetch) { | 995 | switch (pdata->xfer_type) { |
846 | 996 | case NAND_OMAP_PREFETCH_POLLED: | |
847 | info->nand.read_buf = omap_read_buf_pref; | 997 | info->nand.read_buf = omap_read_buf_pref; |
848 | info->nand.write_buf = omap_write_buf_pref; | 998 | info->nand.write_buf = omap_write_buf_pref; |
849 | if (use_dma) { | 999 | break; |
850 | err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", | 1000 | |
851 | omap_nand_dma_cb, &info->comp, &info->dma_ch); | 1001 | case NAND_OMAP_POLLED: |
852 | if (err < 0) { | ||
853 | info->dma_ch = -1; | ||
854 | printk(KERN_WARNING "DMA request failed." | ||
855 | " Non-dma data transfer mode\n"); | ||
856 | } else { | ||
857 | omap_set_dma_dest_burst_mode(info->dma_ch, | ||
858 | OMAP_DMA_DATA_BURST_16); | ||
859 | omap_set_dma_src_burst_mode(info->dma_ch, | ||
860 | OMAP_DMA_DATA_BURST_16); | ||
861 | |||
862 | info->nand.read_buf = omap_read_buf_dma_pref; | ||
863 | info->nand.write_buf = omap_write_buf_dma_pref; | ||
864 | } | ||
865 | } | ||
866 | } else { | ||
867 | if (info->nand.options & NAND_BUSWIDTH_16) { | 1002 | if (info->nand.options & NAND_BUSWIDTH_16) { |
868 | info->nand.read_buf = omap_read_buf16; | 1003 | info->nand.read_buf = omap_read_buf16; |
869 | info->nand.write_buf = omap_write_buf16; | 1004 | info->nand.write_buf = omap_write_buf16; |
@@ -871,20 +1006,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
871 | info->nand.read_buf = omap_read_buf8; | 1006 | info->nand.read_buf = omap_read_buf8; |
872 | info->nand.write_buf = omap_write_buf8; | 1007 | info->nand.write_buf = omap_write_buf8; |
873 | } | 1008 | } |
1009 | break; | ||
1010 | |||
1011 | case NAND_OMAP_PREFETCH_DMA: | ||
1012 | err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", | ||
1013 | omap_nand_dma_cb, &info->comp, &info->dma_ch); | ||
1014 | if (err < 0) { | ||
1015 | info->dma_ch = -1; | ||
1016 | dev_err(&pdev->dev, "DMA request failed!\n"); | ||
1017 | goto out_release_mem_region; | ||
1018 | } else { | ||
1019 | omap_set_dma_dest_burst_mode(info->dma_ch, | ||
1020 | OMAP_DMA_DATA_BURST_16); | ||
1021 | omap_set_dma_src_burst_mode(info->dma_ch, | ||
1022 | OMAP_DMA_DATA_BURST_16); | ||
1023 | |||
1024 | info->nand.read_buf = omap_read_buf_dma_pref; | ||
1025 | info->nand.write_buf = omap_write_buf_dma_pref; | ||
1026 | } | ||
1027 | break; | ||
1028 | |||
1029 | case NAND_OMAP_PREFETCH_IRQ: | ||
1030 | err = request_irq(pdata->gpmc_irq, | ||
1031 | omap_nand_irq, IRQF_SHARED, "gpmc-nand", info); | ||
1032 | if (err) { | ||
1033 | dev_err(&pdev->dev, "requesting irq(%d) error:%d", | ||
1034 | pdata->gpmc_irq, err); | ||
1035 | goto out_release_mem_region; | ||
1036 | } else { | ||
1037 | info->gpmc_irq = pdata->gpmc_irq; | ||
1038 | info->nand.read_buf = omap_read_buf_irq_pref; | ||
1039 | info->nand.write_buf = omap_write_buf_irq_pref; | ||
1040 | } | ||
1041 | break; | ||
1042 | |||
1043 | default: | ||
1044 | dev_err(&pdev->dev, | ||
1045 | "xfer_type(%d) not supported!\n", pdata->xfer_type); | ||
1046 | err = -EINVAL; | ||
1047 | goto out_release_mem_region; | ||
874 | } | 1048 | } |
875 | info->nand.verify_buf = omap_verify_buf; | ||
876 | 1049 | ||
877 | #ifdef CONFIG_MTD_NAND_OMAP_HWECC | 1050 | info->nand.verify_buf = omap_verify_buf; |
878 | info->nand.ecc.bytes = 3; | ||
879 | info->nand.ecc.size = 512; | ||
880 | info->nand.ecc.calculate = omap_calculate_ecc; | ||
881 | info->nand.ecc.hwctl = omap_enable_hwecc; | ||
882 | info->nand.ecc.correct = omap_correct_data; | ||
883 | info->nand.ecc.mode = NAND_ECC_HW; | ||
884 | 1051 | ||
885 | #else | 1052 | /* select the ecc type */
886 | info->nand.ecc.mode = NAND_ECC_SOFT; | 1053 | if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT) |
887 | #endif | 1054 | info->nand.ecc.mode = NAND_ECC_SOFT; |
1055 | else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) || | ||
1056 | (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) { | ||
1057 | info->nand.ecc.bytes = 3; | ||
1058 | info->nand.ecc.size = 512; | ||
1059 | info->nand.ecc.calculate = omap_calculate_ecc; | ||
1060 | info->nand.ecc.hwctl = omap_enable_hwecc; | ||
1061 | info->nand.ecc.correct = omap_correct_data; | ||
1062 | info->nand.ecc.mode = NAND_ECC_HW; | ||
1063 | } | ||
888 | 1064 | ||
889 | /* DIP switches on some boards change between 8 and 16 bit | 1065 | /* DIP switches on some boards change between 8 and 16 bit |
890 | * bus widths for flash. Try the other width if the first try fails. | 1066 | * bus widths for flash. Try the other width if the first try fails. |
@@ -897,6 +1073,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
897 | } | 1073 | } |
898 | } | 1074 | } |
899 | 1075 | ||
1076 | /* rom code layout */ | ||
1077 | if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) { | ||
1078 | |||
1079 | if (info->nand.options & NAND_BUSWIDTH_16) | ||
1080 | offset = 2; | ||
1081 | else { | ||
1082 | offset = 1; | ||
1083 | info->nand.badblock_pattern = &bb_descrip_flashbased; | ||
1084 | } | ||
1085 | omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16); | ||
1086 | for (i = 0; i < omap_oobinfo.eccbytes; i++) | ||
1087 | omap_oobinfo.eccpos[i] = i+offset; | ||
1088 | |||
1089 | omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes; | ||
1090 | omap_oobinfo.oobfree->length = info->mtd.oobsize - | ||
1091 | (offset + omap_oobinfo.eccbytes); | ||
1092 | |||
1093 | info->nand.ecc.layout = &omap_oobinfo; | ||
1094 | } | ||
1095 | |||
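For a concrete feel of the ROM-code layout computed above, take a typical 64-byte OOB area on a 16-bit bus (offset = 2): eccbytes = 3 * (64 / 16) = 12, so ECC occupies OOB bytes 2..13, the free region starts at byte 14, and 64 - 14 = 50 bytes remain free. As a sketch of the same math (values illustrative):

/* Worked example of the omap_oobinfo arithmetic; an 8-bit part would
 * use offset = 1 and additionally a flash-based bad-block pattern.
 */
static unsigned int romcode_free_bytes(unsigned int oobsize, unsigned int offset)
{
	unsigned int eccbytes = 3 * (oobsize / 16);	/* 12 for oobsize = 64 */

	return oobsize - (offset + eccbytes);		/* 50 for offset = 2 */
}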
900 | #ifdef CONFIG_MTD_PARTITIONS | 1096 | #ifdef CONFIG_MTD_PARTITIONS |
901 | err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); | 1097 | err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); |
902 | if (err > 0) | 1098 | if (err > 0) |
@@ -926,9 +1122,12 @@ static int omap_nand_remove(struct platform_device *pdev) | |||
926 | mtd); | 1122 | mtd); |
927 | 1123 | ||
928 | platform_set_drvdata(pdev, NULL); | 1124 | platform_set_drvdata(pdev, NULL); |
929 | if (use_dma) | 1125 | if (info->dma_ch != -1) |
930 | omap_free_dma(info->dma_ch); | 1126 | omap_free_dma(info->dma_ch); |
931 | 1127 | ||
1128 | if (info->gpmc_irq) | ||
1129 | free_irq(info->gpmc_irq, info); | ||
1130 | |||
932 | /* Release NAND device, its internal structures and partitions */ | 1131 | /* Release NAND device, its internal structures and partitions */ |
933 | nand_release(&info->mtd); | 1132 | nand_release(&info->mtd); |
934 | iounmap(info->nand.IO_ADDR_R); | 1133 | iounmap(info->nand.IO_ADDR_R); |
@@ -947,16 +1146,8 @@ static struct platform_driver omap_nand_driver = { | |||
947 | 1146 | ||
948 | static int __init omap_nand_init(void) | 1147 | static int __init omap_nand_init(void) |
949 | { | 1148 | { |
950 | printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME); | 1149 | pr_info("%s driver initializing\n", DRIVER_NAME); |
951 | 1150 | ||
952 | /* This check is required if driver is being | ||
953 | * loaded run time as a module | ||
954 | */ | ||
955 | if ((1 == use_dma) && (0 == use_prefetch)) { | ||
956 | printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 " | ||
957 | "without use_prefetch'. Prefetch will not be" | ||
958 | " used in either mode (mpu or dma)\n"); | ||
959 | } | ||
960 | return platform_driver_register(&omap_nand_driver); | 1151 | return platform_driver_register(&omap_nand_driver); |
961 | } | 1152 | } |
962 | 1153 | ||
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index c849cacf4b2f..14a49abe057e 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c | |||
@@ -63,7 +63,7 @@ struct omap2_onenand { | |||
63 | struct completion dma_done; | 63 | struct completion dma_done; |
64 | int dma_channel; | 64 | int dma_channel; |
65 | int freq; | 65 | int freq; |
66 | int (*setup)(void __iomem *base, int freq); | 66 | int (*setup)(void __iomem *base, int *freq_ptr); |
67 | struct regulator *regulator; | 67 | struct regulator *regulator; |
68 | }; | 68 | }; |
69 | 69 | ||
@@ -148,11 +148,9 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) | |||
148 | wait_err("controller error", state, ctrl, intr); | 148 | wait_err("controller error", state, ctrl, intr); |
149 | return -EIO; | 149 | return -EIO; |
150 | } | 150 | } |
151 | if ((intr & intr_flags) != intr_flags) { | 151 | if ((intr & intr_flags) == intr_flags) |
152 | wait_err("timeout", state, ctrl, intr); | 152 | return 0; |
153 | return -EIO; | 153 | /* Continue in wait for interrupt branch */ |
154 | } | ||
155 | return 0; | ||
156 | } | 154 | } |
157 | 155 | ||
158 | if (state != FL_READING) { | 156 | if (state != FL_READING) { |
@@ -581,7 +579,7 @@ static int __adjust_timing(struct device *dev, void *data) | |||
581 | 579 | ||
582 | /* DMA is not in use so this is all that is needed */ | 580 | /* DMA is not in use so this is all that is needed */ |
583 | /* Revisit for OMAP3! */ | 581 | /* Revisit for OMAP3! */ |
584 | ret = c->setup(c->onenand.base, c->freq); | 582 | ret = c->setup(c->onenand.base, &c->freq); |
585 | 583 | ||
586 | return ret; | 584 | return ret; |
587 | } | 585 | } |
@@ -673,7 +671,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
673 | } | 671 | } |
674 | 672 | ||
675 | if (pdata->onenand_setup != NULL) { | 673 | if (pdata->onenand_setup != NULL) { |
676 | r = pdata->onenand_setup(c->onenand.base, c->freq); | 674 | r = pdata->onenand_setup(c->onenand.base, &c->freq); |
677 | if (r < 0) { | 675 | if (r < 0) { |
678 | dev_err(&pdev->dev, "Onenand platform setup failed: " | 676 | dev_err(&pdev->dev, "Onenand platform setup failed: " |
679 | "%d\n", r); | 677 | "%d\n", r); |
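The setup hook's second argument changes from a value to a pointer: rather than the driver inferring the bus frequency from the chip's version_id (the switch deleted further down), the board-supplied callback now picks the GPMC timings and reports the frequency it actually achieved through *freq_ptr, which the probe then logs. A hedged sketch of the contract, with board specifics elided (board_onenand_setup is an illustrative name, not a symbol from this patch):

/* Illustrative board-side hook matching the new signature; the real
 * implementations live in the OMAP board files.
 */
static int board_onenand_setup(void __iomem *base, int *freq_ptr)
{
	int freq = 66;		/* MHz actually programmed, placeholder */

	/* ...probe the chip, set GPMC timings for that speed... */

	*freq_ptr = freq;	/* the driver stores and prints this */
	return 0;
}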
@@ -718,8 +716,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
718 | } | 716 | } |
719 | 717 | ||
720 | dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual " | 718 | dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual " |
721 | "base %p\n", c->gpmc_cs, c->phys_base, | 719 | "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base, |
722 | c->onenand.base); | 720 | c->onenand.base, c->freq); |
723 | 721 | ||
724 | c->pdev = pdev; | 722 | c->pdev = pdev; |
725 | c->mtd.name = dev_name(&pdev->dev); | 723 | c->mtd.name = dev_name(&pdev->dev); |
@@ -754,24 +752,6 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
754 | if ((r = onenand_scan(&c->mtd, 1)) < 0) | 752 | if ((r = onenand_scan(&c->mtd, 1)) < 0) |
755 | goto err_release_regulator; | 753 | goto err_release_regulator; |
756 | 754 | ||
757 | switch ((c->onenand.version_id >> 4) & 0xf) { | ||
758 | case 0: | ||
759 | c->freq = 40; | ||
760 | break; | ||
761 | case 1: | ||
762 | c->freq = 54; | ||
763 | break; | ||
764 | case 2: | ||
765 | c->freq = 66; | ||
766 | break; | ||
767 | case 3: | ||
768 | c->freq = 83; | ||
769 | break; | ||
770 | case 4: | ||
771 | c->freq = 104; | ||
772 | break; | ||
773 | } | ||
774 | |||
775 | #ifdef CONFIG_MTD_PARTITIONS | 755 | #ifdef CONFIG_MTD_PARTITIONS |
776 | r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0); | 756 | r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0); |
777 | if (r > 0) | 757 | if (r > 0) |
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index abb1ffbf3d20..36501adc125d 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2005, 2006 Nokia Corporation | 4 | * Copyright (C) 2005, 2006 Nokia Corporation |
5 | * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and | 5 | * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and |
6 | * Juha Yrjölä <juha.yrjola@nokia.com> | 6 | * Juha Yrjölä <juha.yrjola@nokia.com> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/clk.h> | 33 | #include <linux/clk.h> |
34 | #include <linux/io.h> | 34 | #include <linux/io.h> |
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/pm_runtime.h> | ||
36 | 37 | ||
37 | #include <linux/spi/spi.h> | 38 | #include <linux/spi/spi.h> |
38 | 39 | ||
@@ -46,7 +47,6 @@ | |||
46 | #define OMAP2_MCSPI_MAX_CTRL 4 | 47 | #define OMAP2_MCSPI_MAX_CTRL 4 |
47 | 48 | ||
48 | #define OMAP2_MCSPI_REVISION 0x00 | 49 | #define OMAP2_MCSPI_REVISION 0x00 |
49 | #define OMAP2_MCSPI_SYSCONFIG 0x10 | ||
50 | #define OMAP2_MCSPI_SYSSTATUS 0x14 | 50 | #define OMAP2_MCSPI_SYSSTATUS 0x14 |
51 | #define OMAP2_MCSPI_IRQSTATUS 0x18 | 51 | #define OMAP2_MCSPI_IRQSTATUS 0x18 |
52 | #define OMAP2_MCSPI_IRQENABLE 0x1c | 52 | #define OMAP2_MCSPI_IRQENABLE 0x1c |
@@ -63,13 +63,6 @@ | |||
63 | 63 | ||
64 | /* per-register bitmasks: */ | 64 | /* per-register bitmasks: */ |
65 | 65 | ||
66 | #define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE BIT(4) | ||
67 | #define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP BIT(2) | ||
68 | #define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE BIT(0) | ||
69 | #define OMAP2_MCSPI_SYSCONFIG_SOFTRESET BIT(1) | ||
70 | |||
71 | #define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0) | ||
72 | |||
73 | #define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) | 66 | #define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) |
74 | #define OMAP2_MCSPI_MODULCTRL_MS BIT(2) | 67 | #define OMAP2_MCSPI_MODULCTRL_MS BIT(2) |
75 | #define OMAP2_MCSPI_MODULCTRL_STEST BIT(3) | 68 | #define OMAP2_MCSPI_MODULCTRL_STEST BIT(3) |
@@ -122,13 +115,12 @@ struct omap2_mcspi { | |||
122 | spinlock_t lock; | 115 | spinlock_t lock; |
123 | struct list_head msg_queue; | 116 | struct list_head msg_queue; |
124 | struct spi_master *master; | 117 | struct spi_master *master; |
125 | struct clk *ick; | ||
126 | struct clk *fck; | ||
127 | /* Virtual base address of the controller */ | 118 | /* Virtual base address of the controller */ |
128 | void __iomem *base; | 119 | void __iomem *base; |
129 | unsigned long phys; | 120 | unsigned long phys; |
130 | /* SPI1 has 4 channels, while SPI2 has 2 */ | 121 | /* SPI1 has 4 channels, while SPI2 has 2 */ |
131 | struct omap2_mcspi_dma *dma_channels; | 122 | struct omap2_mcspi_dma *dma_channels; |
123 | struct device *dev; | ||
132 | }; | 124 | }; |
133 | 125 | ||
134 | struct omap2_mcspi_cs { | 126 | struct omap2_mcspi_cs { |
@@ -144,7 +136,6 @@ struct omap2_mcspi_cs { | |||
144 | * corresponding registers are modified. | 136 | * corresponding registers are modified. |
145 | */ | 137 | */ |
146 | struct omap2_mcspi_regs { | 138 | struct omap2_mcspi_regs { |
147 | u32 sysconfig; | ||
148 | u32 modulctrl; | 139 | u32 modulctrl; |
149 | u32 wakeupenable; | 140 | u32 wakeupenable; |
150 | struct list_head cs; | 141 | struct list_head cs; |
@@ -268,9 +259,6 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) | |||
268 | mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, | 259 | mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, |
269 | omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl); | 260 | omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl); |
270 | 261 | ||
271 | mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG, | ||
272 | omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig); | ||
273 | |||
274 | mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, | 262 | mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, |
275 | omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable); | 263 | omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable); |
276 | 264 | ||
@@ -280,20 +268,12 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) | |||
280 | } | 268 | } |
281 | static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi) | 269 | static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi) |
282 | { | 270 | { |
283 | clk_disable(mcspi->ick); | 271 | pm_runtime_put_sync(mcspi->dev); |
284 | clk_disable(mcspi->fck); | ||
285 | } | 272 | } |
286 | 273 | ||
287 | static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) | 274 | static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) |
288 | { | 275 | { |
289 | if (clk_enable(mcspi->ick)) | 276 | return pm_runtime_get_sync(mcspi->dev); |
290 | return -ENODEV; | ||
291 | if (clk_enable(mcspi->fck)) | ||
292 | return -ENODEV; | ||
293 | |||
294 | omap2_mcspi_restore_ctx(mcspi); | ||
295 | |||
296 | return 0; | ||
297 | } | 277 | } |
298 | 278 | ||
299 | static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) | 279 | static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) |
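The explicit ick/fck handling gives way to runtime PM: pm_runtime_get_sync() returns a negative errno on failure (which is why callers later in this patch check ret < 0 instead of mapping any failure to -ENODEV), and the context restore moves out of the enable path into the new runtime_resume callback. The resulting usage idiom, as a minimal sketch:

#include <linux/pm_runtime.h>

/* Bracket register access with runtime-PM references; the OMAP core
 * enables the interface/functional clocks behind get_sync(), and the
 * driver's ->runtime_resume() restores context if the block was off.
 */
static int mcspi_do_io(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* negative errno on failure */

	if (ret < 0)
		return ret;

	/* ...program the McSPI registers... */

	pm_runtime_put_sync(dev);		/* let the device idle again */
	return 0;
}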
@@ -819,8 +799,9 @@ static int omap2_mcspi_setup(struct spi_device *spi) | |||
819 | return ret; | 799 | return ret; |
820 | } | 800 | } |
821 | 801 | ||
822 | if (omap2_mcspi_enable_clocks(mcspi)) | 802 | ret = omap2_mcspi_enable_clocks(mcspi); |
823 | return -ENODEV; | 803 | if (ret < 0) |
804 | return ret; | ||
824 | 805 | ||
825 | ret = omap2_mcspi_setup_transfer(spi, NULL); | 806 | ret = omap2_mcspi_setup_transfer(spi, NULL); |
826 | omap2_mcspi_disable_clocks(mcspi); | 807 | omap2_mcspi_disable_clocks(mcspi); |
@@ -863,10 +844,11 @@ static void omap2_mcspi_work(struct work_struct *work) | |||
863 | struct omap2_mcspi *mcspi; | 844 | struct omap2_mcspi *mcspi; |
864 | 845 | ||
865 | mcspi = container_of(work, struct omap2_mcspi, work); | 846 | mcspi = container_of(work, struct omap2_mcspi, work); |
866 | spin_lock_irq(&mcspi->lock); | ||
867 | 847 | ||
868 | if (omap2_mcspi_enable_clocks(mcspi)) | 848 | if (omap2_mcspi_enable_clocks(mcspi) < 0) |
869 | goto out; | 849 | return; |
850 | |||
851 | spin_lock_irq(&mcspi->lock); | ||
870 | 852 | ||
871 | /* We only enable one channel at a time -- the one whose message is | 853 | /* We only enable one channel at a time -- the one whose message is |
872 | * at the head of the queue -- although this controller would gladly | 854 | * at the head of the queue -- although this controller would gladly |
@@ -979,10 +961,9 @@ static void omap2_mcspi_work(struct work_struct *work) | |||
979 | spin_lock_irq(&mcspi->lock); | 961 | spin_lock_irq(&mcspi->lock); |
980 | } | 962 | } |
981 | 963 | ||
982 | omap2_mcspi_disable_clocks(mcspi); | ||
983 | |||
984 | out: | ||
985 | spin_unlock_irq(&mcspi->lock); | 964 | spin_unlock_irq(&mcspi->lock); |
965 | |||
966 | omap2_mcspi_disable_clocks(mcspi); | ||
986 | } | 967 | } |
987 | 968 | ||
988 | static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) | 969 | static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) |
@@ -1058,25 +1039,15 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) | |||
1058 | return 0; | 1039 | return 0; |
1059 | } | 1040 | } |
1060 | 1041 | ||
1061 | static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) | 1042 | static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) |
1062 | { | 1043 | { |
1063 | struct spi_master *master = mcspi->master; | 1044 | struct spi_master *master = mcspi->master; |
1064 | u32 tmp; | 1045 | u32 tmp; |
1046 | int ret = 0; | ||
1065 | 1047 | ||
1066 | if (omap2_mcspi_enable_clocks(mcspi)) | 1048 | ret = omap2_mcspi_enable_clocks(mcspi); |
1067 | return -1; | 1049 | if (ret < 0) |
1068 | 1050 | return ret; | |
1069 | mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, | ||
1070 | OMAP2_MCSPI_SYSCONFIG_SOFTRESET); | ||
1071 | do { | ||
1072 | tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS); | ||
1073 | } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE)); | ||
1074 | |||
1075 | tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE | | ||
1076 | OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP | | ||
1077 | OMAP2_MCSPI_SYSCONFIG_SMARTIDLE; | ||
1078 | mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, tmp); | ||
1079 | omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp; | ||
1080 | 1051 | ||
1081 | tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN; | 1052 | tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN; |
1082 | mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp); | 1053 | mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp); |
@@ -1087,91 +1058,26 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) | |||
1087 | return 0; | 1058 | return 0; |
1088 | } | 1059 | } |
1089 | 1060 | ||
1090 | static u8 __initdata spi1_rxdma_id [] = { | 1061 | static int omap_mcspi_runtime_resume(struct device *dev) |
1091 | OMAP24XX_DMA_SPI1_RX0, | 1062 | { |
1092 | OMAP24XX_DMA_SPI1_RX1, | 1063 | struct omap2_mcspi *mcspi; |
1093 | OMAP24XX_DMA_SPI1_RX2, | 1064 | struct spi_master *master; |
1094 | OMAP24XX_DMA_SPI1_RX3, | ||
1095 | }; | ||
1096 | |||
1097 | static u8 __initdata spi1_txdma_id [] = { | ||
1098 | OMAP24XX_DMA_SPI1_TX0, | ||
1099 | OMAP24XX_DMA_SPI1_TX1, | ||
1100 | OMAP24XX_DMA_SPI1_TX2, | ||
1101 | OMAP24XX_DMA_SPI1_TX3, | ||
1102 | }; | ||
1103 | |||
1104 | static u8 __initdata spi2_rxdma_id[] = { | ||
1105 | OMAP24XX_DMA_SPI2_RX0, | ||
1106 | OMAP24XX_DMA_SPI2_RX1, | ||
1107 | }; | ||
1108 | |||
1109 | static u8 __initdata spi2_txdma_id[] = { | ||
1110 | OMAP24XX_DMA_SPI2_TX0, | ||
1111 | OMAP24XX_DMA_SPI2_TX1, | ||
1112 | }; | ||
1113 | |||
1114 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ | ||
1115 | || defined(CONFIG_ARCH_OMAP4) | ||
1116 | static u8 __initdata spi3_rxdma_id[] = { | ||
1117 | OMAP24XX_DMA_SPI3_RX0, | ||
1118 | OMAP24XX_DMA_SPI3_RX1, | ||
1119 | }; | ||
1120 | 1065 | ||
1121 | static u8 __initdata spi3_txdma_id[] = { | 1066 | master = dev_get_drvdata(dev); |
1122 | OMAP24XX_DMA_SPI3_TX0, | 1067 | mcspi = spi_master_get_devdata(master); |
1123 | OMAP24XX_DMA_SPI3_TX1, | 1068 | omap2_mcspi_restore_ctx(mcspi); |
1124 | }; | ||
1125 | #endif | ||
1126 | 1069 | ||
1127 | #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) | 1070 | return 0; |
1128 | static u8 __initdata spi4_rxdma_id[] = { | 1071 | } |
1129 | OMAP34XX_DMA_SPI4_RX0, | ||
1130 | }; | ||
1131 | 1072 | ||
1132 | static u8 __initdata spi4_txdma_id[] = { | ||
1133 | OMAP34XX_DMA_SPI4_TX0, | ||
1134 | }; | ||
1135 | #endif | ||
1136 | 1073 | ||
1137 | static int __init omap2_mcspi_probe(struct platform_device *pdev) | 1074 | static int __init omap2_mcspi_probe(struct platform_device *pdev) |
1138 | { | 1075 | { |
1139 | struct spi_master *master; | 1076 | struct spi_master *master; |
1077 | struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data; | ||
1140 | struct omap2_mcspi *mcspi; | 1078 | struct omap2_mcspi *mcspi; |
1141 | struct resource *r; | 1079 | struct resource *r; |
1142 | int status = 0, i; | 1080 | int status = 0, i; |
1143 | const u8 *rxdma_id, *txdma_id; | ||
1144 | unsigned num_chipselect; | ||
1145 | |||
1146 | switch (pdev->id) { | ||
1147 | case 1: | ||
1148 | rxdma_id = spi1_rxdma_id; | ||
1149 | txdma_id = spi1_txdma_id; | ||
1150 | num_chipselect = 4; | ||
1151 | break; | ||
1152 | case 2: | ||
1153 | rxdma_id = spi2_rxdma_id; | ||
1154 | txdma_id = spi2_txdma_id; | ||
1155 | num_chipselect = 2; | ||
1156 | break; | ||
1157 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ | ||
1158 | || defined(CONFIG_ARCH_OMAP4) | ||
1159 | case 3: | ||
1160 | rxdma_id = spi3_rxdma_id; | ||
1161 | txdma_id = spi3_txdma_id; | ||
1162 | num_chipselect = 2; | ||
1163 | break; | ||
1164 | #endif | ||
1165 | #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) | ||
1166 | case 4: | ||
1167 | rxdma_id = spi4_rxdma_id; | ||
1168 | txdma_id = spi4_txdma_id; | ||
1169 | num_chipselect = 1; | ||
1170 | break; | ||
1171 | #endif | ||
1172 | default: | ||
1173 | return -EINVAL; | ||
1174 | } | ||
1175 | 1081 | ||
1176 | master = spi_alloc_master(&pdev->dev, sizeof *mcspi); | 1082 | master = spi_alloc_master(&pdev->dev, sizeof *mcspi); |
1177 | if (master == NULL) { | 1083 | if (master == NULL) { |
@@ -1188,7 +1094,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) | |||
1188 | master->setup = omap2_mcspi_setup; | 1094 | master->setup = omap2_mcspi_setup; |
1189 | master->transfer = omap2_mcspi_transfer; | 1095 | master->transfer = omap2_mcspi_transfer; |
1190 | master->cleanup = omap2_mcspi_cleanup; | 1096 | master->cleanup = omap2_mcspi_cleanup; |
1191 | master->num_chipselect = num_chipselect; | 1097 | master->num_chipselect = pdata->num_cs; |
1192 | 1098 | ||
1193 | dev_set_drvdata(&pdev->dev, master); | 1099 | dev_set_drvdata(&pdev->dev, master); |
1194 | 1100 | ||
@@ -1206,49 +1112,62 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) | |||
1206 | goto err1; | 1112 | goto err1; |
1207 | } | 1113 | } |
1208 | 1114 | ||
1115 | r->start += pdata->regs_offset; | ||
1116 | r->end += pdata->regs_offset; | ||
1209 | mcspi->phys = r->start; | 1117 | mcspi->phys = r->start; |
1210 | mcspi->base = ioremap(r->start, r->end - r->start + 1); | 1118 | mcspi->base = ioremap(r->start, r->end - r->start + 1); |
1211 | if (!mcspi->base) { | 1119 | if (!mcspi->base) { |
1212 | dev_dbg(&pdev->dev, "can't ioremap MCSPI\n"); | 1120 | dev_dbg(&pdev->dev, "can't ioremap MCSPI\n"); |
1213 | status = -ENOMEM; | 1121 | status = -ENOMEM; |
1214 | goto err1aa; | 1122 | goto err2; |
1215 | } | 1123 | } |
1216 | 1124 | ||
1125 | mcspi->dev = &pdev->dev; | ||
1217 | INIT_WORK(&mcspi->work, omap2_mcspi_work); | 1126 | INIT_WORK(&mcspi->work, omap2_mcspi_work); |
1218 | 1127 | ||
1219 | spin_lock_init(&mcspi->lock); | 1128 | spin_lock_init(&mcspi->lock); |
1220 | INIT_LIST_HEAD(&mcspi->msg_queue); | 1129 | INIT_LIST_HEAD(&mcspi->msg_queue); |
1221 | INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs); | 1130 | INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs); |
1222 | 1131 | ||
1223 | mcspi->ick = clk_get(&pdev->dev, "ick"); | ||
1224 | if (IS_ERR(mcspi->ick)) { | ||
1225 | dev_dbg(&pdev->dev, "can't get mcspi_ick\n"); | ||
1226 | status = PTR_ERR(mcspi->ick); | ||
1227 | goto err1a; | ||
1228 | } | ||
1229 | mcspi->fck = clk_get(&pdev->dev, "fck"); | ||
1230 | if (IS_ERR(mcspi->fck)) { | ||
1231 | dev_dbg(&pdev->dev, "can't get mcspi_fck\n"); | ||
1232 | status = PTR_ERR(mcspi->fck); | ||
1233 | goto err2; | ||
1234 | } | ||
1235 | |||
1236 | mcspi->dma_channels = kcalloc(master->num_chipselect, | 1132 | mcspi->dma_channels = kcalloc(master->num_chipselect, |
1237 | sizeof(struct omap2_mcspi_dma), | 1133 | sizeof(struct omap2_mcspi_dma), |
1238 | GFP_KERNEL); | 1134 | GFP_KERNEL); |
1239 | 1135 | ||
1240 | if (mcspi->dma_channels == NULL) | 1136 | if (mcspi->dma_channels == NULL) |
1241 | goto err3; | 1137 | goto err2; |
1138 | |||
1139 | for (i = 0; i < master->num_chipselect; i++) { | ||
1140 | char dma_ch_name[14]; | ||
1141 | struct resource *dma_res; | ||
1142 | |||
1143 | sprintf(dma_ch_name, "rx%d", i); | ||
1144 | dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, | ||
1145 | dma_ch_name); | ||
1146 | if (!dma_res) { | ||
1147 | dev_dbg(&pdev->dev, "cannot get DMA RX channel\n"); | ||
1148 | status = -ENODEV; | ||
1149 | break; | ||
1150 | } | ||
1242 | 1151 | ||
1243 | for (i = 0; i < num_chipselect; i++) { | ||
1244 | mcspi->dma_channels[i].dma_rx_channel = -1; | 1152 | mcspi->dma_channels[i].dma_rx_channel = -1; |
1245 | mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i]; | 1153 | mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start; |
1154 | sprintf(dma_ch_name, "tx%d", i); | ||
1155 | dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, | ||
1156 | dma_ch_name); | ||
1157 | if (!dma_res) { | ||
1158 | dev_dbg(&pdev->dev, "cannot get DMA TX channel\n"); | ||
1159 | status = -ENODEV; | ||
1160 | break; | ||
1161 | } | ||
1162 | |||
1246 | mcspi->dma_channels[i].dma_tx_channel = -1; | 1163 | mcspi->dma_channels[i].dma_tx_channel = -1; |
1247 | mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i]; | 1164 | mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start; |
1248 | } | 1165 | } |
1249 | 1166 | ||
1250 | if (omap2_mcspi_reset(mcspi) < 0) | 1167 | pm_runtime_enable(&pdev->dev); |
1251 | goto err4; | 1168 | |
1169 | if (status || omap2_mcspi_master_setup(mcspi) < 0) | ||
1170 | goto err3; | ||
1252 | 1171 | ||
1253 | status = spi_register_master(master); | 1172 | status = spi_register_master(master); |
1254 | if (status < 0) | 1173 | if (status < 0) |
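Instead of compiled-in per-controller DMA request-line tables, the probe now pulls IORESOURCE_DMA entries named "rxN"/"txN" off the platform device, so the SoC-specific numbers move into the device registration. A sketch of the board/SoC side that would satisfy the byname lookups above; the request-line numbers here are placeholders, not the real OMAP values:

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource mcspi1_dma_resources[] = {
	{ .name = "rx0", .start = 35, .end = 35, .flags = IORESOURCE_DMA },
	{ .name = "tx0", .start = 36, .end = 36, .flags = IORESOURCE_DMA },
	/* ...one rx/tx pair per chip select... */
};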
@@ -1257,17 +1176,13 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) | |||
1257 | return status; | 1176 | return status; |
1258 | 1177 | ||
1259 | err4: | 1178 | err4: |
1260 | kfree(mcspi->dma_channels); | 1179 | spi_master_put(master); |
1261 | err3: | 1180 | err3: |
1262 | clk_put(mcspi->fck); | 1181 | kfree(mcspi->dma_channels); |
1263 | err2: | 1182 | err2: |
1264 | clk_put(mcspi->ick); | ||
1265 | err1a: | ||
1266 | iounmap(mcspi->base); | ||
1267 | err1aa: | ||
1268 | release_mem_region(r->start, (r->end - r->start) + 1); | 1183 | release_mem_region(r->start, (r->end - r->start) + 1); |
1184 | iounmap(mcspi->base); | ||
1269 | err1: | 1185 | err1: |
1270 | spi_master_put(master); | ||
1271 | return status; | 1186 | return status; |
1272 | } | 1187 | } |
1273 | 1188 | ||
@@ -1283,9 +1198,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev) | |||
1283 | mcspi = spi_master_get_devdata(master); | 1198 | mcspi = spi_master_get_devdata(master); |
1284 | dma_channels = mcspi->dma_channels; | 1199 | dma_channels = mcspi->dma_channels; |
1285 | 1200 | ||
1286 | clk_put(mcspi->fck); | 1201 | omap2_mcspi_disable_clocks(mcspi); |
1287 | clk_put(mcspi->ick); | ||
1288 | |||
1289 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1202 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1290 | release_mem_region(r->start, (r->end - r->start) + 1); | 1203 | release_mem_region(r->start, (r->end - r->start) + 1); |
1291 | 1204 | ||
@@ -1336,6 +1249,7 @@ static int omap2_mcspi_resume(struct device *dev) | |||
1336 | 1249 | ||
1337 | static const struct dev_pm_ops omap2_mcspi_pm_ops = { | 1250 | static const struct dev_pm_ops omap2_mcspi_pm_ops = { |
1338 | .resume = omap2_mcspi_resume, | 1251 | .resume = omap2_mcspi_resume, |
1252 | .runtime_resume = omap_mcspi_runtime_resume, | ||
1339 | }; | 1253 | }; |
1340 | 1254 | ||
1341 | static struct platform_driver omap2_mcspi_driver = { | 1255 | static struct platform_driver omap2_mcspi_driver = { |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index a914010d9d12..630ae7f3cd4c 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1530,7 +1530,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) | |||
1530 | 1530 | ||
1531 | /*-------------------------------------------------------------------------*/ | 1531 | /*-------------------------------------------------------------------------*/ |
1532 | 1532 | ||
1533 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) || \ | 1533 | #if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \ |
1534 | defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \ | 1534 | defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \ |
1535 | defined(CONFIG_ARCH_U5500) | 1535 | defined(CONFIG_ARCH_U5500) |
1536 | 1536 | ||
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 4f0dd2ed3964..4bd9e2145ee4 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
@@ -212,8 +212,8 @@ enum musb_g_ep0_state { | |||
212 | * directly with the "flat" model, or after setting up an index register. | 212 | * directly with the "flat" model, or after setting up an index register. |
213 | */ | 213 | */ |
214 | 214 | ||
215 | #if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \ | 215 | #if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \ |
216 | || defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN) \ | 216 | || defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \ |
217 | || defined(CONFIG_ARCH_OMAP4) | 217 | || defined(CONFIG_ARCH_OMAP4) |
218 | /* REVISIT indexed access seemed to | 218 | /* REVISIT indexed access seemed to |
219 | * misbehave (on DaVinci) for at least peripheral IN ... | 219 | * misbehave (on DaVinci) for at least peripheral IN ... |
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h index 21056c924c74..320fd4afb93f 100644 --- a/drivers/usb/musb/musbhsdma.h +++ b/drivers/usb/musb/musbhsdma.h | |||
@@ -31,7 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) | 34 | #if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) |
35 | #include "omap2430.h" | 35 | #include "omap2430.h" |
36 | #endif | 36 | #endif |
37 | 37 | ||
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c index e00fa1b22ecd..8c6fdef61d1c 100644 --- a/drivers/usb/otg/isp1301_omap.c +++ b/drivers/usb/otg/isp1301_omap.c | |||
@@ -1510,7 +1510,7 @@ isp1301_start_hnp(struct otg_transceiver *dev) | |||
1510 | 1510 | ||
1511 | /*-------------------------------------------------------------------------*/ | 1511 | /*-------------------------------------------------------------------------*/ |
1512 | 1512 | ||
1513 | static int __init | 1513 | static int __devinit |
1514 | isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id) | 1514 | isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id) |
1515 | { | 1515 | { |
1516 | int status; | 1516 | int status; |
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig index 80b3b123dd7f..7c608c5ccf84 100644 --- a/drivers/w1/masters/Kconfig +++ b/drivers/w1/masters/Kconfig | |||
@@ -60,7 +60,7 @@ config W1_MASTER_GPIO | |||
60 | 60 | ||
61 | config HDQ_MASTER_OMAP | 61 | config HDQ_MASTER_OMAP |
62 | tristate "OMAP HDQ driver" | 62 | tristate "OMAP HDQ driver" |
63 | depends on ARCH_OMAP2430 || ARCH_OMAP3 | 63 | depends on SOC_OMAP2430 || ARCH_OMAP3 |
64 | help | 64 | help |
65 | Say Y here if you want support for the 1-wire or HDQ Interface | 65 | Say Y here if you want support for the 1-wire or HDQ Interface |
66 | on an OMAP processor. | 66 | on an OMAP processor. |
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index 3dd4971160ef..2b4acb86c191 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c | |||
@@ -124,6 +124,8 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev) | |||
124 | u32 pre_margin = GET_WLDR_VAL(timer_margin); | 124 | u32 pre_margin = GET_WLDR_VAL(timer_margin); |
125 | void __iomem *base = wdev->base; | 125 | void __iomem *base = wdev->base; |
126 | 126 | ||
127 | pm_runtime_get_sync(wdev->dev); | ||
128 | |||
127 | /* just count up at 32 KHz */ | 129 | /* just count up at 32 KHz */ |
128 | while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04) | 130 | while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04) |
129 | cpu_relax(); | 131 | cpu_relax(); |
@@ -131,6 +133,8 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev) | |||
131 | __raw_writel(pre_margin, base + OMAP_WATCHDOG_LDR); | 133 | __raw_writel(pre_margin, base + OMAP_WATCHDOG_LDR); |
132 | while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04) | 134 | while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04) |
133 | cpu_relax(); | 135 | cpu_relax(); |
136 | |||
137 | pm_runtime_put_sync(wdev->dev); | ||
134 | } | 138 | } |
135 | 139 | ||
136 | /* | 140 | /* |
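With fine-grained runtime PM, every path that touches the watchdog registers now brackets the access with pm_runtime_get_sync()/pm_runtime_put_sync(), so the clocks are only held while MMIO is actually in flight. Distilled from the keepalive paths below (this restates the patch's own calls; the helper name is illustrative):

/* Keep the bracket as tight as the register access it protects. */
static void wdt_keepalive(struct omap_wdt_dev *wdev)
{
	pm_runtime_get_sync(wdev->dev);	/* clocks on before MMIO */
	spin_lock(&wdt_lock);
	omap_wdt_ping(wdev);
	spin_unlock(&wdt_lock);
	pm_runtime_put_sync(wdev->dev);	/* clocks may gate again */
}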
@@ -160,6 +164,8 @@ static int omap_wdt_open(struct inode *inode, struct file *file) | |||
160 | omap_wdt_ping(wdev); /* trigger loading of new timeout value */ | 164 | omap_wdt_ping(wdev); /* trigger loading of new timeout value */ |
161 | omap_wdt_enable(wdev); | 165 | omap_wdt_enable(wdev); |
162 | 166 | ||
167 | pm_runtime_put_sync(wdev->dev); | ||
168 | |||
163 | return nonseekable_open(inode, file); | 169 | return nonseekable_open(inode, file); |
164 | } | 170 | } |
165 | 171 | ||
@@ -171,6 +177,7 @@ static int omap_wdt_release(struct inode *inode, struct file *file) | |||
171 | * Shut off the timer unless NOWAYOUT is defined. | 177 | * Shut off the timer unless NOWAYOUT is defined. |
172 | */ | 178 | */ |
173 | #ifndef CONFIG_WATCHDOG_NOWAYOUT | 179 | #ifndef CONFIG_WATCHDOG_NOWAYOUT |
180 | pm_runtime_get_sync(wdev->dev); | ||
174 | 181 | ||
175 | omap_wdt_disable(wdev); | 182 | omap_wdt_disable(wdev); |
176 | 183 | ||
@@ -190,9 +197,11 @@ static ssize_t omap_wdt_write(struct file *file, const char __user *data, | |||
190 | 197 | ||
191 | /* Refresh LOAD_TIME. */ | 198 | /* Refresh LOAD_TIME. */ |
192 | if (len) { | 199 | if (len) { |
200 | pm_runtime_get_sync(wdev->dev); | ||
193 | spin_lock(&wdt_lock); | 201 | spin_lock(&wdt_lock); |
194 | omap_wdt_ping(wdev); | 202 | omap_wdt_ping(wdev); |
195 | spin_unlock(&wdt_lock); | 203 | spin_unlock(&wdt_lock); |
204 | pm_runtime_put_sync(wdev->dev); | ||
196 | } | 205 | } |
197 | return len; | 206 | return len; |
198 | } | 207 | } |
@@ -224,15 +233,18 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd, | |||
224 | return put_user(omap_prcm_get_reset_sources(), | 233 | return put_user(omap_prcm_get_reset_sources(), |
225 | (int __user *)arg); | 234 | (int __user *)arg); |
226 | case WDIOC_KEEPALIVE: | 235 | case WDIOC_KEEPALIVE: |
236 | pm_runtime_get_sync(wdev->dev); | ||
227 | spin_lock(&wdt_lock); | 237 | spin_lock(&wdt_lock); |
228 | omap_wdt_ping(wdev); | 238 | omap_wdt_ping(wdev); |
229 | spin_unlock(&wdt_lock); | 239 | spin_unlock(&wdt_lock); |
240 | pm_runtime_put_sync(wdev->dev); | ||
230 | return 0; | 241 | return 0; |
231 | case WDIOC_SETTIMEOUT: | 242 | case WDIOC_SETTIMEOUT: |
232 | if (get_user(new_margin, (int __user *)arg)) | 243 | if (get_user(new_margin, (int __user *)arg)) |
233 | return -EFAULT; | 244 | return -EFAULT; |
234 | omap_wdt_adjust_timeout(new_margin); | 245 | omap_wdt_adjust_timeout(new_margin); |
235 | 246 | ||
247 | pm_runtime_get_sync(wdev->dev); | ||
236 | spin_lock(&wdt_lock); | 248 | spin_lock(&wdt_lock); |
237 | omap_wdt_disable(wdev); | 249 | omap_wdt_disable(wdev); |
238 | omap_wdt_set_timeout(wdev); | 250 | omap_wdt_set_timeout(wdev); |
@@ -240,6 +252,7 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd, | |||
240 | 252 | ||
241 | omap_wdt_ping(wdev); | 253 | omap_wdt_ping(wdev); |
242 | spin_unlock(&wdt_lock); | 254 | spin_unlock(&wdt_lock); |
255 | pm_runtime_put_sync(wdev->dev); | ||
243 | /* Fall */ | 256 | /* Fall */ |
244 | case WDIOC_GETTIMEOUT: | 257 | case WDIOC_GETTIMEOUT: |
245 | return put_user(timer_margin, (int __user *)arg); | 258 | return put_user(timer_margin, (int __user *)arg); |
@@ -345,8 +358,11 @@ static void omap_wdt_shutdown(struct platform_device *pdev) | |||
345 | { | 358 | { |
346 | struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); | 359 | struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); |
347 | 360 | ||
348 | if (wdev->omap_wdt_users) | 361 | if (wdev->omap_wdt_users) { |
362 | pm_runtime_get_sync(wdev->dev); | ||
349 | omap_wdt_disable(wdev); | 363 | omap_wdt_disable(wdev); |
364 | pm_runtime_put_sync(wdev->dev); | ||
365 | } | ||
350 | } | 366 | } |
351 | 367 | ||
352 | static int __devexit omap_wdt_remove(struct platform_device *pdev) | 368 | static int __devexit omap_wdt_remove(struct platform_device *pdev) |
@@ -381,8 +397,11 @@ static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state) | |||
381 | { | 397 | { |
382 | struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); | 398 | struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); |
383 | 399 | ||
384 | if (wdev->omap_wdt_users) | 400 | if (wdev->omap_wdt_users) { |
401 | pm_runtime_get_sync(wdev->dev); | ||
385 | omap_wdt_disable(wdev); | 402 | omap_wdt_disable(wdev); |
403 | pm_runtime_put_sync(wdev->dev); | ||
404 | } | ||
386 | 405 | ||
387 | return 0; | 406 | return 0; |
388 | } | 407 | } |
@@ -392,8 +411,10 @@ static int omap_wdt_resume(struct platform_device *pdev) | |||
392 | struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); | 411 | struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); |
393 | 412 | ||
394 | if (wdev->omap_wdt_users) { | 413 | if (wdev->omap_wdt_users) { |
414 | pm_runtime_get_sync(wdev->dev); | ||
395 | omap_wdt_enable(wdev); | 415 | omap_wdt_enable(wdev); |
396 | omap_wdt_ping(wdev); | 416 | omap_wdt_ping(wdev); |
417 | pm_runtime_put_sync(wdev->dev); | ||
397 | } | 418 | } |
398 | 419 | ||
399 | return 0; | 420 | return 0; |