Diffstat:
-rw-r--r--  Documentation/hwspinlock.txt | 293
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 2
-rw-r--r--  arch/arm/mach-omap2/Makefile | 4
-rw-r--r--  arch/arm/mach-omap2/board-2430sdp.c | 21
-rw-r--r--  arch/arm/mach-omap2/board-3430sdp.c | 80
-rw-r--r--  arch/arm/mach-omap2/board-3630sdp.c | 3
-rw-r--r--  arch/arm/mach-omap2/board-4430sdp.c | 22
-rw-r--r--  arch/arm/mach-omap2/board-am3517evm.c | 4
-rw-r--r--  arch/arm/mach-omap2/board-flash.c | 32
-rw-r--r--  arch/arm/mach-omap2/board-flash.h | 4
-rw-r--r--  arch/arm/mach-omap2/board-ldp.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3evm.c | 197
-rw-r--r--  arch/arm/mach-omap2/board-omap4panda.c | 70
-rw-r--r--  arch/arm/mach-omap2/board-zoom.c | 5
-rw-r--r--  arch/arm/mach-omap2/clkt_clksel.c | 2
-rw-r--r--  arch/arm/mach-omap2/devices.c | 187
-rw-r--r--  arch/arm/mach-omap2/gpmc-nand.c | 7
-rw-r--r--  arch/arm/mach-omap2/gpmc-onenand.c | 113
-rw-r--r--  arch/arm/mach-omap2/gpmc.c | 56
-rw-r--r--  arch/arm/mach-omap2/hsmmc.c | 5
-rw-r--r--  arch/arm/mach-omap2/hwspinlock.c | 63
-rw-r--r--  arch/arm/mach-omap2/io.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_2420_data.c | 156
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_2430_data.c | 320
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 445
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 27
-rw-r--r--  arch/arm/mach-omap2/omap_phy_internal.c | 93
-rw-r--r--  arch/arm/mach-omap2/usb-musb.c | 219
-rw-r--r--  arch/arm/plat-omap/include/plat/gpmc.h | 18
-rw-r--r--  arch/arm/plat-omap/include/plat/iommu.h | 2
-rw-r--r--  arch/arm/plat-omap/include/plat/irqs.h | 9
-rw-r--r--  arch/arm/plat-omap/include/plat/mcspi.h | 11
-rw-r--r--  arch/arm/plat-omap/include/plat/nand.h | 11
-rw-r--r--  arch/arm/plat-omap/include/plat/onenand.h | 10
-rw-r--r--  arch/arm/plat-omap/include/plat/usb.h | 4
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/hwspinlock/Kconfig | 22
-rw-r--r--  drivers/hwspinlock/Makefile | 6
-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c | 548
-rw-r--r--  drivers/hwspinlock/hwspinlock_internal.h | 61
-rw-r--r--  drivers/hwspinlock/omap_hwspinlock.c | 231
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 24
-rw-r--r--  drivers/mtd/nand/Kconfig | 17
-rw-r--r--  drivers/mtd/nand/omap2.c | 367
-rw-r--r--  drivers/mtd/onenand/omap2.c | 36
-rw-r--r--  drivers/spi/omap2_mcspi.c | 222
-rw-r--r--  include/linux/hwspinlock.h | 292
-rw-r--r--  include/linux/mtd/onenand_regs.h | 1
49 files changed, 3609 insertions, 721 deletions
diff --git a/Documentation/hwspinlock.txt b/Documentation/hwspinlock.txt
new file mode 100644
index 000000000000..7dcd1a4e726c
--- /dev/null
+++ b/Documentation/hwspinlock.txt
@@ -0,0 +1,293 @@
Hardware Spinlock Framework

1. Introduction

Hardware spinlock modules provide hardware assistance for synchronization
and mutual exclusion between heterogeneous processors and those not operating
under a single, shared operating system.

For example, OMAP4 has dual Cortex-A9, dual Cortex-M3 and a C64x+ DSP,
each of which is running a different operating system (the master, A9,
is usually running Linux and the slave processors, the M3 and the DSP,
are running some flavor of RTOS).

A generic hwspinlock framework allows platform-independent drivers to use
the hwspinlock device in order to access data structures that are shared
between remote processors, which otherwise have no alternative mechanism
to accomplish synchronization and mutual exclusion operations.

This is necessary, for example, for inter-processor communication:
on OMAP4, cpu-intensive multimedia tasks are offloaded by the host to the
remote M3 and/or C64x+ slave processors (by an IPC subsystem called Syslink).

To achieve fast message-based communication, minimal kernel support
is needed to deliver messages arriving from a remote processor to the
appropriate user process.

This communication is based on simple data structures that are shared between
the remote processors; access to them is synchronized using the hwspinlock
module (the remote processor directly places new messages in the shared data
structure).

A common hwspinlock interface makes it possible to have generic,
platform-independent drivers.

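As a purely illustrative sketch of the scheme described above (the shared
mailbox layout and all names below are hypothetical, not part of any kernel
interface), the host side might publish a message roughly like this, using the
API described in the next section:

#define MBOX_SLOTS	16

/* hypothetical layout of memory visible to both the host and a remote core */
struct shared_mbox {
	u32 head;
	u32 buf[MBOX_SLOTS];
};

static int send_msg(struct hwspinlock *hwlock, struct shared_mbox *mbox, u32 msg)
{
	int ret;

	/* spin for at most 100 msecs if a remote core holds the lock */
	ret = hwspin_lock_timeout(hwlock, 100);
	if (ret)
		return ret;

	/* publish the message; do NOT sleep while the lock is held */
	mbox->buf[mbox->head % MBOX_SLOTS] = msg;
	mbox->head++;

	hwspin_unlock(hwlock);

	return 0;
}
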
2. User API

  struct hwspinlock *hwspin_lock_request(void);
   - dynamically assign an hwspinlock and return its address, or NULL
     in case an unused hwspinlock isn't available. Users of this
     API will usually want to communicate the lock's id to the remote core
     before it can be used to achieve synchronization.
     Can be called from an atomic context (this function will not sleep) but
     not from within interrupt context.

  struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
   - assign a specific hwspinlock id and return its address, or NULL
     if that hwspinlock is already in use. Usually board code will
     be calling this function in order to reserve specific hwspinlock
     ids for predefined purposes.
     Can be called from an atomic context (this function will not sleep) but
     not from within interrupt context.

  int hwspin_lock_free(struct hwspinlock *hwlock);
   - free a previously-assigned hwspinlock; returns 0 on success, or an
     appropriate error code on failure (e.g. -EINVAL if the hwspinlock
     is already free).
     Can be called from an atomic context (this function will not sleep) but
     not from within interrupt context.

  int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
   - lock a previously-assigned hwspinlock with a timeout limit (specified in
     msecs). If the hwspinlock is already taken, the function will busy loop
     waiting for it to be released, but give up when the timeout elapses.
     Upon a successful return from this function, preemption is disabled so
     the caller must not sleep, and is advised to release the hwspinlock as
     soon as possible, in order to minimize remote cores polling on the
     hardware interconnect.
     Returns 0 when successful and an appropriate error code otherwise (most
     notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
     The function will never sleep.

  int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int timeout);
   - lock a previously-assigned hwspinlock with a timeout limit (specified in
     msecs). If the hwspinlock is already taken, the function will busy loop
     waiting for it to be released, but give up when the timeout elapses.
     Upon a successful return from this function, preemption and the local
     interrupts are disabled, so the caller must not sleep, and is advised to
     release the hwspinlock as soon as possible.
     Returns 0 when successful and an appropriate error code otherwise (most
     notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
     The function will never sleep.

  int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock, unsigned int to,
							unsigned long *flags);
   - lock a previously-assigned hwspinlock with a timeout limit (specified in
     msecs). If the hwspinlock is already taken, the function will busy loop
     waiting for it to be released, but give up when the timeout elapses.
     Upon a successful return from this function, preemption is disabled,
     local interrupts are disabled and their previous state is saved at the
     given flags placeholder. The caller must not sleep, and is advised to
     release the hwspinlock as soon as possible.
     Returns 0 when successful and an appropriate error code otherwise (most
     notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
     The function will never sleep.

  int hwspin_trylock(struct hwspinlock *hwlock);
   - attempt to lock a previously-assigned hwspinlock, but immediately fail if
     it is already taken.
     Upon a successful return from this function, preemption is disabled so
     the caller must not sleep, and is advised to release the hwspinlock as
     soon as possible, in order to minimize remote cores polling on the
     hardware interconnect.
     Returns 0 on success and an appropriate error code otherwise (most
     notably -EBUSY if the hwspinlock was already taken).
     The function will never sleep.

  int hwspin_trylock_irq(struct hwspinlock *hwlock);
   - attempt to lock a previously-assigned hwspinlock, but immediately fail if
     it is already taken.
     Upon a successful return from this function, preemption and the local
     interrupts are disabled so the caller must not sleep, and is advised to
     release the hwspinlock as soon as possible.
     Returns 0 on success and an appropriate error code otherwise (most
     notably -EBUSY if the hwspinlock was already taken).
     The function will never sleep.

  int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags);
   - attempt to lock a previously-assigned hwspinlock, but immediately fail if
     it is already taken.
     Upon a successful return from this function, preemption is disabled,
     the local interrupts are disabled and their previous state is saved
     at the given flags placeholder. The caller must not sleep, and is advised
     to release the hwspinlock as soon as possible.
     Returns 0 on success and an appropriate error code otherwise (most
     notably -EBUSY if the hwspinlock was already taken).
     The function will never sleep.

  void hwspin_unlock(struct hwspinlock *hwlock);
   - unlock a previously-locked hwspinlock. Always succeeds, and can be called
     from any context (the function never sleeps). Note: code should _never_
     unlock an hwspinlock which is already unlocked (there is no protection
     against this).

  void hwspin_unlock_irq(struct hwspinlock *hwlock);
   - unlock a previously-locked hwspinlock and enable local interrupts.
     The caller should _never_ unlock an hwspinlock which is already unlocked.
     Doing so is considered a bug (there is no protection against this).
     Upon a successful return from this function, preemption and local
     interrupts are enabled. This function will never sleep.

  void
  hwspin_unlock_irqrestore(struct hwspinlock *hwlock, unsigned long *flags);
   - unlock a previously-locked hwspinlock.
     The caller should _never_ unlock an hwspinlock which is already unlocked.
     Doing so is considered a bug (there is no protection against this).
     Upon a successful return from this function, preemption is reenabled,
     and the state of the local interrupts is restored to the state saved at
     the given flags. This function will never sleep.

  int hwspin_lock_get_id(struct hwspinlock *hwlock);
   - retrieve the id number of a given hwspinlock. This is needed when an
     hwspinlock is dynamically assigned: before it can be used to achieve
     mutual exclusion with a remote cpu, the id number should be communicated
     to the remote task with which we want to synchronize.
     Returns the hwspinlock id number, or -EINVAL if hwlock is null.

3. Typical usage

#include <linux/hwspinlock.h>
#include <linux/err.h>

int hwspinlock_example1(void)
{
	struct hwspinlock *hwlock;
	int ret;
	int id;

	/* dynamically assign a hwspinlock */
	hwlock = hwspin_lock_request();
	if (!hwlock)
		...

	id = hwspin_lock_get_id(hwlock);
	/* probably need to communicate id to a remote processor now */

	/* take the lock, spin for 1 sec if it's already taken */
	ret = hwspin_lock_timeout(hwlock, 1000);
	if (ret)
		...

	/*
	 * we took the lock, do our thing now, but do NOT sleep
	 */

	/* release the lock */
	hwspin_unlock(hwlock);

	/* free the lock */
	ret = hwspin_lock_free(hwlock);
	if (ret)
		...

	return ret;
}

int hwspinlock_example2(void)
{
	struct hwspinlock *hwlock;
	int ret;

	/*
	 * assign a specific hwspinlock id - this should be called early
	 * by board init code.
	 */
	hwlock = hwspin_lock_request_specific(PREDEFINED_LOCK_ID);
	if (!hwlock)
		...

	/* try to take it, but don't spin on it; 0 means we got it */
	ret = hwspin_trylock(hwlock);
	if (ret) {
		pr_info("lock is already taken\n");
		return -EBUSY;
	}

	/*
	 * we took the lock, do our thing now, but do NOT sleep
	 */

	/* release the lock */
	hwspin_unlock(hwlock);

	/* free the lock */
	ret = hwspin_lock_free(hwlock);
	if (ret)
		...

	return ret;
}

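The interrupt-disabling variants follow the same pattern. Below is a minimal
sketch (not taken from an in-tree driver) showing hwspin_lock_timeout_irqsave()
and hwspin_unlock_irqrestore(), which additionally save and restore the local
interrupt state:

int hwspinlock_example3(void)
{
	struct hwspinlock *hwlock;
	unsigned long flags;
	int ret;

	hwlock = hwspin_lock_request_specific(PREDEFINED_LOCK_ID);
	if (!hwlock)
		return -EBUSY;

	/* take the lock, spin for up to 500 msecs if it's already taken */
	ret = hwspin_lock_timeout_irqsave(hwlock, 500, &flags);
	if (ret)
		goto out;

	/*
	 * we took the lock; preemption and local interrupts are now disabled
	 * and the previous interrupt state is saved in 'flags'.
	 * do our thing now, but do NOT sleep
	 */

	/* release the lock and restore the saved interrupt state */
	hwspin_unlock_irqrestore(hwlock, &flags);

out:
	hwspin_lock_free(hwlock);
	return ret;
}
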
4. API for implementors

  int hwspin_lock_register(struct hwspinlock *hwlock);
   - to be called from the underlying platform-specific implementation, in
     order to register a new hwspinlock instance. Can be called from an atomic
     context (this function will not sleep) but not from within interrupt
     context. Returns 0 on success, or an appropriate error code on failure.

  struct hwspinlock *hwspin_lock_unregister(unsigned int id);
   - to be called from the underlying vendor-specific implementation, in order
     to unregister an existing (and unused) hwspinlock instance.
     Can be called from an atomic context (will not sleep) but not from
     within interrupt context.
     Returns the address of hwspinlock on success, or NULL on error (e.g.
     if the hwspinlock is still in use).

5. struct hwspinlock

This struct represents an hwspinlock instance. It is registered by the
underlying hwspinlock implementation using the hwspin_lock_register() API.

/**
 * struct hwspinlock - vendor-specific hwspinlock implementation
 *
 * @dev: underlying device, will be used with runtime PM api
 * @ops: vendor-specific hwspinlock handlers
 * @id: a global, unique, system-wide, index of the lock.
 * @lock: initialized and used by hwspinlock core
 * @owner: underlying implementation module, used to maintain module ref count
 */
struct hwspinlock {
	struct device *dev;
	const struct hwspinlock_ops *ops;
	int id;
	spinlock_t lock;
	struct module *owner;
};

The underlying implementation is responsible for assigning the dev, ops, id
and owner members. The lock member, on the other hand, is initialized and
used by the hwspinlock core.

6. Implementation callbacks

There are three possible callbacks defined in 'struct hwspinlock_ops':

struct hwspinlock_ops {
	int (*trylock)(struct hwspinlock *lock);
	void (*unlock)(struct hwspinlock *lock);
	void (*relax)(struct hwspinlock *lock);
};

The first two callbacks are mandatory:

The ->trylock() callback should make a single attempt to take the lock, and
return 0 on failure and 1 on success. This callback may _not_ sleep.

The ->unlock() callback releases the lock. It always succeeds, and it, too,
may _not_ sleep.

The ->relax() callback is optional. It is called by the hwspinlock core while
spinning on a lock, and can be used by the underlying implementation to force
a delay between two successive invocations of ->trylock(). It may _not_ sleep.
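
To make the above concrete, here is a minimal, purely illustrative sketch of a
vendor implementation. The device, the register semantics and all my_* names
are hypothetical (the in-tree reference is drivers/hwspinlock/omap_hwspinlock.c),
and it is assumed that struct hwspinlock is available to vendor drivers via the
framework's internal header (hwspinlock_internal.h in the diffstat above):

/* illustrative only; all my_* names and the register semantics are made up */
#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hwspinlock_internal.h"	/* assumed to provide struct hwspinlock */

struct my_hwspinlock {
	struct hwspinlock lock;
	void __iomem *addr;		/* this lock's status register */
};

/*
 * assumption: reading the register returns 0 when the lock was free and is
 * now taken by us; writing 0 releases it
 */
static int my_hwspinlock_trylock(struct hwspinlock *lock)
{
	struct my_hwspinlock *hws =
			container_of(lock, struct my_hwspinlock, lock);

	/* ->trylock() must return 1 on success and 0 on failure */
	return readl(hws->addr) == 0;
}

static void my_hwspinlock_unlock(struct hwspinlock *lock)
{
	struct my_hwspinlock *hws =
			container_of(lock, struct my_hwspinlock, lock);

	writel(0, hws->addr);
}

static void my_hwspinlock_relax(struct hwspinlock *lock)
{
	ndelay(50);	/* back off a bit between ->trylock() attempts */
}

static const struct hwspinlock_ops my_hwspinlock_ops = {
	.trylock	= my_hwspinlock_trylock,
	.unlock		= my_hwspinlock_unlock,
	.relax		= my_hwspinlock_relax,
};

/* typically called from the vendor driver's probe routine, once per lock */
static int my_hwspinlock_setup_one(struct device *dev, void __iomem *addr,
				   int id)
{
	struct my_hwspinlock *hws;
	int ret;

	hws = kzalloc(sizeof(*hws), GFP_KERNEL);
	if (!hws)
		return -ENOMEM;

	hws->addr = addr;
	hws->lock.dev = dev;
	hws->lock.ops = &my_hwspinlock_ops;
	hws->lock.id = id;
	hws->lock.owner = THIS_MODULE;

	ret = hwspin_lock_register(&hws->lock);
	if (ret)
		kfree(hws);

	return ret;
}
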
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 9b4e78fe3d1c..b9d8a7b2a862 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -310,6 +310,7 @@ config MACH_OMAP_4430SDP
310 depends on ARCH_OMAP4 310 depends on ARCH_OMAP4
311 select OMAP_PACKAGE_CBL 311 select OMAP_PACKAGE_CBL
312 select OMAP_PACKAGE_CBS 312 select OMAP_PACKAGE_CBS
313 select REGULATOR_FIXED_VOLTAGE
313 314
314config MACH_OMAP4_PANDA 315config MACH_OMAP4_PANDA
315 bool "OMAP4 Panda Board" 316 bool "OMAP4 Panda Board"
@@ -317,6 +318,7 @@ config MACH_OMAP4_PANDA
317 depends on ARCH_OMAP4 318 depends on ARCH_OMAP4
318 select OMAP_PACKAGE_CBL 319 select OMAP_PACKAGE_CBL
319 select OMAP_PACKAGE_CBS 320 select OMAP_PACKAGE_CBS
321 select REGULATOR_FIXED_VOLTAGE
320 322
321config OMAP3_EMU 323config OMAP3_EMU
322 bool "OMAP3 debugging peripherals" 324 bool "OMAP3 debugging peripherals"
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index a9e3974d015f..ee72a9787bf1 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -218,7 +218,8 @@ obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o \
218 hsmmc.o \ 218 hsmmc.o \
219 omap_phy_internal.o 219 omap_phy_internal.o
220 220
221obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o 221obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o \
222 omap_phy_internal.o \
222 223
223obj-$(CONFIG_MACH_CRANEBOARD) += board-am3517crane.o 224obj-$(CONFIG_MACH_CRANEBOARD) += board-am3517crane.o
224 225
@@ -243,3 +244,4 @@ obj-y += $(smc91x-m) $(smc91x-y)
243 244
244smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o 245smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o
245obj-y += $(smsc911x-m) $(smsc911x-y) 246obj-y += $(smsc911x-m) $(smsc911x-y)
247obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index ec74c0f2051c..cc42d474c443 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -22,6 +22,7 @@
22#include <linux/mmc/host.h> 22#include <linux/mmc/host.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/i2c/twl.h> 24#include <linux/i2c/twl.h>
25#include <linux/regulator/machine.h>
25#include <linux/err.h> 26#include <linux/err.h>
26#include <linux/clk.h> 27#include <linux/clk.h>
27#include <linux/io.h> 28#include <linux/io.h>
@@ -147,6 +148,25 @@ static void __init omap_2430sdp_init_early(void)
147 omap2_init_common_devices(NULL, NULL); 148 omap2_init_common_devices(NULL, NULL);
148} 149}
149 150
151static struct regulator_consumer_supply sdp2430_vmmc1_supplies[] = {
152 REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0"),
153};
154
155/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
156static struct regulator_init_data sdp2430_vmmc1 = {
157 .constraints = {
158 .min_uV = 1850000,
159 .max_uV = 3150000,
160 .valid_modes_mask = REGULATOR_MODE_NORMAL
161 | REGULATOR_MODE_STANDBY,
162 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
163 | REGULATOR_CHANGE_MODE
164 | REGULATOR_CHANGE_STATUS,
165 },
166 .num_consumer_supplies = ARRAY_SIZE(sdp2430_vmmc1_supplies),
167 .consumer_supplies = &sdp2430_vmmc1_supplies[0],
168};
169
150static struct twl4030_gpio_platform_data sdp2430_gpio_data = { 170static struct twl4030_gpio_platform_data sdp2430_gpio_data = {
151 .gpio_base = OMAP_MAX_GPIO_LINES, 171 .gpio_base = OMAP_MAX_GPIO_LINES,
152 .irq_base = TWL4030_GPIO_IRQ_BASE, 172 .irq_base = TWL4030_GPIO_IRQ_BASE,
@@ -159,6 +179,7 @@ static struct twl4030_platform_data sdp2430_twldata = {
159 179
160 /* platform_data for children goes here */ 180 /* platform_data for children goes here */
161 .gpio = &sdp2430_gpio_data, 181 .gpio = &sdp2430_gpio_data,
182 .vmmc1 = &sdp2430_vmmc1,
162}; 183};
163 184
164static struct i2c_board_info __initdata sdp2430_i2c_boardinfo[] = { 185static struct i2c_board_info __initdata sdp2430_i2c_boardinfo[] = {
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 31085883199e..76a260f7c00e 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -315,11 +315,6 @@ static struct platform_device sdp3430_dss_device = {
315 }, 315 },
316}; 316};
317 317
318static struct regulator_consumer_supply sdp3430_vdda_dac_supply = {
319 .supply = "vdda_dac",
320 .dev = &sdp3430_dss_device.dev,
321};
322
323static struct platform_device *sdp3430_devices[] __initdata = { 318static struct platform_device *sdp3430_devices[] __initdata = {
324 &sdp3430_dss_device, 319 &sdp3430_dss_device,
325}; 320};
@@ -369,18 +364,6 @@ static struct omap2_hsmmc_info mmc[] = {
369 {} /* Terminator */ 364 {} /* Terminator */
370}; 365};
371 366
372static struct regulator_consumer_supply sdp3430_vmmc1_supply = {
373 .supply = "vmmc",
374};
375
376static struct regulator_consumer_supply sdp3430_vsim_supply = {
377 .supply = "vmmc_aux",
378};
379
380static struct regulator_consumer_supply sdp3430_vmmc2_supply = {
381 .supply = "vmmc",
382};
383
384static int sdp3430_twl_gpio_setup(struct device *dev, 367static int sdp3430_twl_gpio_setup(struct device *dev,
385 unsigned gpio, unsigned ngpio) 368 unsigned gpio, unsigned ngpio)
386{ 369{
@@ -391,13 +374,6 @@ static int sdp3430_twl_gpio_setup(struct device *dev,
391 mmc[1].gpio_cd = gpio + 1; 374 mmc[1].gpio_cd = gpio + 1;
392 omap2_hsmmc_init(mmc); 375 omap2_hsmmc_init(mmc);
393 376
394 /* link regulators to MMC adapters ... we "know" the
395 * regulators will be set up only *after* we return.
396 */
397 sdp3430_vmmc1_supply.dev = mmc[0].dev;
398 sdp3430_vsim_supply.dev = mmc[0].dev;
399 sdp3430_vmmc2_supply.dev = mmc[1].dev;
400
401 /* gpio + 7 is "sub_lcd_en_bkl" (output/PWM1) */ 377 /* gpio + 7 is "sub_lcd_en_bkl" (output/PWM1) */
402 gpio_request(gpio + 7, "sub_lcd_en_bkl"); 378 gpio_request(gpio + 7, "sub_lcd_en_bkl");
403 gpio_direction_output(gpio + 7, 0); 379 gpio_direction_output(gpio + 7, 0);
@@ -426,6 +402,34 @@ static struct twl4030_madc_platform_data sdp3430_madc_data = {
426 .irq_line = 1, 402 .irq_line = 1,
427}; 403};
428 404
405/* regulator consumer mappings */
406
407/* ads7846 on SPI */
408static struct regulator_consumer_supply sdp3430_vaux3_supplies[] = {
409 REGULATOR_SUPPLY("vcc", "spi1.0"),
410};
411
412static struct regulator_consumer_supply sdp3430_vdda_dac_supplies[] = {
413 REGULATOR_SUPPLY("vdda_dac", "omapdss"),
414};
415
416/* VPLL2 for digital video outputs */
417static struct regulator_consumer_supply sdp3430_vpll2_supplies[] = {
418 REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
419};
420
421static struct regulator_consumer_supply sdp3430_vmmc1_supplies[] = {
422 REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0"),
423};
424
425static struct regulator_consumer_supply sdp3430_vsim_supplies[] = {
426 REGULATOR_SUPPLY("vmmc_aux", "mmci-omap-hs.0"),
427};
428
429static struct regulator_consumer_supply sdp3430_vmmc2_supplies[] = {
430 REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1"),
431};
432
429/* 433/*
430 * Apply all the fixed voltages since most versions of U-Boot 434 * Apply all the fixed voltages since most versions of U-Boot
431 * don't bother with that initialization. 435 * don't bother with that initialization.
@@ -468,6 +472,8 @@ static struct regulator_init_data sdp3430_vaux3 = {
468 .valid_ops_mask = REGULATOR_CHANGE_MODE 472 .valid_ops_mask = REGULATOR_CHANGE_MODE
469 | REGULATOR_CHANGE_STATUS, 473 | REGULATOR_CHANGE_STATUS,
470 }, 474 },
475 .num_consumer_supplies = ARRAY_SIZE(sdp3430_vaux3_supplies),
476 .consumer_supplies = sdp3430_vaux3_supplies,
471}; 477};
472 478
473/* VAUX4 for OMAP VDD_CSI2 (camera) */ 479/* VAUX4 for OMAP VDD_CSI2 (camera) */
@@ -494,8 +500,8 @@ static struct regulator_init_data sdp3430_vmmc1 = {
494 | REGULATOR_CHANGE_MODE 500 | REGULATOR_CHANGE_MODE
495 | REGULATOR_CHANGE_STATUS, 501 | REGULATOR_CHANGE_STATUS,
496 }, 502 },
497 .num_consumer_supplies = 1, 503 .num_consumer_supplies = ARRAY_SIZE(sdp3430_vmmc1_supplies),
498 .consumer_supplies = &sdp3430_vmmc1_supply, 504 .consumer_supplies = sdp3430_vmmc1_supplies,
499}; 505};
500 506
501/* VMMC2 for MMC2 card */ 507/* VMMC2 for MMC2 card */
@@ -509,8 +515,8 @@ static struct regulator_init_data sdp3430_vmmc2 = {
509 .valid_ops_mask = REGULATOR_CHANGE_MODE 515 .valid_ops_mask = REGULATOR_CHANGE_MODE
510 | REGULATOR_CHANGE_STATUS, 516 | REGULATOR_CHANGE_STATUS,
511 }, 517 },
512 .num_consumer_supplies = 1, 518 .num_consumer_supplies = ARRAY_SIZE(sdp3430_vmmc2_supplies),
513 .consumer_supplies = &sdp3430_vmmc2_supply, 519 .consumer_supplies = sdp3430_vmmc2_supplies,
514}; 520};
515 521
516/* VSIM for OMAP VDD_MMC1A (i/o for DAT4..DAT7) */ 522/* VSIM for OMAP VDD_MMC1A (i/o for DAT4..DAT7) */
@@ -524,8 +530,8 @@ static struct regulator_init_data sdp3430_vsim = {
524 | REGULATOR_CHANGE_MODE 530 | REGULATOR_CHANGE_MODE
525 | REGULATOR_CHANGE_STATUS, 531 | REGULATOR_CHANGE_STATUS,
526 }, 532 },
527 .num_consumer_supplies = 1, 533 .num_consumer_supplies = ARRAY_SIZE(sdp3430_vsim_supplies),
528 .consumer_supplies = &sdp3430_vsim_supply, 534 .consumer_supplies = sdp3430_vsim_supplies,
529}; 535};
530 536
531/* VDAC for DSS driving S-Video */ 537/* VDAC for DSS driving S-Video */
@@ -539,16 +545,8 @@ static struct regulator_init_data sdp3430_vdac = {
539 .valid_ops_mask = REGULATOR_CHANGE_MODE 545 .valid_ops_mask = REGULATOR_CHANGE_MODE
540 | REGULATOR_CHANGE_STATUS, 546 | REGULATOR_CHANGE_STATUS,
541 }, 547 },
542 .num_consumer_supplies = 1, 548 .num_consumer_supplies = ARRAY_SIZE(sdp3430_vdda_dac_supplies),
543 .consumer_supplies = &sdp3430_vdda_dac_supply, 549 .consumer_supplies = sdp3430_vdda_dac_supplies,
544};
545
546/* VPLL2 for digital video outputs */
547static struct regulator_consumer_supply sdp3430_vpll2_supplies[] = {
548 {
549 .supply = "vdds_dsi",
550 .dev = &sdp3430_dss_device.dev,
551 }
552}; 550};
553 551
554static struct regulator_init_data sdp3430_vpll2 = { 552static struct regulator_init_data sdp3430_vpll2 = {
@@ -812,7 +810,7 @@ static void __init omap_3430sdp_init(void)
812 omap_serial_init(); 810 omap_serial_init();
813 usb_musb_init(&musb_board_data); 811 usb_musb_init(&musb_board_data);
814 board_smc91x_init(); 812 board_smc91x_init();
815 board_flash_init(sdp_flash_partitions, chip_sel_3430); 813 board_flash_init(sdp_flash_partitions, chip_sel_3430, 0);
816 sdp3430_display_init(); 814 sdp3430_display_init();
817 enable_board_wakeup_source(); 815 enable_board_wakeup_source();
818 usb_ehci_init(&ehci_pdata); 816 usb_ehci_init(&ehci_pdata);
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index 16538757291a..8d1c4358ecf9 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -11,6 +11,7 @@
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/input.h> 12#include <linux/input.h>
13#include <linux/gpio.h> 13#include <linux/gpio.h>
14#include <linux/mtd/nand.h>
14 15
15#include <asm/mach-types.h> 16#include <asm/mach-types.h>
16#include <asm/mach/arch.h> 17#include <asm/mach/arch.h>
@@ -208,7 +209,7 @@ static void __init omap_sdp_init(void)
208 zoom_peripherals_init(); 209 zoom_peripherals_init();
209 zoom_display_init(); 210 zoom_display_init();
210 board_smc91x_init(); 211 board_smc91x_init();
211 board_flash_init(sdp_flash_partitions, chip_sel_sdp); 212 board_flash_init(sdp_flash_partitions, chip_sel_sdp, NAND_BUSWIDTH_16);
212 enable_board_wakeup_source(); 213 enable_board_wakeup_source();
213 usb_ehci_init(&ehci_pdata); 214 usb_ehci_init(&ehci_pdata);
214} 215}
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index dcc8b27e3033..1a943be822c3 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -45,7 +45,6 @@
45#define ETH_KS8851_IRQ 34 45#define ETH_KS8851_IRQ 34
46#define ETH_KS8851_POWER_ON 48 46#define ETH_KS8851_POWER_ON 48
47#define ETH_KS8851_QUART 138 47#define ETH_KS8851_QUART 138
48#define OMAP4SDP_MDM_PWR_EN_GPIO 157
49#define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184 48#define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184
50#define OMAP4_SFH7741_ENABLE_GPIO 188 49#define OMAP4_SFH7741_ENABLE_GPIO 188
51 50
@@ -335,16 +334,6 @@ static void __init omap_4430sdp_init_early(void)
335#endif 334#endif
336} 335}
337 336
338static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
339 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
340 .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
341 .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
342 .phy_reset = false,
343 .reset_gpio_port[0] = -EINVAL,
344 .reset_gpio_port[1] = -EINVAL,
345 .reset_gpio_port[2] = -EINVAL,
346};
347
348static struct omap_musb_board_data musb_board_data = { 337static struct omap_musb_board_data musb_board_data = {
349 .interface_type = MUSB_INTERFACE_UTMI, 338 .interface_type = MUSB_INTERFACE_UTMI,
350 .mode = MUSB_OTG, 339 .mode = MUSB_OTG,
@@ -518,7 +507,6 @@ static struct regulator_init_data sdp4430_vana = {
518 .constraints = { 507 .constraints = {
519 .min_uV = 2100000, 508 .min_uV = 2100000,
520 .max_uV = 2100000, 509 .max_uV = 2100000,
521 .apply_uV = true,
522 .valid_modes_mask = REGULATOR_MODE_NORMAL 510 .valid_modes_mask = REGULATOR_MODE_NORMAL
523 | REGULATOR_MODE_STANDBY, 511 | REGULATOR_MODE_STANDBY,
524 .valid_ops_mask = REGULATOR_CHANGE_MODE 512 .valid_ops_mask = REGULATOR_CHANGE_MODE
@@ -530,7 +518,6 @@ static struct regulator_init_data sdp4430_vcxio = {
530 .constraints = { 518 .constraints = {
531 .min_uV = 1800000, 519 .min_uV = 1800000,
532 .max_uV = 1800000, 520 .max_uV = 1800000,
533 .apply_uV = true,
534 .valid_modes_mask = REGULATOR_MODE_NORMAL 521 .valid_modes_mask = REGULATOR_MODE_NORMAL
535 | REGULATOR_MODE_STANDBY, 522 | REGULATOR_MODE_STANDBY,
536 .valid_ops_mask = REGULATOR_CHANGE_MODE 523 .valid_ops_mask = REGULATOR_CHANGE_MODE
@@ -542,7 +529,6 @@ static struct regulator_init_data sdp4430_vdac = {
542 .constraints = { 529 .constraints = {
543 .min_uV = 1800000, 530 .min_uV = 1800000,
544 .max_uV = 1800000, 531 .max_uV = 1800000,
545 .apply_uV = true,
546 .valid_modes_mask = REGULATOR_MODE_NORMAL 532 .valid_modes_mask = REGULATOR_MODE_NORMAL
547 | REGULATOR_MODE_STANDBY, 533 | REGULATOR_MODE_STANDBY,
548 .valid_ops_mask = REGULATOR_CHANGE_MODE 534 .valid_ops_mask = REGULATOR_CHANGE_MODE
@@ -660,14 +646,6 @@ static void __init omap_4430sdp_init(void)
660 omap_serial_init(); 646 omap_serial_init();
661 omap4_twl6030_hsmmc_init(mmc); 647 omap4_twl6030_hsmmc_init(mmc);
662 648
663 /* Power on the ULPI PHY */
664 status = gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3");
665 if (status)
666 pr_err("%s: Could not get USBB1 PHY GPIO\n", __func__);
667 else
668 gpio_direction_output(OMAP4SDP_MDM_PWR_EN_GPIO, 1);
669
670 usb_ehci_init(&ehci_pdata);
671 usb_musb_init(&musb_board_data); 649 usb_musb_init(&musb_board_data);
672 650
673 status = omap_ethernet_init(); 651 status = omap_ethernet_init();
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index d0d0f5528132..8532d6e0d53a 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -408,6 +408,10 @@ static struct omap_musb_board_data musb_board_data = {
408 .interface_type = MUSB_INTERFACE_ULPI, 408 .interface_type = MUSB_INTERFACE_ULPI,
409 .mode = MUSB_OTG, 409 .mode = MUSB_OTG,
410 .power = 500, 410 .power = 500,
411 .set_phy_power = am35x_musb_phy_power,
412 .clear_irq = am35x_musb_clear_irq,
413 .set_mode = am35x_musb_set_mode,
414 .reset = am35x_musb_reset,
411}; 415};
412 416
413static __init void am3517_evm_musb_init(void) 417static __init void am3517_evm_musb_init(void)
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index fd38c05bb47f..c32c06828f08 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * board-sdp-flash.c 2 * board-flash.c
3 * Modified from mach-omap2/board-3430sdp-flash.c 3 * Modified from mach-omap2/board-3430sdp-flash.c
4 * 4 *
5 * Copyright (C) 2009 Nokia Corporation 5 * Copyright (C) 2009 Nokia Corporation
@@ -16,6 +16,7 @@
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/mtd/physmap.h> 17#include <linux/mtd/physmap.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <plat/irqs.h>
19 20
20#include <plat/gpmc.h> 21#include <plat/gpmc.h>
21#include <plat/nand.h> 22#include <plat/nand.h>
@@ -73,11 +74,11 @@ __init board_nor_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
73 + FLASH_SIZE_SDPV1 - 1; 74 + FLASH_SIZE_SDPV1 - 1;
74 } 75 }
75 if (err < 0) { 76 if (err < 0) {
76 printk(KERN_ERR "NOR: Can't request GPMC CS\n"); 77 pr_err("NOR: Can't request GPMC CS\n");
77 return; 78 return;
78 } 79 }
79 if (platform_device_register(&board_nor_device) < 0) 80 if (platform_device_register(&board_nor_device) < 0)
80 printk(KERN_ERR "Unable to register NOR device\n"); 81 pr_err("Unable to register NOR device\n");
81} 82}
82 83
83#if defined(CONFIG_MTD_ONENAND_OMAP2) || \ 84#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
@@ -139,12 +140,16 @@ static struct omap_nand_platform_data board_nand_data = {
139}; 140};
140 141
141void 142void
142__init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs) 143__init board_nand_init(struct mtd_partition *nand_parts,
144 u8 nr_parts, u8 cs, int nand_type)
143{ 145{
144 board_nand_data.cs = cs; 146 board_nand_data.cs = cs;
145 board_nand_data.parts = nand_parts; 147 board_nand_data.parts = nand_parts;
146 board_nand_data.nr_parts = nr_parts; 148 board_nand_data.nr_parts = nr_parts;
149 board_nand_data.devsize = nand_type;
147 150
151 board_nand_data.ecc_opt = OMAP_ECC_HAMMING_CODE_DEFAULT;
152 board_nand_data.gpmc_irq = OMAP_GPMC_IRQ_BASE + cs;
148 gpmc_nand_init(&board_nand_data); 153 gpmc_nand_init(&board_nand_data);
149} 154}
150#else 155#else
@@ -189,12 +194,12 @@ unmap:
189} 194}
190 195
191/** 196/**
192 * sdp3430_flash_init - Identify devices connected to GPMC and register. 197 * board_flash_init - Identify devices connected to GPMC and register.
193 * 198 *
194 * @return - void. 199 * @return - void.
195 */ 200 */
196void board_flash_init(struct flash_partitions partition_info[], 201void board_flash_init(struct flash_partitions partition_info[],
197 char chip_sel_board[][GPMC_CS_NUM]) 202 char chip_sel_board[][GPMC_CS_NUM], int nand_type)
198{ 203{
199 u8 cs = 0; 204 u8 cs = 0;
200 u8 norcs = GPMC_CS_NUM + 1; 205 u8 norcs = GPMC_CS_NUM + 1;
@@ -208,7 +213,7 @@ void board_flash_init(struct flash_partitions partition_info[],
208 */ 213 */
209 idx = get_gpmc0_type(); 214 idx = get_gpmc0_type();
210 if (idx >= MAX_SUPPORTED_GPMC_CONFIG) { 215 if (idx >= MAX_SUPPORTED_GPMC_CONFIG) {
211 printk(KERN_ERR "%s: Invalid chip select: %d\n", __func__, cs); 216 pr_err("%s: Invalid chip select: %d\n", __func__, cs);
212 return; 217 return;
213 } 218 }
214 config_sel = (unsigned char *)(chip_sel_board[idx]); 219 config_sel = (unsigned char *)(chip_sel_board[idx]);
@@ -232,23 +237,20 @@ void board_flash_init(struct flash_partitions partition_info[],
232 } 237 }
233 238
234 if (norcs > GPMC_CS_NUM) 239 if (norcs > GPMC_CS_NUM)
235 printk(KERN_INFO "NOR: Unable to find configuration " 240 pr_err("NOR: Unable to find configuration in GPMC\n");
236 "in GPMC\n");
237 else 241 else
238 board_nor_init(partition_info[0].parts, 242 board_nor_init(partition_info[0].parts,
239 partition_info[0].nr_parts, norcs); 243 partition_info[0].nr_parts, norcs);
240 244
241 if (onenandcs > GPMC_CS_NUM) 245 if (onenandcs > GPMC_CS_NUM)
242 printk(KERN_INFO "OneNAND: Unable to find configuration " 246 pr_err("OneNAND: Unable to find configuration in GPMC\n");
243 "in GPMC\n");
244 else 247 else
245 board_onenand_init(partition_info[1].parts, 248 board_onenand_init(partition_info[1].parts,
246 partition_info[1].nr_parts, onenandcs); 249 partition_info[1].nr_parts, onenandcs);
247 250
248 if (nandcs > GPMC_CS_NUM) 251 if (nandcs > GPMC_CS_NUM)
249 printk(KERN_INFO "NAND: Unable to find configuration " 252 pr_err("NAND: Unable to find configuration in GPMC\n");
250 "in GPMC\n");
251 else 253 else
252 board_nand_init(partition_info[2].parts, 254 board_nand_init(partition_info[2].parts,
253 partition_info[2].nr_parts, nandcs); 255 partition_info[2].nr_parts, nandcs, nand_type);
254} 256}
diff --git a/arch/arm/mach-omap2/board-flash.h b/arch/arm/mach-omap2/board-flash.h
index 69befe00dd2f..c240a3f8d163 100644
--- a/arch/arm/mach-omap2/board-flash.h
+++ b/arch/arm/mach-omap2/board-flash.h
@@ -25,6 +25,6 @@ struct flash_partitions {
25}; 25};
26 26
27extern void board_flash_init(struct flash_partitions [], 27extern void board_flash_init(struct flash_partitions [],
28 char chip_sel[][GPMC_CS_NUM]); 28 char chip_sel[][GPMC_CS_NUM], int nand_type);
29extern void board_nand_init(struct mtd_partition *nand_parts, 29extern void board_nand_init(struct mtd_partition *nand_parts,
30 u8 nr_parts, u8 cs); 30 u8 nr_parts, u8 cs, int nand_type);
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index d8eb2cb7cbc7..a3fae5697a72 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -433,7 +433,7 @@ static void __init omap_ldp_init(void)
433 omap_serial_init(); 433 omap_serial_init();
434 usb_musb_init(&musb_board_data); 434 usb_musb_init(&musb_board_data);
435 board_nand_init(ldp_nand_partitions, 435 board_nand_init(ldp_nand_partitions,
436 ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS); 436 ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS, 0);
437 437
438 omap2_hsmmc_init(mmc); 438 omap2_hsmmc_init(mmc);
439 /* link regulators to MMC adapters */ 439 /* link regulators to MMC adapters */
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index c2a0fca4aa53..d4a115712290 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -30,6 +30,8 @@
30#include <linux/usb/otg.h> 30#include <linux/usb/otg.h>
31#include <linux/smsc911x.h> 31#include <linux/smsc911x.h>
32 32
33#include <linux/wl12xx.h>
34#include <linux/regulator/fixed.h>
33#include <linux/regulator/machine.h> 35#include <linux/regulator/machine.h>
34#include <linux/mmc/host.h> 36#include <linux/mmc/host.h>
35 37
@@ -58,6 +60,13 @@
58#define OMAP3EVM_ETHR_ID_REV 0x50 60#define OMAP3EVM_ETHR_ID_REV 0x50
59#define OMAP3EVM_ETHR_GPIO_IRQ 176 61#define OMAP3EVM_ETHR_GPIO_IRQ 176
60#define OMAP3EVM_SMSC911X_CS 5 62#define OMAP3EVM_SMSC911X_CS 5
63/*
64 * Eth Reset signal
65 * 64 = Generation 1 (<=RevD)
66 * 7 = Generation 2 (>=RevE)
67 */
68#define OMAP3EVM_GEN1_ETHR_GPIO_RST 64
69#define OMAP3EVM_GEN2_ETHR_GPIO_RST 7
61 70
62static u8 omap3_evm_version; 71static u8 omap3_evm_version;
63 72
@@ -124,10 +133,15 @@ static struct platform_device omap3evm_smsc911x_device = {
124 133
125static inline void __init omap3evm_init_smsc911x(void) 134static inline void __init omap3evm_init_smsc911x(void)
126{ 135{
127 int eth_cs; 136 int eth_cs, eth_rst;
128 struct clk *l3ck; 137 struct clk *l3ck;
129 unsigned int rate; 138 unsigned int rate;
130 139
140 if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1)
141 eth_rst = OMAP3EVM_GEN1_ETHR_GPIO_RST;
142 else
143 eth_rst = OMAP3EVM_GEN2_ETHR_GPIO_RST;
144
131 eth_cs = OMAP3EVM_SMSC911X_CS; 145 eth_cs = OMAP3EVM_SMSC911X_CS;
132 146
133 l3ck = clk_get(NULL, "l3_ck"); 147 l3ck = clk_get(NULL, "l3_ck");
@@ -136,6 +150,27 @@ static inline void __init omap3evm_init_smsc911x(void)
136 else 150 else
137 rate = clk_get_rate(l3ck); 151 rate = clk_get_rate(l3ck);
138 152
153 /* Configure ethernet controller reset gpio */
154 if (cpu_is_omap3430()) {
155 if (gpio_request(eth_rst, "SMSC911x gpio") < 0) {
156 pr_err(KERN_ERR "Failed to request %d for smsc911x\n",
157 eth_rst);
158 return;
159 }
160
161 if (gpio_direction_output(eth_rst, 1) < 0) {
162 pr_err(KERN_ERR "Failed to set direction of %d for" \
163 " smsc911x\n", eth_rst);
164 return;
165 }
166 /* reset pulse to ethernet controller*/
167 usleep_range(150, 220);
168 gpio_set_value(eth_rst, 0);
169 usleep_range(150, 220);
170 gpio_set_value(eth_rst, 1);
171 usleep_range(1, 2);
172 }
173
139 if (gpio_request(OMAP3EVM_ETHR_GPIO_IRQ, "SMSC911x irq") < 0) { 174 if (gpio_request(OMAP3EVM_ETHR_GPIO_IRQ, "SMSC911x irq") < 0) {
140 printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n", 175 printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n",
141 OMAP3EVM_ETHR_GPIO_IRQ); 176 OMAP3EVM_ETHR_GPIO_IRQ);
@@ -235,9 +270,9 @@ static int omap3_evm_enable_lcd(struct omap_dss_device *dssdev)
235 gpio_set_value(OMAP3EVM_LCD_PANEL_ENVDD, 0); 270 gpio_set_value(OMAP3EVM_LCD_PANEL_ENVDD, 0);
236 271
237 if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) 272 if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2)
238 gpio_set_value(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0); 273 gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0);
239 else 274 else
240 gpio_set_value(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1); 275 gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1);
241 276
242 lcd_enabled = 1; 277 lcd_enabled = 1;
243 return 0; 278 return 0;
@@ -248,9 +283,9 @@ static void omap3_evm_disable_lcd(struct omap_dss_device *dssdev)
248 gpio_set_value(OMAP3EVM_LCD_PANEL_ENVDD, 1); 283 gpio_set_value(OMAP3EVM_LCD_PANEL_ENVDD, 1);
249 284
250 if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) 285 if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2)
251 gpio_set_value(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1); 286 gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1);
252 else 287 else
253 gpio_set_value(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0); 288 gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0);
254 289
255 lcd_enabled = 0; 290 lcd_enabled = 0;
256} 291}
@@ -289,7 +324,7 @@ static int omap3_evm_enable_dvi(struct omap_dss_device *dssdev)
289 return -EINVAL; 324 return -EINVAL;
290 } 325 }
291 326
292 gpio_set_value(OMAP3EVM_DVI_PANEL_EN_GPIO, 1); 327 gpio_set_value_cansleep(OMAP3EVM_DVI_PANEL_EN_GPIO, 1);
293 328
294 dvi_enabled = 1; 329 dvi_enabled = 1;
295 return 0; 330 return 0;
@@ -297,7 +332,7 @@ static int omap3_evm_enable_dvi(struct omap_dss_device *dssdev)
297 332
298static void omap3_evm_disable_dvi(struct omap_dss_device *dssdev) 333static void omap3_evm_disable_dvi(struct omap_dss_device *dssdev)
299{ 334{
300 gpio_set_value(OMAP3EVM_DVI_PANEL_EN_GPIO, 0); 335 gpio_set_value_cansleep(OMAP3EVM_DVI_PANEL_EN_GPIO, 0);
301 336
302 dvi_enabled = 0; 337 dvi_enabled = 0;
303} 338}
@@ -381,6 +416,16 @@ static struct omap2_hsmmc_info mmc[] = {
381 .gpio_cd = -EINVAL, 416 .gpio_cd = -EINVAL,
382 .gpio_wp = 63, 417 .gpio_wp = 63,
383 }, 418 },
419#ifdef CONFIG_WL12XX_PLATFORM_DATA
420 {
421 .name = "wl1271",
422 .mmc = 2,
423 .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
424 .gpio_wp = -EINVAL,
425 .gpio_cd = -EINVAL,
426 .nonremovable = true,
427 },
428#endif
384 {} /* Terminator */ 429 {} /* Terminator */
385}; 430};
386 431
@@ -411,6 +456,8 @@ static struct platform_device leds_gpio = {
411static int omap3evm_twl_gpio_setup(struct device *dev, 456static int omap3evm_twl_gpio_setup(struct device *dev,
412 unsigned gpio, unsigned ngpio) 457 unsigned gpio, unsigned ngpio)
413{ 458{
459 int r;
460
414 /* gpio + 0 is "mmc0_cd" (input/IRQ) */ 461 /* gpio + 0 is "mmc0_cd" (input/IRQ) */
415 omap_mux_init_gpio(63, OMAP_PIN_INPUT); 462 omap_mux_init_gpio(63, OMAP_PIN_INPUT);
416 mmc[0].gpio_cd = gpio + 0; 463 mmc[0].gpio_cd = gpio + 0;
@@ -426,8 +473,12 @@ static int omap3evm_twl_gpio_setup(struct device *dev,
426 */ 473 */
427 474
428 /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */ 475 /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */
429 gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL"); 476 r = gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL");
430 gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0); 477 if (!r)
478 r = gpio_direction_output(gpio + TWL4030_GPIO_MAX,
479 (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) ? 1 : 0);
480 if (r)
481 printk(KERN_ERR "failed to get/set lcd_bkl gpio\n");
431 482
432 /* gpio + 7 == DVI Enable */ 483 /* gpio + 7 == DVI Enable */
433 gpio_request(gpio + 7, "EN_DVI"); 484 gpio_request(gpio + 7, "EN_DVI");
@@ -538,6 +589,69 @@ static struct regulator_init_data omap3_evm_vpll2 = {
538 .consumer_supplies = &omap3_evm_vpll2_supply, 589 .consumer_supplies = &omap3_evm_vpll2_supply,
539}; 590};
540 591
592/* ads7846 on SPI */
593static struct regulator_consumer_supply omap3evm_vio_supply =
594 REGULATOR_SUPPLY("vcc", "spi1.0");
595
596/* VIO for ads7846 */
597static struct regulator_init_data omap3evm_vio = {
598 .constraints = {
599 .min_uV = 1800000,
600 .max_uV = 1800000,
601 .apply_uV = true,
602 .valid_modes_mask = REGULATOR_MODE_NORMAL
603 | REGULATOR_MODE_STANDBY,
604 .valid_ops_mask = REGULATOR_CHANGE_MODE
605 | REGULATOR_CHANGE_STATUS,
606 },
607 .num_consumer_supplies = 1,
608 .consumer_supplies = &omap3evm_vio_supply,
609};
610
611#ifdef CONFIG_WL12XX_PLATFORM_DATA
612
613#define OMAP3EVM_WLAN_PMENA_GPIO (150)
614#define OMAP3EVM_WLAN_IRQ_GPIO (149)
615
616static struct regulator_consumer_supply omap3evm_vmmc2_supply = {
617 .supply = "vmmc",
618 .dev_name = "mmci-omap-hs.1",
619};
620
621/* VMMC2 for driving the WL12xx module */
622static struct regulator_init_data omap3evm_vmmc2 = {
623 .constraints = {
624 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
625 },
626 .num_consumer_supplies = 1,
627 .consumer_supplies = &omap3evm_vmmc2_supply,
628};
629
630static struct fixed_voltage_config omap3evm_vwlan = {
631 .supply_name = "vwl1271",
632 .microvolts = 1800000, /* 1.80V */
633 .gpio = OMAP3EVM_WLAN_PMENA_GPIO,
634 .startup_delay = 70000, /* 70ms */
635 .enable_high = 1,
636 .enabled_at_boot = 0,
637 .init_data = &omap3evm_vmmc2,
638};
639
640static struct platform_device omap3evm_vwlan_device = {
641 .name = "reg-fixed-voltage",
642 .id = 1,
643 .dev = {
644 .platform_data = &omap3evm_vwlan,
645 },
646};
647
648struct wl12xx_platform_data omap3evm_wlan_data __initdata = {
649 .irq = OMAP_GPIO_IRQ(OMAP3EVM_WLAN_IRQ_GPIO),
650 /* ref clock is 38.4 MHz */
651 .board_ref_clock = 2,
652};
653#endif
654
541static struct twl4030_platform_data omap3evm_twldata = { 655static struct twl4030_platform_data omap3evm_twldata = {
542 .irq_base = TWL4030_IRQ_BASE, 656 .irq_base = TWL4030_IRQ_BASE,
543 .irq_end = TWL4030_IRQ_END, 657 .irq_end = TWL4030_IRQ_END,
@@ -550,6 +664,7 @@ static struct twl4030_platform_data omap3evm_twldata = {
550 .codec = &omap3evm_codec_data, 664 .codec = &omap3evm_codec_data,
551 .vdac = &omap3_evm_vdac, 665 .vdac = &omap3_evm_vdac,
552 .vpll2 = &omap3_evm_vpll2, 666 .vpll2 = &omap3_evm_vpll2,
667 .vio = &omap3evm_vio,
553}; 668};
554 669
555static struct i2c_board_info __initdata omap3evm_i2c_boardinfo[] = { 670static struct i2c_board_info __initdata omap3evm_i2c_boardinfo[] = {
@@ -651,14 +766,61 @@ static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = {
651}; 766};
652 767
653#ifdef CONFIG_OMAP_MUX 768#ifdef CONFIG_OMAP_MUX
654static struct omap_board_mux board_mux[] __initdata = { 769static struct omap_board_mux omap35x_board_mux[] __initdata = {
770 OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
771 OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
772 OMAP_PIN_OFF_WAKEUPENABLE),
773 OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
774 OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
775 OMAP_PIN_OFF_WAKEUPENABLE),
776 OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
777 OMAP_PIN_OFF_NONE),
778 OMAP3_MUX(GPMC_WAIT2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
779 OMAP_PIN_OFF_NONE),
780#ifdef CONFIG_WL12XX_PLATFORM_DATA
781 /* WLAN IRQ - GPIO 149 */
782 OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
783
784 /* WLAN POWER ENABLE - GPIO 150 */
785 OMAP3_MUX(UART1_CTS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
786
787 /* MMC2 SDIO pin muxes for WL12xx */
788 OMAP3_MUX(SDMMC2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
789 OMAP3_MUX(SDMMC2_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
790 OMAP3_MUX(SDMMC2_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
791 OMAP3_MUX(SDMMC2_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
792 OMAP3_MUX(SDMMC2_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
793 OMAP3_MUX(SDMMC2_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
794#endif
795 { .reg_offset = OMAP_MUX_TERMINATOR },
796};
797
798static struct omap_board_mux omap36x_board_mux[] __initdata = {
655 OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP | 799 OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
656 OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW | 800 OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
657 OMAP_PIN_OFF_WAKEUPENABLE), 801 OMAP_PIN_OFF_WAKEUPENABLE),
658 OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP | 802 OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
659 OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW), 803 OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
804 OMAP_PIN_OFF_WAKEUPENABLE),
805 /* AM/DM37x EVM: DSS data bus muxed with sys_boot */
806 OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
807 OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
808 OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
809 OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
810 OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
811 OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
812 OMAP3_MUX(SYS_BOOT0, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
813 OMAP3_MUX(SYS_BOOT1, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
814 OMAP3_MUX(SYS_BOOT3, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
815 OMAP3_MUX(SYS_BOOT4, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
816 OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
817 OMAP3_MUX(SYS_BOOT6, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
818
660 { .reg_offset = OMAP_MUX_TERMINATOR }, 819 { .reg_offset = OMAP_MUX_TERMINATOR },
661}; 820};
821#else
822#define omap35x_board_mux NULL
823#define omap36x_board_mux NULL
662#endif 824#endif
663 825
664static struct omap_musb_board_data musb_board_data = { 826static struct omap_musb_board_data musb_board_data = {
@@ -670,7 +832,11 @@ static struct omap_musb_board_data musb_board_data = {
670static void __init omap3_evm_init(void) 832static void __init omap3_evm_init(void)
671{ 833{
672 omap3_evm_get_revision(); 834 omap3_evm_get_revision();
673 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); 835
836 if (cpu_is_omap3630())
837 omap3_mux_init(omap36x_board_mux, OMAP_PACKAGE_CBB);
838 else
839 omap3_mux_init(omap35x_board_mux, OMAP_PACKAGE_CBB);
674 840
675 omap3_evm_i2c_init(); 841 omap3_evm_i2c_init();
676 842
@@ -714,6 +880,13 @@ static void __init omap3_evm_init(void)
714 ads7846_dev_init(); 880 ads7846_dev_init();
715 omap3evm_init_smsc911x(); 881 omap3evm_init_smsc911x();
716 omap3_evm_display_init(); 882 omap3_evm_display_init();
883
884#ifdef CONFIG_WL12XX_PLATFORM_DATA
885 /* WL12xx WLAN Init */
886 if (wl12xx_set_platform_data(&omap3evm_wlan_data))
887 pr_err("error setting wl12xx data\n");
888 platform_device_register(&omap3evm_vwlan_device);
889#endif
717} 890}
718 891
719MACHINE_START(OMAP3EVM, "OMAP3 EVM") 892MACHINE_START(OMAP3EVM, "OMAP3 EVM")
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index fca5b9e80c18..3dd241b95159 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -26,6 +26,8 @@
26#include <linux/usb/otg.h> 26#include <linux/usb/otg.h>
27#include <linux/i2c/twl.h> 27#include <linux/i2c/twl.h>
28#include <linux/regulator/machine.h> 28#include <linux/regulator/machine.h>
29#include <linux/regulator/fixed.h>
30#include <linux/wl12xx.h>
29 31
30#include <mach/hardware.h> 32#include <mach/hardware.h>
31#include <mach/omap4-common.h> 33#include <mach/omap4-common.h>
@@ -45,6 +47,8 @@
45 47
46#define GPIO_HUB_POWER 1 48#define GPIO_HUB_POWER 1
47#define GPIO_HUB_NRESET 62 49#define GPIO_HUB_NRESET 62
50#define GPIO_WIFI_PMENA 43
51#define GPIO_WIFI_IRQ 53
48 52
49static struct gpio_led gpio_leds[] = { 53static struct gpio_led gpio_leds[] = {
50 { 54 {
@@ -161,6 +165,15 @@ static struct omap2_hsmmc_info mmc[] = {
161 .gpio_wp = -EINVAL, 165 .gpio_wp = -EINVAL,
162 .gpio_cd = -EINVAL, 166 .gpio_cd = -EINVAL,
163 }, 167 },
168 {
169 .name = "wl1271",
170 .mmc = 5,
171 .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
172 .gpio_wp = -EINVAL,
173 .gpio_cd = -EINVAL,
174 .ocr_mask = MMC_VDD_165_195,
175 .nonremovable = true,
176 },
164 {} /* Terminator */ 177 {} /* Terminator */
165}; 178};
166 179
@@ -171,6 +184,43 @@ static struct regulator_consumer_supply omap4_panda_vmmc_supply[] = {
171 }, 184 },
172}; 185};
173 186
187static struct regulator_consumer_supply omap4_panda_vmmc5_supply = {
188 .supply = "vmmc",
189 .dev_name = "mmci-omap-hs.4",
190};
191
192static struct regulator_init_data panda_vmmc5 = {
193 .constraints = {
194 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
195 },
196 .num_consumer_supplies = 1,
197 .consumer_supplies = &omap4_panda_vmmc5_supply,
198};
199
200static struct fixed_voltage_config panda_vwlan = {
201 .supply_name = "vwl1271",
202 .microvolts = 1800000, /* 1.8V */
203 .gpio = GPIO_WIFI_PMENA,
204 .startup_delay = 70000, /* 70msec */
205 .enable_high = 1,
206 .enabled_at_boot = 0,
207 .init_data = &panda_vmmc5,
208};
209
210static struct platform_device omap_vwlan_device = {
211 .name = "reg-fixed-voltage",
212 .id = 1,
213 .dev = {
214 .platform_data = &panda_vwlan,
215 },
216};
217
218struct wl12xx_platform_data omap_panda_wlan_data __initdata = {
219 .irq = OMAP_GPIO_IRQ(GPIO_WIFI_IRQ),
220 /* PANDA ref clock is 38.4 MHz */
221 .board_ref_clock = 2,
222};
223
174static int omap4_twl6030_hsmmc_late_init(struct device *dev) 224static int omap4_twl6030_hsmmc_late_init(struct device *dev)
175{ 225{
176 int ret = 0; 226 int ret = 0;
@@ -304,7 +354,6 @@ static struct regulator_init_data omap4_panda_vana = {
304 .constraints = { 354 .constraints = {
305 .min_uV = 2100000, 355 .min_uV = 2100000,
306 .max_uV = 2100000, 356 .max_uV = 2100000,
307 .apply_uV = true,
308 .valid_modes_mask = REGULATOR_MODE_NORMAL 357 .valid_modes_mask = REGULATOR_MODE_NORMAL
309 | REGULATOR_MODE_STANDBY, 358 | REGULATOR_MODE_STANDBY,
310 .valid_ops_mask = REGULATOR_CHANGE_MODE 359 .valid_ops_mask = REGULATOR_CHANGE_MODE
@@ -316,7 +365,6 @@ static struct regulator_init_data omap4_panda_vcxio = {
316 .constraints = { 365 .constraints = {
317 .min_uV = 1800000, 366 .min_uV = 1800000,
318 .max_uV = 1800000, 367 .max_uV = 1800000,
319 .apply_uV = true,
320 .valid_modes_mask = REGULATOR_MODE_NORMAL 368 .valid_modes_mask = REGULATOR_MODE_NORMAL
321 | REGULATOR_MODE_STANDBY, 369 | REGULATOR_MODE_STANDBY,
322 .valid_ops_mask = REGULATOR_CHANGE_MODE 370 .valid_ops_mask = REGULATOR_CHANGE_MODE
@@ -328,7 +376,6 @@ static struct regulator_init_data omap4_panda_vdac = {
328 .constraints = { 376 .constraints = {
329 .min_uV = 1800000, 377 .min_uV = 1800000,
330 .max_uV = 1800000, 378 .max_uV = 1800000,
331 .apply_uV = true,
332 .valid_modes_mask = REGULATOR_MODE_NORMAL 379 .valid_modes_mask = REGULATOR_MODE_NORMAL
333 | REGULATOR_MODE_STANDBY, 380 | REGULATOR_MODE_STANDBY,
334 .valid_ops_mask = REGULATOR_CHANGE_MODE 381 .valid_ops_mask = REGULATOR_CHANGE_MODE
@@ -390,6 +437,19 @@ static int __init omap4_panda_i2c_init(void)
390 437
391#ifdef CONFIG_OMAP_MUX 438#ifdef CONFIG_OMAP_MUX
392static struct omap_board_mux board_mux[] __initdata = { 439static struct omap_board_mux board_mux[] __initdata = {
440 /* WLAN IRQ - GPIO 53 */
441 OMAP4_MUX(GPMC_NCS3, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
442 /* WLAN POWER ENABLE - GPIO 43 */
443 OMAP4_MUX(GPMC_A19, OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT),
444 /* WLAN SDIO: MMC5 CMD */
445 OMAP4_MUX(SDMMC5_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
446 /* WLAN SDIO: MMC5 CLK */
447 OMAP4_MUX(SDMMC5_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
448 /* WLAN SDIO: MMC5 DAT[0-3] */
449 OMAP4_MUX(SDMMC5_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
450 OMAP4_MUX(SDMMC5_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
451 OMAP4_MUX(SDMMC5_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
452 OMAP4_MUX(SDMMC5_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
393 { .reg_offset = OMAP_MUX_TERMINATOR }, 453 { .reg_offset = OMAP_MUX_TERMINATOR },
394}; 454};
395#else 455#else
@@ -404,8 +464,12 @@ static void __init omap4_panda_init(void)
404 package = OMAP_PACKAGE_CBL; 464 package = OMAP_PACKAGE_CBL;
405 omap4_mux_init(board_mux, package); 465 omap4_mux_init(board_mux, package);
406 466
467 if (wl12xx_set_platform_data(&omap_panda_wlan_data))
468 pr_err("error setting wl12xx data\n");
469
407 omap4_panda_i2c_init(); 470 omap4_panda_i2c_init();
408 platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices)); 471 platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
472 platform_device_register(&omap_vwlan_device);
409 omap_serial_init(); 473 omap_serial_init();
410 omap4_twl6030_hsmmc_init(mmc); 474 omap4_twl6030_hsmmc_init(mmc);
411 omap4_ehci_init(); 475 omap4_ehci_init();
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
index 85d4170f30ab..7e3f1595d77b 100644
--- a/arch/arm/mach-omap2/board-zoom.c
+++ b/arch/arm/mach-omap2/board-zoom.c
@@ -16,6 +16,7 @@
16#include <linux/input.h> 16#include <linux/input.h>
17#include <linux/gpio.h> 17#include <linux/gpio.h>
18#include <linux/i2c/twl.h> 18#include <linux/i2c/twl.h>
19#include <linux/mtd/nand.h>
19 20
20#include <asm/mach-types.h> 21#include <asm/mach-types.h>
21#include <asm/mach/arch.h> 22#include <asm/mach/arch.h>
@@ -124,8 +125,8 @@ static void __init omap_zoom_init(void)
124 usb_ehci_init(&ehci_pdata); 125 usb_ehci_init(&ehci_pdata);
125 } 126 }
126 127
127 board_nand_init(zoom_nand_partitions, 128 board_nand_init(zoom_nand_partitions, ARRAY_SIZE(zoom_nand_partitions),
128 ARRAY_SIZE(zoom_nand_partitions), ZOOM_NAND_CS); 129 ZOOM_NAND_CS, NAND_BUSWIDTH_16);
129 zoom_debugboard_init(); 130 zoom_debugboard_init();
130 zoom_peripherals_init(); 131 zoom_peripherals_init();
131 zoom_display_init(); 132 zoom_display_init();
diff --git a/arch/arm/mach-omap2/clkt_clksel.c b/arch/arm/mach-omap2/clkt_clksel.c
index a781cd6795a4..e25364de028a 100644
--- a/arch/arm/mach-omap2/clkt_clksel.c
+++ b/arch/arm/mach-omap2/clkt_clksel.c
@@ -97,7 +97,7 @@ static u8 _get_div_and_fieldval(struct clk *src_clk, struct clk *clk,
97 u32 *field_val) 97 u32 *field_val)
98{ 98{
99 const struct clksel *clks; 99 const struct clksel *clks;
100 const struct clksel_rate *clkr, *max_clkr; 100 const struct clksel_rate *clkr, *max_clkr = NULL;
101 u8 max_div = 0; 101 u8 max_div = 0;
102 102
103 clks = _get_clksel_by_parent(clk, src_clk); 103 clks = _get_clksel_by_parent(clk, src_clk);
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 8800486f9467..9ee876fd367a 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -15,6 +15,7 @@
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/clk.h> 16#include <linux/clk.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/slab.h>
18 19
19#include <mach/hardware.h> 20#include <mach/hardware.h>
20#include <mach/irqs.h> 21#include <mach/irqs.h>
@@ -320,163 +321,55 @@ static inline void omap_init_audio(void) {}
320 321
321#include <plat/mcspi.h> 322#include <plat/mcspi.h>
322 323
323#define OMAP2_MCSPI1_BASE 0x48098000 324struct omap_device_pm_latency omap_mcspi_latency[] = {
324#define OMAP2_MCSPI2_BASE 0x4809a000 325 [0] = {
325#define OMAP2_MCSPI3_BASE 0x480b8000 326 .deactivate_func = omap_device_idle_hwmods,
326#define OMAP2_MCSPI4_BASE 0x480ba000 327 .activate_func = omap_device_enable_hwmods,
327 328 .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
328#define OMAP4_MCSPI1_BASE 0x48098100
329#define OMAP4_MCSPI2_BASE 0x4809a100
330#define OMAP4_MCSPI3_BASE 0x480b8100
331#define OMAP4_MCSPI4_BASE 0x480ba100
332
333static struct omap2_mcspi_platform_config omap2_mcspi1_config = {
334 .num_cs = 4,
335};
336
337static struct resource omap2_mcspi1_resources[] = {
338 {
339 .start = OMAP2_MCSPI1_BASE,
340 .end = OMAP2_MCSPI1_BASE + 0xff,
341 .flags = IORESOURCE_MEM,
342 },
343};
344
345static struct platform_device omap2_mcspi1 = {
346 .name = "omap2_mcspi",
347 .id = 1,
348 .num_resources = ARRAY_SIZE(omap2_mcspi1_resources),
349 .resource = omap2_mcspi1_resources,
350 .dev = {
351 .platform_data = &omap2_mcspi1_config,
352 },
353};
354
355static struct omap2_mcspi_platform_config omap2_mcspi2_config = {
356 .num_cs = 2,
357};
358
359static struct resource omap2_mcspi2_resources[] = {
360 {
361 .start = OMAP2_MCSPI2_BASE,
362 .end = OMAP2_MCSPI2_BASE + 0xff,
363 .flags = IORESOURCE_MEM,
364 },
365};
366
367static struct platform_device omap2_mcspi2 = {
368 .name = "omap2_mcspi",
369 .id = 2,
370 .num_resources = ARRAY_SIZE(omap2_mcspi2_resources),
371 .resource = omap2_mcspi2_resources,
372 .dev = {
373 .platform_data = &omap2_mcspi2_config,
374 },
375};
376
377#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
378 defined(CONFIG_ARCH_OMAP4)
379static struct omap2_mcspi_platform_config omap2_mcspi3_config = {
380 .num_cs = 2,
381};
382
383static struct resource omap2_mcspi3_resources[] = {
384 {
385 .start = OMAP2_MCSPI3_BASE,
386 .end = OMAP2_MCSPI3_BASE + 0xff,
387 .flags = IORESOURCE_MEM,
388 },
389};
390
391static struct platform_device omap2_mcspi3 = {
392 .name = "omap2_mcspi",
393 .id = 3,
394 .num_resources = ARRAY_SIZE(omap2_mcspi3_resources),
395 .resource = omap2_mcspi3_resources,
396 .dev = {
397 .platform_data = &omap2_mcspi3_config,
398 },
399};
400#endif
401
402#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
403static struct omap2_mcspi_platform_config omap2_mcspi4_config = {
404 .num_cs = 1,
405};
406
407static struct resource omap2_mcspi4_resources[] = {
408 {
409 .start = OMAP2_MCSPI4_BASE,
410 .end = OMAP2_MCSPI4_BASE + 0xff,
411 .flags = IORESOURCE_MEM,
412 },
413};
414
415static struct platform_device omap2_mcspi4 = {
416 .name = "omap2_mcspi",
417 .id = 4,
418 .num_resources = ARRAY_SIZE(omap2_mcspi4_resources),
419 .resource = omap2_mcspi4_resources,
420 .dev = {
421 .platform_data = &omap2_mcspi4_config,
422 }, 329 },
423}; 330};
424#endif
425 331
426#ifdef CONFIG_ARCH_OMAP4 332static int omap_mcspi_init(struct omap_hwmod *oh, void *unused)
427static inline void omap4_mcspi_fixup(void)
428{ 333{
429 omap2_mcspi1_resources[0].start = OMAP4_MCSPI1_BASE; 334 struct omap_device *od;
430 omap2_mcspi1_resources[0].end = OMAP4_MCSPI1_BASE + 0xff; 335 char *name = "omap2_mcspi";
431 omap2_mcspi2_resources[0].start = OMAP4_MCSPI2_BASE; 336 struct omap2_mcspi_platform_config *pdata;
432 omap2_mcspi2_resources[0].end = OMAP4_MCSPI2_BASE + 0xff; 337 static int spi_num;
433 omap2_mcspi3_resources[0].start = OMAP4_MCSPI3_BASE; 338 struct omap2_mcspi_dev_attr *mcspi_attrib = oh->dev_attr;
434 omap2_mcspi3_resources[0].end = OMAP4_MCSPI3_BASE + 0xff; 339
435 omap2_mcspi4_resources[0].start = OMAP4_MCSPI4_BASE; 340 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
436 omap2_mcspi4_resources[0].end = OMAP4_MCSPI4_BASE + 0xff; 341 if (!pdata) {
437} 342 pr_err("Memory allocation for McSPI device failed\n");
438#else 343 return -ENOMEM;
439static inline void omap4_mcspi_fixup(void) 344 }
440{
441}
442#endif
443 345
444#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ 346 pdata->num_cs = mcspi_attrib->num_chipselect;
445 defined(CONFIG_ARCH_OMAP4) 347 switch (oh->class->rev) {
446static inline void omap2_mcspi3_init(void) 348 case OMAP2_MCSPI_REV:
447{ 349 case OMAP3_MCSPI_REV:
448 platform_device_register(&omap2_mcspi3); 350 pdata->regs_offset = 0;
449} 351 break;
450#else 352 case OMAP4_MCSPI_REV:
451static inline void omap2_mcspi3_init(void) 353 pdata->regs_offset = OMAP4_MCSPI_REG_OFFSET;
452{ 354 break;
453} 355 default:
454#endif 356 pr_err("Invalid McSPI Revision value\n");
357 return -EINVAL;
358 }
455 359
456#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) 360 spi_num++;
457static inline void omap2_mcspi4_init(void) 361 od = omap_device_build(name, spi_num, oh, pdata,
458{ 362 sizeof(*pdata), omap_mcspi_latency,
459 platform_device_register(&omap2_mcspi4); 363 ARRAY_SIZE(omap_mcspi_latency), 0);
460} 364 WARN(IS_ERR(od), "Can't build omap_device for %s:%s\n",
461#else 365 name, oh->name);
462static inline void omap2_mcspi4_init(void) 366 kfree(pdata);
463{ 367 return 0;
464} 368}
465#endif
466 369
467static void omap_init_mcspi(void) 370static void omap_init_mcspi(void)
468{ 371{
469 if (cpu_is_omap44xx()) 372 omap_hwmod_for_each_by_class("mcspi", omap_mcspi_init, NULL);
470 omap4_mcspi_fixup();
471
472 platform_device_register(&omap2_mcspi1);
473 platform_device_register(&omap2_mcspi2);
474
475 if (cpu_is_omap2430() || cpu_is_omap343x() || cpu_is_omap44xx())
476 omap2_mcspi3_init();
477
478 if (cpu_is_omap343x() || cpu_is_omap44xx())
479 omap2_mcspi4_init();
480} 373}
481 374
482#else 375#else
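The devices.c hunk above drops the per-SoC static resources and registers every McSPI controller from hwmod data instead: omap_hwmod_for_each_by_class() walks the "mcspi" class, and omap_mcspi_init() takes num_cs from the hwmod's dev_attr and the register offset from the class revision before calling omap_device_build(). A minimal sketch of the same pattern for a hypothetical "foo" IP class, using only the calls visible in this hunk (foo_platform_data and foo_dev_attr are made up):

static struct omap_device_pm_latency omap_foo_latency[] = {
	{
		.deactivate_func = omap_device_idle_hwmods,
		.activate_func	 = omap_device_enable_hwmods,
		.flags		 = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
	},
};

static int __init omap_foo_init(struct omap_hwmod *oh, void *unused)
{
	struct omap_device *od;
	struct foo_platform_data *pdata;
	struct foo_dev_attr *attr = oh->dev_attr;
	static int id;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/* per-instance data comes from the hwmod, not from hardcoded tables */
	pdata->nr_channels = attr->nr_channels;

	od = omap_device_build("omap_foo", id++, oh, pdata, sizeof(*pdata),
			       omap_foo_latency, ARRAY_SIZE(omap_foo_latency), 0);
	WARN(IS_ERR(od), "Can't build omap_device for %s\n", oh->name);
	kfree(pdata);
	return 0;
}

static void __init omap_init_foo(void)
{
	omap_hwmod_for_each_by_class("foo", omap_foo_init, NULL);
}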
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 2bb29c160702..c1791d08ae56 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -12,6 +12,7 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/mtd/nand.h>
15 16
16#include <asm/mach/flash.h> 17#include <asm/mach/flash.h>
17 18
@@ -69,8 +70,10 @@ static int omap2_nand_gpmc_retime(void)
69 t.wr_cycle = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->wr_cycle); 70 t.wr_cycle = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->wr_cycle);
70 71
71 /* Configure GPMC */ 72 /* Configure GPMC */
72 gpmc_cs_configure(gpmc_nand_data->cs, 73 if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
73 GPMC_CONFIG_DEV_SIZE, gpmc_nand_data->devsize); 74 gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_DEV_SIZE, 1);
75 else
76 gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_DEV_SIZE, 0);
74 gpmc_cs_configure(gpmc_nand_data->cs, 77 gpmc_cs_configure(gpmc_nand_data->cs,
75 GPMC_CONFIG_DEV_TYPE, GPMC_DEVICETYPE_NAND); 78 GPMC_CONFIG_DEV_TYPE, GPMC_DEVICETYPE_NAND);
76 err = gpmc_cs_set_timings(gpmc_nand_data->cs, &t); 79 err = gpmc_cs_set_timings(gpmc_nand_data->cs, &t);
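With the gpmc-nand change above, the GPMC device-size bit is derived from the platform data's devsize field (NAND_BUSWIDTH_16 selects a 16-bit device, anything else 8-bit). A sketch of the board-side structure; apart from .cs and .devsize, the field names assume the usual omap_nand_platform_data layout and are not taken from this hunk:

static struct omap_nand_platform_data board_nand_data = {
	.cs		= 0,
	.parts		= board_nand_partitions,
	.nr_parts	= ARRAY_SIZE(board_nand_partitions),
	.devsize	= NAND_BUSWIDTH_16,	/* or 0 for an 8-bit part */
};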
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 3a7d25fb00ef..d776ded9830d 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -94,7 +94,7 @@ static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base)
94} 94}
95 95
96static void set_onenand_cfg(void __iomem *onenand_base, int latency, 96static void set_onenand_cfg(void __iomem *onenand_base, int latency,
97 int sync_read, int sync_write, int hf) 97 int sync_read, int sync_write, int hf, int vhf)
98{ 98{
99 u32 reg; 99 u32 reg;
100 100
@@ -114,12 +114,57 @@ static void set_onenand_cfg(void __iomem *onenand_base, int latency,
114 reg |= ONENAND_SYS_CFG1_HF; 114 reg |= ONENAND_SYS_CFG1_HF;
115 else 115 else
116 reg &= ~ONENAND_SYS_CFG1_HF; 116 reg &= ~ONENAND_SYS_CFG1_HF;
117 if (vhf)
118 reg |= ONENAND_SYS_CFG1_VHF;
119 else
120 reg &= ~ONENAND_SYS_CFG1_VHF;
117 writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); 121 writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
118} 122}
119 123
124static int omap2_onenand_get_freq(struct omap_onenand_platform_data *cfg,
125 void __iomem *onenand_base, bool *clk_dep)
126{
127 u16 ver = readw(onenand_base + ONENAND_REG_VERSION_ID);
128 int freq = 0;
129
130 if (cfg->get_freq) {
131 struct onenand_freq_info fi;
132
133 fi.maf_id = readw(onenand_base + ONENAND_REG_MANUFACTURER_ID);
134 fi.dev_id = readw(onenand_base + ONENAND_REG_DEVICE_ID);
135 fi.ver_id = ver;
136 freq = cfg->get_freq(&fi, clk_dep);
137 if (freq)
138 return freq;
139 }
140
141 switch ((ver >> 4) & 0xf) {
142 case 0:
143 freq = 40;
144 break;
145 case 1:
146 freq = 54;
147 break;
148 case 2:
149 freq = 66;
150 break;
151 case 3:
152 freq = 83;
153 break;
154 case 4:
155 freq = 104;
156 break;
157 default:
158 freq = 54;
159 break;
160 }
161
162 return freq;
163}
164
120static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg, 165static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
121 void __iomem *onenand_base, 166 void __iomem *onenand_base,
122 int freq) 167 int *freq_ptr)
123{ 168{
124 struct gpmc_timings t; 169 struct gpmc_timings t;
125 const int t_cer = 15; 170 const int t_cer = 15;
@@ -130,10 +175,11 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
130 const int t_wph = 30; 175 const int t_wph = 30;
131 int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo; 176 int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
132 int tick_ns, div, fclk_offset_ns, fclk_offset, gpmc_clk_ns, latency; 177 int tick_ns, div, fclk_offset_ns, fclk_offset, gpmc_clk_ns, latency;
133 int first_time = 0, hf = 0, sync_read = 0, sync_write = 0; 178 int first_time = 0, hf = 0, vhf = 0, sync_read = 0, sync_write = 0;
134 int err, ticks_cez; 179 int err, ticks_cez;
135 int cs = cfg->cs; 180 int cs = cfg->cs, freq = *freq_ptr;
136 u32 reg; 181 u32 reg;
182 bool clk_dep = false;
137 183
138 if (cfg->flags & ONENAND_SYNC_READ) { 184 if (cfg->flags & ONENAND_SYNC_READ) {
139 sync_read = 1; 185 sync_read = 1;
@@ -148,27 +194,7 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
148 err = omap2_onenand_set_async_mode(cs, onenand_base); 194 err = omap2_onenand_set_async_mode(cs, onenand_base);
149 if (err) 195 if (err)
150 return err; 196 return err;
151 reg = readw(onenand_base + ONENAND_REG_VERSION_ID); 197 freq = omap2_onenand_get_freq(cfg, onenand_base, &clk_dep);
152 switch ((reg >> 4) & 0xf) {
153 case 0:
154 freq = 40;
155 break;
156 case 1:
157 freq = 54;
158 break;
159 case 2:
160 freq = 66;
161 break;
162 case 3:
163 freq = 83;
164 break;
165 case 4:
166 freq = 104;
167 break;
168 default:
169 freq = 54;
170 break;
171 }
172 first_time = 1; 198 first_time = 1;
173 } 199 }
174 200
@@ -180,7 +206,7 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
180 t_avdh = 2; 206 t_avdh = 2;
181 t_ach = 3; 207 t_ach = 3;
182 t_aavdh = 6; 208 t_aavdh = 6;
183 t_rdyo = 9; 209 t_rdyo = 6;
184 break; 210 break;
185 case 83: 211 case 83:
186 min_gpmc_clk_period = 12000; /* 83 MHz */ 212 min_gpmc_clk_period = 12000; /* 83 MHz */
@@ -217,16 +243,36 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
217 gpmc_clk_ns = gpmc_ticks_to_ns(div); 243 gpmc_clk_ns = gpmc_ticks_to_ns(div);
218 if (gpmc_clk_ns < 15) /* >66Mhz */ 244 if (gpmc_clk_ns < 15) /* >66Mhz */
219 hf = 1; 245 hf = 1;
220 if (hf) 246 if (gpmc_clk_ns < 12) /* >83Mhz */
247 vhf = 1;
248 if (vhf)
249 latency = 8;
250 else if (hf)
221 latency = 6; 251 latency = 6;
222 else if (gpmc_clk_ns >= 25) /* 40 MHz*/ 252 else if (gpmc_clk_ns >= 25) /* 40 MHz*/
223 latency = 3; 253 latency = 3;
224 else 254 else
225 latency = 4; 255 latency = 4;
226 256
257 if (clk_dep) {
258 if (gpmc_clk_ns < 12) { /* >83Mhz */
259 t_ces = 3;
260 t_avds = 4;
261 } else if (gpmc_clk_ns < 15) { /* >66Mhz */
262 t_ces = 5;
263 t_avds = 4;
264 } else if (gpmc_clk_ns < 25) { /* >40Mhz */
265 t_ces = 6;
266 t_avds = 5;
267 } else {
268 t_ces = 7;
269 t_avds = 7;
270 }
271 }
272
227 if (first_time) 273 if (first_time)
228 set_onenand_cfg(onenand_base, latency, 274 set_onenand_cfg(onenand_base, latency,
229 sync_read, sync_write, hf); 275 sync_read, sync_write, hf, vhf);
230 276
231 if (div == 1) { 277 if (div == 1) {
232 reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG2); 278 reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG2);
@@ -264,6 +310,9 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
264 /* Read */ 310 /* Read */
265 t.adv_rd_off = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_avdh)); 311 t.adv_rd_off = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_avdh));
266 t.oe_on = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_ach)); 312 t.oe_on = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_ach));
313 /* Force at least 1 clk between AVD High to OE Low */
314 if (t.oe_on <= t.adv_rd_off)
315 t.oe_on = t.adv_rd_off + gpmc_round_ns_to_ticks(1);
267 t.access = gpmc_ticks_to_ns(fclk_offset + (latency + 1) * div); 316 t.access = gpmc_ticks_to_ns(fclk_offset + (latency + 1) * div);
268 t.oe_off = t.access + gpmc_round_ns_to_ticks(1); 317 t.oe_off = t.access + gpmc_round_ns_to_ticks(1);
269 t.cs_rd_off = t.oe_off; 318 t.cs_rd_off = t.oe_off;
@@ -317,18 +366,20 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
317 if (err) 366 if (err)
318 return err; 367 return err;
319 368
320 set_onenand_cfg(onenand_base, latency, sync_read, sync_write, hf); 369 set_onenand_cfg(onenand_base, latency, sync_read, sync_write, hf, vhf);
370
371 *freq_ptr = freq;
321 372
322 return 0; 373 return 0;
323} 374}
324 375
325static int gpmc_onenand_setup(void __iomem *onenand_base, int freq) 376static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
326{ 377{
327 struct device *dev = &gpmc_onenand_device.dev; 378 struct device *dev = &gpmc_onenand_device.dev;
328 379
329 /* Set sync timings in GPMC */ 380 /* Set sync timings in GPMC */
330 if (omap2_onenand_set_sync_mode(gpmc_onenand_data, onenand_base, 381 if (omap2_onenand_set_sync_mode(gpmc_onenand_data, onenand_base,
331 freq) < 0) { 382 freq_ptr) < 0) {
332 dev_err(dev, "Unable to set synchronous mode\n"); 383 dev_err(dev, "Unable to set synchronous mode\n");
333 return -EINVAL; 384 return -EINVAL;
334 } 385 }
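omap2_onenand_get_freq() above first offers the manufacturer/device/version IDs to an optional board cfg->get_freq() hook, which may also request the clock-dependent t_ces/t_avds timings via *clk_dep, and only falls back to decoding the version register when the hook declines. A sketch of such a hook; the ID check and the returned frequency are purely illustrative:

static int board_onenand_get_freq(struct onenand_freq_info *freq_info,
				  bool *clk_dep)
{
	if (freq_info->maf_id == 0xec) {	/* hypothetical vendor check */
		*clk_dep = true;		/* use clock-dependent timings */
		return 104;			/* MHz */
	}
	return 0;	/* let the version-register decode pick the value */
}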
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 1b7b3e7d02f7..674174365f78 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -14,6 +14,7 @@
14 */ 14 */
15#undef DEBUG 15#undef DEBUG
16 16
17#include <linux/irq.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/err.h> 20#include <linux/err.h>
@@ -22,6 +23,7 @@
22#include <linux/spinlock.h> 23#include <linux/spinlock.h>
23#include <linux/io.h> 24#include <linux/io.h>
24#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/interrupt.h>
25 27
26#include <asm/mach-types.h> 28#include <asm/mach-types.h>
27#include <plat/gpmc.h> 29#include <plat/gpmc.h>
@@ -58,7 +60,6 @@
58#define GPMC_CHUNK_SHIFT 24 /* 16 MB */ 60#define GPMC_CHUNK_SHIFT 24 /* 16 MB */
59#define GPMC_SECTION_SHIFT 28 /* 128 MB */ 61#define GPMC_SECTION_SHIFT 28 /* 128 MB */
60 62
61#define PREFETCH_FIFOTHRESHOLD (0x40 << 8)
62#define CS_NUM_SHIFT 24 63#define CS_NUM_SHIFT 24
63#define ENABLE_PREFETCH (0x1 << 7) 64#define ENABLE_PREFETCH (0x1 << 7)
64#define DMA_MPU_MODE 2 65#define DMA_MPU_MODE 2
@@ -100,6 +101,8 @@ static void __iomem *gpmc_base;
100 101
101static struct clk *gpmc_l3_clk; 102static struct clk *gpmc_l3_clk;
102 103
104static irqreturn_t gpmc_handle_irq(int irq, void *dev);
105
103static void gpmc_write_reg(int idx, u32 val) 106static void gpmc_write_reg(int idx, u32 val)
104{ 107{
105 __raw_writel(val, gpmc_base + idx); 108 __raw_writel(val, gpmc_base + idx);
@@ -497,6 +500,10 @@ int gpmc_cs_configure(int cs, int cmd, int wval)
497 u32 regval = 0; 500 u32 regval = 0;
498 501
499 switch (cmd) { 502 switch (cmd) {
503 case GPMC_ENABLE_IRQ:
504 gpmc_write_reg(GPMC_IRQENABLE, wval);
505 break;
506
500 case GPMC_SET_IRQ_STATUS: 507 case GPMC_SET_IRQ_STATUS:
501 gpmc_write_reg(GPMC_IRQSTATUS, wval); 508 gpmc_write_reg(GPMC_IRQSTATUS, wval);
502 break; 509 break;
@@ -598,15 +605,19 @@ EXPORT_SYMBOL(gpmc_nand_write);
598/** 605/**
599 * gpmc_prefetch_enable - configures and starts prefetch transfer 606 * gpmc_prefetch_enable - configures and starts prefetch transfer
600 * @cs: cs (chip select) number 607 * @cs: cs (chip select) number
608 * @fifo_th: fifo threshold to be used for read/ write
601 * @dma_mode: dma mode enable (1) or disable (0) 609 * @dma_mode: dma mode enable (1) or disable (0)
602 * @u32_count: number of bytes to be transferred 610 * @u32_count: number of bytes to be transferred
603 * @is_write: prefetch read(0) or write post(1) mode 611 * @is_write: prefetch read(0) or write post(1) mode
604 */ 612 */
605int gpmc_prefetch_enable(int cs, int dma_mode, 613int gpmc_prefetch_enable(int cs, int fifo_th, int dma_mode,
606 unsigned int u32_count, int is_write) 614 unsigned int u32_count, int is_write)
607{ 615{
608 616
609 if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) { 617 if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX) {
618 pr_err("gpmc: fifo threshold is not supported\n");
619 return -1;
620 } else if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) {
610 /* Set the amount of bytes to be prefetched */ 621 /* Set the amount of bytes to be prefetched */
611 gpmc_write_reg(GPMC_PREFETCH_CONFIG2, u32_count); 622 gpmc_write_reg(GPMC_PREFETCH_CONFIG2, u32_count);
612 623
@@ -614,7 +625,7 @@ int gpmc_prefetch_enable(int cs, int dma_mode,
614 * enable the engine. Set which cs is has requested for. 625 * enable the engine. Set which cs is has requested for.
615 */ 626 */
616 gpmc_write_reg(GPMC_PREFETCH_CONFIG1, ((cs << CS_NUM_SHIFT) | 627 gpmc_write_reg(GPMC_PREFETCH_CONFIG1, ((cs << CS_NUM_SHIFT) |
617 PREFETCH_FIFOTHRESHOLD | 628 PREFETCH_FIFOTHRESHOLD(fifo_th) |
618 ENABLE_PREFETCH | 629 ENABLE_PREFETCH |
619 (dma_mode << DMA_MPU_MODE) | 630 (dma_mode << DMA_MPU_MODE) |
620 (0x1 & is_write))); 631 (0x1 & is_write)));
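gpmc_prefetch_enable() now takes the FIFO threshold from its caller instead of a fixed 0x40-word value, and rejects anything above PREFETCH_FIFOTHRESHOLD_MAX. A sketch of a read-side caller, assuming a driver-private info structure; the threshold and length are illustrative:

	ret = gpmc_prefetch_enable(info->gpmc_cs, PREFETCH_FIFOTHRESHOLD_MAX,
				   0 /* MPU copy, no DMA */, len, 0 /* read */);
	if (ret < 0)
		return ret;	/* unsupported threshold, or engine already in use */
	/* ... drain the prefetch FIFO ... */
	gpmc_prefetch_reset(info->gpmc_cs);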
@@ -678,9 +689,10 @@ static void __init gpmc_mem_init(void)
678 } 689 }
679} 690}
680 691
681void __init gpmc_init(void) 692static int __init gpmc_init(void)
682{ 693{
683 u32 l; 694 u32 l, irq;
695 int cs, ret = -EINVAL;
684 char *ck = NULL; 696 char *ck = NULL;
685 697
686 if (cpu_is_omap24xx()) { 698 if (cpu_is_omap24xx()) {
@@ -698,7 +710,7 @@ void __init gpmc_init(void)
698 } 710 }
699 711
700 if (WARN_ON(!ck)) 712 if (WARN_ON(!ck))
701 return; 713 return ret;
702 714
703 gpmc_l3_clk = clk_get(NULL, ck); 715 gpmc_l3_clk = clk_get(NULL, ck);
704 if (IS_ERR(gpmc_l3_clk)) { 716 if (IS_ERR(gpmc_l3_clk)) {
@@ -723,6 +735,36 @@ void __init gpmc_init(void)
723 l |= (0x02 << 3) | (1 << 0); 735 l |= (0x02 << 3) | (1 << 0);
724 gpmc_write_reg(GPMC_SYSCONFIG, l); 736 gpmc_write_reg(GPMC_SYSCONFIG, l);
725 gpmc_mem_init(); 737 gpmc_mem_init();
738
 739 /* initialize the chained per-CS GPMC interrupts */
740 irq = OMAP_GPMC_IRQ_BASE;
741 for (cs = 0; cs < GPMC_CS_NUM; cs++) {
742 set_irq_handler(irq, handle_simple_irq);
743 set_irq_flags(irq, IRQF_VALID);
744 irq++;
745 }
746
747 ret = request_irq(INT_34XX_GPMC_IRQ,
748 gpmc_handle_irq, IRQF_SHARED, "gpmc", gpmc_base);
749 if (ret)
750 pr_err("gpmc: irq-%d could not claim: err %d\n",
751 INT_34XX_GPMC_IRQ, ret);
752 return ret;
753}
754postcore_initcall(gpmc_init);
755
756static irqreturn_t gpmc_handle_irq(int irq, void *dev)
757{
758 u8 cs;
759
760 if (irq != INT_34XX_GPMC_IRQ)
761 return IRQ_HANDLED;
762 /* check cs to invoke the irq */
763 cs = ((gpmc_read_reg(GPMC_PREFETCH_CONFIG1)) >> CS_NUM_SHIFT) & 0x7;
764 if (OMAP_GPMC_IRQ_BASE+cs <= OMAP_GPMC_IRQ_END)
765 generic_handle_irq(OMAP_GPMC_IRQ_BASE+cs);
766
767 return IRQ_HANDLED;
726} 768}
727 769
728#ifdef CONFIG_ARCH_OMAP3 770#ifdef CONFIG_ARCH_OMAP3
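gpmc_init() above also becomes a postcore initcall (matching the removal of the explicit call from io.c further down) and fans the single GPMC interrupt out to one virtual IRQ per chip-select at OMAP_GPMC_IRQ_BASE + cs. A sketch of how a per-CS client might claim its line after enabling the events it needs with gpmc_cs_configure(cs, GPMC_ENABLE_IRQ, ...); the handler name and dev_id are hypothetical:

	info->gpmc_irq = OMAP_GPMC_IRQ_BASE + info->gpmc_cs;
	err = request_irq(info->gpmc_irq, omap_nand_irq, 0, "gpmc-nand", info);
	if (err)
		return err;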
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index 34272e4863fd..5496bc7d40ad 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -350,6 +350,11 @@ void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
350 mmc->slots[0].after_set_reg = NULL; 350 mmc->slots[0].after_set_reg = NULL;
351 } 351 }
352 break; 352 break;
353 case 4:
354 case 5:
355 mmc->slots[0].before_set_reg = NULL;
356 mmc->slots[0].after_set_reg = NULL;
357 break;
353 default: 358 default:
354 pr_err("MMC%d configuration not supported!\n", c->mmc); 359 pr_err("MMC%d configuration not supported!\n", c->mmc);
355 kfree(mmc); 360 kfree(mmc);
diff --git a/arch/arm/mach-omap2/hwspinlock.c b/arch/arm/mach-omap2/hwspinlock.c
new file mode 100644
index 000000000000..06d4a80660a5
--- /dev/null
+++ b/arch/arm/mach-omap2/hwspinlock.c
@@ -0,0 +1,63 @@
1/*
2 * OMAP hardware spinlock device initialization
3 *
4 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Contact: Simon Que <sque@ti.com>
7 * Hari Kanigeri <h-kanigeri2@ti.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/err.h>
22
23#include <plat/omap_hwmod.h>
24#include <plat/omap_device.h>
25
26struct omap_device_pm_latency omap_spinlock_latency[] = {
27 {
28 .deactivate_func = omap_device_idle_hwmods,
29 .activate_func = omap_device_enable_hwmods,
30 .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
31 }
32};
33
34int __init hwspinlocks_init(void)
35{
36 int retval = 0;
37 struct omap_hwmod *oh;
38 struct omap_device *od;
39 const char *oh_name = "spinlock";
40 const char *dev_name = "omap_hwspinlock";
41
42 /*
43 * Hwmod lookup will fail in case our platform doesn't support the
44 * hardware spinlock module, so it is safe to run this initcall
45 * on all omaps
46 */
47 oh = omap_hwmod_lookup(oh_name);
48 if (oh == NULL)
49 return -EINVAL;
50
51 od = omap_device_build(dev_name, 0, oh, NULL, 0,
52 omap_spinlock_latency,
53 ARRAY_SIZE(omap_spinlock_latency), false);
54 if (IS_ERR(od)) {
55 pr_err("Can't build omap_device for %s:%s\n", dev_name,
56 oh_name);
57 retval = PTR_ERR(od);
58 }
59
60 return retval;
61}
62/* early board code might need to reserve specific hwspinlock instances */
63postcore_initcall(hwspinlocks_init);
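hwspinlocks_init() only builds the "omap_hwspinlock" omap_device; clients then go through the generic hwspinlock API added by this series. A minimal sketch of a kernel-side user, assuming the hwspin_lock_request()/hwspin_lock_timeout()/hwspin_unlock() calls from include/linux/hwspinlock.h; the 100 ms timeout is arbitrary:

#include <linux/hwspinlock.h>

static int example_hwspinlock_user(void)
{
	struct hwspinlock *lock;
	int ret;

	lock = hwspin_lock_request();	/* or hwspin_lock_request_specific(id) */
	if (!lock)
		return -EBUSY;

	/* hwspin_lock_get_id(lock) yields the id to share with the remote side */

	ret = hwspin_lock_timeout(lock, 100);	/* msecs; preemption stays disabled while held */
	if (ret == 0) {
		/* ... access the shared data structure ... */
		hwspin_unlock(lock);
	}

	hwspin_lock_free(lock);
	return ret;
}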
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index b8b49e4ae928..657f3c84687c 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -30,7 +30,6 @@
30 30
31#include <plat/sram.h> 31#include <plat/sram.h>
32#include <plat/sdrc.h> 32#include <plat/sdrc.h>
33#include <plat/gpmc.h>
34#include <plat/serial.h> 33#include <plat/serial.h>
35 34
36#include "clock2xxx.h" 35#include "clock2xxx.h"
@@ -422,7 +421,6 @@ void __init omap2_init_common_devices(struct omap_sdrc_params *sdrc_cs0,
422 omap2_sdrc_init(sdrc_cs0, sdrc_cs1); 421 omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
423 _omap2_init_reprogram_sdrc(); 422 _omap2_init_reprogram_sdrc();
424 } 423 }
425 gpmc_init();
426 424
427 omap_irq_base_init(); 425 omap_irq_base_init();
428} 426}
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index b85c630b64d6..7fffd340c76f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -18,6 +18,7 @@
18#include <plat/serial.h> 18#include <plat/serial.h>
19#include <plat/i2c.h> 19#include <plat/i2c.h>
20#include <plat/gpio.h> 20#include <plat/gpio.h>
21#include <plat/mcspi.h>
21 22
22#include "omap_hwmod_common_data.h" 23#include "omap_hwmod_common_data.h"
23 24
@@ -44,6 +45,8 @@ static struct omap_hwmod omap2420_gpio2_hwmod;
44static struct omap_hwmod omap2420_gpio3_hwmod; 45static struct omap_hwmod omap2420_gpio3_hwmod;
45static struct omap_hwmod omap2420_gpio4_hwmod; 46static struct omap_hwmod omap2420_gpio4_hwmod;
46static struct omap_hwmod omap2420_dma_system_hwmod; 47static struct omap_hwmod omap2420_dma_system_hwmod;
48static struct omap_hwmod omap2420_mcspi1_hwmod;
49static struct omap_hwmod omap2420_mcspi2_hwmod;
47 50
48/* L3 -> L4_CORE interface */ 51/* L3 -> L4_CORE interface */
49static struct omap_hwmod_ocp_if omap2420_l3_main__l4_core = { 52static struct omap_hwmod_ocp_if omap2420_l3_main__l4_core = {
@@ -88,6 +91,42 @@ static struct omap_hwmod omap2420_uart3_hwmod;
88static struct omap_hwmod omap2420_i2c1_hwmod; 91static struct omap_hwmod omap2420_i2c1_hwmod;
89static struct omap_hwmod omap2420_i2c2_hwmod; 92static struct omap_hwmod omap2420_i2c2_hwmod;
90 93
94/* l4 core -> mcspi1 interface */
95static struct omap_hwmod_addr_space omap2420_mcspi1_addr_space[] = {
96 {
97 .pa_start = 0x48098000,
98 .pa_end = 0x480980ff,
99 .flags = ADDR_TYPE_RT,
100 },
101};
102
103static struct omap_hwmod_ocp_if omap2420_l4_core__mcspi1 = {
104 .master = &omap2420_l4_core_hwmod,
105 .slave = &omap2420_mcspi1_hwmod,
106 .clk = "mcspi1_ick",
107 .addr = omap2420_mcspi1_addr_space,
108 .addr_cnt = ARRAY_SIZE(omap2420_mcspi1_addr_space),
109 .user = OCP_USER_MPU | OCP_USER_SDMA,
110};
111
112/* l4 core -> mcspi2 interface */
113static struct omap_hwmod_addr_space omap2420_mcspi2_addr_space[] = {
114 {
115 .pa_start = 0x4809a000,
116 .pa_end = 0x4809a0ff,
117 .flags = ADDR_TYPE_RT,
118 },
119};
120
121static struct omap_hwmod_ocp_if omap2420_l4_core__mcspi2 = {
122 .master = &omap2420_l4_core_hwmod,
123 .slave = &omap2420_mcspi2_hwmod,
124 .clk = "mcspi2_ick",
125 .addr = omap2420_mcspi2_addr_space,
126 .addr_cnt = ARRAY_SIZE(omap2420_mcspi2_addr_space),
127 .user = OCP_USER_MPU | OCP_USER_SDMA,
128};
129
91/* L4_CORE -> L4_WKUP interface */ 130/* L4_CORE -> L4_WKUP interface */
92static struct omap_hwmod_ocp_if omap2420_l4_core__l4_wkup = { 131static struct omap_hwmod_ocp_if omap2420_l4_core__l4_wkup = {
93 .master = &omap2420_l4_core_hwmod, 132 .master = &omap2420_l4_core_hwmod,
@@ -864,6 +903,119 @@ static struct omap_hwmod omap2420_dma_system_hwmod = {
864 .flags = HWMOD_NO_IDLEST, 903 .flags = HWMOD_NO_IDLEST,
865}; 904};
866 905
906/*
907 * 'mcspi' class
908 * multichannel serial port interface (mcspi) / master/slave synchronous serial
909 * bus
910 */
911
912static struct omap_hwmod_class_sysconfig omap2420_mcspi_sysc = {
913 .rev_offs = 0x0000,
914 .sysc_offs = 0x0010,
915 .syss_offs = 0x0014,
916 .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
917 SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
918 SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
919 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
920 .sysc_fields = &omap_hwmod_sysc_type1,
921};
922
923static struct omap_hwmod_class omap2420_mcspi_class = {
924 .name = "mcspi",
925 .sysc = &omap2420_mcspi_sysc,
926 .rev = OMAP2_MCSPI_REV,
927};
928
929/* mcspi1 */
930static struct omap_hwmod_irq_info omap2420_mcspi1_mpu_irqs[] = {
931 { .irq = 65 },
932};
933
934static struct omap_hwmod_dma_info omap2420_mcspi1_sdma_reqs[] = {
935 { .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */
936 { .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */
937 { .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */
938 { .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */
939 { .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */
940 { .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */
941 { .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */
942 { .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */
943};
944
945static struct omap_hwmod_ocp_if *omap2420_mcspi1_slaves[] = {
946 &omap2420_l4_core__mcspi1,
947};
948
949static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
950 .num_chipselect = 4,
951};
952
953static struct omap_hwmod omap2420_mcspi1_hwmod = {
954 .name = "mcspi1_hwmod",
955 .mpu_irqs = omap2420_mcspi1_mpu_irqs,
956 .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mcspi1_mpu_irqs),
957 .sdma_reqs = omap2420_mcspi1_sdma_reqs,
958 .sdma_reqs_cnt = ARRAY_SIZE(omap2420_mcspi1_sdma_reqs),
959 .main_clk = "mcspi1_fck",
960 .prcm = {
961 .omap2 = {
962 .module_offs = CORE_MOD,
963 .prcm_reg_id = 1,
964 .module_bit = OMAP24XX_EN_MCSPI1_SHIFT,
965 .idlest_reg_id = 1,
966 .idlest_idle_bit = OMAP24XX_ST_MCSPI1_SHIFT,
967 },
968 },
969 .slaves = omap2420_mcspi1_slaves,
970 .slaves_cnt = ARRAY_SIZE(omap2420_mcspi1_slaves),
971 .class = &omap2420_mcspi_class,
972 .dev_attr = &omap_mcspi1_dev_attr,
973 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
974};
975
976/* mcspi2 */
977static struct omap_hwmod_irq_info omap2420_mcspi2_mpu_irqs[] = {
978 { .irq = 66 },
979};
980
981static struct omap_hwmod_dma_info omap2420_mcspi2_sdma_reqs[] = {
982 { .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */
983 { .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */
984 { .name = "tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */
985 { .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */
986};
987
988static struct omap_hwmod_ocp_if *omap2420_mcspi2_slaves[] = {
989 &omap2420_l4_core__mcspi2,
990};
991
992static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
993 .num_chipselect = 2,
994};
995
996static struct omap_hwmod omap2420_mcspi2_hwmod = {
997 .name = "mcspi2_hwmod",
998 .mpu_irqs = omap2420_mcspi2_mpu_irqs,
999 .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mcspi2_mpu_irqs),
1000 .sdma_reqs = omap2420_mcspi2_sdma_reqs,
1001 .sdma_reqs_cnt = ARRAY_SIZE(omap2420_mcspi2_sdma_reqs),
1002 .main_clk = "mcspi2_fck",
1003 .prcm = {
1004 .omap2 = {
1005 .module_offs = CORE_MOD,
1006 .prcm_reg_id = 1,
1007 .module_bit = OMAP24XX_EN_MCSPI2_SHIFT,
1008 .idlest_reg_id = 1,
1009 .idlest_idle_bit = OMAP24XX_ST_MCSPI2_SHIFT,
1010 },
1011 },
1012 .slaves = omap2420_mcspi2_slaves,
1013 .slaves_cnt = ARRAY_SIZE(omap2420_mcspi2_slaves),
1014 .class = &omap2420_mcspi_class,
1015 .dev_attr = &omap_mcspi2_dev_attr,
1016 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
1017};
1018
867static __initdata struct omap_hwmod *omap2420_hwmods[] = { 1019static __initdata struct omap_hwmod *omap2420_hwmods[] = {
868 &omap2420_l3_main_hwmod, 1020 &omap2420_l3_main_hwmod,
869 &omap2420_l4_core_hwmod, 1021 &omap2420_l4_core_hwmod,
@@ -885,6 +1037,10 @@ static __initdata struct omap_hwmod *omap2420_hwmods[] = {
885 1037
886 /* dma_system class*/ 1038 /* dma_system class*/
887 &omap2420_dma_system_hwmod, 1039 &omap2420_dma_system_hwmod,
1040
1041 /* mcspi class */
1042 &omap2420_mcspi1_hwmod,
1043 &omap2420_mcspi2_hwmod,
888 NULL, 1044 NULL,
889}; 1045};
890 1046
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 8ecfbcde13ba..7ba688a1c840 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -18,6 +18,7 @@
18#include <plat/serial.h> 18#include <plat/serial.h>
19#include <plat/i2c.h> 19#include <plat/i2c.h>
20#include <plat/gpio.h> 20#include <plat/gpio.h>
21#include <plat/mcspi.h>
21 22
22#include "omap_hwmod_common_data.h" 23#include "omap_hwmod_common_data.h"
23 24
@@ -45,6 +46,9 @@ static struct omap_hwmod omap2430_gpio3_hwmod;
45static struct omap_hwmod omap2430_gpio4_hwmod; 46static struct omap_hwmod omap2430_gpio4_hwmod;
46static struct omap_hwmod omap2430_gpio5_hwmod; 47static struct omap_hwmod omap2430_gpio5_hwmod;
47static struct omap_hwmod omap2430_dma_system_hwmod; 48static struct omap_hwmod omap2430_dma_system_hwmod;
49static struct omap_hwmod omap2430_mcspi1_hwmod;
50static struct omap_hwmod omap2430_mcspi2_hwmod;
51static struct omap_hwmod omap2430_mcspi3_hwmod;
48 52
49/* L3 -> L4_CORE interface */ 53/* L3 -> L4_CORE interface */
50static struct omap_hwmod_ocp_if omap2430_l3_main__l4_core = { 54static struct omap_hwmod_ocp_if omap2430_l3_main__l4_core = {
@@ -89,6 +93,16 @@ static struct omap_hwmod omap2430_uart3_hwmod;
89static struct omap_hwmod omap2430_i2c1_hwmod; 93static struct omap_hwmod omap2430_i2c1_hwmod;
90static struct omap_hwmod omap2430_i2c2_hwmod; 94static struct omap_hwmod omap2430_i2c2_hwmod;
91 95
96static struct omap_hwmod omap2430_usbhsotg_hwmod;
97
98/* l3_core -> usbhsotg interface */
99static struct omap_hwmod_ocp_if omap2430_usbhsotg__l3 = {
100 .master = &omap2430_usbhsotg_hwmod,
101 .slave = &omap2430_l3_main_hwmod,
102 .clk = "core_l3_ck",
103 .user = OCP_USER_MPU,
104};
105
92/* I2C IP block address space length (in bytes) */ 106/* I2C IP block address space length (in bytes) */
93#define OMAP2_I2C_AS_LEN 128 107#define OMAP2_I2C_AS_LEN 128
94 108
@@ -189,6 +203,35 @@ static struct omap_hwmod_ocp_if omap2_l4_core__uart3 = {
189 .user = OCP_USER_MPU | OCP_USER_SDMA, 203 .user = OCP_USER_MPU | OCP_USER_SDMA,
190}; 204};
191 205
206/*
207* usbhsotg interface data
208*/
209static struct omap_hwmod_addr_space omap2430_usbhsotg_addrs[] = {
210 {
211 .pa_start = OMAP243X_HS_BASE,
212 .pa_end = OMAP243X_HS_BASE + SZ_4K - 1,
213 .flags = ADDR_TYPE_RT
214 },
215};
216
217/* l4_core ->usbhsotg interface */
218static struct omap_hwmod_ocp_if omap2430_l4_core__usbhsotg = {
219 .master = &omap2430_l4_core_hwmod,
220 .slave = &omap2430_usbhsotg_hwmod,
221 .clk = "usb_l4_ick",
222 .addr = omap2430_usbhsotg_addrs,
223 .addr_cnt = ARRAY_SIZE(omap2430_usbhsotg_addrs),
224 .user = OCP_USER_MPU,
225};
226
227static struct omap_hwmod_ocp_if *omap2430_usbhsotg_masters[] = {
228 &omap2430_usbhsotg__l3,
229};
230
231static struct omap_hwmod_ocp_if *omap2430_usbhsotg_slaves[] = {
232 &omap2430_l4_core__usbhsotg,
233};
234
192/* Slave interfaces on the L4_CORE interconnect */ 235/* Slave interfaces on the L4_CORE interconnect */
193static struct omap_hwmod_ocp_if *omap2430_l4_core_slaves[] = { 236static struct omap_hwmod_ocp_if *omap2430_l4_core_slaves[] = {
194 &omap2430_l3_main__l4_core, 237 &omap2430_l3_main__l4_core,
@@ -223,6 +266,60 @@ static struct omap_hwmod_ocp_if *omap2430_l4_wkup_slaves[] = {
223static struct omap_hwmod_ocp_if *omap2430_l4_wkup_masters[] = { 266static struct omap_hwmod_ocp_if *omap2430_l4_wkup_masters[] = {
224}; 267};
225 268
269/* l4 core -> mcspi1 interface */
270static struct omap_hwmod_addr_space omap2430_mcspi1_addr_space[] = {
271 {
272 .pa_start = 0x48098000,
273 .pa_end = 0x480980ff,
274 .flags = ADDR_TYPE_RT,
275 },
276};
277
278static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi1 = {
279 .master = &omap2430_l4_core_hwmod,
280 .slave = &omap2430_mcspi1_hwmod,
281 .clk = "mcspi1_ick",
282 .addr = omap2430_mcspi1_addr_space,
283 .addr_cnt = ARRAY_SIZE(omap2430_mcspi1_addr_space),
284 .user = OCP_USER_MPU | OCP_USER_SDMA,
285};
286
287/* l4 core -> mcspi2 interface */
288static struct omap_hwmod_addr_space omap2430_mcspi2_addr_space[] = {
289 {
290 .pa_start = 0x4809a000,
291 .pa_end = 0x4809a0ff,
292 .flags = ADDR_TYPE_RT,
293 },
294};
295
296static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi2 = {
297 .master = &omap2430_l4_core_hwmod,
298 .slave = &omap2430_mcspi2_hwmod,
299 .clk = "mcspi2_ick",
300 .addr = omap2430_mcspi2_addr_space,
301 .addr_cnt = ARRAY_SIZE(omap2430_mcspi2_addr_space),
302 .user = OCP_USER_MPU | OCP_USER_SDMA,
303};
304
305/* l4 core -> mcspi3 interface */
306static struct omap_hwmod_addr_space omap2430_mcspi3_addr_space[] = {
307 {
308 .pa_start = 0x480b8000,
309 .pa_end = 0x480b80ff,
310 .flags = ADDR_TYPE_RT,
311 },
312};
313
314static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi3 = {
315 .master = &omap2430_l4_core_hwmod,
316 .slave = &omap2430_mcspi3_hwmod,
317 .clk = "mcspi3_ick",
318 .addr = omap2430_mcspi3_addr_space,
319 .addr_cnt = ARRAY_SIZE(omap2430_mcspi3_addr_space),
320 .user = OCP_USER_MPU | OCP_USER_SDMA,
321};
322
226/* L4 WKUP */ 323/* L4 WKUP */
227static struct omap_hwmod omap2430_l4_wkup_hwmod = { 324static struct omap_hwmod omap2430_l4_wkup_hwmod = {
228 .name = "l4_wkup", 325 .name = "l4_wkup",
@@ -919,6 +1016,220 @@ static struct omap_hwmod omap2430_dma_system_hwmod = {
919 .flags = HWMOD_NO_IDLEST, 1016 .flags = HWMOD_NO_IDLEST,
920}; 1017};
921 1018
1019/*
1020 * 'mcspi' class
1021 * multichannel serial port interface (mcspi) / master/slave synchronous serial
1022 * bus
1023 */
1024
1025static struct omap_hwmod_class_sysconfig omap2430_mcspi_sysc = {
1026 .rev_offs = 0x0000,
1027 .sysc_offs = 0x0010,
1028 .syss_offs = 0x0014,
1029 .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
1030 SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
1031 SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
1032 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
1033 .sysc_fields = &omap_hwmod_sysc_type1,
1034};
1035
1036static struct omap_hwmod_class omap2430_mcspi_class = {
1037 .name = "mcspi",
1038 .sysc = &omap2430_mcspi_sysc,
1039 .rev = OMAP2_MCSPI_REV,
1040};
1041
1042/* mcspi1 */
1043static struct omap_hwmod_irq_info omap2430_mcspi1_mpu_irqs[] = {
1044 { .irq = 65 },
1045};
1046
1047static struct omap_hwmod_dma_info omap2430_mcspi1_sdma_reqs[] = {
1048 { .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */
1049 { .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */
1050 { .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */
1051 { .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */
1052 { .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */
1053 { .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */
1054 { .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */
1055 { .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */
1056};
1057
1058static struct omap_hwmod_ocp_if *omap2430_mcspi1_slaves[] = {
1059 &omap2430_l4_core__mcspi1,
1060};
1061
1062static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
1063 .num_chipselect = 4,
1064};
1065
1066static struct omap_hwmod omap2430_mcspi1_hwmod = {
1067 .name = "mcspi1_hwmod",
1068 .mpu_irqs = omap2430_mcspi1_mpu_irqs,
1069 .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcspi1_mpu_irqs),
1070 .sdma_reqs = omap2430_mcspi1_sdma_reqs,
1071 .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcspi1_sdma_reqs),
1072 .main_clk = "mcspi1_fck",
1073 .prcm = {
1074 .omap2 = {
1075 .module_offs = CORE_MOD,
1076 .prcm_reg_id = 1,
1077 .module_bit = OMAP24XX_EN_MCSPI1_SHIFT,
1078 .idlest_reg_id = 1,
1079 .idlest_idle_bit = OMAP24XX_ST_MCSPI1_SHIFT,
1080 },
1081 },
1082 .slaves = omap2430_mcspi1_slaves,
1083 .slaves_cnt = ARRAY_SIZE(omap2430_mcspi1_slaves),
1084 .class = &omap2430_mcspi_class,
1085 .dev_attr = &omap_mcspi1_dev_attr,
1086 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
1087};
1088
1089/* mcspi2 */
1090static struct omap_hwmod_irq_info omap2430_mcspi2_mpu_irqs[] = {
1091 { .irq = 66 },
1092};
1093
1094static struct omap_hwmod_dma_info omap2430_mcspi2_sdma_reqs[] = {
1095 { .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */
1096 { .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */
1097 { .name = "tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */
1098 { .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */
1099};
1100
1101static struct omap_hwmod_ocp_if *omap2430_mcspi2_slaves[] = {
1102 &omap2430_l4_core__mcspi2,
1103};
1104
1105static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
1106 .num_chipselect = 2,
1107};
1108
1109static struct omap_hwmod omap2430_mcspi2_hwmod = {
1110 .name = "mcspi2_hwmod",
1111 .mpu_irqs = omap2430_mcspi2_mpu_irqs,
1112 .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcspi2_mpu_irqs),
1113 .sdma_reqs = omap2430_mcspi2_sdma_reqs,
1114 .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcspi2_sdma_reqs),
1115 .main_clk = "mcspi2_fck",
1116 .prcm = {
1117 .omap2 = {
1118 .module_offs = CORE_MOD,
1119 .prcm_reg_id = 1,
1120 .module_bit = OMAP24XX_EN_MCSPI2_SHIFT,
1121 .idlest_reg_id = 1,
1122 .idlest_idle_bit = OMAP24XX_ST_MCSPI2_SHIFT,
1123 },
1124 },
1125 .slaves = omap2430_mcspi2_slaves,
1126 .slaves_cnt = ARRAY_SIZE(omap2430_mcspi2_slaves),
1127 .class = &omap2430_mcspi_class,
1128 .dev_attr = &omap_mcspi2_dev_attr,
1129 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
1130};
1131
1132/* mcspi3 */
1133static struct omap_hwmod_irq_info omap2430_mcspi3_mpu_irqs[] = {
1134 { .irq = 91 },
1135};
1136
1137static struct omap_hwmod_dma_info omap2430_mcspi3_sdma_reqs[] = {
1138 { .name = "tx0", .dma_req = 15 }, /* DMA_SPI3_TX0 */
1139 { .name = "rx0", .dma_req = 16 }, /* DMA_SPI3_RX0 */
1140 { .name = "tx1", .dma_req = 23 }, /* DMA_SPI3_TX1 */
1141 { .name = "rx1", .dma_req = 24 }, /* DMA_SPI3_RX1 */
1142};
1143
1144static struct omap_hwmod_ocp_if *omap2430_mcspi3_slaves[] = {
1145 &omap2430_l4_core__mcspi3,
1146};
1147
1148static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
1149 .num_chipselect = 2,
1150};
1151
1152static struct omap_hwmod omap2430_mcspi3_hwmod = {
1153 .name = "mcspi3_hwmod",
1154 .mpu_irqs = omap2430_mcspi3_mpu_irqs,
1155 .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcspi3_mpu_irqs),
1156 .sdma_reqs = omap2430_mcspi3_sdma_reqs,
1157 .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcspi3_sdma_reqs),
1158 .main_clk = "mcspi3_fck",
1159 .prcm = {
1160 .omap2 = {
1161 .module_offs = CORE_MOD,
1162 .prcm_reg_id = 2,
1163 .module_bit = OMAP2430_EN_MCSPI3_SHIFT,
1164 .idlest_reg_id = 2,
1165 .idlest_idle_bit = OMAP2430_ST_MCSPI3_SHIFT,
1166 },
1167 },
1168 .slaves = omap2430_mcspi3_slaves,
1169 .slaves_cnt = ARRAY_SIZE(omap2430_mcspi3_slaves),
1170 .class = &omap2430_mcspi_class,
1171 .dev_attr = &omap_mcspi3_dev_attr,
1172 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
1173};
1174
1175/*
1176 * usbhsotg
1177 */
1178static struct omap_hwmod_class_sysconfig omap2430_usbhsotg_sysc = {
1179 .rev_offs = 0x0400,
1180 .sysc_offs = 0x0404,
1181 .syss_offs = 0x0408,
1182 .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE|
1183 SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
1184 SYSC_HAS_AUTOIDLE),
1185 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
1186 MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
1187 .sysc_fields = &omap_hwmod_sysc_type1,
1188};
1189
1190static struct omap_hwmod_class usbotg_class = {
1191 .name = "usbotg",
1192 .sysc = &omap2430_usbhsotg_sysc,
1193};
1194
1195/* usb_otg_hs */
1196static struct omap_hwmod_irq_info omap2430_usbhsotg_mpu_irqs[] = {
1197
1198 { .name = "mc", .irq = 92 },
1199 { .name = "dma", .irq = 93 },
1200};
1201
1202static struct omap_hwmod omap2430_usbhsotg_hwmod = {
1203 .name = "usb_otg_hs",
1204 .mpu_irqs = omap2430_usbhsotg_mpu_irqs,
1205 .mpu_irqs_cnt = ARRAY_SIZE(omap2430_usbhsotg_mpu_irqs),
1206 .main_clk = "usbhs_ick",
1207 .prcm = {
1208 .omap2 = {
1209 .prcm_reg_id = 1,
1210 .module_bit = OMAP2430_EN_USBHS_MASK,
1211 .module_offs = CORE_MOD,
1212 .idlest_reg_id = 1,
1213 .idlest_idle_bit = OMAP2430_ST_USBHS_SHIFT,
1214 },
1215 },
1216 .masters = omap2430_usbhsotg_masters,
1217 .masters_cnt = ARRAY_SIZE(omap2430_usbhsotg_masters),
1218 .slaves = omap2430_usbhsotg_slaves,
1219 .slaves_cnt = ARRAY_SIZE(omap2430_usbhsotg_slaves),
1220 .class = &usbotg_class,
1221 /*
1222 * Erratum ID: i479 idle_req / idle_ack mechanism potentially
1223 * broken when autoidle is enabled
1224 * workaround is to disable the autoidle bit at module level.
1225 */
1226 .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
1227 | HWMOD_SWSUP_MSTANDBY,
1228 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
1229};
1230
1231
1232
922static __initdata struct omap_hwmod *omap2430_hwmods[] = { 1233static __initdata struct omap_hwmod *omap2430_hwmods[] = {
923 &omap2430_l3_main_hwmod, 1234 &omap2430_l3_main_hwmod,
924 &omap2430_l4_core_hwmod, 1235 &omap2430_l4_core_hwmod,
@@ -941,6 +1252,15 @@ static __initdata struct omap_hwmod *omap2430_hwmods[] = {
941 1252
942 /* dma_system class*/ 1253 /* dma_system class*/
943 &omap2430_dma_system_hwmod, 1254 &omap2430_dma_system_hwmod,
1255
1256 /* mcspi class */
1257 &omap2430_mcspi1_hwmod,
1258 &omap2430_mcspi2_hwmod,
1259 &omap2430_mcspi3_hwmod,
1260
1261 /* usbotg class*/
1262 &omap2430_usbhsotg_hwmod,
1263
944 NULL, 1264 NULL,
945}; 1265};
946 1266
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 8d8181334f86..879f55f272e2 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -22,12 +22,14 @@
22#include <plat/i2c.h> 22#include <plat/i2c.h>
23#include <plat/gpio.h> 23#include <plat/gpio.h>
24#include <plat/smartreflex.h> 24#include <plat/smartreflex.h>
25#include <plat/mcspi.h>
25 26
26#include "omap_hwmod_common_data.h" 27#include "omap_hwmod_common_data.h"
27 28
28#include "prm-regbits-34xx.h" 29#include "prm-regbits-34xx.h"
29#include "cm-regbits-34xx.h" 30#include "cm-regbits-34xx.h"
30#include "wd_timer.h" 31#include "wd_timer.h"
32#include <mach/am35xx.h>
31 33
32/* 34/*
33 * OMAP3xxx hardware module integration data 35 * OMAP3xxx hardware module integration data
@@ -55,6 +57,11 @@ static struct omap_hwmod omap3xxx_gpio5_hwmod;
55static struct omap_hwmod omap3xxx_gpio6_hwmod; 57static struct omap_hwmod omap3xxx_gpio6_hwmod;
56static struct omap_hwmod omap34xx_sr1_hwmod; 58static struct omap_hwmod omap34xx_sr1_hwmod;
57static struct omap_hwmod omap34xx_sr2_hwmod; 59static struct omap_hwmod omap34xx_sr2_hwmod;
60static struct omap_hwmod omap34xx_mcspi1;
61static struct omap_hwmod omap34xx_mcspi2;
62static struct omap_hwmod omap34xx_mcspi3;
63static struct omap_hwmod omap34xx_mcspi4;
64static struct omap_hwmod am35xx_usbhsotg_hwmod;
58 65
59static struct omap_hwmod omap3xxx_dma_system_hwmod; 66static struct omap_hwmod omap3xxx_dma_system_hwmod;
60 67
@@ -107,7 +114,23 @@ static struct omap_hwmod omap3xxx_uart1_hwmod;
107static struct omap_hwmod omap3xxx_uart2_hwmod; 114static struct omap_hwmod omap3xxx_uart2_hwmod;
108static struct omap_hwmod omap3xxx_uart3_hwmod; 115static struct omap_hwmod omap3xxx_uart3_hwmod;
109static struct omap_hwmod omap3xxx_uart4_hwmod; 116static struct omap_hwmod omap3xxx_uart4_hwmod;
117static struct omap_hwmod omap3xxx_usbhsotg_hwmod;
110 118
119/* l3_core -> usbhsotg interface */
120static struct omap_hwmod_ocp_if omap3xxx_usbhsotg__l3 = {
121 .master = &omap3xxx_usbhsotg_hwmod,
122 .slave = &omap3xxx_l3_main_hwmod,
123 .clk = "core_l3_ick",
124 .user = OCP_USER_MPU,
125};
126
127/* l3_core -> am35xx_usbhsotg interface */
128static struct omap_hwmod_ocp_if am35xx_usbhsotg__l3 = {
129 .master = &am35xx_usbhsotg_hwmod,
130 .slave = &omap3xxx_l3_main_hwmod,
131 .clk = "core_l3_ick",
132 .user = OCP_USER_MPU,
133};
111/* L4_CORE -> L4_WKUP interface */ 134/* L4_CORE -> L4_WKUP interface */
112static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = { 135static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = {
113 .master = &omap3xxx_l4_core_hwmod, 136 .master = &omap3xxx_l4_core_hwmod,
@@ -301,6 +324,61 @@ static struct omap_hwmod_ocp_if omap3_l4_core__sr2 = {
301 .user = OCP_USER_MPU, 324 .user = OCP_USER_MPU,
302}; 325};
303 326
327/*
328* usbhsotg interface data
329*/
330
331static struct omap_hwmod_addr_space omap3xxx_usbhsotg_addrs[] = {
332 {
333 .pa_start = OMAP34XX_HSUSB_OTG_BASE,
334 .pa_end = OMAP34XX_HSUSB_OTG_BASE + SZ_4K - 1,
335 .flags = ADDR_TYPE_RT
336 },
337};
338
339/* l4_core -> usbhsotg */
340static struct omap_hwmod_ocp_if omap3xxx_l4_core__usbhsotg = {
341 .master = &omap3xxx_l4_core_hwmod,
342 .slave = &omap3xxx_usbhsotg_hwmod,
343 .clk = "l4_ick",
344 .addr = omap3xxx_usbhsotg_addrs,
345 .addr_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_addrs),
346 .user = OCP_USER_MPU,
347};
348
349static struct omap_hwmod_ocp_if *omap3xxx_usbhsotg_masters[] = {
350 &omap3xxx_usbhsotg__l3,
351};
352
353static struct omap_hwmod_ocp_if *omap3xxx_usbhsotg_slaves[] = {
354 &omap3xxx_l4_core__usbhsotg,
355};
356
357static struct omap_hwmod_addr_space am35xx_usbhsotg_addrs[] = {
358 {
359 .pa_start = AM35XX_IPSS_USBOTGSS_BASE,
360 .pa_end = AM35XX_IPSS_USBOTGSS_BASE + SZ_4K - 1,
361 .flags = ADDR_TYPE_RT
362 },
363};
364
365/* l4_core -> usbhsotg */
366static struct omap_hwmod_ocp_if am35xx_l4_core__usbhsotg = {
367 .master = &omap3xxx_l4_core_hwmod,
368 .slave = &am35xx_usbhsotg_hwmod,
369 .clk = "l4_ick",
370 .addr = am35xx_usbhsotg_addrs,
371 .addr_cnt = ARRAY_SIZE(am35xx_usbhsotg_addrs),
372 .user = OCP_USER_MPU,
373};
374
375static struct omap_hwmod_ocp_if *am35xx_usbhsotg_masters[] = {
376 &am35xx_usbhsotg__l3,
377};
378
379static struct omap_hwmod_ocp_if *am35xx_usbhsotg_slaves[] = {
380 &am35xx_l4_core__usbhsotg,
381};
304/* Slave interfaces on the L4_CORE interconnect */ 382/* Slave interfaces on the L4_CORE interconnect */
305static struct omap_hwmod_ocp_if *omap3xxx_l4_core_slaves[] = { 383static struct omap_hwmod_ocp_if *omap3xxx_l4_core_slaves[] = {
306 &omap3xxx_l3_main__l4_core, 384 &omap3xxx_l3_main__l4_core,
@@ -1356,6 +1434,360 @@ static struct omap_hwmod omap36xx_sr2_hwmod = {
1356 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1), 1434 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1),
1357}; 1435};
1358 1436
1437/* l4 core -> mcspi1 interface */
1438static struct omap_hwmod_addr_space omap34xx_mcspi1_addr_space[] = {
1439 {
1440 .pa_start = 0x48098000,
1441 .pa_end = 0x480980ff,
1442 .flags = ADDR_TYPE_RT,
1443 },
1444};
1445
1446static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi1 = {
1447 .master = &omap3xxx_l4_core_hwmod,
1448 .slave = &omap34xx_mcspi1,
1449 .clk = "mcspi1_ick",
1450 .addr = omap34xx_mcspi1_addr_space,
1451 .addr_cnt = ARRAY_SIZE(omap34xx_mcspi1_addr_space),
1452 .user = OCP_USER_MPU | OCP_USER_SDMA,
1453};
1454
1455/* l4 core -> mcspi2 interface */
1456static struct omap_hwmod_addr_space omap34xx_mcspi2_addr_space[] = {
1457 {
1458 .pa_start = 0x4809a000,
1459 .pa_end = 0x4809a0ff,
1460 .flags = ADDR_TYPE_RT,
1461 },
1462};
1463
1464static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi2 = {
1465 .master = &omap3xxx_l4_core_hwmod,
1466 .slave = &omap34xx_mcspi2,
1467 .clk = "mcspi2_ick",
1468 .addr = omap34xx_mcspi2_addr_space,
1469 .addr_cnt = ARRAY_SIZE(omap34xx_mcspi2_addr_space),
1470 .user = OCP_USER_MPU | OCP_USER_SDMA,
1471};
1472
1473/* l4 core -> mcspi3 interface */
1474static struct omap_hwmod_addr_space omap34xx_mcspi3_addr_space[] = {
1475 {
1476 .pa_start = 0x480b8000,
1477 .pa_end = 0x480b80ff,
1478 .flags = ADDR_TYPE_RT,
1479 },
1480};
1481
1482static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi3 = {
1483 .master = &omap3xxx_l4_core_hwmod,
1484 .slave = &omap34xx_mcspi3,
1485 .clk = "mcspi3_ick",
1486 .addr = omap34xx_mcspi3_addr_space,
1487 .addr_cnt = ARRAY_SIZE(omap34xx_mcspi3_addr_space),
1488 .user = OCP_USER_MPU | OCP_USER_SDMA,
1489};
1490
1491/* l4 core -> mcspi4 interface */
1492static struct omap_hwmod_addr_space omap34xx_mcspi4_addr_space[] = {
1493 {
1494 .pa_start = 0x480ba000,
1495 .pa_end = 0x480ba0ff,
1496 .flags = ADDR_TYPE_RT,
1497 },
1498};
1499
1500static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi4 = {
1501 .master = &omap3xxx_l4_core_hwmod,
1502 .slave = &omap34xx_mcspi4,
1503 .clk = "mcspi4_ick",
1504 .addr = omap34xx_mcspi4_addr_space,
1505 .addr_cnt = ARRAY_SIZE(omap34xx_mcspi4_addr_space),
1506 .user = OCP_USER_MPU | OCP_USER_SDMA,
1507};
1508
1509/*
1510 * 'mcspi' class
1511 * multichannel serial port interface (mcspi) / master/slave synchronous serial
1512 * bus
1513 */
1514
1515static struct omap_hwmod_class_sysconfig omap34xx_mcspi_sysc = {
1516 .rev_offs = 0x0000,
1517 .sysc_offs = 0x0010,
1518 .syss_offs = 0x0014,
1519 .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
1520 SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
1521 SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
1522 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
1523 .sysc_fields = &omap_hwmod_sysc_type1,
1524};
1525
1526static struct omap_hwmod_class omap34xx_mcspi_class = {
1527 .name = "mcspi",
1528 .sysc = &omap34xx_mcspi_sysc,
1529 .rev = OMAP3_MCSPI_REV,
1530};
1531
1532/* mcspi1 */
1533static struct omap_hwmod_irq_info omap34xx_mcspi1_mpu_irqs[] = {
1534 { .name = "irq", .irq = 65 },
1535};
1536
1537static struct omap_hwmod_dma_info omap34xx_mcspi1_sdma_reqs[] = {
1538 { .name = "tx0", .dma_req = 35 },
1539 { .name = "rx0", .dma_req = 36 },
1540 { .name = "tx1", .dma_req = 37 },
1541 { .name = "rx1", .dma_req = 38 },
1542 { .name = "tx2", .dma_req = 39 },
1543 { .name = "rx2", .dma_req = 40 },
1544 { .name = "tx3", .dma_req = 41 },
1545 { .name = "rx3", .dma_req = 42 },
1546};
1547
1548static struct omap_hwmod_ocp_if *omap34xx_mcspi1_slaves[] = {
1549 &omap34xx_l4_core__mcspi1,
1550};
1551
1552static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
1553 .num_chipselect = 4,
1554};
1555
1556static struct omap_hwmod omap34xx_mcspi1 = {
1557 .name = "mcspi1",
1558 .mpu_irqs = omap34xx_mcspi1_mpu_irqs,
1559 .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi1_mpu_irqs),
1560 .sdma_reqs = omap34xx_mcspi1_sdma_reqs,
1561 .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi1_sdma_reqs),
1562 .main_clk = "mcspi1_fck",
1563 .prcm = {
1564 .omap2 = {
1565 .module_offs = CORE_MOD,
1566 .prcm_reg_id = 1,
1567 .module_bit = OMAP3430_EN_MCSPI1_SHIFT,
1568 .idlest_reg_id = 1,
1569 .idlest_idle_bit = OMAP3430_ST_MCSPI1_SHIFT,
1570 },
1571 },
1572 .slaves = omap34xx_mcspi1_slaves,
1573 .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi1_slaves),
1574 .class = &omap34xx_mcspi_class,
1575 .dev_attr = &omap_mcspi1_dev_attr,
1576 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
1577};
1578
1579/* mcspi2 */
1580static struct omap_hwmod_irq_info omap34xx_mcspi2_mpu_irqs[] = {
1581 { .name = "irq", .irq = 66 },
1582};
1583
1584static struct omap_hwmod_dma_info omap34xx_mcspi2_sdma_reqs[] = {
1585 { .name = "tx0", .dma_req = 43 },
1586 { .name = "rx0", .dma_req = 44 },
1587 { .name = "tx1", .dma_req = 45 },
1588 { .name = "rx1", .dma_req = 46 },
1589};
1590
1591static struct omap_hwmod_ocp_if *omap34xx_mcspi2_slaves[] = {
1592 &omap34xx_l4_core__mcspi2,
1593};
1594
1595static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
1596 .num_chipselect = 2,
1597};
1598
1599static struct omap_hwmod omap34xx_mcspi2 = {
1600 .name = "mcspi2",
1601 .mpu_irqs = omap34xx_mcspi2_mpu_irqs,
1602 .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi2_mpu_irqs),
1603 .sdma_reqs = omap34xx_mcspi2_sdma_reqs,
1604 .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi2_sdma_reqs),
1605 .main_clk = "mcspi2_fck",
1606 .prcm = {
1607 .omap2 = {
1608 .module_offs = CORE_MOD,
1609 .prcm_reg_id = 1,
1610 .module_bit = OMAP3430_EN_MCSPI2_SHIFT,
1611 .idlest_reg_id = 1,
1612 .idlest_idle_bit = OMAP3430_ST_MCSPI2_SHIFT,
1613 },
1614 },
1615 .slaves = omap34xx_mcspi2_slaves,
1616 .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi2_slaves),
1617 .class = &omap34xx_mcspi_class,
1618 .dev_attr = &omap_mcspi2_dev_attr,
1619 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
1620};
1621
1622/* mcspi3 */
1623static struct omap_hwmod_irq_info omap34xx_mcspi3_mpu_irqs[] = {
1624 { .name = "irq", .irq = 91 }, /* 91 */
1625};
1626
1627static struct omap_hwmod_dma_info omap34xx_mcspi3_sdma_reqs[] = {
1628 { .name = "tx0", .dma_req = 15 },
1629 { .name = "rx0", .dma_req = 16 },
1630 { .name = "tx1", .dma_req = 23 },
1631 { .name = "rx1", .dma_req = 24 },
1632};
1633
1634static struct omap_hwmod_ocp_if *omap34xx_mcspi3_slaves[] = {
1635 &omap34xx_l4_core__mcspi3,
1636};
1637
1638static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
1639 .num_chipselect = 2,
1640};
1641
1642static struct omap_hwmod omap34xx_mcspi3 = {
1643 .name = "mcspi3",
1644 .mpu_irqs = omap34xx_mcspi3_mpu_irqs,
1645 .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi3_mpu_irqs),
1646 .sdma_reqs = omap34xx_mcspi3_sdma_reqs,
1647 .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi3_sdma_reqs),
1648 .main_clk = "mcspi3_fck",
1649 .prcm = {
1650 .omap2 = {
1651 .module_offs = CORE_MOD,
1652 .prcm_reg_id = 1,
1653 .module_bit = OMAP3430_EN_MCSPI3_SHIFT,
1654 .idlest_reg_id = 1,
1655 .idlest_idle_bit = OMAP3430_ST_MCSPI3_SHIFT,
1656 },
1657 },
1658 .slaves = omap34xx_mcspi3_slaves,
1659 .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi3_slaves),
1660 .class = &omap34xx_mcspi_class,
1661 .dev_attr = &omap_mcspi3_dev_attr,
1662 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
1663};
1664
1665/* SPI4 */
1666static struct omap_hwmod_irq_info omap34xx_mcspi4_mpu_irqs[] = {
1667 { .name = "irq", .irq = INT_34XX_SPI4_IRQ }, /* 48 */
1668};
1669
1670static struct omap_hwmod_dma_info omap34xx_mcspi4_sdma_reqs[] = {
1671 { .name = "tx0", .dma_req = 70 }, /* DMA_SPI4_TX0 */
1672 { .name = "rx0", .dma_req = 71 }, /* DMA_SPI4_RX0 */
1673};
1674
1675static struct omap_hwmod_ocp_if *omap34xx_mcspi4_slaves[] = {
1676 &omap34xx_l4_core__mcspi4,
1677};
1678
1679static struct omap2_mcspi_dev_attr omap_mcspi4_dev_attr = {
1680 .num_chipselect = 1,
1681};
1682
1683static struct omap_hwmod omap34xx_mcspi4 = {
1684 .name = "mcspi4",
1685 .mpu_irqs = omap34xx_mcspi4_mpu_irqs,
1686 .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi4_mpu_irqs),
1687 .sdma_reqs = omap34xx_mcspi4_sdma_reqs,
1688 .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi4_sdma_reqs),
1689 .main_clk = "mcspi4_fck",
1690 .prcm = {
1691 .omap2 = {
1692 .module_offs = CORE_MOD,
1693 .prcm_reg_id = 1,
1694 .module_bit = OMAP3430_EN_MCSPI4_SHIFT,
1695 .idlest_reg_id = 1,
1696 .idlest_idle_bit = OMAP3430_ST_MCSPI4_SHIFT,
1697 },
1698 },
1699 .slaves = omap34xx_mcspi4_slaves,
1700 .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi4_slaves),
1701 .class = &omap34xx_mcspi_class,
1702 .dev_attr = &omap_mcspi4_dev_attr,
1703 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
1704};
1705
1706/*
1707 * usbhsotg
1708 */
1709static struct omap_hwmod_class_sysconfig omap3xxx_usbhsotg_sysc = {
1710 .rev_offs = 0x0400,
1711 .sysc_offs = 0x0404,
1712 .syss_offs = 0x0408,
1713 .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
1714 SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
1715 SYSC_HAS_AUTOIDLE),
1716 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
1717 MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
1718 .sysc_fields = &omap_hwmod_sysc_type1,
1719};
1720
1721static struct omap_hwmod_class usbotg_class = {
1722 .name = "usbotg",
1723 .sysc = &omap3xxx_usbhsotg_sysc,
1724};
1725/* usb_otg_hs */
1726static struct omap_hwmod_irq_info omap3xxx_usbhsotg_mpu_irqs[] = {
1727
1728 { .name = "mc", .irq = 92 },
1729 { .name = "dma", .irq = 93 },
1730};
1731
1732static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
1733 .name = "usb_otg_hs",
1734 .mpu_irqs = omap3xxx_usbhsotg_mpu_irqs,
1735 .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_mpu_irqs),
1736 .main_clk = "hsotgusb_ick",
1737 .prcm = {
1738 .omap2 = {
1739 .prcm_reg_id = 1,
1740 .module_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
1741 .module_offs = CORE_MOD,
1742 .idlest_reg_id = 1,
1743 .idlest_idle_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT,
1744 .idlest_stdby_bit = OMAP3430ES2_ST_HSOTGUSB_STDBY_SHIFT
1745 },
1746 },
1747 .masters = omap3xxx_usbhsotg_masters,
1748 .masters_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_masters),
1749 .slaves = omap3xxx_usbhsotg_slaves,
1750 .slaves_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_slaves),
1751 .class = &usbotg_class,
1752
1753 /*
1754 * Erratum ID: i479 idle_req / idle_ack mechanism potentially
1755 * broken when autoidle is enabled;
1756 * the workaround is to disable the autoidle bit at module level.
1757 */
1758 .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
1759 | HWMOD_SWSUP_MSTANDBY,
1760 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
1761};
1762
1763/* usb_otg_hs */
1764static struct omap_hwmod_irq_info am35xx_usbhsotg_mpu_irqs[] = {
1765
1766 { .name = "mc", .irq = 71 },
1767};
1768
1769static struct omap_hwmod_class am35xx_usbotg_class = {
1770 .name = "am35xx_usbotg",
1771 .sysc = NULL,
1772};
1773
1774static struct omap_hwmod am35xx_usbhsotg_hwmod = {
1775 .name = "am35x_otg_hs",
1776 .mpu_irqs = am35xx_usbhsotg_mpu_irqs,
1777 .mpu_irqs_cnt = ARRAY_SIZE(am35xx_usbhsotg_mpu_irqs),
1778 .main_clk = NULL,
1779 .prcm = {
1780 .omap2 = {
1781 },
1782 },
1783 .masters = am35xx_usbhsotg_masters,
1784 .masters_cnt = ARRAY_SIZE(am35xx_usbhsotg_masters),
1785 .slaves = am35xx_usbhsotg_slaves,
1786 .slaves_cnt = ARRAY_SIZE(am35xx_usbhsotg_slaves),
1787 .class = &am35xx_usbotg_class,
1788 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES3_1)
1789};
1790
1359static __initdata struct omap_hwmod *omap3xxx_hwmods[] = { 1791static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
1360 &omap3xxx_l3_main_hwmod, 1792 &omap3xxx_l3_main_hwmod,
1361 &omap3xxx_l4_core_hwmod, 1793 &omap3xxx_l4_core_hwmod,
@@ -1387,6 +1819,19 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
1387 1819
1388 /* dma_system class*/ 1820 /* dma_system class*/
1389 &omap3xxx_dma_system_hwmod, 1821 &omap3xxx_dma_system_hwmod,
1822
1823 /* mcspi class */
1824 &omap34xx_mcspi1,
1825 &omap34xx_mcspi2,
1826 &omap34xx_mcspi3,
1827 &omap34xx_mcspi4,
1828
1829 /* usbotg class */
1830 &omap3xxx_usbhsotg_hwmod,
1831
1832 /* usbotg for am35x */
1833 &am35xx_usbhsotg_hwmod,
1834
1390 NULL, 1835 NULL,
1391}; 1836};
1392 1837
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 84e795cf0648..79a860178913 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -24,6 +24,7 @@
24#include <plat/cpu.h> 24#include <plat/cpu.h>
25#include <plat/gpio.h> 25#include <plat/gpio.h>
26#include <plat/dma.h> 26#include <plat/dma.h>
27#include <plat/mcspi.h>
27 28
28#include "omap_hwmod_common_data.h" 29#include "omap_hwmod_common_data.h"
29 30
@@ -3114,6 +3115,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_mcspi_sysc = {
3114static struct omap_hwmod_class omap44xx_mcspi_hwmod_class = { 3115static struct omap_hwmod_class omap44xx_mcspi_hwmod_class = {
3115 .name = "mcspi", 3116 .name = "mcspi",
3116 .sysc = &omap44xx_mcspi_sysc, 3117 .sysc = &omap44xx_mcspi_sysc,
3118 .rev = OMAP4_MCSPI_REV,
3117}; 3119};
3118 3120
3119/* mcspi1 */ 3121/* mcspi1 */
@@ -3156,6 +3158,11 @@ static struct omap_hwmod_ocp_if *omap44xx_mcspi1_slaves[] = {
3156 &omap44xx_l4_per__mcspi1, 3158 &omap44xx_l4_per__mcspi1,
3157}; 3159};
3158 3160
3161/* mcspi1 dev_attr */
3162static struct omap2_mcspi_dev_attr mcspi1_dev_attr = {
3163 .num_chipselect = 4,
3164};
3165
3159static struct omap_hwmod omap44xx_mcspi1_hwmod = { 3166static struct omap_hwmod omap44xx_mcspi1_hwmod = {
3160 .name = "mcspi1", 3167 .name = "mcspi1",
3161 .class = &omap44xx_mcspi_hwmod_class, 3168 .class = &omap44xx_mcspi_hwmod_class,
@@ -3169,6 +3176,7 @@ static struct omap_hwmod omap44xx_mcspi1_hwmod = {
3169 .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI1_CLKCTRL, 3176 .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI1_CLKCTRL,
3170 }, 3177 },
3171 }, 3178 },
3179 .dev_attr = &mcspi1_dev_attr,
3172 .slaves = omap44xx_mcspi1_slaves, 3180 .slaves = omap44xx_mcspi1_slaves,
3173 .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi1_slaves), 3181 .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi1_slaves),
3174 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430), 3182 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
@@ -3210,6 +3218,11 @@ static struct omap_hwmod_ocp_if *omap44xx_mcspi2_slaves[] = {
3210 &omap44xx_l4_per__mcspi2, 3218 &omap44xx_l4_per__mcspi2,
3211}; 3219};
3212 3220
3221/* mcspi2 dev_attr */
3222static struct omap2_mcspi_dev_attr mcspi2_dev_attr = {
3223 .num_chipselect = 2,
3224};
3225
3213static struct omap_hwmod omap44xx_mcspi2_hwmod = { 3226static struct omap_hwmod omap44xx_mcspi2_hwmod = {
3214 .name = "mcspi2", 3227 .name = "mcspi2",
3215 .class = &omap44xx_mcspi_hwmod_class, 3228 .class = &omap44xx_mcspi_hwmod_class,
@@ -3223,6 +3236,7 @@ static struct omap_hwmod omap44xx_mcspi2_hwmod = {
3223 .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI2_CLKCTRL, 3236 .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI2_CLKCTRL,
3224 }, 3237 },
3225 }, 3238 },
3239 .dev_attr = &mcspi2_dev_attr,
3226 .slaves = omap44xx_mcspi2_slaves, 3240 .slaves = omap44xx_mcspi2_slaves,
3227 .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi2_slaves), 3241 .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi2_slaves),
3228 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430), 3242 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
@@ -3264,6 +3278,11 @@ static struct omap_hwmod_ocp_if *omap44xx_mcspi3_slaves[] = {
3264 &omap44xx_l4_per__mcspi3, 3278 &omap44xx_l4_per__mcspi3,
3265}; 3279};
3266 3280
3281/* mcspi3 dev_attr */
3282static struct omap2_mcspi_dev_attr mcspi3_dev_attr = {
3283 .num_chipselect = 2,
3284};
3285
3267static struct omap_hwmod omap44xx_mcspi3_hwmod = { 3286static struct omap_hwmod omap44xx_mcspi3_hwmod = {
3268 .name = "mcspi3", 3287 .name = "mcspi3",
3269 .class = &omap44xx_mcspi_hwmod_class, 3288 .class = &omap44xx_mcspi_hwmod_class,
@@ -3277,6 +3296,7 @@ static struct omap_hwmod omap44xx_mcspi3_hwmod = {
3277 .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI3_CLKCTRL, 3296 .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI3_CLKCTRL,
3278 }, 3297 },
3279 }, 3298 },
3299 .dev_attr = &mcspi3_dev_attr,
3280 .slaves = omap44xx_mcspi3_slaves, 3300 .slaves = omap44xx_mcspi3_slaves,
3281 .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi3_slaves), 3301 .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi3_slaves),
3282 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430), 3302 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
@@ -3316,6 +3336,11 @@ static struct omap_hwmod_ocp_if *omap44xx_mcspi4_slaves[] = {
3316 &omap44xx_l4_per__mcspi4, 3336 &omap44xx_l4_per__mcspi4,
3317}; 3337};
3318 3338
3339/* mcspi4 dev_attr */
3340static struct omap2_mcspi_dev_attr mcspi4_dev_attr = {
3341 .num_chipselect = 1,
3342};
3343
3319static struct omap_hwmod omap44xx_mcspi4_hwmod = { 3344static struct omap_hwmod omap44xx_mcspi4_hwmod = {
3320 .name = "mcspi4", 3345 .name = "mcspi4",
3321 .class = &omap44xx_mcspi_hwmod_class, 3346 .class = &omap44xx_mcspi_hwmod_class,
@@ -3329,6 +3354,7 @@ static struct omap_hwmod omap44xx_mcspi4_hwmod = {
3329 .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI4_CLKCTRL, 3354 .clkctrl_reg = OMAP4430_CM_L4PER_MCSPI4_CLKCTRL,
3330 }, 3355 },
3331 }, 3356 },
3357 .dev_attr = &mcspi4_dev_attr,
3332 .slaves = omap44xx_mcspi4_slaves, 3358 .slaves = omap44xx_mcspi4_slaves,
3333 .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi4_slaves), 3359 .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi4_slaves),
3334 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430), 3360 .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
@@ -3963,6 +3989,7 @@ static struct omap_hwmod_ocp_if *omap44xx_timer1_slaves[] = {
3963static struct omap_hwmod omap44xx_timer1_hwmod = { 3989static struct omap_hwmod omap44xx_timer1_hwmod = {
3964 .name = "timer1", 3990 .name = "timer1",
3965 .class = &omap44xx_timer_1ms_hwmod_class, 3991 .class = &omap44xx_timer_1ms_hwmod_class,
3992 .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
3966 .mpu_irqs = omap44xx_timer1_irqs, 3993 .mpu_irqs = omap44xx_timer1_irqs,
3967 .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer1_irqs), 3994 .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer1_irqs),
3968 .main_clk = "timer1_fck", 3995 .main_clk = "timer1_fck",
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index 745252c60e32..f172ec06c06a 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -29,6 +29,7 @@
29#include <linux/usb.h> 29#include <linux/usb.h>
30 30
31#include <plat/usb.h> 31#include <plat/usb.h>
32#include "control.h"
32 33
33/* OMAP control module register for UTMI PHY */ 34/* OMAP control module register for UTMI PHY */
34#define CONTROL_DEV_CONF 0x300 35#define CONTROL_DEV_CONF 0x300
@@ -147,3 +148,95 @@ int omap4430_phy_exit(struct device *dev)
147 148
148 return 0; 149 return 0;
149} 150}
151
152void am35x_musb_reset(void)
153{
154 u32 regval;
155
156 /* Reset the musb interface */
157 regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
158
159 regval |= AM35XX_USBOTGSS_SW_RST;
160 omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
161
162 regval &= ~AM35XX_USBOTGSS_SW_RST;
163 omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
164
165 regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
166}
167
168void am35x_musb_phy_power(u8 on)
169{
170 unsigned long timeout = jiffies + msecs_to_jiffies(100);
171 u32 devconf2;
172
173 if (on) {
174 /*
175 * Start the on-chip PHY and its PLL.
176 */
177 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
178
179 devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN);
180 devconf2 |= CONF2_PHY_PLLON;
181
182 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
183
184 pr_info("Waiting for PHY clock good...\n");
185 while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2)
186 & CONF2_PHYCLKGD)) {
187 cpu_relax();
188
189 if (time_after(jiffies, timeout)) {
190 pr_err("musb PHY clock good timed out\n");
191 break;
192 }
193 }
194 } else {
195 /*
196 * Power down the on-chip PHY.
197 */
198 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
199
200 devconf2 &= ~CONF2_PHY_PLLON;
201 devconf2 |= CONF2_PHYPWRDN | CONF2_OTGPWRDN;
202 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
203 }
204}
205
206void am35x_musb_clear_irq(void)
207{
208 u32 regval;
209
210 regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
211 regval |= AM35XX_USBOTGSS_INT_CLR;
212 omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
213 regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
214}
215
216void am35x_musb_set_mode(u8 musb_mode)
217{
218 u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
219
220 devconf2 &= ~CONF2_OTGMODE;
221 switch (musb_mode) {
222#ifdef CONFIG_USB_MUSB_HDRC_HCD
223 case MUSB_HOST: /* Force VBUS valid, ID = 0 */
224 devconf2 |= CONF2_FORCE_HOST;
225 break;
226#endif
227#ifdef CONFIG_USB_GADGET_MUSB_HDRC
228 case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
229 devconf2 |= CONF2_FORCE_DEVICE;
230 break;
231#endif
232#ifdef CONFIG_USB_MUSB_OTG
233 case MUSB_OTG: /* Don't override the VBUS/ID comparators */
234 devconf2 |= CONF2_NO_OVERRIDE;
235 break;
236#endif
237 default:
238 pr_info("Unsupported mode %u\n", musb_mode);
239 }
240
241 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
242}
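With these AM35x helpers now exported from the control-module code (their declarations are added to plat/usb.h later in this patch), board files can wire them into the MUSB platform data themselves. A minimal, hypothetical sketch follows; the structure name and the chosen interface/mode values are illustrative only:

static struct omap_musb_board_data am35xx_example_musb_data = {
	.interface_type	= MUSB_INTERFACE_ULPI,
	.mode		= MUSB_OTG,
	/* AM35x control-module helpers provided by omap_phy_internal.c */
	.reset		= am35x_musb_reset,
	.set_phy_power	= am35x_musb_phy_power,
	.clear_irq	= am35x_musb_clear_irq,
	.set_mode	= am35x_musb_set_mode,
};

static void __init am35xx_example_musb_init(void)
{
	usb_musb_init(&am35xx_example_musb_data);
}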
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 5298949d4b11..a9d4d143086d 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -30,118 +30,11 @@
30#include <mach/irqs.h> 30#include <mach/irqs.h>
31#include <mach/am35xx.h> 31#include <mach/am35xx.h>
32#include <plat/usb.h> 32#include <plat/usb.h>
33#include "control.h" 33#include <plat/omap_device.h>
34#include "mux.h"
34 35
35#if defined(CONFIG_USB_MUSB_OMAP2PLUS) || defined (CONFIG_USB_MUSB_AM35X) 36#if defined(CONFIG_USB_MUSB_OMAP2PLUS) || defined (CONFIG_USB_MUSB_AM35X)
36 37
37static void am35x_musb_reset(void)
38{
39 u32 regval;
40
41 /* Reset the musb interface */
42 regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
43
44 regval |= AM35XX_USBOTGSS_SW_RST;
45 omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
46
47 regval &= ~AM35XX_USBOTGSS_SW_RST;
48 omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
49
50 regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
51}
52
53static void am35x_musb_phy_power(u8 on)
54{
55 unsigned long timeout = jiffies + msecs_to_jiffies(100);
56 u32 devconf2;
57
58 if (on) {
59 /*
60 * Start the on-chip PHY and its PLL.
61 */
62 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
63
64 devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN);
65 devconf2 |= CONF2_PHY_PLLON;
66
67 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
68
69 pr_info(KERN_INFO "Waiting for PHY clock good...\n");
70 while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2)
71 & CONF2_PHYCLKGD)) {
72 cpu_relax();
73
74 if (time_after(jiffies, timeout)) {
75 pr_err(KERN_ERR "musb PHY clock good timed out\n");
76 break;
77 }
78 }
79 } else {
80 /*
81 * Power down the on-chip PHY.
82 */
83 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
84
85 devconf2 &= ~CONF2_PHY_PLLON;
86 devconf2 |= CONF2_PHYPWRDN | CONF2_OTGPWRDN;
87 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
88 }
89}
90
91static void am35x_musb_clear_irq(void)
92{
93 u32 regval;
94
95 regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
96 regval |= AM35XX_USBOTGSS_INT_CLR;
97 omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
98 regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
99}
100
101static void am35x_musb_set_mode(u8 musb_mode)
102{
103 u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
104
105 devconf2 &= ~CONF2_OTGMODE;
106 switch (musb_mode) {
107#ifdef CONFIG_USB_MUSB_HDRC_HCD
108 case MUSB_HOST: /* Force VBUS valid, ID = 0 */
109 devconf2 |= CONF2_FORCE_HOST;
110 break;
111#endif
112#ifdef CONFIG_USB_GADGET_MUSB_HDRC
113 case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
114 devconf2 |= CONF2_FORCE_DEVICE;
115 break;
116#endif
117#ifdef CONFIG_USB_MUSB_OTG
118 case MUSB_OTG: /* Don't override the VBUS/ID comparators */
119 devconf2 |= CONF2_NO_OVERRIDE;
120 break;
121#endif
122 default:
123 pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode);
124 }
125
126 omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
127}
128
129static struct resource musb_resources[] = {
130 [0] = { /* start and end set dynamically */
131 .flags = IORESOURCE_MEM,
132 },
133 [1] = { /* general IRQ */
134 .start = INT_243X_HS_USB_MC,
135 .flags = IORESOURCE_IRQ,
136 .name = "mc",
137 },
138 [2] = { /* DMA IRQ */
139 .start = INT_243X_HS_USB_DMA,
140 .flags = IORESOURCE_IRQ,
141 .name = "dma",
142 },
143};
144
145static struct musb_hdrc_config musb_config = { 38static struct musb_hdrc_config musb_config = {
146 .multipoint = 1, 39 .multipoint = 1,
147 .dyn_fifo = 1, 40 .dyn_fifo = 1,
@@ -169,38 +62,65 @@ static struct musb_hdrc_platform_data musb_plat = {
169 62
170static u64 musb_dmamask = DMA_BIT_MASK(32); 63static u64 musb_dmamask = DMA_BIT_MASK(32);
171 64
172static struct platform_device musb_device = { 65static struct omap_device_pm_latency omap_musb_latency[] = {
173 .name = "musb-omap2430", 66 {
174 .id = -1, 67 .deactivate_func = omap_device_idle_hwmods,
175 .dev = { 68 .activate_func = omap_device_enable_hwmods,
176 .dma_mask = &musb_dmamask, 69 .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
177 .coherent_dma_mask = DMA_BIT_MASK(32),
178 .platform_data = &musb_plat,
179 }, 70 },
180 .num_resources = ARRAY_SIZE(musb_resources),
181 .resource = musb_resources,
182}; 71};
183 72
73static void usb_musb_mux_init(struct omap_musb_board_data *board_data)
74{
75 switch (board_data->interface_type) {
76 case MUSB_INTERFACE_UTMI:
77 omap_mux_init_signal("usba0_otg_dp", OMAP_PIN_INPUT);
78 omap_mux_init_signal("usba0_otg_dm", OMAP_PIN_INPUT);
79 break;
80 case MUSB_INTERFACE_ULPI:
81 omap_mux_init_signal("usba0_ulpiphy_clk",
82 OMAP_PIN_INPUT_PULLDOWN);
83 omap_mux_init_signal("usba0_ulpiphy_stp",
84 OMAP_PIN_INPUT_PULLDOWN);
85 omap_mux_init_signal("usba0_ulpiphy_dir",
86 OMAP_PIN_INPUT_PULLDOWN);
87 omap_mux_init_signal("usba0_ulpiphy_nxt",
88 OMAP_PIN_INPUT_PULLDOWN);
89 omap_mux_init_signal("usba0_ulpiphy_dat0",
90 OMAP_PIN_INPUT_PULLDOWN);
91 omap_mux_init_signal("usba0_ulpiphy_dat1",
92 OMAP_PIN_INPUT_PULLDOWN);
93 omap_mux_init_signal("usba0_ulpiphy_dat2",
94 OMAP_PIN_INPUT_PULLDOWN);
95 omap_mux_init_signal("usba0_ulpiphy_dat3",
96 OMAP_PIN_INPUT_PULLDOWN);
97 omap_mux_init_signal("usba0_ulpiphy_dat4",
98 OMAP_PIN_INPUT_PULLDOWN);
99 omap_mux_init_signal("usba0_ulpiphy_dat5",
100 OMAP_PIN_INPUT_PULLDOWN);
101 omap_mux_init_signal("usba0_ulpiphy_dat6",
102 OMAP_PIN_INPUT_PULLDOWN);
103 omap_mux_init_signal("usba0_ulpiphy_dat7",
104 OMAP_PIN_INPUT_PULLDOWN);
105 break;
106 default:
107 break;
108 }
109}
110
184void __init usb_musb_init(struct omap_musb_board_data *board_data) 111void __init usb_musb_init(struct omap_musb_board_data *board_data)
185{ 112{
186 if (cpu_is_omap243x()) { 113 struct omap_hwmod *oh;
187 musb_resources[0].start = OMAP243X_HS_BASE; 114 struct omap_device *od;
188 } else if (cpu_is_omap3517() || cpu_is_omap3505()) { 115 struct platform_device *pdev;
189 musb_device.name = "musb-am35x"; 116 struct device *dev;
190 musb_resources[0].start = AM35XX_IPSS_USBOTGSS_BASE; 117 int bus_id = -1;
191 musb_resources[1].start = INT_35XX_USBOTG_IRQ; 118 const char *oh_name, *name;
192 board_data->set_phy_power = am35x_musb_phy_power; 119
193 board_data->clear_irq = am35x_musb_clear_irq; 120 if (cpu_is_omap3517() || cpu_is_omap3505()) {
194 board_data->set_mode = am35x_musb_set_mode;
195 board_data->reset = am35x_musb_reset;
196 } else if (cpu_is_omap34xx()) {
197 musb_resources[0].start = OMAP34XX_HSUSB_OTG_BASE;
198 } else if (cpu_is_omap44xx()) { 121 } else if (cpu_is_omap44xx()) {
199 musb_resources[0].start = OMAP44XX_HSUSB_OTG_BASE; 122 usb_musb_mux_init(board_data);
200 musb_resources[1].start = OMAP44XX_IRQ_HS_USB_MC_N;
201 musb_resources[2].start = OMAP44XX_IRQ_HS_USB_DMA_N;
202 } 123 }
203 musb_resources[0].end = musb_resources[0].start + SZ_4K - 1;
204 124
205 /* 125 /*
206 * REVISIT: This line can be removed once all the platforms using 126 * REVISIT: This line can be removed once all the platforms using
@@ -212,8 +132,35 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data)
212 musb_plat.mode = board_data->mode; 132 musb_plat.mode = board_data->mode;
213 musb_plat.extvbus = board_data->extvbus; 133 musb_plat.extvbus = board_data->extvbus;
214 134
215 if (platform_device_register(&musb_device) < 0) 135 if (cpu_is_omap3517() || cpu_is_omap3505()) {
216 printk(KERN_ERR "Unable to register HS-USB (MUSB) device\n"); 136 oh_name = "am35x_otg_hs";
137 name = "musb-am35x";
138 } else {
139 oh_name = "usb_otg_hs";
140 name = "musb-omap2430";
141 }
142
143 oh = omap_hwmod_lookup(oh_name);
144 if (!oh) {
145 pr_err("Could not look up %s\n", oh_name);
146 return;
147 }
148
149 od = omap_device_build(name, bus_id, oh, &musb_plat,
150 sizeof(musb_plat), omap_musb_latency,
151 ARRAY_SIZE(omap_musb_latency), false);
152 if (IS_ERR(od)) {
153 pr_err("Could not build omap_device for %s %s\n",
154 name, oh_name);
155 return;
156 }
157
158 pdev = &od->pdev;
159 dev = &pdev->dev;
160 get_device(dev);
161 dev->dma_mask = &musb_dmamask;
162 dev->coherent_dma_mask = musb_dmamask;
163 put_device(dev);
217} 164}
218 165
219#else 166#else
diff --git a/arch/arm/plat-omap/include/plat/gpmc.h b/arch/arm/plat-omap/include/plat/gpmc.h
index 85ded598853e..12b316165037 100644
--- a/arch/arm/plat-omap/include/plat/gpmc.h
+++ b/arch/arm/plat-omap/include/plat/gpmc.h
@@ -41,6 +41,8 @@
41#define GPMC_NAND_ADDRESS 0x0000000b 41#define GPMC_NAND_ADDRESS 0x0000000b
42#define GPMC_NAND_DATA 0x0000000c 42#define GPMC_NAND_DATA 0x0000000c
43 43
44#define GPMC_ENABLE_IRQ 0x0000000d
45
44/* ECC commands */ 46/* ECC commands */
45#define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */ 47#define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */
46#define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */ 48#define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */
@@ -78,6 +80,19 @@
78#define WR_RD_PIN_MONITORING 0x00600000 80#define WR_RD_PIN_MONITORING 0x00600000
79#define GPMC_PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F) 81#define GPMC_PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
80#define GPMC_PREFETCH_STATUS_COUNT(val) (val & 0x00003fff) 82#define GPMC_PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
83#define GPMC_IRQ_FIFOEVENTENABLE 0x01
84#define GPMC_IRQ_COUNT_EVENT 0x02
85
86#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
87#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
88
89enum omap_ecc {
90 /* 1-bit ecc: stored at end of spare area */
91 OMAP_ECC_HAMMING_CODE_DEFAULT = 0, /* Default, s/w method */
92 OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */
93 /* 1-bit ecc: stored at beginning of spare area as romcode */
94 OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */
95};
81 96
82/* 97/*
83 * Note that all values in this struct are in nanoseconds except sync_clk 98 * Note that all values in this struct are in nanoseconds except sync_clk
@@ -130,12 +145,11 @@ extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base);
130extern void gpmc_cs_free(int cs); 145extern void gpmc_cs_free(int cs);
131extern int gpmc_cs_set_reserved(int cs, int reserved); 146extern int gpmc_cs_set_reserved(int cs, int reserved);
132extern int gpmc_cs_reserved(int cs); 147extern int gpmc_cs_reserved(int cs);
133extern int gpmc_prefetch_enable(int cs, int dma_mode, 148extern int gpmc_prefetch_enable(int cs, int fifo_th, int dma_mode,
134 unsigned int u32_count, int is_write); 149 unsigned int u32_count, int is_write);
135extern int gpmc_prefetch_reset(int cs); 150extern int gpmc_prefetch_reset(int cs);
136extern void omap3_gpmc_save_context(void); 151extern void omap3_gpmc_save_context(void);
137extern void omap3_gpmc_restore_context(void); 152extern void omap3_gpmc_restore_context(void);
138extern void gpmc_init(void);
139extern int gpmc_read_status(int cmd); 153extern int gpmc_read_status(int cmd);
140extern int gpmc_cs_configure(int cs, int cmd, int wval); 154extern int gpmc_cs_configure(int cs, int cmd, int wval);
141extern int gpmc_nand_read(int cs, int cmd); 155extern int gpmc_nand_read(int cs, int cmd);
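The prefetch engine gains an explicit FIFO-threshold argument above. A rough illustration of the updated call is sketched below; the helper is hypothetical, the 0 passed for dma_mode is assumed to select the non-DMA/polled path, and the FIFO draining itself is elided:

static int example_gpmc_prefetch_read(int cs, unsigned int u32_count)
{
	int ret;

	/* enable the read prefetch engine with the largest FIFO threshold */
	ret = gpmc_prefetch_enable(cs, PREFETCH_FIFOTHRESHOLD_MAX, 0,
				   u32_count, 0 /* read */);
	if (ret)
		return ret;

	/* ... poll the FIFO and copy data out here ... */

	/* disable the engine once the transfer is done */
	return gpmc_prefetch_reset(cs);
}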
diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h
index 69230d685538..19cbb5e9ece2 100644
--- a/arch/arm/plat-omap/include/plat/iommu.h
+++ b/arch/arm/plat-omap/include/plat/iommu.h
@@ -154,6 +154,8 @@ extern void flush_iotlb_range(struct iommu *obj, u32 start, u32 end);
154extern void flush_iotlb_all(struct iommu *obj); 154extern void flush_iotlb_all(struct iommu *obj);
155 155
156extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e); 156extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e);
157extern void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd,
158 u32 **ppte);
157extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova); 159extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova);
158 160
159extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end); 161extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end);
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 2910de921c52..1b911681e911 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -318,6 +318,7 @@
318#define INT_34XX_PRCM_MPU_IRQ 11 318#define INT_34XX_PRCM_MPU_IRQ 11
319#define INT_34XX_MCBSP1_IRQ 16 319#define INT_34XX_MCBSP1_IRQ 16
320#define INT_34XX_MCBSP2_IRQ 17 320#define INT_34XX_MCBSP2_IRQ 17
321#define INT_34XX_GPMC_IRQ 20
321#define INT_34XX_MCBSP3_IRQ 22 322#define INT_34XX_MCBSP3_IRQ 22
322#define INT_34XX_MCBSP4_IRQ 23 323#define INT_34XX_MCBSP4_IRQ 23
323#define INT_34XX_CAM_IRQ 24 324#define INT_34XX_CAM_IRQ 24
@@ -411,7 +412,13 @@
411#define TWL_IRQ_END TWL6030_IRQ_END 412#define TWL_IRQ_END TWL6030_IRQ_END
412#endif 413#endif
413 414
414#define NR_IRQS TWL_IRQ_END 415/* GPMC related */
416#define OMAP_GPMC_IRQ_BASE (TWL_IRQ_END)
417#define OMAP_GPMC_NR_IRQS 7
418#define OMAP_GPMC_IRQ_END (OMAP_GPMC_IRQ_BASE + OMAP_GPMC_NR_IRQS)
419
420
421#define NR_IRQS OMAP_GPMC_IRQ_END
415 422
416#define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32)) 423#define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32))
417 424
diff --git a/arch/arm/plat-omap/include/plat/mcspi.h b/arch/arm/plat-omap/include/plat/mcspi.h
index 1254e4945b6f..3d51b18131cc 100644
--- a/arch/arm/plat-omap/include/plat/mcspi.h
+++ b/arch/arm/plat-omap/include/plat/mcspi.h
@@ -1,8 +1,19 @@
1#ifndef _OMAP2_MCSPI_H 1#ifndef _OMAP2_MCSPI_H
2#define _OMAP2_MCSPI_H 2#define _OMAP2_MCSPI_H
3 3
4#define OMAP2_MCSPI_REV 0
5#define OMAP3_MCSPI_REV 1
6#define OMAP4_MCSPI_REV 2
7
8#define OMAP4_MCSPI_REG_OFFSET 0x100
9
4struct omap2_mcspi_platform_config { 10struct omap2_mcspi_platform_config {
5 unsigned short num_cs; 11 unsigned short num_cs;
12 unsigned int regs_offset;
13};
14
15struct omap2_mcspi_dev_attr {
16 unsigned short num_chipselect;
6}; 17};
7 18
8struct omap2_mcspi_device_config { 19struct omap2_mcspi_device_config {
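A sketch of how the new fields might be consumed when registering a McSPI controller; this is hypothetical code (the real hook-up lives in the OMAP device-registration code touched elsewhere in this series), and the cpu_is_omap44xx() test is only an assumption about how regs_offset gets chosen:

static void example_mcspi_fill_pdata(struct omap_hwmod *oh,
				     struct omap2_mcspi_platform_config *pdata)
{
	struct omap2_mcspi_dev_attr *attr = oh->dev_attr;

	/* per-controller chip-select count comes from the hwmod dev_attr */
	pdata->num_cs = attr->num_chipselect;

	/* OMAP4 moved the functional registers by OMAP4_MCSPI_REG_OFFSET */
	pdata->regs_offset = cpu_is_omap44xx() ? OMAP4_MCSPI_REG_OFFSET : 0;
}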
diff --git a/arch/arm/plat-omap/include/plat/nand.h b/arch/arm/plat-omap/include/plat/nand.h
index 6562cd082bb1..d86d1ecf0068 100644
--- a/arch/arm/plat-omap/include/plat/nand.h
+++ b/arch/arm/plat-omap/include/plat/nand.h
@@ -8,8 +8,16 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <plat/gpmc.h>
11#include <linux/mtd/partitions.h> 12#include <linux/mtd/partitions.h>
12 13
14enum nand_io {
15 NAND_OMAP_PREFETCH_POLLED = 0, /* prefetch polled mode, default */
16 NAND_OMAP_POLLED, /* polled mode, without prefetch */
17 NAND_OMAP_PREFETCH_DMA, /* prefetch enabled sDMA mode */
18 NAND_OMAP_PREFETCH_IRQ /* prefetch enabled irq mode */
19};
20
13struct omap_nand_platform_data { 21struct omap_nand_platform_data {
14 unsigned int options; 22 unsigned int options;
15 int cs; 23 int cs;
@@ -20,8 +28,11 @@ struct omap_nand_platform_data {
20 int (*nand_setup)(void); 28 int (*nand_setup)(void);
21 int (*dev_ready)(struct omap_nand_platform_data *); 29 int (*dev_ready)(struct omap_nand_platform_data *);
22 int dma_channel; 30 int dma_channel;
31 int gpmc_irq;
32 enum nand_io xfer_type;
23 unsigned long phys_base; 33 unsigned long phys_base;
24 int devsize; 34 int devsize;
35 enum omap_ecc ecc_opt;
25}; 36};
26 37
27/* minimum size for IO mapping */ 38/* minimum size for IO mapping */
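A hypothetical board-file use of the extended NAND platform data; the field values are illustrative, and in particular the gpmc_irq and dma_channel choices are assumptions rather than values mandated by this patch:

static struct omap_nand_platform_data example_nand_data = {
	.cs		= 0,
	.dma_channel	= -1,				/* assumed: no fixed channel */
	.gpmc_irq	= OMAP_GPMC_IRQ_BASE,		/* first GPMC virtual IRQ */
	.xfer_type	= NAND_OMAP_PREFETCH_IRQ,	/* prefetch + irq transfers */
	.ecc_opt	= OMAP_ECC_HAMMING_CODE_HW,	/* let the GPMC compute ECC */
	.devsize	= 0,
};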
diff --git a/arch/arm/plat-omap/include/plat/onenand.h b/arch/arm/plat-omap/include/plat/onenand.h
index affe87e9ece7..cbe897ca7f9e 100644
--- a/arch/arm/plat-omap/include/plat/onenand.h
+++ b/arch/arm/plat-omap/include/plat/onenand.h
@@ -15,12 +15,20 @@
15#define ONENAND_SYNC_READ (1 << 0) 15#define ONENAND_SYNC_READ (1 << 0)
16#define ONENAND_SYNC_READWRITE (1 << 1) 16#define ONENAND_SYNC_READWRITE (1 << 1)
17 17
18struct onenand_freq_info {
19 u16 maf_id;
20 u16 dev_id;
21 u16 ver_id;
22};
23
18struct omap_onenand_platform_data { 24struct omap_onenand_platform_data {
19 int cs; 25 int cs;
20 int gpio_irq; 26 int gpio_irq;
21 struct mtd_partition *parts; 27 struct mtd_partition *parts;
22 int nr_parts; 28 int nr_parts;
23 int (*onenand_setup)(void __iomem *, int freq); 29 int (*onenand_setup)(void __iomem *, int *freq_ptr);
30 int (*get_freq)(const struct onenand_freq_info *freq_info,
31 bool *clk_dep);
24 int dma_channel; 32 int dma_channel;
25 u8 flags; 33 u8 flags;
26 u8 regulator_can_sleep; 34 u8 regulator_can_sleep;
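The new get_freq() hook lets the board decide the OneNAND sync frequency from the probed chip IDs. A minimal, hypothetical implementation is sketched below; the return value being a frequency in MHz, and the fixed 83 used here, are assumptions for illustration:

static int example_onenand_get_freq(const struct onenand_freq_info *freq_info,
				    bool *clk_dep)
{
	/* placeholder policy: same sync frequency for every detected part */
	*clk_dep = false;

	return 83;
}

static struct omap_onenand_platform_data example_onenand_data = {
	.cs		= 0,
	.get_freq	= example_onenand_get_freq,
	.flags		= ONENAND_SYNC_READWRITE,
};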
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 450a332f1009..077192759afc 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -91,6 +91,10 @@ extern int omap4430_phy_exit(struct device *dev);
91 91
92#endif 92#endif
93 93
94extern void am35x_musb_reset(void);
95extern void am35x_musb_phy_power(u8 on);
96extern void am35x_musb_clear_irq(void);
97extern void am35x_musb_set_mode(u8 musb_mode);
94 98
95/* 99/*
96 * FIXME correct answer depends on hmc_mode, 100 * FIXME correct answer depends on hmc_mode,
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9bfb71ff3a6a..177c7d156933 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -117,4 +117,6 @@ source "drivers/staging/Kconfig"
117source "drivers/platform/Kconfig" 117source "drivers/platform/Kconfig"
118 118
119source "drivers/clk/Kconfig" 119source "drivers/clk/Kconfig"
120
121source "drivers/hwspinlock/Kconfig"
120endmenu 122endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index b423bb16c3a8..3f135b6fb014 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -117,3 +117,5 @@ obj-y += platform/
117obj-y += ieee802154/ 117obj-y += ieee802154/
118#common clk code 118#common clk code
119obj-y += clk/ 119obj-y += clk/
120
121obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
new file mode 100644
index 000000000000..eb4af28f8567
--- /dev/null
+++ b/drivers/hwspinlock/Kconfig
@@ -0,0 +1,22 @@
1#
2# Generic HWSPINLOCK framework
3#
4
5config HWSPINLOCK
6 tristate "Generic Hardware Spinlock framework"
7 help
8 Say y here to support the generic hardware spinlock framework.
9 You only need to enable this if you have a hardware spinlock module
10 on your system (usually only relevant if your system has remote slave
11 coprocessors).
12
13 If unsure, say N.
14
15config HWSPINLOCK_OMAP
16 tristate "OMAP Hardware Spinlock device"
17 depends on HWSPINLOCK && ARCH_OMAP4
18 help
19 Say y here to support the OMAP Hardware Spinlock device (first
20 introduced in OMAP4).
21
22 If unsure, say N.
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
new file mode 100644
index 000000000000..5729a3f7ed3d
--- /dev/null
+++ b/drivers/hwspinlock/Makefile
@@ -0,0 +1,6 @@
1#
2# Generic Hardware Spinlock framework
3#
4
5obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
6obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
new file mode 100644
index 000000000000..43a62714b4fb
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -0,0 +1,548 @@
1/*
2 * Hardware spinlock framework
3 *
4 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#define pr_fmt(fmt) "%s: " fmt, __func__
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/spinlock.h>
23#include <linux/types.h>
24#include <linux/err.h>
25#include <linux/jiffies.h>
26#include <linux/radix-tree.h>
27#include <linux/hwspinlock.h>
28#include <linux/pm_runtime.h>
29
30#include "hwspinlock_internal.h"
31
32/* radix tree tags */
33#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */
34
35/*
36 * A radix tree is used to maintain the available hwspinlock instances.
37 * The tree associates hwspinlock pointers with their integer key id,
38 * and provides an easy-to-use API which makes the hwspinlock core code simple
39 * and easy to read.
40 *
41 * Radix trees are quick on lookups, and reasonably efficient in terms of
42 * storage, especially with the high-density usage this framework
43 * requires (a continuous range of integer keys, beginning with zero, is
44 * used as the IDs of the hwspinlock instances).
45 *
46 * The radix tree API supports tagging items in the tree, which this
47 * framework uses to mark unused hwspinlock instances (see the
48 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
49 * tree, looking for an unused hwspinlock instance, is now reduced to a
50 * single radix tree API call.
51 */
52static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
53
54/*
55 * Synchronization of access to the tree is achieved using this spinlock,
56 * as the radix-tree API requires that users provide all synchronisation.
57 */
58static DEFINE_SPINLOCK(hwspinlock_tree_lock);
59
60/**
61 * __hwspin_trylock() - attempt to lock a specific hwspinlock
62 * @hwlock: an hwspinlock which we want to trylock
63 * @mode: controls whether local interrupts are disabled or not
64 * @flags: a pointer where the caller's interrupt state will be saved at (if
65 * requested)
66 *
67 * This function attempts to lock an hwspinlock, and will immediately
68 * fail if the hwspinlock is already taken.
69 *
70 * Upon a successful return from this function, preemption (and possibly
71 * interrupts) is disabled, so the caller must not sleep, and is advised to
72 * release the hwspinlock as soon as possible. This is required in order to
73 * minimize remote cores polling on the hardware interconnect.
74 *
75 * The user decides whether local interrupts are disabled or not, and if yes,
76 * whether he wants their previous state to be saved. It is up to the user
77 * to choose the appropriate @mode of operation, exactly the same way users
78 * should decide between spin_trylock, spin_trylock_irq and
79 * spin_trylock_irqsave.
80 *
81 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
82 * the hwspinlock was already taken.
83 * This function will never sleep.
84 */
85int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
86{
87 int ret;
88
89 BUG_ON(!hwlock);
90 BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
91
92 /*
93 * This spin_lock{_irq, _irqsave} serves three purposes:
94 *
95 * 1. Disable preemption, in order to minimize the period of time
96 * in which the hwspinlock is taken. This is important in order
97 * to minimize the possible polling on the hardware interconnect
98 * by a remote user of this lock.
99 * 2. Make the hwspinlock SMP-safe (so we can take it from
100 * additional contexts on the local host).
101 * 3. Ensure that in_atomic/might_sleep checks catch potential
102 * problems with hwspinlock usage (e.g. scheduler checks like
103 * 'scheduling while atomic' etc.)
104 */
105 if (mode == HWLOCK_IRQSTATE)
106 ret = spin_trylock_irqsave(&hwlock->lock, *flags);
107 else if (mode == HWLOCK_IRQ)
108 ret = spin_trylock_irq(&hwlock->lock);
109 else
110 ret = spin_trylock(&hwlock->lock);
111
112 /* is lock already taken by another context on the local cpu ? */
113 if (!ret)
114 return -EBUSY;
115
116 /* try to take the hwspinlock device */
117 ret = hwlock->ops->trylock(hwlock);
118
119 /* if hwlock is already taken, undo spin_trylock_* and exit */
120 if (!ret) {
121 if (mode == HWLOCK_IRQSTATE)
122 spin_unlock_irqrestore(&hwlock->lock, *flags);
123 else if (mode == HWLOCK_IRQ)
124 spin_unlock_irq(&hwlock->lock);
125 else
126 spin_unlock(&hwlock->lock);
127
128 return -EBUSY;
129 }
130
131 /*
132 * We can be sure the other core's memory operations
133 * are observable to us only _after_ we successfully take
134 * the hwspinlock, and we must make sure that subsequent memory
135 * operations (both reads and writes) will not be reordered before
136 * we actually took the hwspinlock.
137 *
138 * Note: the implicit memory barrier of the spinlock above is too
139 * early, so we need this additional explicit memory barrier.
140 */
141 mb();
142
143 return 0;
144}
145EXPORT_SYMBOL_GPL(__hwspin_trylock);
146
147/**
148 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
149 * @hwlock: the hwspinlock to be locked
150 * @timeout: timeout value in msecs
151 * @mode: mode which controls whether local interrupts are disabled or not
152 * @flags: a pointer to where the caller's interrupt state will be saved at (if
153 * requested)
154 *
155 * This function locks the given @hwlock. If the @hwlock
156 * is already taken, the function will busy loop waiting for it to
157 * be released, but give up after @timeout msecs have elapsed.
158 *
159 * Upon a successful return from this function, preemption is disabled
160 * (and possibly local interrupts, too), so the caller must not sleep,
161 * and is advised to release the hwspinlock as soon as possible.
162 * This is required in order to minimize remote cores polling on the
163 * hardware interconnect.
164 *
165 * The user decides whether local interrupts are disabled or not, and if yes,
166 * whether he wants their previous state to be saved. It is up to the user
167 * to choose the appropriate @mode of operation, exactly the same way users
168 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
169 *
170 * Returns 0 when the @hwlock was successfully taken, and an appropriate
171 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
172 * busy after @timeout msecs). The function will never sleep.
173 */
174int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
175 int mode, unsigned long *flags)
176{
177 int ret;
178 unsigned long expire;
179
180 expire = msecs_to_jiffies(to) + jiffies;
181
182 for (;;) {
183 /* Try to take the hwspinlock */
184 ret = __hwspin_trylock(hwlock, mode, flags);
185 if (ret != -EBUSY)
186 break;
187
188 /*
189 * The lock is already taken, let's check if the user wants
190 * us to try again
191 */
192 if (time_is_before_eq_jiffies(expire))
193 return -ETIMEDOUT;
194
195 /*
196 * Allow platform-specific relax handlers to prevent
197 * hogging the interconnect (no sleeping, though)
198 */
199 if (hwlock->ops->relax)
200 hwlock->ops->relax(hwlock);
201 }
202
203 return ret;
204}
205EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
206
207/**
208 * __hwspin_unlock() - unlock a specific hwspinlock
209 * @hwlock: a previously-acquired hwspinlock which we want to unlock
210 * @mode: controls whether local interrupts needs to be restored or not
211 * @flags: previous caller's interrupt state to restore (if requested)
212 *
213 * This function will unlock a specific hwspinlock, enable preemption and
214 * (possibly) enable interrupts or restore their previous state.
215 * @hwlock must be already locked before calling this function: it is a bug
216 * to call unlock on a @hwlock that is already unlocked.
217 *
218 * The user decides whether local interrupts should be enabled or not, and
219 * if yes, whether he wants their previous state to be restored. It is up
220 * to the user to choose the appropriate @mode of operation, exactly the
221 * same way users decide between spin_unlock, spin_unlock_irq and
222 * spin_unlock_irqrestore.
223 *
224 * The function will never sleep.
225 */
226void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
227{
228 BUG_ON(!hwlock);
229 BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
230
231 /*
232 * We must make sure that memory operations (both reads and writes),
233 * done before unlocking the hwspinlock, will not be reordered
234 * after the lock is released.
235 *
236 * That's the purpose of this explicit memory barrier.
237 *
238 * Note: the memory barrier induced by the spin_unlock below is too
239 * late; the other core is going to access memory soon after it will
240 * take the hwspinlock, and by then we want to be sure our memory
241 * operations are already observable.
242 */
243 mb();
244
245 hwlock->ops->unlock(hwlock);
246
247 /* Undo the spin_trylock{_irq, _irqsave} called while locking */
248 if (mode == HWLOCK_IRQSTATE)
249 spin_unlock_irqrestore(&hwlock->lock, *flags);
250 else if (mode == HWLOCK_IRQ)
251 spin_unlock_irq(&hwlock->lock);
252 else
253 spin_unlock(&hwlock->lock);
254}
255EXPORT_SYMBOL_GPL(__hwspin_unlock);
256
257/**
258 * hwspin_lock_register() - register a new hw spinlock
259 * @hwlock: hwspinlock to register.
260 *
261 * This function should be called from the underlying platform-specific
262 * implementation, to register a new hwspinlock instance.
263 *
264 * Can be called from an atomic context (will not sleep) but not from
265 * within interrupt context.
266 *
267 * Returns 0 on success, or an appropriate error code on failure
268 */
269int hwspin_lock_register(struct hwspinlock *hwlock)
270{
271 struct hwspinlock *tmp;
272 int ret;
273
274 if (!hwlock || !hwlock->ops ||
275 !hwlock->ops->trylock || !hwlock->ops->unlock) {
276 pr_err("invalid parameters\n");
277 return -EINVAL;
278 }
279
280 spin_lock_init(&hwlock->lock);
281
282 spin_lock(&hwspinlock_tree_lock);
283
284 ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
285 if (ret)
286 goto out;
287
288 /* mark this hwspinlock as available */
289 tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
290 HWSPINLOCK_UNUSED);
291
292 /* self-sanity check which should never fail */
293 WARN_ON(tmp != hwlock);
294
295out:
296 spin_unlock(&hwspinlock_tree_lock);
297 return ret;
298}
299EXPORT_SYMBOL_GPL(hwspin_lock_register);
300
301/**
302 * hwspin_lock_unregister() - unregister an hw spinlock
303 * @id: index of the specific hwspinlock to unregister
304 *
305 * This function should be called from the underlying platform-specific
306 * implementation, to unregister an existing (and unused) hwspinlock.
307 *
308 * Can be called from an atomic context (will not sleep) but not from
309 * within interrupt context.
310 *
311 * Returns the address of hwspinlock @id on success, or NULL on failure
312 */
313struct hwspinlock *hwspin_lock_unregister(unsigned int id)
314{
315 struct hwspinlock *hwlock = NULL;
316 int ret;
317
318 spin_lock(&hwspinlock_tree_lock);
319
320 /* make sure the hwspinlock is not in use (tag is set) */
321 ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
322 if (ret == 0) {
323 pr_err("hwspinlock %d still in use (or not present)\n", id);
324 goto out;
325 }
326
327 hwlock = radix_tree_delete(&hwspinlock_tree, id);
328 if (!hwlock) {
329 pr_err("failed to delete hwspinlock %d\n", id);
330 goto out;
331 }
332
333out:
334 spin_unlock(&hwspinlock_tree_lock);
335 return hwlock;
336}
337EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
338
339/**
340 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
341 *
342 * This is an internal function that prepares an hwspinlock instance
343 * before it is given to the user. The function assumes that
344 * hwspinlock_tree_lock is taken.
345 *
346 * Returns 0 or positive to indicate success, and a negative value to
347 * indicate an error (with the appropriate error code)
348 */
349static int __hwspin_lock_request(struct hwspinlock *hwlock)
350{
351 struct hwspinlock *tmp;
352 int ret;
353
354 /* prevent underlying implementation from being removed */
355 if (!try_module_get(hwlock->owner)) {
356 dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
357 return -EINVAL;
358 }
359
360 /* notify PM core that power is now needed */
361 ret = pm_runtime_get_sync(hwlock->dev);
362 if (ret < 0) {
363 dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
364 return ret;
365 }
366
367 /* mark hwspinlock as used, should not fail */
368 tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
369 HWSPINLOCK_UNUSED);
370
371 /* self-sanity check that should never fail */
372 WARN_ON(tmp != hwlock);
373
374 return ret;
375}
376
377/**
378 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
379 * @hwlock: a valid hwspinlock instance
380 *
381 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
382 */
383int hwspin_lock_get_id(struct hwspinlock *hwlock)
384{
385 if (!hwlock) {
386 pr_err("invalid hwlock\n");
387 return -EINVAL;
388 }
389
390 return hwlock->id;
391}
392EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
393
394/**
395 * hwspin_lock_request() - request an hwspinlock
396 *
397 * This function should be called by users of the hwspinlock device,
398 * in order to dynamically assign them an unused hwspinlock.
399 * Usually the user of this lock will then have to communicate the lock's id
400 * to the remote core before it can be used for synchronization (to get the
401 * id of a given hwlock, use hwspin_lock_get_id()).
402 *
403 * Can be called from an atomic context (will not sleep) but not from
404 * within interrupt context (simply because there is no use case for
405 * that yet).
406 *
407 * Returns the address of the assigned hwspinlock, or NULL on error
408 */
409struct hwspinlock *hwspin_lock_request(void)
410{
411 struct hwspinlock *hwlock;
412 int ret;
413
414 spin_lock(&hwspinlock_tree_lock);
415
416 /* look for an unused lock */
417 ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
418 0, 1, HWSPINLOCK_UNUSED);
419 if (ret == 0) {
420 pr_warn("a free hwspinlock is not available\n");
421 hwlock = NULL;
422 goto out;
423 }
424
425 /* sanity check that should never fail */
426 WARN_ON(ret > 1);
427
428 /* mark as used and power up */
429 ret = __hwspin_lock_request(hwlock);
430 if (ret < 0)
431 hwlock = NULL;
432
433out:
434 spin_unlock(&hwspinlock_tree_lock);
435 return hwlock;
436}
437EXPORT_SYMBOL_GPL(hwspin_lock_request);
438
439/**
440 * hwspin_lock_request_specific() - request for a specific hwspinlock
441 * @id: index of the specific hwspinlock that is requested
442 *
443 * This function should be called by users of the hwspinlock module,
444 * in order to assign them a specific hwspinlock.
445 * Usually early board code will be calling this function in order to
446 * reserve specific hwspinlock ids for predefined purposes.
447 *
448 * Can be called from an atomic context (will not sleep) but not from
449 * within interrupt context (simply because there is no use case for
450 * that yet).
451 *
452 * Returns the address of the assigned hwspinlock, or NULL on error
453 */
454struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
455{
456 struct hwspinlock *hwlock;
457 int ret;
458
459 spin_lock(&hwspinlock_tree_lock);
460
461 /* make sure this hwspinlock exists */
462 hwlock = radix_tree_lookup(&hwspinlock_tree, id);
463 if (!hwlock) {
464 pr_warn("hwspinlock %u does not exist\n", id);
465 goto out;
466 }
467
468 /* sanity check (this shouldn't happen) */
469 WARN_ON(hwlock->id != id);
470
471 /* make sure this hwspinlock is unused */
472 ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
473 if (ret == 0) {
474 pr_warn("hwspinlock %u is already in use\n", id);
475 hwlock = NULL;
476 goto out;
477 }
478
479 /* mark as used and power up */
480 ret = __hwspin_lock_request(hwlock);
481 if (ret < 0)
482 hwlock = NULL;
483
484out:
485 spin_unlock(&hwspinlock_tree_lock);
486 return hwlock;
487}
488EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
489
490/**
491 * hwspin_lock_free() - free a specific hwspinlock
492 * @hwlock: the specific hwspinlock to free
493 *
494 * This function marks @hwlock as free again.
495 * Should only be called with an @hwlock that was retrieved from
496 * an earlier call to hwspin_lock_request{_specific}.
497 *
498 * Can be called from an atomic context (will not sleep) but not from
499 * within interrupt context (simply because there is no use case for
500 * that yet).
501 *
502 * Returns 0 on success, or an appropriate error code on failure
503 */
504int hwspin_lock_free(struct hwspinlock *hwlock)
505{
506 struct hwspinlock *tmp;
507 int ret;
508
509 if (!hwlock) {
510 pr_err("invalid hwlock\n");
511 return -EINVAL;
512 }
513
514 spin_lock(&hwspinlock_tree_lock);
515
516 /* make sure the hwspinlock is used */
517 ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
518 HWSPINLOCK_UNUSED);
519 if (ret == 1) {
520 dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
521 dump_stack();
522 ret = -EINVAL;
523 goto out;
524 }
525
526 /* notify the underlying device that power is not needed */
527 ret = pm_runtime_put(hwlock->dev);
528 if (ret < 0)
529 goto out;
530
531 /* mark this hwspinlock as available */
532 tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
533 HWSPINLOCK_UNUSED);
534
535 /* sanity check (this shouldn't happen) */
536 WARN_ON(tmp != hwlock);
537
538 module_put(hwlock->owner);
539
540out:
541 spin_unlock(&hwspinlock_tree_lock);
542 return ret;
543}
544EXPORT_SYMBOL_GPL(hwspin_lock_free);
545
546MODULE_LICENSE("GPL v2");
547MODULE_DESCRIPTION("Hardware spinlock interface");
548MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
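For orientation, here is a minimal caller-side sketch of the API implemented above. It is hypothetical code, not part of the patch; it calls the low-level __hwspin_* entry points with HWLOCK_IRQSTATE directly, whereas real users would typically go through the convenience wrappers in include/linux/hwspinlock.h:

#include <linux/hwspinlock.h>

static struct hwspinlock *example_lock;

static int example_setup(void)
{
	/* dynamically grab any unused hwspinlock */
	example_lock = hwspin_lock_request();
	if (!example_lock)
		return -EBUSY;

	/* the id must be shared with the remote core before use */
	pr_info("got hwspinlock %d\n", hwspin_lock_get_id(example_lock));
	return 0;
}

static void example_touch_shared_data(void)
{
	unsigned long flags;

	/* spin for at most 100 msecs, saving/restoring the irq state */
	if (__hwspin_lock_timeout(example_lock, 100, HWLOCK_IRQSTATE, &flags))
		return;

	/* ... access memory shared with the remote processor ... */

	__hwspin_unlock(example_lock, HWLOCK_IRQSTATE, &flags);
}

static void example_teardown(void)
{
	hwspin_lock_free(example_lock);
}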
diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
new file mode 100644
index 000000000000..69935e6b93e5
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -0,0 +1,61 @@
1/*
2 * Hardware spinlocks internal header
3 *
4 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#ifndef __HWSPINLOCK_HWSPINLOCK_H
19#define __HWSPINLOCK_HWSPINLOCK_H
20
21#include <linux/spinlock.h>
22#include <linux/device.h>
23
24/**
25 * struct hwspinlock_ops - platform-specific hwspinlock handlers
26 *
27 * @trylock: make a single attempt to take the lock. returns 0 on
28 * failure and true on success. may _not_ sleep.
29 * @unlock: release the lock. always succeed. may _not_ sleep.
30 * @relax: optional, platform-specific relax handler, called by hwspinlock
31 * core while spinning on a lock, between two successive
32 * invocations of @trylock. may _not_ sleep.
33 */
34struct hwspinlock_ops {
35 int (*trylock)(struct hwspinlock *lock);
36 void (*unlock)(struct hwspinlock *lock);
37 void (*relax)(struct hwspinlock *lock);
38};
39
40/**
41 * struct hwspinlock - this struct represents a single hwspinlock instance
42 *
43 * @dev: underlying device, will be used to invoke runtime PM api
44 * @ops: platform-specific hwspinlock handlers
45 * @id: a global, unique, system-wide, index of the lock.
46 * @lock: initialized and used by hwspinlock core
47 * @owner: underlying implementation module, used to maintain module ref count
48 *
49 * Note: we currently opt for simplicity, but later we can squeeze some
50 * memory bytes by grouping the dev, ops and owner members in a single
51 * per-platform struct, and have all hwspinlocks point at it.
52 */
53struct hwspinlock {
54 struct device *dev;
55 const struct hwspinlock_ops *ops;
56 int id;
57 spinlock_t lock;
58 struct module *owner;
59};
60
61#endif /* __HWSPINLOCK_HWSPINLOCK_H */
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
new file mode 100644
index 000000000000..a8f02734c026
--- /dev/null
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -0,0 +1,231 @@
1/*
2 * OMAP hardware spinlock driver
3 *
4 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Contact: Simon Que <sque@ti.com>
7 * Hari Kanigeri <h-kanigeri2@ti.com>
8 * Ohad Ben-Cohen <ohad@wizery.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/device.h>
23#include <linux/delay.h>
24#include <linux/io.h>
25#include <linux/bitops.h>
26#include <linux/pm_runtime.h>
27#include <linux/slab.h>
28#include <linux/spinlock.h>
29#include <linux/hwspinlock.h>
30#include <linux/platform_device.h>
31
32#include "hwspinlock_internal.h"
33
34/* Spinlock register offsets */
35#define SYSSTATUS_OFFSET 0x0014
36#define LOCK_BASE_OFFSET 0x0800
37
38#define SPINLOCK_NUMLOCKS_BIT_OFFSET (24)
39
40/* Possible values of SPINLOCK_LOCK_REG */
41#define SPINLOCK_NOTTAKEN (0) /* free */
42#define SPINLOCK_TAKEN (1) /* locked */
43
44#define to_omap_hwspinlock(lock) \
45 container_of(lock, struct omap_hwspinlock, lock)
46
47struct omap_hwspinlock {
48 struct hwspinlock lock;
49 void __iomem *addr;
50};
51
52struct omap_hwspinlock_state {
53 int num_locks; /* Total number of locks in system */
54 void __iomem *io_base; /* Mapped base address */
55};
56
57static int omap_hwspinlock_trylock(struct hwspinlock *lock)
58{
59 struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
60
61 /* attempt to acquire the lock by reading its value */
62 return (SPINLOCK_NOTTAKEN == readl(omap_lock->addr));
63}
64
65static void omap_hwspinlock_unlock(struct hwspinlock *lock)
66{
67 struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
68
69 /* release the lock by writing 0 to it */
70 writel(SPINLOCK_NOTTAKEN, omap_lock->addr);
71}
72
73/*
74 * relax the OMAP interconnect while spinning on it.
75 *
76 * The specs recommend that the retry delay be
77 * just over half of the time that a requester is
78 * expected to hold the lock.
79 *
80 * The value below is taken from a hardware specs example,
81 * so it is somewhat arbitrary.
82 */
83static void omap_hwspinlock_relax(struct hwspinlock *lock)
84{
85 ndelay(50);
86}
87
88static const struct hwspinlock_ops omap_hwspinlock_ops = {
89 .trylock = omap_hwspinlock_trylock,
90 .unlock = omap_hwspinlock_unlock,
91 .relax = omap_hwspinlock_relax,
92};
93
94static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
95{
96 struct omap_hwspinlock *omap_lock;
97 struct omap_hwspinlock_state *state;
98 struct hwspinlock *lock;
99 struct resource *res;
100 void __iomem *io_base;
101 int i, ret;
102
103 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
104 if (!res)
105 return -ENODEV;
106
107 state = kzalloc(sizeof(*state), GFP_KERNEL);
108 if (!state)
109 return -ENOMEM;
110
111 io_base = ioremap(res->start, resource_size(res));
112 if (!io_base) {
113 ret = -ENOMEM;
114 goto free_state;
115 }
116
117 /* Determine number of locks */
118 i = readl(io_base + SYSSTATUS_OFFSET);
119 i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET;
120
121 /* one of the four lsb's must be set, and nothing else */
122 if (hweight_long(i & 0xf) != 1 || i > 8) {
123 ret = -EINVAL;
124 goto iounmap_base;
125 }
126
127 state->num_locks = i * 32;
128 state->io_base = io_base;
129
130 platform_set_drvdata(pdev, state);
131
132 /*
133 * runtime PM will make sure the clock of this module is
134 * enabled iff at least one lock is requested
135 */
136 pm_runtime_enable(&pdev->dev);
137
138 for (i = 0; i < state->num_locks; i++) {
139 omap_lock = kzalloc(sizeof(*omap_lock), GFP_KERNEL);
140 if (!omap_lock) {
141 ret = -ENOMEM;
142 goto free_locks;
143 }
144
145 omap_lock->lock.dev = &pdev->dev;
146 omap_lock->lock.owner = THIS_MODULE;
147 omap_lock->lock.id = i;
148 omap_lock->lock.ops = &omap_hwspinlock_ops;
149 omap_lock->addr = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
150
151 ret = hwspin_lock_register(&omap_lock->lock);
152 if (ret) {
153 kfree(omap_lock);
154 goto free_locks;
155 }
156 }
157
158 return 0;
159
160free_locks:
161 while (--i >= 0) {
162 lock = hwspin_lock_unregister(i);
163 /* this shouldn't happen, but let's give our best effort */
164 if (!lock) {
165 dev_err(&pdev->dev, "%s: cleanups failed\n", __func__);
166 continue;
167 }
168 omap_lock = to_omap_hwspinlock(lock);
169 kfree(omap_lock);
170 }
171 pm_runtime_disable(&pdev->dev);
172iounmap_base:
173 iounmap(io_base);
174free_state:
175 kfree(state);
176 return ret;
177}
178
179static int omap_hwspinlock_remove(struct platform_device *pdev)
180{
181 struct omap_hwspinlock_state *state = platform_get_drvdata(pdev);
182 struct hwspinlock *lock;
183 struct omap_hwspinlock *omap_lock;
184 int i;
185
186 for (i = 0; i < state->num_locks; i++) {
187 lock = hwspin_lock_unregister(i);
188 /* this shouldn't happen at this point. if it does, at least
189 * don't continue with the remove */
190 if (!lock) {
191 dev_err(&pdev->dev, "%s: failed on %d\n", __func__, i);
192 return -EBUSY;
193 }
194
195 omap_lock = to_omap_hwspinlock(lock);
196 kfree(omap_lock);
197 }
198
199 pm_runtime_disable(&pdev->dev);
200 iounmap(state->io_base);
201 kfree(state);
202
203 return 0;
204}
205
206static struct platform_driver omap_hwspinlock_driver = {
207 .probe = omap_hwspinlock_probe,
208 .remove = omap_hwspinlock_remove,
209 .driver = {
210 .name = "omap_hwspinlock",
211 },
212};
213
214static int __init omap_hwspinlock_init(void)
215{
216 return platform_driver_register(&omap_hwspinlock_driver);
217}
218/* board init code might need to reserve hwspinlocks for predefined purposes */
219postcore_initcall(omap_hwspinlock_init);
220
221static void __exit omap_hwspinlock_exit(void)
222{
223 platform_driver_unregister(&omap_hwspinlock_driver);
224}
225module_exit(omap_hwspinlock_exit);
226
227MODULE_LICENSE("GPL v2");
228MODULE_DESCRIPTION("Hardware spinlock driver for OMAP");
229MODULE_AUTHOR("Simon Que <sque@ti.com>");
230MODULE_AUTHOR("Hari Kanigeri <h-kanigeri2@ti.com>");
231MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
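
Not part of the patch: the postcore_initcall above exists so board init code can reserve specific locks for predefined purposes before ordinary drivers probe. A minimal board-code sketch follows; the lock id 0, the "IPC" purpose and the arch_initcall level are assumptions made only for illustration.

/* Illustrative board-code sketch, not part of this patch. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/hwspinlock.h>

static struct hwspinlock *board_ipc_hwlock;	/* hypothetical: lock 0 kept for IPC */

static int __init board_reserve_hwspinlock(void)
{
	/* NULL/ERR_PTR means the lock is unavailable (see hwspinlock.h below) */
	board_ipc_hwlock = hwspin_lock_request_specific(0);
	if (IS_ERR_OR_NULL(board_ipc_hwlock))
		return -EBUSY;

	pr_info("reserved hwspinlock %d for IPC\n",
		hwspin_lock_get_id(board_ipc_hwlock));
	return 0;
}
/* runs after postcore_initcall, i.e. after the locks were registered */
arch_initcall(board_reserve_hwspinlock);
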
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 078fdf11af03..8c42573f42ea 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -260,7 +260,7 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on,
260 return ret; 260 return ret;
261} 261}
262 262
263static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on, 263static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on,
264 int vdd) 264 int vdd)
265{ 265{
266 struct omap_hsmmc_host *host = 266 struct omap_hsmmc_host *host =
@@ -316,6 +316,12 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
316 return ret; 316 return ret;
317} 317}
318 318
319static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on,
320 int vdd)
321{
322 return 0;
323}
324
319static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, 325static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
320 int vdd, int cardsleep) 326 int vdd, int cardsleep)
321{ 327{
@@ -326,7 +332,7 @@ static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
326 return regulator_set_mode(host->vcc, mode); 332 return regulator_set_mode(host->vcc, mode);
327} 333}
328 334
329static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep, 335static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep,
330 int vdd, int cardsleep) 336 int vdd, int cardsleep)
331{ 337{
332 struct omap_hsmmc_host *host = 338 struct omap_hsmmc_host *host =
@@ -365,6 +371,12 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
365 return regulator_enable(host->vcc_aux); 371 return regulator_enable(host->vcc_aux);
366} 372}
367 373
374static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep,
375 int vdd, int cardsleep)
376{
377 return 0;
378}
379
368static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) 380static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
369{ 381{
370 struct regulator *reg; 382 struct regulator *reg;
@@ -379,10 +391,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
379 break; 391 break;
380 case OMAP_MMC2_DEVID: 392 case OMAP_MMC2_DEVID:
381 case OMAP_MMC3_DEVID: 393 case OMAP_MMC3_DEVID:
394 case OMAP_MMC5_DEVID:
382 /* Off-chip level shifting, or none */ 395 /* Off-chip level shifting, or none */
383 mmc_slot(host).set_power = omap_hsmmc_23_set_power; 396 mmc_slot(host).set_power = omap_hsmmc_235_set_power;
384 mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep; 397 mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep;
385 break; 398 break;
399 case OMAP_MMC4_DEVID:
400 mmc_slot(host).set_power = omap_hsmmc_4_set_power;
401 mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep;
386 default: 402 default:
387 pr_err("MMC%d configuration not supported!\n", host->id); 403 pr_err("MMC%d configuration not supported!\n", host->id);
388 return -EINVAL; 404 return -EINVAL;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index c89592239bc7..178e2006063d 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -106,23 +106,6 @@ config MTD_NAND_OMAP2
106 help 106 help
107 Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms. 107 Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
108 108
109config MTD_NAND_OMAP_PREFETCH
110 bool "GPMC prefetch support for NAND Flash device"
111 depends on MTD_NAND_OMAP2
112 default y
113 help
114 The NAND device can be accessed for Read/Write using GPMC PREFETCH engine
115 to improve the performance.
116
117config MTD_NAND_OMAP_PREFETCH_DMA
118 depends on MTD_NAND_OMAP_PREFETCH
119 bool "DMA mode"
120 default n
121 help
122 The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode
123 or in DMA interrupt mode.
124 Say y for DMA mode or MPU mode will be used
125
126config MTD_NAND_IDS 109config MTD_NAND_IDS
127 tristate 110 tristate
128 111
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 15682ec8530e..4e33972ad17a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,7 @@
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/dma-mapping.h> 12#include <linux/dma-mapping.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/interrupt.h>
14#include <linux/jiffies.h> 15#include <linux/jiffies.h>
15#include <linux/sched.h> 16#include <linux/sched.h>
16#include <linux/mtd/mtd.h> 17#include <linux/mtd/mtd.h>
@@ -24,6 +25,7 @@
24#include <plat/nand.h> 25#include <plat/nand.h>
25 26
26#define DRIVER_NAME "omap2-nand" 27#define DRIVER_NAME "omap2-nand"
28#define OMAP_NAND_TIMEOUT_MS 5000
27 29
28#define NAND_Ecc_P1e (1 << 0) 30#define NAND_Ecc_P1e (1 << 0)
29#define NAND_Ecc_P2e (1 << 1) 31#define NAND_Ecc_P2e (1 << 1)
@@ -96,26 +98,19 @@
96static const char *part_probes[] = { "cmdlinepart", NULL }; 98static const char *part_probes[] = { "cmdlinepart", NULL };
97#endif 99#endif
98 100
99#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH 101/* oob info generated runtime depending on ecc algorithm and layout selected */
100static int use_prefetch = 1; 102static struct nand_ecclayout omap_oobinfo;
101 103/* Define some generic bad / good block scan pattern which are used
102/* "modprobe ... use_prefetch=0" etc */ 104 * while scanning a device for factory marked good / bad blocks
103module_param(use_prefetch, bool, 0); 105 */
104MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH"); 106static uint8_t scan_ff_pattern[] = { 0xff };
105 107static struct nand_bbt_descr bb_descrip_flashbased = {
106#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA 108 .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
107static int use_dma = 1; 109 .offs = 0,
110 .len = 1,
111 .pattern = scan_ff_pattern,
112};
108 113
109/* "modprobe ... use_dma=0" etc */
110module_param(use_dma, bool, 0);
111MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
112#else
113static const int use_dma;
114#endif
115#else
116const int use_prefetch;
117static const int use_dma;
118#endif
119 114
120struct omap_nand_info { 115struct omap_nand_info {
121 struct nand_hw_control controller; 116 struct nand_hw_control controller;
@@ -129,6 +124,13 @@ struct omap_nand_info {
129 unsigned long phys_base; 124 unsigned long phys_base;
130 struct completion comp; 125 struct completion comp;
131 int dma_ch; 126 int dma_ch;
127 int gpmc_irq;
128 enum {
129 OMAP_NAND_IO_READ = 0, /* read */
130 OMAP_NAND_IO_WRITE, /* write */
131 } iomode;
132 u_char *buf;
133 int buf_len;
132}; 134};
133 135
134/** 136/**
@@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
256 } 258 }
257 259
258 /* configure and start prefetch transfer */ 260 /* configure and start prefetch transfer */
259 ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0); 261 ret = gpmc_prefetch_enable(info->gpmc_cs,
262 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
260 if (ret) { 263 if (ret) {
261 /* PFPW engine is busy, use cpu copy method */ 264 /* PFPW engine is busy, use cpu copy method */
262 if (info->nand.options & NAND_BUSWIDTH_16) 265 if (info->nand.options & NAND_BUSWIDTH_16)
@@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
288{ 291{
289 struct omap_nand_info *info = container_of(mtd, 292 struct omap_nand_info *info = container_of(mtd,
290 struct omap_nand_info, mtd); 293 struct omap_nand_info, mtd);
291 uint32_t pref_count = 0, w_count = 0; 294 uint32_t w_count = 0;
292 int i = 0, ret = 0; 295 int i = 0, ret = 0;
293 u16 *p; 296 u16 *p;
297 unsigned long tim, limit;
294 298
295 /* take care of subpage writes */ 299 /* take care of subpage writes */
296 if (len % 2 != 0) { 300 if (len % 2 != 0) {
@@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
300 } 304 }
301 305
302 /* configure and start prefetch transfer */ 306 /* configure and start prefetch transfer */
303 ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1); 307 ret = gpmc_prefetch_enable(info->gpmc_cs,
308 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
304 if (ret) { 309 if (ret) {
305 /* PFPW engine is busy, use cpu copy method */ 310 /* PFPW engine is busy, use cpu copy method */
306 if (info->nand.options & NAND_BUSWIDTH_16) 311 if (info->nand.options & NAND_BUSWIDTH_16)
@@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
316 iowrite16(*p++, info->nand.IO_ADDR_W); 321 iowrite16(*p++, info->nand.IO_ADDR_W);
317 } 322 }
318 /* wait for data to flushed-out before reset the prefetch */ 323 /* wait for data to flushed-out before reset the prefetch */
319 do { 324 tim = 0;
320 pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT); 325 limit = (loops_per_jiffy *
321 } while (pref_count); 326 msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
327 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
328 cpu_relax();
329
322 /* disable and stop the PFPW engine */ 330 /* disable and stop the PFPW engine */
323 gpmc_prefetch_reset(info->gpmc_cs); 331 gpmc_prefetch_reset(info->gpmc_cs);
324 } 332 }
325} 333}
326 334
327#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
328/* 335/*
329 * omap_nand_dma_cb: callback on the completion of dma transfer 336 * omap_nand_dma_cb: callback on the completion of dma transfer
330 * @lch: logical channel 337 * @lch: logical channel
@@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
348{ 355{
349 struct omap_nand_info *info = container_of(mtd, 356 struct omap_nand_info *info = container_of(mtd,
350 struct omap_nand_info, mtd); 357 struct omap_nand_info, mtd);
351 uint32_t prefetch_status = 0;
352 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : 358 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
353 DMA_FROM_DEVICE; 359 DMA_FROM_DEVICE;
354 dma_addr_t dma_addr; 360 dma_addr_t dma_addr;
355 int ret; 361 int ret;
362 unsigned long tim, limit;
356 363
357 /* The fifo depth is 64 bytes. We have a sync at each frame and frame 364 /* The fifo depth is 64 bytes max.
358 * length is 64 bytes. 365 * But configure the FIFO-threshold to 32 to get a sync at each frame
366 * and frame length is 32 bytes.
359 */ 367 */
360 int buf_len = len >> 6; 368 int buf_len = len >> 6;
361 369
@@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
396 OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); 404 OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
397 } 405 }
398 /* configure and start prefetch transfer */ 406 /* configure and start prefetch transfer */
399 ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write); 407 ret = gpmc_prefetch_enable(info->gpmc_cs,
408 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
400 if (ret) 409 if (ret)
401 /* PFPW engine is busy, use cpu copy methode */ 410 /* PFPW engine is busy, use cpu copy method */
402 goto out_copy; 411 goto out_copy;
403 412
404 init_completion(&info->comp); 413 init_completion(&info->comp);
@@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
407 416
408 /* setup and start DMA using dma_addr */ 417 /* setup and start DMA using dma_addr */
409 wait_for_completion(&info->comp); 418 wait_for_completion(&info->comp);
419 tim = 0;
420 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
421 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
422 cpu_relax();
410 423
411 do {
412 prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
413 } while (prefetch_status);
414 /* disable and stop the PFPW engine */ 424 /* disable and stop the PFPW engine */
415 gpmc_prefetch_reset(info->gpmc_cs); 425 gpmc_prefetch_reset(info->gpmc_cs);
416 426
@@ -426,14 +436,6 @@ out_copy:
426 : omap_write_buf8(mtd, (u_char *) addr, len); 436 : omap_write_buf8(mtd, (u_char *) addr, len);
427 return 0; 437 return 0;
428} 438}
429#else
430static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
431static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
432 unsigned int len, int is_write)
433{
434 return 0;
435}
436#endif
437 439
438/** 440/**
439 * omap_read_buf_dma_pref - read data from NAND controller into buffer 441 * omap_read_buf_dma_pref - read data from NAND controller into buffer
@@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
466 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1); 468 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
467} 469}
468 470
471/*
472 * omap_nand_irq - GPMC irq handler
473 * @this_irq: gpmc irq number
474 * @dev: omap_nand_info structure pointer is passed here
475 */
476static irqreturn_t omap_nand_irq(int this_irq, void *dev)
477{
478 struct omap_nand_info *info = (struct omap_nand_info *) dev;
479 u32 bytes;
480 u32 irq_stat;
481
482 irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
483 bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
484 bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
485 if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
486 if (irq_stat & 0x2)
487 goto done;
488
489 if (info->buf_len && (info->buf_len < bytes))
490 bytes = info->buf_len;
491 else if (!info->buf_len)
492 bytes = 0;
493 iowrite32_rep(info->nand.IO_ADDR_W,
494 (u32 *)info->buf, bytes >> 2);
495 info->buf = info->buf + bytes;
496 info->buf_len -= bytes;
497
498 } else {
499 ioread32_rep(info->nand.IO_ADDR_R,
500 (u32 *)info->buf, bytes >> 2);
501 info->buf = info->buf + bytes;
502
503 if (irq_stat & 0x2)
504 goto done;
505 }
506 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
507
508 return IRQ_HANDLED;
509
510done:
511 complete(&info->comp);
512 /* disable irq */
513 gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
514
515 /* clear status */
516 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
517
518 return IRQ_HANDLED;
519}
520
521/*
522 * omap_read_buf_irq_pref - read data from NAND controller into buffer
523 * @mtd: MTD device structure
524 * @buf: buffer to store data
525 * @len: number of bytes to read
526 */
527static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
528{
529 struct omap_nand_info *info = container_of(mtd,
530 struct omap_nand_info, mtd);
531 int ret = 0;
532
533 if (len <= mtd->oobsize) {
534 omap_read_buf_pref(mtd, buf, len);
535 return;
536 }
537
538 info->iomode = OMAP_NAND_IO_READ;
539 info->buf = buf;
540 init_completion(&info->comp);
541
542 /* configure and start prefetch transfer */
543 ret = gpmc_prefetch_enable(info->gpmc_cs,
544 PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
545 if (ret)
546 /* PFPW engine is busy, use cpu copy method */
547 goto out_copy;
548
549 info->buf_len = len;
550 /* enable irq */
551 gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
552 (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
553
554 /* waiting for read to complete */
555 wait_for_completion(&info->comp);
556
557 /* disable and stop the PFPW engine */
558 gpmc_prefetch_reset(info->gpmc_cs);
559 return;
560
561out_copy:
562 if (info->nand.options & NAND_BUSWIDTH_16)
563 omap_read_buf16(mtd, buf, len);
564 else
565 omap_read_buf8(mtd, buf, len);
566}
567
568/*
569 * omap_write_buf_irq_pref - write buffer to NAND controller
570 * @mtd: MTD device structure
571 * @buf: data buffer
572 * @len: number of bytes to write
573 */
574static void omap_write_buf_irq_pref(struct mtd_info *mtd,
575 const u_char *buf, int len)
576{
577 struct omap_nand_info *info = container_of(mtd,
578 struct omap_nand_info, mtd);
579 int ret = 0;
580 unsigned long tim, limit;
581
582 if (len <= mtd->oobsize) {
583 omap_write_buf_pref(mtd, buf, len);
584 return;
585 }
586
587 info->iomode = OMAP_NAND_IO_WRITE;
588 info->buf = (u_char *) buf;
589 init_completion(&info->comp);
590
591 /* configure and start prefetch transfer : size=24 */
592 ret = gpmc_prefetch_enable(info->gpmc_cs,
593 (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
594 if (ret)
595 /* PFPW engine is busy, use cpu copy method */
596 goto out_copy;
597
598 info->buf_len = len;
599 /* enable irq */
600 gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
601 (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
602
603 /* waiting for write to complete */
604 wait_for_completion(&info->comp);
605 /* wait for data to be flushed out before resetting the prefetch */
606 tim = 0;
607 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
608 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
609 cpu_relax();
610
611 /* disable and stop the PFPW engine */
612 gpmc_prefetch_reset(info->gpmc_cs);
613 return;
614
615out_copy:
616 if (info->nand.options & NAND_BUSWIDTH_16)
617 omap_write_buf16(mtd, buf, len);
618 else
619 omap_write_buf8(mtd, buf, len);
620}
621
469/** 622/**
470 * omap_verify_buf - Verify chip data against buffer 623 * omap_verify_buf - Verify chip data against buffer
471 * @mtd: MTD device structure 624 * @mtd: MTD device structure
@@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
487 return 0; 640 return 0;
488} 641}
489 642
490#ifdef CONFIG_MTD_NAND_OMAP_HWECC
491
492/** 643/**
493 * gen_true_ecc - This function will generate true ECC value 644 * gen_true_ecc - This function will generate true ECC value
494 * @ecc_buf: buffer to store ecc code 645 * @ecc_buf: buffer to store ecc code
@@ -708,8 +859,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
708 gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size); 859 gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
709} 860}
710 861
711#endif
712
713/** 862/**
714 * omap_wait - wait until the command is done 863 * omap_wait - wait until the command is done
715 * @mtd: MTD device structure 864 * @mtd: MTD device structure
@@ -779,6 +928,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
779 struct omap_nand_info *info; 928 struct omap_nand_info *info;
780 struct omap_nand_platform_data *pdata; 929 struct omap_nand_platform_data *pdata;
781 int err; 930 int err;
931 int i, offset;
782 932
783 pdata = pdev->dev.platform_data; 933 pdata = pdev->dev.platform_data;
784 if (pdata == NULL) { 934 if (pdata == NULL) {
@@ -804,7 +954,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
804 info->mtd.name = dev_name(&pdev->dev); 954 info->mtd.name = dev_name(&pdev->dev);
805 info->mtd.owner = THIS_MODULE; 955 info->mtd.owner = THIS_MODULE;
806 956
807 info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0; 957 info->nand.options = pdata->devsize;
808 info->nand.options |= NAND_SKIP_BBTSCAN; 958 info->nand.options |= NAND_SKIP_BBTSCAN;
809 959
810 /* NAND write protect off */ 960 /* NAND write protect off */
@@ -842,28 +992,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
842 info->nand.chip_delay = 50; 992 info->nand.chip_delay = 50;
843 } 993 }
844 994
845 if (use_prefetch) { 995 switch (pdata->xfer_type) {
846 996 case NAND_OMAP_PREFETCH_POLLED:
847 info->nand.read_buf = omap_read_buf_pref; 997 info->nand.read_buf = omap_read_buf_pref;
848 info->nand.write_buf = omap_write_buf_pref; 998 info->nand.write_buf = omap_write_buf_pref;
849 if (use_dma) { 999 break;
850 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", 1000
851 omap_nand_dma_cb, &info->comp, &info->dma_ch); 1001 case NAND_OMAP_POLLED:
852 if (err < 0) {
853 info->dma_ch = -1;
854 printk(KERN_WARNING "DMA request failed."
855 " Non-dma data transfer mode\n");
856 } else {
857 omap_set_dma_dest_burst_mode(info->dma_ch,
858 OMAP_DMA_DATA_BURST_16);
859 omap_set_dma_src_burst_mode(info->dma_ch,
860 OMAP_DMA_DATA_BURST_16);
861
862 info->nand.read_buf = omap_read_buf_dma_pref;
863 info->nand.write_buf = omap_write_buf_dma_pref;
864 }
865 }
866 } else {
867 if (info->nand.options & NAND_BUSWIDTH_16) { 1002 if (info->nand.options & NAND_BUSWIDTH_16) {
868 info->nand.read_buf = omap_read_buf16; 1003 info->nand.read_buf = omap_read_buf16;
869 info->nand.write_buf = omap_write_buf16; 1004 info->nand.write_buf = omap_write_buf16;
@@ -871,20 +1006,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
871 info->nand.read_buf = omap_read_buf8; 1006 info->nand.read_buf = omap_read_buf8;
872 info->nand.write_buf = omap_write_buf8; 1007 info->nand.write_buf = omap_write_buf8;
873 } 1008 }
1009 break;
1010
1011 case NAND_OMAP_PREFETCH_DMA:
1012 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
1013 omap_nand_dma_cb, &info->comp, &info->dma_ch);
1014 if (err < 0) {
1015 info->dma_ch = -1;
1016 dev_err(&pdev->dev, "DMA request failed!\n");
1017 goto out_release_mem_region;
1018 } else {
1019 omap_set_dma_dest_burst_mode(info->dma_ch,
1020 OMAP_DMA_DATA_BURST_16);
1021 omap_set_dma_src_burst_mode(info->dma_ch,
1022 OMAP_DMA_DATA_BURST_16);
1023
1024 info->nand.read_buf = omap_read_buf_dma_pref;
1025 info->nand.write_buf = omap_write_buf_dma_pref;
1026 }
1027 break;
1028
1029 case NAND_OMAP_PREFETCH_IRQ:
1030 err = request_irq(pdata->gpmc_irq,
1031 omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
1032 if (err) {
1033 dev_err(&pdev->dev, "requesting irq(%d) error:%d",
1034 pdata->gpmc_irq, err);
1035 goto out_release_mem_region;
1036 } else {
1037 info->gpmc_irq = pdata->gpmc_irq;
1038 info->nand.read_buf = omap_read_buf_irq_pref;
1039 info->nand.write_buf = omap_write_buf_irq_pref;
1040 }
1041 break;
1042
1043 default:
1044 dev_err(&pdev->dev,
1045 "xfer_type(%d) not supported!\n", pdata->xfer_type);
1046 err = -EINVAL;
1047 goto out_release_mem_region;
874 } 1048 }
875 info->nand.verify_buf = omap_verify_buf;
876 1049
877#ifdef CONFIG_MTD_NAND_OMAP_HWECC 1050 info->nand.verify_buf = omap_verify_buf;
878 info->nand.ecc.bytes = 3;
879 info->nand.ecc.size = 512;
880 info->nand.ecc.calculate = omap_calculate_ecc;
881 info->nand.ecc.hwctl = omap_enable_hwecc;
882 info->nand.ecc.correct = omap_correct_data;
883 info->nand.ecc.mode = NAND_ECC_HW;
884 1051
885#else 1052 /* select the ecc type */
886 info->nand.ecc.mode = NAND_ECC_SOFT; 1053 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
887#endif 1054 info->nand.ecc.mode = NAND_ECC_SOFT;
1055 else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
1056 (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
1057 info->nand.ecc.bytes = 3;
1058 info->nand.ecc.size = 512;
1059 info->nand.ecc.calculate = omap_calculate_ecc;
1060 info->nand.ecc.hwctl = omap_enable_hwecc;
1061 info->nand.ecc.correct = omap_correct_data;
1062 info->nand.ecc.mode = NAND_ECC_HW;
1063 }
888 1064
889 /* DIP switches on some boards change between 8 and 16 bit 1065 /* DIP switches on some boards change between 8 and 16 bit
890 * bus widths for flash. Try the other width if the first try fails. 1066 * bus widths for flash. Try the other width if the first try fails.
@@ -897,6 +1073,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
897 } 1073 }
898 } 1074 }
899 1075
1076 /* rom code layout */
1077 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
1078
1079 if (info->nand.options & NAND_BUSWIDTH_16)
1080 offset = 2;
1081 else {
1082 offset = 1;
1083 info->nand.badblock_pattern = &bb_descrip_flashbased;
1084 }
1085 omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
1086 for (i = 0; i < omap_oobinfo.eccbytes; i++)
1087 omap_oobinfo.eccpos[i] = i+offset;
1088
1089 omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
1090 omap_oobinfo.oobfree->length = info->mtd.oobsize -
1091 (offset + omap_oobinfo.eccbytes);
1092
1093 info->nand.ecc.layout = &omap_oobinfo;
1094 }
1095
900#ifdef CONFIG_MTD_PARTITIONS 1096#ifdef CONFIG_MTD_PARTITIONS
901 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); 1097 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
902 if (err > 0) 1098 if (err > 0)
@@ -926,9 +1122,12 @@ static int omap_nand_remove(struct platform_device *pdev)
926 mtd); 1122 mtd);
927 1123
928 platform_set_drvdata(pdev, NULL); 1124 platform_set_drvdata(pdev, NULL);
929 if (use_dma) 1125 if (info->dma_ch != -1)
930 omap_free_dma(info->dma_ch); 1126 omap_free_dma(info->dma_ch);
931 1127
1128 if (info->gpmc_irq)
1129 free_irq(info->gpmc_irq, info);
1130
932 /* Release NAND device, its internal structures and partitions */ 1131 /* Release NAND device, its internal structures and partitions */
933 nand_release(&info->mtd); 1132 nand_release(&info->mtd);
934 iounmap(info->nand.IO_ADDR_R); 1133 iounmap(info->nand.IO_ADDR_R);
@@ -947,16 +1146,8 @@ static struct platform_driver omap_nand_driver = {
947 1146
948static int __init omap_nand_init(void) 1147static int __init omap_nand_init(void)
949{ 1148{
950 printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME); 1149 pr_info("%s driver initializing\n", DRIVER_NAME);
951 1150
952 /* This check is required if driver is being
953 * loaded run time as a module
954 */
955 if ((1 == use_dma) && (0 == use_prefetch)) {
956 printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 "
957 "without use_prefetch'. Prefetch will not be"
958 " used in either mode (mpu or dma)\n");
959 }
960 return platform_driver_register(&omap_nand_driver); 1151 return platform_driver_register(&omap_nand_driver);
961} 1152}
962 1153
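
Not part of the patch: with the MTD_NAND_OMAP_PREFETCH* Kconfig options removed above, the transfer mode and ECC scheme are now chosen per board through omap_nand_platform_data. The sketch below is a board-file illustration under stated assumptions: the .cs field and the gpmc_nand_init() call mirror existing OMAP board code rather than anything added by this diff, and the values shown are placeholders.

/* Illustrative board-file sketch, not part of this patch. */
#include <linux/init.h>
#include <plat/nand.h>

static struct omap_nand_platform_data board_nand_data = {
	.cs		= 0,			/* GPMC chip-select, board specific */
	.xfer_type	= NAND_OMAP_PREFETCH_IRQ, /* or NAND_OMAP_POLLED,
						   * NAND_OMAP_PREFETCH_POLLED,
						   * NAND_OMAP_PREFETCH_DMA */
	.ecc_opt	= OMAP_ECC_HAMMING_CODE_HW,
	.devsize	= 0,			/* x8 device; NAND_BUSWIDTH_16 for x16 */
};

static void __init board_nand_init(void)
{
	/* gpmc-nand.c (also touched in this series) is assumed to fill in
	 * gpmc_irq and register the omap2-nand platform device */
	gpmc_nand_init(&board_nand_data);
}
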
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index ac31f461cc1c..ec26399e3cf2 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -63,7 +63,7 @@ struct omap2_onenand {
63 struct completion dma_done; 63 struct completion dma_done;
64 int dma_channel; 64 int dma_channel;
65 int freq; 65 int freq;
66 int (*setup)(void __iomem *base, int freq); 66 int (*setup)(void __iomem *base, int *freq_ptr);
67 struct regulator *regulator; 67 struct regulator *regulator;
68}; 68};
69 69
@@ -148,11 +148,9 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
148 wait_err("controller error", state, ctrl, intr); 148 wait_err("controller error", state, ctrl, intr);
149 return -EIO; 149 return -EIO;
150 } 150 }
151 if ((intr & intr_flags) != intr_flags) { 151 if ((intr & intr_flags) == intr_flags)
152 wait_err("timeout", state, ctrl, intr); 152 return 0;
153 return -EIO; 153 /* Continue in wait for interrupt branch */
154 }
155 return 0;
156 } 154 }
157 155
158 if (state != FL_READING) { 156 if (state != FL_READING) {
@@ -581,7 +579,7 @@ static int __adjust_timing(struct device *dev, void *data)
581 579
582 /* DMA is not in use so this is all that is needed */ 580 /* DMA is not in use so this is all that is needed */
583 /* Revisit for OMAP3! */ 581 /* Revisit for OMAP3! */
584 ret = c->setup(c->onenand.base, c->freq); 582 ret = c->setup(c->onenand.base, &c->freq);
585 583
586 return ret; 584 return ret;
587} 585}
@@ -673,7 +671,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
673 } 671 }
674 672
675 if (pdata->onenand_setup != NULL) { 673 if (pdata->onenand_setup != NULL) {
676 r = pdata->onenand_setup(c->onenand.base, c->freq); 674 r = pdata->onenand_setup(c->onenand.base, &c->freq);
677 if (r < 0) { 675 if (r < 0) {
678 dev_err(&pdev->dev, "Onenand platform setup failed: " 676 dev_err(&pdev->dev, "Onenand platform setup failed: "
679 "%d\n", r); 677 "%d\n", r);
@@ -718,8 +716,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
718 } 716 }
719 717
720 dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual " 718 dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
721 "base %p\n", c->gpmc_cs, c->phys_base, 719 "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
722 c->onenand.base); 720 c->onenand.base, c->freq);
723 721
724 c->pdev = pdev; 722 c->pdev = pdev;
725 c->mtd.name = dev_name(&pdev->dev); 723 c->mtd.name = dev_name(&pdev->dev);
@@ -754,24 +752,6 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
754 if ((r = onenand_scan(&c->mtd, 1)) < 0) 752 if ((r = onenand_scan(&c->mtd, 1)) < 0)
755 goto err_release_regulator; 753 goto err_release_regulator;
756 754
757 switch ((c->onenand.version_id >> 4) & 0xf) {
758 case 0:
759 c->freq = 40;
760 break;
761 case 1:
762 c->freq = 54;
763 break;
764 case 2:
765 c->freq = 66;
766 break;
767 case 3:
768 c->freq = 83;
769 break;
770 case 4:
771 c->freq = 104;
772 break;
773 }
774
775#ifdef CONFIG_MTD_PARTITIONS 755#ifdef CONFIG_MTD_PARTITIONS
776 r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0); 756 r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
777 if (r > 0) 757 if (r > 0)
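
Not part of the patch: the onenand_setup hook now takes an int * so the board code, which knows which sync frequency it actually programmed, reports it back instead of the driver guessing it from the OneNAND version register (the switch removed above). A hedged sketch of such a callback; the 83 MHz value is a placeholder and the timing programming is omitted.

/* Illustrative board-side callback, not part of this patch. */
#include <linux/io.h>

static int board_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
{
	int freq = 83;	/* MHz the board really configures on the GPMC; placeholder */

	/* program the sync timings here (board/GPMC specific, omitted) */

	*freq_ptr = freq;	/* reported back; printed by the probe message */
	return 0;
}

/* wired up through the platform data used by the driver above:
 *	.onenand_setup = board_onenand_setup,
 */
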
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index f076cc5c6fb0..36501adc125d 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2005, 2006 Nokia Corporation 4 * Copyright (C) 2005, 2006 Nokia Corporation
5 * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and 5 * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
6 * Juha Yrjölä <juha.yrjola@nokia.com> 6 * Juha Yrjölä <juha.yrjola@nokia.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -33,6 +33,7 @@
33#include <linux/clk.h> 33#include <linux/clk.h>
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/pm_runtime.h>
36 37
37#include <linux/spi/spi.h> 38#include <linux/spi/spi.h>
38 39
@@ -46,7 +47,6 @@
46#define OMAP2_MCSPI_MAX_CTRL 4 47#define OMAP2_MCSPI_MAX_CTRL 4
47 48
48#define OMAP2_MCSPI_REVISION 0x00 49#define OMAP2_MCSPI_REVISION 0x00
49#define OMAP2_MCSPI_SYSCONFIG 0x10
50#define OMAP2_MCSPI_SYSSTATUS 0x14 50#define OMAP2_MCSPI_SYSSTATUS 0x14
51#define OMAP2_MCSPI_IRQSTATUS 0x18 51#define OMAP2_MCSPI_IRQSTATUS 0x18
52#define OMAP2_MCSPI_IRQENABLE 0x1c 52#define OMAP2_MCSPI_IRQENABLE 0x1c
@@ -63,13 +63,6 @@
63 63
64/* per-register bitmasks: */ 64/* per-register bitmasks: */
65 65
66#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE BIT(4)
67#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP BIT(2)
68#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE BIT(0)
69#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET BIT(1)
70
71#define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0)
72
73#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) 66#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
74#define OMAP2_MCSPI_MODULCTRL_MS BIT(2) 67#define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
75#define OMAP2_MCSPI_MODULCTRL_STEST BIT(3) 68#define OMAP2_MCSPI_MODULCTRL_STEST BIT(3)
@@ -122,13 +115,12 @@ struct omap2_mcspi {
122 spinlock_t lock; 115 spinlock_t lock;
123 struct list_head msg_queue; 116 struct list_head msg_queue;
124 struct spi_master *master; 117 struct spi_master *master;
125 struct clk *ick;
126 struct clk *fck;
127 /* Virtual base address of the controller */ 118 /* Virtual base address of the controller */
128 void __iomem *base; 119 void __iomem *base;
129 unsigned long phys; 120 unsigned long phys;
130 /* SPI1 has 4 channels, while SPI2 has 2 */ 121 /* SPI1 has 4 channels, while SPI2 has 2 */
131 struct omap2_mcspi_dma *dma_channels; 122 struct omap2_mcspi_dma *dma_channels;
123 struct device *dev;
132}; 124};
133 125
134struct omap2_mcspi_cs { 126struct omap2_mcspi_cs {
@@ -144,7 +136,6 @@ struct omap2_mcspi_cs {
144 * corresponding registers are modified. 136 * corresponding registers are modified.
145 */ 137 */
146struct omap2_mcspi_regs { 138struct omap2_mcspi_regs {
147 u32 sysconfig;
148 u32 modulctrl; 139 u32 modulctrl;
149 u32 wakeupenable; 140 u32 wakeupenable;
150 struct list_head cs; 141 struct list_head cs;
@@ -268,9 +259,6 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
268 mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, 259 mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
269 omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl); 260 omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);
270 261
271 mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG,
272 omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig);
273
274 mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, 262 mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
275 omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable); 263 omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);
276 264
@@ -280,20 +268,12 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
280} 268}
281static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi) 269static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
282{ 270{
283 clk_disable(mcspi->ick); 271 pm_runtime_put_sync(mcspi->dev);
284 clk_disable(mcspi->fck);
285} 272}
286 273
287static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) 274static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
288{ 275{
289 if (clk_enable(mcspi->ick)) 276 return pm_runtime_get_sync(mcspi->dev);
290 return -ENODEV;
291 if (clk_enable(mcspi->fck))
292 return -ENODEV;
293
294 omap2_mcspi_restore_ctx(mcspi);
295
296 return 0;
297} 277}
298 278
299static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) 279static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
@@ -819,8 +799,9 @@ static int omap2_mcspi_setup(struct spi_device *spi)
819 return ret; 799 return ret;
820 } 800 }
821 801
822 if (omap2_mcspi_enable_clocks(mcspi)) 802 ret = omap2_mcspi_enable_clocks(mcspi);
823 return -ENODEV; 803 if (ret < 0)
804 return ret;
824 805
825 ret = omap2_mcspi_setup_transfer(spi, NULL); 806 ret = omap2_mcspi_setup_transfer(spi, NULL);
826 omap2_mcspi_disable_clocks(mcspi); 807 omap2_mcspi_disable_clocks(mcspi);
@@ -863,10 +844,11 @@ static void omap2_mcspi_work(struct work_struct *work)
863 struct omap2_mcspi *mcspi; 844 struct omap2_mcspi *mcspi;
864 845
865 mcspi = container_of(work, struct omap2_mcspi, work); 846 mcspi = container_of(work, struct omap2_mcspi, work);
866 spin_lock_irq(&mcspi->lock);
867 847
868 if (omap2_mcspi_enable_clocks(mcspi)) 848 if (omap2_mcspi_enable_clocks(mcspi) < 0)
869 goto out; 849 return;
850
851 spin_lock_irq(&mcspi->lock);
870 852
871 /* We only enable one channel at a time -- the one whose message is 853 /* We only enable one channel at a time -- the one whose message is
872 * at the head of the queue -- although this controller would gladly 854 * at the head of the queue -- although this controller would gladly
@@ -979,10 +961,9 @@ static void omap2_mcspi_work(struct work_struct *work)
979 spin_lock_irq(&mcspi->lock); 961 spin_lock_irq(&mcspi->lock);
980 } 962 }
981 963
982 omap2_mcspi_disable_clocks(mcspi);
983
984out:
985 spin_unlock_irq(&mcspi->lock); 964 spin_unlock_irq(&mcspi->lock);
965
966 omap2_mcspi_disable_clocks(mcspi);
986} 967}
987 968
988static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) 969static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
@@ -1058,25 +1039,15 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
1058 return 0; 1039 return 0;
1059} 1040}
1060 1041
1061static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) 1042static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
1062{ 1043{
1063 struct spi_master *master = mcspi->master; 1044 struct spi_master *master = mcspi->master;
1064 u32 tmp; 1045 u32 tmp;
1046 int ret = 0;
1065 1047
1066 if (omap2_mcspi_enable_clocks(mcspi)) 1048 ret = omap2_mcspi_enable_clocks(mcspi);
1067 return -1; 1049 if (ret < 0)
1068 1050 return ret;
1069 mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
1070 OMAP2_MCSPI_SYSCONFIG_SOFTRESET);
1071 do {
1072 tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS);
1073 } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
1074
1075 tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE |
1076 OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP |
1077 OMAP2_MCSPI_SYSCONFIG_SMARTIDLE;
1078 mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, tmp);
1079 omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp;
1080 1051
1081 tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN; 1052 tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1082 mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp); 1053 mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
@@ -1087,91 +1058,26 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
1087 return 0; 1058 return 0;
1088} 1059}
1089 1060
1090static u8 __initdata spi1_rxdma_id [] = { 1061static int omap_mcspi_runtime_resume(struct device *dev)
1091 OMAP24XX_DMA_SPI1_RX0, 1062{
1092 OMAP24XX_DMA_SPI1_RX1, 1063 struct omap2_mcspi *mcspi;
1093 OMAP24XX_DMA_SPI1_RX2, 1064 struct spi_master *master;
1094 OMAP24XX_DMA_SPI1_RX3,
1095};
1096
1097static u8 __initdata spi1_txdma_id [] = {
1098 OMAP24XX_DMA_SPI1_TX0,
1099 OMAP24XX_DMA_SPI1_TX1,
1100 OMAP24XX_DMA_SPI1_TX2,
1101 OMAP24XX_DMA_SPI1_TX3,
1102};
1103
1104static u8 __initdata spi2_rxdma_id[] = {
1105 OMAP24XX_DMA_SPI2_RX0,
1106 OMAP24XX_DMA_SPI2_RX1,
1107};
1108
1109static u8 __initdata spi2_txdma_id[] = {
1110 OMAP24XX_DMA_SPI2_TX0,
1111 OMAP24XX_DMA_SPI2_TX1,
1112};
1113
1114#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
1115 || defined(CONFIG_ARCH_OMAP4)
1116static u8 __initdata spi3_rxdma_id[] = {
1117 OMAP24XX_DMA_SPI3_RX0,
1118 OMAP24XX_DMA_SPI3_RX1,
1119};
1120 1065
1121static u8 __initdata spi3_txdma_id[] = { 1066 master = dev_get_drvdata(dev);
1122 OMAP24XX_DMA_SPI3_TX0, 1067 mcspi = spi_master_get_devdata(master);
1123 OMAP24XX_DMA_SPI3_TX1, 1068 omap2_mcspi_restore_ctx(mcspi);
1124};
1125#endif
1126 1069
1127#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) 1070 return 0;
1128static u8 __initdata spi4_rxdma_id[] = { 1071}
1129 OMAP34XX_DMA_SPI4_RX0,
1130};
1131 1072
1132static u8 __initdata spi4_txdma_id[] = {
1133 OMAP34XX_DMA_SPI4_TX0,
1134};
1135#endif
1136 1073
1137static int __init omap2_mcspi_probe(struct platform_device *pdev) 1074static int __init omap2_mcspi_probe(struct platform_device *pdev)
1138{ 1075{
1139 struct spi_master *master; 1076 struct spi_master *master;
1077 struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data;
1140 struct omap2_mcspi *mcspi; 1078 struct omap2_mcspi *mcspi;
1141 struct resource *r; 1079 struct resource *r;
1142 int status = 0, i; 1080 int status = 0, i;
1143 const u8 *rxdma_id, *txdma_id;
1144 unsigned num_chipselect;
1145
1146 switch (pdev->id) {
1147 case 1:
1148 rxdma_id = spi1_rxdma_id;
1149 txdma_id = spi1_txdma_id;
1150 num_chipselect = 4;
1151 break;
1152 case 2:
1153 rxdma_id = spi2_rxdma_id;
1154 txdma_id = spi2_txdma_id;
1155 num_chipselect = 2;
1156 break;
1157#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
1158 || defined(CONFIG_ARCH_OMAP4)
1159 case 3:
1160 rxdma_id = spi3_rxdma_id;
1161 txdma_id = spi3_txdma_id;
1162 num_chipselect = 2;
1163 break;
1164#endif
1165#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
1166 case 4:
1167 rxdma_id = spi4_rxdma_id;
1168 txdma_id = spi4_txdma_id;
1169 num_chipselect = 1;
1170 break;
1171#endif
1172 default:
1173 return -EINVAL;
1174 }
1175 1081
1176 master = spi_alloc_master(&pdev->dev, sizeof *mcspi); 1082 master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
1177 if (master == NULL) { 1083 if (master == NULL) {
@@ -1188,7 +1094,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
1188 master->setup = omap2_mcspi_setup; 1094 master->setup = omap2_mcspi_setup;
1189 master->transfer = omap2_mcspi_transfer; 1095 master->transfer = omap2_mcspi_transfer;
1190 master->cleanup = omap2_mcspi_cleanup; 1096 master->cleanup = omap2_mcspi_cleanup;
1191 master->num_chipselect = num_chipselect; 1097 master->num_chipselect = pdata->num_cs;
1192 1098
1193 dev_set_drvdata(&pdev->dev, master); 1099 dev_set_drvdata(&pdev->dev, master);
1194 1100
@@ -1206,49 +1112,62 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
1206 goto err1; 1112 goto err1;
1207 } 1113 }
1208 1114
1115 r->start += pdata->regs_offset;
1116 r->end += pdata->regs_offset;
1209 mcspi->phys = r->start; 1117 mcspi->phys = r->start;
1210 mcspi->base = ioremap(r->start, r->end - r->start + 1); 1118 mcspi->base = ioremap(r->start, r->end - r->start + 1);
1211 if (!mcspi->base) { 1119 if (!mcspi->base) {
1212 dev_dbg(&pdev->dev, "can't ioremap MCSPI\n"); 1120 dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
1213 status = -ENOMEM; 1121 status = -ENOMEM;
1214 goto err1aa; 1122 goto err2;
1215 } 1123 }
1216 1124
1125 mcspi->dev = &pdev->dev;
1217 INIT_WORK(&mcspi->work, omap2_mcspi_work); 1126 INIT_WORK(&mcspi->work, omap2_mcspi_work);
1218 1127
1219 spin_lock_init(&mcspi->lock); 1128 spin_lock_init(&mcspi->lock);
1220 INIT_LIST_HEAD(&mcspi->msg_queue); 1129 INIT_LIST_HEAD(&mcspi->msg_queue);
1221 INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs); 1130 INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);
1222 1131
1223 mcspi->ick = clk_get(&pdev->dev, "ick");
1224 if (IS_ERR(mcspi->ick)) {
1225 dev_dbg(&pdev->dev, "can't get mcspi_ick\n");
1226 status = PTR_ERR(mcspi->ick);
1227 goto err1a;
1228 }
1229 mcspi->fck = clk_get(&pdev->dev, "fck");
1230 if (IS_ERR(mcspi->fck)) {
1231 dev_dbg(&pdev->dev, "can't get mcspi_fck\n");
1232 status = PTR_ERR(mcspi->fck);
1233 goto err2;
1234 }
1235
1236 mcspi->dma_channels = kcalloc(master->num_chipselect, 1132 mcspi->dma_channels = kcalloc(master->num_chipselect,
1237 sizeof(struct omap2_mcspi_dma), 1133 sizeof(struct omap2_mcspi_dma),
1238 GFP_KERNEL); 1134 GFP_KERNEL);
1239 1135
1240 if (mcspi->dma_channels == NULL) 1136 if (mcspi->dma_channels == NULL)
1241 goto err3; 1137 goto err2;
1138
1139 for (i = 0; i < master->num_chipselect; i++) {
1140 char dma_ch_name[14];
1141 struct resource *dma_res;
1142
1143 sprintf(dma_ch_name, "rx%d", i);
1144 dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1145 dma_ch_name);
1146 if (!dma_res) {
1147 dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
1148 status = -ENODEV;
1149 break;
1150 }
1242 1151
1243 for (i = 0; i < num_chipselect; i++) {
1244 mcspi->dma_channels[i].dma_rx_channel = -1; 1152 mcspi->dma_channels[i].dma_rx_channel = -1;
1245 mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i]; 1153 mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
1154 sprintf(dma_ch_name, "tx%d", i);
1155 dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1156 dma_ch_name);
1157 if (!dma_res) {
1158 dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
1159 status = -ENODEV;
1160 break;
1161 }
1162
1246 mcspi->dma_channels[i].dma_tx_channel = -1; 1163 mcspi->dma_channels[i].dma_tx_channel = -1;
1247 mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i]; 1164 mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
1248 } 1165 }
1249 1166
1250 if (omap2_mcspi_reset(mcspi) < 0) 1167 pm_runtime_enable(&pdev->dev);
1251 goto err4; 1168
1169 if (status || omap2_mcspi_master_setup(mcspi) < 0)
1170 goto err3;
1252 1171
1253 status = spi_register_master(master); 1172 status = spi_register_master(master);
1254 if (status < 0) 1173 if (status < 0)
@@ -1257,17 +1176,13 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
1257 return status; 1176 return status;
1258 1177
1259err4: 1178err4:
1260 kfree(mcspi->dma_channels); 1179 spi_master_put(master);
1261err3: 1180err3:
1262 clk_put(mcspi->fck); 1181 kfree(mcspi->dma_channels);
1263err2: 1182err2:
1264 clk_put(mcspi->ick);
1265err1a:
1266 iounmap(mcspi->base);
1267err1aa:
1268 release_mem_region(r->start, (r->end - r->start) + 1); 1183 release_mem_region(r->start, (r->end - r->start) + 1);
1184 iounmap(mcspi->base);
1269err1: 1185err1:
1270 spi_master_put(master);
1271 return status; 1186 return status;
1272} 1187}
1273 1188
@@ -1283,9 +1198,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
1283 mcspi = spi_master_get_devdata(master); 1198 mcspi = spi_master_get_devdata(master);
1284 dma_channels = mcspi->dma_channels; 1199 dma_channels = mcspi->dma_channels;
1285 1200
1286 clk_put(mcspi->fck); 1201 omap2_mcspi_disable_clocks(mcspi);
1287 clk_put(mcspi->ick);
1288
1289 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1202 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1290 release_mem_region(r->start, (r->end - r->start) + 1); 1203 release_mem_region(r->start, (r->end - r->start) + 1);
1291 1204
@@ -1336,6 +1249,7 @@ static int omap2_mcspi_resume(struct device *dev)
1336 1249
1337static const struct dev_pm_ops omap2_mcspi_pm_ops = { 1250static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1338 .resume = omap2_mcspi_resume, 1251 .resume = omap2_mcspi_resume,
1252 .runtime_resume = omap_mcspi_runtime_resume,
1339}; 1253};
1340 1254
1341static struct platform_driver omap2_mcspi_driver = { 1255static struct platform_driver omap2_mcspi_driver = {
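
Not part of the patch: since the driver now drops the hard-coded DMA request tables and the ick/fck clock handling, the platform side must provide named "rx%d"/"tx%d" DMA resources plus an omap2_mcspi_platform_config carrying num_cs and regs_offset. On OMAP2+ this really comes from the hwmod data; the explicit sketch below is illustrative only, and the addresses and DMA request numbers are placeholders.

/* Illustrative platform-device sketch, not part of this patch. */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <plat/mcspi.h>

static struct omap2_mcspi_platform_config mcspi1_pdata = {
	.num_cs		= 4,	/* McSPI1 exposes four chip-selects */
	.regs_offset	= 0,	/* OMAP4 hwmods pass a non-zero offset here */
};

static struct resource mcspi1_resources[] = {
	{ .start = 0x48098000, .end = 0x480981ff, .flags = IORESOURCE_MEM },
	{ .name = "rx0", .start = 35, .flags = IORESOURCE_DMA },	/* placeholder */
	{ .name = "tx0", .start = 36, .flags = IORESOURCE_DMA },	/* placeholder */
	/* ...one "rx%d"/"tx%d" pair per chip-select... */
};

static struct platform_device mcspi1_device = {
	.name		= "omap2_mcspi",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(mcspi1_resources),
	.resource	= mcspi1_resources,
	.dev		= {
		.platform_data	= &mcspi1_pdata,
	},
};
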
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
new file mode 100644
index 000000000000..8390efc457eb
--- /dev/null
+++ b/include/linux/hwspinlock.h
@@ -0,0 +1,292 @@
1/*
2 * Hardware spinlock public header
3 *
4 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#ifndef __LINUX_HWSPINLOCK_H
19#define __LINUX_HWSPINLOCK_H
20
21#include <linux/err.h>
22#include <linux/sched.h>
23
24/* hwspinlock mode argument */
25#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
26#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
27
28struct hwspinlock;
29
30#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)
31
32int hwspin_lock_register(struct hwspinlock *lock);
33struct hwspinlock *hwspin_lock_unregister(unsigned int id);
34struct hwspinlock *hwspin_lock_request(void);
35struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
36int hwspin_lock_free(struct hwspinlock *hwlock);
37int hwspin_lock_get_id(struct hwspinlock *hwlock);
38int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
39 unsigned long *);
40int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
41void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
42
43#else /* !CONFIG_HWSPINLOCK */
44
45/*
46 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
47 * enabled. We prefer to silently succeed in this case, and let the
48 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
49 * required on a given setup, user code will still build and work.
50 *
51 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which
52 * we _do_ want users to fail (no point in registering hwspinlock instances if
53 * the framework is not available).
54 *
55 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
56 * users. Callers that do care can still check the return value with IS_ERR().
57 */
58static inline struct hwspinlock *hwspin_lock_request(void)
59{
60 return ERR_PTR(-ENODEV);
61}
62
63static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
64{
65 return ERR_PTR(-ENODEV);
66}
67
68static inline int hwspin_lock_free(struct hwspinlock *hwlock)
69{
70 return 0;
71}
72
73static inline
74int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
75 int mode, unsigned long *flags)
76{
77 return 0;
78}
79
80static inline
81int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
82{
83 return 0;
84}
85
86static inline
87void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
88{
89
90}
91
92static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
93{
94 return 0;
95}
96
97static inline int hwspin_lock_register(struct hwspinlock *hwlock)
98{
99 return -ENODEV;
100}
101
102static inline struct hwspinlock *hwspin_lock_unregister(unsigned int id)
103{
104 return NULL;
105}
106
107#endif /* !CONFIG_HWSPINLOCK */
108
109/**
110 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
111 * @hwlock: an hwspinlock which we want to trylock
112 * @flags: a pointer to where the caller's interrupt state will be saved
113 *
114 * This function attempts to lock the underlying hwspinlock, and will
115 * immediately fail if the hwspinlock is already locked.
116 *
117 * Upon a successful return from this function, preemption and local
118 * interrupts are disabled (previous interrupts state is saved at @flags),
119 * so the caller must not sleep, and is advised to release the hwspinlock
120 * as soon as possible.
121 *
122 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
123 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
124 */
125static inline
126int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
127{
128 return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
129}
130
131/**
132 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
133 * @hwlock: an hwspinlock which we want to trylock
134 *
135 * This function attempts to lock the underlying hwspinlock, and will
136 * immediately fail if the hwspinlock is already locked.
137 *
138 * Upon a successful return from this function, preemption and local
139 * interrupts are disabled, so the caller must not sleep, and is advised
140 * to release the hwspinlock as soon as possible.
141 *
142 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
143 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
144 */
145static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
146{
147 return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
148}
149
150/**
151 * hwspin_trylock() - attempt to lock a specific hwspinlock
152 * @hwlock: an hwspinlock which we want to trylock
153 *
154 * This function attempts to lock an hwspinlock, and will immediately fail
155 * if the hwspinlock is already taken.
156 *
157 * Upon a successful return from this function, preemption is disabled,
158 * so the caller must not sleep, and is advised to release the hwspinlock
159 * as soon as possible. This is required in order to minimize remote cores
160 * polling on the hardware interconnect.
161 *
162 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
163 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
164 */
165static inline int hwspin_trylock(struct hwspinlock *hwlock)
166{
167 return __hwspin_trylock(hwlock, 0, NULL);
168}
169
170/**
171 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
172 * @hwlock: the hwspinlock to be locked
173 * @to: timeout value in msecs
174 * @flags: a pointer to where the caller's interrupt state will be saved
175 *
176 * This function locks the underlying @hwlock. If the @hwlock
177 * is already taken, the function will busy loop waiting for it to
178 * be released, but give up when @to msecs have elapsed.
179 *
180 * Upon a successful return from this function, preemption and local interrupts
181 * are disabled (plus previous interrupt state is saved), so the caller must
182 * not sleep, and is advised to release the hwspinlock as soon as possible.
183 *
184 * Returns 0 when the @hwlock was successfully taken, and an appropriate
185 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
186 * busy after @to msecs). The function will never sleep.
187 */
188static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
189 unsigned int to, unsigned long *flags)
190{
191 return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
192}
193
194/**
195 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
196 * @hwlock: the hwspinlock to be locked
197 * @to: timeout value in msecs
198 *
199 * This function locks the underlying @hwlock. If the @hwlock
200 * is already taken, the function will busy loop waiting for it to
201 * be released, but give up when @to msecs have elapsed.
202 *
203 * Upon a successful return from this function, preemption and local interrupts
204 * are disabled so the caller must not sleep, and is advised to release the
205 * hwspinlock as soon as possible.
206 *
207 * Returns 0 when the @hwlock was successfully taken, and an appropriate
208 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
209 * busy after @to msecs). The function will never sleep.
210 */
211static inline
212int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
213{
214 return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
215}
216
217/**
218 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
219 * @hwlock: the hwspinlock to be locked
220 * @to: timeout value in msecs
221 *
222 * This function locks the underlying @hwlock. If the @hwlock
223 * is already taken, the function will busy loop waiting for it to
224 * be released, but give up when @to msecs have elapsed.
225 *
226 * Upon a successful return from this function, preemption is disabled
227 * so the caller must not sleep, and is advised to release the hwspinlock
228 * as soon as possible.
229 * This is required in order to minimize remote cores polling on the
230 * hardware interconnect.
231 *
232 * Returns 0 when the @hwlock was successfully taken, and an appropriate
233 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
234 * busy after @to msecs). The function will never sleep.
235 */
236static inline
237int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
238{
239 return __hwspin_lock_timeout(hwlock, to, 0, NULL);
240}
241
242/**
243 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
244 * @hwlock: a previously-acquired hwspinlock which we want to unlock
245 * @flags: previous caller's interrupt state to restore
246 *
247 * This function will unlock a specific hwspinlock, enable preemption and
248 * restore the previous state of the local interrupts. It should be used
249 * to undo, e.g., hwspin_trylock_irqsave().
250 *
251 * @hwlock must be already locked before calling this function: it is a bug
252 * to call unlock on a @hwlock that is already unlocked.
253 */
254static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
255 unsigned long *flags)
256{
257 __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
258}
259
260/**
261 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
262 * @hwlock: a previously-acquired hwspinlock which we want to unlock
263 *
264 * This function will unlock a specific hwspinlock, enable preemption and
265 * enable local interrupts. Should be used to undo hwspin_lock_irq().
266 *
267 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
268 * calling this function: it is a bug to call unlock on a @hwlock that is
269 * already unlocked.
270 */
271static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
272{
273 __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
274}
275
276/**
277 * hwspin_unlock() - unlock hwspinlock
278 * @hwlock: a previously-acquired hwspinlock which we want to unlock
279 *
280 * This function will unlock a specific hwspinlock and enable preemption
281 * back.
282 *
283 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
284 * this function: it is a bug to call unlock on a @hwlock that is already
285 * unlocked.
286 */
287static inline void hwspin_unlock(struct hwspinlock *hwlock)
288{
289 __hwspin_unlock(hwlock, 0, NULL);
290}
291
292#endif /* __LINUX_HWSPINLOCK_H */
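
Not part of the patch: a minimal client-side sketch of the API declared above, in line with the Documentation/hwspinlock.txt added by this series. The shared structure being protected and the 10 msec timeout are arbitrary choices for illustration.

/* Illustrative client usage, not part of this patch. */
#include <linux/err.h>
#include <linux/hwspinlock.h>

static struct hwspinlock *hwlock;

static int touch_shared_state(void)
{
	unsigned long flags;
	int ret;

	hwlock = hwspin_lock_request();	/* or hwspin_lock_request_specific(id) */
	if (IS_ERR_OR_NULL(hwlock))
		return -EBUSY;

	/* spin for at most 10 msecs, with local interrupts disabled while held */
	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
	if (ret == 0) {
		/* ...briefly update the shared data structure here... */
		hwspin_unlock_irqrestore(hwlock, &flags);
	}

	hwspin_lock_free(hwlock);
	return ret;
}
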
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index cd6f3b431195..d60130f88eed 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -168,6 +168,7 @@
168#define ONENAND_SYS_CFG1_INT (1 << 6) 168#define ONENAND_SYS_CFG1_INT (1 << 6)
169#define ONENAND_SYS_CFG1_IOBE (1 << 5) 169#define ONENAND_SYS_CFG1_IOBE (1 << 5)
170#define ONENAND_SYS_CFG1_RDY_CONF (1 << 4) 170#define ONENAND_SYS_CFG1_RDY_CONF (1 << 4)
171#define ONENAND_SYS_CFG1_VHF (1 << 3)
171#define ONENAND_SYS_CFG1_HF (1 << 2) 172#define ONENAND_SYS_CFG1_HF (1 << 2)
172#define ONENAND_SYS_CFG1_SYNC_WRITE (1 << 1) 173#define ONENAND_SYS_CFG1_SYNC_WRITE (1 << 1)
173 174